Diffstat (limited to 'drivers/net/ipa/ipa_endpoint.c')
-rw-r--r-- | drivers/net/ipa/ipa_endpoint.c | 38
1 file changed, 19 insertions, 19 deletions
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index 7a46c790afbe..7209ee3c3124 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -174,9 +174,6 @@ static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
 	enum ipa_endpoint_name name;
 	u32 limit;
 
-	/* Not sure where this constraint come from... */
-	BUILD_BUG_ON(sizeof(struct ipa_status) % 4);
-
 	if (count > IPA_ENDPOINT_COUNT) {
 		dev_err(dev, "too many endpoints specified (%u > %u)\n",
 			count, IPA_ENDPOINT_COUNT);
@@ -1020,31 +1017,34 @@ err_free_pages:
 }
 
 /**
- * ipa_endpoint_replenish() - Replenish the Rx packets cache.
+ * ipa_endpoint_replenish() - Replenish endpoint receive buffers
  * @endpoint:	Endpoint to be replenished
- * @count:	Number of buffers to send to hardware
+ * @add_one:	Whether this is replacing a just-consumed buffer
  *
- * Allocate RX packet wrapper structures with maximal socket buffers
- * for an endpoint.  These are supplied to the hardware, which fills
- * them with incoming data.
+ * The IPA hardware can hold a fixed number of receive buffers for an RX
+ * endpoint, based on the number of entries in the underlying channel ring
+ * buffer.  If an endpoint's "backlog" is non-zero, it indicates how many
+ * more receive buffers can be supplied to the hardware.  Replenishing for
+ * an endpoint can be disabled, in which case requests to replenish a
+ * buffer are "saved", and transferred to the backlog once it is re-enabled
+ * again.
  */
-static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, u32 count)
+static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, bool add_one)
 {
 	struct gsi *gsi;
 	u32 backlog;
 
 	if (!endpoint->replenish_enabled) {
-		if (count)
-			atomic_add(count, &endpoint->replenish_saved);
+		if (add_one)
+			atomic_inc(&endpoint->replenish_saved);
 		return;
 	}
 
-
 	while (atomic_dec_not_zero(&endpoint->replenish_backlog))
 		if (ipa_endpoint_replenish_one(endpoint))
 			goto try_again_later;
-	if (count)
-		atomic_add(count, &endpoint->replenish_backlog);
+	if (add_one)
+		atomic_inc(&endpoint->replenish_backlog);
 
 	return;
 
@@ -1052,8 +1052,8 @@ try_again_later:
 	/* The last one didn't succeed, so fix the backlog */
 	backlog = atomic_inc_return(&endpoint->replenish_backlog);
 
-	if (count)
-		atomic_add(count, &endpoint->replenish_backlog);
+	if (add_one)
+		atomic_inc(&endpoint->replenish_backlog);
 
 	/* Whenever a receive buffer transaction completes we'll try to
 	 * replenish again.  It's unlikely, but if we fail to supply even
@@ -1080,7 +1080,7 @@ static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
 	/* Start replenishing if hardware currently has no buffers */
 	max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id);
 	if (atomic_read(&endpoint->replenish_backlog) == max_backlog)
-		ipa_endpoint_replenish(endpoint, 0);
+		ipa_endpoint_replenish(endpoint, false);
 }
 
 static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
@@ -1099,7 +1099,7 @@ static void ipa_endpoint_replenish_work(struct work_struct *work)
 
 	endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);
 
-	ipa_endpoint_replenish(endpoint, 0);
+	ipa_endpoint_replenish(endpoint, false);
 }
 
 static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
@@ -1300,7 +1300,7 @@ static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
 {
 	struct page *page;
 
-	ipa_endpoint_replenish(endpoint, 1);
+	ipa_endpoint_replenish(endpoint, true);
 
 	if (trans->cancelled)
 		return;
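The rewritten kernel-doc above describes the accounting this patch simplifies: each completed receive transaction frees exactly one ring slot, so a caller only ever asks to replace one buffer, and a Boolean is enough. Below is a minimal, self-contained user-space sketch of that bookkeeping using C11 atomics. It is an illustration only, not the driver's code: struct endpoint_model, fill_one(), and replenish() are hypothetical stand-ins for struct ipa_endpoint, ipa_endpoint_replenish_one(), and ipa_endpoint_replenish(), and the kernel's atomic_dec_not_zero() loop is approximated with a plain load/subtract for single-threaded clarity.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for struct ipa_endpoint's replenish fields */
struct endpoint_model {
	bool replenish_enabled;
	atomic_uint replenish_saved;	/* buffers "owed" while replenish is disabled */
	atomic_uint replenish_backlog;	/* ring slots the hardware can still accept */
};

/* Hypothetical stand-in for ipa_endpoint_replenish_one(); pretend it
 * hands one buffer to the hardware and reports success.
 */
static bool fill_one(struct endpoint_model *ep)
{
	(void)ep;
	return true;
}

/* Model of the accounting: each call replaces at most one consumed buffer,
 * so a Boolean parameter is sufficient.
 */
static void replenish(struct endpoint_model *ep, bool add_one)
{
	if (!ep->replenish_enabled) {
		if (add_one)
			atomic_fetch_add(&ep->replenish_saved, 1);
		return;
	}

	/* Drain the backlog, supplying one buffer per available slot */
	while (atomic_load(&ep->replenish_backlog)) {
		atomic_fetch_sub(&ep->replenish_backlog, 1);
		if (!fill_one(ep)) {
			/* Put the slot back; a later pass will retry */
			atomic_fetch_add(&ep->replenish_backlog, 1);
			break;
		}
	}
	if (add_one)
		atomic_fetch_add(&ep->replenish_backlog, 1);
}

int main(void)
{
	struct endpoint_model ep = { .replenish_enabled = true };

	atomic_store(&ep.replenish_backlog, 4);	/* four free ring slots */

	replenish(&ep, false);	/* enable-time top-up, nothing consumed */
	replenish(&ep, true);	/* a received buffer was just consumed */
	printf("backlog now %u\n", atomic_load(&ep.replenish_backlog));
	return 0;
}

Compiled and run, the sketch drains the four-slot backlog on the enable-time call and leaves a backlog of one after the completion-time call, mirroring how the driver defers refilling that last slot to a later replenish pass.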