Diffstat (limited to 'drivers/net/ipa/ipa_endpoint.c')
-rw-r--r--	drivers/net/ipa/ipa_endpoint.c	217
1 file changed, 101 insertions(+), 116 deletions(-)
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index 68291a3efd04..888e94278a84 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -25,10 +25,8 @@
 #define atomic_dec_not_zero(v)	atomic_add_unless((v), -1, 0)
 
-#define IPA_REPLENISH_BATCH	16
-
-/* RX buffer is 1 page (or a power-of-2 contiguous pages) */
-#define IPA_RX_BUFFER_SIZE	8192	/* PAGE_SIZE > 4096 wastes a LOT */
+/* Hardware is told about receive buffers once a "batch" has been queued */
+#define IPA_REPLENISH_BATCH	16	/* Must be non-zero */
 
 /* The amount of RX buffer space consumed by standard skb overhead */
 #define IPA_RX_BUFFER_OVERHEAD	(PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
 
@@ -75,6 +73,14 @@ struct ipa_status {
 #define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK	GENMASK(31, 22)
 #define IPA_STATUS_FLAGS2_TAG_FMASK		GENMASK_ULL(63, 16)
 
+static u32 aggr_byte_limit_max(enum ipa_version version)
+{
+	if (version < IPA_VERSION_4_5)
+		return field_max(aggr_byte_limit_fmask(true));
+
+	return field_max(aggr_byte_limit_fmask(false));
+}
+
 static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
			    const struct ipa_gsi_endpoint_data *all_data,
			    const struct ipa_gsi_endpoint_data *data)
@@ -87,6 +93,9 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
 		return true;
 
 	if (!data->toward_ipa) {
+		u32 buffer_size;
+		u32 limit;
+
 		if (data->endpoint.filter_support) {
 			dev_err(dev, "filtering not supported for "
 					"RX endpoint %u\n",
@@ -94,6 +103,41 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
 			return false;
 		}
 
+		/* Nothing more to check for non-AP RX */
+		if (data->ee_id != GSI_EE_AP)
+			return true;
+
+		buffer_size = data->endpoint.config.rx.buffer_size;
+		/* The buffer size must hold an MTU plus overhead */
+		limit = IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
+		if (buffer_size < limit) {
+			dev_err(dev, "RX buffer size too small for RX endpoint %u (%u < %u)\n",
+				data->endpoint_id, buffer_size, limit);
+			return false;
+		}
+
+		/* For an endpoint supporting receive aggregation, the
+		 * aggregation byte limit defines the point at which an
+		 * aggregation window will close.  It is programmed into the
+		 * IPA hardware as a number of KB.  We don't use "hard byte
+		 * limit" aggregation, so we need to supply enough space in
+		 * a receive buffer to hold a complete MTU plus normal skb
+		 * overhead *after* that aggregation byte limit has been
+		 * crossed.
+		 *
+		 * This check just ensures the receive buffer size doesn't
+		 * exceed what's representable in the aggregation limit field.
+		 */
+		if (data->endpoint.config.aggregation) {
+			limit += SZ_1K * aggr_byte_limit_max(ipa->version);
+			if (buffer_size > limit) {
+				dev_err(dev, "RX buffer size too large for aggregated RX endpoint %u (%u > %u)\n",
+					data->endpoint_id, buffer_size, limit);
+
+				return false;
+			}
+		}
+
 		return true;	/* Nothing more to check for RX */
 	}
 
@@ -156,21 +200,12 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
 	return true;
 }
 
-static u32 aggr_byte_limit_max(enum ipa_version version)
-{
-	if (version < IPA_VERSION_4_5)
-		return field_max(aggr_byte_limit_fmask(true));
-
-	return field_max(aggr_byte_limit_fmask(false));
-}
-
 static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
				    const struct ipa_gsi_endpoint_data *data)
 {
 	const struct ipa_gsi_endpoint_data *dp = data;
 	struct device *dev = &ipa->pdev->dev;
 	enum ipa_endpoint_name name;
-	u32 limit;
 
 	if (count > IPA_ENDPOINT_COUNT) {
 		dev_err(dev, "too many endpoints specified (%u > %u)\n",
@@ -178,26 +213,6 @@ static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
 		return false;
 	}
 
-	/* The aggregation byte limit defines the point at which an
-	 * aggregation window will close.  It is programmed into the
-	 * IPA hardware as a number of KB.  We don't use "hard byte
-	 * limit" aggregation, which means that we need to supply
-	 * enough space in a receive buffer to hold a complete MTU
-	 * plus normal skb overhead *after* that aggregation byte
-	 * limit has been crossed.
-	 *
-	 * This check ensures we don't define a receive buffer size
-	 * that would exceed what we can represent in the field that
-	 * is used to program its size.
-	 */
-	limit = aggr_byte_limit_max(ipa->version) * SZ_1K;
-	limit += IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
-	if (limit < IPA_RX_BUFFER_SIZE) {
-		dev_err(dev, "buffer size too big for aggregation (%u > %u)\n",
-			IPA_RX_BUFFER_SIZE, limit);
-		return false;
-	}
-
 	/* Make sure needed endpoints have defined data */
 	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
 		dev_err(dev, "command TX endpoint not defined\n");
@@ -723,13 +738,15 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
 
 	if (endpoint->data->aggregation) {
 		if (!endpoint->toward_ipa) {
+			const struct ipa_endpoint_rx_data *rx_data;
 			bool close_eof;
 			u32 limit;
 
+			rx_data = &endpoint->data->rx;
 			val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
 			val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
 
-			limit = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE);
+			limit = ipa_aggr_size_kb(rx_data->buffer_size);
 			val |= aggr_byte_limit_encoded(version, limit);
 
 			limit = IPA_AGGR_TIME_LIMIT;
@@ -737,7 +754,7 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
 
 			/* AGGR_PKT_LIMIT is 0 (unlimited) */
 
-			close_eof = endpoint->data->rx.aggr_close_eof;
+			close_eof = rx_data->aggr_close_eof;
 			val |= aggr_sw_eof_active_encoded(version, close_eof);
 
 			/* AGGR_HARD_BYTE_LIMIT_ENABLE is 0 */
@@ -1020,134 +1037,98 @@ static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
 	iowrite32(val, ipa->reg_virt + offset);
 }
 
-static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint)
+static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint,
+				      struct gsi_trans *trans)
 {
-	struct gsi_trans *trans;
-	bool doorbell = false;
 	struct page *page;
+	u32 buffer_size;
 	u32 offset;
 	u32 len;
 	int ret;
 
-	page = dev_alloc_pages(get_order(IPA_RX_BUFFER_SIZE));
+	buffer_size = endpoint->data->rx.buffer_size;
+	page = dev_alloc_pages(get_order(buffer_size));
 	if (!page)
 		return -ENOMEM;
 
-	trans = ipa_endpoint_trans_alloc(endpoint, 1);
-	if (!trans)
-		goto err_free_pages;
-
 	/* Offset the buffer to make space for skb headroom */
 	offset = NET_SKB_PAD;
-	len = IPA_RX_BUFFER_SIZE - offset;
+	len = buffer_size - offset;
 
 	ret = gsi_trans_page_add(trans, page, len, offset);
 	if (ret)
-		goto err_trans_free;
-	trans->data = page;	/* transaction owns page now */
-
-	if (++endpoint->replenish_ready == IPA_REPLENISH_BATCH) {
-		doorbell = true;
-		endpoint->replenish_ready = 0;
-	}
-
-	gsi_trans_commit(trans, doorbell);
-
-	return 0;
-
-err_trans_free:
-	gsi_trans_free(trans);
-err_free_pages:
-	__free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
+		__free_pages(page, get_order(buffer_size));
+	else
+		trans->data = page;	/* transaction owns page now */
 
-	return -ENOMEM;
+	return ret;
 }
 
 /**
  * ipa_endpoint_replenish() - Replenish endpoint receive buffers
  * @endpoint:	Endpoint to be replenished
- * @add_one:	Whether this is replacing a just-consumed buffer
  *
  * The IPA hardware can hold a fixed number of receive buffers for an RX
 * endpoint, based on the number of entries in the underlying channel ring
 * buffer.  If an endpoint's "backlog" is non-zero, it indicates how many
 * more receive buffers can be supplied to the hardware.  Replenishing for
- * an endpoint can be disabled, in which case requests to replenish a
- * buffer are "saved", and transferred to the backlog once it is re-enabled
- * again.
+ * an endpoint can be disabled, in which case buffers are not queued to
+ * the hardware.
  */
-static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, bool add_one)
+static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint)
 {
-	struct gsi *gsi;
-	u32 backlog;
-	int delta;
+	struct gsi_trans *trans;
 
-	if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags)) {
-		if (add_one)
-			atomic_inc(&endpoint->replenish_saved);
+	if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags))
 		return;
-	}
 
-	/* If already active, just update the backlog */
-	if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags)) {
-		if (add_one)
-			atomic_inc(&endpoint->replenish_backlog);
+	/* Skip it if it's already active */
+	if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags))
 		return;
-	}
 
-	while (atomic_dec_not_zero(&endpoint->replenish_backlog))
-		if (ipa_endpoint_replenish_one(endpoint))
+	while ((trans = ipa_endpoint_trans_alloc(endpoint, 1))) {
+		bool doorbell;
+
+		if (ipa_endpoint_replenish_one(endpoint, trans))
 			goto try_again_later;
 
-	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
-
-	if (add_one)
-		atomic_inc(&endpoint->replenish_backlog);
+		/* Ring the doorbell if we've got a full batch */
+		doorbell = !(++endpoint->replenish_count % IPA_REPLENISH_BATCH);
+		gsi_trans_commit(trans, doorbell);
+	}
+
+	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
 
 	return;
 
 try_again_later:
+	gsi_trans_free(trans);
 	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
 
-	/* The last one didn't succeed, so fix the backlog */
-	delta = add_one ? 2 : 1;
-	backlog = atomic_add_return(delta, &endpoint->replenish_backlog);
-
 	/* Whenever a receive buffer transaction completes we'll try to
 	 * replenish again.  It's unlikely, but if we fail to supply even
 	 * one buffer, nothing will trigger another replenish attempt.
-	 * Receive buffer transactions use one TRE, so schedule work to
-	 * try replenishing again if our backlog is *all* available TREs.
+	 * If the hardware has no receive buffers queued, schedule work to
+	 * try replenishing again.
 	 */
-	gsi = &endpoint->ipa->gsi;
-	if (backlog == gsi_channel_tre_max(gsi, endpoint->channel_id))
+	if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
 		schedule_delayed_work(&endpoint->replenish_work,
 				      msecs_to_jiffies(1));
 }
 
 static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
 {
-	struct gsi *gsi = &endpoint->ipa->gsi;
-	u32 max_backlog;
-	u32 saved;
-
 	set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
-	while ((saved = atomic_xchg(&endpoint->replenish_saved, 0)))
-		atomic_add(saved, &endpoint->replenish_backlog);
 
 	/* Start replenishing if hardware currently has no buffers */
-	max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id);
-	if (atomic_read(&endpoint->replenish_backlog) == max_backlog)
-		ipa_endpoint_replenish(endpoint, false);
+	if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
+		ipa_endpoint_replenish(endpoint);
 }
 
 static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
 {
-	u32 backlog;
-
 	clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
-	while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0)))
-		atomic_add(backlog, &endpoint->replenish_saved);
 }
 
@@ -1157,7 +1138,7 @@ static void ipa_endpoint_replenish_work(struct work_struct *work)
 
 	endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);
 
-	ipa_endpoint_replenish(endpoint, false);
+	ipa_endpoint_replenish(endpoint);
 }
 
 static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
@@ -1183,15 +1164,16 @@ static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
 static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
 				   struct page *page, u32 len)
 {
+	u32 buffer_size = endpoint->data->rx.buffer_size;
 	struct sk_buff *skb;
 
 	/* Nothing to do if there's no netdev */
 	if (!endpoint->netdev)
 		return false;
 
-	WARN_ON(len > SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE - NET_SKB_PAD));
+	WARN_ON(len > SKB_WITH_OVERHEAD(buffer_size - NET_SKB_PAD));
 
-	skb = build_skb(page_address(page), IPA_RX_BUFFER_SIZE);
+	skb = build_skb(page_address(page), buffer_size);
 	if (skb) {
 		/* Reserve the headroom and account for the data */
 		skb_reserve(skb, NET_SKB_PAD);
@@ -1289,8 +1271,9 @@ static bool ipa_endpoint_status_drop(struct ipa_endpoint *endpoint,
 static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
 				      struct page *page, u32 total_len)
 {
+	u32 buffer_size = endpoint->data->rx.buffer_size;
 	void *data = page_address(page) + NET_SKB_PAD;
-	u32 unused = IPA_RX_BUFFER_SIZE - total_len;
+	u32 unused = buffer_size - total_len;
 	u32 resid = total_len;
 
 	while (resid) {
@@ -1360,10 +1343,8 @@ static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
 {
 	struct page *page;
 
-	ipa_endpoint_replenish(endpoint, true);
-
 	if (trans->cancelled)
-		return;
+		goto done;
 
 	/* Parse or build a socket buffer using the actual received length */
 	page = trans->data;
@@ -1371,6 +1352,8 @@ static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
 		ipa_endpoint_status_parse(endpoint, page, trans->len);
 	else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
 		trans->data = NULL;	/* Pages have been consumed */
+done:
+	ipa_endpoint_replenish(endpoint);
 }
 
 void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
@@ -1398,8 +1381,11 @@ void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
 	} else {
 		struct page *page = trans->data;
 
-		if (page)
-			__free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
+		if (page) {
+			u32 buffer_size = endpoint->data->rx.buffer_size;
+
+			__free_pages(page, get_order(buffer_size));
+		}
 	}
 }
 
@@ -1704,9 +1690,6 @@ static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
 		 */
 		clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
 		clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
-		atomic_set(&endpoint->replenish_saved,
-			   gsi_channel_tre_max(gsi, endpoint->channel_id));
-		atomic_set(&endpoint->replenish_backlog, 0);
 		INIT_DELAYED_WORK(&endpoint->replenish_work,
 				  ipa_endpoint_replenish_work);
 	}
@@ -1882,6 +1865,8 @@ u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
 	enum ipa_endpoint_name name;
 	u32 filter_map;
 
+	BUILD_BUG_ON(!IPA_REPLENISH_BATCH);
+
 	if (!ipa_endpoint_data_valid(ipa, count, data))
 		return 0;	/* Error */
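
Note (not part of the commit): the per-endpoint check the diff adds to ipa_endpoint_data_valid_one() is plain arithmetic on the configured buffer size, so it can be illustrated standalone. The sketch below mirrors that logic under assumed values; EXAMPLE_MTU, EXAMPLE_RX_OVERHEAD and EXAMPLE_AGGR_KB_MAX are hypothetical stand-ins for IPA_MTU, IPA_RX_BUFFER_OVERHEAD and field_max(aggr_byte_limit_fmask()), whose real values depend on the kernel headers and IPA version.

#include <stdbool.h>
#include <stdio.h>

#define EXAMPLE_MTU		1500U	/* stand-in for IPA_MTU */
#define EXAMPLE_RX_OVERHEAD	320U	/* stand-in for IPA_RX_BUFFER_OVERHEAD */
#define EXAMPLE_AGGR_KB_MAX	63U	/* stand-in for the aggr byte limit field max, in KB */
#define SZ_1K			1024U

/* Mirrors the two RX buffer size checks the commit adds: the buffer must
 * hold an MTU plus skb overhead, and an aggregating endpoint's buffer must
 * not exceed what the aggregation byte limit field can represent plus that
 * same MTU-plus-overhead headroom.
 */
static bool rx_buffer_size_valid(unsigned int buffer_size, bool aggregation)
{
	unsigned int limit = EXAMPLE_MTU + EXAMPLE_RX_OVERHEAD;

	if (buffer_size < limit)
		return false;		/* too small to hold one MTU */

	if (aggregation) {
		limit += SZ_1K * EXAMPLE_AGGR_KB_MAX;
		if (buffer_size > limit)
			return false;	/* won't fit in the aggregation limit field */
	}

	return true;
}

int main(void)
{
	printf("8192, aggregated: %s\n", rx_buffer_size_valid(8192, true) ? "ok" : "bad");
	printf("1024, aggregated: %s\n", rx_buffer_size_valid(1024, true) ? "ok" : "bad");
	return 0;
}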
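Note (not part of the commit): the reworked replenish loop rings the channel doorbell only once per IPA_REPLENISH_BATCH committed buffers, using the replenish_count modulo test shown in the diff. The standalone sketch below reproduces just that batching arithmetic; commit_one_buffer() is a hypothetical helper standing in for the gsi_trans_commit() call inside ipa_endpoint_replenish().

#include <stdbool.h>
#include <stdio.h>

#define IPA_REPLENISH_BATCH	16	/* must be non-zero, as the BUILD_BUG_ON enforces */

static unsigned int replenish_count;	/* stand-in for endpoint->replenish_count */

/* Returns true when the doorbell would be rung, i.e. after every
 * IPA_REPLENISH_BATCH queued receive buffers.
 */
static bool commit_one_buffer(void)
{
	return !(++replenish_count % IPA_REPLENISH_BATCH);
}

int main(void)
{
	for (int i = 1; i <= 40; i++)
		if (commit_one_buffer())
			printf("doorbell rung after buffer %d\n", i);	/* 16 and 32 */

	return 0;
}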