Diffstat (limited to 'drivers/net/ethernet/ibm/ibmvnic.c')
-rw-r--r--	drivers/net/ethernet/ibm/ibmvnic.c	626
1 file changed, 386 insertions(+), 240 deletions(-)
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 1b3cc8bb0705..b492af6affc3 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -90,7 +90,7 @@ MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
 static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
 
 static int ibmvnic_remove(struct vio_dev *);
-static void release_sub_crqs(struct ibmvnic_adapter *);
+static void release_sub_crqs(struct ibmvnic_adapter *, bool);
 static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
@@ -111,7 +111,7 @@ static int ibmvnic_poll(struct napi_struct *napi, int data);
 static void send_map_query(struct ibmvnic_adapter *adapter);
 static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
 static void send_request_unmap(struct ibmvnic_adapter *, u8);
-static void send_login(struct ibmvnic_adapter *adapter);
+static int send_login(struct ibmvnic_adapter *adapter);
 static void send_cap_queries(struct ibmvnic_adapter *adapter);
 static int init_sub_crqs(struct ibmvnic_adapter *);
 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
@@ -361,14 +361,14 @@ static void release_stats_buffers(struct ibmvnic_adapter *adapter)
 static int init_stats_buffers(struct ibmvnic_adapter *adapter)
 {
 	adapter->tx_stats_buffers =
-				kcalloc(adapter->req_tx_queues,
+				kcalloc(IBMVNIC_MAX_QUEUES,
 					sizeof(struct ibmvnic_tx_queue_stats),
 					GFP_KERNEL);
 	if (!adapter->tx_stats_buffers)
 		return -ENOMEM;
 
 	adapter->rx_stats_buffers =
-				kcalloc(adapter->req_rx_queues,
+				kcalloc(IBMVNIC_MAX_QUEUES,
 					sizeof(struct ibmvnic_rx_queue_stats),
 					GFP_KERNEL);
 	if (!adapter->rx_stats_buffers)
@@ -509,7 +509,7 @@ static int init_rx_pools(struct net_device *netdev)
 		return -1;
 	}
 
-	adapter->num_active_rx_pools = 0;
+	adapter->num_active_rx_pools = rxadd_subcrqs;
 
 	for (i = 0; i < rxadd_subcrqs; i++) {
 		rx_pool = &adapter->rx_pool[i];
@@ -554,41 +554,44 @@ static int init_rx_pools(struct net_device *netdev)
 		rx_pool->next_free = 0;
 	}
 
-	adapter->num_active_rx_pools = rxadd_subcrqs;
+	return 0;
+}
+
+static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
+			     struct ibmvnic_tx_pool *tx_pool)
+{
+	int rc, i;
+
+	rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
+	if (rc)
+		return rc;
+
+	memset(tx_pool->tx_buff, 0,
+	       tx_pool->num_buffers *
+	       sizeof(struct ibmvnic_tx_buff));
+
+	for (i = 0; i < tx_pool->num_buffers; i++)
+		tx_pool->free_map[i] = i;
+
+	tx_pool->consumer_index = 0;
+	tx_pool->producer_index = 0;
 
 	return 0;
 }
 
 static int reset_tx_pools(struct ibmvnic_adapter *adapter)
 {
-	struct ibmvnic_tx_pool *tx_pool;
 	int tx_scrqs;
-	int i, j, rc;
+	int i, rc;
 
 	tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
 	for (i = 0; i < tx_scrqs; i++) {
-		netdev_dbg(adapter->netdev, "Re-setting tx_pool[%d]\n", i);
-
-		tx_pool = &adapter->tx_pool[i];
-
-		rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
+		rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
 		if (rc)
 			return rc;
-
-		rc = reset_long_term_buff(adapter, &tx_pool->tso_ltb);
+		rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
 		if (rc)
 			return rc;
-
-		memset(tx_pool->tx_buff, 0,
-		       adapter->req_tx_entries_per_subcrq *
-		       sizeof(struct ibmvnic_tx_buff));
-
-		for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
-			tx_pool->free_map[j] = j;
-
-		tx_pool->consumer_index = 0;
-		tx_pool->producer_index = 0;
-		tx_pool->tso_index = 0;
 	}
 
 	return 0;
@@ -605,35 +608,70 @@ static void release_vpd_data(struct ibmvnic_adapter *adapter)
 	adapter->vpd = NULL;
 }
 
+static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
+				struct ibmvnic_tx_pool *tx_pool)
+{
+	kfree(tx_pool->tx_buff);
+	kfree(tx_pool->free_map);
+	free_long_term_buff(adapter, &tx_pool->long_term_buff);
+}
+
 static void release_tx_pools(struct ibmvnic_adapter *adapter)
 {
-	struct ibmvnic_tx_pool *tx_pool;
 	int i;
 
 	if (!adapter->tx_pool)
 		return;
 
 	for (i = 0; i < adapter->num_active_tx_pools; i++) {
-		netdev_dbg(adapter->netdev, "Releasing tx_pool[%d]\n", i);
-		tx_pool = &adapter->tx_pool[i];
-		kfree(tx_pool->tx_buff);
-		free_long_term_buff(adapter, &tx_pool->long_term_buff);
-		free_long_term_buff(adapter, &tx_pool->tso_ltb);
-		kfree(tx_pool->free_map);
+		release_one_tx_pool(adapter, &adapter->tx_pool[i]);
+		release_one_tx_pool(adapter, &adapter->tso_pool[i]);
 	}
 
 	kfree(adapter->tx_pool);
 	adapter->tx_pool = NULL;
+	kfree(adapter->tso_pool);
+	adapter->tso_pool = NULL;
 	adapter->num_active_tx_pools = 0;
 }
 
+static int init_one_tx_pool(struct net_device *netdev,
+			    struct ibmvnic_tx_pool *tx_pool,
+			    int num_entries, int buf_size)
+{
+	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+	int i;
+
+	tx_pool->tx_buff = kcalloc(num_entries,
+				   sizeof(struct ibmvnic_tx_buff),
+				   GFP_KERNEL);
+	if (!tx_pool->tx_buff)
+		return -1;
+
+	if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
+				 num_entries * buf_size))
+		return -1;
+
+	tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL);
+	if (!tx_pool->free_map)
+		return -1;
+
+	for (i = 0; i < num_entries; i++)
+		tx_pool->free_map[i] = i;
+
+	tx_pool->consumer_index = 0;
+	tx_pool->producer_index = 0;
+	tx_pool->num_buffers = num_entries;
+	tx_pool->buf_size = buf_size;
+
+	return 0;
+}
+
 static int init_tx_pools(struct net_device *netdev)
 {
 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
-	struct device *dev = &adapter->vdev->dev;
-	struct ibmvnic_tx_pool *tx_pool;
 	int tx_subcrqs;
-	int i, j;
+	int i, rc;
 
 	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
 	adapter->tx_pool = kcalloc(tx_subcrqs,
@@ -641,57 +679,31 @@ static int init_tx_pools(struct net_device *netdev)
 	if (!adapter->tx_pool)
 		return -1;
 
-	adapter->num_active_tx_pools = 0;
-
-	for (i = 0; i < tx_subcrqs; i++) {
-		tx_pool = &adapter->tx_pool[i];
-
-		netdev_dbg(adapter->netdev,
-			   "Initializing tx_pool[%d], %lld buffs\n",
-			   i, adapter->req_tx_entries_per_subcrq);
-
-		tx_pool->tx_buff = kcalloc(adapter->req_tx_entries_per_subcrq,
-					   sizeof(struct ibmvnic_tx_buff),
-					   GFP_KERNEL);
-		if (!tx_pool->tx_buff) {
-			dev_err(dev, "tx pool buffer allocation failed\n");
-			release_tx_pools(adapter);
-			return -1;
-		}
+	adapter->tso_pool = kcalloc(tx_subcrqs,
+				    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
+	if (!adapter->tso_pool)
+		return -1;
 
-		if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
-					 adapter->req_tx_entries_per_subcrq *
-					 adapter->req_mtu)) {
-			release_tx_pools(adapter);
-			return -1;
-		}
+	adapter->num_active_tx_pools = tx_subcrqs;
 
-		/* alloc TSO ltb */
-		if (alloc_long_term_buff(adapter, &tx_pool->tso_ltb,
-					 IBMVNIC_TSO_BUFS *
-					 IBMVNIC_TSO_BUF_SZ)) {
+	for (i = 0; i < tx_subcrqs; i++) {
+		rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
+				      adapter->req_tx_entries_per_subcrq,
+				      adapter->req_mtu + VLAN_HLEN);
+		if (rc) {
 			release_tx_pools(adapter);
-			return -1;
+			return rc;
 		}
-		tx_pool->tso_index = 0;
-
-		tx_pool->free_map = kcalloc(adapter->req_tx_entries_per_subcrq,
-					    sizeof(int), GFP_KERNEL);
-		if (!tx_pool->free_map) {
+		rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
+				      IBMVNIC_TSO_BUFS,
+				      IBMVNIC_TSO_BUF_SZ);
+		if (rc) {
 			release_tx_pools(adapter);
-			return -1;
+			return rc;
 		}
-
-		for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
-			tx_pool->free_map[j] = j;
-
-		tx_pool->consumer_index = 0;
-		tx_pool->producer_index = 0;
 	}
 
-	adapter->num_active_tx_pools = tx_subcrqs;
-
 	return 0;
 }
 
@@ -740,6 +752,45 @@ static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
 	adapter->napi_enabled = false;
 }
 
+static int init_napi(struct ibmvnic_adapter *adapter)
+{
+	int i;
+
+	adapter->napi = kcalloc(adapter->req_rx_queues,
+				sizeof(struct napi_struct), GFP_KERNEL);
+	if (!adapter->napi)
+		return -ENOMEM;
+
+	for (i = 0; i < adapter->req_rx_queues; i++) {
+		netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
+		netif_napi_add(adapter->netdev, &adapter->napi[i],
+			       ibmvnic_poll, NAPI_POLL_WEIGHT);
+	}
+
+	adapter->num_active_rx_napi = adapter->req_rx_queues;
+	return 0;
+}
+
+static void release_napi(struct ibmvnic_adapter *adapter)
+{
+	int i;
+
+	if (!adapter->napi)
+		return;
+
+	for (i = 0; i < adapter->num_active_rx_napi; i++) {
+		if (&adapter->napi[i]) {
+			netdev_dbg(adapter->netdev,
+				   "Releasing napi[%d]\n", i);
+			netif_napi_del(&adapter->napi[i]);
+		}
+	}
+
+	kfree(adapter->napi);
+	adapter->napi = NULL;
+	adapter->num_active_rx_napi = 0;
+}
+
 static int ibmvnic_login(struct net_device *netdev)
 {
 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
@@ -750,7 +801,7 @@ static int ibmvnic_login(struct net_device *netdev)
 	do {
 		if (adapter->renegotiate) {
 			adapter->renegotiate = false;
-			release_sub_crqs(adapter);
+			release_sub_crqs(adapter, 1);
 
 			reinit_completion(&adapter->init_done);
 			send_cap_queries(adapter);
@@ -774,8 +825,11 @@ static int ibmvnic_login(struct net_device *netdev)
 		}
 
 		reinit_completion(&adapter->init_done);
-		send_login(adapter);
-		if (!wait_for_completion_timeout(&adapter->init_done,
+		rc = send_login(adapter);
+		if (rc) {
+			dev_err(dev, "Unable to attempt device login\n");
+			return rc;
+		} else if (!wait_for_completion_timeout(&adapter->init_done,
 						 timeout)) {
 			dev_err(dev, "Login timeout\n");
 			return -1;
@@ -805,29 +859,13 @@ static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
 
 static void release_resources(struct ibmvnic_adapter *adapter)
 {
-	int i;
-
 	release_vpd_data(adapter);
 	release_tx_pools(adapter);
 	release_rx_pools(adapter);
 
-	release_stats_token(adapter);
-	release_stats_buffers(adapter);
 	release_error_buffers(adapter);
-
-	if (adapter->napi) {
-		for (i = 0; i < adapter->req_rx_queues; i++) {
-			if (&adapter->napi[i]) {
-				netdev_dbg(adapter->netdev,
-					   "Releasing napi[%d]\n", i);
-				netif_napi_del(&adapter->napi[i]);
-			}
-		}
-	}
-	kfree(adapter->napi);
-	adapter->napi = NULL;
-
+	release_napi(adapter);
 	release_login_rsp_buffer(adapter);
 }
 
@@ -947,20 +985,12 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
 static int init_resources(struct ibmvnic_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
-	int i, rc;
+	int rc;
 
 	rc = set_real_num_queues(netdev);
 	if (rc)
 		return rc;
 
-	rc = init_stats_buffers(adapter);
-	if (rc)
-		return rc;
-
-	rc = init_stats_token(adapter);
-	if (rc)
-		return rc;
-
 	adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
 	if (!adapter->vpd)
 		return -ENOMEM;
@@ -973,16 +1003,10 @@ static int init_resources(struct ibmvnic_adapter *adapter)
 	}
 
 	adapter->map_id = 1;
-	adapter->napi = kcalloc(adapter->req_rx_queues,
-				sizeof(struct napi_struct), GFP_KERNEL);
-	if (!adapter->napi)
-		return -ENOMEM;
 
-	for (i = 0; i < adapter->req_rx_queues; i++) {
netdev_dbg(netdev, "Adding napi[%d]\n", i); - netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll, - NAPI_POLL_WEIGHT); - } + rc = init_napi(adapter); + if (rc) + return rc; send_map_query(adapter); @@ -1076,6 +1100,7 @@ static int ibmvnic_open(struct net_device *netdev) static void clean_rx_pools(struct ibmvnic_adapter *adapter) { struct ibmvnic_rx_pool *rx_pool; + struct ibmvnic_rx_buff *rx_buff; u64 rx_entries; int rx_scrqs; int i, j; @@ -1089,100 +1114,113 @@ static void clean_rx_pools(struct ibmvnic_adapter *adapter) /* Free any remaining skbs in the rx buffer pools */ for (i = 0; i < rx_scrqs; i++) { rx_pool = &adapter->rx_pool[i]; - if (!rx_pool) + if (!rx_pool || !rx_pool->rx_buff) continue; netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i); for (j = 0; j < rx_entries; j++) { - if (rx_pool->rx_buff[j].skb) { - dev_kfree_skb_any(rx_pool->rx_buff[j].skb); - rx_pool->rx_buff[j].skb = NULL; + rx_buff = &rx_pool->rx_buff[j]; + if (rx_buff && rx_buff->skb) { + dev_kfree_skb_any(rx_buff->skb); + rx_buff->skb = NULL; } } } } -static void clean_tx_pools(struct ibmvnic_adapter *adapter) +static void clean_one_tx_pool(struct ibmvnic_adapter *adapter, + struct ibmvnic_tx_pool *tx_pool) { - struct ibmvnic_tx_pool *tx_pool; + struct ibmvnic_tx_buff *tx_buff; u64 tx_entries; + int i; + + if (!tx_pool || !tx_pool->tx_buff) + return; + + tx_entries = tx_pool->num_buffers; + + for (i = 0; i < tx_entries; i++) { + tx_buff = &tx_pool->tx_buff[i]; + if (tx_buff && tx_buff->skb) { + dev_kfree_skb_any(tx_buff->skb); + tx_buff->skb = NULL; + } + } +} + +static void clean_tx_pools(struct ibmvnic_adapter *adapter) +{ int tx_scrqs; - int i, j; + int i; - if (!adapter->tx_pool) + if (!adapter->tx_pool || !adapter->tso_pool) return; tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); - tx_entries = adapter->req_tx_entries_per_subcrq; /* Free any remaining skbs in the tx buffer pools */ for (i = 0; i < tx_scrqs; i++) { - tx_pool = &adapter->tx_pool[i]; - if (!tx_pool) - continue; - netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i); - for (j = 0; j < tx_entries; j++) { - if (tx_pool->tx_buff[j].skb) { - dev_kfree_skb_any(tx_pool->tx_buff[j].skb); - tx_pool->tx_buff[j].skb = NULL; - } - } + clean_one_tx_pool(adapter, &adapter->tx_pool[i]); + clean_one_tx_pool(adapter, &adapter->tso_pool[i]); } } -static int __ibmvnic_close(struct net_device *netdev) +static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter) { - struct ibmvnic_adapter *adapter = netdev_priv(netdev); - int rc = 0; + struct net_device *netdev = adapter->netdev; int i; - adapter->state = VNIC_CLOSING; - - /* ensure that transmissions are stopped if called by do_reset */ - if (adapter->resetting) - netif_tx_disable(netdev); - else - netif_tx_stop_all_queues(netdev); - - ibmvnic_napi_disable(adapter); - if (adapter->tx_scrq) { for (i = 0; i < adapter->req_tx_queues; i++) if (adapter->tx_scrq[i]->irq) { - netdev_dbg(adapter->netdev, + netdev_dbg(netdev, "Disabling tx_scrq[%d] irq\n", i); disable_irq(adapter->tx_scrq[i]->irq); } } - rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN); - if (rc) - return rc; - if (adapter->rx_scrq) { for (i = 0; i < adapter->req_rx_queues; i++) { - int retries = 10; - - while (pending_scrq(adapter, adapter->rx_scrq[i])) { - retries--; - mdelay(100); - - if (retries == 0) - break; - } - if (adapter->rx_scrq[i]->irq) { - netdev_dbg(adapter->netdev, + netdev_dbg(netdev, "Disabling rx_scrq[%d] irq\n", i); disable_irq(adapter->rx_scrq[i]->irq); } } } +} + +static void 
ibmvnic_cleanup(struct net_device *netdev) +{ + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + + /* ensure that transmissions are stopped if called by do_reset */ + if (adapter->resetting) + netif_tx_disable(netdev); + else + netif_tx_stop_all_queues(netdev); + + ibmvnic_napi_disable(adapter); + ibmvnic_disable_irqs(adapter); + clean_rx_pools(adapter); clean_tx_pools(adapter); +} + +static int __ibmvnic_close(struct net_device *netdev) +{ + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + int rc = 0; + + adapter->state = VNIC_CLOSING; + rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN); + if (rc) + return rc; + ibmvnic_cleanup(netdev); adapter->state = VNIC_CLOSED; - return rc; + return 0; } static int ibmvnic_close(struct net_device *netdev) @@ -1214,7 +1252,10 @@ static int build_hdr_data(u8 hdr_field, struct sk_buff *skb, int len = 0; u8 *hdr; - hdr_len[0] = sizeof(struct ethhdr); + if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb)) + hdr_len[0] = sizeof(struct vlan_ethhdr); + else + hdr_len[0] = sizeof(struct ethhdr); if (skb->protocol == htons(ETH_P_IP)) { hdr_len[1] = ip_hdr(skb)->ihl * 4; @@ -1330,6 +1371,21 @@ static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff, txbuff->indir_arr + 1); } +static int ibmvnic_xmit_workarounds(struct sk_buff *skb, + struct net_device *netdev) +{ + /* For some backing devices, mishandling of small packets + * can result in a loss of connection or TX stall. Device + * architects recommend that no packet should be smaller + * than the minimum MTU value provided to the driver, so + * pad any packets to that length + */ + if (skb->len < netdev->min_mtu) + return skb_put_padto(skb, netdev->min_mtu); + + return 0; +} + static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) { struct ibmvnic_adapter *adapter = netdev_priv(netdev); @@ -1367,7 +1423,17 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) goto out; } - tx_pool = &adapter->tx_pool[queue_num]; + if (ibmvnic_xmit_workarounds(skb, netdev)) { + tx_dropped++; + tx_send_failed++; + ret = NETDEV_TX_OK; + goto out; + } + if (skb_is_gso(skb)) + tx_pool = &adapter->tso_pool[queue_num]; + else + tx_pool = &adapter->tx_pool[queue_num]; + tx_scrq = adapter->tx_scrq[queue_num]; txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb)); handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + @@ -1375,21 +1441,21 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) index = tx_pool->free_map[tx_pool->consumer_index]; - if (skb_is_gso(skb)) { - offset = tx_pool->tso_index * IBMVNIC_TSO_BUF_SZ; - dst = tx_pool->tso_ltb.buff + offset; - memset(dst, 0, IBMVNIC_TSO_BUF_SZ); - data_dma_addr = tx_pool->tso_ltb.addr + offset; - tx_pool->tso_index++; - if (tx_pool->tso_index == IBMVNIC_TSO_BUFS) - tx_pool->tso_index = 0; - } else { - offset = index * adapter->req_mtu; - dst = tx_pool->long_term_buff.buff + offset; - memset(dst, 0, adapter->req_mtu); - data_dma_addr = tx_pool->long_term_buff.addr + offset; + if (index == IBMVNIC_INVALID_MAP) { + dev_kfree_skb_any(skb); + tx_send_failed++; + tx_dropped++; + ret = NETDEV_TX_OK; + goto out; } + tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP; + + offset = index * tx_pool->buf_size; + dst = tx_pool->long_term_buff.buff + offset; + memset(dst, 0, tx_pool->buf_size); + data_dma_addr = tx_pool->long_term_buff.addr + offset; + if (skb_shinfo(skb)->nr_frags) { int cur, i; @@ -1411,8 +1477,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) } 
tx_pool->consumer_index = - (tx_pool->consumer_index + 1) % - adapter->req_tx_entries_per_subcrq; + (tx_pool->consumer_index + 1) % tx_pool->num_buffers; tx_buff = &tx_pool->tx_buff[index]; tx_buff->skb = skb; @@ -1428,11 +1493,13 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) tx_crq.v1.n_crq_elem = 1; tx_crq.v1.n_sge = 1; tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED; - tx_crq.v1.correlator = cpu_to_be32(index); + if (skb_is_gso(skb)) - tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->tso_ltb.map_id); + tx_crq.v1.correlator = + cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK); else - tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id); + tx_crq.v1.correlator = cpu_to_be32(index); + tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id); tx_crq.v1.sge_len = cpu_to_be32(skb->len); tx_crq.v1.ioba = cpu_to_be64(data_dma_addr); @@ -1467,6 +1534,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) if ((*hdrs >> 7) & 1) { build_hdr_descs_arr(tx_buff, &num_entries, *hdrs); tx_crq.v1.n_crq_elem = num_entries; + tx_buff->num_entries = num_entries; tx_buff->indir_arr[0] = tx_crq; tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr, sizeof(tx_buff->indir_arr), @@ -1479,24 +1547,18 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) tx_map_failed++; tx_dropped++; ret = NETDEV_TX_OK; - goto out; + goto tx_err_out; } lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num], (u64)tx_buff->indir_dma, (u64)num_entries); } else { + tx_buff->num_entries = num_entries; lpar_rc = send_subcrq(adapter, handle_array[queue_num], &tx_crq); } if (lpar_rc != H_SUCCESS) { dev_err(dev, "tx failed with code %ld\n", lpar_rc); - - if (tx_pool->consumer_index == 0) - tx_pool->consumer_index = - adapter->req_tx_entries_per_subcrq - 1; - else - tx_pool->consumer_index--; - dev_kfree_skb_any(skb); tx_buff->skb = NULL; @@ -1512,12 +1574,12 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) tx_send_failed++; tx_dropped++; ret = NETDEV_TX_OK; - goto out; + goto tx_err_out; } - if (atomic_inc_return(&tx_scrq->used) + if (atomic_add_return(num_entries, &tx_scrq->used) >= adapter->req_tx_entries_per_subcrq) { - netdev_info(netdev, "Stopping queue %d\n", queue_num); + netdev_dbg(netdev, "Stopping queue %d\n", queue_num); netif_stop_subqueue(netdev, queue_num); } @@ -1525,7 +1587,16 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) tx_bytes += skb->len; txq->trans_start = jiffies; ret = NETDEV_TX_OK; + goto out; +tx_err_out: + /* roll back consumer index and map array*/ + if (tx_pool->consumer_index == 0) + tx_pool->consumer_index = + tx_pool->num_buffers - 1; + else + tx_pool->consumer_index--; + tx_pool->free_map[tx_pool->consumer_index] = index; out: netdev->stats.tx_dropped += tx_dropped; netdev->stats.tx_bytes += tx_bytes; @@ -1644,16 +1715,19 @@ static int do_reset(struct ibmvnic_adapter *adapter, rc = ibmvnic_reenable_crq_queue(adapter); if (rc) return 0; + ibmvnic_cleanup(netdev); + } else if (rwi->reset_reason == VNIC_RESET_FAILOVER) { + ibmvnic_cleanup(netdev); + } else { + rc = __ibmvnic_close(netdev); + if (rc) + return rc; } - rc = __ibmvnic_close(netdev); - if (rc) - return rc; - if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM || adapter->wait_for_reset) { release_resources(adapter); - release_sub_crqs(adapter); + release_sub_crqs(adapter, 1); release_crq_queue(adapter); } @@ -1691,6 +1765,9 @@ static int do_reset(struct ibmvnic_adapter *adapter, release_tx_pools(adapter); 
init_rx_pools(netdev); init_tx_pools(netdev); + + release_napi(adapter); + init_napi(adapter); } else { rc = reset_tx_pools(adapter); if (rc) @@ -1699,12 +1776,15 @@ static int do_reset(struct ibmvnic_adapter *adapter, rc = reset_rx_pools(adapter); if (rc) return rc; - - if (reset_state == VNIC_CLOSED) - return 0; } } + ibmvnic_disable_irqs(adapter); + adapter->state = VNIC_CLOSED; + + if (reset_state == VNIC_CLOSED) + return 0; + rc = __ibmvnic_open(netdev); if (rc) { if (list_empty(&adapter->rwi_list)) @@ -2011,6 +2091,23 @@ static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu) return wait_for_reset(adapter); } +static netdev_features_t ibmvnic_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + /* Some backing hardware adapters can not + * handle packets with a MSS less than 224 + * or with only one segment. + */ + if (skb_is_gso(skb)) { + if (skb_shinfo(skb)->gso_size < 224 || + skb_shinfo(skb)->gso_segs == 1) + features &= ~NETIF_F_GSO_MASK; + } + + return features; +} + static const struct net_device_ops ibmvnic_netdev_ops = { .ndo_open = ibmvnic_open, .ndo_stop = ibmvnic_close, @@ -2023,6 +2120,7 @@ static const struct net_device_ops ibmvnic_netdev_ops = { .ndo_poll_controller = ibmvnic_netpoll_controller, #endif .ndo_change_mtu = ibmvnic_change_mtu, + .ndo_features_check = ibmvnic_features_check, }; /* ethtool functions */ @@ -2295,24 +2393,27 @@ static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter) } static void release_sub_crq_queue(struct ibmvnic_adapter *adapter, - struct ibmvnic_sub_crq_queue *scrq) + struct ibmvnic_sub_crq_queue *scrq, + bool do_h_free) { struct device *dev = &adapter->vdev->dev; long rc; netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n"); - /* Close the sub-crqs */ - do { - rc = plpar_hcall_norets(H_FREE_SUB_CRQ, - adapter->vdev->unit_address, - scrq->crq_num); - } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); + if (do_h_free) { + /* Close the sub-crqs */ + do { + rc = plpar_hcall_norets(H_FREE_SUB_CRQ, + adapter->vdev->unit_address, + scrq->crq_num); + } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); - if (rc) { - netdev_err(adapter->netdev, - "Failed to release sub-CRQ %16lx, rc = %ld\n", - scrq->crq_num, rc); + if (rc) { + netdev_err(adapter->netdev, + "Failed to release sub-CRQ %16lx, rc = %ld\n", + scrq->crq_num, rc); + } } dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE, @@ -2380,12 +2481,12 @@ zero_page_failed: return NULL; } -static void release_sub_crqs(struct ibmvnic_adapter *adapter) +static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free) { int i; if (adapter->tx_scrq) { - for (i = 0; i < adapter->req_tx_queues; i++) { + for (i = 0; i < adapter->num_active_tx_scrqs; i++) { if (!adapter->tx_scrq[i]) continue; @@ -2398,15 +2499,17 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter) adapter->tx_scrq[i]->irq = 0; } - release_sub_crq_queue(adapter, adapter->tx_scrq[i]); + release_sub_crq_queue(adapter, adapter->tx_scrq[i], + do_h_free); } kfree(adapter->tx_scrq); adapter->tx_scrq = NULL; + adapter->num_active_tx_scrqs = 0; } if (adapter->rx_scrq) { - for (i = 0; i < adapter->req_rx_queues; i++) { + for (i = 0; i < adapter->num_active_rx_scrqs; i++) { if (!adapter->rx_scrq[i]) continue; @@ -2419,11 +2522,13 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter) adapter->rx_scrq[i]->irq = 0; } - release_sub_crq_queue(adapter, adapter->rx_scrq[i]); + release_sub_crq_queue(adapter, adapter->rx_scrq[i], + do_h_free); } 
kfree(adapter->rx_scrq); adapter->rx_scrq = NULL; + adapter->num_active_rx_scrqs = 0; } } @@ -2464,6 +2569,7 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter, struct ibmvnic_sub_crq_queue *scrq) { struct device *dev = &adapter->vdev->dev; + struct ibmvnic_tx_pool *tx_pool; struct ibmvnic_tx_buff *txbuff; union sub_crq *next; int index; @@ -2473,6 +2579,7 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter, restart_loop: while (pending_scrq(adapter, scrq)) { unsigned int pool = scrq->pool_index; + int num_entries = 0; next = ibmvnic_next_scrq(adapter, scrq); for (i = 0; i < next->tx_comp.num_comps; i++) { @@ -2482,7 +2589,14 @@ restart_loop: continue; } index = be32_to_cpu(next->tx_comp.correlators[i]); - txbuff = &adapter->tx_pool[pool].tx_buff[index]; + if (index & IBMVNIC_TSO_POOL_MASK) { + tx_pool = &adapter->tso_pool[pool]; + index &= ~IBMVNIC_TSO_POOL_MASK; + } else { + tx_pool = &adapter->tx_pool[pool]; + } + + txbuff = &tx_pool->tx_buff[index]; for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) { if (!txbuff->data_dma[j]) @@ -2503,22 +2617,23 @@ restart_loop: txbuff->skb = NULL; } - adapter->tx_pool[pool].free_map[adapter->tx_pool[pool]. - producer_index] = index; - adapter->tx_pool[pool].producer_index = - (adapter->tx_pool[pool].producer_index + 1) % - adapter->req_tx_entries_per_subcrq; + num_entries += txbuff->num_entries; + + tx_pool->free_map[tx_pool->producer_index] = index; + tx_pool->producer_index = + (tx_pool->producer_index + 1) % + tx_pool->num_buffers; } /* remove tx_comp scrq*/ next->tx_comp.first = 0; - if (atomic_sub_return(next->tx_comp.num_comps, &scrq->used) <= + if (atomic_sub_return(num_entries, &scrq->used) <= (adapter->req_tx_entries_per_subcrq / 2) && __netif_subqueue_stopped(adapter->netdev, scrq->pool_index)) { netif_wake_subqueue(adapter->netdev, scrq->pool_index); - netdev_info(adapter->netdev, "Started queue %d\n", - scrq->pool_index); + netdev_dbg(adapter->netdev, "Started queue %d\n", + scrq->pool_index); } } @@ -2590,7 +2705,7 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter) dev_err(dev, "Couldn't register tx irq 0x%x. 
rc=%d\n", scrq->irq, rc); irq_dispose_mapping(scrq->irq); - goto req_rx_irq_failed; + goto req_tx_irq_failed; } } @@ -2626,7 +2741,7 @@ req_tx_irq_failed: free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]); irq_dispose_mapping(adapter->rx_scrq[j]->irq); } - release_sub_crqs(adapter); + release_sub_crqs(adapter, 1); return rc; } @@ -2688,6 +2803,7 @@ static int init_sub_crqs(struct ibmvnic_adapter *adapter) for (i = 0; i < adapter->req_tx_queues; i++) { adapter->tx_scrq[i] = allqueues[i]; adapter->tx_scrq[i]->pool_index = i; + adapter->num_active_tx_scrqs++; } adapter->rx_scrq = kcalloc(adapter->req_rx_queues, @@ -2698,6 +2814,7 @@ static int init_sub_crqs(struct ibmvnic_adapter *adapter) for (i = 0; i < adapter->req_rx_queues; i++) { adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues]; adapter->rx_scrq[i]->scrq_num = i; + adapter->num_active_rx_scrqs++; } kfree(allqueues); @@ -2708,7 +2825,7 @@ rx_failed: adapter->tx_scrq = NULL; tx_failed: for (i = 0; i < registered_queues; i++) - release_sub_crq_queue(adapter, allqueues[i]); + release_sub_crq_queue(adapter, allqueues[i], 1); kfree(allqueues); return -1; } @@ -3048,7 +3165,7 @@ static void vnic_add_client_data(struct ibmvnic_adapter *adapter, strncpy(&vlcd->name, adapter->netdev->name, len); } -static void send_login(struct ibmvnic_adapter *adapter) +static int send_login(struct ibmvnic_adapter *adapter) { struct ibmvnic_login_rsp_buffer *login_rsp_buffer; struct ibmvnic_login_buffer *login_buffer; @@ -3064,6 +3181,12 @@ static void send_login(struct ibmvnic_adapter *adapter) struct vnic_login_client_data *vlcd; int i; + if (!adapter->tx_scrq || !adapter->rx_scrq) { + netdev_err(adapter->netdev, + "RX or TX queues are not allocated, device login failed\n"); + return -1; + } + release_login_rsp_buffer(adapter); client_data_len = vnic_client_data_len(adapter); @@ -3161,7 +3284,7 @@ static void send_login(struct ibmvnic_adapter *adapter) crq.login.len = cpu_to_be32(buffer_size); ibmvnic_send_crq(adapter, &crq); - return; + return 0; buf_rsp_map_failed: kfree(login_rsp_buffer); @@ -3170,7 +3293,7 @@ buf_rsp_alloc_failed: buf_map_failed: kfree(login_buffer); buf_alloc_failed: - return; + return -1; } static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr, @@ -4335,6 +4458,7 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter) { struct device *dev = &adapter->vdev->dev; unsigned long timeout = msecs_to_jiffies(30000); + u64 old_num_rx_queues, old_num_tx_queues; int rc; if (adapter->resetting && !adapter->wait_for_reset) { @@ -4352,6 +4476,9 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter) adapter->from_passive_init = false; + old_num_rx_queues = adapter->req_rx_queues; + old_num_tx_queues = adapter->req_tx_queues; + init_completion(&adapter->init_done); adapter->init_done_rc = 0; ibmvnic_send_crq_init(adapter); @@ -4371,10 +4498,18 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter) return -1; } - if (adapter->resetting && !adapter->wait_for_reset) - rc = reset_sub_crq_queues(adapter); - else + if (adapter->resetting && !adapter->wait_for_reset) { + if (adapter->req_rx_queues != old_num_rx_queues || + adapter->req_tx_queues != old_num_tx_queues) { + release_sub_crqs(adapter, 0); + rc = init_sub_crqs(adapter); + } else { + rc = reset_sub_crq_queues(adapter); + } + } else { rc = init_sub_crqs(adapter); + } + if (rc) { dev_err(dev, "Initialization of sub crqs failed\n"); release_crq_queue(adapter); @@ -4387,6 +4522,14 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter) 
release_crq_queue(adapter); } + rc = init_stats_buffers(adapter); + if (rc) + return rc; + + rc = init_stats_token(adapter); + if (rc) + return rc; + return rc; } @@ -4474,7 +4617,7 @@ ibmvnic_register_fail: device_remove_file(&dev->dev, &dev_attr_failover); ibmvnic_init_fail: - release_sub_crqs(adapter); + release_sub_crqs(adapter, 1); release_crq_queue(adapter); free_netdev(netdev); @@ -4491,9 +4634,12 @@ static int ibmvnic_remove(struct vio_dev *dev) mutex_lock(&adapter->reset_lock); release_resources(adapter); - release_sub_crqs(adapter); + release_sub_crqs(adapter, 1); release_crq_queue(adapter); + release_stats_token(adapter); + release_stats_buffers(adapter); + adapter->state = VNIC_REMOVED; mutex_unlock(&adapter->reset_lock); |