Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/Kconfig                |   12
-rw-r--r--  drivers/net/Makefile               |    2
-rw-r--r--  drivers/net/bnx2.c                 |  103
-rw-r--r--  drivers/net/bnx2.h                 |   10
-rw-r--r--  drivers/net/hamradio/baycom_epp.c  |    2
-rw-r--r--  drivers/net/pppol2tp.c             |   18
-rw-r--r--  drivers/net/sunvnet.c              |  260
-rw-r--r--  drivers/net/sunvnet.h              |    4
-rw-r--r--  drivers/net/xen-netfront.c         | 1863
9 files changed, 2097 insertions(+), 177 deletions(-)
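Two patterns in this commit are worth calling out before the diff itself.

First, the pppol2tp hunks below stop reading skb->len after ip_queue_xmit(): that call consumes the skb, which can already be freed by the time it returns, so the byte counters must snapshot the length first. A minimal sketch of the pattern, using the stats fields as they appear in the driver (the helper name l2tp_xmit_and_count is illustrative, not from the patch):

	static int l2tp_xmit_and_count(struct sk_buff *skb,
				       struct pppol2tp_tunnel *tunnel)
	{
		unsigned int len = skb->len;	/* snapshot while the skb is still ours */
		int error = ip_queue_xmit(skb, 1);	/* consumes (and may free) the skb */

		if (error >= 0) {
			tunnel->stats.tx_packets++;
			tunnel->stats.tx_bytes += len;	/* not skb->len: skb may be gone */
		} else {
			tunnel->stats.tx_errors++;
		}
		return error;
	}

Second, talk_to_backend() in the new xen-netfront driver follows the standard xenbus transaction idiom: every key is written inside a transaction, and -EAGAIN from xenbus_transaction_end() means a concurrent writer invalidated the transaction, so the whole sequence is simply replayed. A stripped-down sketch of that loop (write_config() and "example-key" are placeholders; the xenbus_dev_fatal() error reporting the driver does is omitted here):

	static int write_config(struct xenbus_device *dev, unsigned int value)
	{
		struct xenbus_transaction xbt;
		int err;

	again:
		err = xenbus_transaction_start(&xbt);
		if (err)
			return err;

		err = xenbus_printf(xbt, dev->nodename, "example-key", "%u", value);
		if (err) {
			xenbus_transaction_end(xbt, 1);	/* abort */
			return err;
		}

		err = xenbus_transaction_end(xbt, 0);	/* commit */
		if (err == -EAGAIN)
			goto again;	/* raced with another writer: retry */
		return err;
	}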
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 43d03178064d..5fb659f8b20e 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -2486,6 +2486,18 @@ source "drivers/atm/Kconfig" source "drivers/s390/net/Kconfig" +config XEN_NETDEV_FRONTEND + tristate "Xen network device frontend driver" + depends on XEN + default y + help + The network device frontend driver allows the kernel to + access network devices exported exported by a virtual + machine containing a physical network device driver. The + frontend driver is intended for unprivileged guest domains; + if you are compiling a kernel for a Xen guest, you almost + certainly want to enable this. + config ISERIES_VETH tristate "iSeries Virtual Ethernet driver support" depends on PPC_ISERIES diff --git a/drivers/net/Makefile b/drivers/net/Makefile index eb4167622a6a..0e286ab8855a 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -127,6 +127,8 @@ obj-$(CONFIG_PPPOL2TP) += pppox.o pppol2tp.o obj-$(CONFIG_SLIP) += slip.o obj-$(CONFIG_SLHC) += slhc.o +obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o + obj-$(CONFIG_DUMMY) += dummy.o obj-$(CONFIG_IFB) += ifb.o obj-$(CONFIG_MACVLAN) += macvlan.o diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index d23861c8658c..a729da061bbb 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c @@ -54,8 +54,8 @@ #define DRV_MODULE_NAME "bnx2" #define PFX DRV_MODULE_NAME ": " -#define DRV_MODULE_VERSION "1.6.2" -#define DRV_MODULE_RELDATE "July 6, 2007" +#define DRV_MODULE_VERSION "1.6.3" +#define DRV_MODULE_RELDATE "July 16, 2007" #define RUN_AT(x) (jiffies + (x)) @@ -126,91 +126,102 @@ static struct pci_device_id bnx2_pci_tbl[] = { static struct flash_spec flash_table[] = { +#define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE) +#define NONBUFFERED_FLAGS (BNX2_NV_WREN) /* Slow EEPROM */ {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400, - 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE, + BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE, SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE, "EEPROM - slow"}, /* Expansion entry 0001 */ {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406, - 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, + NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, SAIFUN_FLASH_BYTE_ADDR_MASK, 0, "Entry 0001"}, /* Saifun SA25F010 (non-buffered flash) */ /* strap, cfg1, & write1 need updates */ {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406, - 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, + NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2, "Non-buffered flash (128kB)"}, /* Saifun SA25F020 (non-buffered flash) */ /* strap, cfg1, & write1 need updates */ {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406, - 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, + NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4, "Non-buffered flash (256kB)"}, /* Expansion entry 0100 */ {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406, - 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, + NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, SAIFUN_FLASH_BYTE_ADDR_MASK, 0, "Entry 0100"}, /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */ {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406, - 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, + NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, 
ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2, "Entry 0101: ST M45PE10 (128kB non-bufferred)"}, /* Entry 0110: ST M45PE20 (non-buffered flash)*/ {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406, - 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, + NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4, "Entry 0110: ST M45PE20 (256kB non-bufferred)"}, /* Saifun SA25F005 (non-buffered flash) */ /* strap, cfg1, & write1 need updates */ {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406, - 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, + NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE, "Non-buffered flash (64kB)"}, /* Fast EEPROM */ {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400, - 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE, + BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE, SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE, "EEPROM - fast"}, /* Expansion entry 1001 */ {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406, - 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, + NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, SAIFUN_FLASH_BYTE_ADDR_MASK, 0, "Entry 1001"}, /* Expansion entry 1010 */ {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406, - 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, + NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, SAIFUN_FLASH_BYTE_ADDR_MASK, 0, "Entry 1010"}, /* ATMEL AT45DB011B (buffered flash) */ {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400, - 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, + BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE, "Buffered flash (128kB)"}, /* Expansion entry 1100 */ {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406, - 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, + NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, SAIFUN_FLASH_BYTE_ADDR_MASK, 0, "Entry 1100"}, /* Expansion entry 1101 */ {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406, - 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, + NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, SAIFUN_FLASH_BYTE_ADDR_MASK, 0, "Entry 1101"}, /* Ateml Expansion entry 1110 */ {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400, - 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, + BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, BUFFERED_FLASH_BYTE_ADDR_MASK, 0, "Entry 1110 (Atmel)"}, /* ATMEL AT45DB021B (buffered flash) */ {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400, - 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, + BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2, "Buffered flash (256kB)"}, }; +static struct flash_spec flash_5709 = { + .flags = BNX2_NV_BUFFERED, + .page_bits = BCM5709_FLASH_PAGE_BITS, + .page_size = BCM5709_FLASH_PAGE_SIZE, + .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK, + .total_size = BUFFERED_FLASH_TOTAL_SIZE*2, + .name = "5709 Buffered flash (256kB)", +}; + MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl); static inline u32 bnx2_tx_avail(struct bnx2 *bp) @@ -3289,7 +3300,7 @@ bnx2_enable_nvram_write(struct bnx2 *bp) val = REG_RD(bp, BNX2_MISC_CFG); REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI); - if 
(!bp->flash_info->buffered) { + if (bp->flash_info->flags & BNX2_NV_WREN) { int j; REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE); @@ -3349,7 +3360,7 @@ bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset) u32 cmd; int j; - if (bp->flash_info->buffered) + if (bp->flash_info->flags & BNX2_NV_BUFFERED) /* Buffered flash, no erase needed */ return 0; @@ -3392,8 +3403,8 @@ bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags) /* Build the command word. */ cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags; - /* Calculate an offset of a buffered flash. */ - if (bp->flash_info->buffered) { + /* Calculate an offset of a buffered flash, not needed for 5709. */ + if (bp->flash_info->flags & BNX2_NV_TRANSLATE) { offset = ((offset / bp->flash_info->page_size) << bp->flash_info->page_bits) + (offset % bp->flash_info->page_size); @@ -3439,8 +3450,8 @@ bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags) /* Build the command word. */ cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags; - /* Calculate an offset of a buffered flash. */ - if (bp->flash_info->buffered) { + /* Calculate an offset of a buffered flash, not needed for 5709. */ + if (bp->flash_info->flags & BNX2_NV_TRANSLATE) { offset = ((offset / bp->flash_info->page_size) << bp->flash_info->page_bits) + (offset % bp->flash_info->page_size); @@ -3478,15 +3489,19 @@ static int bnx2_init_nvram(struct bnx2 *bp) { u32 val; - int j, entry_count, rc; + int j, entry_count, rc = 0; struct flash_spec *flash; + if (CHIP_NUM(bp) == CHIP_NUM_5709) { + bp->flash_info = &flash_5709; + goto get_flash_size; + } + /* Determine the selected interface. */ val = REG_RD(bp, BNX2_NVM_CFG1); entry_count = sizeof(flash_table) / sizeof(struct flash_spec); - rc = 0; if (val & 0x40000000) { /* Flash interface has been reconfigured */ @@ -3542,6 +3557,7 @@ bnx2_init_nvram(struct bnx2 *bp) return -ENODEV; } +get_flash_size: val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2); val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK; if (val) @@ -3706,7 +3722,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf, buf = align_buf; } - if (bp->flash_info->buffered == 0) { + if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) { flash_buffer = kmalloc(264, GFP_KERNEL); if (flash_buffer == NULL) { rc = -ENOMEM; @@ -3739,7 +3755,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf, bnx2_enable_nvram_access(bp); cmd_flags = BNX2_NVM_COMMAND_FIRST; - if (bp->flash_info->buffered == 0) { + if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) { int j; /* Read the whole page into the buffer @@ -3767,7 +3783,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf, /* Loop to write back the buffer data from page_start to * data_start */ i = 0; - if (bp->flash_info->buffered == 0) { + if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) { /* Erase the page */ if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0) goto nvram_write_end; @@ -3791,7 +3807,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf, /* Loop to write the new data from data_start to data_end */ for (addr = data_start; addr < data_end; addr += 4, i += 4) { if ((addr == page_end - 4) || - ((bp->flash_info->buffered) && + ((bp->flash_info->flags & BNX2_NV_BUFFERED) && (addr == data_end - 4))) { cmd_flags |= BNX2_NVM_COMMAND_LAST; @@ -3808,7 +3824,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf, /* Loop to write back the buffer data from data_end * to page_end */ - if (bp->flash_info->buffered == 0) { + if (!(bp->flash_info->flags 
& BNX2_NV_BUFFERED)) { for (addr = data_end; addr < page_end; addr += 4, i += 4) { @@ -4107,7 +4123,7 @@ bnx2_init_chip(struct bnx2 *bp) if (CHIP_NUM(bp) == CHIP_NUM_5708) REG_WR(bp, BNX2_HC_STATS_TICKS, 0); else - REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00); + REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks); REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */ if (CHIP_ID(bp) == CHIP_ID_5706_A1) @@ -4127,10 +4143,6 @@ bnx2_init_chip(struct bnx2 *bp) REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS); - if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) & - BNX2_PORT_FEATURE_ASF_ENABLED) - bp->flags |= ASF_ENABLE_FLAG; - /* Initialize the receive filter. */ bnx2_set_rx_mode(bp->dev); @@ -5786,8 +5798,9 @@ bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal) if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC) bp->stats_ticks = USEC_PER_SEC; } - if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00; - bp->stats_ticks &= 0xffff00; + if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS) + bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS; + bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS; if (netif_running(bp->dev)) { bnx2_netif_stop(bp); @@ -6629,6 +6642,18 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) if (i != 2) bp->fw_version[j++] = '.'; } + if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) & + BNX2_PORT_FEATURE_ASF_ENABLED) { + bp->flags |= ASF_ENABLE_FLAG; + + for (i = 0; i < 30; i++) { + reg = REG_RD_IND(bp, bp->shmem_base + + BNX2_BC_STATE_CONDITION); + if (reg & BNX2_CONDITION_MFW_RUN_MASK) + break; + msleep(10); + } + } reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION); reg &= BNX2_CONDITION_MFW_RUN_MASK; if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN && @@ -6672,7 +6697,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) bp->rx_ticks_int = 18; bp->rx_ticks = 18; - bp->stats_ticks = 1000000 & 0xffff00; + bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS; bp->timer_interval = HZ; bp->current_interval = HZ; diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h index d8cd1afeb23d..102adfe1e923 100644 --- a/drivers/net/bnx2.h +++ b/drivers/net/bnx2.h @@ -6433,6 +6433,11 @@ struct sw_bd { #define ST_MICRO_FLASH_PAGE_SIZE 256 #define ST_MICRO_FLASH_BASE_TOTAL_SIZE 65536 +#define BCM5709_FLASH_PAGE_BITS 8 +#define BCM5709_FLASH_PHY_PAGE_SIZE (1 << BCM5709_FLASH_PAGE_BITS) +#define BCM5709_FLASH_BYTE_ADDR_MASK (BCM5709_FLASH_PHY_PAGE_SIZE-1) +#define BCM5709_FLASH_PAGE_SIZE 256 + #define NVRAM_TIMEOUT_COUNT 30000 @@ -6449,7 +6454,10 @@ struct flash_spec { u32 config2; u32 config3; u32 write1; - u32 buffered; + u32 flags; +#define BNX2_NV_BUFFERED 0x00000001 +#define BNX2_NV_TRANSLATE 0x00000002 +#define BNX2_NV_WREN 0x00000004 u32 page_bits; u32 page_size; u32 addr_mask; diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c index 84aa2117c0ee..355c6cf3d112 100644 --- a/drivers/net/hamradio/baycom_epp.c +++ b/drivers/net/hamradio/baycom_epp.c @@ -320,7 +320,7 @@ static int eppconfig(struct baycom_state *bc) sprintf(portarg, "%ld", bc->pdev->port->base); printk(KERN_DEBUG "%s: %s -s -p %s -m %s\n", bc_drvname, eppconfig_path, portarg, modearg); - return call_usermodehelper(eppconfig_path, argv, envp, 1); + return call_usermodehelper(eppconfig_path, argv, envp, UMH_WAIT_PROC); } /* ---------------------------------------------------------------------- */ diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c index 
5891a0fbdc8b..f87176055d0e 100644 --- a/drivers/net/pppol2tp.c +++ b/drivers/net/pppol2tp.c @@ -824,6 +824,7 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh struct pppol2tp_session *session; struct pppol2tp_tunnel *tunnel; struct udphdr *uh; + unsigned int len; error = -ENOTCONN; if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) @@ -912,14 +913,15 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh } /* Queue the packet to IP for output */ + len = skb->len; error = ip_queue_xmit(skb, 1); /* Update stats */ if (error >= 0) { tunnel->stats.tx_packets++; - tunnel->stats.tx_bytes += skb->len; + tunnel->stats.tx_bytes += len; session->stats.tx_packets++; - session->stats.tx_bytes += skb->len; + session->stats.tx_bytes += len; } else { tunnel->stats.tx_errors++; session->stats.tx_errors++; @@ -958,6 +960,7 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb) __wsum csum = 0; struct sk_buff *skb2 = NULL; struct udphdr *uh; + unsigned int len; if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) goto abort; @@ -1046,18 +1049,25 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb) printk("\n"); } + memset(&(IPCB(skb2)->opt), 0, sizeof(IPCB(skb2)->opt)); + IPCB(skb2)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | + IPSKB_REROUTED); + nf_reset(skb2); + /* Get routing info from the tunnel socket */ + dst_release(skb2->dst); skb2->dst = sk_dst_get(sk_tun); /* Queue the packet to IP for output */ + len = skb2->len; rc = ip_queue_xmit(skb2, 1); /* Update stats */ if (rc >= 0) { tunnel->stats.tx_packets++; - tunnel->stats.tx_bytes += skb2->len; + tunnel->stats.tx_bytes += len; session->stats.tx_packets++; - session->stats.tx_bytes += skb2->len; + session->stats.tx_bytes += len; } else { tunnel->stats.tx_errors++; session->stats.tx_errors++; diff --git a/drivers/net/sunvnet.c b/drivers/net/sunvnet.c index 8a667c13faef..b801e3b3a11a 100644 --- a/drivers/net/sunvnet.c +++ b/drivers/net/sunvnet.c @@ -12,6 +12,7 @@ #include <linux/netdevice.h> #include <linux/ethtool.h> #include <linux/etherdevice.h> +#include <linux/mutex.h> #include <asm/vio.h> #include <asm/ldc.h> @@ -497,6 +498,8 @@ static void vnet_event(void *arg, int event) vio_link_state_change(vio, event); spin_unlock_irqrestore(&vio->lock, flags); + if (event == LDC_EVENT_RESET) + vio_port_up(vio); return; } @@ -875,6 +878,115 @@ err_out: return err; } +static LIST_HEAD(vnet_list); +static DEFINE_MUTEX(vnet_list_mutex); + +static struct vnet * __devinit vnet_new(const u64 *local_mac) +{ + struct net_device *dev; + struct vnet *vp; + int err, i; + + dev = alloc_etherdev(sizeof(*vp)); + if (!dev) { + printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n"); + return ERR_PTR(-ENOMEM); + } + + for (i = 0; i < ETH_ALEN; i++) + dev->dev_addr[i] = (*local_mac >> (5 - i) * 8) & 0xff; + + memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); + + vp = netdev_priv(dev); + + spin_lock_init(&vp->lock); + vp->dev = dev; + + INIT_LIST_HEAD(&vp->port_list); + for (i = 0; i < VNET_PORT_HASH_SIZE; i++) + INIT_HLIST_HEAD(&vp->port_hash[i]); + INIT_LIST_HEAD(&vp->list); + vp->local_mac = *local_mac; + + dev->open = vnet_open; + dev->stop = vnet_close; + dev->set_multicast_list = vnet_set_rx_mode; + dev->set_mac_address = vnet_set_mac_addr; + dev->tx_timeout = vnet_tx_timeout; + dev->ethtool_ops = &vnet_ethtool_ops; + dev->watchdog_timeo = VNET_TX_TIMEOUT; + dev->change_mtu = vnet_change_mtu; + dev->hard_start_xmit 
= vnet_start_xmit; + + err = register_netdev(dev); + if (err) { + printk(KERN_ERR PFX "Cannot register net device, " + "aborting.\n"); + goto err_out_free_dev; + } + + printk(KERN_INFO "%s: Sun LDOM vnet ", dev->name); + + for (i = 0; i < 6; i++) + printk("%2.2x%c", dev->dev_addr[i], i == 5 ? '\n' : ':'); + + list_add(&vp->list, &vnet_list); + + return vp; + +err_out_free_dev: + free_netdev(dev); + + return ERR_PTR(err); +} + +static struct vnet * __devinit vnet_find_or_create(const u64 *local_mac) +{ + struct vnet *iter, *vp; + + mutex_lock(&vnet_list_mutex); + vp = NULL; + list_for_each_entry(iter, &vnet_list, list) { + if (iter->local_mac == *local_mac) { + vp = iter; + break; + } + } + if (!vp) + vp = vnet_new(local_mac); + mutex_unlock(&vnet_list_mutex); + + return vp; +} + +static const char *local_mac_prop = "local-mac-address"; + +static struct vnet * __devinit vnet_find_parent(struct mdesc_handle *hp, + u64 port_node) +{ + const u64 *local_mac = NULL; + u64 a; + + mdesc_for_each_arc(a, hp, port_node, MDESC_ARC_TYPE_BACK) { + u64 target = mdesc_arc_target(hp, a); + const char *name; + + name = mdesc_get_property(hp, target, "name", NULL); + if (!name || strcmp(name, "network")) + continue; + + local_mac = mdesc_get_property(hp, target, + local_mac_prop, NULL); + if (local_mac) + break; + } + if (!local_mac) + return ERR_PTR(-ENODEV); + + return vnet_find_or_create(local_mac); +} + static struct ldc_channel_config vnet_ldc_cfg = { .event = vnet_event, .mtu = 64, @@ -887,6 +999,14 @@ static struct vio_driver_ops vnet_vio_ops = { .handshake_complete = vnet_handshake_complete, }; +static void print_version(void) +{ + static int version_printed; + + if (version_printed++ == 0) + printk(KERN_INFO "%s", version); +} + const char *remote_macaddr_prop = "remote-mac-address"; static int __devinit vnet_port_probe(struct vio_dev *vdev, @@ -899,14 +1019,17 @@ static int __devinit vnet_port_probe(struct vio_dev *vdev, const u64 *rmac; int len, i, err, switch_port; - vp = dev_get_drvdata(vdev->dev.parent); - if (!vp) { - printk(KERN_ERR PFX "Cannot find port parent vnet.\n"); - return -ENODEV; - } + print_version(); hp = mdesc_grab(); + vp = vnet_find_parent(hp, vdev->mp); + if (IS_ERR(vp)) { + printk(KERN_ERR PFX "Cannot find port parent vnet.\n"); + err = PTR_ERR(vp); + goto err_out_put_mdesc; + } + rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len); err = -ENODEV; if (!rmac) { @@ -1025,139 +1148,14 @@ static struct vio_driver vnet_port_driver = { } }; -const char *local_mac_prop = "local-mac-address"; - -static int __devinit vnet_probe(struct vio_dev *vdev, - const struct vio_device_id *id) -{ - static int vnet_version_printed; - struct mdesc_handle *hp; - struct net_device *dev; - struct vnet *vp; - const u64 *mac; - int err, i, len; - - if (vnet_version_printed++ == 0) - printk(KERN_INFO "%s", version); - - hp = mdesc_grab(); - - mac = mdesc_get_property(hp, vdev->mp, local_mac_prop, &len); - if (!mac) { - printk(KERN_ERR PFX "vnet lacks %s property.\n", - local_mac_prop); - err = -ENODEV; - goto err_out; - } - - dev = alloc_etherdev(sizeof(*vp)); - if (!dev) { - printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n"); - err = -ENOMEM; - goto err_out; - } - - for (i = 0; i < ETH_ALEN; i++) - dev->dev_addr[i] = (*mac >> (5 - i) * 8) & 0xff; - - memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); - - SET_NETDEV_DEV(dev, &vdev->dev); - - vp = netdev_priv(dev); - - spin_lock_init(&vp->lock); - vp->dev = dev; - vp->vdev = vdev; - - INIT_LIST_HEAD(&vp->port_list); - for (i = 0; 
i < VNET_PORT_HASH_SIZE; i++) - INIT_HLIST_HEAD(&vp->port_hash[i]); - - dev->open = vnet_open; - dev->stop = vnet_close; - dev->set_multicast_list = vnet_set_rx_mode; - dev->set_mac_address = vnet_set_mac_addr; - dev->tx_timeout = vnet_tx_timeout; - dev->ethtool_ops = &vnet_ethtool_ops; - dev->watchdog_timeo = VNET_TX_TIMEOUT; - dev->change_mtu = vnet_change_mtu; - dev->hard_start_xmit = vnet_start_xmit; - - err = register_netdev(dev); - if (err) { - printk(KERN_ERR PFX "Cannot register net device, " - "aborting.\n"); - goto err_out_free_dev; - } - - printk(KERN_INFO "%s: Sun LDOM vnet ", dev->name); - - for (i = 0; i < 6; i++) - printk("%2.2x%c", dev->dev_addr[i], i == 5 ? '\n' : ':'); - - dev_set_drvdata(&vdev->dev, vp); - - mdesc_release(hp); - - return 0; - -err_out_free_dev: - free_netdev(dev); - -err_out: - mdesc_release(hp); - return err; -} - -static int vnet_remove(struct vio_dev *vdev) -{ - - struct vnet *vp = dev_get_drvdata(&vdev->dev); - - if (vp) { - /* XXX unregister port, or at least check XXX */ - unregister_netdevice(vp->dev); - dev_set_drvdata(&vdev->dev, NULL); - } - return 0; -} - -static struct vio_device_id vnet_match[] = { - { - .type = "network", - }, - {}, -}; -MODULE_DEVICE_TABLE(vio, vnet_match); - -static struct vio_driver vnet_driver = { - .id_table = vnet_match, - .probe = vnet_probe, - .remove = vnet_remove, - .driver = { - .name = "vnet", - .owner = THIS_MODULE, - } -}; - static int __init vnet_init(void) { - int err = vio_register_driver(&vnet_driver); - - if (!err) { - err = vio_register_driver(&vnet_port_driver); - if (err) - vio_unregister_driver(&vnet_driver); - } - - return err; + return vio_register_driver(&vnet_port_driver); } static void __exit vnet_exit(void) { vio_unregister_driver(&vnet_port_driver); - vio_unregister_driver(&vnet_driver); } module_init(vnet_init); diff --git a/drivers/net/sunvnet.h b/drivers/net/sunvnet.h index 1c887302d46d..7d3a0cac727b 100644 --- a/drivers/net/sunvnet.h +++ b/drivers/net/sunvnet.h @@ -60,11 +60,13 @@ struct vnet { struct net_device *dev; u32 msg_enable; - struct vio_dev *vdev; struct list_head port_list; struct hlist_head port_hash[VNET_PORT_HASH_SIZE]; + + struct list_head list; + u64 local_mac; }; #endif /* _SUNVNET_H */ diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c new file mode 100644 index 000000000000..489f69c5d6ca --- /dev/null +++ b/drivers/net/xen-netfront.c @@ -0,0 +1,1863 @@ +/* + * Virtual network driver for conversing with remote driver backends. + * + * Copyright (c) 2002-2005, K A Fraser + * Copyright (c) 2005, XenSource Ltd + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation; or, when distributed + * separately from the Linux kernel or incorporated into other + * software packages, subject to the following license: + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this source file (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, modify, + * merge, publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/ethtool.h> +#include <linux/if_ether.h> +#include <linux/tcp.h> +#include <linux/udp.h> +#include <linux/moduleparam.h> +#include <linux/mm.h> +#include <net/ip.h> + +#include <xen/xenbus.h> +#include <xen/events.h> +#include <xen/page.h> +#include <xen/grant_table.h> + +#include <xen/interface/io/netif.h> +#include <xen/interface/memory.h> +#include <xen/interface/grant_table.h> + +static struct ethtool_ops xennet_ethtool_ops; + +struct netfront_cb { + struct page *page; + unsigned offset; +}; + +#define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb)) + +#define RX_COPY_THRESHOLD 256 + +#define GRANT_INVALID_REF 0 + +#define NET_TX_RING_SIZE __RING_SIZE((struct xen_netif_tx_sring *)0, PAGE_SIZE) +#define NET_RX_RING_SIZE __RING_SIZE((struct xen_netif_rx_sring *)0, PAGE_SIZE) +#define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) + +struct netfront_info { + struct list_head list; + struct net_device *netdev; + + struct net_device_stats stats; + + struct xen_netif_tx_front_ring tx; + struct xen_netif_rx_front_ring rx; + + spinlock_t tx_lock; + spinlock_t rx_lock; + + unsigned int evtchn; + + /* Receive-ring batched refills. */ +#define RX_MIN_TARGET 8 +#define RX_DFL_MIN_TARGET 64 +#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) + unsigned rx_min_target, rx_max_target, rx_target; + struct sk_buff_head rx_batch; + + struct timer_list rx_refill_timer; + + /* + * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries + * are linked from tx_skb_freelist through skb_entry.link. + * + * NB. Freelist index entries are always going to be less than + * PAGE_OFFSET, whereas pointers to skbs will always be equal or + * greater than PAGE_OFFSET: we use this property to distinguish + * them. + */ + union skb_entry { + struct sk_buff *skb; + unsigned link; + } tx_skbs[NET_TX_RING_SIZE]; + grant_ref_t gref_tx_head; + grant_ref_t grant_tx_ref[NET_TX_RING_SIZE]; + unsigned tx_skb_freelist; + + struct sk_buff *rx_skbs[NET_RX_RING_SIZE]; + grant_ref_t gref_rx_head; + grant_ref_t grant_rx_ref[NET_RX_RING_SIZE]; + + struct xenbus_device *xbdev; + int tx_ring_ref; + int rx_ring_ref; + + unsigned long rx_pfn_array[NET_RX_RING_SIZE]; + struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1]; + struct mmu_update rx_mmu[NET_RX_RING_SIZE]; +}; + +struct netfront_rx_info { + struct xen_netif_rx_response rx; + struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1]; +}; + +/* + * Access macros for acquiring freeing slots in tx_skbs[]. 
+ */ + +static void add_id_to_freelist(unsigned *head, union skb_entry *list, + unsigned short id) +{ + list[id].link = *head; + *head = id; +} + +static unsigned short get_id_from_freelist(unsigned *head, + union skb_entry *list) +{ + unsigned int id = *head; + *head = list[id].link; + return id; +} + +static int xennet_rxidx(RING_IDX idx) +{ + return idx & (NET_RX_RING_SIZE - 1); +} + +static struct sk_buff *xennet_get_rx_skb(struct netfront_info *np, + RING_IDX ri) +{ + int i = xennet_rxidx(ri); + struct sk_buff *skb = np->rx_skbs[i]; + np->rx_skbs[i] = NULL; + return skb; +} + +static grant_ref_t xennet_get_rx_ref(struct netfront_info *np, + RING_IDX ri) +{ + int i = xennet_rxidx(ri); + grant_ref_t ref = np->grant_rx_ref[i]; + np->grant_rx_ref[i] = GRANT_INVALID_REF; + return ref; +} + +#ifdef CONFIG_SYSFS +static int xennet_sysfs_addif(struct net_device *netdev); +static void xennet_sysfs_delif(struct net_device *netdev); +#else /* !CONFIG_SYSFS */ +#define xennet_sysfs_addif(dev) (0) +#define xennet_sysfs_delif(dev) do { } while (0) +#endif + +static int xennet_can_sg(struct net_device *dev) +{ + return dev->features & NETIF_F_SG; +} + + +static void rx_refill_timeout(unsigned long data) +{ + struct net_device *dev = (struct net_device *)data; + netif_rx_schedule(dev); +} + +static int netfront_tx_slot_available(struct netfront_info *np) +{ + return ((np->tx.req_prod_pvt - np->tx.rsp_cons) < + (TX_MAX_TARGET - MAX_SKB_FRAGS - 2)); +} + +static void xennet_maybe_wake_tx(struct net_device *dev) +{ + struct netfront_info *np = netdev_priv(dev); + + if (unlikely(netif_queue_stopped(dev)) && + netfront_tx_slot_available(np) && + likely(netif_running(dev))) + netif_wake_queue(dev); +} + +static void xennet_alloc_rx_buffers(struct net_device *dev) +{ + unsigned short id; + struct netfront_info *np = netdev_priv(dev); + struct sk_buff *skb; + struct page *page; + int i, batch_target, notify; + RING_IDX req_prod = np->rx.req_prod_pvt; + struct xen_memory_reservation reservation; + grant_ref_t ref; + unsigned long pfn; + void *vaddr; + int nr_flips; + struct xen_netif_rx_request *req; + + if (unlikely(!netif_carrier_ok(dev))) + return; + + /* + * Allocate skbuffs greedily, even though we batch updates to the + * receive ring. This creates a less bursty demand on the memory + * allocator, so should reduce the chance of failed allocation requests + * both for ourself and for other kernel subsystems. + */ + batch_target = np->rx_target - (req_prod - np->rx.rsp_cons); + for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) { + skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!skb)) + goto no_skb; + + page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); + if (!page) { + kfree_skb(skb); +no_skb: + /* Any skbuffs queued for refill? Force them out. */ + if (i != 0) + goto refill; + /* Could not allocate any skbuffs. Try again later. */ + mod_timer(&np->rx_refill_timer, + jiffies + (HZ/10)); + break; + } + + skb_shinfo(skb)->frags[0].page = page; + skb_shinfo(skb)->nr_frags = 1; + __skb_queue_tail(&np->rx_batch, skb); + } + + /* Is the batch large enough to be worthwhile? */ + if (i < (np->rx_target/2)) { + if (req_prod > np->rx.sring->req_prod) + goto push; + return; + } + + /* Adjust our fill target if we risked running out of buffers. 
*/ + if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) && + ((np->rx_target *= 2) > np->rx_max_target)) + np->rx_target = np->rx_max_target; + + refill: + for (nr_flips = i = 0; ; i++) { + skb = __skb_dequeue(&np->rx_batch); + if (skb == NULL) + break; + + skb->dev = dev; + + id = xennet_rxidx(req_prod + i); + + BUG_ON(np->rx_skbs[id]); + np->rx_skbs[id] = skb; + + ref = gnttab_claim_grant_reference(&np->gref_rx_head); + BUG_ON((signed short)ref < 0); + np->grant_rx_ref[id] = ref; + + pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page); + vaddr = page_address(skb_shinfo(skb)->frags[0].page); + + req = RING_GET_REQUEST(&np->rx, req_prod + i); + gnttab_grant_foreign_access_ref(ref, + np->xbdev->otherend_id, + pfn_to_mfn(pfn), + 0); + + req->id = id; + req->gref = ref; + } + + if (nr_flips != 0) { + reservation.extent_start = np->rx_pfn_array; + reservation.nr_extents = nr_flips; + reservation.extent_order = 0; + reservation.address_bits = 0; + reservation.domid = DOMID_SELF; + + if (!xen_feature(XENFEAT_auto_translated_physmap)) { + /* After all PTEs have been zapped, flush the TLB. */ + np->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = + UVMF_TLB_FLUSH|UVMF_ALL; + + /* Give away a batch of pages. */ + np->rx_mcl[i].op = __HYPERVISOR_memory_op; + np->rx_mcl[i].args[0] = XENMEM_decrease_reservation; + np->rx_mcl[i].args[1] = (unsigned long)&reservation; + + /* Zap PTEs and give away pages in one big + * multicall. */ + (void)HYPERVISOR_multicall(np->rx_mcl, i+1); + + /* Check return status of HYPERVISOR_memory_op(). */ + if (unlikely(np->rx_mcl[i].result != i)) + panic("Unable to reduce memory reservation\n"); + } else { + if (HYPERVISOR_memory_op(XENMEM_decrease_reservation, + &reservation) != i) + panic("Unable to reduce memory reservation\n"); + } + } else { + wmb(); /* barrier so backend seens requests */ + } + + /* Above is a suitable barrier to ensure backend will see requests. */ + np->rx.req_prod_pvt = req_prod + i; + push: + RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify); + if (notify) + notify_remote_via_irq(np->netdev->irq); +} + +static int xennet_open(struct net_device *dev) +{ + struct netfront_info *np = netdev_priv(dev); + + memset(&np->stats, 0, sizeof(np->stats)); + + spin_lock_bh(&np->rx_lock); + if (netif_carrier_ok(dev)) { + xennet_alloc_rx_buffers(dev); + np->rx.sring->rsp_event = np->rx.rsp_cons + 1; + if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) + netif_rx_schedule(dev); + } + spin_unlock_bh(&np->rx_lock); + + xennet_maybe_wake_tx(dev); + + return 0; +} + +static void xennet_tx_buf_gc(struct net_device *dev) +{ + RING_IDX cons, prod; + unsigned short id; + struct netfront_info *np = netdev_priv(dev); + struct sk_buff *skb; + + BUG_ON(!netif_carrier_ok(dev)); + + do { + prod = np->tx.sring->rsp_prod; + rmb(); /* Ensure we see responses up to 'rp'. 
*/ + + for (cons = np->tx.rsp_cons; cons != prod; cons++) { + struct xen_netif_tx_response *txrsp; + + txrsp = RING_GET_RESPONSE(&np->tx, cons); + if (txrsp->status == NETIF_RSP_NULL) + continue; + + id = txrsp->id; + skb = np->tx_skbs[id].skb; + if (unlikely(gnttab_query_foreign_access( + np->grant_tx_ref[id]) != 0)) { + printk(KERN_ALERT "xennet_tx_buf_gc: warning " + "-- grant still in use by backend " + "domain.\n"); + BUG(); + } + gnttab_end_foreign_access_ref( + np->grant_tx_ref[id], GNTMAP_readonly); + gnttab_release_grant_reference( + &np->gref_tx_head, np->grant_tx_ref[id]); + np->grant_tx_ref[id] = GRANT_INVALID_REF; + add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id); + dev_kfree_skb_irq(skb); + } + + np->tx.rsp_cons = prod; + + /* + * Set a new event, then check for race with update of tx_cons. + * Note that it is essential to schedule a callback, no matter + * how few buffers are pending. Even if there is space in the + * transmit ring, higher layers may be blocked because too much + * data is outstanding: in such cases notification from Xen is + * likely to be the only kick that we'll get. + */ + np->tx.sring->rsp_event = + prod + ((np->tx.sring->req_prod - prod) >> 1) + 1; + mb(); /* update shared area */ + } while ((cons == prod) && (prod != np->tx.sring->rsp_prod)); + + xennet_maybe_wake_tx(dev); +} + +static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev, + struct xen_netif_tx_request *tx) +{ + struct netfront_info *np = netdev_priv(dev); + char *data = skb->data; + unsigned long mfn; + RING_IDX prod = np->tx.req_prod_pvt; + int frags = skb_shinfo(skb)->nr_frags; + unsigned int offset = offset_in_page(data); + unsigned int len = skb_headlen(skb); + unsigned int id; + grant_ref_t ref; + int i; + + /* While the header overlaps a page boundary (including being + larger than a page), split it it into page-sized chunks. */ + while (len > PAGE_SIZE - offset) { + tx->size = PAGE_SIZE - offset; + tx->flags |= NETTXF_more_data; + len -= tx->size; + data += tx->size; + offset = 0; + + id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs); + np->tx_skbs[id].skb = skb_get(skb); + tx = RING_GET_REQUEST(&np->tx, prod++); + tx->id = id; + ref = gnttab_claim_grant_reference(&np->gref_tx_head); + BUG_ON((signed short)ref < 0); + + mfn = virt_to_mfn(data); + gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, + mfn, GNTMAP_readonly); + + tx->gref = np->grant_tx_ref[id] = ref; + tx->offset = offset; + tx->size = len; + tx->flags = 0; + } + + /* Grant backend access to each skb fragment page. 
*/ + for (i = 0; i < frags; i++) { + skb_frag_t *frag = skb_shinfo(skb)->frags + i; + + tx->flags |= NETTXF_more_data; + + id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs); + np->tx_skbs[id].skb = skb_get(skb); + tx = RING_GET_REQUEST(&np->tx, prod++); + tx->id = id; + ref = gnttab_claim_grant_reference(&np->gref_tx_head); + BUG_ON((signed short)ref < 0); + + mfn = pfn_to_mfn(page_to_pfn(frag->page)); + gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, + mfn, GNTMAP_readonly); + + tx->gref = np->grant_tx_ref[id] = ref; + tx->offset = frag->page_offset; + tx->size = frag->size; + tx->flags = 0; + } + + np->tx.req_prod_pvt = prod; +} + +static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + unsigned short id; + struct netfront_info *np = netdev_priv(dev); + struct xen_netif_tx_request *tx; + struct xen_netif_extra_info *extra; + char *data = skb->data; + RING_IDX i; + grant_ref_t ref; + unsigned long mfn; + int notify; + int frags = skb_shinfo(skb)->nr_frags; + unsigned int offset = offset_in_page(data); + unsigned int len = skb_headlen(skb); + + frags += (offset + len + PAGE_SIZE - 1) / PAGE_SIZE; + if (unlikely(frags > MAX_SKB_FRAGS + 1)) { + printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n", + frags); + dump_stack(); + goto drop; + } + + spin_lock_irq(&np->tx_lock); + + if (unlikely(!netif_carrier_ok(dev) || + (frags > 1 && !xennet_can_sg(dev)) || + netif_needs_gso(dev, skb))) { + spin_unlock_irq(&np->tx_lock); + goto drop; + } + + i = np->tx.req_prod_pvt; + + id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs); + np->tx_skbs[id].skb = skb; + + tx = RING_GET_REQUEST(&np->tx, i); + + tx->id = id; + ref = gnttab_claim_grant_reference(&np->gref_tx_head); + BUG_ON((signed short)ref < 0); + mfn = virt_to_mfn(data); + gnttab_grant_foreign_access_ref( + ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly); + tx->gref = np->grant_tx_ref[id] = ref; + tx->offset = offset; + tx->size = len; + extra = NULL; + + tx->flags = 0; + if (skb->ip_summed == CHECKSUM_PARTIAL) + /* local packet? */ + tx->flags |= NETTXF_csum_blank | NETTXF_data_validated; + else if (skb->ip_summed == CHECKSUM_UNNECESSARY) + /* remote but checksummed. 
*/ + tx->flags |= NETTXF_data_validated; + + if (skb_shinfo(skb)->gso_size) { + struct xen_netif_extra_info *gso; + + gso = (struct xen_netif_extra_info *) + RING_GET_REQUEST(&np->tx, ++i); + + if (extra) + extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE; + else + tx->flags |= NETTXF_extra_info; + + gso->u.gso.size = skb_shinfo(skb)->gso_size; + gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4; + gso->u.gso.pad = 0; + gso->u.gso.features = 0; + + gso->type = XEN_NETIF_EXTRA_TYPE_GSO; + gso->flags = 0; + extra = gso; + } + + np->tx.req_prod_pvt = i + 1; + + xennet_make_frags(skb, dev, tx); + tx->size = skb->len; + + RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify); + if (notify) + notify_remote_via_irq(np->netdev->irq); + + xennet_tx_buf_gc(dev); + + if (!netfront_tx_slot_available(np)) + netif_stop_queue(dev); + + spin_unlock_irq(&np->tx_lock); + + np->stats.tx_bytes += skb->len; + np->stats.tx_packets++; + + return 0; + + drop: + np->stats.tx_dropped++; + dev_kfree_skb(skb); + return 0; +} + +static int xennet_close(struct net_device *dev) +{ + struct netfront_info *np = netdev_priv(dev); + netif_stop_queue(np->netdev); + return 0; +} + +static struct net_device_stats *xennet_get_stats(struct net_device *dev) +{ + struct netfront_info *np = netdev_priv(dev); + return &np->stats; +} + +static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb, + grant_ref_t ref) +{ + int new = xennet_rxidx(np->rx.req_prod_pvt); + + BUG_ON(np->rx_skbs[new]); + np->rx_skbs[new] = skb; + np->grant_rx_ref[new] = ref; + RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new; + RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref; + np->rx.req_prod_pvt++; +} + +static int xennet_get_extras(struct netfront_info *np, + struct xen_netif_extra_info *extras, + RING_IDX rp) + +{ + struct xen_netif_extra_info *extra; + struct device *dev = &np->netdev->dev; + RING_IDX cons = np->rx.rsp_cons; + int err = 0; + + do { + struct sk_buff *skb; + grant_ref_t ref; + + if (unlikely(cons + 1 == rp)) { + if (net_ratelimit()) + dev_warn(dev, "Missing extra info\n"); + err = -EBADR; + break; + } + + extra = (struct xen_netif_extra_info *) + RING_GET_RESPONSE(&np->rx, ++cons); + + if (unlikely(!extra->type || + extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) { + if (net_ratelimit()) + dev_warn(dev, "Invalid extra type: %d\n", + extra->type); + err = -EINVAL; + } else { + memcpy(&extras[extra->type - 1], extra, + sizeof(*extra)); + } + + skb = xennet_get_rx_skb(np, cons); + ref = xennet_get_rx_ref(np, cons); + xennet_move_rx_slot(np, skb, ref); + } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE); + + np->rx.rsp_cons = cons; + return err; +} + +static int xennet_get_responses(struct netfront_info *np, + struct netfront_rx_info *rinfo, RING_IDX rp, + struct sk_buff_head *list) +{ + struct xen_netif_rx_response *rx = &rinfo->rx; + struct xen_netif_extra_info *extras = rinfo->extras; + struct device *dev = &np->netdev->dev; + RING_IDX cons = np->rx.rsp_cons; + struct sk_buff *skb = xennet_get_rx_skb(np, cons); + grant_ref_t ref = xennet_get_rx_ref(np, cons); + int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD); + int frags = 1; + int err = 0; + unsigned long ret; + + if (rx->flags & NETRXF_extra_info) { + err = xennet_get_extras(np, extras, rp); + cons = np->rx.rsp_cons; + } + + for (;;) { + if (unlikely(rx->status < 0 || + rx->offset + rx->status > PAGE_SIZE)) { + if (net_ratelimit()) + dev_warn(dev, "rx->offset: %x, size: %u\n", + rx->offset, rx->status); + xennet_move_rx_slot(np, skb, ref); + err = -EINVAL; + 
goto next; + } + + /* + * This definitely indicates a bug, either in this driver or in + * the backend driver. In future this should flag the bad + * situation to the system controller to reboot the backed. + */ + if (ref == GRANT_INVALID_REF) { + if (net_ratelimit()) + dev_warn(dev, "Bad rx response id %d.\n", + rx->id); + err = -EINVAL; + goto next; + } + + ret = gnttab_end_foreign_access_ref(ref, 0); + BUG_ON(!ret); + + gnttab_release_grant_reference(&np->gref_rx_head, ref); + + __skb_queue_tail(list, skb); + +next: + if (!(rx->flags & NETRXF_more_data)) + break; + + if (cons + frags == rp) { + if (net_ratelimit()) + dev_warn(dev, "Need more frags\n"); + err = -ENOENT; + break; + } + + rx = RING_GET_RESPONSE(&np->rx, cons + frags); + skb = xennet_get_rx_skb(np, cons + frags); + ref = xennet_get_rx_ref(np, cons + frags); + frags++; + } + + if (unlikely(frags > max)) { + if (net_ratelimit()) + dev_warn(dev, "Too many frags\n"); + err = -E2BIG; + } + + if (unlikely(err)) + np->rx.rsp_cons = cons + frags; + + return err; +} + +static int xennet_set_skb_gso(struct sk_buff *skb, + struct xen_netif_extra_info *gso) +{ + if (!gso->u.gso.size) { + if (net_ratelimit()) + printk(KERN_WARNING "GSO size must not be zero.\n"); + return -EINVAL; + } + + /* Currently only TCPv4 S.O. is supported. */ + if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) { + if (net_ratelimit()) + printk(KERN_WARNING "Bad GSO type %d.\n", gso->u.gso.type); + return -EINVAL; + } + + skb_shinfo(skb)->gso_size = gso->u.gso.size; + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; + + /* Header must be checked, and gso_segs computed. */ + skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; + skb_shinfo(skb)->gso_segs = 0; + + return 0; +} + +static RING_IDX xennet_fill_frags(struct netfront_info *np, + struct sk_buff *skb, + struct sk_buff_head *list) +{ + struct skb_shared_info *shinfo = skb_shinfo(skb); + int nr_frags = shinfo->nr_frags; + RING_IDX cons = np->rx.rsp_cons; + skb_frag_t *frag = shinfo->frags + nr_frags; + struct sk_buff *nskb; + + while ((nskb = __skb_dequeue(list))) { + struct xen_netif_rx_response *rx = + RING_GET_RESPONSE(&np->rx, ++cons); + + frag->page = skb_shinfo(nskb)->frags[0].page; + frag->page_offset = rx->offset; + frag->size = rx->status; + + skb->data_len += rx->status; + + skb_shinfo(nskb)->nr_frags = 0; + kfree_skb(nskb); + + frag++; + nr_frags++; + } + + shinfo->nr_frags = nr_frags; + return cons; +} + +static int skb_checksum_setup(struct sk_buff *skb) +{ + struct iphdr *iph; + unsigned char *th; + int err = -EPROTO; + + if (skb->protocol != htons(ETH_P_IP)) + goto out; + + iph = (void *)skb->data; + th = skb->data + 4 * iph->ihl; + if (th >= skb_tail_pointer(skb)) + goto out; + + skb->csum_start = th - skb->head; + switch (iph->protocol) { + case IPPROTO_TCP: + skb->csum_offset = offsetof(struct tcphdr, check); + break; + case IPPROTO_UDP: + skb->csum_offset = offsetof(struct udphdr, check); + break; + default: + if (net_ratelimit()) + printk(KERN_ERR "Attempting to checksum a non-" + "TCP/UDP packet, dropping a protocol" + " %d packet", iph->protocol); + goto out; + } + + if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb)) + goto out; + + err = 0; + +out: + return err; +} + +static int handle_incoming_queue(struct net_device *dev, + struct sk_buff_head *rxq) +{ + struct netfront_info *np = netdev_priv(dev); + int packets_dropped = 0; + struct sk_buff *skb; + + while ((skb = __skb_dequeue(rxq)) != NULL) { + struct page *page = NETFRONT_SKB_CB(skb)->page; + void *vaddr = page_address(page); + unsigned 
offset = NETFRONT_SKB_CB(skb)->offset; + + memcpy(skb->data, vaddr + offset, + skb_headlen(skb)); + + if (page != skb_shinfo(skb)->frags[0].page) + __free_page(page); + + /* Ethernet work: Delayed to here as it peeks the header. */ + skb->protocol = eth_type_trans(skb, dev); + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + if (skb_checksum_setup(skb)) { + kfree_skb(skb); + packets_dropped++; + np->stats.rx_errors++; + continue; + } + } + + np->stats.rx_packets++; + np->stats.rx_bytes += skb->len; + + /* Pass it up. */ + netif_receive_skb(skb); + dev->last_rx = jiffies; + } + + return packets_dropped; +} + +static int xennet_poll(struct net_device *dev, int *pbudget) +{ + struct netfront_info *np = netdev_priv(dev); + struct sk_buff *skb; + struct netfront_rx_info rinfo; + struct xen_netif_rx_response *rx = &rinfo.rx; + struct xen_netif_extra_info *extras = rinfo.extras; + RING_IDX i, rp; + int work_done, budget, more_to_do = 1; + struct sk_buff_head rxq; + struct sk_buff_head errq; + struct sk_buff_head tmpq; + unsigned long flags; + unsigned int len; + int err; + + spin_lock(&np->rx_lock); + + if (unlikely(!netif_carrier_ok(dev))) { + spin_unlock(&np->rx_lock); + return 0; + } + + skb_queue_head_init(&rxq); + skb_queue_head_init(&errq); + skb_queue_head_init(&tmpq); + + budget = *pbudget; + if (budget > dev->quota) + budget = dev->quota; + rp = np->rx.sring->rsp_prod; + rmb(); /* Ensure we see queued responses up to 'rp'. */ + + i = np->rx.rsp_cons; + work_done = 0; + while ((i != rp) && (work_done < budget)) { + memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx)); + memset(extras, 0, sizeof(rinfo.extras)); + + err = xennet_get_responses(np, &rinfo, rp, &tmpq); + + if (unlikely(err)) { +err: + while ((skb = __skb_dequeue(&tmpq))) + __skb_queue_tail(&errq, skb); + np->stats.rx_errors++; + i = np->rx.rsp_cons; + continue; + } + + skb = __skb_dequeue(&tmpq); + + if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { + struct xen_netif_extra_info *gso; + gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; + + if (unlikely(xennet_set_skb_gso(skb, gso))) { + __skb_queue_head(&tmpq, skb); + np->rx.rsp_cons += skb_queue_len(&tmpq); + goto err; + } + } + + NETFRONT_SKB_CB(skb)->page = skb_shinfo(skb)->frags[0].page; + NETFRONT_SKB_CB(skb)->offset = rx->offset; + + len = rx->status; + if (len > RX_COPY_THRESHOLD) + len = RX_COPY_THRESHOLD; + skb_put(skb, len); + + if (rx->status > len) { + skb_shinfo(skb)->frags[0].page_offset = + rx->offset + len; + skb_shinfo(skb)->frags[0].size = rx->status - len; + skb->data_len = rx->status - len; + } else { + skb_shinfo(skb)->frags[0].page = NULL; + skb_shinfo(skb)->nr_frags = 0; + } + + i = xennet_fill_frags(np, skb, &tmpq); + + /* + * Truesize approximates the size of true data plus + * any supervisor overheads. Adding hypervisor + * overheads has been shown to significantly reduce + * achievable bandwidth with the default receive + * buffer size. It is therefore not wise to account + * for it here. + * + * After alloc_skb(RX_COPY_THRESHOLD), truesize is set + * to RX_COPY_THRESHOLD + the supervisor + * overheads. Here, we add the size of the data pulled + * in xennet_fill_frags(). + * + * We also adjust for any unused space in the main + * data area by subtracting (RX_COPY_THRESHOLD - + * len). This is especially important with drivers + * which split incoming packets into header and data, + * using only 66 bytes of the main data area (see the + * e1000 driver for example.) 
On such systems, + * without this last adjustement, our achievable + * receive throughout using the standard receive + * buffer size was cut by 25%(!!!). + */ + skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len); + skb->len += skb->data_len; + + if (rx->flags & NETRXF_csum_blank) + skb->ip_summed = CHECKSUM_PARTIAL; + else if (rx->flags & NETRXF_data_validated) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + __skb_queue_tail(&rxq, skb); + + np->rx.rsp_cons = ++i; + work_done++; + } + + while ((skb = __skb_dequeue(&errq))) + kfree_skb(skb); + + work_done -= handle_incoming_queue(dev, &rxq); + + /* If we get a callback with very few responses, reduce fill target. */ + /* NB. Note exponential increase, linear decrease. */ + if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) > + ((3*np->rx_target) / 4)) && + (--np->rx_target < np->rx_min_target)) + np->rx_target = np->rx_min_target; + + xennet_alloc_rx_buffers(dev); + + *pbudget -= work_done; + dev->quota -= work_done; + + if (work_done < budget) { + local_irq_save(flags); + + RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do); + if (!more_to_do) + __netif_rx_complete(dev); + + local_irq_restore(flags); + } + + spin_unlock(&np->rx_lock); + + return more_to_do; +} + +static int xennet_change_mtu(struct net_device *dev, int mtu) +{ + int max = xennet_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN; + + if (mtu > max) + return -EINVAL; + dev->mtu = mtu; + return 0; +} + +static void xennet_release_tx_bufs(struct netfront_info *np) +{ + struct sk_buff *skb; + int i; + + for (i = 0; i < NET_TX_RING_SIZE; i++) { + /* Skip over entries which are actually freelist references */ + if ((unsigned long)np->tx_skbs[i].skb < PAGE_OFFSET) + continue; + + skb = np->tx_skbs[i].skb; + gnttab_end_foreign_access_ref(np->grant_tx_ref[i], + GNTMAP_readonly); + gnttab_release_grant_reference(&np->gref_tx_head, + np->grant_tx_ref[i]); + np->grant_tx_ref[i] = GRANT_INVALID_REF; + add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i); + dev_kfree_skb_irq(skb); + } +} + +static void xennet_release_rx_bufs(struct netfront_info *np) +{ + struct mmu_update *mmu = np->rx_mmu; + struct multicall_entry *mcl = np->rx_mcl; + struct sk_buff_head free_list; + struct sk_buff *skb; + unsigned long mfn; + int xfer = 0, noxfer = 0, unused = 0; + int id, ref; + + dev_warn(&np->netdev->dev, "%s: fix me for copying receiver.\n", + __func__); + return; + + skb_queue_head_init(&free_list); + + spin_lock_bh(&np->rx_lock); + + for (id = 0; id < NET_RX_RING_SIZE; id++) { + ref = np->grant_rx_ref[id]; + if (ref == GRANT_INVALID_REF) { + unused++; + continue; + } + + skb = np->rx_skbs[id]; + mfn = gnttab_end_foreign_transfer_ref(ref); + gnttab_release_grant_reference(&np->gref_rx_head, ref); + np->grant_rx_ref[id] = GRANT_INVALID_REF; + + if (0 == mfn) { + skb_shinfo(skb)->nr_frags = 0; + dev_kfree_skb(skb); + noxfer++; + continue; + } + + if (!xen_feature(XENFEAT_auto_translated_physmap)) { + /* Remap the page. 
*/ + struct page *page = skb_shinfo(skb)->frags[0].page; + unsigned long pfn = page_to_pfn(page); + void *vaddr = page_address(page); + + MULTI_update_va_mapping(mcl, (unsigned long)vaddr, + mfn_pte(mfn, PAGE_KERNEL), + 0); + mcl++; + mmu->ptr = ((u64)mfn << PAGE_SHIFT) + | MMU_MACHPHYS_UPDATE; + mmu->val = pfn; + mmu++; + + set_phys_to_machine(pfn, mfn); + } + __skb_queue_tail(&free_list, skb); + xfer++; + } + + dev_info(&np->netdev->dev, "%s: %d xfer, %d noxfer, %d unused\n", + __func__, xfer, noxfer, unused); + + if (xfer) { + if (!xen_feature(XENFEAT_auto_translated_physmap)) { + /* Do all the remapping work and M2P updates. */ + MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu, + 0, DOMID_SELF); + mcl++; + HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl); + } + } + + while ((skb = __skb_dequeue(&free_list)) != NULL) + dev_kfree_skb(skb); + + spin_unlock_bh(&np->rx_lock); +} + +static void xennet_uninit(struct net_device *dev) +{ + struct netfront_info *np = netdev_priv(dev); + xennet_release_tx_bufs(np); + xennet_release_rx_bufs(np); + gnttab_free_grant_references(np->gref_tx_head); + gnttab_free_grant_references(np->gref_rx_head); +} + +static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev) +{ + int i, err; + struct net_device *netdev; + struct netfront_info *np; + + netdev = alloc_etherdev(sizeof(struct netfront_info)); + if (!netdev) { + printk(KERN_WARNING "%s> alloc_etherdev failed.\n", + __func__); + return ERR_PTR(-ENOMEM); + } + + np = netdev_priv(netdev); + np->xbdev = dev; + + spin_lock_init(&np->tx_lock); + spin_lock_init(&np->rx_lock); + + skb_queue_head_init(&np->rx_batch); + np->rx_target = RX_DFL_MIN_TARGET; + np->rx_min_target = RX_DFL_MIN_TARGET; + np->rx_max_target = RX_MAX_TARGET; + + init_timer(&np->rx_refill_timer); + np->rx_refill_timer.data = (unsigned long)netdev; + np->rx_refill_timer.function = rx_refill_timeout; + + /* Initialise tx_skbs as a free chain containing every entry. */ + np->tx_skb_freelist = 0; + for (i = 0; i < NET_TX_RING_SIZE; i++) { + np->tx_skbs[i].link = i+1; + np->grant_tx_ref[i] = GRANT_INVALID_REF; + } + + /* Clear out rx_skbs */ + for (i = 0; i < NET_RX_RING_SIZE; i++) { + np->rx_skbs[i] = NULL; + np->grant_rx_ref[i] = GRANT_INVALID_REF; + } + + /* A grant for every tx ring slot */ + if (gnttab_alloc_grant_references(TX_MAX_TARGET, + &np->gref_tx_head) < 0) { + printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n"); + err = -ENOMEM; + goto exit; + } + /* A grant for every rx ring slot */ + if (gnttab_alloc_grant_references(RX_MAX_TARGET, + &np->gref_rx_head) < 0) { + printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n"); + err = -ENOMEM; + goto exit_free_tx; + } + + netdev->open = xennet_open; + netdev->hard_start_xmit = xennet_start_xmit; + netdev->stop = xennet_close; + netdev->get_stats = xennet_get_stats; + netdev->poll = xennet_poll; + netdev->uninit = xennet_uninit; + netdev->change_mtu = xennet_change_mtu; + netdev->weight = 64; + netdev->features = NETIF_F_IP_CSUM; + + SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops); + SET_MODULE_OWNER(netdev); + SET_NETDEV_DEV(netdev, &dev->dev); + + np->netdev = netdev; + + netif_carrier_off(netdev); + + return netdev; + + exit_free_tx: + gnttab_free_grant_references(np->gref_tx_head); + exit: + free_netdev(netdev); + return ERR_PTR(err); +} + +/** + * Entry point to this code when a new device is created. 
Allocate the basic + * structures and the ring buffers for communication with the backend, and + * inform the backend of the appropriate details for those. + */ +static int __devinit netfront_probe(struct xenbus_device *dev, + const struct xenbus_device_id *id) +{ + int err; + struct net_device *netdev; + struct netfront_info *info; + + netdev = xennet_create_dev(dev); + if (IS_ERR(netdev)) { + err = PTR_ERR(netdev); + xenbus_dev_fatal(dev, err, "creating netdev"); + return err; + } + + info = netdev_priv(netdev); + dev->dev.driver_data = info; + + err = register_netdev(info->netdev); + if (err) { + printk(KERN_WARNING "%s: register_netdev err=%d\n", + __func__, err); + goto fail; + } + + err = xennet_sysfs_addif(info->netdev); + if (err) { + unregister_netdev(info->netdev); + printk(KERN_WARNING "%s: add sysfs failed err=%d\n", + __func__, err); + goto fail; + } + + return 0; + + fail: + free_netdev(netdev); + dev->dev.driver_data = NULL; + return err; +} + +static void xennet_end_access(int ref, void *page) +{ + /* This frees the page as a side-effect */ + if (ref != GRANT_INVALID_REF) + gnttab_end_foreign_access(ref, 0, (unsigned long)page); +} + +static void xennet_disconnect_backend(struct netfront_info *info) +{ + /* Stop old i/f to prevent errors whilst we rebuild the state. */ + spin_lock_bh(&info->rx_lock); + spin_lock_irq(&info->tx_lock); + netif_carrier_off(info->netdev); + spin_unlock_irq(&info->tx_lock); + spin_unlock_bh(&info->rx_lock); + + if (info->netdev->irq) + unbind_from_irqhandler(info->netdev->irq, info->netdev); + info->evtchn = info->netdev->irq = 0; + + /* End access and free the pages */ + xennet_end_access(info->tx_ring_ref, info->tx.sring); + xennet_end_access(info->rx_ring_ref, info->rx.sring); + + info->tx_ring_ref = GRANT_INVALID_REF; + info->rx_ring_ref = GRANT_INVALID_REF; + info->tx.sring = NULL; + info->rx.sring = NULL; +} + +/** + * We are reconnecting to the backend, due to a suspend/resume, or a backend + * driver restart. We tear down our netif structure and recreate it, but + * leave the device-layer structures intact so that this is transparent to the + * rest of the kernel. + */ +static int netfront_resume(struct xenbus_device *dev) +{ + struct netfront_info *info = dev->dev.driver_data; + + dev_dbg(&dev->dev, "%s\n", dev->nodename); + + xennet_disconnect_backend(info); + return 0; +} + +static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]) +{ + char *s, *e, *macstr; + int i; + + macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL); + if (IS_ERR(macstr)) + return PTR_ERR(macstr); + + for (i = 0; i < ETH_ALEN; i++) { + mac[i] = simple_strtoul(s, &e, 16); + if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) { + kfree(macstr); + return -ENOENT; + } + s = e+1; + } + + kfree(macstr); + return 0; +} + +static irqreturn_t xennet_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct netfront_info *np = netdev_priv(dev); + unsigned long flags; + + spin_lock_irqsave(&np->tx_lock, flags); + + if (likely(netif_carrier_ok(dev))) { + xennet_tx_buf_gc(dev); + /* Under tx_lock: protects access to rx shared-ring indexes. 
*/ + if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) + netif_rx_schedule(dev); + } + + spin_unlock_irqrestore(&np->tx_lock, flags); + + return IRQ_HANDLED; +} + +static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info) +{ + struct xen_netif_tx_sring *txs; + struct xen_netif_rx_sring *rxs; + int err; + struct net_device *netdev = info->netdev; + + info->tx_ring_ref = GRANT_INVALID_REF; + info->rx_ring_ref = GRANT_INVALID_REF; + info->rx.sring = NULL; + info->tx.sring = NULL; + netdev->irq = 0; + + err = xen_net_read_mac(dev, netdev->dev_addr); + if (err) { + xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename); + goto fail; + } + + txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_KERNEL); + if (!txs) { + err = -ENOMEM; + xenbus_dev_fatal(dev, err, "allocating tx ring page"); + goto fail; + } + SHARED_RING_INIT(txs); + FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE); + + err = xenbus_grant_ring(dev, virt_to_mfn(txs)); + if (err < 0) { + free_page((unsigned long)txs); + goto fail; + } + + info->tx_ring_ref = err; + rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_KERNEL); + if (!rxs) { + err = -ENOMEM; + xenbus_dev_fatal(dev, err, "allocating rx ring page"); + goto fail; + } + SHARED_RING_INIT(rxs); + FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE); + + err = xenbus_grant_ring(dev, virt_to_mfn(rxs)); + if (err < 0) { + free_page((unsigned long)rxs); + goto fail; + } + info->rx_ring_ref = err; + + err = xenbus_alloc_evtchn(dev, &info->evtchn); + if (err) + goto fail; + + err = bind_evtchn_to_irqhandler(info->evtchn, xennet_interrupt, + IRQF_SAMPLE_RANDOM, netdev->name, + netdev); + if (err < 0) + goto fail; + netdev->irq = err; + return 0; + + fail: + return err; +} + +/* Common code used when first setting up, and when resuming. */ +static int talk_to_backend(struct xenbus_device *dev, + struct netfront_info *info) +{ + const char *message; + struct xenbus_transaction xbt; + int err; + + /* Create shared ring, alloc event channel. 
*/
+ err = setup_netfront(dev, info);
+ if (err)
+ goto out;
+
+again:
+ err = xenbus_transaction_start(&xbt);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "starting transaction");
+ goto destroy_ring;
+ }
+
+ err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref", "%u",
+ info->tx_ring_ref);
+ if (err) {
+ message = "writing tx ring-ref";
+ goto abort_transaction;
+ }
+ err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref", "%u",
+ info->rx_ring_ref);
+ if (err) {
+ message = "writing rx ring-ref";
+ goto abort_transaction;
+ }
+ err = xenbus_printf(xbt, dev->nodename,
+ "event-channel", "%u", info->evtchn);
+ if (err) {
+ message = "writing event-channel";
+ goto abort_transaction;
+ }
+
+ err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
+ 1);
+ if (err) {
+ message = "writing request-rx-copy";
+ goto abort_transaction;
+ }
+
+ err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
+ if (err) {
+ message = "writing feature-rx-notify";
+ goto abort_transaction;
+ }
+
+ err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
+ if (err) {
+ message = "writing feature-sg";
+ goto abort_transaction;
+ }
+
+ err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
+ if (err) {
+ message = "writing feature-gso-tcpv4";
+ goto abort_transaction;
+ }
+
+ err = xenbus_transaction_end(xbt, 0);
+ if (err) {
+ if (err == -EAGAIN)
+ goto again;
+ xenbus_dev_fatal(dev, err, "completing transaction");
+ goto destroy_ring;
+ }
+
+ return 0;
+
+ abort_transaction:
+ xenbus_transaction_end(xbt, 1);
+ xenbus_dev_fatal(dev, err, "%s", message);
+ destroy_ring:
+ xennet_disconnect_backend(info);
+ out:
+ return err;
+}
+
+static int xennet_set_sg(struct net_device *dev, u32 data)
+{
+ if (data) {
+ struct netfront_info *np = netdev_priv(dev);
+ int val;
+
+ if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
+ "%d", &val) < 0)
+ val = 0;
+ if (!val)
+ return -ENOSYS;
+ } else if (dev->mtu > ETH_DATA_LEN)
+ dev->mtu = ETH_DATA_LEN;
+
+ return ethtool_op_set_sg(dev, data);
+}
+
+static int xennet_set_tso(struct net_device *dev, u32 data)
+{
+ if (data) {
+ struct netfront_info *np = netdev_priv(dev);
+ int val;
+
+ if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
+ "feature-gso-tcpv4", "%d", &val) < 0)
+ val = 0;
+ if (!val)
+ return -ENOSYS;
+ }
+
+ return ethtool_op_set_tso(dev, data);
+}
+
+static void xennet_set_features(struct net_device *dev)
+{
+ /* Turn off all GSO bits except ROBUST. */
+ dev->features &= (1 << NETIF_F_GSO_SHIFT) - 1;
+ dev->features |= NETIF_F_GSO_ROBUST;
+ xennet_set_sg(dev, 0);
+
+ /* We need checksum offload to enable scatter/gather and TSO. */
+ if (!(dev->features & NETIF_F_IP_CSUM))
+ return;
+
+ if (!xennet_set_sg(dev, 1))
+ xennet_set_tso(dev, 1);
+}
+
+static int xennet_connect(struct net_device *dev)
+{
+ struct netfront_info *np = netdev_priv(dev);
+ int i, requeue_idx, err;
+ struct sk_buff *skb;
+ grant_ref_t ref;
+ struct xen_netif_rx_request *req;
+ unsigned int feature_rx_copy;
+
+ err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
+ "feature-rx-copy", "%u", &feature_rx_copy);
+ if (err != 1)
+ feature_rx_copy = 0;
+
+ if (!feature_rx_copy) {
+ dev_info(&dev->dev,
+ "backend does not support copying receive path\n");
+ return -ENODEV;
+ }
+
+ err = talk_to_backend(np->xbdev, np);
+ if (err)
+ return err;
+
+ xennet_set_features(dev);
+
+ spin_lock_bh(&np->rx_lock);
+ spin_lock_irq(&np->tx_lock);
+
+ /* Step 1: Discard all pending TX packet fragments. 
*/ + xennet_release_tx_bufs(np); + + /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */ + for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) { + if (!np->rx_skbs[i]) + continue; + + skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i); + ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i); + req = RING_GET_REQUEST(&np->rx, requeue_idx); + + gnttab_grant_foreign_access_ref( + ref, np->xbdev->otherend_id, + pfn_to_mfn(page_to_pfn(skb_shinfo(skb)-> + frags->page)), + 0); + req->gref = ref; + req->id = requeue_idx; + + requeue_idx++; + } + + np->rx.req_prod_pvt = requeue_idx; + + /* + * Step 3: All public and private state should now be sane. Get + * ready to start sending and receiving packets and give the driver + * domain a kick because we've probably just requeued some + * packets. + */ + netif_carrier_on(np->netdev); + notify_remote_via_irq(np->netdev->irq); + xennet_tx_buf_gc(dev); + xennet_alloc_rx_buffers(dev); + + spin_unlock_irq(&np->tx_lock); + spin_unlock_bh(&np->rx_lock); + + return 0; +} + +/** + * Callback received when the backend's state changes. + */ +static void backend_changed(struct xenbus_device *dev, + enum xenbus_state backend_state) +{ + struct netfront_info *np = dev->dev.driver_data; + struct net_device *netdev = np->netdev; + + dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state)); + + switch (backend_state) { + case XenbusStateInitialising: + case XenbusStateInitialised: + case XenbusStateConnected: + case XenbusStateUnknown: + case XenbusStateClosed: + break; + + case XenbusStateInitWait: + if (dev->state != XenbusStateInitialising) + break; + if (xennet_connect(netdev) != 0) + break; + xenbus_switch_state(dev, XenbusStateConnected); + break; + + case XenbusStateClosing: + xenbus_frontend_closed(dev); + break; + } +} + +static struct ethtool_ops xennet_ethtool_ops = +{ + .get_tx_csum = ethtool_op_get_tx_csum, + .set_tx_csum = ethtool_op_set_tx_csum, + .get_sg = ethtool_op_get_sg, + .set_sg = xennet_set_sg, + .get_tso = ethtool_op_get_tso, + .set_tso = xennet_set_tso, + .get_link = ethtool_op_get_link, +}; + +#ifdef CONFIG_SYSFS +static ssize_t show_rxbuf_min(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *netdev = to_net_dev(dev); + struct netfront_info *info = netdev_priv(netdev); + + return sprintf(buf, "%u\n", info->rx_min_target); +} + +static ssize_t store_rxbuf_min(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct net_device *netdev = to_net_dev(dev); + struct netfront_info *np = netdev_priv(netdev); + char *endp; + unsigned long target; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + target = simple_strtoul(buf, &endp, 0); + if (endp == buf) + return -EBADMSG; + + if (target < RX_MIN_TARGET) + target = RX_MIN_TARGET; + if (target > RX_MAX_TARGET) + target = RX_MAX_TARGET; + + spin_lock_bh(&np->rx_lock); + if (target > np->rx_max_target) + np->rx_max_target = target; + np->rx_min_target = target; + if (target > np->rx_target) + np->rx_target = target; + + xennet_alloc_rx_buffers(netdev); + + spin_unlock_bh(&np->rx_lock); + return len; +} + +static ssize_t show_rxbuf_max(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *netdev = to_net_dev(dev); + struct netfront_info *info = netdev_priv(netdev); + + return sprintf(buf, "%u\n", info->rx_max_target); +} + +static ssize_t store_rxbuf_max(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct 
net_device *netdev = to_net_dev(dev);
+ struct netfront_info *np = netdev_priv(netdev);
+ char *endp;
+ unsigned long target;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ target = simple_strtoul(buf, &endp, 0);
+ if (endp == buf)
+ return -EBADMSG;
+
+ if (target < RX_MIN_TARGET)
+ target = RX_MIN_TARGET;
+ if (target > RX_MAX_TARGET)
+ target = RX_MAX_TARGET;
+
+ spin_lock_bh(&np->rx_lock);
+ if (target < np->rx_min_target)
+ np->rx_min_target = target;
+ np->rx_max_target = target;
+ if (target < np->rx_target)
+ np->rx_target = target;
+
+ xennet_alloc_rx_buffers(netdev);
+
+ spin_unlock_bh(&np->rx_lock);
+ return len;
+}
+
+static ssize_t show_rxbuf_cur(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *netdev = to_net_dev(dev);
+ struct netfront_info *info = netdev_priv(netdev);
+
+ return sprintf(buf, "%u\n", info->rx_target);
+}
+
+static struct device_attribute xennet_attrs[] = {
+ __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
+ __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
+ __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
+};
+
+static int xennet_sysfs_addif(struct net_device *netdev)
+{
+ int i;
+ int err;
+
+ for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
+ err = device_create_file(&netdev->dev,
+ &xennet_attrs[i]);
+ if (err)
+ goto fail;
+ }
+ return 0;
+
+ fail:
+ while (--i >= 0)
+ device_remove_file(&netdev->dev, &xennet_attrs[i]);
+ return err;
+}
+
+static void xennet_sysfs_delif(struct net_device *netdev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++)
+ device_remove_file(&netdev->dev, &xennet_attrs[i]);
+}
+
+#endif /* CONFIG_SYSFS */
+
+static struct xenbus_device_id netfront_ids[] = {
+ { "vif" },
+ { "" }
+};
+
+
+static int __devexit xennet_remove(struct xenbus_device *dev)
+{
+ struct netfront_info *info = dev->dev.driver_data;
+
+ dev_dbg(&dev->dev, "%s\n", dev->nodename);
+
+ unregister_netdev(info->netdev);
+
+ xennet_disconnect_backend(info);
+
+ del_timer_sync(&info->rx_refill_timer);
+
+ xennet_sysfs_delif(info->netdev);
+
+ free_netdev(info->netdev);
+
+ return 0;
+}
+
+static struct xenbus_driver netfront = {
+ .name = "vif",
+ .owner = THIS_MODULE,
+ .ids = netfront_ids,
+ .probe = netfront_probe,
+ .remove = __devexit_p(xennet_remove),
+ .resume = netfront_resume,
+ .otherend_changed = backend_changed,
+};
+
+static int __init netif_init(void)
+{
+ if (!is_running_on_xen())
+ return -ENODEV;
+
+ if (is_initial_xendomain())
+ return 0;
+
+ printk(KERN_INFO "Initialising Xen virtual ethernet driver.\n");
+
+ return xenbus_register_frontend(&netfront);
+}
+module_init(netif_init);
+
+
+static void __exit netif_exit(void)
+{
+ if (is_initial_xendomain())
+ return;
+
+ xenbus_unregister_driver(&netfront);
+}
+module_exit(netif_exit);
+
+MODULE_DESCRIPTION("Xen virtual network device frontend");
+MODULE_LICENSE("GPL");
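A note on the TX bookkeeping set up in xennet_create_dev() above: the free TX slots are kept as an index-linked list threaded through the tx_skbs array itself. Each free entry's link field holds the index of the next free entry, tx_skb_freelist holds the head, and the initialisation loop chains entry i to entry i+1, so an index equal to the ring size falls off the end of the array and can serve as the end-of-list marker. The following is a minimal userspace sketch of that scheme, not driver code; the slot type, function names, and ring size are illustrative (in the driver each entry is a union of an sk_buff pointer and the link field, and the real size is NET_TX_RING_SIZE).

#include <assert.h>
#include <stdio.h>

#define RING_SIZE 8	/* stands in for NET_TX_RING_SIZE; value illustrative */

/* A free entry stores the index of the next free entry; when a slot is
 * in use, the driver stores the sk_buff pointer here instead. */
struct slot {
	unsigned long link;
};

static struct slot slots[RING_SIZE];
static unsigned long freelist;

static unsigned long slot_alloc(void)
{
	unsigned long id = freelist;

	assert(id < RING_SIZE);		/* RING_SIZE marks end of list */
	freelist = slots[id].link;	/* unlink the head */
	return id;
}

static void slot_free(unsigned long id)
{
	slots[id].link = freelist;	/* push back onto the head */
	freelist = id;
}

int main(void)
{
	unsigned long i;

	/* Mirrors the init loop in xennet_create_dev(): every entry
	 * chained to its successor, head at index 0. */
	freelist = 0;
	for (i = 0; i < RING_SIZE; i++)
		slots[i].link = i + 1;

	printf("got %lu\n", slot_alloc());
	printf("got %lu\n", slot_alloc());
	slot_free(0);
	printf("got %lu after freeing 0\n", slot_alloc());
	return 0;
}

Keeping indices rather than pointers lets the same array cell double as both the in-use skb pointer and the free-list link, so no extra memory is needed for the free chain.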
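xen_net_read_mac() pulls the "mac" node from xenstore and parses it as six colon-separated hex octets, rejecting the string unless each octet is followed by ':' (or by '\0' for the last one). A standalone equivalent of that parsing loop, with strtoul() standing in for the kernel's simple_strtoul() and a hypothetical parse_mac() wrapper:

#include <stdio.h>
#include <stdlib.h>

#define ETH_ALEN 6

static int parse_mac(const char *macstr, unsigned char mac[ETH_ALEN])
{
	const char *s = macstr;
	char *e;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		mac[i] = strtoul(s, &e, 16);
		/* Require a ':' after every octet except the last, which
		 * must end the string; the driver returns -ENOENT here. */
		if (s == e || *e != ((i == ETH_ALEN - 1) ? '\0' : ':'))
			return -1;
		s = e + 1;
	}
	return 0;
}

int main(void)
{
	unsigned char mac[ETH_ALEN];

	if (parse_mac("00:16:3e:12:34:56", mac) == 0)
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}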
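The rxbuf_min and rxbuf_max attributes registered by xennet_sysfs_addif() let a CAP_NET_ADMIN user tune the RX buffer targets at runtime. Writes are first clamped to the driver's hard limits and then propagated so the invariant rx_min_target <= rx_target <= rx_max_target always holds: raising the minimum drags the current target and the maximum up with it, and lowering the maximum works symmetrically. A userspace sketch of the store_rxbuf_min() clamping, under the assumption of illustrative limit values rather than the driver's:

#include <stdio.h>

#define RX_MIN_TARGET 8		/* illustrative hard limits */
#define RX_MAX_TARGET 256

struct rx_targets {
	unsigned long min, cur, max;
};

static void set_rxbuf_min(struct rx_targets *t, unsigned long target)
{
	/* Clamp to the hard limits, as store_rxbuf_min() does. */
	if (target < RX_MIN_TARGET)
		target = RX_MIN_TARGET;
	if (target > RX_MAX_TARGET)
		target = RX_MAX_TARGET;

	if (target > t->max)	/* raising min may push max up */
		t->max = target;
	t->min = target;
	if (target > t->cur)	/* ...and the current target with it */
		t->cur = target;
}

int main(void)
{
	struct rx_targets t = { 8, 8, 64 };

	set_rxbuf_min(&t, 128);
	printf("min=%lu cur=%lu max=%lu\n", t.min, t.cur, t.max);
	return 0;
}

Since device_create_file() attaches the attributes to the net device's struct device, they appear alongside the interface's other sysfs attributes (e.g. under /sys/class/net/<if>/), and the store handlers call xennet_alloc_rx_buffers() so a raised target takes effect immediately.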