From 3d5d9501b634fd268eb56428cda92cd317752d69 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Mon, 6 Mar 2023 13:54:50 -0500 Subject: fs: prevent out-of-bounds array speculation when closing a file descriptor commit 609d54441493c99f21c1823dfd66fa7f4c512ff4 upstream. Google-Bug-Id: 114199369 Signed-off-by: Theodore Ts'o Signed-off-by: Al Viro Signed-off-by: Greg Kroah-Hartman --- fs/file.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/file.c b/fs/file.c index d6ca500a1053..928ba7b8df1e 100644 --- a/fs/file.c +++ b/fs/file.c @@ -627,6 +627,7 @@ int __close_fd(struct files_struct *files, unsigned fd) fdt = files_fdtable(files); if (fd >= fdt->max_fds) goto out_unlock; + fd = array_index_nospec(fd, fdt->max_fds); file = fdt->fd[fd]; if (!file) goto out_unlock; -- cgit v1.2.1 From f028a7db98240a917e8e071d7dbfa2075ca734d2 Mon Sep 17 00:00:00 2001 From: Andrew Cooper Date: Tue, 7 Mar 2023 17:46:43 +0000 Subject: x86/CPU/AMD: Disable XSAVES on AMD family 0x17 commit b0563468eeac88ebc70559d52a0b66efc37e4e9d upstream. AMD Erratum 1386 is summarised as: XSAVES Instruction May Fail to Save XMM Registers to the Provided State Save Area This piece of accidental chronomancy causes the %xmm registers to occasionally reset back to an older value. Ignore the XSAVES feature on all AMD Zen1/2 hardware. The XSAVEC instruction (which works fine) is equivalent on affected parts. [ bp: Typos, move it into the F17h-specific function. ] Reported-by: Tavis Ormandy Signed-off-by: Andrew Cooper Signed-off-by: Borislav Petkov (AMD) Cc: Link: https://lore.kernel.org/r/20230307174643.1240184-1-andrew.cooper3@citrix.com Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/cpu/amd.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index e017f64e09d6..c8979f8cbce5 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -199,6 +199,15 @@ static void init_amd_k6(struct cpuinfo_x86 *c) return; } #endif + /* + * Work around Erratum 1386. The XSAVES instruction malfunctions in + * certain circumstances on Zen1/2 uarch, and not all parts have had + * updated microcode at the time of writing (March 2023). + * + * Affected parts all have no supervisor XSAVE states, meaning that + * the XSAVEC instruction (which works fine) is equivalent. + */ + clear_cpu_cap(c, X86_FEATURE_XSAVES); } static void init_amd_k7(struct cpuinfo_x86 *c) -- cgit v1.2.1 From a6ac7e490d2ce08c2319b2768a52f698171d59bd Mon Sep 17 00:00:00 2001 From: Eric Whitney Date: Fri, 10 Feb 2023 12:32:44 -0500 Subject: ext4: fix RENAME_WHITEOUT handling for inline directories commit c9f62c8b2dbf7240536c0cc9a4529397bb8bf38e upstream. A significant number of xfstests can cause ext4 to log one or more warning messages when they are run on a test file system where the inline_data feature has been enabled. An example: "EXT4-fs warning (device vdc): ext4_dirblock_csum_set:425: inode #16385: comm fsstress: No space for directory leaf checksum. Please run e2fsck -D." The xfstests include: ext4/057, 058, and 307; generic/013, 051, 068, 070, 076, 078, 083, 232, 269, 270, 390, 461, 475, 476, 482, 579, 585, 589, 626, 631, and 650. In this situation, the warning message indicates a bug in the code that performs the RENAME_WHITEOUT operation on a directory entry that has been stored inline. It doesn't detect that the directory is stored inline, and incorrectly attempts to compute a dirent block checksum on the whiteout inode when creating it. 
This attempt fails as a result of the integrity checking in get_dirent_tail (usually due to a failure to match the EXT4_FT_DIR_CSUM magic cookie), and the warning message is then emitted. Fix this by simply collecting the inlined data state at the time the search for the source directory entry is performed. Existing code handles the rest, and this is sufficient to eliminate all spurious warning messages produced by the tests above. Go one step further and do the same in the code that resets the source directory entry in the event of failure. The inlined state should be present in the "old" struct, but given the possibility of a race there's no harm in taking a conservative approach and getting that information again since the directory entry is being reread anyway. Fixes: b7ff91fd030d ("ext4: find old entry again if failed to rename whiteout") Cc: stable@kernel.org Signed-off-by: Eric Whitney Reviewed-by: Jan Kara Link: https://lore.kernel.org/r/20230210173244.679890-1-enwlinux@gmail.com Signed-off-by: Theodore Ts'o Signed-off-by: Greg Kroah-Hartman --- fs/ext4/namei.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 2c68591102bd..db9bba3473b5 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -1419,11 +1419,10 @@ static struct buffer_head *__ext4_find_entry(struct inode *dir, int has_inline_data = 1; ret = ext4_find_inline_entry(dir, fname, res_dir, &has_inline_data); - if (has_inline_data) { - if (inlined) - *inlined = 1; + if (inlined) + *inlined = has_inline_data; + if (has_inline_data) goto cleanup_and_exit; - } } if ((namelen <= 2) && (name[0] == '.') && @@ -3515,7 +3514,8 @@ static void ext4_resetent(handle_t *handle, struct ext4_renament *ent, * so the old->de may no longer valid and need to find it again * before reset old inode info. */ - old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, NULL); + old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, + &old.inlined); if (IS_ERR(old.bh)) retval = PTR_ERR(old.bh); if (!old.bh) @@ -3677,7 +3677,8 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, return retval; } - old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, NULL); + old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, + &old.inlined); if (IS_ERR(old.bh)) return PTR_ERR(old.bh); /* -- cgit v1.2.1 From f16054ac1774915160ca4e1c73ff7a269465a1b9 Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Thu, 16 Feb 2023 10:55:48 -0800 Subject: ext4: fix another off-by-one fsmap error on 1k block filesystems commit c993799baf9c5861f8df91beb80e1611b12efcbd upstream. Apparently syzbot figured out that issuing this FSMAP call: struct fsmap_head cmd = { .fmh_count = ...; .fmh_keys = { { .fmr_device = /* ext4 dev */, .fmr_physical = 0, }, { .fmr_device = /* ext4 dev */, .fmr_physical = 0, }, }, ... }; ret = ioctl(fd, FS_IOC_GETFSMAP, &cmd); Produces this crash if the underlying filesystem is a 1k-block ext4 filesystem: kernel BUG at fs/ext4/ext4.h:3331! 
invalid opcode: 0000 [#1] PREEMPT SMP CPU: 3 PID: 3227965 Comm: xfs_io Tainted: G W O 6.2.0-rc8-achx Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.15.0-1 04/01/2014 RIP: 0010:ext4_mb_load_buddy_gfp+0x47c/0x570 [ext4] RSP: 0018:ffffc90007c03998 EFLAGS: 00010246 RAX: ffff888004978000 RBX: ffffc90007c03a20 RCX: ffff888041618000 RDX: 0000000000000000 RSI: 00000000000005a4 RDI: ffffffffa0c99b11 RBP: ffff888012330000 R08: ffffffffa0c2b7d0 R09: 0000000000000400 R10: ffffc90007c03950 R11: 0000000000000000 R12: 0000000000000001 R13: 00000000ffffffff R14: 0000000000000c40 R15: ffff88802678c398 FS: 00007fdf2020c880(0000) GS:ffff88807e100000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007ffd318a5fe8 CR3: 000000007f80f001 CR4: 00000000001706e0 Call Trace: ext4_mballoc_query_range+0x4b/0x210 [ext4 dfa189daddffe8fecd3cdfd00564e0f265a8ab80] ext4_getfsmap_datadev+0x713/0x890 [ext4 dfa189daddffe8fecd3cdfd00564e0f265a8ab80] ext4_getfsmap+0x2b7/0x330 [ext4 dfa189daddffe8fecd3cdfd00564e0f265a8ab80] ext4_ioc_getfsmap+0x153/0x2b0 [ext4 dfa189daddffe8fecd3cdfd00564e0f265a8ab80] __ext4_ioctl+0x2a7/0x17e0 [ext4 dfa189daddffe8fecd3cdfd00564e0f265a8ab80] __x64_sys_ioctl+0x82/0xa0 do_syscall_64+0x2b/0x80 entry_SYSCALL_64_after_hwframe+0x46/0xb0 RIP: 0033:0x7fdf20558aff RSP: 002b:00007ffd318a9e30 EFLAGS: 00000246 ORIG_RAX: 0000000000000010 RAX: ffffffffffffffda RBX: 00000000000200c0 RCX: 00007fdf20558aff RDX: 00007fdf1feb2010 RSI: 00000000c0c0583b RDI: 0000000000000003 RBP: 00005625c0634be0 R08: 00005625c0634c40 R09: 0000000000000001 R10: 0000000000000000 R11: 0000000000000246 R12: 00007fdf1feb2010 R13: 00005625be70d994 R14: 0000000000000800 R15: 0000000000000000 For GETFSMAP calls, the caller selects a physical block device by writing its block number into fsmap_head.fmh_keys[01].fmr_device. To query mappings for a subrange of the device, the starting byte of the range is written to fsmap_head.fmh_keys[0].fmr_physical and the last byte of the range goes in fsmap_head.fmh_keys[1].fmr_physical. IOWs, to query what mappings overlap with bytes 3-14 of /dev/sda, you'd set the inputs as follows: fmh_keys[0] = { .fmr_device = major(8, 0), .fmr_physical = 3}, fmh_keys[1] = { .fmr_device = major(8, 0), .fmr_physical = 14}, Which would return you whatever is mapped in the 12 bytes starting at physical offset 3. The crash is due to insufficient range validation of keys[1] in ext4_getfsmap_datadev. On 1k-block filesystems, block 0 is not part of the filesystem, which means that s_first_data_block is nonzero. ext4_get_group_no_and_offset subtracts this quantity from the blocknr argument before cracking it into a group number and a block number within a group. IOWs, block group 0 spans blocks 1-8192 (1-based) instead of 0-8191 (0-based) like what happens with larger blocksizes. The net result of this encoding is that blocknr < s_first_data_block is not a valid input to this function. The end_fsb variable is set from the keys that are copied from userspace, which means that in the above example, its value is zero. That leads to an underflow here: blocknr = blocknr - le32_to_cpu(es->s_first_data_block); The division then operates on -1: offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb)) >> EXT4_SB(sb)->s_cluster_bits; Leaving an impossibly large group number (2^32-1) in blocknr. ext4_getfsmap_check_keys checked that keys[0].fmr_physical and keys[1].fmr_physical are in increasing order, but ext4_getfsmap_datadev adjusts keys[0].fmr_physical to be at least s_first_data_block. 
This implies that we have to check it again after the adjustment, which is the piece that I forgot. Reported-by: syzbot+6be2b977c89f79b6b153@syzkaller.appspotmail.com Fixes: 4a4956249dac ("ext4: fix off-by-one fsmap error on 1k block filesystems") Link: https://syzkaller.appspot.com/bug?id=79d5768e9bfe362911ac1a5057a36fc6b5c30002 Cc: stable@vger.kernel.org Signed-off-by: Darrick J. Wong Link: https://lore.kernel.org/r/Y+58NPTH7VNGgzdd@magnolia Signed-off-by: Theodore Ts'o Signed-off-by: Greg Kroah-Hartman --- fs/ext4/fsmap.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/fs/ext4/fsmap.c b/fs/ext4/fsmap.c index 6f3f245f3a80..6b52ace1463c 100644 --- a/fs/ext4/fsmap.c +++ b/fs/ext4/fsmap.c @@ -486,6 +486,8 @@ static int ext4_getfsmap_datadev(struct super_block *sb, keys[0].fmr_physical = bofs; if (keys[1].fmr_physical >= eofs) keys[1].fmr_physical = eofs - 1; + if (keys[1].fmr_physical < keys[0].fmr_physical) + return 0; start_fsb = keys[0].fmr_physical; end_fsb = keys[1].fmr_physical; -- cgit v1.2.1 From 2efff7f35e9ad624da35b8efa4fd00f1721e95f5 Mon Sep 17 00:00:00 2001 From: Ye Bin Date: Tue, 7 Mar 2023 09:52:52 +0800 Subject: ext4: move where set the MAY_INLINE_DATA flag is set commit 1dcdce5919115a471bf4921a57f20050c545a236 upstream. The only caller of ext4_find_inline_data_nolock() that needs setting of EXT4_STATE_MAY_INLINE_DATA flag is ext4_iget_extra_inode(). In ext4_write_inline_data_end() we just need to update inode->i_inline_off. Since we are going to add one more caller that does not need to set EXT4_STATE_MAY_INLINE_DATA, just move setting of EXT4_STATE_MAY_INLINE_DATA out to ext4_iget_extra_inode(). Signed-off-by: Ye Bin Cc: stable@kernel.org Reviewed-by: Jan Kara Link: https://lore.kernel.org/r/20230307015253.2232062-2-yebin@huaweicloud.com Signed-off-by: Theodore Ts'o Signed-off-by: Greg Kroah-Hartman --- fs/ext4/inline.c | 1 - fs/ext4/inode.c | 7 ++++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index b1c6b9398eef..07bb69cd2023 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c @@ -157,7 +157,6 @@ int ext4_find_inline_data_nolock(struct inode *inode) (void *)ext4_raw_inode(&is.iloc)); EXT4_I(inode)->i_inline_size = EXT4_MIN_INLINE_DATA_SIZE + le32_to_cpu(is.s.here->e_value_size); - ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA); } out: brelse(is.iloc.bh); diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 3c7bbdaa425a..6e7989b04d2b 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -4865,8 +4865,13 @@ static inline int ext4_iget_extra_inode(struct inode *inode, if (EXT4_INODE_HAS_XATTR_SPACE(inode) && *magic == cpu_to_le32(EXT4_XATTR_MAGIC)) { + int err; + ext4_set_inode_state(inode, EXT4_STATE_XATTR); - return ext4_find_inline_data_nolock(inode); + err = ext4_find_inline_data_nolock(inode); + if (!err && ext4_has_inline_data(inode)) + ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA); + return err; } else EXT4_I(inode)->i_inline_off = 0; return 0; -- cgit v1.2.1 From 39c5df2ca544368b44b59d0f6d80131e90763371 Mon Sep 17 00:00:00 2001 From: Ye Bin Date: Tue, 7 Mar 2023 09:52:53 +0800 Subject: ext4: fix WARNING in ext4_update_inline_data commit 2b96b4a5d9443ca4cad58b0040be455803c05a42 upstream. Syzbot found the following issue: EXT4-fs (loop0): mounted filesystem 00000000-0000-0000-0000-000000000000 without journal. Quota mode: none. 
fscrypt: AES-256-CTS-CBC using implementation "cts-cbc-aes-aesni" fscrypt: AES-256-XTS using implementation "xts-aes-aesni" ------------[ cut here ]------------ WARNING: CPU: 0 PID: 5071 at mm/page_alloc.c:5525 __alloc_pages+0x30a/0x560 mm/page_alloc.c:5525 Modules linked in: CPU: 1 PID: 5071 Comm: syz-executor263 Not tainted 6.2.0-rc1-syzkaller #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 10/26/2022 RIP: 0010:__alloc_pages+0x30a/0x560 mm/page_alloc.c:5525 RSP: 0018:ffffc90003c2f1c0 EFLAGS: 00010246 RAX: ffffc90003c2f220 RBX: 0000000000000014 RCX: 0000000000000000 RDX: 0000000000000028 RSI: 0000000000000000 RDI: ffffc90003c2f248 RBP: ffffc90003c2f2d8 R08: dffffc0000000000 R09: ffffc90003c2f220 R10: fffff52000785e49 R11: 1ffff92000785e44 R12: 0000000000040d40 R13: 1ffff92000785e40 R14: dffffc0000000000 R15: 1ffff92000785e3c FS: 0000555556c0d300(0000) GS:ffff8880b9800000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007f95d5e04138 CR3: 00000000793aa000 CR4: 00000000003506f0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 Call Trace: __alloc_pages_node include/linux/gfp.h:237 [inline] alloc_pages_node include/linux/gfp.h:260 [inline] __kmalloc_large_node+0x95/0x1e0 mm/slab_common.c:1113 __do_kmalloc_node mm/slab_common.c:956 [inline] __kmalloc+0xfe/0x190 mm/slab_common.c:981 kmalloc include/linux/slab.h:584 [inline] kzalloc include/linux/slab.h:720 [inline] ext4_update_inline_data+0x236/0x6b0 fs/ext4/inline.c:346 ext4_update_inline_dir fs/ext4/inline.c:1115 [inline] ext4_try_add_inline_entry+0x328/0x990 fs/ext4/inline.c:1307 ext4_add_entry+0x5a4/0xeb0 fs/ext4/namei.c:2385 ext4_add_nondir+0x96/0x260 fs/ext4/namei.c:2772 ext4_create+0x36c/0x560 fs/ext4/namei.c:2817 lookup_open fs/namei.c:3413 [inline] open_last_lookups fs/namei.c:3481 [inline] path_openat+0x12ac/0x2dd0 fs/namei.c:3711 do_filp_open+0x264/0x4f0 fs/namei.c:3741 do_sys_openat2+0x124/0x4e0 fs/open.c:1310 do_sys_open fs/open.c:1326 [inline] __do_sys_openat fs/open.c:1342 [inline] __se_sys_openat fs/open.c:1337 [inline] __x64_sys_openat+0x243/0x290 fs/open.c:1337 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x3d/0xb0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x63/0xcd Above issue happens as follows: ext4_iget ext4_find_inline_data_nolock ->i_inline_off=164 i_inline_size=60 ext4_try_add_inline_entry __ext4_mark_inode_dirty ext4_expand_extra_isize_ea ->i_extra_isize=32 s_want_extra_isize=44 ext4_xattr_shift_entries ->after the shift, i_inline_off is incorrect; it has actually changed to 176 ext4_try_add_inline_entry ext4_update_inline_dir get_max_inline_xattr_value_size if (EXT4_I(inode)->i_inline_off) entry = (struct ext4_xattr_entry *)((void *)raw_inode + EXT4_I(inode)->i_inline_off); free += EXT4_XATTR_SIZE(le32_to_cpu(entry->e_value_size)); ->As entry is incorrect, 'free' may be negative ext4_update_inline_data value = kzalloc(len, GFP_NOFS); -> len is unsigned int and may be very large, triggering the warning in 'kzalloc()' To resolve the above issue we need to update 'i_inline_off' after 'ext4_xattr_shift_entries()'. We do not need to set the EXT4_STATE_MAY_INLINE_DATA flag here, since ext4_mark_inode_dirty() already sets this flag if needed. Setting EXT4_STATE_MAY_INLINE_DATA when it is not needed may trigger a BUG_ON in ext4_writepages().
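The core of the failure above is a signed quantity folded into an unsigned allocation length. A standalone userspace sketch (hypothetical numbers, not the ext4 code) of how a negative "free" figure turns into an enormous kzalloc() size:

```
#include <stdio.h>

int main(void)
{
	int free_space = -116; /* a stale i_inline_off can drive "free" negative */
	unsigned int len = 60 + free_space; /* int result -56 converts to unsigned */

	printf("len = %u\n", len); /* 4294967240: a huge allocation request */
	return 0;
}
```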
Reported-by: syzbot+d30838395804afc2fa6f@syzkaller.appspotmail.com Cc: stable@kernel.org Signed-off-by: Ye Bin Reviewed-by: Jan Kara Link: https://lore.kernel.org/r/20230307015253.2232062-3-yebin@huaweicloud.com Signed-off-by: Theodore Ts'o Signed-off-by: Greg Kroah-Hartman --- fs/ext4/xattr.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index 23334cfeac55..2a70b7556e41 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -2823,6 +2823,9 @@ shift: (void *)header, total_ino); EXT4_I(inode)->i_extra_isize = new_extra_isize; + if (ext4_has_inline_data(inode)) + error = ext4_find_inline_data_nolock(inode); + cleanup: if (error && (mnt_count != le16_to_cpu(sbi->s_es->s_mnt_count))) { ext4_warning(inode->i_sb, "Unable to expand inode %lu. Delete some EAs or run e2fsck.", -- cgit v1.2.1 From 59eee0cdf8c036f554add97a4da7c06d7a9ff34a Mon Sep 17 00:00:00 2001 From: Zhihao Cheng Date: Wed, 8 Mar 2023 11:26:43 +0800 Subject: ext4: zero i_disksize when initializing the bootloader inode commit f5361da1e60d54ec81346aee8e3d8baf1be0b762 upstream. If the boot loader inode has never been used before, the EXT4_IOC_SWAP_BOOT inode will initialize it, including setting the i_size to 0. However, if the "never before used" boot loader has a non-zero i_size, then i_disksize will be non-zero, and the inconsistency between i_size and i_disksize can trigger a kernel warning: WARNING: CPU: 0 PID: 2580 at fs/ext4/file.c:319 CPU: 0 PID: 2580 Comm: bb Not tainted 6.3.0-rc1-00004-g703695902cfa RIP: 0010:ext4_file_write_iter+0xbc7/0xd10 Call Trace: vfs_write+0x3b1/0x5c0 ksys_write+0x77/0x160 __x64_sys_write+0x22/0x30 do_syscall_64+0x39/0x80 Reproducer: 1. create corrupted image and mount it: mke2fs -t ext4 /tmp/foo.img 200 debugfs -wR "sif <5> size 25700" /tmp/foo.img mount -t ext4 /tmp/foo.img /mnt cd /mnt echo 123 > file 2. Run the reproducer program: posix_memalign(&buf, 1024, 1024) fd = open("file", O_RDWR | O_DIRECT); ioctl(fd, EXT4_IOC_SWAP_BOOT); write(fd, buf, 1024); Fix this by setting i_disksize as well as i_size to zero when initializing the boot loader inode. Link: https://bugzilla.kernel.org/show_bug.cgi?id=217159 Cc: stable@kernel.org Signed-off-by: Zhihao Cheng Link: https://lore.kernel.org/r/20230308032643.641113-1-chengzhihao1@huawei.com Signed-off-by: Theodore Ts'o Signed-off-by: Greg Kroah-Hartman --- fs/ext4/ioctl.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index fd0353017d89..b930e8d559d4 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c @@ -178,6 +178,7 @@ static long swap_inode_boot_loader(struct super_block *sb, ei_bl->i_flags = 0; inode_set_iversion(inode_bl, 1); i_size_write(inode_bl, 0); + EXT4_I(inode_bl)->i_disksize = inode_bl->i_size; inode_bl->i_mode = S_IFREG; if (ext4_has_feature_extents(sb)) { ext4_set_inode_flag(inode_bl, EXT4_INODE_EXTENTS); -- cgit v1.2.1 From 6fc2e5b445a7e5610d089639e4e9992fbfcb5d27 Mon Sep 17 00:00:00 2001 From: Fedor Pchelkin Date: Tue, 7 Mar 2023 00:26:50 +0300 Subject: nfc: change order inside nfc_se_io error path commit 7d834b4d1ab66c48e8c0810fdeadaabb80fa2c81 upstream. cb_context should be freed on the error path in nfc_se_io as stated by commit 25ff6f8a5a3b ("nfc: fix memory leak of se_io context in nfc_genl_se_io"). Make the error path in nfc_se_io unwind everything in reverse order, i.e. free the cb_context after unlocking the device.
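The pattern this restores is plain LIFO unwinding: release resources in the reverse order they were acquired. A minimal userspace sketch of that shape, with hypothetical names standing in for the allocation and the device lock (not the NFC code itself):

```
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;

static int se_io_sketch(int simulate_failure)
{
	char *cb_context = malloc(32);   /* acquired first */
	int rc = 0;

	if (!cb_context)
		return -1;

	pthread_mutex_lock(&dev_lock);   /* acquired second */

	if (simulate_failure) {
		rc = -1;
		goto error;
	}

	/* ... the I/O would happen here ... */
	pthread_mutex_unlock(&dev_lock);
	free(cb_context);
	return 0;

error:
	pthread_mutex_unlock(&dev_lock); /* undo the later acquisition first */
	free(cb_context);                /* then the earlier one */
	return rc;
}

int main(void)
{
	printf("%d\n", se_io_sketch(1));
	return 0;
}
```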
Suggested-by: Krzysztof Kozlowski Signed-off-by: Fedor Pchelkin Reviewed-by: Krzysztof Kozlowski Link: https://lore.kernel.org/r/20230306212650.230322-1-pchelkin@ispras.ru Signed-off-by: Jakub Kicinski Signed-off-by: Greg Kroah-Hartman --- net/nfc/netlink.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c index 2c5443ce449c..f705800b2248 100644 --- a/net/nfc/netlink.c +++ b/net/nfc/netlink.c @@ -1464,8 +1464,8 @@ static int nfc_se_io(struct nfc_dev *dev, u32 se_idx, return rc; error: - kfree(cb_context); device_unlock(&dev->dev); + kfree(cb_context); return rc; } -- cgit v1.2.1 From cb13ac57e95da14c6e39b2423efbfb73e25e41fc Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Tue, 19 Mar 2019 09:48:59 +0100 Subject: udf: Explain handling of load_nls() failure [ Upstream commit a768a9abc625d554f7b6428517089c193fcb5962 ] Add comment explaining that load_nls() failure gets handled back in udf_fill_super() to avoid false impression that it is unhandled. Signed-off-by: Jan Kara Stable-dep-of: fc8033a34a3c ("udf: Preserve link count of system files") Signed-off-by: Sasha Levin --- fs/udf/super.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/fs/udf/super.c b/fs/udf/super.c index b7fb7cd35d89..5d179616578c 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c @@ -572,6 +572,11 @@ static int udf_parse_options(char *options, struct udf_options *uopt, if (!remount) { if (uopt->nls_map) unload_nls(uopt->nls_map); + /* + * load_nls() failure is handled later in + * udf_fill_super() after all options are + * parsed. + */ uopt->nls_map = load_nls(args[0].from); uopt->flags |= (1 << UDF_FLAG_NLS_MAP); } -- cgit v1.2.1 From 6da42219d24fbdfc0691d6147dab83330efd0208 Mon Sep 17 00:00:00 2001 From: "Steven J. Magnani" Date: Wed, 14 Aug 2019 07:50:02 -0500 Subject: udf: reduce leakage of blocks related to named streams [ Upstream commit ab9a3a737284b3d9e1d2ba43a0ef31b3ef2e2417 ] Windows is capable of creating UDF files having named streams. One example is the "Zone.Identifier" stream attached automatically to files downloaded from a network. See: https://msdn.microsoft.com/en-us/library/dn392609.aspx Modification of a file having one or more named streams in Linux causes the stream directory to become detached from the file, essentially leaking all blocks pertaining to the file's streams. Fix by saving off information about an inode's streams when reading it, for later use when its on-disk data is updated. Link: https://lore.kernel.org/r/20190814125002.10869-1-steve@digidescorp.com Signed-off-by: Steven J. 
Magnani Signed-off-by: Jan Kara Stable-dep-of: fc8033a34a3c ("udf: Preserve link count of system files") Signed-off-by: Sasha Levin --- fs/udf/inode.c | 24 +++++++++++++++++++++++- fs/udf/super.c | 2 ++ fs/udf/udf_i.h | 5 ++++- 3 files changed, 29 insertions(+), 2 deletions(-) diff --git a/fs/udf/inode.c b/fs/udf/inode.c index af1180104e56..99d297b667ce 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -1470,6 +1470,8 @@ reread: iinfo->i_lenEAttr = le32_to_cpu(fe->lengthExtendedAttr); iinfo->i_lenAlloc = le32_to_cpu(fe->lengthAllocDescs); iinfo->i_checkpoint = le32_to_cpu(fe->checkpoint); + iinfo->i_streamdir = 0; + iinfo->i_lenStreams = 0; } else { inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) << (inode->i_sb->s_blocksize_bits - 9); @@ -1483,6 +1485,16 @@ reread: iinfo->i_lenEAttr = le32_to_cpu(efe->lengthExtendedAttr); iinfo->i_lenAlloc = le32_to_cpu(efe->lengthAllocDescs); iinfo->i_checkpoint = le32_to_cpu(efe->checkpoint); + + /* Named streams */ + iinfo->i_streamdir = (efe->streamDirectoryICB.extLength != 0); + iinfo->i_locStreamdir = + lelb_to_cpu(efe->streamDirectoryICB.extLocation); + iinfo->i_lenStreams = le64_to_cpu(efe->objectSize); + if (iinfo->i_lenStreams >= inode->i_size) + iinfo->i_lenStreams -= inode->i_size; + else + iinfo->i_lenStreams = 0; } inode->i_generation = iinfo->i_unique; @@ -1745,9 +1757,19 @@ static int udf_update_inode(struct inode *inode, int do_sync) iinfo->i_ext.i_data, inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry)); - efe->objectSize = cpu_to_le64(inode->i_size); + efe->objectSize = + cpu_to_le64(inode->i_size + iinfo->i_lenStreams); efe->logicalBlocksRecorded = cpu_to_le64(lb_recorded); + if (iinfo->i_streamdir) { + struct long_ad *icb_lad = &efe->streamDirectoryICB; + + icb_lad->extLocation = + cpu_to_lelb(iinfo->i_locStreamdir); + icb_lad->extLength = + cpu_to_le32(inode->i_sb->s_blocksize); + } + udf_adjust_time(iinfo, inode->i_atime); udf_adjust_time(iinfo, inode->i_mtime); udf_adjust_time(iinfo, inode->i_ctime); diff --git a/fs/udf/super.c b/fs/udf/super.c index 5d179616578c..ec082b27e9fb 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c @@ -146,9 +146,11 @@ static struct inode *udf_alloc_inode(struct super_block *sb) ei->i_unique = 0; ei->i_lenExtents = 0; + ei->i_lenStreams = 0; ei->i_next_alloc_block = 0; ei->i_next_alloc_goal = 0; ei->i_strat4096 = 0; + ei->i_streamdir = 0; init_rwsem(&ei->i_data_sem); ei->cached_extent.lstart = -1; spin_lock_init(&ei->i_extent_cache_lock); diff --git a/fs/udf/udf_i.h b/fs/udf/udf_i.h index 2ef0e212f08a..00d773d1b7cf 100644 --- a/fs/udf/udf_i.h +++ b/fs/udf/udf_i.h @@ -42,12 +42,15 @@ struct udf_inode_info { unsigned i_efe : 1; /* extendedFileEntry */ unsigned i_use : 1; /* unallocSpaceEntry */ unsigned i_strat4096 : 1; - unsigned reserved : 26; + unsigned i_streamdir : 1; + unsigned reserved : 25; union { struct short_ad *i_sad; struct long_ad *i_lad; __u8 *i_data; } i_ext; + struct kernel_lb_addr i_locStreamdir; + __u64 i_lenStreams; struct rw_semaphore i_data_sem; struct udf_ext_cache cached_extent; /* Spinlock for protecting extent cache */ -- cgit v1.2.1 From c72225ea9ffa2ba458d6efc973e2c17f0166d41e Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Fri, 25 Sep 2020 12:29:54 +0200 Subject: udf: Remove pointless union in udf_inode_info [ Upstream commit 382a2287bf9cd283206764572f66ab12657218aa ] We use only a single member out of the i_ext union in udf_inode_info. Just remove the pointless union. 
Signed-off-by: Jan Kara Stable-dep-of: fc8033a34a3c ("udf: Preserve link count of system files") Signed-off-by: Sasha Levin --- fs/udf/directory.c | 2 +- fs/udf/file.c | 7 +++---- fs/udf/ialloc.c | 14 +++++++------- fs/udf/inode.c | 36 +++++++++++++++++------------------- fs/udf/misc.c | 6 +++--- fs/udf/namei.c | 7 +++---- fs/udf/partition.c | 2 +- fs/udf/super.c | 4 ++-- fs/udf/symlink.c | 2 +- fs/udf/udf_i.h | 6 +----- 10 files changed, 39 insertions(+), 47 deletions(-) diff --git a/fs/udf/directory.c b/fs/udf/directory.c index d9523013096f..73720320f0ab 100644 --- a/fs/udf/directory.c +++ b/fs/udf/directory.c @@ -34,7 +34,7 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos, fibh->soffset = fibh->eoffset; if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { - fi = udf_get_fileident(iinfo->i_ext.i_data - + fi = udf_get_fileident(iinfo->i_data - (iinfo->i_efe ? sizeof(struct extendedFileEntry) : sizeof(struct fileEntry)), diff --git a/fs/udf/file.c b/fs/udf/file.c index 88b7fb8e9998..8fff7ffc33a8 100644 --- a/fs/udf/file.c +++ b/fs/udf/file.c @@ -50,7 +50,7 @@ static void __udf_adinicb_readpage(struct page *page) * So just sample it once and use the same value everywhere. */ kaddr = kmap_atomic(page); - memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr, isize); + memcpy(kaddr, iinfo->i_data + iinfo->i_lenEAttr, isize); memset(kaddr + isize, 0, PAGE_SIZE - isize); flush_dcache_page(page); SetPageUptodate(page); @@ -76,8 +76,7 @@ static int udf_adinicb_writepage(struct page *page, BUG_ON(!PageLocked(page)); kaddr = kmap_atomic(page); - memcpy(iinfo->i_ext.i_data + iinfo->i_lenEAttr, kaddr, - i_size_read(inode)); + memcpy(iinfo->i_data + iinfo->i_lenEAttr, kaddr, i_size_read(inode)); SetPageUptodate(page); kunmap_atomic(kaddr); mark_inode_dirty(inode); @@ -213,7 +212,7 @@ long udf_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) return put_user(UDF_I(inode)->i_lenEAttr, (int __user *)arg); case UDF_GETEABLOCK: return copy_to_user((char __user *)arg, - UDF_I(inode)->i_ext.i_data, + UDF_I(inode)->i_data, UDF_I(inode)->i_lenEAttr) ? 
-EFAULT : 0; default: return -ENOIOCTLCMD; diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c index f8e5872f7cc2..cdaa86e077b2 100644 --- a/fs/udf/ialloc.c +++ b/fs/udf/ialloc.c @@ -67,16 +67,16 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode) iinfo->i_efe = 1; if (UDF_VERS_USE_EXTENDED_FE > sbi->s_udfrev) sbi->s_udfrev = UDF_VERS_USE_EXTENDED_FE; - iinfo->i_ext.i_data = kzalloc(inode->i_sb->s_blocksize - - sizeof(struct extendedFileEntry), - GFP_KERNEL); + iinfo->i_data = kzalloc(inode->i_sb->s_blocksize - + sizeof(struct extendedFileEntry), + GFP_KERNEL); } else { iinfo->i_efe = 0; - iinfo->i_ext.i_data = kzalloc(inode->i_sb->s_blocksize - - sizeof(struct fileEntry), - GFP_KERNEL); + iinfo->i_data = kzalloc(inode->i_sb->s_blocksize - + sizeof(struct fileEntry), + GFP_KERNEL); } - if (!iinfo->i_ext.i_data) { + if (!iinfo->i_data) { iput(inode); return ERR_PTR(-ENOMEM); } diff --git a/fs/udf/inode.c b/fs/udf/inode.c index 99d297b667ce..415f1186d250 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -150,8 +150,8 @@ void udf_evict_inode(struct inode *inode) truncate_inode_pages_final(&inode->i_data); invalidate_inode_buffers(inode); clear_inode(inode); - kfree(iinfo->i_ext.i_data); - iinfo->i_ext.i_data = NULL; + kfree(iinfo->i_data); + iinfo->i_data = NULL; udf_clear_extent_cache(inode); if (want_delete) { udf_free_inode(inode); @@ -278,14 +278,14 @@ int udf_expand_file_adinicb(struct inode *inode) kaddr = kmap_atomic(page); memset(kaddr + iinfo->i_lenAlloc, 0x00, PAGE_SIZE - iinfo->i_lenAlloc); - memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr, + memcpy(kaddr, iinfo->i_data + iinfo->i_lenEAttr, iinfo->i_lenAlloc); flush_dcache_page(page); SetPageUptodate(page); kunmap_atomic(kaddr); } down_write(&iinfo->i_data_sem); - memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr, 0x00, + memset(iinfo->i_data + iinfo->i_lenEAttr, 0x00, iinfo->i_lenAlloc); iinfo->i_lenAlloc = 0; if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD)) @@ -303,8 +303,7 @@ int udf_expand_file_adinicb(struct inode *inode) lock_page(page); down_write(&iinfo->i_data_sem); kaddr = kmap_atomic(page); - memcpy(iinfo->i_ext.i_data + iinfo->i_lenEAttr, kaddr, - inode->i_size); + memcpy(iinfo->i_data + iinfo->i_lenEAttr, kaddr, inode->i_size); kunmap_atomic(kaddr); unlock_page(page); iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB; @@ -392,8 +391,7 @@ struct buffer_head *udf_expand_dir_adinicb(struct inode *inode, } mark_buffer_dirty_inode(dbh, inode); - memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr, 0, - iinfo->i_lenAlloc); + memset(iinfo->i_data + iinfo->i_lenEAttr, 0, iinfo->i_lenAlloc); iinfo->i_lenAlloc = 0; eloc.logicalBlockNum = *block; eloc.partitionReferenceNum = @@ -1241,7 +1239,7 @@ set_size: if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { down_write(&iinfo->i_data_sem); udf_clear_extent_cache(inode); - memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr + newsize, + memset(iinfo->i_data + iinfo->i_lenEAttr + newsize, 0x00, bsize - newsize - udf_file_entry_alloc_offset(inode)); iinfo->i_lenAlloc = newsize; @@ -1390,7 +1388,7 @@ reread: sizeof(struct extendedFileEntry)); if (ret) goto out; - memcpy(iinfo->i_ext.i_data, + memcpy(iinfo->i_data, bh->b_data + sizeof(struct extendedFileEntry), bs - sizeof(struct extendedFileEntry)); } else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_FE)) { @@ -1399,7 +1397,7 @@ reread: ret = udf_alloc_i_data(inode, bs - sizeof(struct fileEntry)); if (ret) goto out; - memcpy(iinfo->i_ext.i_data, + memcpy(iinfo->i_data, bh->b_data + sizeof(struct fileEntry), bs - sizeof(struct 
fileEntry)); } else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_USE)) { @@ -1412,7 +1410,7 @@ reread: sizeof(struct unallocSpaceEntry)); if (ret) goto out; - memcpy(iinfo->i_ext.i_data, + memcpy(iinfo->i_data, bh->b_data + sizeof(struct unallocSpaceEntry), bs - sizeof(struct unallocSpaceEntry)); return 0; @@ -1591,8 +1589,8 @@ out: static int udf_alloc_i_data(struct inode *inode, size_t size) { struct udf_inode_info *iinfo = UDF_I(inode); - iinfo->i_ext.i_data = kmalloc(size, GFP_KERNEL); - if (!iinfo->i_ext.i_data) + iinfo->i_data = kmalloc(size, GFP_KERNEL); + if (!iinfo->i_data) return -ENOMEM; return 0; } @@ -1666,7 +1664,7 @@ static int udf_update_inode(struct inode *inode, int do_sync) use->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc); memcpy(bh->b_data + sizeof(struct unallocSpaceEntry), - iinfo->i_ext.i_data, inode->i_sb->s_blocksize - + iinfo->i_data, inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry)); use->descTag.tagIdent = cpu_to_le16(TAG_IDENT_USE); crclen = sizeof(struct unallocSpaceEntry); @@ -1735,7 +1733,7 @@ static int udf_update_inode(struct inode *inode, int do_sync) if (iinfo->i_efe == 0) { memcpy(bh->b_data + sizeof(struct fileEntry), - iinfo->i_ext.i_data, + iinfo->i_data, inode->i_sb->s_blocksize - sizeof(struct fileEntry)); fe->logicalBlocksRecorded = cpu_to_le64(lb_recorded); @@ -1754,7 +1752,7 @@ static int udf_update_inode(struct inode *inode, int do_sync) crclen = sizeof(struct fileEntry); } else { memcpy(bh->b_data + sizeof(struct extendedFileEntry), - iinfo->i_ext.i_data, + iinfo->i_data, inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry)); efe->objectSize = @@ -2050,7 +2048,7 @@ void udf_write_aext(struct inode *inode, struct extent_position *epos, struct udf_inode_info *iinfo = UDF_I(inode); if (!epos->bh) - ptr = iinfo->i_ext.i_data + epos->offset - + ptr = iinfo->i_data + epos->offset - udf_file_entry_alloc_offset(inode) + iinfo->i_lenEAttr; else @@ -2142,7 +2140,7 @@ int8_t udf_current_aext(struct inode *inode, struct extent_position *epos, if (!epos->bh) { if (!epos->offset) epos->offset = udf_file_entry_alloc_offset(inode); - ptr = iinfo->i_ext.i_data + epos->offset - + ptr = iinfo->i_data + epos->offset - udf_file_entry_alloc_offset(inode) + iinfo->i_lenEAttr; alen = udf_file_entry_alloc_offset(inode) + diff --git a/fs/udf/misc.c b/fs/udf/misc.c index 853bcff51043..1614d308d0f0 100644 --- a/fs/udf/misc.c +++ b/fs/udf/misc.c @@ -52,9 +52,9 @@ struct genericFormat *udf_add_extendedattr(struct inode *inode, uint32_t size, uint16_t crclen; struct udf_inode_info *iinfo = UDF_I(inode); - ea = iinfo->i_ext.i_data; + ea = iinfo->i_data; if (iinfo->i_lenEAttr) { - ad = iinfo->i_ext.i_data + iinfo->i_lenEAttr; + ad = iinfo->i_data + iinfo->i_lenEAttr; } else { ad = ea; size += sizeof(struct extendedAttrHeaderDesc); @@ -153,7 +153,7 @@ struct genericFormat *udf_get_extendedattr(struct inode *inode, uint32_t type, uint32_t offset; struct udf_inode_info *iinfo = UDF_I(inode); - ea = iinfo->i_ext.i_data; + ea = iinfo->i_data; if (iinfo->i_lenEAttr) { struct extendedAttrHeaderDesc *eahd; diff --git a/fs/udf/namei.c b/fs/udf/namei.c index ef251622da13..05dd1f45ba90 100644 --- a/fs/udf/namei.c +++ b/fs/udf/namei.c @@ -478,8 +478,7 @@ add: if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { block = dinfo->i_location.logicalBlockNum; fi = (struct fileIdentDesc *) - (dinfo->i_ext.i_data + - fibh->soffset - + (dinfo->i_data + fibh->soffset - udf_ext0_offset(dir) + dinfo->i_lenEAttr); } else { @@ -962,7 +961,7 @@ static int udf_symlink(struct 
inode *dir, struct dentry *dentry, mark_buffer_dirty_inode(epos.bh, inode); ea = epos.bh->b_data + udf_ext0_offset(inode); } else - ea = iinfo->i_ext.i_data + iinfo->i_lenEAttr; + ea = iinfo->i_data + iinfo->i_lenEAttr; eoffset = sb->s_blocksize - udf_ext0_offset(inode); pc = (struct pathComponent *)ea; @@ -1142,7 +1141,7 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry, retval = -EIO; if (old_iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { dir_fi = udf_get_fileident( - old_iinfo->i_ext.i_data - + old_iinfo->i_data - (old_iinfo->i_efe ? sizeof(struct extendedFileEntry) : sizeof(struct fileEntry)), diff --git a/fs/udf/partition.c b/fs/udf/partition.c index 090baff83990..4cbf40575965 100644 --- a/fs/udf/partition.c +++ b/fs/udf/partition.c @@ -65,7 +65,7 @@ uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block, } if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { - loc = le32_to_cpu(((__le32 *)(iinfo->i_ext.i_data + + loc = le32_to_cpu(((__le32 *)(iinfo->i_data + vdata->s_start_offset))[block]); goto translate; } diff --git a/fs/udf/super.c b/fs/udf/super.c index ec082b27e9fb..cdaef406f389 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c @@ -174,7 +174,7 @@ static void init_once(void *foo) { struct udf_inode_info *ei = (struct udf_inode_info *)foo; - ei->i_ext.i_data = NULL; + ei->i_data = NULL; inode_init_once(&ei->vfs_inode); } @@ -1207,7 +1207,7 @@ static int udf_load_vat(struct super_block *sb, int p_index, int type1_index) vat20 = (struct virtualAllocationTable20 *)bh->b_data; } else { vat20 = (struct virtualAllocationTable20 *) - vati->i_ext.i_data; + vati->i_data; } map->s_type_specific.s_virtual.s_start_offset = diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c index 6023c97c6da2..aef3e4d9014d 100644 --- a/fs/udf/symlink.c +++ b/fs/udf/symlink.c @@ -122,7 +122,7 @@ static int udf_symlink_filler(struct file *file, struct page *page) down_read(&iinfo->i_data_sem); if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { - symlink = iinfo->i_ext.i_data + iinfo->i_lenEAttr; + symlink = iinfo->i_data + iinfo->i_lenEAttr; } else { bh = sb_bread(inode->i_sb, pos); diff --git a/fs/udf/udf_i.h b/fs/udf/udf_i.h index 00d773d1b7cf..2a4731314d51 100644 --- a/fs/udf/udf_i.h +++ b/fs/udf/udf_i.h @@ -44,11 +44,7 @@ struct udf_inode_info { unsigned i_strat4096 : 1; unsigned i_streamdir : 1; unsigned reserved : 25; - union { - struct short_ad *i_sad; - struct long_ad *i_lad; - __u8 *i_data; - } i_ext; + __u8 *i_data; struct kernel_lb_addr i_locStreamdir; __u64 i_lenStreams; struct rw_semaphore i_data_sem; -- cgit v1.2.1 From ec852375bb9766b0c205fb95fb19b28319655644 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Tue, 3 Jan 2023 09:56:56 +0100 Subject: udf: Preserve link count of system files [ Upstream commit fc8033a34a3ca7d23353e645e6dde5d364ac5f12 ] System files in UDF filesystem have link count 0. To not confuse VFS we fudge the link count to be 1 when reading such inodes however we forget to restore the link count of 0 when writing such inodes. Fix that. 
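The invariant at stake can be modelled in a few lines of standalone C (illustrative only, not the driver code): the read side fudges a link count of 1 so VFS sees a live inode, and the write side must translate back to 0 for hidden system inodes:

```
#include <stdio.h>
#include <stdbool.h>

struct model_inode {
	unsigned int nlink; /* what VFS sees: at least 1 for a live inode */
	bool hidden;        /* UDF system file, stored with link count 0 */
};

static unsigned int link_count_for_disk(const struct model_inode *ino)
{
	/* undo the read-side fudge before writing the inode back */
	return ino->hidden ? 0 : ino->nlink;
}

int main(void)
{
	struct model_inode sysfile = { .nlink = 1, .hidden = true };

	printf("on-disk link count: %u\n", link_count_for_disk(&sysfile)); /* 0 */
	return 0;
}
```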
CC: stable@vger.kernel.org Signed-off-by: Jan Kara Signed-off-by: Sasha Levin --- fs/udf/inode.c | 9 +++++++-- fs/udf/super.c | 1 + fs/udf/udf_i.h | 3 ++- 3 files changed, 10 insertions(+), 3 deletions(-) diff --git a/fs/udf/inode.c b/fs/udf/inode.c index 415f1186d250..7436337914b1 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -1375,6 +1375,7 @@ reread: ret = -EIO; goto out; } + iinfo->i_hidden = hidden_inode; iinfo->i_unique = 0; iinfo->i_lenEAttr = 0; iinfo->i_lenExtents = 0; @@ -1694,8 +1695,12 @@ static int udf_update_inode(struct inode *inode, int do_sync) if (S_ISDIR(inode->i_mode) && inode->i_nlink > 0) fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1); - else - fe->fileLinkCount = cpu_to_le16(inode->i_nlink); + else { + if (iinfo->i_hidden) + fe->fileLinkCount = cpu_to_le16(0); + else + fe->fileLinkCount = cpu_to_le16(inode->i_nlink); + } fe->informationLength = cpu_to_le64(inode->i_size); diff --git a/fs/udf/super.c b/fs/udf/super.c index cdaef406f389..bce48a07790c 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c @@ -151,6 +151,7 @@ static struct inode *udf_alloc_inode(struct super_block *sb) ei->i_next_alloc_goal = 0; ei->i_strat4096 = 0; ei->i_streamdir = 0; + ei->i_hidden = 0; init_rwsem(&ei->i_data_sem); ei->cached_extent.lstart = -1; spin_lock_init(&ei->i_extent_cache_lock); diff --git a/fs/udf/udf_i.h b/fs/udf/udf_i.h index 2a4731314d51..b77bf713a1b6 100644 --- a/fs/udf/udf_i.h +++ b/fs/udf/udf_i.h @@ -43,7 +43,8 @@ struct udf_inode_info { unsigned i_use : 1; /* unallocSpaceEntry */ unsigned i_strat4096 : 1; unsigned i_streamdir : 1; - unsigned reserved : 25; + unsigned i_hidden : 1; /* hidden system inode */ + unsigned reserved : 24; __u8 *i_data; struct kernel_lb_addr i_locStreamdir; __u64 i_lenStreams; struct rw_semaphore i_data_sem; -- cgit v1.2.1 From 1dc71eeb198a8daa17d0c995998a53b0b749a158 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Tue, 3 Jan 2023 10:03:35 +0100 Subject: udf: Detect system inodes linked into directory hierarchy [ Upstream commit 85a37983ec69cc9fcd188bc37c4de15ee326355a ] When a UDF filesystem is corrupted, hidden system inodes can be linked into the directory hierarchy, which is an avenue for further serious corruption of the filesystem and kernel confusion, as noticed with syzbot-fuzzed images. Refuse to access system inodes linked into the directory hierarchy and vice versa. CC: stable@vger.kernel.org Reported-by: syzbot+38695a20b8addcbc1084@syzkaller.appspotmail.com Signed-off-by: Jan Kara Signed-off-by: Sasha Levin --- fs/udf/inode.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/fs/udf/inode.c b/fs/udf/inode.c index 7436337914b1..77421e65623a 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -1871,8 +1871,13 @@ struct inode *__udf_iget(struct super_block *sb, struct kernel_lb_addr *ino, if (!inode) return ERR_PTR(-ENOMEM); - if (!(inode->i_state & I_NEW)) + if (!(inode->i_state & I_NEW)) { + if (UDF_I(inode)->i_hidden != hidden_inode) { + iput(inode); + return ERR_PTR(-EFSCORRUPTED); + } return inode; + } memcpy(&UDF_I(inode)->i_location, ino, sizeof(struct kernel_lb_addr)); err = udf_read_inode(inode, hidden_inode); -- cgit v1.2.1 From a4912c4b0b1cdf8d03ab450f3aabf24c7b1a37db Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Tue, 6 Aug 2019 19:03:21 +0900 Subject: kbuild: fix false-positive need-builtin calculation [ Upstream commit d9f78edfd81b9e484423534360350ef7253cc888 ] The current implementation of need-builtin produces false positives, for example, in the following Makefile: obj-m := foo/ obj-y := foo/bar/ ..., where foo/built-in.a is not required.
Signed-off-by: Masahiro Yamada Stable-dep-of: 2e3d0e20d845 ("ARM: dts: exynos: correct TMU phandle in Odroid HC1") Signed-off-by: Sasha Levin --- scripts/Makefile.build | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/Makefile.build b/scripts/Makefile.build index 64fac0ad32d6..7635cc05fcfa 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build @@ -541,7 +541,8 @@ targets += $(call intermediate_targets, .asn1.o, .asn1.c .asn1.h) \ PHONY += $(subdir-ym) $(subdir-ym): - $(Q)$(MAKE) $(build)=$@ need-builtin=$(if $(findstring $@,$(subdir-obj-y)),1) + $(Q)$(MAKE) $(build)=$@ \ + need-builtin=$(if $(filter $@/built-in.a, $(subdir-obj-y)),1) # Add FORCE to the prequisites of a target to force it to be always rebuilt. # --------------------------------------------------------------------------- -- cgit v1.2.1 From 6db49c4532fd920e6d3585cec0c9df9f6d28b590 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Tue, 6 Aug 2019 19:03:22 +0900 Subject: kbuild: generate modules.order only in directories visited by obj-y/m [ Upstream commit 4f2c8f3089f538f556c86f26603a062865e4aa94 ] The modules.order files in directories visited by the chain of obj-y or obj-m are merged into the upper-level ones, and become parts of the top-level modules.order. On the other hand, there is no need to generate modules.order in directories visited by subdir-y or subdir-m since they would become orphans anyway. Signed-off-by: Masahiro Yamada Stable-dep-of: 2e3d0e20d845 ("ARM: dts: exynos: correct TMU phandle in Odroid HC1") Signed-off-by: Sasha Levin --- scripts/Makefile.build | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/Makefile.build b/scripts/Makefile.build index 7635cc05fcfa..97f59fa3e0ed 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build @@ -542,7 +542,8 @@ targets += $(call intermediate_targets, .asn1.o, .asn1.c .asn1.h) \ PHONY += $(subdir-ym) $(subdir-ym): $(Q)$(MAKE) $(build)=$@ \ - need-builtin=$(if $(filter $@/built-in.a, $(subdir-obj-y)),1) + need-builtin=$(if $(filter $@/built-in.a, $(subdir-obj-y)),1) \ + need-modorder=$(if $(need-modorder),$(if $(filter $@/modules.order, $(modorder)),1)) # Add FORCE to the prequisites of a target to force it to be always rebuilt. # --------------------------------------------------------------------------- -- cgit v1.2.1 From 13daafe1e209b03e9bda16ff2bd2b2da145a139b Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Fri, 10 Feb 2023 12:52:00 -0800 Subject: scsi: core: Remove the /proc/scsi/${proc_name} directory earlier [ Upstream commit fc663711b94468f4e1427ebe289c9f05669699c9 ] Remove the /proc/scsi/${proc_name} directory earlier to fix a race condition between unloading and reloading kernel modules. This fixes a bug introduced in 2009 by commit 77c019768f06 ("[SCSI] fix /proc memory leak in the SCSI core").
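The shape of the fix is a cleanup routine reachable from two paths, the early remove and the final release, which therefore has to tolerate running twice. A userspace sketch of that double-call-safe pattern (hypothetical names; the real scsi_proc_hostdir_rm() has its own bookkeeping):

```
#include <stdlib.h>

static char *proc_dir; /* stands in for /proc/scsi/<proc_name> */

static void hostdir_rm_sketch(void)
{
	free(proc_dir);
	proc_dir = NULL; /* a second call becomes a harmless no-op */
}

int main(void)
{
	proc_dir = malloc(16);
	hostdir_rm_sketch(); /* early: from the remove-host path */
	hostdir_rm_sketch(); /* late: from the release path, as a fallback */
	return 0;
}
```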
Fix the following kernel warning: proc_dir_entry 'scsi/scsi_debug' already registered WARNING: CPU: 19 PID: 27986 at fs/proc/generic.c:376 proc_register+0x27d/0x2e0 Call Trace: proc_mkdir+0xb5/0xe0 scsi_proc_hostdir_add+0xb5/0x170 scsi_host_alloc+0x683/0x6c0 sdebug_driver_probe+0x6b/0x2d0 [scsi_debug] really_probe+0x159/0x540 __driver_probe_device+0xdc/0x230 driver_probe_device+0x4f/0x120 __device_attach_driver+0xef/0x180 bus_for_each_drv+0xe5/0x130 __device_attach+0x127/0x290 device_initial_probe+0x17/0x20 bus_probe_device+0x110/0x130 device_add+0x673/0xc80 device_register+0x1e/0x30 sdebug_add_host_helper+0x1a7/0x3b0 [scsi_debug] scsi_debug_init+0x64f/0x1000 [scsi_debug] do_one_initcall+0xd7/0x470 do_init_module+0xe7/0x330 load_module+0x122a/0x12c0 __do_sys_finit_module+0x124/0x1a0 __x64_sys_finit_module+0x46/0x50 do_syscall_64+0x38/0x80 entry_SYSCALL_64_after_hwframe+0x46/0xb0 Link: https://lore.kernel.org/r/20230210205200.36973-3-bvanassche@acm.org Cc: Alan Stern Cc: Yi Zhang Cc: stable@vger.kernel.org Fixes: 77c019768f06 ("[SCSI] fix /proc memory leak in the SCSI core") Reported-by: Yi Zhang Signed-off-by: Bart Van Assche Signed-off-by: Martin K. Petersen Signed-off-by: Sasha Levin --- drivers/scsi/hosts.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index b3d6ea92b4f7..2ffc2e15d822 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -178,6 +178,7 @@ void scsi_remove_host(struct Scsi_Host *shost) scsi_forget_host(shost); mutex_unlock(&shost->scan_mutex); scsi_proc_host_rm(shost); + scsi_proc_hostdir_rm(shost->hostt); spin_lock_irqsave(shost->host_lock, flags); if (scsi_host_set_state(shost, SHOST_DEL)) @@ -329,6 +330,7 @@ static void scsi_host_dev_release(struct device *dev) struct Scsi_Host *shost = dev_to_shost(dev); struct device *parent = dev->parent; + /* In case scsi_remove_host() has not been called. */ scsi_proc_hostdir_rm(shost->hostt); /* Wait for functions invoked through call_rcu(&shost->rcu, ...) */ -- cgit v1.2.1 From 6f2e20f70caea3956fef1c9c2d6676778289b600 Mon Sep 17 00:00:00 2001 From: Nobuhiro Iwamatsu Date: Thu, 2 Mar 2023 06:23:50 +0900 Subject: Revert "spi: mt7621: Fix an error message in mt7621_spi_probe()" This reverts commit 269f650a0b26067092873308117e0bf0c6ec8289 which is commit 2b2bf6b7faa9010fae10dc7de76627a3fdb525b3 upstream. dev_err_probe() is not supported in 4.19.y, so this driver will fail to build. ``` CC drivers/staging/mt7621-spi/spi-mt7621.o drivers/staging/mt7621-spi/spi-mt7621.c: In function 'mt7621_spi_probe': drivers/staging/mt7621-spi/spi-mt7621.c:446:24: error: implicit declaration of function 'dev_err_probe'; did you mean 'device_reprobe'?
[-Werror=implicit-function-declaration] 446 | return dev_err_probe(&pdev->dev, PTR_ERR(clk), | ^~~~~~~~~~~~~ | device_reprobe ``` Signed-off-by: Nobuhiro Iwamatsu (CIP) Signed-off-by: Greg Kroah-Hartman --- drivers/staging/mt7621-spi/spi-mt7621.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/staging/mt7621-spi/spi-mt7621.c b/drivers/staging/mt7621-spi/spi-mt7621.c index b73823830e3a..75ed48f60c8c 100644 --- a/drivers/staging/mt7621-spi/spi-mt7621.c +++ b/drivers/staging/mt7621-spi/spi-mt7621.c @@ -442,9 +442,11 @@ static int mt7621_spi_probe(struct platform_device *pdev) return PTR_ERR(base); clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(clk)) - return dev_err_probe(&pdev->dev, PTR_ERR(clk), - "unable to get SYS clock\n"); + if (IS_ERR(clk)) { + dev_err(&pdev->dev, "unable to get SYS clock, err=%d\n", + status); + return PTR_ERR(clk); + } status = clk_prepare_enable(clk); if (status) -- cgit v1.2.1 From c7fd84f7e2ea36ae1bc0f6f433b256cb3bc14bf6 Mon Sep 17 00:00:00 2001 From: xurui Date: Wed, 18 Jan 2023 16:59:12 +0800 Subject: MIPS: Fix a compilation issue MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 109d587a4b4d7ccca2200ab1f808f43ae23e2585 ] arch/mips/include/asm/mach-rc32434/pci.h:377: cc1: error: result of '-117440512 << 16' requires 44 bits to represent, but 'int' only has 32 bits [-Werror=shift-overflow=] All bits in KORINA_STAT are already at the correct position, so there is no additional shift needed. Signed-off-by: xurui Signed-off-by: Thomas Bogendoerfer Signed-off-by: Sasha Levin --- arch/mips/include/asm/mach-rc32434/pci.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/mips/include/asm/mach-rc32434/pci.h b/arch/mips/include/asm/mach-rc32434/pci.h index 6f40d1515580..1ff8a987025c 100644 --- a/arch/mips/include/asm/mach-rc32434/pci.h +++ b/arch/mips/include/asm/mach-rc32434/pci.h @@ -377,7 +377,7 @@ struct pci_msu { PCI_CFG04_STAT_SSE | \ PCI_CFG04_STAT_PE) -#define KORINA_CNFG1 ((KORINA_STAT<<16)|KORINA_CMD) +#define KORINA_CNFG1 (KORINA_STAT | KORINA_CMD) #define KORINA_REVID 0 #define KORINA_CLASS_CODE 0 -- cgit v1.2.1 From 2cb5c515e0be337b81972daae2d35d8bc96b2965 Mon Sep 17 00:00:00 2001 From: Edward Humes Date: Sat, 27 Aug 2022 02:49:39 -0400 Subject: alpha: fix R_ALPHA_LITERAL reloc for large modules [ Upstream commit b6b17a8b3ecd878d98d5472a9023ede9e669ca72 ] Previously, R_ALPHA_LITERAL relocations would overflow for large kernel modules. This was because the Alpha's apply_relocate_add was relying on the kernel's module loader to have sorted the GOT towards the very end of the module as it was mapped into memory in order to correctly assign the global pointer. While this behavior would mostly work fine for small kernel modules, this approach would overflow on kernel modules with large GOTs since the global pointer would be very far away from the GOT, and thus, certain entries would be out of range. This patch fixes this by instead using the Tru64 behavior of assigning the global pointer to be 32KB away from the start of the GOT. The change made in this patch won't work for multi-GOT kernel modules as it assumes the module only has one GOT located at the beginning of .got, although for the vast majority of kernel modules, this should be fine.
Of the kernel modules that would previously result in a relocation error, none of them, even modules like nouveau, have even come close to filling up a single GOT, and they've all worked fine under this patch. Signed-off-by: Edward Humes Signed-off-by: Matt Turner Signed-off-by: Sasha Levin --- arch/alpha/kernel/module.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c index 47632fa8c24e..b169dc9a9ac1 100644 --- a/arch/alpha/kernel/module.c +++ b/arch/alpha/kernel/module.c @@ -158,10 +158,8 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab, base = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr; symtab = (Elf64_Sym *)sechdrs[symindex].sh_addr; - /* The small sections were sorted to the end of the segment. - The following should definitely cover them. */ - gp = (u64)me->core_layout.base + me->core_layout.size - 0x8000; got = sechdrs[me->arch.gotsecindex].sh_addr; + gp = got + 0x8000; for (i = 0; i < n; i++) { unsigned long r_sym = ELF64_R_SYM (rela[i].r_info); -- cgit v1.2.1 From af346580d43db0974571073aabd3245fc75c6a19 Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Wed, 15 Feb 2023 10:12:12 -0700 Subject: macintosh: windfarm: Use unsigned type for 1-bit bitfields [ Upstream commit 748ea32d2dbd813d3bd958117bde5191182f909a ] Clang warns: drivers/macintosh/windfarm_lm75_sensor.c:63:14: error: implicit truncation from 'int' to a one-bit wide bit-field changes value from 1 to -1 [-Werror,-Wsingle-bit-bitfield-constant-conversion] lm->inited = 1; ^ ~ drivers/macintosh/windfarm_smu_sensors.c:356:19: error: implicit truncation from 'int' to a one-bit wide bit-field changes value from 1 to -1 [-Werror,-Wsingle-bit-bitfield-constant-conversion] pow->fake_volts = 1; ^ ~ drivers/macintosh/windfarm_smu_sensors.c:368:18: error: implicit truncation from 'int' to a one-bit wide bit-field changes value from 1 to -1 [-Werror,-Wsingle-bit-bitfield-constant-conversion] pow->quadratic = 1; ^ ~ There is no bug here since no code checks the actual value of these fields, just whether or not they are zero (boolean context), but this can be easily fixed by switching to an unsigned type. 
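The warning is easy to reproduce in isolation. In a 1-bit plain `int` bitfield there is only a sign bit on compilers that treat such fields as signed (the signedness is implementation-defined), so the representable values are 0 and -1; a standalone sketch:

```
#include <stdio.h>

struct flags {
	int signed_bit : 1;            /* {-1, 0} if plain int bitfields are signed */
	unsigned int unsigned_bit : 1; /* {0, 1} */
};

int main(void)
{
	struct flags f = { .signed_bit = 1, .unsigned_bit = 1 };

	/* typically prints "-1 1": the signed field cannot hold 1 */
	printf("%d %u\n", f.signed_bit, f.unsigned_bit);
	return 0;
}
```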
Signed-off-by: Nathan Chancellor Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20230215-windfarm-wsingle-bit-bitfield-constant-conversion-v1-1-26415072e855@kernel.org Signed-off-by: Sasha Levin --- drivers/macintosh/windfarm_lm75_sensor.c | 4 ++-- drivers/macintosh/windfarm_smu_sensors.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/macintosh/windfarm_lm75_sensor.c b/drivers/macintosh/windfarm_lm75_sensor.c index f48ad2445ed6..f5b1ac8347db 100644 --- a/drivers/macintosh/windfarm_lm75_sensor.c +++ b/drivers/macintosh/windfarm_lm75_sensor.c @@ -35,8 +35,8 @@ #endif struct wf_lm75_sensor { - int ds1775 : 1; - int inited : 1; + unsigned int ds1775 : 1; + unsigned int inited : 1; struct i2c_client *i2c; struct wf_sensor sens; }; diff --git a/drivers/macintosh/windfarm_smu_sensors.c b/drivers/macintosh/windfarm_smu_sensors.c index 172fd267dcf6..0f4017a8189e 100644 --- a/drivers/macintosh/windfarm_smu_sensors.c +++ b/drivers/macintosh/windfarm_smu_sensors.c @@ -275,8 +275,8 @@ struct smu_cpu_power_sensor { struct list_head link; struct wf_sensor *volts; struct wf_sensor *amps; - int fake_volts : 1; - int quadratic : 1; + unsigned int fake_volts : 1; + unsigned int quadratic : 1; struct wf_sensor sens; }; #define to_smu_cpu_power(c) container_of(c, struct smu_cpu_power_sensor, sens) -- cgit v1.2.1 From 56e82c07c67eef062d612cfe23069e153813c3da Mon Sep 17 00:00:00 2001 From: Alvaro Karsz Date: Tue, 10 Jan 2023 18:56:36 +0200 Subject: PCI: Add SolidRun vendor ID [ Upstream commit db6c4dee4c104f50ed163af71c53bfdb878a8318 ] Add SolidRun vendor ID to pci_ids.h. The vendor ID is used in 2 different source files, the SNET vDPA driver and PCI quirks. Signed-off-by: Alvaro Karsz Acked-by: Bjorn Helgaas Message-Id: <20230110165638.123745-2-alvaro.karsz@solid-run.com> Signed-off-by: Michael S. Tsirkin Signed-off-by: Sasha Levin --- include/linux/pci_ids.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 1658e9f8d803..78c1cd4dfdc0 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -3097,6 +3097,8 @@ #define PCI_VENDOR_ID_3COM_2 0xa727 +#define PCI_VENDOR_ID_SOLIDRUN 0xd063 + #define PCI_VENDOR_ID_DIGIUM 0xd161 #define PCI_DEVICE_ID_DIGIUM_HFC4S 0xb410 -- cgit v1.2.1 From 311192b4ba1c34a7d59cb475a4e057a870178efc Mon Sep 17 00:00:00 2001 From: Paul Elder Date: Mon, 28 Nov 2022 09:02:01 +0100 Subject: media: ov5640: Fix analogue gain control [ Upstream commit afa4805799c1d332980ad23339fdb07b5e0cf7e0 ] Gain control is badly documented in publicly available (including leaked) documentation. There is an AGC pre-gain in register 0x3a13, expressed as a 6-bit value (plus an enable bit in bit 6). The driver hardcodes it to 0x43, which one application note states is equal to x1.047. The documentation also states that 0x40 is equal to x1.000. The pre-gain thus seems to be expressed in 1/64 increments, and so ranges from x1.00 to x1.984. What the pre-gain does is however unspecified. There is then an AGC gain limit, in registers 0x3a18 and 0x3a19, expressed as a 10-bit "real gain format" value. One application note sets it to 0x00f8 and states it is equal to x15.5, so it appears to be expressed in 1/16 increments, up to x63.9375. The manual gain is stored in registers 0x350a and 0x350b, also as a 10-bit "real gain format" value. It is documented in the application note as a Q6.4 value, up to x63.9375.
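To make the notation concrete, a small sketch decoding a Q6.4 "real gain" register value; 0x00f8 is the application-note example quoted above, and the decoding shown is an illustration of the format rather than driver code:

```
#include <stdio.h>

int main(void)
{
	unsigned int reg = 0x00f8;     /* AGC gain limit example from the note */
	unsigned int whole = reg >> 4; /* integer part: 15 */
	unsigned int frac = reg & 0xf; /* fractional part in 1/16 steps: 8 */

	/* prints "gain = x15.5000", matching the x15.5 figure cited above */
	printf("gain = x%u.%04u\n", whole, frac * 10000 / 16);
	return 0;
}
```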
One version of the datasheet indicates that the sensor supports a digital gain: The OV5640 supports 1/2/4 digital gain. Normally, the gain is controlled automatically by the automatic gain control (AGC) block. It isn't clear how that would be controlled manually. There appears to be no indication regarding whether the gain controlled through registers 0x350a and 0x350b is an analogue gain only or also includes digital gain. The words "real gain" don't necessarily mean "combined analogue and digital gains". Some OmniVision sensors (such as the OV8858) are documented as supporting different formats for the gain values, selectable through a register bit, and they are called "real gain format" and "sensor gain format". For that sensor, we have (one of) the gain registers documented as 0x3503[2]=0, gain[7:0] is real gain format, where low 4 bits are fraction bits, for example, 0x10 is 1x gain, 0x28 is 2.5x gain. If 0x3503[2]=1, gain[7:0] is sensor gain format, gain[7:4] is coarse gain, 00000: 1x, 00001: 2x, 00011: 4x, 00111: 8x, gain[7] is 1, gain[3:0] is fine gain. For example, 0x10 is 1x gain, 0x30 is 2x gain, 0x70 is 4x gain. (The second part of the text makes little sense.) "Real gain" may thus refer to the combination of the coarse and fine analogue gains as a single value. The OV5640 0x350a and 0x350b registers thus appear to control analogue gain. The driver incorrectly uses V4L2_CID_GAIN as V4L2 has a specific control for analogue gain, V4L2_CID_ANALOGUE_GAIN. Use it. If registers 0x350a and 0x350b are later found to control digital gain as well, the driver could then restrict the range of the analogue gain control value to lower than x64 and add a separate digital gain control. Signed-off-by: Paul Elder Signed-off-by: Laurent Pinchart Reviewed-by: Jacopo Mondi Reviewed-by: Jai Luthra Signed-off-by: Sakari Ailus Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Sasha Levin --- drivers/media/i2c/ov5640.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c index aa9c0b7ee7a2..31b26b18e525 100644 --- a/drivers/media/i2c/ov5640.c +++ b/drivers/media/i2c/ov5640.c @@ -2447,7 +2447,7 @@ static int ov5640_init_controls(struct ov5640_dev *sensor) /* Auto/manual gain */ ctrls->auto_gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_AUTOGAIN, 0, 1, 1, 1); - ctrls->gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_GAIN, + ctrls->gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_ANALOGUE_GAIN, 0, 1023, 1, 0); ctrls->saturation = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_SATURATION, -- cgit v1.2.1 From bd37050a1200c969c1db10764cb9c2e82a12ea49 Mon Sep 17 00:00:00 2001 From: Tung Nguyen Date: Tue, 19 Feb 2019 11:20:47 +0700 Subject: tipc: improve function tipc_wait_for_cond() commit 223b7329ec6a0dae1b7f7db7b770e93f4a069ef9 upstream. Commit 844cf763fba6 ("tipc: make macro tipc_wait_for_cond() smp safe") replaced finish_wait() with remove_wait_queue() but still used prepare_to_wait(). This causes unnecessary conditional checking before adding to the wait queue in prepare_to_wait(). This commit replaces prepare_to_wait() with add_wait_queue() as the pair function with remove_wait_queue(). Acked-by: Ying Xue Acked-by: Jon Maloy Signed-off-by: Tung Nguyen Signed-off-by: David S.
Miller Cc: Lee Jones Signed-off-by: Greg Kroah-Hartman --- net/tipc/socket.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 8266452c143b..c83eaa718369 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -388,7 +388,7 @@ static int tipc_sk_sock_err(struct socket *sock, long *timeout) rc_ = tipc_sk_sock_err((sock_), timeo_); \ if (rc_) \ break; \ - prepare_to_wait(sk_sleep(sk_), &wait_, TASK_INTERRUPTIBLE); \ + add_wait_queue(sk_sleep(sk_), &wait_); \ release_sock(sk_); \ *(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \ sched_annotate_sleep(); \ -- cgit v1.2.1 From 9a0789a26289d7122fb58f43754cad272fc1b18f Mon Sep 17 00:00:00 2001 From: John Harrison Date: Wed, 15 Feb 2023 17:11:01 -0800 Subject: drm/i915: Don't use BAR mappings for ring buffers with LLC MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 85636167e3206c3fbd52254fc432991cc4e90194 upstream. Direction from hardware is that ring buffers should never be mapped via the BAR on systems with LLC. There are too many caching pitfalls due to the way BAR accesses are routed. So it is safest to just not use it. Signed-off-by: John Harrison Fixes: 9d80841ea4c9 ("drm/i915: Allow ringbuffers to be bound anywhere") Cc: Chris Wilson Cc: Joonas Lahtinen Cc: Jani Nikula Cc: Rodrigo Vivi Cc: Tvrtko Ursulin Cc: intel-gfx@lists.freedesktop.org Cc: # v4.9+ Tested-by: Jouni HΓΆgander Reviewed-by: Daniele Ceraolo Spurio Link: https://patchwork.freedesktop.org/patch/msgid/20230216011101.1909009-3-John.C.Harrison@Intel.com (cherry picked from commit 65c08339db1ada87afd6cfe7db8e60bb4851d919) Signed-off-by: Jani Nikula Signed-off-by: John Harrison Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/i915/intel_ringbuffer.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 3b8218dd9bb1..979d130b24c4 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -1083,7 +1083,7 @@ int intel_ring_pin(struct intel_ring *ring, if (unlikely(ret)) return ret; - if (i915_vma_is_map_and_fenceable(vma)) + if (i915_vma_is_map_and_fenceable(vma) && !HAS_LLC(vma->vm->i915)) addr = (void __force *)i915_vma_pin_iomap(vma); else addr = i915_gem_object_pin_map(vma->obj, map); @@ -1118,7 +1118,7 @@ void intel_ring_unpin(struct intel_ring *ring) /* Discard any unused bytes beyond that submitted to hw. */ intel_ring_reset(ring, ring->tail); - if (i915_vma_is_map_and_fenceable(ring->vma)) + if (i915_vma_is_map_and_fenceable(ring->vma) && !HAS_LLC(ring->vma->vm->i915)) i915_vma_unpin_iomap(ring->vma); else i915_gem_object_unpin_map(ring->vma->obj); -- cgit v1.2.1 From 1793da97a23e31c5bf06631f3f3e5a25f368fd64 Mon Sep 17 00:00:00 2001 From: Shigeru Yoshida Date: Thu, 2 Mar 2023 01:39:13 +0900 Subject: net: caif: Fix use-after-free in cfusbl_device_notify() commit 9781e98a97110f5e76999058368b4be76a788484 upstream. syzbot reported use-after-free in cfusbl_device_notify() [1]. 
This causes a stack trace like below: BUG: KASAN: use-after-free in cfusbl_device_notify+0x7c9/0x870 net/caif/caif_usb.c:138 Read of size 8 at addr ffff88807ac4e6f0 by task kworker/u4:6/1214 CPU: 0 PID: 1214 Comm: kworker/u4:6 Not tainted 5.19.0-rc3-syzkaller-00146-g92f20ff72066 #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Workqueue: netns cleanup_net Call Trace: __dump_stack lib/dump_stack.c:88 [inline] dump_stack_lvl+0xcd/0x134 lib/dump_stack.c:106 print_address_description.constprop.0.cold+0xeb/0x467 mm/kasan/report.c:313 print_report mm/kasan/report.c:429 [inline] kasan_report.cold+0xf4/0x1c6 mm/kasan/report.c:491 cfusbl_device_notify+0x7c9/0x870 net/caif/caif_usb.c:138 notifier_call_chain+0xb5/0x200 kernel/notifier.c:87 call_netdevice_notifiers_info+0xb5/0x130 net/core/dev.c:1945 call_netdevice_notifiers_extack net/core/dev.c:1983 [inline] call_netdevice_notifiers net/core/dev.c:1997 [inline] netdev_wait_allrefs_any net/core/dev.c:10227 [inline] netdev_run_todo+0xbc0/0x10f0 net/core/dev.c:10341 default_device_exit_batch+0x44e/0x590 net/core/dev.c:11334 ops_exit_list+0x125/0x170 net/core/net_namespace.c:167 cleanup_net+0x4ea/0xb00 net/core/net_namespace.c:594 process_one_work+0x996/0x1610 kernel/workqueue.c:2289 worker_thread+0x665/0x1080 kernel/workqueue.c:2436 kthread+0x2e9/0x3a0 kernel/kthread.c:376 ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:302 When unregistering a net device, unregister_netdevice_many_notify() sets the device's reg_state to NETREG_UNREGISTERING, calls notifiers with NETDEV_UNREGISTER, and adds the device to the todo list. Later on, devices in the todo list are processed by netdev_run_todo(). netdev_run_todo() waits for each device's reference count to become 1 while rebroadcasting the NETDEV_UNREGISTER notification. When cfusbl_device_notify() is called with NETDEV_UNREGISTER multiple times, the parent device might be freed. This could cause a UAF. Processing NETDEV_UNREGISTER multiple times also causes an imbalance of the reference count for the module. This patch fixes the issue by accepting only the first NETDEV_UNREGISTER notification. Fixes: 7ad65bf68d70 ("caif: Add support for CAIF over CDC NCM USB interface") CC: sjur.brandeland@stericsson.com Reported-by: syzbot+b563d33852b893653a9e@syzkaller.appspotmail.com Link: https://syzkaller.appspot.com/bug?id=c3bfd8e2450adab3bffe4d80821fbbced600407f [1] Signed-off-by: Shigeru Yoshida Link: https://lore.kernel.org/r/20230301163913.391304-1-syoshida@redhat.com Signed-off-by: Jakub Kicinski Signed-off-by: Greg Kroah-Hartman --- net/caif/caif_usb.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/caif/caif_usb.c b/net/caif/caif_usb.c index 76d49a1bc6f6..609c5793f45a 100644 --- a/net/caif/caif_usb.c +++ b/net/caif/caif_usb.c @@ -135,6 +135,9 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what, struct usb_device *usbdev; int res; + if (what == NETDEV_UNREGISTER && dev->reg_state >= NETREG_UNREGISTERED) + return 0; + /* Check whether we have a NCM device, and find its VID/PID. */ if (!(dev->dev.parent && dev->dev.parent->driver && strcmp(dev->dev.parent->driver->name, "cdc_ncm") == 0)) -- cgit v1.2.1 From 98f49e693e02c1dafd5786be3468657840dd6f06 Mon Sep 17 00:00:00 2001 From: Kang Chen Date: Mon, 27 Feb 2023 17:30:37 +0800 Subject: nfc: fdp: add null check of devm_kmalloc_array in fdp_nci_i2c_read_device_properties commit 11f180a5d62a51b484e9648f9b310e1bd50b1a57 upstream.
devm_kmalloc_array() may fail; *fw_vsc_cfg might then be NULL and cause an out-of-bounds write in device_property_read_u8_array() later. Fixes: a06347c04c13 ("NFC: Add Intel Fields Peak NFC solution driver") Signed-off-by: Kang Chen Reviewed-by: Krzysztof Kozlowski Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/20230227093037.907654-1-void0red@gmail.com Signed-off-by: Paolo Abeni Signed-off-by: Greg Kroah-Hartman --- drivers/nfc/fdp/i2c.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/nfc/fdp/i2c.c b/drivers/nfc/fdp/i2c.c index 7f143387b9ff..64d16b195fc0 100644 --- a/drivers/nfc/fdp/i2c.c +++ b/drivers/nfc/fdp/i2c.c @@ -263,6 +263,9 @@ static void fdp_nci_i2c_read_device_properties(struct device *dev, len, sizeof(**fw_vsc_cfg), GFP_KERNEL); + if (!*fw_vsc_cfg) + goto alloc_err; + r = device_property_read_u8_array(dev, FDP_DP_FW_VSC_CFG_NAME, *fw_vsc_cfg, len); @@ -276,6 +279,7 @@ vsc_read_err: *fw_vsc_cfg = NULL; } +alloc_err: dev_dbg(dev, "Clock type: %d, clock frequency: %d, VSC: %s", *clock_type, *clock_freq, *fw_vsc_cfg != NULL ? "yes" : "no"); } -- cgit v1.2.1 From c631e52aea0fc8d4deea06e439f5810a8b40ad0f Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 27 Feb 2023 15:30:24 +0000 Subject: ila: do not generate empty messages in ila_xlat_nl_cmd_get_mapping() commit 693aa2c0d9b6d5b1f2745d31b6e70d09dbbaf06e upstream. ila_xlat_nl_cmd_get_mapping() generates an empty skb, triggering a recent sanity check [1]. Instead, return an error code, so that user space can get it. [1] skb_assert_len WARNING: CPU: 0 PID: 5923 at include/linux/skbuff.h:2527 skb_assert_len include/linux/skbuff.h:2527 [inline] WARNING: CPU: 0 PID: 5923 at include/linux/skbuff.h:2527 __dev_queue_xmit+0x1bc0/0x3488 net/core/dev.c:4156 Modules linked in: CPU: 0 PID: 5923 Comm: syz-executor269 Not tainted 6.2.0-syzkaller-18300-g2ebd1fbb946d #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/21/2023 pstate: 60400005 (nZCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--) pc : skb_assert_len include/linux/skbuff.h:2527 [inline] pc : __dev_queue_xmit+0x1bc0/0x3488 net/core/dev.c:4156 lr : skb_assert_len include/linux/skbuff.h:2527 [inline] lr : __dev_queue_xmit+0x1bc0/0x3488 net/core/dev.c:4156 sp : ffff80001e0d6c40 x29: ffff80001e0d6e60 x28: dfff800000000000 x27: ffff0000c86328c0 x26: dfff800000000000 x25: ffff0000c8632990 x24: ffff0000c8632a00 x23: 0000000000000000 x22: 1fffe000190c6542 x21: ffff0000c8632a10 x20: ffff0000c8632a00 x19: ffff80001856e000 x18: ffff80001e0d5fc0 x17: 0000000000000000 x16: ffff80001235d16c x15: 0000000000000000 x14: 0000000000000000 x13: 0000000000000001 x12: 0000000000000001 x11: ff80800008353a30 x10: 0000000000000000 x9 : 21567eaf25bfb600 x8 : 21567eaf25bfb600 x7 : 0000000000000001 x6 : 0000000000000001 x5 : ffff80001e0d6558 x4 : ffff800015c74760 x3 : ffff800008596744 x2 : 0000000000000001 x1 : 0000000100000000 x0 : 000000000000000e Call trace: skb_assert_len include/linux/skbuff.h:2527 [inline] __dev_queue_xmit+0x1bc0/0x3488 net/core/dev.c:4156 dev_queue_xmit include/linux/netdevice.h:3033 [inline] __netlink_deliver_tap_skb net/netlink/af_netlink.c:307 [inline] __netlink_deliver_tap+0x45c/0x6f8 net/netlink/af_netlink.c:325 netlink_deliver_tap+0xf4/0x174 net/netlink/af_netlink.c:338 __netlink_sendskb net/netlink/af_netlink.c:1283 [inline] netlink_sendskb+0x6c/0x154 net/netlink/af_netlink.c:1292 netlink_unicast+0x334/0x8d4 net/netlink/af_netlink.c:1380 nlmsg_unicast include/net/netlink.h:1099 [inline] genlmsg_unicast include/net/genetlink.h:433 [inline]
genlmsg_reply include/net/genetlink.h:443 [inline] ila_xlat_nl_cmd_get_mapping+0x620/0x7d0 net/ipv6/ila/ila_xlat.c:493 genl_family_rcv_msg_doit net/netlink/genetlink.c:968 [inline] genl_family_rcv_msg net/netlink/genetlink.c:1048 [inline] genl_rcv_msg+0x938/0xc1c net/netlink/genetlink.c:1065 netlink_rcv_skb+0x214/0x3c4 net/netlink/af_netlink.c:2574 genl_rcv+0x38/0x50 net/netlink/genetlink.c:1076 netlink_unicast_kernel net/netlink/af_netlink.c:1339 [inline] netlink_unicast+0x660/0x8d4 net/netlink/af_netlink.c:1365 netlink_sendmsg+0x800/0xae0 net/netlink/af_netlink.c:1942 sock_sendmsg_nosec net/socket.c:714 [inline] sock_sendmsg net/socket.c:734 [inline] ____sys_sendmsg+0x558/0x844 net/socket.c:2479 ___sys_sendmsg net/socket.c:2533 [inline] __sys_sendmsg+0x26c/0x33c net/socket.c:2562 __do_sys_sendmsg net/socket.c:2571 [inline] __se_sys_sendmsg net/socket.c:2569 [inline] __arm64_sys_sendmsg+0x80/0x94 net/socket.c:2569 __invoke_syscall arch/arm64/kernel/syscall.c:38 [inline] invoke_syscall+0x98/0x2c0 arch/arm64/kernel/syscall.c:52 el0_svc_common+0x138/0x258 arch/arm64/kernel/syscall.c:142 do_el0_svc+0x64/0x198 arch/arm64/kernel/syscall.c:193 el0_svc+0x58/0x168 arch/arm64/kernel/entry-common.c:637 el0t_64_sync_handler+0x84/0xf0 arch/arm64/kernel/entry-common.c:655 el0t_64_sync+0x190/0x194 arch/arm64/kernel/entry.S:591 irq event stamp: 136484 hardirqs last enabled at (136483): [] __up_console_sem+0x60/0xb4 kernel/printk/printk.c:345 hardirqs last disabled at (136484): [] el1_dbg+0x24/0x80 arch/arm64/kernel/entry-common.c:405 softirqs last enabled at (136418): [] softirq_handle_end kernel/softirq.c:414 [inline] softirqs last enabled at (136418): [] __do_softirq+0xd4c/0xfa4 kernel/softirq.c:600 softirqs last disabled at (136371): [] ____do_softirq+0x14/0x20 arch/arm64/kernel/irq.c:80 ---[ end trace 0000000000000000 ]--- skb len=0 headroom=0 headlen=0 tailroom=192 mac=(0,0) net=(0,-1) trans=-1 shinfo(txflags=0 nr_frags=0 gso(size=0 type=0 segs=0)) csum(0x0 ip_summed=0 complete_sw=0 valid=0 level=0) hash(0x0 sw=0 l4=0) proto=0x0010 pkttype=6 iif=0 dev name=nlmon0 feat=0x0000000000005861 Fixes: 7f00feaf1076 ("ila: Add generic ILA translation facility") Reported-by: syzbot Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv6/ila/ila_xlat.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c index 7858fa9ea103..87744eb8d0c4 100644 --- a/net/ipv6/ila/ila_xlat.c +++ b/net/ipv6/ila/ila_xlat.c @@ -480,6 +480,7 @@ int ila_xlat_nl_cmd_get_mapping(struct sk_buff *skb, struct genl_info *info) rcu_read_lock(); + ret = -ESRCH; ila = ila_lookup_by_params(&xp, ilan); if (ila) { ret = ila_dump_info(ila, -- cgit v1.2.1 From 7e0df88a369006df976f46481292a07f3f68e54b Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Fri, 17 Mar 2023 08:31:44 +0100 Subject: Linux 4.19.278 Link: https://lore.kernel.org/r/20230315115721.234756306@linuxfoundation.org Tested-by: Shuah Khan Link: https://lore.kernel.org/r/20230316083339.388760723@linuxfoundation.org Link: https://lore.kernel.org/r/20230316094129.846802350@linuxfoundation.org Tested-by: Chris Paterson (CIP) Tested-by: Guenter Roeck Tested-by: Linux Kernel Functional Testing Signed-off-by: Greg Kroah-Hartman --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index e00f4bbcd737..a8104c8024a4 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 19 -SUBLEVEL = 277 +SUBLEVEL = 278 EXTRAVERSION = NAME = "People's Front" -- cgit v1.2.1 From c3c0387c22cab69ad56b8ec14ac68b88943dc844 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 2 Feb 2023 16:55:03 -0800 Subject: ext4: fix cgroup writeback accounting with fs-layer encryption commit ffec85d53d0f39ee4680a2cf0795255e000e1feb upstream. When writing a page from an encrypted file that is using filesystem-layer encryption (not inline encryption), ext4 encrypts the pagecache page into a bounce page, then writes the bounce page. It also passes the bounce page to wbc_account_cgroup_owner(). That's incorrect, because the bounce page is a newly allocated temporary page that doesn't have the memory cgroup of the original pagecache page. This makes wbc_account_cgroup_owner() not account the I/O to the owner of the pagecache page as it should. Fix this by always passing the pagecache page to wbc_account_cgroup_owner(). 
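The accounting rule the fix restores can be sketched as follows, using the 4.19 helper name wbc_account_io() as in the diff below; this is a simplified illustration of the split between the page submitted for I/O and the page charged for cgroup writeback, not the verbatim ext4 code:

	/* Sketch: write the bounce page (when encryption produced one),
	 * but always charge the original pagecache page, since only it
	 * carries the owning memory cgroup.
	 */
	static int submit_page_for_writeback(struct bio *bio,
					     struct writeback_control *wbc,
					     struct page *pagecache_page,
					     struct page *bounce_page,
					     unsigned int len, unsigned int off)
	{
		if (bio_add_page(bio, bounce_page ?: pagecache_page, len, off) != len)
			return -EAGAIN;
		wbc_account_io(wbc, pagecache_page, len);	/* not the bounce page */
		return 0;
	}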
Fixes: 001e4a8775f6 ("ext4: implement cgroup writeback support") Cc: stable@vger.kernel.org Reported-by: Matthew Wilcox (Oracle) Signed-off-by: Eric Biggers Acked-by: Tejun Heo Link: https://lore.kernel.org/r/20230203005503.141557-1-ebiggers@kernel.org Signed-off-by: Theodore Ts'o Signed-off-by: Greg Kroah-Hartman --- fs/ext4/page-io.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index 3de933354a08..bf910f266469 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c @@ -388,7 +388,8 @@ static int io_submit_init_bio(struct ext4_io_submit *io, static int io_submit_add_bh(struct ext4_io_submit *io, struct inode *inode, - struct page *page, + struct page *pagecache_page, + struct page *bounce_page, struct buffer_head *bh) { int ret; @@ -403,10 +404,11 @@ submit_and_retry: return ret; io->io_bio->bi_write_hint = inode->i_write_hint; } - ret = bio_add_page(io->io_bio, page, bh->b_size, bh_offset(bh)); + ret = bio_add_page(io->io_bio, bounce_page ?: pagecache_page, + bh->b_size, bh_offset(bh)); if (ret != bh->b_size) goto submit_and_retry; - wbc_account_io(io->io_wbc, page, bh->b_size); + wbc_account_io(io->io_wbc, pagecache_page, bh->b_size); io->io_next_block++; return 0; } @@ -514,8 +516,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io, do { if (!buffer_async_write(bh)) continue; - ret = io_submit_add_bh(io, inode, - data_page ? data_page : page, bh); + ret = io_submit_add_bh(io, inode, page, data_page, bh); if (ret) { /* * We only get here on ENOMEM. Not much else -- cgit v1.2.1 From 2a8664583d4d3655cfe5d36cf03f56b11530b69b Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 16 Mar 2023 23:27:43 -0700 Subject: fs: sysfs_emit_at: Remove PAGE_SIZE alignment check From: Eric Biggers [No upstream commit because this fixes a bug in a backport.] Before upstream commit 59bb47985c1d ("mm, sl[aou]b: guarantee natural alignment for kmalloc(power-of-two)") which went into v5.4, kmalloc did *not* always guarantee that PAGE_SIZE allocations are PAGE_SIZE-aligned. Upstream commit 2efc459d06f1 ("sysfs: Add sysfs_emit and sysfs_emit_at to format sysfs output") added two WARN()s that trigger when PAGE_SIZE allocations are not PAGE_SIZE-aligned. This was backported to old kernels that don't guarantee PAGE_SIZE alignment. Commit 10ddfb495232 ("fs: sysfs_emit: Remove PAGE_SIZE alignment check") in 4.19.y, and its equivalent in 4.14.y and 4.9.y, tried to fix this bug. However, it only handled sysfs_emit(), not sysfs_emit_at(). Fix it in sysfs_emit_at() too. A reproducer is to build the kernel with the following options: CONFIG_SLUB=y CONFIG_SLUB_DEBUG=y CONFIG_SLUB_DEBUG_ON=y CONFIG_PM=y CONFIG_SUSPEND=y CONFIG_PM_WAKELOCKS=y Then run: echo foo > /sys/power/wake_lock && cat /sys/power/wake_lock Fixes: cb1f69d53ac8 ("sysfs: Add sysfs_emit and sysfs_emit_at to format sysfs output") Reported-by: kernel test robot Link: https://lore.kernel.org/r/202303141634.1e64fd76-yujie.liu@intel.com Signed-off-by: Eric Biggers Signed-off-by: Greg Kroah-Hartman --- fs/sysfs/file.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c index 011e391497f4..cd70dbeeab22 100644 --- a/fs/sysfs/file.c +++ b/fs/sysfs/file.c @@ -599,7 +599,7 @@ int sysfs_emit_at(char *buf, int at, const char *fmt, ...)
va_list args; int len; - if (WARN(!buf || offset_in_page(buf) || at < 0 || at >= PAGE_SIZE, + if (WARN(!buf || at < 0 || at >= PAGE_SIZE, "invalid sysfs_emit_at: buf:%p at:%d\n", buf, at)) return 0; -- cgit v1.2.1 From 1e6933f7a3d6bc8dcdfafeda68b014bd2aa5c934 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 25 Feb 2023 21:39:47 -0800 Subject: clk: HI655X: select REGMAP instead of depending on it [ Upstream commit 0ffad67784a097beccf34d297ddd1b0773b3b8a3 ] REGMAP is a hidden (not user visible) symbol. Users cannot set it directly thru "make *config", so drivers should select it instead of depending on it if they need it. Consistently using "select" or "depends on" can also help reduce Kconfig circular dependency issues. Therefore, change the use of "depends on REGMAP" to "select REGMAP". Fixes: 3a49afb84ca0 ("clk: enable hi655x common clk automatically") Signed-off-by: Randy Dunlap Cc: Riku Voipio Cc: Stephen Boyd Cc: Michael Turquette Cc: linux-clk@vger.kernel.org Link: https://lore.kernel.org/r/20230226053953.4681-3-rdunlap@infradead.org Signed-off-by: Stephen Boyd Signed-off-by: Sasha Levin --- drivers/clk/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index 292056bbb30e..ffe81449ce24 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig @@ -63,7 +63,7 @@ config COMMON_CLK_RK808 config COMMON_CLK_HI655X tristate "Clock driver for Hi655x" if EXPERT depends on (MFD_HI655X_PMIC || COMPILE_TEST) - depends on REGMAP + select REGMAP default MFD_HI655X_PMIC ---help--- This driver supports the hi655x PMIC clock. This -- cgit v1.2.1 From e23ca307745be3df7fe9762f3e2a7e311a57852e Mon Sep 17 00:00:00 2001 From: Breno Leitao Date: Wed, 8 Mar 2023 11:07:45 -0800 Subject: tcp: tcp_make_synack() can be called from process context [ Upstream commit bced3f7db95ff2e6ca29dc4d1c9751ab5e736a09 ] tcp_rtx_synack() now could be called in process context as explained in 0a375c822497 ("tcp: tcp_rtx_synack() can be called from process context"). tcp_rtx_synack() might call tcp_make_synack(), which will touch per-CPU variables with preemption enabled. This causes the following BUG: BUG: using __this_cpu_add() in preemptible [00000000] code: ThriftIO1/5464 caller is tcp_make_synack+0x841/0xac0 Call Trace: dump_stack_lvl+0x10d/0x1a0 check_preemption_disabled+0x104/0x110 tcp_make_synack+0x841/0xac0 tcp_v6_send_synack+0x5c/0x450 tcp_rtx_synack+0xeb/0x1f0 inet_rtx_syn_ack+0x34/0x60 tcp_check_req+0x3af/0x9e0 tcp_rcv_state_process+0x59b/0x2030 tcp_v6_do_rcv+0x5f5/0x700 release_sock+0x3a/0xf0 tcp_sendmsg+0x33/0x40 ____sys_sendmsg+0x2f2/0x490 __sys_sendmsg+0x184/0x230 do_syscall_64+0x3d/0x90 Avoid calling __TCP_INC_STATS(), which will touch per-cpu variables. Use TCP_INC_STATS(), which is safe to be called from process context.
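The rule behind the one-line change: the double-underscore per-cpu accessors assume preemption is already disabled, while the plain forms disable it themselves. A generic sketch of the distinction (illustrative, not the TCP code itself):

	DEFINE_PER_CPU(unsigned long, pkt_count);

	static void count_from_process_context(void)
	{
		/* Safe anywhere: disables preemption around the update. */
		this_cpu_inc(pkt_count);
	}

	static void count_from_atomic_context(void)
	{
		/* Only valid with preemption already disabled (e.g. softirq);
		 * otherwise CONFIG_DEBUG_PREEMPT emits the BUG shown above.
		 */
		__this_cpu_inc(pkt_count);
	}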
Fixes: 8336886f786f ("tcp: TCP Fast Open Server - support TFO listeners") Signed-off-by: Breno Leitao Reviewed-by: Eric Dumazet Link: https://lore.kernel.org/r/20230308190745.780221-1-leitao@debian.org Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- net/ipv4/tcp_output.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 8962864223b4..9299de0da351 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -3307,7 +3307,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst, th->window = htons(min(req->rsk_rcv_wnd, 65535U)); tcp_options_write((__be32 *)(th + 1), NULL, &opts); th->doff = (tcp_header_size >> 2); - __TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS); + TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS); #ifdef CONFIG_TCP_MD5SIG /* Okay, we have all we need - do the md5 hash if needed */ -- cgit v1.2.1 From 4c20a07ed26a71a8ccc9c6d935fc181573f5462e Mon Sep 17 00:00:00 2001 From: Fedor Pchelkin Date: Thu, 9 Mar 2023 19:50:50 +0300 Subject: nfc: pn533: initialize struct pn533_out_arg properly [ Upstream commit 484b7059796e3bc1cb527caa61dfc60da649b4f6 ] struct pn533_out_arg used as a temporary context for out_urb is not initialized properly. Its uninitialized 'phy' field can be dereferenced in error cases inside pn533_out_complete() callback function. It causes the following failure: general protection fault, probably for non-canonical address 0xdffffc0000000000: 0000 [#1] PREEMPT SMP KASAN KASAN: null-ptr-deref in range [0x0000000000000000-0x0000000000000007] CPU: 1 PID: 0 Comm: swapper/1 Not tainted 6.2.0-rc3-next-20230110-syzkaller #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 10/26/2022 RIP: 0010:pn533_out_complete.cold+0x15/0x44 drivers/nfc/pn533/usb.c:441 Call Trace: __usb_hcd_giveback_urb+0x2b6/0x5c0 drivers/usb/core/hcd.c:1671 usb_hcd_giveback_urb+0x384/0x430 drivers/usb/core/hcd.c:1754 dummy_timer+0x1203/0x32d0 drivers/usb/gadget/udc/dummy_hcd.c:1988 call_timer_fn+0x1da/0x800 kernel/time/timer.c:1700 expire_timers+0x234/0x330 kernel/time/timer.c:1751 __run_timers kernel/time/timer.c:2022 [inline] __run_timers kernel/time/timer.c:1995 [inline] run_timer_softirq+0x326/0x910 kernel/time/timer.c:2035 __do_softirq+0x1fb/0xaf6 kernel/softirq.c:571 invoke_softirq kernel/softirq.c:445 [inline] __irq_exit_rcu+0x123/0x180 kernel/softirq.c:650 irq_exit_rcu+0x9/0x20 kernel/softirq.c:662 sysvec_apic_timer_interrupt+0x97/0xc0 arch/x86/kernel/apic/apic.c:1107 Initialize the field with the pn533_usb_phy currently used. Found by Linux Verification Center (linuxtesting.org) with Syzkaller. 
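A slightly more defensive variant of the same fix, assuming struct pn533_out_arg carries only the phy pointer and the completion, would be a designated initializer so that no field can be left stale (an illustrative sketch, not the patch as merged):

	struct pn533_out_arg arg = {
		.phy = phy,	/* dereferenced by pn533_out_complete() on error paths */
	};

	init_completion(&arg.done);
	phy->out_urb->context = &arg;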
Fixes: 9dab880d675b ("nfc: pn533: Wait for out_urb's completion in pn533_usb_send_frame()") Reported-by: syzbot+1e608ba4217c96d1952f@syzkaller.appspotmail.com Signed-off-by: Fedor Pchelkin Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/20230309165050.207390-1-pchelkin@ispras.ru Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- drivers/nfc/pn533/usb.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c index c7da364b6358..a2d61d824024 100644 --- a/drivers/nfc/pn533/usb.c +++ b/drivers/nfc/pn533/usb.c @@ -187,6 +187,7 @@ static int pn533_usb_send_frame(struct pn533 *dev, print_hex_dump_debug("PN533 TX: ", DUMP_PREFIX_NONE, 16, 1, out->data, out->len, false); + arg.phy = phy; init_completion(&arg.done); cntx = phy->out_urb->context; phy->out_urb->context = &arg; -- cgit v1.2.1 From de69dc8700a2c8f0cc77f2819a7fdf00ce2fcd9d Mon Sep 17 00:00:00 2001 From: Daniil Tatianin Date: Thu, 9 Mar 2023 23:15:56 +0300 Subject: qed/qed_dev: guard against a possible division by zero [ Upstream commit 1a9dc5610ef89d807acdcfbff93a558f341a44da ] Previously we would divide total_left_rate by zero if num_vports happened to be 1 because non_requested_count is calculated as num_vports - req_count. Guard against this by validating num_vports at the beginning and returning an error otherwise. Found by Linux Verification Center (linuxtesting.org) with the SVACE static analysis tool. Fixes: bcd197c81f63 ("qed: Add vport WFQ configuration APIs") Signed-off-by: Daniil Tatianin Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/20230309201556.191392-1-d-tatianin@yandex-team.ru Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- drivers/net/ethernet/qlogic/qed/qed_dev.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index e50fc8f714dc..7e5beb413601 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -4062,6 +4062,11 @@ static int qed_init_wfq_param(struct qed_hwfn *p_hwfn, num_vports = p_hwfn->qm_info.num_vports; + if (num_vports < 2) { + DP_NOTICE(p_hwfn, "Unexpected num_vports: %d\n", num_vports); + return -EINVAL; + } + /* Accounting for the vports which are configured for WFQ explicitly */ for (i = 0; i < num_vports; i++) { u32 tmp_speed; -- cgit v1.2.1 From 51f3bd3765bc5ca4583af07a00833da00d2ace1d Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 10 Mar 2023 19:11:09 +0000 Subject: net: tunnels: annotate lockless accesses to dev->needed_headroom [ Upstream commit 4b397c06cb987935b1b097336532aa6b4210e091 ] IP tunnels can apparently update dev->needed_headroom in their xmit path. This patch takes care of three tunnels' xmit paths, and also the core LL_RESERVED_SPACE() and LL_RESERVED_SPACE_EXTRA() helpers. More changes might be needed for completeness.
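The repair pattern for this class of data race, as applied in the diff at the end of this patch, is to annotate both sides of the racy field so the reads and writes are performed once and not torn:

	/* Writer (tunnel xmit path): publish a grown headroom value. */
	if (headroom > READ_ONCE(dev->needed_headroom))
		WRITE_ONCE(dev->needed_headroom, headroom);

	/* Reader (e.g. LL_RESERVED_SPACE()): take one stable snapshot. */
	if (skb_cow_head(skb, READ_ONCE(dev->needed_headroom)))
		goto tx_dropped;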
BUG: KCSAN: data-race in ip_tunnel_xmit / ip_tunnel_xmit read to 0xffff88815b9da0ec of 2 bytes by task 888 on cpu 1: ip_tunnel_xmit+0x1270/0x1730 net/ipv4/ip_tunnel.c:803 __gre_xmit net/ipv4/ip_gre.c:469 [inline] ipgre_xmit+0x516/0x570 net/ipv4/ip_gre.c:661 __netdev_start_xmit include/linux/netdevice.h:4881 [inline] netdev_start_xmit include/linux/netdevice.h:4895 [inline] xmit_one net/core/dev.c:3580 [inline] dev_hard_start_xmit+0x127/0x400 net/core/dev.c:3596 __dev_queue_xmit+0x1007/0x1eb0 net/core/dev.c:4246 dev_queue_xmit include/linux/netdevice.h:3051 [inline] neigh_direct_output+0x17/0x20 net/core/neighbour.c:1623 neigh_output include/net/neighbour.h:546 [inline] ip_finish_output2+0x740/0x840 net/ipv4/ip_output.c:228 ip_finish_output+0xf4/0x240 net/ipv4/ip_output.c:316 NF_HOOK_COND include/linux/netfilter.h:291 [inline] ip_output+0xe5/0x1b0 net/ipv4/ip_output.c:430 dst_output include/net/dst.h:444 [inline] ip_local_out+0x64/0x80 net/ipv4/ip_output.c:126 iptunnel_xmit+0x34a/0x4b0 net/ipv4/ip_tunnel_core.c:82 ip_tunnel_xmit+0x1451/0x1730 net/ipv4/ip_tunnel.c:813 __gre_xmit net/ipv4/ip_gre.c:469 [inline] ipgre_xmit+0x516/0x570 net/ipv4/ip_gre.c:661 __netdev_start_xmit include/linux/netdevice.h:4881 [inline] netdev_start_xmit include/linux/netdevice.h:4895 [inline] xmit_one net/core/dev.c:3580 [inline] dev_hard_start_xmit+0x127/0x400 net/core/dev.c:3596 __dev_queue_xmit+0x1007/0x1eb0 net/core/dev.c:4246 dev_queue_xmit include/linux/netdevice.h:3051 [inline] neigh_direct_output+0x17/0x20 net/core/neighbour.c:1623 neigh_output include/net/neighbour.h:546 [inline] ip_finish_output2+0x740/0x840 net/ipv4/ip_output.c:228 ip_finish_output+0xf4/0x240 net/ipv4/ip_output.c:316 NF_HOOK_COND include/linux/netfilter.h:291 [inline] ip_output+0xe5/0x1b0 net/ipv4/ip_output.c:430 dst_output include/net/dst.h:444 [inline] ip_local_out+0x64/0x80 net/ipv4/ip_output.c:126 iptunnel_xmit+0x34a/0x4b0 net/ipv4/ip_tunnel_core.c:82 ip_tunnel_xmit+0x1451/0x1730 net/ipv4/ip_tunnel.c:813 __gre_xmit net/ipv4/ip_gre.c:469 [inline] ipgre_xmit+0x516/0x570 net/ipv4/ip_gre.c:661 __netdev_start_xmit include/linux/netdevice.h:4881 [inline] netdev_start_xmit include/linux/netdevice.h:4895 [inline] xmit_one net/core/dev.c:3580 [inline] dev_hard_start_xmit+0x127/0x400 net/core/dev.c:3596 __dev_queue_xmit+0x1007/0x1eb0 net/core/dev.c:4246 dev_queue_xmit include/linux/netdevice.h:3051 [inline] neigh_direct_output+0x17/0x20 net/core/neighbour.c:1623 neigh_output include/net/neighbour.h:546 [inline] ip_finish_output2+0x740/0x840 net/ipv4/ip_output.c:228 ip_finish_output+0xf4/0x240 net/ipv4/ip_output.c:316 NF_HOOK_COND include/linux/netfilter.h:291 [inline] ip_output+0xe5/0x1b0 net/ipv4/ip_output.c:430 dst_output include/net/dst.h:444 [inline] ip_local_out+0x64/0x80 net/ipv4/ip_output.c:126 iptunnel_xmit+0x34a/0x4b0 net/ipv4/ip_tunnel_core.c:82 ip_tunnel_xmit+0x1451/0x1730 net/ipv4/ip_tunnel.c:813 __gre_xmit net/ipv4/ip_gre.c:469 [inline] ipgre_xmit+0x516/0x570 net/ipv4/ip_gre.c:661 __netdev_start_xmit include/linux/netdevice.h:4881 [inline] netdev_start_xmit include/linux/netdevice.h:4895 [inline] xmit_one net/core/dev.c:3580 [inline] dev_hard_start_xmit+0x127/0x400 net/core/dev.c:3596 __dev_queue_xmit+0x1007/0x1eb0 net/core/dev.c:4246 dev_queue_xmit include/linux/netdevice.h:3051 [inline] neigh_direct_output+0x17/0x20 net/core/neighbour.c:1623 neigh_output include/net/neighbour.h:546 [inline] ip_finish_output2+0x740/0x840 net/ipv4/ip_output.c:228 ip_finish_output+0xf4/0x240 net/ipv4/ip_output.c:316 NF_HOOK_COND 
include/linux/netfilter.h:291 [inline] ip_output+0xe5/0x1b0 net/ipv4/ip_output.c:430 dst_output include/net/dst.h:444 [inline] ip_local_out+0x64/0x80 net/ipv4/ip_output.c:126 iptunnel_xmit+0x34a/0x4b0 net/ipv4/ip_tunnel_core.c:82 ip_tunnel_xmit+0x1451/0x1730 net/ipv4/ip_tunnel.c:813 __gre_xmit net/ipv4/ip_gre.c:469 [inline] ipgre_xmit+0x516/0x570 net/ipv4/ip_gre.c:661 __netdev_start_xmit include/linux/netdevice.h:4881 [inline] netdev_start_xmit include/linux/netdevice.h:4895 [inline] xmit_one net/core/dev.c:3580 [inline] dev_hard_start_xmit+0x127/0x400 net/core/dev.c:3596 __dev_queue_xmit+0x1007/0x1eb0 net/core/dev.c:4246 dev_queue_xmit include/linux/netdevice.h:3051 [inline] neigh_direct_output+0x17/0x20 net/core/neighbour.c:1623 neigh_output include/net/neighbour.h:546 [inline] ip_finish_output2+0x740/0x840 net/ipv4/ip_output.c:228 ip_finish_output+0xf4/0x240 net/ipv4/ip_output.c:316 NF_HOOK_COND include/linux/netfilter.h:291 [inline] ip_output+0xe5/0x1b0 net/ipv4/ip_output.c:430 dst_output include/net/dst.h:444 [inline] ip_local_out+0x64/0x80 net/ipv4/ip_output.c:126 iptunnel_xmit+0x34a/0x4b0 net/ipv4/ip_tunnel_core.c:82 ip_tunnel_xmit+0x1451/0x1730 net/ipv4/ip_tunnel.c:813 __gre_xmit net/ipv4/ip_gre.c:469 [inline] ipgre_xmit+0x516/0x570 net/ipv4/ip_gre.c:661 __netdev_start_xmit include/linux/netdevice.h:4881 [inline] netdev_start_xmit include/linux/netdevice.h:4895 [inline] xmit_one net/core/dev.c:3580 [inline] dev_hard_start_xmit+0x127/0x400 net/core/dev.c:3596 __dev_queue_xmit+0x1007/0x1eb0 net/core/dev.c:4246 dev_queue_xmit include/linux/netdevice.h:3051 [inline] neigh_direct_output+0x17/0x20 net/core/neighbour.c:1623 neigh_output include/net/neighbour.h:546 [inline] ip_finish_output2+0x740/0x840 net/ipv4/ip_output.c:228 ip_finish_output+0xf4/0x240 net/ipv4/ip_output.c:316 NF_HOOK_COND include/linux/netfilter.h:291 [inline] ip_output+0xe5/0x1b0 net/ipv4/ip_output.c:430 dst_output include/net/dst.h:444 [inline] ip_local_out+0x64/0x80 net/ipv4/ip_output.c:126 iptunnel_xmit+0x34a/0x4b0 net/ipv4/ip_tunnel_core.c:82 ip_tunnel_xmit+0x1451/0x1730 net/ipv4/ip_tunnel.c:813 __gre_xmit net/ipv4/ip_gre.c:469 [inline] ipgre_xmit+0x516/0x570 net/ipv4/ip_gre.c:661 __netdev_start_xmit include/linux/netdevice.h:4881 [inline] netdev_start_xmit include/linux/netdevice.h:4895 [inline] xmit_one net/core/dev.c:3580 [inline] dev_hard_start_xmit+0x127/0x400 net/core/dev.c:3596 __dev_queue_xmit+0x1007/0x1eb0 net/core/dev.c:4246 write to 0xffff88815b9da0ec of 2 bytes by task 2379 on cpu 0: ip_tunnel_xmit+0x1294/0x1730 net/ipv4/ip_tunnel.c:804 __gre_xmit net/ipv4/ip_gre.c:469 [inline] ipgre_xmit+0x516/0x570 net/ipv4/ip_gre.c:661 __netdev_start_xmit include/linux/netdevice.h:4881 [inline] netdev_start_xmit include/linux/netdevice.h:4895 [inline] xmit_one net/core/dev.c:3580 [inline] dev_hard_start_xmit+0x127/0x400 net/core/dev.c:3596 __dev_queue_xmit+0x1007/0x1eb0 net/core/dev.c:4246 dev_queue_xmit include/linux/netdevice.h:3051 [inline] neigh_direct_output+0x17/0x20 net/core/neighbour.c:1623 neigh_output include/net/neighbour.h:546 [inline] ip6_finish_output2+0x9bc/0xc50 net/ipv6/ip6_output.c:134 __ip6_finish_output net/ipv6/ip6_output.c:195 [inline] ip6_finish_output+0x39a/0x4e0 net/ipv6/ip6_output.c:206 NF_HOOK_COND include/linux/netfilter.h:291 [inline] ip6_output+0xeb/0x220 net/ipv6/ip6_output.c:227 dst_output include/net/dst.h:444 [inline] NF_HOOK include/linux/netfilter.h:302 [inline] mld_sendpack+0x438/0x6a0 net/ipv6/mcast.c:1820 mld_send_cr net/ipv6/mcast.c:2121 [inline] mld_ifc_work+0x519/0x7b0 
net/ipv6/mcast.c:2653 process_one_work+0x3e6/0x750 kernel/workqueue.c:2390 worker_thread+0x5f2/0xa10 kernel/workqueue.c:2537 kthread+0x1ac/0x1e0 kernel/kthread.c:376 ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:308 value changed: 0x0dd4 -> 0x0e14 Reported by Kernel Concurrency Sanitizer on: CPU: 0 PID: 2379 Comm: kworker/0:0 Not tainted 6.3.0-rc1-syzkaller-00002-g8ca09d5fa354-dirty #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 03/02/2023 Workqueue: mld mld_ifc_work Fixes: 8eb30be0352d ("ipv6: Create ip6_tnl_xmit") Reported-by: syzbot Signed-off-by: Eric Dumazet Link: https://lore.kernel.org/r/20230310191109.2384387-1-edumazet@google.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- include/linux/netdevice.h | 6 ++++-- net/ipv4/ip_tunnel.c | 12 ++++++------ net/ipv6/ip6_tunnel.c | 4 ++-- 3 files changed, 12 insertions(+), 10 deletions(-) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 8d48b352ee74..4d0f48e74755 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -260,9 +260,11 @@ struct hh_cache { * relationship HH alignment <= LL alignment. */ #define LL_RESERVED_SPACE(dev) \ - ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) + ((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom)) \ + & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD) #define LL_RESERVED_SPACE_EXTRA(dev,extra) \ - ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) + ((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom) + (extra)) \ + & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD) struct header_ops { int (*create) (struct sk_buff *skb, struct net_device *dev, diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index 30e93b4f831f..9c2381cf675d 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -609,10 +609,10 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, u8 proto) else if (skb->protocol == htons(ETH_P_IP)) df = inner_iph->frag_off & htons(IP_DF); headroom += LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len; - if (headroom > dev->needed_headroom) - dev->needed_headroom = headroom; + if (headroom > READ_ONCE(dev->needed_headroom)) + WRITE_ONCE(dev->needed_headroom, headroom); - if (skb_cow_head(skb, dev->needed_headroom)) { + if (skb_cow_head(skb, READ_ONCE(dev->needed_headroom))) { ip_rt_put(rt); goto tx_dropped; } @@ -777,10 +777,10 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr) + rt->dst.header_len + ip_encap_hlen(&tunnel->encap); - if (max_headroom > dev->needed_headroom) - dev->needed_headroom = max_headroom; + if (max_headroom > READ_ONCE(dev->needed_headroom)) + WRITE_ONCE(dev->needed_headroom, max_headroom); - if (skb_cow_head(skb, dev->needed_headroom)) { + if (skb_cow_head(skb, READ_ONCE(dev->needed_headroom))) { ip_rt_put(rt); dev->stats.tx_dropped++; kfree_skb(skb); diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 75a1ec2605fc..48a658b541d7 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -1206,8 +1206,8 @@ route_lookup: */ max_headroom = LL_RESERVED_SPACE(dst->dev) + sizeof(struct ipv6hdr) + dst->header_len + t->hlen; - if (max_headroom > dev->needed_headroom) - dev->needed_headroom = max_headroom; + if (max_headroom > READ_ONCE(dev->needed_headroom)) + WRITE_ONCE(dev->needed_headroom, max_headroom); err = ip6_tnl_encap(skb, t, &proto, fl6); if (err) -- cgit v1.2.1 From 
cf98933ced59f298eefff9a03bf712d24efa1e98 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Sat, 11 Mar 2023 19:34:45 +0100 Subject: net: phy: smsc: bail out in lan87xx_read_status if genphy_read_status fails [ Upstream commit c22c3bbf351e4ce905f082649cffa1ff893ea8c1 ] If genphy_read_status fails then further access to the PHY may result in unpredictable behavior. To prevent this bail out immediately if genphy_read_status fails. Fixes: 4223dbffed9f ("net: phy: smsc: Re-enable EDPD mode for LAN87xx") Signed-off-by: Heiner Kallweit Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/026aa4f2-36f5-1c10-ab9f-cdb17dda6ac4@gmail.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- drivers/net/phy/smsc.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c index c328208388da..fd7c9f5ff99e 100644 --- a/drivers/net/phy/smsc.c +++ b/drivers/net/phy/smsc.c @@ -112,8 +112,11 @@ static int lan911x_config_init(struct phy_device *phydev) static int lan87xx_read_status(struct phy_device *phydev) { struct smsc_phy_priv *priv = phydev->priv; + int err; - int err = genphy_read_status(phydev); + err = genphy_read_status(phydev); + if (err) + return err; if (!phydev->link && priv->energy_enable) { int i; -- cgit v1.2.1 From 3405eb641dafcc8b28d174784b203c1622c121bf Mon Sep 17 00:00:00 2001 From: Zheng Wang Date: Mon, 13 Mar 2023 00:08:37 +0800 Subject: nfc: st-nci: Fix use after free bug in ndlc_remove due to race condition [ Upstream commit 5000fe6c27827a61d8250a7e4a1d26c3298ef4f6 ] This bug influences both st_nci_i2c_remove and st_nci_spi_remove. Take st_nci_i2c_remove as an example. In st_nci_i2c_probe, it called ndlc_probe and bound &ndlc->sm_work with llt_ndlc_sm_work. When it calls ndlc_recv or the timeout handler, it will finally call schedule_work to start the work. When we call st_nci_i2c_remove to remove the driver, there may be a sequence as follows: CPU0 CPU1 |llt_ndlc_sm_work st_nci_i2c_remove | ndlc_remove | st_nci_remove | nci_free_device| kfree(ndev) | //free ndlc->ndev | |llt_ndlc_rcv_queue |nci_recv_frame |//use ndlc->ndev Fix it by finishing the work before cleanup in ndlc_remove. Fixes: 35630df68d60 ("NFC: st21nfcb: Add driver for STMicroelectronics ST21NFCB NFC chip") Signed-off-by: Zheng Wang Reviewed-by: Krzysztof Kozlowski Link: https://lore.kernel.org/r/20230312160837.2040857-1-zyytlz.wz@163.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- drivers/nfc/st-nci/ndlc.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/nfc/st-nci/ndlc.c b/drivers/nfc/st-nci/ndlc.c index f26d938d240f..12d73f9dbe9f 100644 --- a/drivers/nfc/st-nci/ndlc.c +++ b/drivers/nfc/st-nci/ndlc.c @@ -297,13 +297,15 @@ EXPORT_SYMBOL(ndlc_probe); void ndlc_remove(struct llt_ndlc *ndlc) { - st_nci_remove(ndlc->ndev); - /* cancel timers */ del_timer_sync(&ndlc->t1_timer); del_timer_sync(&ndlc->t2_timer); ndlc->t2_active = false; ndlc->t1_active = false; + /* cancel work */ + cancel_work_sync(&ndlc->sm_work); + + st_nci_remove(ndlc->ndev); skb_queue_purge(&ndlc->rcv_q); skb_queue_purge(&ndlc->send_q); -- cgit v1.2.1 From 53966d572d056d6b234cfe76a5f9d60049d3c178 Mon Sep 17 00:00:00 2001 From: Szymon Heidrich Date: Mon, 13 Mar 2023 23:00:45 +0100 Subject: net: usb: smsc75xx: Limit packet length to skb->len [ Upstream commit d8b228318935044dafe3a5bc07ee71a1f1424b8d ] Packet length retrieved from skb data may be larger than the actual socket buffer length (up to 9026 bytes).
In such a case the cloned skb passed up the network stack will leak kernel memory contents. Fixes: d0cad871703b ("smsc75xx: SMSC LAN75xx USB gigabit ethernet adapter driver") Signed-off-by: Szymon Heidrich Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- drivers/net/usb/smsc75xx.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index 8b9fd4e071f3..b4705dee2b75 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c @@ -2225,7 +2225,8 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) dev->net->stats.rx_frame_errors++; } else { /* MAX_SINGLE_PACKET_SIZE + 4(CRC) + 2(COE) + 4(Vlan) */ - if (unlikely(size > (MAX_SINGLE_PACKET_SIZE + ETH_HLEN + 12))) { + if (unlikely(size > (MAX_SINGLE_PACKET_SIZE + ETH_HLEN + 12) || + size > skb->len)) { netif_dbg(dev, rx_err, dev->net, "size err rx_cmd_a=0x%08x\n", rx_cmd_a); -- cgit v1.2.1 From fafcb4b26393870c45462f9af6a48e581dbbcf7e Mon Sep 17 00:00:00 2001 From: Damien Le Moal Date: Mon, 6 Mar 2023 10:13:13 +0900 Subject: nvmet: avoid potential UAF in nvmet_req_complete() [ Upstream commit 6173a77b7e9d3e202bdb9897b23f2a8afe7bf286 ] An nvme target ->queue_response() operation implementation may free the request passed as argument. Such an implementation could potentially result in a use-after-free of the request pointer when percpu_ref_put() is called in nvmet_req_complete(). Avoid such a problem by using a local variable to save the sq pointer before calling __nvmet_req_complete(), thus avoiding dereferencing the req pointer after that function call. Fixes: a07b4970f464 ("nvmet: add a generic NVMe target") Signed-off-by: Damien Le Moal Reviewed-by: Chaitanya Kulkarni Signed-off-by: Christoph Hellwig Signed-off-by: Sasha Levin --- drivers/nvme/target/core.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 80b5aae1bdc9..aff18a3f002f 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -528,8 +528,10 @@ static void __nvmet_req_complete(struct nvmet_req *req, u16 status) void nvmet_req_complete(struct nvmet_req *req, u16 status) { + struct nvmet_sq *sq = req->sq; + __nvmet_req_complete(req, status); - percpu_ref_put(&req->sq->ref); + percpu_ref_put(&sq->ref); } EXPORT_SYMBOL_GPL(nvmet_req_complete); -- cgit v1.2.1 From c891037e945c42dc685153b4b267c1ddb800edbf Mon Sep 17 00:00:00 2001 From: Liang He Date: Wed, 15 Mar 2023 14:20:32 +0800 Subject: block: sunvdc: add check for mdesc_grab() returning NULL [ Upstream commit 6030363199e3a6341afb467ddddbed56640cbf6a ] In vdc_port_probe(), we should check the return value of mdesc_grab() as it may return NULL, which can cause a potential NULL pointer dereference (NPD) bug.
Fixes: 43fdf27470b2 ("[SPARC64]: Abstract out mdesc accesses for better MD update handling.") Signed-off-by: Liang He Link: https://lore.kernel.org/r/20230315062032.1741692-1-windhl@126.com [axboe: style cleanup] Signed-off-by: Jens Axboe Signed-off-by: Sasha Levin --- drivers/block/sunvdc.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c index 6b7b0d8a2acb..d2e9ffd2255f 100644 --- a/drivers/block/sunvdc.c +++ b/drivers/block/sunvdc.c @@ -947,6 +947,8 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) print_version(); hp = mdesc_grab(); + if (!hp) + return -ENODEV; err = -ENODEV; if ((vdev->dev_no << PARTITION_SHIFT) & ~(u64)MINORMASK) { -- cgit v1.2.1 From 1ed6495ff34c6e08113c11e6a18f2bf901b510ca Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 15 Mar 2023 14:40:09 +0200 Subject: ipv4: Fix incorrect table ID in IOCTL path [ Upstream commit 8a2618e14f81604a9b6ad305d57e0c8da939cd65 ] Commit f96a3d74554d ("ipv4: Fix incorrect route flushing when source address is deleted") started to take the table ID field in the FIB info structure into account when determining if two structures are identical or not. This field is initialized using the 'fc_table' field in the route configuration structure, which is not set when adding a route via IOCTL. The above can result in user space being able to install two identical routes that only differ in the table ID field of their associated FIB info. Fix by initializing the table ID field in the route configuration structure in the IOCTL path. Before the fix: # ip route add default via 192.0.2.2 # route add default gw 192.0.2.2 # ip -4 r show default # default via 192.0.2.2 dev dummy10 # default via 192.0.2.2 dev dummy10 After the fix: # ip route add default via 192.0.2.2 # route add default gw 192.0.2.2 SIOCADDRT: File exists # ip -4 r show default default via 192.0.2.2 dev dummy10 Audited the code paths to ensure there are no other paths that do not properly initialize the route configuration structure when installing a route. Fixes: 5a56a0b3a45d ("net: Don't delete routes in different VRFs") Fixes: f96a3d74554d ("ipv4: Fix incorrect route flushing when source address is deleted") Reported-by: gaoxingwang Link: https://lore.kernel.org/netdev/20230314144159.2354729-1-gaoxingwang1@huawei.com/ Tested-by: gaoxingwang Signed-off-by: Ido Schimmel Reviewed-by: David Ahern Link: https://lore.kernel.org/r/20230315124009.4015212-1-idosch@nvidia.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- net/ipv4/fib_frontend.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 1885a2fbad86..9aa48b4c4096 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -557,6 +557,9 @@ static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt, cfg->fc_scope = RT_SCOPE_UNIVERSE; } + if (!cfg->fc_table) + cfg->fc_table = RT_TABLE_MAIN; + if (cmd == SIOCDELRT) return 0; -- cgit v1.2.1 From 89441504d66d116eb5ce58c132f58cdcca5b498a Mon Sep 17 00:00:00 2001 From: Szymon Heidrich Date: Thu, 16 Mar 2023 12:05:40 +0100 Subject: net: usb: smsc75xx: Move packet length check to prevent kernel panic in skb_pull [ Upstream commit 43ffe6caccc7a1bb9d7442fbab521efbf6c1378c ] Packet length check needs to be located after size and align_count calculation to prevent kernel panic in skb_pull() in case rx_cmd_a & RX_CMD_A_RED evaluates to true. 
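Condensed from the diff below, the corrected ordering in smsc75xx_rx_fixup() validates size against the real buffer length before either branch consumes it:

	size = (rx_cmd_a & RX_CMD_A_LEN) - RXW_PADDING;
	align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;

	/* Validate before any use, including the RX_CMD_A_RED error path,
	 * where skb_pull() would otherwise walk past the end of the URB data.
	 */
	if (unlikely(size > skb->len))
		return 0;	/* drop the frame */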
Fixes: d8b228318935 ("net: usb: smsc75xx: Limit packet length to skb->len") Signed-off-by: Szymon Heidrich Link: https://lore.kernel.org/r/20230316110540.77531-1-szymon.heidrich@gmail.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- drivers/net/usb/smsc75xx.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index b4705dee2b75..313a4b0edc6b 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c @@ -2213,6 +2213,13 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) size = (rx_cmd_a & RX_CMD_A_LEN) - RXW_PADDING; align_count = (4 - ((size + RXW_PADDING) % 4)) % 4; + if (unlikely(size > skb->len)) { + netif_dbg(dev, rx_err, dev->net, + "size err rx_cmd_a=0x%08x\n", + rx_cmd_a); + return 0; + } + if (unlikely(rx_cmd_a & RX_CMD_A_RED)) { netif_dbg(dev, rx_err, dev->net, "Error rx_cmd_a=0x%08x\n", rx_cmd_a); @@ -2225,8 +2232,7 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) dev->net->stats.rx_frame_errors++; } else { /* MAX_SINGLE_PACKET_SIZE + 4(CRC) + 2(COE) + 4(Vlan) */ - if (unlikely(size > (MAX_SINGLE_PACKET_SIZE + ETH_HLEN + 12) || - size > skb->len)) { + if (unlikely(size > (MAX_SINGLE_PACKET_SIZE + ETH_HLEN + 12))) { netif_dbg(dev, rx_err, dev->net, "size err rx_cmd_a=0x%08x\n", rx_cmd_a); -- cgit v1.2.1 From b0d2bb5e31a693ebc8888eb407f8a257a3680efa Mon Sep 17 00:00:00 2001 From: Alexandra Winter Date: Wed, 15 Mar 2023 14:14:35 +0100 Subject: net/iucv: Fix size of interrupt data [ Upstream commit 3d87debb8ed2649608ff432699e7c961c0c6f03b ] iucv_irq_data needs to be 4 bytes larger. These bytes are not used by the iucv module, but written by the z/VM hypervisor in case a CPU is deconfigured. Reported as: BUG dma-kmalloc-64 (Not tainted): kmalloc Redzone overwritten ----------------------------------------------------------------------------- 0x0000000000400564-0x0000000000400567 @offset=1380. First byte 0x80 instead of 0xcc Allocated in iucv_cpu_prepare+0x44/0xd0 age=167839 cpu=2 pid=1 __kmem_cache_alloc_node+0x166/0x450 kmalloc_node_trace+0x3a/0x70 iucv_cpu_prepare+0x44/0xd0 cpuhp_invoke_callback+0x156/0x2f0 cpuhp_issue_call+0xf0/0x298 __cpuhp_setup_state_cpuslocked+0x136/0x338 __cpuhp_setup_state+0xf4/0x288 iucv_init+0xf4/0x280 do_one_initcall+0x78/0x390 do_initcalls+0x11a/0x140 kernel_init_freeable+0x25e/0x2a0 kernel_init+0x2e/0x170 __ret_from_fork+0x3c/0x58 ret_from_fork+0xa/0x40 Freed in iucv_init+0x92/0x280 age=167839 cpu=2 pid=1 __kmem_cache_free+0x308/0x358 iucv_init+0x92/0x280 do_one_initcall+0x78/0x390 do_initcalls+0x11a/0x140 kernel_init_freeable+0x25e/0x2a0 kernel_init+0x2e/0x170 __ret_from_fork+0x3c/0x58 ret_from_fork+0xa/0x40 Slab 0x0000037200010000 objects=32 used=30 fp=0x0000000000400640 flags=0x1ffff00000010200(slab|head|node=0|zone=0| Object 0x0000000000400540 @offset=1344 fp=0x0000000000000000 Redzone 0000000000400500: cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc ................ Redzone 0000000000400510: cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc ................ Redzone 0000000000400520: cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc ................ Redzone 0000000000400530: cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc ................ Object 0000000000400540: 00 01 00 03 00 00 00 00 00 00 00 00 00 00 00 00 ................ Object 0000000000400550: f3 86 81 f2 f4 82 f8 82 f0 f0 f0 f0 f0 f0 f0 f2 ................ 
Object 0000000000400560: 00 00 00 00 80 00 00 00 cc cc cc cc cc cc cc cc ................ Object 0000000000400570: cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc ................ Redzone 0000000000400580: cc cc cc cc cc cc cc cc ........ Padding 00000000004005d4: 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a ZZZZZZZZZZZZZZZZ Padding 00000000004005e4: 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a ZZZZZZZZZZZZZZZZ Padding 00000000004005f4: 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a 5a ZZZZZZZZZZZZ CPU: 6 PID: 121030 Comm: 116-pai-crypto. Not tainted 6.3.0-20230221.rc0.git4.99b8246b2d71.300.fc37.s390x+debug #1 Hardware name: IBM 3931 A01 704 (z/VM 7.3.0) Call Trace: [<000000032aa034ec>] dump_stack_lvl+0xac/0x100 [<0000000329f5a6cc>] check_bytes_and_report+0x104/0x140 [<0000000329f5aa78>] check_object+0x370/0x3c0 [<0000000329f5ede6>] free_debug_processing+0x15e/0x348 [<0000000329f5f06a>] free_to_partial_list+0x9a/0x2f0 [<0000000329f5f4a4>] __slab_free+0x1e4/0x3a8 [<0000000329f61768>] __kmem_cache_free+0x308/0x358 [<000000032a91465c>] iucv_cpu_dead+0x6c/0x88 [<0000000329c2fc66>] cpuhp_invoke_callback+0x156/0x2f0 [<000000032aa062da>] _cpu_down.constprop.0+0x22a/0x5e0 [<0000000329c3243e>] cpu_device_down+0x4e/0x78 [<000000032a61dee0>] device_offline+0xc8/0x118 [<000000032a61e048>] online_store+0x60/0xe0 [<000000032a08b6b0>] kernfs_fop_write_iter+0x150/0x1e8 [<0000000329fab65c>] vfs_write+0x174/0x360 [<0000000329fab9fc>] ksys_write+0x74/0x100 [<000000032aa03a5a>] __do_syscall+0x1da/0x208 [<000000032aa177b2>] system_call+0x82/0xb0 INFO: lockdep is turned off. FIX dma-kmalloc-64: Restoring kmalloc Redzone 0x0000000000400564-0x0000000000400567=0xcc FIX dma-kmalloc-64: Object at 0x0000000000400540 not freed Fixes: 2356f4cb1911 ("[S390]: Rewrite of the IUCV base code, part 2") Signed-off-by: Alexandra Winter Link: https://lore.kernel.org/r/20230315131435.4113889-1-wintera@linux.ibm.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- net/iucv/iucv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c index eb502c6290c2..aacaa5119b45 100644 --- a/net/iucv/iucv.c +++ b/net/iucv/iucv.c @@ -119,7 +119,7 @@ struct iucv_irq_data { u16 ippathid; u8 ipflags1; u8 iptype; - u32 res2[8]; + u32 res2[9]; }; struct iucv_irq_list { -- cgit v1.2.1 From 7b5179890e585a5dc524f92b0193b2756b059e92 Mon Sep 17 00:00:00 2001 From: Liang He Date: Wed, 15 Mar 2023 14:00:21 +0800 Subject: ethernet: sun: add check for the mdesc_grab() [ Upstream commit 90de546d9a0b3c771667af18bb3f80567eabb89b ] In vnet_port_probe() and vsw_port_probe(), we should check the return value of mdesc_grab() as it may return NULL, which can cause NPD bugs. Fixes: 5d01fa0c6bd8 ("ldmvsw: Add ldmvsw.c driver code") Fixes: 43fdf27470b2 ("[SPARC64]: Abstract out mdesc accesses for better MD update handling.") Signed-off-by: Liang He Reviewed-by: Piotr Raczynski Signed-off-by: David S.
Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/sun/ldmvsw.c | 3 +++ drivers/net/ethernet/sun/sunvnet.c | 3 +++ 2 files changed, 6 insertions(+) diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c index 644e42c181ee..1c9522ad3178 100644 --- a/drivers/net/ethernet/sun/ldmvsw.c +++ b/drivers/net/ethernet/sun/ldmvsw.c @@ -291,6 +291,9 @@ static int vsw_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) hp = mdesc_grab(); + if (!hp) + return -ENODEV; + rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len); err = -ENODEV; if (!rmac) { diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 590172818b92..3a1f0653cfb7 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -432,6 +432,9 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) hp = mdesc_grab(); + if (!hp) + return -ENODEV; + vp = vnet_find_parent(hp, vdev->mp, vdev); if (IS_ERR(vp)) { pr_err("Cannot find port parent vnet\n"); -- cgit v1.2.1 From ad2ae163934b50c58f9141066753e69ef181d234 Mon Sep 17 00:00:00 2001 From: Tony O'Brien Date: Wed, 22 Feb 2023 13:52:27 +1300 Subject: hwmon: (adt7475) Display smoothing attributes in correct order [ Upstream commit 5f8d1e3b6f9b5971f9c06d5846ce00c49e3a8d94 ] Throughout the ADT7475 driver, attributes relating to the temperature sensors are displayed in the order Remote 1, Local, Remote 2. Make temp_st_show() conform to this expectation so that values set by temp_st_store() can be displayed using the correct attribute. Fixes: 8f05bcc33e74 ("hwmon: (adt7475) temperature smoothing") Signed-off-by: Tony O'Brien Link: https://lore.kernel.org/r/20230222005228.158661-2-tony.obrien@alliedtelesis.co.nz Signed-off-by: Guenter Roeck Signed-off-by: Sasha Levin --- drivers/hwmon/adt7475.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c index 0a87c5b51286..bde5fe10a95a 100644 --- a/drivers/hwmon/adt7475.c +++ b/drivers/hwmon/adt7475.c @@ -554,11 +554,11 @@ static ssize_t show_temp_st(struct device *dev, struct device_attribute *attr, val = data->enh_acoustics[0] & 0xf; break; case 1: - val = (data->enh_acoustics[1] >> 4) & 0xf; + val = data->enh_acoustics[1] & 0xf; break; case 2: default: - val = data->enh_acoustics[1] & 0xf; + val = (data->enh_acoustics[1] >> 4) & 0xf; break; } -- cgit v1.2.1 From 55d01536a2945b44ecb9128edf2e0bdbc4c913dd Mon Sep 17 00:00:00 2001 From: Tony O'Brien Date: Wed, 22 Feb 2023 13:52:28 +1300 Subject: hwmon: (adt7475) Fix masking of hysteresis registers [ Upstream commit 48e8186870d9d0902e712d601ccb7098cb220688 ] The wrong bits are masked in the hysteresis register; indices 0 and 2 should zero bits [7:4] and preserve bits [3:0], and index 1 should zero bits [3:0] and preserve bits [7:4]. 
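Stated as code, the read-modify-write must clear exactly the nibble being replaced and keep the other one (a generic sketch of the corrected masking):

	/* Indices 0 and 2 live in the high nibble: keep [3:0], replace [7:4]. */
	reg = (reg & 0x0F) | ((val & 0xF) << 4);

	/* Index 1 lives in the low nibble: keep [7:4], replace [3:0]. */
	reg = (reg & 0xF0) | (val & 0xF);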
Fixes: 1c301fc5394f ("hwmon: Add a driver for the ADT7475 hardware monitoring chip") Signed-off-by: Tony O'Brien Link: https://lore.kernel.org/r/20230222005228.158661-3-tony.obrien@alliedtelesis.co.nz Signed-off-by: Guenter Roeck Signed-off-by: Sasha Levin --- drivers/hwmon/adt7475.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c index bde5fe10a95a..2db2665dcd4d 100644 --- a/drivers/hwmon/adt7475.c +++ b/drivers/hwmon/adt7475.c @@ -485,10 +485,10 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *attr, val = (temp - val) / 1000; if (sattr->index != 1) { - data->temp[HYSTERSIS][sattr->index] &= 0xF0; + data->temp[HYSTERSIS][sattr->index] &= 0x0F; data->temp[HYSTERSIS][sattr->index] |= (val & 0xF) << 4; } else { - data->temp[HYSTERSIS][sattr->index] &= 0x0F; + data->temp[HYSTERSIS][sattr->index] &= 0xF0; data->temp[HYSTERSIS][sattr->index] |= (val & 0xF); } -- cgit v1.2.1 From e0a37b43cd732038e37b4e7f6c6c0658fe0b6d73 Mon Sep 17 00:00:00 2001 From: Zheng Wang Date: Fri, 10 Mar 2023 16:40:07 +0800 Subject: hwmon: (xgene) Fix use after free bug in xgene_hwmon_remove due to race condition [ Upstream commit cb090e64cf25602b9adaf32d5dfc9c8bec493cd1 ] In xgene_hwmon_probe, &ctx->workq is bound with xgene_hwmon_evt_work. Then it will be started. If we remove the driver, which will call xgene_hwmon_remove to clean up, there may be unfinished work. The possible sequence is as follows: CPU0 CPU1 |xgene_hwmon_evt_work xgene_hwmon_remove | kfifo_free(&ctx->async_msg_fifo);| | |kfifo_out_spinlocked |//use &ctx->async_msg_fifo Fix it by finishing the work before cleanup in xgene_hwmon_remove. Fixes: 2ca492e22cb7 ("hwmon: (xgene) Fix crash when alarm occurs before driver probe") Signed-off-by: Zheng Wang Link: https://lore.kernel.org/r/20230310084007.1403388-1-zyytlz.wz@163.com Signed-off-by: Guenter Roeck Signed-off-by: Sasha Levin --- drivers/hwmon/xgene-hwmon.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/hwmon/xgene-hwmon.c b/drivers/hwmon/xgene-hwmon.c index a3cd91f23267..2dd19a420305 100644 --- a/drivers/hwmon/xgene-hwmon.c +++ b/drivers/hwmon/xgene-hwmon.c @@ -780,6 +780,7 @@ static int xgene_hwmon_remove(struct platform_device *pdev) { struct xgene_hwmon_dev *ctx = platform_get_drvdata(pdev); + cancel_work_sync(&ctx->workq); hwmon_device_unregister(ctx->hwmon_dev); kfifo_free(&ctx->async_msg_fifo); if (acpi_disabled) -- cgit v1.2.1 From 5deeac0bc82670d0c58334cfdffdad10b4f0c8bd Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Fri, 17 Mar 2023 13:51:17 -0700 Subject: media: m5mols: fix off-by-one loop termination error [ Upstream commit efbcbb12ee99f750c9f25c873b55ad774871de2a ] The __find_restype() function loops over the m5mols_default_ffmt[] array, and the termination condition ends up being wrong: instead of stopping when the iterator becomes the size of the array it traverses, it stops after it has already overshot the array. Now, in practice this doesn't likely matter, because the code will always find the entry it looks for, and will thus return early and never hit that last extra iteration. But it turns out that clang will unroll the loop fully, because it has only two iterations (well, three due to the off-by-one bug), and then clang will end up just giving up in the middle of the loop unrolling when it notices that the code walks past the end of the array.
And that made 'objtool' very unhappy indeed, because the generated code just falls off the edge of the universe, and ends up falling through to the next function, causing this warning: drivers/media/i2c/m5mols/m5mols.o: warning: objtool: m5mols_set_fmt() falls through to next function m5mols_get_frame_desc() Fix the loop ending condition. Reported-by: Jens Axboe Analyzed-by: Miguel Ojeda Analyzed-by: Nick Desaulniers Link: https://lore.kernel.org/linux-block/CAHk-=wgTSdKYbmB1JYM5vmHMcD9J9UZr0mn7BOYM_LudrP+Xvw@mail.gmail.com/ Fixes: bc125106f8af ("[media] Add support for M-5MOLS 8 Mega Pixel camera ISP") Cc: HeungJun, Kim Cc: Sylwester Nawrocki Cc: Kyungmin Park Cc: Mauro Carvalho Chehab Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- drivers/media/i2c/m5mols/m5mols_core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/media/i2c/m5mols/m5mols_core.c b/drivers/media/i2c/m5mols/m5mols_core.c index d9a964430609..9e6827dedab3 100644 --- a/drivers/media/i2c/m5mols/m5mols_core.c +++ b/drivers/media/i2c/m5mols/m5mols_core.c @@ -492,7 +492,7 @@ static enum m5mols_restype __find_restype(u32 code) do { if (code == m5mols_default_ffmt[type].code) return type; - } while (type++ != SIZE_DEFAULT_FFMT); + } while (++type != SIZE_DEFAULT_FFMT); return 0; } -- cgit v1.2.1 From 913e215c18c02afb71919938224a3bde70bb6c06 Mon Sep 17 00:00:00 2001 From: Tobias Schramm Date: Fri, 30 Dec 2022 20:43:15 +0100 Subject: mmc: atmel-mci: fix race between stop command and start of next command [ Upstream commit eca5bd666b0aa7dc0bca63292e4778968241134e ] This commit fixes a race between completion of a stop command and the start of a new command. Previously the command ready interrupt was enabled before the stop command was written to the command register. This caused the command ready interrupt to fire immediately, since the CMDRDY flag is asserted constantly while there is no command in progress. Consequently the command state machine immediately advances to the next state when the tasklet function is executed again, regardless of the actual completion state of the stop command. A new command can then be dispatched immediately, interrupting and corrupting the stop command on the CMD line. Fix that by dropping the command ready interrupt enable before calling atmci_send_stop_cmd. atmci_send_stop_cmd already enables the command ready interrupt; no further writes to ATMCI_IER are necessary. Signed-off-by: Tobias Schramm Acked-by: Ludovic Desroches Link: https://lore.kernel.org/r/20221230194315.809903-2-t.schramm@manjaro.org Signed-off-by: Ulf Hansson Signed-off-by: Sasha Levin --- drivers/mmc/host/atmel-mci.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index d40bab3d9f4a..fb435a8d3721 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c @@ -1857,7 +1857,6 @@ static void atmci_tasklet_func(unsigned long priv) atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); state = STATE_WAITING_NOTBUSY; } else if (host->mrq->stop) { - atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY); atmci_send_stop_cmd(host, data); state = STATE_SENDING_STOP; } else { @@ -1890,8 +1889,6 @@ static void atmci_tasklet_func(unsigned long priv) * command to send.
*/ if (host->mrq->stop) { - atmci_writel(host, ATMCI_IER, - ATMCI_CMDRDY); atmci_send_stop_cmd(host, data); state = STATE_SENDING_STOP; } else { -- cgit v1.2.1 From 38dede42e2d40839277ea4eb01f65bec09b44a09 Mon Sep 17 00:00:00 2001 From: Yifei Liu Date: Wed, 3 Aug 2022 15:53:12 +0000 Subject: jffs2: correct logic when creating a hole in jffs2_write_begin [ Upstream commit 23892d383bee15b64f5463bd7195615734bb2415 ] Bug description and fix: 1. Write data to a file, say all 1s from offset 0 to 16. 2. Truncate the file to a smaller size, say 8 bytes. 3. Write new bytes (say 2s) from an offset past the original size of the file, say at offset 20, for 4 bytes. This is supposed to create a "hole" in the file, meaning that the bytes from offset 8 (where it was truncated above) up to the new write at offset 20, should all be 0s (zeros). 4. Flush all caches using "echo 3 > /proc/sys/vm/drop_caches" (or unmount and remount) the f/s. 5. Check the content of the file. It is wrong. The 1s that used to be between bytes 9 and 16, before the truncation, have REAPPEARED (they should be 0s). We wrote a script and helper C program to reproduce the bug (reproduce_jffs2_write_begin_issue.sh, write_file.c, and Makefile). We can make them available to anyone. The above example is shown when writing a small file within the same first page. But the bug happens for larger files, as long as steps 1, 2, and 3 above all happen within the same page. The problem was traced to the jffs2_write_begin code, where it goes into an 'if' statement intended to handle writes past the current EOF (i.e., writes that may create a hole). The code computes a 'pageofs' that is the floor of the write position (pos), aligned to the page size boundary. In other words, 'pageofs' will never be larger than 'pos'. The code then sets the internal jffs2_raw_inode->isize to the size of max(current inode size, pageofs) but that is wrong: the new file size should be the 'pos', which is larger than both the current inode size and pageofs. Similarly, the code incorrectly sets the internal jffs2_raw_inode->dsize to the difference between the pageofs minus current inode size; instead it should be the current pos minus the current inode size. Finally, inode->i_size was also set incorrectly. The patch below fixes this bug. The bug was discovered using a new tool for finding f/s bugs using model checking, called MCFS (Model Checking File Systems). 
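To make the arithmetic concrete, a minimal sketch using the example values from the steps above (PAGE_SHIFT of 12, i.e. 4 KiB pages, assumed; not code from the patch):

    uint32_t i_size = 8, pos = 20;
    uint32_t pageofs = (pos >> 12) << 12;    /* = 0, floor of pos to a page */

    /* Old test: pageofs > i_size  ->  0 > 8, false. No hole frag is
     * written at all, so the stale 1s in [8, 16) resurface once the
     * caches are dropped.
     * New test: pos > i_size      -> 20 > 8, true. A hole frag covering
     * [8, 20) is written, with isize = pos and dsize = pos - i_size. */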
Signed-off-by: Yifei Liu Signed-off-by: Erez Zadok Signed-off-by: Manish Adkar Signed-off-by: Richard Weinberger Signed-off-by: Sasha Levin --- fs/jffs2/file.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c index 3047872fdac9..bf3d8a4516a5 100644 --- a/fs/jffs2/file.c +++ b/fs/jffs2/file.c @@ -137,19 +137,18 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); pgoff_t index = pos >> PAGE_SHIFT; - uint32_t pageofs = index << PAGE_SHIFT; int ret = 0; jffs2_dbg(1, "%s()\n", __func__); - if (pageofs > inode->i_size) { - /* Make new hole frag from old EOF to new page */ + if (pos > inode->i_size) { + /* Make new hole frag from old EOF to new position */ struct jffs2_raw_inode ri; struct jffs2_full_dnode *fn; uint32_t alloc_len; - jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new page\n", - (unsigned int)inode->i_size, pageofs); + jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new position\n", + (unsigned int)inode->i_size, (uint32_t)pos); ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len, ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); @@ -169,10 +168,10 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, ri.mode = cpu_to_jemode(inode->i_mode); ri.uid = cpu_to_je16(i_uid_read(inode)); ri.gid = cpu_to_je16(i_gid_read(inode)); - ri.isize = cpu_to_je32(max((uint32_t)inode->i_size, pageofs)); + ri.isize = cpu_to_je32((uint32_t)pos); ri.atime = ri.ctime = ri.mtime = cpu_to_je32(JFFS2_NOW()); ri.offset = cpu_to_je32(inode->i_size); - ri.dsize = cpu_to_je32(pageofs - inode->i_size); + ri.dsize = cpu_to_je32((uint32_t)pos - inode->i_size); ri.csize = cpu_to_je32(0); ri.compr = JFFS2_COMPR_ZERO; ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8)); @@ -202,7 +201,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, goto out_err; } jffs2_complete_reservation(c); - inode->i_size = pageofs; + inode->i_size = pos; mutex_unlock(&f->sem); } -- cgit v1.2.1 From 3aea195acd977e82d970cbc7078f983880c7ee6a Mon Sep 17 00:00:00 2001 From: Baokun Li Date: Sat, 7 Jan 2023 11:21:25 +0800 Subject: ext4: fail ext4_iget if special inode unallocated MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 5cd740287ae5e3f9d1c46f5bfe8778972fd6d3fe ] In ext4_fill_super(), EXT4_ORPHAN_FS flag is cleared after ext4_orphan_cleanup() is executed. Therefore, when __ext4_iget() is called to get an inode whose i_nlink is 0 when the flag exists, no error is returned. If the inode is a special inode, a null pointer dereference may occur. If the value of i_nlink is 0 for any inodes (except boot loader inodes) got by using the EXT4_IGET_SPECIAL flag, the current file system is corrupted. Therefore, make the ext4_iget() function return an error if it gets such an abnormal special inode. 
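For the caller-visible effect, a hedged sketch (hypothetical call site; journal_inum stands in for whatever special inode number is being fetched):

    struct inode *inode;

    inode = ext4_iget(sb, journal_inum, EXT4_IGET_SPECIAL);
    if (IS_ERR(inode))
            return PTR_ERR(inode);  /* now -EFSCORRUPTED when the on-disk
                                     * special inode has i_links_count == 0,
                                     * rather than a half-valid inode that
                                     * gets dereferenced later */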
Link: https://bugzilla.kernel.org/show_bug.cgi?id=199179 Link: https://bugzilla.kernel.org/show_bug.cgi?id=216541 Link: https://bugzilla.kernel.org/show_bug.cgi?id=216539 Reported-by: LuΓ­s Henriques Suggested-by: Theodore Ts'o Signed-off-by: Baokun Li Reviewed-by: Jan Kara Link: https://lore.kernel.org/r/20230107032126.4165860-2-libaokun1@huawei.com Signed-off-by: Theodore Ts'o Signed-off-by: Sasha Levin --- fs/ext4/inode.c | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 6e7989b04d2b..e844d91c461b 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -4947,13 +4947,6 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino, goto bad_inode; raw_inode = ext4_raw_inode(&iloc); - if ((ino == EXT4_ROOT_INO) && (raw_inode->i_links_count == 0)) { - ext4_error_inode(inode, function, line, 0, - "iget: root inode unallocated"); - ret = -EFSCORRUPTED; - goto bad_inode; - } - if ((flags & EXT4_IGET_HANDLE) && (raw_inode->i_links_count == 0) && (raw_inode->i_mode == 0)) { ret = -ESTALE; @@ -5024,11 +5017,16 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino, * NeilBrown 1999oct15 */ if (inode->i_nlink == 0) { - if ((inode->i_mode == 0 || + if ((inode->i_mode == 0 || flags & EXT4_IGET_SPECIAL || !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) && ino != EXT4_BOOT_LOADER_INO) { - /* this inode is deleted */ - ret = -ESTALE; + /* this inode is deleted or unallocated */ + if (flags & EXT4_IGET_SPECIAL) { + ext4_error_inode(inode, function, line, 0, + "iget: special inode unallocated"); + ret = -EFSCORRUPTED; + } else + ret = -ESTALE; goto bad_inode; } /* The only unlinked inodes we let through here have -- cgit v1.2.1 From 64b72f5e7574020dea62ab733d88a54d903c42a1 Mon Sep 17 00:00:00 2001 From: Baokun Li Date: Tue, 10 Jan 2023 21:34:36 +0800 Subject: ext4: fix task hung in ext4_xattr_delete_inode [ Upstream commit 0f7bfd6f8164be32dbbdf36aa1e5d00485c53cd7 ] Syzbot reported a hung task problem: ================================================================== INFO: task syz-executor232:5073 blocked for more than 143 seconds. Not tainted 6.2.0-rc2-syzkaller-00024-g512dee0c00ad #0 "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. 
task:syz-exec232 state:D stack:21024 pid:5073 ppid:5072 flags:0x00004004 Call Trace: context_switch kernel/sched/core.c:5244 [inline] __schedule+0x995/0xe20 kernel/sched/core.c:6555 schedule+0xcb/0x190 kernel/sched/core.c:6631 __wait_on_freeing_inode fs/inode.c:2196 [inline] find_inode_fast+0x35a/0x4c0 fs/inode.c:950 iget_locked+0xb1/0x830 fs/inode.c:1273 __ext4_iget+0x22e/0x3ed0 fs/ext4/inode.c:4861 ext4_xattr_inode_iget+0x68/0x4e0 fs/ext4/xattr.c:389 ext4_xattr_inode_dec_ref_all+0x1a7/0xe50 fs/ext4/xattr.c:1148 ext4_xattr_delete_inode+0xb04/0xcd0 fs/ext4/xattr.c:2880 ext4_evict_inode+0xd7c/0x10b0 fs/ext4/inode.c:296 evict+0x2a4/0x620 fs/inode.c:664 ext4_orphan_cleanup+0xb60/0x1340 fs/ext4/orphan.c:474 __ext4_fill_super fs/ext4/super.c:5516 [inline] ext4_fill_super+0x81cd/0x8700 fs/ext4/super.c:5644 get_tree_bdev+0x400/0x620 fs/super.c:1282 vfs_get_tree+0x88/0x270 fs/super.c:1489 do_new_mount+0x289/0xad0 fs/namespace.c:3145 do_mount fs/namespace.c:3488 [inline] __do_sys_mount fs/namespace.c:3697 [inline] __se_sys_mount+0x2d3/0x3c0 fs/namespace.c:3674 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x3d/0xb0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x63/0xcd RIP: 0033:0x7fa5406fd5ea RSP: 002b:00007ffc7232f968 EFLAGS: 00000202 ORIG_RAX: 00000000000000a5 RAX: ffffffffffffffda RBX: 0000000000000003 RCX: 00007fa5406fd5ea RDX: 0000000020000440 RSI: 0000000020000000 RDI: 00007ffc7232f970 RBP: 00007ffc7232f970 R08: 00007ffc7232f9b0 R09: 0000000000000432 R10: 0000000000804a03 R11: 0000000000000202 R12: 0000000000000004 R13: 0000555556a7a2c0 R14: 00007ffc7232f9b0 R15: 0000000000000000 ================================================================== The problem is that the inode contains an xattr entry with ea_inum of 15 when cleaning up an orphan inode <15>. When evict inode <15>, the reference counting of the corresponding EA inode is decreased. When EA inode <15> is found by find_inode_fast() in __ext4_iget(), it is found that the EA inode holds the I_FREEING flag and waits for the EA inode to complete deletion. As a result, when inode <15> is being deleted, we wait for inode <15> to complete the deletion, resulting in an infinite loop and triggering Hung Task. To solve this problem, we only need to check whether the ino of EA inode and parent is the same before getting EA inode. Link: https://syzkaller.appspot.com/bug?extid=77d6fcc37bbb92f26048 Reported-by: syzbot+77d6fcc37bbb92f26048@syzkaller.appspotmail.com Signed-off-by: Baokun Li Reviewed-by: Jan Kara Link: https://lore.kernel.org/r/20230110133436.996350-1-libaokun1@huawei.com Signed-off-by: Theodore Ts'o Signed-off-by: Sasha Levin --- fs/ext4/xattr.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index 2a70b7556e41..a91b02091b16 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -384,6 +384,17 @@ static int ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino, struct inode *inode; int err; + /* + * We have to check for this corruption early as otherwise + * iget_locked() could wait indefinitely for the state of our + * parent inode. 
+ */ + if (parent->i_ino == ea_ino) { + ext4_error(parent->i_sb, + "Parent and EA inode have the same ino %lu", ea_ino); + return -EFSCORRUPTED; + } + inode = ext4_iget(parent->i_sb, ea_ino, EXT4_IGET_NORMAL); if (IS_ERR(inode)) { err = PTR_ERR(inode); -- cgit v1.2.1 From 5a3fb3b745af0ce46ec2e0c8e507bae45b937334 Mon Sep 17 00:00:00 2001 From: Qu Huang Date: Tue, 21 Feb 2023 11:35:16 +0000 Subject: drm/amdkfd: Fix an illegal memory access [ Upstream commit 4fc8fff378b2f2039f2a666d9f8c570f4e58352c ] In the kfd_wait_on_events() function, the kfd_event_waiter structure is allocated by alloc_event_waiters(), but the event field of the waiter structure is not initialized; When copy_from_user() fails in the kfd_wait_on_events() function, it will enter exception handling to release the previously allocated memory of the waiter structure; Due to the event field of the waiters structure being accessed in the free_waiters() function, this results in illegal memory access and system crash, here is the crash log: localhost kernel: RIP: 0010:native_queued_spin_lock_slowpath+0x185/0x1e0 localhost kernel: RSP: 0018:ffffaa53c362bd60 EFLAGS: 00010082 localhost kernel: RAX: ff3d3d6bff4007cb RBX: 0000000000000282 RCX: 00000000002c0000 localhost kernel: RDX: ffff9e855eeacb80 RSI: 000000000000279c RDI: ffffe7088f6a21d0 localhost kernel: RBP: ffffe7088f6a21d0 R08: 00000000002c0000 R09: ffffaa53c362be64 localhost kernel: R10: ffffaa53c362bbd8 R11: 0000000000000001 R12: 0000000000000002 localhost kernel: R13: ffff9e7ead15d600 R14: 0000000000000000 R15: ffff9e7ead15d698 localhost kernel: FS: 0000152a3d111700(0000) GS:ffff9e855ee80000(0000) knlGS:0000000000000000 localhost kernel: CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 localhost kernel: CR2: 0000152938000010 CR3: 000000044d7a4000 CR4: 00000000003506e0 localhost kernel: Call Trace: localhost kernel: _raw_spin_lock_irqsave+0x30/0x40 localhost kernel: remove_wait_queue+0x12/0x50 localhost kernel: kfd_wait_on_events+0x1b6/0x490 [hydcu] localhost kernel: ? ftrace_graph_caller+0xa0/0xa0 localhost kernel: kfd_ioctl+0x38c/0x4a0 [hydcu] localhost kernel: ? kfd_ioctl_set_trap_handler+0x70/0x70 [hydcu] localhost kernel: ? kfd_ioctl_create_queue+0x5a0/0x5a0 [hydcu] localhost kernel: ? ftrace_graph_caller+0xa0/0xa0 localhost kernel: __x64_sys_ioctl+0x8e/0xd0 localhost kernel: ? syscall_trace_enter.isra.18+0x143/0x1b0 localhost kernel: do_syscall_64+0x33/0x80 localhost kernel: entry_SYSCALL_64_after_hwframe+0x44/0xa9 localhost kernel: RIP: 0033:0x152a4dff68d7 Allocate the structure with kcalloc, and remove redundant 0-initialization and a redundant loop condition check. 
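Condensed before/after sketch of the allocation (not the full function; names follow the commit message):

    struct kfd_event_waiter *w;

    /* Before: kmalloc_array() leaves .event uninitialized, so if the
     * later copy_from_user() fails, the error path's free_waiters()
     * dereferences a garbage .event pointer. */
    w = kmalloc_array(num_events, sizeof(*w), GFP_KERNEL);

    /* After: kcalloc() zeroes the whole array up front; every field
     * (.event, .activated, ...) is safe on any early-exit path, and
     * the manual zeroing loop becomes redundant. */
    w = kcalloc(num_events, sizeof(*w), GFP_KERNEL);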
Signed-off-by: Qu Huang Signed-off-by: Felix Kuehling Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin --- drivers/gpu/drm/amd/amdkfd/kfd_events.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c index 892077377339..8f23192b6709 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c @@ -529,16 +529,13 @@ static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events) struct kfd_event_waiter *event_waiters; uint32_t i; - event_waiters = kmalloc_array(num_events, - sizeof(struct kfd_event_waiter), - GFP_KERNEL); + event_waiters = kcalloc(num_events, sizeof(struct kfd_event_waiter), + GFP_KERNEL); if (!event_waiters) return NULL; - for (i = 0; (event_waiters) && (i < num_events) ; i++) { + for (i = 0; i < num_events; i++) init_wait(&event_waiters[i].wait); - event_waiters[i].activated = false; - } return event_waiters; } -- cgit v1.2.1 From a7971d284b4c86c21e5470601128f7431d57896e Mon Sep 17 00:00:00 2001 From: Michael Karcher Date: Tue, 24 Jan 2023 22:48:16 +0100 Subject: sh: intc: Avoid spurious sizeof-pointer-div warning [ Upstream commit 250870824c1cf199b032b1ef889c8e8d69d9123a ] GCC warns about the pattern sizeof(void*)/sizeof(void), as it looks like the abuse of a pattern to calculate the array size. This pattern appears in the unevaluated part of the ternary operator in _INTC_ARRAY if the parameter is NULL. The replacement uses an alternate approach to return 0 in case of NULL which does not generate the pattern sizeof(void*)/sizeof(void), but still emits the warning if _INTC_ARRAY is called with a nonarray parameter. This patch is required for successful compilation with -Werror enabled. The idea to use _Generic for type distinction is taken from Comment #7 in https://gcc.gnu.org/bugzilla/show_bug.cgi?id=108483 by Jakub Jelinek Signed-off-by: Michael Karcher Acked-by: Randy Dunlap # build-tested Link: https://lore.kernel.org/r/619fa552-c988-35e5-b1d7-fe256c46a272@mkarcher.dialup.fu-berlin.de Signed-off-by: John Paul Adrian Glaubitz Signed-off-by: Sasha Levin --- include/linux/sh_intc.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/include/linux/sh_intc.h b/include/linux/sh_intc.h index c255273b0281..37ad81058d6a 100644 --- a/include/linux/sh_intc.h +++ b/include/linux/sh_intc.h @@ -97,7 +97,10 @@ struct intc_hw_desc { unsigned int nr_subgroups; }; -#define _INTC_ARRAY(a) a, __same_type(a, NULL) ? 0 : sizeof(a)/sizeof(*a) +#define _INTC_SIZEOF_OR_ZERO(a) (_Generic(a, \ + typeof(NULL): 0, \ + default: sizeof(a))) +#define _INTC_ARRAY(a) a, _INTC_SIZEOF_OR_ZERO(a)/sizeof(*a) #define INTC_HW_DESC(vectors, groups, mask_regs, \ prio_regs, sense_regs, ack_regs) \ -- cgit v1.2.1 From 508db45bb4a458e508b36a26d7dea9c3372138f3 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Google)" Date: Wed, 1 Mar 2023 20:00:53 -0500 Subject: tracing: Check field value in hist_field_name() commit 9f116f76fa8c04c81aef33ad870dbf9a158e5b70 upstream. The function hist_field_name() cannot handle being passed a NULL field parameter. It should never be NULL, but due to a previous bug, NULL was passed to the function and the kernel crashed due to a NULL dereference. Mark Rutland reported this to me on IRC. The bug was fixed, but to prevent future bugs from crashing the kernel, check the field and add a WARN_ON() if it is NULL. 
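Worth noting about the pattern in the diff below: WARN_ON_ONCE() evaluates to its condition, so a single line both reports the first occurrence (with a backtrace) and lets the function bail out with a safe, printable fallback:

    if (WARN_ON_ONCE(!field))
            return field_name;      /* still "" at this point */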
Link: https://lkml.kernel.org/r/20230302020810.762384440@goodmis.org Cc: stable@vger.kernel.org Cc: Masami Hiramatsu Cc: Andrew Morton Reported-by: Mark Rutland Fixes: c6afad49d127f ("tracing: Add hist trigger 'sym' and 'sym-offset' modifiers") Tested-by: Mark Rutland Signed-off-by: Steven Rostedt (Google) Signed-off-by: Greg Kroah-Hartman --- kernel/trace/trace_events_hist.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index a56ee4ba2afb..455cf41aedbb 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -1764,6 +1764,9 @@ static const char *hist_field_name(struct hist_field *field, { const char *field_name = ""; + if (WARN_ON_ONCE(!field)) + return field_name; + if (level > 1) return field_name; -- cgit v1.2.1 From 4bc5a4dfb4ccb36b23d6f94340c5b4356b75e9fb Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Google)" Date: Fri, 10 Mar 2023 17:28:56 -0500 Subject: tracing: Make tracepoint lockdep check actually test something commit c2679254b9c9980d9045f0f722cf093a2b1f7590 upstream. A while ago where the trace events had the following: rcu_read_lock_sched_notrace(); rcu_dereference_sched(...); rcu_read_unlock_sched_notrace(); If the tracepoint is enabled, it could trigger RCU issues if called in the wrong place. And this warning was only triggered if lockdep was enabled. If the tracepoint was never enabled with lockdep, the bug would not be caught. To handle this, the above sequence was done when lockdep was enabled regardless if the tracepoint was enabled or not (although the always enabled code really didn't do anything, it would still trigger a warning). But a lot has changed since that lockdep code was added. One is, that sequence no longer triggers any warning. Another is, the tracepoint when enabled doesn't even do that sequence anymore. The main check we care about today is whether RCU is "watching" or not. So if lockdep is enabled, always check if rcu_is_watching() which will trigger a warning if it is not (tracepoints require RCU to be watching). Note, that old sequence did add a bit of overhead when lockdep was enabled, and with the latest kernel updates, would cause the system to slow down enough to trigger kernel "stalled" warnings. Link: http://lore.kernel.org/lkml/20140806181801.GA4605@redhat.com Link: http://lore.kernel.org/lkml/20140807175204.C257CAC5@viggo.jf.intel.com Link: https://lore.kernel.org/lkml/20230307184645.521db5c9@gandalf.local.home/ Link: https://lore.kernel.org/linux-trace-kernel/20230310172856.77406446@gandalf.local.home Cc: stable@vger.kernel.org Cc: Masami Hiramatsu Cc: Dave Hansen Cc: "Paul E. McKenney" Cc: Mathieu Desnoyers Cc: Joel Fernandes Acked-by: Peter Zijlstra (Intel) Acked-by: Paul E. McKenney Fixes: e6753f23d961 ("tracepoint: Make rcuidle tracepoint callers use SRCU") Signed-off-by: Steven Rostedt (Google) Signed-off-by: Greg Kroah-Hartman --- include/linux/tracepoint.h | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 4251cbfdb3c8..bff2f76aeff7 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -233,12 +233,11 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) * not add unwanted padding between the beginning of the section and the * structure. Force alignment to the same alignment as the section start. 
* - * When lockdep is enabled, we make sure to always do the RCU portions of - * the tracepoint code, regardless of whether tracing is on. However, - * don't check if the condition is false, due to interaction with idle - * instrumentation. This lets us find RCU issues triggered with tracepoints - * even when this tracepoint is off. This code has no purpose other than - * poking RCU a bit. + * When lockdep is enabled, we make sure to always test if RCU is + * "watching" regardless if the tracepoint is enabled or not. Tracepoints + * require RCU to be active, and it should always warn at the tracepoint + * site if it is not watching, as it will need to be active when the + * tracepoint is enabled. */ #define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \ extern struct tracepoint __tracepoint_##name; \ @@ -250,9 +249,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) TP_ARGS(data_args), \ TP_CONDITION(cond), 0); \ if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) { \ - rcu_read_lock_sched_notrace(); \ - rcu_dereference_sched(__tracepoint_##name.funcs);\ - rcu_read_unlock_sched_notrace(); \ + WARN_ON_ONCE(!rcu_is_watching()); \ } \ } \ __DECLARE_TRACE_RCU(name, PARAMS(proto), PARAMS(args), \ -- cgit v1.2.1 From 7569ee04b0e3b32df79f64db3a7138573edad9bc Mon Sep 17 00:00:00 2001 From: Chen Zhongjin Date: Thu, 9 Mar 2023 16:02:30 +0800 Subject: ftrace: Fix invalid address access in lookup_rec() when index is 0 commit ee92fa443358f4fc0017c1d0d325c27b37802504 upstream. KASAN reported follow problem: BUG: KASAN: use-after-free in lookup_rec Read of size 8 at addr ffff000199270ff0 by task modprobe CPU: 2 Comm: modprobe Call trace: kasan_report __asan_load8 lookup_rec ftrace_location arch_check_ftrace_location check_kprobe_address_safe register_kprobe When checking pg->records[pg->index - 1].ip in lookup_rec(), it can get a pg which is newly added to ftrace_pages_start in ftrace_process_locs(). Before the first pg->index++, index is 0 and accessing pg->records[-1].ip will cause this problem. Don't check the ip when pg->index is 0. Link: https://lore.kernel.org/linux-trace-kernel/20230309080230.36064-1-chenzhongjin@huawei.com Cc: stable@vger.kernel.org Fixes: 9644302e3315 ("ftrace: Speed up search by skipping pages by address") Suggested-by: Steven Rostedt (Google) Signed-off-by: Chen Zhongjin Signed-off-by: Steven Rostedt (Google) Signed-off-by: Greg Kroah-Hartman --- kernel/trace/ftrace.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 9c7795566436..5c0463dbe16e 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -1581,7 +1581,8 @@ unsigned long ftrace_location_range(unsigned long start, unsigned long end) key.flags = end; /* overload flags, as it is unsigned long */ for (pg = ftrace_pages_start; pg; pg = pg->next) { - if (end < pg->records[0].ip || + if (pg->index == 0 || + end < pg->records[0].ip || start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE)) continue; rec = bsearch(&key, pg->records, pg->index, -- cgit v1.2.1 From e6d5a0a1e6820f7a7c47f757240b25b43f884eff Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Thu, 16 Mar 2023 11:38:19 +0100 Subject: fbdev: stifb: Provide valid pixelclock and add fb_check_var() checks commit 203873a535d627c668f293be0cb73e26c30f9cc7 upstream. Find a valid modeline depending on the machine graphic card configuration and add the fb_check_var() function to validate Xorg provided graphics settings. 
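For context, a hedged userspace sketch of the path this validates (assumes a stifb-driven /dev/fb0; not part of the patch):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/fb.h>

    int main(void)
    {
            struct fb_var_screeninfo var;
            int fd = open("/dev/fb0", O_RDWR);

            if (fd < 0 || ioctl(fd, FBIOGET_VSCREENINFO, &var) < 0)
                    return 1;
            var.xres = 800;         /* anything but the probed fixed mode */
            /* the fbdev core routes this through stifb_check_var() */
            if (ioctl(fd, FBIOPUT_VSCREENINFO, &var) < 0)
                    perror("mode rejected");        /* now -EINVAL */
            return 0;
    }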
Signed-off-by: Helge Deller Cc: stable@vger.kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/video/fbdev/stifb.c | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c index 9530ed46f435..e606fc728794 100644 --- a/drivers/video/fbdev/stifb.c +++ b/drivers/video/fbdev/stifb.c @@ -921,6 +921,28 @@ SETUP_HCRX(struct stifb_info *fb) /* ------------------- driver specific functions --------------------------- */ +static int +stifb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) +{ + struct stifb_info *fb = container_of(info, struct stifb_info, info); + + if (var->xres != fb->info.var.xres || + var->yres != fb->info.var.yres || + var->bits_per_pixel != fb->info.var.bits_per_pixel) + return -EINVAL; + + var->xres_virtual = var->xres; + var->yres_virtual = var->yres; + var->xoffset = 0; + var->yoffset = 0; + var->grayscale = fb->info.var.grayscale; + var->red.length = fb->info.var.red.length; + var->green.length = fb->info.var.green.length; + var->blue.length = fb->info.var.blue.length; + + return 0; +} + static int stifb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, u_int transp, struct fb_info *info) @@ -1103,6 +1125,7 @@ stifb_init_display(struct stifb_info *fb) static struct fb_ops stifb_ops = { .owner = THIS_MODULE, + .fb_check_var = stifb_check_var, .fb_setcolreg = stifb_setcolreg, .fb_blank = stifb_blank, .fb_fillrect = cfb_fillrect, @@ -1122,6 +1145,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref) struct stifb_info *fb; struct fb_info *info; unsigned long sti_rom_address; + char modestr[32]; char *dev_name; int bpp, xres, yres; @@ -1300,6 +1324,9 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref) info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA; info->pseudo_palette = &fb->pseudo_palette; + scnprintf(modestr, sizeof(modestr), "%dx%d-%d", xres, yres, bpp); + fb_find_mode(&info->var, info, modestr, NULL, 0, NULL, bpp); + /* This has to be done !!! */ if (fb_alloc_cmap(&info->cmap, NR_PALETTE, 0)) goto out_err1; -- cgit v1.2.1 From ffdf8d81c48822a329af9f31dc239090f4a60761 Mon Sep 17 00:00:00 2001 From: Nikita Zhandarovich Date: Mon, 6 Mar 2023 08:06:56 -0800 Subject: x86/mm: Fix use of uninitialized buffer in sme_enable() commit cbebd68f59f03633469f3ecf9bea99cd6cce3854 upstream. cmdline_find_option() may fail before doing any initialization of the buffer array. This may lead to unpredictable results when the same buffer is used later in calls to strncmp() function. Fix the issue by returning early if cmdline_find_option() returns an error. Found by Linux Verification Center (linuxtesting.org) with static analysis tool SVACE. 
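The bug pattern, as a minimal sketch (names follow the diff below; the buffer size is illustrative):

    char buffer[16];        /* stack garbage until written */

    /* cmdline_find_option() can fail before touching 'buffer', so bail
     * out rather than strncmp()-ing uninitialized bytes. */
    if (cmdline_find_option(cmdline_ptr, cmdline_arg, buffer,
                            sizeof(buffer)) < 0)
            return;

    if (!strncmp(buffer, cmdline_on, sizeof(buffer)))
            sme_me_mask = me_mask;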
Fixes: aca20d546214 ("x86/mm: Add support to make use of Secure Memory Encryption") Signed-off-by: Nikita Zhandarovich Signed-off-by: Borislav Petkov (AMD) Acked-by: Tom Lendacky Cc: Link: https://lore.kernel.org/r/20230306160656.14844-1-n.zhandarovich@fintech.ru Signed-off-by: Greg Kroah-Hartman --- arch/x86/mm/mem_encrypt_identity.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c index 650d5a6cafc7..832c899b7b73 100644 --- a/arch/x86/mm/mem_encrypt_identity.c +++ b/arch/x86/mm/mem_encrypt_identity.c @@ -563,7 +563,8 @@ void __init sme_enable(struct boot_params *bp) cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr | ((u64)bp->ext_cmd_line_ptr << 32)); - cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer)); + if (cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer)) < 0) + return; if (!strncmp(buffer, cmdline_on, sizeof(buffer))) sme_me_mask = me_mask; -- cgit v1.2.1 From 2c04adc4f07a2528040c7546a852d84f9f4735b1 Mon Sep 17 00:00:00 2001 From: John Harrison Date: Wed, 15 Feb 2023 17:11:00 -0800 Subject: drm/i915: Don't use stolen memory for ring buffers with LLC MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 690e0ec8e63da9a29b39fedc6ed5da09c7c82651 upstream. Direction from hardware is that stolen memory should never be used for ring buffer allocations on platforms with LLC. There are too many caching pitfalls due to the way stolen memory accesses are routed. So it is safest to just not use it. Signed-off-by: John Harrison Fixes: c58b735fc762 ("drm/i915: Allocate rings from stolen") Cc: Chris Wilson Cc: Joonas Lahtinen Cc: Jani Nikula Cc: Rodrigo Vivi Cc: Tvrtko Ursulin Cc: intel-gfx@lists.freedesktop.org Cc: # v4.9+ Tested-by: Jouni HΓΆgander Reviewed-by: Daniele Ceraolo Spurio Link: https://patchwork.freedesktop.org/patch/msgid/20230216011101.1909009-2-John.C.Harrison@Intel.com (cherry picked from commit f54c1f6c697c4297f7ed94283c184acc338a5cf8) Signed-off-by: Jani Nikula Signed-off-by: John Harrison Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/i915/intel_ringbuffer.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 979d130b24c4..16eec72f0fed 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -1132,10 +1132,11 @@ static struct i915_vma * intel_ring_create_vma(struct drm_i915_private *dev_priv, int size) { struct i915_address_space *vm = &dev_priv->ggtt.vm; - struct drm_i915_gem_object *obj; + struct drm_i915_gem_object *obj = NULL; struct i915_vma *vma; - obj = i915_gem_object_create_stolen(dev_priv, size); + if (!HAS_LLC(dev_priv)) + obj = i915_gem_object_create_stolen(dev_priv, size); if (!obj) obj = i915_gem_object_create_internal(dev_priv, size); if (IS_ERR(obj)) -- cgit v1.2.1 From 1715b382e230a0d13ea4fa4cfa017956426d5566 Mon Sep 17 00:00:00 2001 From: Biju Das Date: Mon, 27 Feb 2023 11:41:46 +0000 Subject: serial: 8250_em: Fix UART port type commit 32e293be736b853f168cd065d9cbc1b0c69f545d upstream. As per HW manual for EMEV2 "R19UH0040EJ0400 Rev.4.00", the UART IP found on EMMA mobile SoC is Register-compatible with the general-purpose 16750 UART chip. Fix UART port type as 16750 and enable 64-bytes fifo support. 
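For background: with UPF_FIXED_TYPE the serial core skips autoconfig probing and takes the FIFO size and capabilities from its static uart_config[] entry for the declared type, so setting PORT_16750 is what actually turns on the 64-byte FIFO handling:

    up.port.type = PORT_16750;      /* 16750-compatible, 64-byte FIFO */
    up.port.flags = UPF_FIXED_PORT | UPF_IOREMAP | UPF_FIXED_TYPE;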
Fixes: 22886ee96895 ("serial8250-em: Emma Mobile UART driver V2") Cc: stable@vger.kernel.org Signed-off-by: Biju Das Link: https://lore.kernel.org/r/20230227114152.22265-2-biju.das.jz@bp.renesas.com [biju: manually fixed the conflicts] Signed-off-by: Biju Das Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/8250/8250_em.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/tty/serial/8250/8250_em.c b/drivers/tty/serial/8250/8250_em.c index 2a76e22d2ec0..5670c8a267d8 100644 --- a/drivers/tty/serial/8250/8250_em.c +++ b/drivers/tty/serial/8250/8250_em.c @@ -102,8 +102,8 @@ static int serial8250_em_probe(struct platform_device *pdev) memset(&up, 0, sizeof(up)); up.port.mapbase = regs->start; up.port.irq = irq->start; - up.port.type = PORT_UNKNOWN; - up.port.flags = UPF_BOOT_AUTOCONF | UPF_FIXED_PORT | UPF_IOREMAP; + up.port.type = PORT_16750; + up.port.flags = UPF_FIXED_PORT | UPF_IOREMAP | UPF_FIXED_TYPE; up.port.dev = &pdev->dev; up.port.private_data = priv; -- cgit v1.2.1 From e4bcc58a9e5bb388390ecd6cd4941389de556648 Mon Sep 17 00:00:00 2001 From: Lee Jones Date: Mon, 20 Mar 2023 13:08:23 +0000 Subject: HID: core: Provide new max_buffer_size attribute to over-ride the default commit b1a37ed00d7908a991c1d0f18a8cba3c2aa99bdc upstream. Presently, when a report is processed, its proposed size, provided by the user of the API (as Report Size * Report Count), is compared against the subsystem default HID_MAX_BUFFER_SIZE (16k). However, some low-level HID drivers allocate a reduced amount of memory to their buffers (e.g. UHID only allocates UHID_DATA_MAX (4k) buffers), rendering this check inadequate in some cases. In these circumstances, if the received report ends up being smaller than the proposed report size, the remainder of the buffer is zeroed. That is, the span between csize (the size of the current report) and rsize (the proposed size, i.e. Report Size * Report Count), which can be as large as HID_MAX_BUFFER_SIZE (16k). This means that memset() shoots straight past the end of the buffer boundary and starts zeroing out in-use values, often resulting in calamity. This patch introduces a new variable into 'struct hid_ll_driver' where individual low-level drivers can over-ride the default maximum value of HID_MAX_BUFFER_SIZE (16k) with something more sympathetic to the interface.
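Usage sketch (the my_* names are hypothetical; the uhid patch that follows does exactly this with UHID_DATA_MAX):

    static struct hid_ll_driver my_ll_driver = {
            .start           = my_start,
            .stop            = my_stop,
            .open            = my_open,
            .close           = my_close,
            .parse           = my_parse,
            .raw_request     = my_raw_request,
            .max_buffer_size = 4096,        /* 0 keeps the 16k default */
    };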
Signed-off-by: Lee Jones Signed-off-by: Jiri Kosina [Lee: Backported to v4.19.y] Signed-off-by: Lee Jones Signed-off-by: Greg Kroah-Hartman --- drivers/hid/hid-core.c | 18 +++++++++++++----- include/linux/hid.h | 3 +++ 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 8cc79d0d11fb..c8d687f795ca 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -258,6 +258,7 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign { struct hid_report *report; struct hid_field *field; + unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE; unsigned int usages; unsigned int offset; unsigned int i; @@ -288,8 +289,11 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign offset = report->size; report->size += parser->global.report_size * parser->global.report_count; + if (parser->device->ll_driver->max_buffer_size) + max_buffer_size = parser->device->ll_driver->max_buffer_size; + /* Total size check: Allow for possible report index byte */ - if (report->size > (HID_MAX_BUFFER_SIZE - 1) << 3) { + if (report->size > (max_buffer_size - 1) << 3) { hid_err(parser->device, "report is too long\n"); return -1; } @@ -1567,6 +1571,7 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size, struct hid_report_enum *report_enum = hid->report_enum + type; struct hid_report *report; struct hid_driver *hdrv; + int max_buffer_size = HID_MAX_BUFFER_SIZE; unsigned int a; u32 rsize, csize = size; u8 *cdata = data; @@ -1583,10 +1588,13 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size, rsize = hid_compute_report_size(report); - if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE) - rsize = HID_MAX_BUFFER_SIZE - 1; - else if (rsize > HID_MAX_BUFFER_SIZE) - rsize = HID_MAX_BUFFER_SIZE; + if (hid->ll_driver->max_buffer_size) + max_buffer_size = hid->ll_driver->max_buffer_size; + + if (report_enum->numbered && rsize >= max_buffer_size) + rsize = max_buffer_size - 1; + else if (rsize > max_buffer_size) + rsize = max_buffer_size; if (csize < rsize) { dbg_hid("report %d is too short, (%d < %d)\n", report->id, diff --git a/include/linux/hid.h b/include/linux/hid.h index c51ebce2197e..79c6c3b4e004 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -799,6 +799,7 @@ struct hid_driver { * @raw_request: send raw report request to device (e.g. feature report) * @output_report: send output report to device * @idle: send idle request to device + * @max_buffer_size: over-ride maximum data buffer size (default: HID_MAX_BUFFER_SIZE) */ struct hid_ll_driver { int (*start)(struct hid_device *hdev); @@ -823,6 +824,8 @@ struct hid_ll_driver { int (*output_report) (struct hid_device *hdev, __u8 *buf, size_t len); int (*idle)(struct hid_device *hdev, int report, int idle, int reqtype); + + unsigned int max_buffer_size; }; extern struct hid_ll_driver i2c_hid_ll_driver; -- cgit v1.2.1 From cf6939a8f6bf4c3e11fdd80f477be626f08fda13 Mon Sep 17 00:00:00 2001 From: Lee Jones Date: Mon, 20 Mar 2023 13:08:24 +0000 Subject: HID: uhid: Over-ride the default maximum data buffer value with our own commit 1c5d4221240a233df2440fe75c881465cdf8da07 upstream. The default maximum data buffer size for this interface is UHID_DATA_MAX (4k). When data buffers are being processed, ensure this value is used when ensuring the sanity, rather than a value between the user provided value and HID_MAX_BUFFER_SIZE (16k). 
Signed-off-by: Lee Jones Signed-off-by: Jiri Kosina Signed-off-by: Lee Jones Signed-off-by: Greg Kroah-Hartman --- drivers/hid/uhid.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c index e128b9ce156d..44df81d56d9c 100644 --- a/drivers/hid/uhid.c +++ b/drivers/hid/uhid.c @@ -398,6 +398,7 @@ struct hid_ll_driver uhid_hid_driver = { .parse = uhid_hid_parse, .raw_request = uhid_hid_raw_request, .output_report = uhid_hid_output_report, + .max_buffer_size = UHID_DATA_MAX, }; EXPORT_SYMBOL_GPL(uhid_hid_driver); -- cgit v1.2.1 From 30baa0923a27fb9a04444a29562344e0573992e4 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Wed, 22 Mar 2023 13:27:13 +0100 Subject: Linux 4.19.279 Link: https://lore.kernel.org/r/20230320145424.191578432@linuxfoundation.org Tested-by: Chris Paterson (CIP) Tested-by: Shuah Khan Tested-by: Linux Kernel Functional Testing Tested-by: Jon Hunter Tested-by: Guenter Roeck Signed-off-by: Greg Kroah-Hartman --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index a8104c8024a4..d6c4a53bf505 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 19 -SUBLEVEL = 278 +SUBLEVEL = 279 EXTRAVERSION = NAME = "People's Front" -- cgit v1.2.1 From 533d915899b4a5a7b5b5a99eec24b2920ccd1f11 Mon Sep 17 00:00:00 2001 From: Zheng Wang Date: Sun, 12 Mar 2023 01:46:50 +0800 Subject: power: supply: da9150: Fix use after free bug in da9150_charger_remove due to race condition [ Upstream commit 06615d11cc78162dfd5116efb71f29eb29502d37 ] In da9150_charger_probe, &charger->otg_work is bound to da9150_charger_otg_work. da9150_charger_otg_ncb may be called to start the work. If we remove the module, which calls da9150_charger_remove to clean up, there may still be unfinished work. The possible sequence is as follows:

    CPU0                                  CPU1
                                          | da9150_charger_otg_work
    da9150_charger_remove                 |
      power_supply_unregister             |
        device_unregister                 |
          power_supply_dev_release        |
            kfree(psy)                    |
                                          | power_supply_changed(charger->usb);
                                          |   // use after free

Fix it by canceling the work before cleanup in da9150_charger_remove. Fixes: c1a281e34dae ("power: Add support for DA9150 Charger") Signed-off-by: Zheng Wang Signed-off-by: Sebastian Reichel Signed-off-by: Sasha Levin --- drivers/power/supply/da9150-charger.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/power/supply/da9150-charger.c b/drivers/power/supply/da9150-charger.c index 60099815296e..b2d38eb32288 100644 --- a/drivers/power/supply/da9150-charger.c +++ b/drivers/power/supply/da9150-charger.c @@ -666,6 +666,7 @@ static int da9150_charger_remove(struct platform_device *pdev) if (!IS_ERR_OR_NULL(charger->usb_phy)) usb_unregister_notifier(charger->usb_phy, &charger->otg_nb); + cancel_work_sync(&charger->otg_work); power_supply_unregister(charger->battery); power_supply_unregister(charger->usb); -- cgit v1.2.1 From a71e4c85c5a5d941e916b932cb63322fefc782f9 Mon Sep 17 00:00:00 2001 From: PaweΕ‚ JabΕ‚oΕ„ski Date: Mon, 20 Aug 2018 08:12:26 -0700 Subject: i40evf: Change a VF mac without reloading the VF driver MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit ae1e29f671b467f3e9e9aa2b82ee40e4300ea810 ] Add the possibility to change a VF MAC address from the host side without reloading the VF driver on the guest side.
Without this patch it is not possible to change the VF mac because executing i40evf_virtchnl_completion function with VIRTCHNL_OP_GET_VF_RESOURCES opcode resets the VF mac address to previous value. Signed-off-by: PaweΕ‚ JabΕ‚oΕ„ski Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher Stable-dep-of: 32d57f667f87 ("iavf: fix inverted Rx hash condition leading to disabled hash") Signed-off-by: Sasha Levin --- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 8 +++++--- drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c | 11 +++++++++-- 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 240083201dbf..1527c67b487b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -2595,7 +2595,7 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, !is_multicast_ether_addr(addr) && vf->pf_set_mac && !ether_addr_equal(addr, vf->default_lan_addr.addr)) { dev_err(&pf->pdev->dev, - "VF attempting to override administratively set MAC address, reload the VF driver to resume normal operation\n"); + "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n"); return -EPERM; } } @@ -4019,9 +4019,11 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) mac, vf_id); } - /* Force the VF driver stop so it has to reload with new MAC address */ + /* Force the VF interface down so it has to bring up with new MAC + * address + */ i40e_vc_disable_vf(vf); - dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n"); + dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n"); error_param: return ret; diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index 94dabc9d89f7..6579dabab78c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -1362,8 +1362,15 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, memcpy(adapter->vf_res, msg, min(msglen, len)); i40evf_validate_num_queues(adapter); i40e_vf_parse_hw_config(&adapter->hw, adapter->vf_res); - /* restore current mac address */ - ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); + if (is_zero_ether_addr(adapter->hw.mac.addr)) { + /* restore current mac address */ + ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); + } else { + /* refresh current mac address if changed */ + ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); + ether_addr_copy(netdev->perm_addr, + adapter->hw.mac.addr); + } i40evf_process_config(adapter); } break; -- cgit v1.2.1 From 0b7c849c2c00c3c596922ded9b80bddf154789da Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Fri, 14 Sep 2018 17:37:44 -0700 Subject: intel-ethernet: rename i40evf to iavf [ Upstream commit 8062b2263a9fc294ddeb4024b113e8e26b82d5de ] Rename the Intel Ethernet Adaptive Virtual Function driver (i40evf) to a new name (iavf) that is more consistent with the ongoing maintenance of the driver as the universal VF driver for multiple product lines. This first patch fixes up the directory names and the .ko name, intentionally ignoring the function names inside the driver for now. Basically this is the simplest patch that gets the rename done and will be followed by other patches that rename the internal functions. 
This patch also addresses a couple of string/name issues and updates the Copyright year. Also, made sure to add a MODULE_ALIAS to the old name. Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher Stable-dep-of: 32d57f667f87 ("iavf: fix inverted Rx hash condition leading to disabled hash") Signed-off-by: Sasha Levin --- Documentation/networking/00-INDEX | 4 +- Documentation/networking/i40evf.txt | 54 - Documentation/networking/iavf.txt | 56 + MAINTAINERS | 2 +- drivers/net/ethernet/intel/Kconfig | 15 +- drivers/net/ethernet/intel/Makefile | 2 +- drivers/net/ethernet/intel/i40evf/Makefile | 16 - drivers/net/ethernet/intel/i40evf/i40e_adminq.c | 967 ----- drivers/net/ethernet/intel/i40evf/i40e_adminq.h | 136 - .../net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 2717 ------------- drivers/net/ethernet/intel/i40evf/i40e_alloc.h | 35 - drivers/net/ethernet/intel/i40evf/i40e_common.c | 1320 ------- drivers/net/ethernet/intel/i40evf/i40e_devids.h | 34 - drivers/net/ethernet/intel/i40evf/i40e_hmc.h | 215 -- drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h | 158 - drivers/net/ethernet/intel/i40evf/i40e_osdep.h | 52 - drivers/net/ethernet/intel/i40evf/i40e_prototype.h | 130 - drivers/net/ethernet/intel/i40evf/i40e_register.h | 313 -- drivers/net/ethernet/intel/i40evf/i40e_status.h | 78 - drivers/net/ethernet/intel/i40evf/i40e_trace.h | 209 - drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 2513 ------------ drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 524 --- drivers/net/ethernet/intel/i40evf/i40e_type.h | 1496 -------- drivers/net/ethernet/intel/i40evf/i40evf.h | 427 --- drivers/net/ethernet/intel/i40evf/i40evf_client.c | 579 --- drivers/net/ethernet/intel/i40evf/i40evf_client.h | 169 - drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c | 820 ---- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 3989 ------------------- .../net/ethernet/intel/i40evf/i40evf_virtchnl.c | 1465 ------- drivers/net/ethernet/intel/iavf/Makefile | 15 + drivers/net/ethernet/intel/iavf/i40e_adminq.c | 967 +++++ drivers/net/ethernet/intel/iavf/i40e_adminq.h | 136 + drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h | 2717 +++++++++++++ drivers/net/ethernet/intel/iavf/i40e_alloc.h | 35 + drivers/net/ethernet/intel/iavf/i40e_common.c | 1320 +++++++ drivers/net/ethernet/intel/iavf/i40e_devids.h | 34 + drivers/net/ethernet/intel/iavf/i40e_hmc.h | 215 ++ drivers/net/ethernet/intel/iavf/i40e_lan_hmc.h | 158 + drivers/net/ethernet/intel/iavf/i40e_osdep.h | 52 + drivers/net/ethernet/intel/iavf/i40e_prototype.h | 130 + drivers/net/ethernet/intel/iavf/i40e_register.h | 313 ++ drivers/net/ethernet/intel/iavf/i40e_status.h | 78 + drivers/net/ethernet/intel/iavf/i40e_trace.h | 209 + drivers/net/ethernet/intel/iavf/i40e_txrx.c | 2513 ++++++++++++ drivers/net/ethernet/intel/iavf/i40e_txrx.h | 524 +++ drivers/net/ethernet/intel/iavf/i40e_type.h | 1496 ++++++++ drivers/net/ethernet/intel/iavf/i40evf.h | 427 +++ drivers/net/ethernet/intel/iavf/i40evf_client.c | 579 +++ drivers/net/ethernet/intel/iavf/i40evf_client.h | 169 + drivers/net/ethernet/intel/iavf/i40evf_ethtool.c | 820 ++++ drivers/net/ethernet/intel/iavf/i40evf_main.c | 3990 ++++++++++++++++++++ drivers/net/ethernet/intel/iavf/i40evf_virtchnl.c | 1465 +++++++ 52 files changed, 18433 insertions(+), 18424 deletions(-) delete mode 100644 Documentation/networking/i40evf.txt create mode 100644 Documentation/networking/iavf.txt delete mode 100644 drivers/net/ethernet/intel/i40evf/Makefile delete mode 100644 drivers/net/ethernet/intel/i40evf/i40e_adminq.c 
delete mode 100644 drivers/net/ethernet/intel/i40evf/i40e_adminq.h delete mode 100644 drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h delete mode 100644 drivers/net/ethernet/intel/i40evf/i40e_alloc.h delete mode 100644 drivers/net/ethernet/intel/i40evf/i40e_common.c delete mode 100644 drivers/net/ethernet/intel/i40evf/i40e_devids.h delete mode 100644 drivers/net/ethernet/intel/i40evf/i40e_hmc.h delete mode 100644 drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h delete mode 100644 drivers/net/ethernet/intel/i40evf/i40e_osdep.h delete mode 100644 drivers/net/ethernet/intel/i40evf/i40e_prototype.h delete mode 100644 drivers/net/ethernet/intel/i40evf/i40e_register.h delete mode 100644 drivers/net/ethernet/intel/i40evf/i40e_status.h delete mode 100644 drivers/net/ethernet/intel/i40evf/i40e_trace.h delete mode 100644 drivers/net/ethernet/intel/i40evf/i40e_txrx.c delete mode 100644 drivers/net/ethernet/intel/i40evf/i40e_txrx.h delete mode 100644 drivers/net/ethernet/intel/i40evf/i40e_type.h delete mode 100644 drivers/net/ethernet/intel/i40evf/i40evf.h delete mode 100644 drivers/net/ethernet/intel/i40evf/i40evf_client.c delete mode 100644 drivers/net/ethernet/intel/i40evf/i40evf_client.h delete mode 100644 drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c delete mode 100644 drivers/net/ethernet/intel/i40evf/i40evf_main.c delete mode 100644 drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c create mode 100644 drivers/net/ethernet/intel/iavf/Makefile create mode 100644 drivers/net/ethernet/intel/iavf/i40e_adminq.c create mode 100644 drivers/net/ethernet/intel/iavf/i40e_adminq.h create mode 100644 drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h create mode 100644 drivers/net/ethernet/intel/iavf/i40e_alloc.h create mode 100644 drivers/net/ethernet/intel/iavf/i40e_common.c create mode 100644 drivers/net/ethernet/intel/iavf/i40e_devids.h create mode 100644 drivers/net/ethernet/intel/iavf/i40e_hmc.h create mode 100644 drivers/net/ethernet/intel/iavf/i40e_lan_hmc.h create mode 100644 drivers/net/ethernet/intel/iavf/i40e_osdep.h create mode 100644 drivers/net/ethernet/intel/iavf/i40e_prototype.h create mode 100644 drivers/net/ethernet/intel/iavf/i40e_register.h create mode 100644 drivers/net/ethernet/intel/iavf/i40e_status.h create mode 100644 drivers/net/ethernet/intel/iavf/i40e_trace.h create mode 100644 drivers/net/ethernet/intel/iavf/i40e_txrx.c create mode 100644 drivers/net/ethernet/intel/iavf/i40e_txrx.h create mode 100644 drivers/net/ethernet/intel/iavf/i40e_type.h create mode 100644 drivers/net/ethernet/intel/iavf/i40evf.h create mode 100644 drivers/net/ethernet/intel/iavf/i40evf_client.c create mode 100644 drivers/net/ethernet/intel/iavf/i40evf_client.h create mode 100644 drivers/net/ethernet/intel/iavf/i40evf_ethtool.c create mode 100644 drivers/net/ethernet/intel/iavf/i40evf_main.c create mode 100644 drivers/net/ethernet/intel/iavf/i40evf_virtchnl.c diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX index 02a323c43261..2a9dbac38b4e 100644 --- a/Documentation/networking/00-INDEX +++ b/Documentation/networking/00-INDEX @@ -94,8 +94,8 @@ gianfar.txt - Gianfar Ethernet Driver. i40e.txt - README for the Intel Ethernet Controller XL710 Driver (i40e). -i40evf.txt - - Short note on the Driver for the Intel(R) XL710 X710 Virtual Function +iavf.txt + - README for the Intel Ethernet Adaptive Virtual Function Driver (iavf). 
ieee802154.txt - Linux IEEE 802.15.4 implementation, API and drivers igb.txt diff --git a/Documentation/networking/i40evf.txt b/Documentation/networking/i40evf.txt deleted file mode 100644 index e9b3035b95d0..000000000000 --- a/Documentation/networking/i40evf.txt +++ /dev/null @@ -1,54 +0,0 @@ -Linux* Base Driver for Intel(R) Network Connection -================================================== - -Intel Ethernet Adaptive Virtual Function Linux driver. -Copyright(c) 2013-2017 Intel Corporation. - -Contents -======== - -- Identifying Your Adapter -- Known Issues/Troubleshooting -- Support - -This file describes the i40evf Linux* Base Driver. - -The i40evf driver supports the below mentioned virtual function -devices and can only be activated on kernels running the i40e or -newer Physical Function (PF) driver compiled with CONFIG_PCI_IOV. -The i40evf driver requires CONFIG_PCI_MSI to be enabled. - -The guest OS loading the i40evf driver must support MSI-X interrupts. - -Supported Hardware -================== -Intel XL710 X710 Virtual Function -Intel Ethernet Adaptive Virtual Function -Intel X722 Virtual Function - -Identifying Your Adapter -======================== - -For more information on how to identify your adapter, go to the -Adapter & Driver ID Guide at: - - http://support.intel.com/support/go/network/adapter/idguide.htm - -Known Issues/Troubleshooting -============================ - - -Support -======= - -For general information, go to the Intel support website at: - - http://support.intel.com - -or the Intel Wired Networking project hosted by Sourceforge at: - - http://sourceforge.net/projects/e1000 - -If an issue is identified with the released source code on the supported -kernel with a supported adapter, email the specific information related -to the issue to e1000-devel@lists.sf.net diff --git a/Documentation/networking/iavf.txt b/Documentation/networking/iavf.txt new file mode 100644 index 000000000000..cc902a2369d6 --- /dev/null +++ b/Documentation/networking/iavf.txt @@ -0,0 +1,56 @@ +Linux* Base Driver for Intel(R) Network Connection +================================================== + +Intel Ethernet Adaptive Virtual Function Linux driver. +Copyright(c) 2013-2018 Intel Corporation. + +Contents +======== + +- Identifying Your Adapter +- Known Issues/Troubleshooting +- Support + +This file describes the iavf Linux* Base Driver. This driver +was formerly called i40evf. + +The iavf driver supports the below mentioned virtual function +devices and can only be activated on kernels running the i40e or +newer Physical Function (PF) driver compiled with CONFIG_PCI_IOV. +The iavf driver requires CONFIG_PCI_MSI to be enabled. + +The guest OS loading the iavf driver must support MSI-X interrupts. 
+ +Supported Hardware +================== +Intel XL710 X710 Virtual Function +Intel X722 Virtual Function +Intel Ethernet Adaptive Virtual Function + +Identifying Your Adapter +======================== + +For more information on how to identify your adapter, go to the +Adapter & Driver ID Guide at: + + https://www.intel.com/content/www/us/en/support/articles/000005584/network-and-i-o/ethernet-products.html + + +Known Issues/Troubleshooting +============================ + + +Support +======= + +For general information, go to the Intel support website at: + + http://support.intel.com + +or the Intel Wired Networking project hosted by Sourceforge at: + + http://sourceforge.net/projects/e1000 + +If an issue is identified with the released source code on the supported +kernel with a supported adapter, email the specific information related +to the issue to e1000-devel@lists.sf.net diff --git a/MAINTAINERS b/MAINTAINERS index af0f322cf2f7..a8015db6b37e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7377,7 +7377,7 @@ F: Documentation/networking/ixgb.txt F: Documentation/networking/ixgbe.txt F: Documentation/networking/ixgbevf.txt F: Documentation/networking/i40e.txt -F: Documentation/networking/i40evf.txt +F: Documentation/networking/iavf.txt F: Documentation/networking/ice.txt F: drivers/net/ethernet/intel/ F: drivers/net/ethernet/intel/*/ diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 1ab613eb5796..b542aba6f0e8 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -235,20 +235,27 @@ config I40E_DCB If unsure, say N. +# this is here to allow seamless migration from I40EVF --> IAVF name +# so that CONFIG_IAVF symbol will always mirror the state of CONFIG_I40EVF +config IAVF + tristate config I40EVF tristate "Intel(R) Ethernet Adaptive Virtual Function support" + select IAVF depends on PCI_MSI ---help--- This driver supports virtual functions for Intel XL710, - X710, X722, and all devices advertising support for Intel - Ethernet Adaptive Virtual Function devices. For more + X710, X722, XXV710, and all devices advertising support for + Intel Ethernet Adaptive Virtual Function devices. For more information on how to identify your adapter, go to the Adapter & Driver ID Guide that can be located at: - + + + This driver was formerly named i40evf. To compile this driver as a module, choose M here. The module - will be called i40evf. MSI-X interrupt support is required + will be called iavf. MSI-X interrupt support is required for this driver to work correctly. config ICE diff --git a/drivers/net/ethernet/intel/Makefile b/drivers/net/ethernet/intel/Makefile index 807a4f8c7e4e..b91153df6ee8 100644 --- a/drivers/net/ethernet/intel/Makefile +++ b/drivers/net/ethernet/intel/Makefile @@ -12,6 +12,6 @@ obj-$(CONFIG_IXGBE) += ixgbe/ obj-$(CONFIG_IXGBEVF) += ixgbevf/ obj-$(CONFIG_I40E) += i40e/ obj-$(CONFIG_IXGB) += ixgb/ -obj-$(CONFIG_I40EVF) += i40evf/ +obj-$(CONFIG_IAVF) += iavf/ obj-$(CONFIG_FM10K) += fm10k/ obj-$(CONFIG_ICE) += ice/ diff --git a/drivers/net/ethernet/intel/i40evf/Makefile b/drivers/net/ethernet/intel/i40evf/Makefile deleted file mode 100644 index 3c5c6e962280..000000000000 --- a/drivers/net/ethernet/intel/i40evf/Makefile +++ /dev/null @@ -1,16 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -# Copyright(c) 2013 - 2018 Intel Corporation. 
- -# -## Makefile for the Intel(R) 40GbE VF driver -# -# - -ccflags-y += -I$(src) -subdir-ccflags-y += -I$(src) - -obj-$(CONFIG_I40EVF) += i40evf.o - -i40evf-objs := i40evf_main.o i40evf_ethtool.o i40evf_virtchnl.o \ - i40e_txrx.o i40e_common.o i40e_adminq.o i40evf_client.o - diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c deleted file mode 100644 index 21a0dbf6ccf6..000000000000 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c +++ /dev/null @@ -1,967 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ - -#include "i40e_status.h" -#include "i40e_type.h" -#include "i40e_register.h" -#include "i40e_adminq.h" -#include "i40e_prototype.h" - -/** - * i40e_is_nvm_update_op - return true if this is an NVM update operation - * @desc: API request descriptor - **/ -static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc) -{ - return (desc->opcode == i40e_aqc_opc_nvm_erase) || - (desc->opcode == i40e_aqc_opc_nvm_update); -} - -/** - * i40e_adminq_init_regs - Initialize AdminQ registers - * @hw: pointer to the hardware structure - * - * This assumes the alloc_asq and alloc_arq functions have already been called - **/ -static void i40e_adminq_init_regs(struct i40e_hw *hw) -{ - /* set head and tail registers in our local struct */ - if (i40e_is_vf(hw)) { - hw->aq.asq.tail = I40E_VF_ATQT1; - hw->aq.asq.head = I40E_VF_ATQH1; - hw->aq.asq.len = I40E_VF_ATQLEN1; - hw->aq.asq.bal = I40E_VF_ATQBAL1; - hw->aq.asq.bah = I40E_VF_ATQBAH1; - hw->aq.arq.tail = I40E_VF_ARQT1; - hw->aq.arq.head = I40E_VF_ARQH1; - hw->aq.arq.len = I40E_VF_ARQLEN1; - hw->aq.arq.bal = I40E_VF_ARQBAL1; - hw->aq.arq.bah = I40E_VF_ARQBAH1; - } -} - -/** - * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings - * @hw: pointer to the hardware structure - **/ -static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw) -{ - i40e_status ret_code; - - ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf, - i40e_mem_atq_ring, - (hw->aq.num_asq_entries * - sizeof(struct i40e_aq_desc)), - I40E_ADMINQ_DESC_ALIGNMENT); - if (ret_code) - return ret_code; - - ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf, - (hw->aq.num_asq_entries * - sizeof(struct i40e_asq_cmd_details))); - if (ret_code) { - i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); - return ret_code; - } - - return ret_code; -} - -/** - * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings - * @hw: pointer to the hardware structure - **/ -static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw) -{ - i40e_status ret_code; - - ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf, - i40e_mem_arq_ring, - (hw->aq.num_arq_entries * - sizeof(struct i40e_aq_desc)), - I40E_ADMINQ_DESC_ALIGNMENT); - - return ret_code; -} - -/** - * i40e_free_adminq_asq - Free Admin Queue send rings - * @hw: pointer to the hardware structure - * - * This assumes the posted send buffers have already been cleaned - * and de-allocated - **/ -static void i40e_free_adminq_asq(struct i40e_hw *hw) -{ - i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); -} - -/** - * i40e_free_adminq_arq - Free Admin Queue receive rings - * @hw: pointer to the hardware structure - * - * This assumes the posted receive buffers have already been cleaned - * and de-allocated - **/ -static void i40e_free_adminq_arq(struct i40e_hw *hw) -{ - i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf); -} - -/** - * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue - * @hw: 
pointer to the hardware structure - **/ -static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw) -{ - i40e_status ret_code; - struct i40e_aq_desc *desc; - struct i40e_dma_mem *bi; - int i; - - /* We'll be allocating the buffer info memory first, then we can - * allocate the mapped buffers for the event processing - */ - - /* buffer_info structures do not need alignment */ - ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head, - (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem))); - if (ret_code) - goto alloc_arq_bufs; - hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va; - - /* allocate the mapped buffers */ - for (i = 0; i < hw->aq.num_arq_entries; i++) { - bi = &hw->aq.arq.r.arq_bi[i]; - ret_code = i40e_allocate_dma_mem(hw, bi, - i40e_mem_arq_buf, - hw->aq.arq_buf_size, - I40E_ADMINQ_DESC_ALIGNMENT); - if (ret_code) - goto unwind_alloc_arq_bufs; - - /* now configure the descriptors for use */ - desc = I40E_ADMINQ_DESC(hw->aq.arq, i); - - desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF); - if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF) - desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB); - desc->opcode = 0; - /* This is in accordance with Admin queue design, there is no - * register for buffer size configuration - */ - desc->datalen = cpu_to_le16((u16)bi->size); - desc->retval = 0; - desc->cookie_high = 0; - desc->cookie_low = 0; - desc->params.external.addr_high = - cpu_to_le32(upper_32_bits(bi->pa)); - desc->params.external.addr_low = - cpu_to_le32(lower_32_bits(bi->pa)); - desc->params.external.param0 = 0; - desc->params.external.param1 = 0; - } - -alloc_arq_bufs: - return ret_code; - -unwind_alloc_arq_bufs: - /* don't try to free the one that failed... */ - i--; - for (; i >= 0; i--) - i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]); - i40e_free_virt_mem(hw, &hw->aq.arq.dma_head); - - return ret_code; -} - -/** - * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue - * @hw: pointer to the hardware structure - **/ -static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw) -{ - i40e_status ret_code; - struct i40e_dma_mem *bi; - int i; - - /* No mapped memory needed yet, just the buffer info structures */ - ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head, - (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem))); - if (ret_code) - goto alloc_asq_bufs; - hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va; - - /* allocate the mapped buffers */ - for (i = 0; i < hw->aq.num_asq_entries; i++) { - bi = &hw->aq.asq.r.asq_bi[i]; - ret_code = i40e_allocate_dma_mem(hw, bi, - i40e_mem_asq_buf, - hw->aq.asq_buf_size, - I40E_ADMINQ_DESC_ALIGNMENT); - if (ret_code) - goto unwind_alloc_asq_bufs; - } -alloc_asq_bufs: - return ret_code; - -unwind_alloc_asq_bufs: - /* don't try to free the one that failed... 
*/ - i--; - for (; i >= 0; i--) - i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]); - i40e_free_virt_mem(hw, &hw->aq.asq.dma_head); - - return ret_code; -} - -/** - * i40e_free_arq_bufs - Free receive queue buffer info elements - * @hw: pointer to the hardware structure - **/ -static void i40e_free_arq_bufs(struct i40e_hw *hw) -{ - int i; - - /* free descriptors */ - for (i = 0; i < hw->aq.num_arq_entries; i++) - i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]); - - /* free the descriptor memory */ - i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf); - - /* free the dma header */ - i40e_free_virt_mem(hw, &hw->aq.arq.dma_head); -} - -/** - * i40e_free_asq_bufs - Free send queue buffer info elements - * @hw: pointer to the hardware structure - **/ -static void i40e_free_asq_bufs(struct i40e_hw *hw) -{ - int i; - - /* only unmap if the address is non-NULL */ - for (i = 0; i < hw->aq.num_asq_entries; i++) - if (hw->aq.asq.r.asq_bi[i].pa) - i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]); - - /* free the buffer info list */ - i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf); - - /* free the descriptor memory */ - i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); - - /* free the dma header */ - i40e_free_virt_mem(hw, &hw->aq.asq.dma_head); -} - -/** - * i40e_config_asq_regs - configure ASQ registers - * @hw: pointer to the hardware structure - * - * Configure base address and length registers for the transmit queue - **/ -static i40e_status i40e_config_asq_regs(struct i40e_hw *hw) -{ - i40e_status ret_code = 0; - u32 reg = 0; - - /* Clear Head and Tail */ - wr32(hw, hw->aq.asq.head, 0); - wr32(hw, hw->aq.asq.tail, 0); - - /* set starting point */ - wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries | - I40E_VF_ATQLEN1_ATQENABLE_MASK)); - wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa)); - wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa)); - - /* Check one register to verify that config was applied */ - reg = rd32(hw, hw->aq.asq.bal); - if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa)) - ret_code = I40E_ERR_ADMIN_QUEUE_ERROR; - - return ret_code; -} - -/** - * i40e_config_arq_regs - ARQ register configuration - * @hw: pointer to the hardware structure - * - * Configure base address and length registers for the receive (event queue) - **/ -static i40e_status i40e_config_arq_regs(struct i40e_hw *hw) -{ - i40e_status ret_code = 0; - u32 reg = 0; - - /* Clear Head and Tail */ - wr32(hw, hw->aq.arq.head, 0); - wr32(hw, hw->aq.arq.tail, 0); - - /* set starting point */ - wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries | - I40E_VF_ARQLEN1_ARQENABLE_MASK)); - wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa)); - wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa)); - - /* Update tail in the HW to post pre-allocated buffers */ - wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1); - - /* Check one register to verify that config was applied */ - reg = rd32(hw, hw->aq.arq.bal); - if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa)) - ret_code = I40E_ERR_ADMIN_QUEUE_ERROR; - - return ret_code; -} - -/** - * i40e_init_asq - main initialization routine for ASQ - * @hw: pointer to the hardware structure - * - * This is the main initialization routine for the Admin Send Queue - * Prior to calling this function, drivers *MUST* set the following fields - * in the hw->aq structure: - * - hw->aq.num_asq_entries - * - hw->aq.arq_buf_size - * - * Do *NOT* hold the lock when calling this as the memory allocation routines - * called are not going to be atomic context safe - **/ -static 
i40e_status i40e_init_asq(struct i40e_hw *hw) -{ - i40e_status ret_code = 0; - - if (hw->aq.asq.count > 0) { - /* queue already initialized */ - ret_code = I40E_ERR_NOT_READY; - goto init_adminq_exit; - } - - /* verify input for valid configuration */ - if ((hw->aq.num_asq_entries == 0) || - (hw->aq.asq_buf_size == 0)) { - ret_code = I40E_ERR_CONFIG; - goto init_adminq_exit; - } - - hw->aq.asq.next_to_use = 0; - hw->aq.asq.next_to_clean = 0; - - /* allocate the ring memory */ - ret_code = i40e_alloc_adminq_asq_ring(hw); - if (ret_code) - goto init_adminq_exit; - - /* allocate buffers in the rings */ - ret_code = i40e_alloc_asq_bufs(hw); - if (ret_code) - goto init_adminq_free_rings; - - /* initialize base registers */ - ret_code = i40e_config_asq_regs(hw); - if (ret_code) - goto init_adminq_free_rings; - - /* success! */ - hw->aq.asq.count = hw->aq.num_asq_entries; - goto init_adminq_exit; - -init_adminq_free_rings: - i40e_free_adminq_asq(hw); - -init_adminq_exit: - return ret_code; -} - -/** - * i40e_init_arq - initialize ARQ - * @hw: pointer to the hardware structure - * - * The main initialization routine for the Admin Receive (Event) Queue. - * Prior to calling this function, drivers *MUST* set the following fields - * in the hw->aq structure: - * - hw->aq.num_asq_entries - * - hw->aq.arq_buf_size - * - * Do *NOT* hold the lock when calling this as the memory allocation routines - * called are not going to be atomic context safe - **/ -static i40e_status i40e_init_arq(struct i40e_hw *hw) -{ - i40e_status ret_code = 0; - - if (hw->aq.arq.count > 0) { - /* queue already initialized */ - ret_code = I40E_ERR_NOT_READY; - goto init_adminq_exit; - } - - /* verify input for valid configuration */ - if ((hw->aq.num_arq_entries == 0) || - (hw->aq.arq_buf_size == 0)) { - ret_code = I40E_ERR_CONFIG; - goto init_adminq_exit; - } - - hw->aq.arq.next_to_use = 0; - hw->aq.arq.next_to_clean = 0; - - /* allocate the ring memory */ - ret_code = i40e_alloc_adminq_arq_ring(hw); - if (ret_code) - goto init_adminq_exit; - - /* allocate buffers in the rings */ - ret_code = i40e_alloc_arq_bufs(hw); - if (ret_code) - goto init_adminq_free_rings; - - /* initialize base registers */ - ret_code = i40e_config_arq_regs(hw); - if (ret_code) - goto init_adminq_free_rings; - - /* success! 
*/ - hw->aq.arq.count = hw->aq.num_arq_entries; - goto init_adminq_exit; - -init_adminq_free_rings: - i40e_free_adminq_arq(hw); - -init_adminq_exit: - return ret_code; -} - -/** - * i40e_shutdown_asq - shutdown the ASQ - * @hw: pointer to the hardware structure - * - * The main shutdown routine for the Admin Send Queue - **/ -static i40e_status i40e_shutdown_asq(struct i40e_hw *hw) -{ - i40e_status ret_code = 0; - - mutex_lock(&hw->aq.asq_mutex); - - if (hw->aq.asq.count == 0) { - ret_code = I40E_ERR_NOT_READY; - goto shutdown_asq_out; - } - - /* Stop firmware AdminQ processing */ - wr32(hw, hw->aq.asq.head, 0); - wr32(hw, hw->aq.asq.tail, 0); - wr32(hw, hw->aq.asq.len, 0); - wr32(hw, hw->aq.asq.bal, 0); - wr32(hw, hw->aq.asq.bah, 0); - - hw->aq.asq.count = 0; /* to indicate uninitialized queue */ - - /* free ring buffers */ - i40e_free_asq_bufs(hw); - -shutdown_asq_out: - mutex_unlock(&hw->aq.asq_mutex); - return ret_code; -} - -/** - * i40e_shutdown_arq - shutdown ARQ - * @hw: pointer to the hardware structure - * - * The main shutdown routine for the Admin Receive Queue - **/ -static i40e_status i40e_shutdown_arq(struct i40e_hw *hw) -{ - i40e_status ret_code = 0; - - mutex_lock(&hw->aq.arq_mutex); - - if (hw->aq.arq.count == 0) { - ret_code = I40E_ERR_NOT_READY; - goto shutdown_arq_out; - } - - /* Stop firmware AdminQ processing */ - wr32(hw, hw->aq.arq.head, 0); - wr32(hw, hw->aq.arq.tail, 0); - wr32(hw, hw->aq.arq.len, 0); - wr32(hw, hw->aq.arq.bal, 0); - wr32(hw, hw->aq.arq.bah, 0); - - hw->aq.arq.count = 0; /* to indicate uninitialized queue */ - - /* free ring buffers */ - i40e_free_arq_bufs(hw); - -shutdown_arq_out: - mutex_unlock(&hw->aq.arq_mutex); - return ret_code; -} - -/** - * i40evf_init_adminq - main initialization routine for Admin Queue - * @hw: pointer to the hardware structure - * - * Prior to calling this function, drivers *MUST* set the following fields - * in the hw->aq structure: - * - hw->aq.num_asq_entries - * - hw->aq.num_arq_entries - * - hw->aq.arq_buf_size - * - hw->aq.asq_buf_size - **/ -i40e_status i40evf_init_adminq(struct i40e_hw *hw) -{ - i40e_status ret_code; - - /* verify input for valid configuration */ - if ((hw->aq.num_arq_entries == 0) || - (hw->aq.num_asq_entries == 0) || - (hw->aq.arq_buf_size == 0) || - (hw->aq.asq_buf_size == 0)) { - ret_code = I40E_ERR_CONFIG; - goto init_adminq_exit; - } - - /* Set up register offsets */ - i40e_adminq_init_regs(hw); - - /* setup ASQ command write back timeout */ - hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT; - - /* allocate the ASQ */ - ret_code = i40e_init_asq(hw); - if (ret_code) - goto init_adminq_destroy_locks; - - /* allocate the ARQ */ - ret_code = i40e_init_arq(hw); - if (ret_code) - goto init_adminq_free_asq; - - /* success! 
*/ - goto init_adminq_exit; - -init_adminq_free_asq: - i40e_shutdown_asq(hw); -init_adminq_destroy_locks: - -init_adminq_exit: - return ret_code; -} - -/** - * i40evf_shutdown_adminq - shutdown routine for the Admin Queue - * @hw: pointer to the hardware structure - **/ -i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw) -{ - i40e_status ret_code = 0; - - if (i40evf_check_asq_alive(hw)) - i40evf_aq_queue_shutdown(hw, true); - - i40e_shutdown_asq(hw); - i40e_shutdown_arq(hw); - - if (hw->nvm_buff.va) - i40e_free_virt_mem(hw, &hw->nvm_buff); - - return ret_code; -} - -/** - * i40e_clean_asq - cleans Admin send queue - * @hw: pointer to the hardware structure - * - * returns the number of free desc - **/ -static u16 i40e_clean_asq(struct i40e_hw *hw) -{ - struct i40e_adminq_ring *asq = &(hw->aq.asq); - struct i40e_asq_cmd_details *details; - u16 ntc = asq->next_to_clean; - struct i40e_aq_desc desc_cb; - struct i40e_aq_desc *desc; - - desc = I40E_ADMINQ_DESC(*asq, ntc); - details = I40E_ADMINQ_DETAILS(*asq, ntc); - while (rd32(hw, hw->aq.asq.head) != ntc) { - i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, - "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head)); - - if (details->callback) { - I40E_ADMINQ_CALLBACK cb_func = - (I40E_ADMINQ_CALLBACK)details->callback; - desc_cb = *desc; - cb_func(hw, &desc_cb); - } - memset((void *)desc, 0, sizeof(struct i40e_aq_desc)); - memset((void *)details, 0, - sizeof(struct i40e_asq_cmd_details)); - ntc++; - if (ntc == asq->count) - ntc = 0; - desc = I40E_ADMINQ_DESC(*asq, ntc); - details = I40E_ADMINQ_DETAILS(*asq, ntc); - } - - asq->next_to_clean = ntc; - - return I40E_DESC_UNUSED(asq); -} - -/** - * i40evf_asq_done - check if FW has processed the Admin Send Queue - * @hw: pointer to the hw struct - * - * Returns true if the firmware has processed all descriptors on the - * admin send queue. Returns false if there are still requests pending. - **/ -bool i40evf_asq_done(struct i40e_hw *hw) -{ - /* AQ designers suggest use of head for better - * timing reliability than DD bit - */ - return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use; - -} - -/** - * i40evf_asq_send_command - send command to Admin Queue - * @hw: pointer to the hw struct - * @desc: prefilled descriptor describing the command (non DMA mem) - * @buff: buffer to use for indirect commands - * @buff_size: size of buffer for indirect commands - * @cmd_details: pointer to command details structure - * - * This is the main send command driver routine for the Admin Queue send - * queue. 
It runs the queue, cleans the queue, etc - **/ -i40e_status i40evf_asq_send_command(struct i40e_hw *hw, - struct i40e_aq_desc *desc, - void *buff, /* can be NULL */ - u16 buff_size, - struct i40e_asq_cmd_details *cmd_details) -{ - i40e_status status = 0; - struct i40e_dma_mem *dma_buff = NULL; - struct i40e_asq_cmd_details *details; - struct i40e_aq_desc *desc_on_ring; - bool cmd_completed = false; - u16 retval = 0; - u32 val = 0; - - mutex_lock(&hw->aq.asq_mutex); - - if (hw->aq.asq.count == 0) { - i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, - "AQTX: Admin queue not initialized.\n"); - status = I40E_ERR_QUEUE_EMPTY; - goto asq_send_command_error; - } - - hw->aq.asq_last_status = I40E_AQ_RC_OK; - - val = rd32(hw, hw->aq.asq.head); - if (val >= hw->aq.num_asq_entries) { - i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, - "AQTX: head overrun at %d\n", val); - status = I40E_ERR_QUEUE_EMPTY; - goto asq_send_command_error; - } - - details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use); - if (cmd_details) { - *details = *cmd_details; - - /* If the cmd_details are defined copy the cookie. The - * cpu_to_le32 is not needed here because the data is ignored - * by the FW, only used by the driver - */ - if (details->cookie) { - desc->cookie_high = - cpu_to_le32(upper_32_bits(details->cookie)); - desc->cookie_low = - cpu_to_le32(lower_32_bits(details->cookie)); - } - } else { - memset(details, 0, sizeof(struct i40e_asq_cmd_details)); - } - - /* clear requested flags and then set additional flags if defined */ - desc->flags &= ~cpu_to_le16(details->flags_dis); - desc->flags |= cpu_to_le16(details->flags_ena); - - if (buff_size > hw->aq.asq_buf_size) { - i40e_debug(hw, - I40E_DEBUG_AQ_MESSAGE, - "AQTX: Invalid buffer size: %d.\n", - buff_size); - status = I40E_ERR_INVALID_SIZE; - goto asq_send_command_error; - } - - if (details->postpone && !details->async) { - i40e_debug(hw, - I40E_DEBUG_AQ_MESSAGE, - "AQTX: Async flag not set along with postpone flag"); - status = I40E_ERR_PARAM; - goto asq_send_command_error; - } - - /* call clean and check queue available function to reclaim the - * descriptors that were processed by FW, the function returns the - * number of desc available - */ - /* the clean function called here could be called in a separate thread - * in case of asynchronous completions - */ - if (i40e_clean_asq(hw) == 0) { - i40e_debug(hw, - I40E_DEBUG_AQ_MESSAGE, - "AQTX: Error queue is full.\n"); - status = I40E_ERR_ADMIN_QUEUE_FULL; - goto asq_send_command_error; - } - - /* initialize the temp desc pointer with the right desc */ - desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use); - - /* if the desc is available copy the temp desc to the right place */ - *desc_on_ring = *desc; - - /* if buff is not NULL assume indirect command */ - if (buff != NULL) { - dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]); - /* copy the user buff into the respective DMA buff */ - memcpy(dma_buff->va, buff, buff_size); - desc_on_ring->datalen = cpu_to_le16(buff_size); - - /* Update the address values in the desc with the pa value - * for respective buffer - */ - desc_on_ring->params.external.addr_high = - cpu_to_le32(upper_32_bits(dma_buff->pa)); - desc_on_ring->params.external.addr_low = - cpu_to_le32(lower_32_bits(dma_buff->pa)); - } - - /* bump the tail */ - i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n"); - i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, - buff, buff_size); - (hw->aq.asq.next_to_use)++; - if (hw->aq.asq.next_to_use == hw->aq.asq.count) - 
hw->aq.asq.next_to_use = 0; - if (!details->postpone) - wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use); - - /* if cmd_details are not defined or async flag is not set, - * we need to wait for desc write back - */ - if (!details->async && !details->postpone) { - u32 total_delay = 0; - - do { - /* AQ designers suggest use of head for better - * timing reliability than DD bit - */ - if (i40evf_asq_done(hw)) - break; - udelay(50); - total_delay += 50; - } while (total_delay < hw->aq.asq_cmd_timeout); - } - - /* if ready, copy the desc back to temp */ - if (i40evf_asq_done(hw)) { - *desc = *desc_on_ring; - if (buff != NULL) - memcpy(buff, dma_buff->va, buff_size); - retval = le16_to_cpu(desc->retval); - if (retval != 0) { - i40e_debug(hw, - I40E_DEBUG_AQ_MESSAGE, - "AQTX: Command completed with error 0x%X.\n", - retval); - - /* strip off FW internal code */ - retval &= 0xff; - } - cmd_completed = true; - if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK) - status = 0; - else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY) - status = I40E_ERR_NOT_READY; - else - status = I40E_ERR_ADMIN_QUEUE_ERROR; - hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval; - } - - i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, - "AQTX: desc and buffer writeback:\n"); - i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, - buff_size); - - /* save writeback aq if requested */ - if (details->wb_desc) - *details->wb_desc = *desc_on_ring; - - /* update the error if time out occurred */ - if ((!cmd_completed) && - (!details->async && !details->postpone)) { - if (rd32(hw, hw->aq.asq.len) & I40E_VF_ATQLEN1_ATQCRIT_MASK) { - i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, - "AQTX: AQ Critical error.\n"); - status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR; - } else { - i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, - "AQTX: Writeback timeout.\n"); - status = I40E_ERR_ADMIN_QUEUE_TIMEOUT; - } - } - -asq_send_command_error: - mutex_unlock(&hw->aq.asq_mutex); - return status; -} - -/** - * i40evf_fill_default_direct_cmd_desc - AQ descriptor helper function - * @desc: pointer to the temp descriptor (non DMA mem) - * @opcode: the opcode can be used to decide which flags to turn off or on - * - * Fill the desc with default values - **/ -void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, - u16 opcode) -{ - /* zero out the desc */ - memset((void *)desc, 0, sizeof(struct i40e_aq_desc)); - desc->opcode = cpu_to_le16(opcode); - desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI); -} - -/** - * i40evf_clean_arq_element - * @hw: pointer to the hw struct - * @e: event info from the receive descriptor, includes any buffers - * @pending: number of events that could be left to process - * - * This function cleans one Admin Receive Queue element and returns - * the contents through e. 
It can also return how many events are - * left to process through 'pending' - **/ -i40e_status i40evf_clean_arq_element(struct i40e_hw *hw, - struct i40e_arq_event_info *e, - u16 *pending) -{ - i40e_status ret_code = 0; - u16 ntc = hw->aq.arq.next_to_clean; - struct i40e_aq_desc *desc; - struct i40e_dma_mem *bi; - u16 desc_idx; - u16 datalen; - u16 flags; - u16 ntu; - - /* pre-clean the event info */ - memset(&e->desc, 0, sizeof(e->desc)); - - /* take the lock before we start messing with the ring */ - mutex_lock(&hw->aq.arq_mutex); - - if (hw->aq.arq.count == 0) { - i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, - "AQRX: Admin queue not initialized.\n"); - ret_code = I40E_ERR_QUEUE_EMPTY; - goto clean_arq_element_err; - } - - /* set next_to_use to head */ - ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK; - if (ntu == ntc) { - /* nothing to do - shouldn't need to update ring's values */ - ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK; - goto clean_arq_element_out; - } - - /* now clean the next descriptor */ - desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc); - desc_idx = ntc; - - hw->aq.arq_last_status = - (enum i40e_admin_queue_err)le16_to_cpu(desc->retval); - flags = le16_to_cpu(desc->flags); - if (flags & I40E_AQ_FLAG_ERR) { - ret_code = I40E_ERR_ADMIN_QUEUE_ERROR; - i40e_debug(hw, - I40E_DEBUG_AQ_MESSAGE, - "AQRX: Event received with error 0x%X.\n", - hw->aq.arq_last_status); - } - - e->desc = *desc; - datalen = le16_to_cpu(desc->datalen); - e->msg_len = min(datalen, e->buf_len); - if (e->msg_buf != NULL && (e->msg_len != 0)) - memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va, - e->msg_len); - - i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n"); - i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf, - hw->aq.arq_buf_size); - - /* Restore the original datalen and buffer address in the desc, - * FW updates datalen to indicate the event message - * size - */ - bi = &hw->aq.arq.r.arq_bi[ntc]; - memset((void *)desc, 0, sizeof(struct i40e_aq_desc)); - - desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF); - if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF) - desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB); - desc->datalen = cpu_to_le16((u16)bi->size); - desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa)); - desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa)); - - /* set tail = the last cleaned desc index. */ - wr32(hw, hw->aq.arq.tail, ntc); - /* ntc is updated to tail + 1 */ - ntc++; - if (ntc == hw->aq.num_arq_entries) - ntc = 0; - hw->aq.arq.next_to_clean = ntc; - hw->aq.arq.next_to_use = ntu; - -clean_arq_element_out: - /* Set pending if needed, unlock and return */ - if (pending != NULL) - *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc); - -clean_arq_element_err: - mutex_unlock(&hw->aq.arq_mutex); - - return ret_code; -} - -void i40evf_resume_aq(struct i40e_hw *hw) -{ - /* Registers are reset after PF reset */ - hw->aq.asq.next_to_use = 0; - hw->aq.asq.next_to_clean = 0; - - i40e_config_asq_regs(hw); - - hw->aq.arq.next_to_use = 0; - hw->aq.arq.next_to_clean = 0; - - i40e_config_arq_regs(hw); -} diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h deleted file mode 100644 index 1f264b9b6805..000000000000 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h +++ /dev/null @@ -1,136 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ - -#ifndef _I40E_ADMINQ_H_ -#define _I40E_ADMINQ_H_ - -#include "i40e_osdep.h" -#include "i40e_status.h" -#include "i40e_adminq_cmd.h" - -#define I40E_ADMINQ_DESC(R, i) \ - (&(((struct i40e_aq_desc *)((R).desc_buf.va))[i])) - -#define I40E_ADMINQ_DESC_ALIGNMENT 4096 - -struct i40e_adminq_ring { - struct i40e_virt_mem dma_head; /* space for dma structures */ - struct i40e_dma_mem desc_buf; /* descriptor ring memory */ - struct i40e_virt_mem cmd_buf; /* command buffer memory */ - - union { - struct i40e_dma_mem *asq_bi; - struct i40e_dma_mem *arq_bi; - } r; - - u16 count; /* Number of descriptors */ - u16 rx_buf_len; /* Admin Receive Queue buffer length */ - - /* used for interrupt processing */ - u16 next_to_use; - u16 next_to_clean; - - /* used for queue tracking */ - u32 head; - u32 tail; - u32 len; - u32 bah; - u32 bal; -}; - -/* ASQ transaction details */ -struct i40e_asq_cmd_details { - void *callback; /* cast from type I40E_ADMINQ_CALLBACK */ - u64 cookie; - u16 flags_ena; - u16 flags_dis; - bool async; - bool postpone; - struct i40e_aq_desc *wb_desc; -}; - -#define I40E_ADMINQ_DETAILS(R, i) \ - (&(((struct i40e_asq_cmd_details *)((R).cmd_buf.va))[i])) - -/* ARQ event information */ -struct i40e_arq_event_info { - struct i40e_aq_desc desc; - u16 msg_len; - u16 buf_len; - u8 *msg_buf; -}; - -/* Admin Queue information */ -struct i40e_adminq_info { - struct i40e_adminq_ring arq; /* receive queue */ - struct i40e_adminq_ring asq; /* send queue */ - u32 asq_cmd_timeout; /* send queue cmd write back timeout*/ - u16 num_arq_entries; /* receive queue depth */ - u16 num_asq_entries; /* send queue depth */ - u16 arq_buf_size; /* receive queue buffer size */ - u16 asq_buf_size; /* send queue buffer size */ - u16 fw_maj_ver; /* firmware major version */ - u16 fw_min_ver; /* firmware minor version */ - u32 fw_build; /* firmware build number */ - u16 api_maj_ver; /* api major version */ - u16 api_min_ver; /* api minor version */ - - struct mutex asq_mutex; /* Send queue lock */ - struct mutex arq_mutex; /* Receive queue lock */ - - /* last status values on send and receive queues */ - enum i40e_admin_queue_err asq_last_status; - enum i40e_admin_queue_err arq_last_status; -}; - -/** - * i40e_aq_rc_to_posix - convert errors to user-land codes - * aq_ret: AdminQ handler error code can override aq_rc - * aq_rc: AdminQ firmware error code to convert - **/ -static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc) -{ - int aq_to_posix[] = { - 0, /* I40E_AQ_RC_OK */ - -EPERM, /* I40E_AQ_RC_EPERM */ - -ENOENT, /* I40E_AQ_RC_ENOENT */ - -ESRCH, /* I40E_AQ_RC_ESRCH */ - -EINTR, /* I40E_AQ_RC_EINTR */ - -EIO, /* I40E_AQ_RC_EIO */ - -ENXIO, /* I40E_AQ_RC_ENXIO */ - -E2BIG, /* I40E_AQ_RC_E2BIG */ - -EAGAIN, /* I40E_AQ_RC_EAGAIN */ - -ENOMEM, /* I40E_AQ_RC_ENOMEM */ - -EACCES, /* I40E_AQ_RC_EACCES */ - -EFAULT, /* I40E_AQ_RC_EFAULT */ - -EBUSY, /* I40E_AQ_RC_EBUSY */ - -EEXIST, /* I40E_AQ_RC_EEXIST */ - -EINVAL, /* I40E_AQ_RC_EINVAL */ - -ENOTTY, /* I40E_AQ_RC_ENOTTY */ - -ENOSPC, /* I40E_AQ_RC_ENOSPC */ - -ENOSYS, /* I40E_AQ_RC_ENOSYS */ - -ERANGE, /* I40E_AQ_RC_ERANGE */ - -EPIPE, /* I40E_AQ_RC_EFLUSHED */ - -ESPIPE, /* I40E_AQ_RC_BAD_ADDR */ - -EROFS, /* I40E_AQ_RC_EMODE */ - -EFBIG, /* I40E_AQ_RC_EFBIG */ - }; - - /* aq_rc is invalid if AQ timed out */ - if (aq_ret == I40E_ERR_ADMIN_QUEUE_TIMEOUT) - return -EAGAIN; - - if (!((u32)aq_rc < (sizeof(aq_to_posix) / sizeof((aq_to_posix)[0])))) - return -ERANGE; - - return aq_to_posix[aq_rc]; -} - -/* general information */ -#define I40E_AQ_LARGE_BUF 512 
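/*
 * A minimal sketch, not part of the original driver, of how the pieces
 * above fit together: populate the four sizing fields that
 * i40evf_init_adminq() checks, issue one direct (buffer-less) command,
 * and translate a firmware failure into an errno with
 * i40e_aq_rc_to_posix(). The function name and the sizing values here
 * are illustrative only.
 */
static int example_adminq_bringup(struct i40e_hw *hw, u16 opcode)
{
	struct i40e_aq_desc desc;
	i40e_status ret;

	/* i40evf_init_adminq() rejects a zero in any of these fields */
	hw->aq.num_asq_entries = 32;
	hw->aq.num_arq_entries = 32;
	hw->aq.asq_buf_size = 4096;
	hw->aq.arq_buf_size = 4096;

	ret = i40evf_init_adminq(hw);
	if (ret)
		return -EIO;	/* illustrative mapping for an init failure */

	i40evf_fill_default_direct_cmd_desc(&desc, opcode);
	ret = i40evf_asq_send_command(hw, &desc, NULL, 0, NULL);
	if (ret)
		/* asq_last_status holds the firmware's own return code */
		return i40e_aq_rc_to_posix(ret, hw->aq.asq_last_status);

	return 0;
}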
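/*
 * A second sketch, also illustrative rather than from the original
 * source: drain pending events from the Admin Receive Queue with
 * i40evf_clean_arq_element(), shown earlier in this patch. The caller
 * owns the message buffer; the function name is hypothetical.
 */
static void example_drain_arq(struct i40e_hw *hw, u8 *buf, u16 buf_len)
{
	struct i40e_arq_event_info event;
	u16 pending = 0;

	event.buf_len = buf_len;
	event.msg_buf = buf;

	do {
		/* returns I40E_ERR_ADMIN_QUEUE_NO_WORK once the queue is empty */
		if (i40evf_clean_arq_element(hw, &event, &pending))
			break;
		/* event.desc and event.msg_len now describe one received event */
	} while (pending);
}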
-#define I40E_ASQ_CMD_TIMEOUT 250000 /* usecs */ - -void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, - u16 opcode); - -#endif /* _I40E_ADMINQ_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h deleted file mode 100644 index 5fd8529465d4..000000000000 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ /dev/null @@ -1,2717 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ - -#ifndef _I40E_ADMINQ_CMD_H_ -#define _I40E_ADMINQ_CMD_H_ - -/* This header file defines the i40e Admin Queue commands and is shared between - * i40e Firmware and Software. - * - * This file needs to comply with the Linux Kernel coding style. - */ - -#define I40E_FW_API_VERSION_MAJOR 0x0001 -#define I40E_FW_API_VERSION_MINOR_X722 0x0005 -#define I40E_FW_API_VERSION_MINOR_X710 0x0007 - -#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \ - I40E_FW_API_VERSION_MINOR_X710 : \ - I40E_FW_API_VERSION_MINOR_X722) - -/* API version 1.7 implements additional link and PHY-specific APIs */ -#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007 - -struct i40e_aq_desc { - __le16 flags; - __le16 opcode; - __le16 datalen; - __le16 retval; - __le32 cookie_high; - __le32 cookie_low; - union { - struct { - __le32 param0; - __le32 param1; - __le32 param2; - __le32 param3; - } internal; - struct { - __le32 param0; - __le32 param1; - __le32 addr_high; - __le32 addr_low; - } external; - u8 raw[16]; - } params; -}; - -/* Flags sub-structure - * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 | - * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE | - */ - -/* command flags and offsets*/ -#define I40E_AQ_FLAG_DD_SHIFT 0 -#define I40E_AQ_FLAG_CMP_SHIFT 1 -#define I40E_AQ_FLAG_ERR_SHIFT 2 -#define I40E_AQ_FLAG_VFE_SHIFT 3 -#define I40E_AQ_FLAG_LB_SHIFT 9 -#define I40E_AQ_FLAG_RD_SHIFT 10 -#define I40E_AQ_FLAG_VFC_SHIFT 11 -#define I40E_AQ_FLAG_BUF_SHIFT 12 -#define I40E_AQ_FLAG_SI_SHIFT 13 -#define I40E_AQ_FLAG_EI_SHIFT 14 -#define I40E_AQ_FLAG_FE_SHIFT 15 - -#define I40E_AQ_FLAG_DD BIT(I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */ -#define I40E_AQ_FLAG_CMP BIT(I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */ -#define I40E_AQ_FLAG_ERR BIT(I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */ -#define I40E_AQ_FLAG_VFE BIT(I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */ -#define I40E_AQ_FLAG_LB BIT(I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */ -#define I40E_AQ_FLAG_RD BIT(I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */ -#define I40E_AQ_FLAG_VFC BIT(I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */ -#define I40E_AQ_FLAG_BUF BIT(I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ -#define I40E_AQ_FLAG_SI BIT(I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */ -#define I40E_AQ_FLAG_EI BIT(I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */ -#define I40E_AQ_FLAG_FE BIT(I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */ - -/* error codes */ -enum i40e_admin_queue_err { - I40E_AQ_RC_OK = 0, /* success */ - I40E_AQ_RC_EPERM = 1, /* Operation not permitted */ - I40E_AQ_RC_ENOENT = 2, /* No such element */ - I40E_AQ_RC_ESRCH = 3, /* Bad opcode */ - I40E_AQ_RC_EINTR = 4, /* operation interrupted */ - I40E_AQ_RC_EIO = 5, /* I/O error */ - I40E_AQ_RC_ENXIO = 6, /* No such resource */ - I40E_AQ_RC_E2BIG = 7, /* Arg too long */ - I40E_AQ_RC_EAGAIN = 8, /* Try again */ - I40E_AQ_RC_ENOMEM = 9, /* Out of memory */ - I40E_AQ_RC_EACCES = 10, /* Permission denied */ - I40E_AQ_RC_EFAULT = 11, /* Bad address */ - I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */ - I40E_AQ_RC_EEXIST = 13, /* object already exists */ - I40E_AQ_RC_EINVAL = 14, 
/* Invalid argument */ - I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */ - I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */ - I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */ - I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */ - I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */ - I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */ - I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */ - I40E_AQ_RC_EFBIG = 22, /* File too large */ -}; - -/* Admin Queue command opcodes */ -enum i40e_admin_queue_opc { - /* aq commands */ - i40e_aqc_opc_get_version = 0x0001, - i40e_aqc_opc_driver_version = 0x0002, - i40e_aqc_opc_queue_shutdown = 0x0003, - i40e_aqc_opc_set_pf_context = 0x0004, - - /* resource ownership */ - i40e_aqc_opc_request_resource = 0x0008, - i40e_aqc_opc_release_resource = 0x0009, - - i40e_aqc_opc_list_func_capabilities = 0x000A, - i40e_aqc_opc_list_dev_capabilities = 0x000B, - - /* Proxy commands */ - i40e_aqc_opc_set_proxy_config = 0x0104, - i40e_aqc_opc_set_ns_proxy_table_entry = 0x0105, - - /* LAA */ - i40e_aqc_opc_mac_address_read = 0x0107, - i40e_aqc_opc_mac_address_write = 0x0108, - - /* PXE */ - i40e_aqc_opc_clear_pxe_mode = 0x0110, - - /* WoL commands */ - i40e_aqc_opc_set_wol_filter = 0x0120, - i40e_aqc_opc_get_wake_reason = 0x0121, - - /* internal switch commands */ - i40e_aqc_opc_get_switch_config = 0x0200, - i40e_aqc_opc_add_statistics = 0x0201, - i40e_aqc_opc_remove_statistics = 0x0202, - i40e_aqc_opc_set_port_parameters = 0x0203, - i40e_aqc_opc_get_switch_resource_alloc = 0x0204, - i40e_aqc_opc_set_switch_config = 0x0205, - i40e_aqc_opc_rx_ctl_reg_read = 0x0206, - i40e_aqc_opc_rx_ctl_reg_write = 0x0207, - - i40e_aqc_opc_add_vsi = 0x0210, - i40e_aqc_opc_update_vsi_parameters = 0x0211, - i40e_aqc_opc_get_vsi_parameters = 0x0212, - - i40e_aqc_opc_add_pv = 0x0220, - i40e_aqc_opc_update_pv_parameters = 0x0221, - i40e_aqc_opc_get_pv_parameters = 0x0222, - - i40e_aqc_opc_add_veb = 0x0230, - i40e_aqc_opc_update_veb_parameters = 0x0231, - i40e_aqc_opc_get_veb_parameters = 0x0232, - - i40e_aqc_opc_delete_element = 0x0243, - - i40e_aqc_opc_add_macvlan = 0x0250, - i40e_aqc_opc_remove_macvlan = 0x0251, - i40e_aqc_opc_add_vlan = 0x0252, - i40e_aqc_opc_remove_vlan = 0x0253, - i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254, - i40e_aqc_opc_add_tag = 0x0255, - i40e_aqc_opc_remove_tag = 0x0256, - i40e_aqc_opc_add_multicast_etag = 0x0257, - i40e_aqc_opc_remove_multicast_etag = 0x0258, - i40e_aqc_opc_update_tag = 0x0259, - i40e_aqc_opc_add_control_packet_filter = 0x025A, - i40e_aqc_opc_remove_control_packet_filter = 0x025B, - i40e_aqc_opc_add_cloud_filters = 0x025C, - i40e_aqc_opc_remove_cloud_filters = 0x025D, - i40e_aqc_opc_clear_wol_switch_filters = 0x025E, - - i40e_aqc_opc_add_mirror_rule = 0x0260, - i40e_aqc_opc_delete_mirror_rule = 0x0261, - - /* Dynamic Device Personalization */ - i40e_aqc_opc_write_personalization_profile = 0x0270, - i40e_aqc_opc_get_personalization_profile_list = 0x0271, - - /* DCB commands */ - i40e_aqc_opc_dcb_ignore_pfc = 0x0301, - i40e_aqc_opc_dcb_updated = 0x0302, - i40e_aqc_opc_set_dcb_parameters = 0x0303, - - /* TX scheduler */ - i40e_aqc_opc_configure_vsi_bw_limit = 0x0400, - i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406, - i40e_aqc_opc_configure_vsi_tc_bw = 0x0407, - i40e_aqc_opc_query_vsi_bw_config = 0x0408, - i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A, - i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410, - - i40e_aqc_opc_enable_switching_comp_ets = 0x0413, - 
i40e_aqc_opc_modify_switching_comp_ets = 0x0414, - i40e_aqc_opc_disable_switching_comp_ets = 0x0415, - i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416, - i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417, - i40e_aqc_opc_query_switching_comp_ets_config = 0x0418, - i40e_aqc_opc_query_port_ets_config = 0x0419, - i40e_aqc_opc_query_switching_comp_bw_config = 0x041A, - i40e_aqc_opc_suspend_port_tx = 0x041B, - i40e_aqc_opc_resume_port_tx = 0x041C, - i40e_aqc_opc_configure_partition_bw = 0x041D, - /* hmc */ - i40e_aqc_opc_query_hmc_resource_profile = 0x0500, - i40e_aqc_opc_set_hmc_resource_profile = 0x0501, - - /* phy commands*/ - i40e_aqc_opc_get_phy_abilities = 0x0600, - i40e_aqc_opc_set_phy_config = 0x0601, - i40e_aqc_opc_set_mac_config = 0x0603, - i40e_aqc_opc_set_link_restart_an = 0x0605, - i40e_aqc_opc_get_link_status = 0x0607, - i40e_aqc_opc_set_phy_int_mask = 0x0613, - i40e_aqc_opc_get_local_advt_reg = 0x0614, - i40e_aqc_opc_set_local_advt_reg = 0x0615, - i40e_aqc_opc_get_partner_advt = 0x0616, - i40e_aqc_opc_set_lb_modes = 0x0618, - i40e_aqc_opc_get_phy_wol_caps = 0x0621, - i40e_aqc_opc_set_phy_debug = 0x0622, - i40e_aqc_opc_upload_ext_phy_fm = 0x0625, - i40e_aqc_opc_run_phy_activity = 0x0626, - i40e_aqc_opc_set_phy_register = 0x0628, - i40e_aqc_opc_get_phy_register = 0x0629, - - /* NVM commands */ - i40e_aqc_opc_nvm_read = 0x0701, - i40e_aqc_opc_nvm_erase = 0x0702, - i40e_aqc_opc_nvm_update = 0x0703, - i40e_aqc_opc_nvm_config_read = 0x0704, - i40e_aqc_opc_nvm_config_write = 0x0705, - i40e_aqc_opc_oem_post_update = 0x0720, - i40e_aqc_opc_thermal_sensor = 0x0721, - - /* virtualization commands */ - i40e_aqc_opc_send_msg_to_pf = 0x0801, - i40e_aqc_opc_send_msg_to_vf = 0x0802, - i40e_aqc_opc_send_msg_to_peer = 0x0803, - - /* alternate structure */ - i40e_aqc_opc_alternate_write = 0x0900, - i40e_aqc_opc_alternate_write_indirect = 0x0901, - i40e_aqc_opc_alternate_read = 0x0902, - i40e_aqc_opc_alternate_read_indirect = 0x0903, - i40e_aqc_opc_alternate_write_done = 0x0904, - i40e_aqc_opc_alternate_set_mode = 0x0905, - i40e_aqc_opc_alternate_clear_port = 0x0906, - - /* LLDP commands */ - i40e_aqc_opc_lldp_get_mib = 0x0A00, - i40e_aqc_opc_lldp_update_mib = 0x0A01, - i40e_aqc_opc_lldp_add_tlv = 0x0A02, - i40e_aqc_opc_lldp_update_tlv = 0x0A03, - i40e_aqc_opc_lldp_delete_tlv = 0x0A04, - i40e_aqc_opc_lldp_stop = 0x0A05, - i40e_aqc_opc_lldp_start = 0x0A06, - - /* Tunnel commands */ - i40e_aqc_opc_add_udp_tunnel = 0x0B00, - i40e_aqc_opc_del_udp_tunnel = 0x0B01, - i40e_aqc_opc_set_rss_key = 0x0B02, - i40e_aqc_opc_set_rss_lut = 0x0B03, - i40e_aqc_opc_get_rss_key = 0x0B04, - i40e_aqc_opc_get_rss_lut = 0x0B05, - - /* Async Events */ - i40e_aqc_opc_event_lan_overflow = 0x1001, - - /* OEM commands */ - i40e_aqc_opc_oem_parameter_change = 0xFE00, - i40e_aqc_opc_oem_device_status_change = 0xFE01, - i40e_aqc_opc_oem_ocsd_initialize = 0xFE02, - i40e_aqc_opc_oem_ocbb_initialize = 0xFE03, - - /* debug commands */ - i40e_aqc_opc_debug_read_reg = 0xFF03, - i40e_aqc_opc_debug_write_reg = 0xFF04, - i40e_aqc_opc_debug_modify_reg = 0xFF07, - i40e_aqc_opc_debug_dump_internals = 0xFF08, -}; - -/* command structures and indirect data structures */ - -/* Structure naming conventions: - * - no suffix for direct command descriptor structures - * - _data for indirect sent data - * - _resp for indirect return data (data which is both will use _data) - * - _completion for direct return data - * - _element_ for repeated elements (may also be _data or _resp) - * - * Command structures are expected to overlay the 
params.raw member of the basic - * descriptor, and as such cannot exceed 16 bytes in length. - */ - -/* This macro is used to generate a compilation error if a structure - * is not exactly the correct length. It gives a divide by zero error if the - * structure is not of the correct size, otherwise it creates an enum that is - * never used. - */ -#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \ - { i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) } - -/* This macro is used extensively to ensure that command structures are 16 - * bytes in length as they have to map to the raw array of that size. - */ -#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X) - -/* internal (0x00XX) commands */ - -/* Get version (direct 0x0001) */ -struct i40e_aqc_get_version { - __le32 rom_ver; - __le32 fw_build; - __le16 fw_major; - __le16 fw_minor; - __le16 api_major; - __le16 api_minor; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version); - -/* Send driver version (indirect 0x0002) */ -struct i40e_aqc_driver_version { - u8 driver_major_ver; - u8 driver_minor_ver; - u8 driver_build_ver; - u8 driver_subbuild_ver; - u8 reserved[4]; - __le32 address_high; - __le32 address_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version); - -/* Queue Shutdown (direct 0x0003) */ -struct i40e_aqc_queue_shutdown { - __le32 driver_unloading; -#define I40E_AQ_DRIVER_UNLOADING 0x1 - u8 reserved[12]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown); - -/* Set PF context (0x0004, direct) */ -struct i40e_aqc_set_pf_context { - u8 pf_id; - u8 reserved[15]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context); - -/* Request resource ownership (direct 0x0008) - * Release resource ownership (direct 0x0009) - */ -#define I40E_AQ_RESOURCE_NVM 1 -#define I40E_AQ_RESOURCE_SDP 2 -#define I40E_AQ_RESOURCE_ACCESS_READ 1 -#define I40E_AQ_RESOURCE_ACCESS_WRITE 2 -#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000 -#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000 - -struct i40e_aqc_request_resource { - __le16 resource_id; - __le16 access_type; - __le32 timeout; - __le32 resource_number; - u8 reserved[4]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource); - -/* Get function capabilities (indirect 0x000A) - * Get device capabilities (indirect 0x000B) - */ -struct i40e_aqc_list_capabilites { - u8 command_flags; -#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1 - u8 pf_index; - u8 reserved[2]; - __le32 count; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites); - -struct i40e_aqc_list_capabilities_element_resp { - __le16 id; - u8 major_rev; - u8 minor_rev; - __le32 number; - __le32 logical_id; - __le32 phys_id; - u8 reserved[16]; -}; - -/* list of caps */ - -#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001 -#define I40E_AQ_CAP_ID_MNG_MODE 0x0002 -#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003 -#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004 -#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005 -#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006 -#define I40E_AQ_CAP_ID_WOL_AND_PROXY 0x0008 -#define I40E_AQ_CAP_ID_SRIOV 0x0012 -#define I40E_AQ_CAP_ID_VF 0x0013 -#define I40E_AQ_CAP_ID_VMDQ 0x0014 -#define I40E_AQ_CAP_ID_8021QBG 0x0015 -#define I40E_AQ_CAP_ID_8021QBR 0x0016 -#define I40E_AQ_CAP_ID_VSI 0x0017 -#define I40E_AQ_CAP_ID_DCB 0x0018 -#define I40E_AQ_CAP_ID_FCOE 0x0021 -#define I40E_AQ_CAP_ID_ISCSI 0x0022 -#define I40E_AQ_CAP_ID_RSS 0x0040 -#define I40E_AQ_CAP_ID_RXQ 0x0041 -#define I40E_AQ_CAP_ID_TXQ 0x0042 -#define I40E_AQ_CAP_ID_MSIX 0x0043 -#define I40E_AQ_CAP_ID_VF_MSIX 0x0044 
-#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045 -#define I40E_AQ_CAP_ID_1588 0x0046 -#define I40E_AQ_CAP_ID_IWARP 0x0051 -#define I40E_AQ_CAP_ID_LED 0x0061 -#define I40E_AQ_CAP_ID_SDP 0x0062 -#define I40E_AQ_CAP_ID_MDIO 0x0063 -#define I40E_AQ_CAP_ID_WSR_PROT 0x0064 -#define I40E_AQ_CAP_ID_NVM_MGMT 0x0080 -#define I40E_AQ_CAP_ID_FLEX10 0x00F1 -#define I40E_AQ_CAP_ID_CEM 0x00F2 - -/* Set CPPM Configuration (direct 0x0103) */ -struct i40e_aqc_cppm_configuration { - __le16 command_flags; -#define I40E_AQ_CPPM_EN_LTRC 0x0800 -#define I40E_AQ_CPPM_EN_DMCTH 0x1000 -#define I40E_AQ_CPPM_EN_DMCTLX 0x2000 -#define I40E_AQ_CPPM_EN_HPTC 0x4000 -#define I40E_AQ_CPPM_EN_DMARC 0x8000 - __le16 ttlx; - __le32 dmacr; - __le16 dmcth; - u8 hptc; - u8 reserved; - __le32 pfltrc; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration); - -/* Set ARP Proxy command / response (indirect 0x0104) */ -struct i40e_aqc_arp_proxy_data { - __le16 command_flags; -#define I40E_AQ_ARP_INIT_IPV4 0x0800 -#define I40E_AQ_ARP_UNSUP_CTL 0x1000 -#define I40E_AQ_ARP_ENA 0x2000 -#define I40E_AQ_ARP_ADD_IPV4 0x4000 -#define I40E_AQ_ARP_DEL_IPV4 0x8000 - __le16 table_id; - __le32 enabled_offloads; -#define I40E_AQ_ARP_DIRECTED_OFFLOAD_ENABLE 0x00000020 -#define I40E_AQ_ARP_OFFLOAD_ENABLE 0x00000800 - __le32 ip_addr; - u8 mac_addr[6]; - u8 reserved[2]; -}; - -I40E_CHECK_STRUCT_LEN(0x14, i40e_aqc_arp_proxy_data); - -/* Set NS Proxy Table Entry Command (indirect 0x0105) */ -struct i40e_aqc_ns_proxy_data { - __le16 table_idx_mac_addr_0; - __le16 table_idx_mac_addr_1; - __le16 table_idx_ipv6_0; - __le16 table_idx_ipv6_1; - __le16 control; -#define I40E_AQ_NS_PROXY_ADD_0 0x0001 -#define I40E_AQ_NS_PROXY_DEL_0 0x0002 -#define I40E_AQ_NS_PROXY_ADD_1 0x0004 -#define I40E_AQ_NS_PROXY_DEL_1 0x0008 -#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x0010 -#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x0020 -#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x0040 -#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x0080 -#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0100 -#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0200 -#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0400 -#define I40E_AQ_NS_PROXY_OFFLOAD_ENABLE 0x0800 -#define I40E_AQ_NS_PROXY_DIRECTED_OFFLOAD_ENABLE 0x1000 - u8 mac_addr_0[6]; - u8 mac_addr_1[6]; - u8 local_mac_addr[6]; - u8 ipv6_addr_0[16]; /* Warning! 
spec specifies BE byte order */ - u8 ipv6_addr_1[16]; -}; - -I40E_CHECK_STRUCT_LEN(0x3c, i40e_aqc_ns_proxy_data); - -/* Manage LAA Command (0x0106) - obsolete */ -struct i40e_aqc_mng_laa { - __le16 command_flags; -#define I40E_AQ_LAA_FLAG_WR 0x8000 - u8 reserved[2]; - __le32 sal; - __le16 sah; - u8 reserved2[6]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_mng_laa); - -/* Manage MAC Address Read Command (indirect 0x0107) */ -struct i40e_aqc_mac_address_read { - __le16 command_flags; -#define I40E_AQC_LAN_ADDR_VALID 0x10 -#define I40E_AQC_SAN_ADDR_VALID 0x20 -#define I40E_AQC_PORT_ADDR_VALID 0x40 -#define I40E_AQC_WOL_ADDR_VALID 0x80 -#define I40E_AQC_MC_MAG_EN_VALID 0x100 -#define I40E_AQC_ADDR_VALID_MASK 0x3F0 - u8 reserved[6]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read); - -struct i40e_aqc_mac_address_read_data { - u8 pf_lan_mac[6]; - u8 pf_san_mac[6]; - u8 port_mac[6]; - u8 pf_wol_mac[6]; -}; - -I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data); - -/* Manage MAC Address Write Command (0x0108) */ -struct i40e_aqc_mac_address_write { - __le16 command_flags; -#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000 -#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000 -#define I40E_AQC_WRITE_TYPE_PORT 0x8000 -#define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG 0xC000 -#define I40E_AQC_WRITE_TYPE_MASK 0xC000 - - __le16 mac_sah; - __le32 mac_sal; - u8 reserved[8]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write); - -/* PXE commands (0x011x) */ - -/* Clear PXE Command and response (direct 0x0110) */ -struct i40e_aqc_clear_pxe { - u8 rx_cnt; - u8 reserved[15]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe); - -/* Set WoL Filter (0x0120) */ - -struct i40e_aqc_set_wol_filter { - __le16 filter_index; -#define I40E_AQC_MAX_NUM_WOL_FILTERS 8 -#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT 15 -#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_MASK (0x1 << \ - I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT) - -#define I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT 0 -#define I40E_AQC_SET_WOL_FILTER_INDEX_MASK (0x7 << \ - I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT) - __le16 cmd_flags; -#define I40E_AQC_SET_WOL_FILTER 0x8000 -#define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL 0x4000 -#define I40E_AQC_SET_WOL_FILTER_WOL_PRESERVE_ON_PFR 0x2000 -#define I40E_AQC_SET_WOL_FILTER_ACTION_CLEAR 0 -#define I40E_AQC_SET_WOL_FILTER_ACTION_SET 1 - __le16 valid_flags; -#define I40E_AQC_SET_WOL_FILTER_ACTION_VALID 0x8000 -#define I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID 0x4000 - u8 reserved[2]; - __le32 address_high; - __le32 address_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_set_wol_filter); - -struct i40e_aqc_set_wol_filter_data { - u8 filter[128]; - u8 mask[16]; -}; - -I40E_CHECK_STRUCT_LEN(0x90, i40e_aqc_set_wol_filter_data); - -/* Get Wake Reason (0x0121) */ - -struct i40e_aqc_get_wake_reason_completion { - u8 reserved_1[2]; - __le16 wake_reason; -#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT 0 -#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_MASK (0xFF << \ - I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT) -#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT 8 -#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_MASK (0xFF << \ - I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT) - u8 reserved_2[12]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_get_wake_reason_completion); - -/* Switch configuration commands (0x02xx) */ - -/* Used by many indirect commands that only pass an seid and a buffer in the - * command - */ -struct i40e_aqc_switch_seid { - __le16 
seid; - u8 reserved[6]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid); - -/* Get Switch Configuration command (indirect 0x0200) - * uses i40e_aqc_switch_seid for the descriptor - */ -struct i40e_aqc_get_switch_config_header_resp { - __le16 num_reported; - __le16 num_total; - u8 reserved[12]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_config_header_resp); - -struct i40e_aqc_switch_config_element_resp { - u8 element_type; -#define I40E_AQ_SW_ELEM_TYPE_MAC 1 -#define I40E_AQ_SW_ELEM_TYPE_PF 2 -#define I40E_AQ_SW_ELEM_TYPE_VF 3 -#define I40E_AQ_SW_ELEM_TYPE_EMP 4 -#define I40E_AQ_SW_ELEM_TYPE_BMC 5 -#define I40E_AQ_SW_ELEM_TYPE_PV 16 -#define I40E_AQ_SW_ELEM_TYPE_VEB 17 -#define I40E_AQ_SW_ELEM_TYPE_PA 18 -#define I40E_AQ_SW_ELEM_TYPE_VSI 19 - u8 revision; -#define I40E_AQ_SW_ELEM_REV_1 1 - __le16 seid; - __le16 uplink_seid; - __le16 downlink_seid; - u8 reserved[3]; - u8 connection_type; -#define I40E_AQ_CONN_TYPE_REGULAR 0x1 -#define I40E_AQ_CONN_TYPE_DEFAULT 0x2 -#define I40E_AQ_CONN_TYPE_CASCADED 0x3 - __le16 scheduler_id; - __le16 element_info; -}; - -I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_config_element_resp); - -/* Get Switch Configuration (indirect 0x0200) - * an array of elements are returned in the response buffer - * the first in the array is the header, remainder are elements - */ -struct i40e_aqc_get_switch_config_resp { - struct i40e_aqc_get_switch_config_header_resp header; - struct i40e_aqc_switch_config_element_resp element[1]; -}; - -I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_switch_config_resp); - -/* Add Statistics (direct 0x0201) - * Remove Statistics (direct 0x0202) - */ -struct i40e_aqc_add_remove_statistics { - __le16 seid; - __le16 vlan; - __le16 stat_index; - u8 reserved[10]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics); - -/* Set Port Parameters command (direct 0x0203) */ -struct i40e_aqc_set_port_parameters { - __le16 command_flags; -#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1 -#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! 
*/ -#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4 - __le16 bad_frame_vsi; -#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_SHIFT 0x0 -#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_MASK 0x3FF - __le16 default_seid; /* reserved for command */ - u8 reserved[10]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters); - -/* Get Switch Resource Allocation (indirect 0x0204) */ -struct i40e_aqc_get_switch_resource_alloc { - u8 num_entries; /* reserved for command */ - u8 reserved[7]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc); - -/* expect an array of these structs in the response buffer */ -struct i40e_aqc_switch_resource_alloc_element_resp { - u8 resource_type; -#define I40E_AQ_RESOURCE_TYPE_VEB 0x0 -#define I40E_AQ_RESOURCE_TYPE_VSI 0x1 -#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2 -#define I40E_AQ_RESOURCE_TYPE_STAG 0x3 -#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4 -#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5 -#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6 -#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7 -#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8 -#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9 -#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA -#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB -#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC -#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD -#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF -#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10 -#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11 -#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12 -#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13 - u8 reserved1; - __le16 guaranteed; - __le16 total; - __le16 used; - __le16 total_unalloced; - u8 reserved2[6]; -}; - -I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp); - -/* Set Switch Configuration (direct 0x0205) */ -struct i40e_aqc_set_switch_config { - __le16 flags; -/* flags used for both fields below */ -#define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001 -#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002 - __le16 valid_flags; - /* The ethertype in switch_tag is dropped on ingress and used - * internally by the switch. Set this to zero for the default - * of 0x88a8 (802.1ad). Should be zero for firmware API - * versions lower than 1.7. - */ - __le16 switch_tag; - /* The ethertypes in first_tag and second_tag are used to - * match the outer and inner VLAN tags (respectively) when HW - * double VLAN tagging is enabled via the set port parameters - * AQ command. Otherwise these are both ignored. Set them to - * zero for their defaults of 0x8100 (802.1Q). Should be zero - * for firmware API versions lower than 1.7. 
- */
- __le16 first_tag;
- __le16 second_tag;
- u8 reserved[6];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_switch_config);
-
-/* Read Receive control registers (direct 0x0206)
- * Write Receive control registers (direct 0x0207)
- * used for accessing Rx control registers that can be
- * slow and need special handling when under high Rx load
- */
-struct i40e_aqc_rx_ctl_reg_read_write {
- __le32 reserved1;
- __le32 address;
- __le32 reserved2;
- __le32 value;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_rx_ctl_reg_read_write);
-
-/* Add VSI (indirect 0x0210)
- * this indirect command uses struct i40e_aqc_vsi_properties_data
- * as the indirect buffer (128 bytes)
- *
- * Update VSI (indirect 0x211)
- * uses the same data structure as Add VSI
- *
- * Get VSI (indirect 0x0212)
- * uses the same completion and data structure as Add VSI
- */
-struct i40e_aqc_add_get_update_vsi {
- __le16 uplink_seid;
- u8 connection_type;
-#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1
-#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2
-#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3
- u8 reserved1;
- u8 vf_id;
- u8 reserved2;
- __le16 vsi_flags;
-#define I40E_AQ_VSI_TYPE_SHIFT 0x0
-#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT)
-#define I40E_AQ_VSI_TYPE_VF 0x0
-#define I40E_AQ_VSI_TYPE_VMDQ2 0x1
-#define I40E_AQ_VSI_TYPE_PF 0x2
-#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3
-#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi);
-
-struct i40e_aqc_add_get_update_vsi_completion {
- __le16 seid;
- __le16 vsi_number;
- __le16 vsi_used;
- __le16 vsi_free;
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion);
-
-struct i40e_aqc_vsi_properties_data {
- /* first 96 byte are written by SW */
- __le16 valid_sections;
-#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001
-#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002
-#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004
-#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008
-#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010
-#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020
-#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040
-#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080
-#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100
-#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200
- /* switch section */
- __le16 switch_id; /* 12bit id combined with flags below */
-#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000
-#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT)
-#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000
-#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000
-#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000
- u8 sw_reserved[2];
- /* security section */
- u8 sec_flags;
-#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01
-#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02
-#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04
- u8 sec_reserved;
- /* VLAN section */
- __le16 pvid; /* VLANS include priority bits */
- __le16 fcoe_pvid;
- u8 port_vlan_flags;
-#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00
-#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \
- I40E_AQ_VSI_PVLAN_MODE_SHIFT)
-#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01
-#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02
-#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03
-#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04
-#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03
-#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \
- I40E_AQ_VSI_PVLAN_EMOD_SHIFT)
-#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0
-#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08
-#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10
-#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18
- u8 pvlan_reserved[3];
- /* ingress egress up sections */
- __le32 ingress_table; /* bitmap, 3 bits per up */
-#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0
-#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP0_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3
-#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP1_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6
-#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP2_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9
-#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP3_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12
-#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP4_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15
-#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP5_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18
-#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP6_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21
-#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP7_SHIFT)
- __le32 egress_table; /* same defines as for ingress table */
- /* cascaded PV section */
- __le16 cas_pv_tag;
- u8 cas_pv_flags;
-#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00
-#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \
- I40E_AQ_VSI_CAS_PV_TAGX_SHIFT)
-#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00
-#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01
-#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02
-#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10
-#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20
-#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40
- u8 cas_pv_reserved;
- /* queue mapping section */
- __le16 mapping_flags;
-#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0
-#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1
- __le16 queue_mapping[16];
-#define I40E_AQ_VSI_QUEUE_SHIFT 0x0
-#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT)
- __le16 tc_mapping[8];
-#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0
-#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \
- I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
-#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9
-#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \
- I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
- /* queueing option section */
- u8 queueing_opt_flags;
-#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04
-#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08
-#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
-#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
-#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00
-#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40
- u8 queueing_opt_reserved[3];
- /* scheduler section */
- u8 up_enable_bits;
- u8 sched_reserved;
- /* outer up section */
- __le32 outer_up_table; /* same structure and defines as ingress tbl */
- u8 cmd_reserved[8];
- /* last 32 bytes are written by FW */
- __le16 qs_handle[8];
-#define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF
- __le16 stat_counter_idx;
- __le16 sched_id;
- u8 resp_reserved[12];
-};
-
-I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data);
-
-/* Add Port Virtualizer (direct 0x0220)
- * also used for update PV (direct 0x0221) but only flags are used
- * (IS_CTRL_PORT only works on add PV)
- */
-struct i40e_aqc_add_update_pv {
- __le16 command_flags;
-#define I40E_AQC_PV_FLAG_PV_TYPE 0x1
-#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2
-#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4
-#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8
- __le16 uplink_seid;
- __le16 connected_seid;
- u8 reserved[10];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv);
-
-struct i40e_aqc_add_update_pv_completion {
- /* reserved for update; for add also encodes error if rc == ENOSPC */
- __le16 pv_seid;
-#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1
-#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2
-#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4
-#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8
- u8 reserved[14];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion);
-
-/* Get PV Params (direct 0x0222)
- * uses i40e_aqc_switch_seid for the descriptor
- */
-
-struct i40e_aqc_get_pv_params_completion {
- __le16 seid;
- __le16 default_stag;
- __le16 pv_flags; /* same flags as add_pv */
-#define I40E_AQC_GET_PV_PV_TYPE 0x1
-#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2
-#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4
- u8 reserved[8];
- __le16 default_port_seid;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion);
-
-/* Add VEB (direct 0x0230) */
-struct i40e_aqc_add_veb {
- __le16 uplink_seid;
- __le16 downlink_seid;
- __le16 veb_flags;
-#define I40E_AQC_ADD_VEB_FLOATING 0x1
-#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1
-#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \
- I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT)
-#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2
-#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4
-#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 /* deprecated */
-#define I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS 0x10
- u8 enable_tcs;
- u8 reserved[9];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb);
-
-struct i40e_aqc_add_veb_completion {
- u8 reserved[6];
- __le16 switch_seid;
- /* also encodes error if rc == ENOSPC; codes are the same as add_pv */
- __le16 veb_seid;
-#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1
-#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2
-#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4
-#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8
- __le16 statistic_index;
- __le16 vebs_used;
- __le16 vebs_free;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion);
-
-/* Get VEB Parameters (direct 0x0232)
- * uses i40e_aqc_switch_seid for the descriptor
- */
-struct i40e_aqc_get_veb_parameters_completion {
- __le16 seid;
- __le16 switch_id;
- __le16 veb_flags; /* only the first/last flags from 0x0230 is valid */
- __le16 statistic_index;
- __le16 vebs_used;
- __le16 vebs_free;
- u8 reserved[4];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion);
-
-/* Delete Element (direct 0x0243)
- * uses the generic i40e_aqc_switch_seid
- */
-
-/* Add MAC-VLAN (indirect 0x0250) */
-
-/* used for the command for most vlan commands */
-struct i40e_aqc_macvlan {
- __le16 num_addresses;
- __le16 seid[3];
-#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \
- I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
-#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan);
-
-/* indirect data for command and response */
-struct i40e_aqc_add_macvlan_element_data {
- u8 mac_addr[6];
- __le16 vlan_tag;
- __le16 flags;
-#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001
-#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002
-#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004
-#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008
-#define I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC 0x0010
- __le16 queue_number;
-#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0
-#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \
- I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
- /* response section */
- u8 match_method;
-#define I40E_AQC_MM_PERFECT_MATCH 0x01
-#define I40E_AQC_MM_HASH_MATCH 0x02
-#define I40E_AQC_MM_ERR_NO_RES 0xFF
- u8 reserved1[3];
-};
-
-struct i40e_aqc_add_remove_macvlan_completion {
- __le16 perfect_mac_used;
- __le16 perfect_mac_free;
- __le16 unicast_hash_free;
- __le16 multicast_hash_free;
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion);
-
-/* Remove MAC-VLAN (indirect 0x0251)
- * uses i40e_aqc_macvlan for the descriptor
- * data points to an array of num_addresses of elements
- */
-
-struct i40e_aqc_remove_macvlan_element_data {
- u8 mac_addr[6];
- __le16 vlan_tag;
- u8 flags;
-#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01
-#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02
-#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08
-#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10
- u8 reserved[3];
- /* reply section */
- u8 error_code;
-#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0
-#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF
- u8 reply_reserved[3];
-};
-
-/* Add VLAN (indirect 0x0252)
- * Remove VLAN (indirect 0x0253)
- * use the generic i40e_aqc_macvlan for the command
- */
-struct i40e_aqc_add_remove_vlan_element_data {
- __le16 vlan_tag;
- u8 vlan_flags;
-/* flags for add VLAN */
-#define I40E_AQC_ADD_VLAN_LOCAL 0x1
-#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1
-#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << I40E_AQC_ADD_PVLAN_TYPE_SHIFT)
-#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0
-#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2
-#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4
-#define I40E_AQC_VLAN_PTYPE_SHIFT 3
-#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT)
-#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0
-#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8
-#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10
-#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18
-/* flags for remove VLAN */
-#define I40E_AQC_REMOVE_VLAN_ALL 0x1
- u8 reserved;
- u8 result;
-/* flags for add VLAN */
-#define I40E_AQC_ADD_VLAN_SUCCESS 0x0
-#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE
-#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF
-/* flags for remove VLAN */
-#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0
-#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF
- u8 reserved1[3];
-};
-
-struct i40e_aqc_add_remove_vlan_completion {
- u8 reserved[4];
- __le16 vlans_used;
- __le16 vlans_free;
- __le32 addr_high;
- __le32 addr_low;
-};
-
-/* Set VSI Promiscuous Modes (direct 0x0254) */
-struct i40e_aqc_set_vsi_promiscuous_modes {
- __le16 promiscuous_flags;
- __le16 valid_flags;
-/* flags used for both fields above */
-#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01
-#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02
-#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04
-#define I40E_AQC_SET_VSI_DEFAULT 0x08
-#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
-#define I40E_AQC_SET_VSI_PROMISC_TX 0x8000
- __le16 seid;
-#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
- __le16 vlan_tag;
-#define I40E_AQC_SET_VSI_VLAN_MASK 0x0FFF
-#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000
- u8 reserved[8];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
-
-/* Add S/E-tag command (direct 0x0255)
- * Uses generic i40e_aqc_add_remove_tag_completion for completion
- */
-struct i40e_aqc_add_tag {
- __le16 flags;
-#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001
- __le16 seid;
-#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \
- I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT)
- __le16 tag;
- __le16 queue_number;
- u8 reserved[8];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag);
-
-struct i40e_aqc_add_remove_tag_completion {
- u8 reserved[12];
- __le16 tags_used;
- __le16 tags_free;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion);
-
-/* Remove S/E-tag command (direct 0x0256)
- * Uses generic i40e_aqc_add_remove_tag_completion for completion
- */
-struct i40e_aqc_remove_tag {
- __le16 seid;
-#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
- I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT)
- __le16 tag;
- u8 reserved[12];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_tag);
-
-/* Add multicast E-Tag (direct 0x0257)
- * del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields
- * and no external data
- */
-struct i40e_aqc_add_remove_mcast_etag {
- __le16 pv_seid;
- __le16 etag;
- u8 num_unicast_etags;
- u8 reserved[3];
- __le32 addr_high; /* address of array of 2-byte s-tags */
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag);
-
-struct i40e_aqc_add_remove_mcast_etag_completion {
- u8 reserved[4];
- __le16 mcast_etags_used;
- __le16 mcast_etags_free;
- __le32 addr_high;
- __le32 addr_low;
-
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion);
-
-/* Update S/E-Tag (direct 0x0259) */
-struct i40e_aqc_update_tag {
- __le16 seid;
-#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
- I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT)
- __le16 old_tag;
- __le16 new_tag;
- u8 reserved[10];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag);
-
-struct i40e_aqc_update_tag_completion {
- u8 reserved[12];
- __le16 tags_used;
- __le16 tags_free;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion);
-
-/* Add Control Packet filter (direct 0x025A)
- * Remove Control Packet filter (direct 0x025B)
- * uses the i40e_aqc_add_oveb_cloud,
- * and the generic direct completion structure
- */
-struct i40e_aqc_add_remove_control_packet_filter {
- u8 mac[6];
- __le16 etype;
- __le16 flags;
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000
- __le16 seid;
-#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \
- I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT)
- __le16 queue;
- u8 reserved[2];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter);
-
-struct i40e_aqc_add_remove_control_packet_filter_completion {
- __le16 mac_etype_used;
- __le16 etype_used;
- __le16 mac_etype_free;
- __le16 etype_free;
- u8 reserved[8];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion);
-
-/* Add Cloud filters (indirect 0x025C)
- * Remove Cloud filters (indirect 0x025D)
- * uses the i40e_aqc_add_remove_cloud_filters,
- * and the generic indirect completion structure
- */
-struct i40e_aqc_add_remove_cloud_filters {
- u8 num_filters;
- u8 reserved;
- __le16 seid;
-#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \
- I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT)
- u8 big_buffer_flag;
-#define I40E_AQC_ADD_CLOUD_CMD_BB 1
- u8 reserved2[3];
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters);
-
-struct i40e_aqc_cloud_filters_element_data {
- u8 outer_mac[6];
- u8 inner_mac[6];
- __le16 inner_vlan;
- union {
- struct {
- u8 reserved[12];
- u8 data[4];
- } v4;
- struct {
- u8 data[16];
- } v6;
- struct {
- __le16 data[8];
- } raw_v6;
- } ipaddr;
- __le16 flags;
-#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
-#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
- I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
-/* 0x0000 reserved */
-#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001
-/* 0x0002 reserved */
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004
-/* 0x0005 reserved */
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006
-/* 0x0007 reserved */
-/* 0x0008 reserved */
-#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A
-#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B
-#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C
-/* 0x0010 to 0x0017 is for custom filters */
-#define I40E_AQC_ADD_CLOUD_FILTER_IP_PORT 0x0010 /* Dest IP + L4 Port */
-#define I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT 0x0011 /* Dest MAC + L4 Port */
-#define I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT 0x0012 /* Dest MAC + VLAN + L4 Port */
-
-#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080
-#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6
-#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0
-#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0
-#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100
-
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN 0
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE 2
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_RESERVED 4
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN_GPE 5
-
-#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_MAC 0x2000
-#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_INNER_MAC 0x4000
-#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_IP 0x8000
-
- __le32 tenant_id;
- u8 reserved[4];
- __le16 queue_number;
-#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
-#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x7FF << \
- I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
- u8 reserved2[14];
- /* response section */
- u8 allocation_result;
-#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0
-#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF
- u8 response_reserved[7];
-};
-
-I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_cloud_filters_element_data);
-
-/* i40e_aqc_cloud_filters_element_bb is used when
- * I40E_AQC_ADD_CLOUD_CMD_BB flag is set.
- */
-struct i40e_aqc_cloud_filters_element_bb {
- struct i40e_aqc_cloud_filters_element_data element;
- u16 general_fields[32];
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0 0
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1 1
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2 2
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0 3
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1 4
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2 5
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0 6
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1 7
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2 8
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0 9
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1 10
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2 11
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD0 12
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD1 13
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD2 14
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0 15
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD1 16
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD2 17
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD3 18
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD4 19
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD5 20
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD6 21
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD7 22
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD0 23
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD1 24
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD2 25
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD3 26
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD4 27
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD5 28
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD6 29
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD7 30
-};
-
-I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_cloud_filters_element_bb);
-
-struct i40e_aqc_remove_cloud_filters_completion {
- __le16 perfect_ovlan_used;
- __le16 perfect_ovlan_free;
- __le16 vlan_used;
- __le16 vlan_free;
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion);
-
-/* Replace filter Command 0x025F
- * uses the i40e_aqc_replace_cloud_filters,
- * and the generic indirect completion structure
- */
-struct i40e_filter_data {
- u8 filter_type;
- u8 input[3];
-};
-
-I40E_CHECK_STRUCT_LEN(4, i40e_filter_data);
-
-struct i40e_aqc_replace_cloud_filters_cmd {
- u8 valid_flags;
-#define I40E_AQC_REPLACE_L1_FILTER 0x0
-#define I40E_AQC_REPLACE_CLOUD_FILTER 0x1
-#define I40E_AQC_GET_CLOUD_FILTERS 0x2
-#define I40E_AQC_MIRROR_CLOUD_FILTER 0x4
-#define I40E_AQC_HIGH_PRIORITY_CLOUD_FILTER 0x8
- u8 old_filter_type;
- u8 new_filter_type;
- u8 tr_bit;
- u8 reserved[4];
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_replace_cloud_filters_cmd);
-
-struct i40e_aqc_replace_cloud_filters_cmd_buf {
- u8 data[32];
-/* Filter type INPUT codes*/
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_ENTRIES_MAX 3
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED BIT(7)
-
-/* Field Vector offsets */
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_MAC_DA 0
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_ETH 6
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG 7
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN 8
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_OVLAN 9
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN 10
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY 11
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC 12
-/* big FLU */
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IP_DA 14
-/* big FLU */
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_OIP_DA 15
-
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN 37
- struct i40e_filter_data filters[8];
-};
-
-I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_replace_cloud_filters_cmd_buf);
-
-/* Add Mirror Rule (indirect or direct 0x0260)
- * Delete Mirror Rule (indirect or direct 0x0261)
- * note: some rule types (4,5) do not use an external buffer.
- * take care to set the flags correctly.
- */
-struct i40e_aqc_add_delete_mirror_rule {
- __le16 seid;
- __le16 rule_type;
-#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0
-#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \
- I40E_AQC_MIRROR_RULE_TYPE_SHIFT)
-#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1
-#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2
-#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3
-#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4
-#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5
- __le16 num_entries;
- __le16 destination; /* VSI for add, rule id for delete */
- __le32 addr_high; /* address of array of 2-byte VSI or VLAN ids */
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule);
-
-struct i40e_aqc_add_delete_mirror_rule_completion {
- u8 reserved[2];
- __le16 rule_id; /* only used on add */
- __le16 mirror_rules_used;
- __le16 mirror_rules_free;
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
-
-/* Dynamic Device Personalization */
-struct i40e_aqc_write_personalization_profile {
- u8 flags;
- u8 reserved[3];
- __le32 profile_track_id;
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_write_personalization_profile);
-
-struct i40e_aqc_write_ddp_resp {
- __le32 error_offset;
- __le32 error_info;
- __le32 addr_high;
- __le32 addr_low;
-};
-
-struct i40e_aqc_get_applied_profiles {
- u8 flags;
-#define I40E_AQC_GET_DDP_GET_CONF 0x1
-#define I40E_AQC_GET_DDP_GET_RDPU_CONF 0x2
- u8 rsv[3];
- __le32 reserved;
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_get_applied_profiles);
-
-/* DCB 0x03xx*/
-
-/* PFC Ignore (direct 0x0301)
- * the command and response use the same descriptor structure
- */
-struct i40e_aqc_pfc_ignore {
- u8 tc_bitmap;
- u8 command_flags; /* unused on response */
-#define I40E_AQC_PFC_IGNORE_SET 0x80
-#define I40E_AQC_PFC_IGNORE_CLEAR 0x0
- u8 reserved[14];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore);
-
-/* DCB Update (direct 0x0302) uses the i40e_aq_desc structure
- * with no parameters
- */
-
-/* TX scheduler 0x04xx */
-
-/* Almost all the indirect commands use
- * this generic struct to pass the SEID in param0
- */
-struct i40e_aqc_tx_sched_ind {
- __le16 vsi_seid;
- u8 reserved[6];
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind);
-
-/* Several commands respond with a set of queue set handles */
-struct i40e_aqc_qs_handles_resp {
- __le16 qs_handles[8];
-};
-
-/* Configure VSI BW limits (direct 0x0400) */
-struct i40e_aqc_configure_vsi_bw_limit {
- __le16 vsi_seid;
- u8 reserved[2];
- __le16 credit;
- u8 reserved1[2];
- u8 max_credit; /* 0-3, limit = 2^max */
- u8 reserved2[7];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit);
-
-/* Configure VSI Bandwidth Limit per Traffic Type (indirect 0x0406)
- * responds with i40e_aqc_qs_handles_resp
- */
-struct i40e_aqc_configure_vsi_ets_sla_bw_data {
- u8 tc_valid_bits;
- u8 reserved[15];
- __le16 tc_bw_credits[8]; /* FW writesback QS handles here */
-
- /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
- __le16 tc_bw_max[2];
- u8 reserved1[28];
-};
-
-I40E_CHECK_STRUCT_LEN(0x40,
- i40e_aqc_configure_vsi_ets_sla_bw_data);
-
-/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407)
- * responds with i40e_aqc_qs_handles_resp
- */
-struct i40e_aqc_configure_vsi_tc_bw_data {
- u8 tc_valid_bits;
- u8 reserved[3];
- u8 tc_bw_credits[8];
- u8 reserved1[4];
- __le16 qs_handles[8];
-};
-
-I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_vsi_tc_bw_data);
-
-/* Query vsi bw configuration (indirect 0x0408) */
-struct i40e_aqc_query_vsi_bw_config_resp {
- u8 tc_valid_bits;
- u8 tc_suspended_bits;
- u8 reserved[14];
- __le16 qs_handles[8];
- u8 reserved1[4];
- __le16 port_bw_limit;
- u8 reserved2[2];
- u8 max_bw; /* 0-3, limit = 2^max */
- u8 reserved3[23];
-};
-
-I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_vsi_bw_config_resp);
-
-/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */
-struct i40e_aqc_query_vsi_ets_sla_config_resp {
- u8 tc_valid_bits;
- u8 reserved[3];
- u8 share_credits[8];
- __le16 credits[8];
-
- /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
- __le16 tc_bw_max[2];
-};
-
-I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_vsi_ets_sla_config_resp);
-
-/* Configure Switching Component Bandwidth Limit (direct 0x0410) */
-struct i40e_aqc_configure_switching_comp_bw_limit {
- __le16 seid;
- u8 reserved[2];
- __le16 credit;
- u8 reserved1[2];
- u8 max_bw; /* 0-3, limit = 2^max */
- u8 reserved2[7];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
-
-/* Enable Physical Port ETS (indirect 0x0413)
- * Modify Physical Port ETS (indirect 0x0414)
- * Disable Physical Port ETS (indirect 0x0415)
- */
-struct i40e_aqc_configure_switching_comp_ets_data {
- u8 reserved[4];
- u8 tc_valid_bits;
- u8 seepage;
-#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1
- u8 tc_strict_priority_flags;
- u8 reserved1[17];
- u8 tc_bw_share_credits[8];
- u8 reserved2[96];
-};
-
-I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_configure_switching_comp_ets_data);
-
-/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
-struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
- u8 tc_valid_bits;
- u8 reserved[15];
- __le16 tc_bw_credit[8];
-
- /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
- __le16 tc_bw_max[2];
- u8 reserved1[28];
-};
-
-I40E_CHECK_STRUCT_LEN(0x40,
- i40e_aqc_configure_switching_comp_ets_bw_limit_data);
-
-/* Configure Switching Component Bandwidth Allocation per Tc
- * (indirect 0x0417)
- */
-struct i40e_aqc_configure_switching_comp_bw_config_data {
- u8 tc_valid_bits;
- u8 reserved[2];
- u8 absolute_credits; /* bool */
- u8 tc_bw_share_credits[8];
- u8 reserved1[20];
-};
-
-I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_switching_comp_bw_config_data);
-
-/* Query Switching Component Configuration (indirect 0x0418) */
-struct i40e_aqc_query_switching_comp_ets_config_resp {
- u8 tc_valid_bits;
- u8 reserved[35];
- __le16 port_bw_limit;
- u8 reserved1[2];
- u8 tc_bw_max; /* 0-3, limit = 2^max */
- u8 reserved2[23];
-};
-
-I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_switching_comp_ets_config_resp);
-
-/* Query PhysicalPort ETS Configuration (indirect 0x0419) */
-struct i40e_aqc_query_port_ets_config_resp {
- u8 reserved[4];
- u8 tc_valid_bits;
- u8 reserved1;
- u8 tc_strict_priority_bits;
- u8 reserved2;
- u8 tc_bw_share_credits[8];
- __le16 tc_bw_limits[8];
-
- /* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */
- __le16 tc_bw_max[2];
- u8 reserved3[32];
-};
-
-I40E_CHECK_STRUCT_LEN(0x44, i40e_aqc_query_port_ets_config_resp);
-
-/* Query Switching Component Bandwidth Allocation per Traffic Type
- * (indirect 0x041A)
- */
-struct i40e_aqc_query_switching_comp_bw_config_resp {
- u8 tc_valid_bits;
- u8 reserved[2];
- u8 absolute_credits_enable; /* bool */
- u8 tc_bw_share_credits[8];
- __le16 tc_bw_limits[8];
-
- /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
- __le16 tc_bw_max[2];
-};
-
-I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_switching_comp_bw_config_resp);
-
-/* Suspend/resume port TX traffic
- * (direct 0x041B and 0x041C) uses the generic SEID struct
- */
-
-/* Configure partition BW
- * (indirect 0x041D)
- */
-struct i40e_aqc_configure_partition_bw_data {
- __le16 pf_valid_bits;
- u8 min_bw[16]; /* guaranteed bandwidth */
- u8 max_bw[16]; /* bandwidth limit */
-};
-
-I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data);
-
-/* Get and set the active HMC resource profile and status.
- * (direct 0x0500) and (direct 0x0501)
- */
-struct i40e_aq_get_set_hmc_resource_profile {
- u8 pm_profile;
- u8 pe_vf_enabled;
- u8 reserved[14];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
-
-enum i40e_aq_hmc_profile {
- /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */
- I40E_HMC_PROFILE_DEFAULT = 1,
- I40E_HMC_PROFILE_FAVOR_VF = 2,
- I40E_HMC_PROFILE_EQUAL = 3,
-};
-
-/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
-
-/* set in param0 for get phy abilities to report qualified modules */
-#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001
-#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002
-
-enum i40e_aq_phy_type {
- I40E_PHY_TYPE_SGMII = 0x0,
- I40E_PHY_TYPE_1000BASE_KX = 0x1,
- I40E_PHY_TYPE_10GBASE_KX4 = 0x2,
- I40E_PHY_TYPE_10GBASE_KR = 0x3,
- I40E_PHY_TYPE_40GBASE_KR4 = 0x4,
- I40E_PHY_TYPE_XAUI = 0x5,
- I40E_PHY_TYPE_XFI = 0x6,
- I40E_PHY_TYPE_SFI = 0x7,
- I40E_PHY_TYPE_XLAUI = 0x8,
- I40E_PHY_TYPE_XLPPI = 0x9,
- I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA,
- I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB,
- I40E_PHY_TYPE_10GBASE_AOC = 0xC,
- I40E_PHY_TYPE_40GBASE_AOC = 0xD,
- I40E_PHY_TYPE_UNRECOGNIZED = 0xE,
- I40E_PHY_TYPE_UNSUPPORTED = 0xF,
- I40E_PHY_TYPE_100BASE_TX = 0x11,
- I40E_PHY_TYPE_1000BASE_T = 0x12,
- I40E_PHY_TYPE_10GBASE_T = 0x13,
- I40E_PHY_TYPE_10GBASE_SR = 0x14,
- I40E_PHY_TYPE_10GBASE_LR = 0x15,
- I40E_PHY_TYPE_10GBASE_SFPP_CU = 0x16,
- I40E_PHY_TYPE_10GBASE_CR1 = 0x17,
- I40E_PHY_TYPE_40GBASE_CR4 = 0x18,
- I40E_PHY_TYPE_40GBASE_SR4 = 0x19,
- I40E_PHY_TYPE_40GBASE_LR4 = 0x1A,
- I40E_PHY_TYPE_1000BASE_SX = 0x1B,
- I40E_PHY_TYPE_1000BASE_LX = 0x1C,
- I40E_PHY_TYPE_1000BASE_T_OPTICAL = 0x1D,
- I40E_PHY_TYPE_20GBASE_KR2 = 0x1E,
- I40E_PHY_TYPE_25GBASE_KR = 0x1F,
- I40E_PHY_TYPE_25GBASE_CR = 0x20,
- I40E_PHY_TYPE_25GBASE_SR = 0x21,
- I40E_PHY_TYPE_25GBASE_LR = 0x22,
- I40E_PHY_TYPE_25GBASE_AOC = 0x23,
- I40E_PHY_TYPE_25GBASE_ACC = 0x24,
- I40E_PHY_TYPE_MAX,
- I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP = 0xFD,
- I40E_PHY_TYPE_EMPTY = 0xFE,
- I40E_PHY_TYPE_DEFAULT = 0xFF,
-};
-
-#define I40E_LINK_SPEED_100MB_SHIFT 0x1
-#define I40E_LINK_SPEED_1000MB_SHIFT 0x2
-#define I40E_LINK_SPEED_10GB_SHIFT 0x3
-#define I40E_LINK_SPEED_40GB_SHIFT 0x4
-#define I40E_LINK_SPEED_20GB_SHIFT 0x5
-#define I40E_LINK_SPEED_25GB_SHIFT 0x6
-
-enum i40e_aq_link_speed {
- I40E_LINK_SPEED_UNKNOWN = 0,
- I40E_LINK_SPEED_100MB = BIT(I40E_LINK_SPEED_100MB_SHIFT),
- I40E_LINK_SPEED_1GB = BIT(I40E_LINK_SPEED_1000MB_SHIFT),
- I40E_LINK_SPEED_10GB = BIT(I40E_LINK_SPEED_10GB_SHIFT),
- I40E_LINK_SPEED_40GB = BIT(I40E_LINK_SPEED_40GB_SHIFT),
- I40E_LINK_SPEED_20GB = BIT(I40E_LINK_SPEED_20GB_SHIFT),
- I40E_LINK_SPEED_25GB = BIT(I40E_LINK_SPEED_25GB_SHIFT),
-};
-
-struct i40e_aqc_module_desc {
- u8 oui[3];
- u8 reserved1;
- u8 part_number[16];
- u8 revision[4];
- u8 reserved2[8];
-};
-
-I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_module_desc);
-
-struct i40e_aq_get_phy_abilities_resp {
- __le32 phy_type; /* bitmap using the above enum for offsets */
- u8 link_speed; /* bitmap using the above enum bit patterns */
- u8 abilities;
-#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
-#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
-#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04
-#define I40E_AQ_PHY_LINK_ENABLED 0x08
-#define I40E_AQ_PHY_AN_ENABLED 0x10
-#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
-#define I40E_AQ_PHY_FEC_ABILITY_KR 0x40
-#define I40E_AQ_PHY_FEC_ABILITY_RS 0x80
- __le16 eee_capability;
-#define I40E_AQ_EEE_100BASE_TX 0x0002
-#define I40E_AQ_EEE_1000BASE_T 0x0004
-#define I40E_AQ_EEE_10GBASE_T 0x0008
-#define I40E_AQ_EEE_1000BASE_KX 0x0010
-#define I40E_AQ_EEE_10GBASE_KX4 0x0020
-#define I40E_AQ_EEE_10GBASE_KR 0x0040
- __le32 eeer_val;
- u8 d3_lpan;
-#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01
- u8 phy_type_ext;
-#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0X01
-#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0X02
-#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
-#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
-#define I40E_AQ_PHY_TYPE_EXT_25G_AOC 0x10
-#define I40E_AQ_PHY_TYPE_EXT_25G_ACC 0x20
- u8 fec_cfg_curr_mod_ext_info;
-#define I40E_AQ_ENABLE_FEC_KR 0x01
-#define I40E_AQ_ENABLE_FEC_RS 0x02
-#define I40E_AQ_REQUEST_FEC_KR 0x04
-#define I40E_AQ_REQUEST_FEC_RS 0x08
-#define I40E_AQ_ENABLE_FEC_AUTO 0x10
-#define I40E_AQ_FEC
-#define I40E_AQ_MODULE_TYPE_EXT_MASK 0xE0
-#define I40E_AQ_MODULE_TYPE_EXT_SHIFT 5
-
- u8 ext_comp_code;
- u8 phy_id[4];
- u8 module_type[3];
- u8 qualified_module_count;
-#define I40E_AQ_PHY_MAX_QMS 16
- struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS];
-};
-
-I40E_CHECK_STRUCT_LEN(0x218, i40e_aq_get_phy_abilities_resp);
-
-/* Set PHY Config (direct 0x0601) */
-struct i40e_aq_set_phy_config { /* same bits as above in all */
- __le32 phy_type;
- u8 link_speed;
- u8 abilities;
-/* bits 0-2 use the values from get_phy_abilities_resp */
-#define I40E_AQ_PHY_ENABLE_LINK 0x08
-#define I40E_AQ_PHY_ENABLE_AN 0x10
-#define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20
- __le16 eee_capability;
- __le32 eeer;
- u8 low_power_ctrl;
- u8 phy_type_ext;
-#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0X01
-#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0X02
-#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
-#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
- u8 fec_config;
-#define I40E_AQ_SET_FEC_ABILITY_KR BIT(0)
-#define I40E_AQ_SET_FEC_ABILITY_RS BIT(1)
-#define I40E_AQ_SET_FEC_REQUEST_KR BIT(2)
-#define I40E_AQ_SET_FEC_REQUEST_RS BIT(3)
-#define I40E_AQ_SET_FEC_AUTO BIT(4)
-#define I40E_AQ_PHY_FEC_CONFIG_SHIFT 0x0
-#define I40E_AQ_PHY_FEC_CONFIG_MASK (0x1F << I40E_AQ_PHY_FEC_CONFIG_SHIFT)
- u8 reserved;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
-
-/* Set MAC Config command data structure (direct 0x0603) */
-struct i40e_aq_set_mac_config {
- __le16 max_frame_size;
- u8 params;
-#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04
-#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78
-#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3
-#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8
-#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7
-#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5
-#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4
-#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3
-#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2
-#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1
- u8 tx_timer_priority; /* bitmap */
- __le16 tx_timer_value;
- __le16 fc_refresh_threshold;
- u8 reserved[8];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config);
-
-/* Restart Auto-Negotiation (direct 0x605) */
-struct i40e_aqc_set_link_restart_an {
- u8 command;
-#define I40E_AQ_PHY_RESTART_AN 0x02
-#define I40E_AQ_PHY_LINK_ENABLE 0x04
- u8 reserved[15];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an);
-
-/* Get Link Status cmd & response data structure (direct 0x0607) */
-struct i40e_aqc_get_link_status {
- __le16 command_flags; /* only field set on command */
-#define I40E_AQ_LSE_MASK 0x3
-#define I40E_AQ_LSE_NOP 0x0
-#define I40E_AQ_LSE_DISABLE 0x2
-#define I40E_AQ_LSE_ENABLE 0x3
-/* only response uses this flag */
-#define I40E_AQ_LSE_IS_ENABLED 0x1
- u8 phy_type; /* i40e_aq_phy_type */
- u8 link_speed; /* i40e_aq_link_speed */
- u8 link_info;
-#define I40E_AQ_LINK_UP 0x01 /* obsolete */
-#define I40E_AQ_LINK_UP_FUNCTION 0x01
-#define I40E_AQ_LINK_FAULT 0x02
-#define I40E_AQ_LINK_FAULT_TX 0x04
-#define I40E_AQ_LINK_FAULT_RX 0x08
-#define I40E_AQ_LINK_FAULT_REMOTE 0x10
-#define I40E_AQ_LINK_UP_PORT 0x20
-#define I40E_AQ_MEDIA_AVAILABLE 0x40
-#define I40E_AQ_SIGNAL_DETECT 0x80
- u8 an_info;
-#define I40E_AQ_AN_COMPLETED 0x01
-#define I40E_AQ_LP_AN_ABILITY 0x02
-#define I40E_AQ_PD_FAULT 0x04
-#define I40E_AQ_FEC_EN 0x08
-#define I40E_AQ_PHY_LOW_POWER 0x10
-#define I40E_AQ_LINK_PAUSE_TX 0x20
-#define I40E_AQ_LINK_PAUSE_RX 0x40
-#define I40E_AQ_QUALIFIED_MODULE 0x80
- u8 ext_info;
-#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01
-#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02
-#define I40E_AQ_LINK_TX_SHIFT 0x02
-#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT)
-#define I40E_AQ_LINK_TX_ACTIVE 0x00
-#define I40E_AQ_LINK_TX_DRAINED 0x01
-#define I40E_AQ_LINK_TX_FLUSHED 0x03
-#define I40E_AQ_LINK_FORCED_40G 0x10
-/* 25G Error Codes */
-#define I40E_AQ_25G_NO_ERR 0X00
-#define I40E_AQ_25G_NOT_PRESENT 0X01
-#define I40E_AQ_25G_NVM_CRC_ERR 0X02
-#define I40E_AQ_25G_SBUS_UCODE_ERR 0X03
-#define I40E_AQ_25G_SERDES_UCODE_ERR 0X04
-#define I40E_AQ_25G_NIMB_UCODE_ERR 0X05
- u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
-/* Since firmware API 1.7 loopback field keeps power class info as well */
-#define I40E_AQ_LOOPBACK_MASK 0x07
-#define I40E_AQ_PWR_CLASS_SHIFT_LB 6
-#define I40E_AQ_PWR_CLASS_MASK_LB (0x03 << I40E_AQ_PWR_CLASS_SHIFT_LB)
- __le16 max_frame_size;
- u8 config;
-#define I40E_AQ_CONFIG_FEC_KR_ENA 0x01
-#define I40E_AQ_CONFIG_FEC_RS_ENA 0x02
-#define I40E_AQ_CONFIG_CRC_ENA 0x04
-#define I40E_AQ_CONFIG_PACING_MASK 0x78
- union {
- struct {
- u8 power_desc;
-#define I40E_AQ_LINK_POWER_CLASS_1 0x00
-#define I40E_AQ_LINK_POWER_CLASS_2 0x01
-#define I40E_AQ_LINK_POWER_CLASS_3 0x02
-#define I40E_AQ_LINK_POWER_CLASS_4 0x03
-#define I40E_AQ_PWR_CLASS_MASK 0x03
- u8 reserved[4];
- };
- struct {
- u8 link_type[4];
- u8 link_type_ext;
- };
- };
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
-
-/* Set event mask command (direct 0x613) */
-struct i40e_aqc_set_phy_int_mask {
- u8 reserved[8];
- __le16 event_mask;
-#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002
-#define I40E_AQ_EVENT_MEDIA_NA 0x0004
-#define I40E_AQ_EVENT_LINK_FAULT 0x0008
-#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010
-#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020
-#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040
-#define I40E_AQ_EVENT_AN_COMPLETED 0x0080
-#define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100
-#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200
- u8 reserved1[6];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask);
-
-/* Get Local AN advt register (direct 0x0614)
- * Set Local AN advt register (direct 0x0615)
- * Get Link Partner AN advt register (direct 0x0616)
- */
-struct i40e_aqc_an_advt_reg {
- __le32 local_an_reg0;
- __le16 local_an_reg1;
- u8 reserved[10];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg);
-
-/* Set Loopback mode (0x0618) */
-struct i40e_aqc_set_lb_mode {
- __le16 lb_mode;
-#define I40E_AQ_LB_PHY_LOCAL 0x01
-#define I40E_AQ_LB_PHY_REMOTE 0x02
-#define I40E_AQ_LB_MAC_LOCAL 0x04
- u8 reserved[14];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
-
-/* Set PHY Debug command (0x0622) */
-struct i40e_aqc_set_phy_debug {
- u8 command_flags;
-#define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02
-#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2
-#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \
- I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT)
-#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00
-#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01
-#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02
-#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10
- u8 reserved[15];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug);
-
-enum i40e_aq_phy_reg_type {
- I40E_AQC_PHY_REG_INTERNAL = 0x1,
- I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2,
- I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3
-};
-
-/* Run PHY Activity (0x0626) */
-struct i40e_aqc_run_phy_activity {
- __le16 activity_id;
- u8 flags;
- u8 reserved1;
- __le32 control;
- __le32 data;
- u8 reserved2[4];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_run_phy_activity);
-
-/* Set PHY Register command (0x0628) */
-/* Get PHY Register command (0x0629) */
-struct i40e_aqc_phy_register_access {
- u8 phy_interface;
-#define I40E_AQ_PHY_REG_ACCESS_INTERNAL 0
-#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL 1
-#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE 2
- u8 dev_address;
- u8 reserved1[2];
- __le32 reg_address;
- __le32 reg_value;
- u8 reserved2[4];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access);
-
-/* NVM Read command (indirect 0x0701)
- * NVM Erase commands (direct 0x0702)
- * NVM Update commands (indirect 0x0703)
- */
-struct i40e_aqc_nvm_update {
- u8 command_flags;
-#define I40E_AQ_NVM_LAST_CMD 0x01
-#define I40E_AQ_NVM_REARRANGE_TO_FLAT 0x20
-#define I40E_AQ_NVM_REARRANGE_TO_STRUCT 0x40
-#define I40E_AQ_NVM_FLASH_ONLY 0x80
-#define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT 1
-#define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK 0x03
-#define I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED 0x03
-#define I40E_AQ_NVM_PRESERVATION_FLAGS_ALL 0x01
- u8 module_pointer;
- __le16 length;
- __le32 offset;
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
-
-/* NVM Config Read (indirect 0x0704) */
-struct i40e_aqc_nvm_config_read {
- __le16 cmd_flags;
-#define I40E_AQ_ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1
-#define I40E_AQ_ANVM_READ_SINGLE_FEATURE 0
-#define I40E_AQ_ANVM_READ_MULTIPLE_FEATURES 1
- __le16 element_count;
- __le16 element_id; /* Feature/field ID */
- __le16 element_id_msw; /* MSWord of field ID */
- __le32 address_high;
- __le32 address_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read);
-
-/* NVM Config Write (indirect 0x0705) */
-struct i40e_aqc_nvm_config_write {
- __le16 cmd_flags;
- __le16 element_count;
- u8 reserved[4];
- __le32 address_high;
- __le32 address_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
-
-/* Used for 0x0704 as well as for 0x0705 commands */
-#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1
-#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \
- BIT(I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
-#define I40E_AQ_ANVM_FEATURE 0
-#define I40E_AQ_ANVM_IMMEDIATE_FIELD BIT(FEATURE_OR_IMMEDIATE_SHIFT)
-struct i40e_aqc_nvm_config_data_feature {
- __le16 feature_id;
-#define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01
-#define I40E_AQ_ANVM_FEATURE_OPTION_DWORD_MAP 0x08
-#define I40E_AQ_ANVM_FEATURE_OPTION_POR_CSR 0x10
- __le16 feature_options;
- __le16 feature_selection;
-};
-
-I40E_CHECK_STRUCT_LEN(0x6, i40e_aqc_nvm_config_data_feature);
-
-struct i40e_aqc_nvm_config_data_immediate_field {
- __le32 field_id;
- __le32 field_value;
- __le16 field_options;
- __le16 reserved;
-};
-
-I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field);
-
-/* OEM Post Update (indirect 0x0720)
- * no command data struct used
- */
- struct i40e_aqc_nvm_oem_post_update {
-#define I40E_AQ_NVM_OEM_POST_UPDATE_EXTERNAL_DATA 0x01
- u8 sel_data;
- u8 reserved[7];
-};
-
-I40E_CHECK_STRUCT_LEN(0x8, i40e_aqc_nvm_oem_post_update);
-
-struct i40e_aqc_nvm_oem_post_update_buffer {
- u8 str_len;
- u8 dev_addr;
- __le16 eeprom_addr;
- u8 data[36];
-};
-
-I40E_CHECK_STRUCT_LEN(0x28, i40e_aqc_nvm_oem_post_update_buffer);
-
-/* Thermal Sensor (indirect 0x0721)
- * read or set thermal sensor configs and values
- * takes a sensor and command specific data buffer, not detailed here
- */
-struct i40e_aqc_thermal_sensor {
- u8 sensor_action;
-#define I40E_AQ_THERMAL_SENSOR_READ_CONFIG 0
-#define I40E_AQ_THERMAL_SENSOR_SET_CONFIG 1
-#define I40E_AQ_THERMAL_SENSOR_READ_TEMP 2
- u8 reserved[7];
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_thermal_sensor);
-
-/* Send to PF command (indirect 0x0801) id is only used by PF
- * Send to VF command (indirect 0x0802) id is only used by PF
- * Send to Peer PF command (indirect 0x0803)
- */
-struct i40e_aqc_pf_vf_message {
- __le32 id;
- u8 reserved[4];
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message);
-
-/* Alternate structure */
-
-/* Direct write (direct 0x0900)
- * Direct read (direct 0x0902)
- */
-struct i40e_aqc_alternate_write {
- __le32 address0;
- __le32 data0;
- __le32 address1;
- __le32 data1;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write);
-
-/* Indirect write (indirect 0x0901)
- * Indirect read (indirect 0x0903)
- */
-
-struct i40e_aqc_alternate_ind_write {
- __le32 address;
- __le32 length;
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write);
-
-/* Done alternate write (direct 0x0904)
- * uses i40e_aq_desc
- */
-struct i40e_aqc_alternate_write_done {
- __le16 cmd_flags;
-#define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1
-#define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0
-#define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1
-#define I40E_AQ_ALTERNATE_RESET_NEEDED 2
- u8 reserved[14];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done);
-
-/* Set OEM mode (direct 0x0905) */
-struct i40e_aqc_alternate_set_mode {
- __le32 mode;
-#define I40E_AQ_ALTERNATE_MODE_NONE 0
-#define I40E_AQ_ALTERNATE_MODE_OEM 1
- u8 reserved[12];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode);
-
-/* Clear port Alternate RAM (direct 0x0906) uses i40e_aq_desc */
-
-/* async events 0x10xx */
-
-/* Lan Queue Overflow Event (direct, 0x1001) */
-struct i40e_aqc_lan_overflow {
- __le32 prtdcb_rupto;
- __le32 otx_ctl;
- u8 reserved[8];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow);
-
-/* Get LLDP MIB (indirect 0x0A00) */
-struct i40e_aqc_lldp_get_mib {
- u8 type;
- u8 reserved1;
-#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3
-#define I40E_AQ_LLDP_MIB_LOCAL 0x0
-#define I40E_AQ_LLDP_MIB_REMOTE 0x1
-#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2
-#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC
-#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2
-#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0
-#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1
-#define I40E_AQ_LLDP_TX_SHIFT 0x4
-#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT)
-/* TX pause flags use I40E_AQ_LINK_TX_* above */
- __le16 local_len;
- __le16 remote_len;
- u8 reserved2[2];
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib);
-
-/* Configure LLDP MIB Change Event (direct 0x0A01)
- * also used for the event (with type in the command field)
- */
-struct i40e_aqc_lldp_update_mib {
- u8 command;
-#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
-#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1
- u8 reserved[7];
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib);
-
-/* Add LLDP TLV (indirect 0x0A02)
- * Delete LLDP TLV (indirect 0x0A04)
- */
-struct i40e_aqc_lldp_add_tlv {
- u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
- u8 reserved1[1];
- __le16 len;
- u8 reserved2[4];
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv);
-
-/* Update LLDP TLV (indirect 0x0A03) */
-struct i40e_aqc_lldp_update_tlv {
- u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
- u8 reserved;
- __le16 old_len;
- __le16 new_offset;
- __le16 new_len;
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv);
-
-/* Stop LLDP (direct 0x0A05) */
-struct i40e_aqc_lldp_stop {
- u8 command;
-#define I40E_AQ_LLDP_AGENT_STOP 0x0
-#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1
- u8 reserved[15];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop);
-
-/* Start LLDP (direct 0x0A06) */
-
-struct i40e_aqc_lldp_start {
- u8 command;
-#define I40E_AQ_LLDP_AGENT_START 0x1
- u8 reserved[15];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
-
-/* Set DCB (direct 0x0303) */
-struct i40e_aqc_set_dcb_parameters {
- u8 command;
-#define I40E_AQ_DCB_SET_AGENT 0x1
-#define I40E_DCB_VALID 0x1
- u8 valid_flags;
- u8 reserved[14];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_dcb_parameters);
-
-/* Apply MIB changes (0x0A07)
- * uses the generic struc as it contains no data
- */
-
-/* Add Udp Tunnel command and completion (direct 0x0B00) */
-struct i40e_aqc_add_udp_tunnel {
- __le16 udp_port;
- u8 reserved0[3];
- u8 protocol_type;
-#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00
-#define I40E_AQC_TUNNEL_TYPE_NGE 0x01
-#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10
-#define I40E_AQC_TUNNEL_TYPE_VXLAN_GPE 0x11
- u8 reserved1[10];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
-
-struct i40e_aqc_add_udp_tunnel_completion {
- __le16 udp_port;
- u8 filter_entry_index;
- u8 multiple_pfs;
-#define I40E_AQC_SINGLE_PF 0x0
-#define I40E_AQC_MULTIPLE_PFS 0x1
- u8 total_filters;
- u8 reserved[11];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion);
-
-/* remove UDP Tunnel command (0x0B01) */
-struct i40e_aqc_remove_udp_tunnel {
- u8 reserved[2];
- u8 index; /* 0 to 15 */
- u8 reserved2[13];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel);
-
-struct i40e_aqc_del_udp_tunnel_completion {
- __le16 udp_port;
- u8 index; /* 0 to 15 */
- u8 multiple_pfs;
- u8 total_filters_used;
- u8 reserved1[11];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
-
-struct i40e_aqc_get_set_rss_key {
-#define I40E_AQC_SET_RSS_KEY_VSI_VALID BIT(15)
-#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0
-#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \
- I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
- __le16 vsi_id;
- u8 reserved[6];
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key);
-
-struct i40e_aqc_get_set_rss_key_data {
- u8 standard_rss_key[0x28];
- u8 extended_hash_key[0xc];
-};
-
-I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
-
-struct i40e_aqc_get_set_rss_lut {
-#define I40E_AQC_SET_RSS_LUT_VSI_VALID BIT(15)
-#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0
-#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \
- I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
- __le16 vsi_id;
-#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0
-#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK \
- BIT(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
-
-#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0
-#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1
- __le16 flags;
- u8 reserved[4];
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut);
-
-/* tunnel key structure 0x0B10 */
-
-struct i40e_aqc_tunnel_key_structure_A0 {
- __le16 key1_off;
- __le16 key1_len;
- __le16 key2_off;
- __le16 key2_len;
- __le16 flags;
-#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
-/* response flags */
-#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
-#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
-#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
- u8 resreved[6];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure_A0);
-
-struct i40e_aqc_tunnel_key_structure {
- u8 key1_off;
- u8 key2_off;
- u8 key1_len; /* 0 to 15 */
- u8 key2_len; /* 0 to 15 */
- u8 flags;
-#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
-/* response flags */
-#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
-#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
-#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
- u8 network_key_index;
-#define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0
-#define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1
-#define I40E_AQC_NETWORK_KEY_INDEX_FLEX_MAC_IN_UDP 0x2
-#define I40E_AQC_NETWORK_KEY_INDEX_GRE 0x3
- u8 reserved[10];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure);
-
-/* OEM mode commands (direct 0xFE0x) */
-struct i40e_aqc_oem_param_change {
- __le32 param_type;
-#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0
-#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1
-#define I40E_AQ_OEM_PARAM_MAC 2
- __le32 param_value1;
- __le16 param_value2;
- u8 reserved[6];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change);
-
-struct i40e_aqc_oem_state_change {
- __le32 state;
-#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0
-#define I40E_AQ_OEM_STATE_LINK_UP 0x1
- u8 reserved[12];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change);
-
-/* Initialize OCSD (0xFE02, direct) */
-struct i40e_aqc_opc_oem_ocsd_initialize {
- u8 type_status;
- u8 reserved1[3];
- __le32 ocsd_memory_block_addr_high;
- __le32 ocsd_memory_block_addr_low;
- __le32 requested_update_interval;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocsd_initialize);
-
-/* Initialize OCBB (0xFE03, direct) */
-struct i40e_aqc_opc_oem_ocbb_initialize {
- u8 type_status;
- u8 reserved1[3];
- __le32 ocbb_memory_block_addr_high;
- __le32 ocbb_memory_block_addr_low;
- u8 reserved2[4];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocbb_initialize);
-
-/* debug commands */
-
-/* get device id (0xFF00) uses the generic structure */
-
-/* set test more (0xFF01, internal) */
-
-struct i40e_acq_set_test_mode {
- u8 mode;
-#define I40E_AQ_TEST_PARTIAL 0
-#define I40E_AQ_TEST_FULL 1
-#define I40E_AQ_TEST_NVM 2
- u8 reserved[3];
- u8 command;
-#define I40E_AQ_TEST_OPEN 0
-#define I40E_AQ_TEST_CLOSE 1
-#define I40E_AQ_TEST_INC 2
- u8 reserved2[3];
- __le32 address_high;
- __le32 address_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode);
-
-/* Debug Read Register command (0xFF03)
- * Debug Write Register command (0xFF04)
- */
-struct i40e_aqc_debug_reg_read_write {
- __le32 reserved;
- __le32 address;
- __le32 value_high;
- __le32 value_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_reg_read_write);
-
-/* Scatter/gather Reg Read (indirect 0xFF05)
- * Scatter/gather Reg Write (indirect 0xFF06)
- */
-
-/* i40e_aq_desc is used for the command */
-struct i40e_aqc_debug_reg_sg_element_data {
- __le32 address;
- __le32 value;
-};
-
-/* Debug Modify register (direct 0xFF07) */
-struct i40e_aqc_debug_modify_reg {
- __le32 address;
- __le32 value;
- __le32 clear_mask;
- __le32 set_mask;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg);
-
-/* dump internal data (0xFF08, indirect) */
-
-#define I40E_AQ_CLUSTER_ID_AUX 0
-#define I40E_AQ_CLUSTER_ID_SWITCH_FLU 1
-#define I40E_AQ_CLUSTER_ID_TXSCHED 2
-#define I40E_AQ_CLUSTER_ID_HMC 3
-#define I40E_AQ_CLUSTER_ID_MAC0 4
-#define I40E_AQ_CLUSTER_ID_MAC1 5
-#define I40E_AQ_CLUSTER_ID_MAC2 6
-#define I40E_AQ_CLUSTER_ID_MAC3 7
-#define I40E_AQ_CLUSTER_ID_DCB 8
-#define I40E_AQ_CLUSTER_ID_EMP_MEM 9
-#define I40E_AQ_CLUSTER_ID_PKT_BUF 10
-#define I40E_AQ_CLUSTER_ID_ALTRAM 11
-
-struct i40e_aqc_debug_dump_internals {
- u8 cluster_id;
- u8 table_id;
- __le16 data_size;
- __le32 idx;
- __le32 address_high;
- __le32 address_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals);
-
-struct i40e_aqc_debug_modify_internals {
- u8 cluster_id;
- u8 cluster_specific_params[7];
- __le32 address_high;
- __le32 address_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals);
-
-#endif /* _I40E_ADMINQ_CMD_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_alloc.h b/drivers/net/ethernet/intel/i40evf/i40e_alloc.h
deleted file mode 100644
index cb8689222c8b..000000000000
--- a/drivers/net/ethernet/intel/i40evf/i40e_alloc.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-#ifndef _I40E_ALLOC_H_
-#define _I40E_ALLOC_H_
-
-struct i40e_hw;
-
-/* Memory allocation types */
-enum i40e_memory_type {
- i40e_mem_arq_buf = 0, /* ARQ indirect command buffer */
- i40e_mem_asq_buf = 1,
- i40e_mem_atq_buf = 2, /* ATQ indirect command buffer */
- i40e_mem_arq_ring = 3, /* ARQ descriptor ring */
- i40e_mem_atq_ring = 4, /* ATQ descriptor ring */
- i40e_mem_pd = 5, /* Page Descriptor */
- i40e_mem_bp = 6, /* Backing Page - 4KB */
- i40e_mem_bp_jumbo = 7, /* Backing Page - > 4KB */
- i40e_mem_reserved
-};
-
-/* prototype for functions used for dynamic memory allocation */
-i40e_status i40e_allocate_dma_mem(struct i40e_hw *hw,
- struct i40e_dma_mem *mem,
- enum i40e_memory_type type,
- u64 size, u32 alignment);
-i40e_status i40e_free_dma_mem(struct i40e_hw *hw,
- struct i40e_dma_mem *mem);
-i40e_status i40e_allocate_virt_mem(struct i40e_hw *hw,
- struct i40e_virt_mem *mem,
- u32 size);
-i40e_status i40e_free_virt_mem(struct i40e_hw *hw,
- struct i40e_virt_mem *mem);
-
-#endif /* _I40E_ALLOC_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
deleted file mode 100644
index eea280ba411e..000000000000
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ /dev/null
@@ -1,1320 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-#include "i40e_type.h"
-#include "i40e_adminq.h"
-#include "i40e_prototype.h"
-#include
-
-/**
- * i40e_set_mac_type - Sets MAC type
- * @hw: pointer to the HW structure
- *
- * This function sets the mac type of the adapter based on the
- * vendor ID and device ID stored in the hw structure.
- **/
-i40e_status i40e_set_mac_type(struct i40e_hw *hw)
-{
- i40e_status status = 0;
-
- if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
- switch (hw->device_id) {
- case I40E_DEV_ID_SFP_XL710:
- case I40E_DEV_ID_QEMU:
- case I40E_DEV_ID_KX_B:
- case I40E_DEV_ID_KX_C:
- case I40E_DEV_ID_QSFP_A:
- case I40E_DEV_ID_QSFP_B:
- case I40E_DEV_ID_QSFP_C:
- case I40E_DEV_ID_10G_BASE_T:
- case I40E_DEV_ID_10G_BASE_T4:
- case I40E_DEV_ID_20G_KR2:
- case I40E_DEV_ID_20G_KR2_A:
- case I40E_DEV_ID_25G_B:
- case I40E_DEV_ID_25G_SFP28:
- hw->mac.type = I40E_MAC_XL710;
- break;
- case I40E_DEV_ID_SFP_X722:
- case I40E_DEV_ID_1G_BASE_T_X722:
- case I40E_DEV_ID_10G_BASE_T_X722:
- case I40E_DEV_ID_SFP_I_X722:
- hw->mac.type = I40E_MAC_X722;
- break;
- case I40E_DEV_ID_X722_VF:
- hw->mac.type = I40E_MAC_X722_VF;
- break;
- case I40E_DEV_ID_VF:
- case I40E_DEV_ID_VF_HV:
- case I40E_DEV_ID_ADAPTIVE_VF:
- hw->mac.type = I40E_MAC_VF;
- break;
- default:
- hw->mac.type = I40E_MAC_GENERIC;
- break;
- }
- } else {
- status = I40E_ERR_DEVICE_NOT_SUPPORTED;
- }
-
- hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
- hw->mac.type, status);
- return status;
-}
-
-/**
- * i40evf_aq_str - convert AQ err code to a string
- * @hw: pointer to the HW structure
- * @aq_err: the AQ error code to convert
- **/
-const char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
-{
- switch (aq_err) {
- case I40E_AQ_RC_OK:
- return "OK";
- case I40E_AQ_RC_EPERM:
- return "I40E_AQ_RC_EPERM";
- case I40E_AQ_RC_ENOENT:
- return "I40E_AQ_RC_ENOENT";
- case I40E_AQ_RC_ESRCH:
- return "I40E_AQ_RC_ESRCH";
- case I40E_AQ_RC_EINTR:
- return "I40E_AQ_RC_EINTR";
- case I40E_AQ_RC_EIO:
- return "I40E_AQ_RC_EIO";
- case I40E_AQ_RC_ENXIO:
- return "I40E_AQ_RC_ENXIO";
- case I40E_AQ_RC_E2BIG:
- return "I40E_AQ_RC_E2BIG";
- case I40E_AQ_RC_EAGAIN:
- return "I40E_AQ_RC_EAGAIN";
- case I40E_AQ_RC_ENOMEM:
- return "I40E_AQ_RC_ENOMEM";
- case I40E_AQ_RC_EACCES:
- return "I40E_AQ_RC_EACCES";
- case I40E_AQ_RC_EFAULT:
- return "I40E_AQ_RC_EFAULT";
- case I40E_AQ_RC_EBUSY:
- return "I40E_AQ_RC_EBUSY";
- case I40E_AQ_RC_EEXIST:
- return "I40E_AQ_RC_EEXIST";
- case I40E_AQ_RC_EINVAL:
- return "I40E_AQ_RC_EINVAL";
- case I40E_AQ_RC_ENOTTY:
- return "I40E_AQ_RC_ENOTTY";
- case I40E_AQ_RC_ENOSPC:
- return "I40E_AQ_RC_ENOSPC";
- case I40E_AQ_RC_ENOSYS:
- return "I40E_AQ_RC_ENOSYS";
- case I40E_AQ_RC_ERANGE:
- return "I40E_AQ_RC_ERANGE";
- case I40E_AQ_RC_EFLUSHED:
- return "I40E_AQ_RC_EFLUSHED";
- case I40E_AQ_RC_BAD_ADDR:
- return "I40E_AQ_RC_BAD_ADDR";
- case I40E_AQ_RC_EMODE:
- return "I40E_AQ_RC_EMODE";
- case I40E_AQ_RC_EFBIG:
- return "I40E_AQ_RC_EFBIG";
- }
-
- snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
- return hw->err_str;
-}
-
-/**
- * i40evf_stat_str - convert status err code to a string
- * @hw: pointer to the HW structure
- * @stat_err: the status error code to convert
- **/
-const char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err)
-{
- switch (stat_err) {
- case 0:
- return "OK";
- case I40E_ERR_NVM:
- return "I40E_ERR_NVM";
- case I40E_ERR_NVM_CHECKSUM:
- return "I40E_ERR_NVM_CHECKSUM";
- case I40E_ERR_PHY:
- return "I40E_ERR_PHY";
- case I40E_ERR_CONFIG:
- return "I40E_ERR_CONFIG";
- case I40E_ERR_PARAM:
- return "I40E_ERR_PARAM";
- case I40E_ERR_MAC_TYPE:
- return "I40E_ERR_MAC_TYPE";
- case I40E_ERR_UNKNOWN_PHY:
- return "I40E_ERR_UNKNOWN_PHY";
- case I40E_ERR_LINK_SETUP:
- return "I40E_ERR_LINK_SETUP";
- case I40E_ERR_ADAPTER_STOPPED:
- return "I40E_ERR_ADAPTER_STOPPED";
- case I40E_ERR_INVALID_MAC_ADDR:
- return "I40E_ERR_INVALID_MAC_ADDR";
- case I40E_ERR_DEVICE_NOT_SUPPORTED:
- return "I40E_ERR_DEVICE_NOT_SUPPORTED";
- case I40E_ERR_MASTER_REQUESTS_PENDING:
- return "I40E_ERR_MASTER_REQUESTS_PENDING";
- case I40E_ERR_INVALID_LINK_SETTINGS:
- return "I40E_ERR_INVALID_LINK_SETTINGS";
- case I40E_ERR_AUTONEG_NOT_COMPLETE:
- return "I40E_ERR_AUTONEG_NOT_COMPLETE";
- case I40E_ERR_RESET_FAILED:
- return "I40E_ERR_RESET_FAILED";
- case I40E_ERR_SWFW_SYNC:
- return "I40E_ERR_SWFW_SYNC";
- case I40E_ERR_NO_AVAILABLE_VSI:
- return "I40E_ERR_NO_AVAILABLE_VSI";
- case I40E_ERR_NO_MEMORY:
- return "I40E_ERR_NO_MEMORY";
- case I40E_ERR_BAD_PTR:
- return "I40E_ERR_BAD_PTR";
- case I40E_ERR_RING_FULL:
- return "I40E_ERR_RING_FULL";
- case I40E_ERR_INVALID_PD_ID:
- return "I40E_ERR_INVALID_PD_ID";
- case I40E_ERR_INVALID_QP_ID:
- return "I40E_ERR_INVALID_QP_ID";
- case I40E_ERR_INVALID_CQ_ID:
- return "I40E_ERR_INVALID_CQ_ID";
- case I40E_ERR_INVALID_CEQ_ID:
- return "I40E_ERR_INVALID_CEQ_ID";
- case I40E_ERR_INVALID_AEQ_ID:
- return "I40E_ERR_INVALID_AEQ_ID";
- case I40E_ERR_INVALID_SIZE:
- return "I40E_ERR_INVALID_SIZE";
- case I40E_ERR_INVALID_ARP_INDEX:
- return "I40E_ERR_INVALID_ARP_INDEX";
- case I40E_ERR_INVALID_FPM_FUNC_ID:
- return "I40E_ERR_INVALID_FPM_FUNC_ID";
- case I40E_ERR_QP_INVALID_MSG_SIZE:
- return "I40E_ERR_QP_INVALID_MSG_SIZE";
- case I40E_ERR_QP_TOOMANY_WRS_POSTED:
- return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
- case I40E_ERR_INVALID_FRAG_COUNT:
- return "I40E_ERR_INVALID_FRAG_COUNT";
- case I40E_ERR_QUEUE_EMPTY:
- return "I40E_ERR_QUEUE_EMPTY";
- case I40E_ERR_INVALID_ALIGNMENT:
- return "I40E_ERR_INVALID_ALIGNMENT";
- case I40E_ERR_FLUSHED_QUEUE:
- return "I40E_ERR_FLUSHED_QUEUE";
- case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
- return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
- case I40E_ERR_INVALID_IMM_DATA_SIZE:
- return "I40E_ERR_INVALID_IMM_DATA_SIZE";
- case I40E_ERR_TIMEOUT:
- return "I40E_ERR_TIMEOUT";
- case I40E_ERR_OPCODE_MISMATCH:
- return "I40E_ERR_OPCODE_MISMATCH";
- case I40E_ERR_CQP_COMPL_ERROR:
- return "I40E_ERR_CQP_COMPL_ERROR";
- case I40E_ERR_INVALID_VF_ID:
- return "I40E_ERR_INVALID_VF_ID";
- case I40E_ERR_INVALID_HMCFN_ID:
- return "I40E_ERR_INVALID_HMCFN_ID";
- case I40E_ERR_BACKING_PAGE_ERROR:
- return "I40E_ERR_BACKING_PAGE_ERROR";
- case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
- return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
- case I40E_ERR_INVALID_PBLE_INDEX:
- return "I40E_ERR_INVALID_PBLE_INDEX";
- case I40E_ERR_INVALID_SD_INDEX:
- return "I40E_ERR_INVALID_SD_INDEX";
- case I40E_ERR_INVALID_PAGE_DESC_INDEX:
- return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
- case I40E_ERR_INVALID_SD_TYPE:
- return "I40E_ERR_INVALID_SD_TYPE";
- case I40E_ERR_MEMCPY_FAILED:
- return "I40E_ERR_MEMCPY_FAILED";
- case I40E_ERR_INVALID_HMC_OBJ_INDEX:
- return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
- case I40E_ERR_INVALID_HMC_OBJ_COUNT:
- return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
- case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
- return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
- case I40E_ERR_SRQ_ENABLED:
- return "I40E_ERR_SRQ_ENABLED";
- case I40E_ERR_ADMIN_QUEUE_ERROR:
- return "I40E_ERR_ADMIN_QUEUE_ERROR";
- case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
- return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
- case I40E_ERR_BUF_TOO_SHORT:
- return "I40E_ERR_BUF_TOO_SHORT";
- case I40E_ERR_ADMIN_QUEUE_FULL:
- return "I40E_ERR_ADMIN_QUEUE_FULL";
- case I40E_ERR_ADMIN_QUEUE_NO_WORK:
- return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
- case I40E_ERR_BAD_IWARP_CQE:
- return "I40E_ERR_BAD_IWARP_CQE";
- case I40E_ERR_NVM_BLANK_MODE:
- return "I40E_ERR_NVM_BLANK_MODE";
- case I40E_ERR_NOT_IMPLEMENTED:
- return "I40E_ERR_NOT_IMPLEMENTED";
- case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
- return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
- case I40E_ERR_DIAG_TEST_FAILED:
- return "I40E_ERR_DIAG_TEST_FAILED";
- case I40E_ERR_NOT_READY:
- return "I40E_ERR_NOT_READY";
- case I40E_NOT_SUPPORTED:
- return "I40E_NOT_SUPPORTED";
- case I40E_ERR_FIRMWARE_API_VERSION:
- return "I40E_ERR_FIRMWARE_API_VERSION";
- case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
- return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
- }
-
- snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
- return hw->err_str;
-}
-
-/**
- * i40evf_debug_aq
- * @hw: debug mask related to admin queue
- * @mask: debug mask
- * @desc: pointer to admin queue descriptor
- * @buffer: pointer to command buffer
- * @buf_len: max length of buffer
- *
- * Dumps debug log about adminq command with descriptor contents.
- **/ -void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, - void *buffer, u16 buf_len) -{ - struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc; - u8 *buf = (u8 *)buffer; - - if ((!(mask & hw->debug_mask)) || (desc == NULL)) - return; - - i40e_debug(hw, mask, - "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", - le16_to_cpu(aq_desc->opcode), - le16_to_cpu(aq_desc->flags), - le16_to_cpu(aq_desc->datalen), - le16_to_cpu(aq_desc->retval)); - i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n", - le32_to_cpu(aq_desc->cookie_high), - le32_to_cpu(aq_desc->cookie_low)); - i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n", - le32_to_cpu(aq_desc->params.internal.param0), - le32_to_cpu(aq_desc->params.internal.param1)); - i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n", - le32_to_cpu(aq_desc->params.external.addr_high), - le32_to_cpu(aq_desc->params.external.addr_low)); - - if ((buffer != NULL) && (aq_desc->datalen != 0)) { - u16 len = le16_to_cpu(aq_desc->datalen); - - i40e_debug(hw, mask, "AQ CMD Buffer:\n"); - if (buf_len < len) - len = buf_len; - /* write the full 16-byte chunks */ - if (hw->debug_mask & mask) { - char prefix[27]; - - snprintf(prefix, sizeof(prefix), - "i40evf %02x:%02x.%x: \t0x", - hw->bus.bus_id, - hw->bus.device, - hw->bus.func); - - print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, - 16, 1, buf, len, false); - } - } -} - -/** - * i40evf_check_asq_alive - * @hw: pointer to the hw struct - * - * Returns true if Queue is enabled else false. - **/ -bool i40evf_check_asq_alive(struct i40e_hw *hw) -{ - if (hw->aq.asq.len) - return !!(rd32(hw, hw->aq.asq.len) & - I40E_VF_ATQLEN1_ATQENABLE_MASK); - else - return false; -} - -/** - * i40evf_aq_queue_shutdown - * @hw: pointer to the hw struct - * @unloading: is the driver unloading itself - * - * Tell the Firmware that we're shutting down the AdminQ and whether - * or not the driver is unloading as well. 
- **/ -i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, - bool unloading) -{ - struct i40e_aq_desc desc; - struct i40e_aqc_queue_shutdown *cmd = - (struct i40e_aqc_queue_shutdown *)&desc.params.raw; - i40e_status status; - - i40evf_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_queue_shutdown); - - if (unloading) - cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING); - status = i40evf_asq_send_command(hw, &desc, NULL, 0, NULL); - - return status; -} - -/** - * i40e_aq_get_set_rss_lut - * @hw: pointer to the hardware structure - * @vsi_id: vsi fw index - * @pf_lut: for PF table set true, for VSI table set false - * @lut: pointer to the lut buffer provided by the caller - * @lut_size: size of the lut buffer - * @set: set true to set the table, false to get the table - * - * Internal function to get or set RSS look up table - **/ -static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw, - u16 vsi_id, bool pf_lut, - u8 *lut, u16 lut_size, - bool set) -{ - i40e_status status; - struct i40e_aq_desc desc; - struct i40e_aqc_get_set_rss_lut *cmd_resp = - (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw; - - if (set) - i40evf_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_set_rss_lut); - else - i40evf_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_get_rss_lut); - - /* Indirect command */ - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); - - cmd_resp->vsi_id = - cpu_to_le16((u16)((vsi_id << - I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) & - I40E_AQC_SET_RSS_LUT_VSI_ID_MASK)); - cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID); - - if (pf_lut) - cmd_resp->flags |= cpu_to_le16((u16) - ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF << - I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & - I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); - else - cmd_resp->flags |= cpu_to_le16((u16) - ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI << - I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & - I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); - - status = i40evf_asq_send_command(hw, &desc, lut, lut_size, NULL); - - return status; -} - -/** - * i40evf_aq_get_rss_lut - * @hw: pointer to the hardware structure - * @vsi_id: vsi fw index - * @pf_lut: for PF table set true, for VSI table set false - * @lut: pointer to the lut buffer provided by the caller - * @lut_size: size of the lut buffer - * - * get the RSS lookup table, PF or VSI type - **/ -i40e_status i40evf_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id, - bool pf_lut, u8 *lut, u16 lut_size) -{ - return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, - false); -} - -/** - * i40evf_aq_set_rss_lut - * @hw: pointer to the hardware structure - * @vsi_id: vsi fw index - * @pf_lut: for PF table set true, for VSI table set false - * @lut: pointer to the lut buffer provided by the caller - * @lut_size: size of the lut buffer - * - * set the RSS lookup table, PF or VSI type - **/ -i40e_status i40evf_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id, - bool pf_lut, u8 *lut, u16 lut_size) -{ - return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true); -} - -/** - * i40e_aq_get_set_rss_key - * @hw: pointer to the hw struct - * @vsi_id: vsi fw index - * @key: pointer to key info struct - * @set: set true to set the key, false to get the key - * - * get the RSS key per VSI - **/ -static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw, - u16 vsi_id, - struct i40e_aqc_get_set_rss_key_data *key, - bool set) -{ - i40e_status status; - struct i40e_aq_desc desc; - struct i40e_aqc_get_set_rss_key *cmd_resp = 
- (struct i40e_aqc_get_set_rss_key *)&desc.params.raw; - u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data); - - if (set) - i40evf_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_set_rss_key); - else - i40evf_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_get_rss_key); - - /* Indirect command */ - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); - - cmd_resp->vsi_id = - cpu_to_le16((u16)((vsi_id << - I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) & - I40E_AQC_SET_RSS_KEY_VSI_ID_MASK)); - cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID); - - status = i40evf_asq_send_command(hw, &desc, key, key_size, NULL); - - return status; -} - -/** - * i40evf_aq_get_rss_key - * @hw: pointer to the hw struct - * @vsi_id: vsi fw index - * @key: pointer to key info struct - * - **/ -i40e_status i40evf_aq_get_rss_key(struct i40e_hw *hw, - u16 vsi_id, - struct i40e_aqc_get_set_rss_key_data *key) -{ - return i40e_aq_get_set_rss_key(hw, vsi_id, key, false); -} - -/** - * i40evf_aq_set_rss_key - * @hw: pointer to the hw struct - * @vsi_id: vsi fw index - * @key: pointer to key info struct - * - * set the RSS key per VSI - **/ -i40e_status i40evf_aq_set_rss_key(struct i40e_hw *hw, - u16 vsi_id, - struct i40e_aqc_get_set_rss_key_data *key) -{ - return i40e_aq_get_set_rss_key(hw, vsi_id, key, true); -} - - -/* The i40evf_ptype_lookup table is used to convert from the 8-bit ptype in the - * hardware to a bit-field that can be used by SW to more easily determine the - * packet type. - * - * Macros are used to shorten the table lines and make this table human - * readable. - * - * We store the PTYPE in the top byte of the bit field - this is just so that - * we can check that the table doesn't have a row missing, as the index into - * the table should be the PTYPE. 
- * - * Typical work flow: - * - * IF NOT i40evf_ptype_lookup[ptype].known - * THEN - * Packet is unknown - * ELSE IF i40evf_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP - * Use the rest of the fields to look at the tunnels, inner protocols, etc - * ELSE - * Use the enum i40e_rx_l2_ptype to decode the packet type - * ENDIF - */ - -/* macro to make the table lines short */ -#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ - { PTYPE, \ - 1, \ - I40E_RX_PTYPE_OUTER_##OUTER_IP, \ - I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \ - I40E_RX_PTYPE_##OUTER_FRAG, \ - I40E_RX_PTYPE_TUNNEL_##T, \ - I40E_RX_PTYPE_TUNNEL_END_##TE, \ - I40E_RX_PTYPE_##TEF, \ - I40E_RX_PTYPE_INNER_PROT_##I, \ - I40E_RX_PTYPE_PAYLOAD_LAYER_##PL } - -#define I40E_PTT_UNUSED_ENTRY(PTYPE) \ - { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 } - -/* shorter macros makes the table fit but are terse */ -#define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG -#define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG -#define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC - -/* Lookup table mapping the HW PTYPE to the bit field for decoding */ -struct i40e_rx_ptype_decoded i40evf_ptype_lookup[] = { - /* L2 Packet types */ - I40E_PTT_UNUSED_ENTRY(0), - I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), - I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2), - I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), - I40E_PTT_UNUSED_ENTRY(4), - I40E_PTT_UNUSED_ENTRY(5), - I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), - I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), - I40E_PTT_UNUSED_ENTRY(8), - I40E_PTT_UNUSED_ENTRY(9), - I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), - I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), - I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - - /* Non Tunneled IPv4 */ - I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(25), - I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), - I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), - I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), - - /* IPv4 --> IPv4 */ - I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), - I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), - I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(32), - I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), - I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), - I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), - - /* IPv4 --> IPv6 */ - I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), - I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), - I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(39), - I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), - I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), 
- I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT */ - I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), - - /* IPv4 --> GRE/NAT --> IPv4 */ - I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), - I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), - I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(47), - I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), - I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), - I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT --> IPv6 */ - I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), - I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), - I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(54), - I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), - I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), - I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT --> MAC */ - I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), - - /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ - I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), - I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), - I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(62), - I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), - I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), - I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ - I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), - I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), - I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(69), - I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), - I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), - I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT --> MAC/VLAN */ - I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), - - /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ - I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), - I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), - I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(77), - I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), - I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), - I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), - - /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ - I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), - I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), - I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(84), - I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), - I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), - I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), - - /* Non Tunneled IPv6 */ - I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3), - I40E_PTT_UNUSED_ENTRY(91), - I40E_PTT(92, IP, IPV6, NOF, 
NONE, NONE, NOF, TCP, PAY4), - I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), - I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), - - /* IPv6 --> IPv4 */ - I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), - I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), - I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(98), - I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), - I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), - I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), - - /* IPv6 --> IPv6 */ - I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), - I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), - I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(105), - I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), - I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), - I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT */ - I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), - - /* IPv6 --> GRE/NAT -> IPv4 */ - I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), - I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), - I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(113), - I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), - I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), - I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> IPv6 */ - I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), - I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), - I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(120), - I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), - I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), - I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> MAC */ - I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), - - /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ - I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), - I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), - I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(128), - I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), - I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), - I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> MAC -> IPv6 */ - I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), - I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), - I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(135), - I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), - I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), - I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> MAC/VLAN */ - I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), - - /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ - I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), - I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), - I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(143), - I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, 
IPV4, NOF, TCP, PAY4), - I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), - I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ - I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), - I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), - I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(150), - I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), - I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), - I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), - - /* unused entries */ - I40E_PTT_UNUSED_ENTRY(154), - I40E_PTT_UNUSED_ENTRY(155), - I40E_PTT_UNUSED_ENTRY(156), - I40E_PTT_UNUSED_ENTRY(157), - I40E_PTT_UNUSED_ENTRY(158), - I40E_PTT_UNUSED_ENTRY(159), - - I40E_PTT_UNUSED_ENTRY(160), - I40E_PTT_UNUSED_ENTRY(161), - I40E_PTT_UNUSED_ENTRY(162), - I40E_PTT_UNUSED_ENTRY(163), - I40E_PTT_UNUSED_ENTRY(164), - I40E_PTT_UNUSED_ENTRY(165), - I40E_PTT_UNUSED_ENTRY(166), - I40E_PTT_UNUSED_ENTRY(167), - I40E_PTT_UNUSED_ENTRY(168), - I40E_PTT_UNUSED_ENTRY(169), - - I40E_PTT_UNUSED_ENTRY(170), - I40E_PTT_UNUSED_ENTRY(171), - I40E_PTT_UNUSED_ENTRY(172), - I40E_PTT_UNUSED_ENTRY(173), - I40E_PTT_UNUSED_ENTRY(174), - I40E_PTT_UNUSED_ENTRY(175), - I40E_PTT_UNUSED_ENTRY(176), - I40E_PTT_UNUSED_ENTRY(177), - I40E_PTT_UNUSED_ENTRY(178), - I40E_PTT_UNUSED_ENTRY(179), - - I40E_PTT_UNUSED_ENTRY(180), - I40E_PTT_UNUSED_ENTRY(181), - I40E_PTT_UNUSED_ENTRY(182), - I40E_PTT_UNUSED_ENTRY(183), - I40E_PTT_UNUSED_ENTRY(184), - I40E_PTT_UNUSED_ENTRY(185), - I40E_PTT_UNUSED_ENTRY(186), - I40E_PTT_UNUSED_ENTRY(187), - I40E_PTT_UNUSED_ENTRY(188), - I40E_PTT_UNUSED_ENTRY(189), - - I40E_PTT_UNUSED_ENTRY(190), - I40E_PTT_UNUSED_ENTRY(191), - I40E_PTT_UNUSED_ENTRY(192), - I40E_PTT_UNUSED_ENTRY(193), - I40E_PTT_UNUSED_ENTRY(194), - I40E_PTT_UNUSED_ENTRY(195), - I40E_PTT_UNUSED_ENTRY(196), - I40E_PTT_UNUSED_ENTRY(197), - I40E_PTT_UNUSED_ENTRY(198), - I40E_PTT_UNUSED_ENTRY(199), - - I40E_PTT_UNUSED_ENTRY(200), - I40E_PTT_UNUSED_ENTRY(201), - I40E_PTT_UNUSED_ENTRY(202), - I40E_PTT_UNUSED_ENTRY(203), - I40E_PTT_UNUSED_ENTRY(204), - I40E_PTT_UNUSED_ENTRY(205), - I40E_PTT_UNUSED_ENTRY(206), - I40E_PTT_UNUSED_ENTRY(207), - I40E_PTT_UNUSED_ENTRY(208), - I40E_PTT_UNUSED_ENTRY(209), - - I40E_PTT_UNUSED_ENTRY(210), - I40E_PTT_UNUSED_ENTRY(211), - I40E_PTT_UNUSED_ENTRY(212), - I40E_PTT_UNUSED_ENTRY(213), - I40E_PTT_UNUSED_ENTRY(214), - I40E_PTT_UNUSED_ENTRY(215), - I40E_PTT_UNUSED_ENTRY(216), - I40E_PTT_UNUSED_ENTRY(217), - I40E_PTT_UNUSED_ENTRY(218), - I40E_PTT_UNUSED_ENTRY(219), - - I40E_PTT_UNUSED_ENTRY(220), - I40E_PTT_UNUSED_ENTRY(221), - I40E_PTT_UNUSED_ENTRY(222), - I40E_PTT_UNUSED_ENTRY(223), - I40E_PTT_UNUSED_ENTRY(224), - I40E_PTT_UNUSED_ENTRY(225), - I40E_PTT_UNUSED_ENTRY(226), - I40E_PTT_UNUSED_ENTRY(227), - I40E_PTT_UNUSED_ENTRY(228), - I40E_PTT_UNUSED_ENTRY(229), - - I40E_PTT_UNUSED_ENTRY(230), - I40E_PTT_UNUSED_ENTRY(231), - I40E_PTT_UNUSED_ENTRY(232), - I40E_PTT_UNUSED_ENTRY(233), - I40E_PTT_UNUSED_ENTRY(234), - I40E_PTT_UNUSED_ENTRY(235), - I40E_PTT_UNUSED_ENTRY(236), - I40E_PTT_UNUSED_ENTRY(237), - I40E_PTT_UNUSED_ENTRY(238), - I40E_PTT_UNUSED_ENTRY(239), - - I40E_PTT_UNUSED_ENTRY(240), - I40E_PTT_UNUSED_ENTRY(241), - I40E_PTT_UNUSED_ENTRY(242), - I40E_PTT_UNUSED_ENTRY(243), - I40E_PTT_UNUSED_ENTRY(244), - I40E_PTT_UNUSED_ENTRY(245), - I40E_PTT_UNUSED_ENTRY(246), - 
I40E_PTT_UNUSED_ENTRY(247), - I40E_PTT_UNUSED_ENTRY(248), - I40E_PTT_UNUSED_ENTRY(249), - - I40E_PTT_UNUSED_ENTRY(250), - I40E_PTT_UNUSED_ENTRY(251), - I40E_PTT_UNUSED_ENTRY(252), - I40E_PTT_UNUSED_ENTRY(253), - I40E_PTT_UNUSED_ENTRY(254), - I40E_PTT_UNUSED_ENTRY(255) -}; - -/** - * i40evf_aq_rx_ctl_read_register - use FW to read from an Rx control register - * @hw: pointer to the hw struct - * @reg_addr: register address - * @reg_val: ptr to register value - * @cmd_details: pointer to command details structure or NULL - * - * Use the firmware to read the Rx control register, - * especially useful if the Rx unit is under heavy pressure - **/ -i40e_status i40evf_aq_rx_ctl_read_register(struct i40e_hw *hw, - u32 reg_addr, u32 *reg_val, - struct i40e_asq_cmd_details *cmd_details) -{ - struct i40e_aq_desc desc; - struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp = - (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; - i40e_status status; - - if (!reg_val) - return I40E_ERR_PARAM; - - i40evf_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_rx_ctl_reg_read); - - cmd_resp->address = cpu_to_le32(reg_addr); - - status = i40evf_asq_send_command(hw, &desc, NULL, 0, cmd_details); - - if (status == 0) - *reg_val = le32_to_cpu(cmd_resp->value); - - return status; -} - -/** - * i40evf_read_rx_ctl - read from an Rx control register - * @hw: pointer to the hw struct - * @reg_addr: register address - **/ -u32 i40evf_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr) -{ - i40e_status status = 0; - bool use_register; - int retry = 5; - u32 val = 0; - - use_register = (((hw->aq.api_maj_ver == 1) && - (hw->aq.api_min_ver < 5)) || - (hw->mac.type == I40E_MAC_X722)); - if (!use_register) { -do_retry: - status = i40evf_aq_rx_ctl_read_register(hw, reg_addr, - &val, NULL); - if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) { - usleep_range(1000, 2000); - retry--; - goto do_retry; - } - } - - /* if the AQ access failed, try the old-fashioned way */ - if (status || use_register) - val = rd32(hw, reg_addr); - - return val; -} - -/** - * i40evf_aq_rx_ctl_write_register - * @hw: pointer to the hw struct - * @reg_addr: register address - * @reg_val: register value - * @cmd_details: pointer to command details structure or NULL - * - * Use the firmware to write to an Rx control register, - * especially useful if the Rx unit is under heavy pressure - **/ -i40e_status i40evf_aq_rx_ctl_write_register(struct i40e_hw *hw, - u32 reg_addr, u32 reg_val, - struct i40e_asq_cmd_details *cmd_details) -{ - struct i40e_aq_desc desc; - struct i40e_aqc_rx_ctl_reg_read_write *cmd = - (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; - i40e_status status; - - i40evf_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_rx_ctl_reg_write); - - cmd->address = cpu_to_le32(reg_addr); - cmd->value = cpu_to_le32(reg_val); - - status = i40evf_asq_send_command(hw, &desc, NULL, 0, cmd_details); - - return status; -} - -/** - * i40evf_write_rx_ctl - write to an Rx control register - * @hw: pointer to the hw struct - * @reg_addr: register address - * @reg_val: register value - **/ -void i40evf_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val) -{ - i40e_status status = 0; - bool use_register; - int retry = 5; - - use_register = (((hw->aq.api_maj_ver == 1) && - (hw->aq.api_min_ver < 5)) || - (hw->mac.type == I40E_MAC_X722)); - if (!use_register) { -do_retry: - status = i40evf_aq_rx_ctl_write_register(hw, reg_addr, - reg_val, NULL); - if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) { - usleep_range(1000, 2000); - 
retry--; - goto do_retry; - } - } - - /* if the AQ access failed, try the old-fashioned way */ - if (status || use_register) - wr32(hw, reg_addr, reg_val); -} - -/** - * i40e_aq_send_msg_to_pf - * @hw: pointer to the hardware structure - * @v_opcode: opcodes for VF-PF communication - * @v_retval: return error code - * @msg: pointer to the msg buffer - * @msglen: msg length - * @cmd_details: pointer to command details - * - * Send message to PF driver using admin queue. By default, this message - * is sent asynchronously, i.e. i40evf_asq_send_command() does not wait for - * completion before returning. - **/ -i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw, - enum virtchnl_ops v_opcode, - i40e_status v_retval, - u8 *msg, u16 msglen, - struct i40e_asq_cmd_details *cmd_details) -{ - struct i40e_aq_desc desc; - struct i40e_asq_cmd_details details; - i40e_status status; - - i40evf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_pf); - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI); - desc.cookie_high = cpu_to_le32(v_opcode); - desc.cookie_low = cpu_to_le32(v_retval); - if (msglen) { - desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF - | I40E_AQ_FLAG_RD)); - if (msglen > I40E_AQ_LARGE_BUF) - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); - desc.datalen = cpu_to_le16(msglen); - } - if (!cmd_details) { - memset(&details, 0, sizeof(details)); - details.async = true; - cmd_details = &details; - } - status = i40evf_asq_send_command(hw, &desc, msg, msglen, cmd_details); - return status; -} - -/** - * i40e_vf_parse_hw_config - * @hw: pointer to the hardware structure - * @msg: pointer to the virtual channel VF resource structure - * - * Given a VF resource message from the PF, populate the hw struct - * with appropriate information. - **/ -void i40e_vf_parse_hw_config(struct i40e_hw *hw, - struct virtchnl_vf_resource *msg) -{ - struct virtchnl_vsi_resource *vsi_res; - int i; - - vsi_res = &msg->vsi_res[0]; - - hw->dev_caps.num_vsis = msg->num_vsis; - hw->dev_caps.num_rx_qp = msg->num_queue_pairs; - hw->dev_caps.num_tx_qp = msg->num_queue_pairs; - hw->dev_caps.num_msix_vectors_vf = msg->max_vectors; - hw->dev_caps.dcb = msg->vf_cap_flags & - VIRTCHNL_VF_OFFLOAD_L2; - hw->dev_caps.fcoe = 0; - for (i = 0; i < msg->num_vsis; i++) { - if (vsi_res->vsi_type == VIRTCHNL_VSI_SRIOV) { - ether_addr_copy(hw->mac.perm_addr, - vsi_res->default_mac_addr); - ether_addr_copy(hw->mac.addr, - vsi_res->default_mac_addr); - } - vsi_res++; - } -} - -/** - * i40e_vf_reset - * @hw: pointer to the hardware structure - * - * Send a VF_RESET message to the PF. Does not wait for response from PF - * as none will be forthcoming. Immediately after calling this function, - * the admin queue should be shut down and (optionally) reinitialized. 
- **/ -i40e_status i40e_vf_reset(struct i40e_hw *hw) -{ - return i40e_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF, - 0, NULL, 0, NULL); -} - -/** - * i40evf_aq_write_ddp - Write dynamic device personalization (ddp) - * @hw: pointer to the hw struct - * @buff: command buffer (size in bytes = buff_size) - * @buff_size: buffer size in bytes - * @track_id: package tracking id - * @error_offset: returns error offset - * @error_info: returns error information - * @cmd_details: pointer to command details structure or NULL - **/ -enum -i40e_status_code i40evf_aq_write_ddp(struct i40e_hw *hw, void *buff, - u16 buff_size, u32 track_id, - u32 *error_offset, u32 *error_info, - struct i40e_asq_cmd_details *cmd_details) -{ - struct i40e_aq_desc desc; - struct i40e_aqc_write_personalization_profile *cmd = - (struct i40e_aqc_write_personalization_profile *) - &desc.params.raw; - struct i40e_aqc_write_ddp_resp *resp; - i40e_status status; - - i40evf_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_write_personalization_profile); - - desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD); - if (buff_size > I40E_AQ_LARGE_BUF) - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); - - desc.datalen = cpu_to_le16(buff_size); - - cmd->profile_track_id = cpu_to_le32(track_id); - - status = i40evf_asq_send_command(hw, &desc, buff, buff_size, cmd_details); - if (!status) { - resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw; - if (error_offset) - *error_offset = le32_to_cpu(resp->error_offset); - if (error_info) - *error_info = le32_to_cpu(resp->error_info); - } - - return status; -} - -/** - * i40evf_aq_get_ddp_list - Read dynamic device personalization (ddp) - * @hw: pointer to the hw struct - * @buff: command buffer (size in bytes = buff_size) - * @buff_size: buffer size in bytes - * @flags: AdminQ command flags - * @cmd_details: pointer to command details structure or NULL - **/ -enum -i40e_status_code i40evf_aq_get_ddp_list(struct i40e_hw *hw, void *buff, - u16 buff_size, u8 flags, - struct i40e_asq_cmd_details *cmd_details) -{ - struct i40e_aq_desc desc; - struct i40e_aqc_get_applied_profiles *cmd = - (struct i40e_aqc_get_applied_profiles *)&desc.params.raw; - i40e_status status; - - i40evf_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_get_personalization_profile_list); - - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); - if (buff_size > I40E_AQ_LARGE_BUF) - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); - desc.datalen = cpu_to_le16(buff_size); - - cmd->flags = flags; - - status = i40evf_asq_send_command(hw, &desc, buff, buff_size, cmd_details); - - return status; -} - -/** - * i40evf_find_segment_in_package - * @segment_type: the segment type to search for (i.e., SEGMENT_TYPE_I40E) - * @pkg_hdr: pointer to the package header to be searched - * - * This function searches a package file for a particular segment type. On - * success it returns a pointer to the segment header, otherwise it will - * return NULL. 
- **/ -struct i40e_generic_seg_header * -i40evf_find_segment_in_package(u32 segment_type, - struct i40e_package_header *pkg_hdr) -{ - struct i40e_generic_seg_header *segment; - u32 i; - - /* Search all package segments for the requested segment type */ - for (i = 0; i < pkg_hdr->segment_count; i++) { - segment = - (struct i40e_generic_seg_header *)((u8 *)pkg_hdr + - pkg_hdr->segment_offset[i]); - - if (segment->type == segment_type) - return segment; - } - - return NULL; -} - -/** - * i40evf_write_profile - * @hw: pointer to the hardware structure - * @profile: pointer to the profile segment of the package to be downloaded - * @track_id: package tracking id - * - * Handles the download of a complete package. - */ -enum i40e_status_code -i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, - u32 track_id) -{ - i40e_status status = 0; - struct i40e_section_table *sec_tbl; - struct i40e_profile_section_header *sec = NULL; - u32 dev_cnt; - u32 vendor_dev_id; - u32 *nvm; - u32 section_size = 0; - u32 offset = 0, info = 0; - u32 i; - - dev_cnt = profile->device_table_count; - - for (i = 0; i < dev_cnt; i++) { - vendor_dev_id = profile->device_table[i].vendor_dev_id; - if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL) - if (hw->device_id == (vendor_dev_id & 0xFFFF)) - break; - } - if (i == dev_cnt) { - i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support DDP"); - return I40E_ERR_DEVICE_NOT_SUPPORTED; - } - - nvm = (u32 *)&profile->device_table[dev_cnt]; - sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; - - for (i = 0; i < sec_tbl->section_count; i++) { - sec = (struct i40e_profile_section_header *)((u8 *)profile + - sec_tbl->section_offset[i]); - - /* Skip 'AQ', 'note' and 'name' sections */ - if (sec->section.type != SECTION_TYPE_MMIO) - continue; - - section_size = sec->section.size + - sizeof(struct i40e_profile_section_header); - - /* Write profile */ - status = i40evf_aq_write_ddp(hw, (void *)sec, (u16)section_size, - track_id, &offset, &info, NULL); - if (status) { - i40e_debug(hw, I40E_DEBUG_PACKAGE, - "Failed to write profile: offset %d, info %d", - offset, info); - break; - } - } - return status; -} - -/** - * i40evf_add_pinfo_to_list - * @hw: pointer to the hardware structure - * @profile: pointer to the profile segment of the package - * @profile_info_sec: buffer for information section - * @track_id: package tracking id - * - * Register a profile to the list of loaded profiles. 
- */ -enum i40e_status_code -i40evf_add_pinfo_to_list(struct i40e_hw *hw, - struct i40e_profile_segment *profile, - u8 *profile_info_sec, u32 track_id) -{ - i40e_status status = 0; - struct i40e_profile_section_header *sec = NULL; - struct i40e_profile_info *pinfo; - u32 offset = 0, info = 0; - - sec = (struct i40e_profile_section_header *)profile_info_sec; - sec->tbl_size = 1; - sec->data_end = sizeof(struct i40e_profile_section_header) + - sizeof(struct i40e_profile_info); - sec->section.type = SECTION_TYPE_INFO; - sec->section.offset = sizeof(struct i40e_profile_section_header); - sec->section.size = sizeof(struct i40e_profile_info); - pinfo = (struct i40e_profile_info *)(profile_info_sec + - sec->section.offset); - pinfo->track_id = track_id; - pinfo->version = profile->version; - pinfo->op = I40E_DDP_ADD_TRACKID; - memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE); - - status = i40evf_aq_write_ddp(hw, (void *)sec, sec->data_end, - track_id, &offset, &info, NULL); - return status; -} diff --git a/drivers/net/ethernet/intel/i40evf/i40e_devids.h b/drivers/net/ethernet/intel/i40evf/i40e_devids.h deleted file mode 100644 index f300bf271824..000000000000 --- a/drivers/net/ethernet/intel/i40evf/i40e_devids.h +++ /dev/null @@ -1,34 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ - -#ifndef _I40E_DEVIDS_H_ -#define _I40E_DEVIDS_H_ - -/* Device IDs */ -#define I40E_DEV_ID_SFP_XL710 0x1572 -#define I40E_DEV_ID_QEMU 0x1574 -#define I40E_DEV_ID_KX_B 0x1580 -#define I40E_DEV_ID_KX_C 0x1581 -#define I40E_DEV_ID_QSFP_A 0x1583 -#define I40E_DEV_ID_QSFP_B 0x1584 -#define I40E_DEV_ID_QSFP_C 0x1585 -#define I40E_DEV_ID_10G_BASE_T 0x1586 -#define I40E_DEV_ID_20G_KR2 0x1587 -#define I40E_DEV_ID_20G_KR2_A 0x1588 -#define I40E_DEV_ID_10G_BASE_T4 0x1589 -#define I40E_DEV_ID_25G_B 0x158A -#define I40E_DEV_ID_25G_SFP28 0x158B -#define I40E_DEV_ID_VF 0x154C -#define I40E_DEV_ID_VF_HV 0x1571 -#define I40E_DEV_ID_ADAPTIVE_VF 0x1889 -#define I40E_DEV_ID_SFP_X722 0x37D0 -#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1 -#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 -#define I40E_DEV_ID_SFP_I_X722 0x37D3 -#define I40E_DEV_ID_X722_VF 0x37CD - -#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \ - (d) == I40E_DEV_ID_QSFP_B || \ - (d) == I40E_DEV_ID_QSFP_C) - -#endif /* _I40E_DEVIDS_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h deleted file mode 100644 index 1c78de838857..000000000000 --- a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h +++ /dev/null @@ -1,215 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ - -#ifndef _I40E_HMC_H_ -#define _I40E_HMC_H_ - -#define I40E_HMC_MAX_BP_COUNT 512 - -/* forward-declare the HW struct for the compiler */ -struct i40e_hw; - -#define I40E_HMC_INFO_SIGNATURE 0x484D5347 /* HMSG */ -#define I40E_HMC_PD_CNT_IN_SD 512 -#define I40E_HMC_DIRECT_BP_SIZE 0x200000 /* 2M */ -#define I40E_HMC_PAGED_BP_SIZE 4096 -#define I40E_HMC_PD_BP_BUF_ALIGNMENT 4096 -#define I40E_FIRST_VF_FPM_ID 16 - -struct i40e_hmc_obj_info { - u64 base; /* base addr in FPM */ - u32 max_cnt; /* max count available for this hmc func */ - u32 cnt; /* count of objects driver actually wants to create */ - u64 size; /* size in bytes of one object */ -}; - -enum i40e_sd_entry_type { - I40E_SD_TYPE_INVALID = 0, - I40E_SD_TYPE_PAGED = 1, - I40E_SD_TYPE_DIRECT = 2 -}; - -struct i40e_hmc_bp { - enum i40e_sd_entry_type entry_type; - struct i40e_dma_mem addr; /* populate to be used by hw */ - u32 sd_pd_index; - u32 ref_cnt; -}; - -struct i40e_hmc_pd_entry { - struct i40e_hmc_bp bp; - u32 sd_index; - bool rsrc_pg; - bool valid; -}; - -struct i40e_hmc_pd_table { - struct i40e_dma_mem pd_page_addr; /* populate to be used by hw */ - struct i40e_hmc_pd_entry *pd_entry; /* [512] for sw book keeping */ - struct i40e_virt_mem pd_entry_virt_mem; /* virt mem for pd_entry */ - - u32 ref_cnt; - u32 sd_index; -}; - -struct i40e_hmc_sd_entry { - enum i40e_sd_entry_type entry_type; - bool valid; - - union { - struct i40e_hmc_pd_table pd_table; - struct i40e_hmc_bp bp; - } u; -}; - -struct i40e_hmc_sd_table { - struct i40e_virt_mem addr; /* used to track sd_entry allocations */ - u32 sd_cnt; - u32 ref_cnt; - struct i40e_hmc_sd_entry *sd_entry; /* (sd_cnt*512) entries max */ -}; - -struct i40e_hmc_info { - u32 signature; - /* equals to pci func num for PF and dynamically allocated for VFs */ - u8 hmc_fn_id; - u16 first_sd_index; /* index of the first available SD */ - - /* hmc objects */ - struct i40e_hmc_obj_info *hmc_obj; - struct i40e_virt_mem hmc_obj_virt_mem; - struct i40e_hmc_sd_table sd_table; -}; - -#define I40E_INC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt++) -#define I40E_INC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt++) -#define I40E_INC_BP_REFCNT(bp) ((bp)->ref_cnt++) - -#define I40E_DEC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt--) -#define I40E_DEC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt--) -#define I40E_DEC_BP_REFCNT(bp) ((bp)->ref_cnt--) - -/** - * I40E_SET_PF_SD_ENTRY - marks the sd entry as valid in the hardware - * @hw: pointer to our hw struct - * @pa: pointer to physical address - * @sd_index: segment descriptor index - * @type: if sd entry is direct or paged - **/ -#define I40E_SET_PF_SD_ENTRY(hw, pa, sd_index, type) \ -{ \ - u32 val1, val2, val3; \ - val1 = (u32)(upper_32_bits(pa)); \ - val2 = (u32)(pa) | (I40E_HMC_MAX_BP_COUNT << \ - I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \ - ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \ - I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \ - BIT(I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \ - val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \ - wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \ - wr32((hw), I40E_PFHMC_SDDATALOW, val2); \ - wr32((hw), I40E_PFHMC_SDCMD, val3); \ -} - -/** - * I40E_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware - * @hw: pointer to our hw struct - * @sd_index: segment descriptor index - * @type: if sd entry is direct or paged - **/ -#define I40E_CLEAR_PF_SD_ENTRY(hw, sd_index, type) \ -{ \ - u32 val2, val3; \ - val2 = (I40E_HMC_MAX_BP_COUNT << \ - I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \ - ((((type) == I40E_SD_TYPE_PAGED) ? 
0 : 1) << \ - I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \ - val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \ - wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \ - wr32((hw), I40E_PFHMC_SDDATALOW, val2); \ - wr32((hw), I40E_PFHMC_SDCMD, val3); \ -} - -/** - * I40E_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware - * @hw: pointer to our hw struct - * @sd_idx: segment descriptor index - * @pd_idx: page descriptor index - **/ -#define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \ - wr32((hw), I40E_PFHMC_PDINV, \ - (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \ - ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT))) - -/** - * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit - * @hmc_info: pointer to the HMC configuration information structure - * @type: type of HMC resources we're searching - * @index: starting index for the object - * @cnt: number of objects we're trying to create - * @sd_idx: pointer to return index of the segment descriptor in question - * @sd_limit: pointer to return the maximum number of segment descriptors - * - * This function calculates the segment descriptor index and index limit - * for the resource defined by i40e_hmc_rsrc_type. - **/ -#define I40E_FIND_SD_INDEX_LIMIT(hmc_info, type, index, cnt, sd_idx, sd_limit)\ -{ \ - u64 fpm_addr, fpm_limit; \ - fpm_addr = (hmc_info)->hmc_obj[(type)].base + \ - (hmc_info)->hmc_obj[(type)].size * (index); \ - fpm_limit = fpm_addr + (hmc_info)->hmc_obj[(type)].size * (cnt);\ - *(sd_idx) = (u32)(fpm_addr / I40E_HMC_DIRECT_BP_SIZE); \ - *(sd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_DIRECT_BP_SIZE); \ - /* add one more to the limit to correct our range */ \ - *(sd_limit) += 1; \ -} - -/** - * I40E_FIND_PD_INDEX_LIMIT - finds page descriptor index limit - * @hmc_info: pointer to the HMC configuration information struct - * @type: HMC resource type we're examining - * @idx: starting index for the object - * @cnt: number of objects we're trying to create - * @pd_index: pointer to return page descriptor index - * @pd_limit: pointer to return page descriptor index limit - * - * Calculates the page descriptor index and index limit for the resource - * defined by i40e_hmc_rsrc_type. 
- **/ -#define I40E_FIND_PD_INDEX_LIMIT(hmc_info, type, idx, cnt, pd_index, pd_limit)\ -{ \ - u64 fpm_adr, fpm_limit; \ - fpm_adr = (hmc_info)->hmc_obj[(type)].base + \ - (hmc_info)->hmc_obj[(type)].size * (idx); \ - fpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt); \ - *(pd_index) = (u32)(fpm_adr / I40E_HMC_PAGED_BP_SIZE); \ - *(pd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_PAGED_BP_SIZE); \ - /* add one more to the limit to correct our range */ \ - *(pd_limit) += 1; \ -} -i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw, - struct i40e_hmc_info *hmc_info, - u32 sd_index, - enum i40e_sd_entry_type type, - u64 direct_mode_sz); - -i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw, - struct i40e_hmc_info *hmc_info, - u32 pd_index, - struct i40e_dma_mem *rsrc_pg); -i40e_status i40e_remove_pd_bp(struct i40e_hw *hw, - struct i40e_hmc_info *hmc_info, - u32 idx); -i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info, - u32 idx); -i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw, - struct i40e_hmc_info *hmc_info, - u32 idx, bool is_pf); -i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info, - u32 idx); -i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw, - struct i40e_hmc_info *hmc_info, - u32 idx, bool is_pf); - -#endif /* _I40E_HMC_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h deleted file mode 100644 index 82b00f70a632..000000000000 --- a/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h +++ /dev/null @@ -1,158 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ - -#ifndef _I40E_LAN_HMC_H_ -#define _I40E_LAN_HMC_H_ - -/* forward-declare the HW struct for the compiler */ -struct i40e_hw; - -/* HMC element context information */ - -/* Rx queue context data - * - * The sizes of the variables may be larger than needed due to crossing byte - * boundaries. If we do not have the width of the variable set to the correct - * size then we could end up shifting bits off the top of the variable when the - * variable is at the top of a byte and crosses over into the next byte. - */ -struct i40e_hmc_obj_rxq { - u16 head; - u16 cpuid; /* bigger than needed, see above for reason */ - u64 base; - u16 qlen; -#define I40E_RXQ_CTX_DBUFF_SHIFT 7 - u16 dbuff; /* bigger than needed, see above for reason */ -#define I40E_RXQ_CTX_HBUFF_SHIFT 6 - u16 hbuff; /* bigger than needed, see above for reason */ - u8 dtype; - u8 dsize; - u8 crcstrip; - u8 fc_ena; - u8 l2tsel; - u8 hsplit_0; - u8 hsplit_1; - u8 showiv; - u32 rxmax; /* bigger than needed, see above for reason */ - u8 tphrdesc_ena; - u8 tphwdesc_ena; - u8 tphdata_ena; - u8 tphhead_ena; - u16 lrxqthresh; /* bigger than needed, see above for reason */ - u8 prefena; /* NOTE: normally must be set to 1 at init */ -}; - -/* Tx queue context data -* -* The sizes of the variables may be larger than needed due to crossing byte -* boundaries. If we do not have the width of the variable set to the correct -* size then we could end up shifting bits off the top of the variable when the -* variable is at the top of a byte and crosses over into the next byte. 
-*/ -struct i40e_hmc_obj_txq { - u16 head; - u8 new_context; - u64 base; - u8 fc_ena; - u8 timesync_ena; - u8 fd_ena; - u8 alt_vlan_ena; - u16 thead_wb; - u8 cpuid; - u8 head_wb_ena; - u16 qlen; - u8 tphrdesc_ena; - u8 tphrpacket_ena; - u8 tphwdesc_ena; - u64 head_wb_addr; - u32 crc; - u16 rdylist; - u8 rdylist_act; -}; - -/* for hsplit_0 field of Rx HMC context */ -enum i40e_hmc_obj_rx_hsplit_0 { - I40E_HMC_OBJ_RX_HSPLIT_0_NO_SPLIT = 0, - I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 = 1, - I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP = 2, - I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP = 4, - I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP = 8, -}; - -/* fcoe_cntx and fcoe_filt are for debugging purpose only */ -struct i40e_hmc_obj_fcoe_cntx { - u32 rsv[32]; -}; - -struct i40e_hmc_obj_fcoe_filt { - u32 rsv[8]; -}; - -/* Context sizes for LAN objects */ -enum i40e_hmc_lan_object_size { - I40E_HMC_LAN_OBJ_SZ_8 = 0x3, - I40E_HMC_LAN_OBJ_SZ_16 = 0x4, - I40E_HMC_LAN_OBJ_SZ_32 = 0x5, - I40E_HMC_LAN_OBJ_SZ_64 = 0x6, - I40E_HMC_LAN_OBJ_SZ_128 = 0x7, - I40E_HMC_LAN_OBJ_SZ_256 = 0x8, - I40E_HMC_LAN_OBJ_SZ_512 = 0x9, -}; - -#define I40E_HMC_L2OBJ_BASE_ALIGNMENT 512 -#define I40E_HMC_OBJ_SIZE_TXQ 128 -#define I40E_HMC_OBJ_SIZE_RXQ 32 -#define I40E_HMC_OBJ_SIZE_FCOE_CNTX 128 -#define I40E_HMC_OBJ_SIZE_FCOE_FILT 64 - -enum i40e_hmc_lan_rsrc_type { - I40E_HMC_LAN_FULL = 0, - I40E_HMC_LAN_TX = 1, - I40E_HMC_LAN_RX = 2, - I40E_HMC_FCOE_CTX = 3, - I40E_HMC_FCOE_FILT = 4, - I40E_HMC_LAN_MAX = 5 -}; - -enum i40e_hmc_model { - I40E_HMC_MODEL_DIRECT_PREFERRED = 0, - I40E_HMC_MODEL_DIRECT_ONLY = 1, - I40E_HMC_MODEL_PAGED_ONLY = 2, - I40E_HMC_MODEL_UNKNOWN, -}; - -struct i40e_hmc_lan_create_obj_info { - struct i40e_hmc_info *hmc_info; - u32 rsrc_type; - u32 start_idx; - u32 count; - enum i40e_sd_entry_type entry_type; - u64 direct_mode_sz; -}; - -struct i40e_hmc_lan_delete_obj_info { - struct i40e_hmc_info *hmc_info; - u32 rsrc_type; - u32 start_idx; - u32 count; -}; - -i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, - u32 rxq_num, u32 fcoe_cntx_num, - u32 fcoe_filt_num); -i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw, - enum i40e_hmc_model model); -i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw); - -i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw, - u16 queue); -i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw, - u16 queue, - struct i40e_hmc_obj_txq *s); -i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw, - u16 queue); -i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw, - u16 queue, - struct i40e_hmc_obj_rxq *s); - -#endif /* _I40E_LAN_HMC_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h deleted file mode 100644 index 3ddddb46455b..000000000000 --- a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h +++ /dev/null @@ -1,52 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ - -#ifndef _I40E_OSDEP_H_ -#define _I40E_OSDEP_H_ - -#include <linux/types.h> -#include <linux/if_ether.h> -#include <linux/if_vlan.h> -#include <linux/tcp.h> -#include <linux/pci.h> - -/* get readq/writeq support for 32 bit kernels, use the low-first version */ -#include <linux/io-64-nonatomic-lo-hi.h> - -/* File to be the magic between shared code and - * actual OS primitives - */ - -#define hw_dbg(hw, S, A...) 
do {} while (0) - -#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg))) -#define rd32(a, reg) readl((a)->hw_addr + (reg)) - -#define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg))) -#define rd64(a, reg) readq((a)->hw_addr + (reg)) -#define i40e_flush(a) readl((a)->hw_addr + I40E_VFGEN_RSTAT) - -/* memory allocation tracking */ -struct i40e_dma_mem { - void *va; - dma_addr_t pa; - u32 size; -}; - -#define i40e_allocate_dma_mem(h, m, unused, s, a) \ - i40evf_allocate_dma_mem_d(h, m, s, a) -#define i40e_free_dma_mem(h, m) i40evf_free_dma_mem_d(h, m) - -struct i40e_virt_mem { - void *va; - u32 size; -}; -#define i40e_allocate_virt_mem(h, m, s) i40evf_allocate_virt_mem_d(h, m, s) -#define i40e_free_virt_mem(h, m) i40evf_free_virt_mem_d(h, m) - -#define i40e_debug(h, m, s, ...) i40evf_debug_d(h, m, s, ##__VA_ARGS__) -extern void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...) - __attribute__ ((format(gnu_printf, 3, 4))); - -typedef enum i40e_status_code i40e_status; -#endif /* _I40E_OSDEP_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h deleted file mode 100644 index a358f4b9d5aa..000000000000 --- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h +++ /dev/null @@ -1,130 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ - -#ifndef _I40E_PROTOTYPE_H_ -#define _I40E_PROTOTYPE_H_ - -#include "i40e_type.h" -#include "i40e_alloc.h" -#include <linux/avf/virtchnl.h> - -/* Prototypes for shared code functions that are not in - * the standard function pointer structures. These are - * mostly because they are needed even before the init - * has happened and will assist in the early SW and FW - * setup. - */ - -/* adminq functions */ -i40e_status i40evf_init_adminq(struct i40e_hw *hw); -i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw); -void i40e_adminq_init_ring_data(struct i40e_hw *hw); -i40e_status i40evf_clean_arq_element(struct i40e_hw *hw, - struct i40e_arq_event_info *e, - u16 *events_pending); -i40e_status i40evf_asq_send_command(struct i40e_hw *hw, - struct i40e_aq_desc *desc, - void *buff, /* can be NULL */ - u16 buff_size, - struct i40e_asq_cmd_details *cmd_details); -bool i40evf_asq_done(struct i40e_hw *hw); - -/* debug function for adminq */ -void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, - void *desc, void *buffer, u16 buf_len); - -void i40e_idle_aq(struct i40e_hw *hw); -void i40evf_resume_aq(struct i40e_hw *hw); -bool i40evf_check_asq_alive(struct i40e_hw *hw); -i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading); -const char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err); -const char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err); - -i40e_status i40evf_aq_get_rss_lut(struct i40e_hw *hw, u16 seid, - bool pf_lut, u8 *lut, u16 lut_size); -i40e_status i40evf_aq_set_rss_lut(struct i40e_hw *hw, u16 seid, - bool pf_lut, u8 *lut, u16 lut_size); -i40e_status i40evf_aq_get_rss_key(struct i40e_hw *hw, - u16 seid, - struct i40e_aqc_get_set_rss_key_data *key); -i40e_status i40evf_aq_set_rss_key(struct i40e_hw *hw, - u16 seid, - struct i40e_aqc_get_set_rss_key_data *key); - -i40e_status i40e_set_mac_type(struct i40e_hw *hw); - -extern struct i40e_rx_ptype_decoded i40evf_ptype_lookup[]; - -static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype) -{ - return i40evf_ptype_lookup[ptype]; -} - -/* prototype for functions used for SW locks */ - -/* i40e_common for VF drivers*/ 
-void i40e_vf_parse_hw_config(struct i40e_hw *hw, - struct virtchnl_vf_resource *msg); -i40e_status i40e_vf_reset(struct i40e_hw *hw); -i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw, - enum virtchnl_ops v_opcode, - i40e_status v_retval, - u8 *msg, u16 msglen, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_set_filter_control(struct i40e_hw *hw, - struct i40e_filter_control_settings *settings); -i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, - u8 *mac_addr, u16 ethtype, u16 flags, - u16 vsi_seid, u16 queue, bool is_add, - struct i40e_control_filter_stats *stats, - struct i40e_asq_cmd_details *cmd_details); -void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, - u16 vsi_seid); -i40e_status i40evf_aq_rx_ctl_read_register(struct i40e_hw *hw, - u32 reg_addr, u32 *reg_val, - struct i40e_asq_cmd_details *cmd_details); -u32 i40evf_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr); -i40e_status i40evf_aq_rx_ctl_write_register(struct i40e_hw *hw, - u32 reg_addr, u32 reg_val, - struct i40e_asq_cmd_details *cmd_details); -void i40evf_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val); -i40e_status i40e_aq_set_phy_register(struct i40e_hw *hw, - u8 phy_select, u8 dev_addr, - u32 reg_addr, u32 reg_val, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw, - u8 phy_select, u8 dev_addr, - u32 reg_addr, u32 *reg_val, - struct i40e_asq_cmd_details *cmd_details); - -i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page, - u16 reg, u8 phy_addr, u16 *value); -i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, - u16 reg, u8 phy_addr, u16 value); -i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page, u16 reg, - u8 phy_addr, u16 *value); -i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg, - u8 phy_addr, u16 value); -u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num); -i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw, - u32 time, u32 interval); -i40e_status i40evf_aq_write_ddp(struct i40e_hw *hw, void *buff, - u16 buff_size, u32 track_id, - u32 *error_offset, u32 *error_info, - struct i40e_asq_cmd_details * - cmd_details); -i40e_status i40evf_aq_get_ddp_list(struct i40e_hw *hw, void *buff, - u16 buff_size, u8 flags, - struct i40e_asq_cmd_details * - cmd_details); -struct i40e_generic_seg_header * -i40evf_find_segment_in_package(u32 segment_type, - struct i40e_package_header *pkg_header); -enum i40e_status_code -i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg, - u32 track_id); -enum i40e_status_code -i40evf_add_pinfo_to_list(struct i40e_hw *hw, - struct i40e_profile_segment *profile, - u8 *profile_info_sec, u32 track_id); -#endif /* _I40E_PROTOTYPE_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_register.h b/drivers/net/ethernet/intel/i40evf/i40e_register.h deleted file mode 100644 index 49e1f57d99cc..000000000000 --- a/drivers/net/ethernet/intel/i40evf/i40e_register.h +++ /dev/null @@ -1,313 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ - -#ifndef _I40E_REGISTER_H_ -#define _I40E_REGISTER_H_ - -#define I40E_VFMSIX_PBA1(_i) (0x00002000 + ((_i) * 4)) /* _i=0...19 */ /* Reset: VFLR */ -#define I40E_VFMSIX_PBA1_MAX_INDEX 19 -#define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0 -#define I40E_VFMSIX_PBA1_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA1_PENBIT_SHIFT) -#define I40E_VFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TADD1_MAX_INDEX 639 -#define I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT 0 -#define I40E_VFMSIX_TADD1_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT) -#define I40E_VFMSIX_TADD1_MSIXTADD_SHIFT 2 -#define I40E_VFMSIX_TADD1_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD1_MSIXTADD_SHIFT) -#define I40E_VFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TMSG1_MAX_INDEX 639 -#define I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT 0 -#define I40E_VFMSIX_TMSG1_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT) -#define I40E_VFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TUADD1_MAX_INDEX 639 -#define I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT 0 -#define I40E_VFMSIX_TUADD1_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT) -#define I40E_VFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639 -#define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0 -#define I40E_VFMSIX_TVCTRL1_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL1_MASK_SHIFT) -#define I40E_VF_ARQBAH1 0x00006000 /* Reset: EMPR */ -#define I40E_VF_ARQBAH1_ARQBAH_SHIFT 0 -#define I40E_VF_ARQBAH1_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH1_ARQBAH_SHIFT) -#define I40E_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */ -#define I40E_VF_ARQBAL1_ARQBAL_SHIFT 0 -#define I40E_VF_ARQBAL1_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL1_ARQBAL_SHIFT) -#define I40E_VF_ARQH1 0x00007400 /* Reset: EMPR */ -#define I40E_VF_ARQH1_ARQH_SHIFT 0 -#define I40E_VF_ARQH1_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH1_ARQH_SHIFT) -#define I40E_VF_ARQLEN1 0x00008000 /* Reset: EMPR */ -#define I40E_VF_ARQLEN1_ARQLEN_SHIFT 0 -#define I40E_VF_ARQLEN1_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN1_ARQLEN_SHIFT) -#define I40E_VF_ARQLEN1_ARQVFE_SHIFT 28 -#define I40E_VF_ARQLEN1_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQVFE_SHIFT) -#define I40E_VF_ARQLEN1_ARQOVFL_SHIFT 29 -#define I40E_VF_ARQLEN1_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQOVFL_SHIFT) -#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30 -#define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT) -#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31 -#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQENABLE_SHIFT) -#define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */ -#define I40E_VF_ARQT1_ARQT_SHIFT 0 -#define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT) -#define I40E_VF_ATQBAH1 0x00007800 /* Reset: EMPR */ -#define I40E_VF_ATQBAH1_ATQBAH_SHIFT 0 -#define I40E_VF_ATQBAH1_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH1_ATQBAH_SHIFT) -#define I40E_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */ -#define I40E_VF_ATQBAL1_ATQBAL_SHIFT 0 -#define I40E_VF_ATQBAL1_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL1_ATQBAL_SHIFT) -#define I40E_VF_ATQH1 0x00006400 /* Reset: EMPR */ -#define I40E_VF_ATQH1_ATQH_SHIFT 0 -#define I40E_VF_ATQH1_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH1_ATQH_SHIFT) -#define I40E_VF_ATQLEN1 0x00006800 /* Reset: EMPR */ -#define 
I40E_VF_ATQLEN1_ATQLEN_SHIFT 0 -#define I40E_VF_ATQLEN1_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN1_ATQLEN_SHIFT) -#define I40E_VF_ATQLEN1_ATQVFE_SHIFT 28 -#define I40E_VF_ATQLEN1_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQVFE_SHIFT) -#define I40E_VF_ATQLEN1_ATQOVFL_SHIFT 29 -#define I40E_VF_ATQLEN1_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQOVFL_SHIFT) -#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30 -#define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT) -#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31 -#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQENABLE_SHIFT) -#define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */ -#define I40E_VF_ATQT1_ATQT_SHIFT 0 -#define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT) -#define I40E_VFGEN_RSTAT 0x00008800 /* Reset: VFR */ -#define I40E_VFGEN_RSTAT_VFR_STATE_SHIFT 0 -#define I40E_VFGEN_RSTAT_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT_VFR_STATE_SHIFT) -#define I40E_VFINT_DYN_CTL01 0x00005C00 /* Reset: VFR */ -#define I40E_VFINT_DYN_CTL01_INTENA_SHIFT 0 -#define I40E_VFINT_DYN_CTL01_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_SHIFT) -#define I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT 1 -#define I40E_VFINT_DYN_CTL01_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT) -#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2 -#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT) -#define I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3 -#define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT) -#define I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT 5 -#define I40E_VFINT_DYN_CTL01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT) -#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24 -#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT) -#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25 -#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT) -#define I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT 31 -#define I40E_VFINT_DYN_CTL01_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT) -#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */ -#define I40E_VFINT_DYN_CTLN1_MAX_INDEX 15 -#define I40E_VFINT_DYN_CTLN1_INTENA_SHIFT 0 -#define I40E_VFINT_DYN_CTLN1_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_SHIFT) -#define I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT 1 -#define I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT) -#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2 -#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT) -#define I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3 -#define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) -#define I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5 -#define I40E_VFINT_DYN_CTLN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT) -#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24 -#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT) -#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25 -#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT) -#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT 31 -#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT) -#define 
I40E_VFINT_ICR0_ENA1 0x00005000 /* Reset: CORER */ -#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT 25 -#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT) -#define I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30 -#define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT) -#define I40E_VFINT_ICR0_ENA1_RSVD_SHIFT 31 -#define I40E_VFINT_ICR0_ENA1_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_RSVD_SHIFT) -#define I40E_VFINT_ICR01 0x00004800 /* Reset: CORER */ -#define I40E_VFINT_ICR01_INTEVENT_SHIFT 0 -#define I40E_VFINT_ICR01_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_INTEVENT_SHIFT) -#define I40E_VFINT_ICR01_QUEUE_0_SHIFT 1 -#define I40E_VFINT_ICR01_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_0_SHIFT) -#define I40E_VFINT_ICR01_QUEUE_1_SHIFT 2 -#define I40E_VFINT_ICR01_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_1_SHIFT) -#define I40E_VFINT_ICR01_QUEUE_2_SHIFT 3 -#define I40E_VFINT_ICR01_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_2_SHIFT) -#define I40E_VFINT_ICR01_QUEUE_3_SHIFT 4 -#define I40E_VFINT_ICR01_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_3_SHIFT) -#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25 -#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT) -#define I40E_VFINT_ICR01_ADMINQ_SHIFT 30 -#define I40E_VFINT_ICR01_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_ADMINQ_SHIFT) -#define I40E_VFINT_ICR01_SWINT_SHIFT 31 -#define I40E_VFINT_ICR01_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_SWINT_SHIFT) -#define I40E_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */ /* Reset: VFR */ -#define I40E_VFINT_ITR01_MAX_INDEX 2 -#define I40E_VFINT_ITR01_INTERVAL_SHIFT 0 -#define I40E_VFINT_ITR01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR01_INTERVAL_SHIFT) -#define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */ -#define I40E_VFINT_ITRN1_MAX_INDEX 2 -#define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0 -#define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT) -#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */ -#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2 -#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT) -#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_QRX_TAIL1_MAX_INDEX 15 -#define I40E_QRX_TAIL1_TAIL_SHIFT 0 -#define I40E_QRX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL1_TAIL_SHIFT) -#define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: PFR */ -#define I40E_QTX_TAIL1_MAX_INDEX 15 -#define I40E_QTX_TAIL1_TAIL_SHIFT 0 -#define I40E_QTX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL1_TAIL_SHIFT) -#define I40E_VFMSIX_PBA 0x00002000 /* Reset: VFLR */ -#define I40E_VFMSIX_PBA_PENBIT_SHIFT 0 -#define I40E_VFMSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA_PENBIT_SHIFT) -#define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TADD_MAX_INDEX 16 -#define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0 -#define I40E_VFMSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD_MSIXTADD10_SHIFT) -#define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2 -#define I40E_VFMSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD_MSIXTADD_SHIFT) -#define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ -#define 
I40E_VFMSIX_TMSG_MAX_INDEX 16 -#define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0 -#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT) -#define I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TUADD_MAX_INDEX 16 -#define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0 -#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT) -#define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TVCTRL_MAX_INDEX 16 -#define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0 -#define I40E_VFMSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL_MASK_SHIFT) -#define I40E_VFCM_PE_ERRDATA 0x0000DC00 /* Reset: VFR */ -#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0 -#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT) -#define I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT 4 -#define I40E_VFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT) -#define I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT 8 -#define I40E_VFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT) -#define I40E_VFCM_PE_ERRINFO 0x0000D800 /* Reset: VFR */ -#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0 -#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT) -#define I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT 4 -#define I40E_VFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT) -#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8 -#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT) -#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16 -#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT) -#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24 -#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT) -#define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */ -#define I40E_VFQF_HENA_MAX_INDEX 1 -#define I40E_VFQF_HENA_PTYPE_ENA_SHIFT 0 -#define I40E_VFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA_PTYPE_ENA_SHIFT) -#define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */ -#define I40E_VFQF_HKEY_MAX_INDEX 12 -#define I40E_VFQF_HKEY_KEY_0_SHIFT 0 -#define I40E_VFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_0_SHIFT) -#define I40E_VFQF_HKEY_KEY_1_SHIFT 8 -#define I40E_VFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_1_SHIFT) -#define I40E_VFQF_HKEY_KEY_2_SHIFT 16 -#define I40E_VFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_2_SHIFT) -#define I40E_VFQF_HKEY_KEY_3_SHIFT 24 -#define I40E_VFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_3_SHIFT) -#define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_VFQF_HLUT_MAX_INDEX 15 -#define I40E_VFQF_HLUT_LUT0_SHIFT 0 -#define I40E_VFQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT0_SHIFT) -#define I40E_VFQF_HLUT_LUT1_SHIFT 8 -#define I40E_VFQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT1_SHIFT) -#define I40E_VFQF_HLUT_LUT2_SHIFT 16 -#define I40E_VFQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT2_SHIFT) -#define I40E_VFQF_HLUT_LUT3_SHIFT 24 -#define I40E_VFQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT3_SHIFT) -#define I40E_VFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */ 
-#define I40E_VFQF_HREGION_MAX_INDEX 7 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT) -#define I40E_VFQF_HREGION_REGION_0_SHIFT 1 -#define I40E_VFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_0_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT) -#define I40E_VFQF_HREGION_REGION_1_SHIFT 5 -#define I40E_VFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_1_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT) -#define I40E_VFQF_HREGION_REGION_2_SHIFT 9 -#define I40E_VFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_2_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT) -#define I40E_VFQF_HREGION_REGION_3_SHIFT 13 -#define I40E_VFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_3_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT) -#define I40E_VFQF_HREGION_REGION_4_SHIFT 17 -#define I40E_VFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_4_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT) -#define I40E_VFQF_HREGION_REGION_5_SHIFT 21 -#define I40E_VFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_5_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT) -#define I40E_VFQF_HREGION_REGION_6_SHIFT 25 -#define I40E_VFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_6_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT) -#define I40E_VFQF_HREGION_REGION_7_SHIFT 29 -#define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT) -#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30 -#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT) -#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30 -#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT) -#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */ -#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0 -#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT) -#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */ -#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0 -#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT) -#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */ -#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0 -#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT) -#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */ -#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0 -#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT) -#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4 -#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK 
I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT) -#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16 -#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT) -#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31 -#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT) -#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */ -#define I40E_VFPE_CQACK1_PECQID_SHIFT 0 -#define I40E_VFPE_CQACK1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK1_PECQID_SHIFT) -#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */ -#define I40E_VFPE_CQARM1_PECQID_SHIFT 0 -#define I40E_VFPE_CQARM1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM1_PECQID_SHIFT) -#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */ -#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0 -#define I40E_VFPE_CQPDB1_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB1_WQHEAD_SHIFT) -#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */ -#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0 -#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT) -#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16 -#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT) -#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */ -#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0 -#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT) -#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31 -#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT) -#define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */ -#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0 -#define I40E_VFPE_IPCONFIG01_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG01_PEIPID_SHIFT) -#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16 -#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT) -#define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */ -#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0 -#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT) -#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */ -#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0 -#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT) -#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */ -#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0 -#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT) -#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */ -#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0 -#define I40E_VFPE_WQEALLOC1_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC1_PEQPID_SHIFT) -#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20 -#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT) -#endif /* _I40E_REGISTER_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_status.h b/drivers/net/ethernet/intel/i40evf/i40e_status.h deleted file mode 100644 index 77be0702d07c..000000000000 --- a/drivers/net/ethernet/intel/i40evf/i40e_status.h +++ /dev/null @@ -1,78 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ - -#ifndef _I40E_STATUS_H_ -#define _I40E_STATUS_H_ - -/* Error Codes */ -enum i40e_status_code { - I40E_SUCCESS = 0, - I40E_ERR_NVM = -1, - I40E_ERR_NVM_CHECKSUM = -2, - I40E_ERR_PHY = -3, - I40E_ERR_CONFIG = -4, - I40E_ERR_PARAM = -5, - I40E_ERR_MAC_TYPE = -6, - I40E_ERR_UNKNOWN_PHY = -7, - I40E_ERR_LINK_SETUP = -8, - I40E_ERR_ADAPTER_STOPPED = -9, - I40E_ERR_INVALID_MAC_ADDR = -10, - I40E_ERR_DEVICE_NOT_SUPPORTED = -11, - I40E_ERR_MASTER_REQUESTS_PENDING = -12, - I40E_ERR_INVALID_LINK_SETTINGS = -13, - I40E_ERR_AUTONEG_NOT_COMPLETE = -14, - I40E_ERR_RESET_FAILED = -15, - I40E_ERR_SWFW_SYNC = -16, - I40E_ERR_NO_AVAILABLE_VSI = -17, - I40E_ERR_NO_MEMORY = -18, - I40E_ERR_BAD_PTR = -19, - I40E_ERR_RING_FULL = -20, - I40E_ERR_INVALID_PD_ID = -21, - I40E_ERR_INVALID_QP_ID = -22, - I40E_ERR_INVALID_CQ_ID = -23, - I40E_ERR_INVALID_CEQ_ID = -24, - I40E_ERR_INVALID_AEQ_ID = -25, - I40E_ERR_INVALID_SIZE = -26, - I40E_ERR_INVALID_ARP_INDEX = -27, - I40E_ERR_INVALID_FPM_FUNC_ID = -28, - I40E_ERR_QP_INVALID_MSG_SIZE = -29, - I40E_ERR_QP_TOOMANY_WRS_POSTED = -30, - I40E_ERR_INVALID_FRAG_COUNT = -31, - I40E_ERR_QUEUE_EMPTY = -32, - I40E_ERR_INVALID_ALIGNMENT = -33, - I40E_ERR_FLUSHED_QUEUE = -34, - I40E_ERR_INVALID_PUSH_PAGE_INDEX = -35, - I40E_ERR_INVALID_IMM_DATA_SIZE = -36, - I40E_ERR_TIMEOUT = -37, - I40E_ERR_OPCODE_MISMATCH = -38, - I40E_ERR_CQP_COMPL_ERROR = -39, - I40E_ERR_INVALID_VF_ID = -40, - I40E_ERR_INVALID_HMCFN_ID = -41, - I40E_ERR_BACKING_PAGE_ERROR = -42, - I40E_ERR_NO_PBLCHUNKS_AVAILABLE = -43, - I40E_ERR_INVALID_PBLE_INDEX = -44, - I40E_ERR_INVALID_SD_INDEX = -45, - I40E_ERR_INVALID_PAGE_DESC_INDEX = -46, - I40E_ERR_INVALID_SD_TYPE = -47, - I40E_ERR_MEMCPY_FAILED = -48, - I40E_ERR_INVALID_HMC_OBJ_INDEX = -49, - I40E_ERR_INVALID_HMC_OBJ_COUNT = -50, - I40E_ERR_INVALID_SRQ_ARM_LIMIT = -51, - I40E_ERR_SRQ_ENABLED = -52, - I40E_ERR_ADMIN_QUEUE_ERROR = -53, - I40E_ERR_ADMIN_QUEUE_TIMEOUT = -54, - I40E_ERR_BUF_TOO_SHORT = -55, - I40E_ERR_ADMIN_QUEUE_FULL = -56, - I40E_ERR_ADMIN_QUEUE_NO_WORK = -57, - I40E_ERR_BAD_IWARP_CQE = -58, - I40E_ERR_NVM_BLANK_MODE = -59, - I40E_ERR_NOT_IMPLEMENTED = -60, - I40E_ERR_PE_DOORBELL_NOT_ENABLED = -61, - I40E_ERR_DIAG_TEST_FAILED = -62, - I40E_ERR_NOT_READY = -63, - I40E_NOT_SUPPORTED = -64, - I40E_ERR_FIRMWARE_API_VERSION = -65, - I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR = -66, -}; - -#endif /* _I40E_STATUS_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_trace.h b/drivers/net/ethernet/intel/i40evf/i40e_trace.h deleted file mode 100644 index d7a4e68820a8..000000000000 --- a/drivers/net/ethernet/intel/i40evf/i40e_trace.h +++ /dev/null @@ -1,209 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ - -/* Modeled on trace-events-sample.h */ - -/* The trace subsystem name for i40evf will be "i40evf". - * - * This file is named i40e_trace.h. - * - * Since this include file's name is different from the trace - * subsystem name, we'll have to define TRACE_INCLUDE_FILE at the end - * of this file. - */ -#undef TRACE_SYSTEM -#define TRACE_SYSTEM i40evf - -/* See trace-events-sample.h for a detailed description of why this - * guard clause is different from most normal include files. - */ -#if !defined(_I40E_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) -#define _I40E_TRACE_H_ - -#include <linux/tracepoint.h> - -/** - * i40e_trace() macro enables shared code to refer to trace points - * like: - * - * trace_i40e{,vf}_example(args...) - * - * ... as: - * - * i40e_trace(example, args...) - * - * ...
to resolve to the PF or VF version of the tracepoint without - * ifdefs, and to allow tracepoints to be disabled entirely at build - * time. - * - * Trace point should always be referred to in the driver via this - * macro. - * - * Similarly, i40e_trace_enabled(trace_name) wraps references to - * trace_i40e{,vf}__enabled() functions. - */ -#define _I40E_TRACE_NAME(trace_name) (trace_ ## i40evf ## _ ## trace_name) -#define I40E_TRACE_NAME(trace_name) _I40E_TRACE_NAME(trace_name) - -#define i40e_trace(trace_name, args...) I40E_TRACE_NAME(trace_name)(args) - -#define i40e_trace_enabled(trace_name) I40E_TRACE_NAME(trace_name##_enabled)() - -/* Events common to PF and VF. Corresponding versions will be defined - * for both, named trace_i40e_* and trace_i40evf_*. The i40e_trace() - * macro above will select the right trace point name for the driver - * being built from shared code. - */ - -/* Events related to a vsi & ring */ -DECLARE_EVENT_CLASS( - i40evf_tx_template, - - TP_PROTO(struct i40e_ring *ring, - struct i40e_tx_desc *desc, - struct i40e_tx_buffer *buf), - - TP_ARGS(ring, desc, buf), - - /* The convention here is to make the first fields in the - * TP_STRUCT match the TP_PROTO exactly. This enables the use - * of the args struct generated by the tplist tool (from the - * bcc-tools package) to be used for those fields. To access - * fields other than the tracepoint args will require the - * tplist output to be adjusted. - */ - TP_STRUCT__entry( - __field(void*, ring) - __field(void*, desc) - __field(void*, buf) - __string(devname, ring->netdev->name) - ), - - TP_fast_assign( - __entry->ring = ring; - __entry->desc = desc; - __entry->buf = buf; - __assign_str(devname, ring->netdev->name); - ), - - TP_printk( - "netdev: %s ring: %p desc: %p buf %p", - __get_str(devname), __entry->ring, - __entry->desc, __entry->buf) -); - -DEFINE_EVENT( - i40evf_tx_template, i40evf_clean_tx_irq, - TP_PROTO(struct i40e_ring *ring, - struct i40e_tx_desc *desc, - struct i40e_tx_buffer *buf), - - TP_ARGS(ring, desc, buf)); - -DEFINE_EVENT( - i40evf_tx_template, i40evf_clean_tx_irq_unmap, - TP_PROTO(struct i40e_ring *ring, - struct i40e_tx_desc *desc, - struct i40e_tx_buffer *buf), - - TP_ARGS(ring, desc, buf)); - -DECLARE_EVENT_CLASS( - i40evf_rx_template, - - TP_PROTO(struct i40e_ring *ring, - union i40e_32byte_rx_desc *desc, - struct sk_buff *skb), - - TP_ARGS(ring, desc, skb), - - TP_STRUCT__entry( - __field(void*, ring) - __field(void*, desc) - __field(void*, skb) - __string(devname, ring->netdev->name) - ), - - TP_fast_assign( - __entry->ring = ring; - __entry->desc = desc; - __entry->skb = skb; - __assign_str(devname, ring->netdev->name); - ), - - TP_printk( - "netdev: %s ring: %p desc: %p skb %p", - __get_str(devname), __entry->ring, - __entry->desc, __entry->skb) -); - -DEFINE_EVENT( - i40evf_rx_template, i40evf_clean_rx_irq, - TP_PROTO(struct i40e_ring *ring, - union i40e_32byte_rx_desc *desc, - struct sk_buff *skb), - - TP_ARGS(ring, desc, skb)); - -DEFINE_EVENT( - i40evf_rx_template, i40evf_clean_rx_irq_rx, - TP_PROTO(struct i40e_ring *ring, - union i40e_32byte_rx_desc *desc, - struct sk_buff *skb), - - TP_ARGS(ring, desc, skb)); - -DECLARE_EVENT_CLASS( - i40evf_xmit_template, - - TP_PROTO(struct sk_buff *skb, - struct i40e_ring *ring), - - TP_ARGS(skb, ring), - - TP_STRUCT__entry( - __field(void*, skb) - __field(void*, ring) - __string(devname, ring->netdev->name) - ), - - TP_fast_assign( - __entry->skb = skb; - __entry->ring = ring; - __assign_str(devname, ring->netdev->name); - ), - - TP_printk( 
- "netdev: %s skb: %p ring: %p", - __get_str(devname), __entry->skb, - __entry->ring) -); - -DEFINE_EVENT( - i40evf_xmit_template, i40evf_xmit_frame_ring, - TP_PROTO(struct sk_buff *skb, - struct i40e_ring *ring), - - TP_ARGS(skb, ring)); - -DEFINE_EVENT( - i40evf_xmit_template, i40evf_xmit_frame_ring_drop, - TP_PROTO(struct sk_buff *skb, - struct i40e_ring *ring), - - TP_ARGS(skb, ring)); - -/* Events unique to the VF. */ - -#endif /* _I40E_TRACE_H_ */ -/* This must be outside ifdef _I40E_TRACE_H */ - -/* This trace include file is not located in the .../include/trace - * with the kernel tracepoint definitions, because we're a loadable - * module. - */ -#undef TRACE_INCLUDE_PATH -#define TRACE_INCLUDE_PATH . -#undef TRACE_INCLUDE_FILE -#define TRACE_INCLUDE_FILE i40e_trace -#include diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c deleted file mode 100644 index 1bf9734ae9cf..000000000000 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ /dev/null @@ -1,2513 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ - -#include -#include - -#include "i40evf.h" -#include "i40e_trace.h" -#include "i40e_prototype.h" - -static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size, - u32 td_tag) -{ - return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA | - ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) | - ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) | - ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) | - ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT)); -} - -#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS) - -/** - * i40e_unmap_and_free_tx_resource - Release a Tx buffer - * @ring: the ring that owns the buffer - * @tx_buffer: the buffer to free - **/ -static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring, - struct i40e_tx_buffer *tx_buffer) -{ - if (tx_buffer->skb) { - if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB) - kfree(tx_buffer->raw_buf); - else - dev_kfree_skb_any(tx_buffer->skb); - if (dma_unmap_len(tx_buffer, len)) - dma_unmap_single(ring->dev, - dma_unmap_addr(tx_buffer, dma), - dma_unmap_len(tx_buffer, len), - DMA_TO_DEVICE); - } else if (dma_unmap_len(tx_buffer, len)) { - dma_unmap_page(ring->dev, - dma_unmap_addr(tx_buffer, dma), - dma_unmap_len(tx_buffer, len), - DMA_TO_DEVICE); - } - - tx_buffer->next_to_watch = NULL; - tx_buffer->skb = NULL; - dma_unmap_len_set(tx_buffer, len, 0); - /* tx_buffer must be completely set up in the transmit path */ -} - -/** - * i40evf_clean_tx_ring - Free any empty Tx buffers - * @tx_ring: ring to be cleaned - **/ -void i40evf_clean_tx_ring(struct i40e_ring *tx_ring) -{ - unsigned long bi_size; - u16 i; - - /* ring already cleared, nothing to do */ - if (!tx_ring->tx_bi) - return; - - /* Free all the Tx ring sk_buffs */ - for (i = 0; i < tx_ring->count; i++) - i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]); - - bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; - memset(tx_ring->tx_bi, 0, bi_size); - - /* Zero out the descriptor ring */ - memset(tx_ring->desc, 0, tx_ring->size); - - tx_ring->next_to_use = 0; - tx_ring->next_to_clean = 0; - - if (!tx_ring->netdev) - return; - - /* cleanup Tx queue statistics */ - netdev_tx_reset_queue(txring_txq(tx_ring)); -} - -/** - * i40evf_free_tx_resources - Free Tx resources per queue - * @tx_ring: Tx descriptor ring for a specific queue - * - * Free all transmit software resources - **/ -void i40evf_free_tx_resources(struct i40e_ring *tx_ring) -{ - 
i40evf_clean_tx_ring(tx_ring); - kfree(tx_ring->tx_bi); - tx_ring->tx_bi = NULL; - - if (tx_ring->desc) { - dma_free_coherent(tx_ring->dev, tx_ring->size, - tx_ring->desc, tx_ring->dma); - tx_ring->desc = NULL; - } -} - -/** - * i40evf_get_tx_pending - how many Tx descriptors not processed - * @ring: the ring of descriptors - * @in_sw: is tx_pending being checked in SW or HW - * - * Since there is no access to the ring head register - * in XL710, we need to use our local copies - **/ -u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw) -{ - u32 head, tail; - - /* underlying hardware might not allow access and/or always return - * 0 for the head/tail registers so just use the cached values - */ - head = ring->next_to_clean; - tail = ring->next_to_use; - - if (head != tail) - return (head < tail) ? - tail - head : (tail + ring->count - head); - - return 0; -} - -/** - * i40evf_detect_recover_hung - Function to detect and recover hung_queues - * @vsi: pointer to vsi struct with tx queues - * - * VSI has netdev and netdev has TX queues. This function is to check each of - * those TX queues if they are hung, trigger recovery by issuing SW interrupt. - **/ -void i40evf_detect_recover_hung(struct i40e_vsi *vsi) -{ - struct i40e_ring *tx_ring = NULL; - struct net_device *netdev; - unsigned int i; - int packets; - - if (!vsi) - return; - - if (test_bit(__I40E_VSI_DOWN, vsi->state)) - return; - - netdev = vsi->netdev; - if (!netdev) - return; - - if (!netif_carrier_ok(netdev)) - return; - - for (i = 0; i < vsi->back->num_active_queues; i++) { - tx_ring = &vsi->back->tx_rings[i]; - if (tx_ring && tx_ring->desc) { - /* If packet counter has not changed the queue is - * likely stalled, so force an interrupt for this - * queue. - * - * prev_pkt_ctr would be negative if there was no - * pending work. - */ - packets = tx_ring->stats.packets & INT_MAX; - if (tx_ring->tx_stats.prev_pkt_ctr == packets) { - i40evf_force_wb(vsi, tx_ring->q_vector); - continue; - } - - /* Memory barrier between read of packet count and call - * to i40evf_get_tx_pending() - */ - smp_rmb(); - tx_ring->tx_stats.prev_pkt_ctr = - i40evf_get_tx_pending(tx_ring, true) ? packets : -1; - } - } -} - -#define WB_STRIDE 4 - -/** - * i40e_clean_tx_irq - Reclaim resources after transmit completes - * @vsi: the VSI we care about - * @tx_ring: Tx ring to clean - * @napi_budget: Used to determine if we are in netpoll - * - * Returns true if there's any budget left (e.g. 
the clean is finished) - **/ -static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, - struct i40e_ring *tx_ring, int napi_budget) -{ - u16 i = tx_ring->next_to_clean; - struct i40e_tx_buffer *tx_buf; - struct i40e_tx_desc *tx_desc; - unsigned int total_bytes = 0, total_packets = 0; - unsigned int budget = vsi->work_limit; - - tx_buf = &tx_ring->tx_bi[i]; - tx_desc = I40E_TX_DESC(tx_ring, i); - i -= tx_ring->count; - - do { - struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; - - /* if next_to_watch is not set then there is no work pending */ - if (!eop_desc) - break; - - /* prevent any other reads prior to eop_desc */ - smp_rmb(); - - i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf); - /* if the descriptor isn't done, no work yet to do */ - if (!(eop_desc->cmd_type_offset_bsz & - cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE))) - break; - - /* clear next_to_watch to prevent false hangs */ - tx_buf->next_to_watch = NULL; - - /* update the statistics for this packet */ - total_bytes += tx_buf->bytecount; - total_packets += tx_buf->gso_segs; - - /* free the skb */ - napi_consume_skb(tx_buf->skb, napi_budget); - - /* unmap skb header data */ - dma_unmap_single(tx_ring->dev, - dma_unmap_addr(tx_buf, dma), - dma_unmap_len(tx_buf, len), - DMA_TO_DEVICE); - - /* clear tx_buffer data */ - tx_buf->skb = NULL; - dma_unmap_len_set(tx_buf, len, 0); - - /* unmap remaining buffers */ - while (tx_desc != eop_desc) { - i40e_trace(clean_tx_irq_unmap, - tx_ring, tx_desc, tx_buf); - - tx_buf++; - tx_desc++; - i++; - if (unlikely(!i)) { - i -= tx_ring->count; - tx_buf = tx_ring->tx_bi; - tx_desc = I40E_TX_DESC(tx_ring, 0); - } - - /* unmap any remaining paged data */ - if (dma_unmap_len(tx_buf, len)) { - dma_unmap_page(tx_ring->dev, - dma_unmap_addr(tx_buf, dma), - dma_unmap_len(tx_buf, len), - DMA_TO_DEVICE); - dma_unmap_len_set(tx_buf, len, 0); - } - } - - /* move us one more past the eop_desc for start of next pkt */ - tx_buf++; - tx_desc++; - i++; - if (unlikely(!i)) { - i -= tx_ring->count; - tx_buf = tx_ring->tx_bi; - tx_desc = I40E_TX_DESC(tx_ring, 0); - } - - prefetch(tx_desc); - - /* update budget accounting */ - budget--; - } while (likely(budget)); - - i += tx_ring->count; - tx_ring->next_to_clean = i; - u64_stats_update_begin(&tx_ring->syncp); - tx_ring->stats.bytes += total_bytes; - tx_ring->stats.packets += total_packets; - u64_stats_update_end(&tx_ring->syncp); - tx_ring->q_vector->tx.total_bytes += total_bytes; - tx_ring->q_vector->tx.total_packets += total_packets; - - if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) { - /* check to see if there are < 4 descriptors - * waiting to be written back, then kick the hardware to force - * them to be written back in case we stay in NAPI. - * In this mode on X722 we do not enable Interrupt. - */ - unsigned int j = i40evf_get_tx_pending(tx_ring, false); - - if (budget && - ((j / WB_STRIDE) == 0) && (j > 0) && - !test_bit(__I40E_VSI_DOWN, vsi->state) && - (I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) - tx_ring->arm_wb = true; - } - - /* notify netdev of completed buffers */ - netdev_tx_completed_queue(txring_txq(tx_ring), - total_packets, total_bytes); - -#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2)) - if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && - (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { - /* Make sure that anybody stopping the queue after this - * sees the new next_to_clean. 
- */ - smp_mb(); - if (__netif_subqueue_stopped(tx_ring->netdev, - tx_ring->queue_index) && - !test_bit(__I40E_VSI_DOWN, vsi->state)) { - netif_wake_subqueue(tx_ring->netdev, - tx_ring->queue_index); - ++tx_ring->tx_stats.restart_queue; - } - } - - return !!budget; -} - -/** - * i40evf_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled - * @vsi: the VSI we care about - * @q_vector: the vector on which to enable writeback - * - **/ -static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi, - struct i40e_q_vector *q_vector) -{ - u16 flags = q_vector->tx.ring[0].flags; - u32 val; - - if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR)) - return; - - if (q_vector->arm_wb_state) - return; - - val = I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK | - I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK; /* set noitr */ - - wr32(&vsi->back->hw, - I40E_VFINT_DYN_CTLN1(q_vector->reg_idx), val); - q_vector->arm_wb_state = true; -} - -/** - * i40evf_force_wb - Issue SW Interrupt so HW does a wb - * @vsi: the VSI we care about - * @q_vector: the vector on which to force writeback - * - **/ -void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) -{ - u32 val = I40E_VFINT_DYN_CTLN1_INTENA_MASK | - I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */ - I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK | - I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK - /* allow 00 to be written to the index */; - - wr32(&vsi->back->hw, - I40E_VFINT_DYN_CTLN1(q_vector->reg_idx), - val); -} - -static inline bool i40e_container_is_rx(struct i40e_q_vector *q_vector, - struct i40e_ring_container *rc) -{ - return &q_vector->rx == rc; -} - -static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector) -{ - unsigned int divisor; - - switch (q_vector->adapter->link_speed) { - case I40E_LINK_SPEED_40GB: - divisor = I40E_ITR_ADAPTIVE_MIN_INC * 1024; - break; - case I40E_LINK_SPEED_25GB: - case I40E_LINK_SPEED_20GB: - divisor = I40E_ITR_ADAPTIVE_MIN_INC * 512; - break; - default: - case I40E_LINK_SPEED_10GB: - divisor = I40E_ITR_ADAPTIVE_MIN_INC * 256; - break; - case I40E_LINK_SPEED_1GB: - case I40E_LINK_SPEED_100MB: - divisor = I40E_ITR_ADAPTIVE_MIN_INC * 32; - break; - } - - return divisor; -} - -/** - * i40e_update_itr - update the dynamic ITR value based on statistics - * @q_vector: structure containing interrupt and ring information - * @rc: structure containing ring performance data - * - * Stores a new ITR value based on packets and byte - * counts during the last interrupt. The advantage of per interrupt - * computation is faster updates and more accurate ITR for the current - * traffic pattern. Constants in this function were computed - * based on theoretical maximum wire speed and thresholds were set based - * on testing data as well as attempting to minimize response time - * while increasing bulk throughput. - **/ -static void i40e_update_itr(struct i40e_q_vector *q_vector, - struct i40e_ring_container *rc) -{ - unsigned int avg_wire_size, packets, bytes, itr; - unsigned long next_update = jiffies; - - /* If we don't have any rings just leave ourselves set for maximum - * possible latency so we take ourselves out of the equation. - */ - if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting)) - return; - - /* For Rx we want to push the delay up and default to low latency. - * for Tx we want to pull the delay down and default to high latency. - */ - itr = i40e_container_is_rx(q_vector, rc) ? 
- I40E_ITR_ADAPTIVE_MIN_USECS | I40E_ITR_ADAPTIVE_LATENCY : - I40E_ITR_ADAPTIVE_MAX_USECS | I40E_ITR_ADAPTIVE_LATENCY; - - /* If we didn't update within up to 1 - 2 jiffies we can assume - * that either packets are coming in so slow there hasn't been - * any work, or that there is so much work that NAPI is dealing - * with interrupt moderation and we don't need to do anything. - */ - if (time_after(next_update, rc->next_update)) - goto clear_counts; - - /* If itr_countdown is set it means we programmed an ITR within - * the last 4 interrupt cycles. This has a side effect of us - * potentially firing an early interrupt. In order to work around - * this we need to throw out any data received for a few - * interrupts following the update. - */ - if (q_vector->itr_countdown) { - itr = rc->target_itr; - goto clear_counts; - } - - packets = rc->total_packets; - bytes = rc->total_bytes; - - if (i40e_container_is_rx(q_vector, rc)) { - /* If Rx there are 1 to 4 packets and bytes are less than - * 9000 assume insufficient data to use bulk rate limiting - * approach unless Tx is already in bulk rate limiting. We - * are likely latency driven. - */ - if (packets && packets < 4 && bytes < 9000 && - (q_vector->tx.target_itr & I40E_ITR_ADAPTIVE_LATENCY)) { - itr = I40E_ITR_ADAPTIVE_LATENCY; - goto adjust_by_size; - } - } else if (packets < 4) { - /* If we have Tx and Rx ITR maxed and Tx ITR is running in - * bulk mode and we are receiving 4 or fewer packets just - * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so - * that the Rx can relax. - */ - if (rc->target_itr == I40E_ITR_ADAPTIVE_MAX_USECS && - (q_vector->rx.target_itr & I40E_ITR_MASK) == - I40E_ITR_ADAPTIVE_MAX_USECS) - goto clear_counts; - } else if (packets > 32) { - /* If we have processed over 32 packets in a single interrupt - * for Tx assume we need to switch over to "bulk" mode. - */ - rc->target_itr &= ~I40E_ITR_ADAPTIVE_LATENCY; - } - - /* We have no packets to actually measure against. This means - * either one of the other queues on this vector is active or - * we are a Tx queue doing TSO with too high of an interrupt rate. - * - * Between 4 and 56 we can assume that our current interrupt delay - * is only slightly too low. As such we should increase it by a small - * fixed amount. - */ - if (packets < 56) { - itr = rc->target_itr + I40E_ITR_ADAPTIVE_MIN_INC; - if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) { - itr &= I40E_ITR_ADAPTIVE_LATENCY; - itr += I40E_ITR_ADAPTIVE_MAX_USECS; - } - goto clear_counts; - } - - if (packets <= 256) { - itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr); - itr &= I40E_ITR_MASK; - - /* Between 56 and 112 is our "goldilocks" zone where we are - * working out "just right". Just report that our current - * ITR is good for us. - */ - if (packets <= 112) - goto clear_counts; - - /* If packet count is 128 or greater we are likely looking - * at a slight overrun of the delay we want. Try halving - * our delay to see if that will cut the number of packets - * in half per interrupt. - */ - itr /= 2; - itr &= I40E_ITR_MASK; - if (itr < I40E_ITR_ADAPTIVE_MIN_USECS) - itr = I40E_ITR_ADAPTIVE_MIN_USECS; - - goto clear_counts; - } - - /* The paths below assume we are dealing with a bulk ITR since - * number of packets is greater than 256. We are just going to have - * to compute a value and try to bring the count under control, - * though for smaller packet sizes there isn't much we can do as - * NAPI polling will likely be kicking in sooner rather than later. 
- */ - itr = I40E_ITR_ADAPTIVE_BULK; - -adjust_by_size: - /* If packet counts are 256 or greater we can assume we have a gross - * overestimation of what the rate should be. Instead of trying to fine - * tune it just use the formula below to try and dial in an exact value - * give the current packet size of the frame. - */ - avg_wire_size = bytes / packets; - - /* The following is a crude approximation of: - * wmem_default / (size + overhead) = desired_pkts_per_int - * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate - * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value - * - * Assuming wmem_default is 212992 and overhead is 640 bytes per - * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the - * formula down to - * - * (170 * (size + 24)) / (size + 640) = ITR - * - * We first do some math on the packet size and then finally bitshift - * by 8 after rounding up. We also have to account for PCIe link speed - * difference as ITR scales based on this. - */ - if (avg_wire_size <= 60) { - /* Start at 250k ints/sec */ - avg_wire_size = 4096; - } else if (avg_wire_size <= 380) { - /* 250K ints/sec to 60K ints/sec */ - avg_wire_size *= 40; - avg_wire_size += 1696; - } else if (avg_wire_size <= 1084) { - /* 60K ints/sec to 36K ints/sec */ - avg_wire_size *= 15; - avg_wire_size += 11452; - } else if (avg_wire_size <= 1980) { - /* 36K ints/sec to 30K ints/sec */ - avg_wire_size *= 5; - avg_wire_size += 22420; - } else { - /* plateau at a limit of 30K ints/sec */ - avg_wire_size = 32256; - } - - /* If we are in low latency mode halve our delay which doubles the - * rate to somewhere between 100K to 16K ints/sec - */ - if (itr & I40E_ITR_ADAPTIVE_LATENCY) - avg_wire_size /= 2; - - /* Resultant value is 256 times larger than it needs to be. This - * gives us room to adjust the value as needed to either increase - * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc. - * - * Use addition as we have already recorded the new latency flag - * for the ITR value. 
- */ - itr += DIV_ROUND_UP(avg_wire_size, i40e_itr_divisor(q_vector)) * - I40E_ITR_ADAPTIVE_MIN_INC; - - if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) { - itr &= I40E_ITR_ADAPTIVE_LATENCY; - itr += I40E_ITR_ADAPTIVE_MAX_USECS; - } - -clear_counts: - /* write back value */ - rc->target_itr = itr; - - /* next update should occur within next jiffy */ - rc->next_update = next_update + 1; - - rc->total_bytes = 0; - rc->total_packets = 0; -} - -/** - * i40evf_setup_tx_descriptors - Allocate the Tx descriptors - * @tx_ring: the tx ring to set up - * - * Return 0 on success, negative on error - **/ -int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring) -{ - struct device *dev = tx_ring->dev; - int bi_size; - - if (!dev) - return -ENOMEM; - - /* warn if we are about to overwrite the pointer */ - WARN_ON(tx_ring->tx_bi); - bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; - tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL); - if (!tx_ring->tx_bi) - goto err; - - /* round up to nearest 4K */ - tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); - tx_ring->size = ALIGN(tx_ring->size, 4096); - tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, - &tx_ring->dma, GFP_KERNEL); - if (!tx_ring->desc) { - dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n", - tx_ring->size); - goto err; - } - - tx_ring->next_to_use = 0; - tx_ring->next_to_clean = 0; - tx_ring->tx_stats.prev_pkt_ctr = -1; - return 0; - -err: - kfree(tx_ring->tx_bi); - tx_ring->tx_bi = NULL; - return -ENOMEM; -} - -/** - * i40evf_clean_rx_ring - Free Rx buffers - * @rx_ring: ring to be cleaned - **/ -void i40evf_clean_rx_ring(struct i40e_ring *rx_ring) -{ - unsigned long bi_size; - u16 i; - - /* ring already cleared, nothing to do */ - if (!rx_ring->rx_bi) - return; - - if (rx_ring->skb) { - dev_kfree_skb(rx_ring->skb); - rx_ring->skb = NULL; - } - - /* Free all the Rx ring sk_buffs */ - for (i = 0; i < rx_ring->count; i++) { - struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i]; - - if (!rx_bi->page) - continue; - - /* Invalidate cache lines that may have been written to by - * device so that we avoid corrupting memory. 
- */ - dma_sync_single_range_for_cpu(rx_ring->dev, - rx_bi->dma, - rx_bi->page_offset, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - - /* free resources associated with mapping */ - dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma, - i40e_rx_pg_size(rx_ring), - DMA_FROM_DEVICE, - I40E_RX_DMA_ATTR); - - __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias); - - rx_bi->page = NULL; - rx_bi->page_offset = 0; - } - - bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count; - memset(rx_ring->rx_bi, 0, bi_size); - - /* Zero out the descriptor ring */ - memset(rx_ring->desc, 0, rx_ring->size); - - rx_ring->next_to_alloc = 0; - rx_ring->next_to_clean = 0; - rx_ring->next_to_use = 0; -} - -/** - * i40evf_free_rx_resources - Free Rx resources - * @rx_ring: ring to clean the resources from - * - * Free all receive software resources - **/ -void i40evf_free_rx_resources(struct i40e_ring *rx_ring) -{ - i40evf_clean_rx_ring(rx_ring); - kfree(rx_ring->rx_bi); - rx_ring->rx_bi = NULL; - - if (rx_ring->desc) { - dma_free_coherent(rx_ring->dev, rx_ring->size, - rx_ring->desc, rx_ring->dma); - rx_ring->desc = NULL; - } -} - -/** - * i40evf_setup_rx_descriptors - Allocate Rx descriptors - * @rx_ring: Rx descriptor ring (for a specific queue) to setup - * - * Returns 0 on success, negative on failure - **/ -int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring) -{ - struct device *dev = rx_ring->dev; - int bi_size; - - /* warn if we are about to overwrite the pointer */ - WARN_ON(rx_ring->rx_bi); - bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count; - rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL); - if (!rx_ring->rx_bi) - goto err; - - u64_stats_init(&rx_ring->syncp); - - /* Round up to nearest 4K */ - rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc); - rx_ring->size = ALIGN(rx_ring->size, 4096); - rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, - &rx_ring->dma, GFP_KERNEL); - - if (!rx_ring->desc) { - dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n", - rx_ring->size); - goto err; - } - - rx_ring->next_to_alloc = 0; - rx_ring->next_to_clean = 0; - rx_ring->next_to_use = 0; - - return 0; -err: - kfree(rx_ring->rx_bi); - rx_ring->rx_bi = NULL; - return -ENOMEM; -} - -/** - * i40e_release_rx_desc - Store the new tail and head values - * @rx_ring: ring to bump - * @val: new head index - **/ -static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) -{ - rx_ring->next_to_use = val; - - /* update next to alloc since we have filled the ring */ - rx_ring->next_to_alloc = val; - - /* Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). - */ - wmb(); - writel(val, rx_ring->tail); -} - -/** - * i40e_rx_offset - Return expected offset into page to access data - * @rx_ring: Ring we are requesting offset of - * - * Returns the offset value for ring into the data buffer. - */ -static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring) -{ - return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0; -} - -/** - * i40e_alloc_mapped_page - recycle or make a new page - * @rx_ring: ring to use - * @bi: rx_buffer struct to modify - * - * Returns true if the page was successfully allocated or - * reused. 
- **/ -static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring, - struct i40e_rx_buffer *bi) -{ - struct page *page = bi->page; - dma_addr_t dma; - - /* since we are recycling buffers we should seldom need to alloc */ - if (likely(page)) { - rx_ring->rx_stats.page_reuse_count++; - return true; - } - - /* alloc new page for storage */ - page = dev_alloc_pages(i40e_rx_pg_order(rx_ring)); - if (unlikely(!page)) { - rx_ring->rx_stats.alloc_page_failed++; - return false; - } - - /* map page for use */ - dma = dma_map_page_attrs(rx_ring->dev, page, 0, - i40e_rx_pg_size(rx_ring), - DMA_FROM_DEVICE, - I40E_RX_DMA_ATTR); - - /* if mapping failed free memory back to system since - * there isn't much point in holding memory we can't use - */ - if (dma_mapping_error(rx_ring->dev, dma)) { - __free_pages(page, i40e_rx_pg_order(rx_ring)); - rx_ring->rx_stats.alloc_page_failed++; - return false; - } - - bi->dma = dma; - bi->page = page; - bi->page_offset = i40e_rx_offset(rx_ring); - - /* initialize pagecnt_bias to 1 representing we fully own page */ - bi->pagecnt_bias = 1; - - return true; -} - -/** - * i40e_receive_skb - Send a completed packet up the stack - * @rx_ring: rx ring in play - * @skb: packet to send up - * @vlan_tag: vlan tag for packet - **/ -static void i40e_receive_skb(struct i40e_ring *rx_ring, - struct sk_buff *skb, u16 vlan_tag) -{ - struct i40e_q_vector *q_vector = rx_ring->q_vector; - - if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && - (vlan_tag & VLAN_VID_MASK)) - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); - - napi_gro_receive(&q_vector->napi, skb); -} - -/** - * i40evf_alloc_rx_buffers - Replace used receive buffers - * @rx_ring: ring to place buffers on - * @cleaned_count: number of buffers to replace - * - * Returns false if all allocations were successful, true if any fail - **/ -bool i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) -{ - u16 ntu = rx_ring->next_to_use; - union i40e_rx_desc *rx_desc; - struct i40e_rx_buffer *bi; - - /* do nothing if no valid netdev defined */ - if (!rx_ring->netdev || !cleaned_count) - return false; - - rx_desc = I40E_RX_DESC(rx_ring, ntu); - bi = &rx_ring->rx_bi[ntu]; - - do { - if (!i40e_alloc_mapped_page(rx_ring, bi)) - goto no_buffers; - - /* sync the buffer for use by the device */ - dma_sync_single_range_for_device(rx_ring->dev, bi->dma, - bi->page_offset, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - - /* Refresh the desc even if buffer_addrs didn't change - * because each write-back erases this info. 
- */ - rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); - - rx_desc++; - bi++; - ntu++; - if (unlikely(ntu == rx_ring->count)) { - rx_desc = I40E_RX_DESC(rx_ring, 0); - bi = rx_ring->rx_bi; - ntu = 0; - } - - /* clear the status bits for the next_to_use descriptor */ - rx_desc->wb.qword1.status_error_len = 0; - - cleaned_count--; - } while (cleaned_count); - - if (rx_ring->next_to_use != ntu) - i40e_release_rx_desc(rx_ring, ntu); - - return false; - -no_buffers: - if (rx_ring->next_to_use != ntu) - i40e_release_rx_desc(rx_ring, ntu); - - /* make sure to come back via polling to try again after - * allocation failure - */ - return true; -} - -/** - * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum - * @vsi: the VSI we care about - * @skb: skb currently being received and modified - * @rx_desc: the receive descriptor - **/ -static inline void i40e_rx_checksum(struct i40e_vsi *vsi, - struct sk_buff *skb, - union i40e_rx_desc *rx_desc) -{ - struct i40e_rx_ptype_decoded decoded; - u32 rx_error, rx_status; - bool ipv4, ipv6; - u8 ptype; - u64 qword; - - qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); - ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT; - rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> - I40E_RXD_QW1_ERROR_SHIFT; - rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> - I40E_RXD_QW1_STATUS_SHIFT; - decoded = decode_rx_desc_ptype(ptype); - - skb->ip_summed = CHECKSUM_NONE; - - skb_checksum_none_assert(skb); - - /* Rx csum enabled and ip headers found? */ - if (!(vsi->netdev->features & NETIF_F_RXCSUM)) - return; - - /* did the hardware decode the packet and checksum? */ - if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT))) - return; - - /* both known and outer_ip must be set for the below code to work */ - if (!(decoded.known && decoded.outer_ip)) - return; - - ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) && - (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4); - ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) && - (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6); - - if (ipv4 && - (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) | - BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT)))) - goto checksum_fail; - - /* likely incorrect csum if alternate IP extension headers found */ - if (ipv6 && - rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) - /* don't increment checksum err here, non-fatal err */ - return; - - /* there was some L4 error, count error and punt packet to the stack */ - if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT)) - goto checksum_fail; - - /* handle packets that were not able to be checksummed due - * to arrival speed, in this case the stack can compute - * the csum. 
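[ ed: The checksum path above starts by cracking one little-endian qword into ptype/error/status fields with mask-and-shift pairs. The decode is easy to sanity-check in isolation; the sketch below uses made-up shift and mask values (the real ones live in i40e_type.h), so treat it as an illustration of the pattern only: ]

  #include <stdint.h>
  #include <stdio.h>

  /* hypothetical layout -- NOT the real I40E_RXD_QW1_* values */
  #define DEMO_QW1_STATUS_MASK  0x7FFFULL
  #define DEMO_QW1_PTYPE_SHIFT  30
  #define DEMO_QW1_PTYPE_MASK   (0xFFULL << DEMO_QW1_PTYPE_SHIFT)

  int main(void)
  {
  	uint64_t qword = (0x2AULL << DEMO_QW1_PTYPE_SHIFT) | 0x3ULL;
  	uint8_t ptype = (qword & DEMO_QW1_PTYPE_MASK) >> DEMO_QW1_PTYPE_SHIFT;
  	uint16_t status = qword & DEMO_QW1_STATUS_MASK;

  	printf("ptype=%u status=%u\n", ptype, status); /* ptype=42 status=3 */
  	return 0;
  }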
- */
-	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
-		return;
-
-	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
-	switch (decoded.inner_prot) {
-	case I40E_RX_PTYPE_INNER_PROT_TCP:
-	case I40E_RX_PTYPE_INNER_PROT_UDP:
-	case I40E_RX_PTYPE_INNER_PROT_SCTP:
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-		/* fall through */
-	default:
-		break;
-	}
-
-	return;
-
-checksum_fail:
-	vsi->back->hw_csum_rx_error++;
-}
-
-/**
- * i40e_ptype_to_htype - get a hash type
- * @ptype: the ptype value from the descriptor
- *
- * Returns a hash type to be used by skb_set_hash
- **/
-static inline int i40e_ptype_to_htype(u8 ptype)
-{
-	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
-
-	if (!decoded.known)
-		return PKT_HASH_TYPE_NONE;
-
-	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
-	    decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
-		return PKT_HASH_TYPE_L4;
-	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
-		 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
-		return PKT_HASH_TYPE_L3;
-	else
-		return PKT_HASH_TYPE_L2;
-}
-
-/**
- * i40e_rx_hash - set the hash value in the skb
- * @ring: descriptor ring
- * @rx_desc: specific descriptor
- * @skb: skb currently being received and modified
- * @rx_ptype: Rx packet type
- **/
-static inline void i40e_rx_hash(struct i40e_ring *ring,
-				union i40e_rx_desc *rx_desc,
-				struct sk_buff *skb,
-				u8 rx_ptype)
-{
-	u32 hash;
-	const __le64 rss_mask =
-		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
-			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
-
-	if (!(ring->netdev->features & NETIF_F_RXHASH))
-		return;
-
-	if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
-		hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
-		skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
-	}
-}
-
-/**
- * i40evf_process_skb_fields - Populate skb header fields from Rx descriptor
- * @rx_ring: rx descriptor ring packet is being transacted on
- * @rx_desc: pointer to the EOP Rx descriptor
- * @skb: pointer to current skb being populated
- * @rx_ptype: the packet type decoded by hardware
- *
- * This function checks the ring, descriptor, and packet information in
- * order to populate the hash, checksum, VLAN, protocol, and
- * other fields within the skb.
- **/
-static inline
-void i40evf_process_skb_fields(struct i40e_ring *rx_ring,
-			       union i40e_rx_desc *rx_desc, struct sk_buff *skb,
-			       u8 rx_ptype)
-{
-	i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
-
-	i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
-
-	skb_record_rx_queue(skb, rx_ring->queue_index);
-
-	/* modifies the skb - consumes the enet header */
-	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
-}
-
-/**
- * i40e_cleanup_headers - Correct empty headers
- * @rx_ring: rx descriptor ring packet is being transacted on
- * @skb: pointer to current skb being fixed
- *
- * Also address the case where we are pulling data in on pages only
- * and as such no data is present in the skb header.
- *
- * In addition if skb is not at least 60 bytes we need to pad it so that
- * it is large enough to qualify as a valid Ethernet frame.
- *
- * Returns true if an error was encountered and skb was freed.
- **/ -static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb) -{ - /* if eth_skb_pad returns an error the skb was freed */ - if (eth_skb_pad(skb)) - return true; - - return false; -} - -/** - * i40e_reuse_rx_page - page flip buffer and store it back on the ring - * @rx_ring: rx descriptor ring to store buffers on - * @old_buff: donor buffer to have page reused - * - * Synchronizes page for reuse by the adapter - **/ -static void i40e_reuse_rx_page(struct i40e_ring *rx_ring, - struct i40e_rx_buffer *old_buff) -{ - struct i40e_rx_buffer *new_buff; - u16 nta = rx_ring->next_to_alloc; - - new_buff = &rx_ring->rx_bi[nta]; - - /* update, and store next to alloc */ - nta++; - rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; - - /* transfer page from old buffer to new buffer */ - new_buff->dma = old_buff->dma; - new_buff->page = old_buff->page; - new_buff->page_offset = old_buff->page_offset; - new_buff->pagecnt_bias = old_buff->pagecnt_bias; -} - -/** - * i40e_page_is_reusable - check if any reuse is possible - * @page: page struct to check - * - * A page is not reusable if it was allocated under low memory - * conditions, or it's not in the same NUMA node as this CPU. - */ -static inline bool i40e_page_is_reusable(struct page *page) -{ - return (page_to_nid(page) == numa_mem_id()) && - !page_is_pfmemalloc(page); -} - -/** - * i40e_can_reuse_rx_page - Determine if this page can be reused by - * the adapter for another receive - * - * @rx_buffer: buffer containing the page - * - * If page is reusable, rx_buffer->page_offset is adjusted to point to - * an unused region in the page. - * - * For small pages, @truesize will be a constant value, half the size - * of the memory at page. We'll attempt to alternate between high and - * low halves of the page, with one half ready for use by the hardware - * and the other half being consumed by the stack. We use the page - * ref count to determine whether the stack has finished consuming the - * portion of this page that was passed up with a previous packet. If - * the page ref count is >1, we'll assume the "other" half page is - * still busy, and this page cannot be reused. - * - * For larger pages, @truesize will be the actual space used by the - * received packet (adjusted upward to an even multiple of the cache - * line size). This will advance through the page by the amount - * actually consumed by the received packets while there is still - * space for a buffer. Each region of larger pages will be used at - * most once, after which the page will not be reused. - * - * In either case, if the page is reusable its refcount is increased. - **/ -static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer) -{ - unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; - struct page *page = rx_buffer->page; - - /* Is any reuse possible? */ - if (unlikely(!i40e_page_is_reusable(page))) - return false; - -#if (PAGE_SIZE < 8192) - /* if we are only owner of page we can reuse it */ - if (unlikely((page_count(page) - pagecnt_bias) > 1)) - return false; -#else -#define I40E_LAST_OFFSET \ - (SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048) - if (rx_buffer->page_offset > I40E_LAST_OFFSET) - return false; -#endif - - /* If we have drained the page fragment pool we need to update - * the pagecnt_bias and page count so that we fully restock the - * number of references the driver holds. 
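[ ed: On PAGE_SIZE < 8192 builds, the alternating-halves scheme described above degenerates to XOR-flipping page_offset by the half-page truesize, which is why the hot path needs no division or comparison. A self-contained sketch under that assumption (4K page; names illustrative): ]

  #include <assert.h>
  #include <stdint.h>

  #define DEMO_TRUESIZE 2048u	/* half of a 4K page */

  int main(void)
  {
  	uint32_t page_offset = 0;

  	page_offset ^= DEMO_TRUESIZE;	/* stack consumes first half */
  	assert(page_offset == 2048);

  	page_offset ^= DEMO_TRUESIZE;	/* hardware re-arms first half */
  	assert(page_offset == 0);
  	return 0;
  }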
- */ - if (unlikely(!pagecnt_bias)) { - page_ref_add(page, USHRT_MAX); - rx_buffer->pagecnt_bias = USHRT_MAX; - } - - return true; -} - -/** - * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff - * @rx_ring: rx descriptor ring to transact packets on - * @rx_buffer: buffer containing page to add - * @skb: sk_buff to place the data into - * @size: packet length from rx_desc - * - * This function will add the data contained in rx_buffer->page to the skb. - * It will just attach the page as a frag to the skb. - * - * The function will then update the page offset. - **/ -static void i40e_add_rx_frag(struct i40e_ring *rx_ring, - struct i40e_rx_buffer *rx_buffer, - struct sk_buff *skb, - unsigned int size) -{ -#if (PAGE_SIZE < 8192) - unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; -#else - unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring)); -#endif - - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, - rx_buffer->page_offset, size, truesize); - - /* page is being used so we must update the page offset */ -#if (PAGE_SIZE < 8192) - rx_buffer->page_offset ^= truesize; -#else - rx_buffer->page_offset += truesize; -#endif -} - -/** - * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use - * @rx_ring: rx descriptor ring to transact packets on - * @size: size of buffer to add to skb - * - * This function will pull an Rx buffer from the ring and synchronize it - * for use by the CPU. - */ -static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring, - const unsigned int size) -{ - struct i40e_rx_buffer *rx_buffer; - - rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean]; - prefetchw(rx_buffer->page); - - /* we are reusing so sync this buffer for CPU use */ - dma_sync_single_range_for_cpu(rx_ring->dev, - rx_buffer->dma, - rx_buffer->page_offset, - size, - DMA_FROM_DEVICE); - - /* We have pulled a buffer for use, so decrement pagecnt_bias */ - rx_buffer->pagecnt_bias--; - - return rx_buffer; -} - -/** - * i40e_construct_skb - Allocate skb and populate it - * @rx_ring: rx descriptor ring to transact packets on - * @rx_buffer: rx buffer to pull data from - * @size: size of buffer to add to skb - * - * This function allocates an skb. It then populates it with the page - * data from the current receive descriptor, taking care to set up the - * skb correctly. 
- */ -static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring, - struct i40e_rx_buffer *rx_buffer, - unsigned int size) -{ - void *va; -#if (PAGE_SIZE < 8192) - unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; -#else - unsigned int truesize = SKB_DATA_ALIGN(size); -#endif - unsigned int headlen; - struct sk_buff *skb; - - /* prefetch first cache line of first page */ - va = page_address(rx_buffer->page) + rx_buffer->page_offset; - prefetch(va); -#if L1_CACHE_BYTES < 128 - prefetch(va + L1_CACHE_BYTES); -#endif - - /* allocate a skb to store the frags */ - skb = __napi_alloc_skb(&rx_ring->q_vector->napi, - I40E_RX_HDR_SIZE, - GFP_ATOMIC | __GFP_NOWARN); - if (unlikely(!skb)) - return NULL; - - /* Determine available headroom for copy */ - headlen = size; - if (headlen > I40E_RX_HDR_SIZE) - headlen = eth_get_headlen(va, I40E_RX_HDR_SIZE); - - /* align pull length to size of long to optimize memcpy performance */ - memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); - - /* update all of the pointers */ - size -= headlen; - if (size) { - skb_add_rx_frag(skb, 0, rx_buffer->page, - rx_buffer->page_offset + headlen, - size, truesize); - - /* buffer is used by skb, update page_offset */ -#if (PAGE_SIZE < 8192) - rx_buffer->page_offset ^= truesize; -#else - rx_buffer->page_offset += truesize; -#endif - } else { - /* buffer is unused, reset bias back to rx_buffer */ - rx_buffer->pagecnt_bias++; - } - - return skb; -} - -/** - * i40e_build_skb - Build skb around an existing buffer - * @rx_ring: Rx descriptor ring to transact packets on - * @rx_buffer: Rx buffer to pull data from - * @size: size of buffer to add to skb - * - * This function builds an skb around an existing Rx buffer, taking care - * to set up the skb correctly and avoid any memcpy overhead. - */ -static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring, - struct i40e_rx_buffer *rx_buffer, - unsigned int size) -{ - void *va; -#if (PAGE_SIZE < 8192) - unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; -#else - unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + - SKB_DATA_ALIGN(I40E_SKB_PAD + size); -#endif - struct sk_buff *skb; - - /* prefetch first cache line of first page */ - va = page_address(rx_buffer->page) + rx_buffer->page_offset; - prefetch(va); -#if L1_CACHE_BYTES < 128 - prefetch(va + L1_CACHE_BYTES); -#endif - /* build an skb around the page buffer */ - skb = build_skb(va - I40E_SKB_PAD, truesize); - if (unlikely(!skb)) - return NULL; - - /* update pointers within the skb to store the data */ - skb_reserve(skb, I40E_SKB_PAD); - __skb_put(skb, size); - - /* buffer is used by skb, update page_offset */ -#if (PAGE_SIZE < 8192) - rx_buffer->page_offset ^= truesize; -#else - rx_buffer->page_offset += truesize; -#endif - - return skb; -} - -/** - * i40e_put_rx_buffer - Clean up used buffer and either recycle or free - * @rx_ring: rx descriptor ring to transact packets on - * @rx_buffer: rx buffer to pull data from - * - * This function will clean up the contents of the rx_buffer. It will - * either recycle the buffer or unmap it and free the associated resources. 
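[ ed: A quick way to see what i40e_build_skb() above buys: the NIC DMA'd the frame at an offset that already reserved I40E_SKB_PAD of headroom, so the skb can wrap the existing buffer instead of copying. The capacity arithmetic, with stand-in numbers — DEMO_SHINFO approximates SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), whose exact value varies by arch: ]

  #include <stdio.h>

  #define DEMO_TRUESIZE 2048	/* half-page buffer */
  #define DEMO_SKB_PAD  64	/* stand-in for I40E_SKB_PAD */
  #define DEMO_SHINFO   320	/* assumed shared-info overhead */

  int main(void)
  {
  	int max_data = DEMO_TRUESIZE - DEMO_SKB_PAD - DEMO_SHINFO;

  	printf("largest frame the build_skb path can hold: %d bytes\n",
  	       max_data);	/* 1664 with these stand-in numbers */
  	return 0;
  }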
- */ -static void i40e_put_rx_buffer(struct i40e_ring *rx_ring, - struct i40e_rx_buffer *rx_buffer) -{ - if (i40e_can_reuse_rx_page(rx_buffer)) { - /* hand second half of page back to the ring */ - i40e_reuse_rx_page(rx_ring, rx_buffer); - rx_ring->rx_stats.page_reuse_count++; - } else { - /* we are not reusing the buffer so unmap it */ - dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, - i40e_rx_pg_size(rx_ring), - DMA_FROM_DEVICE, I40E_RX_DMA_ATTR); - __page_frag_cache_drain(rx_buffer->page, - rx_buffer->pagecnt_bias); - } - - /* clear contents of buffer_info */ - rx_buffer->page = NULL; -} - -/** - * i40e_is_non_eop - process handling of non-EOP buffers - * @rx_ring: Rx ring being processed - * @rx_desc: Rx descriptor for current buffer - * @skb: Current socket buffer containing buffer in progress - * - * This function updates next to clean. If the buffer is an EOP buffer - * this function exits returning false, otherwise it will place the - * sk_buff in the next buffer to be chained and return true indicating - * that this is in fact a non-EOP buffer. - **/ -static bool i40e_is_non_eop(struct i40e_ring *rx_ring, - union i40e_rx_desc *rx_desc, - struct sk_buff *skb) -{ - u32 ntc = rx_ring->next_to_clean + 1; - - /* fetch, update, and store next to clean */ - ntc = (ntc < rx_ring->count) ? ntc : 0; - rx_ring->next_to_clean = ntc; - - prefetch(I40E_RX_DESC(rx_ring, ntc)); - - /* if we are the last buffer then there is nothing else to do */ -#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT) - if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF))) - return false; - - rx_ring->rx_stats.non_eop_descs++; - - return true; -} - -/** - * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf - * @rx_ring: rx descriptor ring to transact packets on - * @budget: Total limit on number of packets to process - * - * This function provides a "bounce buffer" approach to Rx interrupt - * processing. The advantage to this is that on systems that have - * expensive overhead for IOMMU access this provides a means of avoiding - * it by maintaining the mapping of the page to the system. - * - * Returns amount of work completed - **/ -static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) -{ - unsigned int total_rx_bytes = 0, total_rx_packets = 0; - struct sk_buff *skb = rx_ring->skb; - u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); - bool failure = false; - - while (likely(total_rx_packets < (unsigned int)budget)) { - struct i40e_rx_buffer *rx_buffer; - union i40e_rx_desc *rx_desc; - unsigned int size; - u16 vlan_tag; - u8 rx_ptype; - u64 qword; - - /* return some buffers to hardware, one at a time is too slow */ - if (cleaned_count >= I40E_RX_BUFFER_WRITE) { - failure = failure || - i40evf_alloc_rx_buffers(rx_ring, cleaned_count); - cleaned_count = 0; - } - - rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean); - - /* status_error_len will always be zero for unused descriptors - * because it's cleared in cleanup, and overlaps with hdr_addr - * which is always zero because packet split isn't used, if the - * hardware wrote DD then the length will be non-zero - */ - qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); - - /* This memory barrier is needed to keep us from reading - * any other fields out of the rx_desc until we have - * verified the descriptor has been written back. 
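[ ed: The barrier comment above, and the dma_rmb() that opens the loop body below, form the consumer half of the descriptor handshake: the DD/length qword must be loaded and checked before any other descriptor field is trusted. A hedged userspace analogue uses an acquire load where the kernel uses dma_rmb(); the bit position is illustrative: ]

  #include <stdatomic.h>
  #include <stdint.h>
  #include <stdio.h>

  struct demo_desc {
  	_Atomic uint64_t status_len;	/* device writes this last */
  	uint64_t meta;			/* valid only once DD is seen */
  };

  static int demo_poll(struct demo_desc *d, uint64_t *meta)
  {
  	/* acquire load stands in for the plain read + dma_rmb() pair */
  	uint64_t qword = atomic_load_explicit(&d->status_len,
  					      memory_order_acquire);

  	if (!(qword & 1))	/* DD bit, illustrative position */
  		return 0;
  	*meta = d->meta;	/* safe: ordered after the DD check */
  	return 1;
  }

  int main(void)
  {
  	struct demo_desc d = { .status_len = 1, .meta = 0xabcd };
  	uint64_t meta;

  	printf("consumed=%d\n", demo_poll(&d, &meta));
  	return 0;
  }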
- */
-		dma_rmb();
-
-		size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
-		       I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
-		if (!size)
-			break;
-
-		i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb);
-		rx_buffer = i40e_get_rx_buffer(rx_ring, size);
-
-		/* retrieve a buffer from the ring */
-		if (skb)
-			i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
-		else if (ring_uses_build_skb(rx_ring))
-			skb = i40e_build_skb(rx_ring, rx_buffer, size);
-		else
-			skb = i40e_construct_skb(rx_ring, rx_buffer, size);
-
-		/* exit if we failed to retrieve a buffer */
-		if (!skb) {
-			rx_ring->rx_stats.alloc_buff_failed++;
-			rx_buffer->pagecnt_bias++;
-			break;
-		}
-
-		i40e_put_rx_buffer(rx_ring, rx_buffer);
-		cleaned_count++;
-
-		if (i40e_is_non_eop(rx_ring, rx_desc, skb))
-			continue;
-
-		/* ERR_MASK will only have valid bits if EOP set, and
-		 * what we are doing here is actually checking
-		 * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
-		 * the error field
-		 */
-		if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
-			dev_kfree_skb_any(skb);
-			skb = NULL;
-			continue;
-		}
-
-		if (i40e_cleanup_headers(rx_ring, skb)) {
-			skb = NULL;
-			continue;
-		}
-
-		/* probably a little skewed due to removing CRC */
-		total_rx_bytes += skb->len;
-
-		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
-		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
-			   I40E_RXD_QW1_PTYPE_SHIFT;
-
-		/* populate checksum, VLAN, and protocol */
-		i40evf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
-
-		vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
-			   le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
-
-		i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
-		i40e_receive_skb(rx_ring, skb, vlan_tag);
-		skb = NULL;
-
-		/* update budget accounting */
-		total_rx_packets++;
-	}
-
-	rx_ring->skb = skb;
-
-	u64_stats_update_begin(&rx_ring->syncp);
-	rx_ring->stats.packets += total_rx_packets;
-	rx_ring->stats.bytes += total_rx_bytes;
-	u64_stats_update_end(&rx_ring->syncp);
-	rx_ring->q_vector->rx.total_packets += total_rx_packets;
-	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
-
-	/* guarantee a trip back through this routine if there was a failure */
-	return failure ? budget : (int)total_rx_packets;
-}
-
-static inline u32 i40e_buildreg_itr(const int type, u16 itr)
-{
-	u32 val;
-
-	/* We don't bother with setting the CLEARPBA bit as the data sheet
-	 * points out doing so is "meaningless since it was already
-	 * auto-cleared". The auto-clearing happens when the interrupt is
-	 * asserted.
-	 *
-	 * Hardware errata 28 also indicates that writing to a
-	 * xxINT_DYN_CTLx CSR with INTENA_MSK (bit 31) set to 0 will clear
-	 * an event in the PBA anyway so we need to rely on the automask
-	 * to hold pending events for us until the interrupt is re-enabled
-	 *
-	 * The itr value is reported in microseconds, and the register
-	 * value is recorded in 2 microsecond units. For this reason we
-	 * only need to shift by the interval shift - 1 instead of the
-	 * full value.
-	 */
-	itr &= I40E_ITR_MASK;
-
-	val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
-	      (type << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
-	      (itr << (I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT - 1));
-
-	return val;
-}
-
-/* a small macro to shorten up some long lines */
-#define INTREG I40E_VFINT_DYN_CTLN1
-
-/* The act of updating the ITR will cause it to immediately trigger. In order
- * to prevent this from throwing off adaptive update statistics we defer the
- * update so that it can only happen so often.
So after either Tx or Rx are - * updated we make the adaptive scheme wait until either the ITR completely - * expires via the next_update expiration or we have been through at least - * 3 interrupts. - */ -#define ITR_COUNTDOWN_START 3 - -/** - * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt - * @vsi: the VSI we care about - * @q_vector: q_vector for which itr is being updated and interrupt enabled - * - **/ -static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, - struct i40e_q_vector *q_vector) -{ - struct i40e_hw *hw = &vsi->back->hw; - u32 intval; - - /* These will do nothing if dynamic updates are not enabled */ - i40e_update_itr(q_vector, &q_vector->tx); - i40e_update_itr(q_vector, &q_vector->rx); - - /* This block of logic allows us to get away with only updating - * one ITR value with each interrupt. The idea is to perform a - * pseudo-lazy update with the following criteria. - * - * 1. Rx is given higher priority than Tx if both are in same state - * 2. If we must reduce an ITR that is given highest priority. - * 3. We then give priority to increasing ITR based on amount. - */ - if (q_vector->rx.target_itr < q_vector->rx.current_itr) { - /* Rx ITR needs to be reduced, this is highest priority */ - intval = i40e_buildreg_itr(I40E_RX_ITR, - q_vector->rx.target_itr); - q_vector->rx.current_itr = q_vector->rx.target_itr; - q_vector->itr_countdown = ITR_COUNTDOWN_START; - } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) || - ((q_vector->rx.target_itr - q_vector->rx.current_itr) < - (q_vector->tx.target_itr - q_vector->tx.current_itr))) { - /* Tx ITR needs to be reduced, this is second priority - * Tx ITR needs to be increased more than Rx, fourth priority - */ - intval = i40e_buildreg_itr(I40E_TX_ITR, - q_vector->tx.target_itr); - q_vector->tx.current_itr = q_vector->tx.target_itr; - q_vector->itr_countdown = ITR_COUNTDOWN_START; - } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) { - /* Rx ITR needs to be increased, third priority */ - intval = i40e_buildreg_itr(I40E_RX_ITR, - q_vector->rx.target_itr); - q_vector->rx.current_itr = q_vector->rx.target_itr; - q_vector->itr_countdown = ITR_COUNTDOWN_START; - } else { - /* No ITR update, lowest priority */ - intval = i40e_buildreg_itr(I40E_ITR_NONE, 0); - if (q_vector->itr_countdown) - q_vector->itr_countdown--; - } - - if (!test_bit(__I40E_VSI_DOWN, vsi->state)) - wr32(hw, INTREG(q_vector->reg_idx), intval); -} - -/** - * i40evf_napi_poll - NAPI polling Rx/Tx cleanup routine - * @napi: napi struct with our devices info in it - * @budget: amount of work driver is allowed to do this pass, in packets - * - * This function will clean all queues associated with a q_vector. - * - * Returns the amount of work done - **/ -int i40evf_napi_poll(struct napi_struct *napi, int budget) -{ - struct i40e_q_vector *q_vector = - container_of(napi, struct i40e_q_vector, napi); - struct i40e_vsi *vsi = q_vector->vsi; - struct i40e_ring *ring; - bool clean_complete = true; - bool arm_wb = false; - int budget_per_ring; - int work_done = 0; - - if (test_bit(__I40E_VSI_DOWN, vsi->state)) { - napi_complete(napi); - return 0; - } - - /* Since the actual Tx work is minimal, we can give the Tx a larger - * budget and be more aggressive about cleaning up the Tx descriptors. 
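[ ed: The intval written above packs three fields — enable bit, ITR index, and interval — into a single register value, with the interval shifted by one less than its field offset because hardware counts in 2 usec units. A sketch of the packing; the DEMO_* bit positions are placeholders, not the real I40E_VFINT_DYN_CTLN1 layout: ]

  #include <stdint.h>
  #include <stdio.h>

  #define DEMO_INTENA_MASK     0x1u	/* placeholder positions */
  #define DEMO_ITR_INDX_SHIFT  3
  #define DEMO_INTERVAL_SHIFT  5
  #define DEMO_ITR_MASK        0x1FFEu

  static uint32_t demo_buildreg_itr(int type, uint16_t itr)
  {
  	itr &= DEMO_ITR_MASK;
  	/* the interval field counts 2 usec units: shifting by one less
  	 * than the field offset divides the usec value by two
  	 */
  	return DEMO_INTENA_MASK |
  	       ((uint32_t)type << DEMO_ITR_INDX_SHIFT) |
  	       ((uint32_t)itr << (DEMO_INTERVAL_SHIFT - 1));
  }

  int main(void)
  {
  	printf("reg=%#x\n", demo_buildreg_itr(0, 50));	/* 50 usec ITR */
  	return 0;
  }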
- */
-	i40e_for_each_ring(ring, q_vector->tx) {
-		if (!i40e_clean_tx_irq(vsi, ring, budget)) {
-			clean_complete = false;
-			continue;
-		}
-		arm_wb |= ring->arm_wb;
-		ring->arm_wb = false;
-	}
-
-	/* Handle case where we are called by netpoll with a budget of 0 */
-	if (budget <= 0)
-		goto tx_only;
-
-	/* We attempt to distribute budget to each Rx queue fairly, but don't
-	 * allow the budget to go below 1 because that would exit polling early.
-	 */
-	budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
-
-	i40e_for_each_ring(ring, q_vector->rx) {
-		int cleaned = i40e_clean_rx_irq(ring, budget_per_ring);
-
-		work_done += cleaned;
-		/* if we clean as many as budgeted, we must not be done */
-		if (cleaned >= budget_per_ring)
-			clean_complete = false;
-	}
-
-	/* If work not completed, return budget and polling will return */
-	if (!clean_complete) {
-		int cpu_id = smp_processor_id();
-
-		/* It is possible that the interrupt affinity has changed but,
-		 * if the cpu is pegged at 100%, polling will never exit while
-		 * traffic continues and the interrupt will be stuck on this
-		 * cpu. We check to make sure affinity is correct before we
-		 * continue to poll, otherwise we must stop polling so the
-		 * interrupt can move to the correct cpu.
-		 */
-		if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
-			/* Tell napi that we are done polling */
-			napi_complete_done(napi, work_done);
-
-			/* Force an interrupt */
-			i40evf_force_wb(vsi, q_vector);
-
-			/* Return budget-1 so that polling stops */
-			return budget - 1;
-		}
-tx_only:
-		if (arm_wb) {
-			q_vector->tx.ring[0].tx_stats.tx_force_wb++;
-			i40e_enable_wb_on_itr(vsi, q_vector);
-		}
-		return budget;
-	}
-
-	if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
-		q_vector->arm_wb_state = false;
-
-	/* Work is done so exit the polling mode and re-enable the interrupt */
-	napi_complete_done(napi, work_done);
-
-	i40e_update_enable_itr(vsi, q_vector);
-
-	return min(work_done, budget - 1);
-}
-
-/**
- * i40evf_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
- * @skb: send buffer
- * @tx_ring: ring to send buffer on
- * @flags: the tx flags to be set
- *
- * Checks the skb and sets up the corresponding generic transmit flags
- * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
- *
- * Returns an error code indicating the frame should be dropped upon error,
- * otherwise returns 0 to indicate the flags have been set properly.
- **/
-static inline int i40evf_tx_prepare_vlan_flags(struct sk_buff *skb,
-					       struct i40e_ring *tx_ring,
-					       u32 *flags)
-{
-	__be16 protocol = skb->protocol;
-	u32 tx_flags = 0;
-
-	if (protocol == htons(ETH_P_8021Q) &&
-	    !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
-		/* When HW VLAN acceleration is turned off by the user the
-		 * stack sets the protocol to 8021q so that the driver
-		 * can take any steps required to support the SW only
-		 * VLAN handling. In our case the driver doesn't need
-		 * to take any further steps so just set the protocol
-		 * to the encapsulated ethertype.
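[ ed: Earlier in the poll routine above, the per-ring budget split guards against a ring budget of zero starving polling. The arithmetic is worth seeing in isolation; a sketch with example values: ]

  #include <stdio.h>

  static int budget_per_ring(int budget, int num_ringpairs)
  {
  	int per_ring = budget / num_ringpairs;

  	/* never let a ring's budget hit 0, or polling would exit early */
  	return per_ring > 0 ? per_ring : 1;
  }

  int main(void)
  {
  	printf("%d\n", budget_per_ring(64, 4));	/* 16 */
  	printf("%d\n", budget_per_ring(3, 4));	/* clamped to 1 */
  	return 0;
  }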
- */ - skb->protocol = vlan_get_protocol(skb); - goto out; - } - - /* if we have a HW VLAN tag being added, default to the HW one */ - if (skb_vlan_tag_present(skb)) { - tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT; - tx_flags |= I40E_TX_FLAGS_HW_VLAN; - /* else if it is a SW VLAN, check the next protocol and store the tag */ - } else if (protocol == htons(ETH_P_8021Q)) { - struct vlan_hdr *vhdr, _vhdr; - - vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr); - if (!vhdr) - return -EINVAL; - - protocol = vhdr->h_vlan_encapsulated_proto; - tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT; - tx_flags |= I40E_TX_FLAGS_SW_VLAN; - } - -out: - *flags = tx_flags; - return 0; -} - -/** - * i40e_tso - set up the tso context descriptor - * @first: pointer to first Tx buffer for xmit - * @hdr_len: ptr to the size of the packet header - * @cd_type_cmd_tso_mss: Quad Word 1 - * - * Returns 0 if no TSO can happen, 1 if tso is going, or error - **/ -static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len, - u64 *cd_type_cmd_tso_mss) -{ - struct sk_buff *skb = first->skb; - u64 cd_cmd, cd_tso_len, cd_mss; - union { - struct iphdr *v4; - struct ipv6hdr *v6; - unsigned char *hdr; - } ip; - union { - struct tcphdr *tcp; - struct udphdr *udp; - unsigned char *hdr; - } l4; - u32 paylen, l4_offset; - u16 gso_segs, gso_size; - int err; - - if (skb->ip_summed != CHECKSUM_PARTIAL) - return 0; - - if (!skb_is_gso(skb)) - return 0; - - err = skb_cow_head(skb, 0); - if (err < 0) - return err; - - ip.hdr = skb_network_header(skb); - l4.hdr = skb_transport_header(skb); - - /* initialize outer IP header fields */ - if (ip.v4->version == 4) { - ip.v4->tot_len = 0; - ip.v4->check = 0; - } else { - ip.v6->payload_len = 0; - } - - if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | - SKB_GSO_GRE_CSUM | - SKB_GSO_IPXIP4 | - SKB_GSO_IPXIP6 | - SKB_GSO_UDP_TUNNEL | - SKB_GSO_UDP_TUNNEL_CSUM)) { - if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && - (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) { - l4.udp->len = 0; - - /* determine offset of outer transport header */ - l4_offset = l4.hdr - skb->data; - - /* remove payload length from outer checksum */ - paylen = skb->len - l4_offset; - csum_replace_by_diff(&l4.udp->check, - (__force __wsum)htonl(paylen)); - } - - /* reset pointers to inner headers */ - ip.hdr = skb_inner_network_header(skb); - l4.hdr = skb_inner_transport_header(skb); - - /* initialize inner IP header fields */ - if (ip.v4->version == 4) { - ip.v4->tot_len = 0; - ip.v4->check = 0; - } else { - ip.v6->payload_len = 0; - } - } - - /* determine offset of inner transport header */ - l4_offset = l4.hdr - skb->data; - - /* remove payload length from inner checksum */ - paylen = skb->len - l4_offset; - csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen)); - - /* compute length of segmentation header */ - *hdr_len = (l4.tcp->doff * 4) + l4_offset; - - /* pull values out of skb_shinfo */ - gso_size = skb_shinfo(skb)->gso_size; - gso_segs = skb_shinfo(skb)->gso_segs; - - /* update GSO size and bytecount with header size */ - first->gso_segs = gso_segs; - first->bytecount += (first->gso_segs - 1) * *hdr_len; - - /* find the field values */ - cd_cmd = I40E_TX_CTX_DESC_TSO; - cd_tso_len = skb->len - *hdr_len; - cd_mss = gso_size; - *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) | - (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) | - (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT); - return 1; -} - -/** - * i40e_tx_enable_csum - Enable Tx checksum 
offloads - * @skb: send buffer - * @tx_flags: pointer to Tx flags currently set - * @td_cmd: Tx descriptor command bits to set - * @td_offset: Tx descriptor header offsets to set - * @tx_ring: Tx descriptor ring - * @cd_tunneling: ptr to context desc bits - **/ -static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, - u32 *td_cmd, u32 *td_offset, - struct i40e_ring *tx_ring, - u32 *cd_tunneling) -{ - union { - struct iphdr *v4; - struct ipv6hdr *v6; - unsigned char *hdr; - } ip; - union { - struct tcphdr *tcp; - struct udphdr *udp; - unsigned char *hdr; - } l4; - unsigned char *exthdr; - u32 offset, cmd = 0; - __be16 frag_off; - u8 l4_proto = 0; - - if (skb->ip_summed != CHECKSUM_PARTIAL) - return 0; - - ip.hdr = skb_network_header(skb); - l4.hdr = skb_transport_header(skb); - - /* compute outer L2 header size */ - offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; - - if (skb->encapsulation) { - u32 tunnel = 0; - /* define outer network header type */ - if (*tx_flags & I40E_TX_FLAGS_IPV4) { - tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ? - I40E_TX_CTX_EXT_IP_IPV4 : - I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; - - l4_proto = ip.v4->protocol; - } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { - tunnel |= I40E_TX_CTX_EXT_IP_IPV6; - - exthdr = ip.hdr + sizeof(*ip.v6); - l4_proto = ip.v6->nexthdr; - if (l4.hdr != exthdr) - ipv6_skip_exthdr(skb, exthdr - skb->data, - &l4_proto, &frag_off); - } - - /* define outer transport */ - switch (l4_proto) { - case IPPROTO_UDP: - tunnel |= I40E_TXD_CTX_UDP_TUNNELING; - *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL; - break; - case IPPROTO_GRE: - tunnel |= I40E_TXD_CTX_GRE_TUNNELING; - *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL; - break; - case IPPROTO_IPIP: - case IPPROTO_IPV6: - *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL; - l4.hdr = skb_inner_network_header(skb); - break; - default: - if (*tx_flags & I40E_TX_FLAGS_TSO) - return -1; - - skb_checksum_help(skb); - return 0; - } - - /* compute outer L3 header size */ - tunnel |= ((l4.hdr - ip.hdr) / 4) << - I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT; - - /* switch IP header pointer from outer to inner header */ - ip.hdr = skb_inner_network_header(skb); - - /* compute tunnel header size */ - tunnel |= ((ip.hdr - l4.hdr) / 2) << - I40E_TXD_CTX_QW0_NATLEN_SHIFT; - - /* indicate if we need to offload outer UDP header */ - if ((*tx_flags & I40E_TX_FLAGS_TSO) && - !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && - (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) - tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK; - - /* record tunnel offload values */ - *cd_tunneling |= tunnel; - - /* switch L4 header pointer from outer to inner */ - l4.hdr = skb_inner_transport_header(skb); - l4_proto = 0; - - /* reset type as we transition from outer to inner headers */ - *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6); - if (ip.v4->version == 4) - *tx_flags |= I40E_TX_FLAGS_IPV4; - if (ip.v6->version == 6) - *tx_flags |= I40E_TX_FLAGS_IPV6; - } - - /* Enable IP checksum offloads */ - if (*tx_flags & I40E_TX_FLAGS_IPV4) { - l4_proto = ip.v4->protocol; - /* the stack computes the IP header already, the only time we - * need the hardware to recompute it is in the case of TSO. - */ - cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ? 
-		       I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
-		       I40E_TX_DESC_CMD_IIPT_IPV4;
-	} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
-		cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
-
-		exthdr = ip.hdr + sizeof(*ip.v6);
-		l4_proto = ip.v6->nexthdr;
-		if (l4.hdr != exthdr)
-			ipv6_skip_exthdr(skb, exthdr - skb->data,
-					 &l4_proto, &frag_off);
-	}
-
-	/* compute inner L3 header size */
-	offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
-
-	/* Enable L4 checksum offloads */
-	switch (l4_proto) {
-	case IPPROTO_TCP:
-		/* enable checksum offloads */
-		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
-		offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
-		break;
-	case IPPROTO_SCTP:
-		/* enable SCTP checksum offload */
-		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
-		offset |= (sizeof(struct sctphdr) >> 2) <<
-			  I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
-		break;
-	case IPPROTO_UDP:
-		/* enable UDP checksum offload */
-		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
-		offset |= (sizeof(struct udphdr) >> 2) <<
-			  I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
-		break;
-	default:
-		if (*tx_flags & I40E_TX_FLAGS_TSO)
-			return -1;
-		skb_checksum_help(skb);
-		return 0;
-	}
-
-	*td_cmd |= cmd;
-	*td_offset |= offset;
-
-	return 1;
-}
-
-/**
- * i40e_create_tx_ctx - Build the Tx context descriptor
- * @tx_ring: ring to create the descriptor on
- * @cd_type_cmd_tso_mss: Quad Word 1
- * @cd_tunneling: Quad Word 0 - bits 0-31
- * @cd_l2tag2: Quad Word 0 - bits 32-63
- **/
-static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
-			       const u64 cd_type_cmd_tso_mss,
-			       const u32 cd_tunneling, const u32 cd_l2tag2)
-{
-	struct i40e_tx_context_desc *context_desc;
-	int i = tx_ring->next_to_use;
-
-	if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
-	    !cd_tunneling && !cd_l2tag2)
-		return;
-
-	/* grab the next descriptor */
-	context_desc = I40E_TX_CTXTDESC(tx_ring, i);
-
-	i++;
-	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
-
-	/* cpu_to_le32 and assign to struct fields */
-	context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
-	context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
-	context_desc->rsvd = cpu_to_le16(0);
-	context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
-}
-
-/**
- * __i40evf_chk_linearize - Check if there are more than 8 buffers per packet
- * @skb: send buffer
- *
- * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
- * and so we need to figure out the cases where we need to linearize the skb.
- *
- * For TSO we need to count the TSO header and segment payload separately.
- * As such we need to check cases where we have 7 fragments or more as we
- * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
- * the segment payload in the first descriptor, and another 7 for the
- * fragments.
- **/
-bool __i40evf_chk_linearize(struct sk_buff *skb)
-{
-	const struct skb_frag_struct *frag, *stale;
-	int nr_frags, sum;
-
-	/* no need to check if number of frags is less than 7 */
-	nr_frags = skb_shinfo(skb)->nr_frags;
-	if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
-		return false;
-
-	/* We need to walk through the list and validate that each group
-	 * of 6 fragments totals at least gso_size.
-	 */
-	nr_frags -= I40E_MAX_BUFFER_TXD - 2;
-	frag = &skb_shinfo(skb)->frags[0];
-
-	/* Initialize size to the negative value of gso_size minus 1. We
-	 * use this as the worst case scenario in which the frag ahead
-	 * of us only provides one byte which is why we are limited to 6
-	 * descriptors for a single transmit as the header and previous
-	 * fragment are already consuming 2 descriptors.
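[ ed: The sliding-window test implemented just below is easier to follow with numbers. With gso_size = 2000 and uniform 256-byte frags, the sum starts at 1 - 2000 = -1999, frags 0-4 add 1280, the next frag adds 256, and the total is still negative, so the skb gets linearized. A simplified, self-contained restatement of the window check (it skips the 12K stale-fragment splitting the real code also performs): ]

  #include <stdbool.h>
  #include <stddef.h>
  #include <stdio.h>

  /* true if any 6 consecutive frags sum to less than gso_size */
  static bool demo_needs_linearize(const int *frag, size_t nr, int gso_size)
  {
  	if (nr < 7)	/* the 8-descriptor limit can't be hit below 7 frags */
  		return false;

  	for (size_t start = 0; start + 6 <= nr; start++) {
  		int sum = 0;

  		for (size_t i = start; i < start + 6; i++)
  			sum += frag[i];
  		if (sum < gso_size)
  			return true;
  	}
  	return false;
  }

  int main(void)
  {
  	int frags[8] = { 256, 256, 256, 256, 256, 256, 256, 256 };

  	printf("%d\n", demo_needs_linearize(frags, 8, 2000));	/* 1 */
  	return 0;
  }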
- */ - sum = 1 - skb_shinfo(skb)->gso_size; - - /* Add size of frags 0 through 4 to create our initial sum */ - sum += skb_frag_size(frag++); - sum += skb_frag_size(frag++); - sum += skb_frag_size(frag++); - sum += skb_frag_size(frag++); - sum += skb_frag_size(frag++); - - /* Walk through fragments adding latest fragment, testing it, and - * then removing stale fragments from the sum. - */ - for (stale = &skb_shinfo(skb)->frags[0];; stale++) { - int stale_size = skb_frag_size(stale); - - sum += skb_frag_size(frag++); - - /* The stale fragment may present us with a smaller - * descriptor than the actual fragment size. To account - * for that we need to remove all the data on the front and - * figure out what the remainder would be in the last - * descriptor associated with the fragment. - */ - if (stale_size > I40E_MAX_DATA_PER_TXD) { - int align_pad = -(stale->page_offset) & - (I40E_MAX_READ_REQ_SIZE - 1); - - sum -= align_pad; - stale_size -= align_pad; - - do { - sum -= I40E_MAX_DATA_PER_TXD_ALIGNED; - stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED; - } while (stale_size > I40E_MAX_DATA_PER_TXD); - } - - /* if sum is negative we failed to make sufficient progress */ - if (sum < 0) - return true; - - if (!nr_frags--) - break; - - sum -= stale_size; - } - - return false; -} - -/** - * __i40evf_maybe_stop_tx - 2nd level check for tx stop conditions - * @tx_ring: the ring to be checked - * @size: the size buffer we want to assure is available - * - * Returns -EBUSY if a stop is needed, else 0 - **/ -int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size) -{ - netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); - /* Memory barrier before checking head and tail */ - smp_mb(); - - /* Check again in a case another CPU has just made room available. */ - if (likely(I40E_DESC_UNUSED(tx_ring) < size)) - return -EBUSY; - - /* A reprieve! 
- use start_queue because it doesn't call schedule */ - netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); - ++tx_ring->tx_stats.restart_queue; - return 0; -} - -/** - * i40evf_tx_map - Build the Tx descriptor - * @tx_ring: ring to send buffer on - * @skb: send buffer - * @first: first buffer info buffer to use - * @tx_flags: collected send information - * @hdr_len: size of the packet header - * @td_cmd: the command field in the descriptor - * @td_offset: offset for checksum or crc - **/ -static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, - struct i40e_tx_buffer *first, u32 tx_flags, - const u8 hdr_len, u32 td_cmd, u32 td_offset) -{ - unsigned int data_len = skb->data_len; - unsigned int size = skb_headlen(skb); - struct skb_frag_struct *frag; - struct i40e_tx_buffer *tx_bi; - struct i40e_tx_desc *tx_desc; - u16 i = tx_ring->next_to_use; - u32 td_tag = 0; - dma_addr_t dma; - - if (tx_flags & I40E_TX_FLAGS_HW_VLAN) { - td_cmd |= I40E_TX_DESC_CMD_IL2TAG1; - td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >> - I40E_TX_FLAGS_VLAN_SHIFT; - } - - first->tx_flags = tx_flags; - - dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); - - tx_desc = I40E_TX_DESC(tx_ring, i); - tx_bi = first; - - for (frag = &skb_shinfo(skb)->frags[0];; frag++) { - unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED; - - if (dma_mapping_error(tx_ring->dev, dma)) - goto dma_error; - - /* record length, and DMA address */ - dma_unmap_len_set(tx_bi, len, size); - dma_unmap_addr_set(tx_bi, dma, dma); - - /* align size to end of page */ - max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1); - tx_desc->buffer_addr = cpu_to_le64(dma); - - while (unlikely(size > I40E_MAX_DATA_PER_TXD)) { - tx_desc->cmd_type_offset_bsz = - build_ctob(td_cmd, td_offset, - max_data, td_tag); - - tx_desc++; - i++; - - if (i == tx_ring->count) { - tx_desc = I40E_TX_DESC(tx_ring, 0); - i = 0; - } - - dma += max_data; - size -= max_data; - - max_data = I40E_MAX_DATA_PER_TXD_ALIGNED; - tx_desc->buffer_addr = cpu_to_le64(dma); - } - - if (likely(!data_len)) - break; - - tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, - size, td_tag); - - tx_desc++; - i++; - - if (i == tx_ring->count) { - tx_desc = I40E_TX_DESC(tx_ring, 0); - i = 0; - } - - size = skb_frag_size(frag); - data_len -= size; - - dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, - DMA_TO_DEVICE); - - tx_bi = &tx_ring->tx_bi[i]; - } - - netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); - - i++; - if (i == tx_ring->count) - i = 0; - - tx_ring->next_to_use = i; - - i40e_maybe_stop_tx(tx_ring, DESC_NEEDED); - - /* write last descriptor with RS and EOP bits */ - td_cmd |= I40E_TXD_CMD; - tx_desc->cmd_type_offset_bsz = - build_ctob(td_cmd, td_offset, size, td_tag); - - /* Force memory writes to complete before letting h/w know there - * are new descriptors to fetch. - * - * We also use this memory barrier to make certain all of the - * status bits have been updated before next_to_watch is written. 
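[ ed: The inner while loop above splits one long DMA mapping across several descriptors; the constants come from the deleted i40e_txrx.h (16K - 1 max per descriptor, chunked at 12K so each middle chunk ends on a 4K read-request boundary). Standalone, the chunking looks like this — the address and size are example values: ]

  #include <stdint.h>
  #include <stdio.h>

  #define DEMO_MAX_DATA_PER_TXD          (16 * 1024 - 1)
  #define DEMO_MAX_READ_REQ_SIZE         4096
  #define DEMO_MAX_DATA_PER_TXD_ALIGNED \
  	(DEMO_MAX_DATA_PER_TXD & ~(DEMO_MAX_READ_REQ_SIZE - 1))

  int main(void)
  {
  	uint64_t dma = 0x1000;		/* example bus address */
  	unsigned int size = 30000;	/* example buffer length */
  	unsigned int max_data = DEMO_MAX_DATA_PER_TXD_ALIGNED;

  	/* first chunk stretches so the next one starts 4K-aligned */
  	max_data += -dma & (DEMO_MAX_READ_REQ_SIZE - 1);

  	while (size > DEMO_MAX_DATA_PER_TXD) {
  		printf("descriptor: addr=%#llx len=%u\n",
  		       (unsigned long long)dma, max_data);
  		dma += max_data;
  		size -= max_data;
  		max_data = DEMO_MAX_DATA_PER_TXD_ALIGNED;
  	}
  	printf("descriptor: addr=%#llx len=%u\n",
  	       (unsigned long long)dma, size);
  	return 0;
  }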
- */ - wmb(); - - /* set next_to_watch value indicating a packet is present */ - first->next_to_watch = tx_desc; - - /* notify HW of packet */ - if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { - writel(i, tx_ring->tail); - - /* we need this if more than one processor can write to our tail - * at a time, it synchronizes IO on IA64/Altix systems - */ - mmiowb(); - } - - return; - -dma_error: - dev_info(tx_ring->dev, "TX DMA map failed\n"); - - /* clear dma mappings for failed tx_bi map */ - for (;;) { - tx_bi = &tx_ring->tx_bi[i]; - i40e_unmap_and_free_tx_resource(tx_ring, tx_bi); - if (tx_bi == first) - break; - if (i == 0) - i = tx_ring->count; - i--; - } - - tx_ring->next_to_use = i; -} - -/** - * i40e_xmit_frame_ring - Sends buffer on Tx ring - * @skb: send buffer - * @tx_ring: ring to send buffer on - * - * Returns NETDEV_TX_OK if sent, else an error code - **/ -static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, - struct i40e_ring *tx_ring) -{ - u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT; - u32 cd_tunneling = 0, cd_l2tag2 = 0; - struct i40e_tx_buffer *first; - u32 td_offset = 0; - u32 tx_flags = 0; - __be16 protocol; - u32 td_cmd = 0; - u8 hdr_len = 0; - int tso, count; - - /* prefetch the data, we'll need it later */ - prefetch(skb->data); - - i40e_trace(xmit_frame_ring, skb, tx_ring); - - count = i40e_xmit_descriptor_count(skb); - if (i40e_chk_linearize(skb, count)) { - if (__skb_linearize(skb)) { - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } - count = i40e_txd_use_count(skb->len); - tx_ring->tx_stats.tx_linearize++; - } - - /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD, - * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD, - * + 4 desc gap to avoid the cache line where head is, - * + 1 desc for context descriptor, - * otherwise try next time - */ - if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) { - tx_ring->tx_stats.tx_busy++; - return NETDEV_TX_BUSY; - } - - /* record the location of the first descriptor for this packet */ - first = &tx_ring->tx_bi[tx_ring->next_to_use]; - first->skb = skb; - first->bytecount = skb->len; - first->gso_segs = 1; - - /* prepare the xmit flags */ - if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) - goto out_drop; - - /* obtain protocol of skb */ - protocol = vlan_get_protocol(skb); - - /* setup IPv4/IPv6 offloads */ - if (protocol == htons(ETH_P_IP)) - tx_flags |= I40E_TX_FLAGS_IPV4; - else if (protocol == htons(ETH_P_IPV6)) - tx_flags |= I40E_TX_FLAGS_IPV6; - - tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss); - - if (tso < 0) - goto out_drop; - else if (tso) - tx_flags |= I40E_TX_FLAGS_TSO; - - /* Always offload the checksum, since it's in the data descriptor */ - tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset, - tx_ring, &cd_tunneling); - if (tso < 0) - goto out_drop; - - skb_tx_timestamp(skb); - - /* always enable CRC insertion offload */ - td_cmd |= I40E_TX_DESC_CMD_ICRC; - - i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss, - cd_tunneling, cd_l2tag2); - - i40evf_tx_map(tx_ring, skb, first, tx_flags, hdr_len, - td_cmd, td_offset); - - return NETDEV_TX_OK; - -out_drop: - i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring); - dev_kfree_skb_any(first->skb); - first->skb = NULL; - return NETDEV_TX_OK; -} - -/** - * i40evf_xmit_frame - Selects the correct VSI and Tx queue to send buffer - * @skb: send buffer - * @netdev: network interface device structure - * - * Returns NETDEV_TX_OK if sent, else an error code - **/ -netdev_tx_t i40evf_xmit_frame(struct 
sk_buff *skb, struct net_device *netdev)
-{
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-	struct i40e_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping];
-
-	/* hardware can't handle really short frames, hardware padding works
-	 * beyond this point
-	 */
-	if (unlikely(skb->len < I40E_MIN_TX_LEN)) {
-		if (skb_pad(skb, I40E_MIN_TX_LEN - skb->len))
-			return NETDEV_TX_OK;
-		skb->len = I40E_MIN_TX_LEN;
-		skb_set_tail_pointer(skb, I40E_MIN_TX_LEN);
-	}
-
-	return i40e_xmit_frame_ring(skb, tx_ring);
-}
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
deleted file mode 100644
index 3b5a63b3236e..000000000000
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ /dev/null
@@ -1,524 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-#ifndef _I40E_TXRX_H_
-#define _I40E_TXRX_H_
-
-/* Interrupt Throttling and Rate Limiting Goodies */
-#define I40E_DEFAULT_IRQ_WORK      256
-
-/* The datasheet for the X710 and XL710 indicates that the maximum value for
- * the ITR is 8160usec which is then called out as 0xFF0 with a 2usec
- * resolution. 8160 is 0x1FE0 when written out in hex. So instead of storing
- * the register value which is divided by 2 let's use the actual values and
- * avoid an excessive amount of translation.
- */
-#define I40E_ITR_DYNAMIC	0x8000	/* use top bit as a flag */
-#define I40E_ITR_MASK		0x1FFE	/* mask for ITR register value */
-#define I40E_MIN_ITR		     2	/* reg uses 2 usec resolution */
-#define I40E_ITR_100K		    10	/* all values below must be even */
-#define I40E_ITR_50K		    20
-#define I40E_ITR_20K		    50
-#define I40E_ITR_18K		    60
-#define I40E_ITR_8K		   122
-#define I40E_MAX_ITR		  8160	/* maximum value as per datasheet */
-#define ITR_TO_REG(setting) ((setting) & ~I40E_ITR_DYNAMIC)
-#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~I40E_ITR_MASK)
-#define ITR_IS_DYNAMIC(setting) (!!((setting) & I40E_ITR_DYNAMIC))
-
-#define I40E_ITR_RX_DEF		(I40E_ITR_20K | I40E_ITR_DYNAMIC)
-#define I40E_ITR_TX_DEF		(I40E_ITR_20K | I40E_ITR_DYNAMIC)
-
-/* 0x40 is the enable bit for interrupt rate limiting, and must be set if
- * the value of the rate limit is non-zero
- */
-#define INTRL_ENA                  BIT(6)
-#define I40E_MAX_INTRL             0x3B    /* reg uses 4 usec resolution */
-#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)
-#define INTRL_USEC_TO_REG(set) ((set) ? ((set) >> 2) | INTRL_ENA : 0)
-#define I40E_INTRL_8K              125     /* 8000 ints/sec */
-#define I40E_INTRL_62K             16      /* 62500 ints/sec */
-#define I40E_INTRL_83K             12      /* 83333 ints/sec */
-
-#define I40E_QUEUE_END_OF_LIST 0x7FF
-
-/* this enum matches hardware bits and is meant to be used by DYN_CTLN
- * registers and QINT registers or more generally anywhere in the manual
- * mentioning ITR_INDX; ITR_NONE cannot be used as an index 'n' into any
- * register but instead is a special value meaning "don't update" ITR0/1/2.
- */
-enum i40e_dyn_idx_t {
-	I40E_IDX_ITR0 = 0,
-	I40E_IDX_ITR1 = 1,
-	I40E_IDX_ITR2 = 2,
-	I40E_ITR_NONE = 3	/* ITR_NONE must not be used as an index */
-};
-
-/* these are indexes into ITRN registers */
-#define I40E_RX_ITR    I40E_IDX_ITR0
-#define I40E_TX_ITR    I40E_IDX_ITR1
-#define I40E_PE_ITR    I40E_IDX_ITR2
-
-/* Supported RSS offloads */
-#define I40E_DEFAULT_RSS_HENA ( \
-	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
-	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
-	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
-	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
-	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
-	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
-	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
-	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
-	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
-	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
-	BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
-
-#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
-	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
-	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
-	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
-	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
-	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
-	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
-
-/* Supported Rx Buffer Sizes (a multiple of 128) */
-#define I40E_RXBUFFER_256   256
-#define I40E_RXBUFFER_1536  1536  /* 128B aligned standard Ethernet frame */
-#define I40E_RXBUFFER_2048  2048
-#define I40E_RXBUFFER_3072  3072  /* Used for large frames w/ padding */
-#define I40E_MAX_RXBUFFER   9728  /* largest size for single descriptor */
-
-/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
- * reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
- * this adds up to 450 bytes of extra data meaning the smallest allocation
- * we could have is 1K.
- * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
- * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
- */
-#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
-#define I40E_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
-#define i40e_rx_desc i40e_32byte_rx_desc
-
-#define I40E_RX_DMA_ATTR \
-	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
-
-/* Attempt to maximize the headroom available for incoming frames. We
- * use a 2K buffer for receives and need 1536/1534 to store the data for
- * the frame. This leaves us with 512 bytes of room. From that we need
- * to deduct the space needed for the shared info and the padding needed
- * to IP align the frame.
- *
- * Note: For cache line sizes 256 or larger this value is going to end
- *	 up negative. In these cases we should fall back to the legacy
- *	 receive path.
- */
-#if (PAGE_SIZE < 8192)
-#define I40E_2K_TOO_SMALL_WITH_PADDING \
-((NET_SKB_PAD + I40E_RXBUFFER_1536) > SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048))
-
-static inline int i40e_compute_pad(int rx_buf_len)
-{
-	int page_size, pad_size;
-
-	page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
-	pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;
-
-	return pad_size;
-}
-
-static inline int i40e_skb_pad(void)
-{
-	int rx_buf_len;
-
-	/* If a 2K buffer cannot handle a standard Ethernet frame then
-	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
-	 *
-	 * For a 3K buffer we need to add enough padding to allow for
-	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
-	 * cache-line alignment.
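[ ed: i40e_compute_pad() above is worth checking by hand: round the buffer up to a half page, subtract the skb_shared_info overhead, and the remainder is available as headroom. With a 4K page and the shared-info size approximated as 320 bytes (the real SKB_WITH_OVERHEAD value depends on the arch), a 1536-byte buffer leaves 192 bytes of pad: ]

  #include <stdio.h>

  #define DEMO_PAGE_SIZE 4096
  #define DEMO_SHINFO    320	/* assumed shared-info overhead */
  #define DEMO_ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))

  static int demo_compute_pad(int rx_buf_len)
  {
  	int page_size = DEMO_ALIGN(rx_buf_len, DEMO_PAGE_SIZE / 2);

  	return (page_size - DEMO_SHINFO) - rx_buf_len;
  }

  int main(void)
  {
  	/* 1536 rounds up to 2048; 2048 - 320 - 1536 = 192 bytes of pad */
  	printf("pad for 1536B buffer: %d\n", demo_compute_pad(1536));
  	return 0;
  }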
- */ - if (I40E_2K_TOO_SMALL_WITH_PADDING) - rx_buf_len = I40E_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN); - else - rx_buf_len = I40E_RXBUFFER_1536; - - /* if needed make room for NET_IP_ALIGN */ - rx_buf_len -= NET_IP_ALIGN; - - return i40e_compute_pad(rx_buf_len); -} - -#define I40E_SKB_PAD i40e_skb_pad() -#else -#define I40E_2K_TOO_SMALL_WITH_PADDING false -#define I40E_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) -#endif - -/** - * i40e_test_staterr - tests bits in Rx descriptor status and error fields - * @rx_desc: pointer to receive descriptor (in le64 format) - * @stat_err_bits: value to mask - * - * This function does some fast chicanery in order to return the - * value of the mask which is really only used for boolean tests. - * The status_error_len doesn't need to be shifted because it begins - * at offset zero. - */ -static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc, - const u64 stat_err_bits) -{ - return !!(rx_desc->wb.qword1.status_error_len & - cpu_to_le64(stat_err_bits)); -} - -/* How many Rx Buffers do we bundle into one write to the hardware ? */ -#define I40E_RX_BUFFER_WRITE 32 /* Must be power of 2 */ -#define I40E_RX_INCREMENT(r, i) \ - do { \ - (i)++; \ - if ((i) == (r)->count) \ - i = 0; \ - r->next_to_clean = i; \ - } while (0) - -#define I40E_RX_NEXT_DESC(r, i, n) \ - do { \ - (i)++; \ - if ((i) == (r)->count) \ - i = 0; \ - (n) = I40E_RX_DESC((r), (i)); \ - } while (0) - -#define I40E_RX_NEXT_DESC_PREFETCH(r, i, n) \ - do { \ - I40E_RX_NEXT_DESC((r), (i), (n)); \ - prefetch((n)); \ - } while (0) - -#define I40E_MAX_BUFFER_TXD 8 -#define I40E_MIN_TX_LEN 17 - -/* The size limit for a transmit buffer in a descriptor is (16K - 1). - * In order to align with the read requests we will align the value to - * the nearest 4K which represents our maximum read request size. - */ -#define I40E_MAX_READ_REQ_SIZE 4096 -#define I40E_MAX_DATA_PER_TXD (16 * 1024 - 1) -#define I40E_MAX_DATA_PER_TXD_ALIGNED \ - (I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1)) - -/** - * i40e_txd_use_count - estimate the number of descriptors needed for Tx - * @size: transmit request size in bytes - * - * Due to hardware alignment restrictions (4K alignment), we need to - * assume that we can have no more than 12K of data per descriptor, even - * though each descriptor can take up to 16K - 1 bytes of aligned memory. - * Thus, we need to divide by 12K. But division is slow! Instead, - * we decompose the operation into shifts and one relatively cheap - * multiply operation. - * - * To divide by 12K, we first divide by 4K, then divide by 3: - * To divide by 4K, shift right by 12 bits - * To divide by 3, multiply by 85, then divide by 256 - * (Divide by 256 is done by shifting right by 8 bits) - * Finally, we add one to round up. Because 256 isn't an exact multiple of - * 3, we'll underestimate near each multiple of 12K. This is actually more - * accurate as we have 4K - 1 of wiggle room that we can fit into the last - * segment. For our purposes this is accurate out to 1M which is orders of - * magnitude greater than our largest possible GSO size. 
- * - * This would then be implemented as: - * return (((size >> 12) * 85) >> 8) + 1; - * - * Since multiplication and division are commutative, we can reorder - * operations into: - * return ((size * 85) >> 20) + 1; - */ -static inline unsigned int i40e_txd_use_count(unsigned int size) -{ - return ((size * 85) >> 20) + 1; -} - -/* Tx Descriptors needed, worst case */ -#define DESC_NEEDED (MAX_SKB_FRAGS + 6) -#define I40E_MIN_DESC_PENDING 4 - -#define I40E_TX_FLAGS_HW_VLAN BIT(1) -#define I40E_TX_FLAGS_SW_VLAN BIT(2) -#define I40E_TX_FLAGS_TSO BIT(3) -#define I40E_TX_FLAGS_IPV4 BIT(4) -#define I40E_TX_FLAGS_IPV6 BIT(5) -#define I40E_TX_FLAGS_FCCRC BIT(6) -#define I40E_TX_FLAGS_FSO BIT(7) -#define I40E_TX_FLAGS_FD_SB BIT(9) -#define I40E_TX_FLAGS_VXLAN_TUNNEL BIT(10) -#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000 -#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 -#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29 -#define I40E_TX_FLAGS_VLAN_SHIFT 16 - -struct i40e_tx_buffer { - struct i40e_tx_desc *next_to_watch; - union { - struct sk_buff *skb; - void *raw_buf; - }; - unsigned int bytecount; - unsigned short gso_segs; - - DEFINE_DMA_UNMAP_ADDR(dma); - DEFINE_DMA_UNMAP_LEN(len); - u32 tx_flags; -}; - -struct i40e_rx_buffer { - dma_addr_t dma; - struct page *page; -#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) - __u32 page_offset; -#else - __u16 page_offset; -#endif - __u16 pagecnt_bias; -}; - -struct i40e_queue_stats { - u64 packets; - u64 bytes; -}; - -struct i40e_tx_queue_stats { - u64 restart_queue; - u64 tx_busy; - u64 tx_done_old; - u64 tx_linearize; - u64 tx_force_wb; - int prev_pkt_ctr; - u64 tx_lost_interrupt; -}; - -struct i40e_rx_queue_stats { - u64 non_eop_descs; - u64 alloc_page_failed; - u64 alloc_buff_failed; - u64 page_reuse_count; - u64 realloc_count; -}; - -enum i40e_ring_state_t { - __I40E_TX_FDIR_INIT_DONE, - __I40E_TX_XPS_INIT_DONE, - __I40E_RING_STATE_NBITS /* must be last */ -}; - -/* some useful defines for virtchannel interface, which - * is the only remaining user of header split - */ -#define I40E_RX_DTYPE_NO_SPLIT 0 -#define I40E_RX_DTYPE_HEADER_SPLIT 1 -#define I40E_RX_DTYPE_SPLIT_ALWAYS 2 -#define I40E_RX_SPLIT_L2 0x1 -#define I40E_RX_SPLIT_IP 0x2 -#define I40E_RX_SPLIT_TCP_UDP 0x4 -#define I40E_RX_SPLIT_SCTP 0x8 - -/* struct that defines a descriptor ring, associated with a VSI */ -struct i40e_ring { - struct i40e_ring *next; /* pointer to next ring in q_vector */ - void *desc; /* Descriptor ring memory */ - struct device *dev; /* Used for DMA mapping */ - struct net_device *netdev; /* netdev ring maps to */ - union { - struct i40e_tx_buffer *tx_bi; - struct i40e_rx_buffer *rx_bi; - }; - DECLARE_BITMAP(state, __I40E_RING_STATE_NBITS); - u16 queue_index; /* Queue number of ring */ - u8 dcb_tc; /* Traffic class of ring */ - u8 __iomem *tail; - - /* high bit set means dynamic, use accessors routines to read/write. - * hardware only supports 2us resolution for the ITR registers. - * these values always store the USER setting, and must be converted - * before programming to a register. 
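[ ed: The divide-by-12K identity above, ((size * 85) >> 20) + 1, can be checked exhaustively against the descriptor count the map loop actually emits (chunks of 12K with a final chunk of up to 16K - 1; the 4K alignment stretch is ignored here, which only makes the bound tighter). The check below should print 0, confirming the estimate never under-counts for sizes up to 1M, matching the comment's accuracy claim: ]

  #include <stdio.h>

  #define MAX_DATA          (16 * 1024 - 1)	/* 16383 per descriptor */
  #define MAX_DATA_ALIGNED  (12 * 1024)		/* 12288 per split chunk */

  /* descriptors the map loop emits for one buffer of 'size' bytes */
  static unsigned int needed(unsigned int size)
  {
  	unsigned int n = 1;

  	while (size > MAX_DATA) {
  		size -= MAX_DATA_ALIGNED;
  		n++;
  	}
  	return n;
  }

  int main(void)
  {
  	unsigned int under = 0;

  	for (unsigned int size = 1; size <= 1u << 20; size++)
  		if (((size * 85) >> 20) + 1 < needed(size))
  			under++;

  	/* the estimate may waste a descriptor but never under-counts */
  	printf("underestimates: %u\n", under);	/* expect 0 */
  	return 0;
  }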
- */ - u16 itr_setting; - - u16 count; /* Number of descriptors */ - u16 reg_idx; /* HW register index of the ring */ - u16 rx_buf_len; - - /* used in interrupt processing */ - u16 next_to_use; - u16 next_to_clean; - - u8 atr_sample_rate; - u8 atr_count; - - bool ring_active; /* is ring online or not */ - bool arm_wb; /* do something to arm write back */ - u8 packet_stride; - - u16 flags; -#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0) -#define I40E_RXR_FLAGS_BUILD_SKB_ENABLED BIT(1) - - /* stats structs */ - struct i40e_queue_stats stats; - struct u64_stats_sync syncp; - union { - struct i40e_tx_queue_stats tx_stats; - struct i40e_rx_queue_stats rx_stats; - }; - - unsigned int size; /* length of descriptor ring in bytes */ - dma_addr_t dma; /* physical address of ring */ - - struct i40e_vsi *vsi; /* Backreference to associated VSI */ - struct i40e_q_vector *q_vector; /* Backreference to associated vector */ - - struct rcu_head rcu; /* to avoid race on free */ - u16 next_to_alloc; - struct sk_buff *skb; /* When i40evf_clean_rx_ring_irq() must - * return before it sees the EOP for - * the current packet, we save that skb - * here and resume receiving this - * packet the next time - * i40evf_clean_rx_ring_irq() is called - * for this ring. - */ -} ____cacheline_internodealigned_in_smp; - -static inline bool ring_uses_build_skb(struct i40e_ring *ring) -{ - return !!(ring->flags & I40E_RXR_FLAGS_BUILD_SKB_ENABLED); -} - -static inline void set_ring_build_skb_enabled(struct i40e_ring *ring) -{ - ring->flags |= I40E_RXR_FLAGS_BUILD_SKB_ENABLED; -} - -static inline void clear_ring_build_skb_enabled(struct i40e_ring *ring) -{ - ring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED; -} - -#define I40E_ITR_ADAPTIVE_MIN_INC 0x0002 -#define I40E_ITR_ADAPTIVE_MIN_USECS 0x0002 -#define I40E_ITR_ADAPTIVE_MAX_USECS 0x007e -#define I40E_ITR_ADAPTIVE_LATENCY 0x8000 -#define I40E_ITR_ADAPTIVE_BULK 0x0000 -#define ITR_IS_BULK(x) (!((x) & I40E_ITR_ADAPTIVE_LATENCY)) - -struct i40e_ring_container { - struct i40e_ring *ring; /* pointer to linked list of ring(s) */ - unsigned long next_update; /* jiffies value of next update */ - unsigned int total_bytes; /* total bytes processed this int */ - unsigned int total_packets; /* total packets processed this int */ - u16 count; - u16 target_itr; /* target ITR setting for ring(s) */ - u16 current_itr; /* current ITR setting for ring(s) */ -}; - -/* iterator for handling rings in ring container */ -#define i40e_for_each_ring(pos, head) \ - for (pos = (head).ring; pos != NULL; pos = pos->next) - -static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring) -{ -#if (PAGE_SIZE < 8192) - if (ring->rx_buf_len > (PAGE_SIZE / 2)) - return 1; -#endif - return 0; -} - -#define i40e_rx_pg_size(_ring) (PAGE_SIZE << i40e_rx_pg_order(_ring)) - -bool i40evf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count); -netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev); -void i40evf_clean_tx_ring(struct i40e_ring *tx_ring); -void i40evf_clean_rx_ring(struct i40e_ring *rx_ring); -int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring); -int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring); -void i40evf_free_tx_resources(struct i40e_ring *tx_ring); -void i40evf_free_rx_resources(struct i40e_ring *rx_ring); -int i40evf_napi_poll(struct napi_struct *napi, int budget); -void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector); -u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw); -void i40evf_detect_recover_hung(struct i40e_vsi 
*vsi); -int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size); -bool __i40evf_chk_linearize(struct sk_buff *skb); - -/** - * i40e_xmit_descriptor_count - calculate number of Tx descriptors needed - * @skb: send buffer - * - * Returns number of data descriptors needed for this skb. Returns 0 to indicate - * there are not enough descriptors available in this ring since we need at least - * one descriptor. - **/ -static inline int i40e_xmit_descriptor_count(struct sk_buff *skb) -{ - const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; - unsigned int nr_frags = skb_shinfo(skb)->nr_frags; - int count = 0, size = skb_headlen(skb); - - for (;;) { - count += i40e_txd_use_count(size); - - if (!nr_frags--) - break; - - size = skb_frag_size(frag++); - } - - return count; -} - -/** - * i40e_maybe_stop_tx - 1st level check for Tx stop conditions - * @tx_ring: the ring to be checked - * @size: the size buffer we want to assure is available - * - * Returns 0 if stop is not needed - **/ -static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) -{ - if (likely(I40E_DESC_UNUSED(tx_ring) >= size)) - return 0; - return __i40evf_maybe_stop_tx(tx_ring, size); -} - -/** - * i40e_chk_linearize - Check if there are more than 8 fragments per packet - * @skb: send buffer - * @count: number of buffers used - * - * Note: Our HW can't scatter-gather more than 8 fragments to build - * a packet on the wire and so we need to figure out the cases where we - * need to linearize the skb. - **/ -static inline bool i40e_chk_linearize(struct sk_buff *skb, int count) -{ - /* Both TSO and single send will work if count is less than 8 */ - if (likely(count < I40E_MAX_BUFFER_TXD)) - return false; - - if (skb_is_gso(skb)) - return __i40evf_chk_linearize(skb); - - /* we can support up to 8 data buffers for a single send */ - return count != I40E_MAX_BUFFER_TXD; -} - -/** - * txring_txq - Find the netdev Tx ring based on the i40e Tx ring - * @ring: Tx ring to find the netdev equivalent of - **/ -static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring) -{ - return netdev_get_tx_queue(ring->netdev, ring->queue_index); -} -#endif /* _I40E_TXRX_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h deleted file mode 100644 index 094387db3c11..000000000000 --- a/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ /dev/null @@ -1,1496 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ - -#ifndef _I40E_TYPE_H_ -#define _I40E_TYPE_H_ - -#include "i40e_status.h" -#include "i40e_osdep.h" -#include "i40e_register.h" -#include "i40e_adminq.h" -#include "i40e_hmc.h" -#include "i40e_lan_hmc.h" -#include "i40e_devids.h" - -/* I40E_MASK is a macro used on 32 bit registers */ -#define I40E_MASK(mask, shift) ((u32)(mask) << (shift)) - -#define I40E_MAX_VSI_QP 16 -#define I40E_MAX_VF_VSI 3 -#define I40E_MAX_CHAINED_RX_BUFFERS 5 -#define I40E_MAX_PF_UDP_OFFLOAD_PORTS 16 - -/* Max default timeout in ms */ -#define I40E_MAX_NVM_TIMEOUT 18000 - -/* Max timeout in ms for the phy to respond */ -#define I40E_MAX_PHY_TIMEOUT 500 - -/* Switch from ms to the 1usec global time (this is the GTIME resolution) */ -#define I40E_MS_TO_GTIME(time) ((time) * 1000) - -/* forward declaration */ -struct i40e_hw; -typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *); - -/* Data type manipulation macros. */ - -#define I40E_DESC_UNUSED(R) \ - ((((R)->next_to_clean > (R)->next_to_use) ?
0 : (R)->count) + \ - (R)->next_to_clean - (R)->next_to_use - 1) - -/* bitfields for Tx queue mapping in QTX_CTL */ -#define I40E_QTX_CTL_VF_QUEUE 0x0 -#define I40E_QTX_CTL_VM_QUEUE 0x1 -#define I40E_QTX_CTL_PF_QUEUE 0x2 - -/* debug masks - set these bits in hw->debug_mask to control output */ -enum i40e_debug_mask { - I40E_DEBUG_INIT = 0x00000001, - I40E_DEBUG_RELEASE = 0x00000002, - - I40E_DEBUG_LINK = 0x00000010, - I40E_DEBUG_PHY = 0x00000020, - I40E_DEBUG_HMC = 0x00000040, - I40E_DEBUG_NVM = 0x00000080, - I40E_DEBUG_LAN = 0x00000100, - I40E_DEBUG_FLOW = 0x00000200, - I40E_DEBUG_DCB = 0x00000400, - I40E_DEBUG_DIAG = 0x00000800, - I40E_DEBUG_FD = 0x00001000, - I40E_DEBUG_PACKAGE = 0x00002000, - - I40E_DEBUG_AQ_MESSAGE = 0x01000000, - I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000, - I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000, - I40E_DEBUG_AQ_COMMAND = 0x06000000, - I40E_DEBUG_AQ = 0x0F000000, - - I40E_DEBUG_USER = 0xF0000000, - - I40E_DEBUG_ALL = 0xFFFFFFFF -}; - -/* These are structs for managing the hardware information and the operations. - * The structures of function pointers are filled out at init time when we - * know for sure exactly which hardware we're working with. This gives us the - * flexibility of using the same main driver code but adapting to slightly - * different hardware needs as new parts are developed. For this architecture, - * the Firmware and AdminQ are intended to insulate the driver from most of the - * future changes, but these structures will also do part of the job. - */ -enum i40e_mac_type { - I40E_MAC_UNKNOWN = 0, - I40E_MAC_XL710, - I40E_MAC_VF, - I40E_MAC_X722, - I40E_MAC_X722_VF, - I40E_MAC_GENERIC, -}; - -enum i40e_media_type { - I40E_MEDIA_TYPE_UNKNOWN = 0, - I40E_MEDIA_TYPE_FIBER, - I40E_MEDIA_TYPE_BASET, - I40E_MEDIA_TYPE_BACKPLANE, - I40E_MEDIA_TYPE_CX4, - I40E_MEDIA_TYPE_DA, - I40E_MEDIA_TYPE_VIRTUAL -}; - -enum i40e_fc_mode { - I40E_FC_NONE = 0, - I40E_FC_RX_PAUSE, - I40E_FC_TX_PAUSE, - I40E_FC_FULL, - I40E_FC_PFC, - I40E_FC_DEFAULT -}; - -enum i40e_set_fc_aq_failures { - I40E_SET_FC_AQ_FAIL_NONE = 0, - I40E_SET_FC_AQ_FAIL_GET = 1, - I40E_SET_FC_AQ_FAIL_SET = 2, - I40E_SET_FC_AQ_FAIL_UPDATE = 4, - I40E_SET_FC_AQ_FAIL_SET_UPDATE = 6 -}; - -enum i40e_vsi_type { - I40E_VSI_MAIN = 0, - I40E_VSI_VMDQ1 = 1, - I40E_VSI_VMDQ2 = 2, - I40E_VSI_CTRL = 3, - I40E_VSI_FCOE = 4, - I40E_VSI_MIRROR = 5, - I40E_VSI_SRIOV = 6, - I40E_VSI_FDIR = 7, - I40E_VSI_TYPE_UNKNOWN -}; - -enum i40e_queue_type { - I40E_QUEUE_TYPE_RX = 0, - I40E_QUEUE_TYPE_TX, - I40E_QUEUE_TYPE_PE_CEQ, - I40E_QUEUE_TYPE_UNKNOWN -}; - -struct i40e_link_status { - enum i40e_aq_phy_type phy_type; - enum i40e_aq_link_speed link_speed; - u8 link_info; - u8 an_info; - u8 req_fec_info; - u8 fec_info; - u8 ext_info; - u8 loopback; - /* is Link Status Event notification to SW enabled */ - bool lse_enable; - u16 max_frame_size; - bool crc_enable; - u8 pacing; - u8 requested_speeds; - u8 module_type[3]; - /* 1st byte: module identifier */ -#define I40E_MODULE_TYPE_SFP 0x03 -#define I40E_MODULE_TYPE_QSFP 0x0D - /* 2nd byte: ethernet compliance codes for 10/40G */ -#define I40E_MODULE_TYPE_40G_ACTIVE 0x01 -#define I40E_MODULE_TYPE_40G_LR4 0x02 -#define I40E_MODULE_TYPE_40G_SR4 0x04 -#define I40E_MODULE_TYPE_40G_CR4 0x08 -#define I40E_MODULE_TYPE_10G_BASE_SR 0x10 -#define I40E_MODULE_TYPE_10G_BASE_LR 0x20 -#define I40E_MODULE_TYPE_10G_BASE_LRM 0x40 -#define I40E_MODULE_TYPE_10G_BASE_ER 0x80 - /* 3rd byte: ethernet compliance codes for 1G */ -#define I40E_MODULE_TYPE_1000BASE_SX 0x01 -#define I40E_MODULE_TYPE_1000BASE_LX 
0x02 -#define I40E_MODULE_TYPE_1000BASE_CX 0x04 -#define I40E_MODULE_TYPE_1000BASE_T 0x08 -}; - -struct i40e_phy_info { - struct i40e_link_status link_info; - struct i40e_link_status link_info_old; - bool get_link_info; - enum i40e_media_type media_type; - /* all the phy types the NVM is capable of */ - u64 phy_types; -}; - -#define I40E_CAP_PHY_TYPE_SGMII BIT_ULL(I40E_PHY_TYPE_SGMII) -#define I40E_CAP_PHY_TYPE_1000BASE_KX BIT_ULL(I40E_PHY_TYPE_1000BASE_KX) -#define I40E_CAP_PHY_TYPE_10GBASE_KX4 BIT_ULL(I40E_PHY_TYPE_10GBASE_KX4) -#define I40E_CAP_PHY_TYPE_10GBASE_KR BIT_ULL(I40E_PHY_TYPE_10GBASE_KR) -#define I40E_CAP_PHY_TYPE_40GBASE_KR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_KR4) -#define I40E_CAP_PHY_TYPE_XAUI BIT_ULL(I40E_PHY_TYPE_XAUI) -#define I40E_CAP_PHY_TYPE_XFI BIT_ULL(I40E_PHY_TYPE_XFI) -#define I40E_CAP_PHY_TYPE_SFI BIT_ULL(I40E_PHY_TYPE_SFI) -#define I40E_CAP_PHY_TYPE_XLAUI BIT_ULL(I40E_PHY_TYPE_XLAUI) -#define I40E_CAP_PHY_TYPE_XLPPI BIT_ULL(I40E_PHY_TYPE_XLPPI) -#define I40E_CAP_PHY_TYPE_40GBASE_CR4_CU BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4_CU) -#define I40E_CAP_PHY_TYPE_10GBASE_CR1_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1_CU) -#define I40E_CAP_PHY_TYPE_10GBASE_AOC BIT_ULL(I40E_PHY_TYPE_10GBASE_AOC) -#define I40E_CAP_PHY_TYPE_40GBASE_AOC BIT_ULL(I40E_PHY_TYPE_40GBASE_AOC) -#define I40E_CAP_PHY_TYPE_100BASE_TX BIT_ULL(I40E_PHY_TYPE_100BASE_TX) -#define I40E_CAP_PHY_TYPE_1000BASE_T BIT_ULL(I40E_PHY_TYPE_1000BASE_T) -#define I40E_CAP_PHY_TYPE_10GBASE_T BIT_ULL(I40E_PHY_TYPE_10GBASE_T) -#define I40E_CAP_PHY_TYPE_10GBASE_SR BIT_ULL(I40E_PHY_TYPE_10GBASE_SR) -#define I40E_CAP_PHY_TYPE_10GBASE_LR BIT_ULL(I40E_PHY_TYPE_10GBASE_LR) -#define I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_SFPP_CU) -#define I40E_CAP_PHY_TYPE_10GBASE_CR1 BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1) -#define I40E_CAP_PHY_TYPE_40GBASE_CR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4) -#define I40E_CAP_PHY_TYPE_40GBASE_SR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_SR4) -#define I40E_CAP_PHY_TYPE_40GBASE_LR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_LR4) -#define I40E_CAP_PHY_TYPE_1000BASE_SX BIT_ULL(I40E_PHY_TYPE_1000BASE_SX) -#define I40E_CAP_PHY_TYPE_1000BASE_LX BIT_ULL(I40E_PHY_TYPE_1000BASE_LX) -#define I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL \ - BIT_ULL(I40E_PHY_TYPE_1000BASE_T_OPTICAL) -#define I40E_CAP_PHY_TYPE_20GBASE_KR2 BIT_ULL(I40E_PHY_TYPE_20GBASE_KR2) -/* Defining the macro I40E_PHY_TYPE_OFFSET to implement a bit shift for some - * PHY types. There is an unused bit (31) in the I40E_CAP_PHY_TYPE_* bit - * fields but no corresponding gap in the i40e_aq_phy_type enumeration. So, - * a shift is needed to adjust for this with values larger than 31. The - * only affected values are I40E_PHY_TYPE_25GBASE_*.
- */ -#define I40E_PHY_TYPE_OFFSET 1 -#define I40E_CAP_PHY_TYPE_25GBASE_KR BIT_ULL(I40E_PHY_TYPE_25GBASE_KR + \ - I40E_PHY_TYPE_OFFSET) -#define I40E_CAP_PHY_TYPE_25GBASE_CR BIT_ULL(I40E_PHY_TYPE_25GBASE_CR + \ - I40E_PHY_TYPE_OFFSET) -#define I40E_CAP_PHY_TYPE_25GBASE_SR BIT_ULL(I40E_PHY_TYPE_25GBASE_SR + \ - I40E_PHY_TYPE_OFFSET) -#define I40E_CAP_PHY_TYPE_25GBASE_LR BIT_ULL(I40E_PHY_TYPE_25GBASE_LR + \ - I40E_PHY_TYPE_OFFSET) -#define I40E_HW_CAP_MAX_GPIO 30 -/* Capabilities of a PF or a VF or the whole device */ -struct i40e_hw_capabilities { - u32 switch_mode; -#define I40E_NVM_IMAGE_TYPE_EVB 0x0 -#define I40E_NVM_IMAGE_TYPE_CLOUD 0x2 -#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3 - - u32 management_mode; - u32 mng_protocols_over_mctp; -#define I40E_MNG_PROTOCOL_PLDM 0x2 -#define I40E_MNG_PROTOCOL_OEM_COMMANDS 0x4 -#define I40E_MNG_PROTOCOL_NCSI 0x8 - u32 npar_enable; - u32 os2bmc; - u32 valid_functions; - bool sr_iov_1_1; - bool vmdq; - bool evb_802_1_qbg; /* Edge Virtual Bridging */ - bool evb_802_1_qbh; /* Bridge Port Extension */ - bool dcb; - bool fcoe; - bool iscsi; /* Indicates iSCSI enabled */ - bool flex10_enable; - bool flex10_capable; - u32 flex10_mode; -#define I40E_FLEX10_MODE_UNKNOWN 0x0 -#define I40E_FLEX10_MODE_DCC 0x1 -#define I40E_FLEX10_MODE_DCI 0x2 - - u32 flex10_status; -#define I40E_FLEX10_STATUS_DCC_ERROR 0x1 -#define I40E_FLEX10_STATUS_VC_MODE 0x2 - - bool sec_rev_disabled; - bool update_disabled; -#define I40E_NVM_MGMT_SEC_REV_DISABLED 0x1 -#define I40E_NVM_MGMT_UPDATE_DISABLED 0x2 - - bool mgmt_cem; - bool ieee_1588; - bool iwarp; - bool fd; - u32 fd_filters_guaranteed; - u32 fd_filters_best_effort; - bool rss; - u32 rss_table_size; - u32 rss_table_entry_width; - bool led[I40E_HW_CAP_MAX_GPIO]; - bool sdp[I40E_HW_CAP_MAX_GPIO]; - u32 nvm_image_type; - u32 num_flow_director_filters; - u32 num_vfs; - u32 vf_base_id; - u32 num_vsis; - u32 num_rx_qp; - u32 num_tx_qp; - u32 base_queue; - u32 num_msix_vectors; - u32 num_msix_vectors_vf; - u32 led_pin_num; - u32 sdp_pin_num; - u32 mdio_port_num; - u32 mdio_port_mode; - u8 rx_buf_chain_len; - u32 enabled_tcmap; - u32 maxtc; - u64 wr_csr_prot; -}; - -struct i40e_mac_info { - enum i40e_mac_type type; - u8 addr[ETH_ALEN]; - u8 perm_addr[ETH_ALEN]; - u8 san_addr[ETH_ALEN]; - u16 max_fcoeq; -}; - -enum i40e_aq_resources_ids { - I40E_NVM_RESOURCE_ID = 1 -}; - -enum i40e_aq_resource_access_type { - I40E_RESOURCE_READ = 1, - I40E_RESOURCE_WRITE -}; - -struct i40e_nvm_info { - u64 hw_semaphore_timeout; /* usec global time (GTIME resolution) */ - u32 timeout; /* [ms] */ - u16 sr_size; /* Shadow RAM size in words */ - bool blank_nvm_mode; /* is NVM empty (no FW present)*/ - u16 version; /* NVM package version */ - u32 eetrack; /* NVM data version */ - u32 oem_ver; /* OEM version info */ -}; - -/* definitions used in NVM update support */ - -enum i40e_nvmupd_cmd { - I40E_NVMUPD_INVALID, - I40E_NVMUPD_READ_CON, - I40E_NVMUPD_READ_SNT, - I40E_NVMUPD_READ_LCB, - I40E_NVMUPD_READ_SA, - I40E_NVMUPD_WRITE_ERA, - I40E_NVMUPD_WRITE_CON, - I40E_NVMUPD_WRITE_SNT, - I40E_NVMUPD_WRITE_LCB, - I40E_NVMUPD_WRITE_SA, - I40E_NVMUPD_CSUM_CON, - I40E_NVMUPD_CSUM_SA, - I40E_NVMUPD_CSUM_LCB, - I40E_NVMUPD_STATUS, - I40E_NVMUPD_EXEC_AQ, - I40E_NVMUPD_GET_AQ_RESULT, - I40E_NVMUPD_GET_AQ_EVENT, -}; - -enum i40e_nvmupd_state { - I40E_NVMUPD_STATE_INIT, - I40E_NVMUPD_STATE_READING, - I40E_NVMUPD_STATE_WRITING, - I40E_NVMUPD_STATE_INIT_WAIT, - I40E_NVMUPD_STATE_WRITE_WAIT, - I40E_NVMUPD_STATE_ERROR -}; - -/* nvm_access definition and its masks/shifts need to 
be accessible to - * application, core driver, and shared code. Where is the right file? - */ -#define I40E_NVM_READ 0xB -#define I40E_NVM_WRITE 0xC - -#define I40E_NVM_MOD_PNT_MASK 0xFF - -#define I40E_NVM_TRANS_SHIFT 8 -#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT) -#define I40E_NVM_PRESERVATION_FLAGS_SHIFT 12 -#define I40E_NVM_PRESERVATION_FLAGS_MASK \ - (0x3 << I40E_NVM_PRESERVATION_FLAGS_SHIFT) -#define I40E_NVM_PRESERVATION_FLAGS_SELECTED 0x01 -#define I40E_NVM_PRESERVATION_FLAGS_ALL 0x02 -#define I40E_NVM_CON 0x0 -#define I40E_NVM_SNT 0x1 -#define I40E_NVM_LCB 0x2 -#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB) -#define I40E_NVM_ERA 0x4 -#define I40E_NVM_CSUM 0x8 -#define I40E_NVM_AQE 0xe -#define I40E_NVM_EXEC 0xf - -#define I40E_NVM_ADAPT_SHIFT 16 -#define I40E_NVM_ADAPT_MASK (0xffff << I40E_NVM_ADAPT_SHIFT) - -#define I40E_NVMUPD_MAX_DATA 4096 -#define I40E_NVMUPD_IFACE_TIMEOUT 2 /* seconds */ - -struct i40e_nvm_access { - u32 command; - u32 config; - u32 offset; /* in bytes */ - u32 data_size; /* in bytes */ - u8 data[1]; -}; - -/* (Q)SFP module access definitions */ -#define I40E_I2C_EEPROM_DEV_ADDR 0xA0 -#define I40E_I2C_EEPROM_DEV_ADDR2 0xA2 -#define I40E_MODULE_TYPE_ADDR 0x00 -#define I40E_MODULE_REVISION_ADDR 0x01 -#define I40E_MODULE_SFF_8472_COMP 0x5E -#define I40E_MODULE_SFF_8472_SWAP 0x5C -#define I40E_MODULE_SFF_ADDR_MODE 0x04 -#define I40E_MODULE_TYPE_QSFP_PLUS 0x0D -#define I40E_MODULE_TYPE_QSFP28 0x11 -#define I40E_MODULE_QSFP_MAX_LEN 640 - -/* PCI bus types */ -enum i40e_bus_type { - i40e_bus_type_unknown = 0, - i40e_bus_type_pci, - i40e_bus_type_pcix, - i40e_bus_type_pci_express, - i40e_bus_type_reserved -}; - -/* PCI bus speeds */ -enum i40e_bus_speed { - i40e_bus_speed_unknown = 0, - i40e_bus_speed_33 = 33, - i40e_bus_speed_66 = 66, - i40e_bus_speed_100 = 100, - i40e_bus_speed_120 = 120, - i40e_bus_speed_133 = 133, - i40e_bus_speed_2500 = 2500, - i40e_bus_speed_5000 = 5000, - i40e_bus_speed_8000 = 8000, - i40e_bus_speed_reserved -}; - -/* PCI bus widths */ -enum i40e_bus_width { - i40e_bus_width_unknown = 0, - i40e_bus_width_pcie_x1 = 1, - i40e_bus_width_pcie_x2 = 2, - i40e_bus_width_pcie_x4 = 4, - i40e_bus_width_pcie_x8 = 8, - i40e_bus_width_32 = 32, - i40e_bus_width_64 = 64, - i40e_bus_width_reserved -}; - -/* Bus parameters */ -struct i40e_bus_info { - enum i40e_bus_speed speed; - enum i40e_bus_width width; - enum i40e_bus_type type; - - u16 func; - u16 device; - u16 lan_id; - u16 bus_id; -}; - -/* Flow control (FC) parameters */ -struct i40e_fc_info { - enum i40e_fc_mode current_mode; /* FC mode in effect */ - enum i40e_fc_mode requested_mode; /* FC mode requested by caller */ -}; - -#define I40E_MAX_TRAFFIC_CLASS 8 -#define I40E_MAX_USER_PRIORITY 8 -#define I40E_DCBX_MAX_APPS 32 -#define I40E_LLDPDU_SIZE 1500 - -/* IEEE 802.1Qaz ETS Configuration data */ -struct i40e_ieee_ets_config { - u8 willing; - u8 cbs; - u8 maxtcs; - u8 prioritytable[I40E_MAX_TRAFFIC_CLASS]; - u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS]; - u8 tsatable[I40E_MAX_TRAFFIC_CLASS]; -}; - -/* IEEE 802.1Qaz ETS Recommendation data */ -struct i40e_ieee_ets_recommend { - u8 prioritytable[I40E_MAX_TRAFFIC_CLASS]; - u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS]; - u8 tsatable[I40E_MAX_TRAFFIC_CLASS]; -}; - -/* IEEE 802.1Qaz PFC Configuration data */ -struct i40e_ieee_pfc_config { - u8 willing; - u8 mbc; - u8 pfccap; - u8 pfcenable; -}; - -/* IEEE 802.1Qaz Application Priority data */ -struct i40e_ieee_app_priority_table { - u8 priority; - u8 selector; - u16 protocolid; -}; - -struct 
i40e_dcbx_config { - u32 numapps; - u32 tlv_status; /* CEE mode TLV status */ - struct i40e_ieee_ets_config etscfg; - struct i40e_ieee_ets_recommend etsrec; - struct i40e_ieee_pfc_config pfc; - struct i40e_ieee_app_priority_table app[I40E_DCBX_MAX_APPS]; -}; - -/* Port hardware description */ -struct i40e_hw { - u8 __iomem *hw_addr; - void *back; - - /* subsystem structs */ - struct i40e_phy_info phy; - struct i40e_mac_info mac; - struct i40e_bus_info bus; - struct i40e_nvm_info nvm; - struct i40e_fc_info fc; - - /* pci info */ - u16 device_id; - u16 vendor_id; - u16 subsystem_device_id; - u16 subsystem_vendor_id; - u8 revision_id; - u8 port; - bool adapter_stopped; - - /* capabilities for entire device and PCI func */ - struct i40e_hw_capabilities dev_caps; - struct i40e_hw_capabilities func_caps; - - /* Flow Director shared filter space */ - u16 fdir_shared_filter_count; - - /* device profile info */ - u8 pf_id; - u16 main_vsi_seid; - - /* for multi-function MACs */ - u16 partition_id; - u16 num_partitions; - u16 num_ports; - - /* Closest numa node to the device */ - u16 numa_node; - - /* Admin Queue info */ - struct i40e_adminq_info aq; - - /* state of nvm update process */ - enum i40e_nvmupd_state nvmupd_state; - struct i40e_aq_desc nvm_wb_desc; - struct i40e_aq_desc nvm_aq_event_desc; - struct i40e_virt_mem nvm_buff; - bool nvm_release_on_done; - u16 nvm_wait_opcode; - - /* HMC info */ - struct i40e_hmc_info hmc; /* HMC info struct */ - - /* LLDP/DCBX Status */ - u16 dcbx_status; - -#define I40E_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1) -#define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2) - - /* DCBX info */ - struct i40e_dcbx_config local_dcbx_config; /* Oper/Local Cfg */ - struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */ - struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */ - - /* Used in set switch config AQ command */ - u16 switch_tag; - u16 first_tag; - u16 second_tag; - - /* debug mask */ - u32 debug_mask; - char err_str[16]; -}; - -static inline bool i40e_is_vf(struct i40e_hw *hw) -{ - return (hw->mac.type == I40E_MAC_VF || - hw->mac.type == I40E_MAC_X722_VF); -} - -struct i40e_driver_version { - u8 major_version; - u8 minor_version; - u8 build_version; - u8 subbuild_version; - u8 driver_string[32]; -}; - -/* RX Descriptors */ -union i40e_16byte_rx_desc { - struct { - __le64 pkt_addr; /* Packet buffer address */ - __le64 hdr_addr; /* Header buffer address */ - } read; - struct { - struct { - struct { - union { - __le16 mirroring_status; - __le16 fcoe_ctx_id; - } mirr_fcoe; - __le16 l2tag1; - } lo_dword; - union { - __le32 rss; /* RSS Hash */ - __le32 fd_id; /* Flow director filter id */ - __le32 fcoe_param; /* FCoE DDP Context id */ - } hi_dword; - } qword0; - struct { - /* ext status/error/pktype/length */ - __le64 status_error_len; - } qword1; - } wb; /* writeback */ -}; - -union i40e_32byte_rx_desc { - struct { - __le64 pkt_addr; /* Packet buffer address */ - __le64 hdr_addr; /* Header buffer address */ - /* bit 0 of hdr_buffer_addr is DD bit */ - __le64 rsvd1; - __le64 rsvd2; - } read; - struct { - struct { - struct { - union { - __le16 mirroring_status; - __le16 fcoe_ctx_id; - } mirr_fcoe; - __le16 l2tag1; - } lo_dword; - union { - __le32 rss; /* RSS Hash */ - __le32 fcoe_param; /* FCoE DDP Context id */ - /* Flow director filter id in case of - * Programming status desc WB - */ - __le32 fd_id; - } hi_dword; - } qword0; - struct { - /* status/error/pktype/length */ - __le64 status_error_len; - } qword1; - struct { - __le16 ext_status; /* extended status 
*/ - __le16 rsvd; - __le16 l2tag2_1; - __le16 l2tag2_2; - } qword2; - struct { - union { - __le32 flex_bytes_lo; - __le32 pe_status; - } lo_dword; - union { - __le32 flex_bytes_hi; - __le32 fd_id; - } hi_dword; - } qword3; - } wb; /* writeback */ -}; - -enum i40e_rx_desc_status_bits { - /* Note: These are predefined bit offsets */ - I40E_RX_DESC_STATUS_DD_SHIFT = 0, - I40E_RX_DESC_STATUS_EOF_SHIFT = 1, - I40E_RX_DESC_STATUS_L2TAG1P_SHIFT = 2, - I40E_RX_DESC_STATUS_L3L4P_SHIFT = 3, - I40E_RX_DESC_STATUS_CRCP_SHIFT = 4, - I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */ - I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7, - /* Note: Bit 8 is reserved in X710 and XL710 */ - I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8, - I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */ - I40E_RX_DESC_STATUS_FLM_SHIFT = 11, - I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */ - I40E_RX_DESC_STATUS_LPBK_SHIFT = 14, - I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15, - I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */ - /* Note: For non-tunnel packets INT_UDP_0 is the right status for - * UDP header - */ - I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18, - I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */ -}; - -#define I40E_RXD_QW1_STATUS_SHIFT 0 -#define I40E_RXD_QW1_STATUS_MASK ((BIT(I40E_RX_DESC_STATUS_LAST) - 1) \ - << I40E_RXD_QW1_STATUS_SHIFT) - -#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT -#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \ - I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT) - -#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT -#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK \ - BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT) - -enum i40e_rx_desc_fltstat_values { - I40E_RX_DESC_FLTSTAT_NO_DATA = 0, - I40E_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? 
FD_ID : RSV */ - I40E_RX_DESC_FLTSTAT_RSV = 2, - I40E_RX_DESC_FLTSTAT_RSS_HASH = 3, -}; - -#define I40E_RXD_QW1_ERROR_SHIFT 19 -#define I40E_RXD_QW1_ERROR_MASK (0xFFUL << I40E_RXD_QW1_ERROR_SHIFT) - -enum i40e_rx_desc_error_bits { - /* Note: These are predefined bit offsets */ - I40E_RX_DESC_ERROR_RXE_SHIFT = 0, - I40E_RX_DESC_ERROR_RECIPE_SHIFT = 1, - I40E_RX_DESC_ERROR_HBO_SHIFT = 2, - I40E_RX_DESC_ERROR_L3L4E_SHIFT = 3, /* 3 BITS */ - I40E_RX_DESC_ERROR_IPE_SHIFT = 3, - I40E_RX_DESC_ERROR_L4E_SHIFT = 4, - I40E_RX_DESC_ERROR_EIPE_SHIFT = 5, - I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6, - I40E_RX_DESC_ERROR_PPRS_SHIFT = 7 -}; - -enum i40e_rx_desc_error_l3l4e_fcoe_masks { - I40E_RX_DESC_ERROR_L3L4E_NONE = 0, - I40E_RX_DESC_ERROR_L3L4E_PROT = 1, - I40E_RX_DESC_ERROR_L3L4E_FC = 2, - I40E_RX_DESC_ERROR_L3L4E_DMAC_ERR = 3, - I40E_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4 -}; - -#define I40E_RXD_QW1_PTYPE_SHIFT 30 -#define I40E_RXD_QW1_PTYPE_MASK (0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT) - -/* Packet type non-ip values */ -enum i40e_rx_l2_ptype { - I40E_RX_PTYPE_L2_RESERVED = 0, - I40E_RX_PTYPE_L2_MAC_PAY2 = 1, - I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2, - I40E_RX_PTYPE_L2_FIP_PAY2 = 3, - I40E_RX_PTYPE_L2_OUI_PAY2 = 4, - I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5, - I40E_RX_PTYPE_L2_LLDP_PAY2 = 6, - I40E_RX_PTYPE_L2_ECP_PAY2 = 7, - I40E_RX_PTYPE_L2_EVB_PAY2 = 8, - I40E_RX_PTYPE_L2_QCN_PAY2 = 9, - I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10, - I40E_RX_PTYPE_L2_ARP = 11, - I40E_RX_PTYPE_L2_FCOE_PAY3 = 12, - I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13, - I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14, - I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15, - I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16, - I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17, - I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18, - I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19, - I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20, - I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21, - I40E_RX_PTYPE_GRENAT4_MAC_PAY3 = 58, - I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87, - I40E_RX_PTYPE_GRENAT6_MAC_PAY3 = 124, - I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153 -}; - -struct i40e_rx_ptype_decoded { - u32 ptype:8; - u32 known:1; - u32 outer_ip:1; - u32 outer_ip_ver:1; - u32 outer_frag:1; - u32 tunnel_type:3; - u32 tunnel_end_prot:2; - u32 tunnel_end_frag:1; - u32 inner_prot:4; - u32 payload_layer:3; -}; - -enum i40e_rx_ptype_outer_ip { - I40E_RX_PTYPE_OUTER_L2 = 0, - I40E_RX_PTYPE_OUTER_IP = 1 -}; - -enum i40e_rx_ptype_outer_ip_ver { - I40E_RX_PTYPE_OUTER_NONE = 0, - I40E_RX_PTYPE_OUTER_IPV4 = 0, - I40E_RX_PTYPE_OUTER_IPV6 = 1 -}; - -enum i40e_rx_ptype_outer_fragmented { - I40E_RX_PTYPE_NOT_FRAG = 0, - I40E_RX_PTYPE_FRAG = 1 -}; - -enum i40e_rx_ptype_tunnel_type { - I40E_RX_PTYPE_TUNNEL_NONE = 0, - I40E_RX_PTYPE_TUNNEL_IP_IP = 1, - I40E_RX_PTYPE_TUNNEL_IP_GRENAT = 2, - I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3, - I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4, -}; - -enum i40e_rx_ptype_tunnel_end_prot { - I40E_RX_PTYPE_TUNNEL_END_NONE = 0, - I40E_RX_PTYPE_TUNNEL_END_IPV4 = 1, - I40E_RX_PTYPE_TUNNEL_END_IPV6 = 2, -}; - -enum i40e_rx_ptype_inner_prot { - I40E_RX_PTYPE_INNER_PROT_NONE = 0, - I40E_RX_PTYPE_INNER_PROT_UDP = 1, - I40E_RX_PTYPE_INNER_PROT_TCP = 2, - I40E_RX_PTYPE_INNER_PROT_SCTP = 3, - I40E_RX_PTYPE_INNER_PROT_ICMP = 4, - I40E_RX_PTYPE_INNER_PROT_TIMESYNC = 5 -}; - -enum i40e_rx_ptype_payload_layer { - I40E_RX_PTYPE_PAYLOAD_LAYER_NONE = 0, - I40E_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1, - I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2, - I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3, -}; - -#define I40E_RXD_QW1_LENGTH_PBUF_SHIFT 38 -#define I40E_RXD_QW1_LENGTH_PBUF_MASK 
(0x3FFFULL << \ - I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - -#define I40E_RXD_QW1_LENGTH_HBUF_SHIFT 52 -#define I40E_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \ - I40E_RXD_QW1_LENGTH_HBUF_SHIFT) - -#define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63 -#define I40E_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT) - -enum i40e_rx_desc_ext_status_bits { - /* Note: These are predefined bit offsets */ - I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT = 0, - I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1, - I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */ - I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */ - I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9, - I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10, - I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11, -}; - -enum i40e_rx_desc_pe_status_bits { - /* Note: These are predefined bit offsets */ - I40E_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */ - I40E_RX_DESC_PE_STATUS_L4PORT_SHIFT = 0, /* 16 BITS */ - I40E_RX_DESC_PE_STATUS_IPINDEX_SHIFT = 16, /* 8 BITS */ - I40E_RX_DESC_PE_STATUS_QPIDHIT_SHIFT = 24, - I40E_RX_DESC_PE_STATUS_APBVTHIT_SHIFT = 25, - I40E_RX_DESC_PE_STATUS_PORTV_SHIFT = 26, - I40E_RX_DESC_PE_STATUS_URG_SHIFT = 27, - I40E_RX_DESC_PE_STATUS_IPFRAG_SHIFT = 28, - I40E_RX_DESC_PE_STATUS_IPOPT_SHIFT = 29 -}; - -#define I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT 38 -#define I40E_RX_PROG_STATUS_DESC_LENGTH 0x2000000 - -#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT 2 -#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK (0x7UL << \ - I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT) - -#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT 19 -#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK (0x3FUL << \ - I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT) - -enum i40e_rx_prog_status_desc_status_bits { - /* Note: These are predefined bit offsets */ - I40E_RX_PROG_STATUS_DESC_DD_SHIFT = 0, - I40E_RX_PROG_STATUS_DESC_PROG_ID_SHIFT = 2 /* 3 BITS */ -}; - -enum i40e_rx_prog_status_desc_prog_id_masks { - I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS = 1, - I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS = 2, - I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS = 4, -}; - -enum i40e_rx_prog_status_desc_error_bits { - /* Note: These are predefined bit offsets */ - I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0, - I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1, - I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2, - I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3 -}; - -/* TX Descriptor */ -struct i40e_tx_desc { - __le64 buffer_addr; /* Address of descriptor's data buf */ - __le64 cmd_type_offset_bsz; -}; - -#define I40E_TXD_QW1_DTYPE_SHIFT 0 -#define I40E_TXD_QW1_DTYPE_MASK (0xFUL << I40E_TXD_QW1_DTYPE_SHIFT) - -enum i40e_tx_desc_dtype_value { - I40E_TX_DESC_DTYPE_DATA = 0x0, - I40E_TX_DESC_DTYPE_NOP = 0x1, /* same as Context desc */ - I40E_TX_DESC_DTYPE_CONTEXT = 0x1, - I40E_TX_DESC_DTYPE_FCOE_CTX = 0x2, - I40E_TX_DESC_DTYPE_FILTER_PROG = 0x8, - I40E_TX_DESC_DTYPE_DDP_CTX = 0x9, - I40E_TX_DESC_DTYPE_FLEX_DATA = 0xB, - I40E_TX_DESC_DTYPE_FLEX_CTX_1 = 0xC, - I40E_TX_DESC_DTYPE_FLEX_CTX_2 = 0xD, - I40E_TX_DESC_DTYPE_DESC_DONE = 0xF -}; - -#define I40E_TXD_QW1_CMD_SHIFT 4 -#define I40E_TXD_QW1_CMD_MASK (0x3FFUL << I40E_TXD_QW1_CMD_SHIFT) - -enum i40e_tx_desc_cmd_bits { - I40E_TX_DESC_CMD_EOP = 0x0001, - I40E_TX_DESC_CMD_RS = 0x0002, - I40E_TX_DESC_CMD_ICRC = 0x0004, - I40E_TX_DESC_CMD_IL2TAG1 = 0x0008, - I40E_TX_DESC_CMD_DUMMY = 0x0010, - I40E_TX_DESC_CMD_IIPT_NONIP = 0x0000, /* 2 BITS */ - I40E_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */ - I40E_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */ - 
I40E_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */ - I40E_TX_DESC_CMD_FCOET = 0x0080, - I40E_TX_DESC_CMD_L4T_EOFT_UNK = 0x0000, /* 2 BITS */ - I40E_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */ - I40E_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */ - I40E_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */ - I40E_TX_DESC_CMD_L4T_EOFT_EOF_N = 0x0000, /* 2 BITS */ - I40E_TX_DESC_CMD_L4T_EOFT_EOF_T = 0x0100, /* 2 BITS */ - I40E_TX_DESC_CMD_L4T_EOFT_EOF_NI = 0x0200, /* 2 BITS */ - I40E_TX_DESC_CMD_L4T_EOFT_EOF_A = 0x0300, /* 2 BITS */ -}; - -#define I40E_TXD_QW1_OFFSET_SHIFT 16 -#define I40E_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \ - I40E_TXD_QW1_OFFSET_SHIFT) - -enum i40e_tx_desc_length_fields { - /* Note: These are predefined bit offsets */ - I40E_TX_DESC_LENGTH_MACLEN_SHIFT = 0, /* 7 BITS */ - I40E_TX_DESC_LENGTH_IPLEN_SHIFT = 7, /* 7 BITS */ - I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT = 14 /* 4 BITS */ -}; - -#define I40E_TXD_QW1_TX_BUF_SZ_SHIFT 34 -#define I40E_TXD_QW1_TX_BUF_SZ_MASK (0x3FFFULL << \ - I40E_TXD_QW1_TX_BUF_SZ_SHIFT) - -#define I40E_TXD_QW1_L2TAG1_SHIFT 48 -#define I40E_TXD_QW1_L2TAG1_MASK (0xFFFFULL << I40E_TXD_QW1_L2TAG1_SHIFT) - -/* Context descriptors */ -struct i40e_tx_context_desc { - __le32 tunneling_params; - __le16 l2tag2; - __le16 rsvd; - __le64 type_cmd_tso_mss; -}; - -#define I40E_TXD_CTX_QW1_DTYPE_SHIFT 0 -#define I40E_TXD_CTX_QW1_DTYPE_MASK (0xFUL << I40E_TXD_CTX_QW1_DTYPE_SHIFT) - -#define I40E_TXD_CTX_QW1_CMD_SHIFT 4 -#define I40E_TXD_CTX_QW1_CMD_MASK (0xFFFFUL << I40E_TXD_CTX_QW1_CMD_SHIFT) - -enum i40e_tx_ctx_desc_cmd_bits { - I40E_TX_CTX_DESC_TSO = 0x01, - I40E_TX_CTX_DESC_TSYN = 0x02, - I40E_TX_CTX_DESC_IL2TAG2 = 0x04, - I40E_TX_CTX_DESC_IL2TAG2_IL2H = 0x08, - I40E_TX_CTX_DESC_SWTCH_NOTAG = 0x00, - I40E_TX_CTX_DESC_SWTCH_UPLINK = 0x10, - I40E_TX_CTX_DESC_SWTCH_LOCAL = 0x20, - I40E_TX_CTX_DESC_SWTCH_VSI = 0x30, - I40E_TX_CTX_DESC_SWPE = 0x40 -}; - -#define I40E_TXD_CTX_QW1_TSO_LEN_SHIFT 30 -#define I40E_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \ - I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) - -#define I40E_TXD_CTX_QW1_MSS_SHIFT 50 -#define I40E_TXD_CTX_QW1_MSS_MASK (0x3FFFULL << \ - I40E_TXD_CTX_QW1_MSS_SHIFT) - -#define I40E_TXD_CTX_QW1_VSI_SHIFT 50 -#define I40E_TXD_CTX_QW1_VSI_MASK (0x1FFULL << I40E_TXD_CTX_QW1_VSI_SHIFT) - -#define I40E_TXD_CTX_QW0_EXT_IP_SHIFT 0 -#define I40E_TXD_CTX_QW0_EXT_IP_MASK (0x3ULL << \ - I40E_TXD_CTX_QW0_EXT_IP_SHIFT) - -enum i40e_tx_ctx_desc_eipt_offload { - I40E_TX_CTX_EXT_IP_NONE = 0x0, - I40E_TX_CTX_EXT_IP_IPV6 = 0x1, - I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2, - I40E_TX_CTX_EXT_IP_IPV4 = 0x3 -}; - -#define I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT 2 -#define I40E_TXD_CTX_QW0_EXT_IPLEN_MASK (0x3FULL << \ - I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT) - -#define I40E_TXD_CTX_QW0_NATT_SHIFT 9 -#define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) - -#define I40E_TXD_CTX_UDP_TUNNELING BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT) -#define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) - -#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11 -#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK \ - BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT) - -#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK - -#define I40E_TXD_CTX_QW0_NATLEN_SHIFT 12 -#define I40E_TXD_CTX_QW0_NATLEN_MASK (0X7FULL << \ - I40E_TXD_CTX_QW0_NATLEN_SHIFT) - -#define I40E_TXD_CTX_QW0_DECTTL_SHIFT 19 -#define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \ - I40E_TXD_CTX_QW0_DECTTL_SHIFT) - -#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT 23 -#define I40E_TXD_CTX_QW0_L4T_CS_MASK 
BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT) -struct i40e_filter_program_desc { - __le32 qindex_flex_ptype_vsi; - __le32 rsvd; - __le32 dtype_cmd_cntindex; - __le32 fd_id; -}; -#define I40E_TXD_FLTR_QW0_QINDEX_SHIFT 0 -#define I40E_TXD_FLTR_QW0_QINDEX_MASK (0x7FFUL << \ - I40E_TXD_FLTR_QW0_QINDEX_SHIFT) -#define I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT 11 -#define I40E_TXD_FLTR_QW0_FLEXOFF_MASK (0x7UL << \ - I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) -#define I40E_TXD_FLTR_QW0_PCTYPE_SHIFT 17 -#define I40E_TXD_FLTR_QW0_PCTYPE_MASK (0x3FUL << \ - I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) - -/* Packet Classifier Types for filters */ -enum i40e_filter_pctype { - /* Note: Values 0-28 are reserved for future use. - * Value 29, 30, 32 are not supported on XL710 and X710. - */ - I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29, - I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30, - I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31, - I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32, - I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33, - I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34, - I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35, - I40E_FILTER_PCTYPE_FRAG_IPV4 = 36, - /* Note: Values 37-38 are reserved for future use. - * Value 39, 40, 42 are not supported on XL710 and X710. - */ - I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39, - I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40, - I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41, - I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42, - I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43, - I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44, - I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45, - I40E_FILTER_PCTYPE_FRAG_IPV6 = 46, - /* Note: Value 47 is reserved for future use */ - I40E_FILTER_PCTYPE_FCOE_OX = 48, - I40E_FILTER_PCTYPE_FCOE_RX = 49, - I40E_FILTER_PCTYPE_FCOE_OTHER = 50, - /* Note: Values 51-62 are reserved for future use */ - I40E_FILTER_PCTYPE_L2_PAYLOAD = 63, -}; - -enum i40e_filter_program_desc_dest { - I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET = 0x0, - I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX = 0x1, - I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER = 0x2, -}; - -enum i40e_filter_program_desc_fd_status { - I40E_FILTER_PROGRAM_DESC_FD_STATUS_NONE = 0x0, - I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID = 0x1, - I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID_4FLEX_BYTES = 0x2, - I40E_FILTER_PROGRAM_DESC_FD_STATUS_8FLEX_BYTES = 0x3, -}; - -#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23 -#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \ - I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) - -#define I40E_TXD_FLTR_QW1_CMD_SHIFT 4 -#define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \ - I40E_TXD_FLTR_QW1_CMD_SHIFT) - -#define I40E_TXD_FLTR_QW1_PCMD_SHIFT (0x0ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) -#define I40E_TXD_FLTR_QW1_PCMD_MASK (0x7ULL << I40E_TXD_FLTR_QW1_PCMD_SHIFT) - -enum i40e_filter_program_desc_pcmd { - I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE = 0x1, - I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE = 0x2, -}; - -#define I40E_TXD_FLTR_QW1_DEST_SHIFT (0x3ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) -#define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT) - -#define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) -#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK BIT_ULL(I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT) - -#define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \ - I40E_TXD_FLTR_QW1_CMD_SHIFT) -#define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \ - I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) - -#define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20 -#define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \ - I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) - -enum i40e_filter_type { - 
I40E_FLOW_DIRECTOR_FLTR = 0, - I40E_PE_QUAD_HASH_FLTR = 1, - I40E_ETHERTYPE_FLTR, - I40E_FCOE_CTX_FLTR, - I40E_MAC_VLAN_FLTR, - I40E_HASH_FLTR -}; - -struct i40e_vsi_context { - u16 seid; - u16 uplink_seid; - u16 vsi_number; - u16 vsis_allocated; - u16 vsis_unallocated; - u16 flags; - u8 pf_num; - u8 vf_num; - u8 connection_type; - struct i40e_aqc_vsi_properties_data info; -}; - -struct i40e_veb_context { - u16 seid; - u16 uplink_seid; - u16 veb_number; - u16 vebs_allocated; - u16 vebs_unallocated; - u16 flags; - struct i40e_aqc_get_veb_parameters_completion info; -}; - -/* Statistics collected by each port, VSI, VEB, and S-channel */ -struct i40e_eth_stats { - u64 rx_bytes; /* gorc */ - u64 rx_unicast; /* uprc */ - u64 rx_multicast; /* mprc */ - u64 rx_broadcast; /* bprc */ - u64 rx_discards; /* rdpc */ - u64 rx_unknown_protocol; /* rupp */ - u64 tx_bytes; /* gotc */ - u64 tx_unicast; /* uptc */ - u64 tx_multicast; /* mptc */ - u64 tx_broadcast; /* bptc */ - u64 tx_discards; /* tdpc */ - u64 tx_errors; /* tepc */ -}; - -/* Statistics collected per VEB per TC */ -struct i40e_veb_tc_stats { - u64 tc_rx_packets[I40E_MAX_TRAFFIC_CLASS]; - u64 tc_rx_bytes[I40E_MAX_TRAFFIC_CLASS]; - u64 tc_tx_packets[I40E_MAX_TRAFFIC_CLASS]; - u64 tc_tx_bytes[I40E_MAX_TRAFFIC_CLASS]; -}; - -/* Statistics collected by the MAC */ -struct i40e_hw_port_stats { - /* eth stats collected by the port */ - struct i40e_eth_stats eth; - - /* additional port specific stats */ - u64 tx_dropped_link_down; /* tdold */ - u64 crc_errors; /* crcerrs */ - u64 illegal_bytes; /* illerrc */ - u64 error_bytes; /* errbc */ - u64 mac_local_faults; /* mlfc */ - u64 mac_remote_faults; /* mrfc */ - u64 rx_length_errors; /* rlec */ - u64 link_xon_rx; /* lxonrxc */ - u64 link_xoff_rx; /* lxoffrxc */ - u64 priority_xon_rx[8]; /* pxonrxc[8] */ - u64 priority_xoff_rx[8]; /* pxoffrxc[8] */ - u64 link_xon_tx; /* lxontxc */ - u64 link_xoff_tx; /* lxofftxc */ - u64 priority_xon_tx[8]; /* pxontxc[8] */ - u64 priority_xoff_tx[8]; /* pxofftxc[8] */ - u64 priority_xon_2_xoff[8]; /* pxon2offc[8] */ - u64 rx_size_64; /* prc64 */ - u64 rx_size_127; /* prc127 */ - u64 rx_size_255; /* prc255 */ - u64 rx_size_511; /* prc511 */ - u64 rx_size_1023; /* prc1023 */ - u64 rx_size_1522; /* prc1522 */ - u64 rx_size_big; /* prc9522 */ - u64 rx_undersize; /* ruc */ - u64 rx_fragments; /* rfc */ - u64 rx_oversize; /* roc */ - u64 rx_jabber; /* rjc */ - u64 tx_size_64; /* ptc64 */ - u64 tx_size_127; /* ptc127 */ - u64 tx_size_255; /* ptc255 */ - u64 tx_size_511; /* ptc511 */ - u64 tx_size_1023; /* ptc1023 */ - u64 tx_size_1522; /* ptc1522 */ - u64 tx_size_big; /* ptc9522 */ - u64 mac_short_packet_dropped; /* mspdc */ - u64 checksum_error; /* xec */ - /* flow director stats */ - u64 fd_atr_match; - u64 fd_sb_match; - u64 fd_atr_tunnel_match; - u32 fd_atr_status; - u32 fd_sb_status; - /* EEE LPI */ - u32 tx_lpi_status; - u32 rx_lpi_status; - u64 tx_lpi_count; /* etlpic */ - u64 rx_lpi_count; /* erlpic */ -}; - -/* Checksum and Shadow RAM pointers */ -#define I40E_SR_NVM_CONTROL_WORD 0x00 -#define I40E_EMP_MODULE_PTR 0x0F -#define I40E_SR_EMP_MODULE_PTR 0x48 -#define I40E_NVM_OEM_VER_OFF 0x83 -#define I40E_SR_NVM_DEV_STARTER_VERSION 0x18 -#define I40E_SR_NVM_WAKE_ON_LAN 0x19 -#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27 -#define I40E_SR_NVM_EETRACK_LO 0x2D -#define I40E_SR_NVM_EETRACK_HI 0x2E -#define I40E_SR_VPD_PTR 0x2F -#define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E -#define I40E_SR_SW_CHECKSUM_WORD 0x3F - -/* Auxiliary field, mask and shift definition for Shadow 
RAM and NVM Flash */ -#define I40E_SR_VPD_MODULE_MAX_SIZE 1024 -#define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024 -#define I40E_SR_CONTROL_WORD_1_SHIFT 0x06 -#define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT) -#define I40E_SR_CONTROL_WORD_1_NVM_BANK_VALID BIT(5) -#define I40E_SR_NVM_MAP_STRUCTURE_TYPE BIT(12) -#define I40E_PTR_TYPE BIT(15) - -/* Shadow RAM related */ -#define I40E_SR_SECTOR_SIZE_IN_WORDS 0x800 -#define I40E_SR_WORDS_IN_1KB 512 -/* Checksum should be calculated such that after adding all the words, - * including the checksum word itself, the sum should be 0xBABA. - */ -#define I40E_SR_SW_CHECKSUM_BASE 0xBABA - -#define I40E_SRRD_SRCTL_ATTEMPTS 100000 - -enum i40e_switch_element_types { - I40E_SWITCH_ELEMENT_TYPE_MAC = 1, - I40E_SWITCH_ELEMENT_TYPE_PF = 2, - I40E_SWITCH_ELEMENT_TYPE_VF = 3, - I40E_SWITCH_ELEMENT_TYPE_EMP = 4, - I40E_SWITCH_ELEMENT_TYPE_BMC = 6, - I40E_SWITCH_ELEMENT_TYPE_PE = 16, - I40E_SWITCH_ELEMENT_TYPE_VEB = 17, - I40E_SWITCH_ELEMENT_TYPE_PA = 18, - I40E_SWITCH_ELEMENT_TYPE_VSI = 19, -}; - -/* Supported EtherType filters */ -enum i40e_ether_type_index { - I40E_ETHER_TYPE_1588 = 0, - I40E_ETHER_TYPE_FIP = 1, - I40E_ETHER_TYPE_OUI_EXTENDED = 2, - I40E_ETHER_TYPE_MAC_CONTROL = 3, - I40E_ETHER_TYPE_LLDP = 4, - I40E_ETHER_TYPE_EVB_PROTOCOL1 = 5, - I40E_ETHER_TYPE_EVB_PROTOCOL2 = 6, - I40E_ETHER_TYPE_QCN_CNM = 7, - I40E_ETHER_TYPE_8021X = 8, - I40E_ETHER_TYPE_ARP = 9, - I40E_ETHER_TYPE_RSV1 = 10, - I40E_ETHER_TYPE_RSV2 = 11, -}; - -/* Filter context base size is 1K */ -#define I40E_HASH_FILTER_BASE_SIZE 1024 -/* Supported Hash filter values */ -enum i40e_hash_filter_size { - I40E_HASH_FILTER_SIZE_1K = 0, - I40E_HASH_FILTER_SIZE_2K = 1, - I40E_HASH_FILTER_SIZE_4K = 2, - I40E_HASH_FILTER_SIZE_8K = 3, - I40E_HASH_FILTER_SIZE_16K = 4, - I40E_HASH_FILTER_SIZE_32K = 5, - I40E_HASH_FILTER_SIZE_64K = 6, - I40E_HASH_FILTER_SIZE_128K = 7, - I40E_HASH_FILTER_SIZE_256K = 8, - I40E_HASH_FILTER_SIZE_512K = 9, - I40E_HASH_FILTER_SIZE_1M = 10, -}; - -/* DMA context base size is 0.5K */ -#define I40E_DMA_CNTX_BASE_SIZE 512 -/* Supported DMA context values */ -enum i40e_dma_cntx_size { - I40E_DMA_CNTX_SIZE_512 = 0, - I40E_DMA_CNTX_SIZE_1K = 1, - I40E_DMA_CNTX_SIZE_2K = 2, - I40E_DMA_CNTX_SIZE_4K = 3, - I40E_DMA_CNTX_SIZE_8K = 4, - I40E_DMA_CNTX_SIZE_16K = 5, - I40E_DMA_CNTX_SIZE_32K = 6, - I40E_DMA_CNTX_SIZE_64K = 7, - I40E_DMA_CNTX_SIZE_128K = 8, - I40E_DMA_CNTX_SIZE_256K = 9, -}; - -/* Supported Hash look up table (LUT) sizes */ -enum i40e_hash_lut_size { - I40E_HASH_LUT_SIZE_128 = 0, - I40E_HASH_LUT_SIZE_512 = 1, -}; - -/* Structure to hold a per PF filter control settings */ -struct i40e_filter_control_settings { - /* number of PE Quad Hash filter buckets */ - enum i40e_hash_filter_size pe_filt_num; - /* number of PE Quad Hash contexts */ - enum i40e_dma_cntx_size pe_cntx_num; - /* number of FCoE filter buckets */ - enum i40e_hash_filter_size fcoe_filt_num; - /* number of FCoE DDP contexts */ - enum i40e_dma_cntx_size fcoe_cntx_num; - /* size of the Hash LUT */ - enum i40e_hash_lut_size hash_lut_size; - /* enable FDIR filters for PF and its VFs */ - bool enable_fdir; - /* enable Ethertype filters for PF and its VFs */ - bool enable_ethtype; - /* enable MAC/VLAN filters for PF and its VFs */ - bool enable_macvlan; -}; - -/* Structure to hold device level control filter counts */ -struct i40e_control_filter_stats { - u16 mac_etype_used; /* Used perfect match MAC/EtherType filters */ - u16 etype_used; /* Used perfect EtherType filters */ - u16 
mac_etype_free; /* Un-used perfect match MAC/EtherType filters */ - u16 etype_free; /* Un-used perfect EtherType filters */ -}; - -enum i40e_reset_type { - I40E_RESET_POR = 0, - I40E_RESET_CORER = 1, - I40E_RESET_GLOBR = 2, - I40E_RESET_EMPR = 3, -}; - -/* IEEE 802.1AB LLDP Agent Variables from NVM */ -#define I40E_NVM_LLDP_CFG_PTR 0x06 -#define I40E_SR_LLDP_CFG_PTR 0x31 - -/* RSS Hash Table Size */ -#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000 - -/* INPUT SET MASK for RSS, flow director and flexible payload */ -#define I40E_FD_INSET_L3_SRC_SHIFT 47 -#define I40E_FD_INSET_L3_SRC_WORD_MASK (0x3ULL << \ - I40E_FD_INSET_L3_SRC_SHIFT) -#define I40E_FD_INSET_L3_DST_SHIFT 35 -#define I40E_FD_INSET_L3_DST_WORD_MASK (0x3ULL << \ - I40E_FD_INSET_L3_DST_SHIFT) -#define I40E_FD_INSET_L4_SRC_SHIFT 34 -#define I40E_FD_INSET_L4_SRC_WORD_MASK (0x1ULL << \ - I40E_FD_INSET_L4_SRC_SHIFT) -#define I40E_FD_INSET_L4_DST_SHIFT 33 -#define I40E_FD_INSET_L4_DST_WORD_MASK (0x1ULL << \ - I40E_FD_INSET_L4_DST_SHIFT) -#define I40E_FD_INSET_VERIFY_TAG_SHIFT 31 -#define I40E_FD_INSET_VERIFY_TAG_WORD_MASK (0x3ULL << \ - I40E_FD_INSET_VERIFY_TAG_SHIFT) - -#define I40E_FD_INSET_FLEX_WORD50_SHIFT 17 -#define I40E_FD_INSET_FLEX_WORD50_MASK (0x1ULL << \ - I40E_FD_INSET_FLEX_WORD50_SHIFT) -#define I40E_FD_INSET_FLEX_WORD51_SHIFT 16 -#define I40E_FD_INSET_FLEX_WORD51_MASK (0x1ULL << \ - I40E_FD_INSET_FLEX_WORD51_SHIFT) -#define I40E_FD_INSET_FLEX_WORD52_SHIFT 15 -#define I40E_FD_INSET_FLEX_WORD52_MASK (0x1ULL << \ - I40E_FD_INSET_FLEX_WORD52_SHIFT) -#define I40E_FD_INSET_FLEX_WORD53_SHIFT 14 -#define I40E_FD_INSET_FLEX_WORD53_MASK (0x1ULL << \ - I40E_FD_INSET_FLEX_WORD53_SHIFT) -#define I40E_FD_INSET_FLEX_WORD54_SHIFT 13 -#define I40E_FD_INSET_FLEX_WORD54_MASK (0x1ULL << \ - I40E_FD_INSET_FLEX_WORD54_SHIFT) -#define I40E_FD_INSET_FLEX_WORD55_SHIFT 12 -#define I40E_FD_INSET_FLEX_WORD55_MASK (0x1ULL << \ - I40E_FD_INSET_FLEX_WORD55_SHIFT) -#define I40E_FD_INSET_FLEX_WORD56_SHIFT 11 -#define I40E_FD_INSET_FLEX_WORD56_MASK (0x1ULL << \ - I40E_FD_INSET_FLEX_WORD56_SHIFT) -#define I40E_FD_INSET_FLEX_WORD57_SHIFT 10 -#define I40E_FD_INSET_FLEX_WORD57_MASK (0x1ULL << \ - I40E_FD_INSET_FLEX_WORD57_SHIFT) - -/* Version format for Dynamic Device Personalization(DDP) */ -struct i40e_ddp_version { - u8 major; - u8 minor; - u8 update; - u8 draft; -}; - -#define I40E_DDP_NAME_SIZE 32 - -/* Package header */ -struct i40e_package_header { - struct i40e_ddp_version version; - u32 segment_count; - u32 segment_offset[1]; -}; - -/* Generic segment header */ -struct i40e_generic_seg_header { -#define SEGMENT_TYPE_METADATA 0x00000001 -#define SEGMENT_TYPE_NOTES 0x00000002 -#define SEGMENT_TYPE_I40E 0x00000011 -#define SEGMENT_TYPE_X722 0x00000012 - u32 type; - struct i40e_ddp_version version; - u32 size; - char name[I40E_DDP_NAME_SIZE]; -}; - -struct i40e_metadata_segment { - struct i40e_generic_seg_header header; - struct i40e_ddp_version version; - u32 track_id; - char name[I40E_DDP_NAME_SIZE]; -}; - -struct i40e_device_id_entry { - u32 vendor_dev_id; - u32 sub_vendor_dev_id; -}; - -struct i40e_profile_segment { - struct i40e_generic_seg_header header; - struct i40e_ddp_version version; - char name[I40E_DDP_NAME_SIZE]; - u32 device_table_count; - struct i40e_device_id_entry device_table[1]; -}; - -struct i40e_section_table { - u32 section_count; - u32 section_offset[1]; -}; - -struct i40e_profile_section_header { - u16 tbl_size; - u16 data_end; - struct { -#define SECTION_TYPE_INFO 0x00000010 -#define SECTION_TYPE_MMIO 0x00000800 -#define 
SECTION_TYPE_AQ 0x00000801 -#define SECTION_TYPE_NOTE 0x80000000 -#define SECTION_TYPE_NAME 0x80000001 - u32 type; - u32 offset; - u32 size; - } section; -}; - -struct i40e_profile_info { - u32 track_id; - struct i40e_ddp_version version; - u8 op; -#define I40E_DDP_ADD_TRACKID 0x01 -#define I40E_DDP_REMOVE_TRACKID 0x02 - u8 reserved[7]; - u8 name[I40E_DDP_NAME_SIZE]; -}; -#endif /* _I40E_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h deleted file mode 100644 index 96e537a35000..000000000000 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ /dev/null @@ -1,427 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ - -#ifndef _I40EVF_H_ -#define _I40EVF_H_ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "i40e_type.h" -#include -#include "i40e_txrx.h" - -#define DEFAULT_DEBUG_LEVEL_SHIFT 3 -#define PFX "i40evf: " - -/* VSI state flags shared with common code */ -enum i40evf_vsi_state_t { - __I40E_VSI_DOWN, - /* This must be last as it determines the size of the BITMAP */ - __I40E_VSI_STATE_SIZE__, -}; - -/* dummy struct to make common code less painful */ -struct i40e_vsi { - struct i40evf_adapter *back; - struct net_device *netdev; - unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; - u16 seid; - u16 id; - DECLARE_BITMAP(state, __I40E_VSI_STATE_SIZE__); - int base_vector; - u16 work_limit; - u16 qs_handle; - void *priv; /* client driver data reference. */ -}; - -/* How many Rx Buffers do we bundle into one write to the hardware ? */ -#define I40EVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */ -#define I40EVF_DEFAULT_TXD 512 -#define I40EVF_DEFAULT_RXD 512 -#define I40EVF_MAX_TXD 4096 -#define I40EVF_MIN_TXD 64 -#define I40EVF_MAX_RXD 4096 -#define I40EVF_MIN_RXD 64 -#define I40EVF_REQ_DESCRIPTOR_MULTIPLE 32 -#define I40EVF_MAX_AQ_BUF_SIZE 4096 -#define I40EVF_AQ_LEN 32 -#define I40EVF_AQ_MAX_ERR 20 /* times to try before resetting AQ */ - -#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) - -#define I40E_RX_DESC(R, i) (&(((union i40e_32byte_rx_desc *)((R)->desc))[i])) -#define I40E_TX_DESC(R, i) (&(((struct i40e_tx_desc *)((R)->desc))[i])) -#define I40E_TX_CTXTDESC(R, i) \ - (&(((struct i40e_tx_context_desc *)((R)->desc))[i])) -#define I40EVF_MAX_REQ_QUEUES 4 - -#define I40EVF_HKEY_ARRAY_SIZE ((I40E_VFQF_HKEY_MAX_INDEX + 1) * 4) -#define I40EVF_HLUT_ARRAY_SIZE ((I40E_VFQF_HLUT_MAX_INDEX + 1) * 4) -#define I40EVF_MBPS_DIVISOR 125000 /* divisor to convert to Mbps */ - -/* MAX_MSIX_Q_VECTORS of these are allocated, - * but we only use one per queue-specific vector. - */ -struct i40e_q_vector { - struct i40evf_adapter *adapter; - struct i40e_vsi *vsi; - struct napi_struct napi; - struct i40e_ring_container rx; - struct i40e_ring_container tx; - u32 ring_mask; - u8 itr_countdown; /* when 0 should adjust adaptive ITR */ - u8 num_ringpairs; /* total number of ring pairs in vector */ - u16 v_idx; /* index in the vsi->q_vector array. */ - u16 reg_idx; /* register index of the interrupt */ - char name[IFNAMSIZ + 15]; - bool arm_wb_state; - cpumask_t affinity_mask; - struct irq_affinity_notify affinity_notify; -}; - -/* Helper macros to switch between ints/sec and what the register uses. 
- * And yes, it's the same math going both ways. The lowest value - * supported by all of the i40e hardware is 8. - */ -#define EITR_INTS_PER_SEC_TO_REG(_eitr) \ - ((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8) -#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG - -#define I40EVF_DESC_UNUSED(R) \ - ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ - (R)->next_to_clean - (R)->next_to_use - 1) - -#define I40EVF_RX_DESC_ADV(R, i) \ - (&(((union i40e_adv_rx_desc *)((R).desc))[i])) -#define I40EVF_TX_DESC_ADV(R, i) \ - (&(((union i40e_adv_tx_desc *)((R).desc))[i])) -#define I40EVF_TX_CTXTDESC_ADV(R, i) \ - (&(((struct i40e_adv_tx_context_desc *)((R).desc))[i])) - -#define OTHER_VECTOR 1 -#define NONQ_VECS (OTHER_VECTOR) - -#define MIN_MSIX_Q_VECTORS 1 -#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NONQ_VECS) - -#define I40EVF_QUEUE_END_OF_LIST 0x7FF -#define I40EVF_FREE_VECTOR 0x7FFF -struct i40evf_mac_filter { - struct list_head list; - u8 macaddr[ETH_ALEN]; - bool remove; /* filter needs to be removed */ - bool add; /* filter needs to be added */ -}; - -struct i40evf_vlan_filter { - struct list_head list; - u16 vlan; - bool remove; /* filter needs to be removed */ - bool add; /* filter needs to be added */ -}; - -#define I40EVF_MAX_TRAFFIC_CLASS 4 -/* State of traffic class creation */ -enum i40evf_tc_state_t { - __I40EVF_TC_INVALID, /* no traffic class, default state */ - __I40EVF_TC_RUNNING, /* traffic classes have been created */ -}; - -/* channel info */ -struct i40evf_channel_config { - struct virtchnl_channel_info ch_info[I40EVF_MAX_TRAFFIC_CLASS]; - enum i40evf_tc_state_t state; - u8 total_qps; -}; - -/* State of cloud filter */ -enum i40evf_cloud_filter_state_t { - __I40EVF_CF_INVALID, /* cloud filter not added */ - __I40EVF_CF_ADD_PENDING, /* cloud filter pending add by the PF */ - __I40EVF_CF_DEL_PENDING, /* cloud filter pending del by the PF */ - __I40EVF_CF_ACTIVE, /* cloud filter is active */ -}; - -/* Driver state. The order of these is important! 
*/ -enum i40evf_state_t { - __I40EVF_STARTUP, /* driver loaded, probe complete */ - __I40EVF_REMOVE, /* driver is being unloaded */ - __I40EVF_INIT_VERSION_CHECK, /* aq msg sent, awaiting reply */ - __I40EVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */ - __I40EVF_INIT_SW, /* got resources, setting up structs */ - __I40EVF_RESETTING, /* in reset */ - /* Below here, watchdog is running */ - __I40EVF_DOWN, /* ready, can be opened */ - __I40EVF_DOWN_PENDING, /* descending, waiting for watchdog */ - __I40EVF_TESTING, /* in ethtool self-test */ - __I40EVF_RUNNING, /* opened, working */ -}; - -enum i40evf_critical_section_t { - __I40EVF_IN_CRITICAL_TASK, /* cannot be interrupted */ - __I40EVF_IN_CLIENT_TASK, - __I40EVF_IN_REMOVE_TASK, /* device being removed */ -}; - -#define I40EVF_CLOUD_FIELD_OMAC 0x01 -#define I40EVF_CLOUD_FIELD_IMAC 0x02 -#define I40EVF_CLOUD_FIELD_IVLAN 0x04 -#define I40EVF_CLOUD_FIELD_TEN_ID 0x08 -#define I40EVF_CLOUD_FIELD_IIP 0x10 - -#define I40EVF_CF_FLAGS_OMAC I40EVF_CLOUD_FIELD_OMAC -#define I40EVF_CF_FLAGS_IMAC I40EVF_CLOUD_FIELD_IMAC -#define I40EVF_CF_FLAGS_IMAC_IVLAN (I40EVF_CLOUD_FIELD_IMAC |\ - I40EVF_CLOUD_FIELD_IVLAN) -#define I40EVF_CF_FLAGS_IMAC_TEN_ID (I40EVF_CLOUD_FIELD_IMAC |\ - I40EVF_CLOUD_FIELD_TEN_ID) -#define I40EVF_CF_FLAGS_OMAC_TEN_ID_IMAC (I40EVF_CLOUD_FIELD_OMAC |\ - I40EVF_CLOUD_FIELD_IMAC |\ - I40EVF_CLOUD_FIELD_TEN_ID) -#define I40EVF_CF_FLAGS_IMAC_IVLAN_TEN_ID (I40EVF_CLOUD_FIELD_IMAC |\ - I40EVF_CLOUD_FIELD_IVLAN |\ - I40EVF_CLOUD_FIELD_TEN_ID) -#define I40EVF_CF_FLAGS_IIP I40EVF_CLOUD_FIELD_IIP - -/* bookkeeping of cloud filters */ -struct i40evf_cloud_filter { - enum i40evf_cloud_filter_state_t state; - struct list_head list; - struct virtchnl_filter f; - unsigned long cookie; - bool del; /* filter needs to be deleted */ - bool add; /* filter needs to be added */ -}; - -/* board specific private data structure */ -struct i40evf_adapter { - struct timer_list watchdog_timer; - struct work_struct reset_task; - struct work_struct adminq_task; - struct delayed_work client_task; - struct delayed_work init_task; - wait_queue_head_t down_waitqueue; - struct i40e_q_vector *q_vectors; - struct list_head vlan_filter_list; - struct list_head mac_filter_list; - /* Lock to protect accesses to MAC and VLAN lists */ - spinlock_t mac_vlan_list_lock; - char misc_vector_name[IFNAMSIZ + 9]; - int num_active_queues; - int num_req_queues; - - /* TX */ - struct i40e_ring *tx_rings; - u32 tx_timeout_count; - u32 tx_desc_count; - - /* RX */ - struct i40e_ring *rx_rings; - u64 hw_csum_rx_error; - u32 rx_desc_count; - int num_msix_vectors; - int num_iwarp_msix; - int iwarp_base_vector; - u32 client_pending; - struct i40e_client_instance *cinst; - struct msix_entry *msix_entries; - - u32 flags; -#define I40EVF_FLAG_RX_CSUM_ENABLED BIT(0) -#define I40EVF_FLAG_PF_COMMS_FAILED BIT(3) -#define I40EVF_FLAG_RESET_PENDING BIT(4) -#define I40EVF_FLAG_RESET_NEEDED BIT(5) -#define I40EVF_FLAG_WB_ON_ITR_CAPABLE BIT(6) -#define I40EVF_FLAG_ADDR_SET_BY_PF BIT(8) -#define I40EVF_FLAG_SERVICE_CLIENT_REQUESTED BIT(9) -#define I40EVF_FLAG_CLIENT_NEEDS_OPEN BIT(10) -#define I40EVF_FLAG_CLIENT_NEEDS_CLOSE BIT(11) -#define I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS BIT(12) -#define I40EVF_FLAG_PROMISC_ON BIT(13) -#define I40EVF_FLAG_ALLMULTI_ON BIT(14) -#define I40EVF_FLAG_LEGACY_RX BIT(15) -#define I40EVF_FLAG_REINIT_ITR_NEEDED BIT(16) -#define I40EVF_FLAG_QUEUES_DISABLED BIT(17) -/* duplicates for common code */ -#define I40E_FLAG_DCB_ENABLED 0 -#define I40E_FLAG_RX_CSUM_ENABLED
I40EVF_FLAG_RX_CSUM_ENABLED -#define I40E_FLAG_LEGACY_RX I40EVF_FLAG_LEGACY_RX - /* flags for admin queue service task */ - u32 aq_required; -#define I40EVF_FLAG_AQ_ENABLE_QUEUES BIT(0) -#define I40EVF_FLAG_AQ_DISABLE_QUEUES BIT(1) -#define I40EVF_FLAG_AQ_ADD_MAC_FILTER BIT(2) -#define I40EVF_FLAG_AQ_ADD_VLAN_FILTER BIT(3) -#define I40EVF_FLAG_AQ_DEL_MAC_FILTER BIT(4) -#define I40EVF_FLAG_AQ_DEL_VLAN_FILTER BIT(5) -#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES BIT(6) -#define I40EVF_FLAG_AQ_MAP_VECTORS BIT(7) -#define I40EVF_FLAG_AQ_HANDLE_RESET BIT(8) -#define I40EVF_FLAG_AQ_CONFIGURE_RSS BIT(9) /* direct AQ config */ -#define I40EVF_FLAG_AQ_GET_CONFIG BIT(10) -/* Newer style, RSS done by the PF so we can ignore hardware vagaries. */ -#define I40EVF_FLAG_AQ_GET_HENA BIT(11) -#define I40EVF_FLAG_AQ_SET_HENA BIT(12) -#define I40EVF_FLAG_AQ_SET_RSS_KEY BIT(13) -#define I40EVF_FLAG_AQ_SET_RSS_LUT BIT(14) -#define I40EVF_FLAG_AQ_REQUEST_PROMISC BIT(15) -#define I40EVF_FLAG_AQ_RELEASE_PROMISC BIT(16) -#define I40EVF_FLAG_AQ_REQUEST_ALLMULTI BIT(17) -#define I40EVF_FLAG_AQ_RELEASE_ALLMULTI BIT(18) -#define I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING BIT(19) -#define I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING BIT(20) -#define I40EVF_FLAG_AQ_ENABLE_CHANNELS BIT(21) -#define I40EVF_FLAG_AQ_DISABLE_CHANNELS BIT(22) -#define I40EVF_FLAG_AQ_ADD_CLOUD_FILTER BIT(23) -#define I40EVF_FLAG_AQ_DEL_CLOUD_FILTER BIT(24) - - /* OS defined structs */ - struct net_device *netdev; - struct pci_dev *pdev; - - struct i40e_hw hw; /* defined in i40e_type.h */ - - enum i40evf_state_t state; - unsigned long crit_section; - - struct work_struct watchdog_task; - bool netdev_registered; - bool link_up; - enum virtchnl_link_speed link_speed; - enum virtchnl_ops current_op; -#define CLIENT_ALLOWED(_a) ((_a)->vf_res ? \ - (_a)->vf_res->vf_cap_flags & \ - VIRTCHNL_VF_OFFLOAD_IWARP : \ - 0) -#define CLIENT_ENABLED(_a) ((_a)->cinst) -/* RSS by the PF should be preferred over RSS via other methods. */ -#define RSS_PF(_a) ((_a)->vf_res->vf_cap_flags & \ - VIRTCHNL_VF_OFFLOAD_RSS_PF) -#define RSS_AQ(_a) ((_a)->vf_res->vf_cap_flags & \ - VIRTCHNL_VF_OFFLOAD_RSS_AQ) -#define RSS_REG(_a) (!((_a)->vf_res->vf_cap_flags & \ - (VIRTCHNL_VF_OFFLOAD_RSS_AQ | \ - VIRTCHNL_VF_OFFLOAD_RSS_PF))) -#define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \ - VIRTCHNL_VF_OFFLOAD_VLAN) - struct virtchnl_vf_resource *vf_res; /* incl. 
all VSIs */ - struct virtchnl_vsi_resource *vsi_res; /* our LAN VSI */ - struct virtchnl_version_info pf_version; -#define PF_IS_V11(_a) (((_a)->pf_version.major == 1) && \ - ((_a)->pf_version.minor == 1)) - u16 msg_enable; - struct i40e_eth_stats current_stats; - struct i40e_vsi vsi; - u32 aq_wait_count; - /* RSS stuff */ - u64 hena; - u16 rss_key_size; - u16 rss_lut_size; - u8 *rss_key; - u8 *rss_lut; - /* ADQ related members */ - struct i40evf_channel_config ch_config; - u8 num_tc; - struct list_head cloud_filter_list; - /* lock to protest access to the cloud filter list */ - spinlock_t cloud_filter_list_lock; - u16 num_cloud_filters; -}; - - -/* Ethtool Private Flags */ - -/* lan device */ -struct i40e_device { - struct list_head list; - struct i40evf_adapter *vf; -}; - -/* needed by i40evf_ethtool.c */ -extern char i40evf_driver_name[]; -extern const char i40evf_driver_version[]; - -int i40evf_up(struct i40evf_adapter *adapter); -void i40evf_down(struct i40evf_adapter *adapter); -int i40evf_process_config(struct i40evf_adapter *adapter); -void i40evf_schedule_reset(struct i40evf_adapter *adapter); -void i40evf_reset(struct i40evf_adapter *adapter); -void i40evf_set_ethtool_ops(struct net_device *netdev); -void i40evf_update_stats(struct i40evf_adapter *adapter); -void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter); -int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter); -void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask); -void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter); -void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter); - -void i40e_napi_add_all(struct i40evf_adapter *adapter); -void i40e_napi_del_all(struct i40evf_adapter *adapter); - -int i40evf_send_api_ver(struct i40evf_adapter *adapter); -int i40evf_verify_api_ver(struct i40evf_adapter *adapter); -int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter); -int i40evf_get_vf_config(struct i40evf_adapter *adapter); -void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush); -void i40evf_configure_queues(struct i40evf_adapter *adapter); -void i40evf_deconfigure_queues(struct i40evf_adapter *adapter); -void i40evf_enable_queues(struct i40evf_adapter *adapter); -void i40evf_disable_queues(struct i40evf_adapter *adapter); -void i40evf_map_queues(struct i40evf_adapter *adapter); -int i40evf_request_queues(struct i40evf_adapter *adapter, int num); -void i40evf_add_ether_addrs(struct i40evf_adapter *adapter); -void i40evf_del_ether_addrs(struct i40evf_adapter *adapter); -void i40evf_add_vlans(struct i40evf_adapter *adapter); -void i40evf_del_vlans(struct i40evf_adapter *adapter); -void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags); -void i40evf_request_stats(struct i40evf_adapter *adapter); -void i40evf_request_reset(struct i40evf_adapter *adapter); -void i40evf_get_hena(struct i40evf_adapter *adapter); -void i40evf_set_hena(struct i40evf_adapter *adapter); -void i40evf_set_rss_key(struct i40evf_adapter *adapter); -void i40evf_set_rss_lut(struct i40evf_adapter *adapter); -void i40evf_enable_vlan_stripping(struct i40evf_adapter *adapter); -void i40evf_disable_vlan_stripping(struct i40evf_adapter *adapter); -void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, - enum virtchnl_ops v_opcode, - i40e_status v_retval, u8 *msg, u16 msglen); -int i40evf_config_rss(struct i40evf_adapter *adapter); -int i40evf_lan_add_device(struct i40evf_adapter *adapter); -int i40evf_lan_del_device(struct i40evf_adapter *adapter); 
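Most of the prototypes above pair with one aq_required request bit: a caller records the request by setting the bit, and the watchdog/admin-queue service task later sends the matching VIRTCHNL message and clears it. A minimal userspace sketch of that handshake (names hypothetical, locking omitted):

    #include <stdint.h>
    #include <stdio.h>

    #define FLAG_AQ_ADD_MAC_FILTER (1u << 2) /* mirrors I40EVF_FLAG_AQ_ADD_MAC_FILTER */

    static uint32_t aq_required;

    static void request_add_mac(void)
    {
        aq_required |= FLAG_AQ_ADD_MAC_FILTER;  /* caller only records the request */
    }

    static void service_task(void)
    {
        if (aq_required & FLAG_AQ_ADD_MAC_FILTER) {
            /* the real task would send the VIRTCHNL add-MAC message here */
            aq_required &= ~FLAG_AQ_ADD_MAC_FILTER;
        }
    }

    int main(void)
    {
        request_add_mac();
        service_task();
        printf("pending flags: 0x%x\n", aq_required);
        return 0;
    }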
-void i40evf_client_subtask(struct i40evf_adapter *adapter);
-void i40evf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len);
-void i40evf_notify_client_l2_params(struct i40e_vsi *vsi);
-void i40evf_notify_client_open(struct i40e_vsi *vsi);
-void i40evf_notify_client_close(struct i40e_vsi *vsi, bool reset);
-void i40evf_enable_channels(struct i40evf_adapter *adapter);
-void i40evf_disable_channels(struct i40evf_adapter *adapter);
-void i40evf_add_cloud_filter(struct i40evf_adapter *adapter);
-void i40evf_del_cloud_filter(struct i40evf_adapter *adapter);
-#endif /* _I40EVF_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_client.c b/drivers/net/ethernet/intel/i40evf/i40evf_client.c
deleted file mode 100644
index 3cc9d60d0d72..000000000000
--- a/drivers/net/ethernet/intel/i40evf/i40evf_client.c
+++ /dev/null
@@ -1,579 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-#include <linux/list.h>
-#include <linux/errno.h>
-
-#include "i40evf.h"
-#include "i40e_prototype.h"
-#include "i40evf_client.h"
-
-static
-const char i40evf_client_interface_version_str[] = I40EVF_CLIENT_VERSION_STR;
-static struct i40e_client *vf_registered_client;
-static LIST_HEAD(i40evf_devices);
-static DEFINE_MUTEX(i40evf_device_mutex);
-
-static u32 i40evf_client_virtchnl_send(struct i40e_info *ldev,
- struct i40e_client *client,
- u8 *msg, u16 len);
-
-static int i40evf_client_setup_qvlist(struct i40e_info *ldev,
- struct i40e_client *client,
- struct i40e_qvlist_info *qvlist_info);
-
-static struct i40e_ops i40evf_lan_ops = {
- .virtchnl_send = i40evf_client_virtchnl_send,
- .setup_qvlist = i40evf_client_setup_qvlist,
-};
-
-/**
- * i40evf_client_get_params - retrieve relevant client parameters
- * @vsi: VSI with parameters
- * @params: client param struct
- **/
-static
-void i40evf_client_get_params(struct i40e_vsi *vsi, struct i40e_params *params)
-{
- int i;
-
- memset(params, 0, sizeof(struct i40e_params));
- params->mtu = vsi->netdev->mtu;
- params->link_up = vsi->back->link_up;
-
- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
- params->qos.prio_qos[i].tc = 0;
- params->qos.prio_qos[i].qs_handle = vsi->qs_handle;
- }
-}
-
-/**
- * i40evf_notify_client_message - call the client message receive callback
- * @vsi: the VSI associated with this client
- * @msg: message buffer
- * @len: length of message
- *
- * If there is a client to this VSI, call the client
- **/
-void i40evf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len)
-{
- struct i40e_client_instance *cinst;
-
- if (!vsi)
- return;
-
- cinst = vsi->back->cinst;
- if (!cinst || !cinst->client || !cinst->client->ops ||
- !cinst->client->ops->virtchnl_receive) {
- dev_dbg(&vsi->back->pdev->dev,
- "Cannot locate client instance virtchnl_receive function\n");
- return;
- }
- cinst->client->ops->virtchnl_receive(&cinst->lan_info, cinst->client,
- msg, len);
-}
-
-/**
- * i40evf_notify_client_l2_params - call the client notify callback
- * @vsi: the VSI with l2 param changes
- *
- * If there is a client to this VSI, call the client
- **/
-void i40evf_notify_client_l2_params(struct i40e_vsi *vsi)
-{
- struct i40e_client_instance *cinst;
- struct i40e_params params;
-
- if (!vsi)
- return;
-
- cinst = vsi->back->cinst;
-
- if (!cinst || !cinst->client || !cinst->client->ops ||
- !cinst->client->ops->l2_param_change) {
- dev_dbg(&vsi->back->pdev->dev,
- "Cannot locate client instance l2_param_change function\n");
- return;
- }
- i40evf_client_get_params(vsi, &params);
- cinst->lan_info.params = params;
- cinst->client->ops->l2_param_change(&cinst->lan_info, cinst->client,
- &params);
-}
-
-/**
- * i40evf_notify_client_open - call the client open callback
- * @vsi: the VSI with netdev opened
- *
- * If there is a client to this netdev, call the client with open
- **/
-void i40evf_notify_client_open(struct i40e_vsi *vsi)
-{
- struct i40evf_adapter *adapter = vsi->back;
- struct i40e_client_instance *cinst = adapter->cinst;
- int ret;
-
- if (!cinst || !cinst->client || !cinst->client->ops ||
- !cinst->client->ops->open) {
- dev_dbg(&vsi->back->pdev->dev,
- "Cannot locate client instance open function\n");
- return;
- }
- if (!(test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state))) {
- ret = cinst->client->ops->open(&cinst->lan_info, cinst->client);
- if (!ret)
- set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state);
- }
-}
-
-/**
- * i40evf_client_release_qvlist - send a message to the PF to release iwarp qv map
- * @ldev: pointer to L2 context.
- *
- * Return 0 on success or < 0 on error
- **/
-static int i40evf_client_release_qvlist(struct i40e_info *ldev)
-{
- struct i40evf_adapter *adapter = ldev->vf;
- i40e_status err;
-
- if (adapter->aq_required)
- return -EAGAIN;
-
- err = i40e_aq_send_msg_to_pf(&adapter->hw,
- VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
- I40E_SUCCESS, NULL, 0, NULL);
-
- if (err)
- dev_err(&adapter->pdev->dev,
- "Unable to send iWarp vector release message to PF, error %d, aq status %d\n",
- err, adapter->hw.aq.asq_last_status);
-
- return err;
-}
-
-/**
- * i40evf_notify_client_close - call the client close callback
- * @vsi: the VSI with netdev closed
- * @reset: true when close called due to reset pending
- *
- * If there is a client to this netdev, call the client with close
- **/
-void i40evf_notify_client_close(struct i40e_vsi *vsi, bool reset)
-{
- struct i40evf_adapter *adapter = vsi->back;
- struct i40e_client_instance *cinst = adapter->cinst;
-
- if (!cinst || !cinst->client || !cinst->client->ops ||
- !cinst->client->ops->close) {
- dev_dbg(&vsi->back->pdev->dev,
- "Cannot locate client instance close function\n");
- return;
- }
- cinst->client->ops->close(&cinst->lan_info, cinst->client, reset);
- i40evf_client_release_qvlist(&cinst->lan_info);
- clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state);
-}
-
-/**
- * i40evf_client_add_instance - add a client instance to the instance list
- * @adapter: pointer to the board struct
- *
- * Returns cinst ptr on success, NULL on failure
- **/
-static struct i40e_client_instance *
-i40evf_client_add_instance(struct i40evf_adapter *adapter)
-{
- struct i40e_client_instance *cinst = NULL;
- struct i40e_vsi *vsi = &adapter->vsi;
- struct netdev_hw_addr *mac = NULL;
- struct i40e_params params;
-
- if (!vf_registered_client)
- goto out;
-
- if (adapter->cinst) {
- cinst = adapter->cinst;
- goto out;
- }
-
- cinst = kzalloc(sizeof(*cinst), GFP_KERNEL);
- if (!cinst)
- goto out;
-
- cinst->lan_info.vf = (void *)adapter;
- cinst->lan_info.netdev = vsi->netdev;
- cinst->lan_info.pcidev = adapter->pdev;
- cinst->lan_info.fid = 0;
- cinst->lan_info.ftype = I40E_CLIENT_FTYPE_VF;
- cinst->lan_info.hw_addr = adapter->hw.hw_addr;
- cinst->lan_info.ops = &i40evf_lan_ops;
- cinst->lan_info.version.major = I40EVF_CLIENT_VERSION_MAJOR;
- cinst->lan_info.version.minor = I40EVF_CLIENT_VERSION_MINOR;
- cinst->lan_info.version.build = I40EVF_CLIENT_VERSION_BUILD;
- i40evf_client_get_params(vsi, &params);
- cinst->lan_info.params = params;
- set_bit(__I40E_CLIENT_INSTANCE_NONE, &cinst->state);
-
- cinst->lan_info.msix_count =
adapter->num_iwarp_msix; - cinst->lan_info.msix_entries = - &adapter->msix_entries[adapter->iwarp_base_vector]; - - mac = list_first_entry(&cinst->lan_info.netdev->dev_addrs.list, - struct netdev_hw_addr, list); - if (mac) - ether_addr_copy(cinst->lan_info.lanmac, mac->addr); - else - dev_err(&adapter->pdev->dev, "MAC address list is empty!\n"); - - cinst->client = vf_registered_client; - adapter->cinst = cinst; -out: - return cinst; -} - -/** - * i40evf_client_del_instance - removes a client instance from the list - * @adapter: pointer to the board struct - * - **/ -static -void i40evf_client_del_instance(struct i40evf_adapter *adapter) -{ - kfree(adapter->cinst); - adapter->cinst = NULL; -} - -/** - * i40evf_client_subtask - client maintenance work - * @adapter: board private structure - **/ -void i40evf_client_subtask(struct i40evf_adapter *adapter) -{ - struct i40e_client *client = vf_registered_client; - struct i40e_client_instance *cinst; - int ret = 0; - - if (adapter->state < __I40EVF_DOWN) - return; - - /* first check client is registered */ - if (!client) - return; - - /* Add the client instance to the instance list */ - cinst = i40evf_client_add_instance(adapter); - if (!cinst) - return; - - dev_info(&adapter->pdev->dev, "Added instance of Client %s\n", - client->name); - - if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state)) { - /* Send an Open request to the client */ - - if (client->ops && client->ops->open) - ret = client->ops->open(&cinst->lan_info, client); - if (!ret) - set_bit(__I40E_CLIENT_INSTANCE_OPENED, - &cinst->state); - else - /* remove client instance */ - i40evf_client_del_instance(adapter); - } -} - -/** - * i40evf_lan_add_device - add a lan device struct to the list of lan devices - * @adapter: pointer to the board struct - * - * Returns 0 on success or none 0 on error - **/ -int i40evf_lan_add_device(struct i40evf_adapter *adapter) -{ - struct i40e_device *ldev; - int ret = 0; - - mutex_lock(&i40evf_device_mutex); - list_for_each_entry(ldev, &i40evf_devices, list) { - if (ldev->vf == adapter) { - ret = -EEXIST; - goto out; - } - } - ldev = kzalloc(sizeof(*ldev), GFP_KERNEL); - if (!ldev) { - ret = -ENOMEM; - goto out; - } - ldev->vf = adapter; - INIT_LIST_HEAD(&ldev->list); - list_add(&ldev->list, &i40evf_devices); - dev_info(&adapter->pdev->dev, "Added LAN device bus=0x%02x dev=0x%02x func=0x%02x\n", - adapter->hw.bus.bus_id, adapter->hw.bus.device, - adapter->hw.bus.func); - - /* Since in some cases register may have happened before a device gets - * added, we can schedule a subtask to go initiate the clients. 
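The sketch below illustrates the ordering the comment above describes: client registration and LAN device addition can happen in either order, and whichever side completes second is the one that ends up opening the client instance (hypothetical names, no locking):

    #include <stdio.h>

    static int client_registered, device_added, instance_open;

    /* Stand-in for the service-task path through i40evf_client_subtask(). */
    static void service_task(void)
    {
        if (client_registered && device_added && !instance_open) {
            instance_open = 1;
            printf("client instance opened\n");
        }
    }

    int main(void)
    {
        device_added = 1;      /* i40evf_lan_add_device() flags the task... */
        service_task();        /* ...but no client yet, nothing happens */
        client_registered = 1; /* i40evf_register_client() flags it again */
        service_task();        /* now the instance is created and opened */
        return 0;
    }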
- */ - adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED; - -out: - mutex_unlock(&i40evf_device_mutex); - return ret; -} - -/** - * i40evf_lan_del_device - removes a lan device from the device list - * @adapter: pointer to the board struct - * - * Returns 0 on success or non-0 on error - **/ -int i40evf_lan_del_device(struct i40evf_adapter *adapter) -{ - struct i40e_device *ldev, *tmp; - int ret = -ENODEV; - - mutex_lock(&i40evf_device_mutex); - list_for_each_entry_safe(ldev, tmp, &i40evf_devices, list) { - if (ldev->vf == adapter) { - dev_info(&adapter->pdev->dev, - "Deleted LAN device bus=0x%02x dev=0x%02x func=0x%02x\n", - adapter->hw.bus.bus_id, adapter->hw.bus.device, - adapter->hw.bus.func); - list_del(&ldev->list); - kfree(ldev); - ret = 0; - break; - } - } - - mutex_unlock(&i40evf_device_mutex); - return ret; -} - -/** - * i40evf_client_release - release client specific resources - * @client: pointer to the registered client - * - **/ -static void i40evf_client_release(struct i40e_client *client) -{ - struct i40e_client_instance *cinst; - struct i40e_device *ldev; - struct i40evf_adapter *adapter; - - mutex_lock(&i40evf_device_mutex); - list_for_each_entry(ldev, &i40evf_devices, list) { - adapter = ldev->vf; - cinst = adapter->cinst; - if (!cinst) - continue; - if (test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state)) { - if (client->ops && client->ops->close) - client->ops->close(&cinst->lan_info, client, - false); - i40evf_client_release_qvlist(&cinst->lan_info); - clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state); - - dev_warn(&adapter->pdev->dev, - "Client %s instance closed\n", client->name); - } - /* delete the client instance */ - i40evf_client_del_instance(adapter); - dev_info(&adapter->pdev->dev, "Deleted client instance of Client %s\n", - client->name); - } - mutex_unlock(&i40evf_device_mutex); -} - -/** - * i40evf_client_prepare - prepare client specific resources - * @client: pointer to the registered client - * - **/ -static void i40evf_client_prepare(struct i40e_client *client) -{ - struct i40e_device *ldev; - struct i40evf_adapter *adapter; - - mutex_lock(&i40evf_device_mutex); - list_for_each_entry(ldev, &i40evf_devices, list) { - adapter = ldev->vf; - /* Signal the watchdog to service the client */ - adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED; - } - mutex_unlock(&i40evf_device_mutex); -} - -/** - * i40evf_client_virtchnl_send - send a message to the PF instance - * @ldev: pointer to L2 context. - * @client: Client pointer. - * @msg: pointer to message buffer - * @len: message length - * - * Return 0 on success or < 0 on error - **/ -static u32 i40evf_client_virtchnl_send(struct i40e_info *ldev, - struct i40e_client *client, - u8 *msg, u16 len) -{ - struct i40evf_adapter *adapter = ldev->vf; - i40e_status err; - - if (adapter->aq_required) - return -EAGAIN; - - err = i40e_aq_send_msg_to_pf(&adapter->hw, VIRTCHNL_OP_IWARP, - I40E_SUCCESS, msg, len, NULL); - if (err) - dev_err(&adapter->pdev->dev, "Unable to send iWarp message to PF, error %d, aq status %d\n", - err, adapter->hw.aq.asq_last_status); - - return err; -} - -/** - * i40evf_client_setup_qvlist - send a message to the PF to setup iwarp qv map - * @ldev: pointer to L2 context. - * @client: Client pointer. 
- * @qvlist_info: queue and vector list - * - * Return 0 on success or < 0 on error - **/ -static int i40evf_client_setup_qvlist(struct i40e_info *ldev, - struct i40e_client *client, - struct i40e_qvlist_info *qvlist_info) -{ - struct virtchnl_iwarp_qvlist_info *v_qvlist_info; - struct i40evf_adapter *adapter = ldev->vf; - struct i40e_qv_info *qv_info; - i40e_status err; - u32 v_idx, i; - u32 msg_size; - - if (adapter->aq_required) - return -EAGAIN; - - /* A quick check on whether the vectors belong to the client */ - for (i = 0; i < qvlist_info->num_vectors; i++) { - qv_info = &qvlist_info->qv_info[i]; - if (!qv_info) - continue; - v_idx = qv_info->v_idx; - if ((v_idx >= - (adapter->iwarp_base_vector + adapter->num_iwarp_msix)) || - (v_idx < adapter->iwarp_base_vector)) - return -EINVAL; - } - - v_qvlist_info = (struct virtchnl_iwarp_qvlist_info *)qvlist_info; - msg_size = sizeof(struct virtchnl_iwarp_qvlist_info) + - (sizeof(struct virtchnl_iwarp_qv_info) * - (v_qvlist_info->num_vectors - 1)); - - adapter->client_pending |= BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP); - err = i40e_aq_send_msg_to_pf(&adapter->hw, - VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP, - I40E_SUCCESS, (u8 *)v_qvlist_info, msg_size, NULL); - - if (err) { - dev_err(&adapter->pdev->dev, - "Unable to send iWarp vector config message to PF, error %d, aq status %d\n", - err, adapter->hw.aq.asq_last_status); - goto out; - } - - err = -EBUSY; - for (i = 0; i < 5; i++) { - msleep(100); - if (!(adapter->client_pending & - BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP))) { - err = 0; - break; - } - } -out: - return err; -} - -/** - * i40evf_register_client - Register a i40e client driver with the L2 driver - * @client: pointer to the i40e_client struct - * - * Returns 0 on success or non-0 on error - **/ -int i40evf_register_client(struct i40e_client *client) -{ - int ret = 0; - - if (!client) { - ret = -EIO; - goto out; - } - - if (strlen(client->name) == 0) { - pr_info("i40evf: Failed to register client with no name\n"); - ret = -EIO; - goto out; - } - - if (vf_registered_client) { - pr_info("i40evf: Client %s has already been registered!\n", - client->name); - ret = -EEXIST; - goto out; - } - - if ((client->version.major != I40EVF_CLIENT_VERSION_MAJOR) || - (client->version.minor != I40EVF_CLIENT_VERSION_MINOR)) { - pr_info("i40evf: Failed to register client %s due to mismatched client interface version\n", - client->name); - pr_info("Client is using version: %02d.%02d.%02d while LAN driver supports %s\n", - client->version.major, client->version.minor, - client->version.build, - i40evf_client_interface_version_str); - ret = -EIO; - goto out; - } - - vf_registered_client = client; - - i40evf_client_prepare(client); - - pr_info("i40evf: Registered client %s with return code %d\n", - client->name, ret); -out: - return ret; -} -EXPORT_SYMBOL(i40evf_register_client); - -/** - * i40evf_unregister_client - Unregister a i40e client driver with the L2 driver - * @client: pointer to the i40e_client struct - * - * Returns 0 on success or non-0 on error - **/ -int i40evf_unregister_client(struct i40e_client *client) -{ - int ret = 0; - - /* When a unregister request comes through we would have to send - * a close for each of the client instances that were opened. - * client_release function is called to handle this. 
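The msg_size arithmetic in i40evf_client_setup_qvlist above is the classic trailing-array sizing pattern: the virtchnl struct declares a one-element array, so a message carrying n entries needs sizeof(struct) + (n - 1) * sizeof(entry). A self-contained sketch with stand-in types:

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    /* Stand-ins for the virtchnl types; layout is illustrative only. */
    struct qv_info { uint32_t v_idx; uint16_t ceq_idx, aeq_idx; uint8_t itr_idx; };
    struct qvlist_info { uint32_t num_vectors; struct qv_info qv_info[1]; };

    int main(void)
    {
        uint32_t n = 4;
        size_t msg_size = sizeof(struct qvlist_info) +
                          (n - 1) * sizeof(struct qv_info);

        printf("message bytes for %u vectors: %zu\n", n, msg_size);
        return 0;
    }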
- */ - i40evf_client_release(client); - - if (vf_registered_client != client) { - pr_info("i40evf: Client %s has not been registered\n", - client->name); - ret = -ENODEV; - goto out; - } - vf_registered_client = NULL; - pr_info("i40evf: Unregistered client %s\n", client->name); -out: - return ret; -} -EXPORT_SYMBOL(i40evf_unregister_client); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_client.h b/drivers/net/ethernet/intel/i40evf/i40evf_client.h deleted file mode 100644 index 5585f362048a..000000000000 --- a/drivers/net/ethernet/intel/i40evf/i40evf_client.h +++ /dev/null @@ -1,169 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ - -#ifndef _I40EVF_CLIENT_H_ -#define _I40EVF_CLIENT_H_ - -#define I40EVF_CLIENT_STR_LENGTH 10 - -/* Client interface version should be updated anytime there is a change in the - * existing APIs or data structures. - */ -#define I40EVF_CLIENT_VERSION_MAJOR 0 -#define I40EVF_CLIENT_VERSION_MINOR 01 -#define I40EVF_CLIENT_VERSION_BUILD 00 -#define I40EVF_CLIENT_VERSION_STR \ - __stringify(I40EVF_CLIENT_VERSION_MAJOR) "." \ - __stringify(I40EVF_CLIENT_VERSION_MINOR) "." \ - __stringify(I40EVF_CLIENT_VERSION_BUILD) - -struct i40e_client_version { - u8 major; - u8 minor; - u8 build; - u8 rsvd; -}; - -enum i40e_client_state { - __I40E_CLIENT_NULL, - __I40E_CLIENT_REGISTERED -}; - -enum i40e_client_instance_state { - __I40E_CLIENT_INSTANCE_NONE, - __I40E_CLIENT_INSTANCE_OPENED, -}; - -struct i40e_ops; -struct i40e_client; - -/* HW does not define a type value for AEQ; only for RX/TX and CEQ. - * In order for us to keep the interface simple, SW will define a - * unique type value for AEQ. - */ -#define I40E_QUEUE_TYPE_PE_AEQ 0x80 -#define I40E_QUEUE_INVALID_IDX 0xFFFF - -struct i40e_qv_info { - u32 v_idx; /* msix_vector */ - u16 ceq_idx; - u16 aeq_idx; - u8 itr_idx; -}; - -struct i40e_qvlist_info { - u32 num_vectors; - struct i40e_qv_info qv_info[1]; -}; - -#define I40E_CLIENT_MSIX_ALL 0xFFFFFFFF - -/* set of LAN parameters useful for clients managed by LAN */ - -/* Struct to hold per priority info */ -struct i40e_prio_qos_params { - u16 qs_handle; /* qs handle for prio */ - u8 tc; /* TC mapped to prio */ - u8 reserved; -}; - -#define I40E_CLIENT_MAX_USER_PRIORITY 8 -/* Struct to hold Client QoS */ -struct i40e_qos_params { - struct i40e_prio_qos_params prio_qos[I40E_CLIENT_MAX_USER_PRIORITY]; -}; - -struct i40e_params { - struct i40e_qos_params qos; - u16 mtu; - u16 link_up; /* boolean */ -}; - -/* Structure to hold LAN device info for a client device */ -struct i40e_info { - struct i40e_client_version version; - u8 lanmac[6]; - struct net_device *netdev; - struct pci_dev *pcidev; - u8 __iomem *hw_addr; - u8 fid; /* function id, PF id or VF id */ -#define I40E_CLIENT_FTYPE_PF 0 -#define I40E_CLIENT_FTYPE_VF 1 - u8 ftype; /* function type, PF or VF */ - void *vf; /* cast to i40evf_adapter */ - - /* All L2 params that could change during the life span of the device - * and needs to be communicated to the client when they change - */ - struct i40e_params params; - struct i40e_ops *ops; - - u16 msix_count; /* number of msix vectors*/ - /* Array down below will be dynamically allocated based on msix_count */ - struct msix_entry *msix_entries; - u16 itr_index; /* Which ITR index the PE driver is suppose to use */ -}; - -struct i40e_ops { - /* setup_q_vector_list enables queues with a particular vector */ - int (*setup_qvlist)(struct i40e_info *ldev, struct i40e_client *client, - struct i40e_qvlist_info *qv_info); - - 
u32 (*virtchnl_send)(struct i40e_info *ldev, struct i40e_client *client,
- u8 *msg, u16 len);
-
- /* If the PE Engine is unresponsive, RDMA driver can request a reset.*/
- void (*request_reset)(struct i40e_info *ldev,
- struct i40e_client *client);
-};
-
-struct i40e_client_ops {
- /* Should be called from register_client() or whenever the driver is
- * ready to create a specific client instance.
- */
- int (*open)(struct i40e_info *ldev, struct i40e_client *client);
-
- /* Should be closed when netdev is unavailable or when unregister
- * call comes in. If the close happens due to a reset, set the reset
- * bit to true.
- */
- void (*close)(struct i40e_info *ldev, struct i40e_client *client,
- bool reset);
-
- /* called when some l2 managed parameters changes - mss */
- void (*l2_param_change)(struct i40e_info *ldev,
- struct i40e_client *client,
- struct i40e_params *params);
-
- /* called when a message is received from the PF */
- int (*virtchnl_receive)(struct i40e_info *ldev,
- struct i40e_client *client,
- u8 *msg, u16 len);
-};
-
-/* Client device */
-struct i40e_client_instance {
- struct list_head list;
- struct i40e_info lan_info;
- struct i40e_client *client;
- unsigned long state;
-};
-
-struct i40e_client {
- struct list_head list; /* list of registered clients */
- char name[I40EVF_CLIENT_STR_LENGTH];
- struct i40e_client_version version;
- unsigned long state; /* client state */
- atomic_t ref_cnt; /* Count of all the client devices of this kind */
- u32 flags;
-#define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0)
-#define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2)
- u8 type;
-#define I40E_CLIENT_IWARP 0
- struct i40e_client_ops *ops; /* client ops provided by the client */
-};
-
-/* used by clients */
-int i40evf_register_client(struct i40e_client *client);
-int i40evf_unregister_client(struct i40e_client *client);
-#endif /* _I40EVF_CLIENT_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
deleted file mode 100644
index 69efe0aec76a..000000000000
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ /dev/null
@@ -1,820 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-/* ethtool support for i40evf */
-#include "i40evf.h"
-
-#include <linux/uaccess.h>
-
-struct i40evf_stats {
- char stat_string[ETH_GSTRING_LEN];
- int stat_offset;
-};
-
-#define I40EVF_STAT(_name, _stat) { \
- .stat_string = _name, \
- .stat_offset = offsetof(struct i40evf_adapter, _stat) \
-}
-
-/* All stats are u64, so we don't need to track the size of the field.
*/ -static const struct i40evf_stats i40evf_gstrings_stats[] = { - I40EVF_STAT("rx_bytes", current_stats.rx_bytes), - I40EVF_STAT("rx_unicast", current_stats.rx_unicast), - I40EVF_STAT("rx_multicast", current_stats.rx_multicast), - I40EVF_STAT("rx_broadcast", current_stats.rx_broadcast), - I40EVF_STAT("rx_discards", current_stats.rx_discards), - I40EVF_STAT("rx_unknown_protocol", current_stats.rx_unknown_protocol), - I40EVF_STAT("tx_bytes", current_stats.tx_bytes), - I40EVF_STAT("tx_unicast", current_stats.tx_unicast), - I40EVF_STAT("tx_multicast", current_stats.tx_multicast), - I40EVF_STAT("tx_broadcast", current_stats.tx_broadcast), - I40EVF_STAT("tx_discards", current_stats.tx_discards), - I40EVF_STAT("tx_errors", current_stats.tx_errors), -}; - -#define I40EVF_GLOBAL_STATS_LEN ARRAY_SIZE(i40evf_gstrings_stats) -#define I40EVF_QUEUE_STATS_LEN(_dev) \ - (((struct i40evf_adapter *)\ - netdev_priv(_dev))->num_active_queues \ - * 2 * (sizeof(struct i40e_queue_stats) / sizeof(u64))) -#define I40EVF_STATS_LEN(_dev) \ - (I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev)) - -/* For now we have one and only one private flag and it is only defined - * when we have support for the SKIP_CPU_SYNC DMA attribute. Instead - * of leaving all this code sitting around empty we will strip it unless - * our one private flag is actually available. - */ -struct i40evf_priv_flags { - char flag_string[ETH_GSTRING_LEN]; - u32 flag; - bool read_only; -}; - -#define I40EVF_PRIV_FLAG(_name, _flag, _read_only) { \ - .flag_string = _name, \ - .flag = _flag, \ - .read_only = _read_only, \ -} - -static const struct i40evf_priv_flags i40evf_gstrings_priv_flags[] = { - I40EVF_PRIV_FLAG("legacy-rx", I40EVF_FLAG_LEGACY_RX, 0), -}; - -#define I40EVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40evf_gstrings_priv_flags) - -/** - * i40evf_get_link_ksettings - Get Link Speed and Duplex settings - * @netdev: network interface device structure - * @cmd: ethtool command - * - * Reports speed/duplex settings. Because this is a VF, we don't know what - * kind of link we really have, so we fake it. - **/ -static int i40evf_get_link_ksettings(struct net_device *netdev, - struct ethtool_link_ksettings *cmd) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - - ethtool_link_ksettings_zero_link_mode(cmd, supported); - cmd->base.autoneg = AUTONEG_DISABLE; - cmd->base.port = PORT_NONE; - /* Set speed and duplex */ - switch (adapter->link_speed) { - case I40E_LINK_SPEED_40GB: - cmd->base.speed = SPEED_40000; - break; - case I40E_LINK_SPEED_25GB: -#ifdef SPEED_25000 - cmd->base.speed = SPEED_25000; -#else - netdev_info(netdev, - "Speed is 25G, display not supported by this version of ethtool.\n"); -#endif - break; - case I40E_LINK_SPEED_20GB: - cmd->base.speed = SPEED_20000; - break; - case I40E_LINK_SPEED_10GB: - cmd->base.speed = SPEED_10000; - break; - case I40E_LINK_SPEED_1GB: - cmd->base.speed = SPEED_1000; - break; - case I40E_LINK_SPEED_100MB: - cmd->base.speed = SPEED_100; - break; - default: - break; - } - cmd->base.duplex = DUPLEX_FULL; - - return 0; -} - -/** - * i40evf_get_sset_count - Get length of string set - * @netdev: network interface device structure - * @sset: id of string set - * - * Reports size of string table. This driver only supports - * strings for statistics. 
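A userspace sketch of the two mechanisms above: the offsetof()-based stat table that lets one generic loop copy every named u64 out of the adapter, and the string-set count that i40evf_get_sset_count (below) must keep in sync with it; four per-queue stats (Tx/Rx packets and bytes) ride on top of the fixed table. Type and field names here are stand-ins:

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Stand-ins for the driver types; names are illustrative only. */
    struct queue_stats { uint64_t packets, bytes; };
    struct adapter {
        int num_active_queues;
        struct { uint64_t rx_bytes, tx_bytes; } current_stats;
    };

    /* Same offsetof() trick as I40EVF_STAT above. */
    struct stat_def { const char *name; size_t off; };
    #define STAT(n, f) { n, offsetof(struct adapter, f) }

    static const struct stat_def defs[] = {
        STAT("rx_bytes", current_stats.rx_bytes),
        STAT("tx_bytes", current_stats.tx_bytes),
    };

    int main(void)
    {
        struct adapter a = { 8, { 1111, 2222 } };
        size_t nglobal = sizeof(defs) / sizeof(defs[0]);
        /* per queue: tx and rx rings, packets + bytes each */
        size_t per_q = 2 * (sizeof(struct queue_stats) / sizeof(uint64_t));
        size_t i;

        for (i = 0; i < nglobal; i++)
            printf("%s = %llu\n", defs[i].name, (unsigned long long)
                   *(uint64_t *)((char *)&a + defs[i].off));
        printf("ETH_SS_STATS count: %zu\n",
               nglobal + a.num_active_queues * per_q);
        return 0;
    }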
- **/ -static int i40evf_get_sset_count(struct net_device *netdev, int sset) -{ - if (sset == ETH_SS_STATS) - return I40EVF_STATS_LEN(netdev); - else if (sset == ETH_SS_PRIV_FLAGS) - return I40EVF_PRIV_FLAGS_STR_LEN; - else - return -EINVAL; -} - -/** - * i40evf_get_ethtool_stats - report device statistics - * @netdev: network interface device structure - * @stats: ethtool statistics structure - * @data: pointer to data buffer - * - * All statistics are added to the data buffer as an array of u64. - **/ -static void i40evf_get_ethtool_stats(struct net_device *netdev, - struct ethtool_stats *stats, u64 *data) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - unsigned int i, j; - char *p; - - for (i = 0; i < I40EVF_GLOBAL_STATS_LEN; i++) { - p = (char *)adapter + i40evf_gstrings_stats[i].stat_offset; - data[i] = *(u64 *)p; - } - for (j = 0; j < adapter->num_active_queues; j++) { - data[i++] = adapter->tx_rings[j].stats.packets; - data[i++] = adapter->tx_rings[j].stats.bytes; - } - for (j = 0; j < adapter->num_active_queues; j++) { - data[i++] = adapter->rx_rings[j].stats.packets; - data[i++] = adapter->rx_rings[j].stats.bytes; - } -} - -/** - * i40evf_get_strings - Get string set - * @netdev: network interface device structure - * @sset: id of string set - * @data: buffer for string data - * - * Builds stats string table. - **/ -static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - u8 *p = data; - int i; - - if (sset == ETH_SS_STATS) { - for (i = 0; i < (int)I40EVF_GLOBAL_STATS_LEN; i++) { - memcpy(p, i40evf_gstrings_stats[i].stat_string, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } - for (i = 0; i < adapter->num_active_queues; i++) { - snprintf(p, ETH_GSTRING_LEN, "tx-%u.packets", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, "tx-%u.bytes", i); - p += ETH_GSTRING_LEN; - } - for (i = 0; i < adapter->num_active_queues; i++) { - snprintf(p, ETH_GSTRING_LEN, "rx-%u.packets", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, "rx-%u.bytes", i); - p += ETH_GSTRING_LEN; - } - } else if (sset == ETH_SS_PRIV_FLAGS) { - for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) { - snprintf(p, ETH_GSTRING_LEN, "%s", - i40evf_gstrings_priv_flags[i].flag_string); - p += ETH_GSTRING_LEN; - } - } -} - -/** - * i40evf_get_priv_flags - report device private flags - * @netdev: network interface device structure - * - * The get string set count and the string set should be matched for each - * flag returned. Add new strings for each flag to the i40e_gstrings_priv_flags - * array. - * - * Returns a u32 bitmap of flags. 
- **/ -static u32 i40evf_get_priv_flags(struct net_device *netdev) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - u32 i, ret_flags = 0; - - for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) { - const struct i40evf_priv_flags *priv_flags; - - priv_flags = &i40evf_gstrings_priv_flags[i]; - - if (priv_flags->flag & adapter->flags) - ret_flags |= BIT(i); - } - - return ret_flags; -} - -/** - * i40evf_set_priv_flags - set private flags - * @netdev: network interface device structure - * @flags: bit flags to be set - **/ -static int i40evf_set_priv_flags(struct net_device *netdev, u32 flags) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - u32 orig_flags, new_flags, changed_flags; - u32 i; - - orig_flags = READ_ONCE(adapter->flags); - new_flags = orig_flags; - - for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) { - const struct i40evf_priv_flags *priv_flags; - - priv_flags = &i40evf_gstrings_priv_flags[i]; - - if (flags & BIT(i)) - new_flags |= priv_flags->flag; - else - new_flags &= ~(priv_flags->flag); - - if (priv_flags->read_only && - ((orig_flags ^ new_flags) & ~BIT(i))) - return -EOPNOTSUPP; - } - - /* Before we finalize any flag changes, any checks which we need to - * perform to determine if the new flags will be supported should go - * here... - */ - - /* Compare and exchange the new flags into place. If we failed, that - * is if cmpxchg returns anything but the old value, this means - * something else must have modified the flags variable since we - * copied it. We'll just punt with an error and log something in the - * message buffer. - */ - if (cmpxchg(&adapter->flags, orig_flags, new_flags) != orig_flags) { - dev_warn(&adapter->pdev->dev, - "Unable to update adapter->flags as it was modified by another thread...\n"); - return -EAGAIN; - } - - changed_flags = orig_flags ^ new_flags; - - /* Process any additional changes needed as a result of flag changes. - * The changed_flags value reflects the list of bits that were changed - * in the code above. - */ - - /* issue a reset to force legacy-rx change to take effect */ - if (changed_flags & I40EVF_FLAG_LEGACY_RX) { - if (netif_running(netdev)) { - adapter->flags |= I40EVF_FLAG_RESET_NEEDED; - schedule_work(&adapter->reset_task); - } - } - - return 0; -} - -/** - * i40evf_get_msglevel - Get debug message level - * @netdev: network interface device structure - * - * Returns current debug message level. - **/ -static u32 i40evf_get_msglevel(struct net_device *netdev) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - - return adapter->msg_enable; -} - -/** - * i40evf_set_msglevel - Set debug message level - * @netdev: network interface device structure - * @data: message level - * - * Set current debug message level. Higher values cause the driver to - * be noisier. - **/ -static void i40evf_set_msglevel(struct net_device *netdev, u32 data) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - - if (I40E_DEBUG_USER & data) - adapter->hw.debug_mask = data; - adapter->msg_enable = data; -} - -/** - * i40evf_get_drvinfo - Get driver info - * @netdev: network interface device structure - * @drvinfo: ethool driver info structure - * - * Returns information about the driver and device for display to the user. 
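The cmpxchg() in i40evf_set_priv_flags above is the lock-free read-modify-write pattern: recompute new_flags from a snapshot and only store it if nothing raced in between. A hedged userspace analogue using C11 atomics:

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic unsigned int flags;

    static int set_flag(unsigned int bit)
    {
        unsigned int orig = atomic_load(&flags);
        unsigned int next = orig | bit;

        /* Fails (returns 0) if another thread changed flags in between. */
        return atomic_compare_exchange_strong(&flags, &orig, next);
    }

    int main(void)
    {
        int ok = set_flag(0x8000u);

        printf("updated: %d, flags: 0x%x\n", ok, atomic_load(&flags));
        return 0;
    }

The driver's version punts with -EAGAIN on failure rather than retrying, which is enough for a rarely written flags word.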
- **/ -static void i40evf_get_drvinfo(struct net_device *netdev, - struct ethtool_drvinfo *drvinfo) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - - strlcpy(drvinfo->driver, i40evf_driver_name, 32); - strlcpy(drvinfo->version, i40evf_driver_version, 32); - strlcpy(drvinfo->fw_version, "N/A", 4); - strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); - drvinfo->n_priv_flags = I40EVF_PRIV_FLAGS_STR_LEN; -} - -/** - * i40evf_get_ringparam - Get ring parameters - * @netdev: network interface device structure - * @ring: ethtool ringparam structure - * - * Returns current ring parameters. TX and RX rings are reported separately, - * but the number of rings is not reported. - **/ -static void i40evf_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - - ring->rx_max_pending = I40EVF_MAX_RXD; - ring->tx_max_pending = I40EVF_MAX_TXD; - ring->rx_pending = adapter->rx_desc_count; - ring->tx_pending = adapter->tx_desc_count; -} - -/** - * i40evf_set_ringparam - Set ring parameters - * @netdev: network interface device structure - * @ring: ethtool ringparam structure - * - * Sets ring parameters. TX and RX rings are controlled separately, but the - * number of rings is not specified, so all rings get the same settings. - **/ -static int i40evf_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - u32 new_rx_count, new_tx_count; - - if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) - return -EINVAL; - - new_tx_count = clamp_t(u32, ring->tx_pending, - I40EVF_MIN_TXD, - I40EVF_MAX_TXD); - new_tx_count = ALIGN(new_tx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE); - - new_rx_count = clamp_t(u32, ring->rx_pending, - I40EVF_MIN_RXD, - I40EVF_MAX_RXD); - new_rx_count = ALIGN(new_rx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE); - - /* if nothing to do return success */ - if ((new_tx_count == adapter->tx_desc_count) && - (new_rx_count == adapter->rx_desc_count)) - return 0; - - adapter->tx_desc_count = new_tx_count; - adapter->rx_desc_count = new_rx_count; - - if (netif_running(netdev)) { - adapter->flags |= I40EVF_FLAG_RESET_NEEDED; - schedule_work(&adapter->reset_task); - } - - return 0; -} - -/** - * __i40evf_get_coalesce - get per-queue coalesce settings - * @netdev: the netdev to check - * @ec: ethtool coalesce data structure - * @queue: which queue to pick - * - * Gets the per-queue settings for coalescence. Specifically Rx and Tx usecs - * are per queue. If queue is <0 then we default to queue 0 as the - * representative value. - **/ -static int __i40evf_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec, - int queue) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - struct i40e_vsi *vsi = &adapter->vsi; - struct i40e_ring *rx_ring, *tx_ring; - - ec->tx_max_coalesced_frames = vsi->work_limit; - ec->rx_max_coalesced_frames = vsi->work_limit; - - /* Rx and Tx usecs per queue value. If user doesn't specify the - * queue, return queue 0's value to represent. 
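For the coalesce reads just below: a ring's itr_setting packs the microsecond value together with an adaptive ("dynamic") flag bit, which is why the getter masks with ~I40E_ITR_DYNAMIC. A sketch of the encoding (the flag value here is an assumption for illustration):

    #include <stdio.h>
    #include <stdint.h>

    #define ITR_DYNAMIC 0x8000u   /* assumed stand-in for I40E_ITR_DYNAMIC */

    int main(void)
    {
        uint16_t itr_setting = 50 | ITR_DYNAMIC;   /* 50 usecs, adaptive on */

        printf("usecs: %u, adaptive: %d\n",
               itr_setting & ~ITR_DYNAMIC, !!(itr_setting & ITR_DYNAMIC));
        return 0;
    }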
- */ - if (queue < 0) - queue = 0; - else if (queue >= adapter->num_active_queues) - return -EINVAL; - - rx_ring = &adapter->rx_rings[queue]; - tx_ring = &adapter->tx_rings[queue]; - - if (ITR_IS_DYNAMIC(rx_ring->itr_setting)) - ec->use_adaptive_rx_coalesce = 1; - - if (ITR_IS_DYNAMIC(tx_ring->itr_setting)) - ec->use_adaptive_tx_coalesce = 1; - - ec->rx_coalesce_usecs = rx_ring->itr_setting & ~I40E_ITR_DYNAMIC; - ec->tx_coalesce_usecs = tx_ring->itr_setting & ~I40E_ITR_DYNAMIC; - - return 0; -} - -/** - * i40evf_get_coalesce - Get interrupt coalescing settings - * @netdev: network interface device structure - * @ec: ethtool coalesce structure - * - * Returns current coalescing settings. This is referred to elsewhere in the - * driver as Interrupt Throttle Rate, as this is how the hardware describes - * this functionality. Note that if per-queue settings have been modified this - * only represents the settings of queue 0. - **/ -static int i40evf_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) -{ - return __i40evf_get_coalesce(netdev, ec, -1); -} - -/** - * i40evf_get_per_queue_coalesce - get coalesce values for specific queue - * @netdev: netdev to read - * @ec: coalesce settings from ethtool - * @queue: the queue to read - * - * Read specific queue's coalesce settings. - **/ -static int i40evf_get_per_queue_coalesce(struct net_device *netdev, - u32 queue, - struct ethtool_coalesce *ec) -{ - return __i40evf_get_coalesce(netdev, ec, queue); -} - -/** - * i40evf_set_itr_per_queue - set ITR values for specific queue - * @adapter: the VF adapter struct to set values for - * @ec: coalesce settings from ethtool - * @queue: the queue to modify - * - * Change the ITR settings for a specific queue. - **/ -static void i40evf_set_itr_per_queue(struct i40evf_adapter *adapter, - struct ethtool_coalesce *ec, - int queue) -{ - struct i40e_ring *rx_ring = &adapter->rx_rings[queue]; - struct i40e_ring *tx_ring = &adapter->tx_rings[queue]; - struct i40e_q_vector *q_vector; - - rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs); - tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs); - - rx_ring->itr_setting |= I40E_ITR_DYNAMIC; - if (!ec->use_adaptive_rx_coalesce) - rx_ring->itr_setting ^= I40E_ITR_DYNAMIC; - - tx_ring->itr_setting |= I40E_ITR_DYNAMIC; - if (!ec->use_adaptive_tx_coalesce) - tx_ring->itr_setting ^= I40E_ITR_DYNAMIC; - - q_vector = rx_ring->q_vector; - q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting); - - q_vector = tx_ring->q_vector; - q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting); - - /* The interrupt handler itself will take care of programming - * the Tx and Rx ITR values based on the values we have entered - * into the q_vector, no need to write the values now. - */ -} - -/** - * __i40evf_set_coalesce - set coalesce settings for particular queue - * @netdev: the netdev to change - * @ec: ethtool coalesce settings - * @queue: the queue to change - * - * Sets the coalesce settings for a particular queue. 
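i40evf_set_itr_per_queue above uses a set-then-conditionally-toggle idiom: OR the dynamic bit in unconditionally, then XOR it back out when adaptive coalescing is off. The same thing in a runnable form (flag value assumed as in the previous sketch):

    #include <stdio.h>
    #include <stdint.h>

    #define ITR_DYNAMIC 0x8000u   /* same assumed flag value as above */

    static uint16_t apply(uint16_t usecs, int adaptive)
    {
        uint16_t v = usecs | ITR_DYNAMIC;   /* assume adaptive first... */

        if (!adaptive)
            v ^= ITR_DYNAMIC;               /* ...then undo when it is off */
        return v;
    }

    int main(void)
    {
        printf("on: 0x%x, off: 0x%x\n", apply(50, 1), apply(50, 0));
        return 0;
    }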
- **/ -static int __i40evf_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec, - int queue) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - struct i40e_vsi *vsi = &adapter->vsi; - int i; - - if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq) - vsi->work_limit = ec->tx_max_coalesced_frames_irq; - - if (ec->rx_coalesce_usecs == 0) { - if (ec->use_adaptive_rx_coalesce) - netif_info(adapter, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n"); - } else if ((ec->rx_coalesce_usecs < I40E_MIN_ITR) || - (ec->rx_coalesce_usecs > I40E_MAX_ITR)) { - netif_info(adapter, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n"); - return -EINVAL; - } - - else - if (ec->tx_coalesce_usecs == 0) { - if (ec->use_adaptive_tx_coalesce) - netif_info(adapter, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n"); - } else if ((ec->tx_coalesce_usecs < I40E_MIN_ITR) || - (ec->tx_coalesce_usecs > I40E_MAX_ITR)) { - netif_info(adapter, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n"); - return -EINVAL; - } - - /* Rx and Tx usecs has per queue value. If user doesn't specify the - * queue, apply to all queues. - */ - if (queue < 0) { - for (i = 0; i < adapter->num_active_queues; i++) - i40evf_set_itr_per_queue(adapter, ec, i); - } else if (queue < adapter->num_active_queues) { - i40evf_set_itr_per_queue(adapter, ec, queue); - } else { - netif_info(adapter, drv, netdev, "Invalid queue value, queue range is 0 - %d\n", - adapter->num_active_queues - 1); - return -EINVAL; - } - - return 0; -} - -/** - * i40evf_set_coalesce - Set interrupt coalescing settings - * @netdev: network interface device structure - * @ec: ethtool coalesce structure - * - * Change current coalescing settings for every queue. - **/ -static int i40evf_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) -{ - return __i40evf_set_coalesce(netdev, ec, -1); -} - -/** - * i40evf_set_per_queue_coalesce - set specific queue's coalesce settings - * @netdev: the netdev to change - * @ec: ethtool's coalesce settings - * @queue: the queue to modify - * - * Modifies a specific queue's coalesce settings. - */ -static int i40evf_set_per_queue_coalesce(struct net_device *netdev, - u32 queue, - struct ethtool_coalesce *ec) -{ - return __i40evf_set_coalesce(netdev, ec, queue); -} - -/** - * i40evf_get_rxnfc - command to get RX flow classification rules - * @netdev: network interface device structure - * @cmd: ethtool rxnfc command - * @rule_locs: pointer to store rule locations - * - * Returns Success if the command is supported. - **/ -static int i40evf_get_rxnfc(struct net_device *netdev, - struct ethtool_rxnfc *cmd, - u32 *rule_locs) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - int ret = -EOPNOTSUPP; - - switch (cmd->cmd) { - case ETHTOOL_GRXRINGS: - cmd->data = adapter->num_active_queues; - ret = 0; - break; - case ETHTOOL_GRXFH: - netdev_info(netdev, - "RSS hash info is not available to vf, use pf.\n"); - break; - default: - break; - } - - return ret; -} -/** - * i40evf_get_channels: get the number of channels supported by the device - * @netdev: network interface device structure - * @ch: channel information structure - * - * For the purposes of our device, we only use combined channels, i.e. a tx/rx - * queue pair. Report one extra channel to match our "other" MSI-X vector. 
- **/ -static void i40evf_get_channels(struct net_device *netdev, - struct ethtool_channels *ch) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - - /* Report maximum channels */ - ch->max_combined = I40EVF_MAX_REQ_QUEUES; - - ch->max_other = NONQ_VECS; - ch->other_count = NONQ_VECS; - - ch->combined_count = adapter->num_active_queues; -} - -/** - * i40evf_set_channels: set the new channel count - * @netdev: network interface device structure - * @ch: channel information structure - * - * Negotiate a new number of channels with the PF then do a reset. During - * reset we'll realloc queues and fix the RSS table. Returns 0 on success, - * negative on failure. - **/ -static int i40evf_set_channels(struct net_device *netdev, - struct ethtool_channels *ch) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - int num_req = ch->combined_count; - - if (num_req != adapter->num_active_queues && - !(adapter->vf_res->vf_cap_flags & - VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)) { - dev_info(&adapter->pdev->dev, "PF is not capable of queue negotiation.\n"); - return -EINVAL; - } - - if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && - adapter->num_tc) { - dev_info(&adapter->pdev->dev, "Cannot set channels since ADq is enabled.\n"); - return -EINVAL; - } - - /* All of these should have already been checked by ethtool before this - * even gets to us, but just to be sure. - */ - if (num_req <= 0 || num_req > I40EVF_MAX_REQ_QUEUES) - return -EINVAL; - - if (ch->rx_count || ch->tx_count || ch->other_count != NONQ_VECS) - return -EINVAL; - - adapter->num_req_queues = num_req; - return i40evf_request_queues(adapter, num_req); -} - -/** - * i40evf_get_rxfh_key_size - get the RSS hash key size - * @netdev: network interface device structure - * - * Returns the table size. - **/ -static u32 i40evf_get_rxfh_key_size(struct net_device *netdev) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - - return adapter->rss_key_size; -} - -/** - * i40evf_get_rxfh_indir_size - get the rx flow hash indirection table size - * @netdev: network interface device structure - * - * Returns the table size. - **/ -static u32 i40evf_get_rxfh_indir_size(struct net_device *netdev) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - - return adapter->rss_lut_size; -} - -/** - * i40evf_get_rxfh - get the rx flow hash indirection table - * @netdev: network interface device structure - * @indir: indirection table - * @key: hash key - * @hfunc: hash function in use - * - * Reads the indirection table directly from the hardware. Always returns 0. - **/ -static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, - u8 *hfunc) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - u16 i; - - if (hfunc) - *hfunc = ETH_RSS_HASH_TOP; - if (!indir) - return 0; - - memcpy(key, adapter->rss_key, adapter->rss_key_size); - - /* Each 32 bits pointed by 'indir' is stored with a lut entry */ - for (i = 0; i < adapter->rss_lut_size; i++) - indir[i] = (u32)adapter->rss_lut[i]; - - return 0; -} - -/** - * i40evf_set_rxfh - set the rx flow hash indirection table - * @netdev: network interface device structure - * @indir: indirection table - * @key: hash key - * @hfunc: hash function to use - * - * Returns -EINVAL if the table specifies an inavlid queue id, otherwise - * returns 0 after programming the table. 
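The get/set_rxfh pair that follows stores the RSS indirection table as bytes even though ethtool traffics in u32 entries; queue ids are small, so the narrowing is lossless. A sketch of the round trip:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t indir[8];
        uint8_t lut[8];
        int i;

        for (i = 0; i < 8; i++)
            indir[i] = i % 4;            /* spread traffic over 4 queues */
        for (i = 0; i < 8; i++)
            lut[i] = (uint8_t)indir[i];  /* set_rxfh direction: narrow */
        for (i = 0; i < 8; i++)
            indir[i] = lut[i];           /* get_rxfh direction: widen */

        printf("lut[5]: %u, indir[5]: %u\n", lut[5], indir[5]);
        return 0;
    }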
- **/ -static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir, - const u8 *key, const u8 hfunc) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - u16 i; - - /* We do not allow change in unsupported parameters */ - if (key || - (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) - return -EOPNOTSUPP; - if (!indir) - return 0; - - if (key) { - memcpy(adapter->rss_key, key, adapter->rss_key_size); - } - - /* Each 32 bits pointed by 'indir' is stored with a lut entry */ - for (i = 0; i < adapter->rss_lut_size; i++) - adapter->rss_lut[i] = (u8)(indir[i]); - - return i40evf_config_rss(adapter); -} - -static const struct ethtool_ops i40evf_ethtool_ops = { - .get_drvinfo = i40evf_get_drvinfo, - .get_link = ethtool_op_get_link, - .get_ringparam = i40evf_get_ringparam, - .set_ringparam = i40evf_set_ringparam, - .get_strings = i40evf_get_strings, - .get_ethtool_stats = i40evf_get_ethtool_stats, - .get_sset_count = i40evf_get_sset_count, - .get_priv_flags = i40evf_get_priv_flags, - .set_priv_flags = i40evf_set_priv_flags, - .get_msglevel = i40evf_get_msglevel, - .set_msglevel = i40evf_set_msglevel, - .get_coalesce = i40evf_get_coalesce, - .set_coalesce = i40evf_set_coalesce, - .get_per_queue_coalesce = i40evf_get_per_queue_coalesce, - .set_per_queue_coalesce = i40evf_set_per_queue_coalesce, - .get_rxnfc = i40evf_get_rxnfc, - .get_rxfh_indir_size = i40evf_get_rxfh_indir_size, - .get_rxfh = i40evf_get_rxfh, - .set_rxfh = i40evf_set_rxfh, - .get_channels = i40evf_get_channels, - .set_channels = i40evf_set_channels, - .get_rxfh_key_size = i40evf_get_rxfh_key_size, - .get_link_ksettings = i40evf_get_link_ksettings, -}; - -/** - * i40evf_set_ethtool_ops - Initialize ethtool ops struct - * @netdev: network interface device structure - * - * Sets ethtool ops struct in our netdev so that ethtool can call - * our functions. - **/ -void i40evf_set_ethtool_ops(struct net_device *netdev) -{ - netdev->ethtool_ops = &i40evf_ethtool_ops; -} diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c deleted file mode 100644 index 5a6e579e9e65..000000000000 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ /dev/null @@ -1,3989 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ - -#include "i40evf.h" -#include "i40e_prototype.h" -#include "i40evf_client.h" -/* All i40evf tracepoints are defined by the include below, which must - * be included exactly once across the whole kernel with - * CREATE_TRACE_POINTS defined - */ -#define CREATE_TRACE_POINTS -#include "i40e_trace.h" - -static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter); -static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter); -static int i40evf_close(struct net_device *netdev); - -char i40evf_driver_name[] = "i40evf"; -static const char i40evf_driver_string[] = - "Intel(R) 40-10 Gigabit Virtual Function Network Driver"; - -#define DRV_KERN "-k" - -#define DRV_VERSION_MAJOR 3 -#define DRV_VERSION_MINOR 2 -#define DRV_VERSION_BUILD 2 -#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ - __stringify(DRV_VERSION_MINOR) "." 
\ - __stringify(DRV_VERSION_BUILD) \ - DRV_KERN -const char i40evf_driver_version[] = DRV_VERSION; -static const char i40evf_copyright[] = - "Copyright (c) 2013 - 2015 Intel Corporation."; - -/* i40evf_pci_tbl - PCI Device ID Table - * - * Wildcard entries (PCI_ANY_ID) should come last - * Last entry must be all 0s - * - * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, - * Class, Class Mask, private data (not used) } - */ -static const struct pci_device_id i40evf_pci_tbl[] = { - {PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0}, - {PCI_VDEVICE(INTEL, I40E_DEV_ID_VF_HV), 0}, - {PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF), 0}, - {PCI_VDEVICE(INTEL, I40E_DEV_ID_ADAPTIVE_VF), 0}, - /* required last entry */ - {0, } -}; - -MODULE_DEVICE_TABLE(pci, i40evf_pci_tbl); - -MODULE_AUTHOR("Intel Corporation, "); -MODULE_DESCRIPTION("Intel(R) XL710 X710 Virtual Function Network Driver"); -MODULE_LICENSE("GPL"); -MODULE_VERSION(DRV_VERSION); - -static struct workqueue_struct *i40evf_wq; - -/** - * i40evf_allocate_dma_mem_d - OS specific memory alloc for shared code - * @hw: pointer to the HW structure - * @mem: ptr to mem struct to fill out - * @size: size of memory requested - * @alignment: what to align the allocation to - **/ -i40e_status i40evf_allocate_dma_mem_d(struct i40e_hw *hw, - struct i40e_dma_mem *mem, - u64 size, u32 alignment) -{ - struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back; - - if (!mem) - return I40E_ERR_PARAM; - - mem->size = ALIGN(size, alignment); - mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, - (dma_addr_t *)&mem->pa, GFP_KERNEL); - if (mem->va) - return 0; - else - return I40E_ERR_NO_MEMORY; -} - -/** - * i40evf_free_dma_mem_d - OS specific memory free for shared code - * @hw: pointer to the HW structure - * @mem: ptr to mem struct to free - **/ -i40e_status i40evf_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem) -{ - struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back; - - if (!mem || !mem->va) - return I40E_ERR_PARAM; - dma_free_coherent(&adapter->pdev->dev, mem->size, - mem->va, (dma_addr_t)mem->pa); - return 0; -} - -/** - * i40evf_allocate_virt_mem_d - OS specific memory alloc for shared code - * @hw: pointer to the HW structure - * @mem: ptr to mem struct to fill out - * @size: size of memory requested - **/ -i40e_status i40evf_allocate_virt_mem_d(struct i40e_hw *hw, - struct i40e_virt_mem *mem, u32 size) -{ - if (!mem) - return I40E_ERR_PARAM; - - mem->size = size; - mem->va = kzalloc(size, GFP_KERNEL); - - if (mem->va) - return 0; - else - return I40E_ERR_NO_MEMORY; -} - -/** - * i40evf_free_virt_mem_d - OS specific memory free for shared code - * @hw: pointer to the HW structure - * @mem: ptr to mem struct to free - **/ -i40e_status i40evf_free_virt_mem_d(struct i40e_hw *hw, - struct i40e_virt_mem *mem) -{ - if (!mem) - return I40E_ERR_PARAM; - - /* it's ok to kfree a NULL pointer */ - kfree(mem->va); - - return 0; -} - -/** - * i40evf_debug_d - OS dependent version of debug printing - * @hw: pointer to the HW structure - * @mask: debug level mask - * @fmt_str: printf-type format description - **/ -void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...) 
-{ - char buf[512]; - va_list argptr; - - if (!(mask & ((struct i40e_hw *)hw)->debug_mask)) - return; - - va_start(argptr, fmt_str); - vsnprintf(buf, sizeof(buf), fmt_str, argptr); - va_end(argptr); - - /* the debug string is already formatted with a newline */ - pr_info("%s", buf); -} - -/** - * i40evf_schedule_reset - Set the flags and schedule a reset event - * @adapter: board private structure - **/ -void i40evf_schedule_reset(struct i40evf_adapter *adapter) -{ - if (!(adapter->flags & - (I40EVF_FLAG_RESET_PENDING | I40EVF_FLAG_RESET_NEEDED))) { - adapter->flags |= I40EVF_FLAG_RESET_NEEDED; - schedule_work(&adapter->reset_task); - } -} - -/** - * i40evf_tx_timeout - Respond to a Tx Hang - * @netdev: network interface device structure - **/ -static void i40evf_tx_timeout(struct net_device *netdev) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - - adapter->tx_timeout_count++; - i40evf_schedule_reset(adapter); -} - -/** - * i40evf_misc_irq_disable - Mask off interrupt generation on the NIC - * @adapter: board private structure - **/ -static void i40evf_misc_irq_disable(struct i40evf_adapter *adapter) -{ - struct i40e_hw *hw = &adapter->hw; - - if (!adapter->msix_entries) - return; - - wr32(hw, I40E_VFINT_DYN_CTL01, 0); - - /* read flush */ - rd32(hw, I40E_VFGEN_RSTAT); - - synchronize_irq(adapter->msix_entries[0].vector); -} - -/** - * i40evf_misc_irq_enable - Enable default interrupt generation settings - * @adapter: board private structure - **/ -static void i40evf_misc_irq_enable(struct i40evf_adapter *adapter) -{ - struct i40e_hw *hw = &adapter->hw; - - wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK | - I40E_VFINT_DYN_CTL01_ITR_INDX_MASK); - wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK); - - /* read flush */ - rd32(hw, I40E_VFGEN_RSTAT); -} - -/** - * i40evf_irq_disable - Mask off interrupt generation on the NIC - * @adapter: board private structure - **/ -static void i40evf_irq_disable(struct i40evf_adapter *adapter) -{ - int i; - struct i40e_hw *hw = &adapter->hw; - - if (!adapter->msix_entries) - return; - - for (i = 1; i < adapter->num_msix_vectors; i++) { - wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), 0); - synchronize_irq(adapter->msix_entries[i].vector); - } - /* read flush */ - rd32(hw, I40E_VFGEN_RSTAT); -} - -/** - * i40evf_irq_enable_queues - Enable interrupt for specified queues - * @adapter: board private structure - * @mask: bitmap of queues to enable - **/ -void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask) -{ - struct i40e_hw *hw = &adapter->hw; - int i; - - for (i = 1; i < adapter->num_msix_vectors; i++) { - if (mask & BIT(i - 1)) { - wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), - I40E_VFINT_DYN_CTLN1_INTENA_MASK | - I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK); - } - } -} - -/** - * i40evf_irq_enable - Enable default interrupt generation settings - * @adapter: board private structure - * @flush: boolean value whether to run rd32() - **/ -void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush) -{ - struct i40e_hw *hw = &adapter->hw; - - i40evf_misc_irq_enable(adapter); - i40evf_irq_enable_queues(adapter, ~0); - - if (flush) - rd32(hw, I40E_VFGEN_RSTAT); -} - -/** - * i40evf_msix_aq - Interrupt handler for vector 0 - * @irq: interrupt number - * @data: pointer to netdev - **/ -static irqreturn_t i40evf_msix_aq(int irq, void *data) -{ - struct net_device *netdev = data; - struct i40evf_adapter *adapter = netdev_priv(netdev); - struct i40e_hw *hw = &adapter->hw; - - /* handle non-queue interrupts, these reads 
clear the registers */ - rd32(hw, I40E_VFINT_ICR01); - rd32(hw, I40E_VFINT_ICR0_ENA1); - - /* schedule work on the private workqueue */ - schedule_work(&adapter->adminq_task); - - return IRQ_HANDLED; -} - -/** - * i40evf_msix_clean_rings - MSIX mode Interrupt Handler - * @irq: interrupt number - * @data: pointer to a q_vector - **/ -static irqreturn_t i40evf_msix_clean_rings(int irq, void *data) -{ - struct i40e_q_vector *q_vector = data; - - if (!q_vector->tx.ring && !q_vector->rx.ring) - return IRQ_HANDLED; - - napi_schedule_irqoff(&q_vector->napi); - - return IRQ_HANDLED; -} - -/** - * i40evf_map_vector_to_rxq - associate irqs with rx queues - * @adapter: board private structure - * @v_idx: interrupt number - * @r_idx: queue number - **/ -static void -i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx) -{ - struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx]; - struct i40e_ring *rx_ring = &adapter->rx_rings[r_idx]; - struct i40e_hw *hw = &adapter->hw; - - rx_ring->q_vector = q_vector; - rx_ring->next = q_vector->rx.ring; - rx_ring->vsi = &adapter->vsi; - q_vector->rx.ring = rx_ring; - q_vector->rx.count++; - q_vector->rx.next_update = jiffies + 1; - q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting); - q_vector->ring_mask |= BIT(r_idx); - wr32(hw, I40E_VFINT_ITRN1(I40E_RX_ITR, q_vector->reg_idx), - q_vector->rx.current_itr); - q_vector->rx.current_itr = q_vector->rx.target_itr; -} - -/** - * i40evf_map_vector_to_txq - associate irqs with tx queues - * @adapter: board private structure - * @v_idx: interrupt number - * @t_idx: queue number - **/ -static void -i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx) -{ - struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx]; - struct i40e_ring *tx_ring = &adapter->tx_rings[t_idx]; - struct i40e_hw *hw = &adapter->hw; - - tx_ring->q_vector = q_vector; - tx_ring->next = q_vector->tx.ring; - tx_ring->vsi = &adapter->vsi; - q_vector->tx.ring = tx_ring; - q_vector->tx.count++; - q_vector->tx.next_update = jiffies + 1; - q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting); - q_vector->num_ringpairs++; - wr32(hw, I40E_VFINT_ITRN1(I40E_TX_ITR, q_vector->reg_idx), - q_vector->tx.target_itr); - q_vector->tx.current_itr = q_vector->tx.target_itr; -} - -/** - * i40evf_map_rings_to_vectors - Maps descriptor rings to vectors - * @adapter: board private structure to initialize - * - * This function maps descriptor rings to the queue-specific vectors - * we were allotted through the MSI-X enabling code. Ideally, we'd have - * one vector per ring/queue, but on a constrained vector budget, we - * group the rings as "efficiently" as possible. You would add new - * mapping configurations in here. - **/ -static void i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter) -{ - int rings_remaining = adapter->num_active_queues; - int ridx = 0, vidx = 0; - int q_vectors; - - q_vectors = adapter->num_msix_vectors - NONQ_VECS; - - for (; ridx < rings_remaining; ridx++) { - i40evf_map_vector_to_rxq(adapter, vidx, ridx); - i40evf_map_vector_to_txq(adapter, vidx, ridx); - - /* In the case where we have more queues than vectors, continue - * round-robin on vectors until all queues are mapped. 
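i40evf_map_vector_to_rxq()/..._txq() above attach each ring to its vector by prepending it to a singly linked list, so one vector can poll several rings. A runnable sketch of that list discipline (struct and function names are illustrative):

#include <stdio.h>

struct ring { int idx; struct ring *next; };
struct vector { struct ring *rx_head; int count; };

static void push_ring(struct vector *v, struct ring *r)
{
	r->next = v->rx_head;	/* prepend: newest ring first */
	v->rx_head = r;
	v->count++;
}

int main(void)
{
	struct ring r0 = { .idx = 0 }, r1 = { .idx = 1 };
	struct vector v = { 0 };
	struct ring *r;

	push_ring(&v, &r0);
	push_ring(&v, &r1);
	for (r = v.rx_head; r; r = r->next)
		printf("vector services ring %d\n", r->idx);
	return 0;
}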
- */ - if (++vidx >= q_vectors) - vidx = 0; - } - - adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS; -} - -/** - * i40evf_irq_affinity_notify - Callback for affinity changes - * @notify: context as to what irq was changed - * @mask: the new affinity mask - * - * This is a callback function used by the irq_set_affinity_notifier function - * so that we may register to receive changes to the irq affinity masks. - **/ -static void i40evf_irq_affinity_notify(struct irq_affinity_notify *notify, - const cpumask_t *mask) -{ - struct i40e_q_vector *q_vector = - container_of(notify, struct i40e_q_vector, affinity_notify); - - cpumask_copy(&q_vector->affinity_mask, mask); -} - -/** - * i40evf_irq_affinity_release - Callback for affinity notifier release - * @ref: internal core kernel usage - * - * This is a callback function used by the irq_set_affinity_notifier function - * to inform the current notification subscriber that they will no longer - * receive notifications. - **/ -static void i40evf_irq_affinity_release(struct kref *ref) {} - -/** - * i40evf_request_traffic_irqs - Initialize MSI-X interrupts - * @adapter: board private structure - * @basename: device basename - * - * Allocates MSI-X vectors for tx and rx handling, and requests - * interrupts from the kernel. - **/ -static int -i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename) -{ - unsigned int vector, q_vectors; - unsigned int rx_int_idx = 0, tx_int_idx = 0; - int irq_num, err; - int cpu; - - i40evf_irq_disable(adapter); - /* Decrement for Other and TCP Timer vectors */ - q_vectors = adapter->num_msix_vectors - NONQ_VECS; - - for (vector = 0; vector < q_vectors; vector++) { - struct i40e_q_vector *q_vector = &adapter->q_vectors[vector]; - irq_num = adapter->msix_entries[vector + NONQ_VECS].vector; - - if (q_vector->tx.ring && q_vector->rx.ring) { - snprintf(q_vector->name, sizeof(q_vector->name), - "i40evf-%s-TxRx-%d", basename, rx_int_idx++); - tx_int_idx++; - } else if (q_vector->rx.ring) { - snprintf(q_vector->name, sizeof(q_vector->name), - "i40evf-%s-rx-%d", basename, rx_int_idx++); - } else if (q_vector->tx.ring) { - snprintf(q_vector->name, sizeof(q_vector->name), - "i40evf-%s-tx-%d", basename, tx_int_idx++); - } else { - /* skip this unused q_vector */ - continue; - } - err = request_irq(irq_num, - i40evf_msix_clean_rings, - 0, - q_vector->name, - q_vector); - if (err) { - dev_info(&adapter->pdev->dev, - "Request_irq failed, error: %d\n", err); - goto free_queue_irqs; - } - /* register for affinity change notifications */ - q_vector->affinity_notify.notify = i40evf_irq_affinity_notify; - q_vector->affinity_notify.release = - i40evf_irq_affinity_release; - irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); - /* Spread the IRQ affinity hints across online CPUs. Note that - * get_cpu_mask returns a mask with a permanent lifetime so - * it's safe to use as a hint for irq_set_affinity_hint. - */ - cpu = cpumask_local_spread(q_vector->v_idx, -1); - irq_set_affinity_hint(irq_num, get_cpu_mask(cpu)); - } - - return 0; - -free_queue_irqs: - while (vector) { - vector--; - irq_num = adapter->msix_entries[vector + NONQ_VECS].vector; - irq_set_affinity_notifier(irq_num, NULL); - irq_set_affinity_hint(irq_num, NULL); - free_irq(irq_num, &adapter->q_vectors[vector]); - } - return err; -} - -/** - * i40evf_request_misc_irq - Initialize MSI-X interrupts - * @adapter: board private structure - * - * Allocates MSI-X vector 0 and requests interrupts from the kernel. 
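The mapping loop just closed above spreads queues across vectors round-robin when queues outnumber vectors; stripped to its arithmetic, queue r lands on vector r % q_vectors. Standalone demo with made-up counts:

#include <stdio.h>

int main(void)
{
	int q_vectors = 3, rings = 8;
	int ridx, vidx;

	for (ridx = 0, vidx = 0; ridx < rings; ridx++) {
		printf("ring %d -> vector %d\n", ridx, vidx);
		if (++vidx >= q_vectors)
			vidx = 0;
	}
	return 0;
}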
This - * vector is only for the admin queue, and stays active even when the netdev - * is closed. - **/ -static int i40evf_request_misc_irq(struct i40evf_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - int err; - - snprintf(adapter->misc_vector_name, - sizeof(adapter->misc_vector_name) - 1, "i40evf-%s:mbx", - dev_name(&adapter->pdev->dev)); - err = request_irq(adapter->msix_entries[0].vector, - &i40evf_msix_aq, 0, - adapter->misc_vector_name, netdev); - if (err) { - dev_err(&adapter->pdev->dev, - "request_irq for %s failed: %d\n", - adapter->misc_vector_name, err); - free_irq(adapter->msix_entries[0].vector, netdev); - } - return err; -} - -/** - * i40evf_free_traffic_irqs - Free MSI-X interrupts - * @adapter: board private structure - * - * Frees all MSI-X vectors other than 0. - **/ -static void i40evf_free_traffic_irqs(struct i40evf_adapter *adapter) -{ - int vector, irq_num, q_vectors; - - if (!adapter->msix_entries) - return; - - q_vectors = adapter->num_msix_vectors - NONQ_VECS; - - for (vector = 0; vector < q_vectors; vector++) { - irq_num = adapter->msix_entries[vector + NONQ_VECS].vector; - irq_set_affinity_notifier(irq_num, NULL); - irq_set_affinity_hint(irq_num, NULL); - free_irq(irq_num, &adapter->q_vectors[vector]); - } -} - -/** - * i40evf_free_misc_irq - Free MSI-X miscellaneous vector - * @adapter: board private structure - * - * Frees MSI-X vector 0. - **/ -static void i40evf_free_misc_irq(struct i40evf_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - - if (!adapter->msix_entries) - return; - - free_irq(adapter->msix_entries[0].vector, netdev); -} - -/** - * i40evf_configure_tx - Configure Transmit Unit after Reset - * @adapter: board private structure - * - * Configure the Tx unit of the MAC after a reset. - **/ -static void i40evf_configure_tx(struct i40evf_adapter *adapter) -{ - struct i40e_hw *hw = &adapter->hw; - int i; - - for (i = 0; i < adapter->num_active_queues; i++) - adapter->tx_rings[i].tail = hw->hw_addr + I40E_QTX_TAIL1(i); -} - -/** - * i40evf_configure_rx - Configure Receive Unit after Reset - * @adapter: board private structure - * - * Configure the Rx unit of the MAC after a reset. - **/ -static void i40evf_configure_rx(struct i40evf_adapter *adapter) -{ - unsigned int rx_buf_len = I40E_RXBUFFER_2048; - struct i40e_hw *hw = &adapter->hw; - int i; - - /* Legacy Rx will always default to a 2048 buffer size. */ -#if (PAGE_SIZE < 8192) - if (!(adapter->flags & I40EVF_FLAG_LEGACY_RX)) { - struct net_device *netdev = adapter->netdev; - - /* For jumbo frames on systems with 4K pages we have to use - * an order 1 page, so we might as well increase the size - * of our Rx buffer to make better use of the available space - */ - rx_buf_len = I40E_RXBUFFER_3072; - - /* We use a 1536 buffer size for configurations with - * standard Ethernet mtu. On x86 this gives us enough room - * for shared info and 192 bytes of padding. 
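The free_queue_irqs error path in i40evf_request_traffic_irqs() above releases vectors strictly in reverse order of acquisition. A compact standalone sketch of that unwind idiom, where acquire() and release() are stand-ins for request_irq() and free_irq():

#include <stdio.h>

static int acquire(int i)  { return i == 3 ? -1 : 0; }	/* fail on the 4th */
static void release(int i) { printf("release %d\n", i); }

int main(void)
{
	int vector;

	for (vector = 0; vector < 6; vector++)
		if (acquire(vector))
			goto unwind;
	return 0;
unwind:
	while (vector) {
		vector--;
		release(vector);	/* tear down only what was acquired */
	}
	return 1;
}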
- */ - if (!I40E_2K_TOO_SMALL_WITH_PADDING && - (netdev->mtu <= ETH_DATA_LEN)) - rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN; - } -#endif - - for (i = 0; i < adapter->num_active_queues; i++) { - adapter->rx_rings[i].tail = hw->hw_addr + I40E_QRX_TAIL1(i); - adapter->rx_rings[i].rx_buf_len = rx_buf_len; - - if (adapter->flags & I40EVF_FLAG_LEGACY_RX) - clear_ring_build_skb_enabled(&adapter->rx_rings[i]); - else - set_ring_build_skb_enabled(&adapter->rx_rings[i]); - } -} - -/** - * i40evf_find_vlan - Search filter list for specific vlan filter - * @adapter: board private structure - * @vlan: vlan tag - * - * Returns ptr to the filter object or NULL. Must be called while holding the - * mac_vlan_list_lock. - **/ -static struct -i40evf_vlan_filter *i40evf_find_vlan(struct i40evf_adapter *adapter, u16 vlan) -{ - struct i40evf_vlan_filter *f; - - list_for_each_entry(f, &adapter->vlan_filter_list, list) { - if (vlan == f->vlan) - return f; - } - return NULL; -} - -/** - * i40evf_add_vlan - Add a vlan filter to the list - * @adapter: board private structure - * @vlan: VLAN tag - * - * Returns ptr to the filter object or NULL when no memory available. - **/ -static struct -i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan) -{ - struct i40evf_vlan_filter *f = NULL; - - spin_lock_bh(&adapter->mac_vlan_list_lock); - - f = i40evf_find_vlan(adapter, vlan); - if (!f) { - f = kzalloc(sizeof(*f), GFP_KERNEL); - if (!f) - goto clearout; - - f->vlan = vlan; - - INIT_LIST_HEAD(&f->list); - list_add(&f->list, &adapter->vlan_filter_list); - f->add = true; - adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER; - } - -clearout: - spin_unlock_bh(&adapter->mac_vlan_list_lock); - return f; -} - -/** - * i40evf_del_vlan - Remove a vlan filter from the list - * @adapter: board private structure - * @vlan: VLAN tag - **/ -static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan) -{ - struct i40evf_vlan_filter *f; - - spin_lock_bh(&adapter->mac_vlan_list_lock); - - f = i40evf_find_vlan(adapter, vlan); - if (f) { - f->remove = true; - adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER; - } - - spin_unlock_bh(&adapter->mac_vlan_list_lock); -} - -/** - * i40evf_vlan_rx_add_vid - Add a VLAN filter to a device - * @netdev: network device struct - * @proto: unused protocol data - * @vid: VLAN tag - **/ -static int i40evf_vlan_rx_add_vid(struct net_device *netdev, - __always_unused __be16 proto, u16 vid) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - - if (!VLAN_ALLOWED(adapter)) - return -EIO; - if (i40evf_add_vlan(adapter, vid) == NULL) - return -ENOMEM; - return 0; -} - -/** - * i40evf_vlan_rx_kill_vid - Remove a VLAN filter from a device - * @netdev: network device struct - * @proto: unused protocol data - * @vid: VLAN tag - **/ -static int i40evf_vlan_rx_kill_vid(struct net_device *netdev, - __always_unused __be16 proto, u16 vid) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - - if (VLAN_ALLOWED(adapter)) { - i40evf_del_vlan(adapter, vid); - return 0; - } - return -EIO; -} - -/** - * i40evf_find_filter - Search filter list for specific mac filter - * @adapter: board private structure - * @macaddr: the MAC address - * - * Returns ptr to the filter object or NULL. Must be called while holding the - * mac_vlan_list_lock. 
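Condensed, the Rx buffer sizing completed just above picks between three sizes. This sketch inlines the constants and omits the I40E_2K_TOO_SMALL_WITH_PADDING check for brevity; NET_IP_ALIGN is 2 on most architectures and 0 on x86.

#include <stdbool.h>
#include <stdio.h>

#define RXBUF_1536	1536
#define RXBUF_2048	2048
#define RXBUF_3072	3072
#define ETH_DATA_LEN	1500
#define NET_IP_ALIGN	2

static unsigned int pick_rx_buf_len(bool legacy_rx, unsigned int mtu)
{
	if (legacy_rx)
		return RXBUF_2048;		/* legacy Rx always uses 2048 */
	if (mtu <= ETH_DATA_LEN)
		return RXBUF_1536 - NET_IP_ALIGN; /* standard MTU, build_skb */
	return RXBUF_3072;			/* jumbo on 4K-page systems */
}

int main(void)
{
	printf("%u %u %u\n",
	       pick_rx_buf_len(true, 1500),
	       pick_rx_buf_len(false, 1500),
	       pick_rx_buf_len(false, 9000));
	return 0;
}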
- **/ -static struct -i40evf_mac_filter *i40evf_find_filter(struct i40evf_adapter *adapter, - const u8 *macaddr) -{ - struct i40evf_mac_filter *f; - - if (!macaddr) - return NULL; - - list_for_each_entry(f, &adapter->mac_filter_list, list) { - if (ether_addr_equal(macaddr, f->macaddr)) - return f; - } - return NULL; -} - -/** - * i40e_add_filter - Add a mac filter to the filter list - * @adapter: board private structure - * @macaddr: the MAC address - * - * Returns ptr to the filter object or NULL when no memory available. - **/ -static struct -i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter, - const u8 *macaddr) -{ - struct i40evf_mac_filter *f; - - if (!macaddr) - return NULL; - - f = i40evf_find_filter(adapter, macaddr); - if (!f) { - f = kzalloc(sizeof(*f), GFP_ATOMIC); - if (!f) - return f; - - ether_addr_copy(f->macaddr, macaddr); - - list_add_tail(&f->list, &adapter->mac_filter_list); - f->add = true; - adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER; - } else { - f->remove = false; - } - - return f; -} - -/** - * i40evf_set_mac - NDO callback to set port mac address - * @netdev: network interface device structure - * @p: pointer to an address structure - * - * Returns 0 on success, negative on failure - **/ -static int i40evf_set_mac(struct net_device *netdev, void *p) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - struct i40e_hw *hw = &adapter->hw; - struct i40evf_mac_filter *f; - struct sockaddr *addr = p; - - if (!is_valid_ether_addr(addr->sa_data)) - return -EADDRNOTAVAIL; - - if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) - return 0; - - if (adapter->flags & I40EVF_FLAG_ADDR_SET_BY_PF) - return -EPERM; - - spin_lock_bh(&adapter->mac_vlan_list_lock); - - f = i40evf_find_filter(adapter, hw->mac.addr); - if (f) { - f->remove = true; - adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER; - } - - f = i40evf_add_filter(adapter, addr->sa_data); - - spin_unlock_bh(&adapter->mac_vlan_list_lock); - - if (f) { - ether_addr_copy(hw->mac.addr, addr->sa_data); - ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); - } - - return (f == NULL) ? -ENOMEM : 0; -} - -/** - * i40evf_addr_sync - Callback for dev_(mc|uc)_sync to add address - * @netdev: the netdevice - * @addr: address to add - * - * Called by __dev_(mc|uc)_sync when an address needs to be added. We call - * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock. - */ -static int i40evf_addr_sync(struct net_device *netdev, const u8 *addr) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - - if (i40evf_add_filter(adapter, addr)) - return 0; - else - return -ENOMEM; -} - -/** - * i40evf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address - * @netdev: the netdevice - * @addr: address to add - * - * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call - * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock. - */ -static int i40evf_addr_unsync(struct net_device *netdev, const u8 *addr) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - struct i40evf_mac_filter *f; - - /* Under some circumstances, we might receive a request to delete - * our own device address from our uc list. Because we store the - * device address in the VSI's MAC/VLAN filter list, we need to ignore - * such requests and not delete our device address from this list. 
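Note the common shape of the VLAN and MAC filter helpers above: they never talk to the PF directly, but mark list entries add/remove under the lock and raise an aq_required bit for the watchdog to flush later. A minimal single-threaded sketch of that deferred-request pattern (locking omitted, flag names abbreviated):

#include <stdbool.h>
#include <stdio.h>

#define FLAG_AQ_ADD_VLAN_FILTER	(1u << 0)
#define FLAG_AQ_DEL_VLAN_FILTER	(1u << 1)

struct vlan_filter {
	unsigned short vlan;
	bool add;
	bool remove;
};

int main(void)
{
	struct vlan_filter f = { .vlan = 100 };
	unsigned int aq_required = 0;

	/* "add": mark the entry and record that work is pending */
	f.add = true;
	aq_required |= FLAG_AQ_ADD_VLAN_FILTER;

	/* a later watchdog pass sees the bit and sends one virtchnl
	 * message covering every marked entry */
	if (aq_required & FLAG_AQ_ADD_VLAN_FILTER)
		printf("send ADD for vlan %u\n", f.vlan);
	return 0;
}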
- */ - if (ether_addr_equal(addr, netdev->dev_addr)) - return 0; - - f = i40evf_find_filter(adapter, addr); - if (f) { - f->remove = true; - adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER; - } - return 0; -} - -/** - * i40evf_set_rx_mode - NDO callback to set the netdev filters - * @netdev: network interface device structure - **/ -static void i40evf_set_rx_mode(struct net_device *netdev) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - - spin_lock_bh(&adapter->mac_vlan_list_lock); - __dev_uc_sync(netdev, i40evf_addr_sync, i40evf_addr_unsync); - __dev_mc_sync(netdev, i40evf_addr_sync, i40evf_addr_unsync); - spin_unlock_bh(&adapter->mac_vlan_list_lock); - - if (netdev->flags & IFF_PROMISC && - !(adapter->flags & I40EVF_FLAG_PROMISC_ON)) - adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_PROMISC; - else if (!(netdev->flags & IFF_PROMISC) && - adapter->flags & I40EVF_FLAG_PROMISC_ON) - adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_PROMISC; - - if (netdev->flags & IFF_ALLMULTI && - !(adapter->flags & I40EVF_FLAG_ALLMULTI_ON)) - adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_ALLMULTI; - else if (!(netdev->flags & IFF_ALLMULTI) && - adapter->flags & I40EVF_FLAG_ALLMULTI_ON) - adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_ALLMULTI; -} - -/** - * i40evf_napi_enable_all - enable NAPI on all queue vectors - * @adapter: board private structure - **/ -static void i40evf_napi_enable_all(struct i40evf_adapter *adapter) -{ - int q_idx; - struct i40e_q_vector *q_vector; - int q_vectors = adapter->num_msix_vectors - NONQ_VECS; - - for (q_idx = 0; q_idx < q_vectors; q_idx++) { - struct napi_struct *napi; - - q_vector = &adapter->q_vectors[q_idx]; - napi = &q_vector->napi; - napi_enable(napi); - } -} - -/** - * i40evf_napi_disable_all - disable NAPI on all queue vectors - * @adapter: board private structure - **/ -static void i40evf_napi_disable_all(struct i40evf_adapter *adapter) -{ - int q_idx; - struct i40e_q_vector *q_vector; - int q_vectors = adapter->num_msix_vectors - NONQ_VECS; - - for (q_idx = 0; q_idx < q_vectors; q_idx++) { - q_vector = &adapter->q_vectors[q_idx]; - napi_disable(&q_vector->napi); - } -} - -/** - * i40evf_configure - set up transmit and receive data structures - * @adapter: board private structure - **/ -static void i40evf_configure(struct i40evf_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - int i; - - i40evf_set_rx_mode(netdev); - - i40evf_configure_tx(adapter); - i40evf_configure_rx(adapter); - adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES; - - for (i = 0; i < adapter->num_active_queues; i++) { - struct i40e_ring *ring = &adapter->rx_rings[i]; - - i40evf_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring)); - } -} - -/** - * i40evf_up_complete - Finish the last steps of bringing up a connection - * @adapter: board private structure - * - * Expects to be called while holding the __I40EVF_IN_CRITICAL_TASK bit lock. - **/ -static void i40evf_up_complete(struct i40evf_adapter *adapter) -{ - adapter->state = __I40EVF_RUNNING; - clear_bit(__I40E_VSI_DOWN, adapter->vsi.state); - - i40evf_napi_enable_all(adapter); - - adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_QUEUES; - if (CLIENT_ENABLED(adapter)) - adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_OPEN; - mod_timer_pending(&adapter->watchdog_timer, jiffies + 1); -} - -/** - * i40e_down - Shutdown the connection processing - * @adapter: board private structure - * - * Expects to be called while holding the __I40EVF_IN_CRITICAL_TASK bit lock. 
- **/ -void i40evf_down(struct i40evf_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - struct i40evf_vlan_filter *vlf; - struct i40evf_mac_filter *f; - struct i40evf_cloud_filter *cf; - - if (adapter->state <= __I40EVF_DOWN_PENDING) - return; - - netif_carrier_off(netdev); - netif_tx_disable(netdev); - adapter->link_up = false; - i40evf_napi_disable_all(adapter); - i40evf_irq_disable(adapter); - - spin_lock_bh(&adapter->mac_vlan_list_lock); - - /* clear the sync flag on all filters */ - __dev_uc_unsync(adapter->netdev, NULL); - __dev_mc_unsync(adapter->netdev, NULL); - - /* remove all MAC filters */ - list_for_each_entry(f, &adapter->mac_filter_list, list) { - f->remove = true; - } - - /* remove all VLAN filters */ - list_for_each_entry(vlf, &adapter->vlan_filter_list, list) { - vlf->remove = true; - } - - spin_unlock_bh(&adapter->mac_vlan_list_lock); - - /* remove all cloud filters */ - spin_lock_bh(&adapter->cloud_filter_list_lock); - list_for_each_entry(cf, &adapter->cloud_filter_list, list) { - cf->del = true; - } - spin_unlock_bh(&adapter->cloud_filter_list_lock); - - if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) && - adapter->state != __I40EVF_RESETTING) { - /* cancel any current operation */ - adapter->current_op = VIRTCHNL_OP_UNKNOWN; - /* Schedule operations to close down the HW. Don't wait - * here for this to complete. The watchdog is still running - * and it will take care of this. - */ - adapter->aq_required = I40EVF_FLAG_AQ_DEL_MAC_FILTER; - adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER; - adapter->aq_required |= I40EVF_FLAG_AQ_DEL_CLOUD_FILTER; - adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES; - } - - mod_timer_pending(&adapter->watchdog_timer, jiffies + 1); -} - -/** - * i40evf_acquire_msix_vectors - Setup the MSIX capability - * @adapter: board private structure - * @vectors: number of vectors to request - * - * Work with the OS to set up the MSIX vectors needed. - * - * Returns 0 on success, negative on failure - **/ -static int -i40evf_acquire_msix_vectors(struct i40evf_adapter *adapter, int vectors) -{ - int err, vector_threshold; - - /* We'll want at least 3 (vector_threshold): - * 0) Other (Admin Queue and link, mostly) - * 1) TxQ[0] Cleanup - * 2) RxQ[0] Cleanup - */ - vector_threshold = MIN_MSIX_COUNT; - - /* The more we get, the more we will assign to Tx/Rx Cleanup - * for the separate queues...where Rx Cleanup >= Tx Cleanup. - * Right now, we simply care about how many we'll get; we'll - * set them up later while requesting irq's. - */ - err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, - vector_threshold, vectors); - if (err < 0) { - dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n"); - kfree(adapter->msix_entries); - adapter->msix_entries = NULL; - return err; - } - - /* Adjust for only the vectors we'll use, which is minimum - * of max_msix_q_vectors + NONQ_VECS, or the number of - * vectors we were allocated. - */ - adapter->num_msix_vectors = err; - return 0; -} - -/** - * i40evf_free_queues - Free memory for all rings - * @adapter: board private structure to initialize - * - * Free all of the memory associated with queue pairs. 
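pci_enable_msix_range(), as used by i40evf_acquire_msix_vectors() above, grants any vector count in [minvec, maxvec] or fails outright. A standalone model of that contract:

#include <stdio.h>

static int enable_msix_range(int os_available, int minvec, int maxvec)
{
	if (os_available < minvec)
		return -28;	/* -ENOSPC analogue: can't meet the minimum */
	return os_available < maxvec ? os_available : maxvec;
}

int main(void)
{
	/* want one vector per queue pair plus the misc vector, but
	 * settle for as few as 3 (misc + TxQ[0] + RxQ[0]) */
	int got = enable_msix_range(4, 3, 9);

	if (got < 0)
		printf("no MSI-X\n");
	else
		printf("num_msix_vectors = %d\n", got);
	return 0;
}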
- **/ -static void i40evf_free_queues(struct i40evf_adapter *adapter) -{ - if (!adapter->vsi_res) - return; - adapter->num_active_queues = 0; - kfree(adapter->tx_rings); - adapter->tx_rings = NULL; - kfree(adapter->rx_rings); - adapter->rx_rings = NULL; -} - -/** - * i40evf_alloc_queues - Allocate memory for all rings - * @adapter: board private structure to initialize - * - * We allocate one ring per queue at run-time since we don't know the - * number of queues at compile-time. The polling_netdev array is - * intended for Multiqueue, but should work fine with a single queue. - **/ -static int i40evf_alloc_queues(struct i40evf_adapter *adapter) -{ - int i, num_active_queues; - - /* If we're in reset reallocating queues we don't actually know yet for - * certain the PF gave us the number of queues we asked for but we'll - * assume it did. Once basic reset is finished we'll confirm once we - * start negotiating config with PF. - */ - if (adapter->num_req_queues) - num_active_queues = adapter->num_req_queues; - else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && - adapter->num_tc) - num_active_queues = adapter->ch_config.total_qps; - else - num_active_queues = min_t(int, - adapter->vsi_res->num_queue_pairs, - (int)(num_online_cpus())); - - - adapter->tx_rings = kcalloc(num_active_queues, - sizeof(struct i40e_ring), GFP_KERNEL); - if (!adapter->tx_rings) - goto err_out; - adapter->rx_rings = kcalloc(num_active_queues, - sizeof(struct i40e_ring), GFP_KERNEL); - if (!adapter->rx_rings) - goto err_out; - - for (i = 0; i < num_active_queues; i++) { - struct i40e_ring *tx_ring; - struct i40e_ring *rx_ring; - - tx_ring = &adapter->tx_rings[i]; - - tx_ring->queue_index = i; - tx_ring->netdev = adapter->netdev; - tx_ring->dev = &adapter->pdev->dev; - tx_ring->count = adapter->tx_desc_count; - tx_ring->itr_setting = I40E_ITR_TX_DEF; - if (adapter->flags & I40EVF_FLAG_WB_ON_ITR_CAPABLE) - tx_ring->flags |= I40E_TXR_FLAGS_WB_ON_ITR; - - rx_ring = &adapter->rx_rings[i]; - rx_ring->queue_index = i; - rx_ring->netdev = adapter->netdev; - rx_ring->dev = &adapter->pdev->dev; - rx_ring->count = adapter->rx_desc_count; - rx_ring->itr_setting = I40E_ITR_RX_DEF; - } - - adapter->num_active_queues = num_active_queues; - - return 0; - -err_out: - i40evf_free_queues(adapter); - return -ENOMEM; -} - -/** - * i40evf_set_interrupt_capability - set MSI-X or FAIL if not supported - * @adapter: board private structure to initialize - * - * Attempt to configure the interrupts using the best available - * capabilities of the hardware and the kernel. - **/ -static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter) -{ - int vector, v_budget; - int pairs = 0; - int err = 0; - - if (!adapter->vsi_res) { - err = -EIO; - goto out; - } - pairs = adapter->num_active_queues; - - /* It's easy to be greedy for MSI-X vectors, but it really doesn't do - * us much good if we have more vectors than CPUs. However, we already - * limit the total number of queues by the number of CPUs so we do not - * need any further limiting here. 
- */ - v_budget = min_t(int, pairs + NONQ_VECS, - (int)adapter->vf_res->max_vectors); - - adapter->msix_entries = kcalloc(v_budget, - sizeof(struct msix_entry), GFP_KERNEL); - if (!adapter->msix_entries) { - err = -ENOMEM; - goto out; - } - - for (vector = 0; vector < v_budget; vector++) - adapter->msix_entries[vector].entry = vector; - - err = i40evf_acquire_msix_vectors(adapter, v_budget); - -out: - netif_set_real_num_rx_queues(adapter->netdev, pairs); - netif_set_real_num_tx_queues(adapter->netdev, pairs); - return err; -} - -/** - * i40e_config_rss_aq - Configure RSS keys and lut by using AQ commands - * @adapter: board private structure - * - * Return 0 on success, negative on failure - **/ -static int i40evf_config_rss_aq(struct i40evf_adapter *adapter) -{ - struct i40e_aqc_get_set_rss_key_data *rss_key = - (struct i40e_aqc_get_set_rss_key_data *)adapter->rss_key; - struct i40e_hw *hw = &adapter->hw; - int ret = 0; - - if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { - /* bail because we already have a command pending */ - dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n", - adapter->current_op); - return -EBUSY; - } - - ret = i40evf_aq_set_rss_key(hw, adapter->vsi.id, rss_key); - if (ret) { - dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n", - i40evf_stat_str(hw, ret), - i40evf_aq_str(hw, hw->aq.asq_last_status)); - return ret; - - } - - ret = i40evf_aq_set_rss_lut(hw, adapter->vsi.id, false, - adapter->rss_lut, adapter->rss_lut_size); - if (ret) { - dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n", - i40evf_stat_str(hw, ret), - i40evf_aq_str(hw, hw->aq.asq_last_status)); - } - - return ret; - -} - -/** - * i40evf_config_rss_reg - Configure RSS keys and lut by writing registers - * @adapter: board private structure - * - * Returns 0 on success, negative on failure - **/ -static int i40evf_config_rss_reg(struct i40evf_adapter *adapter) -{ - struct i40e_hw *hw = &adapter->hw; - u32 *dw; - u16 i; - - dw = (u32 *)adapter->rss_key; - for (i = 0; i <= adapter->rss_key_size / 4; i++) - wr32(hw, I40E_VFQF_HKEY(i), dw[i]); - - dw = (u32 *)adapter->rss_lut; - for (i = 0; i <= adapter->rss_lut_size / 4; i++) - wr32(hw, I40E_VFQF_HLUT(i), dw[i]); - - i40e_flush(hw); - - return 0; -} - -/** - * i40evf_config_rss - Configure RSS keys and lut - * @adapter: board private structure - * - * Returns 0 on success, negative on failure - **/ -int i40evf_config_rss(struct i40evf_adapter *adapter) -{ - - if (RSS_PF(adapter)) { - adapter->aq_required |= I40EVF_FLAG_AQ_SET_RSS_LUT | - I40EVF_FLAG_AQ_SET_RSS_KEY; - return 0; - } else if (RSS_AQ(adapter)) { - return i40evf_config_rss_aq(adapter); - } else { - return i40evf_config_rss_reg(adapter); - } -} - -/** - * i40evf_fill_rss_lut - Fill the lut with default values - * @adapter: board private structure - **/ -static void i40evf_fill_rss_lut(struct i40evf_adapter *adapter) -{ - u16 i; - - for (i = 0; i < adapter->rss_lut_size; i++) - adapter->rss_lut[i] = i % adapter->num_active_queues; -} - -/** - * i40evf_init_rss - Prepare for RSS - * @adapter: board private structure - * - * Return 0 on success, negative on failure - **/ -static int i40evf_init_rss(struct i40evf_adapter *adapter) -{ - struct i40e_hw *hw = &adapter->hw; - int ret; - - if (!RSS_PF(adapter)) { - /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */ - if (adapter->vf_res->vf_cap_flags & - VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) - adapter->hena = I40E_DEFAULT_RSS_HENA_EXPANDED; - else - adapter->hena = I40E_DEFAULT_RSS_HENA; - - 
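i40evf_fill_rss_lut() above spreads hash buckets across the active queues with a simple modulo. Runnable sketch:

#include <stdio.h>

int main(void)
{
	unsigned char lut[16];
	unsigned int num_active_queues = 3, i;

	for (i = 0; i < sizeof(lut); i++)
		lut[i] = i % num_active_queues;	/* 0 1 2 0 1 2 ... */

	for (i = 0; i < sizeof(lut); i++)
		printf("%u ", (unsigned int)lut[i]);
	printf("\n");
	return 0;
}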
wr32(hw, I40E_VFQF_HENA(0), (u32)adapter->hena); - wr32(hw, I40E_VFQF_HENA(1), (u32)(adapter->hena >> 32)); - } - - i40evf_fill_rss_lut(adapter); - - netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size); - ret = i40evf_config_rss(adapter); - - return ret; -} - -/** - * i40evf_alloc_q_vectors - Allocate memory for interrupt vectors - * @adapter: board private structure to initialize - * - * We allocate one q_vector per queue interrupt. If allocation fails we - * return -ENOMEM. - **/ -static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter) -{ - int q_idx = 0, num_q_vectors; - struct i40e_q_vector *q_vector; - - num_q_vectors = adapter->num_msix_vectors - NONQ_VECS; - adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector), - GFP_KERNEL); - if (!adapter->q_vectors) - return -ENOMEM; - - for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { - q_vector = &adapter->q_vectors[q_idx]; - q_vector->adapter = adapter; - q_vector->vsi = &adapter->vsi; - q_vector->v_idx = q_idx; - q_vector->reg_idx = q_idx; - cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask); - netif_napi_add(adapter->netdev, &q_vector->napi, - i40evf_napi_poll, NAPI_POLL_WEIGHT); - } - - return 0; -} - -/** - * i40evf_free_q_vectors - Free memory allocated for interrupt vectors - * @adapter: board private structure to initialize - * - * This function frees the memory allocated to the q_vectors. In addition if - * NAPI is enabled it will delete any references to the NAPI struct prior - * to freeing the q_vector. - **/ -static void i40evf_free_q_vectors(struct i40evf_adapter *adapter) -{ - int q_idx, num_q_vectors; - int napi_vectors; - - if (!adapter->q_vectors) - return; - - num_q_vectors = adapter->num_msix_vectors - NONQ_VECS; - napi_vectors = adapter->num_active_queues; - - for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { - struct i40e_q_vector *q_vector = &adapter->q_vectors[q_idx]; - if (q_idx < napi_vectors) - netif_napi_del(&q_vector->napi); - } - kfree(adapter->q_vectors); - adapter->q_vectors = NULL; -} - -/** - * i40evf_reset_interrupt_capability - Reset MSIX setup - * @adapter: board private structure - * - **/ -void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter) -{ - if (!adapter->msix_entries) - return; - - pci_disable_msix(adapter->pdev); - kfree(adapter->msix_entries); - adapter->msix_entries = NULL; -} - -/** - * i40evf_init_interrupt_scheme - Determine if MSIX is supported and init - * @adapter: board private structure to initialize - * - **/ -int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter) -{ - int err; - - err = i40evf_alloc_queues(adapter); - if (err) { - dev_err(&adapter->pdev->dev, - "Unable to allocate memory for queues\n"); - goto err_alloc_queues; - } - - rtnl_lock(); - err = i40evf_set_interrupt_capability(adapter); - rtnl_unlock(); - if (err) { - dev_err(&adapter->pdev->dev, - "Unable to setup interrupt capabilities\n"); - goto err_set_interrupt; - } - - err = i40evf_alloc_q_vectors(adapter); - if (err) { - dev_err(&adapter->pdev->dev, - "Unable to allocate memory for queue vectors\n"); - goto err_alloc_q_vectors; - } - - /* If we've made it so far while ADq flag being ON, then we haven't - * bailed out anywhere in middle. And ADq isn't just enabled but actual - * resources have been allocated in the reset path. - * Now we can truly claim that ADq is enabled. 
- */ - if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && - adapter->num_tc) - dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created", - adapter->num_tc); - - dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u", - (adapter->num_active_queues > 1) ? "Enabled" : "Disabled", - adapter->num_active_queues); - - return 0; -err_alloc_q_vectors: - i40evf_reset_interrupt_capability(adapter); -err_set_interrupt: - i40evf_free_queues(adapter); -err_alloc_queues: - return err; -} - -/** - * i40evf_free_rss - Free memory used by RSS structs - * @adapter: board private structure - **/ -static void i40evf_free_rss(struct i40evf_adapter *adapter) -{ - kfree(adapter->rss_key); - adapter->rss_key = NULL; - - kfree(adapter->rss_lut); - adapter->rss_lut = NULL; -} - -/** - * i40evf_reinit_interrupt_scheme - Reallocate queues and vectors - * @adapter: board private structure - * - * Returns 0 on success, negative on failure - **/ -static int i40evf_reinit_interrupt_scheme(struct i40evf_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - int err; - - if (netif_running(netdev)) - i40evf_free_traffic_irqs(adapter); - i40evf_free_misc_irq(adapter); - i40evf_reset_interrupt_capability(adapter); - i40evf_free_q_vectors(adapter); - i40evf_free_queues(adapter); - - err = i40evf_init_interrupt_scheme(adapter); - if (err) - goto err; - - netif_tx_stop_all_queues(netdev); - - err = i40evf_request_misc_irq(adapter); - if (err) - goto err; - - set_bit(__I40E_VSI_DOWN, adapter->vsi.state); - - i40evf_map_rings_to_vectors(adapter); - - if (RSS_AQ(adapter)) - adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS; - else - err = i40evf_init_rss(adapter); -err: - return err; -} - -/** - * i40evf_watchdog_timer - Periodic call-back timer - * @data: pointer to adapter disguised as unsigned long - **/ -static void i40evf_watchdog_timer(struct timer_list *t) -{ - struct i40evf_adapter *adapter = from_timer(adapter, t, - watchdog_timer); - - schedule_work(&adapter->watchdog_task); - /* timer will be rescheduled in watchdog task */ -} - -/** - * i40evf_watchdog_task - Periodic call-back task - * @work: pointer to work_struct - **/ -static void i40evf_watchdog_task(struct work_struct *work) -{ - struct i40evf_adapter *adapter = container_of(work, - struct i40evf_adapter, - watchdog_task); - struct i40e_hw *hw = &adapter->hw; - u32 reg_val; - - if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section)) - goto restart_watchdog; - - if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) { - reg_val = rd32(hw, I40E_VFGEN_RSTAT) & - I40E_VFGEN_RSTAT_VFR_STATE_MASK; - if ((reg_val == VIRTCHNL_VFR_VFACTIVE) || - (reg_val == VIRTCHNL_VFR_COMPLETED)) { - /* A chance for redemption! */ - dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n"); - adapter->state = __I40EVF_STARTUP; - adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED; - schedule_delayed_work(&adapter->init_task, 10); - clear_bit(__I40EVF_IN_CRITICAL_TASK, - &adapter->crit_section); - /* Don't reschedule the watchdog, since we've restarted - * the init task. When init_task contacts the PF and - * gets everything set up again, it'll restart the - * watchdog for us. Down, boy. Sit. Stay. Woof. 
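from_timer() in i40evf_watchdog_timer() above is container_of() underneath: it recovers the enclosing adapter from the address of the timer embedded in it. Standalone illustration:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct timer { int dummy; };
struct adapter { int id; struct timer watchdog_timer; };

int main(void)
{
	struct adapter a = { .id = 42 };
	struct timer *t = &a.watchdog_timer;	/* what the callback gets */
	struct adapter *back = container_of(t, struct adapter, watchdog_timer);

	printf("%d\n", back->id);	/* prints 42 */
	return 0;
}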
- */ - return; - } - adapter->aq_required = 0; - adapter->current_op = VIRTCHNL_OP_UNKNOWN; - goto watchdog_done; - } - - if ((adapter->state < __I40EVF_DOWN) || - (adapter->flags & I40EVF_FLAG_RESET_PENDING)) - goto watchdog_done; - - /* check for reset */ - reg_val = rd32(hw, I40E_VF_ARQLEN1) & I40E_VF_ARQLEN1_ARQENABLE_MASK; - if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) && !reg_val) { - adapter->state = __I40EVF_RESETTING; - adapter->flags |= I40EVF_FLAG_RESET_PENDING; - dev_err(&adapter->pdev->dev, "Hardware reset detected\n"); - schedule_work(&adapter->reset_task); - adapter->aq_required = 0; - adapter->current_op = VIRTCHNL_OP_UNKNOWN; - goto watchdog_done; - } - - /* Process admin queue tasks. After init, everything gets done - * here so we don't race on the admin queue. - */ - if (adapter->current_op) { - if (!i40evf_asq_done(hw)) { - dev_dbg(&adapter->pdev->dev, "Admin queue timeout\n"); - i40evf_send_api_ver(adapter); - } - goto watchdog_done; - } - if (adapter->aq_required & I40EVF_FLAG_AQ_GET_CONFIG) { - i40evf_send_vf_config_msg(adapter); - goto watchdog_done; - } - - if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_QUEUES) { - i40evf_disable_queues(adapter); - goto watchdog_done; - } - - if (adapter->aq_required & I40EVF_FLAG_AQ_MAP_VECTORS) { - i40evf_map_queues(adapter); - goto watchdog_done; - } - - if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_MAC_FILTER) { - i40evf_add_ether_addrs(adapter); - goto watchdog_done; - } - - if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_VLAN_FILTER) { - i40evf_add_vlans(adapter); - goto watchdog_done; - } - - if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_MAC_FILTER) { - i40evf_del_ether_addrs(adapter); - goto watchdog_done; - } - - if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_VLAN_FILTER) { - i40evf_del_vlans(adapter); - goto watchdog_done; - } - - if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) { - i40evf_enable_vlan_stripping(adapter); - goto watchdog_done; - } - - if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) { - i40evf_disable_vlan_stripping(adapter); - goto watchdog_done; - } - - if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_QUEUES) { - i40evf_configure_queues(adapter); - goto watchdog_done; - } - - if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_QUEUES) { - i40evf_enable_queues(adapter); - goto watchdog_done; - } - - if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_RSS) { - /* This message goes straight to the firmware, not the - * PF, so we don't have to set current_op as we will - * not get a response through the ARQ. 
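The if/goto chain above and continuing below services pending admin-queue work one request per watchdog pass, so only one virtchnl operation is ever in flight. A compressed model where successive loop iterations stand in for successive watchdog passes; the real code tests its flags in a fixed priority order and clears each bit in the corresponding send path:

#include <stdio.h>

int main(void)
{
	unsigned int aq_required = 0x19;	/* three requests pending */
	int pass = 0;

	while (aq_required) {
		unsigned int bit = aq_required & -aq_required; /* lowest bit */

		printf("pass %d: issue op for bit 0x%x\n", ++pass, bit);
		aq_required &= ~bit;	/* cleared once the op is sent */
	}
	return 0;
}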
- */ - i40evf_init_rss(adapter); - adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_RSS; - goto watchdog_done; - } - if (adapter->aq_required & I40EVF_FLAG_AQ_GET_HENA) { - i40evf_get_hena(adapter); - goto watchdog_done; - } - if (adapter->aq_required & I40EVF_FLAG_AQ_SET_HENA) { - i40evf_set_hena(adapter); - goto watchdog_done; - } - if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_KEY) { - i40evf_set_rss_key(adapter); - goto watchdog_done; - } - if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_LUT) { - i40evf_set_rss_lut(adapter); - goto watchdog_done; - } - - if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_PROMISC) { - i40evf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC | - FLAG_VF_MULTICAST_PROMISC); - goto watchdog_done; - } - - if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_ALLMULTI) { - i40evf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC); - goto watchdog_done; - } - - if ((adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_PROMISC) && - (adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_ALLMULTI)) { - i40evf_set_promiscuous(adapter, 0); - goto watchdog_done; - } - - if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_CHANNELS) { - i40evf_enable_channels(adapter); - goto watchdog_done; - } - - if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_CHANNELS) { - i40evf_disable_channels(adapter); - goto watchdog_done; - } - - if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_CLOUD_FILTER) { - i40evf_add_cloud_filter(adapter); - goto watchdog_done; - } - - if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_CLOUD_FILTER) { - i40evf_del_cloud_filter(adapter); - goto watchdog_done; - } - - schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5)); - - if (adapter->state == __I40EVF_RUNNING) - i40evf_request_stats(adapter); -watchdog_done: - if (adapter->state == __I40EVF_RUNNING) - i40evf_detect_recover_hung(&adapter->vsi); - clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); -restart_watchdog: - if (adapter->state == __I40EVF_REMOVE) - return; - if (adapter->aq_required) - mod_timer(&adapter->watchdog_timer, - jiffies + msecs_to_jiffies(20)); - else - mod_timer(&adapter->watchdog_timer, jiffies + (HZ * 2)); - schedule_work(&adapter->adminq_task); -} - -static void i40evf_disable_vf(struct i40evf_adapter *adapter) -{ - struct i40evf_mac_filter *f, *ftmp; - struct i40evf_vlan_filter *fv, *fvtmp; - struct i40evf_cloud_filter *cf, *cftmp; - - adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED; - - /* We don't use netif_running() because it may be true prior to - * ndo_open() returning, so we can't assume it means all our open - * tasks have finished, since we're not holding the rtnl_lock here. 
- */ - if (adapter->state == __I40EVF_RUNNING) { - set_bit(__I40E_VSI_DOWN, adapter->vsi.state); - netif_carrier_off(adapter->netdev); - netif_tx_disable(adapter->netdev); - adapter->link_up = false; - i40evf_napi_disable_all(adapter); - i40evf_irq_disable(adapter); - i40evf_free_traffic_irqs(adapter); - i40evf_free_all_tx_resources(adapter); - i40evf_free_all_rx_resources(adapter); - } - - spin_lock_bh(&adapter->mac_vlan_list_lock); - - /* Delete all of the filters */ - list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { - list_del(&f->list); - kfree(f); - } - - list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) { - list_del(&fv->list); - kfree(fv); - } - - spin_unlock_bh(&adapter->mac_vlan_list_lock); - - spin_lock_bh(&adapter->cloud_filter_list_lock); - list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { - list_del(&cf->list); - kfree(cf); - adapter->num_cloud_filters--; - } - spin_unlock_bh(&adapter->cloud_filter_list_lock); - - i40evf_free_misc_irq(adapter); - i40evf_reset_interrupt_capability(adapter); - i40evf_free_queues(adapter); - i40evf_free_q_vectors(adapter); - kfree(adapter->vf_res); - i40evf_shutdown_adminq(&adapter->hw); - adapter->netdev->flags &= ~IFF_UP; - clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); - adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; - adapter->state = __I40EVF_DOWN; - wake_up(&adapter->down_waitqueue); - dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n"); -} - -#define I40EVF_RESET_WAIT_MS 10 -#define I40EVF_RESET_WAIT_COUNT 500 -/** - * i40evf_reset_task - Call-back task to handle hardware reset - * @work: pointer to work_struct - * - * During reset we need to shut down and reinitialize the admin queue - * before we can use it to communicate with the PF again. We also clear - * and reinit the rings because that context is lost as well. - **/ -static void i40evf_reset_task(struct work_struct *work) -{ - struct i40evf_adapter *adapter = container_of(work, - struct i40evf_adapter, - reset_task); - struct virtchnl_vf_resource *vfres = adapter->vf_res; - struct net_device *netdev = adapter->netdev; - struct i40e_hw *hw = &adapter->hw; - struct i40evf_vlan_filter *vlf; - struct i40evf_cloud_filter *cf; - struct i40evf_mac_filter *f; - u32 reg_val; - int i = 0, err; - bool running; - - /* When device is being removed it doesn't make sense to run the reset - * task, just return in such a case. - */ - if (test_bit(__I40EVF_IN_REMOVE_TASK, &adapter->crit_section)) - return; - - while (test_and_set_bit(__I40EVF_IN_CLIENT_TASK, - &adapter->crit_section)) - usleep_range(500, 1000); - if (CLIENT_ENABLED(adapter)) { - adapter->flags &= ~(I40EVF_FLAG_CLIENT_NEEDS_OPEN | - I40EVF_FLAG_CLIENT_NEEDS_CLOSE | - I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS | - I40EVF_FLAG_SERVICE_CLIENT_REQUESTED); - cancel_delayed_work_sync(&adapter->client_task); - i40evf_notify_client_close(&adapter->vsi, true); - } - i40evf_misc_irq_disable(adapter); - if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) { - adapter->flags &= ~I40EVF_FLAG_RESET_NEEDED; - /* Restart the AQ here. If we have been reset but didn't - * detect it, or if the PF had to reinit, our AQ will be hosed. 
- */ - i40evf_shutdown_adminq(hw); - i40evf_init_adminq(hw); - i40evf_request_reset(adapter); - } - adapter->flags |= I40EVF_FLAG_RESET_PENDING; - - /* poll until we see the reset actually happen */ - for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) { - reg_val = rd32(hw, I40E_VF_ARQLEN1) & - I40E_VF_ARQLEN1_ARQENABLE_MASK; - if (!reg_val) - break; - usleep_range(5000, 10000); - } - if (i == I40EVF_RESET_WAIT_COUNT) { - dev_info(&adapter->pdev->dev, "Never saw reset\n"); - goto continue_reset; /* act like the reset happened */ - } - - /* wait until the reset is complete and the PF is responding to us */ - for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) { - /* sleep first to make sure a minimum wait time is met */ - msleep(I40EVF_RESET_WAIT_MS); - - reg_val = rd32(hw, I40E_VFGEN_RSTAT) & - I40E_VFGEN_RSTAT_VFR_STATE_MASK; - if (reg_val == VIRTCHNL_VFR_VFACTIVE) - break; - } - - pci_set_master(adapter->pdev); - - if (i == I40EVF_RESET_WAIT_COUNT) { - dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n", - reg_val); - i40evf_disable_vf(adapter); - clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section); - return; /* Do not attempt to reinit. It's dead, Jim. */ - } - -continue_reset: - /* We don't use netif_running() because it may be true prior to - * ndo_open() returning, so we can't assume it means all our open - * tasks have finished, since we're not holding the rtnl_lock here. - */ - running = ((adapter->state == __I40EVF_RUNNING) || - (adapter->state == __I40EVF_RESETTING)); - - if (running) { - netif_carrier_off(netdev); - netif_tx_stop_all_queues(netdev); - adapter->link_up = false; - i40evf_napi_disable_all(adapter); - } - i40evf_irq_disable(adapter); - - adapter->state = __I40EVF_RESETTING; - adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; - - /* free the Tx/Rx rings and descriptors, might be better to just - * re-use them sometime in the future - */ - i40evf_free_all_rx_resources(adapter); - i40evf_free_all_tx_resources(adapter); - - adapter->flags |= I40EVF_FLAG_QUEUES_DISABLED; - /* kill and reinit the admin queue */ - i40evf_shutdown_adminq(hw); - adapter->current_op = VIRTCHNL_OP_UNKNOWN; - err = i40evf_init_adminq(hw); - if (err) - dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n", - err); - adapter->aq_required = 0; - - if (adapter->flags & I40EVF_FLAG_REINIT_ITR_NEEDED) { - err = i40evf_reinit_interrupt_scheme(adapter); - if (err) - goto reset_err; - } - - adapter->aq_required |= I40EVF_FLAG_AQ_GET_CONFIG; - adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS; - - spin_lock_bh(&adapter->mac_vlan_list_lock); - - /* re-add all MAC filters */ - list_for_each_entry(f, &adapter->mac_filter_list, list) { - f->add = true; - } - /* re-add all VLAN filters */ - list_for_each_entry(vlf, &adapter->vlan_filter_list, list) { - vlf->add = true; - } - - spin_unlock_bh(&adapter->mac_vlan_list_lock); - - /* check if TCs are running and re-add all cloud filters */ - spin_lock_bh(&adapter->cloud_filter_list_lock); - if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && - adapter->num_tc) { - list_for_each_entry(cf, &adapter->cloud_filter_list, list) { - cf->add = true; - } - } - spin_unlock_bh(&adapter->cloud_filter_list_lock); - - adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER; - adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER; - adapter->aq_required |= I40EVF_FLAG_AQ_ADD_CLOUD_FILTER; - i40evf_misc_irq_enable(adapter); - - mod_timer(&adapter->watchdog_timer, jiffies + 2); - - /* We were running when the reset started, so we need to restore some - * state here. 
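Both polling loops above share one shape: bounded retries with a sleep between register reads, and a distinct "gave up" outcome when the count is exhausted. Runnable sketch with a simulated RSTAT register standing in for the hardware read:

#include <stdio.h>

#define RESET_WAIT_COUNT 500

static int read_rstat(int t)
{
	return t < 7 ? 0 /* still in reset */ : 2 /* VFR_VFACTIVE */;
}

int main(void)
{
	int i;

	for (i = 0; i < RESET_WAIT_COUNT; i++) {
		/* the driver sleeps ~10 ms (msleep) before re-reading */
		if (read_rstat(i) == 2)
			break;
	}
	if (i == RESET_WAIT_COUNT)
		printf("reset never finished\n");
	else
		printf("PF active after %d polls\n", i + 1);
	return 0;
}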
- */ - if (running) { - /* allocate transmit descriptors */ - err = i40evf_setup_all_tx_resources(adapter); - if (err) - goto reset_err; - - /* allocate receive descriptors */ - err = i40evf_setup_all_rx_resources(adapter); - if (err) - goto reset_err; - - if (adapter->flags & I40EVF_FLAG_REINIT_ITR_NEEDED) { - err = i40evf_request_traffic_irqs(adapter, - netdev->name); - if (err) - goto reset_err; - - adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED; - } - - i40evf_configure(adapter); - - i40evf_up_complete(adapter); - - i40evf_irq_enable(adapter, true); - } else { - adapter->state = __I40EVF_DOWN; - wake_up(&adapter->down_waitqueue); - } - clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section); - clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); - - return; -reset_err: - clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section); - clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); - dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); - i40evf_close(netdev); -} - -/** - * i40evf_adminq_task - worker thread to clean the admin queue - * @work: pointer to work_struct containing our data - **/ -static void i40evf_adminq_task(struct work_struct *work) -{ - struct i40evf_adapter *adapter = - container_of(work, struct i40evf_adapter, adminq_task); - struct i40e_hw *hw = &adapter->hw; - struct i40e_arq_event_info event; - enum virtchnl_ops v_op; - i40e_status ret, v_ret; - u32 val, oldval; - u16 pending; - - if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) - goto out; - - event.buf_len = I40EVF_MAX_AQ_BUF_SIZE; - event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); - if (!event.msg_buf) - goto out; - - do { - ret = i40evf_clean_arq_element(hw, &event, &pending); - v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high); - v_ret = (i40e_status)le32_to_cpu(event.desc.cookie_low); - - if (ret || !v_op) - break; /* No event to process or error cleaning ARQ */ - - i40evf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf, - event.msg_len); - if (pending != 0) - memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE); - } while (pending); - - if ((adapter->flags & - (I40EVF_FLAG_RESET_PENDING | I40EVF_FLAG_RESET_NEEDED)) || - adapter->state == __I40EVF_RESETTING) - goto freedom; - - /* check for error indications */ - val = rd32(hw, hw->aq.arq.len); - if (val == 0xdeadbeef || val == 0xffffffff) /* device in reset */ - goto freedom; - oldval = val; - if (val & I40E_VF_ARQLEN1_ARQVFE_MASK) { - dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n"); - val &= ~I40E_VF_ARQLEN1_ARQVFE_MASK; - } - if (val & I40E_VF_ARQLEN1_ARQOVFL_MASK) { - dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n"); - val &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK; - } - if (val & I40E_VF_ARQLEN1_ARQCRIT_MASK) { - dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n"); - val &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK; - } - if (oldval != val) - wr32(hw, hw->aq.arq.len, val); - - val = rd32(hw, hw->aq.asq.len); - oldval = val; - if (val & I40E_VF_ATQLEN1_ATQVFE_MASK) { - dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n"); - val &= ~I40E_VF_ATQLEN1_ATQVFE_MASK; - } - if (val & I40E_VF_ATQLEN1_ATQOVFL_MASK) { - dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n"); - val &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK; - } - if (val & I40E_VF_ATQLEN1_ATQCRIT_MASK) { - dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n"); - val &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK; - } - if (oldval != val) - wr32(hw, hw->aq.asq.len, val); - -freedom: - kfree(event.msg_buf); -out: - /* 
re-enable Admin queue interrupt cause */ - i40evf_misc_irq_enable(adapter); -} - -/** - * i40evf_client_task - worker thread to perform client work - * @work: pointer to work_struct containing our data - * - * This task handles client interactions. Because client calls can be - * reentrant, we can't handle them in the watchdog. - **/ -static void i40evf_client_task(struct work_struct *work) -{ - struct i40evf_adapter *adapter = - container_of(work, struct i40evf_adapter, client_task.work); - - /* If we can't get the client bit, just give up. We'll be rescheduled - * later. - */ - - if (test_and_set_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section)) - return; - - if (adapter->flags & I40EVF_FLAG_SERVICE_CLIENT_REQUESTED) { - i40evf_client_subtask(adapter); - adapter->flags &= ~I40EVF_FLAG_SERVICE_CLIENT_REQUESTED; - goto out; - } - if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS) { - i40evf_notify_client_l2_params(&adapter->vsi); - adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS; - goto out; - } - if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_CLOSE) { - i40evf_notify_client_close(&adapter->vsi, false); - adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_CLOSE; - goto out; - } - if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_OPEN) { - i40evf_notify_client_open(&adapter->vsi); - adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_OPEN; - } -out: - clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section); -} - -/** - * i40evf_free_all_tx_resources - Free Tx Resources for All Queues - * @adapter: board private structure - * - * Free all transmit software resources - **/ -void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter) -{ - int i; - - if (!adapter->tx_rings) - return; - - for (i = 0; i < adapter->num_active_queues; i++) - if (adapter->tx_rings[i].desc) - i40evf_free_tx_resources(&adapter->tx_rings[i]); -} - -/** - * i40evf_setup_all_tx_resources - allocate all queues Tx resources - * @adapter: board private structure - * - * If this function returns with an error, then it's possible one or - * more of the rings is populated (while the rest are not). It is the - * callers duty to clean those orphaned rings. - * - * Return 0 on success, negative on failure - **/ -static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter) -{ - int i, err = 0; - - for (i = 0; i < adapter->num_active_queues; i++) { - adapter->tx_rings[i].count = adapter->tx_desc_count; - err = i40evf_setup_tx_descriptors(&adapter->tx_rings[i]); - if (!err) - continue; - dev_err(&adapter->pdev->dev, - "Allocation for Tx Queue %u failed\n", i); - break; - } - - return err; -} - -/** - * i40evf_setup_all_rx_resources - allocate all queues Rx resources - * @adapter: board private structure - * - * If this function returns with an error, then it's possible one or - * more of the rings is populated (while the rest are not). It is the - * callers duty to clean those orphaned rings. 
- *
- * Return 0 on success, negative on failure
- **/
-static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter)
-{
-	int i, err = 0;
-
-	for (i = 0; i < adapter->num_active_queues; i++) {
-		adapter->rx_rings[i].count = adapter->rx_desc_count;
-		err = i40evf_setup_rx_descriptors(&adapter->rx_rings[i]);
-		if (!err)
-			continue;
-		dev_err(&adapter->pdev->dev,
-			"Allocation for Rx Queue %u failed\n", i);
-		break;
-	}
-	return err;
-}
-
-/**
- * i40evf_free_all_rx_resources - Free Rx Resources for All Queues
- * @adapter: board private structure
- *
- * Free all receive software resources
- **/
-void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter)
-{
-	int i;
-
-	if (!adapter->rx_rings)
-		return;
-
-	for (i = 0; i < adapter->num_active_queues; i++)
-		if (adapter->rx_rings[i].desc)
-			i40evf_free_rx_resources(&adapter->rx_rings[i]);
-}
-
-/**
- * i40evf_validate_tx_bandwidth - validate the max Tx bandwidth
- * @adapter: board private structure
- * @max_tx_rate: max Tx bw for a tc
- **/
-static int i40evf_validate_tx_bandwidth(struct i40evf_adapter *adapter,
-					u64 max_tx_rate)
-{
-	int speed = 0, ret = 0;
-
-	switch (adapter->link_speed) {
-	case I40E_LINK_SPEED_40GB:
-		speed = 40000;
-		break;
-	case I40E_LINK_SPEED_25GB:
-		speed = 25000;
-		break;
-	case I40E_LINK_SPEED_20GB:
-		speed = 20000;
-		break;
-	case I40E_LINK_SPEED_10GB:
-		speed = 10000;
-		break;
-	case I40E_LINK_SPEED_1GB:
-		speed = 1000;
-		break;
-	case I40E_LINK_SPEED_100MB:
-		speed = 100;
-		break;
-	default:
-		break;
-	}
-
-	if (max_tx_rate > speed) {
-		dev_err(&adapter->pdev->dev,
-			"Invalid tx rate specified\n");
-		ret = -EINVAL;
-	}
-
-	return ret;
-}
-
-/**
- * i40evf_validate_ch_config - validate queue mapping info
- * @adapter: board private structure
- * @mqprio_qopt: queue parameters
- *
- * This function validates if the config provided by the user to
- * configure queue channels is valid or not. Returns 0 on a valid
- * config.
- **/
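On units in i40evf_validate_tx_bandwidth() above and i40evf_validate_ch_config() below: mqprio rates arrive in bytes per second, and dividing by I40EVF_MBPS_DIVISOR converts them to Mbit/s for comparison against the link speed. The divisor is assumed here to be 125000, which is one Mbit/s expressed in bytes/s:

#include <stdio.h>

#define MBPS_DIVISOR 125000ULL	/* assumed I40EVF_MBPS_DIVISOR value */

int main(void)
{
	unsigned long long max_rate_Bps = 1250000000ULL;	/* 10 Gbit/s */
	unsigned long long speed_mbps = 40000;			/* 40G link */
	unsigned long long tx_rate = max_rate_Bps / MBPS_DIVISOR;

	printf("%llu Mbit/s requested, %s\n", tx_rate,
	       tx_rate > speed_mbps ? "rejected" : "ok");
	return 0;
}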
-static int i40evf_validate_ch_config(struct i40evf_adapter *adapter,
-				     struct tc_mqprio_qopt_offload *mqprio_qopt)
-{
-	u64 total_max_rate = 0;
-	int i, num_qps = 0;
-	u64 tx_rate = 0;
-	int ret = 0;
-
-	if (mqprio_qopt->qopt.num_tc > I40EVF_MAX_TRAFFIC_CLASS ||
-	    mqprio_qopt->qopt.num_tc < 1)
-		return -EINVAL;
-
-	for (i = 0; i <= mqprio_qopt->qopt.num_tc - 1; i++) {
-		if (!mqprio_qopt->qopt.count[i] ||
-		    mqprio_qopt->qopt.offset[i] != num_qps)
-			return -EINVAL;
-		if (mqprio_qopt->min_rate[i]) {
-			dev_err(&adapter->pdev->dev,
-				"Invalid min tx rate (greater than 0) specified\n");
-			return -EINVAL;
-		}
-		/* convert to Mbps */
-		tx_rate = div_u64(mqprio_qopt->max_rate[i],
-				  I40EVF_MBPS_DIVISOR);
-		total_max_rate += tx_rate;
-		num_qps += mqprio_qopt->qopt.count[i];
-	}
-	if (num_qps > I40EVF_MAX_REQ_QUEUES)
-		return -EINVAL;
-
-	ret = i40evf_validate_tx_bandwidth(adapter, total_max_rate);
-	return ret;
-}
-
-/**
- * i40evf_del_all_cloud_filters - delete all cloud filters on the
- * traffic classes
- * @adapter: board private structure
- **/
-static void i40evf_del_all_cloud_filters(struct i40evf_adapter *adapter)
-{
-	struct i40evf_cloud_filter *cf, *cftmp;
-
-	spin_lock_bh(&adapter->cloud_filter_list_lock);
-	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
-				 list) {
-		list_del(&cf->list);
-		kfree(cf);
-		adapter->num_cloud_filters--;
-	}
-	spin_unlock_bh(&adapter->cloud_filter_list_lock);
-}
-
-/**
- * __i40evf_setup_tc - configure multiple traffic classes
- * @netdev: network interface device structure
- * @type_data: tc offload data
- *
- * This function processes the config information provided by the
- * user to configure traffic classes/queue channels and packages the
- * information to request the PF to setup traffic classes.
- *
- * Returns 0 on success.
- **/
-static int __i40evf_setup_tc(struct net_device *netdev, void *type_data)
-{
-	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-	struct virtchnl_vf_resource *vfres = adapter->vf_res;
-	u8 num_tc = 0, total_qps = 0;
-	int ret = 0, netdev_tc = 0;
-	u64 max_tx_rate;
-	u16 mode;
-	int i;
-
-	num_tc = mqprio_qopt->qopt.num_tc;
-	mode = mqprio_qopt->mode;
-
-	/* delete queue_channel */
-	if (!mqprio_qopt->qopt.hw) {
-		if (adapter->ch_config.state == __I40EVF_TC_RUNNING) {
-			/* reset the tc configuration */
-			netdev_reset_tc(netdev);
-			adapter->num_tc = 0;
-			netif_tx_stop_all_queues(netdev);
-			netif_tx_disable(netdev);
-			i40evf_del_all_cloud_filters(adapter);
-			adapter->aq_required = I40EVF_FLAG_AQ_DISABLE_CHANNELS;
-			goto exit;
-		} else {
-			return -EINVAL;
-		}
-	}
-
-	/* add queue channel */
-	if (mode == TC_MQPRIO_MODE_CHANNEL) {
-		if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) {
-			dev_err(&adapter->pdev->dev, "ADq not supported\n");
-			return -EOPNOTSUPP;
-		}
-		if (adapter->ch_config.state != __I40EVF_TC_INVALID) {
-			dev_err(&adapter->pdev->dev, "TC configuration already exists\n");
-			return -EINVAL;
-		}
-
-		ret = i40evf_validate_ch_config(adapter, mqprio_qopt);
-		if (ret)
-			return ret;
-		/* Return if same TC config is requested */
-		if (adapter->num_tc == num_tc)
-			return 0;
-		adapter->num_tc = num_tc;
-
-		for (i = 0; i < I40EVF_MAX_TRAFFIC_CLASS; i++) {
-			if (i < num_tc) {
-				adapter->ch_config.ch_info[i].count =
-					mqprio_qopt->qopt.count[i];
-				adapter->ch_config.ch_info[i].offset =
-					mqprio_qopt->qopt.offset[i];
-				total_qps += mqprio_qopt->qopt.count[i];
-				max_tx_rate = mqprio_qopt->max_rate[i];
-				/* convert to Mbps */
-				max_tx_rate = div_u64(max_tx_rate,
I40EVF_MBPS_DIVISOR); - adapter->ch_config.ch_info[i].max_tx_rate = - max_tx_rate; - } else { - adapter->ch_config.ch_info[i].count = 1; - adapter->ch_config.ch_info[i].offset = 0; - } - } - adapter->ch_config.total_qps = total_qps; - netif_tx_stop_all_queues(netdev); - netif_tx_disable(netdev); - adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_CHANNELS; - netdev_reset_tc(netdev); - /* Report the tc mapping up the stack */ - netdev_set_num_tc(adapter->netdev, num_tc); - for (i = 0; i < I40EVF_MAX_TRAFFIC_CLASS; i++) { - u16 qcount = mqprio_qopt->qopt.count[i]; - u16 qoffset = mqprio_qopt->qopt.offset[i]; - - if (i < num_tc) - netdev_set_tc_queue(netdev, netdev_tc++, qcount, - qoffset); - } - } -exit: - return ret; -} - -/** - * i40evf_parse_cls_flower - Parse tc flower filters provided by kernel - * @adapter: board private structure - * @cls_flower: pointer to struct tc_cls_flower_offload - * @filter: pointer to cloud filter structure - */ -static int i40evf_parse_cls_flower(struct i40evf_adapter *adapter, - struct tc_cls_flower_offload *f, - struct i40evf_cloud_filter *filter) -{ - u16 n_proto_mask = 0; - u16 n_proto_key = 0; - u8 field_flags = 0; - u16 addr_type = 0; - u16 n_proto = 0; - int i = 0; - struct virtchnl_filter *vf = &filter->f; - - if (f->dissector->used_keys & - ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | - BIT(FLOW_DISSECTOR_KEY_BASIC) | - BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_VLAN) | - BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_PORTS) | - BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) { - dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n", - f->dissector->used_keys); - return -EOPNOTSUPP; - } - - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) { - struct flow_dissector_key_keyid *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ENC_KEYID, - f->mask); - - if (mask->keyid != 0) - field_flags |= I40EVF_CLOUD_FIELD_TEN_ID; - } - - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { - struct flow_dissector_key_basic *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_BASIC, - f->key); - - struct flow_dissector_key_basic *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_BASIC, - f->mask); - n_proto_key = ntohs(key->n_proto); - n_proto_mask = ntohs(mask->n_proto); - - if (n_proto_key == ETH_P_ALL) { - n_proto_key = 0; - n_proto_mask = 0; - } - n_proto = n_proto_key & n_proto_mask; - if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) - return -EINVAL; - if (n_proto == ETH_P_IPV6) { - /* specify flow type as TCP IPv6 */ - vf->flow_type = VIRTCHNL_TCP_V6_FLOW; - } - - if (key->ip_proto != IPPROTO_TCP) { - dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n"); - return -EINVAL; - } - } - - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { - struct flow_dissector_key_eth_addrs *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ETH_ADDRS, - f->key); - - struct flow_dissector_key_eth_addrs *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ETH_ADDRS, - f->mask); - /* use is_broadcast and is_zero to check for all 0xf or 0 */ - if (!is_zero_ether_addr(mask->dst)) { - if (is_broadcast_ether_addr(mask->dst)) { - field_flags |= I40EVF_CLOUD_FIELD_OMAC; - } else { - dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n", - mask->dst); - return I40E_ERR_CONFIG; - } - } - - if (!is_zero_ether_addr(mask->src)) { - if (is_broadcast_ether_addr(mask->src)) { - 
field_flags |= I40EVF_CLOUD_FIELD_IMAC;
-			} else {
-				dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n",
-					mask->src);
-				return I40E_ERR_CONFIG;
-			}
-		}
-
-		if (!is_zero_ether_addr(key->dst))
-			if (is_valid_ether_addr(key->dst) ||
-			    is_multicast_ether_addr(key->dst)) {
-				/* set the mask if a valid dst_mac address */
-				for (i = 0; i < ETH_ALEN; i++)
-					vf->mask.tcp_spec.dst_mac[i] |= 0xff;
-				ether_addr_copy(vf->data.tcp_spec.dst_mac,
-						key->dst);
-			}
-
-		if (!is_zero_ether_addr(key->src))
-			if (is_valid_ether_addr(key->src) ||
-			    is_multicast_ether_addr(key->src)) {
-				/* set the mask if a valid src_mac address */
-				for (i = 0; i < ETH_ALEN; i++)
-					vf->mask.tcp_spec.src_mac[i] |= 0xff;
-				ether_addr_copy(vf->data.tcp_spec.src_mac,
-						key->src);
-			}
-	}
-
-	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
-		struct flow_dissector_key_vlan *key =
-			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_VLAN,
-						  f->key);
-		struct flow_dissector_key_vlan *mask =
-			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_VLAN,
-						  f->mask);
-
-		if (mask->vlan_id) {
-			if (mask->vlan_id == VLAN_VID_MASK) {
-				field_flags |= I40EVF_CLOUD_FIELD_IVLAN;
-			} else {
-				dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n",
-					mask->vlan_id);
-				return I40E_ERR_CONFIG;
-			}
-		}
-		vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff);
-		vf->data.tcp_spec.vlan_id = cpu_to_be16(key->vlan_id);
-	}
-
-	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
-		struct flow_dissector_key_control *key =
-			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_CONTROL,
-						  f->key);
-
-		addr_type = key->addr_type;
-	}
-
-	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
-		struct flow_dissector_key_ipv4_addrs *key =
-			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
-						  f->key);
-		struct flow_dissector_key_ipv4_addrs *mask =
-			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
-						  f->mask);
-
-		if (mask->dst) {
-			if (mask->dst == cpu_to_be32(0xffffffff)) {
-				field_flags |= I40EVF_CLOUD_FIELD_IIP;
-			} else {
-				dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
-					be32_to_cpu(mask->dst));
-				return I40E_ERR_CONFIG;
-			}
-		}
-
-		if (mask->src) {
-			if (mask->src == cpu_to_be32(0xffffffff)) {
-				field_flags |= I40EVF_CLOUD_FIELD_IIP;
-			} else {
-				dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
-					be32_to_cpu(mask->src));
-				return I40E_ERR_CONFIG;
-			}
-		}
-
-		if (field_flags & I40EVF_CLOUD_FIELD_TEN_ID) {
-			dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
-			return I40E_ERR_CONFIG;
-		}
-		if (key->dst) {
-			vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
-			vf->data.tcp_spec.dst_ip[0] = key->dst;
-		}
-		if (key->src) {
-			vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff);
-			vf->data.tcp_spec.src_ip[0] = key->src;
-		}
-	}
-
-	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
-		struct flow_dissector_key_ipv6_addrs *key =
-			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
-						  f->key);
-		struct flow_dissector_key_ipv6_addrs *mask =
-			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
-						  f->mask);
-
-		/* validate mask, make sure it is not IPV6_ADDR_ANY */
-		if (ipv6_addr_any(&mask->dst)) {
-			dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
-				IPV6_ADDR_ANY);
-			return I40E_ERR_CONFIG;
-		}
-
-		/* src and dest IPv6 address should not be LOOPBACK
-		 * (0:0:0:0:0:0:0:1) which can be represented as ::1
-		 */
-		if (ipv6_addr_loopback(&key->dst) ||
-		    ipv6_addr_loopback(&key->src))
{ - dev_err(&adapter->pdev->dev, - "ipv6 addr should not be loopback\n"); - return I40E_ERR_CONFIG; - } - if (!ipv6_addr_any(&mask->dst) || !ipv6_addr_any(&mask->src)) - field_flags |= I40EVF_CLOUD_FIELD_IIP; - - for (i = 0; i < 4; i++) - vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff); - memcpy(&vf->data.tcp_spec.dst_ip, &key->dst.s6_addr32, - sizeof(vf->data.tcp_spec.dst_ip)); - for (i = 0; i < 4; i++) - vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff); - memcpy(&vf->data.tcp_spec.src_ip, &key->src.s6_addr32, - sizeof(vf->data.tcp_spec.src_ip)); - } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) { - struct flow_dissector_key_ports *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_PORTS, - f->key); - struct flow_dissector_key_ports *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_PORTS, - f->mask); - - if (mask->src) { - if (mask->src == cpu_to_be16(0xffff)) { - field_flags |= I40EVF_CLOUD_FIELD_IIP; - } else { - dev_err(&adapter->pdev->dev, "Bad src port mask %u\n", - be16_to_cpu(mask->src)); - return I40E_ERR_CONFIG; - } - } - - if (mask->dst) { - if (mask->dst == cpu_to_be16(0xffff)) { - field_flags |= I40EVF_CLOUD_FIELD_IIP; - } else { - dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n", - be16_to_cpu(mask->dst)); - return I40E_ERR_CONFIG; - } - } - if (key->dst) { - vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff); - vf->data.tcp_spec.dst_port = key->dst; - } - - if (key->src) { - vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff); - vf->data.tcp_spec.src_port = key->src; - } - } - vf->field_flags = field_flags; - - return 0; -} - -/** - * i40evf_handle_tclass - Forward to a traffic class on the device - * @adapter: board private structure - * @tc: traffic class index on the device - * @filter: pointer to cloud filter structure - */ -static int i40evf_handle_tclass(struct i40evf_adapter *adapter, u32 tc, - struct i40evf_cloud_filter *filter) -{ - if (tc == 0) - return 0; - if (tc < adapter->num_tc) { - if (!filter->f.data.tcp_spec.dst_port) { - dev_err(&adapter->pdev->dev, - "Specify destination port to redirect to traffic class other than TC0\n"); - return -EINVAL; - } - } - /* redirect to a traffic class on the same device */ - filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT; - filter->f.action_meta = tc; - return 0; -} - -/** - * i40evf_configure_clsflower - Add tc flower filters - * @adapter: board private structure - * @cls_flower: Pointer to struct tc_cls_flower_offload - */ -static int i40evf_configure_clsflower(struct i40evf_adapter *adapter, - struct tc_cls_flower_offload *cls_flower) -{ - int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid); - struct i40evf_cloud_filter *filter = NULL; - int err = -EINVAL, count = 50; - - if (tc < 0) { - dev_err(&adapter->pdev->dev, "Invalid traffic class\n"); - return -EINVAL; - } - - filter = kzalloc(sizeof(*filter), GFP_KERNEL); - if (!filter) - return -ENOMEM; - - while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, - &adapter->crit_section)) { - if (--count == 0) - goto err; - udelay(1); - } - - filter->cookie = cls_flower->cookie; - - /* set the mask to all zeroes to begin with */ - memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec)); - /* start out with flow type and eth type IPv4 to begin with */ - filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW; - err = i40evf_parse_cls_flower(adapter, cls_flower, filter); - if (err < 0) - goto err; - - err = i40evf_handle_tclass(adapter, tc, filter); - if (err < 0) - goto err; - - /* add filter to 
the list */ - spin_lock_bh(&adapter->cloud_filter_list_lock); - list_add_tail(&filter->list, &adapter->cloud_filter_list); - adapter->num_cloud_filters++; - filter->add = true; - adapter->aq_required |= I40EVF_FLAG_AQ_ADD_CLOUD_FILTER; - spin_unlock_bh(&adapter->cloud_filter_list_lock); -err: - if (err) - kfree(filter); - - clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); - return err; -} - -/* i40evf_find_cf - Find the cloud filter in the list - * @adapter: Board private structure - * @cookie: filter specific cookie - * - * Returns ptr to the filter object or NULL. Must be called while holding the - * cloud_filter_list_lock. - */ -static struct i40evf_cloud_filter *i40evf_find_cf(struct i40evf_adapter *adapter, - unsigned long *cookie) -{ - struct i40evf_cloud_filter *filter = NULL; - - if (!cookie) - return NULL; - - list_for_each_entry(filter, &adapter->cloud_filter_list, list) { - if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie))) - return filter; - } - return NULL; -} - -/** - * i40evf_delete_clsflower - Remove tc flower filters - * @adapter: board private structure - * @cls_flower: Pointer to struct tc_cls_flower_offload - */ -static int i40evf_delete_clsflower(struct i40evf_adapter *adapter, - struct tc_cls_flower_offload *cls_flower) -{ - struct i40evf_cloud_filter *filter = NULL; - int err = 0; - - spin_lock_bh(&adapter->cloud_filter_list_lock); - filter = i40evf_find_cf(adapter, &cls_flower->cookie); - if (filter) { - filter->del = true; - adapter->aq_required |= I40EVF_FLAG_AQ_DEL_CLOUD_FILTER; - } else { - err = -EINVAL; - } - spin_unlock_bh(&adapter->cloud_filter_list_lock); - - return err; -} - -/** - * i40evf_setup_tc_cls_flower - flower classifier offloads - * @netdev: net device to configure - * @type_data: offload data - */ -static int i40evf_setup_tc_cls_flower(struct i40evf_adapter *adapter, - struct tc_cls_flower_offload *cls_flower) -{ - if (cls_flower->common.chain_index) - return -EOPNOTSUPP; - - switch (cls_flower->command) { - case TC_CLSFLOWER_REPLACE: - return i40evf_configure_clsflower(adapter, cls_flower); - case TC_CLSFLOWER_DESTROY: - return i40evf_delete_clsflower(adapter, cls_flower); - case TC_CLSFLOWER_STATS: - return -EOPNOTSUPP; - default: - return -EOPNOTSUPP; - } -} - -/** - * i40evf_setup_tc_block_cb - block callback for tc - * @type: type of offload - * @type_data: offload data - * @cb_priv: - * - * This function is the block callback for traffic classes - **/ -static int i40evf_setup_tc_block_cb(enum tc_setup_type type, void *type_data, - void *cb_priv) -{ - switch (type) { - case TC_SETUP_CLSFLOWER: - return i40evf_setup_tc_cls_flower(cb_priv, type_data); - default: - return -EOPNOTSUPP; - } -} - -/** - * i40evf_setup_tc_block - register callbacks for tc - * @netdev: network interface device structure - * @f: tc offload data - * - * This function registers block callbacks for tc - * offloads - **/ -static int i40evf_setup_tc_block(struct net_device *dev, - struct tc_block_offload *f) -{ - struct i40evf_adapter *adapter = netdev_priv(dev); - - if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) - return -EOPNOTSUPP; - - switch (f->command) { - case TC_BLOCK_BIND: - return tcf_block_cb_register(f->block, i40evf_setup_tc_block_cb, - adapter, adapter, f->extack); - case TC_BLOCK_UNBIND: - tcf_block_cb_unregister(f->block, i40evf_setup_tc_block_cb, - adapter); - return 0; - default: - return -EOPNOTSUPP; - } -} - -/** - * i40evf_setup_tc - configure multiple traffic classes - * @netdev: network interface device 
structure
- * @type: type of offload
- * @type_data: tc offload data
- *
- * This function is the callback to ndo_setup_tc in the
- * netdev_ops.
- *
- * Returns 0 on success
- **/
-static int i40evf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
-			   void *type_data)
-{
-	switch (type) {
-	case TC_SETUP_QDISC_MQPRIO:
-		return __i40evf_setup_tc(netdev, type_data);
-	case TC_SETUP_BLOCK:
-		return i40evf_setup_tc_block(netdev, type_data);
-	default:
-		return -EOPNOTSUPP;
-	}
-}
-
-/**
- * i40evf_open - Called when a network interface is made active
- * @netdev: network interface device structure
- *
- * Returns 0 on success, negative value on failure
- *
- * The open entry point is called when a network interface is made
- * active by the system (IFF_UP). At this point all resources needed
- * for transmit and receive operations are allocated, the interrupt
- * handler is registered with the OS, the watchdog timer is started,
- * and the stack is notified that the interface is ready.
- **/
-static int i40evf_open(struct net_device *netdev)
-{
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-	int err;
-
-	if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
-		dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
-		return -EIO;
-	}
-
-	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
-				&adapter->crit_section))
-		usleep_range(500, 1000);
-
-	if (adapter->state != __I40EVF_DOWN) {
-		err = -EBUSY;
-		goto err_unlock;
-	}
-
-	/* allocate transmit descriptors */
-	err = i40evf_setup_all_tx_resources(adapter);
-	if (err)
-		goto err_setup_tx;
-
-	/* allocate receive descriptors */
-	err = i40evf_setup_all_rx_resources(adapter);
-	if (err)
-		goto err_setup_rx;
-
-	/* clear any pending interrupts, may auto mask */
-	err = i40evf_request_traffic_irqs(adapter, netdev->name);
-	if (err)
-		goto err_req_irq;
-
-	spin_lock_bh(&adapter->mac_vlan_list_lock);
-
-	i40evf_add_filter(adapter, adapter->hw.mac.addr);
-
-	spin_unlock_bh(&adapter->mac_vlan_list_lock);
-
-	i40evf_configure(adapter);
-
-	i40evf_up_complete(adapter);
-
-	i40evf_irq_enable(adapter, true);
-
-	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
-
-	return 0;
-
-err_req_irq:
-	i40evf_down(adapter);
-	i40evf_free_traffic_irqs(adapter);
-err_setup_rx:
-	i40evf_free_all_rx_resources(adapter);
-err_setup_tx:
-	i40evf_free_all_tx_resources(adapter);
-err_unlock:
-	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
-
-	return err;
-}
-
-/**
- * i40evf_close - Disables a network interface
- * @netdev: network interface device structure
- *
- * Returns 0, this is not allowed to fail
- *
- * The close entry point is called when an interface is de-activated
- * by the OS. The hardware is still under the driver's control, but
- * needs to be disabled. All IRQs except vector 0 (reserved for admin queue)
- * are freed, along with all transmit and receive resources.
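i40evf_open above is a textbook instance of the kernel's goto-unwind error handling: every acquired resource gets a release label, and a failure jumps to the label of the last acquisition that succeeded, so the labels fall through in reverse order. The skeleton of the idiom, with all names as hypothetical stand-ins:

	static int alloc_tx(void) { return 0; }		/* stand-in helpers */
	static int alloc_rx(void) { return 0; }
	static int request_irqs(void) { return 0; }
	static void free_tx(void) { }
	static void free_rx(void) { }

	static int open_example(void)
	{
		int err;

		err = alloc_tx();
		if (err)
			goto out;
		err = alloc_rx();
		if (err)
			goto err_tx;
		err = request_irqs();
		if (err)
			goto err_rx;
		return 0;		/* success: everything stays allocated */

	err_rx:
		free_rx();		/* labels fall through in reverse order */
	err_tx:
		free_tx();
	out:
		return err;
	}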
- **/
-static int i40evf_close(struct net_device *netdev)
-{
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-	int status;
-
-	if (adapter->state <= __I40EVF_DOWN_PENDING)
-		return 0;
-
-	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
-				&adapter->crit_section))
-		usleep_range(500, 1000);
-
-	set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
-	if (CLIENT_ENABLED(adapter))
-		adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_CLOSE;
-
-	i40evf_down(adapter);
-	adapter->state = __I40EVF_DOWN_PENDING;
-	i40evf_free_traffic_irqs(adapter);
-
-	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
-
-	/* We explicitly don't free resources here because the hardware is
-	 * still active and can DMA into memory. Resources are cleared in
-	 * i40evf_virtchnl_completion() after we get confirmation from the PF
-	 * driver that the rings have been stopped.
-	 *
-	 * Also, we wait for state to transition to __I40EVF_DOWN before
-	 * returning. State change occurs in i40evf_virtchnl_completion() after
-	 * VF resources are released (which occurs after PF driver processes and
-	 * responds to admin queue commands).
-	 */
-
-	status = wait_event_timeout(adapter->down_waitqueue,
-				    adapter->state == __I40EVF_DOWN,
-				    msecs_to_jiffies(200));
-	if (!status)
-		netdev_warn(netdev, "Device resources not yet released\n");
-	return 0;
-}
-
-/**
- * i40evf_change_mtu - Change the Maximum Transfer Unit
- * @netdev: network interface device structure
- * @new_mtu: new value for maximum frame size
- *
- * Returns 0 on success, negative on failure
- **/
-static int i40evf_change_mtu(struct net_device *netdev, int new_mtu)
-{
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-
-	netdev->mtu = new_mtu;
-	if (CLIENT_ENABLED(adapter)) {
-		i40evf_notify_client_l2_params(&adapter->vsi);
-		adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
-	}
-	adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
-	schedule_work(&adapter->reset_task);
-
-	return 0;
-}
-
-/**
- * i40evf_set_features - set the netdev feature flags
- * @netdev: ptr to the netdev being adjusted
- * @features: the feature set that the stack is suggesting
- * Note: expects to be called while under rtnl_lock()
- **/
-static int i40evf_set_features(struct net_device *netdev,
-			       netdev_features_t features)
-{
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-
-	/* Don't allow changing VLAN_RX flag when adapter is not capable
-	 * of VLAN offload
-	 */
-	if (!VLAN_ALLOWED(adapter)) {
-		if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX)
-			return -EINVAL;
-	} else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) {
-		if (features & NETIF_F_HW_VLAN_CTAG_RX)
-			adapter->aq_required |=
-				I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
-		else
-			adapter->aq_required |=
-				I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
-	}
-
-	return 0;
-}
-
-/**
- * i40evf_features_check - Validate encapsulated packet conforms to limits
- * @skb: skb buff
- * @dev: This physical port's netdev
- * @features: Offload features that the stack believes apply
- **/
-static netdev_features_t i40evf_features_check(struct sk_buff *skb,
-					       struct net_device *dev,
-					       netdev_features_t features)
-{
-	size_t len;
-
-	/* No point in doing any of this if neither checksum nor GSO are
-	 * being requested for this frame. We can rule out both by just
-	 * checking for CHECKSUM_PARTIAL
-	 */
-	if (skb->ip_summed != CHECKSUM_PARTIAL)
-		return features;
-
-	/* We cannot support GSO if the MSS is going to be less than
-	 * 64 bytes. If it is then we need to drop support for GSO.
- */
-	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
-		features &= ~NETIF_F_GSO_MASK;
-
-	/* MACLEN can support at most 63 words */
-	len = skb_network_header(skb) - skb->data;
-	if (len & ~(63 * 2))
-		goto out_err;
-
-	/* IPLEN and EIPLEN can support at most 127 dwords */
-	len = skb_transport_header(skb) - skb_network_header(skb);
-	if (len & ~(127 * 4))
-		goto out_err;
-
-	if (skb->encapsulation) {
-		/* L4TUNLEN can support 127 words */
-		len = skb_inner_network_header(skb) - skb_transport_header(skb);
-		if (len & ~(127 * 2))
-			goto out_err;
-
-		/* IPLEN can support at most 127 dwords */
-		len = skb_inner_transport_header(skb) -
-		      skb_inner_network_header(skb);
-		if (len & ~(127 * 4))
-			goto out_err;
-	}
-
-	/* No need to validate L4LEN as TCP is the only protocol with a
-	 * flexible value and we support all possible values supported
-	 * by TCP, which is at most 15 dwords
-	 */
-
-	return features;
-out_err:
-	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
-}
-
-/**
- * i40evf_fix_features - fix up the netdev feature bits
- * @netdev: our net device
- * @features: desired feature bits
- *
- * Returns fixed-up features bits
- **/
-static netdev_features_t i40evf_fix_features(struct net_device *netdev,
-					     netdev_features_t features)
-{
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-
-	if (adapter->vf_res &&
-	    !(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
-		features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
-			      NETIF_F_HW_VLAN_CTAG_RX |
-			      NETIF_F_HW_VLAN_CTAG_FILTER);
-
-	return features;
-}
-
-static const struct net_device_ops i40evf_netdev_ops = {
-	.ndo_open		= i40evf_open,
-	.ndo_stop		= i40evf_close,
-	.ndo_start_xmit		= i40evf_xmit_frame,
-	.ndo_set_rx_mode	= i40evf_set_rx_mode,
-	.ndo_validate_addr	= eth_validate_addr,
-	.ndo_set_mac_address	= i40evf_set_mac,
-	.ndo_change_mtu		= i40evf_change_mtu,
-	.ndo_tx_timeout		= i40evf_tx_timeout,
-	.ndo_vlan_rx_add_vid	= i40evf_vlan_rx_add_vid,
-	.ndo_vlan_rx_kill_vid	= i40evf_vlan_rx_kill_vid,
-	.ndo_features_check	= i40evf_features_check,
-	.ndo_fix_features	= i40evf_fix_features,
-	.ndo_set_features	= i40evf_set_features,
-	.ndo_setup_tc		= i40evf_setup_tc,
-};
-
-/**
- * i40evf_check_reset_complete - check that VF reset is complete
- * @hw: pointer to hw struct
- *
- * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
- **/
-static int i40evf_check_reset_complete(struct i40e_hw *hw)
-{
-	u32 rstat;
-	int i;
-
-	for (i = 0; i < 100; i++) {
-		rstat = rd32(hw, I40E_VFGEN_RSTAT) &
-			I40E_VFGEN_RSTAT_VFR_STATE_MASK;
-		if ((rstat == VIRTCHNL_VFR_VFACTIVE) ||
-		    (rstat == VIRTCHNL_VFR_COMPLETED))
-			return 0;
-		usleep_range(10, 20);
-	}
-	return -EBUSY;
-}
-
-/**
- * i40evf_process_config - Process the config information we got from the PF
- * @adapter: board private structure
- *
- * Verify that we have a valid config struct, and set up our netdev features
- * and our VSI struct.
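The header-length tests in i40evf_features_check read tersely but are combined range-and-alignment checks: len & ~(63 * 2) is nonzero exactly when len is odd or exceeds 126 bytes, i.e. when the MAC header does not fit in 63 two-byte words. A small self-contained demonstration with made-up sample lengths:

	#include <stdio.h>

	int main(void)
	{
		size_t lens[] = { 14, 126, 127, 128 };	/* sample header lengths */
		size_t i;

		for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
			printf("len=%3zu -> %s\n", lens[i],
			       (lens[i] & ~(size_t)(63 * 2)) ? "reject" : "ok");
		/* 14 and 126 pass; 127 fails (odd); 128 fails (> 63 words) */
		return 0;
	}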
- **/ -int i40evf_process_config(struct i40evf_adapter *adapter) -{ - struct virtchnl_vf_resource *vfres = adapter->vf_res; - int i, num_req_queues = adapter->num_req_queues; - struct net_device *netdev = adapter->netdev; - struct i40e_vsi *vsi = &adapter->vsi; - netdev_features_t hw_enc_features; - netdev_features_t hw_features; - - /* got VF config message back from PF, now we can parse it */ - for (i = 0; i < vfres->num_vsis; i++) { - if (vfres->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV) - adapter->vsi_res = &vfres->vsi_res[i]; - } - if (!adapter->vsi_res) { - dev_err(&adapter->pdev->dev, "No LAN VSI found\n"); - return -ENODEV; - } - - if (num_req_queues && - num_req_queues != adapter->vsi_res->num_queue_pairs) { - /* Problem. The PF gave us fewer queues than what we had - * negotiated in our request. Need a reset to see if we can't - * get back to a working state. - */ - dev_err(&adapter->pdev->dev, - "Requested %d queues, but PF only gave us %d.\n", - num_req_queues, - adapter->vsi_res->num_queue_pairs); - adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED; - adapter->num_req_queues = adapter->vsi_res->num_queue_pairs; - i40evf_schedule_reset(adapter); - return -ENODEV; - } - adapter->num_req_queues = 0; - - hw_enc_features = NETIF_F_SG | - NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM | - NETIF_F_HIGHDMA | - NETIF_F_SOFT_FEATURES | - NETIF_F_TSO | - NETIF_F_TSO_ECN | - NETIF_F_TSO6 | - NETIF_F_SCTP_CRC | - NETIF_F_RXHASH | - NETIF_F_RXCSUM | - 0; - - /* advertise to stack only if offloads for encapsulated packets is - * supported - */ - if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) { - hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_GRE | - NETIF_F_GSO_GRE_CSUM | - NETIF_F_GSO_IPXIP4 | - NETIF_F_GSO_IPXIP6 | - NETIF_F_GSO_UDP_TUNNEL_CSUM | - NETIF_F_GSO_PARTIAL | - 0; - - if (!(vfres->vf_cap_flags & - VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)) - netdev->gso_partial_features |= - NETIF_F_GSO_UDP_TUNNEL_CSUM; - - netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; - netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; - netdev->hw_enc_features |= hw_enc_features; - } - /* record features VLANs can make use of */ - netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID; - - /* Write features and hw_features separately to avoid polluting - * with, or dropping, features that are set when we registered. - */ - hw_features = hw_enc_features; - - /* Enable VLAN features if supported */ - if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) - hw_features |= (NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX); - /* Enable cloud filter if ADQ is supported */ - if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) - hw_features |= NETIF_F_HW_TC; - - netdev->hw_features |= hw_features; - - netdev->features |= hw_features; - - if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) - netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; - - netdev->priv_flags |= IFF_UNICAST_FLT; - - /* Do not turn on offloads when they are requested to be turned off. - * TSO needs minimum 576 bytes to work correctly. 
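The long feature-assembly block in i40evf_process_config reduces to translating PF-advertised capability bits into netdev feature bits. A compressed sketch of that translation, with stand-in flag values (the real VIRTCHNL_VF_OFFLOAD_* and NETIF_F_* values differ):

	#include <stdint.h>

	#define CAP_VLAN	(1u << 0)	/* stand-in capability bits */
	#define CAP_ADQ		(1u << 1)
	#define FEAT_VLAN_TX	(1u << 0)	/* stand-in feature bits */
	#define FEAT_VLAN_RX	(1u << 1)
	#define FEAT_HW_TC	(1u << 2)

	/* Derive the advertised feature set from capability flags. */
	static uint32_t features_from_caps(uint32_t caps, uint32_t base)
	{
		uint32_t features = base;

		if (caps & CAP_VLAN)
			features |= FEAT_VLAN_TX | FEAT_VLAN_RX;
		if (caps & CAP_ADQ)
			features |= FEAT_HW_TC;
		return features;
	}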
- */ - if (netdev->wanted_features) { - if (!(netdev->wanted_features & NETIF_F_TSO) || - netdev->mtu < 576) - netdev->features &= ~NETIF_F_TSO; - if (!(netdev->wanted_features & NETIF_F_TSO6) || - netdev->mtu < 576) - netdev->features &= ~NETIF_F_TSO6; - if (!(netdev->wanted_features & NETIF_F_TSO_ECN)) - netdev->features &= ~NETIF_F_TSO_ECN; - if (!(netdev->wanted_features & NETIF_F_GRO)) - netdev->features &= ~NETIF_F_GRO; - if (!(netdev->wanted_features & NETIF_F_GSO)) - netdev->features &= ~NETIF_F_GSO; - } - - adapter->vsi.id = adapter->vsi_res->vsi_id; - - adapter->vsi.back = adapter; - adapter->vsi.base_vector = 1; - adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK; - vsi->netdev = adapter->netdev; - vsi->qs_handle = adapter->vsi_res->qset_handle; - if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { - adapter->rss_key_size = vfres->rss_key_size; - adapter->rss_lut_size = vfres->rss_lut_size; - } else { - adapter->rss_key_size = I40EVF_HKEY_ARRAY_SIZE; - adapter->rss_lut_size = I40EVF_HLUT_ARRAY_SIZE; - } - - return 0; -} - -/** - * i40evf_init_task - worker thread to perform delayed initialization - * @work: pointer to work_struct containing our data - * - * This task completes the work that was begun in probe. Due to the nature - * of VF-PF communications, we may need to wait tens of milliseconds to get - * responses back from the PF. Rather than busy-wait in probe and bog down the - * whole system, we'll do it in a task so we can sleep. - * This task only runs during driver init. Once we've established - * communications with the PF driver and set up our netdev, the watchdog - * takes over. - **/ -static void i40evf_init_task(struct work_struct *work) -{ - struct i40evf_adapter *adapter = container_of(work, - struct i40evf_adapter, - init_task.work); - struct net_device *netdev = adapter->netdev; - struct i40e_hw *hw = &adapter->hw; - struct pci_dev *pdev = adapter->pdev; - int err, bufsz; - - switch (adapter->state) { - case __I40EVF_STARTUP: - /* driver loaded, probe complete */ - adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED; - adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; - err = i40e_set_mac_type(hw); - if (err) { - dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", - err); - goto err; - } - err = i40evf_check_reset_complete(hw); - if (err) { - dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n", - err); - goto err; - } - hw->aq.num_arq_entries = I40EVF_AQ_LEN; - hw->aq.num_asq_entries = I40EVF_AQ_LEN; - hw->aq.arq_buf_size = I40EVF_MAX_AQ_BUF_SIZE; - hw->aq.asq_buf_size = I40EVF_MAX_AQ_BUF_SIZE; - - err = i40evf_init_adminq(hw); - if (err) { - dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n", - err); - goto err; - } - err = i40evf_send_api_ver(adapter); - if (err) { - dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err); - i40evf_shutdown_adminq(hw); - goto err; - } - adapter->state = __I40EVF_INIT_VERSION_CHECK; - goto restart; - case __I40EVF_INIT_VERSION_CHECK: - if (!i40evf_asq_done(hw)) { - dev_err(&pdev->dev, "Admin queue command never completed\n"); - i40evf_shutdown_adminq(hw); - adapter->state = __I40EVF_STARTUP; - goto err; - } - - /* aq msg sent, awaiting reply */ - err = i40evf_verify_api_ver(adapter); - if (err) { - if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) - err = i40evf_send_api_ver(adapter); - else - dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n", - adapter->pf_version.major, - adapter->pf_version.minor, - VIRTCHNL_VERSION_MAJOR, - VIRTCHNL_VERSION_MINOR); - goto err; - } - err = 
i40evf_send_vf_config_msg(adapter); - if (err) { - dev_err(&pdev->dev, "Unable to send config request (%d)\n", - err); - goto err; - } - adapter->state = __I40EVF_INIT_GET_RESOURCES; - goto restart; - case __I40EVF_INIT_GET_RESOURCES: - /* aq msg sent, awaiting reply */ - if (!adapter->vf_res) { - bufsz = sizeof(struct virtchnl_vf_resource) + - (I40E_MAX_VF_VSI * - sizeof(struct virtchnl_vsi_resource)); - adapter->vf_res = kzalloc(bufsz, GFP_KERNEL); - if (!adapter->vf_res) - goto err; - } - err = i40evf_get_vf_config(adapter); - if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) { - err = i40evf_send_vf_config_msg(adapter); - goto err; - } else if (err == I40E_ERR_PARAM) { - /* We only get ERR_PARAM if the device is in a very bad - * state or if we've been disabled for previous bad - * behavior. Either way, we're done now. - */ - i40evf_shutdown_adminq(hw); - dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n"); - return; - } - if (err) { - dev_err(&pdev->dev, "Unable to get VF config (%d)\n", - err); - goto err_alloc; - } - adapter->state = __I40EVF_INIT_SW; - break; - default: - goto err_alloc; - } - - if (i40evf_process_config(adapter)) - goto err_alloc; - adapter->current_op = VIRTCHNL_OP_UNKNOWN; - - adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED; - - netdev->netdev_ops = &i40evf_netdev_ops; - i40evf_set_ethtool_ops(netdev); - netdev->watchdog_timeo = 5 * HZ; - - /* MTU range: 68 - 9710 */ - netdev->min_mtu = ETH_MIN_MTU; - netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD; - - if (!is_valid_ether_addr(adapter->hw.mac.addr)) { - dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n", - adapter->hw.mac.addr); - eth_hw_addr_random(netdev); - ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); - } else { - adapter->flags |= I40EVF_FLAG_ADDR_SET_BY_PF; - ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); - ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); - } - - timer_setup(&adapter->watchdog_timer, i40evf_watchdog_timer, 0); - mod_timer(&adapter->watchdog_timer, jiffies + 1); - - adapter->tx_desc_count = I40EVF_DEFAULT_TXD; - adapter->rx_desc_count = I40EVF_DEFAULT_RXD; - err = i40evf_init_interrupt_scheme(adapter); - if (err) - goto err_sw_init; - i40evf_map_rings_to_vectors(adapter); - if (adapter->vf_res->vf_cap_flags & - VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) - adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE; - - err = i40evf_request_misc_irq(adapter); - if (err) - goto err_sw_init; - - netif_carrier_off(netdev); - adapter->link_up = false; - - if (!adapter->netdev_registered) { - err = register_netdev(netdev); - if (err) - goto err_register; - } - - adapter->netdev_registered = true; - - netif_tx_stop_all_queues(netdev); - if (CLIENT_ALLOWED(adapter)) { - err = i40evf_lan_add_device(adapter); - if (err) - dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n", - err); - } - - dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr); - if (netdev->features & NETIF_F_GRO) - dev_info(&pdev->dev, "GRO is enabled\n"); - - adapter->state = __I40EVF_DOWN; - set_bit(__I40E_VSI_DOWN, adapter->vsi.state); - i40evf_misc_irq_enable(adapter); - wake_up(&adapter->down_waitqueue); - - adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL); - adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL); - if (!adapter->rss_key || !adapter->rss_lut) - goto err_mem; - - if (RSS_AQ(adapter)) { - adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS; - mod_timer_pending(&adapter->watchdog_timer, jiffies + 1); - } 
else { - i40evf_init_rss(adapter); - } - return; -restart: - schedule_delayed_work(&adapter->init_task, msecs_to_jiffies(30)); - return; -err_mem: - i40evf_free_rss(adapter); -err_register: - i40evf_free_misc_irq(adapter); -err_sw_init: - i40evf_reset_interrupt_capability(adapter); -err_alloc: - kfree(adapter->vf_res); - adapter->vf_res = NULL; -err: - /* Things went into the weeds, so try again later */ - if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) { - dev_err(&pdev->dev, "Failed to communicate with PF; waiting before retry\n"); - adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED; - i40evf_shutdown_adminq(hw); - adapter->state = __I40EVF_STARTUP; - schedule_delayed_work(&adapter->init_task, HZ * 5); - return; - } - schedule_delayed_work(&adapter->init_task, HZ); -} - -/** - * i40evf_shutdown - Shutdown the device in preparation for a reboot - * @pdev: pci device structure - **/ -static void i40evf_shutdown(struct pci_dev *pdev) -{ - struct net_device *netdev = pci_get_drvdata(pdev); - struct i40evf_adapter *adapter = netdev_priv(netdev); - - netif_device_detach(netdev); - - if (netif_running(netdev)) - i40evf_close(netdev); - - /* Prevent the watchdog from running. */ - adapter->state = __I40EVF_REMOVE; - adapter->aq_required = 0; - -#ifdef CONFIG_PM - pci_save_state(pdev); - -#endif - pci_disable_device(pdev); -} - -/** - * i40evf_probe - Device Initialization Routine - * @pdev: PCI device information struct - * @ent: entry in i40evf_pci_tbl - * - * Returns 0 on success, negative on failure - * - * i40evf_probe initializes an adapter identified by a pci_dev structure. - * The OS initialization, configuring of the adapter private structure, - * and a hardware reset occur. - **/ -static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) -{ - struct net_device *netdev; - struct i40evf_adapter *adapter = NULL; - struct i40e_hw *hw = NULL; - int err; - - err = pci_enable_device(pdev); - if (err) - return err; - - err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); - if (err) { - err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); - if (err) { - dev_err(&pdev->dev, - "DMA configuration failed: 0x%x\n", err); - goto err_dma; - } - } - - err = pci_request_regions(pdev, i40evf_driver_name); - if (err) { - dev_err(&pdev->dev, - "pci_request_regions failed 0x%x\n", err); - goto err_pci_reg; - } - - pci_enable_pcie_error_reporting(pdev); - - pci_set_master(pdev); - - netdev = alloc_etherdev_mq(sizeof(struct i40evf_adapter), - I40EVF_MAX_REQ_QUEUES); - if (!netdev) { - err = -ENOMEM; - goto err_alloc_etherdev; - } - - SET_NETDEV_DEV(netdev, &pdev->dev); - - pci_set_drvdata(pdev, netdev); - adapter = netdev_priv(netdev); - - adapter->netdev = netdev; - adapter->pdev = pdev; - - hw = &adapter->hw; - hw->back = adapter; - - adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1; - adapter->state = __I40EVF_STARTUP; - - /* Call save state here because it relies on the adapter struct. 
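The DMA-mask setup in i40evf_probe is the standard try-wide-then-narrow fallback: prefer 64-bit addressing and only drop to a 32-bit mask if the platform rejects the wider one. Its shape in isolation (helper name and return codes are stand-ins, not kernel API):

	/* Stand-in for dma_set_mask_and_coherent(); always succeeds here. */
	static int set_dma_mask_example(int bits) { (void)bits; return 0; }

	static int dma_setup_example(void)
	{
		if (!set_dma_mask_example(64))
			return 0;	/* fast path: full 64-bit DMA */
		if (!set_dma_mask_example(32))
			return 0;	/* constrained platforms: 32-bit only */
		return -1;		/* no usable DMA configuration */
	}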
*/ - pci_save_state(pdev); - - hw->hw_addr = ioremap(pci_resource_start(pdev, 0), - pci_resource_len(pdev, 0)); - if (!hw->hw_addr) { - err = -EIO; - goto err_ioremap; - } - hw->vendor_id = pdev->vendor; - hw->device_id = pdev->device; - pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); - hw->subsystem_vendor_id = pdev->subsystem_vendor; - hw->subsystem_device_id = pdev->subsystem_device; - hw->bus.device = PCI_SLOT(pdev->devfn); - hw->bus.func = PCI_FUNC(pdev->devfn); - hw->bus.bus_id = pdev->bus->number; - - /* set up the locks for the AQ, do this only once in probe - * and destroy them only once in remove - */ - mutex_init(&hw->aq.asq_mutex); - mutex_init(&hw->aq.arq_mutex); - - spin_lock_init(&adapter->mac_vlan_list_lock); - spin_lock_init(&adapter->cloud_filter_list_lock); - - INIT_LIST_HEAD(&adapter->mac_filter_list); - INIT_LIST_HEAD(&adapter->vlan_filter_list); - INIT_LIST_HEAD(&adapter->cloud_filter_list); - - INIT_WORK(&adapter->reset_task, i40evf_reset_task); - INIT_WORK(&adapter->adminq_task, i40evf_adminq_task); - INIT_WORK(&adapter->watchdog_task, i40evf_watchdog_task); - INIT_DELAYED_WORK(&adapter->client_task, i40evf_client_task); - INIT_DELAYED_WORK(&adapter->init_task, i40evf_init_task); - schedule_delayed_work(&adapter->init_task, - msecs_to_jiffies(5 * (pdev->devfn & 0x07))); - - /* Setup the wait queue for indicating transition to down status */ - init_waitqueue_head(&adapter->down_waitqueue); - - return 0; - -err_ioremap: - free_netdev(netdev); -err_alloc_etherdev: - pci_disable_pcie_error_reporting(pdev); - pci_release_regions(pdev); -err_pci_reg: -err_dma: - pci_disable_device(pdev); - return err; -} - -#ifdef CONFIG_PM -/** - * i40evf_suspend - Power management suspend routine - * @pdev: PCI device information struct - * @state: unused - * - * Called when the system (VM) is entering sleep/suspend. - **/ -static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state) -{ - struct net_device *netdev = pci_get_drvdata(pdev); - struct i40evf_adapter *adapter = netdev_priv(netdev); - int retval = 0; - - netif_device_detach(netdev); - - while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, - &adapter->crit_section)) - usleep_range(500, 1000); - - if (netif_running(netdev)) { - rtnl_lock(); - i40evf_down(adapter); - rtnl_unlock(); - } - i40evf_free_misc_irq(adapter); - i40evf_reset_interrupt_capability(adapter); - - clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); - - retval = pci_save_state(pdev); - if (retval) - return retval; - - pci_disable_device(pdev); - - return 0; -} - -/** - * i40evf_resume - Power management resume routine - * @pdev: PCI device information struct - * - * Called when the system (VM) is resumed from sleep/suspend. - **/ -static int i40evf_resume(struct pci_dev *pdev) -{ - struct i40evf_adapter *adapter = pci_get_drvdata(pdev); - struct net_device *netdev = adapter->netdev; - u32 err; - - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - /* pci_restore_state clears dev->state_saved so call - * pci_save_state to restore it. 
- */
-	pci_save_state(pdev);
-
-	err = pci_enable_device_mem(pdev);
-	if (err) {
-		dev_err(&pdev->dev, "Cannot enable PCI device from suspend.\n");
-		return err;
-	}
-	pci_set_master(pdev);
-
-	rtnl_lock();
-	err = i40evf_set_interrupt_capability(adapter);
-	if (err) {
-		rtnl_unlock();
-		dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
-		return err;
-	}
-	err = i40evf_request_misc_irq(adapter);
-	rtnl_unlock();
-	if (err) {
-		dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
-		return err;
-	}
-
-	schedule_work(&adapter->reset_task);
-
-	netif_device_attach(netdev);
-
-	return err;
-}
-
-#endif /* CONFIG_PM */
-/**
- * i40evf_remove - Device Removal Routine
- * @pdev: PCI device information struct
- *
- * i40evf_remove is called by the PCI subsystem to alert the driver
- * that it should release a PCI device. This could be caused by a
- * Hot-Plug event, or because the driver is going to be removed from
- * memory.
- **/
-static void i40evf_remove(struct pci_dev *pdev)
-{
-	struct net_device *netdev = pci_get_drvdata(pdev);
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-	struct i40evf_vlan_filter *vlf, *vlftmp;
-	struct i40evf_mac_filter *f, *ftmp;
-	struct i40evf_cloud_filter *cf, *cftmp;
-	struct i40e_hw *hw = &adapter->hw;
-	int err;
-	/* Indicate we are in remove and not to run reset_task */
-	set_bit(__I40EVF_IN_REMOVE_TASK, &adapter->crit_section);
-	cancel_delayed_work_sync(&adapter->init_task);
-	cancel_work_sync(&adapter->reset_task);
-	cancel_delayed_work_sync(&adapter->client_task);
-	if (adapter->netdev_registered) {
-		unregister_netdev(netdev);
-		adapter->netdev_registered = false;
-	}
-	if (CLIENT_ALLOWED(adapter)) {
-		err = i40evf_lan_del_device(adapter);
-		if (err)
-			dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
-				 err);
-	}
-
-	/* Shut down all the garbage mashers on the detention level */
-	adapter->state = __I40EVF_REMOVE;
-	adapter->aq_required = 0;
-	adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED;
-	i40evf_request_reset(adapter);
-	msleep(50);
-	/* If the FW isn't responding, kick it once, but only once. */
-	if (!i40evf_asq_done(hw)) {
-		i40evf_request_reset(adapter);
-		msleep(50);
-	}
-	i40evf_free_all_tx_resources(adapter);
-	i40evf_free_all_rx_resources(adapter);
-	i40evf_misc_irq_disable(adapter);
-	i40evf_free_misc_irq(adapter);
-	i40evf_reset_interrupt_capability(adapter);
-	i40evf_free_q_vectors(adapter);
-
-	if (adapter->watchdog_timer.function)
-		del_timer_sync(&adapter->watchdog_timer);
-
-	cancel_work_sync(&adapter->adminq_task);
-
-	i40evf_free_rss(adapter);
-
-	if (hw->aq.asq.count)
-		i40evf_shutdown_adminq(hw);
-
-	/* destroy the locks only once, here */
-	mutex_destroy(&hw->aq.arq_mutex);
-	mutex_destroy(&hw->aq.asq_mutex);
-
-	iounmap(hw->hw_addr);
-	pci_release_regions(pdev);
-	i40evf_free_all_tx_resources(adapter);
-	i40evf_free_all_rx_resources(adapter);
-	i40evf_free_queues(adapter);
-	kfree(adapter->vf_res);
-	spin_lock_bh(&adapter->mac_vlan_list_lock);
-	/* If we got removed before an up/down sequence, we've got a filter
-	 * hanging out there that we need to get rid of.
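The filter-list teardown that follows relies on the _safe iteration idiom because entries are freed mid-walk; list_for_each_entry_safe() keeps a temporary cursor so the next pointer stays valid after list_del(). The same pattern on a plain userspace list:

	#include <stdlib.h>

	struct node {
		struct node *next;
	};

	/* Free every node: capture the successor before freeing the current
	 * entry so the walk survives the deletion.
	 */
	static void destroy_all(struct node **head)
	{
		struct node *cur = *head, *tmp;

		while (cur) {
			tmp = cur->next;
			free(cur);
			cur = tmp;
		}
		*head = NULL;
	}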
- */ - list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { - list_del(&f->list); - kfree(f); - } - list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list, - list) { - list_del(&vlf->list); - kfree(vlf); - } - - spin_unlock_bh(&adapter->mac_vlan_list_lock); - - spin_lock_bh(&adapter->cloud_filter_list_lock); - list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { - list_del(&cf->list); - kfree(cf); - } - spin_unlock_bh(&adapter->cloud_filter_list_lock); - - free_netdev(netdev); - - pci_disable_pcie_error_reporting(pdev); - - pci_disable_device(pdev); -} - -static struct pci_driver i40evf_driver = { - .name = i40evf_driver_name, - .id_table = i40evf_pci_tbl, - .probe = i40evf_probe, - .remove = i40evf_remove, -#ifdef CONFIG_PM - .suspend = i40evf_suspend, - .resume = i40evf_resume, -#endif - .shutdown = i40evf_shutdown, -}; - -/** - * i40e_init_module - Driver Registration Routine - * - * i40e_init_module is the first routine called when the driver is - * loaded. All it does is register with the PCI subsystem. - **/ -static int __init i40evf_init_module(void) -{ - int ret; - - pr_info("i40evf: %s - version %s\n", i40evf_driver_string, - i40evf_driver_version); - - pr_info("%s\n", i40evf_copyright); - - i40evf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1, - i40evf_driver_name); - if (!i40evf_wq) { - pr_err("%s: Failed to create workqueue\n", i40evf_driver_name); - return -ENOMEM; - } - ret = pci_register_driver(&i40evf_driver); - return ret; -} - -module_init(i40evf_init_module); - -/** - * i40e_exit_module - Driver Exit Cleanup Routine - * - * i40e_exit_module is called just before the driver is removed - * from memory. - **/ -static void __exit i40evf_exit_module(void) -{ - pci_unregister_driver(&i40evf_driver); - destroy_workqueue(i40evf_wq); -} - -module_exit(i40evf_exit_module); - -/* i40evf_main.c */ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c deleted file mode 100644 index 6579dabab78c..000000000000 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ /dev/null @@ -1,1465 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2013 - 2018 Intel Corporation. */ - -#include "i40evf.h" -#include "i40e_prototype.h" -#include "i40evf_client.h" - -/* busy wait delay in msec */ -#define I40EVF_BUSY_WAIT_DELAY 10 -#define I40EVF_BUSY_WAIT_COUNT 50 - -/** - * i40evf_send_pf_msg - * @adapter: adapter structure - * @op: virtual channel opcode - * @msg: pointer to message buffer - * @len: message length - * - * Send message to PF and print status if failure. - **/ -static int i40evf_send_pf_msg(struct i40evf_adapter *adapter, - enum virtchnl_ops op, u8 *msg, u16 len) -{ - struct i40e_hw *hw = &adapter->hw; - i40e_status err; - - if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) - return 0; /* nothing to see here, move along */ - - err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL); - if (err) - dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %s, aq_err %s\n", - op, i40evf_stat_str(hw, err), - i40evf_aq_str(hw, hw->aq.asq_last_status)); - return err; -} - -/** - * i40evf_send_api_ver - * @adapter: adapter structure - * - * Send API version admin queue message to the PF. The reply is not checked - * in this function. Returns 0 if the message was successfully - * sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not. 
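The version handshake that i40evf_send_api_ver implements is just a two-u32 payload handed to the admin queue as opaque bytes. A sketch of the payload construction (the struct here mirrors virtchnl_version_info in spirit only; the send path is elided):

	#include <stdint.h>
	#include <string.h>

	struct version_msg {
		uint32_t major;
		uint32_t minor;
	};

	/* Serialize the version info into the opaque byte buffer the admin
	 * queue message carries; returns the payload length.
	 */
	static size_t build_version_msg(uint8_t *buf, uint32_t major,
					uint32_t minor)
	{
		struct version_msg vvi = { .major = major, .minor = minor };

		memcpy(buf, &vvi, sizeof(vvi));
		return sizeof(vvi);
	}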
- **/ -int i40evf_send_api_ver(struct i40evf_adapter *adapter) -{ - struct virtchnl_version_info vvi; - - vvi.major = VIRTCHNL_VERSION_MAJOR; - vvi.minor = VIRTCHNL_VERSION_MINOR; - - return i40evf_send_pf_msg(adapter, VIRTCHNL_OP_VERSION, (u8 *)&vvi, - sizeof(vvi)); -} - -/** - * i40evf_verify_api_ver - * @adapter: adapter structure - * - * Compare API versions with the PF. Must be called after admin queue is - * initialized. Returns 0 if API versions match, -EIO if they do not, - * I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty, and any errors - * from the firmware are propagated. - **/ -int i40evf_verify_api_ver(struct i40evf_adapter *adapter) -{ - struct virtchnl_version_info *pf_vvi; - struct i40e_hw *hw = &adapter->hw; - struct i40e_arq_event_info event; - enum virtchnl_ops op; - i40e_status err; - - event.buf_len = I40EVF_MAX_AQ_BUF_SIZE; - event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); - if (!event.msg_buf) { - err = -ENOMEM; - goto out; - } - - while (1) { - err = i40evf_clean_arq_element(hw, &event, NULL); - /* When the AQ is empty, i40evf_clean_arq_element will return - * nonzero and this loop will terminate. - */ - if (err) - goto out_alloc; - op = - (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high); - if (op == VIRTCHNL_OP_VERSION) - break; - } - - - err = (i40e_status)le32_to_cpu(event.desc.cookie_low); - if (err) - goto out_alloc; - - if (op != VIRTCHNL_OP_VERSION) { - dev_info(&adapter->pdev->dev, "Invalid reply type %d from PF\n", - op); - err = -EIO; - goto out_alloc; - } - - pf_vvi = (struct virtchnl_version_info *)event.msg_buf; - adapter->pf_version = *pf_vvi; - - if ((pf_vvi->major > VIRTCHNL_VERSION_MAJOR) || - ((pf_vvi->major == VIRTCHNL_VERSION_MAJOR) && - (pf_vvi->minor > VIRTCHNL_VERSION_MINOR))) - err = -EIO; - -out_alloc: - kfree(event.msg_buf); -out: - return err; -} - -/** - * i40evf_send_vf_config_msg - * @adapter: adapter structure - * - * Send VF configuration request admin queue message to the PF. The reply - * is not checked in this function. Returns 0 if the message was - * successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not. - **/ -int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter) -{ - u32 caps; - - caps = VIRTCHNL_VF_OFFLOAD_L2 | - VIRTCHNL_VF_OFFLOAD_RSS_PF | - VIRTCHNL_VF_OFFLOAD_RSS_AQ | - VIRTCHNL_VF_OFFLOAD_RSS_REG | - VIRTCHNL_VF_OFFLOAD_VLAN | - VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | - VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 | - VIRTCHNL_VF_OFFLOAD_ENCAP | - VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM | - VIRTCHNL_VF_OFFLOAD_REQ_QUEUES | - VIRTCHNL_VF_OFFLOAD_ADQ; - - adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES; - adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG; - if (PF_IS_V11(adapter)) - return i40evf_send_pf_msg(adapter, - VIRTCHNL_OP_GET_VF_RESOURCES, - (u8 *)&caps, sizeof(caps)); - else - return i40evf_send_pf_msg(adapter, - VIRTCHNL_OP_GET_VF_RESOURCES, - NULL, 0); -} - -/** - * i40evf_validate_num_queues - * @adapter: adapter structure - * - * Validate that the number of queues the PF has sent in - * VIRTCHNL_OP_GET_VF_RESOURCES is not larger than the VF can handle. 
- **/ -static void i40evf_validate_num_queues(struct i40evf_adapter *adapter) -{ - if (adapter->vf_res->num_queue_pairs > I40EVF_MAX_REQ_QUEUES) { - struct virtchnl_vsi_resource *vsi_res; - int i; - - dev_info(&adapter->pdev->dev, "Received %d queues, but can only have a max of %d\n", - adapter->vf_res->num_queue_pairs, - I40EVF_MAX_REQ_QUEUES); - dev_info(&adapter->pdev->dev, "Fixing by reducing queues to %d\n", - I40EVF_MAX_REQ_QUEUES); - adapter->vf_res->num_queue_pairs = I40EVF_MAX_REQ_QUEUES; - for (i = 0; i < adapter->vf_res->num_vsis; i++) { - vsi_res = &adapter->vf_res->vsi_res[i]; - vsi_res->num_queue_pairs = I40EVF_MAX_REQ_QUEUES; - } - } -} - -/** - * i40evf_get_vf_config - * @adapter: private adapter structure - * - * Get VF configuration from PF and populate hw structure. Must be called after - * admin queue is initialized. Busy waits until response is received from PF, - * with maximum timeout. Response from PF is returned in the buffer for further - * processing by the caller. - **/ -int i40evf_get_vf_config(struct i40evf_adapter *adapter) -{ - struct i40e_hw *hw = &adapter->hw; - struct i40e_arq_event_info event; - enum virtchnl_ops op; - i40e_status err; - u16 len; - - len = sizeof(struct virtchnl_vf_resource) + - I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource); - event.buf_len = len; - event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); - if (!event.msg_buf) { - err = -ENOMEM; - goto out; - } - - while (1) { - /* When the AQ is empty, i40evf_clean_arq_element will return - * nonzero and this loop will terminate. - */ - err = i40evf_clean_arq_element(hw, &event, NULL); - if (err) - goto out_alloc; - op = - (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high); - if (op == VIRTCHNL_OP_GET_VF_RESOURCES) - break; - } - - err = (i40e_status)le32_to_cpu(event.desc.cookie_low); - memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len)); - - /* some PFs send more queues than we should have so validate that - * we aren't getting too many queues - */ - if (!err) - i40evf_validate_num_queues(adapter); - i40e_vf_parse_hw_config(hw, adapter->vf_res); -out_alloc: - kfree(event.msg_buf); -out: - return err; -} - -/** - * i40evf_configure_queues - * @adapter: adapter structure - * - * Request that the PF set up our (previously allocated) queues. - **/ -void i40evf_configure_queues(struct i40evf_adapter *adapter) -{ - struct virtchnl_vsi_queue_config_info *vqci; - struct virtchnl_queue_pair_info *vqpi; - int pairs = adapter->num_active_queues; - int i, len, max_frame = I40E_MAX_RXBUFFER; - - if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { - /* bail because we already have a command pending */ - dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n", - adapter->current_op); - return; - } - adapter->current_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES; - len = sizeof(struct virtchnl_vsi_queue_config_info) + - (sizeof(struct virtchnl_queue_pair_info) * pairs); - vqci = kzalloc(len, GFP_KERNEL); - if (!vqci) - return; - - /* Limit maximum frame size when jumbo frames is not enabled */ - if (!(adapter->flags & I40EVF_FLAG_LEGACY_RX) && - (adapter->netdev->mtu <= ETH_DATA_LEN)) - max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN; - - vqci->vsi_id = adapter->vsi_res->vsi_id; - vqci->num_queue_pairs = pairs; - vqpi = vqci->qpair; - /* Size check is not needed here - HW max is 16 queue pairs, and we - * can fit info for 31 of them into the AQ buffer before it overflows. 
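The "31 queue pairs" figure in the comment above follows from the usual header-plus-array sizing, len = sizeof(header) + n * sizeof(element), measured against the 4 KB admin-queue buffer used throughout this file. The header and element sizes below are back-of-the-envelope stand-ins chosen only to reproduce the figure, not the real struct sizes:

	#include <stdio.h>

	int main(void)
	{
		size_t buf = 4096;	/* assumed I40EVF_MAX_AQ_BUF_SIZE */
		size_t hdr = 8;		/* illustrative header size */
		size_t elem = 130;	/* illustrative per-pair size */

		/* largest n satisfying hdr + n * elem <= buf */
		printf("capacity = %zu queue pairs\n", (buf - hdr) / elem);
		return 0;
	}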
- */ - for (i = 0; i < pairs; i++) { - vqpi->txq.vsi_id = vqci->vsi_id; - vqpi->txq.queue_id = i; - vqpi->txq.ring_len = adapter->tx_rings[i].count; - vqpi->txq.dma_ring_addr = adapter->tx_rings[i].dma; - vqpi->rxq.vsi_id = vqci->vsi_id; - vqpi->rxq.queue_id = i; - vqpi->rxq.ring_len = adapter->rx_rings[i].count; - vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma; - vqpi->rxq.max_pkt_size = max_frame; - vqpi->rxq.databuffer_size = - ALIGN(adapter->rx_rings[i].rx_buf_len, - BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT)); - vqpi++; - } - - adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_QUEUES; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_VSI_QUEUES, - (u8 *)vqci, len); - kfree(vqci); -} - -/** - * i40evf_enable_queues - * @adapter: adapter structure - * - * Request that the PF enable all of our queues. - **/ -void i40evf_enable_queues(struct i40evf_adapter *adapter) -{ - struct virtchnl_queue_select vqs; - - if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { - /* bail because we already have a command pending */ - dev_err(&adapter->pdev->dev, "Cannot enable queues, command %d pending\n", - adapter->current_op); - return; - } - adapter->current_op = VIRTCHNL_OP_ENABLE_QUEUES; - vqs.vsi_id = adapter->vsi_res->vsi_id; - vqs.tx_queues = BIT(adapter->num_active_queues) - 1; - vqs.rx_queues = vqs.tx_queues; - adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_QUEUES, - (u8 *)&vqs, sizeof(vqs)); -} - -/** - * i40evf_disable_queues - * @adapter: adapter structure - * - * Request that the PF disable all of our queues. - **/ -void i40evf_disable_queues(struct i40evf_adapter *adapter) -{ - struct virtchnl_queue_select vqs; - - if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { - /* bail because we already have a command pending */ - dev_err(&adapter->pdev->dev, "Cannot disable queues, command %d pending\n", - adapter->current_op); - return; - } - adapter->current_op = VIRTCHNL_OP_DISABLE_QUEUES; - vqs.vsi_id = adapter->vsi_res->vsi_id; - vqs.tx_queues = BIT(adapter->num_active_queues) - 1; - vqs.rx_queues = vqs.tx_queues; - adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_QUEUES, - (u8 *)&vqs, sizeof(vqs)); -} - -/** - * i40evf_map_queues - * @adapter: adapter structure - * - * Request that the PF map queues to interrupt vectors. Misc causes, including - * admin queue, are always mapped to vector 0. 
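The map that i40evf_map_queues builds has a fixed shape: traffic vector v becomes hardware vector v + NONQ_VECS, and the final entry pins the misc/admin-queue cause to vector 0. In miniature (the constant's value is assumed for illustration):

	#define NONQ_VECS_EXAMPLE 1	/* stand-in for the driver's NONQ_VECS */

	/* Fill vector ids: one entry per queue vector, misc entry last. */
	static void fill_vector_map(unsigned int *vector_id, int q_vectors)
	{
		int v;

		for (v = 0; v < q_vectors; v++)
			vector_id[v] = v + NONQ_VECS_EXAMPLE;	/* traffic */
		vector_id[q_vectors] = 0;	/* misc/AQ on vector 0 */
	}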
- **/ -void i40evf_map_queues(struct i40evf_adapter *adapter) -{ - struct virtchnl_irq_map_info *vimi; - struct virtchnl_vector_map *vecmap; - int v_idx, q_vectors, len; - struct i40e_q_vector *q_vector; - - if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { - /* bail because we already have a command pending */ - dev_err(&adapter->pdev->dev, "Cannot map queues to vectors, command %d pending\n", - adapter->current_op); - return; - } - adapter->current_op = VIRTCHNL_OP_CONFIG_IRQ_MAP; - - q_vectors = adapter->num_msix_vectors - NONQ_VECS; - - len = sizeof(struct virtchnl_irq_map_info) + - (adapter->num_msix_vectors * - sizeof(struct virtchnl_vector_map)); - vimi = kzalloc(len, GFP_KERNEL); - if (!vimi) - return; - - vimi->num_vectors = adapter->num_msix_vectors; - /* Queue vectors first */ - for (v_idx = 0; v_idx < q_vectors; v_idx++) { - q_vector = &adapter->q_vectors[v_idx]; - vecmap = &vimi->vecmap[v_idx]; - - vecmap->vsi_id = adapter->vsi_res->vsi_id; - vecmap->vector_id = v_idx + NONQ_VECS; - vecmap->txq_map = q_vector->ring_mask; - vecmap->rxq_map = q_vector->ring_mask; - vecmap->rxitr_idx = I40E_RX_ITR; - vecmap->txitr_idx = I40E_TX_ITR; - } - /* Misc vector last - this is only for AdminQ messages */ - vecmap = &vimi->vecmap[v_idx]; - vecmap->vsi_id = adapter->vsi_res->vsi_id; - vecmap->vector_id = 0; - vecmap->txq_map = 0; - vecmap->rxq_map = 0; - - adapter->aq_required &= ~I40EVF_FLAG_AQ_MAP_VECTORS; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_IRQ_MAP, - (u8 *)vimi, len); - kfree(vimi); -} - -/** - * i40evf_request_queues - * @adapter: adapter structure - * @num: number of requested queues - * - * We get a default number of queues from the PF. This enables us to request a - * different number. Returns 0 on success, negative on failure - **/ -int i40evf_request_queues(struct i40evf_adapter *adapter, int num) -{ - struct virtchnl_vf_res_request vfres; - - if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { - /* bail because we already have a command pending */ - dev_err(&adapter->pdev->dev, "Cannot request queues, command %d pending\n", - adapter->current_op); - return -EBUSY; - } - - vfres.num_queue_pairs = num; - - adapter->current_op = VIRTCHNL_OP_REQUEST_QUEUES; - adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED; - return i40evf_send_pf_msg(adapter, VIRTCHNL_OP_REQUEST_QUEUES, - (u8 *)&vfres, sizeof(vfres)); -} - -/** - * i40evf_add_ether_addrs - * @adapter: adapter structure - * - * Request that the PF add one or more addresses to our filters. 
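i40evf_add_ether_addrs, below, batches all pending MAC additions into one message; when the batch would overflow the AQ buffer it clamps the count to what fits and leaves the aq_required flag armed so the remainder goes out in a later message. The clamp arithmetic by itself (sizes passed as parameters, names illustrative):

	#include <stdbool.h>
	#include <stddef.h>

	/* Clamp 'count' elements to what fits in 'buf_size'; returns true
	 * when a follow-up message is needed for the remainder ("more").
	 */
	static bool clamp_batch(size_t buf_size, size_t hdr, size_t elem,
				size_t *count)
	{
		size_t len = hdr + *count * elem;

		if (len <= buf_size)
			return false;		/* one message suffices */
		*count = (buf_size - hdr) / elem;	/* largest fit */
		return true;
	}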
- **/ -void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) -{ - struct virtchnl_ether_addr_list *veal; - int len, i = 0, count = 0; - struct i40evf_mac_filter *f; - bool more = false; - - if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { - /* bail because we already have a command pending */ - dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n", - adapter->current_op); - return; - } - - spin_lock_bh(&adapter->mac_vlan_list_lock); - - list_for_each_entry(f, &adapter->mac_filter_list, list) { - if (f->add) - count++; - } - if (!count) { - adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER; - spin_unlock_bh(&adapter->mac_vlan_list_lock); - return; - } - adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR; - - len = sizeof(struct virtchnl_ether_addr_list) + - (count * sizeof(struct virtchnl_ether_addr)); - if (len > I40EVF_MAX_AQ_BUF_SIZE) { - dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n"); - count = (I40EVF_MAX_AQ_BUF_SIZE - - sizeof(struct virtchnl_ether_addr_list)) / - sizeof(struct virtchnl_ether_addr); - len = sizeof(struct virtchnl_ether_addr_list) + - (count * sizeof(struct virtchnl_ether_addr)); - more = true; - } - - veal = kzalloc(len, GFP_ATOMIC); - if (!veal) { - spin_unlock_bh(&adapter->mac_vlan_list_lock); - return; - } - - veal->vsi_id = adapter->vsi_res->vsi_id; - veal->num_elements = count; - list_for_each_entry(f, &adapter->mac_filter_list, list) { - if (f->add) { - ether_addr_copy(veal->list[i].addr, f->macaddr); - i++; - f->add = false; - if (i == count) - break; - } - } - if (!more) - adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER; - - spin_unlock_bh(&adapter->mac_vlan_list_lock); - - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR, - (u8 *)veal, len); - kfree(veal); -} - -/** - * i40evf_del_ether_addrs - * @adapter: adapter structure - * - * Request that the PF remove one or more addresses from our filters. 
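The overflow handling in these filter senders deserves a worked example: when the element list would exceed the AdminQ buffer, count is recomputed to the largest number of elements that still fits, and because the aq_required bit is left set (more = true), the remainder goes out in a later message. A standalone sketch; the 4096-byte limit and 8-byte element stand in for I40EVF_MAX_AQ_BUF_SIZE and struct virtchnl_ether_addr and are assumptions here:

#include <stdio.h>
#include <stddef.h>

struct addr_elem { unsigned char addr[6]; unsigned char pad[2]; };

struct addr_list {
        unsigned short vsi_id;
        unsigned short num_elements;
        struct addr_elem list[1];       /* grows with num_elements */
};

int main(void)
{
        const size_t max_buf = 4096;    /* assumed AQ buffer limit */
        size_t count = 600;             /* filters marked for addition */
        size_t len = sizeof(struct addr_list) +
                     count * sizeof(struct addr_elem);

        if (len > max_buf) {
                /* send as many as fit; the rest stays flagged */
                count = (max_buf - sizeof(struct addr_list)) /
                        sizeof(struct addr_elem);
                len = sizeof(struct addr_list) +
                      count * sizeof(struct addr_elem);
        }
        /* prints: sending 510 filters in 4092 bytes */
        printf("sending %zu filters in %zu bytes\n", count, len);
        return 0;
}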
- **/ -void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) -{ - struct virtchnl_ether_addr_list *veal; - struct i40evf_mac_filter *f, *ftmp; - int len, i = 0, count = 0; - bool more = false; - - if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { - /* bail because we already have a command pending */ - dev_err(&adapter->pdev->dev, "Cannot remove filters, command %d pending\n", - adapter->current_op); - return; - } - - spin_lock_bh(&adapter->mac_vlan_list_lock); - - list_for_each_entry(f, &adapter->mac_filter_list, list) { - if (f->remove) - count++; - } - if (!count) { - adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER; - spin_unlock_bh(&adapter->mac_vlan_list_lock); - return; - } - adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR; - - len = sizeof(struct virtchnl_ether_addr_list) + - (count * sizeof(struct virtchnl_ether_addr)); - if (len > I40EVF_MAX_AQ_BUF_SIZE) { - dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n"); - count = (I40EVF_MAX_AQ_BUF_SIZE - - sizeof(struct virtchnl_ether_addr_list)) / - sizeof(struct virtchnl_ether_addr); - len = sizeof(struct virtchnl_ether_addr_list) + - (count * sizeof(struct virtchnl_ether_addr)); - more = true; - } - veal = kzalloc(len, GFP_ATOMIC); - if (!veal) { - spin_unlock_bh(&adapter->mac_vlan_list_lock); - return; - } - - veal->vsi_id = adapter->vsi_res->vsi_id; - veal->num_elements = count; - list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { - if (f->remove) { - ether_addr_copy(veal->list[i].addr, f->macaddr); - i++; - list_del(&f->list); - kfree(f); - if (i == count) - break; - } - } - if (!more) - adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER; - - spin_unlock_bh(&adapter->mac_vlan_list_lock); - - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR, - (u8 *)veal, len); - kfree(veal); -} - -/** - * i40evf_add_vlans - * @adapter: adapter structure - * - * Request that the PF add one or more VLAN filters to our VSI. 
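The delete loops above use list_for_each_entry_safe() because they free the node they are standing on; the safe variant caches the successor before the kfree(). The same idea in plain C on a singly linked list:

#include <stdio.h>
#include <stdlib.h>

struct filter {
        int id;
        int remove;
        struct filter *next;
};

/* unlink and free every node marked for removal */
static void del_marked(struct filter **pp)
{
        while (*pp) {
                struct filter *f = *pp;

                if (f->remove) {
                        *pp = f->next;  /* unlink before the node dies */
                        free(f);
                } else {
                        pp = &f->next;
                }
        }
}

int main(void)
{
        struct filter *head = NULL, **tail = &head;
        int marked[] = { 1, 0, 1 };
        int i;

        for (i = 0; i < 3; i++) {
                struct filter *f = malloc(sizeof(*f));

                if (!f)
                        return 1;
                f->id = i + 1;
                f->remove = marked[i];
                f->next = NULL;
                *tail = f;
                tail = &f->next;
        }

        del_marked(&head);
        for (struct filter *f = head; f; f = f->next)
                printf("kept filter %d\n", f->id);      /* kept filter 2 */
        return 0;
}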
- **/ -void i40evf_add_vlans(struct i40evf_adapter *adapter) -{ - struct virtchnl_vlan_filter_list *vvfl; - int len, i = 0, count = 0; - struct i40evf_vlan_filter *f; - bool more = false; - - if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { - /* bail because we already have a command pending */ - dev_err(&adapter->pdev->dev, "Cannot add VLANs, command %d pending\n", - adapter->current_op); - return; - } - - spin_lock_bh(&adapter->mac_vlan_list_lock); - - list_for_each_entry(f, &adapter->vlan_filter_list, list) { - if (f->add) - count++; - } - if (!count) { - adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER; - spin_unlock_bh(&adapter->mac_vlan_list_lock); - return; - } - adapter->current_op = VIRTCHNL_OP_ADD_VLAN; - - len = sizeof(struct virtchnl_vlan_filter_list) + - (count * sizeof(u16)); - if (len > I40EVF_MAX_AQ_BUF_SIZE) { - dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n"); - count = (I40EVF_MAX_AQ_BUF_SIZE - - sizeof(struct virtchnl_vlan_filter_list)) / - sizeof(u16); - len = sizeof(struct virtchnl_vlan_filter_list) + - (count * sizeof(u16)); - more = true; - } - vvfl = kzalloc(len, GFP_ATOMIC); - if (!vvfl) { - spin_unlock_bh(&adapter->mac_vlan_list_lock); - return; - } - - vvfl->vsi_id = adapter->vsi_res->vsi_id; - vvfl->num_elements = count; - list_for_each_entry(f, &adapter->vlan_filter_list, list) { - if (f->add) { - vvfl->vlan_id[i] = f->vlan; - i++; - f->add = false; - if (i == count) - break; - } - } - if (!more) - adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER; - - spin_unlock_bh(&adapter->mac_vlan_list_lock); - - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len); - kfree(vvfl); -} - -/** - * i40evf_del_vlans - * @adapter: adapter structure - * - * Request that the PF remove one or more VLAN filters from our VSI. 
- **/ -void i40evf_del_vlans(struct i40evf_adapter *adapter) -{ - struct virtchnl_vlan_filter_list *vvfl; - struct i40evf_vlan_filter *f, *ftmp; - int len, i = 0, count = 0; - bool more = false; - - if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { - /* bail because we already have a command pending */ - dev_err(&adapter->pdev->dev, "Cannot remove VLANs, command %d pending\n", - adapter->current_op); - return; - } - - spin_lock_bh(&adapter->mac_vlan_list_lock); - - list_for_each_entry(f, &adapter->vlan_filter_list, list) { - if (f->remove) - count++; - } - if (!count) { - adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER; - spin_unlock_bh(&adapter->mac_vlan_list_lock); - return; - } - adapter->current_op = VIRTCHNL_OP_DEL_VLAN; - - len = sizeof(struct virtchnl_vlan_filter_list) + - (count * sizeof(u16)); - if (len > I40EVF_MAX_AQ_BUF_SIZE) { - dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n"); - count = (I40EVF_MAX_AQ_BUF_SIZE - - sizeof(struct virtchnl_vlan_filter_list)) / - sizeof(u16); - len = sizeof(struct virtchnl_vlan_filter_list) + - (count * sizeof(u16)); - more = true; - } - vvfl = kzalloc(len, GFP_ATOMIC); - if (!vvfl) { - spin_unlock_bh(&adapter->mac_vlan_list_lock); - return; - } - - vvfl->vsi_id = adapter->vsi_res->vsi_id; - vvfl->num_elements = count; - list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { - if (f->remove) { - vvfl->vlan_id[i] = f->vlan; - i++; - list_del(&f->list); - kfree(f); - if (i == count) - break; - } - } - if (!more) - adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER; - - spin_unlock_bh(&adapter->mac_vlan_list_lock); - - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len); - kfree(vvfl); -} - -/** - * i40evf_set_promiscuous - * @adapter: adapter structure - * @flags: bitmask to control unicast/multicast promiscuous. - * - * Request that the PF enable promiscuous mode for our VSI. - **/ -void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags) -{ - struct virtchnl_promisc_info vpi; - int promisc_all; - - if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { - /* bail because we already have a command pending */ - dev_err(&adapter->pdev->dev, "Cannot set promiscuous mode, command %d pending\n", - adapter->current_op); - return; - } - - promisc_all = FLAG_VF_UNICAST_PROMISC | - FLAG_VF_MULTICAST_PROMISC; - if ((flags & promisc_all) == promisc_all) { - adapter->flags |= I40EVF_FLAG_PROMISC_ON; - adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_PROMISC; - dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n"); - } - - if (flags & FLAG_VF_MULTICAST_PROMISC) { - adapter->flags |= I40EVF_FLAG_ALLMULTI_ON; - adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_ALLMULTI; - dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n"); - } - - if (!flags) { - adapter->flags &= ~(I40EVF_FLAG_PROMISC_ON | - I40EVF_FLAG_ALLMULTI_ON); - adapter->aq_required &= ~(I40EVF_FLAG_AQ_RELEASE_PROMISC | - I40EVF_FLAG_AQ_RELEASE_ALLMULTI); - dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n"); - } - - adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE; - vpi.vsi_id = adapter->vsi_res->vsi_id; - vpi.flags = flags; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, - (u8 *)&vpi, sizeof(vpi)); -} - -/** - * i40evf_request_stats - * @adapter: adapter structure - * - * Request VSI statistics from PF. 
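A sizing detail in the RSS key and LUT senders below: struct virtchnl_rss_key declares a one-byte trailing array, so the allocation is sizeof(struct) + payload - 1 to avoid counting that placeholder byte twice. A sketch with a simplified stand-in for the virtchnl struct (the 52-byte key length is an assumed example):

#include <stdio.h>
#include <stddef.h>

struct rss_key_msg {
        unsigned short vsi_id;
        unsigned short key_len;
        unsigned char key[1];   /* placeholder for key_len bytes */
};

int main(void)
{
        size_t key_size = 52;   /* assumed key length */
        size_t len = sizeof(struct rss_key_msg) + key_size - 1;

        /* prints: allocating 57 bytes for a 52-byte key */
        printf("allocating %zu bytes for a %zu-byte key\n", len, key_size);
        return 0;
}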
- **/ -void i40evf_request_stats(struct i40evf_adapter *adapter) -{ - struct virtchnl_queue_select vqs; - - if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { - /* no error message, this isn't crucial */ - return; - } - adapter->current_op = VIRTCHNL_OP_GET_STATS; - vqs.vsi_id = adapter->vsi_res->vsi_id; - /* queue maps are ignored for this message - only the vsi is used */ - if (i40evf_send_pf_msg(adapter, VIRTCHNL_OP_GET_STATS, - (u8 *)&vqs, sizeof(vqs))) - /* if the request failed, don't lock out others */ - adapter->current_op = VIRTCHNL_OP_UNKNOWN; -} - -/** - * i40evf_get_hena - * @adapter: adapter structure - * - * Request hash enable capabilities from PF - **/ -void i40evf_get_hena(struct i40evf_adapter *adapter) -{ - if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { - /* bail because we already have a command pending */ - dev_err(&adapter->pdev->dev, "Cannot get RSS hash capabilities, command %d pending\n", - adapter->current_op); - return; - } - adapter->current_op = VIRTCHNL_OP_GET_RSS_HENA_CAPS; - adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_HENA; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HENA_CAPS, - NULL, 0); -} - -/** - * i40evf_set_hena - * @adapter: adapter structure - * - * Request the PF to set our RSS hash capabilities - **/ -void i40evf_set_hena(struct i40evf_adapter *adapter) -{ - struct virtchnl_rss_hena vrh; - - if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { - /* bail because we already have a command pending */ - dev_err(&adapter->pdev->dev, "Cannot set RSS hash enable, command %d pending\n", - adapter->current_op); - return; - } - vrh.hena = adapter->hena; - adapter->current_op = VIRTCHNL_OP_SET_RSS_HENA; - adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_HENA; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HENA, - (u8 *)&vrh, sizeof(vrh)); -} - -/** - * i40evf_set_rss_key - * @adapter: adapter structure - * - * Request the PF to set our RSS hash key - **/ -void i40evf_set_rss_key(struct i40evf_adapter *adapter) -{ - struct virtchnl_rss_key *vrk; - int len; - - if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { - /* bail because we already have a command pending */ - dev_err(&adapter->pdev->dev, "Cannot set RSS key, command %d pending\n", - adapter->current_op); - return; - } - len = sizeof(struct virtchnl_rss_key) + - (adapter->rss_key_size * sizeof(u8)) - 1; - vrk = kzalloc(len, GFP_KERNEL); - if (!vrk) - return; - vrk->vsi_id = adapter->vsi.id; - vrk->key_len = adapter->rss_key_size; - memcpy(vrk->key, adapter->rss_key, adapter->rss_key_size); - - adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_KEY; - adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_KEY; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_KEY, - (u8 *)vrk, len); - kfree(vrk); -} - -/** - * i40evf_set_rss_lut - * @adapter: adapter structure - * - * Request the PF to set our RSS lookup table - **/ -void i40evf_set_rss_lut(struct i40evf_adapter *adapter) -{ - struct virtchnl_rss_lut *vrl; - int len; - - if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { - /* bail because we already have a command pending */ - dev_err(&adapter->pdev->dev, "Cannot set RSS LUT, command %d pending\n", - adapter->current_op); - return; - } - len = sizeof(struct virtchnl_rss_lut) + - (adapter->rss_lut_size * sizeof(u8)) - 1; - vrl = kzalloc(len, GFP_KERNEL); - if (!vrl) - return; - vrl->vsi_id = adapter->vsi.id; - vrl->lut_entries = adapter->rss_lut_size; - memcpy(vrl->lut, adapter->rss_lut, adapter->rss_lut_size); - adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_LUT; - adapter->aq_required &= 
~I40EVF_FLAG_AQ_SET_RSS_LUT; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_LUT, - (u8 *)vrl, len); - kfree(vrl); -} - -/** - * i40evf_enable_vlan_stripping - * @adapter: adapter structure - * - * Request VLAN header stripping to be enabled - **/ -void i40evf_enable_vlan_stripping(struct i40evf_adapter *adapter) -{ - if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { - /* bail because we already have a command pending */ - dev_err(&adapter->pdev->dev, "Cannot enable stripping, command %d pending\n", - adapter->current_op); - return; - } - adapter->current_op = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING; - adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, - NULL, 0); -} - -/** - * i40evf_disable_vlan_stripping - * @adapter: adapter structure - * - * Request VLAN header stripping to be disabled - **/ -void i40evf_disable_vlan_stripping(struct i40evf_adapter *adapter) -{ - if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { - /* bail because we already have a command pending */ - dev_err(&adapter->pdev->dev, "Cannot disable stripping, command %d pending\n", - adapter->current_op); - return; - } - adapter->current_op = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING; - adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, - NULL, 0); -} - -/** - * i40evf_print_link_message - print link up or down - * @adapter: adapter structure - * - * Log a message telling the world of our wondrous link status - */ -static void i40evf_print_link_message(struct i40evf_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - char *speed = "Unknown "; - - if (!adapter->link_up) { - netdev_info(netdev, "NIC Link is Down\n"); - return; - } - - switch (adapter->link_speed) { - case I40E_LINK_SPEED_40GB: - speed = "40 G"; - break; - case I40E_LINK_SPEED_25GB: - speed = "25 G"; - break; - case I40E_LINK_SPEED_20GB: - speed = "20 G"; - break; - case I40E_LINK_SPEED_10GB: - speed = "10 G"; - break; - case I40E_LINK_SPEED_1GB: - speed = "1000 M"; - break; - case I40E_LINK_SPEED_100MB: - speed = "100 M"; - break; - default: - break; - } - - netdev_info(netdev, "NIC Link is Up %sbps Full Duplex\n", speed); -} - -/** - * i40evf_enable_channels - * @adapter: adapter structure - * - * Request that the PF enable channels as specified by - * the user via tc tool.
- **/ -void i40evf_enable_channels(struct i40evf_adapter *adapter) -{ - struct virtchnl_tc_info *vti = NULL; - u16 len; - int i; - - if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { - /* bail because we already have a command pending */ - dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n", - adapter->current_op); - return; - } - - len = (adapter->num_tc * sizeof(struct virtchnl_channel_info)) + - sizeof(struct virtchnl_tc_info); - - vti = kzalloc(len, GFP_KERNEL); - if (!vti) - return; - vti->num_tc = adapter->num_tc; - for (i = 0; i < vti->num_tc; i++) { - vti->list[i].count = adapter->ch_config.ch_info[i].count; - vti->list[i].offset = adapter->ch_config.ch_info[i].offset; - vti->list[i].pad = 0; - vti->list[i].max_tx_rate = - adapter->ch_config.ch_info[i].max_tx_rate; - } - - adapter->ch_config.state = __I40EVF_TC_RUNNING; - adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED; - adapter->current_op = VIRTCHNL_OP_ENABLE_CHANNELS; - adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_CHANNELS; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_CHANNELS, - (u8 *)vti, len); - kfree(vti); -} - -/** - * i40evf_disable_channel - * @adapter: adapter structure - * - * Request that the PF disable channels that are configured - **/ -void i40evf_disable_channels(struct i40evf_adapter *adapter) -{ - if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { - /* bail because we already have a command pending */ - dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n", - adapter->current_op); - return; - } - - adapter->ch_config.state = __I40EVF_TC_INVALID; - adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED; - adapter->current_op = VIRTCHNL_OP_DISABLE_CHANNELS; - adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_CHANNELS; - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_CHANNELS, - NULL, 0); -} - -/** - * i40evf_print_cloud_filter - * @adapter: adapter structure - * @f: cloud filter to print - * - * Print the cloud filter - **/ -static void i40evf_print_cloud_filter(struct i40evf_adapter *adapter, - struct virtchnl_filter *f) -{ - switch (f->flow_type) { - case VIRTCHNL_TCP_V4_FLOW: - dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI4 src_ip %pI4 dst_port %hu src_port %hu\n", - &f->data.tcp_spec.dst_mac, - &f->data.tcp_spec.src_mac, - ntohs(f->data.tcp_spec.vlan_id), - &f->data.tcp_spec.dst_ip[0], - &f->data.tcp_spec.src_ip[0], - ntohs(f->data.tcp_spec.dst_port), - ntohs(f->data.tcp_spec.src_port)); - break; - case VIRTCHNL_TCP_V6_FLOW: - dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI6 src_ip %pI6 dst_port %hu src_port %hu\n", - &f->data.tcp_spec.dst_mac, - &f->data.tcp_spec.src_mac, - ntohs(f->data.tcp_spec.vlan_id), - &f->data.tcp_spec.dst_ip, - &f->data.tcp_spec.src_ip, - ntohs(f->data.tcp_spec.dst_port), - ntohs(f->data.tcp_spec.src_port)); - break; - } -} - -/** - * i40evf_add_cloud_filter - * @adapter: adapter structure - * - * Request that the PF add cloud filters as specified - * by the user via tc tool. 
- **/ -void i40evf_add_cloud_filter(struct i40evf_adapter *adapter) -{ - struct i40evf_cloud_filter *cf; - struct virtchnl_filter *f; - int len = 0, count = 0; - - if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { - /* bail because we already have a command pending */ - dev_err(&adapter->pdev->dev, "Cannot add cloud filter, command %d pending\n", - adapter->current_op); - return; - } - list_for_each_entry(cf, &adapter->cloud_filter_list, list) { - if (cf->add) { - count++; - break; - } - } - if (!count) { - adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_CLOUD_FILTER; - return; - } - adapter->current_op = VIRTCHNL_OP_ADD_CLOUD_FILTER; - - len = sizeof(struct virtchnl_filter); - f = kzalloc(len, GFP_KERNEL); - if (!f) - return; - - list_for_each_entry(cf, &adapter->cloud_filter_list, list) { - if (cf->add) { - memcpy(f, &cf->f, sizeof(struct virtchnl_filter)); - cf->add = false; - cf->state = __I40EVF_CF_ADD_PENDING; - i40evf_send_pf_msg(adapter, - VIRTCHNL_OP_ADD_CLOUD_FILTER, - (u8 *)f, len); - } - } - kfree(f); -} - -/** - * i40evf_del_cloud_filter - * @adapter: adapter structure - * - * Request that the PF delete cloud filters as specified - * by the user via tc tool. - **/ -void i40evf_del_cloud_filter(struct i40evf_adapter *adapter) -{ - struct i40evf_cloud_filter *cf, *cftmp; - struct virtchnl_filter *f; - int len = 0, count = 0; - - if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { - /* bail because we already have a command pending */ - dev_err(&adapter->pdev->dev, "Cannot remove cloud filter, command %d pending\n", - adapter->current_op); - return; - } - list_for_each_entry(cf, &adapter->cloud_filter_list, list) { - if (cf->del) { - count++; - break; - } - } - if (!count) { - adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_CLOUD_FILTER; - return; - } - adapter->current_op = VIRTCHNL_OP_DEL_CLOUD_FILTER; - - len = sizeof(struct virtchnl_filter); - f = kzalloc(len, GFP_KERNEL); - if (!f) - return; - - list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { - if (cf->del) { - memcpy(f, &cf->f, sizeof(struct virtchnl_filter)); - cf->del = false; - cf->state = __I40EVF_CF_DEL_PENDING; - i40evf_send_pf_msg(adapter, - VIRTCHNL_OP_DEL_CLOUD_FILTER, - (u8 *)f, len); - } - } - kfree(f); -} - -/** - * i40evf_request_reset - * @adapter: adapter structure - * - * Request that the PF reset this VF. No response is expected. - **/ -void i40evf_request_reset(struct i40evf_adapter *adapter) -{ - /* Don't check CURRENT_OP - this is always higher priority */ - i40evf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0); - adapter->current_op = VIRTCHNL_OP_UNKNOWN; -} - -/** - * i40evf_virtchnl_completion - * @adapter: adapter structure - * @v_opcode: opcode sent by PF - * @v_retval: retval sent by PF - * @msg: message sent by PF - * @msglen: message length - * - * Asynchronous completion function for admin queue messages. Rather than busy - * wait, we fire off our requests and assume that no errors will be returned. - * This function handles the reply messages. 
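The completion model described above hinges on current_op acting as a one-slot latch: a sender occupies it, every other sender bails with "command %d pending", and the completion handler releases it back to VIRTCHNL_OP_UNKNOWN. A compact sketch of the latch (opcode names invented):

#include <stdio.h>

enum op { OP_UNKNOWN, OP_ENABLE_QUEUES, OP_GET_STATS };

static enum op current_op = OP_UNKNOWN;

/* returns 0 on success, -1 if another command is still outstanding */
static int send_request(enum op op)
{
        if (current_op != OP_UNKNOWN) {
                printf("cannot send %d, command %d pending\n",
                       op, current_op);
                return -1;
        }
        current_op = op;
        printf("sent %d\n", op);
        return 0;
}

/* reply handler: release the latch for the next request */
static void handle_completion(enum op op)
{
        if (current_op != op)
                printf("expected response %d, received %d\n",
                       current_op, op);
        current_op = OP_UNKNOWN;
}

int main(void)
{
        send_request(OP_ENABLE_QUEUES);
        send_request(OP_GET_STATS);     /* rejected: latch occupied */
        handle_completion(OP_ENABLE_QUEUES);
        send_request(OP_GET_STATS);     /* now succeeds */
        return 0;
}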
- **/ -void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, - enum virtchnl_ops v_opcode, - i40e_status v_retval, - u8 *msg, u16 msglen) -{ - struct net_device *netdev = adapter->netdev; - - if (v_opcode == VIRTCHNL_OP_EVENT) { - struct virtchnl_pf_event *vpe = - (struct virtchnl_pf_event *)msg; - bool link_up = vpe->event_data.link_event.link_status; - switch (vpe->event) { - case VIRTCHNL_EVENT_LINK_CHANGE: - adapter->link_speed = - vpe->event_data.link_event.link_speed; - - /* we've already got the right link status, bail */ - if (adapter->link_up == link_up) - break; - - if (link_up) { - /* If we get link up message and start queues - * before our queues are configured it will - * trigger a TX hang. In that case, just ignore - * the link status message, we'll get another one - * after we enable queues and are actually prepared - * to send traffic. - */ - if (adapter->state != __I40EVF_RUNNING) - break; - - /* For ADq enabled VF, we reconfigure VSIs and - * re-allocate queues. Hence wait till all - * queues are enabled. - */ - if (adapter->flags & - I40EVF_FLAG_QUEUES_DISABLED) - break; - } - - adapter->link_up = link_up; - if (link_up) { - netif_tx_start_all_queues(netdev); - netif_carrier_on(netdev); - } else { - netif_tx_stop_all_queues(netdev); - netif_carrier_off(netdev); - } - i40evf_print_link_message(adapter); - break; - case VIRTCHNL_EVENT_RESET_IMPENDING: - dev_info(&adapter->pdev->dev, "Reset warning received from the PF\n"); - if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) { - adapter->flags |= I40EVF_FLAG_RESET_PENDING; - dev_info(&adapter->pdev->dev, "Scheduling reset task\n"); - schedule_work(&adapter->reset_task); - } - break; - default: - dev_err(&adapter->pdev->dev, "Unknown event %d from PF\n", - vpe->event); - break; - } - return; - } - if (v_retval) { - switch (v_opcode) { - case VIRTCHNL_OP_ADD_VLAN: - dev_err(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n", - i40evf_stat_str(&adapter->hw, v_retval)); - break; - case VIRTCHNL_OP_ADD_ETH_ADDR: - dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n", - i40evf_stat_str(&adapter->hw, v_retval)); - break; - case VIRTCHNL_OP_DEL_VLAN: - dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n", - i40evf_stat_str(&adapter->hw, v_retval)); - break; - case VIRTCHNL_OP_DEL_ETH_ADDR: - dev_err(&adapter->pdev->dev, "Failed to delete MAC filter, error %s\n", - i40evf_stat_str(&adapter->hw, v_retval)); - break; - case VIRTCHNL_OP_ENABLE_CHANNELS: - dev_err(&adapter->pdev->dev, "Failed to configure queue channels, error %s\n", - i40evf_stat_str(&adapter->hw, v_retval)); - adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED; - adapter->ch_config.state = __I40EVF_TC_INVALID; - netdev_reset_tc(netdev); - netif_tx_start_all_queues(netdev); - break; - case VIRTCHNL_OP_DISABLE_CHANNELS: - dev_err(&adapter->pdev->dev, "Failed to disable queue channels, error %s\n", - i40evf_stat_str(&adapter->hw, v_retval)); - adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED; - adapter->ch_config.state = __I40EVF_TC_RUNNING; - netif_tx_start_all_queues(netdev); - break; - case VIRTCHNL_OP_ADD_CLOUD_FILTER: { - struct i40evf_cloud_filter *cf, *cftmp; - - list_for_each_entry_safe(cf, cftmp, - &adapter->cloud_filter_list, - list) { - if (cf->state == __I40EVF_CF_ADD_PENDING) { - cf->state = __I40EVF_CF_INVALID; - dev_info(&adapter->pdev->dev, "Failed to add cloud filter, error %s\n", - i40evf_stat_str(&adapter->hw, - v_retval)); - i40evf_print_cloud_filter(adapter, - &cf->f); - list_del(&cf->list); -
kfree(cf); - adapter->num_cloud_filters--; - } - } - } - break; - case VIRTCHNL_OP_DEL_CLOUD_FILTER: { - struct i40evf_cloud_filter *cf; - - list_for_each_entry(cf, &adapter->cloud_filter_list, - list) { - if (cf->state == __I40EVF_CF_DEL_PENDING) { - cf->state = __I40EVF_CF_ACTIVE; - dev_info(&adapter->pdev->dev, "Failed to del cloud filter, error %s\n", - i40evf_stat_str(&adapter->hw, - v_retval)); - i40evf_print_cloud_filter(adapter, - &cf->f); - } - } - } - break; - default: - dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n", - v_retval, - i40evf_stat_str(&adapter->hw, v_retval), - v_opcode); - } - } - switch (v_opcode) { - case VIRTCHNL_OP_GET_STATS: { - struct i40e_eth_stats *stats = - (struct i40e_eth_stats *)msg; - netdev->stats.rx_packets = stats->rx_unicast + - stats->rx_multicast + - stats->rx_broadcast; - netdev->stats.tx_packets = stats->tx_unicast + - stats->tx_multicast + - stats->tx_broadcast; - netdev->stats.rx_bytes = stats->rx_bytes; - netdev->stats.tx_bytes = stats->tx_bytes; - netdev->stats.tx_errors = stats->tx_errors; - netdev->stats.rx_dropped = stats->rx_discards; - netdev->stats.tx_dropped = stats->tx_discards; - adapter->current_stats = *stats; - } - break; - case VIRTCHNL_OP_GET_VF_RESOURCES: { - u16 len = sizeof(struct virtchnl_vf_resource) + - I40E_MAX_VF_VSI * - sizeof(struct virtchnl_vsi_resource); - memcpy(adapter->vf_res, msg, min(msglen, len)); - i40evf_validate_num_queues(adapter); - i40e_vf_parse_hw_config(&adapter->hw, adapter->vf_res); - if (is_zero_ether_addr(adapter->hw.mac.addr)) { - /* restore current mac address */ - ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); - } else { - /* refresh current mac address if changed */ - ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); - ether_addr_copy(netdev->perm_addr, - adapter->hw.mac.addr); - } - i40evf_process_config(adapter); - } - break; - case VIRTCHNL_OP_ENABLE_QUEUES: - /* enable transmits */ - i40evf_irq_enable(adapter, true); - adapter->flags &= ~I40EVF_FLAG_QUEUES_DISABLED; - break; - case VIRTCHNL_OP_DISABLE_QUEUES: - i40evf_free_all_tx_resources(adapter); - i40evf_free_all_rx_resources(adapter); - if (adapter->state == __I40EVF_DOWN_PENDING) { - adapter->state = __I40EVF_DOWN; - wake_up(&adapter->down_waitqueue); - } - break; - case VIRTCHNL_OP_VERSION: - case VIRTCHNL_OP_CONFIG_IRQ_MAP: - /* Don't display an error if we get these out of sequence. - * If the firmware needed to get kicked, we'll get these and - * it's no problem. - */ - if (v_opcode != adapter->current_op) - return; - break; - case VIRTCHNL_OP_IWARP: - /* Gobble zero-length replies from the PF. They indicate that - * a previous message was received OK, and the client doesn't - * care about that. 
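Note the defensive copy in the GET_VF_RESOURCES case above: the PF-supplied msglen is clamped with min() against the locally computed buffer size before the memcpy(), so an oversized reply cannot overrun adapter->vf_res. The pattern in isolation:

#include <stdio.h>
#include <string.h>

/* copy at most dst_len bytes, whatever the sender claims */
static size_t copy_clamped(void *dst, size_t dst_len,
                           const void *src, size_t src_len)
{
        size_t n = src_len < dst_len ? src_len : dst_len;

        memcpy(dst, src, n);
        return n;
}

int main(void)
{
        char resource_buf[16];
        char wire_msg[64] = "oversized reply";

        /* prints: copied 16 bytes */
        printf("copied %zu bytes\n",
               copy_clamped(resource_buf, sizeof(resource_buf),
                            wire_msg, sizeof(wire_msg)));
        return 0;
}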
- */ - if (msglen && CLIENT_ENABLED(adapter)) - i40evf_notify_client_message(&adapter->vsi, - msg, msglen); - break; - - case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: - adapter->client_pending &= - ~(BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP)); - break; - case VIRTCHNL_OP_GET_RSS_HENA_CAPS: { - struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg; - if (msglen == sizeof(*vrh)) - adapter->hena = vrh->hena; - else - dev_warn(&adapter->pdev->dev, - "Invalid message %d from PF\n", v_opcode); - } - break; - case VIRTCHNL_OP_REQUEST_QUEUES: { - struct virtchnl_vf_res_request *vfres = - (struct virtchnl_vf_res_request *)msg; - if (vfres->num_queue_pairs != adapter->num_req_queues) { - dev_info(&adapter->pdev->dev, - "Requested %d queues, PF can support %d\n", - adapter->num_req_queues, - vfres->num_queue_pairs); - adapter->num_req_queues = 0; - adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED; - } - } - break; - case VIRTCHNL_OP_ADD_CLOUD_FILTER: { - struct i40evf_cloud_filter *cf; - - list_for_each_entry(cf, &adapter->cloud_filter_list, list) { - if (cf->state == __I40EVF_CF_ADD_PENDING) - cf->state = __I40EVF_CF_ACTIVE; - } - } - break; - case VIRTCHNL_OP_DEL_CLOUD_FILTER: { - struct i40evf_cloud_filter *cf, *cftmp; - - list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, - list) { - if (cf->state == __I40EVF_CF_DEL_PENDING) { - cf->state = __I40EVF_CF_INVALID; - list_del(&cf->list); - kfree(cf); - adapter->num_cloud_filters--; - } - } - } - break; - default: - if (adapter->current_op && (v_opcode != adapter->current_op)) - dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n", - adapter->current_op, v_opcode); - break; - } /* switch v_opcode */ - adapter->current_op = VIRTCHNL_OP_UNKNOWN; -} diff --git a/drivers/net/ethernet/intel/iavf/Makefile b/drivers/net/ethernet/intel/iavf/Makefile new file mode 100644 index 000000000000..1b050d9d5f49 --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/Makefile @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright(c) 2013 - 2018 Intel Corporation. +# +# Makefile for the Intel(R) Ethernet Adaptive Virtual Function (iavf) +# driver +# +# + +ccflags-y += -I$(src) +subdir-ccflags-y += -I$(src) + +obj-$(CONFIG_IAVF) += iavf.o + +iavf-objs := i40evf_main.o i40evf_ethtool.o i40evf_virtchnl.o \ + i40e_txrx.o i40e_common.o i40e_adminq.o i40evf_client.o diff --git a/drivers/net/ethernet/intel/iavf/i40e_adminq.c b/drivers/net/ethernet/intel/iavf/i40e_adminq.c new file mode 100644 index 000000000000..21a0dbf6ccf6 --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/i40e_adminq.c @@ -0,0 +1,967 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ + +#include "i40e_status.h" +#include "i40e_type.h" +#include "i40e_register.h" +#include "i40e_adminq.h" +#include "i40e_prototype.h" + +/** + * i40e_is_nvm_update_op - return true if this is an NVM update operation + * @desc: API request descriptor + **/ +static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc) +{ + return (desc->opcode == i40e_aqc_opc_nvm_erase) || + (desc->opcode == i40e_aqc_opc_nvm_update); +} + +/** + * i40e_adminq_init_regs - Initialize AdminQ registers + * @hw: pointer to the hardware structure + * + * This assumes the alloc_asq and alloc_arq functions have already been called + **/ +static void i40e_adminq_init_regs(struct i40e_hw *hw) +{ + /* set head and tail registers in our local struct */ + if (i40e_is_vf(hw)) { + hw->aq.asq.tail = I40E_VF_ATQT1; + hw->aq.asq.head = I40E_VF_ATQH1; + hw->aq.asq.len = I40E_VF_ATQLEN1; + hw->aq.asq.bal = I40E_VF_ATQBAL1; + hw->aq.asq.bah = I40E_VF_ATQBAH1; + hw->aq.arq.tail = I40E_VF_ARQT1; + hw->aq.arq.head = I40E_VF_ARQH1; + hw->aq.arq.len = I40E_VF_ARQLEN1; + hw->aq.arq.bal = I40E_VF_ARQBAL1; + hw->aq.arq.bah = I40E_VF_ARQBAH1; + } +} + +/** + * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings + * @hw: pointer to the hardware structure + **/ +static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw) +{ + i40e_status ret_code; + + ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf, + i40e_mem_atq_ring, + (hw->aq.num_asq_entries * + sizeof(struct i40e_aq_desc)), + I40E_ADMINQ_DESC_ALIGNMENT); + if (ret_code) + return ret_code; + + ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf, + (hw->aq.num_asq_entries * + sizeof(struct i40e_asq_cmd_details))); + if (ret_code) { + i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); + return ret_code; + } + + return ret_code; +} + +/** + * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings + * @hw: pointer to the hardware structure + **/ +static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw) +{ + i40e_status ret_code; + + ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf, + i40e_mem_arq_ring, + (hw->aq.num_arq_entries * + sizeof(struct i40e_aq_desc)), + I40E_ADMINQ_DESC_ALIGNMENT); + + return ret_code; +} + +/** + * i40e_free_adminq_asq - Free Admin Queue send rings + * @hw: pointer to the hardware structure + * + * This assumes the posted send buffers have already been cleaned + * and de-allocated + **/ +static void i40e_free_adminq_asq(struct i40e_hw *hw) +{ + i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); +} + +/** + * i40e_free_adminq_arq - Free Admin Queue receive rings + * @hw: pointer to the hardware structure + * + * This assumes the posted receive buffers have already been cleaned + * and de-allocated + **/ +static void i40e_free_adminq_arq(struct i40e_hw *hw) +{ + i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf); +} + +/** + * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue + * @hw: pointer to the hardware structure + **/ +static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw) +{ + i40e_status ret_code; + struct i40e_aq_desc *desc; + struct i40e_dma_mem *bi; + int i; + + /* We'll be allocating the buffer info memory first, then we can + * allocate the mapped buffers for the event processing + */ + + /* buffer_info structures do not need alignment */ + ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head, + (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem))); + if (ret_code) + goto alloc_arq_bufs; + hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va; + + /* 
allocate the mapped buffers */ + for (i = 0; i < hw->aq.num_arq_entries; i++) { + bi = &hw->aq.arq.r.arq_bi[i]; + ret_code = i40e_allocate_dma_mem(hw, bi, + i40e_mem_arq_buf, + hw->aq.arq_buf_size, + I40E_ADMINQ_DESC_ALIGNMENT); + if (ret_code) + goto unwind_alloc_arq_bufs; + + /* now configure the descriptors for use */ + desc = I40E_ADMINQ_DESC(hw->aq.arq, i); + + desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF); + if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF) + desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB); + desc->opcode = 0; + /* This is in accordance with Admin queue design, there is no + * register for buffer size configuration + */ + desc->datalen = cpu_to_le16((u16)bi->size); + desc->retval = 0; + desc->cookie_high = 0; + desc->cookie_low = 0; + desc->params.external.addr_high = + cpu_to_le32(upper_32_bits(bi->pa)); + desc->params.external.addr_low = + cpu_to_le32(lower_32_bits(bi->pa)); + desc->params.external.param0 = 0; + desc->params.external.param1 = 0; + } + +alloc_arq_bufs: + return ret_code; + +unwind_alloc_arq_bufs: + /* don't try to free the one that failed... */ + i--; + for (; i >= 0; i--) + i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]); + i40e_free_virt_mem(hw, &hw->aq.arq.dma_head); + + return ret_code; +} + +/** + * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue + * @hw: pointer to the hardware structure + **/ +static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw) +{ + i40e_status ret_code; + struct i40e_dma_mem *bi; + int i; + + /* No mapped memory needed yet, just the buffer info structures */ + ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head, + (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem))); + if (ret_code) + goto alloc_asq_bufs; + hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va; + + /* allocate the mapped buffers */ + for (i = 0; i < hw->aq.num_asq_entries; i++) { + bi = &hw->aq.asq.r.asq_bi[i]; + ret_code = i40e_allocate_dma_mem(hw, bi, + i40e_mem_asq_buf, + hw->aq.asq_buf_size, + I40E_ADMINQ_DESC_ALIGNMENT); + if (ret_code) + goto unwind_alloc_asq_bufs; + } +alloc_asq_bufs: + return ret_code; + +unwind_alloc_asq_bufs: + /* don't try to free the one that failed... 
*/ + i--; + for (; i >= 0; i--) + i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]); + i40e_free_virt_mem(hw, &hw->aq.asq.dma_head); + + return ret_code; +} + +/** + * i40e_free_arq_bufs - Free receive queue buffer info elements + * @hw: pointer to the hardware structure + **/ +static void i40e_free_arq_bufs(struct i40e_hw *hw) +{ + int i; + + /* free descriptors */ + for (i = 0; i < hw->aq.num_arq_entries; i++) + i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]); + + /* free the descriptor memory */ + i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf); + + /* free the dma header */ + i40e_free_virt_mem(hw, &hw->aq.arq.dma_head); +} + +/** + * i40e_free_asq_bufs - Free send queue buffer info elements + * @hw: pointer to the hardware structure + **/ +static void i40e_free_asq_bufs(struct i40e_hw *hw) +{ + int i; + + /* only unmap if the address is non-NULL */ + for (i = 0; i < hw->aq.num_asq_entries; i++) + if (hw->aq.asq.r.asq_bi[i].pa) + i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]); + + /* free the buffer info list */ + i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf); + + /* free the descriptor memory */ + i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); + + /* free the dma header */ + i40e_free_virt_mem(hw, &hw->aq.asq.dma_head); +} + +/** + * i40e_config_asq_regs - configure ASQ registers + * @hw: pointer to the hardware structure + * + * Configure base address and length registers for the transmit queue + **/ +static i40e_status i40e_config_asq_regs(struct i40e_hw *hw) +{ + i40e_status ret_code = 0; + u32 reg = 0; + + /* Clear Head and Tail */ + wr32(hw, hw->aq.asq.head, 0); + wr32(hw, hw->aq.asq.tail, 0); + + /* set starting point */ + wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries | + I40E_VF_ATQLEN1_ATQENABLE_MASK)); + wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa)); + wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa)); + + /* Check one register to verify that config was applied */ + reg = rd32(hw, hw->aq.asq.bal); + if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa)) + ret_code = I40E_ERR_ADMIN_QUEUE_ERROR; + + return ret_code; +} + +/** + * i40e_config_arq_regs - ARQ register configuration + * @hw: pointer to the hardware structure + * + * Configure base address and length registers for the receive (event queue) + **/ +static i40e_status i40e_config_arq_regs(struct i40e_hw *hw) +{ + i40e_status ret_code = 0; + u32 reg = 0; + + /* Clear Head and Tail */ + wr32(hw, hw->aq.arq.head, 0); + wr32(hw, hw->aq.arq.tail, 0); + + /* set starting point */ + wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries | + I40E_VF_ARQLEN1_ARQENABLE_MASK)); + wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa)); + wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa)); + + /* Update tail in the HW to post pre-allocated buffers */ + wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1); + + /* Check one register to verify that config was applied */ + reg = rd32(hw, hw->aq.arq.bal); + if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa)) + ret_code = I40E_ERR_ADMIN_QUEUE_ERROR; + + return ret_code; +} + +/** + * i40e_init_asq - main initialization routine for ASQ + * @hw: pointer to the hardware structure + * + * This is the main initialization routine for the Admin Send Queue + * Prior to calling this function, drivers *MUST* set the following fields + * in the hw->aq structure: + * - hw->aq.num_asq_entries + * - hw->aq.asq_buf_size + * + * Do *NOT* hold the lock when calling this as the memory allocation routines + * called are not going to be atomic context safe + **/ +static
i40e_status i40e_init_asq(struct i40e_hw *hw) +{ + i40e_status ret_code = 0; + + if (hw->aq.asq.count > 0) { + /* queue already initialized */ + ret_code = I40E_ERR_NOT_READY; + goto init_adminq_exit; + } + + /* verify input for valid configuration */ + if ((hw->aq.num_asq_entries == 0) || + (hw->aq.asq_buf_size == 0)) { + ret_code = I40E_ERR_CONFIG; + goto init_adminq_exit; + } + + hw->aq.asq.next_to_use = 0; + hw->aq.asq.next_to_clean = 0; + + /* allocate the ring memory */ + ret_code = i40e_alloc_adminq_asq_ring(hw); + if (ret_code) + goto init_adminq_exit; + + /* allocate buffers in the rings */ + ret_code = i40e_alloc_asq_bufs(hw); + if (ret_code) + goto init_adminq_free_rings; + + /* initialize base registers */ + ret_code = i40e_config_asq_regs(hw); + if (ret_code) + goto init_adminq_free_rings; + + /* success! */ + hw->aq.asq.count = hw->aq.num_asq_entries; + goto init_adminq_exit; + +init_adminq_free_rings: + i40e_free_adminq_asq(hw); + +init_adminq_exit: + return ret_code; +} + +/** + * i40e_init_arq - initialize ARQ + * @hw: pointer to the hardware structure + * + * The main initialization routine for the Admin Receive (Event) Queue. + * Prior to calling this function, drivers *MUST* set the following fields + * in the hw->aq structure: + * - hw->aq.num_arq_entries + * - hw->aq.arq_buf_size + * + * Do *NOT* hold the lock when calling this as the memory allocation routines + * called are not going to be atomic context safe + **/ +static i40e_status i40e_init_arq(struct i40e_hw *hw) +{ + i40e_status ret_code = 0; + + if (hw->aq.arq.count > 0) { + /* queue already initialized */ + ret_code = I40E_ERR_NOT_READY; + goto init_adminq_exit; + } + + /* verify input for valid configuration */ + if ((hw->aq.num_arq_entries == 0) || + (hw->aq.arq_buf_size == 0)) { + ret_code = I40E_ERR_CONFIG; + goto init_adminq_exit; + } + + hw->aq.arq.next_to_use = 0; + hw->aq.arq.next_to_clean = 0; + + /* allocate the ring memory */ + ret_code = i40e_alloc_adminq_arq_ring(hw); + if (ret_code) + goto init_adminq_exit; + + /* allocate buffers in the rings */ + ret_code = i40e_alloc_arq_bufs(hw); + if (ret_code) + goto init_adminq_free_rings; + + /* initialize base registers */ + ret_code = i40e_config_arq_regs(hw); + if (ret_code) + goto init_adminq_free_rings; + + /* success!
*/ + hw->aq.arq.count = hw->aq.num_arq_entries; + goto init_adminq_exit; + +init_adminq_free_rings: + i40e_free_adminq_arq(hw); + +init_adminq_exit: + return ret_code; +} + +/** + * i40e_shutdown_asq - shutdown the ASQ + * @hw: pointer to the hardware structure + * + * The main shutdown routine for the Admin Send Queue + **/ +static i40e_status i40e_shutdown_asq(struct i40e_hw *hw) +{ + i40e_status ret_code = 0; + + mutex_lock(&hw->aq.asq_mutex); + + if (hw->aq.asq.count == 0) { + ret_code = I40E_ERR_NOT_READY; + goto shutdown_asq_out; + } + + /* Stop firmware AdminQ processing */ + wr32(hw, hw->aq.asq.head, 0); + wr32(hw, hw->aq.asq.tail, 0); + wr32(hw, hw->aq.asq.len, 0); + wr32(hw, hw->aq.asq.bal, 0); + wr32(hw, hw->aq.asq.bah, 0); + + hw->aq.asq.count = 0; /* to indicate uninitialized queue */ + + /* free ring buffers */ + i40e_free_asq_bufs(hw); + +shutdown_asq_out: + mutex_unlock(&hw->aq.asq_mutex); + return ret_code; +} + +/** + * i40e_shutdown_arq - shutdown ARQ + * @hw: pointer to the hardware structure + * + * The main shutdown routine for the Admin Receive Queue + **/ +static i40e_status i40e_shutdown_arq(struct i40e_hw *hw) +{ + i40e_status ret_code = 0; + + mutex_lock(&hw->aq.arq_mutex); + + if (hw->aq.arq.count == 0) { + ret_code = I40E_ERR_NOT_READY; + goto shutdown_arq_out; + } + + /* Stop firmware AdminQ processing */ + wr32(hw, hw->aq.arq.head, 0); + wr32(hw, hw->aq.arq.tail, 0); + wr32(hw, hw->aq.arq.len, 0); + wr32(hw, hw->aq.arq.bal, 0); + wr32(hw, hw->aq.arq.bah, 0); + + hw->aq.arq.count = 0; /* to indicate uninitialized queue */ + + /* free ring buffers */ + i40e_free_arq_bufs(hw); + +shutdown_arq_out: + mutex_unlock(&hw->aq.arq_mutex); + return ret_code; +} + +/** + * i40evf_init_adminq - main initialization routine for Admin Queue + * @hw: pointer to the hardware structure + * + * Prior to calling this function, drivers *MUST* set the following fields + * in the hw->aq structure: + * - hw->aq.num_asq_entries + * - hw->aq.num_arq_entries + * - hw->aq.arq_buf_size + * - hw->aq.asq_buf_size + **/ +i40e_status i40evf_init_adminq(struct i40e_hw *hw) +{ + i40e_status ret_code; + + /* verify input for valid configuration */ + if ((hw->aq.num_arq_entries == 0) || + (hw->aq.num_asq_entries == 0) || + (hw->aq.arq_buf_size == 0) || + (hw->aq.asq_buf_size == 0)) { + ret_code = I40E_ERR_CONFIG; + goto init_adminq_exit; + } + + /* Set up register offsets */ + i40e_adminq_init_regs(hw); + + /* setup ASQ command write back timeout */ + hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT; + + /* allocate the ASQ */ + ret_code = i40e_init_asq(hw); + if (ret_code) + goto init_adminq_destroy_locks; + + /* allocate the ARQ */ + ret_code = i40e_init_arq(hw); + if (ret_code) + goto init_adminq_free_asq; + + /* success! 
*/ + goto init_adminq_exit; + +init_adminq_free_asq: + i40e_shutdown_asq(hw); +init_adminq_destroy_locks: + +init_adminq_exit: + return ret_code; +} + +/** + * i40evf_shutdown_adminq - shutdown routine for the Admin Queue + * @hw: pointer to the hardware structure + **/ +i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw) +{ + i40e_status ret_code = 0; + + if (i40evf_check_asq_alive(hw)) + i40evf_aq_queue_shutdown(hw, true); + + i40e_shutdown_asq(hw); + i40e_shutdown_arq(hw); + + if (hw->nvm_buff.va) + i40e_free_virt_mem(hw, &hw->nvm_buff); + + return ret_code; +} + +/** + * i40e_clean_asq - cleans Admin send queue + * @hw: pointer to the hardware structure + * + * returns the number of free desc + **/ +static u16 i40e_clean_asq(struct i40e_hw *hw) +{ + struct i40e_adminq_ring *asq = &(hw->aq.asq); + struct i40e_asq_cmd_details *details; + u16 ntc = asq->next_to_clean; + struct i40e_aq_desc desc_cb; + struct i40e_aq_desc *desc; + + desc = I40E_ADMINQ_DESC(*asq, ntc); + details = I40E_ADMINQ_DETAILS(*asq, ntc); + while (rd32(hw, hw->aq.asq.head) != ntc) { + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head)); + + if (details->callback) { + I40E_ADMINQ_CALLBACK cb_func = + (I40E_ADMINQ_CALLBACK)details->callback; + desc_cb = *desc; + cb_func(hw, &desc_cb); + } + memset((void *)desc, 0, sizeof(struct i40e_aq_desc)); + memset((void *)details, 0, + sizeof(struct i40e_asq_cmd_details)); + ntc++; + if (ntc == asq->count) + ntc = 0; + desc = I40E_ADMINQ_DESC(*asq, ntc); + details = I40E_ADMINQ_DETAILS(*asq, ntc); + } + + asq->next_to_clean = ntc; + + return I40E_DESC_UNUSED(asq); +} + +/** + * i40evf_asq_done - check if FW has processed the Admin Send Queue + * @hw: pointer to the hw struct + * + * Returns true if the firmware has processed all descriptors on the + * admin send queue. Returns false if there are still requests pending. + **/ +bool i40evf_asq_done(struct i40e_hw *hw) +{ + /* AQ designers suggest use of head for better + * timing reliability than DD bit + */ + return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use; + +} + +/** + * i40evf_asq_send_command - send command to Admin Queue + * @hw: pointer to the hw struct + * @desc: prefilled descriptor describing the command (non DMA mem) + * @buff: buffer to use for indirect commands + * @buff_size: size of buffer for indirect commands + * @cmd_details: pointer to command details structure + * + * This is the main send command driver routine for the Admin Queue send + * queue. 
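The head-based idle test in i40evf_asq_done() above is worth restating: instead of polling per-descriptor DD bits, the driver declares the send queue done when the hardware head index has caught up with the software next_to_use index. In isolation:

#include <stdbool.h>
#include <stdio.h>

struct ring { unsigned int hw_head; unsigned int next_to_use; };

static bool asq_done(const struct ring *r)
{
        return r->hw_head == r->next_to_use;
}

int main(void)
{
        struct ring r = { .hw_head = 3, .next_to_use = 5 };

        printf("done: %d\n", asq_done(&r));     /* 0: descriptors 3, 4 pending */
        r.hw_head = 5;
        printf("done: %d\n", asq_done(&r));     /* 1: firmware caught up */
        return 0;
}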
It runs the queue, cleans the queue, etc + **/ +i40e_status i40evf_asq_send_command(struct i40e_hw *hw, + struct i40e_aq_desc *desc, + void *buff, /* can be NULL */ + u16 buff_size, + struct i40e_asq_cmd_details *cmd_details) +{ + i40e_status status = 0; + struct i40e_dma_mem *dma_buff = NULL; + struct i40e_asq_cmd_details *details; + struct i40e_aq_desc *desc_on_ring; + bool cmd_completed = false; + u16 retval = 0; + u32 val = 0; + + mutex_lock(&hw->aq.asq_mutex); + + if (hw->aq.asq.count == 0) { + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + "AQTX: Admin queue not initialized.\n"); + status = I40E_ERR_QUEUE_EMPTY; + goto asq_send_command_error; + } + + hw->aq.asq_last_status = I40E_AQ_RC_OK; + + val = rd32(hw, hw->aq.asq.head); + if (val >= hw->aq.num_asq_entries) { + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + "AQTX: head overrun at %d\n", val); + status = I40E_ERR_QUEUE_EMPTY; + goto asq_send_command_error; + } + + details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use); + if (cmd_details) { + *details = *cmd_details; + + /* If the cmd_details are defined copy the cookie. The + * cpu_to_le32 is not needed here because the data is ignored + * by the FW, only used by the driver + */ + if (details->cookie) { + desc->cookie_high = + cpu_to_le32(upper_32_bits(details->cookie)); + desc->cookie_low = + cpu_to_le32(lower_32_bits(details->cookie)); + } + } else { + memset(details, 0, sizeof(struct i40e_asq_cmd_details)); + } + + /* clear requested flags and then set additional flags if defined */ + desc->flags &= ~cpu_to_le16(details->flags_dis); + desc->flags |= cpu_to_le16(details->flags_ena); + + if (buff_size > hw->aq.asq_buf_size) { + i40e_debug(hw, + I40E_DEBUG_AQ_MESSAGE, + "AQTX: Invalid buffer size: %d.\n", + buff_size); + status = I40E_ERR_INVALID_SIZE; + goto asq_send_command_error; + } + + if (details->postpone && !details->async) { + i40e_debug(hw, + I40E_DEBUG_AQ_MESSAGE, + "AQTX: Async flag not set along with postpone flag"); + status = I40E_ERR_PARAM; + goto asq_send_command_error; + } + + /* call clean and check queue available function to reclaim the + * descriptors that were processed by FW, the function returns the + * number of desc available + */ + /* the clean function called here could be called in a separate thread + * in case of asynchronous completions + */ + if (i40e_clean_asq(hw) == 0) { + i40e_debug(hw, + I40E_DEBUG_AQ_MESSAGE, + "AQTX: Error queue is full.\n"); + status = I40E_ERR_ADMIN_QUEUE_FULL; + goto asq_send_command_error; + } + + /* initialize the temp desc pointer with the right desc */ + desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use); + + /* if the desc is available copy the temp desc to the right place */ + *desc_on_ring = *desc; + + /* if buff is not NULL assume indirect command */ + if (buff != NULL) { + dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]); + /* copy the user buff into the respective DMA buff */ + memcpy(dma_buff->va, buff, buff_size); + desc_on_ring->datalen = cpu_to_le16(buff_size); + + /* Update the address values in the desc with the pa value + * for respective buffer + */ + desc_on_ring->params.external.addr_high = + cpu_to_le32(upper_32_bits(dma_buff->pa)); + desc_on_ring->params.external.addr_low = + cpu_to_le32(lower_32_bits(dma_buff->pa)); + } + + /* bump the tail */ + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n"); + i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, + buff, buff_size); + (hw->aq.asq.next_to_use)++; + if (hw->aq.asq.next_to_use == hw->aq.asq.count) + 
hw->aq.asq.next_to_use = 0; + if (!details->postpone) + wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use); + + /* if cmd_details are not defined or async flag is not set, + * we need to wait for desc write back + */ + if (!details->async && !details->postpone) { + u32 total_delay = 0; + + do { + /* AQ designers suggest use of head for better + * timing reliability than DD bit + */ + if (i40evf_asq_done(hw)) + break; + udelay(50); + total_delay += 50; + } while (total_delay < hw->aq.asq_cmd_timeout); + } + + /* if ready, copy the desc back to temp */ + if (i40evf_asq_done(hw)) { + *desc = *desc_on_ring; + if (buff != NULL) + memcpy(buff, dma_buff->va, buff_size); + retval = le16_to_cpu(desc->retval); + if (retval != 0) { + i40e_debug(hw, + I40E_DEBUG_AQ_MESSAGE, + "AQTX: Command completed with error 0x%X.\n", + retval); + + /* strip off FW internal code */ + retval &= 0xff; + } + cmd_completed = true; + if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK) + status = 0; + else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY) + status = I40E_ERR_NOT_READY; + else + status = I40E_ERR_ADMIN_QUEUE_ERROR; + hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval; + } + + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + "AQTX: desc and buffer writeback:\n"); + i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, + buff_size); + + /* save writeback aq if requested */ + if (details->wb_desc) + *details->wb_desc = *desc_on_ring; + + /* update the error if time out occurred */ + if ((!cmd_completed) && + (!details->async && !details->postpone)) { + if (rd32(hw, hw->aq.asq.len) & I40E_VF_ATQLEN1_ATQCRIT_MASK) { + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + "AQTX: AQ Critical error.\n"); + status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR; + } else { + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + "AQTX: Writeback timeout.\n"); + status = I40E_ERR_ADMIN_QUEUE_TIMEOUT; + } + } + +asq_send_command_error: + mutex_unlock(&hw->aq.asq_mutex); + return status; +} + +/** + * i40evf_fill_default_direct_cmd_desc - AQ descriptor helper function + * @desc: pointer to the temp descriptor (non DMA mem) + * @opcode: the opcode can be used to decide which flags to turn off or on + * + * Fill the desc with default values + **/ +void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, + u16 opcode) +{ + /* zero out the desc */ + memset((void *)desc, 0, sizeof(struct i40e_aq_desc)); + desc->opcode = cpu_to_le16(opcode); + desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI); +} + +/** + * i40evf_clean_arq_element + * @hw: pointer to the hw struct + * @e: event info from the receive descriptor, includes any buffers + * @pending: number of events that could be left to process + * + * This function cleans one Admin Receive Queue element and returns + * the contents through e. 
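The 'pending' computation at the bottom of the function below handles ring wraparound: when the cleaned index ntc has passed the hardware head ntu, one full ring length is added so the unsigned subtraction still yields the number of outstanding events. Worked standalone:

#include <stdio.h>

/* ntu: hardware head (next to use), ntc: next to clean */
static unsigned int arq_pending(unsigned int ntc, unsigned int ntu,
                                unsigned int count)
{
        /* unsigned wraparound makes this correct in both cases */
        return (ntc > ntu ? count : 0) + (ntu - ntc);
}

int main(void)
{
        /* no wrap: head 7, clean 5 on a 16-entry ring -> 2 outstanding */
        printf("%u\n", arq_pending(5, 7, 16));
        /* wrapped: head 2, clean 14 -> 16 + (2 - 14) = 4 outstanding */
        printf("%u\n", arq_pending(14, 2, 16));
        return 0;
}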
It can also return how many events are + * left to process through 'pending' + **/ +i40e_status i40evf_clean_arq_element(struct i40e_hw *hw, + struct i40e_arq_event_info *e, + u16 *pending) +{ + i40e_status ret_code = 0; + u16 ntc = hw->aq.arq.next_to_clean; + struct i40e_aq_desc *desc; + struct i40e_dma_mem *bi; + u16 desc_idx; + u16 datalen; + u16 flags; + u16 ntu; + + /* pre-clean the event info */ + memset(&e->desc, 0, sizeof(e->desc)); + + /* take the lock before we start messing with the ring */ + mutex_lock(&hw->aq.arq_mutex); + + if (hw->aq.arq.count == 0) { + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + "AQRX: Admin queue not initialized.\n"); + ret_code = I40E_ERR_QUEUE_EMPTY; + goto clean_arq_element_err; + } + + /* set next_to_use to head */ + ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK; + if (ntu == ntc) { + /* nothing to do - shouldn't need to update ring's values */ + ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK; + goto clean_arq_element_out; + } + + /* now clean the next descriptor */ + desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc); + desc_idx = ntc; + + hw->aq.arq_last_status = + (enum i40e_admin_queue_err)le16_to_cpu(desc->retval); + flags = le16_to_cpu(desc->flags); + if (flags & I40E_AQ_FLAG_ERR) { + ret_code = I40E_ERR_ADMIN_QUEUE_ERROR; + i40e_debug(hw, + I40E_DEBUG_AQ_MESSAGE, + "AQRX: Event received with error 0x%X.\n", + hw->aq.arq_last_status); + } + + e->desc = *desc; + datalen = le16_to_cpu(desc->datalen); + e->msg_len = min(datalen, e->buf_len); + if (e->msg_buf != NULL && (e->msg_len != 0)) + memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va, + e->msg_len); + + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n"); + i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf, + hw->aq.arq_buf_size); + + /* Restore the original datalen and buffer address in the desc, + * FW updates datalen to indicate the event message + * size + */ + bi = &hw->aq.arq.r.arq_bi[ntc]; + memset((void *)desc, 0, sizeof(struct i40e_aq_desc)); + + desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF); + if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF) + desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB); + desc->datalen = cpu_to_le16((u16)bi->size); + desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa)); + desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa)); + + /* set tail = the last cleaned desc index. */ + wr32(hw, hw->aq.arq.tail, ntc); + /* ntc is updated to tail + 1 */ + ntc++; + if (ntc == hw->aq.num_arq_entries) + ntc = 0; + hw->aq.arq.next_to_clean = ntc; + hw->aq.arq.next_to_use = ntu; + +clean_arq_element_out: + /* Set pending if needed, unlock and return */ + if (pending != NULL) + *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc); + +clean_arq_element_err: + mutex_unlock(&hw->aq.arq_mutex); + + return ret_code; +} + +void i40evf_resume_aq(struct i40e_hw *hw) +{ + /* Registers are reset after PF reset */ + hw->aq.asq.next_to_use = 0; + hw->aq.asq.next_to_clean = 0; + + i40e_config_asq_regs(hw); + + hw->aq.arq.next_to_use = 0; + hw->aq.arq.next_to_clean = 0; + + i40e_config_arq_regs(hw); +} diff --git a/drivers/net/ethernet/intel/iavf/i40e_adminq.h b/drivers/net/ethernet/intel/iavf/i40e_adminq.h new file mode 100644 index 000000000000..1f264b9b6805 --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/i40e_adminq.h @@ -0,0 +1,136 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ + +#ifndef _I40E_ADMINQ_H_ +#define _I40E_ADMINQ_H_ + +#include "i40e_osdep.h" +#include "i40e_status.h" +#include "i40e_adminq_cmd.h" + +#define I40E_ADMINQ_DESC(R, i) \ + (&(((struct i40e_aq_desc *)((R).desc_buf.va))[i])) + +#define I40E_ADMINQ_DESC_ALIGNMENT 4096 + +struct i40e_adminq_ring { + struct i40e_virt_mem dma_head; /* space for dma structures */ + struct i40e_dma_mem desc_buf; /* descriptor ring memory */ + struct i40e_virt_mem cmd_buf; /* command buffer memory */ + + union { + struct i40e_dma_mem *asq_bi; + struct i40e_dma_mem *arq_bi; + } r; + + u16 count; /* Number of descriptors */ + u16 rx_buf_len; /* Admin Receive Queue buffer length */ + + /* used for interrupt processing */ + u16 next_to_use; + u16 next_to_clean; + + /* used for queue tracking */ + u32 head; + u32 tail; + u32 len; + u32 bah; + u32 bal; +}; + +/* ASQ transaction details */ +struct i40e_asq_cmd_details { + void *callback; /* cast from type I40E_ADMINQ_CALLBACK */ + u64 cookie; + u16 flags_ena; + u16 flags_dis; + bool async; + bool postpone; + struct i40e_aq_desc *wb_desc; +}; + +#define I40E_ADMINQ_DETAILS(R, i) \ + (&(((struct i40e_asq_cmd_details *)((R).cmd_buf.va))[i])) + +/* ARQ event information */ +struct i40e_arq_event_info { + struct i40e_aq_desc desc; + u16 msg_len; + u16 buf_len; + u8 *msg_buf; +}; + +/* Admin Queue information */ +struct i40e_adminq_info { + struct i40e_adminq_ring arq; /* receive queue */ + struct i40e_adminq_ring asq; /* send queue */ + u32 asq_cmd_timeout; /* send queue cmd write back timeout*/ + u16 num_arq_entries; /* receive queue depth */ + u16 num_asq_entries; /* send queue depth */ + u16 arq_buf_size; /* receive queue buffer size */ + u16 asq_buf_size; /* send queue buffer size */ + u16 fw_maj_ver; /* firmware major version */ + u16 fw_min_ver; /* firmware minor version */ + u32 fw_build; /* firmware build number */ + u16 api_maj_ver; /* api major version */ + u16 api_min_ver; /* api minor version */ + + struct mutex asq_mutex; /* Send queue lock */ + struct mutex arq_mutex; /* Receive queue lock */ + + /* last status values on send and receive queues */ + enum i40e_admin_queue_err asq_last_status; + enum i40e_admin_queue_err arq_last_status; +}; + +/** + * i40e_aq_rc_to_posix - convert errors to user-land codes + * aq_ret: AdminQ handler error code can override aq_rc + * aq_rc: AdminQ firmware error code to convert + **/ +static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc) +{ + int aq_to_posix[] = { + 0, /* I40E_AQ_RC_OK */ + -EPERM, /* I40E_AQ_RC_EPERM */ + -ENOENT, /* I40E_AQ_RC_ENOENT */ + -ESRCH, /* I40E_AQ_RC_ESRCH */ + -EINTR, /* I40E_AQ_RC_EINTR */ + -EIO, /* I40E_AQ_RC_EIO */ + -ENXIO, /* I40E_AQ_RC_ENXIO */ + -E2BIG, /* I40E_AQ_RC_E2BIG */ + -EAGAIN, /* I40E_AQ_RC_EAGAIN */ + -ENOMEM, /* I40E_AQ_RC_ENOMEM */ + -EACCES, /* I40E_AQ_RC_EACCES */ + -EFAULT, /* I40E_AQ_RC_EFAULT */ + -EBUSY, /* I40E_AQ_RC_EBUSY */ + -EEXIST, /* I40E_AQ_RC_EEXIST */ + -EINVAL, /* I40E_AQ_RC_EINVAL */ + -ENOTTY, /* I40E_AQ_RC_ENOTTY */ + -ENOSPC, /* I40E_AQ_RC_ENOSPC */ + -ENOSYS, /* I40E_AQ_RC_ENOSYS */ + -ERANGE, /* I40E_AQ_RC_ERANGE */ + -EPIPE, /* I40E_AQ_RC_EFLUSHED */ + -ESPIPE, /* I40E_AQ_RC_BAD_ADDR */ + -EROFS, /* I40E_AQ_RC_EMODE */ + -EFBIG, /* I40E_AQ_RC_EFBIG */ + }; + + /* aq_rc is invalid if AQ timed out */ + if (aq_ret == I40E_ERR_ADMIN_QUEUE_TIMEOUT) + return -EAGAIN; + + if (!((u32)aq_rc < (sizeof(aq_to_posix) / sizeof((aq_to_posix)[0])))) + return -ERANGE; + + return aq_to_posix[aq_rc]; +} + +/* general information */ +#define I40E_AQ_LARGE_BUF 512 
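i40e_aq_rc_to_posix() above is a guarded table lookup: a timeout overrides the table entirely, and any firmware code beyond the table falls back to -ERANGE. The shape of the guard in standalone form (table truncated for brevity):

#include <errno.h>
#include <stdio.h>

static int aq_rc_to_posix(int timed_out, unsigned int aq_rc)
{
        static const int tbl[] = {
                0, -EPERM, -ENOENT, -ESRCH, -EINTR, -EIO,
        };

        if (timed_out)
                return -EAGAIN;         /* aq_rc is meaningless on timeout */
        if (aq_rc >= sizeof(tbl) / sizeof(tbl[0]))
                return -ERANGE;         /* unknown firmware code */
        return tbl[aq_rc];
}

int main(void)
{
        printf("%d %d %d\n",
               aq_rc_to_posix(0, 1),    /* -EPERM */
               aq_rc_to_posix(0, 99),   /* -ERANGE */
               aq_rc_to_posix(1, 0));   /* -EAGAIN */
        return 0;
}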
+#define I40E_ASQ_CMD_TIMEOUT 250000 /* usecs */ + +void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, + u16 opcode); + +#endif /* _I40E_ADMINQ_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h new file mode 100644 index 000000000000..5fd8529465d4 --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h @@ -0,0 +1,2717 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ + +#ifndef _I40E_ADMINQ_CMD_H_ +#define _I40E_ADMINQ_CMD_H_ + +/* This header file defines the i40e Admin Queue commands and is shared between + * i40e Firmware and Software. + * + * This file needs to comply with the Linux Kernel coding style. + */ + +#define I40E_FW_API_VERSION_MAJOR 0x0001 +#define I40E_FW_API_VERSION_MINOR_X722 0x0005 +#define I40E_FW_API_VERSION_MINOR_X710 0x0007 + +#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \ + I40E_FW_API_VERSION_MINOR_X710 : \ + I40E_FW_API_VERSION_MINOR_X722) + +/* API version 1.7 implements additional link and PHY-specific APIs */ +#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007 + +struct i40e_aq_desc { + __le16 flags; + __le16 opcode; + __le16 datalen; + __le16 retval; + __le32 cookie_high; + __le32 cookie_low; + union { + struct { + __le32 param0; + __le32 param1; + __le32 param2; + __le32 param3; + } internal; + struct { + __le32 param0; + __le32 param1; + __le32 addr_high; + __le32 addr_low; + } external; + u8 raw[16]; + } params; +}; + +/* Flags sub-structure + * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 | + * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE | + */ + +/* command flags and offsets*/ +#define I40E_AQ_FLAG_DD_SHIFT 0 +#define I40E_AQ_FLAG_CMP_SHIFT 1 +#define I40E_AQ_FLAG_ERR_SHIFT 2 +#define I40E_AQ_FLAG_VFE_SHIFT 3 +#define I40E_AQ_FLAG_LB_SHIFT 9 +#define I40E_AQ_FLAG_RD_SHIFT 10 +#define I40E_AQ_FLAG_VFC_SHIFT 11 +#define I40E_AQ_FLAG_BUF_SHIFT 12 +#define I40E_AQ_FLAG_SI_SHIFT 13 +#define I40E_AQ_FLAG_EI_SHIFT 14 +#define I40E_AQ_FLAG_FE_SHIFT 15 + +#define I40E_AQ_FLAG_DD BIT(I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */ +#define I40E_AQ_FLAG_CMP BIT(I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */ +#define I40E_AQ_FLAG_ERR BIT(I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */ +#define I40E_AQ_FLAG_VFE BIT(I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */ +#define I40E_AQ_FLAG_LB BIT(I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */ +#define I40E_AQ_FLAG_RD BIT(I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */ +#define I40E_AQ_FLAG_VFC BIT(I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */ +#define I40E_AQ_FLAG_BUF BIT(I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ +#define I40E_AQ_FLAG_SI BIT(I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */ +#define I40E_AQ_FLAG_EI BIT(I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */ +#define I40E_AQ_FLAG_FE BIT(I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */ + +/* error codes */ +enum i40e_admin_queue_err { + I40E_AQ_RC_OK = 0, /* success */ + I40E_AQ_RC_EPERM = 1, /* Operation not permitted */ + I40E_AQ_RC_ENOENT = 2, /* No such element */ + I40E_AQ_RC_ESRCH = 3, /* Bad opcode */ + I40E_AQ_RC_EINTR = 4, /* operation interrupted */ + I40E_AQ_RC_EIO = 5, /* I/O error */ + I40E_AQ_RC_ENXIO = 6, /* No such resource */ + I40E_AQ_RC_E2BIG = 7, /* Arg too long */ + I40E_AQ_RC_EAGAIN = 8, /* Try again */ + I40E_AQ_RC_ENOMEM = 9, /* Out of memory */ + I40E_AQ_RC_EACCES = 10, /* Permission denied */ + I40E_AQ_RC_EFAULT = 11, /* Bad address */ + I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */ + I40E_AQ_RC_EEXIST = 13, /* object already exists */ + I40E_AQ_RC_EINVAL = 14, /* Invalid 
argument */ + I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */ + I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */ + I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */ + I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */ + I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */ + I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */ + I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */ + I40E_AQ_RC_EFBIG = 22, /* File too large */ +}; + +/* Admin Queue command opcodes */ +enum i40e_admin_queue_opc { + /* aq commands */ + i40e_aqc_opc_get_version = 0x0001, + i40e_aqc_opc_driver_version = 0x0002, + i40e_aqc_opc_queue_shutdown = 0x0003, + i40e_aqc_opc_set_pf_context = 0x0004, + + /* resource ownership */ + i40e_aqc_opc_request_resource = 0x0008, + i40e_aqc_opc_release_resource = 0x0009, + + i40e_aqc_opc_list_func_capabilities = 0x000A, + i40e_aqc_opc_list_dev_capabilities = 0x000B, + + /* Proxy commands */ + i40e_aqc_opc_set_proxy_config = 0x0104, + i40e_aqc_opc_set_ns_proxy_table_entry = 0x0105, + + /* LAA */ + i40e_aqc_opc_mac_address_read = 0x0107, + i40e_aqc_opc_mac_address_write = 0x0108, + + /* PXE */ + i40e_aqc_opc_clear_pxe_mode = 0x0110, + + /* WoL commands */ + i40e_aqc_opc_set_wol_filter = 0x0120, + i40e_aqc_opc_get_wake_reason = 0x0121, + + /* internal switch commands */ + i40e_aqc_opc_get_switch_config = 0x0200, + i40e_aqc_opc_add_statistics = 0x0201, + i40e_aqc_opc_remove_statistics = 0x0202, + i40e_aqc_opc_set_port_parameters = 0x0203, + i40e_aqc_opc_get_switch_resource_alloc = 0x0204, + i40e_aqc_opc_set_switch_config = 0x0205, + i40e_aqc_opc_rx_ctl_reg_read = 0x0206, + i40e_aqc_opc_rx_ctl_reg_write = 0x0207, + + i40e_aqc_opc_add_vsi = 0x0210, + i40e_aqc_opc_update_vsi_parameters = 0x0211, + i40e_aqc_opc_get_vsi_parameters = 0x0212, + + i40e_aqc_opc_add_pv = 0x0220, + i40e_aqc_opc_update_pv_parameters = 0x0221, + i40e_aqc_opc_get_pv_parameters = 0x0222, + + i40e_aqc_opc_add_veb = 0x0230, + i40e_aqc_opc_update_veb_parameters = 0x0231, + i40e_aqc_opc_get_veb_parameters = 0x0232, + + i40e_aqc_opc_delete_element = 0x0243, + + i40e_aqc_opc_add_macvlan = 0x0250, + i40e_aqc_opc_remove_macvlan = 0x0251, + i40e_aqc_opc_add_vlan = 0x0252, + i40e_aqc_opc_remove_vlan = 0x0253, + i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254, + i40e_aqc_opc_add_tag = 0x0255, + i40e_aqc_opc_remove_tag = 0x0256, + i40e_aqc_opc_add_multicast_etag = 0x0257, + i40e_aqc_opc_remove_multicast_etag = 0x0258, + i40e_aqc_opc_update_tag = 0x0259, + i40e_aqc_opc_add_control_packet_filter = 0x025A, + i40e_aqc_opc_remove_control_packet_filter = 0x025B, + i40e_aqc_opc_add_cloud_filters = 0x025C, + i40e_aqc_opc_remove_cloud_filters = 0x025D, + i40e_aqc_opc_clear_wol_switch_filters = 0x025E, + + i40e_aqc_opc_add_mirror_rule = 0x0260, + i40e_aqc_opc_delete_mirror_rule = 0x0261, + + /* Dynamic Device Personalization */ + i40e_aqc_opc_write_personalization_profile = 0x0270, + i40e_aqc_opc_get_personalization_profile_list = 0x0271, + + /* DCB commands */ + i40e_aqc_opc_dcb_ignore_pfc = 0x0301, + i40e_aqc_opc_dcb_updated = 0x0302, + i40e_aqc_opc_set_dcb_parameters = 0x0303, + + /* TX scheduler */ + i40e_aqc_opc_configure_vsi_bw_limit = 0x0400, + i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406, + i40e_aqc_opc_configure_vsi_tc_bw = 0x0407, + i40e_aqc_opc_query_vsi_bw_config = 0x0408, + i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A, + i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410, + + i40e_aqc_opc_enable_switching_comp_ets = 0x0413, + 
i40e_aqc_opc_modify_switching_comp_ets = 0x0414, + i40e_aqc_opc_disable_switching_comp_ets = 0x0415, + i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416, + i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417, + i40e_aqc_opc_query_switching_comp_ets_config = 0x0418, + i40e_aqc_opc_query_port_ets_config = 0x0419, + i40e_aqc_opc_query_switching_comp_bw_config = 0x041A, + i40e_aqc_opc_suspend_port_tx = 0x041B, + i40e_aqc_opc_resume_port_tx = 0x041C, + i40e_aqc_opc_configure_partition_bw = 0x041D, + /* hmc */ + i40e_aqc_opc_query_hmc_resource_profile = 0x0500, + i40e_aqc_opc_set_hmc_resource_profile = 0x0501, + + /* phy commands*/ + i40e_aqc_opc_get_phy_abilities = 0x0600, + i40e_aqc_opc_set_phy_config = 0x0601, + i40e_aqc_opc_set_mac_config = 0x0603, + i40e_aqc_opc_set_link_restart_an = 0x0605, + i40e_aqc_opc_get_link_status = 0x0607, + i40e_aqc_opc_set_phy_int_mask = 0x0613, + i40e_aqc_opc_get_local_advt_reg = 0x0614, + i40e_aqc_opc_set_local_advt_reg = 0x0615, + i40e_aqc_opc_get_partner_advt = 0x0616, + i40e_aqc_opc_set_lb_modes = 0x0618, + i40e_aqc_opc_get_phy_wol_caps = 0x0621, + i40e_aqc_opc_set_phy_debug = 0x0622, + i40e_aqc_opc_upload_ext_phy_fm = 0x0625, + i40e_aqc_opc_run_phy_activity = 0x0626, + i40e_aqc_opc_set_phy_register = 0x0628, + i40e_aqc_opc_get_phy_register = 0x0629, + + /* NVM commands */ + i40e_aqc_opc_nvm_read = 0x0701, + i40e_aqc_opc_nvm_erase = 0x0702, + i40e_aqc_opc_nvm_update = 0x0703, + i40e_aqc_opc_nvm_config_read = 0x0704, + i40e_aqc_opc_nvm_config_write = 0x0705, + i40e_aqc_opc_oem_post_update = 0x0720, + i40e_aqc_opc_thermal_sensor = 0x0721, + + /* virtualization commands */ + i40e_aqc_opc_send_msg_to_pf = 0x0801, + i40e_aqc_opc_send_msg_to_vf = 0x0802, + i40e_aqc_opc_send_msg_to_peer = 0x0803, + + /* alternate structure */ + i40e_aqc_opc_alternate_write = 0x0900, + i40e_aqc_opc_alternate_write_indirect = 0x0901, + i40e_aqc_opc_alternate_read = 0x0902, + i40e_aqc_opc_alternate_read_indirect = 0x0903, + i40e_aqc_opc_alternate_write_done = 0x0904, + i40e_aqc_opc_alternate_set_mode = 0x0905, + i40e_aqc_opc_alternate_clear_port = 0x0906, + + /* LLDP commands */ + i40e_aqc_opc_lldp_get_mib = 0x0A00, + i40e_aqc_opc_lldp_update_mib = 0x0A01, + i40e_aqc_opc_lldp_add_tlv = 0x0A02, + i40e_aqc_opc_lldp_update_tlv = 0x0A03, + i40e_aqc_opc_lldp_delete_tlv = 0x0A04, + i40e_aqc_opc_lldp_stop = 0x0A05, + i40e_aqc_opc_lldp_start = 0x0A06, + + /* Tunnel commands */ + i40e_aqc_opc_add_udp_tunnel = 0x0B00, + i40e_aqc_opc_del_udp_tunnel = 0x0B01, + i40e_aqc_opc_set_rss_key = 0x0B02, + i40e_aqc_opc_set_rss_lut = 0x0B03, + i40e_aqc_opc_get_rss_key = 0x0B04, + i40e_aqc_opc_get_rss_lut = 0x0B05, + + /* Async Events */ + i40e_aqc_opc_event_lan_overflow = 0x1001, + + /* OEM commands */ + i40e_aqc_opc_oem_parameter_change = 0xFE00, + i40e_aqc_opc_oem_device_status_change = 0xFE01, + i40e_aqc_opc_oem_ocsd_initialize = 0xFE02, + i40e_aqc_opc_oem_ocbb_initialize = 0xFE03, + + /* debug commands */ + i40e_aqc_opc_debug_read_reg = 0xFF03, + i40e_aqc_opc_debug_write_reg = 0xFF04, + i40e_aqc_opc_debug_modify_reg = 0xFF07, + i40e_aqc_opc_debug_dump_internals = 0xFF08, +}; + +/* command structures and indirect data structures */ + +/* Structure naming conventions: + * - no suffix for direct command descriptor structures + * - _data for indirect sent data + * - _resp for indirect return data (data which is both will use _data) + * - _completion for direct return data + * - _element_ for repeated elements (may also be _data or _resp) + * + * Command structures are expected to overlay the 
params.raw member of the basic + * descriptor, and as such cannot exceed 16 bytes in length. + */ + +/* This macro is used to generate a compilation error if a structure + * is not exactly the correct length. It gives a divide by zero error if the + * structure is not of the correct size, otherwise it creates an enum that is + * never used. + */ +#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \ + { i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) } + +/* This macro is used extensively to ensure that command structures are 16 + * bytes in length as they have to map to the raw array of that size. + */ +#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X) + +/* internal (0x00XX) commands */ + +/* Get version (direct 0x0001) */ +struct i40e_aqc_get_version { + __le32 rom_ver; + __le32 fw_build; + __le16 fw_major; + __le16 fw_minor; + __le16 api_major; + __le16 api_minor; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version); + +/* Send driver version (indirect 0x0002) */ +struct i40e_aqc_driver_version { + u8 driver_major_ver; + u8 driver_minor_ver; + u8 driver_build_ver; + u8 driver_subbuild_ver; + u8 reserved[4]; + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version); + +/* Queue Shutdown (direct 0x0003) */ +struct i40e_aqc_queue_shutdown { + __le32 driver_unloading; +#define I40E_AQ_DRIVER_UNLOADING 0x1 + u8 reserved[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown); + +/* Set PF context (0x0004, direct) */ +struct i40e_aqc_set_pf_context { + u8 pf_id; + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context); + +/* Request resource ownership (direct 0x0008) + * Release resource ownership (direct 0x0009) + */ +#define I40E_AQ_RESOURCE_NVM 1 +#define I40E_AQ_RESOURCE_SDP 2 +#define I40E_AQ_RESOURCE_ACCESS_READ 1 +#define I40E_AQ_RESOURCE_ACCESS_WRITE 2 +#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000 +#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000 + +struct i40e_aqc_request_resource { + __le16 resource_id; + __le16 access_type; + __le32 timeout; + __le32 resource_number; + u8 reserved[4]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource); + +/* Get function capabilities (indirect 0x000A) + * Get device capabilities (indirect 0x000B) + */ +struct i40e_aqc_list_capabilites { + u8 command_flags; +#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1 + u8 pf_index; + u8 reserved[2]; + __le32 count; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites); + +struct i40e_aqc_list_capabilities_element_resp { + __le16 id; + u8 major_rev; + u8 minor_rev; + __le32 number; + __le32 logical_id; + __le32 phys_id; + u8 reserved[16]; +}; + +/* list of caps */ + +#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001 +#define I40E_AQ_CAP_ID_MNG_MODE 0x0002 +#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003 +#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004 +#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005 +#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006 +#define I40E_AQ_CAP_ID_WOL_AND_PROXY 0x0008 +#define I40E_AQ_CAP_ID_SRIOV 0x0012 +#define I40E_AQ_CAP_ID_VF 0x0013 +#define I40E_AQ_CAP_ID_VMDQ 0x0014 +#define I40E_AQ_CAP_ID_8021QBG 0x0015 +#define I40E_AQ_CAP_ID_8021QBR 0x0016 +#define I40E_AQ_CAP_ID_VSI 0x0017 +#define I40E_AQ_CAP_ID_DCB 0x0018 +#define I40E_AQ_CAP_ID_FCOE 0x0021 +#define I40E_AQ_CAP_ID_ISCSI 0x0022 +#define I40E_AQ_CAP_ID_RSS 0x0040 +#define I40E_AQ_CAP_ID_RXQ 0x0041 +#define I40E_AQ_CAP_ID_TXQ 0x0042 +#define I40E_AQ_CAP_ID_MSIX 0x0043 +#define I40E_AQ_CAP_ID_VF_MSIX 0x0044 
+#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045 +#define I40E_AQ_CAP_ID_1588 0x0046 +#define I40E_AQ_CAP_ID_IWARP 0x0051 +#define I40E_AQ_CAP_ID_LED 0x0061 +#define I40E_AQ_CAP_ID_SDP 0x0062 +#define I40E_AQ_CAP_ID_MDIO 0x0063 +#define I40E_AQ_CAP_ID_WSR_PROT 0x0064 +#define I40E_AQ_CAP_ID_NVM_MGMT 0x0080 +#define I40E_AQ_CAP_ID_FLEX10 0x00F1 +#define I40E_AQ_CAP_ID_CEM 0x00F2 + +/* Set CPPM Configuration (direct 0x0103) */ +struct i40e_aqc_cppm_configuration { + __le16 command_flags; +#define I40E_AQ_CPPM_EN_LTRC 0x0800 +#define I40E_AQ_CPPM_EN_DMCTH 0x1000 +#define I40E_AQ_CPPM_EN_DMCTLX 0x2000 +#define I40E_AQ_CPPM_EN_HPTC 0x4000 +#define I40E_AQ_CPPM_EN_DMARC 0x8000 + __le16 ttlx; + __le32 dmacr; + __le16 dmcth; + u8 hptc; + u8 reserved; + __le32 pfltrc; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration); + +/* Set ARP Proxy command / response (indirect 0x0104) */ +struct i40e_aqc_arp_proxy_data { + __le16 command_flags; +#define I40E_AQ_ARP_INIT_IPV4 0x0800 +#define I40E_AQ_ARP_UNSUP_CTL 0x1000 +#define I40E_AQ_ARP_ENA 0x2000 +#define I40E_AQ_ARP_ADD_IPV4 0x4000 +#define I40E_AQ_ARP_DEL_IPV4 0x8000 + __le16 table_id; + __le32 enabled_offloads; +#define I40E_AQ_ARP_DIRECTED_OFFLOAD_ENABLE 0x00000020 +#define I40E_AQ_ARP_OFFLOAD_ENABLE 0x00000800 + __le32 ip_addr; + u8 mac_addr[6]; + u8 reserved[2]; +}; + +I40E_CHECK_STRUCT_LEN(0x14, i40e_aqc_arp_proxy_data); + +/* Set NS Proxy Table Entry Command (indirect 0x0105) */ +struct i40e_aqc_ns_proxy_data { + __le16 table_idx_mac_addr_0; + __le16 table_idx_mac_addr_1; + __le16 table_idx_ipv6_0; + __le16 table_idx_ipv6_1; + __le16 control; +#define I40E_AQ_NS_PROXY_ADD_0 0x0001 +#define I40E_AQ_NS_PROXY_DEL_0 0x0002 +#define I40E_AQ_NS_PROXY_ADD_1 0x0004 +#define I40E_AQ_NS_PROXY_DEL_1 0x0008 +#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x0010 +#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x0020 +#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x0040 +#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x0080 +#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0100 +#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0200 +#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0400 +#define I40E_AQ_NS_PROXY_OFFLOAD_ENABLE 0x0800 +#define I40E_AQ_NS_PROXY_DIRECTED_OFFLOAD_ENABLE 0x1000 + u8 mac_addr_0[6]; + u8 mac_addr_1[6]; + u8 local_mac_addr[6]; + u8 ipv6_addr_0[16]; /* Warning! 
spec specifies BE byte order */ + u8 ipv6_addr_1[16]; +}; + +I40E_CHECK_STRUCT_LEN(0x3c, i40e_aqc_ns_proxy_data); + +/* Manage LAA Command (0x0106) - obsolete */ +struct i40e_aqc_mng_laa { + __le16 command_flags; +#define I40E_AQ_LAA_FLAG_WR 0x8000 + u8 reserved[2]; + __le32 sal; + __le16 sah; + u8 reserved2[6]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_mng_laa); + +/* Manage MAC Address Read Command (indirect 0x0107) */ +struct i40e_aqc_mac_address_read { + __le16 command_flags; +#define I40E_AQC_LAN_ADDR_VALID 0x10 +#define I40E_AQC_SAN_ADDR_VALID 0x20 +#define I40E_AQC_PORT_ADDR_VALID 0x40 +#define I40E_AQC_WOL_ADDR_VALID 0x80 +#define I40E_AQC_MC_MAG_EN_VALID 0x100 +#define I40E_AQC_ADDR_VALID_MASK 0x3F0 + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read); + +struct i40e_aqc_mac_address_read_data { + u8 pf_lan_mac[6]; + u8 pf_san_mac[6]; + u8 port_mac[6]; + u8 pf_wol_mac[6]; +}; + +I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data); + +/* Manage MAC Address Write Command (0x0108) */ +struct i40e_aqc_mac_address_write { + __le16 command_flags; +#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000 +#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000 +#define I40E_AQC_WRITE_TYPE_PORT 0x8000 +#define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG 0xC000 +#define I40E_AQC_WRITE_TYPE_MASK 0xC000 + + __le16 mac_sah; + __le32 mac_sal; + u8 reserved[8]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write); + +/* PXE commands (0x011x) */ + +/* Clear PXE Command and response (direct 0x0110) */ +struct i40e_aqc_clear_pxe { + u8 rx_cnt; + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe); + +/* Set WoL Filter (0x0120) */ + +struct i40e_aqc_set_wol_filter { + __le16 filter_index; +#define I40E_AQC_MAX_NUM_WOL_FILTERS 8 +#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT 15 +#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_MASK (0x1 << \ + I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT) + +#define I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT 0 +#define I40E_AQC_SET_WOL_FILTER_INDEX_MASK (0x7 << \ + I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT) + __le16 cmd_flags; +#define I40E_AQC_SET_WOL_FILTER 0x8000 +#define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL 0x4000 +#define I40E_AQC_SET_WOL_FILTER_WOL_PRESERVE_ON_PFR 0x2000 +#define I40E_AQC_SET_WOL_FILTER_ACTION_CLEAR 0 +#define I40E_AQC_SET_WOL_FILTER_ACTION_SET 1 + __le16 valid_flags; +#define I40E_AQC_SET_WOL_FILTER_ACTION_VALID 0x8000 +#define I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID 0x4000 + u8 reserved[2]; + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_wol_filter); + +struct i40e_aqc_set_wol_filter_data { + u8 filter[128]; + u8 mask[16]; +}; + +I40E_CHECK_STRUCT_LEN(0x90, i40e_aqc_set_wol_filter_data); + +/* Get Wake Reason (0x0121) */ + +struct i40e_aqc_get_wake_reason_completion { + u8 reserved_1[2]; + __le16 wake_reason; +#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT 0 +#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_MASK (0xFF << \ + I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT) +#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT 8 +#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_MASK (0xFF << \ + I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT) + u8 reserved_2[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_wake_reason_completion); + +/* Switch configuration commands (0x02xx) */ + +/* Used by many indirect commands that only pass an seid and a buffer in the + * command + */ +struct i40e_aqc_switch_seid { + __le16 
seid; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid); + +/* Get Switch Configuration command (indirect 0x0200) + * uses i40e_aqc_switch_seid for the descriptor + */ +struct i40e_aqc_get_switch_config_header_resp { + __le16 num_reported; + __le16 num_total; + u8 reserved[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_config_header_resp); + +struct i40e_aqc_switch_config_element_resp { + u8 element_type; +#define I40E_AQ_SW_ELEM_TYPE_MAC 1 +#define I40E_AQ_SW_ELEM_TYPE_PF 2 +#define I40E_AQ_SW_ELEM_TYPE_VF 3 +#define I40E_AQ_SW_ELEM_TYPE_EMP 4 +#define I40E_AQ_SW_ELEM_TYPE_BMC 5 +#define I40E_AQ_SW_ELEM_TYPE_PV 16 +#define I40E_AQ_SW_ELEM_TYPE_VEB 17 +#define I40E_AQ_SW_ELEM_TYPE_PA 18 +#define I40E_AQ_SW_ELEM_TYPE_VSI 19 + u8 revision; +#define I40E_AQ_SW_ELEM_REV_1 1 + __le16 seid; + __le16 uplink_seid; + __le16 downlink_seid; + u8 reserved[3]; + u8 connection_type; +#define I40E_AQ_CONN_TYPE_REGULAR 0x1 +#define I40E_AQ_CONN_TYPE_DEFAULT 0x2 +#define I40E_AQ_CONN_TYPE_CASCADED 0x3 + __le16 scheduler_id; + __le16 element_info; +}; + +I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_config_element_resp); + +/* Get Switch Configuration (indirect 0x0200) + * an array of elements are returned in the response buffer + * the first in the array is the header, remainder are elements + */ +struct i40e_aqc_get_switch_config_resp { + struct i40e_aqc_get_switch_config_header_resp header; + struct i40e_aqc_switch_config_element_resp element[1]; +}; + +I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_switch_config_resp); + +/* Add Statistics (direct 0x0201) + * Remove Statistics (direct 0x0202) + */ +struct i40e_aqc_add_remove_statistics { + __le16 seid; + __le16 vlan; + __le16 stat_index; + u8 reserved[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics); + +/* Set Port Parameters command (direct 0x0203) */ +struct i40e_aqc_set_port_parameters { + __le16 command_flags; +#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1 +#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! 
*/ +#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4 + __le16 bad_frame_vsi; +#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_SHIFT 0x0 +#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_MASK 0x3FF + __le16 default_seid; /* reserved for command */ + u8 reserved[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters); + +/* Get Switch Resource Allocation (indirect 0x0204) */ +struct i40e_aqc_get_switch_resource_alloc { + u8 num_entries; /* reserved for command */ + u8 reserved[7]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc); + +/* expect an array of these structs in the response buffer */ +struct i40e_aqc_switch_resource_alloc_element_resp { + u8 resource_type; +#define I40E_AQ_RESOURCE_TYPE_VEB 0x0 +#define I40E_AQ_RESOURCE_TYPE_VSI 0x1 +#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2 +#define I40E_AQ_RESOURCE_TYPE_STAG 0x3 +#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4 +#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5 +#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6 +#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7 +#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8 +#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9 +#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA +#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB +#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC +#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD +#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF +#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10 +#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11 +#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12 +#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13 + u8 reserved1; + __le16 guaranteed; + __le16 total; + __le16 used; + __le16 total_unalloced; + u8 reserved2[6]; +}; + +I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp); + +/* Set Switch Configuration (direct 0x0205) */ +struct i40e_aqc_set_switch_config { + __le16 flags; +/* flags used for both fields below */ +#define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001 +#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002 + __le16 valid_flags; + /* The ethertype in switch_tag is dropped on ingress and used + * internally by the switch. Set this to zero for the default + * of 0x88a8 (802.1ad). Should be zero for firmware API + * versions lower than 1.7. + */ + __le16 switch_tag; + /* The ethertypes in first_tag and second_tag are used to + * match the outer and inner VLAN tags (respectively) when HW + * double VLAN tagging is enabled via the set port parameters + * AQ command. Otherwise these are both ignored. Set them to + * zero for their defaults of 0x8100 (802.1Q). Should be zero + * for firmware API versions lower than 1.7. 
+ */ + __le16 first_tag; + __le16 second_tag; + u8 reserved[6]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_switch_config); + +/* Read Receive control registers (direct 0x0206) + * Write Receive control registers (direct 0x0207) + * used for accessing Rx control registers that can be + * slow and need special handling when under high Rx load + */ +struct i40e_aqc_rx_ctl_reg_read_write { + __le32 reserved1; + __le32 address; + __le32 reserved2; + __le32 value; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_rx_ctl_reg_read_write); + +/* Add VSI (indirect 0x0210) + * this indirect command uses struct i40e_aqc_vsi_properties_data + * as the indirect buffer (128 bytes) + * + * Update VSI (indirect 0x211) + * uses the same data structure as Add VSI + * + * Get VSI (indirect 0x0212) + * uses the same completion and data structure as Add VSI + */ +struct i40e_aqc_add_get_update_vsi { + __le16 uplink_seid; + u8 connection_type; +#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1 +#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2 +#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3 + u8 reserved1; + u8 vf_id; + u8 reserved2; + __le16 vsi_flags; +#define I40E_AQ_VSI_TYPE_SHIFT 0x0 +#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT) +#define I40E_AQ_VSI_TYPE_VF 0x0 +#define I40E_AQ_VSI_TYPE_VMDQ2 0x1 +#define I40E_AQ_VSI_TYPE_PF 0x2 +#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3 +#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4 + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi); + +struct i40e_aqc_add_get_update_vsi_completion { + __le16 seid; + __le16 vsi_number; + __le16 vsi_used; + __le16 vsi_free; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion); + +struct i40e_aqc_vsi_properties_data { + /* first 96 byte are written by SW */ + __le16 valid_sections; +#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001 +#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002 +#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004 +#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008 +#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010 +#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020 +#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040 +#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080 +#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100 +#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200 + /* switch section */ + __le16 switch_id; /* 12bit id combined with flags below */ +#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000 +#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT) +#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000 +#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000 +#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000 + u8 sw_reserved[2]; + /* security section */ + u8 sec_flags; +#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01 +#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02 +#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04 + u8 sec_reserved; + /* VLAN section */ + __le16 pvid; /* VLANS include priority bits */ + __le16 fcoe_pvid; + u8 port_vlan_flags; +#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00 +#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \ + I40E_AQ_VSI_PVLAN_MODE_SHIFT) +#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01 +#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02 +#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03 +#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04 +#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03 +#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \ + I40E_AQ_VSI_PVLAN_EMOD_SHIFT) +#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0 +#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08 +#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10 +#define 
I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18 + u8 pvlan_reserved[3]; + /* ingress egress up sections */ + __le32 ingress_table; /* bitmap, 3 bits per up */ +#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0 +#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP0_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3 +#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP1_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6 +#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP2_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9 +#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP3_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12 +#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP4_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15 +#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP5_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18 +#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP6_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21 +#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP7_SHIFT) + __le32 egress_table; /* same defines as for ingress table */ + /* cascaded PV section */ + __le16 cas_pv_tag; + u8 cas_pv_flags; +#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00 +#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \ + I40E_AQ_VSI_CAS_PV_TAGX_SHIFT) +#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00 +#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01 +#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02 +#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10 +#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20 +#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40 + u8 cas_pv_reserved; + /* queue mapping section */ + __le16 mapping_flags; +#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0 +#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1 + __le16 queue_mapping[16]; +#define I40E_AQ_VSI_QUEUE_SHIFT 0x0 +#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT) + __le16 tc_mapping[8]; +#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0 +#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \ + I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) +#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9 +#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \ + I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) + /* queueing option section */ + u8 queueing_opt_flags; +#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04 +#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08 +#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10 +#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20 +#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00 +#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40 + u8 queueing_opt_reserved[3]; + /* scheduler section */ + u8 up_enable_bits; + u8 sched_reserved; + /* outer up section */ + __le32 outer_up_table; /* same structure and defines as ingress tbl */ + u8 cmd_reserved[8]; + /* last 32 bytes are written by FW */ + __le16 qs_handle[8]; +#define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF + __le16 stat_counter_idx; + __le16 sched_id; + u8 resp_reserved[12]; +}; + +I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data); + +/* Add Port Virtualizer (direct 0x0220) + * also used for update PV (direct 0x0221) but only flags are used + * (IS_CTRL_PORT only works on add PV) + */ +struct i40e_aqc_add_update_pv { + __le16 command_flags; +#define I40E_AQC_PV_FLAG_PV_TYPE 0x1 +#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2 +#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4 +#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8 + __le16 uplink_seid; + __le16 connected_seid; + u8 reserved[10]; +}; + 
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv); + +struct i40e_aqc_add_update_pv_completion { + /* reserved for update; for add also encodes error if rc == ENOSPC */ + __le16 pv_seid; +#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1 +#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2 +#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4 +#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8 + u8 reserved[14]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion); + +/* Get PV Params (direct 0x0222) + * uses i40e_aqc_switch_seid for the descriptor + */ + +struct i40e_aqc_get_pv_params_completion { + __le16 seid; + __le16 default_stag; + __le16 pv_flags; /* same flags as add_pv */ +#define I40E_AQC_GET_PV_PV_TYPE 0x1 +#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2 +#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4 + u8 reserved[8]; + __le16 default_port_seid; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion); + +/* Add VEB (direct 0x0230) */ +struct i40e_aqc_add_veb { + __le16 uplink_seid; + __le16 downlink_seid; + __le16 veb_flags; +#define I40E_AQC_ADD_VEB_FLOATING 0x1 +#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1 +#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \ + I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT) +#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2 +#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4 +#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 /* deprecated */ +#define I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS 0x10 + u8 enable_tcs; + u8 reserved[9]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb); + +struct i40e_aqc_add_veb_completion { + u8 reserved[6]; + __le16 switch_seid; + /* also encodes error if rc == ENOSPC; codes are the same as add_pv */ + __le16 veb_seid; +#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1 +#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2 +#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4 +#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8 + __le16 statistic_index; + __le16 vebs_used; + __le16 vebs_free; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion); + +/* Get VEB Parameters (direct 0x0232) + * uses i40e_aqc_switch_seid for the descriptor + */ +struct i40e_aqc_get_veb_parameters_completion { + __le16 seid; + __le16 switch_id; + __le16 veb_flags; /* only the first/last flags from 0x0230 is valid */ + __le16 statistic_index; + __le16 vebs_used; + __le16 vebs_free; + u8 reserved[4]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion); + +/* Delete Element (direct 0x0243) + * uses the generic i40e_aqc_switch_seid + */ + +/* Add MAC-VLAN (indirect 0x0250) */ + +/* used for the command for most vlan commands */ +struct i40e_aqc_macvlan { + __le16 num_addresses; + __le16 seid[3]; +#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \ + I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) +#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000 + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan); + +/* indirect data for command and response */ +struct i40e_aqc_add_macvlan_element_data { + u8 mac_addr[6]; + __le16 vlan_tag; + __le16 flags; +#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001 +#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002 +#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004 +#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008 +#define I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC 0x0010 + __le16 queue_number; +#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0 +#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \ + I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) + /* response section */ + u8 match_method; +#define I40E_AQC_MM_PERFECT_MATCH 0x01 +#define 
I40E_AQC_MM_HASH_MATCH 0x02 +#define I40E_AQC_MM_ERR_NO_RES 0xFF + u8 reserved1[3]; +}; + +struct i40e_aqc_add_remove_macvlan_completion { + __le16 perfect_mac_used; + __le16 perfect_mac_free; + __le16 unicast_hash_free; + __le16 multicast_hash_free; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion); + +/* Remove MAC-VLAN (indirect 0x0251) + * uses i40e_aqc_macvlan for the descriptor + * data points to an array of num_addresses of elements + */ + +struct i40e_aqc_remove_macvlan_element_data { + u8 mac_addr[6]; + __le16 vlan_tag; + u8 flags; +#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01 +#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02 +#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08 +#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10 + u8 reserved[3]; + /* reply section */ + u8 error_code; +#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0 +#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF + u8 reply_reserved[3]; +}; + +/* Add VLAN (indirect 0x0252) + * Remove VLAN (indirect 0x0253) + * use the generic i40e_aqc_macvlan for the command + */ +struct i40e_aqc_add_remove_vlan_element_data { + __le16 vlan_tag; + u8 vlan_flags; +/* flags for add VLAN */ +#define I40E_AQC_ADD_VLAN_LOCAL 0x1 +#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1 +#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << I40E_AQC_ADD_PVLAN_TYPE_SHIFT) +#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0 +#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2 +#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4 +#define I40E_AQC_VLAN_PTYPE_SHIFT 3 +#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT) +#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0 +#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8 +#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10 +#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18 +/* flags for remove VLAN */ +#define I40E_AQC_REMOVE_VLAN_ALL 0x1 + u8 reserved; + u8 result; +/* flags for add VLAN */ +#define I40E_AQC_ADD_VLAN_SUCCESS 0x0 +#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE +#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF +/* flags for remove VLAN */ +#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0 +#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF + u8 reserved1[3]; +}; + +struct i40e_aqc_add_remove_vlan_completion { + u8 reserved[4]; + __le16 vlans_used; + __le16 vlans_free; + __le32 addr_high; + __le32 addr_low; +}; + +/* Set VSI Promiscuous Modes (direct 0x0254) */ +struct i40e_aqc_set_vsi_promiscuous_modes { + __le16 promiscuous_flags; + __le16 valid_flags; +/* flags used for both fields above */ +#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01 +#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02 +#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04 +#define I40E_AQC_SET_VSI_DEFAULT 0x08 +#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10 +#define I40E_AQC_SET_VSI_PROMISC_TX 0x8000 + __le16 seid; +#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF + __le16 vlan_tag; +#define I40E_AQC_SET_VSI_VLAN_MASK 0x0FFF +#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000 + u8 reserved[8]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes); + +/* Add S/E-tag command (direct 0x0255) + * Uses generic i40e_aqc_add_remove_tag_completion for completion + */ +struct i40e_aqc_add_tag { + __le16 flags; +#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001 + __le16 seid; +#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \ + I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT) + __le16 tag; + __le16 queue_number; + u8 reserved[8]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag); + +struct i40e_aqc_add_remove_tag_completion { + u8 
reserved[12]; + __le16 tags_used; + __le16 tags_free; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion); + +/* Remove S/E-tag command (direct 0x0256) + * Uses generic i40e_aqc_add_remove_tag_completion for completion + */ +struct i40e_aqc_remove_tag { + __le16 seid; +#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ + I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT) + __le16 tag; + u8 reserved[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_tag); + +/* Add multicast E-Tag (direct 0x0257) + * del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields + * and no external data + */ +struct i40e_aqc_add_remove_mcast_etag { + __le16 pv_seid; + __le16 etag; + u8 num_unicast_etags; + u8 reserved[3]; + __le32 addr_high; /* address of array of 2-byte s-tags */ + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag); + +struct i40e_aqc_add_remove_mcast_etag_completion { + u8 reserved[4]; + __le16 mcast_etags_used; + __le16 mcast_etags_free; + __le32 addr_high; + __le32 addr_low; + +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion); + +/* Update S/E-Tag (direct 0x0259) */ +struct i40e_aqc_update_tag { + __le16 seid; +#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ + I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT) + __le16 old_tag; + __le16 new_tag; + u8 reserved[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag); + +struct i40e_aqc_update_tag_completion { + u8 reserved[12]; + __le16 tags_used; + __le16 tags_free; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion); + +/* Add Control Packet filter (direct 0x025A) + * Remove Control Packet filter (direct 0x025B) + * uses the i40e_aqc_add_oveb_cloud, + * and the generic direct completion structure + */ +struct i40e_aqc_add_remove_control_packet_filter { + u8 mac[6]; + __le16 etype; + __le16 flags; +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000 + __le16 seid; +#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \ + I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT) + __le16 queue; + u8 reserved[2]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter); + +struct i40e_aqc_add_remove_control_packet_filter_completion { + __le16 mac_etype_used; + __le16 etype_used; + __le16 mac_etype_free; + __le16 etype_free; + u8 reserved[8]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion); + +/* Add Cloud filters (indirect 0x025C) + * Remove Cloud filters (indirect 0x025D) + * uses the i40e_aqc_add_remove_cloud_filters, + * and the generic indirect completion structure + */ +struct i40e_aqc_add_remove_cloud_filters { + u8 num_filters; + u8 reserved; + __le16 seid; +#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \ + I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT) + u8 big_buffer_flag; +#define I40E_AQC_ADD_CLOUD_CMD_BB 1 + u8 reserved2[3]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters); + +struct i40e_aqc_cloud_filters_element_data { + u8 outer_mac[6]; + u8 inner_mac[6]; + __le16 inner_vlan; + union { + struct { + u8 reserved[12]; + u8 
data[4]; + } v4; + struct { + u8 data[16]; + } v6; + struct { + __le16 data[8]; + } raw_v6; + } ipaddr; + __le16 flags; +#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0 +#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \ + I40E_AQC_ADD_CLOUD_FILTER_SHIFT) +/* 0x0000 reserved */ +#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001 +/* 0x0002 reserved */ +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003 +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004 +/* 0x0005 reserved */ +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006 +/* 0x0007 reserved */ +/* 0x0008 reserved */ +#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009 +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A +#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B +#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C +/* 0x0010 to 0x0017 is for custom filters */ +#define I40E_AQC_ADD_CLOUD_FILTER_IP_PORT 0x0010 /* Dest IP + L4 Port */ +#define I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT 0x0011 /* Dest MAC + L4 Port */ +#define I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT 0x0012 /* Dest MAC + VLAN + L4 Port */ + +#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080 +#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6 +#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0 +#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0 +#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100 + +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN 0 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE 2 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_RESERVED 4 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN_GPE 5 + +#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_MAC 0x2000 +#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_INNER_MAC 0x4000 +#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_IP 0x8000 + + __le32 tenant_id; + u8 reserved[4]; + __le16 queue_number; +#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0 +#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x7FF << \ + I40E_AQC_ADD_CLOUD_QUEUE_SHIFT) + u8 reserved2[14]; + /* response section */ + u8 allocation_result; +#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0 +#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF + u8 response_reserved[7]; +}; + +I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_cloud_filters_element_data); + +/* i40e_aqc_cloud_filters_element_bb is used when + * I40E_AQC_ADD_CLOUD_CMD_BB flag is set. 
+ */ +struct i40e_aqc_cloud_filters_element_bb { + struct i40e_aqc_cloud_filters_element_data element; + u16 general_fields[32]; +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0 0 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1 1 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2 2 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0 3 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1 4 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2 5 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0 6 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1 7 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2 8 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0 9 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1 10 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2 11 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD0 12 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD1 13 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD2 14 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0 15 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD1 16 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD2 17 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD3 18 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD4 19 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD5 20 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD6 21 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD7 22 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD0 23 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD1 24 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD2 25 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD3 26 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD4 27 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD5 28 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD6 29 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD7 30 +}; + +I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_cloud_filters_element_bb); + +struct i40e_aqc_remove_cloud_filters_completion { + __le16 perfect_ovlan_used; + __le16 perfect_ovlan_free; + __le16 vlan_used; + __le16 vlan_free; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion); + +/* Replace filter Command 0x025F + * uses the i40e_aqc_replace_cloud_filters, + * and the generic indirect completion structure + */ +struct i40e_filter_data { + u8 filter_type; + u8 input[3]; +}; + +I40E_CHECK_STRUCT_LEN(4, i40e_filter_data); + +struct i40e_aqc_replace_cloud_filters_cmd { + u8 valid_flags; +#define I40E_AQC_REPLACE_L1_FILTER 0x0 +#define I40E_AQC_REPLACE_CLOUD_FILTER 0x1 +#define I40E_AQC_GET_CLOUD_FILTERS 0x2 +#define I40E_AQC_MIRROR_CLOUD_FILTER 0x4 +#define I40E_AQC_HIGH_PRIORITY_CLOUD_FILTER 0x8 + u8 old_filter_type; + u8 new_filter_type; + u8 tr_bit; + u8 reserved[4]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_replace_cloud_filters_cmd); + +struct i40e_aqc_replace_cloud_filters_cmd_buf { + u8 data[32]; +/* Filter type INPUT codes*/ +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_ENTRIES_MAX 3 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED BIT(7) + +/* Field Vector offsets */ +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_MAC_DA 0 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_ETH 6 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG 7 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN 8 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_OVLAN 9 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN 10 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY 11 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC 12 +/* big FLU */ +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IP_DA 14 +/* big FLU */ +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_OIP_DA 15 + +#define 
I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN 37 + struct i40e_filter_data filters[8]; +}; + +I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_replace_cloud_filters_cmd_buf); + +/* Add Mirror Rule (indirect or direct 0x0260) + * Delete Mirror Rule (indirect or direct 0x0261) + * note: some rule types (4,5) do not use an external buffer. + * take care to set the flags correctly. + */ +struct i40e_aqc_add_delete_mirror_rule { + __le16 seid; + __le16 rule_type; +#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0 +#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \ + I40E_AQC_MIRROR_RULE_TYPE_SHIFT) +#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1 +#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2 +#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3 +#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4 +#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5 + __le16 num_entries; + __le16 destination; /* VSI for add, rule id for delete */ + __le32 addr_high; /* address of array of 2-byte VSI or VLAN ids */ + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule); + +struct i40e_aqc_add_delete_mirror_rule_completion { + u8 reserved[2]; + __le16 rule_id; /* only used on add */ + __le16 mirror_rules_used; + __le16 mirror_rules_free; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion); + +/* Dynamic Device Personalization */ +struct i40e_aqc_write_personalization_profile { + u8 flags; + u8 reserved[3]; + __le32 profile_track_id; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_write_personalization_profile); + +struct i40e_aqc_write_ddp_resp { + __le32 error_offset; + __le32 error_info; + __le32 addr_high; + __le32 addr_low; +}; + +struct i40e_aqc_get_applied_profiles { + u8 flags; +#define I40E_AQC_GET_DDP_GET_CONF 0x1 +#define I40E_AQC_GET_DDP_GET_RDPU_CONF 0x2 + u8 rsv[3]; + __le32 reserved; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_applied_profiles); + +/* DCB 0x03xx*/ + +/* PFC Ignore (direct 0x0301) + * the command and response use the same descriptor structure + */ +struct i40e_aqc_pfc_ignore { + u8 tc_bitmap; + u8 command_flags; /* unused on response */ +#define I40E_AQC_PFC_IGNORE_SET 0x80 +#define I40E_AQC_PFC_IGNORE_CLEAR 0x0 + u8 reserved[14]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore); + +/* DCB Update (direct 0x0302) uses the i40e_aq_desc structure + * with no parameters + */ + +/* TX scheduler 0x04xx */ + +/* Almost all the indirect commands use + * this generic struct to pass the SEID in param0 + */ +struct i40e_aqc_tx_sched_ind { + __le16 vsi_seid; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind); + +/* Several commands respond with a set of queue set handles */ +struct i40e_aqc_qs_handles_resp { + __le16 qs_handles[8]; +}; + +/* Configure VSI BW limits (direct 0x0400) */ +struct i40e_aqc_configure_vsi_bw_limit { + __le16 vsi_seid; + u8 reserved[2]; + __le16 credit; + u8 reserved1[2]; + u8 max_credit; /* 0-3, limit = 2^max */ + u8 reserved2[7]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit); + +/* Configure VSI Bandwidth Limit per Traffic Type (indirect 0x0406) + * responds with i40e_aqc_qs_handles_resp + */ +struct i40e_aqc_configure_vsi_ets_sla_bw_data { + u8 tc_valid_bits; + u8 reserved[15]; + __le16 tc_bw_credits[8]; /* FW writesback QS handles here */ + + /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ + __le16 tc_bw_max[2]; + u8 reserved1[28]; +}; + +I40E_CHECK_STRUCT_LEN(0x40, 
i40e_aqc_configure_vsi_ets_sla_bw_data); + +/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407) + * responds with i40e_aqc_qs_handles_resp + */ +struct i40e_aqc_configure_vsi_tc_bw_data { + u8 tc_valid_bits; + u8 reserved[3]; + u8 tc_bw_credits[8]; + u8 reserved1[4]; + __le16 qs_handles[8]; +}; + +I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_vsi_tc_bw_data); + +/* Query vsi bw configuration (indirect 0x0408) */ +struct i40e_aqc_query_vsi_bw_config_resp { + u8 tc_valid_bits; + u8 tc_suspended_bits; + u8 reserved[14]; + __le16 qs_handles[8]; + u8 reserved1[4]; + __le16 port_bw_limit; + u8 reserved2[2]; + u8 max_bw; /* 0-3, limit = 2^max */ + u8 reserved3[23]; +}; + +I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_vsi_bw_config_resp); + +/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */ +struct i40e_aqc_query_vsi_ets_sla_config_resp { + u8 tc_valid_bits; + u8 reserved[3]; + u8 share_credits[8]; + __le16 credits[8]; + + /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ + __le16 tc_bw_max[2]; +}; + +I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_vsi_ets_sla_config_resp); + +/* Configure Switching Component Bandwidth Limit (direct 0x0410) */ +struct i40e_aqc_configure_switching_comp_bw_limit { + __le16 seid; + u8 reserved[2]; + __le16 credit; + u8 reserved1[2]; + u8 max_bw; /* 0-3, limit = 2^max */ + u8 reserved2[7]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit); + +/* Enable Physical Port ETS (indirect 0x0413) + * Modify Physical Port ETS (indirect 0x0414) + * Disable Physical Port ETS (indirect 0x0415) + */ +struct i40e_aqc_configure_switching_comp_ets_data { + u8 reserved[4]; + u8 tc_valid_bits; + u8 seepage; +#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1 + u8 tc_strict_priority_flags; + u8 reserved1[17]; + u8 tc_bw_share_credits[8]; + u8 reserved2[96]; +}; + +I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_configure_switching_comp_ets_data); + +/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */ +struct i40e_aqc_configure_switching_comp_ets_bw_limit_data { + u8 tc_valid_bits; + u8 reserved[15]; + __le16 tc_bw_credit[8]; + + /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ + __le16 tc_bw_max[2]; + u8 reserved1[28]; +}; + +I40E_CHECK_STRUCT_LEN(0x40, + i40e_aqc_configure_switching_comp_ets_bw_limit_data); + +/* Configure Switching Component Bandwidth Allocation per Tc + * (indirect 0x0417) + */ +struct i40e_aqc_configure_switching_comp_bw_config_data { + u8 tc_valid_bits; + u8 reserved[2]; + u8 absolute_credits; /* bool */ + u8 tc_bw_share_credits[8]; + u8 reserved1[20]; +}; + +I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_switching_comp_bw_config_data); + +/* Query Switching Component Configuration (indirect 0x0418) */ +struct i40e_aqc_query_switching_comp_ets_config_resp { + u8 tc_valid_bits; + u8 reserved[35]; + __le16 port_bw_limit; + u8 reserved1[2]; + u8 tc_bw_max; /* 0-3, limit = 2^max */ + u8 reserved2[23]; +}; + +I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_switching_comp_ets_config_resp); + +/* Query PhysicalPort ETS Configuration (indirect 0x0419) */ +struct i40e_aqc_query_port_ets_config_resp { + u8 reserved[4]; + u8 tc_valid_bits; + u8 reserved1; + u8 tc_strict_priority_bits; + u8 reserved2; + u8 tc_bw_share_credits[8]; + __le16 tc_bw_limits[8]; + + /* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */ + __le16 tc_bw_max[2]; + u8 reserved3[32]; +}; + +I40E_CHECK_STRUCT_LEN(0x44, i40e_aqc_query_port_ets_config_resp); + +/* Query Switching Component Bandwidth Allocation per Traffic Type + 
* (indirect 0x041A) + */ +struct i40e_aqc_query_switching_comp_bw_config_resp { + u8 tc_valid_bits; + u8 reserved[2]; + u8 absolute_credits_enable; /* bool */ + u8 tc_bw_share_credits[8]; + __le16 tc_bw_limits[8]; + + /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ + __le16 tc_bw_max[2]; +}; + +I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_switching_comp_bw_config_resp); + +/* Suspend/resume port TX traffic + * (direct 0x041B and 0x041C) uses the generic SEID struct + */ + +/* Configure partition BW + * (indirect 0x041D) + */ +struct i40e_aqc_configure_partition_bw_data { + __le16 pf_valid_bits; + u8 min_bw[16]; /* guaranteed bandwidth */ + u8 max_bw[16]; /* bandwidth limit */ +}; + +I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data); + +/* Get and set the active HMC resource profile and status. + * (direct 0x0500) and (direct 0x0501) + */ +struct i40e_aq_get_set_hmc_resource_profile { + u8 pm_profile; + u8 pe_vf_enabled; + u8 reserved[14]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile); + +enum i40e_aq_hmc_profile { + /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */ + I40E_HMC_PROFILE_DEFAULT = 1, + I40E_HMC_PROFILE_FAVOR_VF = 2, + I40E_HMC_PROFILE_EQUAL = 3, +}; + +/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */ + +/* set in param0 for get phy abilities to report qualified modules */ +#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001 +#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002 + +enum i40e_aq_phy_type { + I40E_PHY_TYPE_SGMII = 0x0, + I40E_PHY_TYPE_1000BASE_KX = 0x1, + I40E_PHY_TYPE_10GBASE_KX4 = 0x2, + I40E_PHY_TYPE_10GBASE_KR = 0x3, + I40E_PHY_TYPE_40GBASE_KR4 = 0x4, + I40E_PHY_TYPE_XAUI = 0x5, + I40E_PHY_TYPE_XFI = 0x6, + I40E_PHY_TYPE_SFI = 0x7, + I40E_PHY_TYPE_XLAUI = 0x8, + I40E_PHY_TYPE_XLPPI = 0x9, + I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA, + I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB, + I40E_PHY_TYPE_10GBASE_AOC = 0xC, + I40E_PHY_TYPE_40GBASE_AOC = 0xD, + I40E_PHY_TYPE_UNRECOGNIZED = 0xE, + I40E_PHY_TYPE_UNSUPPORTED = 0xF, + I40E_PHY_TYPE_100BASE_TX = 0x11, + I40E_PHY_TYPE_1000BASE_T = 0x12, + I40E_PHY_TYPE_10GBASE_T = 0x13, + I40E_PHY_TYPE_10GBASE_SR = 0x14, + I40E_PHY_TYPE_10GBASE_LR = 0x15, + I40E_PHY_TYPE_10GBASE_SFPP_CU = 0x16, + I40E_PHY_TYPE_10GBASE_CR1 = 0x17, + I40E_PHY_TYPE_40GBASE_CR4 = 0x18, + I40E_PHY_TYPE_40GBASE_SR4 = 0x19, + I40E_PHY_TYPE_40GBASE_LR4 = 0x1A, + I40E_PHY_TYPE_1000BASE_SX = 0x1B, + I40E_PHY_TYPE_1000BASE_LX = 0x1C, + I40E_PHY_TYPE_1000BASE_T_OPTICAL = 0x1D, + I40E_PHY_TYPE_20GBASE_KR2 = 0x1E, + I40E_PHY_TYPE_25GBASE_KR = 0x1F, + I40E_PHY_TYPE_25GBASE_CR = 0x20, + I40E_PHY_TYPE_25GBASE_SR = 0x21, + I40E_PHY_TYPE_25GBASE_LR = 0x22, + I40E_PHY_TYPE_25GBASE_AOC = 0x23, + I40E_PHY_TYPE_25GBASE_ACC = 0x24, + I40E_PHY_TYPE_MAX, + I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP = 0xFD, + I40E_PHY_TYPE_EMPTY = 0xFE, + I40E_PHY_TYPE_DEFAULT = 0xFF, +}; + +#define I40E_LINK_SPEED_100MB_SHIFT 0x1 +#define I40E_LINK_SPEED_1000MB_SHIFT 0x2 +#define I40E_LINK_SPEED_10GB_SHIFT 0x3 +#define I40E_LINK_SPEED_40GB_SHIFT 0x4 +#define I40E_LINK_SPEED_20GB_SHIFT 0x5 +#define I40E_LINK_SPEED_25GB_SHIFT 0x6 + +enum i40e_aq_link_speed { + I40E_LINK_SPEED_UNKNOWN = 0, + I40E_LINK_SPEED_100MB = BIT(I40E_LINK_SPEED_100MB_SHIFT), + I40E_LINK_SPEED_1GB = BIT(I40E_LINK_SPEED_1000MB_SHIFT), + I40E_LINK_SPEED_10GB = BIT(I40E_LINK_SPEED_10GB_SHIFT), + I40E_LINK_SPEED_40GB = BIT(I40E_LINK_SPEED_40GB_SHIFT), + I40E_LINK_SPEED_20GB = BIT(I40E_LINK_SPEED_20GB_SHIFT), + I40E_LINK_SPEED_25GB = BIT(I40E_LINK_SPEED_25GB_SHIFT), +}; + 
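/* Illustrative sketch, not part of the upstream file: the link_speed
 * fields in the PHY abilities and link status responses carry the bitmap
 * encoded by enum i40e_aq_link_speed above. A hypothetical helper that
 * decodes a single reported speed into Mb/s might look like this.
 */
static inline u32 example_aq_link_speed_to_mbps(enum i40e_aq_link_speed speed)
{
	switch (speed) {
	case I40E_LINK_SPEED_100MB:	return 100;
	case I40E_LINK_SPEED_1GB:	return 1000;
	case I40E_LINK_SPEED_10GB:	return 10000;
	case I40E_LINK_SPEED_20GB:	return 20000;
	case I40E_LINK_SPEED_25GB:	return 25000;
	case I40E_LINK_SPEED_40GB:	return 40000;
	default:			return 0; /* I40E_LINK_SPEED_UNKNOWN */
	}
}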
+struct i40e_aqc_module_desc { + u8 oui[3]; + u8 reserved1; + u8 part_number[16]; + u8 revision[4]; + u8 reserved2[8]; +}; + +I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_module_desc); + +struct i40e_aq_get_phy_abilities_resp { + __le32 phy_type; /* bitmap using the above enum for offsets */ + u8 link_speed; /* bitmap using the above enum bit patterns */ + u8 abilities; +#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01 +#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02 +#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04 +#define I40E_AQ_PHY_LINK_ENABLED 0x08 +#define I40E_AQ_PHY_AN_ENABLED 0x10 +#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20 +#define I40E_AQ_PHY_FEC_ABILITY_KR 0x40 +#define I40E_AQ_PHY_FEC_ABILITY_RS 0x80 + __le16 eee_capability; +#define I40E_AQ_EEE_100BASE_TX 0x0002 +#define I40E_AQ_EEE_1000BASE_T 0x0004 +#define I40E_AQ_EEE_10GBASE_T 0x0008 +#define I40E_AQ_EEE_1000BASE_KX 0x0010 +#define I40E_AQ_EEE_10GBASE_KX4 0x0020 +#define I40E_AQ_EEE_10GBASE_KR 0x0040 + __le32 eeer_val; + u8 d3_lpan; +#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01 + u8 phy_type_ext; +#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0X01 +#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0X02 +#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04 +#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08 +#define I40E_AQ_PHY_TYPE_EXT_25G_AOC 0x10 +#define I40E_AQ_PHY_TYPE_EXT_25G_ACC 0x20 + u8 fec_cfg_curr_mod_ext_info; +#define I40E_AQ_ENABLE_FEC_KR 0x01 +#define I40E_AQ_ENABLE_FEC_RS 0x02 +#define I40E_AQ_REQUEST_FEC_KR 0x04 +#define I40E_AQ_REQUEST_FEC_RS 0x08 +#define I40E_AQ_ENABLE_FEC_AUTO 0x10 +#define I40E_AQ_FEC +#define I40E_AQ_MODULE_TYPE_EXT_MASK 0xE0 +#define I40E_AQ_MODULE_TYPE_EXT_SHIFT 5 + + u8 ext_comp_code; + u8 phy_id[4]; + u8 module_type[3]; + u8 qualified_module_count; +#define I40E_AQ_PHY_MAX_QMS 16 + struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS]; +}; + +I40E_CHECK_STRUCT_LEN(0x218, i40e_aq_get_phy_abilities_resp); + +/* Set PHY Config (direct 0x0601) */ +struct i40e_aq_set_phy_config { /* same bits as above in all */ + __le32 phy_type; + u8 link_speed; + u8 abilities; +/* bits 0-2 use the values from get_phy_abilities_resp */ +#define I40E_AQ_PHY_ENABLE_LINK 0x08 +#define I40E_AQ_PHY_ENABLE_AN 0x10 +#define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20 + __le16 eee_capability; + __le32 eeer; + u8 low_power_ctrl; + u8 phy_type_ext; +#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0X01 +#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0X02 +#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04 +#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08 + u8 fec_config; +#define I40E_AQ_SET_FEC_ABILITY_KR BIT(0) +#define I40E_AQ_SET_FEC_ABILITY_RS BIT(1) +#define I40E_AQ_SET_FEC_REQUEST_KR BIT(2) +#define I40E_AQ_SET_FEC_REQUEST_RS BIT(3) +#define I40E_AQ_SET_FEC_AUTO BIT(4) +#define I40E_AQ_PHY_FEC_CONFIG_SHIFT 0x0 +#define I40E_AQ_PHY_FEC_CONFIG_MASK (0x1F << I40E_AQ_PHY_FEC_CONFIG_SHIFT) + u8 reserved; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config); + +/* Set MAC Config command data structure (direct 0x0603) */ +struct i40e_aq_set_mac_config { + __le16 max_frame_size; + u8 params; +#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04 +#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78 +#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3 +#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0 +#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF +#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9 +#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8 +#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7 +#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6 +#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5 +#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4 
+#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3 +#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2 +#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1 + u8 tx_timer_priority; /* bitmap */ + __le16 tx_timer_value; + __le16 fc_refresh_threshold; + u8 reserved[8]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config); + +/* Restart Auto-Negotiation (direct 0x605) */ +struct i40e_aqc_set_link_restart_an { + u8 command; +#define I40E_AQ_PHY_RESTART_AN 0x02 +#define I40E_AQ_PHY_LINK_ENABLE 0x04 + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an); + +/* Get Link Status cmd & response data structure (direct 0x0607) */ +struct i40e_aqc_get_link_status { + __le16 command_flags; /* only field set on command */ +#define I40E_AQ_LSE_MASK 0x3 +#define I40E_AQ_LSE_NOP 0x0 +#define I40E_AQ_LSE_DISABLE 0x2 +#define I40E_AQ_LSE_ENABLE 0x3 +/* only response uses this flag */ +#define I40E_AQ_LSE_IS_ENABLED 0x1 + u8 phy_type; /* i40e_aq_phy_type */ + u8 link_speed; /* i40e_aq_link_speed */ + u8 link_info; +#define I40E_AQ_LINK_UP 0x01 /* obsolete */ +#define I40E_AQ_LINK_UP_FUNCTION 0x01 +#define I40E_AQ_LINK_FAULT 0x02 +#define I40E_AQ_LINK_FAULT_TX 0x04 +#define I40E_AQ_LINK_FAULT_RX 0x08 +#define I40E_AQ_LINK_FAULT_REMOTE 0x10 +#define I40E_AQ_LINK_UP_PORT 0x20 +#define I40E_AQ_MEDIA_AVAILABLE 0x40 +#define I40E_AQ_SIGNAL_DETECT 0x80 + u8 an_info; +#define I40E_AQ_AN_COMPLETED 0x01 +#define I40E_AQ_LP_AN_ABILITY 0x02 +#define I40E_AQ_PD_FAULT 0x04 +#define I40E_AQ_FEC_EN 0x08 +#define I40E_AQ_PHY_LOW_POWER 0x10 +#define I40E_AQ_LINK_PAUSE_TX 0x20 +#define I40E_AQ_LINK_PAUSE_RX 0x40 +#define I40E_AQ_QUALIFIED_MODULE 0x80 + u8 ext_info; +#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01 +#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02 +#define I40E_AQ_LINK_TX_SHIFT 0x02 +#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT) +#define I40E_AQ_LINK_TX_ACTIVE 0x00 +#define I40E_AQ_LINK_TX_DRAINED 0x01 +#define I40E_AQ_LINK_TX_FLUSHED 0x03 +#define I40E_AQ_LINK_FORCED_40G 0x10 +/* 25G Error Codes */ +#define I40E_AQ_25G_NO_ERR 0X00 +#define I40E_AQ_25G_NOT_PRESENT 0X01 +#define I40E_AQ_25G_NVM_CRC_ERR 0X02 +#define I40E_AQ_25G_SBUS_UCODE_ERR 0X03 +#define I40E_AQ_25G_SERDES_UCODE_ERR 0X04 +#define I40E_AQ_25G_NIMB_UCODE_ERR 0X05 + u8 loopback; /* use defines from i40e_aqc_set_lb_mode */ +/* Since firmware API 1.7 loopback field keeps power class info as well */ +#define I40E_AQ_LOOPBACK_MASK 0x07 +#define I40E_AQ_PWR_CLASS_SHIFT_LB 6 +#define I40E_AQ_PWR_CLASS_MASK_LB (0x03 << I40E_AQ_PWR_CLASS_SHIFT_LB) + __le16 max_frame_size; + u8 config; +#define I40E_AQ_CONFIG_FEC_KR_ENA 0x01 +#define I40E_AQ_CONFIG_FEC_RS_ENA 0x02 +#define I40E_AQ_CONFIG_CRC_ENA 0x04 +#define I40E_AQ_CONFIG_PACING_MASK 0x78 + union { + struct { + u8 power_desc; +#define I40E_AQ_LINK_POWER_CLASS_1 0x00 +#define I40E_AQ_LINK_POWER_CLASS_2 0x01 +#define I40E_AQ_LINK_POWER_CLASS_3 0x02 +#define I40E_AQ_LINK_POWER_CLASS_4 0x03 +#define I40E_AQ_PWR_CLASS_MASK 0x03 + u8 reserved[4]; + }; + struct { + u8 link_type[4]; + u8 link_type_ext; + }; + }; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status); + +/* Set event mask command (direct 0x613) */ +struct i40e_aqc_set_phy_int_mask { + u8 reserved[8]; + __le16 event_mask; +#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002 +#define I40E_AQ_EVENT_MEDIA_NA 0x0004 +#define I40E_AQ_EVENT_LINK_FAULT 0x0008 +#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010 +#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020 +#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040 +#define I40E_AQ_EVENT_AN_COMPLETED 0x0080 +#define 
I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100 +#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200 + u8 reserved1[6]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask); + +/* Get Local AN advt register (direct 0x0614) + * Set Local AN advt register (direct 0x0615) + * Get Link Partner AN advt register (direct 0x0616) + */ +struct i40e_aqc_an_advt_reg { + __le32 local_an_reg0; + __le16 local_an_reg1; + u8 reserved[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg); + +/* Set Loopback mode (0x0618) */ +struct i40e_aqc_set_lb_mode { + __le16 lb_mode; +#define I40E_AQ_LB_PHY_LOCAL 0x01 +#define I40E_AQ_LB_PHY_REMOTE 0x02 +#define I40E_AQ_LB_MAC_LOCAL 0x04 + u8 reserved[14]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode); + +/* Set PHY Debug command (0x0622) */ +struct i40e_aqc_set_phy_debug { + u8 command_flags; +#define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02 +#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2 +#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \ + I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT) +#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00 +#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01 +#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02 +#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10 + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug); + +enum i40e_aq_phy_reg_type { + I40E_AQC_PHY_REG_INTERNAL = 0x1, + I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2, + I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3 +}; + +/* Run PHY Activity (0x0626) */ +struct i40e_aqc_run_phy_activity { + __le16 activity_id; + u8 flags; + u8 reserved1; + __le32 control; + __le32 data; + u8 reserved2[4]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_run_phy_activity); + +/* Set PHY Register command (0x0628) */ +/* Get PHY Register command (0x0629) */ +struct i40e_aqc_phy_register_access { + u8 phy_interface; +#define I40E_AQ_PHY_REG_ACCESS_INTERNAL 0 +#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL 1 +#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE 2 + u8 dev_address; + u8 reserved1[2]; + __le32 reg_address; + __le32 reg_value; + u8 reserved2[4]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access); + +/* NVM Read command (indirect 0x0701) + * NVM Erase commands (direct 0x0702) + * NVM Update commands (indirect 0x0703) + */ +struct i40e_aqc_nvm_update { + u8 command_flags; +#define I40E_AQ_NVM_LAST_CMD 0x01 +#define I40E_AQ_NVM_REARRANGE_TO_FLAT 0x20 +#define I40E_AQ_NVM_REARRANGE_TO_STRUCT 0x40 +#define I40E_AQ_NVM_FLASH_ONLY 0x80 +#define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT 1 +#define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK 0x03 +#define I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED 0x03 +#define I40E_AQ_NVM_PRESERVATION_FLAGS_ALL 0x01 + u8 module_pointer; + __le16 length; + __le32 offset; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update); + +/* NVM Config Read (indirect 0x0704) */ +struct i40e_aqc_nvm_config_read { + __le16 cmd_flags; +#define I40E_AQ_ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1 +#define I40E_AQ_ANVM_READ_SINGLE_FEATURE 0 +#define I40E_AQ_ANVM_READ_MULTIPLE_FEATURES 1 + __le16 element_count; + __le16 element_id; /* Feature/field ID */ + __le16 element_id_msw; /* MSWord of field ID */ + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read); + +/* NVM Config Write (indirect 0x0705) */ +struct i40e_aqc_nvm_config_write { + __le16 cmd_flags; + __le16 element_count; + u8 reserved[4]; + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write); + +/* Used for 0x0704 as well as for 0x0705 
commands */
+#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1
+#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \
+ BIT(I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
+#define I40E_AQ_ANVM_FEATURE 0
+#define I40E_AQ_ANVM_IMMEDIATE_FIELD BIT(I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
+struct i40e_aqc_nvm_config_data_feature {
+ __le16 feature_id;
+#define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01
+#define I40E_AQ_ANVM_FEATURE_OPTION_DWORD_MAP 0x08
+#define I40E_AQ_ANVM_FEATURE_OPTION_POR_CSR 0x10
+ __le16 feature_options;
+ __le16 feature_selection;
+};
+
+I40E_CHECK_STRUCT_LEN(0x6, i40e_aqc_nvm_config_data_feature);
+
+struct i40e_aqc_nvm_config_data_immediate_field {
+ __le32 field_id;
+ __le32 field_value;
+ __le16 field_options;
+ __le16 reserved;
+};
+
+I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field);
+
+/* OEM Post Update (indirect 0x0720)
+ * no command data struct used
+ */
+struct i40e_aqc_nvm_oem_post_update {
+#define I40E_AQ_NVM_OEM_POST_UPDATE_EXTERNAL_DATA 0x01
+ u8 sel_data;
+ u8 reserved[7];
+};
+
+I40E_CHECK_STRUCT_LEN(0x8, i40e_aqc_nvm_oem_post_update);
+
+struct i40e_aqc_nvm_oem_post_update_buffer {
+ u8 str_len;
+ u8 dev_addr;
+ __le16 eeprom_addr;
+ u8 data[36];
+};
+
+I40E_CHECK_STRUCT_LEN(0x28, i40e_aqc_nvm_oem_post_update_buffer);
+
+/* Thermal Sensor (indirect 0x0721)
+ * read or set thermal sensor configs and values
+ * takes a sensor and command specific data buffer, not detailed here
+ */
+struct i40e_aqc_thermal_sensor {
+ u8 sensor_action;
+#define I40E_AQ_THERMAL_SENSOR_READ_CONFIG 0
+#define I40E_AQ_THERMAL_SENSOR_SET_CONFIG 1
+#define I40E_AQ_THERMAL_SENSOR_READ_TEMP 2
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_thermal_sensor);
+
+/* Send to PF command (indirect 0x0801) id is only used by PF
+ * Send to VF command (indirect 0x0802) id is only used by PF
+ * Send to Peer PF command (indirect 0x0803)
+ */
+struct i40e_aqc_pf_vf_message {
+ __le32 id;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message);
+
+/* Alternate structure */
+
+/* Direct write (direct 0x0900)
+ * Direct read (direct 0x0902)
+ */
+struct i40e_aqc_alternate_write {
+ __le32 address0;
+ __le32 data0;
+ __le32 address1;
+ __le32 data1;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write);
+
+/* Indirect write (indirect 0x0901)
+ * Indirect read (indirect 0x0903)
+ */
+
+struct i40e_aqc_alternate_ind_write {
+ __le32 address;
+ __le32 length;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write);
+
+/* Done alternate write (direct 0x0904)
+ * uses i40e_aq_desc
+ */
+struct i40e_aqc_alternate_write_done {
+ __le16 cmd_flags;
+#define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1
+#define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0
+#define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1
+#define I40E_AQ_ALTERNATE_RESET_NEEDED 2
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done);
+
+/* Set OEM mode (direct 0x0905) */
+struct i40e_aqc_alternate_set_mode {
+ __le32 mode;
+#define I40E_AQ_ALTERNATE_MODE_NONE 0
+#define I40E_AQ_ALTERNATE_MODE_OEM 1
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode);
+
+/* Clear port Alternate RAM (direct 0x0906) uses i40e_aq_desc */
+
+/* async events 0x10xx */
+
+/* Lan Queue Overflow Event (direct, 0x1001) */
+struct i40e_aqc_lan_overflow {
+ __le32 prtdcb_rupto;
+ __le32 otx_ctl;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow);
+
+/* Get LLDP MIB (indirect 0x0A00) */
+struct i40e_aqc_lldp_get_mib {
+ u8 type;
+ u8 reserved1;
+#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3
+#define I40E_AQ_LLDP_MIB_LOCAL 0x0
+#define I40E_AQ_LLDP_MIB_REMOTE 0x1
+#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC
+#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1
+#define I40E_AQ_LLDP_TX_SHIFT 0x4
+#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT)
+/* TX pause flags use I40E_AQ_LINK_TX_* above */
+ __le16 local_len;
+ __le16 remote_len;
+ u8 reserved2[2];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib);
+
+/* Configure LLDP MIB Change Event (direct 0x0A01)
+ * also used for the event (with type in the command field)
+ */
+struct i40e_aqc_lldp_update_mib {
+ u8 command;
+#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
+#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib);
+
+/* Add LLDP TLV (indirect 0x0A02)
+ * Delete LLDP TLV (indirect 0x0A04)
+ */
+struct i40e_aqc_lldp_add_tlv {
+ u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
+ u8 reserved1[1];
+ __le16 len;
+ u8 reserved2[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv);
+
+/* Update LLDP TLV (indirect 0x0A03) */
+struct i40e_aqc_lldp_update_tlv {
+ u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
+ u8 reserved;
+ __le16 old_len;
+ __le16 new_offset;
+ __le16 new_len;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv);
+
+/* Stop LLDP (direct 0x0A05) */
+struct i40e_aqc_lldp_stop {
+ u8 command;
+#define I40E_AQ_LLDP_AGENT_STOP 0x0
+#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop);
+
+/* Start LLDP (direct 0x0A06) */
+
+struct i40e_aqc_lldp_start {
+ u8 command;
+#define I40E_AQ_LLDP_AGENT_START 0x1
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
+
+/* Set DCB (direct 0x0303) */
+struct i40e_aqc_set_dcb_parameters {
+ u8 command;
+#define I40E_AQ_DCB_SET_AGENT 0x1
+#define I40E_DCB_VALID 0x1
+ u8 valid_flags;
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_dcb_parameters);
+
+/* Apply MIB changes (0x0A07)
+ * uses the generic struct as it contains no data
+ */
+
+/* Add UDP Tunnel command and completion (direct 0x0B00) */
+struct i40e_aqc_add_udp_tunnel {
+ __le16 udp_port;
+ u8 reserved0[3];
+ u8 protocol_type;
+#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00
+#define I40E_AQC_TUNNEL_TYPE_NGE 0x01
+#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10
+#define I40E_AQC_TUNNEL_TYPE_VXLAN_GPE 0x11
+ u8 reserved1[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
+
+struct i40e_aqc_add_udp_tunnel_completion {
+ __le16 udp_port;
+ u8 filter_entry_index;
+ u8 multiple_pfs;
+#define I40E_AQC_SINGLE_PF 0x0
+#define I40E_AQC_MULTIPLE_PFS 0x1
+ u8 total_filters;
+ u8 reserved[11];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion);
+
+/* remove UDP Tunnel command (0x0B01) */
+struct i40e_aqc_remove_udp_tunnel {
+ u8 reserved[2];
+ u8 index; /* 0 to 15 */
+ u8 reserved2[13];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel);
+
+struct i40e_aqc_del_udp_tunnel_completion {
+ __le16 udp_port;
+ u8 index; /* 0 to 15 */
+ u8 multiple_pfs;
+ u8 total_filters_used;
+ u8 reserved1[11];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
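Each I40E_CHECK_CMD_LENGTH() invocation above pins a direct-command structure to exactly 16 bytes, the size of the parameter area inside struct i40e_aq_desc, while I40E_CHECK_STRUCT_LEN() does the same for indirect data buffers. The real macros are defined near the top of this header (outside this excerpt); a sketch of the usual compile-time trick, with hypothetical renamed identifiers, divides by zero in an enum initializer whenever sizeof() disagrees with the declared length:

	/* Sketch only: a size mismatch makes the divisor zero, which is
	 * rejected at compile time in a constant expression.
	 */
	#define EXAMPLE_CHECK_STRUCT_LEN(n, X) \
		enum example_static_assert_enum_##X \
		{ example_static_assert_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0) }
	#define EXAMPLE_CHECK_CMD_LENGTH(X) EXAMPLE_CHECK_STRUCT_LEN(16, X)

This keeps the wire layout of every admin queue command checked on every build, at no runtime cost.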
+
+struct i40e_aqc_get_set_rss_key {
+#define I40E_AQC_SET_RSS_KEY_VSI_VALID BIT(15)
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \
+ I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
+ __le16 vsi_id;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key);
+
+struct i40e_aqc_get_set_rss_key_data {
+ u8 standard_rss_key[0x28];
+ u8 extended_hash_key[0xc];
+};
+
+I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
+
+struct i40e_aqc_get_set_rss_lut {
+#define I40E_AQC_SET_RSS_LUT_VSI_VALID BIT(15)
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \
+ I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
+ __le16 vsi_id;
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK \
+ BIT(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
+
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1
+ __le16 flags;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut);
+
+/* tunnel key structure 0x0B10 */
+
+struct i40e_aqc_tunnel_key_structure_A0 {
+ __le16 key1_off;
+ __le16 key1_len;
+ __le16 key2_off;
+ __le16 key2_len;
+ __le16 flags;
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
+/* response flags */
+#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
+#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
+ u8 reserved[6];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure_A0);
+
+struct i40e_aqc_tunnel_key_structure {
+ u8 key1_off;
+ u8 key2_off;
+ u8 key1_len; /* 0 to 15 */
+ u8 key2_len; /* 0 to 15 */
+ u8 flags;
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
+/* response flags */
+#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
+#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
+ u8 network_key_index;
+#define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0
+#define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1
+#define I40E_AQC_NETWORK_KEY_INDEX_FLEX_MAC_IN_UDP 0x2
+#define I40E_AQC_NETWORK_KEY_INDEX_GRE 0x3
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure);
+
+/* OEM mode commands (direct 0xFE0x) */
+struct i40e_aqc_oem_param_change {
+ __le32 param_type;
+#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0
+#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1
+#define I40E_AQ_OEM_PARAM_MAC 2
+ __le32 param_value1;
+ __le16 param_value2;
+ u8 reserved[6];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change);
+
+struct i40e_aqc_oem_state_change {
+ __le32 state;
+#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0
+#define I40E_AQ_OEM_STATE_LINK_UP 0x1
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change);
+
+/* Initialize OCSD (0xFE02, direct) */
+struct i40e_aqc_opc_oem_ocsd_initialize {
+ u8 type_status;
+ u8 reserved1[3];
+ __le32 ocsd_memory_block_addr_high;
+ __le32 ocsd_memory_block_addr_low;
+ __le32 requested_update_interval;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocsd_initialize);
+
+/* Initialize OCBB (0xFE03, direct) */
+struct i40e_aqc_opc_oem_ocbb_initialize {
+ u8 type_status;
+ u8 reserved1[3];
+ __le32 ocbb_memory_block_addr_high;
+ __le32 ocbb_memory_block_addr_low;
+ u8 reserved2[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocbb_initialize);
+
+/* debug commands */
+
+/* get device id (0xFF00) uses the generic structure */
+
+/* set test mode (0xFF01, internal) */
+
+struct i40e_acq_set_test_mode {
+ u8 mode;
+#define I40E_AQ_TEST_PARTIAL 0 +#define I40E_AQ_TEST_FULL 1 +#define I40E_AQ_TEST_NVM 2 + u8 reserved[3]; + u8 command; +#define I40E_AQ_TEST_OPEN 0 +#define I40E_AQ_TEST_CLOSE 1 +#define I40E_AQ_TEST_INC 2 + u8 reserved2[3]; + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode); + +/* Debug Read Register command (0xFF03) + * Debug Write Register command (0xFF04) + */ +struct i40e_aqc_debug_reg_read_write { + __le32 reserved; + __le32 address; + __le32 value_high; + __le32 value_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_reg_read_write); + +/* Scatter/gather Reg Read (indirect 0xFF05) + * Scatter/gather Reg Write (indirect 0xFF06) + */ + +/* i40e_aq_desc is used for the command */ +struct i40e_aqc_debug_reg_sg_element_data { + __le32 address; + __le32 value; +}; + +/* Debug Modify register (direct 0xFF07) */ +struct i40e_aqc_debug_modify_reg { + __le32 address; + __le32 value; + __le32 clear_mask; + __le32 set_mask; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg); + +/* dump internal data (0xFF08, indirect) */ + +#define I40E_AQ_CLUSTER_ID_AUX 0 +#define I40E_AQ_CLUSTER_ID_SWITCH_FLU 1 +#define I40E_AQ_CLUSTER_ID_TXSCHED 2 +#define I40E_AQ_CLUSTER_ID_HMC 3 +#define I40E_AQ_CLUSTER_ID_MAC0 4 +#define I40E_AQ_CLUSTER_ID_MAC1 5 +#define I40E_AQ_CLUSTER_ID_MAC2 6 +#define I40E_AQ_CLUSTER_ID_MAC3 7 +#define I40E_AQ_CLUSTER_ID_DCB 8 +#define I40E_AQ_CLUSTER_ID_EMP_MEM 9 +#define I40E_AQ_CLUSTER_ID_PKT_BUF 10 +#define I40E_AQ_CLUSTER_ID_ALTRAM 11 + +struct i40e_aqc_debug_dump_internals { + u8 cluster_id; + u8 table_id; + __le16 data_size; + __le32 idx; + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals); + +struct i40e_aqc_debug_modify_internals { + u8 cluster_id; + u8 cluster_specific_params[7]; + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals); + +#endif /* _I40E_ADMINQ_CMD_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/i40e_alloc.h b/drivers/net/ethernet/intel/iavf/i40e_alloc.h new file mode 100644 index 000000000000..cb8689222c8b --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/i40e_alloc.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/
+
+#ifndef _I40E_ALLOC_H_
+#define _I40E_ALLOC_H_
+
+struct i40e_hw;
+
+/* Memory allocation types */
+enum i40e_memory_type {
+ i40e_mem_arq_buf = 0, /* ARQ indirect command buffer */
+ i40e_mem_asq_buf = 1,
+ i40e_mem_atq_buf = 2, /* ATQ indirect command buffer */
+ i40e_mem_arq_ring = 3, /* ARQ descriptor ring */
+ i40e_mem_atq_ring = 4, /* ATQ descriptor ring */
+ i40e_mem_pd = 5, /* Page Descriptor */
+ i40e_mem_bp = 6, /* Backing Page - 4KB */
+ i40e_mem_bp_jumbo = 7, /* Backing Page - > 4KB */
+ i40e_mem_reserved
+};
+
+/* prototype for functions used for dynamic memory allocation */
+i40e_status i40e_allocate_dma_mem(struct i40e_hw *hw,
+ struct i40e_dma_mem *mem,
+ enum i40e_memory_type type,
+ u64 size, u32 alignment);
+i40e_status i40e_free_dma_mem(struct i40e_hw *hw,
+ struct i40e_dma_mem *mem);
+i40e_status i40e_allocate_virt_mem(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem,
+ u32 size);
+i40e_status i40e_free_virt_mem(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem);
+
+#endif /* _I40E_ALLOC_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/i40e_common.c b/drivers/net/ethernet/intel/iavf/i40e_common.c
new file mode 100644
index 000000000000..eea280ba411e
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/i40e_common.c
@@ -0,0 +1,1320 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#include "i40e_type.h"
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+#include <linux/avf/virtchnl.h>
+
+/**
+ * i40e_set_mac_type - Sets MAC type
+ * @hw: pointer to the HW structure
+ *
+ * This function sets the mac type of the adapter based on the
+ * vendor ID and device ID stored in the hw structure.
+ **/
+i40e_status i40e_set_mac_type(struct i40e_hw *hw)
+{
+ i40e_status status = 0;
+
+ if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
+ switch (hw->device_id) {
+ case I40E_DEV_ID_SFP_XL710:
+ case I40E_DEV_ID_QEMU:
+ case I40E_DEV_ID_KX_B:
+ case I40E_DEV_ID_KX_C:
+ case I40E_DEV_ID_QSFP_A:
+ case I40E_DEV_ID_QSFP_B:
+ case I40E_DEV_ID_QSFP_C:
+ case I40E_DEV_ID_10G_BASE_T:
+ case I40E_DEV_ID_10G_BASE_T4:
+ case I40E_DEV_ID_20G_KR2:
+ case I40E_DEV_ID_20G_KR2_A:
+ case I40E_DEV_ID_25G_B:
+ case I40E_DEV_ID_25G_SFP28:
+ hw->mac.type = I40E_MAC_XL710;
+ break;
+ case I40E_DEV_ID_SFP_X722:
+ case I40E_DEV_ID_1G_BASE_T_X722:
+ case I40E_DEV_ID_10G_BASE_T_X722:
+ case I40E_DEV_ID_SFP_I_X722:
+ hw->mac.type = I40E_MAC_X722;
+ break;
+ case I40E_DEV_ID_X722_VF:
+ hw->mac.type = I40E_MAC_X722_VF;
+ break;
+ case I40E_DEV_ID_VF:
+ case I40E_DEV_ID_VF_HV:
+ case I40E_DEV_ID_ADAPTIVE_VF:
+ hw->mac.type = I40E_MAC_VF;
+ break;
+ default:
+ hw->mac.type = I40E_MAC_GENERIC;
+ break;
+ }
+ } else {
+ status = I40E_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
+ hw->mac.type, status);
+ return status;
+}
+
+/**
+ * i40evf_aq_str - convert AQ err code to a string
+ * @hw: pointer to the HW structure
+ * @aq_err: the AQ error code to convert
+ **/
+const char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
+{
+ switch (aq_err) {
+ case I40E_AQ_RC_OK:
+ return "OK";
+ case I40E_AQ_RC_EPERM:
+ return "I40E_AQ_RC_EPERM";
+ case I40E_AQ_RC_ENOENT:
+ return "I40E_AQ_RC_ENOENT";
+ case I40E_AQ_RC_ESRCH:
+ return "I40E_AQ_RC_ESRCH";
+ case I40E_AQ_RC_EINTR:
+ return "I40E_AQ_RC_EINTR";
+ case I40E_AQ_RC_EIO:
+ return "I40E_AQ_RC_EIO";
+ case I40E_AQ_RC_ENXIO:
+ return "I40E_AQ_RC_ENXIO";
+ case I40E_AQ_RC_E2BIG:
+ return "I40E_AQ_RC_E2BIG";
+ case I40E_AQ_RC_EAGAIN:
+ return "I40E_AQ_RC_EAGAIN";
+ case
I40E_AQ_RC_ENOMEM:
+ return "I40E_AQ_RC_ENOMEM";
+ case I40E_AQ_RC_EACCES:
+ return "I40E_AQ_RC_EACCES";
+ case I40E_AQ_RC_EFAULT:
+ return "I40E_AQ_RC_EFAULT";
+ case I40E_AQ_RC_EBUSY:
+ return "I40E_AQ_RC_EBUSY";
+ case I40E_AQ_RC_EEXIST:
+ return "I40E_AQ_RC_EEXIST";
+ case I40E_AQ_RC_EINVAL:
+ return "I40E_AQ_RC_EINVAL";
+ case I40E_AQ_RC_ENOTTY:
+ return "I40E_AQ_RC_ENOTTY";
+ case I40E_AQ_RC_ENOSPC:
+ return "I40E_AQ_RC_ENOSPC";
+ case I40E_AQ_RC_ENOSYS:
+ return "I40E_AQ_RC_ENOSYS";
+ case I40E_AQ_RC_ERANGE:
+ return "I40E_AQ_RC_ERANGE";
+ case I40E_AQ_RC_EFLUSHED:
+ return "I40E_AQ_RC_EFLUSHED";
+ case I40E_AQ_RC_BAD_ADDR:
+ return "I40E_AQ_RC_BAD_ADDR";
+ case I40E_AQ_RC_EMODE:
+ return "I40E_AQ_RC_EMODE";
+ case I40E_AQ_RC_EFBIG:
+ return "I40E_AQ_RC_EFBIG";
+ }
+
+ snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
+ return hw->err_str;
+}
+
+/**
+ * i40evf_stat_str - convert status err code to a string
+ * @hw: pointer to the HW structure
+ * @stat_err: the status error code to convert
+ **/
+const char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err)
+{
+ switch (stat_err) {
+ case 0:
+ return "OK";
+ case I40E_ERR_NVM:
+ return "I40E_ERR_NVM";
+ case I40E_ERR_NVM_CHECKSUM:
+ return "I40E_ERR_NVM_CHECKSUM";
+ case I40E_ERR_PHY:
+ return "I40E_ERR_PHY";
+ case I40E_ERR_CONFIG:
+ return "I40E_ERR_CONFIG";
+ case I40E_ERR_PARAM:
+ return "I40E_ERR_PARAM";
+ case I40E_ERR_MAC_TYPE:
+ return "I40E_ERR_MAC_TYPE";
+ case I40E_ERR_UNKNOWN_PHY:
+ return "I40E_ERR_UNKNOWN_PHY";
+ case I40E_ERR_LINK_SETUP:
+ return "I40E_ERR_LINK_SETUP";
+ case I40E_ERR_ADAPTER_STOPPED:
+ return "I40E_ERR_ADAPTER_STOPPED";
+ case I40E_ERR_INVALID_MAC_ADDR:
+ return "I40E_ERR_INVALID_MAC_ADDR";
+ case I40E_ERR_DEVICE_NOT_SUPPORTED:
+ return "I40E_ERR_DEVICE_NOT_SUPPORTED";
+ case I40E_ERR_MASTER_REQUESTS_PENDING:
+ return "I40E_ERR_MASTER_REQUESTS_PENDING";
+ case I40E_ERR_INVALID_LINK_SETTINGS:
+ return "I40E_ERR_INVALID_LINK_SETTINGS";
+ case I40E_ERR_AUTONEG_NOT_COMPLETE:
+ return "I40E_ERR_AUTONEG_NOT_COMPLETE";
+ case I40E_ERR_RESET_FAILED:
+ return "I40E_ERR_RESET_FAILED";
+ case I40E_ERR_SWFW_SYNC:
+ return "I40E_ERR_SWFW_SYNC";
+ case I40E_ERR_NO_AVAILABLE_VSI:
+ return "I40E_ERR_NO_AVAILABLE_VSI";
+ case I40E_ERR_NO_MEMORY:
+ return "I40E_ERR_NO_MEMORY";
+ case I40E_ERR_BAD_PTR:
+ return "I40E_ERR_BAD_PTR";
+ case I40E_ERR_RING_FULL:
+ return "I40E_ERR_RING_FULL";
+ case I40E_ERR_INVALID_PD_ID:
+ return "I40E_ERR_INVALID_PD_ID";
+ case I40E_ERR_INVALID_QP_ID:
+ return "I40E_ERR_INVALID_QP_ID";
+ case I40E_ERR_INVALID_CQ_ID:
+ return "I40E_ERR_INVALID_CQ_ID";
+ case I40E_ERR_INVALID_CEQ_ID:
+ return "I40E_ERR_INVALID_CEQ_ID";
+ case I40E_ERR_INVALID_AEQ_ID:
+ return "I40E_ERR_INVALID_AEQ_ID";
+ case I40E_ERR_INVALID_SIZE:
+ return "I40E_ERR_INVALID_SIZE";
+ case I40E_ERR_INVALID_ARP_INDEX:
+ return "I40E_ERR_INVALID_ARP_INDEX";
+ case I40E_ERR_INVALID_FPM_FUNC_ID:
+ return "I40E_ERR_INVALID_FPM_FUNC_ID";
+ case I40E_ERR_QP_INVALID_MSG_SIZE:
+ return "I40E_ERR_QP_INVALID_MSG_SIZE";
+ case I40E_ERR_QP_TOOMANY_WRS_POSTED:
+ return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
+ case I40E_ERR_INVALID_FRAG_COUNT:
+ return "I40E_ERR_INVALID_FRAG_COUNT";
+ case I40E_ERR_QUEUE_EMPTY:
+ return "I40E_ERR_QUEUE_EMPTY";
+ case I40E_ERR_INVALID_ALIGNMENT:
+ return "I40E_ERR_INVALID_ALIGNMENT";
+ case I40E_ERR_FLUSHED_QUEUE:
+ return "I40E_ERR_FLUSHED_QUEUE";
+ case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
+ return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
+ case I40E_ERR_INVALID_IMM_DATA_SIZE:
+ return "I40E_ERR_INVALID_IMM_DATA_SIZE";
+ case I40E_ERR_TIMEOUT:
+ return "I40E_ERR_TIMEOUT";
+ case I40E_ERR_OPCODE_MISMATCH:
+ return "I40E_ERR_OPCODE_MISMATCH";
+ case I40E_ERR_CQP_COMPL_ERROR:
+ return "I40E_ERR_CQP_COMPL_ERROR";
+ case I40E_ERR_INVALID_VF_ID:
+ return "I40E_ERR_INVALID_VF_ID";
+ case I40E_ERR_INVALID_HMCFN_ID:
+ return "I40E_ERR_INVALID_HMCFN_ID";
+ case I40E_ERR_BACKING_PAGE_ERROR:
+ return "I40E_ERR_BACKING_PAGE_ERROR";
+ case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
+ return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
+ case I40E_ERR_INVALID_PBLE_INDEX:
+ return "I40E_ERR_INVALID_PBLE_INDEX";
+ case I40E_ERR_INVALID_SD_INDEX:
+ return "I40E_ERR_INVALID_SD_INDEX";
+ case I40E_ERR_INVALID_PAGE_DESC_INDEX:
+ return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
+ case I40E_ERR_INVALID_SD_TYPE:
+ return "I40E_ERR_INVALID_SD_TYPE";
+ case I40E_ERR_MEMCPY_FAILED:
+ return "I40E_ERR_MEMCPY_FAILED";
+ case I40E_ERR_INVALID_HMC_OBJ_INDEX:
+ return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
+ case I40E_ERR_INVALID_HMC_OBJ_COUNT:
+ return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
+ case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
+ return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
+ case I40E_ERR_SRQ_ENABLED:
+ return "I40E_ERR_SRQ_ENABLED";
+ case I40E_ERR_ADMIN_QUEUE_ERROR:
+ return "I40E_ERR_ADMIN_QUEUE_ERROR";
+ case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
+ return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
+ case I40E_ERR_BUF_TOO_SHORT:
+ return "I40E_ERR_BUF_TOO_SHORT";
+ case I40E_ERR_ADMIN_QUEUE_FULL:
+ return "I40E_ERR_ADMIN_QUEUE_FULL";
+ case I40E_ERR_ADMIN_QUEUE_NO_WORK:
+ return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
+ case I40E_ERR_BAD_IWARP_CQE:
+ return "I40E_ERR_BAD_IWARP_CQE";
+ case I40E_ERR_NVM_BLANK_MODE:
+ return "I40E_ERR_NVM_BLANK_MODE";
+ case I40E_ERR_NOT_IMPLEMENTED:
+ return "I40E_ERR_NOT_IMPLEMENTED";
+ case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
+ return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
+ case I40E_ERR_DIAG_TEST_FAILED:
+ return "I40E_ERR_DIAG_TEST_FAILED";
+ case I40E_ERR_NOT_READY:
+ return "I40E_ERR_NOT_READY";
+ case I40E_NOT_SUPPORTED:
+ return "I40E_NOT_SUPPORTED";
+ case I40E_ERR_FIRMWARE_API_VERSION:
+ return "I40E_ERR_FIRMWARE_API_VERSION";
+ case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
+ return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
+ }
+
+ snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
+ return hw->err_str;
+}
+
+/**
+ * i40evf_debug_aq
+ * @hw: pointer to the hw struct
+ * @mask: debug mask
+ * @desc: pointer to admin queue descriptor
+ * @buffer: pointer to command buffer
+ * @buf_len: max length of buffer
+ *
+ * Dumps debug log about adminq command with descriptor contents.
+ **/ +void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, + void *buffer, u16 buf_len) +{ + struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc; + u8 *buf = (u8 *)buffer; + + if ((!(mask & hw->debug_mask)) || (desc == NULL)) + return; + + i40e_debug(hw, mask, + "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", + le16_to_cpu(aq_desc->opcode), + le16_to_cpu(aq_desc->flags), + le16_to_cpu(aq_desc->datalen), + le16_to_cpu(aq_desc->retval)); + i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n", + le32_to_cpu(aq_desc->cookie_high), + le32_to_cpu(aq_desc->cookie_low)); + i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n", + le32_to_cpu(aq_desc->params.internal.param0), + le32_to_cpu(aq_desc->params.internal.param1)); + i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n", + le32_to_cpu(aq_desc->params.external.addr_high), + le32_to_cpu(aq_desc->params.external.addr_low)); + + if ((buffer != NULL) && (aq_desc->datalen != 0)) { + u16 len = le16_to_cpu(aq_desc->datalen); + + i40e_debug(hw, mask, "AQ CMD Buffer:\n"); + if (buf_len < len) + len = buf_len; + /* write the full 16-byte chunks */ + if (hw->debug_mask & mask) { + char prefix[27]; + + snprintf(prefix, sizeof(prefix), + "i40evf %02x:%02x.%x: \t0x", + hw->bus.bus_id, + hw->bus.device, + hw->bus.func); + + print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, + 16, 1, buf, len, false); + } + } +} + +/** + * i40evf_check_asq_alive + * @hw: pointer to the hw struct + * + * Returns true if Queue is enabled else false. + **/ +bool i40evf_check_asq_alive(struct i40e_hw *hw) +{ + if (hw->aq.asq.len) + return !!(rd32(hw, hw->aq.asq.len) & + I40E_VF_ATQLEN1_ATQENABLE_MASK); + else + return false; +} + +/** + * i40evf_aq_queue_shutdown + * @hw: pointer to the hw struct + * @unloading: is the driver unloading itself + * + * Tell the Firmware that we're shutting down the AdminQ and whether + * or not the driver is unloading as well. 
+ **/ +i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, + bool unloading) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_queue_shutdown *cmd = + (struct i40e_aqc_queue_shutdown *)&desc.params.raw; + i40e_status status; + + i40evf_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_queue_shutdown); + + if (unloading) + cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING); + status = i40evf_asq_send_command(hw, &desc, NULL, 0, NULL); + + return status; +} + +/** + * i40e_aq_get_set_rss_lut + * @hw: pointer to the hardware structure + * @vsi_id: vsi fw index + * @pf_lut: for PF table set true, for VSI table set false + * @lut: pointer to the lut buffer provided by the caller + * @lut_size: size of the lut buffer + * @set: set true to set the table, false to get the table + * + * Internal function to get or set RSS look up table + **/ +static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw, + u16 vsi_id, bool pf_lut, + u8 *lut, u16 lut_size, + bool set) +{ + i40e_status status; + struct i40e_aq_desc desc; + struct i40e_aqc_get_set_rss_lut *cmd_resp = + (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw; + + if (set) + i40evf_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_rss_lut); + else + i40evf_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_rss_lut); + + /* Indirect command */ + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); + + cmd_resp->vsi_id = + cpu_to_le16((u16)((vsi_id << + I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) & + I40E_AQC_SET_RSS_LUT_VSI_ID_MASK)); + cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID); + + if (pf_lut) + cmd_resp->flags |= cpu_to_le16((u16) + ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF << + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); + else + cmd_resp->flags |= cpu_to_le16((u16) + ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI << + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); + + status = i40evf_asq_send_command(hw, &desc, lut, lut_size, NULL); + + return status; +} + +/** + * i40evf_aq_get_rss_lut + * @hw: pointer to the hardware structure + * @vsi_id: vsi fw index + * @pf_lut: for PF table set true, for VSI table set false + * @lut: pointer to the lut buffer provided by the caller + * @lut_size: size of the lut buffer + * + * get the RSS lookup table, PF or VSI type + **/ +i40e_status i40evf_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id, + bool pf_lut, u8 *lut, u16 lut_size) +{ + return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, + false); +} + +/** + * i40evf_aq_set_rss_lut + * @hw: pointer to the hardware structure + * @vsi_id: vsi fw index + * @pf_lut: for PF table set true, for VSI table set false + * @lut: pointer to the lut buffer provided by the caller + * @lut_size: size of the lut buffer + * + * set the RSS lookup table, PF or VSI type + **/ +i40e_status i40evf_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id, + bool pf_lut, u8 *lut, u16 lut_size) +{ + return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true); +} + +/** + * i40e_aq_get_set_rss_key + * @hw: pointer to the hw struct + * @vsi_id: vsi fw index + * @key: pointer to key info struct + * @set: set true to set the key, false to get the key + * + * get the RSS key per VSI + **/ +static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw, + u16 vsi_id, + struct i40e_aqc_get_set_rss_key_data *key, + bool set) +{ + i40e_status status; + struct i40e_aq_desc desc; + struct i40e_aqc_get_set_rss_key *cmd_resp = 
+ (struct i40e_aqc_get_set_rss_key *)&desc.params.raw; + u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data); + + if (set) + i40evf_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_rss_key); + else + i40evf_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_rss_key); + + /* Indirect command */ + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); + + cmd_resp->vsi_id = + cpu_to_le16((u16)((vsi_id << + I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) & + I40E_AQC_SET_RSS_KEY_VSI_ID_MASK)); + cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID); + + status = i40evf_asq_send_command(hw, &desc, key, key_size, NULL); + + return status; +} + +/** + * i40evf_aq_get_rss_key + * @hw: pointer to the hw struct + * @vsi_id: vsi fw index + * @key: pointer to key info struct + * + **/ +i40e_status i40evf_aq_get_rss_key(struct i40e_hw *hw, + u16 vsi_id, + struct i40e_aqc_get_set_rss_key_data *key) +{ + return i40e_aq_get_set_rss_key(hw, vsi_id, key, false); +} + +/** + * i40evf_aq_set_rss_key + * @hw: pointer to the hw struct + * @vsi_id: vsi fw index + * @key: pointer to key info struct + * + * set the RSS key per VSI + **/ +i40e_status i40evf_aq_set_rss_key(struct i40e_hw *hw, + u16 vsi_id, + struct i40e_aqc_get_set_rss_key_data *key) +{ + return i40e_aq_get_set_rss_key(hw, vsi_id, key, true); +} + + +/* The i40evf_ptype_lookup table is used to convert from the 8-bit ptype in the + * hardware to a bit-field that can be used by SW to more easily determine the + * packet type. + * + * Macros are used to shorten the table lines and make this table human + * readable. + * + * We store the PTYPE in the top byte of the bit field - this is just so that + * we can check that the table doesn't have a row missing, as the index into + * the table should be the PTYPE. 
+ * + * Typical work flow: + * + * IF NOT i40evf_ptype_lookup[ptype].known + * THEN + * Packet is unknown + * ELSE IF i40evf_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP + * Use the rest of the fields to look at the tunnels, inner protocols, etc + * ELSE + * Use the enum i40e_rx_l2_ptype to decode the packet type + * ENDIF + */ + +/* macro to make the table lines short */ +#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ + { PTYPE, \ + 1, \ + I40E_RX_PTYPE_OUTER_##OUTER_IP, \ + I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \ + I40E_RX_PTYPE_##OUTER_FRAG, \ + I40E_RX_PTYPE_TUNNEL_##T, \ + I40E_RX_PTYPE_TUNNEL_END_##TE, \ + I40E_RX_PTYPE_##TEF, \ + I40E_RX_PTYPE_INNER_PROT_##I, \ + I40E_RX_PTYPE_PAYLOAD_LAYER_##PL } + +#define I40E_PTT_UNUSED_ENTRY(PTYPE) \ + { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 } + +/* shorter macros makes the table fit but are terse */ +#define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG +#define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG +#define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC + +/* Lookup table mapping the HW PTYPE to the bit field for decoding */ +struct i40e_rx_ptype_decoded i40evf_ptype_lookup[] = { + /* L2 Packet types */ + I40E_PTT_UNUSED_ENTRY(0), + I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2), + I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT_UNUSED_ENTRY(4), + I40E_PTT_UNUSED_ENTRY(5), + I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT_UNUSED_ENTRY(8), + I40E_PTT_UNUSED_ENTRY(9), + I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), + I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + + /* Non Tunneled IPv4 */ + I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(25), + I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), + I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), + I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), + + /* IPv4 --> IPv4 */ + I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), + I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), + I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(32), + I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), + I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), + I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), + + /* IPv4 --> IPv6 */ + I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), + I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), + I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(39), + I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), + I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), 
+ I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT */ + I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), + + /* IPv4 --> GRE/NAT --> IPv4 */ + I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), + I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), + I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(47), + I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), + I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), + I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT --> IPv6 */ + I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), + I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), + I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(54), + I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), + I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), + I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT --> MAC */ + I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), + + /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ + I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), + I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), + I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(62), + I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), + I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), + I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ + I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), + I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), + I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(69), + I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), + I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), + I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT --> MAC/VLAN */ + I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), + + /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ + I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), + I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), + I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(77), + I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), + I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), + I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), + + /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ + I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), + I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), + I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(84), + I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), + I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), + I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), + + /* Non Tunneled IPv6 */ + I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3), + I40E_PTT_UNUSED_ENTRY(91), + I40E_PTT(92, IP, IPV6, NOF, 
NONE, NONE, NOF, TCP, PAY4), + I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), + I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), + + /* IPv6 --> IPv4 */ + I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), + I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), + I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(98), + I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), + I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), + I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> IPv6 */ + I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), + I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), + I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(105), + I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), + I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), + I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT */ + I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), + + /* IPv6 --> GRE/NAT -> IPv4 */ + I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), + I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), + I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(113), + I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), + I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), + I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> IPv6 */ + I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), + I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), + I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(120), + I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), + I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), + I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC */ + I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), + + /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ + I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), + I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), + I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(128), + I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), + I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), + I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC -> IPv6 */ + I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), + I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), + I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(135), + I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), + I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), + I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC/VLAN */ + I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), + + /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ + I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), + I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), + I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(143), + I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, 
IPV4, NOF, TCP, PAY4), + I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), + I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ + I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), + I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), + I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(150), + I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), + I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), + I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), + + /* unused entries */ + I40E_PTT_UNUSED_ENTRY(154), + I40E_PTT_UNUSED_ENTRY(155), + I40E_PTT_UNUSED_ENTRY(156), + I40E_PTT_UNUSED_ENTRY(157), + I40E_PTT_UNUSED_ENTRY(158), + I40E_PTT_UNUSED_ENTRY(159), + + I40E_PTT_UNUSED_ENTRY(160), + I40E_PTT_UNUSED_ENTRY(161), + I40E_PTT_UNUSED_ENTRY(162), + I40E_PTT_UNUSED_ENTRY(163), + I40E_PTT_UNUSED_ENTRY(164), + I40E_PTT_UNUSED_ENTRY(165), + I40E_PTT_UNUSED_ENTRY(166), + I40E_PTT_UNUSED_ENTRY(167), + I40E_PTT_UNUSED_ENTRY(168), + I40E_PTT_UNUSED_ENTRY(169), + + I40E_PTT_UNUSED_ENTRY(170), + I40E_PTT_UNUSED_ENTRY(171), + I40E_PTT_UNUSED_ENTRY(172), + I40E_PTT_UNUSED_ENTRY(173), + I40E_PTT_UNUSED_ENTRY(174), + I40E_PTT_UNUSED_ENTRY(175), + I40E_PTT_UNUSED_ENTRY(176), + I40E_PTT_UNUSED_ENTRY(177), + I40E_PTT_UNUSED_ENTRY(178), + I40E_PTT_UNUSED_ENTRY(179), + + I40E_PTT_UNUSED_ENTRY(180), + I40E_PTT_UNUSED_ENTRY(181), + I40E_PTT_UNUSED_ENTRY(182), + I40E_PTT_UNUSED_ENTRY(183), + I40E_PTT_UNUSED_ENTRY(184), + I40E_PTT_UNUSED_ENTRY(185), + I40E_PTT_UNUSED_ENTRY(186), + I40E_PTT_UNUSED_ENTRY(187), + I40E_PTT_UNUSED_ENTRY(188), + I40E_PTT_UNUSED_ENTRY(189), + + I40E_PTT_UNUSED_ENTRY(190), + I40E_PTT_UNUSED_ENTRY(191), + I40E_PTT_UNUSED_ENTRY(192), + I40E_PTT_UNUSED_ENTRY(193), + I40E_PTT_UNUSED_ENTRY(194), + I40E_PTT_UNUSED_ENTRY(195), + I40E_PTT_UNUSED_ENTRY(196), + I40E_PTT_UNUSED_ENTRY(197), + I40E_PTT_UNUSED_ENTRY(198), + I40E_PTT_UNUSED_ENTRY(199), + + I40E_PTT_UNUSED_ENTRY(200), + I40E_PTT_UNUSED_ENTRY(201), + I40E_PTT_UNUSED_ENTRY(202), + I40E_PTT_UNUSED_ENTRY(203), + I40E_PTT_UNUSED_ENTRY(204), + I40E_PTT_UNUSED_ENTRY(205), + I40E_PTT_UNUSED_ENTRY(206), + I40E_PTT_UNUSED_ENTRY(207), + I40E_PTT_UNUSED_ENTRY(208), + I40E_PTT_UNUSED_ENTRY(209), + + I40E_PTT_UNUSED_ENTRY(210), + I40E_PTT_UNUSED_ENTRY(211), + I40E_PTT_UNUSED_ENTRY(212), + I40E_PTT_UNUSED_ENTRY(213), + I40E_PTT_UNUSED_ENTRY(214), + I40E_PTT_UNUSED_ENTRY(215), + I40E_PTT_UNUSED_ENTRY(216), + I40E_PTT_UNUSED_ENTRY(217), + I40E_PTT_UNUSED_ENTRY(218), + I40E_PTT_UNUSED_ENTRY(219), + + I40E_PTT_UNUSED_ENTRY(220), + I40E_PTT_UNUSED_ENTRY(221), + I40E_PTT_UNUSED_ENTRY(222), + I40E_PTT_UNUSED_ENTRY(223), + I40E_PTT_UNUSED_ENTRY(224), + I40E_PTT_UNUSED_ENTRY(225), + I40E_PTT_UNUSED_ENTRY(226), + I40E_PTT_UNUSED_ENTRY(227), + I40E_PTT_UNUSED_ENTRY(228), + I40E_PTT_UNUSED_ENTRY(229), + + I40E_PTT_UNUSED_ENTRY(230), + I40E_PTT_UNUSED_ENTRY(231), + I40E_PTT_UNUSED_ENTRY(232), + I40E_PTT_UNUSED_ENTRY(233), + I40E_PTT_UNUSED_ENTRY(234), + I40E_PTT_UNUSED_ENTRY(235), + I40E_PTT_UNUSED_ENTRY(236), + I40E_PTT_UNUSED_ENTRY(237), + I40E_PTT_UNUSED_ENTRY(238), + I40E_PTT_UNUSED_ENTRY(239), + + I40E_PTT_UNUSED_ENTRY(240), + I40E_PTT_UNUSED_ENTRY(241), + I40E_PTT_UNUSED_ENTRY(242), + I40E_PTT_UNUSED_ENTRY(243), + I40E_PTT_UNUSED_ENTRY(244), + I40E_PTT_UNUSED_ENTRY(245), + I40E_PTT_UNUSED_ENTRY(246), + 
I40E_PTT_UNUSED_ENTRY(247), + I40E_PTT_UNUSED_ENTRY(248), + I40E_PTT_UNUSED_ENTRY(249), + + I40E_PTT_UNUSED_ENTRY(250), + I40E_PTT_UNUSED_ENTRY(251), + I40E_PTT_UNUSED_ENTRY(252), + I40E_PTT_UNUSED_ENTRY(253), + I40E_PTT_UNUSED_ENTRY(254), + I40E_PTT_UNUSED_ENTRY(255) +}; + +/** + * i40evf_aq_rx_ctl_read_register - use FW to read from an Rx control register + * @hw: pointer to the hw struct + * @reg_addr: register address + * @reg_val: ptr to register value + * @cmd_details: pointer to command details structure or NULL + * + * Use the firmware to read the Rx control register, + * especially useful if the Rx unit is under heavy pressure + **/ +i40e_status i40evf_aq_rx_ctl_read_register(struct i40e_hw *hw, + u32 reg_addr, u32 *reg_val, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp = + (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; + i40e_status status; + + if (!reg_val) + return I40E_ERR_PARAM; + + i40evf_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_rx_ctl_reg_read); + + cmd_resp->address = cpu_to_le32(reg_addr); + + status = i40evf_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (status == 0) + *reg_val = le32_to_cpu(cmd_resp->value); + + return status; +} + +/** + * i40evf_read_rx_ctl - read from an Rx control register + * @hw: pointer to the hw struct + * @reg_addr: register address + **/ +u32 i40evf_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr) +{ + i40e_status status = 0; + bool use_register; + int retry = 5; + u32 val = 0; + + use_register = (((hw->aq.api_maj_ver == 1) && + (hw->aq.api_min_ver < 5)) || + (hw->mac.type == I40E_MAC_X722)); + if (!use_register) { +do_retry: + status = i40evf_aq_rx_ctl_read_register(hw, reg_addr, + &val, NULL); + if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) { + usleep_range(1000, 2000); + retry--; + goto do_retry; + } + } + + /* if the AQ access failed, try the old-fashioned way */ + if (status || use_register) + val = rd32(hw, reg_addr); + + return val; +} + +/** + * i40evf_aq_rx_ctl_write_register + * @hw: pointer to the hw struct + * @reg_addr: register address + * @reg_val: register value + * @cmd_details: pointer to command details structure or NULL + * + * Use the firmware to write to an Rx control register, + * especially useful if the Rx unit is under heavy pressure + **/ +i40e_status i40evf_aq_rx_ctl_write_register(struct i40e_hw *hw, + u32 reg_addr, u32 reg_val, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_rx_ctl_reg_read_write *cmd = + (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; + i40e_status status; + + i40evf_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_rx_ctl_reg_write); + + cmd->address = cpu_to_le32(reg_addr); + cmd->value = cpu_to_le32(reg_val); + + status = i40evf_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40evf_write_rx_ctl - write to an Rx control register + * @hw: pointer to the hw struct + * @reg_addr: register address + * @reg_val: register value + **/ +void i40evf_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val) +{ + i40e_status status = 0; + bool use_register; + int retry = 5; + + use_register = (((hw->aq.api_maj_ver == 1) && + (hw->aq.api_min_ver < 5)) || + (hw->mac.type == I40E_MAC_X722)); + if (!use_register) { +do_retry: + status = i40evf_aq_rx_ctl_write_register(hw, reg_addr, + reg_val, NULL); + if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) { + usleep_range(1000, 2000); + 
retry--; + goto do_retry; + } + } + + /* if the AQ access failed, try the old-fashioned way */ + if (status || use_register) + wr32(hw, reg_addr, reg_val); +} + +/** + * i40e_aq_send_msg_to_pf + * @hw: pointer to the hardware structure + * @v_opcode: opcodes for VF-PF communication + * @v_retval: return error code + * @msg: pointer to the msg buffer + * @msglen: msg length + * @cmd_details: pointer to command details + * + * Send message to PF driver using admin queue. By default, this message + * is sent asynchronously, i.e. i40evf_asq_send_command() does not wait for + * completion before returning. + **/ +i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw, + enum virtchnl_ops v_opcode, + i40e_status v_retval, + u8 *msg, u16 msglen, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_asq_cmd_details details; + i40e_status status; + + i40evf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_pf); + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI); + desc.cookie_high = cpu_to_le32(v_opcode); + desc.cookie_low = cpu_to_le32(v_retval); + if (msglen) { + desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF + | I40E_AQ_FLAG_RD)); + if (msglen > I40E_AQ_LARGE_BUF) + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + desc.datalen = cpu_to_le16(msglen); + } + if (!cmd_details) { + memset(&details, 0, sizeof(details)); + details.async = true; + cmd_details = &details; + } + status = i40evf_asq_send_command(hw, &desc, msg, msglen, cmd_details); + return status; +} + +/** + * i40e_vf_parse_hw_config + * @hw: pointer to the hardware structure + * @msg: pointer to the virtual channel VF resource structure + * + * Given a VF resource message from the PF, populate the hw struct + * with appropriate information. + **/ +void i40e_vf_parse_hw_config(struct i40e_hw *hw, + struct virtchnl_vf_resource *msg) +{ + struct virtchnl_vsi_resource *vsi_res; + int i; + + vsi_res = &msg->vsi_res[0]; + + hw->dev_caps.num_vsis = msg->num_vsis; + hw->dev_caps.num_rx_qp = msg->num_queue_pairs; + hw->dev_caps.num_tx_qp = msg->num_queue_pairs; + hw->dev_caps.num_msix_vectors_vf = msg->max_vectors; + hw->dev_caps.dcb = msg->vf_cap_flags & + VIRTCHNL_VF_OFFLOAD_L2; + hw->dev_caps.fcoe = 0; + for (i = 0; i < msg->num_vsis; i++) { + if (vsi_res->vsi_type == VIRTCHNL_VSI_SRIOV) { + ether_addr_copy(hw->mac.perm_addr, + vsi_res->default_mac_addr); + ether_addr_copy(hw->mac.addr, + vsi_res->default_mac_addr); + } + vsi_res++; + } +} + +/** + * i40e_vf_reset + * @hw: pointer to the hardware structure + * + * Send a VF_RESET message to the PF. Does not wait for response from PF + * as none will be forthcoming. Immediately after calling this function, + * the admin queue should be shut down and (optionally) reinitialized. 
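For illustration, a minimal caller of i40e_aq_send_msg_to_pf() might look like the sketch below (not part of this patch; the function name is hypothetical, and it mirrors the driver's usual virtchnl version-negotiation request):

static i40e_status example_send_api_ver(struct i40e_hw *hw)
{
	/* Opcode and payload type come from <linux/avf/virtchnl.h>.
	 * With cmd_details == NULL the message goes out asynchronously;
	 * the PF's answer arrives later as an ARQ event whose cookie
	 * carries VIRTCHNL_OP_VERSION.
	 */
	struct virtchnl_version_info vvi = {
		.major = VIRTCHNL_VERSION_MAJOR,
		.minor = VIRTCHNL_VERSION_MINOR,
	};

	return i40e_aq_send_msg_to_pf(hw, VIRTCHNL_OP_VERSION, 0,
				      (u8 *)&vvi, sizeof(vvi), NULL);
}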
+ **/ +i40e_status i40e_vf_reset(struct i40e_hw *hw) +{ + return i40e_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF, + 0, NULL, 0, NULL); +} + +/** + * i40evf_aq_write_ddp - Write dynamic device personalization (ddp) + * @hw: pointer to the hw struct + * @buff: command buffer (size in bytes = buff_size) + * @buff_size: buffer size in bytes + * @track_id: package tracking id + * @error_offset: returns error offset + * @error_info: returns error information + * @cmd_details: pointer to command details structure or NULL + **/ +enum +i40e_status_code i40evf_aq_write_ddp(struct i40e_hw *hw, void *buff, + u16 buff_size, u32 track_id, + u32 *error_offset, u32 *error_info, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_write_personalization_profile *cmd = + (struct i40e_aqc_write_personalization_profile *) + &desc.params.raw; + struct i40e_aqc_write_ddp_resp *resp; + i40e_status status; + + i40evf_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_write_personalization_profile); + + desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD); + if (buff_size > I40E_AQ_LARGE_BUF) + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + + desc.datalen = cpu_to_le16(buff_size); + + cmd->profile_track_id = cpu_to_le32(track_id); + + status = i40evf_asq_send_command(hw, &desc, buff, buff_size, cmd_details); + if (!status) { + resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw; + if (error_offset) + *error_offset = le32_to_cpu(resp->error_offset); + if (error_info) + *error_info = le32_to_cpu(resp->error_info); + } + + return status; +} + +/** + * i40evf_aq_get_ddp_list - Read dynamic device personalization (ddp) + * @hw: pointer to the hw struct + * @buff: command buffer (size in bytes = buff_size) + * @buff_size: buffer size in bytes + * @flags: AdminQ command flags + * @cmd_details: pointer to command details structure or NULL + **/ +enum +i40e_status_code i40evf_aq_get_ddp_list(struct i40e_hw *hw, void *buff, + u16 buff_size, u8 flags, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_get_applied_profiles *cmd = + (struct i40e_aqc_get_applied_profiles *)&desc.params.raw; + i40e_status status; + + i40evf_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_personalization_profile_list); + + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + if (buff_size > I40E_AQ_LARGE_BUF) + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + desc.datalen = cpu_to_le16(buff_size); + + cmd->flags = flags; + + status = i40evf_asq_send_command(hw, &desc, buff, buff_size, cmd_details); + + return status; +} + +/** + * i40evf_find_segment_in_package + * @segment_type: the segment type to search for (i.e., SEGMENT_TYPE_I40E) + * @pkg_hdr: pointer to the package header to be searched + * + * This function searches a package file for a particular segment type. On + * success it returns a pointer to the segment header, otherwise it will + * return NULL. 
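A hypothetical caller of the DDP list query above could be sketched as follows (illustrative only; the 4 KB buffer size and the zero flags value are arbitrary choices here):

static i40e_status example_get_ddp_list(struct i40e_hw *hw)
{
	i40e_status status;
	u8 *buff;

	buff = kzalloc(4096, GFP_KERNEL);
	if (!buff)
		return I40E_ERR_NO_MEMORY;

	status = i40evf_aq_get_ddp_list(hw, buff, 4096, 0, NULL);
	/* on success the buffer now holds the applied-profile list */
	kfree(buff);
	return status;
}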
+ **/ +struct i40e_generic_seg_header * +i40evf_find_segment_in_package(u32 segment_type, + struct i40e_package_header *pkg_hdr) +{ + struct i40e_generic_seg_header *segment; + u32 i; + + /* Search all package segments for the requested segment type */ + for (i = 0; i < pkg_hdr->segment_count; i++) { + segment = + (struct i40e_generic_seg_header *)((u8 *)pkg_hdr + + pkg_hdr->segment_offset[i]); + + if (segment->type == segment_type) + return segment; + } + + return NULL; +} + +/** + * i40evf_write_profile + * @hw: pointer to the hardware structure + * @profile: pointer to the profile segment of the package to be downloaded + * @track_id: package tracking id + * + * Handles the download of a complete package. + */ +enum i40e_status_code +i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, + u32 track_id) +{ + i40e_status status = 0; + struct i40e_section_table *sec_tbl; + struct i40e_profile_section_header *sec = NULL; + u32 dev_cnt; + u32 vendor_dev_id; + u32 *nvm; + u32 section_size = 0; + u32 offset = 0, info = 0; + u32 i; + + dev_cnt = profile->device_table_count; + + for (i = 0; i < dev_cnt; i++) { + vendor_dev_id = profile->device_table[i].vendor_dev_id; + if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL) + if (hw->device_id == (vendor_dev_id & 0xFFFF)) + break; + } + if (i == dev_cnt) { + i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support DDP"); + return I40E_ERR_DEVICE_NOT_SUPPORTED; + } + + nvm = (u32 *)&profile->device_table[dev_cnt]; + sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; + + for (i = 0; i < sec_tbl->section_count; i++) { + sec = (struct i40e_profile_section_header *)((u8 *)profile + + sec_tbl->section_offset[i]); + + /* Skip 'AQ', 'note' and 'name' sections */ + if (sec->section.type != SECTION_TYPE_MMIO) + continue; + + section_size = sec->section.size + + sizeof(struct i40e_profile_section_header); + + /* Write profile */ + status = i40evf_aq_write_ddp(hw, (void *)sec, (u16)section_size, + track_id, &offset, &info, NULL); + if (status) { + i40e_debug(hw, I40E_DEBUG_PACKAGE, + "Failed to write profile: offset %d, info %d", + offset, info); + break; + } + } + return status; +} + +/** + * i40evf_add_pinfo_to_list + * @hw: pointer to the hardware structure + * @profile: pointer to the profile segment of the package + * @profile_info_sec: buffer for information section + * @track_id: package tracking id + * + * Register a profile to the list of loaded profiles. 
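Putting the three DDP helpers together, a hypothetical load sequence might read as below (a sketch, assuming the caller has already validated the package image and allocated profile_info_sec large enough for a section header plus a struct i40e_profile_info):

static enum i40e_status_code
example_load_ddp(struct i40e_hw *hw, struct i40e_package_header *pkg,
		 u8 *profile_info_sec, u32 track_id)
{
	struct i40e_profile_segment *profile;
	enum i40e_status_code status;

	profile = (struct i40e_profile_segment *)
		i40evf_find_segment_in_package(SEGMENT_TYPE_I40E, pkg);
	if (!profile)
		return I40E_ERR_PARAM;

	status = i40evf_write_profile(hw, profile, track_id);
	if (status)
		return status;

	/* record the profile so later loads can detect it */
	return i40evf_add_pinfo_to_list(hw, profile, profile_info_sec,
					track_id);
}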
+ */ +enum i40e_status_code +i40evf_add_pinfo_to_list(struct i40e_hw *hw, + struct i40e_profile_segment *profile, + u8 *profile_info_sec, u32 track_id) +{ + i40e_status status = 0; + struct i40e_profile_section_header *sec = NULL; + struct i40e_profile_info *pinfo; + u32 offset = 0, info = 0; + + sec = (struct i40e_profile_section_header *)profile_info_sec; + sec->tbl_size = 1; + sec->data_end = sizeof(struct i40e_profile_section_header) + + sizeof(struct i40e_profile_info); + sec->section.type = SECTION_TYPE_INFO; + sec->section.offset = sizeof(struct i40e_profile_section_header); + sec->section.size = sizeof(struct i40e_profile_info); + pinfo = (struct i40e_profile_info *)(profile_info_sec + + sec->section.offset); + pinfo->track_id = track_id; + pinfo->version = profile->version; + pinfo->op = I40E_DDP_ADD_TRACKID; + memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE); + + status = i40evf_aq_write_ddp(hw, (void *)sec, sec->data_end, + track_id, &offset, &info, NULL); + return status; +} diff --git a/drivers/net/ethernet/intel/iavf/i40e_devids.h b/drivers/net/ethernet/intel/iavf/i40e_devids.h new file mode 100644 index 000000000000..f300bf271824 --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/i40e_devids.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ + +#ifndef _I40E_DEVIDS_H_ +#define _I40E_DEVIDS_H_ + +/* Device IDs */ +#define I40E_DEV_ID_SFP_XL710 0x1572 +#define I40E_DEV_ID_QEMU 0x1574 +#define I40E_DEV_ID_KX_B 0x1580 +#define I40E_DEV_ID_KX_C 0x1581 +#define I40E_DEV_ID_QSFP_A 0x1583 +#define I40E_DEV_ID_QSFP_B 0x1584 +#define I40E_DEV_ID_QSFP_C 0x1585 +#define I40E_DEV_ID_10G_BASE_T 0x1586 +#define I40E_DEV_ID_20G_KR2 0x1587 +#define I40E_DEV_ID_20G_KR2_A 0x1588 +#define I40E_DEV_ID_10G_BASE_T4 0x1589 +#define I40E_DEV_ID_25G_B 0x158A +#define I40E_DEV_ID_25G_SFP28 0x158B +#define I40E_DEV_ID_VF 0x154C +#define I40E_DEV_ID_VF_HV 0x1571 +#define I40E_DEV_ID_ADAPTIVE_VF 0x1889 +#define I40E_DEV_ID_SFP_X722 0x37D0 +#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1 +#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 +#define I40E_DEV_ID_SFP_I_X722 0x37D3 +#define I40E_DEV_ID_X722_VF 0x37CD + +#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \ + (d) == I40E_DEV_ID_QSFP_B || \ + (d) == I40E_DEV_ID_QSFP_C) + +#endif /* _I40E_DEVIDS_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/i40e_hmc.h b/drivers/net/ethernet/intel/iavf/i40e_hmc.h new file mode 100644 index 000000000000..1c78de838857 --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/i40e_hmc.h @@ -0,0 +1,215 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ + +#ifndef _I40E_HMC_H_ +#define _I40E_HMC_H_ + +#define I40E_HMC_MAX_BP_COUNT 512 + +/* forward-declare the HW struct for the compiler */ +struct i40e_hw; + +#define I40E_HMC_INFO_SIGNATURE 0x484D5347 /* HMSG */ +#define I40E_HMC_PD_CNT_IN_SD 512 +#define I40E_HMC_DIRECT_BP_SIZE 0x200000 /* 2M */ +#define I40E_HMC_PAGED_BP_SIZE 4096 +#define I40E_HMC_PD_BP_BUF_ALIGNMENT 4096 +#define I40E_FIRST_VF_FPM_ID 16 + +struct i40e_hmc_obj_info { + u64 base; /* base addr in FPM */ + u32 max_cnt; /* max count available for this hmc func */ + u32 cnt; /* count of objects driver actually wants to create */ + u64 size; /* size in bytes of one object */ +}; + +enum i40e_sd_entry_type { + I40E_SD_TYPE_INVALID = 0, + I40E_SD_TYPE_PAGED = 1, + I40E_SD_TYPE_DIRECT = 2 +}; + +struct i40e_hmc_bp { + enum i40e_sd_entry_type entry_type; + struct i40e_dma_mem addr; /* populate to be used by hw */ + u32 sd_pd_index; + u32 ref_cnt; +}; + +struct i40e_hmc_pd_entry { + struct i40e_hmc_bp bp; + u32 sd_index; + bool rsrc_pg; + bool valid; +}; + +struct i40e_hmc_pd_table { + struct i40e_dma_mem pd_page_addr; /* populate to be used by hw */ + struct i40e_hmc_pd_entry *pd_entry; /* [512] for sw book keeping */ + struct i40e_virt_mem pd_entry_virt_mem; /* virt mem for pd_entry */ + + u32 ref_cnt; + u32 sd_index; +}; + +struct i40e_hmc_sd_entry { + enum i40e_sd_entry_type entry_type; + bool valid; + + union { + struct i40e_hmc_pd_table pd_table; + struct i40e_hmc_bp bp; + } u; +}; + +struct i40e_hmc_sd_table { + struct i40e_virt_mem addr; /* used to track sd_entry allocations */ + u32 sd_cnt; + u32 ref_cnt; + struct i40e_hmc_sd_entry *sd_entry; /* (sd_cnt*512) entries max */ +}; + +struct i40e_hmc_info { + u32 signature; + /* equals to pci func num for PF and dynamically allocated for VFs */ + u8 hmc_fn_id; + u16 first_sd_index; /* index of the first available SD */ + + /* hmc objects */ + struct i40e_hmc_obj_info *hmc_obj; + struct i40e_virt_mem hmc_obj_virt_mem; + struct i40e_hmc_sd_table sd_table; +}; + +#define I40E_INC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt++) +#define I40E_INC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt++) +#define I40E_INC_BP_REFCNT(bp) ((bp)->ref_cnt++) + +#define I40E_DEC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt--) +#define I40E_DEC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt--) +#define I40E_DEC_BP_REFCNT(bp) ((bp)->ref_cnt--) + +/** + * I40E_SET_PF_SD_ENTRY - marks the sd entry as valid in the hardware + * @hw: pointer to our hw struct + * @pa: pointer to physical address + * @sd_index: segment descriptor index + * @type: if sd entry is direct or paged + **/ +#define I40E_SET_PF_SD_ENTRY(hw, pa, sd_index, type) \ +{ \ + u32 val1, val2, val3; \ + val1 = (u32)(upper_32_bits(pa)); \ + val2 = (u32)(pa) | (I40E_HMC_MAX_BP_COUNT << \ + I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \ + ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \ + I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \ + BIT(I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \ + val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \ + wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \ + wr32((hw), I40E_PFHMC_SDDATALOW, val2); \ + wr32((hw), I40E_PFHMC_SDCMD, val3); \ +} + +/** + * I40E_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware + * @hw: pointer to our hw struct + * @sd_index: segment descriptor index + * @type: if sd entry is direct or paged + **/ +#define I40E_CLEAR_PF_SD_ENTRY(hw, sd_index, type) \ +{ \ + u32 val2, val3; \ + val2 = (I40E_HMC_MAX_BP_COUNT << \ + I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \ + ((((type) == I40E_SD_TYPE_PAGED) ? 
0 : 1) << \ + I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \ + val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \ + wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \ + wr32((hw), I40E_PFHMC_SDDATALOW, val2); \ + wr32((hw), I40E_PFHMC_SDCMD, val3); \ +} + +/** + * I40E_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware + * @hw: pointer to our hw struct + * @sd_idx: segment descriptor index + * @pd_idx: page descriptor index + **/ +#define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \ + wr32((hw), I40E_PFHMC_PDINV, \ + (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \ + ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT))) + +/** + * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit + * @hmc_info: pointer to the HMC configuration information structure + * @type: type of HMC resources we're searching + * @index: starting index for the object + * @cnt: number of objects we're trying to create + * @sd_idx: pointer to return index of the segment descriptor in question + * @sd_limit: pointer to return the maximum number of segment descriptors + * + * This function calculates the segment descriptor index and index limit + * for the resource defined by i40e_hmc_rsrc_type. + **/ +#define I40E_FIND_SD_INDEX_LIMIT(hmc_info, type, index, cnt, sd_idx, sd_limit)\ +{ \ + u64 fpm_addr, fpm_limit; \ + fpm_addr = (hmc_info)->hmc_obj[(type)].base + \ + (hmc_info)->hmc_obj[(type)].size * (index); \ + fpm_limit = fpm_addr + (hmc_info)->hmc_obj[(type)].size * (cnt);\ + *(sd_idx) = (u32)(fpm_addr / I40E_HMC_DIRECT_BP_SIZE); \ + *(sd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_DIRECT_BP_SIZE); \ + /* add one more to the limit to correct our range */ \ + *(sd_limit) += 1; \ +} + +/** + * I40E_FIND_PD_INDEX_LIMIT - finds page descriptor index limit + * @hmc_info: pointer to the HMC configuration information struct + * @type: HMC resource type we're examining + * @idx: starting index for the object + * @cnt: number of objects we're trying to create + * @pd_index: pointer to return page descriptor index + * @pd_limit: pointer to return page descriptor index limit + * + * Calculates the page descriptor index and index limit for the resource + * defined by i40e_hmc_rsrc_type. 
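To make the FPM address arithmetic of these two macros concrete, here is a worked example with illustrative values:

/* Take an object type with FPM base 0 and 128-byte entries, starting
 * at index 40000 for a count of 1000: fpm_addr = 5120000 and
 * fpm_limit = 5248000.  With 2 MB direct backing pages
 * (I40E_HMC_DIRECT_BP_SIZE):
 *	sd_idx   = 5120000 / 2097152       = 2
 *	sd_limit = (5248000 - 1) / 2097152 = 2, plus 1 -> 3
 * With 4 KB paged backing pages (I40E_HMC_PAGED_BP_SIZE):
 *	pd_index = 5120000 / 4096          = 1250
 *	pd_limit = (5248000 - 1) / 4096    = 1281, plus 1 -> 1282
 * so the objects span SD 2 only, and PDs 1250..1281 inclusive.
 */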
+ **/ +#define I40E_FIND_PD_INDEX_LIMIT(hmc_info, type, idx, cnt, pd_index, pd_limit)\ +{ \ + u64 fpm_adr, fpm_limit; \ + fpm_adr = (hmc_info)->hmc_obj[(type)].base + \ + (hmc_info)->hmc_obj[(type)].size * (idx); \ + fpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt); \ + *(pd_index) = (u32)(fpm_adr / I40E_HMC_PAGED_BP_SIZE); \ + *(pd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_PAGED_BP_SIZE); \ + /* add one more to the limit to correct our range */ \ + *(pd_limit) += 1; \ +} +i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 sd_index, + enum i40e_sd_entry_type type, + u64 direct_mode_sz); + +i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 pd_index, + struct i40e_dma_mem *rsrc_pg); +i40e_status i40e_remove_pd_bp(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx); +i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info, + u32 idx); +i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx, bool is_pf); +i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info, + u32 idx); +i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx, bool is_pf); + +#endif /* _I40E_HMC_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/i40e_lan_hmc.h b/drivers/net/ethernet/intel/iavf/i40e_lan_hmc.h new file mode 100644 index 000000000000..82b00f70a632 --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/i40e_lan_hmc.h @@ -0,0 +1,158 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ + +#ifndef _I40E_LAN_HMC_H_ +#define _I40E_LAN_HMC_H_ + +/* forward-declare the HW struct for the compiler */ +struct i40e_hw; + +/* HMC element context information */ + +/* Rx queue context data + * + * The sizes of the variables may be larger than needed due to crossing byte + * boundaries. If we do not have the width of the variable set to the correct + * size then we could end up shifting bits off the top of the variable when the + * variable is at the top of a byte and crosses over into the next byte. + */ +struct i40e_hmc_obj_rxq { + u16 head; + u16 cpuid; /* bigger than needed, see above for reason */ + u64 base; + u16 qlen; +#define I40E_RXQ_CTX_DBUFF_SHIFT 7 + u16 dbuff; /* bigger than needed, see above for reason */ +#define I40E_RXQ_CTX_HBUFF_SHIFT 6 + u16 hbuff; /* bigger than needed, see above for reason */ + u8 dtype; + u8 dsize; + u8 crcstrip; + u8 fc_ena; + u8 l2tsel; + u8 hsplit_0; + u8 hsplit_1; + u8 showiv; + u32 rxmax; /* bigger than needed, see above for reason */ + u8 tphrdesc_ena; + u8 tphwdesc_ena; + u8 tphdata_ena; + u8 tphhead_ena; + u16 lrxqthresh; /* bigger than needed, see above for reason */ + u8 prefena; /* NOTE: normally must be set to 1 at init */ +}; + +/* Tx queue context data +* +* The sizes of the variables may be larger than needed due to crossing byte +* boundaries. If we do not have the width of the variable set to the correct +* size then we could end up shifting bits off the top of the variable when the +* variable is at the top of a byte and crosses over into the next byte. 
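As a concrete illustration of the shift-based units used by these context fields, a sketch of how a queue context is typically filled (the rx_ctx, rx_buf_len and hdr_buf_len names are hypothetical, and this programming happens on the PF side rather than in this VF driver):

/* With I40E_RXQ_CTX_DBUFF_SHIFT == 7, data buffer sizes are written
 * in 128-byte units, so a 2048-byte buffer is programmed as
 * 2048 >> 7 == 16; with I40E_RXQ_CTX_HBUFF_SHIFT == 6, a 256-byte
 * header buffer is programmed as 256 >> 6 == 4.
 */
rx_ctx.dbuff = rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
rx_ctx.hbuff = hdr_buf_len >> I40E_RXQ_CTX_HBUFF_SHIFT;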
+*/ +struct i40e_hmc_obj_txq { + u16 head; + u8 new_context; + u64 base; + u8 fc_ena; + u8 timesync_ena; + u8 fd_ena; + u8 alt_vlan_ena; + u16 thead_wb; + u8 cpuid; + u8 head_wb_ena; + u16 qlen; + u8 tphrdesc_ena; + u8 tphrpacket_ena; + u8 tphwdesc_ena; + u64 head_wb_addr; + u32 crc; + u16 rdylist; + u8 rdylist_act; +}; + +/* for hsplit_0 field of Rx HMC context */ +enum i40e_hmc_obj_rx_hsplit_0 { + I40E_HMC_OBJ_RX_HSPLIT_0_NO_SPLIT = 0, + I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 = 1, + I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP = 2, + I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP = 4, + I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP = 8, +}; + +/* fcoe_cntx and fcoe_filt are for debugging purpose only */ +struct i40e_hmc_obj_fcoe_cntx { + u32 rsv[32]; +}; + +struct i40e_hmc_obj_fcoe_filt { + u32 rsv[8]; +}; + +/* Context sizes for LAN objects */ +enum i40e_hmc_lan_object_size { + I40E_HMC_LAN_OBJ_SZ_8 = 0x3, + I40E_HMC_LAN_OBJ_SZ_16 = 0x4, + I40E_HMC_LAN_OBJ_SZ_32 = 0x5, + I40E_HMC_LAN_OBJ_SZ_64 = 0x6, + I40E_HMC_LAN_OBJ_SZ_128 = 0x7, + I40E_HMC_LAN_OBJ_SZ_256 = 0x8, + I40E_HMC_LAN_OBJ_SZ_512 = 0x9, +}; + +#define I40E_HMC_L2OBJ_BASE_ALIGNMENT 512 +#define I40E_HMC_OBJ_SIZE_TXQ 128 +#define I40E_HMC_OBJ_SIZE_RXQ 32 +#define I40E_HMC_OBJ_SIZE_FCOE_CNTX 128 +#define I40E_HMC_OBJ_SIZE_FCOE_FILT 64 + +enum i40e_hmc_lan_rsrc_type { + I40E_HMC_LAN_FULL = 0, + I40E_HMC_LAN_TX = 1, + I40E_HMC_LAN_RX = 2, + I40E_HMC_FCOE_CTX = 3, + I40E_HMC_FCOE_FILT = 4, + I40E_HMC_LAN_MAX = 5 +}; + +enum i40e_hmc_model { + I40E_HMC_MODEL_DIRECT_PREFERRED = 0, + I40E_HMC_MODEL_DIRECT_ONLY = 1, + I40E_HMC_MODEL_PAGED_ONLY = 2, + I40E_HMC_MODEL_UNKNOWN, +}; + +struct i40e_hmc_lan_create_obj_info { + struct i40e_hmc_info *hmc_info; + u32 rsrc_type; + u32 start_idx; + u32 count; + enum i40e_sd_entry_type entry_type; + u64 direct_mode_sz; +}; + +struct i40e_hmc_lan_delete_obj_info { + struct i40e_hmc_info *hmc_info; + u32 rsrc_type; + u32 start_idx; + u32 count; +}; + +i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, + u32 rxq_num, u32 fcoe_cntx_num, + u32 fcoe_filt_num); +i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw, + enum i40e_hmc_model model); +i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw); + +i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw, + u16 queue); +i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw, + u16 queue, + struct i40e_hmc_obj_txq *s); +i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw, + u16 queue); +i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw, + u16 queue, + struct i40e_hmc_obj_rxq *s); + +#endif /* _I40E_LAN_HMC_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/i40e_osdep.h b/drivers/net/ethernet/intel/iavf/i40e_osdep.h new file mode 100644 index 000000000000..3ddddb46455b --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/i40e_osdep.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ + +#ifndef _I40E_OSDEP_H_ +#define _I40E_OSDEP_H_ + +#include <linux/types.h> +#include <linux/if_ether.h> +#include <linux/if_vlan.h> +#include <linux/tcp.h> +#include <linux/pci.h> + +/* get readq/writeq support for 32 bit kernels, use the low-first version */ +#include <linux/io-64-nonatomic-lo-hi.h> + +/* File to be the magic between shared code and + * actual OS primitives + */ + +#define hw_dbg(hw, S, A...)
do {} while (0) + +#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg))) +#define rd32(a, reg) readl((a)->hw_addr + (reg)) + +#define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg))) +#define rd64(a, reg) readq((a)->hw_addr + (reg)) +#define i40e_flush(a) readl((a)->hw_addr + I40E_VFGEN_RSTAT) + +/* memory allocation tracking */ +struct i40e_dma_mem { + void *va; + dma_addr_t pa; + u32 size; +}; + +#define i40e_allocate_dma_mem(h, m, unused, s, a) \ + i40evf_allocate_dma_mem_d(h, m, s, a) +#define i40e_free_dma_mem(h, m) i40evf_free_dma_mem_d(h, m) + +struct i40e_virt_mem { + void *va; + u32 size; +}; +#define i40e_allocate_virt_mem(h, m, s) i40evf_allocate_virt_mem_d(h, m, s) +#define i40e_free_virt_mem(h, m) i40evf_free_virt_mem_d(h, m) + +#define i40e_debug(h, m, s, ...) i40evf_debug_d(h, m, s, ##__VA_ARGS__) +extern void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...) + __attribute__ ((format(gnu_printf, 3, 4))); + +typedef enum i40e_status_code i40e_status; +#endif /* _I40E_OSDEP_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/i40e_prototype.h b/drivers/net/ethernet/intel/iavf/i40e_prototype.h new file mode 100644 index 000000000000..a358f4b9d5aa --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/i40e_prototype.h @@ -0,0 +1,130 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ + +#ifndef _I40E_PROTOTYPE_H_ +#define _I40E_PROTOTYPE_H_ + +#include "i40e_type.h" +#include "i40e_alloc.h" +#include <linux/avf/virtchnl.h> + +/* Prototypes for shared code functions that are not in + * the standard function pointer structures. These are + * mostly because they are needed even before the init + * has happened and will assist in the early SW and FW + * setup. + */ + +/* adminq functions */ +i40e_status i40evf_init_adminq(struct i40e_hw *hw); +i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw); +void i40e_adminq_init_ring_data(struct i40e_hw *hw); +i40e_status i40evf_clean_arq_element(struct i40e_hw *hw, + struct i40e_arq_event_info *e, + u16 *events_pending); +i40e_status i40evf_asq_send_command(struct i40e_hw *hw, + struct i40e_aq_desc *desc, + void *buff, /* can be NULL */ + u16 buff_size, + struct i40e_asq_cmd_details *cmd_details); +bool i40evf_asq_done(struct i40e_hw *hw); + +/* debug function for adminq */ +void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, + void *desc, void *buffer, u16 buf_len); + +void i40e_idle_aq(struct i40e_hw *hw); +void i40evf_resume_aq(struct i40e_hw *hw); +bool i40evf_check_asq_alive(struct i40e_hw *hw); +i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading); +const char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err); +const char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err); + +i40e_status i40evf_aq_get_rss_lut(struct i40e_hw *hw, u16 seid, + bool pf_lut, u8 *lut, u16 lut_size); +i40e_status i40evf_aq_set_rss_lut(struct i40e_hw *hw, u16 seid, + bool pf_lut, u8 *lut, u16 lut_size); +i40e_status i40evf_aq_get_rss_key(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_get_set_rss_key_data *key); +i40e_status i40evf_aq_set_rss_key(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_get_set_rss_key_data *key); + +i40e_status i40e_set_mac_type(struct i40e_hw *hw); + +extern struct i40e_rx_ptype_decoded i40evf_ptype_lookup[]; + +static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype) +{ + return i40evf_ptype_lookup[ptype]; +} + +/* prototype for functions used for SW locks */ + +/* i40e_common for VF drivers*/ +void
i40e_vf_parse_hw_config(struct i40e_hw *hw, + struct virtchnl_vf_resource *msg); +i40e_status i40e_vf_reset(struct i40e_hw *hw); +i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw, + enum virtchnl_ops v_opcode, + i40e_status v_retval, + u8 *msg, u16 msglen, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_set_filter_control(struct i40e_hw *hw, + struct i40e_filter_control_settings *settings); +i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, + u8 *mac_addr, u16 ethtype, u16 flags, + u16 vsi_seid, u16 queue, bool is_add, + struct i40e_control_filter_stats *stats, + struct i40e_asq_cmd_details *cmd_details); +void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, + u16 vsi_seid); +i40e_status i40evf_aq_rx_ctl_read_register(struct i40e_hw *hw, + u32 reg_addr, u32 *reg_val, + struct i40e_asq_cmd_details *cmd_details); +u32 i40evf_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr); +i40e_status i40evf_aq_rx_ctl_write_register(struct i40e_hw *hw, + u32 reg_addr, u32 reg_val, + struct i40e_asq_cmd_details *cmd_details); +void i40evf_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val); +i40e_status i40e_aq_set_phy_register(struct i40e_hw *hw, + u8 phy_select, u8 dev_addr, + u32 reg_addr, u32 reg_val, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw, + u8 phy_select, u8 dev_addr, + u32 reg_addr, u32 *reg_val, + struct i40e_asq_cmd_details *cmd_details); + +i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page, + u16 reg, u8 phy_addr, u16 *value); +i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, + u16 reg, u8 phy_addr, u16 value); +i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page, u16 reg, + u8 phy_addr, u16 *value); +i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg, + u8 phy_addr, u16 value); +u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num); +i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw, + u32 time, u32 interval); +i40e_status i40evf_aq_write_ddp(struct i40e_hw *hw, void *buff, + u16 buff_size, u32 track_id, + u32 *error_offset, u32 *error_info, + struct i40e_asq_cmd_details * + cmd_details); +i40e_status i40evf_aq_get_ddp_list(struct i40e_hw *hw, void *buff, + u16 buff_size, u8 flags, + struct i40e_asq_cmd_details * + cmd_details); +struct i40e_generic_seg_header * +i40evf_find_segment_in_package(u32 segment_type, + struct i40e_package_header *pkg_header); +enum i40e_status_code +i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg, + u32 track_id); +enum i40e_status_code +i40evf_add_pinfo_to_list(struct i40e_hw *hw, + struct i40e_profile_segment *profile, + u8 *profile_info_sec, u32 track_id); +#endif /* _I40E_PROTOTYPE_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/i40e_register.h b/drivers/net/ethernet/intel/iavf/i40e_register.h new file mode 100644 index 000000000000..49e1f57d99cc --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/i40e_register.h @@ -0,0 +1,313 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ + +#ifndef _I40E_REGISTER_H_ +#define _I40E_REGISTER_H_ + +#define I40E_VFMSIX_PBA1(_i) (0x00002000 + ((_i) * 4)) /* _i=0...19 */ /* Reset: VFLR */ +#define I40E_VFMSIX_PBA1_MAX_INDEX 19 +#define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0 +#define I40E_VFMSIX_PBA1_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA1_PENBIT_SHIFT) +#define I40E_VFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ +#define I40E_VFMSIX_TADD1_MAX_INDEX 639 +#define I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT 0 +#define I40E_VFMSIX_TADD1_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT) +#define I40E_VFMSIX_TADD1_MSIXTADD_SHIFT 2 +#define I40E_VFMSIX_TADD1_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD1_MSIXTADD_SHIFT) +#define I40E_VFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ +#define I40E_VFMSIX_TMSG1_MAX_INDEX 639 +#define I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT 0 +#define I40E_VFMSIX_TMSG1_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT) +#define I40E_VFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ +#define I40E_VFMSIX_TUADD1_MAX_INDEX 639 +#define I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT 0 +#define I40E_VFMSIX_TUADD1_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT) +#define I40E_VFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ +#define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639 +#define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0 +#define I40E_VFMSIX_TVCTRL1_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL1_MASK_SHIFT) +#define I40E_VF_ARQBAH1 0x00006000 /* Reset: EMPR */ +#define I40E_VF_ARQBAH1_ARQBAH_SHIFT 0 +#define I40E_VF_ARQBAH1_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH1_ARQBAH_SHIFT) +#define I40E_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */ +#define I40E_VF_ARQBAL1_ARQBAL_SHIFT 0 +#define I40E_VF_ARQBAL1_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL1_ARQBAL_SHIFT) +#define I40E_VF_ARQH1 0x00007400 /* Reset: EMPR */ +#define I40E_VF_ARQH1_ARQH_SHIFT 0 +#define I40E_VF_ARQH1_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH1_ARQH_SHIFT) +#define I40E_VF_ARQLEN1 0x00008000 /* Reset: EMPR */ +#define I40E_VF_ARQLEN1_ARQLEN_SHIFT 0 +#define I40E_VF_ARQLEN1_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN1_ARQLEN_SHIFT) +#define I40E_VF_ARQLEN1_ARQVFE_SHIFT 28 +#define I40E_VF_ARQLEN1_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQVFE_SHIFT) +#define I40E_VF_ARQLEN1_ARQOVFL_SHIFT 29 +#define I40E_VF_ARQLEN1_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQOVFL_SHIFT) +#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30 +#define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT) +#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31 +#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQENABLE_SHIFT) +#define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */ +#define I40E_VF_ARQT1_ARQT_SHIFT 0 +#define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT) +#define I40E_VF_ATQBAH1 0x00007800 /* Reset: EMPR */ +#define I40E_VF_ATQBAH1_ATQBAH_SHIFT 0 +#define I40E_VF_ATQBAH1_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH1_ATQBAH_SHIFT) +#define I40E_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */ +#define I40E_VF_ATQBAL1_ATQBAL_SHIFT 0 +#define I40E_VF_ATQBAL1_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL1_ATQBAL_SHIFT) +#define I40E_VF_ATQH1 0x00006400 /* Reset: EMPR */ +#define I40E_VF_ATQH1_ATQH_SHIFT 0 +#define I40E_VF_ATQH1_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH1_ATQH_SHIFT) +#define I40E_VF_ATQLEN1 0x00006800 /* Reset: EMPR */ +#define 
I40E_VF_ATQLEN1_ATQLEN_SHIFT 0 +#define I40E_VF_ATQLEN1_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN1_ATQLEN_SHIFT) +#define I40E_VF_ATQLEN1_ATQVFE_SHIFT 28 +#define I40E_VF_ATQLEN1_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQVFE_SHIFT) +#define I40E_VF_ATQLEN1_ATQOVFL_SHIFT 29 +#define I40E_VF_ATQLEN1_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQOVFL_SHIFT) +#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30 +#define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT) +#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31 +#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQENABLE_SHIFT) +#define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */ +#define I40E_VF_ATQT1_ATQT_SHIFT 0 +#define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT) +#define I40E_VFGEN_RSTAT 0x00008800 /* Reset: VFR */ +#define I40E_VFGEN_RSTAT_VFR_STATE_SHIFT 0 +#define I40E_VFGEN_RSTAT_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT_VFR_STATE_SHIFT) +#define I40E_VFINT_DYN_CTL01 0x00005C00 /* Reset: VFR */ +#define I40E_VFINT_DYN_CTL01_INTENA_SHIFT 0 +#define I40E_VFINT_DYN_CTL01_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_SHIFT) +#define I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT 1 +#define I40E_VFINT_DYN_CTL01_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT) +#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2 +#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT) +#define I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3 +#define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT 5 +#define I40E_VFINT_DYN_CTL01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT) +#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24 +#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT) +#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25 +#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT 31 +#define I40E_VFINT_DYN_CTL01_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT) +#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */ +#define I40E_VFINT_DYN_CTLN1_MAX_INDEX 15 +#define I40E_VFINT_DYN_CTLN1_INTENA_SHIFT 0 +#define I40E_VFINT_DYN_CTLN1_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_SHIFT) +#define I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT 1 +#define I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT) +#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2 +#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT) +#define I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3 +#define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5 +#define I40E_VFINT_DYN_CTLN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT) +#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24 +#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT) +#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25 +#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT 31 +#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT) +#define 
I40E_VFINT_ICR0_ENA1 0x00005000 /* Reset: CORER */ +#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT 25 +#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT) +#define I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30 +#define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT) +#define I40E_VFINT_ICR0_ENA1_RSVD_SHIFT 31 +#define I40E_VFINT_ICR0_ENA1_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_RSVD_SHIFT) +#define I40E_VFINT_ICR01 0x00004800 /* Reset: CORER */ +#define I40E_VFINT_ICR01_INTEVENT_SHIFT 0 +#define I40E_VFINT_ICR01_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_INTEVENT_SHIFT) +#define I40E_VFINT_ICR01_QUEUE_0_SHIFT 1 +#define I40E_VFINT_ICR01_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_0_SHIFT) +#define I40E_VFINT_ICR01_QUEUE_1_SHIFT 2 +#define I40E_VFINT_ICR01_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_1_SHIFT) +#define I40E_VFINT_ICR01_QUEUE_2_SHIFT 3 +#define I40E_VFINT_ICR01_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_2_SHIFT) +#define I40E_VFINT_ICR01_QUEUE_3_SHIFT 4 +#define I40E_VFINT_ICR01_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_3_SHIFT) +#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25 +#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT) +#define I40E_VFINT_ICR01_ADMINQ_SHIFT 30 +#define I40E_VFINT_ICR01_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_ADMINQ_SHIFT) +#define I40E_VFINT_ICR01_SWINT_SHIFT 31 +#define I40E_VFINT_ICR01_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_SWINT_SHIFT) +#define I40E_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */ /* Reset: VFR */ +#define I40E_VFINT_ITR01_MAX_INDEX 2 +#define I40E_VFINT_ITR01_INTERVAL_SHIFT 0 +#define I40E_VFINT_ITR01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR01_INTERVAL_SHIFT) +#define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */ +#define I40E_VFINT_ITRN1_MAX_INDEX 2 +#define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0 +#define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT) +#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */ +#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2 +#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT) +#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_QRX_TAIL1_MAX_INDEX 15 +#define I40E_QRX_TAIL1_TAIL_SHIFT 0 +#define I40E_QRX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL1_TAIL_SHIFT) +#define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: PFR */ +#define I40E_QTX_TAIL1_MAX_INDEX 15 +#define I40E_QTX_TAIL1_TAIL_SHIFT 0 +#define I40E_QTX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL1_TAIL_SHIFT) +#define I40E_VFMSIX_PBA 0x00002000 /* Reset: VFLR */ +#define I40E_VFMSIX_PBA_PENBIT_SHIFT 0 +#define I40E_VFMSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA_PENBIT_SHIFT) +#define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ +#define I40E_VFMSIX_TADD_MAX_INDEX 16 +#define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0 +#define I40E_VFMSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD_MSIXTADD10_SHIFT) +#define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2 +#define I40E_VFMSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD_MSIXTADD_SHIFT) +#define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ +#define 
I40E_VFMSIX_TMSG_MAX_INDEX 16 +#define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0 +#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT) +#define I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ +#define I40E_VFMSIX_TUADD_MAX_INDEX 16 +#define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0 +#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT) +#define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ +#define I40E_VFMSIX_TVCTRL_MAX_INDEX 16 +#define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0 +#define I40E_VFMSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL_MASK_SHIFT) +#define I40E_VFCM_PE_ERRDATA 0x0000DC00 /* Reset: VFR */ +#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0 +#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT) +#define I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT 4 +#define I40E_VFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT) +#define I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT 8 +#define I40E_VFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT) +#define I40E_VFCM_PE_ERRINFO 0x0000D800 /* Reset: VFR */ +#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0 +#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT) +#define I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT 4 +#define I40E_VFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT) +#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8 +#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT) +#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16 +#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT) +#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24 +#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT) +#define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */ +#define I40E_VFQF_HENA_MAX_INDEX 1 +#define I40E_VFQF_HENA_PTYPE_ENA_SHIFT 0 +#define I40E_VFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA_PTYPE_ENA_SHIFT) +#define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */ +#define I40E_VFQF_HKEY_MAX_INDEX 12 +#define I40E_VFQF_HKEY_KEY_0_SHIFT 0 +#define I40E_VFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_0_SHIFT) +#define I40E_VFQF_HKEY_KEY_1_SHIFT 8 +#define I40E_VFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_1_SHIFT) +#define I40E_VFQF_HKEY_KEY_2_SHIFT 16 +#define I40E_VFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_2_SHIFT) +#define I40E_VFQF_HKEY_KEY_3_SHIFT 24 +#define I40E_VFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_3_SHIFT) +#define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_VFQF_HLUT_MAX_INDEX 15 +#define I40E_VFQF_HLUT_LUT0_SHIFT 0 +#define I40E_VFQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT0_SHIFT) +#define I40E_VFQF_HLUT_LUT1_SHIFT 8 +#define I40E_VFQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT1_SHIFT) +#define I40E_VFQF_HLUT_LUT2_SHIFT 16 +#define I40E_VFQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT2_SHIFT) +#define I40E_VFQF_HLUT_LUT3_SHIFT 24 +#define I40E_VFQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT3_SHIFT) +#define I40E_VFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */ 
+#define I40E_VFQF_HREGION_MAX_INDEX 7 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT) +#define I40E_VFQF_HREGION_REGION_0_SHIFT 1 +#define I40E_VFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_0_SHIFT) +#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT) +#define I40E_VFQF_HREGION_REGION_1_SHIFT 5 +#define I40E_VFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_1_SHIFT) +#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT) +#define I40E_VFQF_HREGION_REGION_2_SHIFT 9 +#define I40E_VFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_2_SHIFT) +#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT) +#define I40E_VFQF_HREGION_REGION_3_SHIFT 13 +#define I40E_VFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_3_SHIFT) +#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT) +#define I40E_VFQF_HREGION_REGION_4_SHIFT 17 +#define I40E_VFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_4_SHIFT) +#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT) +#define I40E_VFQF_HREGION_REGION_5_SHIFT 21 +#define I40E_VFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_5_SHIFT) +#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT) +#define I40E_VFQF_HREGION_REGION_6_SHIFT 25 +#define I40E_VFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_6_SHIFT) +#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT) +#define I40E_VFQF_HREGION_REGION_7_SHIFT 29 +#define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT) +#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30 +#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT) +#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30 +#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT) +#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */ +#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0 +#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT) +#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */ +#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0 +#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT) +#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */ +#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0 +#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT) +#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */ +#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0 +#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT) +#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4 +#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK 
I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT) +#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16 +#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT) +#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31 +#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT) +#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */ +#define I40E_VFPE_CQACK1_PECQID_SHIFT 0 +#define I40E_VFPE_CQACK1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK1_PECQID_SHIFT) +#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */ +#define I40E_VFPE_CQARM1_PECQID_SHIFT 0 +#define I40E_VFPE_CQARM1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM1_PECQID_SHIFT) +#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */ +#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0 +#define I40E_VFPE_CQPDB1_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB1_WQHEAD_SHIFT) +#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */ +#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0 +#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT) +#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16 +#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT) +#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */ +#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0 +#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT) +#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31 +#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT) +#define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */ +#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0 +#define I40E_VFPE_IPCONFIG01_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG01_PEIPID_SHIFT) +#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16 +#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT) +#define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */ +#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0 +#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT) +#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */ +#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0 +#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT) +#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */ +#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0 +#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT) +#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */ +#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0 +#define I40E_VFPE_WQEALLOC1_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC1_PEQPID_SHIFT) +#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20 +#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT) +#endif /* _I40E_REGISTER_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/i40e_status.h b/drivers/net/ethernet/intel/iavf/i40e_status.h new file mode 100644 index 000000000000..77be0702d07c --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/i40e_status.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ + +#ifndef _I40E_STATUS_H_ +#define _I40E_STATUS_H_ + +/* Error Codes */ +enum i40e_status_code { + I40E_SUCCESS = 0, + I40E_ERR_NVM = -1, + I40E_ERR_NVM_CHECKSUM = -2, + I40E_ERR_PHY = -3, + I40E_ERR_CONFIG = -4, + I40E_ERR_PARAM = -5, + I40E_ERR_MAC_TYPE = -6, + I40E_ERR_UNKNOWN_PHY = -7, + I40E_ERR_LINK_SETUP = -8, + I40E_ERR_ADAPTER_STOPPED = -9, + I40E_ERR_INVALID_MAC_ADDR = -10, + I40E_ERR_DEVICE_NOT_SUPPORTED = -11, + I40E_ERR_MASTER_REQUESTS_PENDING = -12, + I40E_ERR_INVALID_LINK_SETTINGS = -13, + I40E_ERR_AUTONEG_NOT_COMPLETE = -14, + I40E_ERR_RESET_FAILED = -15, + I40E_ERR_SWFW_SYNC = -16, + I40E_ERR_NO_AVAILABLE_VSI = -17, + I40E_ERR_NO_MEMORY = -18, + I40E_ERR_BAD_PTR = -19, + I40E_ERR_RING_FULL = -20, + I40E_ERR_INVALID_PD_ID = -21, + I40E_ERR_INVALID_QP_ID = -22, + I40E_ERR_INVALID_CQ_ID = -23, + I40E_ERR_INVALID_CEQ_ID = -24, + I40E_ERR_INVALID_AEQ_ID = -25, + I40E_ERR_INVALID_SIZE = -26, + I40E_ERR_INVALID_ARP_INDEX = -27, + I40E_ERR_INVALID_FPM_FUNC_ID = -28, + I40E_ERR_QP_INVALID_MSG_SIZE = -29, + I40E_ERR_QP_TOOMANY_WRS_POSTED = -30, + I40E_ERR_INVALID_FRAG_COUNT = -31, + I40E_ERR_QUEUE_EMPTY = -32, + I40E_ERR_INVALID_ALIGNMENT = -33, + I40E_ERR_FLUSHED_QUEUE = -34, + I40E_ERR_INVALID_PUSH_PAGE_INDEX = -35, + I40E_ERR_INVALID_IMM_DATA_SIZE = -36, + I40E_ERR_TIMEOUT = -37, + I40E_ERR_OPCODE_MISMATCH = -38, + I40E_ERR_CQP_COMPL_ERROR = -39, + I40E_ERR_INVALID_VF_ID = -40, + I40E_ERR_INVALID_HMCFN_ID = -41, + I40E_ERR_BACKING_PAGE_ERROR = -42, + I40E_ERR_NO_PBLCHUNKS_AVAILABLE = -43, + I40E_ERR_INVALID_PBLE_INDEX = -44, + I40E_ERR_INVALID_SD_INDEX = -45, + I40E_ERR_INVALID_PAGE_DESC_INDEX = -46, + I40E_ERR_INVALID_SD_TYPE = -47, + I40E_ERR_MEMCPY_FAILED = -48, + I40E_ERR_INVALID_HMC_OBJ_INDEX = -49, + I40E_ERR_INVALID_HMC_OBJ_COUNT = -50, + I40E_ERR_INVALID_SRQ_ARM_LIMIT = -51, + I40E_ERR_SRQ_ENABLED = -52, + I40E_ERR_ADMIN_QUEUE_ERROR = -53, + I40E_ERR_ADMIN_QUEUE_TIMEOUT = -54, + I40E_ERR_BUF_TOO_SHORT = -55, + I40E_ERR_ADMIN_QUEUE_FULL = -56, + I40E_ERR_ADMIN_QUEUE_NO_WORK = -57, + I40E_ERR_BAD_IWARP_CQE = -58, + I40E_ERR_NVM_BLANK_MODE = -59, + I40E_ERR_NOT_IMPLEMENTED = -60, + I40E_ERR_PE_DOORBELL_NOT_ENABLED = -61, + I40E_ERR_DIAG_TEST_FAILED = -62, + I40E_ERR_NOT_READY = -63, + I40E_NOT_SUPPORTED = -64, + I40E_ERR_FIRMWARE_API_VERSION = -65, + I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR = -66, +}; + +#endif /* _I40E_STATUS_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/i40e_trace.h b/drivers/net/ethernet/intel/iavf/i40e_trace.h new file mode 100644 index 000000000000..d7a4e68820a8 --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/i40e_trace.h @@ -0,0 +1,209 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ + +/* Modeled on trace-events-sample.h */ + +/* The trace subsystem name for i40evf will be "i40evf". + * + * This file is named i40e_trace.h. + * + * Since this include file's name is different from the trace + * subsystem name, we'll have to define TRACE_INCLUDE_FILE at the end + * of this file. + */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM i40evf + +/* See trace-events-sample.h for a detailed description of why this + * guard clause is different from most normal include files. + */ +#if !defined(_I40E_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) +#define _I40E_TRACE_H_ + +#include <linux/tracepoint.h> + +/** + * i40e_trace() macro enables shared code to refer to trace points + * like: + * + * trace_i40e{,vf}_example(args...) + * + * ... as: + * + * i40e_trace(example, args...) + * + * ...
to resolve to the PF or VF version of the tracepoint without + * ifdefs, and to allow tracepoints to be disabled entirely at build + * time. + * + * Trace point should always be referred to in the driver via this + * macro. + * + * Similarly, i40e_trace_enabled(trace_name) wraps references to + * trace_i40e{,vf}__enabled() functions. + */ +#define _I40E_TRACE_NAME(trace_name) (trace_ ## i40evf ## _ ## trace_name) +#define I40E_TRACE_NAME(trace_name) _I40E_TRACE_NAME(trace_name) + +#define i40e_trace(trace_name, args...) I40E_TRACE_NAME(trace_name)(args) + +#define i40e_trace_enabled(trace_name) I40E_TRACE_NAME(trace_name##_enabled)() + +/* Events common to PF and VF. Corresponding versions will be defined + * for both, named trace_i40e_* and trace_i40evf_*. The i40e_trace() + * macro above will select the right trace point name for the driver + * being built from shared code. + */ + +/* Events related to a vsi & ring */ +DECLARE_EVENT_CLASS( + i40evf_tx_template, + + TP_PROTO(struct i40e_ring *ring, + struct i40e_tx_desc *desc, + struct i40e_tx_buffer *buf), + + TP_ARGS(ring, desc, buf), + + /* The convention here is to make the first fields in the + * TP_STRUCT match the TP_PROTO exactly. This enables the use + * of the args struct generated by the tplist tool (from the + * bcc-tools package) to be used for those fields. To access + * fields other than the tracepoint args will require the + * tplist output to be adjusted. + */ + TP_STRUCT__entry( + __field(void*, ring) + __field(void*, desc) + __field(void*, buf) + __string(devname, ring->netdev->name) + ), + + TP_fast_assign( + __entry->ring = ring; + __entry->desc = desc; + __entry->buf = buf; + __assign_str(devname, ring->netdev->name); + ), + + TP_printk( + "netdev: %s ring: %p desc: %p buf %p", + __get_str(devname), __entry->ring, + __entry->desc, __entry->buf) +); + +DEFINE_EVENT( + i40evf_tx_template, i40evf_clean_tx_irq, + TP_PROTO(struct i40e_ring *ring, + struct i40e_tx_desc *desc, + struct i40e_tx_buffer *buf), + + TP_ARGS(ring, desc, buf)); + +DEFINE_EVENT( + i40evf_tx_template, i40evf_clean_tx_irq_unmap, + TP_PROTO(struct i40e_ring *ring, + struct i40e_tx_desc *desc, + struct i40e_tx_buffer *buf), + + TP_ARGS(ring, desc, buf)); + +DECLARE_EVENT_CLASS( + i40evf_rx_template, + + TP_PROTO(struct i40e_ring *ring, + union i40e_32byte_rx_desc *desc, + struct sk_buff *skb), + + TP_ARGS(ring, desc, skb), + + TP_STRUCT__entry( + __field(void*, ring) + __field(void*, desc) + __field(void*, skb) + __string(devname, ring->netdev->name) + ), + + TP_fast_assign( + __entry->ring = ring; + __entry->desc = desc; + __entry->skb = skb; + __assign_str(devname, ring->netdev->name); + ), + + TP_printk( + "netdev: %s ring: %p desc: %p skb %p", + __get_str(devname), __entry->ring, + __entry->desc, __entry->skb) +); + +DEFINE_EVENT( + i40evf_rx_template, i40evf_clean_rx_irq, + TP_PROTO(struct i40e_ring *ring, + union i40e_32byte_rx_desc *desc, + struct sk_buff *skb), + + TP_ARGS(ring, desc, skb)); + +DEFINE_EVENT( + i40evf_rx_template, i40evf_clean_rx_irq_rx, + TP_PROTO(struct i40e_ring *ring, + union i40e_32byte_rx_desc *desc, + struct sk_buff *skb), + + TP_ARGS(ring, desc, skb)); + +DECLARE_EVENT_CLASS( + i40evf_xmit_template, + + TP_PROTO(struct sk_buff *skb, + struct i40e_ring *ring), + + TP_ARGS(skb, ring), + + TP_STRUCT__entry( + __field(void*, skb) + __field(void*, ring) + __string(devname, ring->netdev->name) + ), + + TP_fast_assign( + __entry->skb = skb; + __entry->ring = ring; + __assign_str(devname, ring->netdev->name); + ), + + TP_printk( 
+ "netdev: %s skb: %p ring: %p", + __get_str(devname), __entry->skb, + __entry->ring) +); + +DEFINE_EVENT( + i40evf_xmit_template, i40evf_xmit_frame_ring, + TP_PROTO(struct sk_buff *skb, + struct i40e_ring *ring), + + TP_ARGS(skb, ring)); + +DEFINE_EVENT( + i40evf_xmit_template, i40evf_xmit_frame_ring_drop, + TP_PROTO(struct sk_buff *skb, + struct i40e_ring *ring), + + TP_ARGS(skb, ring)); + +/* Events unique to the VF. */ + +#endif /* _I40E_TRACE_H_ */ +/* This must be outside ifdef _I40E_TRACE_H */ + +/* This trace include file is not located in the .../include/trace + * with the kernel tracepoint definitions, because we're a loadable + * module. + */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE i40e_trace +#include diff --git a/drivers/net/ethernet/intel/iavf/i40e_txrx.c b/drivers/net/ethernet/intel/iavf/i40e_txrx.c new file mode 100644 index 000000000000..1bf9734ae9cf --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/i40e_txrx.c @@ -0,0 +1,2513 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2013 - 2018 Intel Corporation. */ + +#include +#include + +#include "i40evf.h" +#include "i40e_trace.h" +#include "i40e_prototype.h" + +static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size, + u32 td_tag) +{ + return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA | + ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) | + ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) | + ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) | + ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT)); +} + +#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS) + +/** + * i40e_unmap_and_free_tx_resource - Release a Tx buffer + * @ring: the ring that owns the buffer + * @tx_buffer: the buffer to free + **/ +static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring, + struct i40e_tx_buffer *tx_buffer) +{ + if (tx_buffer->skb) { + if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB) + kfree(tx_buffer->raw_buf); + else + dev_kfree_skb_any(tx_buffer->skb); + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_single(ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } else if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } + + tx_buffer->next_to_watch = NULL; + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); + /* tx_buffer must be completely set up in the transmit path */ +} + +/** + * i40evf_clean_tx_ring - Free any empty Tx buffers + * @tx_ring: ring to be cleaned + **/ +void i40evf_clean_tx_ring(struct i40e_ring *tx_ring) +{ + unsigned long bi_size; + u16 i; + + /* ring already cleared, nothing to do */ + if (!tx_ring->tx_bi) + return; + + /* Free all the Tx ring sk_buffs */ + for (i = 0; i < tx_ring->count; i++) + i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]); + + bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; + memset(tx_ring->tx_bi, 0, bi_size); + + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + if (!tx_ring->netdev) + return; + + /* cleanup Tx queue statistics */ + netdev_tx_reset_queue(txring_txq(tx_ring)); +} + +/** + * i40evf_free_tx_resources - Free Tx resources per queue + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ +void i40evf_free_tx_resources(struct i40e_ring *tx_ring) +{ + 
i40evf_clean_tx_ring(tx_ring); + kfree(tx_ring->tx_bi); + tx_ring->tx_bi = NULL; + + if (tx_ring->desc) { + dma_free_coherent(tx_ring->dev, tx_ring->size, + tx_ring->desc, tx_ring->dma); + tx_ring->desc = NULL; + } +} + +/** + * i40evf_get_tx_pending - how many Tx descriptors not processed + * @ring: the ring of descriptors + * @in_sw: is tx_pending being checked in SW or HW + * + * Since there is no access to the ring head register + * in XL710, we need to use our local copies + **/ +u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw) +{ + u32 head, tail; + + /* underlying hardware might not allow access and/or always return + * 0 for the head/tail registers so just use the cached values + */ + head = ring->next_to_clean; + tail = ring->next_to_use; + + if (head != tail) + return (head < tail) ? + tail - head : (tail + ring->count - head); + + return 0; +} + +/** + * i40evf_detect_recover_hung - Function to detect and recover hung_queues + * @vsi: pointer to vsi struct with tx queues + * + * VSI has netdev and netdev has TX queues. This function is to check each of + * those TX queues if they are hung, trigger recovery by issuing SW interrupt. + **/ +void i40evf_detect_recover_hung(struct i40e_vsi *vsi) +{ + struct i40e_ring *tx_ring = NULL; + struct net_device *netdev; + unsigned int i; + int packets; + + if (!vsi) + return; + + if (test_bit(__I40E_VSI_DOWN, vsi->state)) + return; + + netdev = vsi->netdev; + if (!netdev) + return; + + if (!netif_carrier_ok(netdev)) + return; + + for (i = 0; i < vsi->back->num_active_queues; i++) { + tx_ring = &vsi->back->tx_rings[i]; + if (tx_ring && tx_ring->desc) { + /* If packet counter has not changed the queue is + * likely stalled, so force an interrupt for this + * queue. + * + * prev_pkt_ctr would be negative if there was no + * pending work. + */ + packets = tx_ring->stats.packets & INT_MAX; + if (tx_ring->tx_stats.prev_pkt_ctr == packets) { + i40evf_force_wb(vsi, tx_ring->q_vector); + continue; + } + + /* Memory barrier between read of packet count and call + * to i40evf_get_tx_pending() + */ + smp_rmb(); + tx_ring->tx_stats.prev_pkt_ctr = + i40evf_get_tx_pending(tx_ring, true) ? packets : -1; + } + } +} + +#define WB_STRIDE 4 + +/** + * i40e_clean_tx_irq - Reclaim resources after transmit completes + * @vsi: the VSI we care about + * @tx_ring: Tx ring to clean + * @napi_budget: Used to determine if we are in netpoll + * + * Returns true if there's any budget left (e.g. 
the clean is finished) + **/ +static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, + struct i40e_ring *tx_ring, int napi_budget) +{ + u16 i = tx_ring->next_to_clean; + struct i40e_tx_buffer *tx_buf; + struct i40e_tx_desc *tx_desc; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int budget = vsi->work_limit; + + tx_buf = &tx_ring->tx_bi[i]; + tx_desc = I40E_TX_DESC(tx_ring, i); + i -= tx_ring->count; + + do { + struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; + + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + smp_rmb(); + + i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf); + /* if the descriptor isn't done, no work yet to do */ + if (!(eop_desc->cmd_type_offset_bsz & + cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE))) + break; + + /* clear next_to_watch to prevent false hangs */ + tx_buf->next_to_watch = NULL; + + /* update the statistics for this packet */ + total_bytes += tx_buf->bytecount; + total_packets += tx_buf->gso_segs; + + /* free the skb */ + napi_consume_skb(tx_buf->skb, napi_budget); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), + DMA_TO_DEVICE); + + /* clear tx_buffer data */ + tx_buf->skb = NULL; + dma_unmap_len_set(tx_buf, len, 0); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + i40e_trace(clean_tx_irq_unmap, + tx_ring, tx_desc, tx_buf); + + tx_buf++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buf = tx_ring->tx_bi; + tx_desc = I40E_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buf, len)) { + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buf, len, 0); + } + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buf++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buf = tx_ring->tx_bi; + tx_desc = I40E_TX_DESC(tx_ring, 0); + } + + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + i += tx_ring->count; + tx_ring->next_to_clean = i; + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->stats.bytes += total_bytes; + tx_ring->stats.packets += total_packets; + u64_stats_update_end(&tx_ring->syncp); + tx_ring->q_vector->tx.total_bytes += total_bytes; + tx_ring->q_vector->tx.total_packets += total_packets; + + if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) { + /* check to see if there are < 4 descriptors + * waiting to be written back, then kick the hardware to force + * them to be written back in case we stay in NAPI. + * In this mode on X722 we do not enable Interrupt. + */ + unsigned int j = i40evf_get_tx_pending(tx_ring, false); + + if (budget && + ((j / WB_STRIDE) == 0) && (j > 0) && + !test_bit(__I40E_VSI_DOWN, vsi->state) && + (I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) + tx_ring->arm_wb = true; + } + + /* notify netdev of completed buffers */ + netdev_tx_completed_queue(txring_txq(tx_ring), + total_packets, total_bytes); + +#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2)) + if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && + (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
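+		 * This barrier pairs with the one in the queue-stop
+		 * path (see __i40evf_maybe_stop_tx()): either the xmit
+		 * side observes the updated next_to_clean, or this side
+		 * observes the stopped queue state and wakes it below.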
+ */ + smp_mb(); + if (__netif_subqueue_stopped(tx_ring->netdev, + tx_ring->queue_index) && + !test_bit(__I40E_VSI_DOWN, vsi->state)) { + netif_wake_subqueue(tx_ring->netdev, + tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + } + } + + return !!budget; +} + +/** + * i40evf_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled + * @vsi: the VSI we care about + * @q_vector: the vector on which to enable writeback + * + **/ +static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi, + struct i40e_q_vector *q_vector) +{ + u16 flags = q_vector->tx.ring[0].flags; + u32 val; + + if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR)) + return; + + if (q_vector->arm_wb_state) + return; + + val = I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK | + I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK; /* set noitr */ + + wr32(&vsi->back->hw, + I40E_VFINT_DYN_CTLN1(q_vector->reg_idx), val); + q_vector->arm_wb_state = true; +} + +/** + * i40evf_force_wb - Issue SW Interrupt so HW does a wb + * @vsi: the VSI we care about + * @q_vector: the vector on which to force writeback + * + **/ +void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) +{ + u32 val = I40E_VFINT_DYN_CTLN1_INTENA_MASK | + I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */ + I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK | + I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK + /* allow 00 to be written to the index */; + + wr32(&vsi->back->hw, + I40E_VFINT_DYN_CTLN1(q_vector->reg_idx), + val); +} + +static inline bool i40e_container_is_rx(struct i40e_q_vector *q_vector, + struct i40e_ring_container *rc) +{ + return &q_vector->rx == rc; +} + +static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector) +{ + unsigned int divisor; + + switch (q_vector->adapter->link_speed) { + case I40E_LINK_SPEED_40GB: + divisor = I40E_ITR_ADAPTIVE_MIN_INC * 1024; + break; + case I40E_LINK_SPEED_25GB: + case I40E_LINK_SPEED_20GB: + divisor = I40E_ITR_ADAPTIVE_MIN_INC * 512; + break; + default: + case I40E_LINK_SPEED_10GB: + divisor = I40E_ITR_ADAPTIVE_MIN_INC * 256; + break; + case I40E_LINK_SPEED_1GB: + case I40E_LINK_SPEED_100MB: + divisor = I40E_ITR_ADAPTIVE_MIN_INC * 32; + break; + } + + return divisor; +} + +/** + * i40e_update_itr - update the dynamic ITR value based on statistics + * @q_vector: structure containing interrupt and ring information + * @rc: structure containing ring performance data + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. + **/ +static void i40e_update_itr(struct i40e_q_vector *q_vector, + struct i40e_ring_container *rc) +{ + unsigned int avg_wire_size, packets, bytes, itr; + unsigned long next_update = jiffies; + + /* If we don't have any rings just leave ourselves set for maximum + * possible latency so we take ourselves out of the equation. + */ + if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting)) + return; + + /* For Rx we want to push the delay up and default to low latency. + * for Tx we want to pull the delay down and default to high latency. + */ + itr = i40e_container_is_rx(q_vector, rc) ? 
+ I40E_ITR_ADAPTIVE_MIN_USECS | I40E_ITR_ADAPTIVE_LATENCY : + I40E_ITR_ADAPTIVE_MAX_USECS | I40E_ITR_ADAPTIVE_LATENCY; + + /* If we didn't update within up to 1 - 2 jiffies we can assume + * that either packets are coming in so slow there hasn't been + * any work, or that there is so much work that NAPI is dealing + * with interrupt moderation and we don't need to do anything. + */ + if (time_after(next_update, rc->next_update)) + goto clear_counts; + + /* If itr_countdown is set it means we programmed an ITR within + * the last 4 interrupt cycles. This has a side effect of us + * potentially firing an early interrupt. In order to work around + * this we need to throw out any data received for a few + * interrupts following the update. + */ + if (q_vector->itr_countdown) { + itr = rc->target_itr; + goto clear_counts; + } + + packets = rc->total_packets; + bytes = rc->total_bytes; + + if (i40e_container_is_rx(q_vector, rc)) { + /* If Rx there are 1 to 4 packets and bytes are less than + * 9000 assume insufficient data to use bulk rate limiting + * approach unless Tx is already in bulk rate limiting. We + * are likely latency driven. + */ + if (packets && packets < 4 && bytes < 9000 && + (q_vector->tx.target_itr & I40E_ITR_ADAPTIVE_LATENCY)) { + itr = I40E_ITR_ADAPTIVE_LATENCY; + goto adjust_by_size; + } + } else if (packets < 4) { + /* If we have Tx and Rx ITR maxed and Tx ITR is running in + * bulk mode and we are receiving 4 or fewer packets just + * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so + * that the Rx can relax. + */ + if (rc->target_itr == I40E_ITR_ADAPTIVE_MAX_USECS && + (q_vector->rx.target_itr & I40E_ITR_MASK) == + I40E_ITR_ADAPTIVE_MAX_USECS) + goto clear_counts; + } else if (packets > 32) { + /* If we have processed over 32 packets in a single interrupt + * for Tx assume we need to switch over to "bulk" mode. + */ + rc->target_itr &= ~I40E_ITR_ADAPTIVE_LATENCY; + } + + /* We have no packets to actually measure against. This means + * either one of the other queues on this vector is active or + * we are a Tx queue doing TSO with too high of an interrupt rate. + * + * Between 4 and 56 we can assume that our current interrupt delay + * is only slightly too low. As such we should increase it by a small + * fixed amount. + */ + if (packets < 56) { + itr = rc->target_itr + I40E_ITR_ADAPTIVE_MIN_INC; + if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) { + itr &= I40E_ITR_ADAPTIVE_LATENCY; + itr += I40E_ITR_ADAPTIVE_MAX_USECS; + } + goto clear_counts; + } + + if (packets <= 256) { + itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr); + itr &= I40E_ITR_MASK; + + /* Between 56 and 112 is our "goldilocks" zone where we are + * working out "just right". Just report that our current + * ITR is good for us. + */ + if (packets <= 112) + goto clear_counts; + + /* If packet count is 128 or greater we are likely looking + * at a slight overrun of the delay we want. Try halving + * our delay to see if that will cut the number of packets + * in half per interrupt. + */ + itr /= 2; + itr &= I40E_ITR_MASK; + if (itr < I40E_ITR_ADAPTIVE_MIN_USECS) + itr = I40E_ITR_ADAPTIVE_MIN_USECS; + + goto clear_counts; + } + + /* The paths below assume we are dealing with a bulk ITR since + * number of packets is greater than 256. We are just going to have + * to compute a value and try to bring the count under control, + * though for smaller packet sizes there isn't much we can do as + * NAPI polling will likely be kicking in sooner rather than later. 
+ */ + itr = I40E_ITR_ADAPTIVE_BULK; + +adjust_by_size: + /* If packet counts are 256 or greater we can assume we have a gross + * overestimation of what the rate should be. Instead of trying to fine + * tune it just use the formula below to try and dial in an exact value + * give the current packet size of the frame. + */ + avg_wire_size = bytes / packets; + + /* The following is a crude approximation of: + * wmem_default / (size + overhead) = desired_pkts_per_int + * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate + * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value + * + * Assuming wmem_default is 212992 and overhead is 640 bytes per + * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the + * formula down to + * + * (170 * (size + 24)) / (size + 640) = ITR + * + * We first do some math on the packet size and then finally bitshift + * by 8 after rounding up. We also have to account for PCIe link speed + * difference as ITR scales based on this. + */ + if (avg_wire_size <= 60) { + /* Start at 250k ints/sec */ + avg_wire_size = 4096; + } else if (avg_wire_size <= 380) { + /* 250K ints/sec to 60K ints/sec */ + avg_wire_size *= 40; + avg_wire_size += 1696; + } else if (avg_wire_size <= 1084) { + /* 60K ints/sec to 36K ints/sec */ + avg_wire_size *= 15; + avg_wire_size += 11452; + } else if (avg_wire_size <= 1980) { + /* 36K ints/sec to 30K ints/sec */ + avg_wire_size *= 5; + avg_wire_size += 22420; + } else { + /* plateau at a limit of 30K ints/sec */ + avg_wire_size = 32256; + } + + /* If we are in low latency mode halve our delay which doubles the + * rate to somewhere between 100K to 16K ints/sec + */ + if (itr & I40E_ITR_ADAPTIVE_LATENCY) + avg_wire_size /= 2; + + /* Resultant value is 256 times larger than it needs to be. This + * gives us room to adjust the value as needed to either increase + * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc. + * + * Use addition as we have already recorded the new latency flag + * for the ITR value. 
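+	 *
+	 * Rough worked example (assuming a 40G link, where
+	 * i40e_itr_divisor() yields I40E_ITR_ADAPTIVE_MIN_INC * 1024,
+	 * i.e. 2048 with a MIN_INC of 0x2 as in this driver's headers):
+	 * 1500-byte frames land in the avg_wire_size = 1500 * 5 + 22420
+	 * = 29920 bucket above, so the addition below comes to
+	 * DIV_ROUND_UP(29920, 2048) * 2 = 30 usecs, roughly 33K
+	 * ints/sec, consistent with the 30K plateau noted above.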
+ */ + itr += DIV_ROUND_UP(avg_wire_size, i40e_itr_divisor(q_vector)) * + I40E_ITR_ADAPTIVE_MIN_INC; + + if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) { + itr &= I40E_ITR_ADAPTIVE_LATENCY; + itr += I40E_ITR_ADAPTIVE_MAX_USECS; + } + +clear_counts: + /* write back value */ + rc->target_itr = itr; + + /* next update should occur within next jiffy */ + rc->next_update = next_update + 1; + + rc->total_bytes = 0; + rc->total_packets = 0; +} + +/** + * i40evf_setup_tx_descriptors - Allocate the Tx descriptors + * @tx_ring: the tx ring to set up + * + * Return 0 on success, negative on error + **/ +int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring) +{ + struct device *dev = tx_ring->dev; + int bi_size; + + if (!dev) + return -ENOMEM; + + /* warn if we are about to overwrite the pointer */ + WARN_ON(tx_ring->tx_bi); + bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; + tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL); + if (!tx_ring->tx_bi) + goto err; + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->desc) { + dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n", + tx_ring->size); + goto err; + } + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + tx_ring->tx_stats.prev_pkt_ctr = -1; + return 0; + +err: + kfree(tx_ring->tx_bi); + tx_ring->tx_bi = NULL; + return -ENOMEM; +} + +/** + * i40evf_clean_rx_ring - Free Rx buffers + * @rx_ring: ring to be cleaned + **/ +void i40evf_clean_rx_ring(struct i40e_ring *rx_ring) +{ + unsigned long bi_size; + u16 i; + + /* ring already cleared, nothing to do */ + if (!rx_ring->rx_bi) + return; + + if (rx_ring->skb) { + dev_kfree_skb(rx_ring->skb); + rx_ring->skb = NULL; + } + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rx_ring->count; i++) { + struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i]; + + if (!rx_bi->page) + continue; + + /* Invalidate cache lines that may have been written to by + * device so that we avoid corrupting memory. 
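+		 * The unmap that follows passes I40E_RX_DMA_ATTR, which
+		 * includes DMA_ATTR_SKIP_CPU_SYNC, so this explicit sync
+		 * is the only CPU sync performed for the page.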
+ */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_bi->dma, + rx_bi->page_offset, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + + /* free resources associated with mapping */ + dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma, + i40e_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + I40E_RX_DMA_ATTR); + + __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias); + + rx_bi->page = NULL; + rx_bi->page_offset = 0; + } + + bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count; + memset(rx_ring->rx_bi, 0, bi_size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; +} + +/** + * i40evf_free_rx_resources - Free Rx resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +void i40evf_free_rx_resources(struct i40e_ring *rx_ring) +{ + i40evf_clean_rx_ring(rx_ring); + kfree(rx_ring->rx_bi); + rx_ring->rx_bi = NULL; + + if (rx_ring->desc) { + dma_free_coherent(rx_ring->dev, rx_ring->size, + rx_ring->desc, rx_ring->dma); + rx_ring->desc = NULL; + } +} + +/** + * i40evf_setup_rx_descriptors - Allocate Rx descriptors + * @rx_ring: Rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + int bi_size; + + /* warn if we are about to overwrite the pointer */ + WARN_ON(rx_ring->rx_bi); + bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count; + rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL); + if (!rx_ring->rx_bi) + goto err; + + u64_stats_init(&rx_ring->syncp); + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc); + rx_ring->size = ALIGN(rx_ring->size, 4096); + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + + if (!rx_ring->desc) { + dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n", + rx_ring->size); + goto err; + } + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + return 0; +err: + kfree(rx_ring->rx_bi); + rx_ring->rx_bi = NULL; + return -ENOMEM; +} + +/** + * i40e_release_rx_desc - Store the new tail and head values + * @rx_ring: ring to bump + * @val: new head index + **/ +static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) +{ + rx_ring->next_to_use = val; + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = val; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(val, rx_ring->tail); +} + +/** + * i40e_rx_offset - Return expected offset into page to access data + * @rx_ring: Ring we are requesting offset of + * + * Returns the offset value for ring into the data buffer. + */ +static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring) +{ + return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0; +} + +/** + * i40e_alloc_mapped_page - recycle or make a new page + * @rx_ring: ring to use + * @bi: rx_buffer struct to modify + * + * Returns true if the page was successfully allocated or + * reused. 
+ **/ +static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring, + struct i40e_rx_buffer *bi) +{ + struct page *page = bi->page; + dma_addr_t dma; + + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) { + rx_ring->rx_stats.page_reuse_count++; + return true; + } + + /* alloc new page for storage */ + page = dev_alloc_pages(i40e_rx_pg_order(rx_ring)); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_page_failed++; + return false; + } + + /* map page for use */ + dma = dma_map_page_attrs(rx_ring->dev, page, 0, + i40e_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + I40E_RX_DMA_ATTR); + + /* if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_pages(page, i40e_rx_pg_order(rx_ring)); + rx_ring->rx_stats.alloc_page_failed++; + return false; + } + + bi->dma = dma; + bi->page = page; + bi->page_offset = i40e_rx_offset(rx_ring); + + /* initialize pagecnt_bias to 1 representing we fully own page */ + bi->pagecnt_bias = 1; + + return true; +} + +/** + * i40e_receive_skb - Send a completed packet up the stack + * @rx_ring: rx ring in play + * @skb: packet to send up + * @vlan_tag: vlan tag for packet + **/ +static void i40e_receive_skb(struct i40e_ring *rx_ring, + struct sk_buff *skb, u16 vlan_tag) +{ + struct i40e_q_vector *q_vector = rx_ring->q_vector; + + if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && + (vlan_tag & VLAN_VID_MASK)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); + + napi_gro_receive(&q_vector->napi, skb); +} + +/** + * i40evf_alloc_rx_buffers - Replace used receive buffers + * @rx_ring: ring to place buffers on + * @cleaned_count: number of buffers to replace + * + * Returns false if all allocations were successful, true if any fail + **/ +bool i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) +{ + u16 ntu = rx_ring->next_to_use; + union i40e_rx_desc *rx_desc; + struct i40e_rx_buffer *bi; + + /* do nothing if no valid netdev defined */ + if (!rx_ring->netdev || !cleaned_count) + return false; + + rx_desc = I40E_RX_DESC(rx_ring, ntu); + bi = &rx_ring->rx_bi[ntu]; + + do { + if (!i40e_alloc_mapped_page(rx_ring, bi)) + goto no_buffers; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, bi->dma, + bi->page_offset, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. 
+ */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); + + rx_desc++; + bi++; + ntu++; + if (unlikely(ntu == rx_ring->count)) { + rx_desc = I40E_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_bi; + ntu = 0; + } + + /* clear the status bits for the next_to_use descriptor */ + rx_desc->wb.qword1.status_error_len = 0; + + cleaned_count--; + } while (cleaned_count); + + if (rx_ring->next_to_use != ntu) + i40e_release_rx_desc(rx_ring, ntu); + + return false; + +no_buffers: + if (rx_ring->next_to_use != ntu) + i40e_release_rx_desc(rx_ring, ntu); + + /* make sure to come back via polling to try again after + * allocation failure + */ + return true; +} + +/** + * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum + * @vsi: the VSI we care about + * @skb: skb currently being received and modified + * @rx_desc: the receive descriptor + **/ +static inline void i40e_rx_checksum(struct i40e_vsi *vsi, + struct sk_buff *skb, + union i40e_rx_desc *rx_desc) +{ + struct i40e_rx_ptype_decoded decoded; + u32 rx_error, rx_status; + bool ipv4, ipv6; + u8 ptype; + u64 qword; + + qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT; + rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> + I40E_RXD_QW1_ERROR_SHIFT; + rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> + I40E_RXD_QW1_STATUS_SHIFT; + decoded = decode_rx_desc_ptype(ptype); + + skb->ip_summed = CHECKSUM_NONE; + + skb_checksum_none_assert(skb); + + /* Rx csum enabled and ip headers found? */ + if (!(vsi->netdev->features & NETIF_F_RXCSUM)) + return; + + /* did the hardware decode the packet and checksum? */ + if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT))) + return; + + /* both known and outer_ip must be set for the below code to work */ + if (!(decoded.known && decoded.outer_ip)) + return; + + ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) && + (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4); + ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) && + (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6); + + if (ipv4 && + (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) | + BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT)))) + goto checksum_fail; + + /* likely incorrect csum if alternate IP extension headers found */ + if (ipv6 && + rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) + /* don't increment checksum err here, non-fatal err */ + return; + + /* there was some L4 error, count error and punt packet to the stack */ + if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT)) + goto checksum_fail; + + /* handle packets that were not able to be checksummed due + * to arrival speed, in this case the stack can compute + * the csum. 
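+	 * skb->ip_summed was initialized to CHECKSUM_NONE above, so
+	 * returning here leaves the skb marked for software checksum
+	 * validation by the stack.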
+	 */
+	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
+		return;
+
+	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
+	switch (decoded.inner_prot) {
+	case I40E_RX_PTYPE_INNER_PROT_TCP:
+	case I40E_RX_PTYPE_INNER_PROT_UDP:
+	case I40E_RX_PTYPE_INNER_PROT_SCTP:
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+		/* fall through */
+	default:
+		break;
+	}
+
+	return;
+
+checksum_fail:
+	vsi->back->hw_csum_rx_error++;
+}
+
+/**
+ * i40e_ptype_to_htype - get a hash type
+ * @ptype: the ptype value from the descriptor
+ *
+ * Returns a hash type to be used by skb_set_hash
+ **/
+static inline int i40e_ptype_to_htype(u8 ptype)
+{
+	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
+
+	if (!decoded.known)
+		return PKT_HASH_TYPE_NONE;
+
+	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+	    decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
+		return PKT_HASH_TYPE_L4;
+	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+		 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
+		return PKT_HASH_TYPE_L3;
+	else
+		return PKT_HASH_TYPE_L2;
+}
+
+/**
+ * i40e_rx_hash - set the hash value in the skb
+ * @ring: descriptor ring
+ * @rx_desc: specific descriptor
+ * @skb: skb currently being received and modified
+ * @rx_ptype: Rx packet type
+ **/
+static inline void i40e_rx_hash(struct i40e_ring *ring,
+				union i40e_rx_desc *rx_desc,
+				struct sk_buff *skb,
+				u8 rx_ptype)
+{
+	u32 hash;
+	const __le64 rss_mask =
+		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
+			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
+
+	if (!(ring->netdev->features & NETIF_F_RXHASH))
+		return;
+
+	if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
+		hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
+		skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
+	}
+}
+
+/**
+ * i40evf_process_skb_fields - Populate skb header fields from Rx descriptor
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being populated
+ * @rx_ptype: the packet type decoded by hardware
+ *
+ * This function checks the ring, descriptor, and packet information in
+ * order to populate the hash, checksum, VLAN, protocol, and
+ * other fields within the skb.
+ **/
+static inline
+void i40evf_process_skb_fields(struct i40e_ring *rx_ring,
+			       union i40e_rx_desc *rx_desc, struct sk_buff *skb,
+			       u8 rx_ptype)
+{
+	i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
+
+	i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
+
+	skb_record_rx_queue(skb, rx_ring->queue_index);
+
+	/* modifies the skb - consumes the enet header */
+	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+}
+
+/**
+ * i40e_cleanup_headers - Correct empty headers
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being fixed
+ *
+ * Also address the case where we are pulling data in on pages only
+ * and as such no data is present in the skb header.
+ *
+ * In addition if skb is not at least 60 bytes we need to pad it so that
+ * it is large enough to qualify as a valid Ethernet frame.
+ *
+ * Returns true if an error was encountered and skb was freed.
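+ *
+ * eth_skb_pad() pads the frame to ETH_ZLEN and frees the skb itself on
+ * failure, which is why callers only need to drop their skb pointer.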
+ **/ +static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb) +{ + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; + + return false; +} + +/** + * i40e_reuse_rx_page - page flip buffer and store it back on the ring + * @rx_ring: rx descriptor ring to store buffers on + * @old_buff: donor buffer to have page reused + * + * Synchronizes page for reuse by the adapter + **/ +static void i40e_reuse_rx_page(struct i40e_ring *rx_ring, + struct i40e_rx_buffer *old_buff) +{ + struct i40e_rx_buffer *new_buff; + u16 nta = rx_ring->next_to_alloc; + + new_buff = &rx_ring->rx_bi[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + /* transfer page from old buffer to new buffer */ + new_buff->dma = old_buff->dma; + new_buff->page = old_buff->page; + new_buff->page_offset = old_buff->page_offset; + new_buff->pagecnt_bias = old_buff->pagecnt_bias; +} + +/** + * i40e_page_is_reusable - check if any reuse is possible + * @page: page struct to check + * + * A page is not reusable if it was allocated under low memory + * conditions, or it's not in the same NUMA node as this CPU. + */ +static inline bool i40e_page_is_reusable(struct page *page) +{ + return (page_to_nid(page) == numa_mem_id()) && + !page_is_pfmemalloc(page); +} + +/** + * i40e_can_reuse_rx_page - Determine if this page can be reused by + * the adapter for another receive + * + * @rx_buffer: buffer containing the page + * + * If page is reusable, rx_buffer->page_offset is adjusted to point to + * an unused region in the page. + * + * For small pages, @truesize will be a constant value, half the size + * of the memory at page. We'll attempt to alternate between high and + * low halves of the page, with one half ready for use by the hardware + * and the other half being consumed by the stack. We use the page + * ref count to determine whether the stack has finished consuming the + * portion of this page that was passed up with a previous packet. If + * the page ref count is >1, we'll assume the "other" half page is + * still busy, and this page cannot be reused. + * + * For larger pages, @truesize will be the actual space used by the + * received packet (adjusted upward to an even multiple of the cache + * line size). This will advance through the page by the amount + * actually consumed by the received packets while there is still + * space for a buffer. Each region of larger pages will be used at + * most once, after which the page will not be reused. + * + * In either case, if the page is reusable its refcount is increased. + **/ +static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer) +{ + unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; + struct page *page = rx_buffer->page; + + /* Is any reuse possible? */ + if (unlikely(!i40e_page_is_reusable(page))) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely((page_count(page) - pagecnt_bias) > 1)) + return false; +#else +#define I40E_LAST_OFFSET \ + (SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048) + if (rx_buffer->page_offset > I40E_LAST_OFFSET) + return false; +#endif + + /* If we have drained the page fragment pool we need to update + * the pagecnt_bias and page count so that we fully restock the + * number of references the driver holds. 
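+	 * pagecnt_bias is effectively a local cache of page references:
+	 * one reference is handed to the stack per received buffer by
+	 * decrementing the bias rather than touching the atomic page
+	 * refcount, and the cache is refilled in bulk (USHRT_MAX at a
+	 * time) here once it runs dry.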
+ */ + if (unlikely(!pagecnt_bias)) { + page_ref_add(page, USHRT_MAX); + rx_buffer->pagecnt_bias = USHRT_MAX; + } + + return true; +} + +/** + * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buffer: buffer containing page to add + * @skb: sk_buff to place the data into + * @size: packet length from rx_desc + * + * This function will add the data contained in rx_buffer->page to the skb. + * It will just attach the page as a frag to the skb. + * + * The function will then update the page offset. + **/ +static void i40e_add_rx_frag(struct i40e_ring *rx_ring, + struct i40e_rx_buffer *rx_buffer, + struct sk_buff *skb, + unsigned int size) +{ +#if (PAGE_SIZE < 8192) + unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring)); +#endif + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, + rx_buffer->page_offset, size, truesize); + + /* page is being used so we must update the page offset */ +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif +} + +/** + * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use + * @rx_ring: rx descriptor ring to transact packets on + * @size: size of buffer to add to skb + * + * This function will pull an Rx buffer from the ring and synchronize it + * for use by the CPU. + */ +static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring, + const unsigned int size) +{ + struct i40e_rx_buffer *rx_buffer; + + rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean]; + prefetchw(rx_buffer->page); + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + size, + DMA_FROM_DEVICE); + + /* We have pulled a buffer for use, so decrement pagecnt_bias */ + rx_buffer->pagecnt_bias--; + + return rx_buffer; +} + +/** + * i40e_construct_skb - Allocate skb and populate it + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buffer: rx buffer to pull data from + * @size: size of buffer to add to skb + * + * This function allocates an skb. It then populates it with the page + * data from the current receive descriptor, taking care to set up the + * skb correctly. 
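+ *
+ * Unlike i40e_build_skb(), this path allocates fresh linear skb data,
+ * copies up to I40E_RX_HDR_SIZE bytes of headers into it, and attaches
+ * any remaining payload as a page fragment.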
+ */ +static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring, + struct i40e_rx_buffer *rx_buffer, + unsigned int size) +{ + void *va; +#if (PAGE_SIZE < 8192) + unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = SKB_DATA_ALIGN(size); +#endif + unsigned int headlen; + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + va = page_address(rx_buffer->page) + rx_buffer->page_offset; + prefetch(va); +#if L1_CACHE_BYTES < 128 + prefetch(va + L1_CACHE_BYTES); +#endif + + /* allocate a skb to store the frags */ + skb = __napi_alloc_skb(&rx_ring->q_vector->napi, + I40E_RX_HDR_SIZE, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!skb)) + return NULL; + + /* Determine available headroom for copy */ + headlen = size; + if (headlen > I40E_RX_HDR_SIZE) + headlen = eth_get_headlen(va, I40E_RX_HDR_SIZE); + + /* align pull length to size of long to optimize memcpy performance */ + memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); + + /* update all of the pointers */ + size -= headlen; + if (size) { + skb_add_rx_frag(skb, 0, rx_buffer->page, + rx_buffer->page_offset + headlen, + size, truesize); + + /* buffer is used by skb, update page_offset */ +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif + } else { + /* buffer is unused, reset bias back to rx_buffer */ + rx_buffer->pagecnt_bias++; + } + + return skb; +} + +/** + * i40e_build_skb - Build skb around an existing buffer + * @rx_ring: Rx descriptor ring to transact packets on + * @rx_buffer: Rx buffer to pull data from + * @size: size of buffer to add to skb + * + * This function builds an skb around an existing Rx buffer, taking care + * to set up the skb correctly and avoid any memcpy overhead. + */ +static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring, + struct i40e_rx_buffer *rx_buffer, + unsigned int size) +{ + void *va; +#if (PAGE_SIZE < 8192) + unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(I40E_SKB_PAD + size); +#endif + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + va = page_address(rx_buffer->page) + rx_buffer->page_offset; + prefetch(va); +#if L1_CACHE_BYTES < 128 + prefetch(va + L1_CACHE_BYTES); +#endif + /* build an skb around the page buffer */ + skb = build_skb(va - I40E_SKB_PAD, truesize); + if (unlikely(!skb)) + return NULL; + + /* update pointers within the skb to store the data */ + skb_reserve(skb, I40E_SKB_PAD); + __skb_put(skb, size); + + /* buffer is used by skb, update page_offset */ +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif + + return skb; +} + +/** + * i40e_put_rx_buffer - Clean up used buffer and either recycle or free + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buffer: rx buffer to pull data from + * + * This function will clean up the contents of the rx_buffer. It will + * either recycle the buffer or unmap it and free the associated resources. 
+ */ +static void i40e_put_rx_buffer(struct i40e_ring *rx_ring, + struct i40e_rx_buffer *rx_buffer) +{ + if (i40e_can_reuse_rx_page(rx_buffer)) { + /* hand second half of page back to the ring */ + i40e_reuse_rx_page(rx_ring, rx_buffer); + rx_ring->rx_stats.page_reuse_count++; + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, + i40e_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, I40E_RX_DMA_ATTR); + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); + } + + /* clear contents of buffer_info */ + rx_buffer->page = NULL; +} + +/** + * i40e_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * @skb: Current socket buffer containing buffer in progress + * + * This function updates next to clean. If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. + **/ +static bool i40e_is_non_eop(struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc, + struct sk_buff *skb) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(I40E_RX_DESC(rx_ring, ntc)); + + /* if we are the last buffer then there is nothing else to do */ +#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT) + if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF))) + return false; + + rx_ring->rx_stats.non_eop_descs++; + + return true; +} + +/** + * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf + * @rx_ring: rx descriptor ring to transact packets on + * @budget: Total limit on number of packets to process + * + * This function provides a "bounce buffer" approach to Rx interrupt + * processing. The advantage to this is that on systems that have + * expensive overhead for IOMMU access this provides a means of avoiding + * it by maintaining the mapping of the page to the system. + * + * Returns amount of work completed + **/ +static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + struct sk_buff *skb = rx_ring->skb; + u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); + bool failure = false; + + while (likely(total_rx_packets < (unsigned int)budget)) { + struct i40e_rx_buffer *rx_buffer; + union i40e_rx_desc *rx_desc; + unsigned int size; + u16 vlan_tag; + u8 rx_ptype; + u64 qword; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= I40E_RX_BUFFER_WRITE) { + failure = failure || + i40evf_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + + rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean); + + /* status_error_len will always be zero for unused descriptors + * because it's cleared in cleanup, and overlaps with hdr_addr + * which is always zero because packet split isn't used, if the + * hardware wrote DD then the length will be non-zero + */ + qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we have + * verified the descriptor has been written back. 
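+		 * dma_rmb() is sufficient here (rather than a full
+		 * rmb()) because both the qword read above and the
+		 * field reads below target the same DMA-coherent
+		 * descriptor memory.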
+ */ + dma_rmb(); + + size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> + I40E_RXD_QW1_LENGTH_PBUF_SHIFT; + if (!size) + break; + + i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb); + rx_buffer = i40e_get_rx_buffer(rx_ring, size); + + /* retrieve a buffer from the ring */ + if (skb) + i40e_add_rx_frag(rx_ring, rx_buffer, skb, size); + else if (ring_uses_build_skb(rx_ring)) + skb = i40e_build_skb(rx_ring, rx_buffer, size); + else + skb = i40e_construct_skb(rx_ring, rx_buffer, size); + + /* exit if we failed to retrieve a buffer */ + if (!skb) { + rx_ring->rx_stats.alloc_buff_failed++; + rx_buffer->pagecnt_bias++; + break; + } + + i40e_put_rx_buffer(rx_ring, rx_buffer); + cleaned_count++; + + if (i40e_is_non_eop(rx_ring, rx_desc, skb)) + continue; + + /* ERR_MASK will only have valid bits if EOP set, and + * what we are doing here is actually checking + * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in + * the error field + */ + if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) { + dev_kfree_skb_any(skb); + skb = NULL; + continue; + } + + if (i40e_cleanup_headers(rx_ring, skb)) { + skb = NULL; + continue; + } + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + + qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> + I40E_RXD_QW1_PTYPE_SHIFT; + + /* populate checksum, VLAN, and protocol */ + i40evf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); + + + vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ? + le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0; + + i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb); + i40e_receive_skb(rx_ring, skb, vlan_tag); + skb = NULL; + + /* update budget accounting */ + total_rx_packets++; + } + + rx_ring->skb = skb; + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + rx_ring->q_vector->rx.total_packets += total_rx_packets; + rx_ring->q_vector->rx.total_bytes += total_rx_bytes; + + /* guarantee a trip back through this routine if there was a failure */ + return failure ? budget : (int)total_rx_packets; +} + +static inline u32 i40e_buildreg_itr(const int type, u16 itr) +{ + u32 val; + + /* We don't bother with setting the CLEARPBA bit as the data sheet + * points out doing so is "meaningless since it was already + * auto-cleared". The auto-clearing happens when the interrupt is + * asserted. + * + * Hardware errata 28 for also indicates that writing to a + * xxINT_DYN_CTLx CSR with INTENA_MSK (bit 31) set to 0 will clear + * an event in the PBA anyway so we need to rely on the automask + * to hold pending events for us until the interrupt is re-enabled + * + * The itr value is reported in microseconds, and the register + * value is recorded in 2 microsecond units. For this reason we + * only need to shift by the interval shift - 1 instead of the + * full value. + */ + itr &= I40E_ITR_MASK; + + val = I40E_VFINT_DYN_CTLN1_INTENA_MASK | + (type << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) | + (itr << (I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT - 1)); + + return val; +} + +/* a small macro to shorten up some long lines */ +#define INTREG I40E_VFINT_DYN_CTLN1 + +/* The act of updating the ITR will cause it to immediately trigger. In order + * to prevent this from throwing off adaptive update statistics we defer the + * update so that it can only happen so often. 
So after either Tx or Rx are + * updated we make the adaptive scheme wait until either the ITR completely + * expires via the next_update expiration or we have been through at least + * 3 interrupts. + */ +#define ITR_COUNTDOWN_START 3 + +/** + * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt + * @vsi: the VSI we care about + * @q_vector: q_vector for which itr is being updated and interrupt enabled + * + **/ +static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, + struct i40e_q_vector *q_vector) +{ + struct i40e_hw *hw = &vsi->back->hw; + u32 intval; + + /* These will do nothing if dynamic updates are not enabled */ + i40e_update_itr(q_vector, &q_vector->tx); + i40e_update_itr(q_vector, &q_vector->rx); + + /* This block of logic allows us to get away with only updating + * one ITR value with each interrupt. The idea is to perform a + * pseudo-lazy update with the following criteria. + * + * 1. Rx is given higher priority than Tx if both are in same state + * 2. If we must reduce an ITR that is given highest priority. + * 3. We then give priority to increasing ITR based on amount. + */ + if (q_vector->rx.target_itr < q_vector->rx.current_itr) { + /* Rx ITR needs to be reduced, this is highest priority */ + intval = i40e_buildreg_itr(I40E_RX_ITR, + q_vector->rx.target_itr); + q_vector->rx.current_itr = q_vector->rx.target_itr; + q_vector->itr_countdown = ITR_COUNTDOWN_START; + } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) || + ((q_vector->rx.target_itr - q_vector->rx.current_itr) < + (q_vector->tx.target_itr - q_vector->tx.current_itr))) { + /* Tx ITR needs to be reduced, this is second priority + * Tx ITR needs to be increased more than Rx, fourth priority + */ + intval = i40e_buildreg_itr(I40E_TX_ITR, + q_vector->tx.target_itr); + q_vector->tx.current_itr = q_vector->tx.target_itr; + q_vector->itr_countdown = ITR_COUNTDOWN_START; + } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) { + /* Rx ITR needs to be increased, third priority */ + intval = i40e_buildreg_itr(I40E_RX_ITR, + q_vector->rx.target_itr); + q_vector->rx.current_itr = q_vector->rx.target_itr; + q_vector->itr_countdown = ITR_COUNTDOWN_START; + } else { + /* No ITR update, lowest priority */ + intval = i40e_buildreg_itr(I40E_ITR_NONE, 0); + if (q_vector->itr_countdown) + q_vector->itr_countdown--; + } + + if (!test_bit(__I40E_VSI_DOWN, vsi->state)) + wr32(hw, INTREG(q_vector->reg_idx), intval); +} + +/** + * i40evf_napi_poll - NAPI polling Rx/Tx cleanup routine + * @napi: napi struct with our devices info in it + * @budget: amount of work driver is allowed to do this pass, in packets + * + * This function will clean all queues associated with a q_vector. + * + * Returns the amount of work done + **/ +int i40evf_napi_poll(struct napi_struct *napi, int budget) +{ + struct i40e_q_vector *q_vector = + container_of(napi, struct i40e_q_vector, napi); + struct i40e_vsi *vsi = q_vector->vsi; + struct i40e_ring *ring; + bool clean_complete = true; + bool arm_wb = false; + int budget_per_ring; + int work_done = 0; + + if (test_bit(__I40E_VSI_DOWN, vsi->state)) { + napi_complete(napi); + return 0; + } + + /* Since the actual Tx work is minimal, we can give the Tx a larger + * budget and be more aggressive about cleaning up the Tx descriptors. 
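+	 * The NAPI budget passed down here only governs how Tx skbs are
+	 * freed (napi_consume_skb()); the per-call Tx clean limit is
+	 * vsi->work_limit inside i40e_clean_tx_irq().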
+	 */
+	i40e_for_each_ring(ring, q_vector->tx) {
+		if (!i40e_clean_tx_irq(vsi, ring, budget)) {
+			clean_complete = false;
+			continue;
+		}
+		arm_wb |= ring->arm_wb;
+		ring->arm_wb = false;
+	}
+
+	/* Handle case where we are called by netpoll with a budget of 0 */
+	if (budget <= 0)
+		goto tx_only;
+
+	/* We attempt to distribute budget to each Rx queue fairly, but don't
+	 * allow the budget to go below 1 because that would exit polling early.
+	 */
+	budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
+
+	i40e_for_each_ring(ring, q_vector->rx) {
+		int cleaned = i40e_clean_rx_irq(ring, budget_per_ring);
+
+		work_done += cleaned;
+		/* if we clean as many as budgeted, we must not be done */
+		if (cleaned >= budget_per_ring)
+			clean_complete = false;
+	}
+
+	/* If work not completed, return budget and polling will return */
+	if (!clean_complete) {
+		int cpu_id = smp_processor_id();
+
+		/* It is possible that the interrupt affinity has changed but,
+		 * if the cpu is pegged at 100%, polling will never exit while
+		 * traffic continues and the interrupt will be stuck on this
+		 * cpu. We check to make sure affinity is correct before we
+		 * continue to poll, otherwise we must stop polling so the
+		 * interrupt can move to the correct cpu.
+		 */
+		if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
+			/* Tell napi that we are done polling */
+			napi_complete_done(napi, work_done);
+
+			/* Force an interrupt */
+			i40evf_force_wb(vsi, q_vector);
+
+			/* Return budget-1 so that polling stops */
+			return budget - 1;
+		}
+tx_only:
+		if (arm_wb) {
+			q_vector->tx.ring[0].tx_stats.tx_force_wb++;
+			i40e_enable_wb_on_itr(vsi, q_vector);
+		}
+		return budget;
+	}
+
+	if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
+		q_vector->arm_wb_state = false;
+
+	/* Work is done so exit the polling mode and re-enable the interrupt */
+	napi_complete_done(napi, work_done);
+
+	i40e_update_enable_itr(vsi, q_vector);
+
+	return min(work_done, budget - 1);
+}
+
+/**
+ * i40evf_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
+ * @skb: send buffer
+ * @tx_ring: ring to send buffer on
+ * @flags: the tx flags to be set
+ *
+ * Checks the skb and sets up correspondingly several generic transmit flags
+ * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
+ *
+ * Returns an error code to indicate the frame should be dropped upon error,
+ * otherwise returns 0 to indicate the flags have been set properly.
+ **/
+static inline int i40evf_tx_prepare_vlan_flags(struct sk_buff *skb,
+					       struct i40e_ring *tx_ring,
+					       u32 *flags)
+{
+	__be16 protocol = skb->protocol;
+	u32 tx_flags = 0;
+
+	if (protocol == htons(ETH_P_8021Q) &&
+	    !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
+		/* When HW VLAN acceleration is turned off by the user the
+		 * stack sets the protocol to 8021q so that the driver
+		 * can take any steps required to support the SW only
+		 * VLAN handling. In our case the driver doesn't need
+		 * to take any further steps so just set the protocol
+		 * to the encapsulated ethertype.
+ */ + skb->protocol = vlan_get_protocol(skb); + goto out; + } + + /* if we have a HW VLAN tag being added, default to the HW one */ + if (skb_vlan_tag_present(skb)) { + tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT; + tx_flags |= I40E_TX_FLAGS_HW_VLAN; + /* else if it is a SW VLAN, check the next protocol and store the tag */ + } else if (protocol == htons(ETH_P_8021Q)) { + struct vlan_hdr *vhdr, _vhdr; + + vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr); + if (!vhdr) + return -EINVAL; + + protocol = vhdr->h_vlan_encapsulated_proto; + tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT; + tx_flags |= I40E_TX_FLAGS_SW_VLAN; + } + +out: + *flags = tx_flags; + return 0; +} + +/** + * i40e_tso - set up the tso context descriptor + * @first: pointer to first Tx buffer for xmit + * @hdr_len: ptr to the size of the packet header + * @cd_type_cmd_tso_mss: Quad Word 1 + * + * Returns 0 if no TSO can happen, 1 if tso is going, or error + **/ +static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len, + u64 *cd_type_cmd_tso_mss) +{ + struct sk_buff *skb = first->skb; + u64 cd_cmd, cd_tso_len, cd_mss; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; + } l4; + u32 paylen, l4_offset; + u16 gso_segs, gso_size; + int err; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (!skb_is_gso(skb)) + return 0; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + + /* initialize outer IP header fields */ + if (ip.v4->version == 4) { + ip.v4->tot_len = 0; + ip.v4->check = 0; + } else { + ip.v6->payload_len = 0; + } + + if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | + SKB_GSO_GRE_CSUM | + SKB_GSO_IPXIP4 | + SKB_GSO_IPXIP6 | + SKB_GSO_UDP_TUNNEL | + SKB_GSO_UDP_TUNNEL_CSUM)) { + if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && + (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) { + l4.udp->len = 0; + + /* determine offset of outer transport header */ + l4_offset = l4.hdr - skb->data; + + /* remove payload length from outer checksum */ + paylen = skb->len - l4_offset; + csum_replace_by_diff(&l4.udp->check, + (__force __wsum)htonl(paylen)); + } + + /* reset pointers to inner headers */ + ip.hdr = skb_inner_network_header(skb); + l4.hdr = skb_inner_transport_header(skb); + + /* initialize inner IP header fields */ + if (ip.v4->version == 4) { + ip.v4->tot_len = 0; + ip.v4->check = 0; + } else { + ip.v6->payload_len = 0; + } + } + + /* determine offset of inner transport header */ + l4_offset = l4.hdr - skb->data; + + /* remove payload length from inner checksum */ + paylen = skb->len - l4_offset; + csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen)); + + /* compute length of segmentation header */ + *hdr_len = (l4.tcp->doff * 4) + l4_offset; + + /* pull values out of skb_shinfo */ + gso_size = skb_shinfo(skb)->gso_size; + gso_segs = skb_shinfo(skb)->gso_segs; + + /* update GSO size and bytecount with header size */ + first->gso_segs = gso_segs; + first->bytecount += (first->gso_segs - 1) * *hdr_len; + + /* find the field values */ + cd_cmd = I40E_TX_CTX_DESC_TSO; + cd_tso_len = skb->len - *hdr_len; + cd_mss = gso_size; + *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) | + (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) | + (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT); + return 1; +} + +/** + * i40e_tx_enable_csum - Enable Tx checksum 
offloads + * @skb: send buffer + * @tx_flags: pointer to Tx flags currently set + * @td_cmd: Tx descriptor command bits to set + * @td_offset: Tx descriptor header offsets to set + * @tx_ring: Tx descriptor ring + * @cd_tunneling: ptr to context desc bits + **/ +static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, + u32 *td_cmd, u32 *td_offset, + struct i40e_ring *tx_ring, + u32 *cd_tunneling) +{ + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; + } l4; + unsigned char *exthdr; + u32 offset, cmd = 0; + __be16 frag_off; + u8 l4_proto = 0; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + + /* compute outer L2 header size */ + offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; + + if (skb->encapsulation) { + u32 tunnel = 0; + /* define outer network header type */ + if (*tx_flags & I40E_TX_FLAGS_IPV4) { + tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ? + I40E_TX_CTX_EXT_IP_IPV4 : + I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; + + l4_proto = ip.v4->protocol; + } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { + tunnel |= I40E_TX_CTX_EXT_IP_IPV6; + + exthdr = ip.hdr + sizeof(*ip.v6); + l4_proto = ip.v6->nexthdr; + if (l4.hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_proto, &frag_off); + } + + /* define outer transport */ + switch (l4_proto) { + case IPPROTO_UDP: + tunnel |= I40E_TXD_CTX_UDP_TUNNELING; + *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL; + break; + case IPPROTO_GRE: + tunnel |= I40E_TXD_CTX_GRE_TUNNELING; + *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL; + break; + case IPPROTO_IPIP: + case IPPROTO_IPV6: + *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL; + l4.hdr = skb_inner_network_header(skb); + break; + default: + if (*tx_flags & I40E_TX_FLAGS_TSO) + return -1; + + skb_checksum_help(skb); + return 0; + } + + /* compute outer L3 header size */ + tunnel |= ((l4.hdr - ip.hdr) / 4) << + I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT; + + /* switch IP header pointer from outer to inner header */ + ip.hdr = skb_inner_network_header(skb); + + /* compute tunnel header size */ + tunnel |= ((ip.hdr - l4.hdr) / 2) << + I40E_TXD_CTX_QW0_NATLEN_SHIFT; + + /* indicate if we need to offload outer UDP header */ + if ((*tx_flags & I40E_TX_FLAGS_TSO) && + !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && + (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) + tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK; + + /* record tunnel offload values */ + *cd_tunneling |= tunnel; + + /* switch L4 header pointer from outer to inner */ + l4.hdr = skb_inner_transport_header(skb); + l4_proto = 0; + + /* reset type as we transition from outer to inner headers */ + *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6); + if (ip.v4->version == 4) + *tx_flags |= I40E_TX_FLAGS_IPV4; + if (ip.v6->version == 6) + *tx_flags |= I40E_TX_FLAGS_IPV6; + } + + /* Enable IP checksum offloads */ + if (*tx_flags & I40E_TX_FLAGS_IPV4) { + l4_proto = ip.v4->protocol; + /* the stack computes the IP header already, the only time we + * need the hardware to recompute it is in the case of TSO. + */ + cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ? 
+				I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
+				I40E_TX_DESC_CMD_IIPT_IPV4;
+	} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
+		cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
+
+		exthdr = ip.hdr + sizeof(*ip.v6);
+		l4_proto = ip.v6->nexthdr;
+		if (l4.hdr != exthdr)
+			ipv6_skip_exthdr(skb, exthdr - skb->data,
+					 &l4_proto, &frag_off);
+	}
+
+	/* compute inner L3 header size */
+	offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+
+	/* Enable L4 checksum offloads */
+	switch (l4_proto) {
+	case IPPROTO_TCP:
+		/* enable checksum offloads */
+		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
+		offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		break;
+	case IPPROTO_SCTP:
+		/* enable SCTP checksum offload */
+		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
+		offset |= (sizeof(struct sctphdr) >> 2) <<
+			  I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		break;
+	case IPPROTO_UDP:
+		/* enable UDP checksum offload */
+		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
+		offset |= (sizeof(struct udphdr) >> 2) <<
+			  I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		break;
+	default:
+		if (*tx_flags & I40E_TX_FLAGS_TSO)
+			return -1;
+		skb_checksum_help(skb);
+		return 0;
+	}
+
+	*td_cmd |= cmd;
+	*td_offset |= offset;
+
+	return 1;
+}
+
+/**
+ * i40e_create_tx_ctx - Build the Tx context descriptor
+ * @tx_ring:  ring to create the descriptor on
+ * @cd_type_cmd_tso_mss: Quad Word 1
+ * @cd_tunneling: Quad Word 0 - bits 0-31
+ * @cd_l2tag2: Quad Word 0 - bits 32-63
+ **/
+static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
+			       const u64 cd_type_cmd_tso_mss,
+			       const u32 cd_tunneling, const u32 cd_l2tag2)
+{
+	struct i40e_tx_context_desc *context_desc;
+	int i = tx_ring->next_to_use;
+
+	if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
+	    !cd_tunneling && !cd_l2tag2)
+		return;
+
+	/* grab the next descriptor */
+	context_desc = I40E_TX_CTXTDESC(tx_ring, i);
+
+	i++;
+	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
+
+	/* cpu_to_le32 and assign to struct fields */
+	context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
+	context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
+	context_desc->rsvd = cpu_to_le16(0);
+	context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
+}
+
+/**
+ * __i40evf_chk_linearize - Check if there are more than 8 buffers per packet
+ * @skb:      send buffer
+ *
+ * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
+ * and so we need to figure out the cases where we need to linearize the skb.
+ *
+ * For TSO we need to count the TSO header and segment payload separately.
+ * As such we need to check cases where we have 7 fragments or more as we
+ * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
+ * the segment payload in the first descriptor, and another 7 for the
+ * fragments.
+ **/
+bool __i40evf_chk_linearize(struct sk_buff *skb)
+{
+	const struct skb_frag_struct *frag, *stale;
+	int nr_frags, sum;
+
+	/* no need to check if number of frags is less than 7 */
+	nr_frags = skb_shinfo(skb)->nr_frags;
+	if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
+		return false;
+
+	/* We need to walk through the list and validate that each group
+	 * of 6 fragments totals at least gso_size.
+	 */
+	nr_frags -= I40E_MAX_BUFFER_TXD - 2;
+	frag = &skb_shinfo(skb)->frags[0];
+
+	/* Initialize size to the negative value of gso_size minus 1. We
+	 * use this as the worst case scenario in which the frag ahead
+	 * of us only provides one byte which is why we are limited to 6
+	 * descriptors for a single transmit as the header and previous
+	 * fragment are already consuming 2 descriptors.
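+	 *
+	 * As a worked example, with a typical gso_size of 1448 the sum
+	 * starts at -1447; the first six fragments together must carry
+	 * roughly a full segment of data to keep the running sum
+	 * non-negative, otherwise we give up and linearize.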
+	 */
+	sum = 1 - skb_shinfo(skb)->gso_size;
+
+	/* Add size of frags 0 through 4 to create our initial sum */
+	sum += skb_frag_size(frag++);
+	sum += skb_frag_size(frag++);
+	sum += skb_frag_size(frag++);
+	sum += skb_frag_size(frag++);
+	sum += skb_frag_size(frag++);
+
+	/* Walk through fragments adding latest fragment, testing it, and
+	 * then removing stale fragments from the sum.
+	 */
+	for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
+		int stale_size = skb_frag_size(stale);
+
+		sum += skb_frag_size(frag++);
+
+		/* The stale fragment may present us with a smaller
+		 * descriptor than the actual fragment size. To account
+		 * for that we need to remove all the data on the front and
+		 * figure out what the remainder would be in the last
+		 * descriptor associated with the fragment.
+		 */
+		if (stale_size > I40E_MAX_DATA_PER_TXD) {
+			int align_pad = -(stale->page_offset) &
+					(I40E_MAX_READ_REQ_SIZE - 1);
+
+			sum -= align_pad;
+			stale_size -= align_pad;
+
+			do {
+				sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
+				stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
+			} while (stale_size > I40E_MAX_DATA_PER_TXD);
+		}
+
+		/* if sum is negative we failed to make sufficient progress */
+		if (sum < 0)
+			return true;
+
+		if (!nr_frags--)
+			break;
+
+		sum -= stale_size;
+	}
+
+	return false;
+}
+
+/**
+ * __i40evf_maybe_stop_tx - 2nd level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size:    the size buffer we want to assure is available
+ *
+ * Returns -EBUSY if a stop is needed, else 0
+ **/
+int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+	/* Memory barrier before checking head and tail */
+	smp_mb();
+
+	/* Check again in case another CPU has just made room available. */
+	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
+		return -EBUSY;
+
+	/* A reprieve!
- use start_queue because it doesn't call schedule */ + netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + return 0; +} + +/** + * i40evf_tx_map - Build the Tx descriptor + * @tx_ring: ring to send buffer on + * @skb: send buffer + * @first: first buffer info buffer to use + * @tx_flags: collected send information + * @hdr_len: size of the packet header + * @td_cmd: the command field in the descriptor + * @td_offset: offset for checksum or crc + **/ +static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, + struct i40e_tx_buffer *first, u32 tx_flags, + const u8 hdr_len, u32 td_cmd, u32 td_offset) +{ + unsigned int data_len = skb->data_len; + unsigned int size = skb_headlen(skb); + struct skb_frag_struct *frag; + struct i40e_tx_buffer *tx_bi; + struct i40e_tx_desc *tx_desc; + u16 i = tx_ring->next_to_use; + u32 td_tag = 0; + dma_addr_t dma; + + if (tx_flags & I40E_TX_FLAGS_HW_VLAN) { + td_cmd |= I40E_TX_DESC_CMD_IL2TAG1; + td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >> + I40E_TX_FLAGS_VLAN_SHIFT; + } + + first->tx_flags = tx_flags; + + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + + tx_desc = I40E_TX_DESC(tx_ring, i); + tx_bi = first; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED; + + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_bi, len, size); + dma_unmap_addr_set(tx_bi, dma, dma); + + /* align size to end of page */ + max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1); + tx_desc->buffer_addr = cpu_to_le64(dma); + + while (unlikely(size > I40E_MAX_DATA_PER_TXD)) { + tx_desc->cmd_type_offset_bsz = + build_ctob(td_cmd, td_offset, + max_data, td_tag); + + tx_desc++; + i++; + + if (i == tx_ring->count) { + tx_desc = I40E_TX_DESC(tx_ring, 0); + i = 0; + } + + dma += max_data; + size -= max_data; + + max_data = I40E_MAX_DATA_PER_TXD_ALIGNED; + tx_desc->buffer_addr = cpu_to_le64(dma); + } + + if (likely(!data_len)) + break; + + tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, + size, td_tag); + + tx_desc++; + i++; + + if (i == tx_ring->count) { + tx_desc = I40E_TX_DESC(tx_ring, 0); + i = 0; + } + + size = skb_frag_size(frag); + data_len -= size; + + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, + DMA_TO_DEVICE); + + tx_bi = &tx_ring->tx_bi[i]; + } + + netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); + + i++; + if (i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + + i40e_maybe_stop_tx(tx_ring, DESC_NEEDED); + + /* write last descriptor with RS and EOP bits */ + td_cmd |= I40E_TXD_CMD; + tx_desc->cmd_type_offset_bsz = + build_ctob(td_cmd, td_offset, size, td_tag); + + /* Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. + * + * We also use this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. 
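+	 *
+	 * Without the barrier a weakly ordered CPU could make the updated
+	 * next_to_watch visible to the clean-up path before the descriptor
+	 * writes above, letting it inspect a partially written descriptor.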
+ */ + wmb(); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + + /* notify HW of packet */ + if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { + writel(i, tx_ring->tail); + + /* we need this if more than one processor can write to our tail + * at a time, it synchronizes IO on IA64/Altix systems + */ + mmiowb(); + } + + return; + +dma_error: + dev_info(tx_ring->dev, "TX DMA map failed\n"); + + /* clear dma mappings for failed tx_bi map */ + for (;;) { + tx_bi = &tx_ring->tx_bi[i]; + i40e_unmap_and_free_tx_resource(tx_ring, tx_bi); + if (tx_bi == first) + break; + if (i == 0) + i = tx_ring->count; + i--; + } + + tx_ring->next_to_use = i; +} + +/** + * i40e_xmit_frame_ring - Sends buffer on Tx ring + * @skb: send buffer + * @tx_ring: ring to send buffer on + * + * Returns NETDEV_TX_OK if sent, else an error code + **/ +static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, + struct i40e_ring *tx_ring) +{ + u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT; + u32 cd_tunneling = 0, cd_l2tag2 = 0; + struct i40e_tx_buffer *first; + u32 td_offset = 0; + u32 tx_flags = 0; + __be16 protocol; + u32 td_cmd = 0; + u8 hdr_len = 0; + int tso, count; + + /* prefetch the data, we'll need it later */ + prefetch(skb->data); + + i40e_trace(xmit_frame_ring, skb, tx_ring); + + count = i40e_xmit_descriptor_count(skb); + if (i40e_chk_linearize(skb, count)) { + if (__skb_linearize(skb)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + count = i40e_txd_use_count(skb->len); + tx_ring->tx_stats.tx_linearize++; + } + + /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD, + * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD, + * + 4 desc gap to avoid the cache line where head is, + * + 1 desc for context descriptor, + * otherwise try next time + */ + if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) { + tx_ring->tx_stats.tx_busy++; + return NETDEV_TX_BUSY; + } + + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_bi[tx_ring->next_to_use]; + first->skb = skb; + first->bytecount = skb->len; + first->gso_segs = 1; + + /* prepare the xmit flags */ + if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) + goto out_drop; + + /* obtain protocol of skb */ + protocol = vlan_get_protocol(skb); + + /* setup IPv4/IPv6 offloads */ + if (protocol == htons(ETH_P_IP)) + tx_flags |= I40E_TX_FLAGS_IPV4; + else if (protocol == htons(ETH_P_IPV6)) + tx_flags |= I40E_TX_FLAGS_IPV6; + + tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss); + + if (tso < 0) + goto out_drop; + else if (tso) + tx_flags |= I40E_TX_FLAGS_TSO; + + /* Always offload the checksum, since it's in the data descriptor */ + tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset, + tx_ring, &cd_tunneling); + if (tso < 0) + goto out_drop; + + skb_tx_timestamp(skb); + + /* always enable CRC insertion offload */ + td_cmd |= I40E_TX_DESC_CMD_ICRC; + + i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss, + cd_tunneling, cd_l2tag2); + + i40evf_tx_map(tx_ring, skb, first, tx_flags, hdr_len, + td_cmd, td_offset); + + return NETDEV_TX_OK; + +out_drop: + i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring); + dev_kfree_skb_any(first->skb); + first->skb = NULL; + return NETDEV_TX_OK; +} + +/** + * i40evf_xmit_frame - Selects the correct VSI and Tx queue to send buffer + * @skb: send buffer + * @netdev: network interface device structure + * + * Returns NETDEV_TX_OK if sent, else an error code + **/ +netdev_tx_t i40evf_xmit_frame(struct 
sk_buff *skb, struct net_device *netdev)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	struct i40e_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping];
+
+	/* hardware can't handle really short frames, hardware padding works
+	 * beyond this point
+	 */
+	if (unlikely(skb->len < I40E_MIN_TX_LEN)) {
+		if (skb_pad(skb, I40E_MIN_TX_LEN - skb->len))
+			return NETDEV_TX_OK;
+		skb->len = I40E_MIN_TX_LEN;
+		skb_set_tail_pointer(skb, I40E_MIN_TX_LEN);
+	}
+
+	return i40e_xmit_frame_ring(skb, tx_ring);
+}
diff --git a/drivers/net/ethernet/intel/iavf/i40e_txrx.h b/drivers/net/ethernet/intel/iavf/i40e_txrx.h
new file mode 100644
index 000000000000..3b5a63b3236e
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/i40e_txrx.h
@@ -0,0 +1,524 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#ifndef _I40E_TXRX_H_
+#define _I40E_TXRX_H_
+
+/* Interrupt Throttling and Rate Limiting Goodies */
+#define I40E_DEFAULT_IRQ_WORK		256
+
+/* The datasheet for the X710 and XL710 indicates that the maximum value for
+ * the ITR is 8160usec which is then called out as 0xFF0 with a 2usec
+ * resolution. 8160 is 0x1FE0 when written out in hex. So instead of storing
+ * the register value which is divided by 2 lets use the actual values and
+ * avoid an excessive amount of translation.
+ */
+#define I40E_ITR_DYNAMIC	0x8000	/* use top bit as a flag */
+#define I40E_ITR_MASK		0x1FFE	/* mask for ITR register value */
+#define I40E_MIN_ITR		     2	/* reg uses 2 usec resolution */
+#define I40E_ITR_100K		    10	/* all values below must be even */
+#define I40E_ITR_50K		    20
+#define I40E_ITR_20K		    50
+#define I40E_ITR_18K		    60
+#define I40E_ITR_8K		   122
+#define I40E_MAX_ITR		  8160	/* maximum value as per datasheet */
+#define ITR_TO_REG(setting)	((setting) & ~I40E_ITR_DYNAMIC)
+#define ITR_REG_ALIGN(setting)	__ALIGN_MASK(setting, ~I40E_ITR_MASK)
+#define ITR_IS_DYNAMIC(setting)	(!!((setting) & I40E_ITR_DYNAMIC))
+
+#define I40E_ITR_RX_DEF		(I40E_ITR_20K | I40E_ITR_DYNAMIC)
+#define I40E_ITR_TX_DEF		(I40E_ITR_20K | I40E_ITR_DYNAMIC)
+
+/* 0x40 is the enable bit for interrupt rate limiting, and must be set if
+ * the value of the rate limit is non-zero
+ */
+#define INTRL_ENA		BIT(6)
+#define I40E_MAX_INTRL		0x3B	/* reg uses 4 usec resolution */
+#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)
+#define INTRL_USEC_TO_REG(set)	((set) ? ((set) >> 2) | INTRL_ENA : 0)
+#define I40E_INTRL_8K		125	/* 8000 ints/sec */
+#define I40E_INTRL_62K		16	/* 62500 ints/sec */
+#define I40E_INTRL_83K		12	/* 83333 ints/sec */
+
+#define I40E_QUEUE_END_OF_LIST 0x7FF
+
+/* this enum matches hardware bits and is meant to be used by DYN_CTLN
+ * registers and QINT registers or more generally anywhere in the manual
+ * mentioning ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
+ * register but instead is a special value meaning "don't update" ITR0/1/2.
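+ *
+ * For example, writing 3 (I40E_ITR_NONE) in the ITR_INDX field of a
+ * DYN_CTLN register leaves all three ITR timers unchanged while the
+ * rest of the register write still takes effect.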
+ */ +enum i40e_dyn_idx_t { + I40E_IDX_ITR0 = 0, + I40E_IDX_ITR1 = 1, + I40E_IDX_ITR2 = 2, + I40E_ITR_NONE = 3 /* ITR_NONE must not be used as an index */ +}; + +/* these are indexes into ITRN registers */ +#define I40E_RX_ITR I40E_IDX_ITR0 +#define I40E_TX_ITR I40E_IDX_ITR1 +#define I40E_PE_ITR I40E_IDX_ITR2 + +/* Supported RSS offloads */ +#define I40E_DEFAULT_RSS_HENA ( \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \ + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \ + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \ + BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD)) + +#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP)) + +/* Supported Rx Buffer Sizes (a multiple of 128) */ +#define I40E_RXBUFFER_256 256 +#define I40E_RXBUFFER_1536 1536 /* 128B aligned standard Ethernet frame */ +#define I40E_RXBUFFER_2048 2048 +#define I40E_RXBUFFER_3072 3072 /* Used for large frames w/ padding */ +#define I40E_MAX_RXBUFFER 9728 /* largest size for single descriptor */ + +/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we + * reserve 2 more, and skb_shared_info adds an additional 384 bytes more, + * this adds up to 512 bytes of extra data meaning the smallest allocation + * we could have is 1K. + * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab) + * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab) + */ +#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256 +#define I40E_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2)) +#define i40e_rx_desc i40e_32byte_rx_desc + +#define I40E_RX_DMA_ATTR \ + (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) + +/* Attempt to maximize the headroom available for incoming frames. We + * use a 2K buffer for receives and need 1536/1534 to store the data for + * the frame. This leaves us with 512 bytes of room. From that we need + * to deduct the space needed for the shared info and the padding needed + * to IP align the frame. + * + * Note: For cache line sizes 256 or larger this value is going to end + * up negative. In these cases we should fall back to the legacy + * receive path. + */ +#if (PAGE_SIZE < 8192) +#define I40E_2K_TOO_SMALL_WITH_PADDING \ +((NET_SKB_PAD + I40E_RXBUFFER_1536) > SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048)) + +static inline int i40e_compute_pad(int rx_buf_len) +{ + int page_size, pad_size; + + page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2); + pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len; + + return pad_size; +} + +static inline int i40e_skb_pad(void) +{ + int rx_buf_len; + + /* If a 2K buffer cannot handle a standard Ethernet frame then + * optimize padding for a 3K buffer instead of a 1.5K buffer. + * + * For a 3K buffer we need to add enough padding to allow for + * tailroom due to NET_IP_ALIGN possibly shifting us out of + * cache-line alignment. 
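+	 *
+	 * Rough numbers, assuming a 4K page and a 64B cache line: a 2K
+	 * buffer minus the skb_shared_info overhead leaves on the order
+	 * of 1.7K, so a 1536B buffer plus padding still fits; with a 256B
+	 * cache line the overhead grows enough that the 2K case above is
+	 * flagged as too small.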
+ */ + if (I40E_2K_TOO_SMALL_WITH_PADDING) + rx_buf_len = I40E_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN); + else + rx_buf_len = I40E_RXBUFFER_1536; + + /* if needed make room for NET_IP_ALIGN */ + rx_buf_len -= NET_IP_ALIGN; + + return i40e_compute_pad(rx_buf_len); +} + +#define I40E_SKB_PAD i40e_skb_pad() +#else +#define I40E_2K_TOO_SMALL_WITH_PADDING false +#define I40E_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) +#endif + +/** + * i40e_test_staterr - tests bits in Rx descriptor status and error fields + * @rx_desc: pointer to receive descriptor (in le64 format) + * @stat_err_bits: value to mask + * + * This function does some fast chicanery in order to return the + * value of the mask which is really only used for boolean tests. + * The status_error_len doesn't need to be shifted because it begins + * at offset zero. + */ +static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc, + const u64 stat_err_bits) +{ + return !!(rx_desc->wb.qword1.status_error_len & + cpu_to_le64(stat_err_bits)); +} + +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define I40E_RX_BUFFER_WRITE 32 /* Must be power of 2 */ +#define I40E_RX_INCREMENT(r, i) \ + do { \ + (i)++; \ + if ((i) == (r)->count) \ + i = 0; \ + r->next_to_clean = i; \ + } while (0) + +#define I40E_RX_NEXT_DESC(r, i, n) \ + do { \ + (i)++; \ + if ((i) == (r)->count) \ + i = 0; \ + (n) = I40E_RX_DESC((r), (i)); \ + } while (0) + +#define I40E_RX_NEXT_DESC_PREFETCH(r, i, n) \ + do { \ + I40E_RX_NEXT_DESC((r), (i), (n)); \ + prefetch((n)); \ + } while (0) + +#define I40E_MAX_BUFFER_TXD 8 +#define I40E_MIN_TX_LEN 17 + +/* The size limit for a transmit buffer in a descriptor is (16K - 1). + * In order to align with the read requests we will align the value to + * the nearest 4K which represents our maximum read request size. + */ +#define I40E_MAX_READ_REQ_SIZE 4096 +#define I40E_MAX_DATA_PER_TXD (16 * 1024 - 1) +#define I40E_MAX_DATA_PER_TXD_ALIGNED \ + (I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1)) + +/** + * i40e_txd_use_count - estimate the number of descriptors needed for Tx + * @size: transmit request size in bytes + * + * Due to hardware alignment restrictions (4K alignment), we need to + * assume that we can have no more than 12K of data per descriptor, even + * though each descriptor can take up to 16K - 1 bytes of aligned memory. + * Thus, we need to divide by 12K. But division is slow! Instead, + * we decompose the operation into shifts and one relatively cheap + * multiply operation. + * + * To divide by 12K, we first divide by 4K, then divide by 3: + * To divide by 4K, shift right by 12 bits + * To divide by 3, multiply by 85, then divide by 256 + * (Divide by 256 is done by shifting right by 8 bits) + * Finally, we add one to round up. Because 256 isn't an exact multiple of + * 3, we'll underestimate near each multiple of 12K. This is actually more + * accurate as we have 4K - 1 of wiggle room that we can fit into the last + * segment. For our purposes this is accurate out to 1M which is orders of + * magnitude greater than our largest possible GSO size. 
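+ *
+ * Worked example: a 64KB GSO send gives ((65536 * 85) >> 20) + 1 = 6
+ * descriptors, which matches the exact DIV_ROUND_UP(65536, 12288) = 6.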
+ * + * This would then be implemented as: + * return (((size >> 12) * 85) >> 8) + 1; + * + * Since multiplication and division are commutative, we can reorder + * operations into: + * return ((size * 85) >> 20) + 1; + */ +static inline unsigned int i40e_txd_use_count(unsigned int size) +{ + return ((size * 85) >> 20) + 1; +} + +/* Tx Descriptors needed, worst case */ +#define DESC_NEEDED (MAX_SKB_FRAGS + 6) +#define I40E_MIN_DESC_PENDING 4 + +#define I40E_TX_FLAGS_HW_VLAN BIT(1) +#define I40E_TX_FLAGS_SW_VLAN BIT(2) +#define I40E_TX_FLAGS_TSO BIT(3) +#define I40E_TX_FLAGS_IPV4 BIT(4) +#define I40E_TX_FLAGS_IPV6 BIT(5) +#define I40E_TX_FLAGS_FCCRC BIT(6) +#define I40E_TX_FLAGS_FSO BIT(7) +#define I40E_TX_FLAGS_FD_SB BIT(9) +#define I40E_TX_FLAGS_VXLAN_TUNNEL BIT(10) +#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000 +#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 +#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29 +#define I40E_TX_FLAGS_VLAN_SHIFT 16 + +struct i40e_tx_buffer { + struct i40e_tx_desc *next_to_watch; + union { + struct sk_buff *skb; + void *raw_buf; + }; + unsigned int bytecount; + unsigned short gso_segs; + + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + u32 tx_flags; +}; + +struct i40e_rx_buffer { + dma_addr_t dma; + struct page *page; +#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) + __u32 page_offset; +#else + __u16 page_offset; +#endif + __u16 pagecnt_bias; +}; + +struct i40e_queue_stats { + u64 packets; + u64 bytes; +}; + +struct i40e_tx_queue_stats { + u64 restart_queue; + u64 tx_busy; + u64 tx_done_old; + u64 tx_linearize; + u64 tx_force_wb; + int prev_pkt_ctr; + u64 tx_lost_interrupt; +}; + +struct i40e_rx_queue_stats { + u64 non_eop_descs; + u64 alloc_page_failed; + u64 alloc_buff_failed; + u64 page_reuse_count; + u64 realloc_count; +}; + +enum i40e_ring_state_t { + __I40E_TX_FDIR_INIT_DONE, + __I40E_TX_XPS_INIT_DONE, + __I40E_RING_STATE_NBITS /* must be last */ +}; + +/* some useful defines for virtchannel interface, which + * is the only remaining user of header split + */ +#define I40E_RX_DTYPE_NO_SPLIT 0 +#define I40E_RX_DTYPE_HEADER_SPLIT 1 +#define I40E_RX_DTYPE_SPLIT_ALWAYS 2 +#define I40E_RX_SPLIT_L2 0x1 +#define I40E_RX_SPLIT_IP 0x2 +#define I40E_RX_SPLIT_TCP_UDP 0x4 +#define I40E_RX_SPLIT_SCTP 0x8 + +/* struct that defines a descriptor ring, associated with a VSI */ +struct i40e_ring { + struct i40e_ring *next; /* pointer to next ring in q_vector */ + void *desc; /* Descriptor ring memory */ + struct device *dev; /* Used for DMA mapping */ + struct net_device *netdev; /* netdev ring maps to */ + union { + struct i40e_tx_buffer *tx_bi; + struct i40e_rx_buffer *rx_bi; + }; + DECLARE_BITMAP(state, __I40E_RING_STATE_NBITS); + u16 queue_index; /* Queue number of ring */ + u8 dcb_tc; /* Traffic class of ring */ + u8 __iomem *tail; + + /* high bit set means dynamic, use accessors routines to read/write. + * hardware only supports 2us resolution for the ITR registers. + * these values always store the USER setting, and must be converted + * before programming to a register. 
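+	 *
+	 * For example, a stored value of (I40E_ITR_DYNAMIC | 50) means
+	 * "adaptive, currently 50 usecs"; ITR_TO_REG() strips the flag
+	 * before the value is converted to the register's 2 usec units.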
+ */ + u16 itr_setting; + + u16 count; /* Number of descriptors */ + u16 reg_idx; /* HW register index of the ring */ + u16 rx_buf_len; + + /* used in interrupt processing */ + u16 next_to_use; + u16 next_to_clean; + + u8 atr_sample_rate; + u8 atr_count; + + bool ring_active; /* is ring online or not */ + bool arm_wb; /* do something to arm write back */ + u8 packet_stride; + + u16 flags; +#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0) +#define I40E_RXR_FLAGS_BUILD_SKB_ENABLED BIT(1) + + /* stats structs */ + struct i40e_queue_stats stats; + struct u64_stats_sync syncp; + union { + struct i40e_tx_queue_stats tx_stats; + struct i40e_rx_queue_stats rx_stats; + }; + + unsigned int size; /* length of descriptor ring in bytes */ + dma_addr_t dma; /* physical address of ring */ + + struct i40e_vsi *vsi; /* Backreference to associated VSI */ + struct i40e_q_vector *q_vector; /* Backreference to associated vector */ + + struct rcu_head rcu; /* to avoid race on free */ + u16 next_to_alloc; + struct sk_buff *skb; /* When i40evf_clean_rx_ring_irq() must + * return before it sees the EOP for + * the current packet, we save that skb + * here and resume receiving this + * packet the next time + * i40evf_clean_rx_ring_irq() is called + * for this ring. + */ +} ____cacheline_internodealigned_in_smp; + +static inline bool ring_uses_build_skb(struct i40e_ring *ring) +{ + return !!(ring->flags & I40E_RXR_FLAGS_BUILD_SKB_ENABLED); +} + +static inline void set_ring_build_skb_enabled(struct i40e_ring *ring) +{ + ring->flags |= I40E_RXR_FLAGS_BUILD_SKB_ENABLED; +} + +static inline void clear_ring_build_skb_enabled(struct i40e_ring *ring) +{ + ring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED; +} + +#define I40E_ITR_ADAPTIVE_MIN_INC 0x0002 +#define I40E_ITR_ADAPTIVE_MIN_USECS 0x0002 +#define I40E_ITR_ADAPTIVE_MAX_USECS 0x007e +#define I40E_ITR_ADAPTIVE_LATENCY 0x8000 +#define I40E_ITR_ADAPTIVE_BULK 0x0000 +#define ITR_IS_BULK(x) (!((x) & I40E_ITR_ADAPTIVE_LATENCY)) + +struct i40e_ring_container { + struct i40e_ring *ring; /* pointer to linked list of ring(s) */ + unsigned long next_update; /* jiffies value of next update */ + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ + u16 count; + u16 target_itr; /* target ITR setting for ring(s) */ + u16 current_itr; /* current ITR setting for ring(s) */ +}; + +/* iterator for handling rings in ring container */ +#define i40e_for_each_ring(pos, head) \ + for (pos = (head).ring; pos != NULL; pos = pos->next) + +static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring) +{ +#if (PAGE_SIZE < 8192) + if (ring->rx_buf_len > (PAGE_SIZE / 2)) + return 1; +#endif + return 0; +} + +#define i40e_rx_pg_size(_ring) (PAGE_SIZE << i40e_rx_pg_order(_ring)) + +bool i40evf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count); +netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev); +void i40evf_clean_tx_ring(struct i40e_ring *tx_ring); +void i40evf_clean_rx_ring(struct i40e_ring *rx_ring); +int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring); +int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring); +void i40evf_free_tx_resources(struct i40e_ring *tx_ring); +void i40evf_free_rx_resources(struct i40e_ring *rx_ring); +int i40evf_napi_poll(struct napi_struct *napi, int budget); +void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector); +u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw); +void i40evf_detect_recover_hung(struct i40e_vsi 
*vsi);
+int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
+bool __i40evf_chk_linearize(struct sk_buff *skb);
+
+/**
+ * i40e_xmit_descriptor_count - calculate number of Tx descriptors needed
+ * @skb:     send buffer
+ *
+ * Returns number of data descriptors needed for this skb. Returns 0 to
+ * indicate there are not enough descriptors available in this ring since
+ * we need at least one descriptor.
+ **/
+static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
+{
+	const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
+	int count = 0, size = skb_headlen(skb);
+
+	for (;;) {
+		count += i40e_txd_use_count(size);
+
+		if (!nr_frags--)
+			break;
+
+		size = skb_frag_size(frag++);
+	}
+
+	return count;
+}
+
+/**
+ * i40e_maybe_stop_tx - 1st level check for Tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size:    the size buffer we want to assure is available
+ *
+ * Returns 0 if stop is not needed
+ **/
+static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
+		return 0;
+	return __i40evf_maybe_stop_tx(tx_ring, size);
+}
+
+/**
+ * i40e_chk_linearize - Check if there are more than 8 fragments per packet
+ * @skb:      send buffer
+ * @count:    number of buffers used
+ *
+ * Note: Our HW can't scatter-gather more than 8 fragments to build
+ * a packet on the wire and so we need to figure out the cases where we
+ * need to linearize the skb.
+ **/
+static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
+{
+	/* Both TSO and single send will work if count is less than 8 */
+	if (likely(count < I40E_MAX_BUFFER_TXD))
+		return false;
+
+	if (skb_is_gso(skb))
+		return __i40evf_chk_linearize(skb);
+
+	/* we can support up to 8 data buffers for a single send */
+	return count != I40E_MAX_BUFFER_TXD;
+}
+
+/**
+ * txring_txq - Find the netdev queue for a given Tx ring
+ * @ring: Tx ring to find the netdev equivalent of
+ **/
+static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
+{
+	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
+}
+#endif /* _I40E_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/i40e_type.h b/drivers/net/ethernet/intel/iavf/i40e_type.h
new file mode 100644
index 000000000000..094387db3c11
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/i40e_type.h
@@ -0,0 +1,1496 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#ifndef _I40E_TYPE_H_
+#define _I40E_TYPE_H_
+
+#include "i40e_status.h"
+#include "i40e_osdep.h"
+#include "i40e_register.h"
+#include "i40e_adminq.h"
+#include "i40e_hmc.h"
+#include "i40e_lan_hmc.h"
+#include "i40e_devids.h"
+
+/* I40E_MASK is a macro used on 32 bit registers */
+#define I40E_MASK(mask, shift) ((u32)(mask) << (shift))
+
+#define I40E_MAX_VSI_QP			16
+#define I40E_MAX_VF_VSI			3
+#define I40E_MAX_CHAINED_RX_BUFFERS	5
+#define I40E_MAX_PF_UDP_OFFLOAD_PORTS	16
+
+/* Max default timeout in ms */
+#define I40E_MAX_NVM_TIMEOUT		18000
+
+/* Max timeout in ms for the phy to respond */
+#define I40E_MAX_PHY_TIMEOUT		500
+
+/* Switch from ms to the 1usec global time (this is the GTIME resolution) */
+#define I40E_MS_TO_GTIME(time)		((time) * 1000)
+
+/* forward declaration */
+struct i40e_hw;
+typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
+
+/* Data type manipulation macros. */
+
+#define I40E_DESC_UNUSED(R)	\
+	((((R)->next_to_clean > (R)->next_to_use) ?
0 : (R)->count) + \ + (R)->next_to_clean - (R)->next_to_use - 1) + +/* bitfields for Tx queue mapping in QTX_CTL */ +#define I40E_QTX_CTL_VF_QUEUE 0x0 +#define I40E_QTX_CTL_VM_QUEUE 0x1 +#define I40E_QTX_CTL_PF_QUEUE 0x2 + +/* debug masks - set these bits in hw->debug_mask to control output */ +enum i40e_debug_mask { + I40E_DEBUG_INIT = 0x00000001, + I40E_DEBUG_RELEASE = 0x00000002, + + I40E_DEBUG_LINK = 0x00000010, + I40E_DEBUG_PHY = 0x00000020, + I40E_DEBUG_HMC = 0x00000040, + I40E_DEBUG_NVM = 0x00000080, + I40E_DEBUG_LAN = 0x00000100, + I40E_DEBUG_FLOW = 0x00000200, + I40E_DEBUG_DCB = 0x00000400, + I40E_DEBUG_DIAG = 0x00000800, + I40E_DEBUG_FD = 0x00001000, + I40E_DEBUG_PACKAGE = 0x00002000, + + I40E_DEBUG_AQ_MESSAGE = 0x01000000, + I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000, + I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000, + I40E_DEBUG_AQ_COMMAND = 0x06000000, + I40E_DEBUG_AQ = 0x0F000000, + + I40E_DEBUG_USER = 0xF0000000, + + I40E_DEBUG_ALL = 0xFFFFFFFF +}; + +/* These are structs for managing the hardware information and the operations. + * The structures of function pointers are filled out at init time when we + * know for sure exactly which hardware we're working with. This gives us the + * flexibility of using the same main driver code but adapting to slightly + * different hardware needs as new parts are developed. For this architecture, + * the Firmware and AdminQ are intended to insulate the driver from most of the + * future changes, but these structures will also do part of the job. + */ +enum i40e_mac_type { + I40E_MAC_UNKNOWN = 0, + I40E_MAC_XL710, + I40E_MAC_VF, + I40E_MAC_X722, + I40E_MAC_X722_VF, + I40E_MAC_GENERIC, +}; + +enum i40e_media_type { + I40E_MEDIA_TYPE_UNKNOWN = 0, + I40E_MEDIA_TYPE_FIBER, + I40E_MEDIA_TYPE_BASET, + I40E_MEDIA_TYPE_BACKPLANE, + I40E_MEDIA_TYPE_CX4, + I40E_MEDIA_TYPE_DA, + I40E_MEDIA_TYPE_VIRTUAL +}; + +enum i40e_fc_mode { + I40E_FC_NONE = 0, + I40E_FC_RX_PAUSE, + I40E_FC_TX_PAUSE, + I40E_FC_FULL, + I40E_FC_PFC, + I40E_FC_DEFAULT +}; + +enum i40e_set_fc_aq_failures { + I40E_SET_FC_AQ_FAIL_NONE = 0, + I40E_SET_FC_AQ_FAIL_GET = 1, + I40E_SET_FC_AQ_FAIL_SET = 2, + I40E_SET_FC_AQ_FAIL_UPDATE = 4, + I40E_SET_FC_AQ_FAIL_SET_UPDATE = 6 +}; + +enum i40e_vsi_type { + I40E_VSI_MAIN = 0, + I40E_VSI_VMDQ1 = 1, + I40E_VSI_VMDQ2 = 2, + I40E_VSI_CTRL = 3, + I40E_VSI_FCOE = 4, + I40E_VSI_MIRROR = 5, + I40E_VSI_SRIOV = 6, + I40E_VSI_FDIR = 7, + I40E_VSI_TYPE_UNKNOWN +}; + +enum i40e_queue_type { + I40E_QUEUE_TYPE_RX = 0, + I40E_QUEUE_TYPE_TX, + I40E_QUEUE_TYPE_PE_CEQ, + I40E_QUEUE_TYPE_UNKNOWN +}; + +struct i40e_link_status { + enum i40e_aq_phy_type phy_type; + enum i40e_aq_link_speed link_speed; + u8 link_info; + u8 an_info; + u8 req_fec_info; + u8 fec_info; + u8 ext_info; + u8 loopback; + /* is Link Status Event notification to SW enabled */ + bool lse_enable; + u16 max_frame_size; + bool crc_enable; + u8 pacing; + u8 requested_speeds; + u8 module_type[3]; + /* 1st byte: module identifier */ +#define I40E_MODULE_TYPE_SFP 0x03 +#define I40E_MODULE_TYPE_QSFP 0x0D + /* 2nd byte: ethernet compliance codes for 10/40G */ +#define I40E_MODULE_TYPE_40G_ACTIVE 0x01 +#define I40E_MODULE_TYPE_40G_LR4 0x02 +#define I40E_MODULE_TYPE_40G_SR4 0x04 +#define I40E_MODULE_TYPE_40G_CR4 0x08 +#define I40E_MODULE_TYPE_10G_BASE_SR 0x10 +#define I40E_MODULE_TYPE_10G_BASE_LR 0x20 +#define I40E_MODULE_TYPE_10G_BASE_LRM 0x40 +#define I40E_MODULE_TYPE_10G_BASE_ER 0x80 + /* 3rd byte: ethernet compliance codes for 1G */ +#define I40E_MODULE_TYPE_1000BASE_SX 0x01 +#define I40E_MODULE_TYPE_1000BASE_LX 
0x02
+#define I40E_MODULE_TYPE_1000BASE_CX	0x04
+#define I40E_MODULE_TYPE_1000BASE_T	0x08
+};
+
+struct i40e_phy_info {
+	struct i40e_link_status link_info;
+	struct i40e_link_status link_info_old;
+	bool get_link_info;
+	enum i40e_media_type media_type;
+	/* all the phy types the NVM is capable of */
+	u64 phy_types;
+};
+
+#define I40E_CAP_PHY_TYPE_SGMII BIT_ULL(I40E_PHY_TYPE_SGMII)
+#define I40E_CAP_PHY_TYPE_1000BASE_KX BIT_ULL(I40E_PHY_TYPE_1000BASE_KX)
+#define I40E_CAP_PHY_TYPE_10GBASE_KX4 BIT_ULL(I40E_PHY_TYPE_10GBASE_KX4)
+#define I40E_CAP_PHY_TYPE_10GBASE_KR BIT_ULL(I40E_PHY_TYPE_10GBASE_KR)
+#define I40E_CAP_PHY_TYPE_40GBASE_KR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_KR4)
+#define I40E_CAP_PHY_TYPE_XAUI BIT_ULL(I40E_PHY_TYPE_XAUI)
+#define I40E_CAP_PHY_TYPE_XFI BIT_ULL(I40E_PHY_TYPE_XFI)
+#define I40E_CAP_PHY_TYPE_SFI BIT_ULL(I40E_PHY_TYPE_SFI)
+#define I40E_CAP_PHY_TYPE_XLAUI BIT_ULL(I40E_PHY_TYPE_XLAUI)
+#define I40E_CAP_PHY_TYPE_XLPPI BIT_ULL(I40E_PHY_TYPE_XLPPI)
+#define I40E_CAP_PHY_TYPE_40GBASE_CR4_CU BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4_CU)
+#define I40E_CAP_PHY_TYPE_10GBASE_CR1_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1_CU)
+#define I40E_CAP_PHY_TYPE_10GBASE_AOC BIT_ULL(I40E_PHY_TYPE_10GBASE_AOC)
+#define I40E_CAP_PHY_TYPE_40GBASE_AOC BIT_ULL(I40E_PHY_TYPE_40GBASE_AOC)
+#define I40E_CAP_PHY_TYPE_100BASE_TX BIT_ULL(I40E_PHY_TYPE_100BASE_TX)
+#define I40E_CAP_PHY_TYPE_1000BASE_T BIT_ULL(I40E_PHY_TYPE_1000BASE_T)
+#define I40E_CAP_PHY_TYPE_10GBASE_T BIT_ULL(I40E_PHY_TYPE_10GBASE_T)
+#define I40E_CAP_PHY_TYPE_10GBASE_SR BIT_ULL(I40E_PHY_TYPE_10GBASE_SR)
+#define I40E_CAP_PHY_TYPE_10GBASE_LR BIT_ULL(I40E_PHY_TYPE_10GBASE_LR)
+#define I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_SFPP_CU)
+#define I40E_CAP_PHY_TYPE_10GBASE_CR1 BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1)
+#define I40E_CAP_PHY_TYPE_40GBASE_CR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4)
+#define I40E_CAP_PHY_TYPE_40GBASE_SR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_SR4)
+#define I40E_CAP_PHY_TYPE_40GBASE_LR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_LR4)
+#define I40E_CAP_PHY_TYPE_1000BASE_SX BIT_ULL(I40E_PHY_TYPE_1000BASE_SX)
+#define I40E_CAP_PHY_TYPE_1000BASE_LX BIT_ULL(I40E_PHY_TYPE_1000BASE_LX)
+#define I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL \
+				BIT_ULL(I40E_PHY_TYPE_1000BASE_T_OPTICAL)
+#define I40E_CAP_PHY_TYPE_20GBASE_KR2 BIT_ULL(I40E_PHY_TYPE_20GBASE_KR2)
+/* Defining the macro I40E_PHY_TYPE_OFFSET to implement a bit shift for some
+ * PHY types. There is an unused bit (31) in the I40E_CAP_PHY_TYPE_* bit
+ * fields but no corresponding gap in the i40e_aq_phy_type enumeration. So,
+ * a shift is needed to adjust for this with values larger than 31. The
+ * only affected values are I40E_PHY_TYPE_25GBASE_*.
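+ *
+ * For example, I40E_CAP_PHY_TYPE_25GBASE_KR below is defined as
+ * BIT_ULL(I40E_PHY_TYPE_25GBASE_KR + I40E_PHY_TYPE_OFFSET), skipping
+ * the unused bit so every capability bit stays unique.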
+ */ +#define I40E_PHY_TYPE_OFFSET 1 +#define I40E_CAP_PHY_TYPE_25GBASE_KR BIT_ULL(I40E_PHY_TYPE_25GBASE_KR + \ + I40E_PHY_TYPE_OFFSET) +#define I40E_CAP_PHY_TYPE_25GBASE_CR BIT_ULL(I40E_PHY_TYPE_25GBASE_CR + \ + I40E_PHY_TYPE_OFFSET) +#define I40E_CAP_PHY_TYPE_25GBASE_SR BIT_ULL(I40E_PHY_TYPE_25GBASE_SR + \ + I40E_PHY_TYPE_OFFSET) +#define I40E_CAP_PHY_TYPE_25GBASE_LR BIT_ULL(I40E_PHY_TYPE_25GBASE_LR + \ + I40E_PHY_TYPE_OFFSET) +#define I40E_HW_CAP_MAX_GPIO 30 +/* Capabilities of a PF or a VF or the whole device */ +struct i40e_hw_capabilities { + u32 switch_mode; +#define I40E_NVM_IMAGE_TYPE_EVB 0x0 +#define I40E_NVM_IMAGE_TYPE_CLOUD 0x2 +#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3 + + u32 management_mode; + u32 mng_protocols_over_mctp; +#define I40E_MNG_PROTOCOL_PLDM 0x2 +#define I40E_MNG_PROTOCOL_OEM_COMMANDS 0x4 +#define I40E_MNG_PROTOCOL_NCSI 0x8 + u32 npar_enable; + u32 os2bmc; + u32 valid_functions; + bool sr_iov_1_1; + bool vmdq; + bool evb_802_1_qbg; /* Edge Virtual Bridging */ + bool evb_802_1_qbh; /* Bridge Port Extension */ + bool dcb; + bool fcoe; + bool iscsi; /* Indicates iSCSI enabled */ + bool flex10_enable; + bool flex10_capable; + u32 flex10_mode; +#define I40E_FLEX10_MODE_UNKNOWN 0x0 +#define I40E_FLEX10_MODE_DCC 0x1 +#define I40E_FLEX10_MODE_DCI 0x2 + + u32 flex10_status; +#define I40E_FLEX10_STATUS_DCC_ERROR 0x1 +#define I40E_FLEX10_STATUS_VC_MODE 0x2 + + bool sec_rev_disabled; + bool update_disabled; +#define I40E_NVM_MGMT_SEC_REV_DISABLED 0x1 +#define I40E_NVM_MGMT_UPDATE_DISABLED 0x2 + + bool mgmt_cem; + bool ieee_1588; + bool iwarp; + bool fd; + u32 fd_filters_guaranteed; + u32 fd_filters_best_effort; + bool rss; + u32 rss_table_size; + u32 rss_table_entry_width; + bool led[I40E_HW_CAP_MAX_GPIO]; + bool sdp[I40E_HW_CAP_MAX_GPIO]; + u32 nvm_image_type; + u32 num_flow_director_filters; + u32 num_vfs; + u32 vf_base_id; + u32 num_vsis; + u32 num_rx_qp; + u32 num_tx_qp; + u32 base_queue; + u32 num_msix_vectors; + u32 num_msix_vectors_vf; + u32 led_pin_num; + u32 sdp_pin_num; + u32 mdio_port_num; + u32 mdio_port_mode; + u8 rx_buf_chain_len; + u32 enabled_tcmap; + u32 maxtc; + u64 wr_csr_prot; +}; + +struct i40e_mac_info { + enum i40e_mac_type type; + u8 addr[ETH_ALEN]; + u8 perm_addr[ETH_ALEN]; + u8 san_addr[ETH_ALEN]; + u16 max_fcoeq; +}; + +enum i40e_aq_resources_ids { + I40E_NVM_RESOURCE_ID = 1 +}; + +enum i40e_aq_resource_access_type { + I40E_RESOURCE_READ = 1, + I40E_RESOURCE_WRITE +}; + +struct i40e_nvm_info { + u64 hw_semaphore_timeout; /* usec global time (GTIME resolution) */ + u32 timeout; /* [ms] */ + u16 sr_size; /* Shadow RAM size in words */ + bool blank_nvm_mode; /* is NVM empty (no FW present)*/ + u16 version; /* NVM package version */ + u32 eetrack; /* NVM data version */ + u32 oem_ver; /* OEM version info */ +}; + +/* definitions used in NVM update support */ + +enum i40e_nvmupd_cmd { + I40E_NVMUPD_INVALID, + I40E_NVMUPD_READ_CON, + I40E_NVMUPD_READ_SNT, + I40E_NVMUPD_READ_LCB, + I40E_NVMUPD_READ_SA, + I40E_NVMUPD_WRITE_ERA, + I40E_NVMUPD_WRITE_CON, + I40E_NVMUPD_WRITE_SNT, + I40E_NVMUPD_WRITE_LCB, + I40E_NVMUPD_WRITE_SA, + I40E_NVMUPD_CSUM_CON, + I40E_NVMUPD_CSUM_SA, + I40E_NVMUPD_CSUM_LCB, + I40E_NVMUPD_STATUS, + I40E_NVMUPD_EXEC_AQ, + I40E_NVMUPD_GET_AQ_RESULT, + I40E_NVMUPD_GET_AQ_EVENT, +}; + +enum i40e_nvmupd_state { + I40E_NVMUPD_STATE_INIT, + I40E_NVMUPD_STATE_READING, + I40E_NVMUPD_STATE_WRITING, + I40E_NVMUPD_STATE_INIT_WAIT, + I40E_NVMUPD_STATE_WRITE_WAIT, + I40E_NVMUPD_STATE_ERROR +}; + +/* nvm_access definition and its masks/shifts need to 
be accessible to + * application, core driver, and shared code. Where is the right file? + */ +#define I40E_NVM_READ 0xB +#define I40E_NVM_WRITE 0xC + +#define I40E_NVM_MOD_PNT_MASK 0xFF + +#define I40E_NVM_TRANS_SHIFT 8 +#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT) +#define I40E_NVM_PRESERVATION_FLAGS_SHIFT 12 +#define I40E_NVM_PRESERVATION_FLAGS_MASK \ + (0x3 << I40E_NVM_PRESERVATION_FLAGS_SHIFT) +#define I40E_NVM_PRESERVATION_FLAGS_SELECTED 0x01 +#define I40E_NVM_PRESERVATION_FLAGS_ALL 0x02 +#define I40E_NVM_CON 0x0 +#define I40E_NVM_SNT 0x1 +#define I40E_NVM_LCB 0x2 +#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB) +#define I40E_NVM_ERA 0x4 +#define I40E_NVM_CSUM 0x8 +#define I40E_NVM_AQE 0xe +#define I40E_NVM_EXEC 0xf + +#define I40E_NVM_ADAPT_SHIFT 16 +#define I40E_NVM_ADAPT_MASK (0xffff << I40E_NVM_ADAPT_SHIFT) + +#define I40E_NVMUPD_MAX_DATA 4096 +#define I40E_NVMUPD_IFACE_TIMEOUT 2 /* seconds */ + +struct i40e_nvm_access { + u32 command; + u32 config; + u32 offset; /* in bytes */ + u32 data_size; /* in bytes */ + u8 data[1]; +}; + +/* (Q)SFP module access definitions */ +#define I40E_I2C_EEPROM_DEV_ADDR 0xA0 +#define I40E_I2C_EEPROM_DEV_ADDR2 0xA2 +#define I40E_MODULE_TYPE_ADDR 0x00 +#define I40E_MODULE_REVISION_ADDR 0x01 +#define I40E_MODULE_SFF_8472_COMP 0x5E +#define I40E_MODULE_SFF_8472_SWAP 0x5C +#define I40E_MODULE_SFF_ADDR_MODE 0x04 +#define I40E_MODULE_TYPE_QSFP_PLUS 0x0D +#define I40E_MODULE_TYPE_QSFP28 0x11 +#define I40E_MODULE_QSFP_MAX_LEN 640 + +/* PCI bus types */ +enum i40e_bus_type { + i40e_bus_type_unknown = 0, + i40e_bus_type_pci, + i40e_bus_type_pcix, + i40e_bus_type_pci_express, + i40e_bus_type_reserved +}; + +/* PCI bus speeds */ +enum i40e_bus_speed { + i40e_bus_speed_unknown = 0, + i40e_bus_speed_33 = 33, + i40e_bus_speed_66 = 66, + i40e_bus_speed_100 = 100, + i40e_bus_speed_120 = 120, + i40e_bus_speed_133 = 133, + i40e_bus_speed_2500 = 2500, + i40e_bus_speed_5000 = 5000, + i40e_bus_speed_8000 = 8000, + i40e_bus_speed_reserved +}; + +/* PCI bus widths */ +enum i40e_bus_width { + i40e_bus_width_unknown = 0, + i40e_bus_width_pcie_x1 = 1, + i40e_bus_width_pcie_x2 = 2, + i40e_bus_width_pcie_x4 = 4, + i40e_bus_width_pcie_x8 = 8, + i40e_bus_width_32 = 32, + i40e_bus_width_64 = 64, + i40e_bus_width_reserved +}; + +/* Bus parameters */ +struct i40e_bus_info { + enum i40e_bus_speed speed; + enum i40e_bus_width width; + enum i40e_bus_type type; + + u16 func; + u16 device; + u16 lan_id; + u16 bus_id; +}; + +/* Flow control (FC) parameters */ +struct i40e_fc_info { + enum i40e_fc_mode current_mode; /* FC mode in effect */ + enum i40e_fc_mode requested_mode; /* FC mode requested by caller */ +}; + +#define I40E_MAX_TRAFFIC_CLASS 8 +#define I40E_MAX_USER_PRIORITY 8 +#define I40E_DCBX_MAX_APPS 32 +#define I40E_LLDPDU_SIZE 1500 + +/* IEEE 802.1Qaz ETS Configuration data */ +struct i40e_ieee_ets_config { + u8 willing; + u8 cbs; + u8 maxtcs; + u8 prioritytable[I40E_MAX_TRAFFIC_CLASS]; + u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS]; + u8 tsatable[I40E_MAX_TRAFFIC_CLASS]; +}; + +/* IEEE 802.1Qaz ETS Recommendation data */ +struct i40e_ieee_ets_recommend { + u8 prioritytable[I40E_MAX_TRAFFIC_CLASS]; + u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS]; + u8 tsatable[I40E_MAX_TRAFFIC_CLASS]; +}; + +/* IEEE 802.1Qaz PFC Configuration data */ +struct i40e_ieee_pfc_config { + u8 willing; + u8 mbc; + u8 pfccap; + u8 pfcenable; +}; + +/* IEEE 802.1Qaz Application Priority data */ +struct i40e_ieee_app_priority_table { + u8 priority; + u8 selector; + u16 protocolid; +}; + +struct 
i40e_dcbx_config { + u32 numapps; + u32 tlv_status; /* CEE mode TLV status */ + struct i40e_ieee_ets_config etscfg; + struct i40e_ieee_ets_recommend etsrec; + struct i40e_ieee_pfc_config pfc; + struct i40e_ieee_app_priority_table app[I40E_DCBX_MAX_APPS]; +}; + +/* Port hardware description */ +struct i40e_hw { + u8 __iomem *hw_addr; + void *back; + + /* subsystem structs */ + struct i40e_phy_info phy; + struct i40e_mac_info mac; + struct i40e_bus_info bus; + struct i40e_nvm_info nvm; + struct i40e_fc_info fc; + + /* pci info */ + u16 device_id; + u16 vendor_id; + u16 subsystem_device_id; + u16 subsystem_vendor_id; + u8 revision_id; + u8 port; + bool adapter_stopped; + + /* capabilities for entire device and PCI func */ + struct i40e_hw_capabilities dev_caps; + struct i40e_hw_capabilities func_caps; + + /* Flow Director shared filter space */ + u16 fdir_shared_filter_count; + + /* device profile info */ + u8 pf_id; + u16 main_vsi_seid; + + /* for multi-function MACs */ + u16 partition_id; + u16 num_partitions; + u16 num_ports; + + /* Closest numa node to the device */ + u16 numa_node; + + /* Admin Queue info */ + struct i40e_adminq_info aq; + + /* state of nvm update process */ + enum i40e_nvmupd_state nvmupd_state; + struct i40e_aq_desc nvm_wb_desc; + struct i40e_aq_desc nvm_aq_event_desc; + struct i40e_virt_mem nvm_buff; + bool nvm_release_on_done; + u16 nvm_wait_opcode; + + /* HMC info */ + struct i40e_hmc_info hmc; /* HMC info struct */ + + /* LLDP/DCBX Status */ + u16 dcbx_status; + +#define I40E_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1) +#define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2) + + /* DCBX info */ + struct i40e_dcbx_config local_dcbx_config; /* Oper/Local Cfg */ + struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */ + struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */ + + /* Used in set switch config AQ command */ + u16 switch_tag; + u16 first_tag; + u16 second_tag; + + /* debug mask */ + u32 debug_mask; + char err_str[16]; +}; + +static inline bool i40e_is_vf(struct i40e_hw *hw) +{ + return (hw->mac.type == I40E_MAC_VF || + hw->mac.type == I40E_MAC_X722_VF); +} + +struct i40e_driver_version { + u8 major_version; + u8 minor_version; + u8 build_version; + u8 subbuild_version; + u8 driver_string[32]; +}; + +/* RX Descriptors */ +union i40e_16byte_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + struct { + union { + __le16 mirroring_status; + __le16 fcoe_ctx_id; + } mirr_fcoe; + __le16 l2tag1; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + __le32 fd_id; /* Flow director filter id */ + __le32 fcoe_param; /* FCoE DDP Context id */ + } hi_dword; + } qword0; + struct { + /* ext status/error/pktype/length */ + __le64 status_error_len; + } qword1; + } wb; /* writeback */ +}; + +union i40e_32byte_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + /* bit 0 of hdr_buffer_addr is DD bit */ + __le64 rsvd1; + __le64 rsvd2; + } read; + struct { + struct { + struct { + union { + __le16 mirroring_status; + __le16 fcoe_ctx_id; + } mirr_fcoe; + __le16 l2tag1; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + __le32 fcoe_param; /* FCoE DDP Context id */ + /* Flow director filter id in case of + * Programming status desc WB + */ + __le32 fd_id; + } hi_dword; + } qword0; + struct { + /* status/error/pktype/length */ + __le64 status_error_len; + } qword1; + struct { + __le16 ext_status; /* extended status 
*/ + __le16 rsvd; + __le16 l2tag2_1; + __le16 l2tag2_2; + } qword2; + struct { + union { + __le32 flex_bytes_lo; + __le32 pe_status; + } lo_dword; + union { + __le32 flex_bytes_hi; + __le32 fd_id; + } hi_dword; + } qword3; + } wb; /* writeback */ +}; + +enum i40e_rx_desc_status_bits { + /* Note: These are predefined bit offsets */ + I40E_RX_DESC_STATUS_DD_SHIFT = 0, + I40E_RX_DESC_STATUS_EOF_SHIFT = 1, + I40E_RX_DESC_STATUS_L2TAG1P_SHIFT = 2, + I40E_RX_DESC_STATUS_L3L4P_SHIFT = 3, + I40E_RX_DESC_STATUS_CRCP_SHIFT = 4, + I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */ + I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7, + /* Note: Bit 8 is reserved in X710 and XL710 */ + I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8, + I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */ + I40E_RX_DESC_STATUS_FLM_SHIFT = 11, + I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */ + I40E_RX_DESC_STATUS_LPBK_SHIFT = 14, + I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15, + I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */ + /* Note: For non-tunnel packets INT_UDP_0 is the right status for + * UDP header + */ + I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18, + I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */ +}; + +#define I40E_RXD_QW1_STATUS_SHIFT 0 +#define I40E_RXD_QW1_STATUS_MASK ((BIT(I40E_RX_DESC_STATUS_LAST) - 1) \ + << I40E_RXD_QW1_STATUS_SHIFT) + +#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT +#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \ + I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT) + +#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT +#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK \ + BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT) + +enum i40e_rx_desc_fltstat_values { + I40E_RX_DESC_FLTSTAT_NO_DATA = 0, + I40E_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? 
FD_ID : RSV */ + I40E_RX_DESC_FLTSTAT_RSV = 2, + I40E_RX_DESC_FLTSTAT_RSS_HASH = 3, +}; + +#define I40E_RXD_QW1_ERROR_SHIFT 19 +#define I40E_RXD_QW1_ERROR_MASK (0xFFUL << I40E_RXD_QW1_ERROR_SHIFT) + +enum i40e_rx_desc_error_bits { + /* Note: These are predefined bit offsets */ + I40E_RX_DESC_ERROR_RXE_SHIFT = 0, + I40E_RX_DESC_ERROR_RECIPE_SHIFT = 1, + I40E_RX_DESC_ERROR_HBO_SHIFT = 2, + I40E_RX_DESC_ERROR_L3L4E_SHIFT = 3, /* 3 BITS */ + I40E_RX_DESC_ERROR_IPE_SHIFT = 3, + I40E_RX_DESC_ERROR_L4E_SHIFT = 4, + I40E_RX_DESC_ERROR_EIPE_SHIFT = 5, + I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6, + I40E_RX_DESC_ERROR_PPRS_SHIFT = 7 +}; + +enum i40e_rx_desc_error_l3l4e_fcoe_masks { + I40E_RX_DESC_ERROR_L3L4E_NONE = 0, + I40E_RX_DESC_ERROR_L3L4E_PROT = 1, + I40E_RX_DESC_ERROR_L3L4E_FC = 2, + I40E_RX_DESC_ERROR_L3L4E_DMAC_ERR = 3, + I40E_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4 +}; + +#define I40E_RXD_QW1_PTYPE_SHIFT 30 +#define I40E_RXD_QW1_PTYPE_MASK (0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT) + +/* Packet type non-ip values */ +enum i40e_rx_l2_ptype { + I40E_RX_PTYPE_L2_RESERVED = 0, + I40E_RX_PTYPE_L2_MAC_PAY2 = 1, + I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2, + I40E_RX_PTYPE_L2_FIP_PAY2 = 3, + I40E_RX_PTYPE_L2_OUI_PAY2 = 4, + I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5, + I40E_RX_PTYPE_L2_LLDP_PAY2 = 6, + I40E_RX_PTYPE_L2_ECP_PAY2 = 7, + I40E_RX_PTYPE_L2_EVB_PAY2 = 8, + I40E_RX_PTYPE_L2_QCN_PAY2 = 9, + I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10, + I40E_RX_PTYPE_L2_ARP = 11, + I40E_RX_PTYPE_L2_FCOE_PAY3 = 12, + I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13, + I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14, + I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15, + I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16, + I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17, + I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18, + I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19, + I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20, + I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21, + I40E_RX_PTYPE_GRENAT4_MAC_PAY3 = 58, + I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87, + I40E_RX_PTYPE_GRENAT6_MAC_PAY3 = 124, + I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153 +}; + +struct i40e_rx_ptype_decoded { + u32 ptype:8; + u32 known:1; + u32 outer_ip:1; + u32 outer_ip_ver:1; + u32 outer_frag:1; + u32 tunnel_type:3; + u32 tunnel_end_prot:2; + u32 tunnel_end_frag:1; + u32 inner_prot:4; + u32 payload_layer:3; +}; + +enum i40e_rx_ptype_outer_ip { + I40E_RX_PTYPE_OUTER_L2 = 0, + I40E_RX_PTYPE_OUTER_IP = 1 +}; + +enum i40e_rx_ptype_outer_ip_ver { + I40E_RX_PTYPE_OUTER_NONE = 0, + I40E_RX_PTYPE_OUTER_IPV4 = 0, + I40E_RX_PTYPE_OUTER_IPV6 = 1 +}; + +enum i40e_rx_ptype_outer_fragmented { + I40E_RX_PTYPE_NOT_FRAG = 0, + I40E_RX_PTYPE_FRAG = 1 +}; + +enum i40e_rx_ptype_tunnel_type { + I40E_RX_PTYPE_TUNNEL_NONE = 0, + I40E_RX_PTYPE_TUNNEL_IP_IP = 1, + I40E_RX_PTYPE_TUNNEL_IP_GRENAT = 2, + I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3, + I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4, +}; + +enum i40e_rx_ptype_tunnel_end_prot { + I40E_RX_PTYPE_TUNNEL_END_NONE = 0, + I40E_RX_PTYPE_TUNNEL_END_IPV4 = 1, + I40E_RX_PTYPE_TUNNEL_END_IPV6 = 2, +}; + +enum i40e_rx_ptype_inner_prot { + I40E_RX_PTYPE_INNER_PROT_NONE = 0, + I40E_RX_PTYPE_INNER_PROT_UDP = 1, + I40E_RX_PTYPE_INNER_PROT_TCP = 2, + I40E_RX_PTYPE_INNER_PROT_SCTP = 3, + I40E_RX_PTYPE_INNER_PROT_ICMP = 4, + I40E_RX_PTYPE_INNER_PROT_TIMESYNC = 5 +}; + +enum i40e_rx_ptype_payload_layer { + I40E_RX_PTYPE_PAYLOAD_LAYER_NONE = 0, + I40E_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1, + I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2, + I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3, +}; + +#define I40E_RXD_QW1_LENGTH_PBUF_SHIFT 38 +#define I40E_RXD_QW1_LENGTH_PBUF_MASK 
(0x3FFFULL << \ + I40E_RXD_QW1_LENGTH_PBUF_SHIFT) + +#define I40E_RXD_QW1_LENGTH_HBUF_SHIFT 52 +#define I40E_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \ + I40E_RXD_QW1_LENGTH_HBUF_SHIFT) + +#define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63 +#define I40E_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT) + +enum i40e_rx_desc_ext_status_bits { + /* Note: These are predefined bit offsets */ + I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT = 0, + I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1, + I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */ + I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */ + I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9, + I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10, + I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11, +}; + +enum i40e_rx_desc_pe_status_bits { + /* Note: These are predefined bit offsets */ + I40E_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */ + I40E_RX_DESC_PE_STATUS_L4PORT_SHIFT = 0, /* 16 BITS */ + I40E_RX_DESC_PE_STATUS_IPINDEX_SHIFT = 16, /* 8 BITS */ + I40E_RX_DESC_PE_STATUS_QPIDHIT_SHIFT = 24, + I40E_RX_DESC_PE_STATUS_APBVTHIT_SHIFT = 25, + I40E_RX_DESC_PE_STATUS_PORTV_SHIFT = 26, + I40E_RX_DESC_PE_STATUS_URG_SHIFT = 27, + I40E_RX_DESC_PE_STATUS_IPFRAG_SHIFT = 28, + I40E_RX_DESC_PE_STATUS_IPOPT_SHIFT = 29 +}; + +#define I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT 38 +#define I40E_RX_PROG_STATUS_DESC_LENGTH 0x2000000 + +#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT 2 +#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK (0x7UL << \ + I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT) + +#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT 19 +#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK (0x3FUL << \ + I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT) + +enum i40e_rx_prog_status_desc_status_bits { + /* Note: These are predefined bit offsets */ + I40E_RX_PROG_STATUS_DESC_DD_SHIFT = 0, + I40E_RX_PROG_STATUS_DESC_PROG_ID_SHIFT = 2 /* 3 BITS */ +}; + +enum i40e_rx_prog_status_desc_prog_id_masks { + I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS = 1, + I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS = 2, + I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS = 4, +}; + +enum i40e_rx_prog_status_desc_error_bits { + /* Note: These are predefined bit offsets */ + I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0, + I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1, + I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2, + I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3 +}; + +/* TX Descriptor */ +struct i40e_tx_desc { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le64 cmd_type_offset_bsz; +}; + +#define I40E_TXD_QW1_DTYPE_SHIFT 0 +#define I40E_TXD_QW1_DTYPE_MASK (0xFUL << I40E_TXD_QW1_DTYPE_SHIFT) + +enum i40e_tx_desc_dtype_value { + I40E_TX_DESC_DTYPE_DATA = 0x0, + I40E_TX_DESC_DTYPE_NOP = 0x1, /* same as Context desc */ + I40E_TX_DESC_DTYPE_CONTEXT = 0x1, + I40E_TX_DESC_DTYPE_FCOE_CTX = 0x2, + I40E_TX_DESC_DTYPE_FILTER_PROG = 0x8, + I40E_TX_DESC_DTYPE_DDP_CTX = 0x9, + I40E_TX_DESC_DTYPE_FLEX_DATA = 0xB, + I40E_TX_DESC_DTYPE_FLEX_CTX_1 = 0xC, + I40E_TX_DESC_DTYPE_FLEX_CTX_2 = 0xD, + I40E_TX_DESC_DTYPE_DESC_DONE = 0xF +}; + +#define I40E_TXD_QW1_CMD_SHIFT 4 +#define I40E_TXD_QW1_CMD_MASK (0x3FFUL << I40E_TXD_QW1_CMD_SHIFT) + +enum i40e_tx_desc_cmd_bits { + I40E_TX_DESC_CMD_EOP = 0x0001, + I40E_TX_DESC_CMD_RS = 0x0002, + I40E_TX_DESC_CMD_ICRC = 0x0004, + I40E_TX_DESC_CMD_IL2TAG1 = 0x0008, + I40E_TX_DESC_CMD_DUMMY = 0x0010, + I40E_TX_DESC_CMD_IIPT_NONIP = 0x0000, /* 2 BITS */ + I40E_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */ + I40E_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */ + 
I40E_TX_DESC_CMD_IIPT_IPV4_CSUM	= 0x0060, /* 2 BITS */
+	I40E_TX_DESC_CMD_FCOET		= 0x0080,
+	I40E_TX_DESC_CMD_L4T_EOFT_UNK	= 0x0000, /* 2 BITS */
+	I40E_TX_DESC_CMD_L4T_EOFT_TCP	= 0x0100, /* 2 BITS */
+	I40E_TX_DESC_CMD_L4T_EOFT_SCTP	= 0x0200, /* 2 BITS */
+	I40E_TX_DESC_CMD_L4T_EOFT_UDP	= 0x0300, /* 2 BITS */
+	I40E_TX_DESC_CMD_L4T_EOFT_EOF_N	= 0x0000, /* 2 BITS */
+	I40E_TX_DESC_CMD_L4T_EOFT_EOF_T	= 0x0100, /* 2 BITS */
+	I40E_TX_DESC_CMD_L4T_EOFT_EOF_NI = 0x0200, /* 2 BITS */
+	I40E_TX_DESC_CMD_L4T_EOFT_EOF_A	= 0x0300, /* 2 BITS */
+};
+
+#define I40E_TXD_QW1_OFFSET_SHIFT	16
+#define I40E_TXD_QW1_OFFSET_MASK	(0x3FFFFULL << \
+					 I40E_TXD_QW1_OFFSET_SHIFT)
+
+enum i40e_tx_desc_length_fields {
+	/* Note: These are predefined bit offsets */
+	I40E_TX_DESC_LENGTH_MACLEN_SHIFT	= 0, /* 7 BITS */
+	I40E_TX_DESC_LENGTH_IPLEN_SHIFT		= 7, /* 7 BITS */
+	I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT	= 14 /* 4 BITS */
+};
+
+#define I40E_TXD_QW1_TX_BUF_SZ_SHIFT	34
+#define I40E_TXD_QW1_TX_BUF_SZ_MASK	(0x3FFFULL << \
+					 I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
+
+#define I40E_TXD_QW1_L2TAG1_SHIFT	48
+#define I40E_TXD_QW1_L2TAG1_MASK	(0xFFFFULL << I40E_TXD_QW1_L2TAG1_SHIFT)
+
+/* Context descriptors */
+struct i40e_tx_context_desc {
+	__le32 tunneling_params;
+	__le16 l2tag2;
+	__le16 rsvd;
+	__le64 type_cmd_tso_mss;
+};
+
+#define I40E_TXD_CTX_QW1_DTYPE_SHIFT	0
+#define I40E_TXD_CTX_QW1_DTYPE_MASK	(0xFUL << I40E_TXD_CTX_QW1_DTYPE_SHIFT)
+
+#define I40E_TXD_CTX_QW1_CMD_SHIFT	4
+#define I40E_TXD_CTX_QW1_CMD_MASK	(0xFFFFUL << I40E_TXD_CTX_QW1_CMD_SHIFT)
+
+enum i40e_tx_ctx_desc_cmd_bits {
+	I40E_TX_CTX_DESC_TSO		= 0x01,
+	I40E_TX_CTX_DESC_TSYN		= 0x02,
+	I40E_TX_CTX_DESC_IL2TAG2	= 0x04,
+	I40E_TX_CTX_DESC_IL2TAG2_IL2H	= 0x08,
+	I40E_TX_CTX_DESC_SWTCH_NOTAG	= 0x00,
+	I40E_TX_CTX_DESC_SWTCH_UPLINK	= 0x10,
+	I40E_TX_CTX_DESC_SWTCH_LOCAL	= 0x20,
+	I40E_TX_CTX_DESC_SWTCH_VSI	= 0x30,
+	I40E_TX_CTX_DESC_SWPE		= 0x40
+};
+
+#define I40E_TXD_CTX_QW1_TSO_LEN_SHIFT	30
+#define I40E_TXD_CTX_QW1_TSO_LEN_MASK	(0x3FFFFULL << \
+					 I40E_TXD_CTX_QW1_TSO_LEN_SHIFT)
+
+#define I40E_TXD_CTX_QW1_MSS_SHIFT	50
+#define I40E_TXD_CTX_QW1_MSS_MASK	(0x3FFFULL << \
+					 I40E_TXD_CTX_QW1_MSS_SHIFT)
+
+#define I40E_TXD_CTX_QW1_VSI_SHIFT	50
+#define I40E_TXD_CTX_QW1_VSI_MASK	(0x1FFULL << I40E_TXD_CTX_QW1_VSI_SHIFT)
+
+#define I40E_TXD_CTX_QW0_EXT_IP_SHIFT	0
+#define I40E_TXD_CTX_QW0_EXT_IP_MASK	(0x3ULL << \
+					 I40E_TXD_CTX_QW0_EXT_IP_SHIFT)
+
+enum i40e_tx_ctx_desc_eipt_offload {
+	I40E_TX_CTX_EXT_IP_NONE		= 0x0,
+	I40E_TX_CTX_EXT_IP_IPV6		= 0x1,
+	I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM	= 0x2,
+	I40E_TX_CTX_EXT_IP_IPV4		= 0x3
+};
+
+#define I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT	2
+#define I40E_TXD_CTX_QW0_EXT_IPLEN_MASK	(0x3FULL << \
+					 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT)
+
+#define I40E_TXD_CTX_QW0_NATT_SHIFT	9
+#define I40E_TXD_CTX_QW0_NATT_MASK	(0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+
+#define I40E_TXD_CTX_UDP_TUNNELING	BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT)
+#define I40E_TXD_CTX_GRE_TUNNELING	(0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+
+#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT	11
+#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK \
+				       BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
+
+#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST	I40E_TXD_CTX_QW0_EIP_NOINC_MASK
+
+#define I40E_TXD_CTX_QW0_NATLEN_SHIFT	12
+#define I40E_TXD_CTX_QW0_NATLEN_MASK	(0x7FULL << \
+					 I40E_TXD_CTX_QW0_NATLEN_SHIFT)
+
+#define I40E_TXD_CTX_QW0_DECTTL_SHIFT	19
+#define I40E_TXD_CTX_QW0_DECTTL_MASK	(0xFULL << \
+					 I40E_TXD_CTX_QW0_DECTTL_SHIFT)
+
+#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT	23
+#define I40E_TXD_CTX_QW0_L4T_CS_MASK	BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT)
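For illustration (an editor's sketch, not lines from this patch): the I40E_TXD_QW1_* shift/mask pairs above are consumed by OR-ing each field into the second quadword of a data descriptor. The helper name below is hypothetical, but the packing follows the pattern the driver's transmit path uses with these macros:

static inline __le64 example_build_tx_qw1(u32 td_cmd, u32 td_offset,
					  unsigned int size, u32 td_tag)
{
	/* DTYPE occupies bits 0-3; command, header offsets, buffer size and
	 * the L2 tag are each shifted into their field of the quadword.
	 */
	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}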
+struct i40e_filter_program_desc {
+	__le32 qindex_flex_ptype_vsi;
+	__le32 rsvd;
+	__le32 dtype_cmd_cntindex;
+	__le32 fd_id;
+};
+#define I40E_TXD_FLTR_QW0_QINDEX_SHIFT	0
+#define I40E_TXD_FLTR_QW0_QINDEX_MASK	(0x7FFUL << \
+					 I40E_TXD_FLTR_QW0_QINDEX_SHIFT)
+#define I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT	11
+#define I40E_TXD_FLTR_QW0_FLEXOFF_MASK	(0x7UL << \
+					 I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT)
+#define I40E_TXD_FLTR_QW0_PCTYPE_SHIFT	17
+#define I40E_TXD_FLTR_QW0_PCTYPE_MASK	(0x3FUL << \
+					 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT)
+
+/* Packet Classifier Types for filters */
+enum i40e_filter_pctype {
+	/* Note: Values 0-28 are reserved for future use.
+	 * Values 29, 30, and 32 are not supported on XL710 and X710.
+	 */
+	I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP	= 29,
+	I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP	= 30,
+	I40E_FILTER_PCTYPE_NONF_IPV4_UDP		= 31,
+	I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK	= 32,
+	I40E_FILTER_PCTYPE_NONF_IPV4_TCP		= 33,
+	I40E_FILTER_PCTYPE_NONF_IPV4_SCTP		= 34,
+	I40E_FILTER_PCTYPE_NONF_IPV4_OTHER		= 35,
+	I40E_FILTER_PCTYPE_FRAG_IPV4			= 36,
+	/* Note: Values 37-38 are reserved for future use.
+	 * Values 39, 40, and 42 are not supported on XL710 and X710.
+	 */
+	I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP	= 39,
+	I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP	= 40,
+	I40E_FILTER_PCTYPE_NONF_IPV6_UDP		= 41,
+	I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK	= 42,
+	I40E_FILTER_PCTYPE_NONF_IPV6_TCP		= 43,
+	I40E_FILTER_PCTYPE_NONF_IPV6_SCTP		= 44,
+	I40E_FILTER_PCTYPE_NONF_IPV6_OTHER		= 45,
+	I40E_FILTER_PCTYPE_FRAG_IPV6			= 46,
+	/* Note: Value 47 is reserved for future use */
+	I40E_FILTER_PCTYPE_FCOE_OX			= 48,
+	I40E_FILTER_PCTYPE_FCOE_RX			= 49,
+	I40E_FILTER_PCTYPE_FCOE_OTHER			= 50,
+	/* Note: Values 51-62 are reserved for future use */
+	I40E_FILTER_PCTYPE_L2_PAYLOAD			= 63,
+};
+
+enum i40e_filter_program_desc_dest {
+	I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET		= 0x0,
+	I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX	= 0x1,
+	I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER	= 0x2,
+};
+
+enum i40e_filter_program_desc_fd_status {
+	I40E_FILTER_PROGRAM_DESC_FD_STATUS_NONE			= 0x0,
+	I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID		= 0x1,
+	I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID_4FLEX_BYTES	= 0x2,
+	I40E_FILTER_PROGRAM_DESC_FD_STATUS_8FLEX_BYTES		= 0x3,
+};
+
+#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT	23
+#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK	(0x1FFUL << \
+					 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CMD_SHIFT	4
+#define I40E_TXD_FLTR_QW1_CMD_MASK	(0xFFFFULL << \
+					 I40E_TXD_FLTR_QW1_CMD_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_PCMD_SHIFT	(0x0ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_PCMD_MASK	(0x7ULL << I40E_TXD_FLTR_QW1_PCMD_SHIFT)
+
+enum i40e_filter_program_desc_pcmd {
+	I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE	= 0x1,
+	I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE		= 0x2,
+};
+
+#define I40E_TXD_FLTR_QW1_DEST_SHIFT	(0x3ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_DEST_MASK	(0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT	(0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK	BIT_ULL(I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT	(0x9ULL + \
+						 I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \
+					  I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20
+#define I40E_TXD_FLTR_QW1_CNTINDEX_MASK	(0x1FFUL << \
+					 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
+
+enum i40e_filter_type {
I40E_FLOW_DIRECTOR_FLTR = 0, + I40E_PE_QUAD_HASH_FLTR = 1, + I40E_ETHERTYPE_FLTR, + I40E_FCOE_CTX_FLTR, + I40E_MAC_VLAN_FLTR, + I40E_HASH_FLTR +}; + +struct i40e_vsi_context { + u16 seid; + u16 uplink_seid; + u16 vsi_number; + u16 vsis_allocated; + u16 vsis_unallocated; + u16 flags; + u8 pf_num; + u8 vf_num; + u8 connection_type; + struct i40e_aqc_vsi_properties_data info; +}; + +struct i40e_veb_context { + u16 seid; + u16 uplink_seid; + u16 veb_number; + u16 vebs_allocated; + u16 vebs_unallocated; + u16 flags; + struct i40e_aqc_get_veb_parameters_completion info; +}; + +/* Statistics collected by each port, VSI, VEB, and S-channel */ +struct i40e_eth_stats { + u64 rx_bytes; /* gorc */ + u64 rx_unicast; /* uprc */ + u64 rx_multicast; /* mprc */ + u64 rx_broadcast; /* bprc */ + u64 rx_discards; /* rdpc */ + u64 rx_unknown_protocol; /* rupp */ + u64 tx_bytes; /* gotc */ + u64 tx_unicast; /* uptc */ + u64 tx_multicast; /* mptc */ + u64 tx_broadcast; /* bptc */ + u64 tx_discards; /* tdpc */ + u64 tx_errors; /* tepc */ +}; + +/* Statistics collected per VEB per TC */ +struct i40e_veb_tc_stats { + u64 tc_rx_packets[I40E_MAX_TRAFFIC_CLASS]; + u64 tc_rx_bytes[I40E_MAX_TRAFFIC_CLASS]; + u64 tc_tx_packets[I40E_MAX_TRAFFIC_CLASS]; + u64 tc_tx_bytes[I40E_MAX_TRAFFIC_CLASS]; +}; + +/* Statistics collected by the MAC */ +struct i40e_hw_port_stats { + /* eth stats collected by the port */ + struct i40e_eth_stats eth; + + /* additional port specific stats */ + u64 tx_dropped_link_down; /* tdold */ + u64 crc_errors; /* crcerrs */ + u64 illegal_bytes; /* illerrc */ + u64 error_bytes; /* errbc */ + u64 mac_local_faults; /* mlfc */ + u64 mac_remote_faults; /* mrfc */ + u64 rx_length_errors; /* rlec */ + u64 link_xon_rx; /* lxonrxc */ + u64 link_xoff_rx; /* lxoffrxc */ + u64 priority_xon_rx[8]; /* pxonrxc[8] */ + u64 priority_xoff_rx[8]; /* pxoffrxc[8] */ + u64 link_xon_tx; /* lxontxc */ + u64 link_xoff_tx; /* lxofftxc */ + u64 priority_xon_tx[8]; /* pxontxc[8] */ + u64 priority_xoff_tx[8]; /* pxofftxc[8] */ + u64 priority_xon_2_xoff[8]; /* pxon2offc[8] */ + u64 rx_size_64; /* prc64 */ + u64 rx_size_127; /* prc127 */ + u64 rx_size_255; /* prc255 */ + u64 rx_size_511; /* prc511 */ + u64 rx_size_1023; /* prc1023 */ + u64 rx_size_1522; /* prc1522 */ + u64 rx_size_big; /* prc9522 */ + u64 rx_undersize; /* ruc */ + u64 rx_fragments; /* rfc */ + u64 rx_oversize; /* roc */ + u64 rx_jabber; /* rjc */ + u64 tx_size_64; /* ptc64 */ + u64 tx_size_127; /* ptc127 */ + u64 tx_size_255; /* ptc255 */ + u64 tx_size_511; /* ptc511 */ + u64 tx_size_1023; /* ptc1023 */ + u64 tx_size_1522; /* ptc1522 */ + u64 tx_size_big; /* ptc9522 */ + u64 mac_short_packet_dropped; /* mspdc */ + u64 checksum_error; /* xec */ + /* flow director stats */ + u64 fd_atr_match; + u64 fd_sb_match; + u64 fd_atr_tunnel_match; + u32 fd_atr_status; + u32 fd_sb_status; + /* EEE LPI */ + u32 tx_lpi_status; + u32 rx_lpi_status; + u64 tx_lpi_count; /* etlpic */ + u64 rx_lpi_count; /* erlpic */ +}; + +/* Checksum and Shadow RAM pointers */ +#define I40E_SR_NVM_CONTROL_WORD 0x00 +#define I40E_EMP_MODULE_PTR 0x0F +#define I40E_SR_EMP_MODULE_PTR 0x48 +#define I40E_NVM_OEM_VER_OFF 0x83 +#define I40E_SR_NVM_DEV_STARTER_VERSION 0x18 +#define I40E_SR_NVM_WAKE_ON_LAN 0x19 +#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27 +#define I40E_SR_NVM_EETRACK_LO 0x2D +#define I40E_SR_NVM_EETRACK_HI 0x2E +#define I40E_SR_VPD_PTR 0x2F +#define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E +#define I40E_SR_SW_CHECKSUM_WORD 0x3F + +/* Auxiliary field, mask and shift definition for Shadow 
RAM and NVM Flash */ +#define I40E_SR_VPD_MODULE_MAX_SIZE 1024 +#define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024 +#define I40E_SR_CONTROL_WORD_1_SHIFT 0x06 +#define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT) +#define I40E_SR_CONTROL_WORD_1_NVM_BANK_VALID BIT(5) +#define I40E_SR_NVM_MAP_STRUCTURE_TYPE BIT(12) +#define I40E_PTR_TYPE BIT(15) + +/* Shadow RAM related */ +#define I40E_SR_SECTOR_SIZE_IN_WORDS 0x800 +#define I40E_SR_WORDS_IN_1KB 512 +/* Checksum should be calculated such that after adding all the words, + * including the checksum word itself, the sum should be 0xBABA. + */ +#define I40E_SR_SW_CHECKSUM_BASE 0xBABA + +#define I40E_SRRD_SRCTL_ATTEMPTS 100000 + +enum i40e_switch_element_types { + I40E_SWITCH_ELEMENT_TYPE_MAC = 1, + I40E_SWITCH_ELEMENT_TYPE_PF = 2, + I40E_SWITCH_ELEMENT_TYPE_VF = 3, + I40E_SWITCH_ELEMENT_TYPE_EMP = 4, + I40E_SWITCH_ELEMENT_TYPE_BMC = 6, + I40E_SWITCH_ELEMENT_TYPE_PE = 16, + I40E_SWITCH_ELEMENT_TYPE_VEB = 17, + I40E_SWITCH_ELEMENT_TYPE_PA = 18, + I40E_SWITCH_ELEMENT_TYPE_VSI = 19, +}; + +/* Supported EtherType filters */ +enum i40e_ether_type_index { + I40E_ETHER_TYPE_1588 = 0, + I40E_ETHER_TYPE_FIP = 1, + I40E_ETHER_TYPE_OUI_EXTENDED = 2, + I40E_ETHER_TYPE_MAC_CONTROL = 3, + I40E_ETHER_TYPE_LLDP = 4, + I40E_ETHER_TYPE_EVB_PROTOCOL1 = 5, + I40E_ETHER_TYPE_EVB_PROTOCOL2 = 6, + I40E_ETHER_TYPE_QCN_CNM = 7, + I40E_ETHER_TYPE_8021X = 8, + I40E_ETHER_TYPE_ARP = 9, + I40E_ETHER_TYPE_RSV1 = 10, + I40E_ETHER_TYPE_RSV2 = 11, +}; + +/* Filter context base size is 1K */ +#define I40E_HASH_FILTER_BASE_SIZE 1024 +/* Supported Hash filter values */ +enum i40e_hash_filter_size { + I40E_HASH_FILTER_SIZE_1K = 0, + I40E_HASH_FILTER_SIZE_2K = 1, + I40E_HASH_FILTER_SIZE_4K = 2, + I40E_HASH_FILTER_SIZE_8K = 3, + I40E_HASH_FILTER_SIZE_16K = 4, + I40E_HASH_FILTER_SIZE_32K = 5, + I40E_HASH_FILTER_SIZE_64K = 6, + I40E_HASH_FILTER_SIZE_128K = 7, + I40E_HASH_FILTER_SIZE_256K = 8, + I40E_HASH_FILTER_SIZE_512K = 9, + I40E_HASH_FILTER_SIZE_1M = 10, +}; + +/* DMA context base size is 0.5K */ +#define I40E_DMA_CNTX_BASE_SIZE 512 +/* Supported DMA context values */ +enum i40e_dma_cntx_size { + I40E_DMA_CNTX_SIZE_512 = 0, + I40E_DMA_CNTX_SIZE_1K = 1, + I40E_DMA_CNTX_SIZE_2K = 2, + I40E_DMA_CNTX_SIZE_4K = 3, + I40E_DMA_CNTX_SIZE_8K = 4, + I40E_DMA_CNTX_SIZE_16K = 5, + I40E_DMA_CNTX_SIZE_32K = 6, + I40E_DMA_CNTX_SIZE_64K = 7, + I40E_DMA_CNTX_SIZE_128K = 8, + I40E_DMA_CNTX_SIZE_256K = 9, +}; + +/* Supported Hash look up table (LUT) sizes */ +enum i40e_hash_lut_size { + I40E_HASH_LUT_SIZE_128 = 0, + I40E_HASH_LUT_SIZE_512 = 1, +}; + +/* Structure to hold a per PF filter control settings */ +struct i40e_filter_control_settings { + /* number of PE Quad Hash filter buckets */ + enum i40e_hash_filter_size pe_filt_num; + /* number of PE Quad Hash contexts */ + enum i40e_dma_cntx_size pe_cntx_num; + /* number of FCoE filter buckets */ + enum i40e_hash_filter_size fcoe_filt_num; + /* number of FCoE DDP contexts */ + enum i40e_dma_cntx_size fcoe_cntx_num; + /* size of the Hash LUT */ + enum i40e_hash_lut_size hash_lut_size; + /* enable FDIR filters for PF and its VFs */ + bool enable_fdir; + /* enable Ethertype filters for PF and its VFs */ + bool enable_ethtype; + /* enable MAC/VLAN filters for PF and its VFs */ + bool enable_macvlan; +}; + +/* Structure to hold device level control filter counts */ +struct i40e_control_filter_stats { + u16 mac_etype_used; /* Used perfect match MAC/EtherType filters */ + u16 etype_used; /* Used perfect EtherType filters */ + u16 
mac_etype_free; /* Un-used perfect match MAC/EtherType filters */ + u16 etype_free; /* Un-used perfect EtherType filters */ +}; + +enum i40e_reset_type { + I40E_RESET_POR = 0, + I40E_RESET_CORER = 1, + I40E_RESET_GLOBR = 2, + I40E_RESET_EMPR = 3, +}; + +/* IEEE 802.1AB LLDP Agent Variables from NVM */ +#define I40E_NVM_LLDP_CFG_PTR 0x06 +#define I40E_SR_LLDP_CFG_PTR 0x31 + +/* RSS Hash Table Size */ +#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000 + +/* INPUT SET MASK for RSS, flow director and flexible payload */ +#define I40E_FD_INSET_L3_SRC_SHIFT 47 +#define I40E_FD_INSET_L3_SRC_WORD_MASK (0x3ULL << \ + I40E_FD_INSET_L3_SRC_SHIFT) +#define I40E_FD_INSET_L3_DST_SHIFT 35 +#define I40E_FD_INSET_L3_DST_WORD_MASK (0x3ULL << \ + I40E_FD_INSET_L3_DST_SHIFT) +#define I40E_FD_INSET_L4_SRC_SHIFT 34 +#define I40E_FD_INSET_L4_SRC_WORD_MASK (0x1ULL << \ + I40E_FD_INSET_L4_SRC_SHIFT) +#define I40E_FD_INSET_L4_DST_SHIFT 33 +#define I40E_FD_INSET_L4_DST_WORD_MASK (0x1ULL << \ + I40E_FD_INSET_L4_DST_SHIFT) +#define I40E_FD_INSET_VERIFY_TAG_SHIFT 31 +#define I40E_FD_INSET_VERIFY_TAG_WORD_MASK (0x3ULL << \ + I40E_FD_INSET_VERIFY_TAG_SHIFT) + +#define I40E_FD_INSET_FLEX_WORD50_SHIFT 17 +#define I40E_FD_INSET_FLEX_WORD50_MASK (0x1ULL << \ + I40E_FD_INSET_FLEX_WORD50_SHIFT) +#define I40E_FD_INSET_FLEX_WORD51_SHIFT 16 +#define I40E_FD_INSET_FLEX_WORD51_MASK (0x1ULL << \ + I40E_FD_INSET_FLEX_WORD51_SHIFT) +#define I40E_FD_INSET_FLEX_WORD52_SHIFT 15 +#define I40E_FD_INSET_FLEX_WORD52_MASK (0x1ULL << \ + I40E_FD_INSET_FLEX_WORD52_SHIFT) +#define I40E_FD_INSET_FLEX_WORD53_SHIFT 14 +#define I40E_FD_INSET_FLEX_WORD53_MASK (0x1ULL << \ + I40E_FD_INSET_FLEX_WORD53_SHIFT) +#define I40E_FD_INSET_FLEX_WORD54_SHIFT 13 +#define I40E_FD_INSET_FLEX_WORD54_MASK (0x1ULL << \ + I40E_FD_INSET_FLEX_WORD54_SHIFT) +#define I40E_FD_INSET_FLEX_WORD55_SHIFT 12 +#define I40E_FD_INSET_FLEX_WORD55_MASK (0x1ULL << \ + I40E_FD_INSET_FLEX_WORD55_SHIFT) +#define I40E_FD_INSET_FLEX_WORD56_SHIFT 11 +#define I40E_FD_INSET_FLEX_WORD56_MASK (0x1ULL << \ + I40E_FD_INSET_FLEX_WORD56_SHIFT) +#define I40E_FD_INSET_FLEX_WORD57_SHIFT 10 +#define I40E_FD_INSET_FLEX_WORD57_MASK (0x1ULL << \ + I40E_FD_INSET_FLEX_WORD57_SHIFT) + +/* Version format for Dynamic Device Personalization(DDP) */ +struct i40e_ddp_version { + u8 major; + u8 minor; + u8 update; + u8 draft; +}; + +#define I40E_DDP_NAME_SIZE 32 + +/* Package header */ +struct i40e_package_header { + struct i40e_ddp_version version; + u32 segment_count; + u32 segment_offset[1]; +}; + +/* Generic segment header */ +struct i40e_generic_seg_header { +#define SEGMENT_TYPE_METADATA 0x00000001 +#define SEGMENT_TYPE_NOTES 0x00000002 +#define SEGMENT_TYPE_I40E 0x00000011 +#define SEGMENT_TYPE_X722 0x00000012 + u32 type; + struct i40e_ddp_version version; + u32 size; + char name[I40E_DDP_NAME_SIZE]; +}; + +struct i40e_metadata_segment { + struct i40e_generic_seg_header header; + struct i40e_ddp_version version; + u32 track_id; + char name[I40E_DDP_NAME_SIZE]; +}; + +struct i40e_device_id_entry { + u32 vendor_dev_id; + u32 sub_vendor_dev_id; +}; + +struct i40e_profile_segment { + struct i40e_generic_seg_header header; + struct i40e_ddp_version version; + char name[I40E_DDP_NAME_SIZE]; + u32 device_table_count; + struct i40e_device_id_entry device_table[1]; +}; + +struct i40e_section_table { + u32 section_count; + u32 section_offset[1]; +}; + +struct i40e_profile_section_header { + u16 tbl_size; + u16 data_end; + struct { +#define SECTION_TYPE_INFO 0x00000010 +#define SECTION_TYPE_MMIO 0x00000800 +#define 
SECTION_TYPE_AQ	0x00000801
+#define SECTION_TYPE_NOTE	0x80000000
+#define SECTION_TYPE_NAME	0x80000001
+		u32 type;
+		u32 offset;
+		u32 size;
+	} section;
+};
+
+struct i40e_profile_info {
+	u32 track_id;
+	struct i40e_ddp_version version;
+	u8 op;
+#define I40E_DDP_ADD_TRACKID	0x01
+#define I40E_DDP_REMOVE_TRACKID	0x02
+	u8 reserved[7];
+	u8 name[I40E_DDP_NAME_SIZE];
+};
+#endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/i40evf.h b/drivers/net/ethernet/intel/iavf/i40evf.h
new file mode 100644
index 000000000000..96e537a35000
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/i40evf.h
@@ -0,0 +1,427 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#ifndef _I40EVF_H_
+#define _I40EVF_H_
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/sctp.h>
+#include <linux/ipv6.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/gfp.h>
+#include <linux/skbuff.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/socket.h>
+#include <linux/jiffies.h>
+#include <net/ip6_checksum.h>
+#include <net/pkt_cls.h>
+#include <net/udp.h>
+#include <net/tc_act/tc_gact.h>
+#include <net/tc_act/tc_mirred.h>
+
+#include "i40e_type.h"
+#include <linux/avf/virtchnl.h>
+#include "i40e_txrx.h"
+
+#define DEFAULT_DEBUG_LEVEL_SHIFT 3
+#define PFX "i40evf: "
+
+/* VSI state flags shared with common code */
+enum i40evf_vsi_state_t {
+	__I40E_VSI_DOWN,
+	/* This must be last as it determines the size of the BITMAP */
+	__I40E_VSI_STATE_SIZE__,
+};
+
+/* dummy struct to make common code less painful */
+struct i40e_vsi {
+	struct i40evf_adapter *back;
+	struct net_device *netdev;
+	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+	u16 seid;
+	u16 id;
+	DECLARE_BITMAP(state, __I40E_VSI_STATE_SIZE__);
+	int base_vector;
+	u16 work_limit;
+	u16 qs_handle;
+	void *priv;	/* client driver data reference. */
+};
+
+/* How many Rx Buffers do we bundle into one write to the hardware ? */
+#define I40EVF_RX_BUFFER_WRITE	16	/* Must be power of 2 */
+#define I40EVF_DEFAULT_TXD	512
+#define I40EVF_DEFAULT_RXD	512
+#define I40EVF_MAX_TXD		4096
+#define I40EVF_MIN_TXD		64
+#define I40EVF_MAX_RXD		4096
+#define I40EVF_MIN_RXD		64
+#define I40EVF_REQ_DESCRIPTOR_MULTIPLE	32
+#define I40EVF_MAX_AQ_BUF_SIZE	4096
+#define I40EVF_AQ_LEN		32
+#define I40EVF_AQ_MAX_ERR	20 /* times to try before resetting AQ */
+
+#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
+
+#define I40E_RX_DESC(R, i) (&(((union i40e_32byte_rx_desc *)((R)->desc))[i]))
+#define I40E_TX_DESC(R, i) (&(((struct i40e_tx_desc *)((R)->desc))[i]))
+#define I40E_TX_CTXTDESC(R, i) \
+	(&(((struct i40e_tx_context_desc *)((R)->desc))[i]))
+#define I40EVF_MAX_REQ_QUEUES 4
+
+#define I40EVF_HKEY_ARRAY_SIZE ((I40E_VFQF_HKEY_MAX_INDEX + 1) * 4)
+#define I40EVF_HLUT_ARRAY_SIZE ((I40E_VFQF_HLUT_MAX_INDEX + 1) * 4)
+#define I40EVF_MBPS_DIVISOR	125000 /* divisor to convert to Mbps */
+
+/* MAX_MSIX_Q_VECTORS of these are allocated,
+ * but we only use one per queue-specific vector.
+ */
+struct i40e_q_vector {
+	struct i40evf_adapter *adapter;
+	struct i40e_vsi *vsi;
+	struct napi_struct napi;
+	struct i40e_ring_container rx;
+	struct i40e_ring_container tx;
+	u32 ring_mask;
+	u8 itr_countdown;	/* when 0 should adjust adaptive ITR */
+	u8 num_ringpairs;	/* total number of ring pairs in vector */
+	u16 v_idx;		/* index in the vsi->q_vector array. */
+	u16 reg_idx;		/* register index of the interrupt */
+	char name[IFNAMSIZ + 15];
+	bool arm_wb_state;
+	cpumask_t affinity_mask;
+	struct irq_affinity_notify affinity_notify;
+};
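An editor's sketch (not part of the patch): the I40E_RX_DESC()/I40E_TX_DESC() accessors above simply cast the ring's DMA area and index it, and the I40E_RXD_QW1_* masks from i40e_type.h unpack the write-back quadword, so a receive path looks roughly like this (example_scan_rx is a hypothetical name):

static void example_scan_rx(struct i40e_ring *ring)
{
	union i40e_32byte_rx_desc *rx_desc;
	u64 qword;
	u32 size;
	u8 ptype;

	rx_desc = I40E_RX_DESC(ring, ring->next_to_clean);
	qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);

	/* the DD bit signals that hardware has written this descriptor back */
	if (!(qword & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
		return;

	size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
	       I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
	ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
	/* ...hand size/ptype to the real receive processing... */
}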
+/* Helper macros to switch between ints/sec and what the register uses.
+ * And yes, it's the same math going both ways.  The lowest value
+ * supported by all of the i40e hardware is 8.
+ */
+#define EITR_INTS_PER_SEC_TO_REG(_eitr) \
+	((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
+#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
+
+#define I40EVF_DESC_UNUSED(R) \
+	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+	(R)->next_to_clean - (R)->next_to_use - 1)
+
+#define I40EVF_RX_DESC_ADV(R, i) \
+	(&(((union i40e_adv_rx_desc *)((R).desc))[i]))
+#define I40EVF_TX_DESC_ADV(R, i) \
+	(&(((union i40e_adv_tx_desc *)((R).desc))[i]))
+#define I40EVF_TX_CTXTDESC_ADV(R, i) \
+	(&(((struct i40e_adv_tx_context_desc *)((R).desc))[i]))
+
+#define OTHER_VECTOR 1
+#define NONQ_VECS (OTHER_VECTOR)
+
+#define MIN_MSIX_Q_VECTORS 1
+#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NONQ_VECS)
+
+#define I40EVF_QUEUE_END_OF_LIST 0x7FF
+#define I40EVF_FREE_VECTOR 0x7FFF
+struct i40evf_mac_filter {
+	struct list_head list;
+	u8 macaddr[ETH_ALEN];
+	bool remove;		/* filter needs to be removed */
+	bool add;		/* filter needs to be added */
+};
+
+struct i40evf_vlan_filter {
+	struct list_head list;
+	u16 vlan;
+	bool remove;		/* filter needs to be removed */
+	bool add;		/* filter needs to be added */
+};
+
+#define I40EVF_MAX_TRAFFIC_CLASS	4
+/* State of traffic class creation */
+enum i40evf_tc_state_t {
+	__I40EVF_TC_INVALID, /* no traffic class, default state */
+	__I40EVF_TC_RUNNING, /* traffic classes have been created */
+};
+
+/* channel info */
+struct i40evf_channel_config {
+	struct virtchnl_channel_info ch_info[I40EVF_MAX_TRAFFIC_CLASS];
+	enum i40evf_tc_state_t state;
+	u8 total_qps;
+};
+
+/* State of cloud filter */
+enum i40evf_cloud_filter_state_t {
+	__I40EVF_CF_INVALID,	 /* cloud filter not added */
+	__I40EVF_CF_ADD_PENDING, /* cloud filter pending add by the PF */
+	__I40EVF_CF_DEL_PENDING, /* cloud filter pending del by the PF */
+	__I40EVF_CF_ACTIVE,	 /* cloud filter is active */
+};
+
+/* Driver state. The order of these is important!
*/ +enum i40evf_state_t { + __I40EVF_STARTUP, /* driver loaded, probe complete */ + __I40EVF_REMOVE, /* driver is being unloaded */ + __I40EVF_INIT_VERSION_CHECK, /* aq msg sent, awaiting reply */ + __I40EVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */ + __I40EVF_INIT_SW, /* got resources, setting up structs */ + __I40EVF_RESETTING, /* in reset */ + /* Below here, watchdog is running */ + __I40EVF_DOWN, /* ready, can be opened */ + __I40EVF_DOWN_PENDING, /* descending, waiting for watchdog */ + __I40EVF_TESTING, /* in ethtool self-test */ + __I40EVF_RUNNING, /* opened, working */ +}; + +enum i40evf_critical_section_t { + __I40EVF_IN_CRITICAL_TASK, /* cannot be interrupted */ + __I40EVF_IN_CLIENT_TASK, + __I40EVF_IN_REMOVE_TASK, /* device being removed */ +}; + +#define I40EVF_CLOUD_FIELD_OMAC 0x01 +#define I40EVF_CLOUD_FIELD_IMAC 0x02 +#define I40EVF_CLOUD_FIELD_IVLAN 0x04 +#define I40EVF_CLOUD_FIELD_TEN_ID 0x08 +#define I40EVF_CLOUD_FIELD_IIP 0x10 + +#define I40EVF_CF_FLAGS_OMAC I40EVF_CLOUD_FIELD_OMAC +#define I40EVF_CF_FLAGS_IMAC I40EVF_CLOUD_FIELD_IMAC +#define I40EVF_CF_FLAGS_IMAC_IVLAN (I40EVF_CLOUD_FIELD_IMAC |\ + I40EVF_CLOUD_FIELD_IVLAN) +#define I40EVF_CF_FLAGS_IMAC_TEN_ID (I40EVF_CLOUD_FIELD_IMAC |\ + I40EVF_CLOUD_FIELD_TEN_ID) +#define I40EVF_CF_FLAGS_OMAC_TEN_ID_IMAC (I40EVF_CLOUD_FIELD_OMAC |\ + I40EVF_CLOUD_FIELD_IMAC |\ + I40EVF_CLOUD_FIELD_TEN_ID) +#define I40EVF_CF_FLAGS_IMAC_IVLAN_TEN_ID (I40EVF_CLOUD_FIELD_IMAC |\ + I40EVF_CLOUD_FIELD_IVLAN |\ + I40EVF_CLOUD_FIELD_TEN_ID) +#define I40EVF_CF_FLAGS_IIP I40E_CLOUD_FIELD_IIP + +/* bookkeeping of cloud filters */ +struct i40evf_cloud_filter { + enum i40evf_cloud_filter_state_t state; + struct list_head list; + struct virtchnl_filter f; + unsigned long cookie; + bool del; /* filter needs to be deleted */ + bool add; /* filter needs to be added */ +}; + +/* board specific private data structure */ +struct i40evf_adapter { + struct timer_list watchdog_timer; + struct work_struct reset_task; + struct work_struct adminq_task; + struct delayed_work client_task; + struct delayed_work init_task; + wait_queue_head_t down_waitqueue; + struct i40e_q_vector *q_vectors; + struct list_head vlan_filter_list; + struct list_head mac_filter_list; + /* Lock to protect accesses to MAC and VLAN lists */ + spinlock_t mac_vlan_list_lock; + char misc_vector_name[IFNAMSIZ + 9]; + int num_active_queues; + int num_req_queues; + + /* TX */ + struct i40e_ring *tx_rings; + u32 tx_timeout_count; + u32 tx_desc_count; + + /* RX */ + struct i40e_ring *rx_rings; + u64 hw_csum_rx_error; + u32 rx_desc_count; + int num_msix_vectors; + int num_iwarp_msix; + int iwarp_base_vector; + u32 client_pending; + struct i40e_client_instance *cinst; + struct msix_entry *msix_entries; + + u32 flags; +#define I40EVF_FLAG_RX_CSUM_ENABLED BIT(0) +#define I40EVF_FLAG_PF_COMMS_FAILED BIT(3) +#define I40EVF_FLAG_RESET_PENDING BIT(4) +#define I40EVF_FLAG_RESET_NEEDED BIT(5) +#define I40EVF_FLAG_WB_ON_ITR_CAPABLE BIT(6) +#define I40EVF_FLAG_ADDR_SET_BY_PF BIT(8) +#define I40EVF_FLAG_SERVICE_CLIENT_REQUESTED BIT(9) +#define I40EVF_FLAG_CLIENT_NEEDS_OPEN BIT(10) +#define I40EVF_FLAG_CLIENT_NEEDS_CLOSE BIT(11) +#define I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS BIT(12) +#define I40EVF_FLAG_PROMISC_ON BIT(13) +#define I40EVF_FLAG_ALLMULTI_ON BIT(14) +#define I40EVF_FLAG_LEGACY_RX BIT(15) +#define I40EVF_FLAG_REINIT_ITR_NEEDED BIT(16) +#define I40EVF_FLAG_QUEUES_DISABLED BIT(17) +/* duplicates for common code */ +#define I40E_FLAG_DCB_ENABLED 0 +#define I40E_FLAG_RX_CSUM_ENABLED 
I40EVF_FLAG_RX_CSUM_ENABLED +#define I40E_FLAG_LEGACY_RX I40EVF_FLAG_LEGACY_RX + /* flags for admin queue service task */ + u32 aq_required; +#define I40EVF_FLAG_AQ_ENABLE_QUEUES BIT(0) +#define I40EVF_FLAG_AQ_DISABLE_QUEUES BIT(1) +#define I40EVF_FLAG_AQ_ADD_MAC_FILTER BIT(2) +#define I40EVF_FLAG_AQ_ADD_VLAN_FILTER BIT(3) +#define I40EVF_FLAG_AQ_DEL_MAC_FILTER BIT(4) +#define I40EVF_FLAG_AQ_DEL_VLAN_FILTER BIT(5) +#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES BIT(6) +#define I40EVF_FLAG_AQ_MAP_VECTORS BIT(7) +#define I40EVF_FLAG_AQ_HANDLE_RESET BIT(8) +#define I40EVF_FLAG_AQ_CONFIGURE_RSS BIT(9) /* direct AQ config */ +#define I40EVF_FLAG_AQ_GET_CONFIG BIT(10) +/* Newer style, RSS done by the PF so we can ignore hardware vagaries. */ +#define I40EVF_FLAG_AQ_GET_HENA BIT(11) +#define I40EVF_FLAG_AQ_SET_HENA BIT(12) +#define I40EVF_FLAG_AQ_SET_RSS_KEY BIT(13) +#define I40EVF_FLAG_AQ_SET_RSS_LUT BIT(14) +#define I40EVF_FLAG_AQ_REQUEST_PROMISC BIT(15) +#define I40EVF_FLAG_AQ_RELEASE_PROMISC BIT(16) +#define I40EVF_FLAG_AQ_REQUEST_ALLMULTI BIT(17) +#define I40EVF_FLAG_AQ_RELEASE_ALLMULTI BIT(18) +#define I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING BIT(19) +#define I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING BIT(20) +#define I40EVF_FLAG_AQ_ENABLE_CHANNELS BIT(21) +#define I40EVF_FLAG_AQ_DISABLE_CHANNELS BIT(22) +#define I40EVF_FLAG_AQ_ADD_CLOUD_FILTER BIT(23) +#define I40EVF_FLAG_AQ_DEL_CLOUD_FILTER BIT(24) + + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; + + struct i40e_hw hw; /* defined in i40e_type.h */ + + enum i40evf_state_t state; + unsigned long crit_section; + + struct work_struct watchdog_task; + bool netdev_registered; + bool link_up; + enum virtchnl_link_speed link_speed; + enum virtchnl_ops current_op; +#define CLIENT_ALLOWED(_a) ((_a)->vf_res ? \ + (_a)->vf_res->vf_cap_flags & \ + VIRTCHNL_VF_OFFLOAD_IWARP : \ + 0) +#define CLIENT_ENABLED(_a) ((_a)->cinst) +/* RSS by the PF should be preferred over RSS via other methods. */ +#define RSS_PF(_a) ((_a)->vf_res->vf_cap_flags & \ + VIRTCHNL_VF_OFFLOAD_RSS_PF) +#define RSS_AQ(_a) ((_a)->vf_res->vf_cap_flags & \ + VIRTCHNL_VF_OFFLOAD_RSS_AQ) +#define RSS_REG(_a) (!((_a)->vf_res->vf_cap_flags & \ + (VIRTCHNL_VF_OFFLOAD_RSS_AQ | \ + VIRTCHNL_VF_OFFLOAD_RSS_PF))) +#define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \ + VIRTCHNL_VF_OFFLOAD_VLAN) + struct virtchnl_vf_resource *vf_res; /* incl. 
all VSIs */
+	struct virtchnl_vsi_resource *vsi_res; /* our LAN VSI */
+	struct virtchnl_version_info pf_version;
+#define PF_IS_V11(_a) (((_a)->pf_version.major == 1) && \
+		       ((_a)->pf_version.minor == 1))
+	u16 msg_enable;
+	struct i40e_eth_stats current_stats;
+	struct i40e_vsi vsi;
+	u32 aq_wait_count;
+	/* RSS stuff */
+	u64 hena;
+	u16 rss_key_size;
+	u16 rss_lut_size;
+	u8 *rss_key;
+	u8 *rss_lut;
+	/* ADQ related members */
+	struct i40evf_channel_config ch_config;
+	u8 num_tc;
+	struct list_head cloud_filter_list;
+	/* lock to protect access to the cloud filter list */
+	spinlock_t cloud_filter_list_lock;
+	u16 num_cloud_filters;
+};
+
+
+/* Ethtool Private Flags */
+
+/* lan device */
+struct i40e_device {
+	struct list_head list;
+	struct i40evf_adapter *vf;
+};
+
+/* needed by i40evf_ethtool.c */
+extern char i40evf_driver_name[];
+extern const char i40evf_driver_version[];
+
+int i40evf_up(struct i40evf_adapter *adapter);
+void i40evf_down(struct i40evf_adapter *adapter);
+int i40evf_process_config(struct i40evf_adapter *adapter);
+void i40evf_schedule_reset(struct i40evf_adapter *adapter);
+void i40evf_reset(struct i40evf_adapter *adapter);
+void i40evf_set_ethtool_ops(struct net_device *netdev);
+void i40evf_update_stats(struct i40evf_adapter *adapter);
+void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter);
+int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter);
+void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask);
+void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter);
+void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter);
+
+void i40e_napi_add_all(struct i40evf_adapter *adapter);
+void i40e_napi_del_all(struct i40evf_adapter *adapter);
+
+int i40evf_send_api_ver(struct i40evf_adapter *adapter);
+int i40evf_verify_api_ver(struct i40evf_adapter *adapter);
+int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter);
+int i40evf_get_vf_config(struct i40evf_adapter *adapter);
+void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush);
+void i40evf_configure_queues(struct i40evf_adapter *adapter);
+void i40evf_deconfigure_queues(struct i40evf_adapter *adapter);
+void i40evf_enable_queues(struct i40evf_adapter *adapter);
+void i40evf_disable_queues(struct i40evf_adapter *adapter);
+void i40evf_map_queues(struct i40evf_adapter *adapter);
+int i40evf_request_queues(struct i40evf_adapter *adapter, int num);
+void i40evf_add_ether_addrs(struct i40evf_adapter *adapter);
+void i40evf_del_ether_addrs(struct i40evf_adapter *adapter);
+void i40evf_add_vlans(struct i40evf_adapter *adapter);
+void i40evf_del_vlans(struct i40evf_adapter *adapter);
+void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags);
+void i40evf_request_stats(struct i40evf_adapter *adapter);
+void i40evf_request_reset(struct i40evf_adapter *adapter);
+void i40evf_get_hena(struct i40evf_adapter *adapter);
+void i40evf_set_hena(struct i40evf_adapter *adapter);
+void i40evf_set_rss_key(struct i40evf_adapter *adapter);
+void i40evf_set_rss_lut(struct i40evf_adapter *adapter);
+void i40evf_enable_vlan_stripping(struct i40evf_adapter *adapter);
+void i40evf_disable_vlan_stripping(struct i40evf_adapter *adapter);
+void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
+				enum virtchnl_ops v_opcode,
+				i40e_status v_retval, u8 *msg, u16 msglen);
+int i40evf_config_rss(struct i40evf_adapter *adapter);
+int i40evf_lan_add_device(struct i40evf_adapter *adapter);
+int i40evf_lan_del_device(struct i40evf_adapter *adapter);
+void i40evf_client_subtask(struct i40evf_adapter *adapter);
+void i40evf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len);
+void i40evf_notify_client_l2_params(struct i40e_vsi *vsi);
+void i40evf_notify_client_open(struct i40e_vsi *vsi);
+void i40evf_notify_client_close(struct i40e_vsi *vsi, bool reset);
+void i40evf_enable_channels(struct i40evf_adapter *adapter);
+void i40evf_disable_channels(struct i40evf_adapter *adapter);
+void i40evf_add_cloud_filter(struct i40evf_adapter *adapter);
+void i40evf_del_cloud_filter(struct i40evf_adapter *adapter);
+#endif /* _I40EVF_H_ */
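A note on the I40EVF_FLAG_AQ_* bits above (an illustrative sketch, not part of the patch; the dispatcher name is invented, though the flags imply this watchdog-driven design): request paths set a bit in adapter->aq_required, and the watchdog task later issues one pending admin-queue message per pass, with each sender assumed to clear its own bit:

static void example_process_aq_command(struct i40evf_adapter *adapter)
{
	if (adapter->aq_required & I40EVF_FLAG_AQ_GET_CONFIG) {
		i40evf_send_vf_config_msg(adapter);
		return;
	}
	if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_MAC_FILTER) {
		i40evf_add_ether_addrs(adapter);
		return;
	}
	if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_QUEUES) {
		i40evf_enable_queues(adapter);
		return;
	}
	/* ...one branch per remaining I40EVF_FLAG_AQ_* bit... */
}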
diff --git a/drivers/net/ethernet/intel/iavf/i40evf_client.c b/drivers/net/ethernet/intel/iavf/i40evf_client.c
new file mode 100644
index 000000000000..3cc9d60d0d72
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/i40evf_client.c
@@ -0,0 +1,579 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#include <linux/list.h>
+#include <linux/errno.h>
+
+#include "i40evf.h"
+#include "i40e_prototype.h"
+#include "i40evf_client.h"
+
+static
+const char i40evf_client_interface_version_str[] = I40EVF_CLIENT_VERSION_STR;
+static struct i40e_client *vf_registered_client;
+static LIST_HEAD(i40evf_devices);
+static DEFINE_MUTEX(i40evf_device_mutex);
+
+static u32 i40evf_client_virtchnl_send(struct i40e_info *ldev,
+				       struct i40e_client *client,
+				       u8 *msg, u16 len);
+
+static int i40evf_client_setup_qvlist(struct i40e_info *ldev,
+				      struct i40e_client *client,
+				      struct i40e_qvlist_info *qvlist_info);
+
+static struct i40e_ops i40evf_lan_ops = {
+	.virtchnl_send = i40evf_client_virtchnl_send,
+	.setup_qvlist = i40evf_client_setup_qvlist,
+};
+
+/**
+ * i40evf_client_get_params - retrieve relevant client parameters
+ * @vsi: VSI with parameters
+ * @params: client param struct
+ **/
+static
+void i40evf_client_get_params(struct i40e_vsi *vsi, struct i40e_params *params)
+{
+	int i;
+
+	memset(params, 0, sizeof(struct i40e_params));
+	params->mtu = vsi->netdev->mtu;
+	params->link_up = vsi->back->link_up;
+
+	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+		params->qos.prio_qos[i].tc = 0;
+		params->qos.prio_qos[i].qs_handle = vsi->qs_handle;
+	}
+}
+
+/**
+ * i40evf_notify_client_message - call the client message receive callback
+ * @vsi: the VSI associated with this client
+ * @msg: message buffer
+ * @len: length of message
+ *
+ * If there is a client to this VSI, call the client
+ **/
+void i40evf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len)
+{
+	struct i40e_client_instance *cinst;
+
+	if (!vsi)
+		return;
+
+	cinst = vsi->back->cinst;
+	if (!cinst || !cinst->client || !cinst->client->ops ||
+	    !cinst->client->ops->virtchnl_receive) {
+		dev_dbg(&vsi->back->pdev->dev,
+			"Cannot locate client instance virtchnl_receive function\n");
+		return;
+	}
+	cinst->client->ops->virtchnl_receive(&cinst->lan_info, cinst->client,
+					     msg, len);
+}
+
+/**
+ * i40evf_notify_client_l2_params - call the client notify callback
+ * @vsi: the VSI with l2 param changes
+ *
+ * If there is a client to this VSI, call the client
+ **/
+void i40evf_notify_client_l2_params(struct i40e_vsi *vsi)
+{
+	struct i40e_client_instance *cinst;
+	struct i40e_params params;
+
+	if (!vsi)
+		return;
+
+	cinst = vsi->back->cinst;
+
+	if (!cinst || !cinst->client || !cinst->client->ops ||
+	    !cinst->client->ops->l2_param_change) {
+		dev_dbg(&vsi->back->pdev->dev,
+			"Cannot locate client instance l2_param_change function\n");
+		return;
+	}
+	i40evf_client_get_params(vsi, &params);
+	cinst->lan_info.params = params;
+	cinst->client->ops->l2_param_change(&cinst->lan_info, cinst->client,
+					    &params);
+}
+
+/**
+ * i40evf_notify_client_open - call the client open callback
+ * @vsi: the VSI with netdev opened
+ *
+ * If there is a client to this netdev, call the client with open
+ **/
+void i40evf_notify_client_open(struct i40e_vsi *vsi)
+{
+	struct i40evf_adapter *adapter = vsi->back;
+	struct i40e_client_instance *cinst = adapter->cinst;
+	int ret;
+
+	if (!cinst || !cinst->client || !cinst->client->ops ||
+	    !cinst->client->ops->open) {
+		dev_dbg(&vsi->back->pdev->dev,
+			"Cannot locate client instance open function\n");
+		return;
+	}
+	if (!(test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state))) {
+		ret = cinst->client->ops->open(&cinst->lan_info, cinst->client);
+		if (!ret)
+			set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state);
+	}
+}
+
+/**
+ * i40evf_client_release_qvlist - send a message to the PF to release iwarp qv map
+ * @ldev: pointer to L2 context.
+ *
+ * Return 0 on success or < 0 on error
+ **/
+static int i40evf_client_release_qvlist(struct i40e_info *ldev)
+{
+	struct i40evf_adapter *adapter = ldev->vf;
+	i40e_status err;
+
+	if (adapter->aq_required)
+		return -EAGAIN;
+
+	err = i40e_aq_send_msg_to_pf(&adapter->hw,
+				     VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
+				     I40E_SUCCESS, NULL, 0, NULL);
+
+	if (err)
+		dev_err(&adapter->pdev->dev,
+			"Unable to send iWarp vector release message to PF, error %d, aq status %d\n",
+			err, adapter->hw.aq.asq_last_status);
+
+	return err;
+}
+
+/**
+ * i40evf_notify_client_close - call the client close callback
+ * @vsi: the VSI with netdev closed
+ * @reset: true when close called due to reset pending
+ *
+ * If there is a client to this netdev, call the client with close
+ **/
+void i40evf_notify_client_close(struct i40e_vsi *vsi, bool reset)
+{
+	struct i40evf_adapter *adapter = vsi->back;
+	struct i40e_client_instance *cinst = adapter->cinst;
+
+	if (!cinst || !cinst->client || !cinst->client->ops ||
+	    !cinst->client->ops->close) {
+		dev_dbg(&vsi->back->pdev->dev,
+			"Cannot locate client instance close function\n");
+		return;
+	}
+	cinst->client->ops->close(&cinst->lan_info, cinst->client, reset);
+	i40evf_client_release_qvlist(&cinst->lan_info);
+	clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state);
+}
+
+/**
+ * i40evf_client_add_instance - add a client instance to the instance list
+ * @adapter: pointer to the board struct
+ *
+ * Returns cinst ptr on success, NULL on failure
+ **/
+static struct i40e_client_instance *
+i40evf_client_add_instance(struct i40evf_adapter *adapter)
+{
+	struct i40e_client_instance *cinst = NULL;
+	struct i40e_vsi *vsi = &adapter->vsi;
+	struct netdev_hw_addr *mac = NULL;
+	struct i40e_params params;
+
+	if (!vf_registered_client)
+		goto out;
+
+	if (adapter->cinst) {
+		cinst = adapter->cinst;
+		goto out;
+	}
+
+	cinst = kzalloc(sizeof(*cinst), GFP_KERNEL);
+	if (!cinst)
+		goto out;
+
+	cinst->lan_info.vf = (void *)adapter;
+	cinst->lan_info.netdev = vsi->netdev;
+	cinst->lan_info.pcidev = adapter->pdev;
+	cinst->lan_info.fid = 0;
+	cinst->lan_info.ftype = I40E_CLIENT_FTYPE_VF;
+	cinst->lan_info.hw_addr = adapter->hw.hw_addr;
+	cinst->lan_info.ops = &i40evf_lan_ops;
+	cinst->lan_info.version.major = I40EVF_CLIENT_VERSION_MAJOR;
+	cinst->lan_info.version.minor = I40EVF_CLIENT_VERSION_MINOR;
+	cinst->lan_info.version.build = I40EVF_CLIENT_VERSION_BUILD;
+	i40evf_client_get_params(vsi, &params);
+	cinst->lan_info.params = params;
+	set_bit(__I40E_CLIENT_INSTANCE_NONE, &cinst->state);
+	cinst->lan_info.msix_count = adapter->num_iwarp_msix;
+	cinst->lan_info.msix_entries =
+			&adapter->msix_entries[adapter->iwarp_base_vector];
+
+	mac = list_first_entry(&cinst->lan_info.netdev->dev_addrs.list,
+			       struct netdev_hw_addr, list);
+	if (mac)
+		ether_addr_copy(cinst->lan_info.lanmac, mac->addr);
+	else
+		dev_err(&adapter->pdev->dev, "MAC address list is empty!\n");
+
+	cinst->client = vf_registered_client;
+	adapter->cinst = cinst;
+out:
+	return cinst;
+}
+
+/**
+ * i40evf_client_del_instance - removes a client instance from the list
+ * @adapter: pointer to the board struct
+ *
+ **/
+static
+void i40evf_client_del_instance(struct i40evf_adapter *adapter)
+{
+	kfree(adapter->cinst);
+	adapter->cinst = NULL;
+}
+
+/**
+ * i40evf_client_subtask - client maintenance work
+ * @adapter: board private structure
+ **/
+void i40evf_client_subtask(struct i40evf_adapter *adapter)
+{
+	struct i40e_client *client = vf_registered_client;
+	struct i40e_client_instance *cinst;
+	int ret = 0;
+
+	if (adapter->state < __I40EVF_DOWN)
+		return;
+
+	/* first check client is registered */
+	if (!client)
+		return;
+
+	/* Add the client instance to the instance list */
+	cinst = i40evf_client_add_instance(adapter);
+	if (!cinst)
+		return;
+
+	dev_info(&adapter->pdev->dev, "Added instance of Client %s\n",
+		 client->name);
+
+	if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state)) {
+		/* Send an Open request to the client */
+
+		if (client->ops && client->ops->open)
+			ret = client->ops->open(&cinst->lan_info, client);
+		if (!ret)
+			set_bit(__I40E_CLIENT_INSTANCE_OPENED,
+				&cinst->state);
+		else
+			/* remove client instance */
+			i40evf_client_del_instance(adapter);
+	}
+}
+
+/**
+ * i40evf_lan_add_device - add a lan device struct to the list of lan devices
+ * @adapter: pointer to the board struct
+ *
+ * Returns 0 on success or non-zero on error
+ **/
+int i40evf_lan_add_device(struct i40evf_adapter *adapter)
+{
+	struct i40e_device *ldev;
+	int ret = 0;
+
+	mutex_lock(&i40evf_device_mutex);
+	list_for_each_entry(ldev, &i40evf_devices, list) {
+		if (ldev->vf == adapter) {
+			ret = -EEXIST;
+			goto out;
+		}
+	}
+	ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
+	if (!ldev) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	ldev->vf = adapter;
+	INIT_LIST_HEAD(&ldev->list);
+	list_add(&ldev->list, &i40evf_devices);
+	dev_info(&adapter->pdev->dev, "Added LAN device bus=0x%02x dev=0x%02x func=0x%02x\n",
+		 adapter->hw.bus.bus_id, adapter->hw.bus.device,
+		 adapter->hw.bus.func);
+
+	/* Since in some cases registration may have happened before a device
+	 * got added, we can schedule a subtask to initiate the clients.
+ */ + adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED; + +out: + mutex_unlock(&i40evf_device_mutex); + return ret; +} + +/** + * i40evf_lan_del_device - removes a lan device from the device list + * @adapter: pointer to the board struct + * + * Returns 0 on success or non-0 on error + **/ +int i40evf_lan_del_device(struct i40evf_adapter *adapter) +{ + struct i40e_device *ldev, *tmp; + int ret = -ENODEV; + + mutex_lock(&i40evf_device_mutex); + list_for_each_entry_safe(ldev, tmp, &i40evf_devices, list) { + if (ldev->vf == adapter) { + dev_info(&adapter->pdev->dev, + "Deleted LAN device bus=0x%02x dev=0x%02x func=0x%02x\n", + adapter->hw.bus.bus_id, adapter->hw.bus.device, + adapter->hw.bus.func); + list_del(&ldev->list); + kfree(ldev); + ret = 0; + break; + } + } + + mutex_unlock(&i40evf_device_mutex); + return ret; +} + +/** + * i40evf_client_release - release client specific resources + * @client: pointer to the registered client + * + **/ +static void i40evf_client_release(struct i40e_client *client) +{ + struct i40e_client_instance *cinst; + struct i40e_device *ldev; + struct i40evf_adapter *adapter; + + mutex_lock(&i40evf_device_mutex); + list_for_each_entry(ldev, &i40evf_devices, list) { + adapter = ldev->vf; + cinst = adapter->cinst; + if (!cinst) + continue; + if (test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state)) { + if (client->ops && client->ops->close) + client->ops->close(&cinst->lan_info, client, + false); + i40evf_client_release_qvlist(&cinst->lan_info); + clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state); + + dev_warn(&adapter->pdev->dev, + "Client %s instance closed\n", client->name); + } + /* delete the client instance */ + i40evf_client_del_instance(adapter); + dev_info(&adapter->pdev->dev, "Deleted client instance of Client %s\n", + client->name); + } + mutex_unlock(&i40evf_device_mutex); +} + +/** + * i40evf_client_prepare - prepare client specific resources + * @client: pointer to the registered client + * + **/ +static void i40evf_client_prepare(struct i40e_client *client) +{ + struct i40e_device *ldev; + struct i40evf_adapter *adapter; + + mutex_lock(&i40evf_device_mutex); + list_for_each_entry(ldev, &i40evf_devices, list) { + adapter = ldev->vf; + /* Signal the watchdog to service the client */ + adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED; + } + mutex_unlock(&i40evf_device_mutex); +} + +/** + * i40evf_client_virtchnl_send - send a message to the PF instance + * @ldev: pointer to L2 context. + * @client: Client pointer. + * @msg: pointer to message buffer + * @len: message length + * + * Return 0 on success or < 0 on error + **/ +static u32 i40evf_client_virtchnl_send(struct i40e_info *ldev, + struct i40e_client *client, + u8 *msg, u16 len) +{ + struct i40evf_adapter *adapter = ldev->vf; + i40e_status err; + + if (adapter->aq_required) + return -EAGAIN; + + err = i40e_aq_send_msg_to_pf(&adapter->hw, VIRTCHNL_OP_IWARP, + I40E_SUCCESS, msg, len, NULL); + if (err) + dev_err(&adapter->pdev->dev, "Unable to send iWarp message to PF, error %d, aq status %d\n", + err, adapter->hw.aq.asq_last_status); + + return err; +} + +/** + * i40evf_client_setup_qvlist - send a message to the PF to setup iwarp qv map + * @ldev: pointer to L2 context. + * @client: Client pointer. 
+ * @qvlist_info: queue and vector list
+ *
+ * Return 0 on success or < 0 on error
+ **/
+static int i40evf_client_setup_qvlist(struct i40e_info *ldev,
+				      struct i40e_client *client,
+				      struct i40e_qvlist_info *qvlist_info)
+{
+	struct virtchnl_iwarp_qvlist_info *v_qvlist_info;
+	struct i40evf_adapter *adapter = ldev->vf;
+	struct i40e_qv_info *qv_info;
+	i40e_status err;
+	u32 v_idx, i;
+	u32 msg_size;
+
+	if (adapter->aq_required)
+		return -EAGAIN;
+
+	/* A quick check on whether the vectors belong to the client */
+	for (i = 0; i < qvlist_info->num_vectors; i++) {
+		qv_info = &qvlist_info->qv_info[i];
+		if (!qv_info)
+			continue;
+		v_idx = qv_info->v_idx;
+		if ((v_idx >=
+		    (adapter->iwarp_base_vector + adapter->num_iwarp_msix)) ||
+		    (v_idx < adapter->iwarp_base_vector))
+			return -EINVAL;
+	}
+
+	v_qvlist_info = (struct virtchnl_iwarp_qvlist_info *)qvlist_info;
+	msg_size = sizeof(struct virtchnl_iwarp_qvlist_info) +
+			(sizeof(struct virtchnl_iwarp_qv_info) *
+			(v_qvlist_info->num_vectors - 1));
+
+	adapter->client_pending |= BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP);
+	err = i40e_aq_send_msg_to_pf(&adapter->hw,
+			VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP,
+			I40E_SUCCESS, (u8 *)v_qvlist_info, msg_size, NULL);
+
+	if (err) {
+		dev_err(&adapter->pdev->dev,
+			"Unable to send iWarp vector config message to PF, error %d, aq status %d\n",
+			err, adapter->hw.aq.asq_last_status);
+		goto out;
+	}
+
+	err = -EBUSY;
+	for (i = 0; i < 5; i++) {
+		msleep(100);
+		if (!(adapter->client_pending &
+		      BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP))) {
+			err = 0;
+			break;
+		}
+	}
+out:
+	return err;
+}
+
+/**
+ * i40evf_register_client - Register an i40e client driver with the L2 driver
+ * @client: pointer to the i40e_client struct
+ *
+ * Returns 0 on success or non-zero on error
+ **/
+int i40evf_register_client(struct i40e_client *client)
+{
+	int ret = 0;
+
+	if (!client) {
+		ret = -EIO;
+		goto out;
+	}
+
+	if (strlen(client->name) == 0) {
+		pr_info("i40evf: Failed to register client with no name\n");
+		ret = -EIO;
+		goto out;
+	}
+
+	if (vf_registered_client) {
+		pr_info("i40evf: Client %s has already been registered!\n",
+			client->name);
+		ret = -EEXIST;
+		goto out;
+	}
+
+	if ((client->version.major != I40EVF_CLIENT_VERSION_MAJOR) ||
+	    (client->version.minor != I40EVF_CLIENT_VERSION_MINOR)) {
+		pr_info("i40evf: Failed to register client %s due to mismatched client interface version\n",
+			client->name);
+		pr_info("Client is using version: %02d.%02d.%02d while LAN driver supports %s\n",
+			client->version.major, client->version.minor,
+			client->version.build,
+			i40evf_client_interface_version_str);
+		ret = -EIO;
+		goto out;
+	}
+
+	vf_registered_client = client;
+
+	i40evf_client_prepare(client);
+
+	pr_info("i40evf: Registered client %s with return code %d\n",
+		client->name, ret);
+out:
+	return ret;
+}
+EXPORT_SYMBOL(i40evf_register_client);
+
+/**
+ * i40evf_unregister_client - Unregister an i40e client driver with the L2 driver
+ * @client: pointer to the i40e_client struct
+ *
+ * Returns 0 on success or non-zero on error
+ **/
+int i40evf_unregister_client(struct i40e_client *client)
+{
+	int ret = 0;
+
+	/* When an unregister request comes through, we have to send
+	 * a close for each of the client instances that were opened.
+	 * The client_release function is called to handle this.
+	 */
+	i40evf_client_release(client);
+
+	if (vf_registered_client != client) {
+		pr_info("i40evf: Client %s has not been registered\n",
+			client->name);
+		ret = -ENODEV;
+		goto out;
+	}
+	vf_registered_client = NULL;
+	pr_info("i40evf: Unregistered client %s\n", client->name);
+out:
+	return ret;
+}
+EXPORT_SYMBOL(i40evf_unregister_client);
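To make the registration contract concrete, here is a hedged sketch of the minimal client a module might register against this interface (all example_* names are invented; the fields and ops are those declared in i40evf_client.h, added below):

static int example_open(struct i40e_info *ldev, struct i40e_client *client)
{
	/* set up queues/vectors here, e.g. via ldev->ops->setup_qvlist() */
	return 0;
}

static struct i40e_client_ops example_ops = {
	.open = example_open,
	/* .close, .l2_param_change and .virtchnl_receive as needed */
};

static struct i40e_client example_client = {
	.name	 = "example",
	.version = { I40EVF_CLIENT_VERSION_MAJOR,
		     I40EVF_CLIENT_VERSION_MINOR,
		     I40EVF_CLIENT_VERSION_BUILD, 0 },
	.type	 = I40E_CLIENT_IWARP,
	.ops	 = &example_ops,
};

/* module init/exit would then call:
 *	i40evf_register_client(&example_client);
 *	i40evf_unregister_client(&example_client);
 */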
diff --git a/drivers/net/ethernet/intel/iavf/i40evf_client.h b/drivers/net/ethernet/intel/iavf/i40evf_client.h
new file mode 100644
index 000000000000..5585f362048a
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/i40evf_client.h
@@ -0,0 +1,169 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#ifndef _I40EVF_CLIENT_H_
+#define _I40EVF_CLIENT_H_
+
+#define I40EVF_CLIENT_STR_LENGTH 10
+
+/* Client interface version should be updated anytime there is a change in the
+ * existing APIs or data structures.
+ */
+#define I40EVF_CLIENT_VERSION_MAJOR 0
+#define I40EVF_CLIENT_VERSION_MINOR 01
+#define I40EVF_CLIENT_VERSION_BUILD 00
+#define I40EVF_CLIENT_VERSION_STR \
+	__stringify(I40EVF_CLIENT_VERSION_MAJOR) "." \
+	__stringify(I40EVF_CLIENT_VERSION_MINOR) "." \
+	__stringify(I40EVF_CLIENT_VERSION_BUILD)
+
+struct i40e_client_version {
+	u8 major;
+	u8 minor;
+	u8 build;
+	u8 rsvd;
+};
+
+enum i40e_client_state {
+	__I40E_CLIENT_NULL,
+	__I40E_CLIENT_REGISTERED
+};
+
+enum i40e_client_instance_state {
+	__I40E_CLIENT_INSTANCE_NONE,
+	__I40E_CLIENT_INSTANCE_OPENED,
+};
+
+struct i40e_ops;
+struct i40e_client;
+
+/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
+ * In order for us to keep the interface simple, SW will define a
+ * unique type value for AEQ.
+ */
+#define I40E_QUEUE_TYPE_PE_AEQ	0x80
+#define I40E_QUEUE_INVALID_IDX	0xFFFF
+
+struct i40e_qv_info {
+	u32 v_idx; /* msix_vector */
+	u16 ceq_idx;
+	u16 aeq_idx;
+	u8 itr_idx;
+};
+
+struct i40e_qvlist_info {
+	u32 num_vectors;
+	struct i40e_qv_info qv_info[1];
+};
+
+#define I40E_CLIENT_MSIX_ALL 0xFFFFFFFF
+
+/* set of LAN parameters useful for clients managed by LAN */
+
+/* Struct to hold per priority info */
+struct i40e_prio_qos_params {
+	u16 qs_handle; /* qs handle for prio */
+	u8 tc; /* TC mapped to prio */
+	u8 reserved;
+};
+
+#define I40E_CLIENT_MAX_USER_PRIORITY	8
+/* Struct to hold Client QoS */
+struct i40e_qos_params {
+	struct i40e_prio_qos_params prio_qos[I40E_CLIENT_MAX_USER_PRIORITY];
+};
+
+struct i40e_params {
+	struct i40e_qos_params qos;
+	u16 mtu;
+	u16 link_up; /* boolean */
+};
+
+/* Structure to hold LAN device info for a client device */
+struct i40e_info {
+	struct i40e_client_version version;
+	u8 lanmac[6];
+	struct net_device *netdev;
+	struct pci_dev *pcidev;
+	u8 __iomem *hw_addr;
+	u8 fid;	/* function id, PF id or VF id */
+#define I40E_CLIENT_FTYPE_PF 0
+#define I40E_CLIENT_FTYPE_VF 1
+	u8 ftype; /* function type, PF or VF */
+	void *vf; /* cast to i40evf_adapter */
+
+	/* All L2 params that could change during the life span of the device
+	 * and need to be communicated to the client when they change
+	 */
+	struct i40e_params params;
+	struct i40e_ops *ops;
+
+	u16 msix_count;	 /* number of msix vectors*/
+	/* Array down below will be dynamically allocated based on msix_count */
+	struct msix_entry *msix_entries;
+	u16 itr_index; /* Which ITR index the PE driver is supposed to use */
+};
+struct i40e_ops {
+	/* setup_q_vector_list enables queues with a particular vector */
+	int (*setup_qvlist)(struct i40e_info *ldev, struct i40e_client *client,
+			    struct i40e_qvlist_info *qv_info);
+
+	u32 (*virtchnl_send)(struct i40e_info *ldev, struct i40e_client *client,
+			     u8 *msg, u16 len);
+
+	/* If the PE Engine is unresponsive, RDMA driver can request a reset. */
+	void (*request_reset)(struct i40e_info *ldev,
+			      struct i40e_client *client);
+};
+
+struct i40e_client_ops {
+	/* Should be called from register_client() or whenever the driver is
+	 * ready to create a specific client instance.
+	 */
+	int (*open)(struct i40e_info *ldev, struct i40e_client *client);
+
+	/* Should be closed when netdev is unavailable or when an unregister
+	 * call comes in. If the close happens due to a reset, set the reset
+	 * bit to true.
+	 */
+	void (*close)(struct i40e_info *ldev, struct i40e_client *client,
+		      bool reset);
+
+	/* called when an L2 managed parameter changes - mss */
+	void (*l2_param_change)(struct i40e_info *ldev,
+				struct i40e_client *client,
+				struct i40e_params *params);
+
+	/* called when a message is received from the PF */
+	int (*virtchnl_receive)(struct i40e_info *ldev,
+				struct i40e_client *client,
+				u8 *msg, u16 len);
+};
+
+/* Client device */
+struct i40e_client_instance {
+	struct list_head list;
+	struct i40e_info lan_info;
+	struct i40e_client *client;
+	unsigned long state;
+};
+
+struct i40e_client {
+	struct list_head list;		/* list of registered clients */
+	char name[I40EVF_CLIENT_STR_LENGTH];
+	struct i40e_client_version version;
+	unsigned long state;		/* client state */
+	atomic_t ref_cnt;  /* Count of all the client devices of this kind */
+	u32 flags;
+#define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE	BIT(0)
+#define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS	BIT(2)
+	u8 type;
+#define I40E_CLIENT_IWARP 0
+	struct i40e_client_ops *ops;	/* client ops provided by the client */
+};
+
+/* used by clients */
+int i40evf_register_client(struct i40e_client *client);
+int i40evf_unregister_client(struct i40e_client *client);
+#endif /* _I40EVF_CLIENT_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/i40evf_ethtool.c b/drivers/net/ethernet/intel/iavf/i40evf_ethtool.c
new file mode 100644
index 000000000000..69efe0aec76a
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/i40evf_ethtool.c
@@ -0,0 +1,820 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+/* ethtool support for i40evf */
+#include "i40evf.h"
+
+#include <linux/uaccess.h>
+
+struct i40evf_stats {
+	char stat_string[ETH_GSTRING_LEN];
+	int stat_offset;
+};
+
+#define I40EVF_STAT(_name, _stat) { \
+	.stat_string = _name, \
+	.stat_offset = offsetof(struct i40evf_adapter, _stat) \
+}
+
+/* All stats are u64, so we don't need to track the size of the field.
*/ +static const struct i40evf_stats i40evf_gstrings_stats[] = { + I40EVF_STAT("rx_bytes", current_stats.rx_bytes), + I40EVF_STAT("rx_unicast", current_stats.rx_unicast), + I40EVF_STAT("rx_multicast", current_stats.rx_multicast), + I40EVF_STAT("rx_broadcast", current_stats.rx_broadcast), + I40EVF_STAT("rx_discards", current_stats.rx_discards), + I40EVF_STAT("rx_unknown_protocol", current_stats.rx_unknown_protocol), + I40EVF_STAT("tx_bytes", current_stats.tx_bytes), + I40EVF_STAT("tx_unicast", current_stats.tx_unicast), + I40EVF_STAT("tx_multicast", current_stats.tx_multicast), + I40EVF_STAT("tx_broadcast", current_stats.tx_broadcast), + I40EVF_STAT("tx_discards", current_stats.tx_discards), + I40EVF_STAT("tx_errors", current_stats.tx_errors), +}; + +#define I40EVF_GLOBAL_STATS_LEN ARRAY_SIZE(i40evf_gstrings_stats) +#define I40EVF_QUEUE_STATS_LEN(_dev) \ + (((struct i40evf_adapter *)\ + netdev_priv(_dev))->num_active_queues \ + * 2 * (sizeof(struct i40e_queue_stats) / sizeof(u64))) +#define I40EVF_STATS_LEN(_dev) \ + (I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev)) + +/* For now we have one and only one private flag and it is only defined + * when we have support for the SKIP_CPU_SYNC DMA attribute. Instead + * of leaving all this code sitting around empty we will strip it unless + * our one private flag is actually available. + */ +struct i40evf_priv_flags { + char flag_string[ETH_GSTRING_LEN]; + u32 flag; + bool read_only; +}; + +#define I40EVF_PRIV_FLAG(_name, _flag, _read_only) { \ + .flag_string = _name, \ + .flag = _flag, \ + .read_only = _read_only, \ +} + +static const struct i40evf_priv_flags i40evf_gstrings_priv_flags[] = { + I40EVF_PRIV_FLAG("legacy-rx", I40EVF_FLAG_LEGACY_RX, 0), +}; + +#define I40EVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40evf_gstrings_priv_flags) + +/** + * i40evf_get_link_ksettings - Get Link Speed and Duplex settings + * @netdev: network interface device structure + * @cmd: ethtool command + * + * Reports speed/duplex settings. Because this is a VF, we don't know what + * kind of link we really have, so we fake it. + **/ +static int i40evf_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + + ethtool_link_ksettings_zero_link_mode(cmd, supported); + cmd->base.autoneg = AUTONEG_DISABLE; + cmd->base.port = PORT_NONE; + /* Set speed and duplex */ + switch (adapter->link_speed) { + case I40E_LINK_SPEED_40GB: + cmd->base.speed = SPEED_40000; + break; + case I40E_LINK_SPEED_25GB: +#ifdef SPEED_25000 + cmd->base.speed = SPEED_25000; +#else + netdev_info(netdev, + "Speed is 25G, display not supported by this version of ethtool.\n"); +#endif + break; + case I40E_LINK_SPEED_20GB: + cmd->base.speed = SPEED_20000; + break; + case I40E_LINK_SPEED_10GB: + cmd->base.speed = SPEED_10000; + break; + case I40E_LINK_SPEED_1GB: + cmd->base.speed = SPEED_1000; + break; + case I40E_LINK_SPEED_100MB: + cmd->base.speed = SPEED_100; + break; + default: + break; + } + cmd->base.duplex = DUPLEX_FULL; + + return 0; +} + +/** + * i40evf_get_sset_count - Get length of string set + * @netdev: network interface device structure + * @sset: id of string set + * + * Reports size of string table. This driver only supports + * strings for statistics. 
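+ * For example, "ethtool -S <dev>" first uses the ETH_SS_STATS count + * returned here to size its buffers, then fetches the strings and values.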
+ **/ +static int i40evf_get_sset_count(struct net_device *netdev, int sset) +{ + if (sset == ETH_SS_STATS) + return I40EVF_STATS_LEN(netdev); + else if (sset == ETH_SS_PRIV_FLAGS) + return I40EVF_PRIV_FLAGS_STR_LEN; + else + return -EINVAL; +} + +/** + * i40evf_get_ethtool_stats - report device statistics + * @netdev: network interface device structure + * @stats: ethtool statistics structure + * @data: pointer to data buffer + * + * All statistics are added to the data buffer as an array of u64. + **/ +static void i40evf_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + unsigned int i, j; + char *p; + + for (i = 0; i < I40EVF_GLOBAL_STATS_LEN; i++) { + p = (char *)adapter + i40evf_gstrings_stats[i].stat_offset; + data[i] = *(u64 *)p; + } + for (j = 0; j < adapter->num_active_queues; j++) { + data[i++] = adapter->tx_rings[j].stats.packets; + data[i++] = adapter->tx_rings[j].stats.bytes; + } + for (j = 0; j < adapter->num_active_queues; j++) { + data[i++] = adapter->rx_rings[j].stats.packets; + data[i++] = adapter->rx_rings[j].stats.bytes; + } +} + +/** + * i40evf_get_strings - Get string set + * @netdev: network interface device structure + * @sset: id of string set + * @data: buffer for string data + * + * Builds stats string table. + **/ +static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + u8 *p = data; + int i; + + if (sset == ETH_SS_STATS) { + for (i = 0; i < (int)I40EVF_GLOBAL_STATS_LEN; i++) { + memcpy(p, i40evf_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < adapter->num_active_queues; i++) { + snprintf(p, ETH_GSTRING_LEN, "tx-%u.packets", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "tx-%u.bytes", i); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < adapter->num_active_queues; i++) { + snprintf(p, ETH_GSTRING_LEN, "rx-%u.packets", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "rx-%u.bytes", i); + p += ETH_GSTRING_LEN; + } + } else if (sset == ETH_SS_PRIV_FLAGS) { + for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) { + snprintf(p, ETH_GSTRING_LEN, "%s", + i40evf_gstrings_priv_flags[i].flag_string); + p += ETH_GSTRING_LEN; + } + } +} + +/** + * i40evf_get_priv_flags - report device private flags + * @netdev: network interface device structure + * + * The get string set count and the string set should be matched for each + * flag returned. Add new strings for each flag to the i40e_gstrings_priv_flags + * array. + * + * Returns a u32 bitmap of flags. 
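+ * For example, with only "legacy-rx" defined, bit 0 of the returned bitmap + * mirrors I40EVF_FLAG_LEGACY_RX; "ethtool --show-priv-flags <dev>" is the + * usual consumer.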
+ **/ +static u32 i40evf_get_priv_flags(struct net_device *netdev) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + u32 i, ret_flags = 0; + + for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) { + const struct i40evf_priv_flags *priv_flags; + + priv_flags = &i40evf_gstrings_priv_flags[i]; + + if (priv_flags->flag & adapter->flags) + ret_flags |= BIT(i); + } + + return ret_flags; +} + +/** + * i40evf_set_priv_flags - set private flags + * @netdev: network interface device structure + * @flags: bit flags to be set + **/ +static int i40evf_set_priv_flags(struct net_device *netdev, u32 flags) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + u32 orig_flags, new_flags, changed_flags; + u32 i; + + orig_flags = READ_ONCE(adapter->flags); + new_flags = orig_flags; + + for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) { + const struct i40evf_priv_flags *priv_flags; + + priv_flags = &i40evf_gstrings_priv_flags[i]; + + if (flags & BIT(i)) + new_flags |= priv_flags->flag; + else + new_flags &= ~(priv_flags->flag); + + if (priv_flags->read_only && + ((orig_flags ^ new_flags) & ~BIT(i))) + return -EOPNOTSUPP; + } + + /* Before we finalize any flag changes, any checks which we need to + * perform to determine if the new flags will be supported should go + * here... + */ + + /* Compare and exchange the new flags into place. If we failed, that + * is if cmpxchg returns anything but the old value, this means + * something else must have modified the flags variable since we + * copied it. We'll just punt with an error and log something in the + * message buffer. + */ + if (cmpxchg(&adapter->flags, orig_flags, new_flags) != orig_flags) { + dev_warn(&adapter->pdev->dev, + "Unable to update adapter->flags as it was modified by another thread...\n"); + return -EAGAIN; + } + + changed_flags = orig_flags ^ new_flags; + + /* Process any additional changes needed as a result of flag changes. + * The changed_flags value reflects the list of bits that were changed + * in the code above. + */ + + /* issue a reset to force legacy-rx change to take effect */ + if (changed_flags & I40EVF_FLAG_LEGACY_RX) { + if (netif_running(netdev)) { + adapter->flags |= I40EVF_FLAG_RESET_NEEDED; + schedule_work(&adapter->reset_task); + } + } + + return 0; +} + +/** + * i40evf_get_msglevel - Get debug message level + * @netdev: network interface device structure + * + * Returns current debug message level. + **/ +static u32 i40evf_get_msglevel(struct net_device *netdev) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + + return adapter->msg_enable; +} + +/** + * i40evf_set_msglevel - Set debug message level + * @netdev: network interface device structure + * @data: message level + * + * Set current debug message level. Higher values cause the driver to + * be noisier. + **/ +static void i40evf_set_msglevel(struct net_device *netdev, u32 data) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + + if (I40E_DEBUG_USER & data) + adapter->hw.debug_mask = data; + adapter->msg_enable = data; +} + +/** + * i40evf_get_drvinfo - Get driver info + * @netdev: network interface device structure + * @drvinfo: ethtool driver info structure + * + * Returns information about the driver and device for display to the user.
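+ * For example, "ethtool -i <dev>" prints the driver, version, and bus_info + * strings filled in below.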
+ **/ +static void i40evf_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + + strlcpy(drvinfo->driver, i40evf_driver_name, 32); + strlcpy(drvinfo->version, i40evf_driver_version, 32); + strlcpy(drvinfo->fw_version, "N/A", 4); + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); + drvinfo->n_priv_flags = I40EVF_PRIV_FLAGS_STR_LEN; +} + +/** + * i40evf_get_ringparam - Get ring parameters + * @netdev: network interface device structure + * @ring: ethtool ringparam structure + * + * Returns current ring parameters. TX and RX rings are reported separately, + * but the number of rings is not reported. + **/ +static void i40evf_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + + ring->rx_max_pending = I40EVF_MAX_RXD; + ring->tx_max_pending = I40EVF_MAX_TXD; + ring->rx_pending = adapter->rx_desc_count; + ring->tx_pending = adapter->tx_desc_count; +} + +/** + * i40evf_set_ringparam - Set ring parameters + * @netdev: network interface device structure + * @ring: ethtool ringparam structure + * + * Sets ring parameters. TX and RX rings are controlled separately, but the + * number of rings is not specified, so all rings get the same settings. + **/ +static int i40evf_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + u32 new_rx_count, new_tx_count; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + new_tx_count = clamp_t(u32, ring->tx_pending, + I40EVF_MIN_TXD, + I40EVF_MAX_TXD); + new_tx_count = ALIGN(new_tx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE); + + new_rx_count = clamp_t(u32, ring->rx_pending, + I40EVF_MIN_RXD, + I40EVF_MAX_RXD); + new_rx_count = ALIGN(new_rx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE); + + /* if nothing to do return success */ + if ((new_tx_count == adapter->tx_desc_count) && + (new_rx_count == adapter->rx_desc_count)) + return 0; + + adapter->tx_desc_count = new_tx_count; + adapter->rx_desc_count = new_rx_count; + + if (netif_running(netdev)) { + adapter->flags |= I40EVF_FLAG_RESET_NEEDED; + schedule_work(&adapter->reset_task); + } + + return 0; +} + +/** + * __i40evf_get_coalesce - get per-queue coalesce settings + * @netdev: the netdev to check + * @ec: ethtool coalesce data structure + * @queue: which queue to pick + * + * Gets the per-queue settings for coalescence. Specifically Rx and Tx usecs + * are per queue. If queue is <0 then we default to queue 0 as the + * representative value. + **/ +static int __i40evf_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + int queue) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + struct i40e_vsi *vsi = &adapter->vsi; + struct i40e_ring *rx_ring, *tx_ring; + + ec->tx_max_coalesced_frames = vsi->work_limit; + ec->rx_max_coalesced_frames = vsi->work_limit; + + /* Rx and Tx usecs per queue value. If user doesn't specify the + * queue, return queue 0's value to represent. 
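+ * As an illustrative invocation, "ethtool --per-queue <dev> queue_mask 0x2 + * --show-coalesce" reaches this helper with queue == 1, while a plain + * "ethtool -c <dev>" passes queue == -1 and therefore reports queue 0.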
+ */ + if (queue < 0) + queue = 0; + else if (queue >= adapter->num_active_queues) + return -EINVAL; + + rx_ring = &adapter->rx_rings[queue]; + tx_ring = &adapter->tx_rings[queue]; + + if (ITR_IS_DYNAMIC(rx_ring->itr_setting)) + ec->use_adaptive_rx_coalesce = 1; + + if (ITR_IS_DYNAMIC(tx_ring->itr_setting)) + ec->use_adaptive_tx_coalesce = 1; + + ec->rx_coalesce_usecs = rx_ring->itr_setting & ~I40E_ITR_DYNAMIC; + ec->tx_coalesce_usecs = tx_ring->itr_setting & ~I40E_ITR_DYNAMIC; + + return 0; +} + +/** + * i40evf_get_coalesce - Get interrupt coalescing settings + * @netdev: network interface device structure + * @ec: ethtool coalesce structure + * + * Returns current coalescing settings. This is referred to elsewhere in the + * driver as Interrupt Throttle Rate, as this is how the hardware describes + * this functionality. Note that if per-queue settings have been modified this + * only represents the settings of queue 0. + **/ +static int i40evf_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + return __i40evf_get_coalesce(netdev, ec, -1); +} + +/** + * i40evf_get_per_queue_coalesce - get coalesce values for specific queue + * @netdev: netdev to read + * @ec: coalesce settings from ethtool + * @queue: the queue to read + * + * Read specific queue's coalesce settings. + **/ +static int i40evf_get_per_queue_coalesce(struct net_device *netdev, + u32 queue, + struct ethtool_coalesce *ec) +{ + return __i40evf_get_coalesce(netdev, ec, queue); +} + +/** + * i40evf_set_itr_per_queue - set ITR values for specific queue + * @adapter: the VF adapter struct to set values for + * @ec: coalesce settings from ethtool + * @queue: the queue to modify + * + * Change the ITR settings for a specific queue. + **/ +static void i40evf_set_itr_per_queue(struct i40evf_adapter *adapter, + struct ethtool_coalesce *ec, + int queue) +{ + struct i40e_ring *rx_ring = &adapter->rx_rings[queue]; + struct i40e_ring *tx_ring = &adapter->tx_rings[queue]; + struct i40e_q_vector *q_vector; + + rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs); + tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs); + + rx_ring->itr_setting |= I40E_ITR_DYNAMIC; + if (!ec->use_adaptive_rx_coalesce) + rx_ring->itr_setting ^= I40E_ITR_DYNAMIC; + + tx_ring->itr_setting |= I40E_ITR_DYNAMIC; + if (!ec->use_adaptive_tx_coalesce) + tx_ring->itr_setting ^= I40E_ITR_DYNAMIC; + + q_vector = rx_ring->q_vector; + q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting); + + q_vector = tx_ring->q_vector; + q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting); + + /* The interrupt handler itself will take care of programming + * the Tx and Rx ITR values based on the values we have entered + * into the q_vector, no need to write the values now. + */ +} + +/** + * __i40evf_set_coalesce - set coalesce settings for particular queue + * @netdev: the netdev to change + * @ec: ethtool coalesce settings + * @queue: the queue to change + * + * Sets the coalesce settings for a particular queue. 
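+ * Note that i40evf_set_itr_per_queue() above leaves the I40E_ITR_DYNAMIC + * bit set unless adaptive mode is explicitly disabled, so something like + * "ethtool -C <dev> adaptive-rx off rx-usecs 50" is what actually pins a + * fixed Rx interrupt rate.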
+ **/ +static int __i40evf_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + int queue) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + struct i40e_vsi *vsi = &adapter->vsi; + int i; + + if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq) + vsi->work_limit = ec->tx_max_coalesced_frames_irq; + + if (ec->rx_coalesce_usecs == 0) { + if (ec->use_adaptive_rx_coalesce) + netif_info(adapter, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n"); + } else if ((ec->rx_coalesce_usecs < I40E_MIN_ITR) || + (ec->rx_coalesce_usecs > I40E_MAX_ITR)) { + netif_info(adapter, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n"); + return -EINVAL; + } else if (ec->tx_coalesce_usecs == 0) { + if (ec->use_adaptive_tx_coalesce) + netif_info(adapter, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n"); + } else if ((ec->tx_coalesce_usecs < I40E_MIN_ITR) || + (ec->tx_coalesce_usecs > I40E_MAX_ITR)) { + netif_info(adapter, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n"); + return -EINVAL; + } + + /* Rx and Tx usecs have a per-queue value. If the user doesn't specify + * a queue, apply to all queues. + */ + if (queue < 0) { + for (i = 0; i < adapter->num_active_queues; i++) + i40evf_set_itr_per_queue(adapter, ec, i); + } else if (queue < adapter->num_active_queues) { + i40evf_set_itr_per_queue(adapter, ec, queue); + } else { + netif_info(adapter, drv, netdev, "Invalid queue value, queue range is 0 - %d\n", + adapter->num_active_queues - 1); + return -EINVAL; + } + + return 0; +} + +/** + * i40evf_set_coalesce - Set interrupt coalescing settings + * @netdev: network interface device structure + * @ec: ethtool coalesce structure + * + * Change current coalescing settings for every queue. + **/ +static int i40evf_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + return __i40evf_set_coalesce(netdev, ec, -1); +} + +/** + * i40evf_set_per_queue_coalesce - set specific queue's coalesce settings + * @netdev: the netdev to change + * @ec: ethtool's coalesce settings + * @queue: the queue to modify + * + * Modifies a specific queue's coalesce settings. + */ +static int i40evf_set_per_queue_coalesce(struct net_device *netdev, + u32 queue, + struct ethtool_coalesce *ec) +{ + return __i40evf_set_coalesce(netdev, ec, queue); +} + +/** + * i40evf_get_rxnfc - command to get RX flow classification rules + * @netdev: network interface device structure + * @cmd: ethtool rxnfc command + * @rule_locs: pointer to store rule locations + * + * Returns Success if the command is supported. + **/ +static int i40evf_get_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = adapter->num_active_queues; + ret = 0; + break; + case ETHTOOL_GRXFH: + netdev_info(netdev, + "RSS hash info is not available to vf, use pf.\n"); + break; + default: + break; + } + + return ret; +} +/** + * i40evf_get_channels: get the number of channels supported by the device + * @netdev: network interface device structure + * @ch: channel information structure + * + * For the purposes of our device, we only use combined channels, i.e. a tx/rx + * queue pair. Report one extra channel to match our "other" MSI-X vector.
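+ * For example, "ethtool -l <dev>" on a VF with four queue pairs reports + * combined 4 plus one "other" channel, the admin-queue (misc) vector.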
+ **/ +static void i40evf_get_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + + /* Report maximum channels */ + ch->max_combined = I40EVF_MAX_REQ_QUEUES; + + ch->max_other = NONQ_VECS; + ch->other_count = NONQ_VECS; + + ch->combined_count = adapter->num_active_queues; +} + +/** + * i40evf_set_channels: set the new channel count + * @netdev: network interface device structure + * @ch: channel information structure + * + * Negotiate a new number of channels with the PF then do a reset. During + * reset we'll realloc queues and fix the RSS table. Returns 0 on success, + * negative on failure. + **/ +static int i40evf_set_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + int num_req = ch->combined_count; + + if (num_req != adapter->num_active_queues && + !(adapter->vf_res->vf_cap_flags & + VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)) { + dev_info(&adapter->pdev->dev, "PF is not capable of queue negotiation.\n"); + return -EINVAL; + } + + if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && + adapter->num_tc) { + dev_info(&adapter->pdev->dev, "Cannot set channels since ADq is enabled.\n"); + return -EINVAL; + } + + /* All of these should have already been checked by ethtool before this + * even gets to us, but just to be sure. + */ + if (num_req <= 0 || num_req > I40EVF_MAX_REQ_QUEUES) + return -EINVAL; + + if (ch->rx_count || ch->tx_count || ch->other_count != NONQ_VECS) + return -EINVAL; + + adapter->num_req_queues = num_req; + return i40evf_request_queues(adapter, num_req); +} + +/** + * i40evf_get_rxfh_key_size - get the RSS hash key size + * @netdev: network interface device structure + * + * Returns the hash key size. + **/ +static u32 i40evf_get_rxfh_key_size(struct net_device *netdev) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + + return adapter->rss_key_size; +} + +/** + * i40evf_get_rxfh_indir_size - get the rx flow hash indirection table size + * @netdev: network interface device structure + * + * Returns the table size. + **/ +static u32 i40evf_get_rxfh_indir_size(struct net_device *netdev) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + + return adapter->rss_lut_size; +} + +/** + * i40evf_get_rxfh - get the rx flow hash indirection table + * @netdev: network interface device structure + * @indir: indirection table + * @key: hash key + * @hfunc: hash function in use + * + * Reads the indirection table from the driver's cached copy. Always + * returns 0. + **/ +static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + u16 i; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (!indir) + return 0; + + memcpy(key, adapter->rss_key, adapter->rss_key_size); + + /* Each 32 bits pointed by 'indir' is stored with a lut entry */ + for (i = 0; i < adapter->rss_lut_size; i++) + indir[i] = (u32)adapter->rss_lut[i]; + + return 0; +} + +/** + * i40evf_set_rxfh - set the rx flow hash indirection table + * @netdev: network interface device structure + * @indir: indirection table + * @key: hash key + * @hfunc: hash function to use + * + * Returns -EINVAL if the table specifies an invalid queue id, otherwise + * returns 0 after programming the table.
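+ * For example, "ethtool -X <dev> equal 4" spreads the indirection table + * evenly across queues 0-3; the copy cached here is then pushed out via + * i40evf_config_rss().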
+ **/ +static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + u16 i; + + /* We do not allow change in unsupported parameters */ + if (key || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + if (!indir) + return 0; + + if (key) { + memcpy(adapter->rss_key, key, adapter->rss_key_size); + } + + /* Each 32 bits pointed by 'indir' is stored with a lut entry */ + for (i = 0; i < adapter->rss_lut_size; i++) + adapter->rss_lut[i] = (u8)(indir[i]); + + return i40evf_config_rss(adapter); +} + +static const struct ethtool_ops i40evf_ethtool_ops = { + .get_drvinfo = i40evf_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_ringparam = i40evf_get_ringparam, + .set_ringparam = i40evf_set_ringparam, + .get_strings = i40evf_get_strings, + .get_ethtool_stats = i40evf_get_ethtool_stats, + .get_sset_count = i40evf_get_sset_count, + .get_priv_flags = i40evf_get_priv_flags, + .set_priv_flags = i40evf_set_priv_flags, + .get_msglevel = i40evf_get_msglevel, + .set_msglevel = i40evf_set_msglevel, + .get_coalesce = i40evf_get_coalesce, + .set_coalesce = i40evf_set_coalesce, + .get_per_queue_coalesce = i40evf_get_per_queue_coalesce, + .set_per_queue_coalesce = i40evf_set_per_queue_coalesce, + .get_rxnfc = i40evf_get_rxnfc, + .get_rxfh_indir_size = i40evf_get_rxfh_indir_size, + .get_rxfh = i40evf_get_rxfh, + .set_rxfh = i40evf_set_rxfh, + .get_channels = i40evf_get_channels, + .set_channels = i40evf_set_channels, + .get_rxfh_key_size = i40evf_get_rxfh_key_size, + .get_link_ksettings = i40evf_get_link_ksettings, +}; + +/** + * i40evf_set_ethtool_ops - Initialize ethtool ops struct + * @netdev: network interface device structure + * + * Sets ethtool ops struct in our netdev so that ethtool can call + * our functions. + **/ +void i40evf_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &i40evf_ethtool_ops; +} diff --git a/drivers/net/ethernet/intel/iavf/i40evf_main.c b/drivers/net/ethernet/intel/iavf/i40evf_main.c new file mode 100644 index 000000000000..60c2e5df5827 --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/i40evf_main.c @@ -0,0 +1,3990 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2013 - 2018 Intel Corporation. */ + +#include "i40evf.h" +#include "i40e_prototype.h" +#include "i40evf_client.h" +/* All i40evf tracepoints are defined by the include below, which must + * be included exactly once across the whole kernel with + * CREATE_TRACE_POINTS defined + */ +#define CREATE_TRACE_POINTS +#include "i40e_trace.h" + +static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter); +static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter); +static int i40evf_close(struct net_device *netdev); + +char i40evf_driver_name[] = "i40evf"; +static const char i40evf_driver_string[] = + "Intel(R) Ethernet Adaptive Virtual Function Network Driver"; + +#define DRV_KERN "-k" + +#define DRV_VERSION_MAJOR 3 +#define DRV_VERSION_MINOR 2 +#define DRV_VERSION_BUILD 3 +#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ + __stringify(DRV_VERSION_MINOR) "." 
\ + __stringify(DRV_VERSION_BUILD) \ + DRV_KERN +const char i40evf_driver_version[] = DRV_VERSION; +static const char i40evf_copyright[] = + "Copyright (c) 2013 - 2018 Intel Corporation."; + +/* i40evf_pci_tbl - PCI Device ID Table + * + * Wildcard entries (PCI_ANY_ID) should come last + * Last entry must be all 0s + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, + * Class, Class Mask, private data (not used) } + */ +static const struct pci_device_id i40evf_pci_tbl[] = { + {PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_VF_HV), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_ADAPTIVE_VF), 0}, + /* required last entry */ + {0, } +}; + +MODULE_DEVICE_TABLE(pci, i40evf_pci_tbl); + +MODULE_ALIAS("i40evf"); +MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>"); +MODULE_DESCRIPTION("Intel(R) XL710 X710 Virtual Function Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +static struct workqueue_struct *i40evf_wq; + +/** + * i40evf_allocate_dma_mem_d - OS specific memory alloc for shared code + * @hw: pointer to the HW structure + * @mem: ptr to mem struct to fill out + * @size: size of memory requested + * @alignment: what to align the allocation to + **/ +i40e_status i40evf_allocate_dma_mem_d(struct i40e_hw *hw, + struct i40e_dma_mem *mem, + u64 size, u32 alignment) +{ + struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back; + + if (!mem) + return I40E_ERR_PARAM; + + mem->size = ALIGN(size, alignment); + mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, + (dma_addr_t *)&mem->pa, GFP_KERNEL); + if (mem->va) + return 0; + else + return I40E_ERR_NO_MEMORY; +} + +/** + * i40evf_free_dma_mem_d - OS specific memory free for shared code + * @hw: pointer to the HW structure + * @mem: ptr to mem struct to free + **/ +i40e_status i40evf_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem) +{ + struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back; + + if (!mem || !mem->va) + return I40E_ERR_PARAM; + dma_free_coherent(&adapter->pdev->dev, mem->size, + mem->va, (dma_addr_t)mem->pa); + return 0; +} + +/** + * i40evf_allocate_virt_mem_d - OS specific memory alloc for shared code + * @hw: pointer to the HW structure + * @mem: ptr to mem struct to fill out + * @size: size of memory requested + **/ +i40e_status i40evf_allocate_virt_mem_d(struct i40e_hw *hw, + struct i40e_virt_mem *mem, u32 size) +{ + if (!mem) + return I40E_ERR_PARAM; + + mem->size = size; + mem->va = kzalloc(size, GFP_KERNEL); + + if (mem->va) + return 0; + else + return I40E_ERR_NO_MEMORY; +} + +/** + * i40evf_free_virt_mem_d - OS specific memory free for shared code + * @hw: pointer to the HW structure + * @mem: ptr to mem struct to free + **/ +i40e_status i40evf_free_virt_mem_d(struct i40e_hw *hw, + struct i40e_virt_mem *mem) +{ + if (!mem) + return I40E_ERR_PARAM; + + /* it's ok to kfree a NULL pointer */ + kfree(mem->va); + + return 0; +} + +/** + * i40evf_debug_d - OS dependent version of debug printing + * @hw: pointer to the HW structure + * @mask: debug level mask + * @fmt_str: printf-type format description + **/ +void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...)
+{ + char buf[512]; + va_list argptr; + + if (!(mask & ((struct i40e_hw *)hw)->debug_mask)) + return; + + va_start(argptr, fmt_str); + vsnprintf(buf, sizeof(buf), fmt_str, argptr); + va_end(argptr); + + /* the debug string is already formatted with a newline */ + pr_info("%s", buf); +} + +/** + * i40evf_schedule_reset - Set the flags and schedule a reset event + * @adapter: board private structure + **/ +void i40evf_schedule_reset(struct i40evf_adapter *adapter) +{ + if (!(adapter->flags & + (I40EVF_FLAG_RESET_PENDING | I40EVF_FLAG_RESET_NEEDED))) { + adapter->flags |= I40EVF_FLAG_RESET_NEEDED; + schedule_work(&adapter->reset_task); + } +} + +/** + * i40evf_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + **/ +static void i40evf_tx_timeout(struct net_device *netdev) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + + adapter->tx_timeout_count++; + i40evf_schedule_reset(adapter); +} + +/** + * i40evf_misc_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ +static void i40evf_misc_irq_disable(struct i40evf_adapter *adapter) +{ + struct i40e_hw *hw = &adapter->hw; + + if (!adapter->msix_entries) + return; + + wr32(hw, I40E_VFINT_DYN_CTL01, 0); + + /* read flush */ + rd32(hw, I40E_VFGEN_RSTAT); + + synchronize_irq(adapter->msix_entries[0].vector); +} + +/** + * i40evf_misc_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + **/ +static void i40evf_misc_irq_enable(struct i40evf_adapter *adapter) +{ + struct i40e_hw *hw = &adapter->hw; + + wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK | + I40E_VFINT_DYN_CTL01_ITR_INDX_MASK); + wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK); + + /* read flush */ + rd32(hw, I40E_VFGEN_RSTAT); +} + +/** + * i40evf_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ +static void i40evf_irq_disable(struct i40evf_adapter *adapter) +{ + int i; + struct i40e_hw *hw = &adapter->hw; + + if (!adapter->msix_entries) + return; + + for (i = 1; i < adapter->num_msix_vectors; i++) { + wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), 0); + synchronize_irq(adapter->msix_entries[i].vector); + } + /* read flush */ + rd32(hw, I40E_VFGEN_RSTAT); +} + +/** + * i40evf_irq_enable_queues - Enable interrupt for specified queues + * @adapter: board private structure + * @mask: bitmap of queues to enable + **/ +void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask) +{ + struct i40e_hw *hw = &adapter->hw; + int i; + + for (i = 1; i < adapter->num_msix_vectors; i++) { + if (mask & BIT(i - 1)) { + wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), + I40E_VFINT_DYN_CTLN1_INTENA_MASK | + I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK); + } + } +} + +/** + * i40evf_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + * @flush: boolean value whether to run rd32() + **/ +void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush) +{ + struct i40e_hw *hw = &adapter->hw; + + i40evf_misc_irq_enable(adapter); + i40evf_irq_enable_queues(adapter, ~0); + + if (flush) + rd32(hw, I40E_VFGEN_RSTAT); +} + +/** + * i40evf_msix_aq - Interrupt handler for vector 0 + * @irq: interrupt number + * @data: pointer to netdev + **/ +static irqreturn_t i40evf_msix_aq(int irq, void *data) +{ + struct net_device *netdev = data; + struct i40evf_adapter *adapter = netdev_priv(netdev); + struct i40e_hw *hw = &adapter->hw; + + /* handle non-queue interrupts, these reads 
clear the registers */ + rd32(hw, I40E_VFINT_ICR01); + rd32(hw, I40E_VFINT_ICR0_ENA1); + + /* schedule work on the private workqueue */ + schedule_work(&adapter->adminq_task); + + return IRQ_HANDLED; +} + +/** + * i40evf_msix_clean_rings - MSIX mode Interrupt Handler + * @irq: interrupt number + * @data: pointer to a q_vector + **/ +static irqreturn_t i40evf_msix_clean_rings(int irq, void *data) +{ + struct i40e_q_vector *q_vector = data; + + if (!q_vector->tx.ring && !q_vector->rx.ring) + return IRQ_HANDLED; + + napi_schedule_irqoff(&q_vector->napi); + + return IRQ_HANDLED; +} + +/** + * i40evf_map_vector_to_rxq - associate irqs with rx queues + * @adapter: board private structure + * @v_idx: interrupt number + * @r_idx: queue number + **/ +static void +i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx) +{ + struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx]; + struct i40e_ring *rx_ring = &adapter->rx_rings[r_idx]; + struct i40e_hw *hw = &adapter->hw; + + rx_ring->q_vector = q_vector; + rx_ring->next = q_vector->rx.ring; + rx_ring->vsi = &adapter->vsi; + q_vector->rx.ring = rx_ring; + q_vector->rx.count++; + q_vector->rx.next_update = jiffies + 1; + q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting); + q_vector->ring_mask |= BIT(r_idx); + wr32(hw, I40E_VFINT_ITRN1(I40E_RX_ITR, q_vector->reg_idx), + q_vector->rx.current_itr); + q_vector->rx.current_itr = q_vector->rx.target_itr; +} + +/** + * i40evf_map_vector_to_txq - associate irqs with tx queues + * @adapter: board private structure + * @v_idx: interrupt number + * @t_idx: queue number + **/ +static void +i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx) +{ + struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx]; + struct i40e_ring *tx_ring = &adapter->tx_rings[t_idx]; + struct i40e_hw *hw = &adapter->hw; + + tx_ring->q_vector = q_vector; + tx_ring->next = q_vector->tx.ring; + tx_ring->vsi = &adapter->vsi; + q_vector->tx.ring = tx_ring; + q_vector->tx.count++; + q_vector->tx.next_update = jiffies + 1; + q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting); + q_vector->num_ringpairs++; + wr32(hw, I40E_VFINT_ITRN1(I40E_TX_ITR, q_vector->reg_idx), + q_vector->tx.target_itr); + q_vector->tx.current_itr = q_vector->tx.target_itr; +} + +/** + * i40evf_map_rings_to_vectors - Maps descriptor rings to vectors + * @adapter: board private structure to initialize + * + * This function maps descriptor rings to the queue-specific vectors + * we were allotted through the MSI-X enabling code. Ideally, we'd have + * one vector per ring/queue, but on a constrained vector budget, we + * group the rings as "efficiently" as possible. You would add new + * mapping configurations in here. + **/ +static void i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter) +{ + int rings_remaining = adapter->num_active_queues; + int ridx = 0, vidx = 0; + int q_vectors; + + q_vectors = adapter->num_msix_vectors - NONQ_VECS; + + for (; ridx < rings_remaining; ridx++) { + i40evf_map_vector_to_rxq(adapter, vidx, ridx); + i40evf_map_vector_to_txq(adapter, vidx, ridx); + + /* In the case where we have more queues than vectors, continue + * round-robin on vectors until all queues are mapped. 
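+ * For example, eight queues on four data vectors map as vector 0 -> + * queues {0, 4}, vector 1 -> {1, 5}, vector 2 -> {2, 6} and vector 3 -> + * {3, 7}.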
+ */ + if (++vidx >= q_vectors) + vidx = 0; + } + + adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS; +} + +/** + * i40evf_irq_affinity_notify - Callback for affinity changes + * @notify: context as to what irq was changed + * @mask: the new affinity mask + * + * This is a callback function used by the irq_set_affinity_notifier function + * so that we may register to receive changes to the irq affinity masks. + **/ +static void i40evf_irq_affinity_notify(struct irq_affinity_notify *notify, + const cpumask_t *mask) +{ + struct i40e_q_vector *q_vector = + container_of(notify, struct i40e_q_vector, affinity_notify); + + cpumask_copy(&q_vector->affinity_mask, mask); +} + +/** + * i40evf_irq_affinity_release - Callback for affinity notifier release + * @ref: internal core kernel usage + * + * This is a callback function used by the irq_set_affinity_notifier function + * to inform the current notification subscriber that they will no longer + * receive notifications. + **/ +static void i40evf_irq_affinity_release(struct kref *ref) {} + +/** + * i40evf_request_traffic_irqs - Initialize MSI-X interrupts + * @adapter: board private structure + * @basename: device basename + * + * Allocates MSI-X vectors for tx and rx handling, and requests + * interrupts from the kernel. + **/ +static int +i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename) +{ + unsigned int vector, q_vectors; + unsigned int rx_int_idx = 0, tx_int_idx = 0; + int irq_num, err; + int cpu; + + i40evf_irq_disable(adapter); + /* Decrement for Other and TCP Timer vectors */ + q_vectors = adapter->num_msix_vectors - NONQ_VECS; + + for (vector = 0; vector < q_vectors; vector++) { + struct i40e_q_vector *q_vector = &adapter->q_vectors[vector]; + irq_num = adapter->msix_entries[vector + NONQ_VECS].vector; + + if (q_vector->tx.ring && q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name), + "i40evf-%s-TxRx-%d", basename, rx_int_idx++); + tx_int_idx++; + } else if (q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name), + "i40evf-%s-rx-%d", basename, rx_int_idx++); + } else if (q_vector->tx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name), + "i40evf-%s-tx-%d", basename, tx_int_idx++); + } else { + /* skip this unused q_vector */ + continue; + } + err = request_irq(irq_num, + i40evf_msix_clean_rings, + 0, + q_vector->name, + q_vector); + if (err) { + dev_info(&adapter->pdev->dev, + "Request_irq failed, error: %d\n", err); + goto free_queue_irqs; + } + /* register for affinity change notifications */ + q_vector->affinity_notify.notify = i40evf_irq_affinity_notify; + q_vector->affinity_notify.release = + i40evf_irq_affinity_release; + irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); + /* Spread the IRQ affinity hints across online CPUs. Note that + * get_cpu_mask returns a mask with a permanent lifetime so + * it's safe to use as a hint for irq_set_affinity_hint. + */ + cpu = cpumask_local_spread(q_vector->v_idx, -1); + irq_set_affinity_hint(irq_num, get_cpu_mask(cpu)); + } + + return 0; + +free_queue_irqs: + while (vector) { + vector--; + irq_num = adapter->msix_entries[vector + NONQ_VECS].vector; + irq_set_affinity_notifier(irq_num, NULL); + irq_set_affinity_hint(irq_num, NULL); + free_irq(irq_num, &adapter->q_vectors[vector]); + } + return err; +} + +/** + * i40evf_request_misc_irq - Initialize MSI-X interrupts + * @adapter: board private structure + * + * Allocates MSI-X vector 0 and requests interrupts from the kernel. 
This + * vector is only for the admin queue, and stays active even when the netdev + * is closed. + **/ +static int i40evf_request_misc_irq(struct i40evf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err; + + snprintf(adapter->misc_vector_name, + sizeof(adapter->misc_vector_name) - 1, "i40evf-%s:mbx", + dev_name(&adapter->pdev->dev)); + err = request_irq(adapter->msix_entries[0].vector, + &i40evf_msix_aq, 0, + adapter->misc_vector_name, netdev); + if (err) { + dev_err(&adapter->pdev->dev, + "request_irq for %s failed: %d\n", + adapter->misc_vector_name, err); + free_irq(adapter->msix_entries[0].vector, netdev); + } + return err; +} + +/** + * i40evf_free_traffic_irqs - Free MSI-X interrupts + * @adapter: board private structure + * + * Frees all MSI-X vectors other than 0. + **/ +static void i40evf_free_traffic_irqs(struct i40evf_adapter *adapter) +{ + int vector, irq_num, q_vectors; + + if (!adapter->msix_entries) + return; + + q_vectors = adapter->num_msix_vectors - NONQ_VECS; + + for (vector = 0; vector < q_vectors; vector++) { + irq_num = adapter->msix_entries[vector + NONQ_VECS].vector; + irq_set_affinity_notifier(irq_num, NULL); + irq_set_affinity_hint(irq_num, NULL); + free_irq(irq_num, &adapter->q_vectors[vector]); + } +} + +/** + * i40evf_free_misc_irq - Free MSI-X miscellaneous vector + * @adapter: board private structure + * + * Frees MSI-X vector 0. + **/ +static void i40evf_free_misc_irq(struct i40evf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + if (!adapter->msix_entries) + return; + + free_irq(adapter->msix_entries[0].vector, netdev); +} + +/** + * i40evf_configure_tx - Configure Transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. + **/ +static void i40evf_configure_tx(struct i40evf_adapter *adapter) +{ + struct i40e_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < adapter->num_active_queues; i++) + adapter->tx_rings[i].tail = hw->hw_addr + I40E_QTX_TAIL1(i); +} + +/** + * i40evf_configure_rx - Configure Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. + **/ +static void i40evf_configure_rx(struct i40evf_adapter *adapter) +{ + unsigned int rx_buf_len = I40E_RXBUFFER_2048; + struct i40e_hw *hw = &adapter->hw; + int i; + + /* Legacy Rx will always default to a 2048 buffer size. */ +#if (PAGE_SIZE < 8192) + if (!(adapter->flags & I40EVF_FLAG_LEGACY_RX)) { + struct net_device *netdev = adapter->netdev; + + /* For jumbo frames on systems with 4K pages we have to use + * an order 1 page, so we might as well increase the size + * of our Rx buffer to make better use of the available space + */ + rx_buf_len = I40E_RXBUFFER_3072; + + /* We use a 1536 buffer size for configurations with + * standard Ethernet mtu. On x86 this gives us enough room + * for shared info and 192 bytes of padding. 
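+ * (Concretely: with 4K pages on x86, a standard 1500-byte MTU gets + * I40E_RXBUFFER_1536 - NET_IP_ALIGN, while a jumbo MTU gets + * I40E_RXBUFFER_3072.)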
+ */ + if (!I40E_2K_TOO_SMALL_WITH_PADDING && + (netdev->mtu <= ETH_DATA_LEN)) + rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN; + } +#endif + + for (i = 0; i < adapter->num_active_queues; i++) { + adapter->rx_rings[i].tail = hw->hw_addr + I40E_QRX_TAIL1(i); + adapter->rx_rings[i].rx_buf_len = rx_buf_len; + + if (adapter->flags & I40EVF_FLAG_LEGACY_RX) + clear_ring_build_skb_enabled(&adapter->rx_rings[i]); + else + set_ring_build_skb_enabled(&adapter->rx_rings[i]); + } +} + +/** + * i40evf_find_vlan - Search filter list for specific vlan filter + * @adapter: board private structure + * @vlan: vlan tag + * + * Returns ptr to the filter object or NULL. Must be called while holding the + * mac_vlan_list_lock. + **/ +static struct +i40evf_vlan_filter *i40evf_find_vlan(struct i40evf_adapter *adapter, u16 vlan) +{ + struct i40evf_vlan_filter *f; + + list_for_each_entry(f, &adapter->vlan_filter_list, list) { + if (vlan == f->vlan) + return f; + } + return NULL; +} + +/** + * i40evf_add_vlan - Add a vlan filter to the list + * @adapter: board private structure + * @vlan: VLAN tag + * + * Returns ptr to the filter object or NULL when no memory available. + **/ +static struct +i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan) +{ + struct i40evf_vlan_filter *f = NULL; + + spin_lock_bh(&adapter->mac_vlan_list_lock); + + f = i40evf_find_vlan(adapter, vlan); + if (!f) { + f = kzalloc(sizeof(*f), GFP_KERNEL); + if (!f) + goto clearout; + + f->vlan = vlan; + + INIT_LIST_HEAD(&f->list); + list_add(&f->list, &adapter->vlan_filter_list); + f->add = true; + adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER; + } + +clearout: + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return f; +} + +/** + * i40evf_del_vlan - Remove a vlan filter from the list + * @adapter: board private structure + * @vlan: VLAN tag + **/ +static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan) +{ + struct i40evf_vlan_filter *f; + + spin_lock_bh(&adapter->mac_vlan_list_lock); + + f = i40evf_find_vlan(adapter, vlan); + if (f) { + f->remove = true; + adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER; + } + + spin_unlock_bh(&adapter->mac_vlan_list_lock); +} + +/** + * i40evf_vlan_rx_add_vid - Add a VLAN filter to a device + * @netdev: network device struct + * @proto: unused protocol data + * @vid: VLAN tag + **/ +static int i40evf_vlan_rx_add_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + + if (!VLAN_ALLOWED(adapter)) + return -EIO; + if (i40evf_add_vlan(adapter, vid) == NULL) + return -ENOMEM; + return 0; +} + +/** + * i40evf_vlan_rx_kill_vid - Remove a VLAN filter from a device + * @netdev: network device struct + * @proto: unused protocol data + * @vid: VLAN tag + **/ +static int i40evf_vlan_rx_kill_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + + if (VLAN_ALLOWED(adapter)) { + i40evf_del_vlan(adapter, vid); + return 0; + } + return -EIO; +} + +/** + * i40evf_find_filter - Search filter list for specific mac filter + * @adapter: board private structure + * @macaddr: the MAC address + * + * Returns ptr to the filter object or NULL. Must be called while holding the + * mac_vlan_list_lock. 
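+ * i40evf_set_mac() below is a typical caller: it takes the lock, looks up + * the old address with this helper, and marks it for removal.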
+ **/ +static struct +i40evf_mac_filter *i40evf_find_filter(struct i40evf_adapter *adapter, + const u8 *macaddr) +{ + struct i40evf_mac_filter *f; + + if (!macaddr) + return NULL; + + list_for_each_entry(f, &adapter->mac_filter_list, list) { + if (ether_addr_equal(macaddr, f->macaddr)) + return f; + } + return NULL; +} + +/** + * i40e_add_filter - Add a mac filter to the filter list + * @adapter: board private structure + * @macaddr: the MAC address + * + * Returns ptr to the filter object or NULL when no memory available. + **/ +static struct +i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter, + const u8 *macaddr) +{ + struct i40evf_mac_filter *f; + + if (!macaddr) + return NULL; + + f = i40evf_find_filter(adapter, macaddr); + if (!f) { + f = kzalloc(sizeof(*f), GFP_ATOMIC); + if (!f) + return f; + + ether_addr_copy(f->macaddr, macaddr); + + list_add_tail(&f->list, &adapter->mac_filter_list); + f->add = true; + adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER; + } else { + f->remove = false; + } + + return f; +} + +/** + * i40evf_set_mac - NDO callback to set port mac address + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int i40evf_set_mac(struct net_device *netdev, void *p) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + struct i40e_hw *hw = &adapter->hw; + struct i40evf_mac_filter *f; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) + return 0; + + if (adapter->flags & I40EVF_FLAG_ADDR_SET_BY_PF) + return -EPERM; + + spin_lock_bh(&adapter->mac_vlan_list_lock); + + f = i40evf_find_filter(adapter, hw->mac.addr); + if (f) { + f->remove = true; + adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER; + } + + f = i40evf_add_filter(adapter, addr->sa_data); + + spin_unlock_bh(&adapter->mac_vlan_list_lock); + + if (f) { + ether_addr_copy(hw->mac.addr, addr->sa_data); + ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); + } + + return (f == NULL) ? -ENOMEM : 0; +} + +/** + * i40evf_addr_sync - Callback for dev_(mc|uc)_sync to add address + * @netdev: the netdevice + * @addr: address to add + * + * Called by __dev_(mc|uc)_sync when an address needs to be added. We call + * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock. + */ +static int i40evf_addr_sync(struct net_device *netdev, const u8 *addr) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + + if (i40evf_add_filter(adapter, addr)) + return 0; + else + return -ENOMEM; +} + +/** + * i40evf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address + * @netdev: the netdevice + * @addr: address to add + * + * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call + * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock. + */ +static int i40evf_addr_unsync(struct net_device *netdev, const u8 *addr) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + struct i40evf_mac_filter *f; + + /* Under some circumstances, we might receive a request to delete + * our own device address from our uc list. Because we store the + * device address in the VSI's MAC/VLAN filter list, we need to ignore + * such requests and not delete our device address from this list. 
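+ * (That is what the ether_addr_equal() check below implements.)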
+ */ + if (ether_addr_equal(addr, netdev->dev_addr)) + return 0; + + f = i40evf_find_filter(adapter, addr); + if (f) { + f->remove = true; + adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER; + } + return 0; +} + +/** + * i40evf_set_rx_mode - NDO callback to set the netdev filters + * @netdev: network interface device structure + **/ +static void i40evf_set_rx_mode(struct net_device *netdev) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + + spin_lock_bh(&adapter->mac_vlan_list_lock); + __dev_uc_sync(netdev, i40evf_addr_sync, i40evf_addr_unsync); + __dev_mc_sync(netdev, i40evf_addr_sync, i40evf_addr_unsync); + spin_unlock_bh(&adapter->mac_vlan_list_lock); + + if (netdev->flags & IFF_PROMISC && + !(adapter->flags & I40EVF_FLAG_PROMISC_ON)) + adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_PROMISC; + else if (!(netdev->flags & IFF_PROMISC) && + adapter->flags & I40EVF_FLAG_PROMISC_ON) + adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_PROMISC; + + if (netdev->flags & IFF_ALLMULTI && + !(adapter->flags & I40EVF_FLAG_ALLMULTI_ON)) + adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_ALLMULTI; + else if (!(netdev->flags & IFF_ALLMULTI) && + adapter->flags & I40EVF_FLAG_ALLMULTI_ON) + adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_ALLMULTI; +} + +/** + * i40evf_napi_enable_all - enable NAPI on all queue vectors + * @adapter: board private structure + **/ +static void i40evf_napi_enable_all(struct i40evf_adapter *adapter) +{ + int q_idx; + struct i40e_q_vector *q_vector; + int q_vectors = adapter->num_msix_vectors - NONQ_VECS; + + for (q_idx = 0; q_idx < q_vectors; q_idx++) { + struct napi_struct *napi; + + q_vector = &adapter->q_vectors[q_idx]; + napi = &q_vector->napi; + napi_enable(napi); + } +} + +/** + * i40evf_napi_disable_all - disable NAPI on all queue vectors + * @adapter: board private structure + **/ +static void i40evf_napi_disable_all(struct i40evf_adapter *adapter) +{ + int q_idx; + struct i40e_q_vector *q_vector; + int q_vectors = adapter->num_msix_vectors - NONQ_VECS; + + for (q_idx = 0; q_idx < q_vectors; q_idx++) { + q_vector = &adapter->q_vectors[q_idx]; + napi_disable(&q_vector->napi); + } +} + +/** + * i40evf_configure - set up transmit and receive data structures + * @adapter: board private structure + **/ +static void i40evf_configure(struct i40evf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int i; + + i40evf_set_rx_mode(netdev); + + i40evf_configure_tx(adapter); + i40evf_configure_rx(adapter); + adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES; + + for (i = 0; i < adapter->num_active_queues; i++) { + struct i40e_ring *ring = &adapter->rx_rings[i]; + + i40evf_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring)); + } +} + +/** + * i40evf_up_complete - Finish the last steps of bringing up a connection + * @adapter: board private structure + * + * Expects to be called while holding the __I40EVF_IN_CRITICAL_TASK bit lock. + **/ +static void i40evf_up_complete(struct i40evf_adapter *adapter) +{ + adapter->state = __I40EVF_RUNNING; + clear_bit(__I40E_VSI_DOWN, adapter->vsi.state); + + i40evf_napi_enable_all(adapter); + + adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_QUEUES; + if (CLIENT_ENABLED(adapter)) + adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_OPEN; + mod_timer_pending(&adapter->watchdog_timer, jiffies + 1); +} + +/** + * i40e_down - Shutdown the connection processing + * @adapter: board private structure + * + * Expects to be called while holding the __I40EVF_IN_CRITICAL_TASK bit lock. 
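+ * Rather than touching the hardware directly, it marks every MAC, VLAN, + * and cloud filter for removal and queues aq_required work that the + * watchdog later pushes to the PF.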
+ **/ +void i40evf_down(struct i40evf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct i40evf_vlan_filter *vlf; + struct i40evf_mac_filter *f; + struct i40evf_cloud_filter *cf; + + if (adapter->state <= __I40EVF_DOWN_PENDING) + return; + + netif_carrier_off(netdev); + netif_tx_disable(netdev); + adapter->link_up = false; + i40evf_napi_disable_all(adapter); + i40evf_irq_disable(adapter); + + spin_lock_bh(&adapter->mac_vlan_list_lock); + + /* clear the sync flag on all filters */ + __dev_uc_unsync(adapter->netdev, NULL); + __dev_mc_unsync(adapter->netdev, NULL); + + /* remove all MAC filters */ + list_for_each_entry(f, &adapter->mac_filter_list, list) { + f->remove = true; + } + + /* remove all VLAN filters */ + list_for_each_entry(vlf, &adapter->vlan_filter_list, list) { + vlf->remove = true; + } + + spin_unlock_bh(&adapter->mac_vlan_list_lock); + + /* remove all cloud filters */ + spin_lock_bh(&adapter->cloud_filter_list_lock); + list_for_each_entry(cf, &adapter->cloud_filter_list, list) { + cf->del = true; + } + spin_unlock_bh(&adapter->cloud_filter_list_lock); + + if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) && + adapter->state != __I40EVF_RESETTING) { + /* cancel any current operation */ + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + /* Schedule operations to close down the HW. Don't wait + * here for this to complete. The watchdog is still running + * and it will take care of this. + */ + adapter->aq_required = I40EVF_FLAG_AQ_DEL_MAC_FILTER; + adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER; + adapter->aq_required |= I40EVF_FLAG_AQ_DEL_CLOUD_FILTER; + adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES; + } + + mod_timer_pending(&adapter->watchdog_timer, jiffies + 1); +} + +/** + * i40evf_acquire_msix_vectors - Setup the MSIX capability + * @adapter: board private structure + * @vectors: number of vectors to request + * + * Work with the OS to set up the MSIX vectors needed. + * + * Returns 0 on success, negative on failure + **/ +static int +i40evf_acquire_msix_vectors(struct i40evf_adapter *adapter, int vectors) +{ + int err, vector_threshold; + + /* We'll want at least 3 (vector_threshold): + * 0) Other (Admin Queue and link, mostly) + * 1) TxQ[0] Cleanup + * 2) RxQ[0] Cleanup + */ + vector_threshold = MIN_MSIX_COUNT; + + /* The more we get, the more we will assign to Tx/Rx Cleanup + * for the separate queues...where Rx Cleanup >= Tx Cleanup. + * Right now, we simply care about how many we'll get; we'll + * set them up later while requesting irq's. + */ + err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, + vector_threshold, vectors); + if (err < 0) { + dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n"); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + return err; + } + + /* Adjust for only the vectors we'll use, which is minimum + * of max_msix_q_vectors + NONQ_VECS, or the number of + * vectors we were allocated. + */ + adapter->num_msix_vectors = err; + return 0; +} + +/** + * i40evf_free_queues - Free memory for all rings + * @adapter: board private structure to initialize + * + * Free all of the memory associated with queue pairs. 
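+ * Only the ring arrays themselves are freed here; the descriptor memory + * behind each ring is released separately by the Tx/Rx resource teardown + * paths.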
+ **/ +static void i40evf_free_queues(struct i40evf_adapter *adapter) +{ + if (!adapter->vsi_res) + return; + adapter->num_active_queues = 0; + kfree(adapter->tx_rings); + adapter->tx_rings = NULL; + kfree(adapter->rx_rings); + adapter->rx_rings = NULL; +} + +/** + * i40evf_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + * + * We allocate one ring per queue at run-time since we don't know the + * number of queues at compile-time. The polling_netdev array is + * intended for Multiqueue, but should work fine with a single queue. + **/ +static int i40evf_alloc_queues(struct i40evf_adapter *adapter) +{ + int i, num_active_queues; + + /* If we're in reset reallocating queues we don't actually know yet for + * certain the PF gave us the number of queues we asked for but we'll + * assume it did. Once basic reset is finished we'll confirm once we + * start negotiating config with PF. + */ + if (adapter->num_req_queues) + num_active_queues = adapter->num_req_queues; + else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && + adapter->num_tc) + num_active_queues = adapter->ch_config.total_qps; + else + num_active_queues = min_t(int, + adapter->vsi_res->num_queue_pairs, + (int)(num_online_cpus())); + + + adapter->tx_rings = kcalloc(num_active_queues, + sizeof(struct i40e_ring), GFP_KERNEL); + if (!adapter->tx_rings) + goto err_out; + adapter->rx_rings = kcalloc(num_active_queues, + sizeof(struct i40e_ring), GFP_KERNEL); + if (!adapter->rx_rings) + goto err_out; + + for (i = 0; i < num_active_queues; i++) { + struct i40e_ring *tx_ring; + struct i40e_ring *rx_ring; + + tx_ring = &adapter->tx_rings[i]; + + tx_ring->queue_index = i; + tx_ring->netdev = adapter->netdev; + tx_ring->dev = &adapter->pdev->dev; + tx_ring->count = adapter->tx_desc_count; + tx_ring->itr_setting = I40E_ITR_TX_DEF; + if (adapter->flags & I40EVF_FLAG_WB_ON_ITR_CAPABLE) + tx_ring->flags |= I40E_TXR_FLAGS_WB_ON_ITR; + + rx_ring = &adapter->rx_rings[i]; + rx_ring->queue_index = i; + rx_ring->netdev = adapter->netdev; + rx_ring->dev = &adapter->pdev->dev; + rx_ring->count = adapter->rx_desc_count; + rx_ring->itr_setting = I40E_ITR_RX_DEF; + } + + adapter->num_active_queues = num_active_queues; + + return 0; + +err_out: + i40evf_free_queues(adapter); + return -ENOMEM; +} + +/** + * i40evf_set_interrupt_capability - set MSI-X or FAIL if not supported + * @adapter: board private structure to initialize + * + * Attempt to configure the interrupts using the best available + * capabilities of the hardware and the kernel. + **/ +static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter) +{ + int vector, v_budget; + int pairs = 0; + int err = 0; + + if (!adapter->vsi_res) { + err = -EIO; + goto out; + } + pairs = adapter->num_active_queues; + + /* It's easy to be greedy for MSI-X vectors, but it really doesn't do + * us much good if we have more vectors than CPUs. However, we already + * limit the total number of queues by the number of CPUs so we do not + * need any further limiting here. 
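+ * As a worked example (assuming NONQ_VECS is 1): four queue pairs with + * vf_res->max_vectors == 5 give v_budget = min(4 + 1, 5) = 5, i.e. one + * misc vector plus four data vectors.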
+	 */
+	v_budget = min_t(int, pairs + NONQ_VECS,
+			 (int)adapter->vf_res->max_vectors);
+
+	adapter->msix_entries = kcalloc(v_budget,
+					sizeof(struct msix_entry), GFP_KERNEL);
+	if (!adapter->msix_entries) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	for (vector = 0; vector < v_budget; vector++)
+		adapter->msix_entries[vector].entry = vector;
+
+	err = i40evf_acquire_msix_vectors(adapter, v_budget);
+
+out:
+	netif_set_real_num_rx_queues(adapter->netdev, pairs);
+	netif_set_real_num_tx_queues(adapter->netdev, pairs);
+	return err;
+}
+
+/**
+ * i40evf_config_rss_aq - Configure RSS keys and lut by using AQ commands
+ * @adapter: board private structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40evf_config_rss_aq(struct i40evf_adapter *adapter)
+{
+	struct i40e_aqc_get_set_rss_key_data *rss_key =
+		(struct i40e_aqc_get_set_rss_key_data *)adapter->rss_key;
+	struct i40e_hw *hw = &adapter->hw;
+	int ret = 0;
+
+	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+		/* bail because we already have a command pending */
+		dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
+			adapter->current_op);
+		return -EBUSY;
+	}
+
+	ret = i40evf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
+	if (ret) {
+		dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
+			i40evf_stat_str(hw, ret),
+			i40evf_aq_str(hw, hw->aq.asq_last_status));
+		return ret;
+	}
+
+	ret = i40evf_aq_set_rss_lut(hw, adapter->vsi.id, false,
+				    adapter->rss_lut, adapter->rss_lut_size);
+	if (ret) {
+		dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
+			i40evf_stat_str(hw, ret),
+			i40evf_aq_str(hw, hw->aq.asq_last_status));
+	}
+
+	return ret;
+}
+
+/**
+ * i40evf_config_rss_reg - Configure RSS keys and lut by writing registers
+ * @adapter: board private structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40evf_config_rss_reg(struct i40evf_adapter *adapter)
+{
+	struct i40e_hw *hw = &adapter->hw;
+	u32 *dw;
+	u16 i;
+
+	dw = (u32 *)adapter->rss_key;
+	for (i = 0; i <= adapter->rss_key_size / 4; i++)
+		wr32(hw, I40E_VFQF_HKEY(i), dw[i]);
+
+	dw = (u32 *)adapter->rss_lut;
+	for (i = 0; i <= adapter->rss_lut_size / 4; i++)
+		wr32(hw, I40E_VFQF_HLUT(i), dw[i]);
+
+	i40e_flush(hw);
+
+	return 0;
+}
+
+/**
+ * i40evf_config_rss - Configure RSS keys and lut
+ * @adapter: board private structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int i40evf_config_rss(struct i40evf_adapter *adapter)
+{
+	if (RSS_PF(adapter)) {
+		adapter->aq_required |= I40EVF_FLAG_AQ_SET_RSS_LUT |
+					I40EVF_FLAG_AQ_SET_RSS_KEY;
+		return 0;
+	} else if (RSS_AQ(adapter)) {
+		return i40evf_config_rss_aq(adapter);
+	} else {
+		return i40evf_config_rss_reg(adapter);
+	}
+}
+
+/**
+ * i40evf_fill_rss_lut - Fill the lut with default values
+ * @adapter: board private structure
+ **/
+static void i40evf_fill_rss_lut(struct i40evf_adapter *adapter)
+{
+	u16 i;
+
+	for (i = 0; i < adapter->rss_lut_size; i++)
+		adapter->rss_lut[i] = i % adapter->num_active_queues;
+}
+
+/**
+ * i40evf_init_rss - Prepare for RSS
+ * @adapter: board private structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40evf_init_rss(struct i40evf_adapter *adapter)
+{
+	struct i40e_hw *hw = &adapter->hw;
+	int ret;
+
+	if (!RSS_PF(adapter)) {
+		/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
+		if (adapter->vf_res->vf_cap_flags &
+		    VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
+			adapter->hena = I40E_DEFAULT_RSS_HENA_EXPANDED;
+		else
+			adapter->hena = I40E_DEFAULT_RSS_HENA;
+
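+		/* hena is a 64-bit pctype bitmap, but the VF only has
+		 * 32-bit register access, so the value is written as two
+		 * halves below.
+		 */
+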
wr32(hw, I40E_VFQF_HENA(0), (u32)adapter->hena); + wr32(hw, I40E_VFQF_HENA(1), (u32)(adapter->hena >> 32)); + } + + i40evf_fill_rss_lut(adapter); + + netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size); + ret = i40evf_config_rss(adapter); + + return ret; +} + +/** + * i40evf_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. + **/ +static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter) +{ + int q_idx = 0, num_q_vectors; + struct i40e_q_vector *q_vector; + + num_q_vectors = adapter->num_msix_vectors - NONQ_VECS; + adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector), + GFP_KERNEL); + if (!adapter->q_vectors) + return -ENOMEM; + + for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { + q_vector = &adapter->q_vectors[q_idx]; + q_vector->adapter = adapter; + q_vector->vsi = &adapter->vsi; + q_vector->v_idx = q_idx; + q_vector->reg_idx = q_idx; + cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask); + netif_napi_add(adapter->netdev, &q_vector->napi, + i40evf_napi_poll, NAPI_POLL_WEIGHT); + } + + return 0; +} + +/** + * i40evf_free_q_vectors - Free memory allocated for interrupt vectors + * @adapter: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + **/ +static void i40evf_free_q_vectors(struct i40evf_adapter *adapter) +{ + int q_idx, num_q_vectors; + int napi_vectors; + + if (!adapter->q_vectors) + return; + + num_q_vectors = adapter->num_msix_vectors - NONQ_VECS; + napi_vectors = adapter->num_active_queues; + + for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { + struct i40e_q_vector *q_vector = &adapter->q_vectors[q_idx]; + if (q_idx < napi_vectors) + netif_napi_del(&q_vector->napi); + } + kfree(adapter->q_vectors); + adapter->q_vectors = NULL; +} + +/** + * i40evf_reset_interrupt_capability - Reset MSIX setup + * @adapter: board private structure + * + **/ +void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter) +{ + if (!adapter->msix_entries) + return; + + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; +} + +/** + * i40evf_init_interrupt_scheme - Determine if MSIX is supported and init + * @adapter: board private structure to initialize + * + **/ +int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter) +{ + int err; + + err = i40evf_alloc_queues(adapter); + if (err) { + dev_err(&adapter->pdev->dev, + "Unable to allocate memory for queues\n"); + goto err_alloc_queues; + } + + rtnl_lock(); + err = i40evf_set_interrupt_capability(adapter); + rtnl_unlock(); + if (err) { + dev_err(&adapter->pdev->dev, + "Unable to setup interrupt capabilities\n"); + goto err_set_interrupt; + } + + err = i40evf_alloc_q_vectors(adapter); + if (err) { + dev_err(&adapter->pdev->dev, + "Unable to allocate memory for queue vectors\n"); + goto err_alloc_q_vectors; + } + + /* If we've made it so far while ADq flag being ON, then we haven't + * bailed out anywhere in middle. And ADq isn't just enabled but actual + * resources have been allocated in the reset path. + * Now we can truly claim that ADq is enabled. 
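+	 * (Put differently: every queue and vector allocation above has
+	 * succeeded by this point, so announcing ADq below is not premature.)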
+ */ + if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && + adapter->num_tc) + dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created", + adapter->num_tc); + + dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u", + (adapter->num_active_queues > 1) ? "Enabled" : "Disabled", + adapter->num_active_queues); + + return 0; +err_alloc_q_vectors: + i40evf_reset_interrupt_capability(adapter); +err_set_interrupt: + i40evf_free_queues(adapter); +err_alloc_queues: + return err; +} + +/** + * i40evf_free_rss - Free memory used by RSS structs + * @adapter: board private structure + **/ +static void i40evf_free_rss(struct i40evf_adapter *adapter) +{ + kfree(adapter->rss_key); + adapter->rss_key = NULL; + + kfree(adapter->rss_lut); + adapter->rss_lut = NULL; +} + +/** + * i40evf_reinit_interrupt_scheme - Reallocate queues and vectors + * @adapter: board private structure + * + * Returns 0 on success, negative on failure + **/ +static int i40evf_reinit_interrupt_scheme(struct i40evf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err; + + if (netif_running(netdev)) + i40evf_free_traffic_irqs(adapter); + i40evf_free_misc_irq(adapter); + i40evf_reset_interrupt_capability(adapter); + i40evf_free_q_vectors(adapter); + i40evf_free_queues(adapter); + + err = i40evf_init_interrupt_scheme(adapter); + if (err) + goto err; + + netif_tx_stop_all_queues(netdev); + + err = i40evf_request_misc_irq(adapter); + if (err) + goto err; + + set_bit(__I40E_VSI_DOWN, adapter->vsi.state); + + i40evf_map_rings_to_vectors(adapter); + + if (RSS_AQ(adapter)) + adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS; + else + err = i40evf_init_rss(adapter); +err: + return err; +} + +/** + * i40evf_watchdog_timer - Periodic call-back timer + * @data: pointer to adapter disguised as unsigned long + **/ +static void i40evf_watchdog_timer(struct timer_list *t) +{ + struct i40evf_adapter *adapter = from_timer(adapter, t, + watchdog_timer); + + schedule_work(&adapter->watchdog_task); + /* timer will be rescheduled in watchdog task */ +} + +/** + * i40evf_watchdog_task - Periodic call-back task + * @work: pointer to work_struct + **/ +static void i40evf_watchdog_task(struct work_struct *work) +{ + struct i40evf_adapter *adapter = container_of(work, + struct i40evf_adapter, + watchdog_task); + struct i40e_hw *hw = &adapter->hw; + u32 reg_val; + + if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section)) + goto restart_watchdog; + + if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) { + reg_val = rd32(hw, I40E_VFGEN_RSTAT) & + I40E_VFGEN_RSTAT_VFR_STATE_MASK; + if ((reg_val == VIRTCHNL_VFR_VFACTIVE) || + (reg_val == VIRTCHNL_VFR_COMPLETED)) { + /* A chance for redemption! */ + dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n"); + adapter->state = __I40EVF_STARTUP; + adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED; + schedule_delayed_work(&adapter->init_task, 10); + clear_bit(__I40EVF_IN_CRITICAL_TASK, + &adapter->crit_section); + /* Don't reschedule the watchdog, since we've restarted + * the init task. When init_task contacts the PF and + * gets everything set up again, it'll restart the + * watchdog for us. Down, boy. Sit. Stay. Woof. 
+ */ + return; + } + adapter->aq_required = 0; + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + goto watchdog_done; + } + + if ((adapter->state < __I40EVF_DOWN) || + (adapter->flags & I40EVF_FLAG_RESET_PENDING)) + goto watchdog_done; + + /* check for reset */ + reg_val = rd32(hw, I40E_VF_ARQLEN1) & I40E_VF_ARQLEN1_ARQENABLE_MASK; + if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) && !reg_val) { + adapter->state = __I40EVF_RESETTING; + adapter->flags |= I40EVF_FLAG_RESET_PENDING; + dev_err(&adapter->pdev->dev, "Hardware reset detected\n"); + schedule_work(&adapter->reset_task); + adapter->aq_required = 0; + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + goto watchdog_done; + } + + /* Process admin queue tasks. After init, everything gets done + * here so we don't race on the admin queue. + */ + if (adapter->current_op) { + if (!i40evf_asq_done(hw)) { + dev_dbg(&adapter->pdev->dev, "Admin queue timeout\n"); + i40evf_send_api_ver(adapter); + } + goto watchdog_done; + } + if (adapter->aq_required & I40EVF_FLAG_AQ_GET_CONFIG) { + i40evf_send_vf_config_msg(adapter); + goto watchdog_done; + } + + if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_QUEUES) { + i40evf_disable_queues(adapter); + goto watchdog_done; + } + + if (adapter->aq_required & I40EVF_FLAG_AQ_MAP_VECTORS) { + i40evf_map_queues(adapter); + goto watchdog_done; + } + + if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_MAC_FILTER) { + i40evf_add_ether_addrs(adapter); + goto watchdog_done; + } + + if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_VLAN_FILTER) { + i40evf_add_vlans(adapter); + goto watchdog_done; + } + + if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_MAC_FILTER) { + i40evf_del_ether_addrs(adapter); + goto watchdog_done; + } + + if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_VLAN_FILTER) { + i40evf_del_vlans(adapter); + goto watchdog_done; + } + + if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) { + i40evf_enable_vlan_stripping(adapter); + goto watchdog_done; + } + + if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) { + i40evf_disable_vlan_stripping(adapter); + goto watchdog_done; + } + + if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_QUEUES) { + i40evf_configure_queues(adapter); + goto watchdog_done; + } + + if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_QUEUES) { + i40evf_enable_queues(adapter); + goto watchdog_done; + } + + if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_RSS) { + /* This message goes straight to the firmware, not the + * PF, so we don't have to set current_op as we will + * not get a response through the ARQ. 
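+		 * (current_op only tracks virtchnl requests that the PF
+		 * answers on the ARQ; register writes and direct AQ
+		 * commands produce no such reply.)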
+ */ + i40evf_init_rss(adapter); + adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_RSS; + goto watchdog_done; + } + if (adapter->aq_required & I40EVF_FLAG_AQ_GET_HENA) { + i40evf_get_hena(adapter); + goto watchdog_done; + } + if (adapter->aq_required & I40EVF_FLAG_AQ_SET_HENA) { + i40evf_set_hena(adapter); + goto watchdog_done; + } + if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_KEY) { + i40evf_set_rss_key(adapter); + goto watchdog_done; + } + if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_LUT) { + i40evf_set_rss_lut(adapter); + goto watchdog_done; + } + + if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_PROMISC) { + i40evf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC | + FLAG_VF_MULTICAST_PROMISC); + goto watchdog_done; + } + + if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_ALLMULTI) { + i40evf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC); + goto watchdog_done; + } + + if ((adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_PROMISC) && + (adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_ALLMULTI)) { + i40evf_set_promiscuous(adapter, 0); + goto watchdog_done; + } + + if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_CHANNELS) { + i40evf_enable_channels(adapter); + goto watchdog_done; + } + + if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_CHANNELS) { + i40evf_disable_channels(adapter); + goto watchdog_done; + } + + if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_CLOUD_FILTER) { + i40evf_add_cloud_filter(adapter); + goto watchdog_done; + } + + if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_CLOUD_FILTER) { + i40evf_del_cloud_filter(adapter); + goto watchdog_done; + } + + schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5)); + + if (adapter->state == __I40EVF_RUNNING) + i40evf_request_stats(adapter); +watchdog_done: + if (adapter->state == __I40EVF_RUNNING) + i40evf_detect_recover_hung(&adapter->vsi); + clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); +restart_watchdog: + if (adapter->state == __I40EVF_REMOVE) + return; + if (adapter->aq_required) + mod_timer(&adapter->watchdog_timer, + jiffies + msecs_to_jiffies(20)); + else + mod_timer(&adapter->watchdog_timer, jiffies + (HZ * 2)); + schedule_work(&adapter->adminq_task); +} + +static void i40evf_disable_vf(struct i40evf_adapter *adapter) +{ + struct i40evf_mac_filter *f, *ftmp; + struct i40evf_vlan_filter *fv, *fvtmp; + struct i40evf_cloud_filter *cf, *cftmp; + + adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED; + + /* We don't use netif_running() because it may be true prior to + * ndo_open() returning, so we can't assume it means all our open + * tasks have finished, since we're not holding the rtnl_lock here. 
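+	 * Checking adapter->state == __I40EVF_RUNNING instead is reliable,
+	 * since the state is only set at the very end of the open path.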
+ */ + if (adapter->state == __I40EVF_RUNNING) { + set_bit(__I40E_VSI_DOWN, adapter->vsi.state); + netif_carrier_off(adapter->netdev); + netif_tx_disable(adapter->netdev); + adapter->link_up = false; + i40evf_napi_disable_all(adapter); + i40evf_irq_disable(adapter); + i40evf_free_traffic_irqs(adapter); + i40evf_free_all_tx_resources(adapter); + i40evf_free_all_rx_resources(adapter); + } + + spin_lock_bh(&adapter->mac_vlan_list_lock); + + /* Delete all of the filters */ + list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { + list_del(&f->list); + kfree(f); + } + + list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) { + list_del(&fv->list); + kfree(fv); + } + + spin_unlock_bh(&adapter->mac_vlan_list_lock); + + spin_lock_bh(&adapter->cloud_filter_list_lock); + list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { + list_del(&cf->list); + kfree(cf); + adapter->num_cloud_filters--; + } + spin_unlock_bh(&adapter->cloud_filter_list_lock); + + i40evf_free_misc_irq(adapter); + i40evf_reset_interrupt_capability(adapter); + i40evf_free_queues(adapter); + i40evf_free_q_vectors(adapter); + kfree(adapter->vf_res); + i40evf_shutdown_adminq(&adapter->hw); + adapter->netdev->flags &= ~IFF_UP; + clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; + adapter->state = __I40EVF_DOWN; + wake_up(&adapter->down_waitqueue); + dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n"); +} + +#define I40EVF_RESET_WAIT_MS 10 +#define I40EVF_RESET_WAIT_COUNT 500 +/** + * i40evf_reset_task - Call-back task to handle hardware reset + * @work: pointer to work_struct + * + * During reset we need to shut down and reinitialize the admin queue + * before we can use it to communicate with the PF again. We also clear + * and reinit the rings because that context is lost as well. + **/ +static void i40evf_reset_task(struct work_struct *work) +{ + struct i40evf_adapter *adapter = container_of(work, + struct i40evf_adapter, + reset_task); + struct virtchnl_vf_resource *vfres = adapter->vf_res; + struct net_device *netdev = adapter->netdev; + struct i40e_hw *hw = &adapter->hw; + struct i40evf_vlan_filter *vlf; + struct i40evf_cloud_filter *cf; + struct i40evf_mac_filter *f; + u32 reg_val; + int i = 0, err; + bool running; + + /* When device is being removed it doesn't make sense to run the reset + * task, just return in such a case. + */ + if (test_bit(__I40EVF_IN_REMOVE_TASK, &adapter->crit_section)) + return; + + while (test_and_set_bit(__I40EVF_IN_CLIENT_TASK, + &adapter->crit_section)) + usleep_range(500, 1000); + if (CLIENT_ENABLED(adapter)) { + adapter->flags &= ~(I40EVF_FLAG_CLIENT_NEEDS_OPEN | + I40EVF_FLAG_CLIENT_NEEDS_CLOSE | + I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS | + I40EVF_FLAG_SERVICE_CLIENT_REQUESTED); + cancel_delayed_work_sync(&adapter->client_task); + i40evf_notify_client_close(&adapter->vsi, true); + } + i40evf_misc_irq_disable(adapter); + if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) { + adapter->flags &= ~I40EVF_FLAG_RESET_NEEDED; + /* Restart the AQ here. If we have been reset but didn't + * detect it, or if the PF had to reinit, our AQ will be hosed. 
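+		 * Bouncing the AQ and then asking the PF for a fresh VF
+		 * reset below puts both ends back into a known state.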
+ */ + i40evf_shutdown_adminq(hw); + i40evf_init_adminq(hw); + i40evf_request_reset(adapter); + } + adapter->flags |= I40EVF_FLAG_RESET_PENDING; + + /* poll until we see the reset actually happen */ + for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) { + reg_val = rd32(hw, I40E_VF_ARQLEN1) & + I40E_VF_ARQLEN1_ARQENABLE_MASK; + if (!reg_val) + break; + usleep_range(5000, 10000); + } + if (i == I40EVF_RESET_WAIT_COUNT) { + dev_info(&adapter->pdev->dev, "Never saw reset\n"); + goto continue_reset; /* act like the reset happened */ + } + + /* wait until the reset is complete and the PF is responding to us */ + for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) { + /* sleep first to make sure a minimum wait time is met */ + msleep(I40EVF_RESET_WAIT_MS); + + reg_val = rd32(hw, I40E_VFGEN_RSTAT) & + I40E_VFGEN_RSTAT_VFR_STATE_MASK; + if (reg_val == VIRTCHNL_VFR_VFACTIVE) + break; + } + + pci_set_master(adapter->pdev); + + if (i == I40EVF_RESET_WAIT_COUNT) { + dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n", + reg_val); + i40evf_disable_vf(adapter); + clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section); + return; /* Do not attempt to reinit. It's dead, Jim. */ + } + +continue_reset: + /* We don't use netif_running() because it may be true prior to + * ndo_open() returning, so we can't assume it means all our open + * tasks have finished, since we're not holding the rtnl_lock here. + */ + running = ((adapter->state == __I40EVF_RUNNING) || + (adapter->state == __I40EVF_RESETTING)); + + if (running) { + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + adapter->link_up = false; + i40evf_napi_disable_all(adapter); + } + i40evf_irq_disable(adapter); + + adapter->state = __I40EVF_RESETTING; + adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; + + /* free the Tx/Rx rings and descriptors, might be better to just + * re-use them sometime in the future + */ + i40evf_free_all_rx_resources(adapter); + i40evf_free_all_tx_resources(adapter); + + adapter->flags |= I40EVF_FLAG_QUEUES_DISABLED; + /* kill and reinit the admin queue */ + i40evf_shutdown_adminq(hw); + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + err = i40evf_init_adminq(hw); + if (err) + dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n", + err); + adapter->aq_required = 0; + + if (adapter->flags & I40EVF_FLAG_REINIT_ITR_NEEDED) { + err = i40evf_reinit_interrupt_scheme(adapter); + if (err) + goto reset_err; + } + + adapter->aq_required |= I40EVF_FLAG_AQ_GET_CONFIG; + adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS; + + spin_lock_bh(&adapter->mac_vlan_list_lock); + + /* re-add all MAC filters */ + list_for_each_entry(f, &adapter->mac_filter_list, list) { + f->add = true; + } + /* re-add all VLAN filters */ + list_for_each_entry(vlf, &adapter->vlan_filter_list, list) { + vlf->add = true; + } + + spin_unlock_bh(&adapter->mac_vlan_list_lock); + + /* check if TCs are running and re-add all cloud filters */ + spin_lock_bh(&adapter->cloud_filter_list_lock); + if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && + adapter->num_tc) { + list_for_each_entry(cf, &adapter->cloud_filter_list, list) { + cf->add = true; + } + } + spin_unlock_bh(&adapter->cloud_filter_list_lock); + + adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER; + adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER; + adapter->aq_required |= I40EVF_FLAG_AQ_ADD_CLOUD_FILTER; + i40evf_misc_irq_enable(adapter); + + mod_timer(&adapter->watchdog_timer, jiffies + 2); + + /* We were running when the reset started, so we need to restore some + * state here. 
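+	 * Concretely: Tx/Rx descriptor memory is reallocated, traffic IRQs
+	 * are re-requested if the interrupt scheme was rebuilt, and the
+	 * rings are reconfigured and brought back up.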
+ */ + if (running) { + /* allocate transmit descriptors */ + err = i40evf_setup_all_tx_resources(adapter); + if (err) + goto reset_err; + + /* allocate receive descriptors */ + err = i40evf_setup_all_rx_resources(adapter); + if (err) + goto reset_err; + + if (adapter->flags & I40EVF_FLAG_REINIT_ITR_NEEDED) { + err = i40evf_request_traffic_irqs(adapter, + netdev->name); + if (err) + goto reset_err; + + adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED; + } + + i40evf_configure(adapter); + + i40evf_up_complete(adapter); + + i40evf_irq_enable(adapter, true); + } else { + adapter->state = __I40EVF_DOWN; + wake_up(&adapter->down_waitqueue); + } + clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section); + clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + + return; +reset_err: + clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section); + clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); + i40evf_close(netdev); +} + +/** + * i40evf_adminq_task - worker thread to clean the admin queue + * @work: pointer to work_struct containing our data + **/ +static void i40evf_adminq_task(struct work_struct *work) +{ + struct i40evf_adapter *adapter = + container_of(work, struct i40evf_adapter, adminq_task); + struct i40e_hw *hw = &adapter->hw; + struct i40e_arq_event_info event; + enum virtchnl_ops v_op; + i40e_status ret, v_ret; + u32 val, oldval; + u16 pending; + + if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) + goto out; + + event.buf_len = I40EVF_MAX_AQ_BUF_SIZE; + event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); + if (!event.msg_buf) + goto out; + + do { + ret = i40evf_clean_arq_element(hw, &event, &pending); + v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high); + v_ret = (i40e_status)le32_to_cpu(event.desc.cookie_low); + + if (ret || !v_op) + break; /* No event to process or error cleaning ARQ */ + + i40evf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf, + event.msg_len); + if (pending != 0) + memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE); + } while (pending); + + if ((adapter->flags & + (I40EVF_FLAG_RESET_PENDING | I40EVF_FLAG_RESET_NEEDED)) || + adapter->state == __I40EVF_RESETTING) + goto freedom; + + /* check for error indications */ + val = rd32(hw, hw->aq.arq.len); + if (val == 0xdeadbeef || val == 0xffffffff) /* device in reset */ + goto freedom; + oldval = val; + if (val & I40E_VF_ARQLEN1_ARQVFE_MASK) { + dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n"); + val &= ~I40E_VF_ARQLEN1_ARQVFE_MASK; + } + if (val & I40E_VF_ARQLEN1_ARQOVFL_MASK) { + dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n"); + val &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK; + } + if (val & I40E_VF_ARQLEN1_ARQCRIT_MASK) { + dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n"); + val &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK; + } + if (oldval != val) + wr32(hw, hw->aq.arq.len, val); + + val = rd32(hw, hw->aq.asq.len); + oldval = val; + if (val & I40E_VF_ATQLEN1_ATQVFE_MASK) { + dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n"); + val &= ~I40E_VF_ATQLEN1_ATQVFE_MASK; + } + if (val & I40E_VF_ATQLEN1_ATQOVFL_MASK) { + dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n"); + val &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK; + } + if (val & I40E_VF_ATQLEN1_ATQCRIT_MASK) { + dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n"); + val &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK; + } + if (oldval != val) + wr32(hw, hw->aq.asq.len, val); + +freedom: + kfree(event.msg_buf); +out: + /* 
re-enable Admin queue interrupt cause */ + i40evf_misc_irq_enable(adapter); +} + +/** + * i40evf_client_task - worker thread to perform client work + * @work: pointer to work_struct containing our data + * + * This task handles client interactions. Because client calls can be + * reentrant, we can't handle them in the watchdog. + **/ +static void i40evf_client_task(struct work_struct *work) +{ + struct i40evf_adapter *adapter = + container_of(work, struct i40evf_adapter, client_task.work); + + /* If we can't get the client bit, just give up. We'll be rescheduled + * later. + */ + + if (test_and_set_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section)) + return; + + if (adapter->flags & I40EVF_FLAG_SERVICE_CLIENT_REQUESTED) { + i40evf_client_subtask(adapter); + adapter->flags &= ~I40EVF_FLAG_SERVICE_CLIENT_REQUESTED; + goto out; + } + if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS) { + i40evf_notify_client_l2_params(&adapter->vsi); + adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS; + goto out; + } + if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_CLOSE) { + i40evf_notify_client_close(&adapter->vsi, false); + adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_CLOSE; + goto out; + } + if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_OPEN) { + i40evf_notify_client_open(&adapter->vsi); + adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_OPEN; + } +out: + clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section); +} + +/** + * i40evf_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + **/ +void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter) +{ + int i; + + if (!adapter->tx_rings) + return; + + for (i = 0; i < adapter->num_active_queues; i++) + if (adapter->tx_rings[i].desc) + i40evf_free_tx_resources(&adapter->tx_rings[i]); +} + +/** + * i40evf_setup_all_tx_resources - allocate all queues Tx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. + * + * Return 0 on success, negative on failure + **/ +static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_active_queues; i++) { + adapter->tx_rings[i].count = adapter->tx_desc_count; + err = i40evf_setup_tx_descriptors(&adapter->tx_rings[i]); + if (!err) + continue; + dev_err(&adapter->pdev->dev, + "Allocation for Tx Queue %u failed\n", i); + break; + } + + return err; +} + +/** + * i40evf_setup_all_rx_resources - allocate all queues Rx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. 
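+ * (i40evf_open() relies on this: on any setup failure it unwinds by
+ * freeing every ring via i40evf_free_all_rx_resources().)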
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter)
+{
+	int i, err = 0;
+
+	for (i = 0; i < adapter->num_active_queues; i++) {
+		adapter->rx_rings[i].count = adapter->rx_desc_count;
+		err = i40evf_setup_rx_descriptors(&adapter->rx_rings[i]);
+		if (!err)
+			continue;
+		dev_err(&adapter->pdev->dev,
+			"Allocation for Rx Queue %u failed\n", i);
+		break;
+	}
+	return err;
+}
+
+/**
+ * i40evf_free_all_rx_resources - Free Rx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter)
+{
+	int i;
+
+	if (!adapter->rx_rings)
+		return;
+
+	for (i = 0; i < adapter->num_active_queues; i++)
+		if (adapter->rx_rings[i].desc)
+			i40evf_free_rx_resources(&adapter->rx_rings[i]);
+}
+
+/**
+ * i40evf_validate_tx_bandwidth - validate the max Tx bandwidth
+ * @adapter: board private structure
+ * @max_tx_rate: max Tx bandwidth for a tc, in Mbps
+ **/
+static int i40evf_validate_tx_bandwidth(struct i40evf_adapter *adapter,
+					u64 max_tx_rate)
+{
+	int speed = 0, ret = 0;
+
+	switch (adapter->link_speed) {
+	case I40E_LINK_SPEED_40GB:
+		speed = 40000;
+		break;
+	case I40E_LINK_SPEED_25GB:
+		speed = 25000;
+		break;
+	case I40E_LINK_SPEED_20GB:
+		speed = 20000;
+		break;
+	case I40E_LINK_SPEED_10GB:
+		speed = 10000;
+		break;
+	case I40E_LINK_SPEED_1GB:
+		speed = 1000;
+		break;
+	case I40E_LINK_SPEED_100MB:
+		speed = 100;
+		break;
+	default:
+		break;
+	}
+
+	if (max_tx_rate > speed) {
+		dev_err(&adapter->pdev->dev,
+			"Invalid tx rate specified\n");
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+/**
+ * i40evf_validate_ch_config - validate queue mapping info
+ * @adapter: board private structure
+ * @mqprio_qopt: queue parameters
+ *
+ * This function validates whether the config provided by the user to
+ * configure queue channels is valid or not. Returns 0 on a valid
+ * config.
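+ *
+ * For example, an offload request equivalent to this (illustrative)
+ * command:
+ *	tc qdisc add dev <iface> root mqprio num_tc 2 map 0 0 0 0 1 1 1 1 \
+ *		queues 4@0 4@4 hw 1 mode channel
+ * passes the checks below: two TCs, contiguous offsets (0 and 4), and
+ * eight queue pairs in total.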
+ **/
+static int i40evf_validate_ch_config(struct i40evf_adapter *adapter,
+				     struct tc_mqprio_qopt_offload *mqprio_qopt)
+{
+	u64 total_max_rate = 0;
+	int i, num_qps = 0;
+	u64 tx_rate = 0;
+	int ret = 0;
+
+	if (mqprio_qopt->qopt.num_tc > I40EVF_MAX_TRAFFIC_CLASS ||
+	    mqprio_qopt->qopt.num_tc < 1)
+		return -EINVAL;
+
+	for (i = 0; i <= mqprio_qopt->qopt.num_tc - 1; i++) {
+		if (!mqprio_qopt->qopt.count[i] ||
+		    mqprio_qopt->qopt.offset[i] != num_qps)
+			return -EINVAL;
+		if (mqprio_qopt->min_rate[i]) {
+			dev_err(&adapter->pdev->dev,
+				"Invalid min tx rate (greater than 0) specified\n");
+			return -EINVAL;
+		}
+		/* convert to Mbps */
+		tx_rate = div_u64(mqprio_qopt->max_rate[i],
+				  I40EVF_MBPS_DIVISOR);
+		total_max_rate += tx_rate;
+		num_qps += mqprio_qopt->qopt.count[i];
+	}
+	if (num_qps > I40EVF_MAX_REQ_QUEUES)
+		return -EINVAL;
+
+	ret = i40evf_validate_tx_bandwidth(adapter, total_max_rate);
+	return ret;
+}
+
+/**
+ * i40evf_del_all_cloud_filters - delete all cloud filters on the traffic classes
+ * @adapter: board private structure
+ **/
+static void i40evf_del_all_cloud_filters(struct i40evf_adapter *adapter)
+{
+	struct i40evf_cloud_filter *cf, *cftmp;
+
+	spin_lock_bh(&adapter->cloud_filter_list_lock);
+	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
+				 list) {
+		list_del(&cf->list);
+		kfree(cf);
+		adapter->num_cloud_filters--;
+	}
+	spin_unlock_bh(&adapter->cloud_filter_list_lock);
+}
+
+/**
+ * __i40evf_setup_tc - configure multiple traffic classes
+ * @netdev: network interface device structure
+ * @type_data: tc offload data
+ *
+ * This function processes the config information provided by the
+ * user to configure traffic classes/queue channels and packages the
+ * information to request the PF to set up traffic classes.
+ *
+ * Returns 0 on success.
+ **/
+static int __i40evf_setup_tc(struct net_device *netdev, void *type_data)
+{
+	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	struct virtchnl_vf_resource *vfres = adapter->vf_res;
+	u8 num_tc = 0, total_qps = 0;
+	int ret = 0, netdev_tc = 0;
+	u64 max_tx_rate;
+	u16 mode;
+	int i;
+
+	num_tc = mqprio_qopt->qopt.num_tc;
+	mode = mqprio_qopt->mode;
+
+	/* delete queue_channel */
+	if (!mqprio_qopt->qopt.hw) {
+		if (adapter->ch_config.state == __I40EVF_TC_RUNNING) {
+			/* reset the tc configuration */
+			netdev_reset_tc(netdev);
+			adapter->num_tc = 0;
+			netif_tx_stop_all_queues(netdev);
+			netif_tx_disable(netdev);
+			i40evf_del_all_cloud_filters(adapter);
+			adapter->aq_required = I40EVF_FLAG_AQ_DISABLE_CHANNELS;
+			goto exit;
+		} else {
+			return -EINVAL;
+		}
+	}
+
+	/* add queue channel */
+	if (mode == TC_MQPRIO_MODE_CHANNEL) {
+		if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) {
+			dev_err(&adapter->pdev->dev, "ADq not supported\n");
+			return -EOPNOTSUPP;
+		}
+		if (adapter->ch_config.state != __I40EVF_TC_INVALID) {
+			dev_err(&adapter->pdev->dev, "TC configuration already exists\n");
+			return -EINVAL;
+		}
+
+		ret = i40evf_validate_ch_config(adapter, mqprio_qopt);
+		if (ret)
+			return ret;
+		/* Return if same TC config is requested */
+		if (adapter->num_tc == num_tc)
+			return 0;
+		adapter->num_tc = num_tc;
+
+		for (i = 0; i < I40EVF_MAX_TRAFFIC_CLASS; i++) {
+			if (i < num_tc) {
+				adapter->ch_config.ch_info[i].count =
+					mqprio_qopt->qopt.count[i];
+				adapter->ch_config.ch_info[i].offset =
+					mqprio_qopt->qopt.offset[i];
+				total_qps += mqprio_qopt->qopt.count[i];
+				max_tx_rate = mqprio_qopt->max_rate[i];
+				/* convert to Mbps */
+				max_tx_rate = div_u64(max_tx_rate,
+
I40EVF_MBPS_DIVISOR); + adapter->ch_config.ch_info[i].max_tx_rate = + max_tx_rate; + } else { + adapter->ch_config.ch_info[i].count = 1; + adapter->ch_config.ch_info[i].offset = 0; + } + } + adapter->ch_config.total_qps = total_qps; + netif_tx_stop_all_queues(netdev); + netif_tx_disable(netdev); + adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_CHANNELS; + netdev_reset_tc(netdev); + /* Report the tc mapping up the stack */ + netdev_set_num_tc(adapter->netdev, num_tc); + for (i = 0; i < I40EVF_MAX_TRAFFIC_CLASS; i++) { + u16 qcount = mqprio_qopt->qopt.count[i]; + u16 qoffset = mqprio_qopt->qopt.offset[i]; + + if (i < num_tc) + netdev_set_tc_queue(netdev, netdev_tc++, qcount, + qoffset); + } + } +exit: + return ret; +} + +/** + * i40evf_parse_cls_flower - Parse tc flower filters provided by kernel + * @adapter: board private structure + * @cls_flower: pointer to struct tc_cls_flower_offload + * @filter: pointer to cloud filter structure + */ +static int i40evf_parse_cls_flower(struct i40evf_adapter *adapter, + struct tc_cls_flower_offload *f, + struct i40evf_cloud_filter *filter) +{ + u16 n_proto_mask = 0; + u16 n_proto_key = 0; + u8 field_flags = 0; + u16 addr_type = 0; + u16 n_proto = 0; + int i = 0; + struct virtchnl_filter *vf = &filter->f; + + if (f->dissector->used_keys & + ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | + BIT(FLOW_DISSECTOR_KEY_BASIC) | + BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_VLAN) | + BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_PORTS) | + BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) { + dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n", + f->dissector->used_keys); + return -EOPNOTSUPP; + } + + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) { + struct flow_dissector_key_keyid *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ENC_KEYID, + f->mask); + + if (mask->keyid != 0) + field_flags |= I40EVF_CLOUD_FIELD_TEN_ID; + } + + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_dissector_key_basic *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_BASIC, + f->key); + + struct flow_dissector_key_basic *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_BASIC, + f->mask); + n_proto_key = ntohs(key->n_proto); + n_proto_mask = ntohs(mask->n_proto); + + if (n_proto_key == ETH_P_ALL) { + n_proto_key = 0; + n_proto_mask = 0; + } + n_proto = n_proto_key & n_proto_mask; + if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) + return -EINVAL; + if (n_proto == ETH_P_IPV6) { + /* specify flow type as TCP IPv6 */ + vf->flow_type = VIRTCHNL_TCP_V6_FLOW; + } + + if (key->ip_proto != IPPROTO_TCP) { + dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n"); + return -EINVAL; + } + } + + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_dissector_key_eth_addrs *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ETH_ADDRS, + f->key); + + struct flow_dissector_key_eth_addrs *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ETH_ADDRS, + f->mask); + /* use is_broadcast and is_zero to check for all 0xf or 0 */ + if (!is_zero_ether_addr(mask->dst)) { + if (is_broadcast_ether_addr(mask->dst)) { + field_flags |= I40EVF_CLOUD_FIELD_OMAC; + } else { + dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n", + mask->dst); + return I40E_ERR_CONFIG; + } + } + + if (!is_zero_ether_addr(mask->src)) { + if (is_broadcast_ether_addr(mask->src)) { + 
field_flags |= I40EVF_CLOUD_FIELD_IMAC;
+			} else {
+				dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n",
+					mask->src);
+				return I40E_ERR_CONFIG;
+			}
+		}
+
+		if (!is_zero_ether_addr(key->dst))
+			if (is_valid_ether_addr(key->dst) ||
+			    is_multicast_ether_addr(key->dst)) {
+				/* set the mask if a valid dst_mac address */
+				for (i = 0; i < ETH_ALEN; i++)
+					vf->mask.tcp_spec.dst_mac[i] |= 0xff;
+				ether_addr_copy(vf->data.tcp_spec.dst_mac,
+						key->dst);
+			}
+
+		if (!is_zero_ether_addr(key->src))
+			if (is_valid_ether_addr(key->src) ||
+			    is_multicast_ether_addr(key->src)) {
+				/* set the mask if a valid src_mac address */
+				for (i = 0; i < ETH_ALEN; i++)
+					vf->mask.tcp_spec.src_mac[i] |= 0xff;
+				ether_addr_copy(vf->data.tcp_spec.src_mac,
+						key->src);
+			}
+	}
+
+	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
+		struct flow_dissector_key_vlan *key =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_VLAN,
+						  f->key);
+		struct flow_dissector_key_vlan *mask =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_VLAN,
+						  f->mask);
+
+		if (mask->vlan_id) {
+			if (mask->vlan_id == VLAN_VID_MASK) {
+				field_flags |= I40EVF_CLOUD_FIELD_IVLAN;
+			} else {
+				dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n",
+					mask->vlan_id);
+				return I40E_ERR_CONFIG;
+			}
+		}
+		vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff);
+		vf->data.tcp_spec.vlan_id = cpu_to_be16(key->vlan_id);
+	}
+
+	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
+		struct flow_dissector_key_control *key =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_CONTROL,
+						  f->key);
+
+		addr_type = key->addr_type;
+	}
+
+	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+		struct flow_dissector_key_ipv4_addrs *key =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+						  f->key);
+		struct flow_dissector_key_ipv4_addrs *mask =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+						  f->mask);
+
+		if (mask->dst) {
+			if (mask->dst == cpu_to_be32(0xffffffff)) {
+				field_flags |= I40EVF_CLOUD_FIELD_IIP;
+			} else {
+				dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
+					be32_to_cpu(mask->dst));
+				return I40E_ERR_CONFIG;
+			}
+		}
+
+		if (mask->src) {
+			if (mask->src == cpu_to_be32(0xffffffff)) {
+				field_flags |= I40EVF_CLOUD_FIELD_IIP;
+			} else {
+				dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
+					be32_to_cpu(mask->src));
+				return I40E_ERR_CONFIG;
+			}
+		}
+
+		if (field_flags & I40EVF_CLOUD_FIELD_TEN_ID) {
+			dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
+			return I40E_ERR_CONFIG;
+		}
+		if (key->dst) {
+			vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
+			vf->data.tcp_spec.dst_ip[0] = key->dst;
+		}
+		if (key->src) {
+			vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff);
+			vf->data.tcp_spec.src_ip[0] = key->src;
+		}
+	}
+
+	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+		struct flow_dissector_key_ipv6_addrs *key =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+						  f->key);
+		struct flow_dissector_key_ipv6_addrs *mask =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+						  f->mask);
+
+		/* validate mask, make sure it is not IPV6_ADDR_ANY */
+		if (ipv6_addr_any(&mask->dst)) {
+			dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
+				IPV6_ADDR_ANY);
+			return I40E_ERR_CONFIG;
+		}
+
+		/* src and dest IPv6 address should not be LOOPBACK
+		 * (0:0:0:0:0:0:0:1), which can be represented as ::1
+		 */
+		if (ipv6_addr_loopback(&key->dst) ||
+		    ipv6_addr_loopback(&key->src))
{ + dev_err(&adapter->pdev->dev, + "ipv6 addr should not be loopback\n"); + return I40E_ERR_CONFIG; + } + if (!ipv6_addr_any(&mask->dst) || !ipv6_addr_any(&mask->src)) + field_flags |= I40EVF_CLOUD_FIELD_IIP; + + for (i = 0; i < 4; i++) + vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff); + memcpy(&vf->data.tcp_spec.dst_ip, &key->dst.s6_addr32, + sizeof(vf->data.tcp_spec.dst_ip)); + for (i = 0; i < 4; i++) + vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff); + memcpy(&vf->data.tcp_spec.src_ip, &key->src.s6_addr32, + sizeof(vf->data.tcp_spec.src_ip)); + } + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_dissector_key_ports *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_PORTS, + f->key); + struct flow_dissector_key_ports *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_PORTS, + f->mask); + + if (mask->src) { + if (mask->src == cpu_to_be16(0xffff)) { + field_flags |= I40EVF_CLOUD_FIELD_IIP; + } else { + dev_err(&adapter->pdev->dev, "Bad src port mask %u\n", + be16_to_cpu(mask->src)); + return I40E_ERR_CONFIG; + } + } + + if (mask->dst) { + if (mask->dst == cpu_to_be16(0xffff)) { + field_flags |= I40EVF_CLOUD_FIELD_IIP; + } else { + dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n", + be16_to_cpu(mask->dst)); + return I40E_ERR_CONFIG; + } + } + if (key->dst) { + vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff); + vf->data.tcp_spec.dst_port = key->dst; + } + + if (key->src) { + vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff); + vf->data.tcp_spec.src_port = key->src; + } + } + vf->field_flags = field_flags; + + return 0; +} + +/** + * i40evf_handle_tclass - Forward to a traffic class on the device + * @adapter: board private structure + * @tc: traffic class index on the device + * @filter: pointer to cloud filter structure + */ +static int i40evf_handle_tclass(struct i40evf_adapter *adapter, u32 tc, + struct i40evf_cloud_filter *filter) +{ + if (tc == 0) + return 0; + if (tc < adapter->num_tc) { + if (!filter->f.data.tcp_spec.dst_port) { + dev_err(&adapter->pdev->dev, + "Specify destination port to redirect to traffic class other than TC0\n"); + return -EINVAL; + } + } + /* redirect to a traffic class on the same device */ + filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT; + filter->f.action_meta = tc; + return 0; +} + +/** + * i40evf_configure_clsflower - Add tc flower filters + * @adapter: board private structure + * @cls_flower: Pointer to struct tc_cls_flower_offload + */ +static int i40evf_configure_clsflower(struct i40evf_adapter *adapter, + struct tc_cls_flower_offload *cls_flower) +{ + int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid); + struct i40evf_cloud_filter *filter = NULL; + int err = -EINVAL, count = 50; + + if (tc < 0) { + dev_err(&adapter->pdev->dev, "Invalid traffic class\n"); + return -EINVAL; + } + + filter = kzalloc(sizeof(*filter), GFP_KERNEL); + if (!filter) + return -ENOMEM; + + while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, + &adapter->crit_section)) { + if (--count == 0) + goto err; + udelay(1); + } + + filter->cookie = cls_flower->cookie; + + /* set the mask to all zeroes to begin with */ + memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec)); + /* start out with flow type and eth type IPv4 to begin with */ + filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW; + err = i40evf_parse_cls_flower(adapter, cls_flower, filter); + if (err < 0) + goto err; + + err = i40evf_handle_tclass(adapter, tc, filter); + if (err < 0) + goto err; + + /* add filter to 
the list */ + spin_lock_bh(&adapter->cloud_filter_list_lock); + list_add_tail(&filter->list, &adapter->cloud_filter_list); + adapter->num_cloud_filters++; + filter->add = true; + adapter->aq_required |= I40EVF_FLAG_AQ_ADD_CLOUD_FILTER; + spin_unlock_bh(&adapter->cloud_filter_list_lock); +err: + if (err) + kfree(filter); + + clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + return err; +} + +/* i40evf_find_cf - Find the cloud filter in the list + * @adapter: Board private structure + * @cookie: filter specific cookie + * + * Returns ptr to the filter object or NULL. Must be called while holding the + * cloud_filter_list_lock. + */ +static struct i40evf_cloud_filter *i40evf_find_cf(struct i40evf_adapter *adapter, + unsigned long *cookie) +{ + struct i40evf_cloud_filter *filter = NULL; + + if (!cookie) + return NULL; + + list_for_each_entry(filter, &adapter->cloud_filter_list, list) { + if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie))) + return filter; + } + return NULL; +} + +/** + * i40evf_delete_clsflower - Remove tc flower filters + * @adapter: board private structure + * @cls_flower: Pointer to struct tc_cls_flower_offload + */ +static int i40evf_delete_clsflower(struct i40evf_adapter *adapter, + struct tc_cls_flower_offload *cls_flower) +{ + struct i40evf_cloud_filter *filter = NULL; + int err = 0; + + spin_lock_bh(&adapter->cloud_filter_list_lock); + filter = i40evf_find_cf(adapter, &cls_flower->cookie); + if (filter) { + filter->del = true; + adapter->aq_required |= I40EVF_FLAG_AQ_DEL_CLOUD_FILTER; + } else { + err = -EINVAL; + } + spin_unlock_bh(&adapter->cloud_filter_list_lock); + + return err; +} + +/** + * i40evf_setup_tc_cls_flower - flower classifier offloads + * @netdev: net device to configure + * @type_data: offload data + */ +static int i40evf_setup_tc_cls_flower(struct i40evf_adapter *adapter, + struct tc_cls_flower_offload *cls_flower) +{ + if (cls_flower->common.chain_index) + return -EOPNOTSUPP; + + switch (cls_flower->command) { + case TC_CLSFLOWER_REPLACE: + return i40evf_configure_clsflower(adapter, cls_flower); + case TC_CLSFLOWER_DESTROY: + return i40evf_delete_clsflower(adapter, cls_flower); + case TC_CLSFLOWER_STATS: + return -EOPNOTSUPP; + default: + return -EOPNOTSUPP; + } +} + +/** + * i40evf_setup_tc_block_cb - block callback for tc + * @type: type of offload + * @type_data: offload data + * @cb_priv: + * + * This function is the block callback for traffic classes + **/ +static int i40evf_setup_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) +{ + switch (type) { + case TC_SETUP_CLSFLOWER: + return i40evf_setup_tc_cls_flower(cb_priv, type_data); + default: + return -EOPNOTSUPP; + } +} + +/** + * i40evf_setup_tc_block - register callbacks for tc + * @netdev: network interface device structure + * @f: tc offload data + * + * This function registers block callbacks for tc + * offloads + **/ +static int i40evf_setup_tc_block(struct net_device *dev, + struct tc_block_offload *f) +{ + struct i40evf_adapter *adapter = netdev_priv(dev); + + if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + switch (f->command) { + case TC_BLOCK_BIND: + return tcf_block_cb_register(f->block, i40evf_setup_tc_block_cb, + adapter, adapter, f->extack); + case TC_BLOCK_UNBIND: + tcf_block_cb_unregister(f->block, i40evf_setup_tc_block_cb, + adapter); + return 0; + default: + return -EOPNOTSUPP; + } +} + +/** + * i40evf_setup_tc - configure multiple traffic classes + * @netdev: network interface device 
structure
+ * @type: type of offload
+ * @type_data: tc offload data
+ *
+ * This function is the callback to ndo_setup_tc in the
+ * netdev_ops.
+ *
+ * Returns 0 on success
+ **/
+static int i40evf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
+			   void *type_data)
+{
+	switch (type) {
+	case TC_SETUP_QDISC_MQPRIO:
+		return __i40evf_setup_tc(netdev, type_data);
+	case TC_SETUP_BLOCK:
+		return i40evf_setup_tc_block(netdev, type_data);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+/**
+ * i40evf_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ **/
+static int i40evf_open(struct net_device *netdev)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	int err;
+
+	if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
+		dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
+		return -EIO;
+	}
+
+	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+				&adapter->crit_section))
+		usleep_range(500, 1000);
+
+	if (adapter->state != __I40EVF_DOWN) {
+		err = -EBUSY;
+		goto err_unlock;
+	}
+
+	/* allocate transmit descriptors */
+	err = i40evf_setup_all_tx_resources(adapter);
+	if (err)
+		goto err_setup_tx;
+
+	/* allocate receive descriptors */
+	err = i40evf_setup_all_rx_resources(adapter);
+	if (err)
+		goto err_setup_rx;
+
+	/* clear any pending interrupts, may auto mask */
+	err = i40evf_request_traffic_irqs(adapter, netdev->name);
+	if (err)
+		goto err_req_irq;
+
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
+
+	i40evf_add_filter(adapter, adapter->hw.mac.addr);
+
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
+	i40evf_configure(adapter);
+
+	i40evf_up_complete(adapter);
+
+	i40evf_irq_enable(adapter, true);
+
+	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+
+	return 0;
+
+err_req_irq:
+	i40evf_down(adapter);
+	i40evf_free_traffic_irqs(adapter);
+err_setup_rx:
+	i40evf_free_all_rx_resources(adapter);
+err_setup_tx:
+	i40evf_free_all_tx_resources(adapter);
+err_unlock:
+	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+
+	return err;
+}
+
+/**
+ * i40evf_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the driver's control, but
+ * needs to be disabled. All IRQs except vector 0 (reserved for admin queue)
+ * are freed, along with all transmit and receive resources.
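+ *
+ * The function then waits briefly (up to 200 ms) for the state to reach
+ * __I40EVF_DOWN, i.e. for the PF to confirm the queues are stopped, before
+ * returning.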
+ **/ +static int i40evf_close(struct net_device *netdev) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + int status; + + if (adapter->state <= __I40EVF_DOWN_PENDING) + return 0; + + while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, + &adapter->crit_section)) + usleep_range(500, 1000); + + set_bit(__I40E_VSI_DOWN, adapter->vsi.state); + if (CLIENT_ENABLED(adapter)) + adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_CLOSE; + + i40evf_down(adapter); + adapter->state = __I40EVF_DOWN_PENDING; + i40evf_free_traffic_irqs(adapter); + + clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + + /* We explicitly don't free resources here because the hardware is + * still active and can DMA into memory. Resources are cleared in + * i40evf_virtchnl_completion() after we get confirmation from the PF + * driver that the rings have been stopped. + * + * Also, we wait for state to transition to __I40EVF_DOWN before + * returning. State change occurs in i40evf_virtchnl_completion() after + * VF resources are released (which occurs after PF driver processes and + * responds to admin queue commands). + */ + + status = wait_event_timeout(adapter->down_waitqueue, + adapter->state == __I40EVF_DOWN, + msecs_to_jiffies(200)); + if (!status) + netdev_warn(netdev, "Device resources not yet released\n"); + return 0; +} + +/** + * i40evf_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int i40evf_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + + netdev->mtu = new_mtu; + if (CLIENT_ENABLED(adapter)) { + i40evf_notify_client_l2_params(&adapter->vsi); + adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED; + } + adapter->flags |= I40EVF_FLAG_RESET_NEEDED; + schedule_work(&adapter->reset_task); + + return 0; +} + +/** + * i40e_set_features - set the netdev feature flags + * @netdev: ptr to the netdev being adjusted + * @features: the feature set that the stack is suggesting + * Note: expects to be called while under rtnl_lock() + **/ +static int i40evf_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + + /* Don't allow changing VLAN_RX flag when adapter is not capable + * of VLAN offload + */ + if (!VLAN_ALLOWED(adapter)) { + if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) + return -EINVAL; + } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) { + if (features & NETIF_F_HW_VLAN_CTAG_RX) + adapter->aq_required |= + I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING; + else + adapter->aq_required |= + I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING; + } + + return 0; +} + +/** + * i40evf_features_check - Validate encapsulated packet conforms to limits + * @skb: skb buff + * @dev: This physical port's netdev + * @features: Offload features that the stack believes apply + **/ +static netdev_features_t i40evf_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + size_t len; + + /* No point in doing any of this if neither checksum nor GSO are + * being requested for this frame. We can rule out both by just + * checking for CHECKSUM_PARTIAL + */ + if (skb->ip_summed != CHECKSUM_PARTIAL) + return features; + + /* We cannot support GSO if the MSS is going to be less than + * 64 bytes. If it is then we need to drop support for GSO. 
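+	 * (At a 48-byte MSS, for instance, a single 9000-byte GSO skb would
+	 * expand to more than 180 segments.)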
+ */ + if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64)) + features &= ~NETIF_F_GSO_MASK; + + /* MACLEN can support at most 63 words */ + len = skb_network_header(skb) - skb->data; + if (len & ~(63 * 2)) + goto out_err; + + /* IPLEN and EIPLEN can support at most 127 dwords */ + len = skb_transport_header(skb) - skb_network_header(skb); + if (len & ~(127 * 4)) + goto out_err; + + if (skb->encapsulation) { + /* L4TUNLEN can support 127 words */ + len = skb_inner_network_header(skb) - skb_transport_header(skb); + if (len & ~(127 * 2)) + goto out_err; + + /* IPLEN can support at most 127 dwords */ + len = skb_inner_transport_header(skb) - + skb_inner_network_header(skb); + if (len & ~(127 * 4)) + goto out_err; + } + + /* No need to validate L4LEN as TCP is the only protocol with a + * a flexible value and we support all possible values supported + * by TCP, which is at most 15 dwords + */ + + return features; +out_err: + return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); +} + +/** + * i40evf_fix_features - fix up the netdev feature bits + * @netdev: our net device + * @features: desired feature bits + * + * Returns fixed-up features bits + **/ +static netdev_features_t i40evf_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + + if (adapter->vf_res && + !(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)) + features &= ~(NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER); + + return features; +} + +static const struct net_device_ops i40evf_netdev_ops = { + .ndo_open = i40evf_open, + .ndo_stop = i40evf_close, + .ndo_start_xmit = i40evf_xmit_frame, + .ndo_set_rx_mode = i40evf_set_rx_mode, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = i40evf_set_mac, + .ndo_change_mtu = i40evf_change_mtu, + .ndo_tx_timeout = i40evf_tx_timeout, + .ndo_vlan_rx_add_vid = i40evf_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = i40evf_vlan_rx_kill_vid, + .ndo_features_check = i40evf_features_check, + .ndo_fix_features = i40evf_fix_features, + .ndo_set_features = i40evf_set_features, + .ndo_setup_tc = i40evf_setup_tc, +}; + +/** + * i40evf_check_reset_complete - check that VF reset is complete + * @hw: pointer to hw struct + * + * Returns 0 if device is ready to use, or -EBUSY if it's in reset. + **/ +static int i40evf_check_reset_complete(struct i40e_hw *hw) +{ + u32 rstat; + int i; + + for (i = 0; i < 100; i++) { + rstat = rd32(hw, I40E_VFGEN_RSTAT) & + I40E_VFGEN_RSTAT_VFR_STATE_MASK; + if ((rstat == VIRTCHNL_VFR_VFACTIVE) || + (rstat == VIRTCHNL_VFR_COMPLETED)) + return 0; + usleep_range(10, 20); + } + return -EBUSY; +} + +/** + * i40evf_process_config - Process the config information we got from the PF + * @adapter: board private structure + * + * Verify that we have a valid config struct, and set up our netdev features + * and our VSI struct. 
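+ *
+ * This runs from the init task once the PF has answered
+ * VIRTCHNL_OP_GET_VF_RESOURCES, and again when the configuration is
+ * re-fetched after a VF reset.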
+ **/
+int i40evf_process_config(struct i40evf_adapter *adapter)
+{
+ struct virtchnl_vf_resource *vfres = adapter->vf_res;
+ int i, num_req_queues = adapter->num_req_queues;
+ struct net_device *netdev = adapter->netdev;
+ struct i40e_vsi *vsi = &adapter->vsi;
+ netdev_features_t hw_enc_features;
+ netdev_features_t hw_features;
+
+ /* got VF config message back from PF, now we can parse it */
+ for (i = 0; i < vfres->num_vsis; i++) {
+ if (vfres->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
+ adapter->vsi_res = &vfres->vsi_res[i];
+ }
+ if (!adapter->vsi_res) {
+ dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
+ return -ENODEV;
+ }
+
+ if (num_req_queues &&
+ num_req_queues != adapter->vsi_res->num_queue_pairs) {
+ /* Problem. The PF gave us fewer queues than we negotiated
+ * in our request. Need a reset to see if we can get back
+ * to a working state.
+ */
+ dev_err(&adapter->pdev->dev,
+ "Requested %d queues, but PF only gave us %d.\n",
+ num_req_queues,
+ adapter->vsi_res->num_queue_pairs);
+ adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED;
+ adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
+ i40evf_schedule_reset(adapter);
+ return -ENODEV;
+ }
+ adapter->num_req_queues = 0;
+
+ hw_enc_features = NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_HIGHDMA |
+ NETIF_F_SOFT_FEATURES |
+ NETIF_F_TSO |
+ NETIF_F_TSO_ECN |
+ NETIF_F_TSO6 |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_RXHASH |
+ NETIF_F_RXCSUM |
+ 0;
+
+ /* advertise to stack only if offloads for encapsulated packets are
+ * supported
+ */
+ if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) {
+ hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_GRE |
+ NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_IPXIP4 |
+ NETIF_F_GSO_IPXIP6 |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL |
+ 0;
+
+ if (!(vfres->vf_cap_flags &
+ VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
+ netdev->gso_partial_features |=
+ NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+ netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
+ netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
+ netdev->hw_enc_features |= hw_enc_features;
+ }
+ /* record features VLANs can make use of */
+ netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
+
+ /* Write features and hw_features separately to avoid polluting
+ * with, or dropping, features that are set when we registered.
+ */
+ hw_features = hw_enc_features;
+
+ /* Enable VLAN features if supported */
+ if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
+ hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX);
+ /* Enable cloud filter if ADQ is supported */
+ if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)
+ hw_features |= NETIF_F_HW_TC;
+
+ netdev->hw_features |= hw_features;
+
+ netdev->features |= hw_features;
+
+ if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ /* Do not turn on offloads when they are requested to be turned off.
+ * TSO needs minimum 576 bytes to work correctly.
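+ * (576 bytes is the minimum datagram size an IPv4 host must be able
+ * to accept, per RFC 791.)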
+ */ + if (netdev->wanted_features) { + if (!(netdev->wanted_features & NETIF_F_TSO) || + netdev->mtu < 576) + netdev->features &= ~NETIF_F_TSO; + if (!(netdev->wanted_features & NETIF_F_TSO6) || + netdev->mtu < 576) + netdev->features &= ~NETIF_F_TSO6; + if (!(netdev->wanted_features & NETIF_F_TSO_ECN)) + netdev->features &= ~NETIF_F_TSO_ECN; + if (!(netdev->wanted_features & NETIF_F_GRO)) + netdev->features &= ~NETIF_F_GRO; + if (!(netdev->wanted_features & NETIF_F_GSO)) + netdev->features &= ~NETIF_F_GSO; + } + + adapter->vsi.id = adapter->vsi_res->vsi_id; + + adapter->vsi.back = adapter; + adapter->vsi.base_vector = 1; + adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK; + vsi->netdev = adapter->netdev; + vsi->qs_handle = adapter->vsi_res->qset_handle; + if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { + adapter->rss_key_size = vfres->rss_key_size; + adapter->rss_lut_size = vfres->rss_lut_size; + } else { + adapter->rss_key_size = I40EVF_HKEY_ARRAY_SIZE; + adapter->rss_lut_size = I40EVF_HLUT_ARRAY_SIZE; + } + + return 0; +} + +/** + * i40evf_init_task - worker thread to perform delayed initialization + * @work: pointer to work_struct containing our data + * + * This task completes the work that was begun in probe. Due to the nature + * of VF-PF communications, we may need to wait tens of milliseconds to get + * responses back from the PF. Rather than busy-wait in probe and bog down the + * whole system, we'll do it in a task so we can sleep. + * This task only runs during driver init. Once we've established + * communications with the PF driver and set up our netdev, the watchdog + * takes over. + **/ +static void i40evf_init_task(struct work_struct *work) +{ + struct i40evf_adapter *adapter = container_of(work, + struct i40evf_adapter, + init_task.work); + struct net_device *netdev = adapter->netdev; + struct i40e_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + int err, bufsz; + + switch (adapter->state) { + case __I40EVF_STARTUP: + /* driver loaded, probe complete */ + adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED; + adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; + err = i40e_set_mac_type(hw); + if (err) { + dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", + err); + goto err; + } + err = i40evf_check_reset_complete(hw); + if (err) { + dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n", + err); + goto err; + } + hw->aq.num_arq_entries = I40EVF_AQ_LEN; + hw->aq.num_asq_entries = I40EVF_AQ_LEN; + hw->aq.arq_buf_size = I40EVF_MAX_AQ_BUF_SIZE; + hw->aq.asq_buf_size = I40EVF_MAX_AQ_BUF_SIZE; + + err = i40evf_init_adminq(hw); + if (err) { + dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n", + err); + goto err; + } + err = i40evf_send_api_ver(adapter); + if (err) { + dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err); + i40evf_shutdown_adminq(hw); + goto err; + } + adapter->state = __I40EVF_INIT_VERSION_CHECK; + goto restart; + case __I40EVF_INIT_VERSION_CHECK: + if (!i40evf_asq_done(hw)) { + dev_err(&pdev->dev, "Admin queue command never completed\n"); + i40evf_shutdown_adminq(hw); + adapter->state = __I40EVF_STARTUP; + goto err; + } + + /* aq msg sent, awaiting reply */ + err = i40evf_verify_api_ver(adapter); + if (err) { + if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) + err = i40evf_send_api_ver(adapter); + else + dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n", + adapter->pf_version.major, + adapter->pf_version.minor, + VIRTCHNL_VERSION_MAJOR, + VIRTCHNL_VERSION_MINOR); + goto err; + } + err = 
i40evf_send_vf_config_msg(adapter); + if (err) { + dev_err(&pdev->dev, "Unable to send config request (%d)\n", + err); + goto err; + } + adapter->state = __I40EVF_INIT_GET_RESOURCES; + goto restart; + case __I40EVF_INIT_GET_RESOURCES: + /* aq msg sent, awaiting reply */ + if (!adapter->vf_res) { + bufsz = sizeof(struct virtchnl_vf_resource) + + (I40E_MAX_VF_VSI * + sizeof(struct virtchnl_vsi_resource)); + adapter->vf_res = kzalloc(bufsz, GFP_KERNEL); + if (!adapter->vf_res) + goto err; + } + err = i40evf_get_vf_config(adapter); + if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) { + err = i40evf_send_vf_config_msg(adapter); + goto err; + } else if (err == I40E_ERR_PARAM) { + /* We only get ERR_PARAM if the device is in a very bad + * state or if we've been disabled for previous bad + * behavior. Either way, we're done now. + */ + i40evf_shutdown_adminq(hw); + dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n"); + return; + } + if (err) { + dev_err(&pdev->dev, "Unable to get VF config (%d)\n", + err); + goto err_alloc; + } + adapter->state = __I40EVF_INIT_SW; + break; + default: + goto err_alloc; + } + + if (i40evf_process_config(adapter)) + goto err_alloc; + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + + adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED; + + netdev->netdev_ops = &i40evf_netdev_ops; + i40evf_set_ethtool_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + + /* MTU range: 68 - 9710 */ + netdev->min_mtu = ETH_MIN_MTU; + netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD; + + if (!is_valid_ether_addr(adapter->hw.mac.addr)) { + dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n", + adapter->hw.mac.addr); + eth_hw_addr_random(netdev); + ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); + } else { + adapter->flags |= I40EVF_FLAG_ADDR_SET_BY_PF; + ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); + ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); + } + + timer_setup(&adapter->watchdog_timer, i40evf_watchdog_timer, 0); + mod_timer(&adapter->watchdog_timer, jiffies + 1); + + adapter->tx_desc_count = I40EVF_DEFAULT_TXD; + adapter->rx_desc_count = I40EVF_DEFAULT_RXD; + err = i40evf_init_interrupt_scheme(adapter); + if (err) + goto err_sw_init; + i40evf_map_rings_to_vectors(adapter); + if (adapter->vf_res->vf_cap_flags & + VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) + adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE; + + err = i40evf_request_misc_irq(adapter); + if (err) + goto err_sw_init; + + netif_carrier_off(netdev); + adapter->link_up = false; + + if (!adapter->netdev_registered) { + err = register_netdev(netdev); + if (err) + goto err_register; + } + + adapter->netdev_registered = true; + + netif_tx_stop_all_queues(netdev); + if (CLIENT_ALLOWED(adapter)) { + err = i40evf_lan_add_device(adapter); + if (err) + dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n", + err); + } + + dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr); + if (netdev->features & NETIF_F_GRO) + dev_info(&pdev->dev, "GRO is enabled\n"); + + adapter->state = __I40EVF_DOWN; + set_bit(__I40E_VSI_DOWN, adapter->vsi.state); + i40evf_misc_irq_enable(adapter); + wake_up(&adapter->down_waitqueue); + + adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL); + adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL); + if (!adapter->rss_key || !adapter->rss_lut) + goto err_mem; + + if (RSS_AQ(adapter)) { + adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS; + mod_timer_pending(&adapter->watchdog_timer, jiffies + 1); + } 
else { + i40evf_init_rss(adapter); + } + return; +restart: + schedule_delayed_work(&adapter->init_task, msecs_to_jiffies(30)); + return; +err_mem: + i40evf_free_rss(adapter); +err_register: + i40evf_free_misc_irq(adapter); +err_sw_init: + i40evf_reset_interrupt_capability(adapter); +err_alloc: + kfree(adapter->vf_res); + adapter->vf_res = NULL; +err: + /* Things went into the weeds, so try again later */ + if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) { + dev_err(&pdev->dev, "Failed to communicate with PF; waiting before retry\n"); + adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED; + i40evf_shutdown_adminq(hw); + adapter->state = __I40EVF_STARTUP; + schedule_delayed_work(&adapter->init_task, HZ * 5); + return; + } + schedule_delayed_work(&adapter->init_task, HZ); +} + +/** + * i40evf_shutdown - Shutdown the device in preparation for a reboot + * @pdev: pci device structure + **/ +static void i40evf_shutdown(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct i40evf_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (netif_running(netdev)) + i40evf_close(netdev); + + /* Prevent the watchdog from running. */ + adapter->state = __I40EVF_REMOVE; + adapter->aq_required = 0; + +#ifdef CONFIG_PM + pci_save_state(pdev); + +#endif + pci_disable_device(pdev); +} + +/** + * i40evf_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in i40evf_pci_tbl + * + * Returns 0 on success, negative on failure + * + * i40evf_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + **/ +static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct i40evf_adapter *adapter = NULL; + struct i40e_hw *hw = NULL; + int err; + + err = pci_enable_device(pdev); + if (err) + return err; + + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, + "DMA configuration failed: 0x%x\n", err); + goto err_dma; + } + } + + err = pci_request_regions(pdev, i40evf_driver_name); + if (err) { + dev_err(&pdev->dev, + "pci_request_regions failed 0x%x\n", err); + goto err_pci_reg; + } + + pci_enable_pcie_error_reporting(pdev); + + pci_set_master(pdev); + + netdev = alloc_etherdev_mq(sizeof(struct i40evf_adapter), + I40EVF_MAX_REQ_QUEUES); + if (!netdev) { + err = -ENOMEM; + goto err_alloc_etherdev; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + + adapter->netdev = netdev; + adapter->pdev = pdev; + + hw = &adapter->hw; + hw->back = adapter; + + adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1; + adapter->state = __I40EVF_STARTUP; + + /* Call save state here because it relies on the adapter struct. 
*/ + pci_save_state(pdev); + + hw->hw_addr = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + if (!hw->hw_addr) { + err = -EIO; + goto err_ioremap; + } + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + hw->bus.device = PCI_SLOT(pdev->devfn); + hw->bus.func = PCI_FUNC(pdev->devfn); + hw->bus.bus_id = pdev->bus->number; + + /* set up the locks for the AQ, do this only once in probe + * and destroy them only once in remove + */ + mutex_init(&hw->aq.asq_mutex); + mutex_init(&hw->aq.arq_mutex); + + spin_lock_init(&adapter->mac_vlan_list_lock); + spin_lock_init(&adapter->cloud_filter_list_lock); + + INIT_LIST_HEAD(&adapter->mac_filter_list); + INIT_LIST_HEAD(&adapter->vlan_filter_list); + INIT_LIST_HEAD(&adapter->cloud_filter_list); + + INIT_WORK(&adapter->reset_task, i40evf_reset_task); + INIT_WORK(&adapter->adminq_task, i40evf_adminq_task); + INIT_WORK(&adapter->watchdog_task, i40evf_watchdog_task); + INIT_DELAYED_WORK(&adapter->client_task, i40evf_client_task); + INIT_DELAYED_WORK(&adapter->init_task, i40evf_init_task); + schedule_delayed_work(&adapter->init_task, + msecs_to_jiffies(5 * (pdev->devfn & 0x07))); + + /* Setup the wait queue for indicating transition to down status */ + init_waitqueue_head(&adapter->down_waitqueue); + + return 0; + +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_disable_pcie_error_reporting(pdev); + pci_release_regions(pdev); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +#ifdef CONFIG_PM +/** + * i40evf_suspend - Power management suspend routine + * @pdev: PCI device information struct + * @state: unused + * + * Called when the system (VM) is entering sleep/suspend. + **/ +static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct i40evf_adapter *adapter = netdev_priv(netdev); + int retval = 0; + + netif_device_detach(netdev); + + while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, + &adapter->crit_section)) + usleep_range(500, 1000); + + if (netif_running(netdev)) { + rtnl_lock(); + i40evf_down(adapter); + rtnl_unlock(); + } + i40evf_free_misc_irq(adapter); + i40evf_reset_interrupt_capability(adapter); + + clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + + retval = pci_save_state(pdev); + if (retval) + return retval; + + pci_disable_device(pdev); + + return 0; +} + +/** + * i40evf_resume - Power management resume routine + * @pdev: PCI device information struct + * + * Called when the system (VM) is resumed from sleep/suspend. + **/ +static int i40evf_resume(struct pci_dev *pdev) +{ + struct i40evf_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + u32 err; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + /* pci_restore_state clears dev->state_saved so call + * pci_save_state to restore it. 
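+ * Without this, a subsequent suspend would find no valid saved state.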
+ */
+ pci_save_state(pdev);
+
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot enable PCI device from suspend.\n");
+ return err;
+ }
+ pci_set_master(pdev);
+
+ rtnl_lock();
+ err = i40evf_set_interrupt_capability(adapter);
+ if (err) {
+ rtnl_unlock();
+ dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
+ return err;
+ }
+ err = i40evf_request_misc_irq(adapter);
+ rtnl_unlock();
+ if (err) {
+ dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
+ return err;
+ }
+
+ schedule_work(&adapter->reset_task);
+
+ netif_device_attach(netdev);
+
+ return err;
+}
+
+#endif /* CONFIG_PM */
+/**
+ * i40evf_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * i40evf_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void i40evf_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40evf_vlan_filter *vlf, *vlftmp;
+ struct i40evf_mac_filter *f, *ftmp;
+ struct i40evf_cloud_filter *cf, *cftmp;
+ struct i40e_hw *hw = &adapter->hw;
+ int err;
+ /* Indicate we are in remove and not to run reset_task */
+ set_bit(__I40EVF_IN_REMOVE_TASK, &adapter->crit_section);
+ cancel_delayed_work_sync(&adapter->init_task);
+ cancel_work_sync(&adapter->reset_task);
+ cancel_delayed_work_sync(&adapter->client_task);
+ if (adapter->netdev_registered) {
+ unregister_netdev(netdev);
+ adapter->netdev_registered = false;
+ }
+ if (CLIENT_ALLOWED(adapter)) {
+ err = i40evf_lan_del_device(adapter);
+ if (err)
+ dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
+ err);
+ }
+
+ /* Shut down all the garbage mashers on the detention level */
+ adapter->state = __I40EVF_REMOVE;
+ adapter->aq_required = 0;
+ adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED;
+ i40evf_request_reset(adapter);
+ msleep(50);
+ /* If the FW isn't responding, kick it once, but only once. */
+ if (!i40evf_asq_done(hw)) {
+ i40evf_request_reset(adapter);
+ msleep(50);
+ }
+ i40evf_free_all_tx_resources(adapter);
+ i40evf_free_all_rx_resources(adapter);
+ i40evf_misc_irq_disable(adapter);
+ i40evf_free_misc_irq(adapter);
+ i40evf_reset_interrupt_capability(adapter);
+ i40evf_free_q_vectors(adapter);
+
+ if (adapter->watchdog_timer.function)
+ del_timer_sync(&adapter->watchdog_timer);
+
+ cancel_work_sync(&adapter->adminq_task);
+
+ i40evf_free_rss(adapter);
+
+ if (hw->aq.asq.count)
+ i40evf_shutdown_adminq(hw);
+
+ /* destroy the locks only once, here */
+ mutex_destroy(&hw->aq.arq_mutex);
+ mutex_destroy(&hw->aq.asq_mutex);
+
+ iounmap(hw->hw_addr);
+ pci_release_regions(pdev);
+ i40evf_free_all_tx_resources(adapter);
+ i40evf_free_all_rx_resources(adapter);
+ i40evf_free_queues(adapter);
+ kfree(adapter->vf_res);
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+ /* If we got removed before an up/down sequence, we've got a filter
+ * hanging out there that we need to get rid of.
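+ * Normally these are freed once the PF acknowledges the delete
+ * requests; on remove we simply free them directly.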
+ */
+ list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
+ list_del(&f->list);
+ kfree(f);
+ }
+ list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
+ list) {
+ list_del(&vlf->list);
+ kfree(vlf);
+ }
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
+ spin_lock_bh(&adapter->cloud_filter_list_lock);
+ list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
+ list_del(&cf->list);
+ kfree(cf);
+ }
+ spin_unlock_bh(&adapter->cloud_filter_list_lock);
+
+ free_netdev(netdev);
+
+ pci_disable_pcie_error_reporting(pdev);
+
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver i40evf_driver = {
+ .name = i40evf_driver_name,
+ .id_table = i40evf_pci_tbl,
+ .probe = i40evf_probe,
+ .remove = i40evf_remove,
+#ifdef CONFIG_PM
+ .suspend = i40evf_suspend,
+ .resume = i40evf_resume,
+#endif
+ .shutdown = i40evf_shutdown,
+};
+
+/**
+ * i40evf_init_module - Driver Registration Routine
+ *
+ * i40evf_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+static int __init i40evf_init_module(void)
+{
+ int ret;
+
+ pr_info("i40evf: %s - version %s\n", i40evf_driver_string,
+ i40evf_driver_version);
+
+ pr_info("%s\n", i40evf_copyright);
+
+ i40evf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
+ i40evf_driver_name);
+ if (!i40evf_wq) {
+ pr_err("%s: Failed to create workqueue\n", i40evf_driver_name);
+ return -ENOMEM;
+ }
+ ret = pci_register_driver(&i40evf_driver);
+ return ret;
+}
+
+module_init(i40evf_init_module);
+
+/**
+ * i40evf_exit_module - Driver Exit Cleanup Routine
+ *
+ * i40evf_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+static void __exit i40evf_exit_module(void)
+{
+ pci_unregister_driver(&i40evf_driver);
+ destroy_workqueue(i40evf_wq);
+}
+
+module_exit(i40evf_exit_module);
+
+/* i40evf_main.c */
diff --git a/drivers/net/ethernet/intel/iavf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/iavf/i40evf_virtchnl.c
new file mode 100644
index 000000000000..6579dabab78c
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/i40evf_virtchnl.c
@@ -0,0 +1,1465 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#include "i40evf.h"
+#include "i40e_prototype.h"
+#include "i40evf_client.h"
+
+/* busy wait delay in msec */
+#define I40EVF_BUSY_WAIT_DELAY 10
+#define I40EVF_BUSY_WAIT_COUNT 50
+
+/**
+ * i40evf_send_pf_msg
+ * @adapter: adapter structure
+ * @op: virtual channel opcode
+ * @msg: pointer to message buffer
+ * @len: message length
+ *
+ * Send message to PF and print status on failure.
+ **/
+static int i40evf_send_pf_msg(struct i40evf_adapter *adapter,
+ enum virtchnl_ops op, u8 *msg, u16 len)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ i40e_status err;
+
+ if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
+ return 0; /* nothing to see here, move along */
+
+ err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
+ if (err)
+ dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %s, aq_err %s\n",
+ op, i40evf_stat_str(hw, err),
+ i40evf_aq_str(hw, hw->aq.asq_last_status));
+ return err;
+}
+
+/**
+ * i40evf_send_api_ver
+ * @adapter: adapter structure
+ *
+ * Send API version admin queue message to the PF. The reply is not checked
+ * in this function. Returns 0 if the message was successfully
+ * sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
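+ * The PF's reply is picked up later by i40evf_verify_api_ver().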
+ **/ +int i40evf_send_api_ver(struct i40evf_adapter *adapter) +{ + struct virtchnl_version_info vvi; + + vvi.major = VIRTCHNL_VERSION_MAJOR; + vvi.minor = VIRTCHNL_VERSION_MINOR; + + return i40evf_send_pf_msg(adapter, VIRTCHNL_OP_VERSION, (u8 *)&vvi, + sizeof(vvi)); +} + +/** + * i40evf_verify_api_ver + * @adapter: adapter structure + * + * Compare API versions with the PF. Must be called after admin queue is + * initialized. Returns 0 if API versions match, -EIO if they do not, + * I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty, and any errors + * from the firmware are propagated. + **/ +int i40evf_verify_api_ver(struct i40evf_adapter *adapter) +{ + struct virtchnl_version_info *pf_vvi; + struct i40e_hw *hw = &adapter->hw; + struct i40e_arq_event_info event; + enum virtchnl_ops op; + i40e_status err; + + event.buf_len = I40EVF_MAX_AQ_BUF_SIZE; + event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); + if (!event.msg_buf) { + err = -ENOMEM; + goto out; + } + + while (1) { + err = i40evf_clean_arq_element(hw, &event, NULL); + /* When the AQ is empty, i40evf_clean_arq_element will return + * nonzero and this loop will terminate. + */ + if (err) + goto out_alloc; + op = + (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high); + if (op == VIRTCHNL_OP_VERSION) + break; + } + + + err = (i40e_status)le32_to_cpu(event.desc.cookie_low); + if (err) + goto out_alloc; + + if (op != VIRTCHNL_OP_VERSION) { + dev_info(&adapter->pdev->dev, "Invalid reply type %d from PF\n", + op); + err = -EIO; + goto out_alloc; + } + + pf_vvi = (struct virtchnl_version_info *)event.msg_buf; + adapter->pf_version = *pf_vvi; + + if ((pf_vvi->major > VIRTCHNL_VERSION_MAJOR) || + ((pf_vvi->major == VIRTCHNL_VERSION_MAJOR) && + (pf_vvi->minor > VIRTCHNL_VERSION_MINOR))) + err = -EIO; + +out_alloc: + kfree(event.msg_buf); +out: + return err; +} + +/** + * i40evf_send_vf_config_msg + * @adapter: adapter structure + * + * Send VF configuration request admin queue message to the PF. The reply + * is not checked in this function. Returns 0 if the message was + * successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not. + **/ +int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter) +{ + u32 caps; + + caps = VIRTCHNL_VF_OFFLOAD_L2 | + VIRTCHNL_VF_OFFLOAD_RSS_PF | + VIRTCHNL_VF_OFFLOAD_RSS_AQ | + VIRTCHNL_VF_OFFLOAD_RSS_REG | + VIRTCHNL_VF_OFFLOAD_VLAN | + VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | + VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 | + VIRTCHNL_VF_OFFLOAD_ENCAP | + VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM | + VIRTCHNL_VF_OFFLOAD_REQ_QUEUES | + VIRTCHNL_VF_OFFLOAD_ADQ; + + adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES; + adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG; + if (PF_IS_V11(adapter)) + return i40evf_send_pf_msg(adapter, + VIRTCHNL_OP_GET_VF_RESOURCES, + (u8 *)&caps, sizeof(caps)); + else + return i40evf_send_pf_msg(adapter, + VIRTCHNL_OP_GET_VF_RESOURCES, + NULL, 0); +} + +/** + * i40evf_validate_num_queues + * @adapter: adapter structure + * + * Validate that the number of queues the PF has sent in + * VIRTCHNL_OP_GET_VF_RESOURCES is not larger than the VF can handle. 
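+ * If it is, num_queue_pairs is clamped, both globally and in each
+ * VSI resource.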
+ **/ +static void i40evf_validate_num_queues(struct i40evf_adapter *adapter) +{ + if (adapter->vf_res->num_queue_pairs > I40EVF_MAX_REQ_QUEUES) { + struct virtchnl_vsi_resource *vsi_res; + int i; + + dev_info(&adapter->pdev->dev, "Received %d queues, but can only have a max of %d\n", + adapter->vf_res->num_queue_pairs, + I40EVF_MAX_REQ_QUEUES); + dev_info(&adapter->pdev->dev, "Fixing by reducing queues to %d\n", + I40EVF_MAX_REQ_QUEUES); + adapter->vf_res->num_queue_pairs = I40EVF_MAX_REQ_QUEUES; + for (i = 0; i < adapter->vf_res->num_vsis; i++) { + vsi_res = &adapter->vf_res->vsi_res[i]; + vsi_res->num_queue_pairs = I40EVF_MAX_REQ_QUEUES; + } + } +} + +/** + * i40evf_get_vf_config + * @adapter: private adapter structure + * + * Get VF configuration from PF and populate hw structure. Must be called after + * admin queue is initialized. Busy waits until response is received from PF, + * with maximum timeout. Response from PF is returned in the buffer for further + * processing by the caller. + **/ +int i40evf_get_vf_config(struct i40evf_adapter *adapter) +{ + struct i40e_hw *hw = &adapter->hw; + struct i40e_arq_event_info event; + enum virtchnl_ops op; + i40e_status err; + u16 len; + + len = sizeof(struct virtchnl_vf_resource) + + I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource); + event.buf_len = len; + event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); + if (!event.msg_buf) { + err = -ENOMEM; + goto out; + } + + while (1) { + /* When the AQ is empty, i40evf_clean_arq_element will return + * nonzero and this loop will terminate. + */ + err = i40evf_clean_arq_element(hw, &event, NULL); + if (err) + goto out_alloc; + op = + (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high); + if (op == VIRTCHNL_OP_GET_VF_RESOURCES) + break; + } + + err = (i40e_status)le32_to_cpu(event.desc.cookie_low); + memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len)); + + /* some PFs send more queues than we should have so validate that + * we aren't getting too many queues + */ + if (!err) + i40evf_validate_num_queues(adapter); + i40e_vf_parse_hw_config(hw, adapter->vf_res); +out_alloc: + kfree(event.msg_buf); +out: + return err; +} + +/** + * i40evf_configure_queues + * @adapter: adapter structure + * + * Request that the PF set up our (previously allocated) queues. + **/ +void i40evf_configure_queues(struct i40evf_adapter *adapter) +{ + struct virtchnl_vsi_queue_config_info *vqci; + struct virtchnl_queue_pair_info *vqpi; + int pairs = adapter->num_active_queues; + int i, len, max_frame = I40E_MAX_RXBUFFER; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n", + adapter->current_op); + return; + } + adapter->current_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES; + len = sizeof(struct virtchnl_vsi_queue_config_info) + + (sizeof(struct virtchnl_queue_pair_info) * pairs); + vqci = kzalloc(len, GFP_KERNEL); + if (!vqci) + return; + + /* Limit maximum frame size when jumbo frames is not enabled */ + if (!(adapter->flags & I40EVF_FLAG_LEGACY_RX) && + (adapter->netdev->mtu <= ETH_DATA_LEN)) + max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN; + + vqci->vsi_id = adapter->vsi_res->vsi_id; + vqci->num_queue_pairs = pairs; + vqpi = vqci->qpair; + /* Size check is not needed here - HW max is 16 queue pairs, and we + * can fit info for 31 of them into the AQ buffer before it overflows. 
+ */ + for (i = 0; i < pairs; i++) { + vqpi->txq.vsi_id = vqci->vsi_id; + vqpi->txq.queue_id = i; + vqpi->txq.ring_len = adapter->tx_rings[i].count; + vqpi->txq.dma_ring_addr = adapter->tx_rings[i].dma; + vqpi->rxq.vsi_id = vqci->vsi_id; + vqpi->rxq.queue_id = i; + vqpi->rxq.ring_len = adapter->rx_rings[i].count; + vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma; + vqpi->rxq.max_pkt_size = max_frame; + vqpi->rxq.databuffer_size = + ALIGN(adapter->rx_rings[i].rx_buf_len, + BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT)); + vqpi++; + } + + adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_QUEUES; + i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_VSI_QUEUES, + (u8 *)vqci, len); + kfree(vqci); +} + +/** + * i40evf_enable_queues + * @adapter: adapter structure + * + * Request that the PF enable all of our queues. + **/ +void i40evf_enable_queues(struct i40evf_adapter *adapter) +{ + struct virtchnl_queue_select vqs; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot enable queues, command %d pending\n", + adapter->current_op); + return; + } + adapter->current_op = VIRTCHNL_OP_ENABLE_QUEUES; + vqs.vsi_id = adapter->vsi_res->vsi_id; + vqs.tx_queues = BIT(adapter->num_active_queues) - 1; + vqs.rx_queues = vqs.tx_queues; + adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES; + i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_QUEUES, + (u8 *)&vqs, sizeof(vqs)); +} + +/** + * i40evf_disable_queues + * @adapter: adapter structure + * + * Request that the PF disable all of our queues. + **/ +void i40evf_disable_queues(struct i40evf_adapter *adapter) +{ + struct virtchnl_queue_select vqs; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot disable queues, command %d pending\n", + adapter->current_op); + return; + } + adapter->current_op = VIRTCHNL_OP_DISABLE_QUEUES; + vqs.vsi_id = adapter->vsi_res->vsi_id; + vqs.tx_queues = BIT(adapter->num_active_queues) - 1; + vqs.rx_queues = vqs.tx_queues; + adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES; + i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_QUEUES, + (u8 *)&vqs, sizeof(vqs)); +} + +/** + * i40evf_map_queues + * @adapter: adapter structure + * + * Request that the PF map queues to interrupt vectors. Misc causes, including + * admin queue, are always mapped to vector 0. 
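+ * Traffic queue vectors therefore start at NONQ_VECS.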
+ **/ +void i40evf_map_queues(struct i40evf_adapter *adapter) +{ + struct virtchnl_irq_map_info *vimi; + struct virtchnl_vector_map *vecmap; + int v_idx, q_vectors, len; + struct i40e_q_vector *q_vector; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot map queues to vectors, command %d pending\n", + adapter->current_op); + return; + } + adapter->current_op = VIRTCHNL_OP_CONFIG_IRQ_MAP; + + q_vectors = adapter->num_msix_vectors - NONQ_VECS; + + len = sizeof(struct virtchnl_irq_map_info) + + (adapter->num_msix_vectors * + sizeof(struct virtchnl_vector_map)); + vimi = kzalloc(len, GFP_KERNEL); + if (!vimi) + return; + + vimi->num_vectors = adapter->num_msix_vectors; + /* Queue vectors first */ + for (v_idx = 0; v_idx < q_vectors; v_idx++) { + q_vector = &adapter->q_vectors[v_idx]; + vecmap = &vimi->vecmap[v_idx]; + + vecmap->vsi_id = adapter->vsi_res->vsi_id; + vecmap->vector_id = v_idx + NONQ_VECS; + vecmap->txq_map = q_vector->ring_mask; + vecmap->rxq_map = q_vector->ring_mask; + vecmap->rxitr_idx = I40E_RX_ITR; + vecmap->txitr_idx = I40E_TX_ITR; + } + /* Misc vector last - this is only for AdminQ messages */ + vecmap = &vimi->vecmap[v_idx]; + vecmap->vsi_id = adapter->vsi_res->vsi_id; + vecmap->vector_id = 0; + vecmap->txq_map = 0; + vecmap->rxq_map = 0; + + adapter->aq_required &= ~I40EVF_FLAG_AQ_MAP_VECTORS; + i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_IRQ_MAP, + (u8 *)vimi, len); + kfree(vimi); +} + +/** + * i40evf_request_queues + * @adapter: adapter structure + * @num: number of requested queues + * + * We get a default number of queues from the PF. This enables us to request a + * different number. Returns 0 on success, negative on failure + **/ +int i40evf_request_queues(struct i40evf_adapter *adapter, int num) +{ + struct virtchnl_vf_res_request vfres; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot request queues, command %d pending\n", + adapter->current_op); + return -EBUSY; + } + + vfres.num_queue_pairs = num; + + adapter->current_op = VIRTCHNL_OP_REQUEST_QUEUES; + adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED; + return i40evf_send_pf_msg(adapter, VIRTCHNL_OP_REQUEST_QUEUES, + (u8 *)&vfres, sizeof(vfres)); +} + +/** + * i40evf_add_ether_addrs + * @adapter: adapter structure + * + * Request that the PF add one or more addresses to our filters. 
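+ * If more filters are marked for addition than fit in one AQ buffer,
+ * the request is truncated and the AQ flag is left set so that the
+ * remainder goes out in a follow-up message.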
+ **/ +void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) +{ + struct virtchnl_ether_addr_list *veal; + int len, i = 0, count = 0; + struct i40evf_mac_filter *f; + bool more = false; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n", + adapter->current_op); + return; + } + + spin_lock_bh(&adapter->mac_vlan_list_lock); + + list_for_each_entry(f, &adapter->mac_filter_list, list) { + if (f->add) + count++; + } + if (!count) { + adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER; + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return; + } + adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR; + + len = sizeof(struct virtchnl_ether_addr_list) + + (count * sizeof(struct virtchnl_ether_addr)); + if (len > I40EVF_MAX_AQ_BUF_SIZE) { + dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n"); + count = (I40EVF_MAX_AQ_BUF_SIZE - + sizeof(struct virtchnl_ether_addr_list)) / + sizeof(struct virtchnl_ether_addr); + len = sizeof(struct virtchnl_ether_addr_list) + + (count * sizeof(struct virtchnl_ether_addr)); + more = true; + } + + veal = kzalloc(len, GFP_ATOMIC); + if (!veal) { + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return; + } + + veal->vsi_id = adapter->vsi_res->vsi_id; + veal->num_elements = count; + list_for_each_entry(f, &adapter->mac_filter_list, list) { + if (f->add) { + ether_addr_copy(veal->list[i].addr, f->macaddr); + i++; + f->add = false; + if (i == count) + break; + } + } + if (!more) + adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER; + + spin_unlock_bh(&adapter->mac_vlan_list_lock); + + i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR, + (u8 *)veal, len); + kfree(veal); +} + +/** + * i40evf_del_ether_addrs + * @adapter: adapter structure + * + * Request that the PF remove one or more addresses from our filters. 
+ **/ +void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) +{ + struct virtchnl_ether_addr_list *veal; + struct i40evf_mac_filter *f, *ftmp; + int len, i = 0, count = 0; + bool more = false; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot remove filters, command %d pending\n", + adapter->current_op); + return; + } + + spin_lock_bh(&adapter->mac_vlan_list_lock); + + list_for_each_entry(f, &adapter->mac_filter_list, list) { + if (f->remove) + count++; + } + if (!count) { + adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER; + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return; + } + adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR; + + len = sizeof(struct virtchnl_ether_addr_list) + + (count * sizeof(struct virtchnl_ether_addr)); + if (len > I40EVF_MAX_AQ_BUF_SIZE) { + dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n"); + count = (I40EVF_MAX_AQ_BUF_SIZE - + sizeof(struct virtchnl_ether_addr_list)) / + sizeof(struct virtchnl_ether_addr); + len = sizeof(struct virtchnl_ether_addr_list) + + (count * sizeof(struct virtchnl_ether_addr)); + more = true; + } + veal = kzalloc(len, GFP_ATOMIC); + if (!veal) { + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return; + } + + veal->vsi_id = adapter->vsi_res->vsi_id; + veal->num_elements = count; + list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { + if (f->remove) { + ether_addr_copy(veal->list[i].addr, f->macaddr); + i++; + list_del(&f->list); + kfree(f); + if (i == count) + break; + } + } + if (!more) + adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER; + + spin_unlock_bh(&adapter->mac_vlan_list_lock); + + i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR, + (u8 *)veal, len); + kfree(veal); +} + +/** + * i40evf_add_vlans + * @adapter: adapter structure + * + * Request that the PF add one or more VLAN filters to our VSI. 
+ **/ +void i40evf_add_vlans(struct i40evf_adapter *adapter) +{ + struct virtchnl_vlan_filter_list *vvfl; + int len, i = 0, count = 0; + struct i40evf_vlan_filter *f; + bool more = false; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot add VLANs, command %d pending\n", + adapter->current_op); + return; + } + + spin_lock_bh(&adapter->mac_vlan_list_lock); + + list_for_each_entry(f, &adapter->vlan_filter_list, list) { + if (f->add) + count++; + } + if (!count) { + adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER; + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return; + } + adapter->current_op = VIRTCHNL_OP_ADD_VLAN; + + len = sizeof(struct virtchnl_vlan_filter_list) + + (count * sizeof(u16)); + if (len > I40EVF_MAX_AQ_BUF_SIZE) { + dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n"); + count = (I40EVF_MAX_AQ_BUF_SIZE - + sizeof(struct virtchnl_vlan_filter_list)) / + sizeof(u16); + len = sizeof(struct virtchnl_vlan_filter_list) + + (count * sizeof(u16)); + more = true; + } + vvfl = kzalloc(len, GFP_ATOMIC); + if (!vvfl) { + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return; + } + + vvfl->vsi_id = adapter->vsi_res->vsi_id; + vvfl->num_elements = count; + list_for_each_entry(f, &adapter->vlan_filter_list, list) { + if (f->add) { + vvfl->vlan_id[i] = f->vlan; + i++; + f->add = false; + if (i == count) + break; + } + } + if (!more) + adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER; + + spin_unlock_bh(&adapter->mac_vlan_list_lock); + + i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len); + kfree(vvfl); +} + +/** + * i40evf_del_vlans + * @adapter: adapter structure + * + * Request that the PF remove one or more VLAN filters from our VSI. 
+ **/ +void i40evf_del_vlans(struct i40evf_adapter *adapter) +{ + struct virtchnl_vlan_filter_list *vvfl; + struct i40evf_vlan_filter *f, *ftmp; + int len, i = 0, count = 0; + bool more = false; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot remove VLANs, command %d pending\n", + adapter->current_op); + return; + } + + spin_lock_bh(&adapter->mac_vlan_list_lock); + + list_for_each_entry(f, &adapter->vlan_filter_list, list) { + if (f->remove) + count++; + } + if (!count) { + adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER; + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return; + } + adapter->current_op = VIRTCHNL_OP_DEL_VLAN; + + len = sizeof(struct virtchnl_vlan_filter_list) + + (count * sizeof(u16)); + if (len > I40EVF_MAX_AQ_BUF_SIZE) { + dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n"); + count = (I40EVF_MAX_AQ_BUF_SIZE - + sizeof(struct virtchnl_vlan_filter_list)) / + sizeof(u16); + len = sizeof(struct virtchnl_vlan_filter_list) + + (count * sizeof(u16)); + more = true; + } + vvfl = kzalloc(len, GFP_ATOMIC); + if (!vvfl) { + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return; + } + + vvfl->vsi_id = adapter->vsi_res->vsi_id; + vvfl->num_elements = count; + list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { + if (f->remove) { + vvfl->vlan_id[i] = f->vlan; + i++; + list_del(&f->list); + kfree(f); + if (i == count) + break; + } + } + if (!more) + adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER; + + spin_unlock_bh(&adapter->mac_vlan_list_lock); + + i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len); + kfree(vvfl); +} + +/** + * i40evf_set_promiscuous + * @adapter: adapter structure + * @flags: bitmask to control unicast/multicast promiscuous. + * + * Request that the PF enable promiscuous mode for our VSI. + **/ +void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags) +{ + struct virtchnl_promisc_info vpi; + int promisc_all; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot set promiscuous mode, command %d pending\n", + adapter->current_op); + return; + } + + promisc_all = FLAG_VF_UNICAST_PROMISC | + FLAG_VF_MULTICAST_PROMISC; + if ((flags & promisc_all) == promisc_all) { + adapter->flags |= I40EVF_FLAG_PROMISC_ON; + adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_PROMISC; + dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n"); + } + + if (flags & FLAG_VF_MULTICAST_PROMISC) { + adapter->flags |= I40EVF_FLAG_ALLMULTI_ON; + adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_ALLMULTI; + dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n"); + } + + if (!flags) { + adapter->flags &= ~(I40EVF_FLAG_PROMISC_ON | + I40EVF_FLAG_ALLMULTI_ON); + adapter->aq_required &= ~(I40EVF_FLAG_AQ_RELEASE_PROMISC | + I40EVF_FLAG_AQ_RELEASE_ALLMULTI); + dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n"); + } + + adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE; + vpi.vsi_id = adapter->vsi_res->vsi_id; + vpi.flags = flags; + i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, + (u8 *)&vpi, sizeof(vpi)); +} + +/** + * i40evf_request_stats + * @adapter: adapter structure + * + * Request VSI statistics from PF. 
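+ * The reply is consumed asynchronously in i40evf_virtchnl_completion().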
+ **/ +void i40evf_request_stats(struct i40evf_adapter *adapter) +{ + struct virtchnl_queue_select vqs; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* no error message, this isn't crucial */ + return; + } + adapter->current_op = VIRTCHNL_OP_GET_STATS; + vqs.vsi_id = adapter->vsi_res->vsi_id; + /* queue maps are ignored for this message - only the vsi is used */ + if (i40evf_send_pf_msg(adapter, VIRTCHNL_OP_GET_STATS, + (u8 *)&vqs, sizeof(vqs))) + /* if the request failed, don't lock out others */ + adapter->current_op = VIRTCHNL_OP_UNKNOWN; +} + +/** + * i40evf_get_hena + * @adapter: adapter structure + * + * Request hash enable capabilities from PF + **/ +void i40evf_get_hena(struct i40evf_adapter *adapter) +{ + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot get RSS hash capabilities, command %d pending\n", + adapter->current_op); + return; + } + adapter->current_op = VIRTCHNL_OP_GET_RSS_HENA_CAPS; + adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_HENA; + i40evf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HENA_CAPS, + NULL, 0); +} + +/** + * i40evf_set_hena + * @adapter: adapter structure + * + * Request the PF to set our RSS hash capabilities + **/ +void i40evf_set_hena(struct i40evf_adapter *adapter) +{ + struct virtchnl_rss_hena vrh; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot set RSS hash enable, command %d pending\n", + adapter->current_op); + return; + } + vrh.hena = adapter->hena; + adapter->current_op = VIRTCHNL_OP_SET_RSS_HENA; + adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_HENA; + i40evf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HENA, + (u8 *)&vrh, sizeof(vrh)); +} + +/** + * i40evf_set_rss_key + * @adapter: adapter structure + * + * Request the PF to set our RSS hash key + **/ +void i40evf_set_rss_key(struct i40evf_adapter *adapter) +{ + struct virtchnl_rss_key *vrk; + int len; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot set RSS key, command %d pending\n", + adapter->current_op); + return; + } + len = sizeof(struct virtchnl_rss_key) + + (adapter->rss_key_size * sizeof(u8)) - 1; + vrk = kzalloc(len, GFP_KERNEL); + if (!vrk) + return; + vrk->vsi_id = adapter->vsi.id; + vrk->key_len = adapter->rss_key_size; + memcpy(vrk->key, adapter->rss_key, adapter->rss_key_size); + + adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_KEY; + adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_KEY; + i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_KEY, + (u8 *)vrk, len); + kfree(vrk); +} + +/** + * i40evf_set_rss_lut + * @adapter: adapter structure + * + * Request the PF to set our RSS lookup table + **/ +void i40evf_set_rss_lut(struct i40evf_adapter *adapter) +{ + struct virtchnl_rss_lut *vrl; + int len; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot set RSS LUT, command %d pending\n", + adapter->current_op); + return; + } + len = sizeof(struct virtchnl_rss_lut) + + (adapter->rss_lut_size * sizeof(u8)) - 1; + vrl = kzalloc(len, GFP_KERNEL); + if (!vrl) + return; + vrl->vsi_id = adapter->vsi.id; + vrl->lut_entries = adapter->rss_lut_size; + memcpy(vrl->lut, adapter->rss_lut, adapter->rss_lut_size); + adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_LUT; + adapter->aq_required &= 
~I40EVF_FLAG_AQ_SET_RSS_LUT;
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_LUT,
+ (u8 *)vrl, len);
+ kfree(vrl);
+}
+
+/**
+ * i40evf_enable_vlan_stripping
+ * @adapter: adapter structure
+ *
+ * Request VLAN header stripping to be enabled
+ **/
+void i40evf_enable_vlan_stripping(struct i40evf_adapter *adapter)
+{
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot enable stripping, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
+ adapter->current_op = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
+ NULL, 0);
+}
+
+/**
+ * i40evf_disable_vlan_stripping
+ * @adapter: adapter structure
+ *
+ * Request VLAN header stripping to be disabled
+ **/
+void i40evf_disable_vlan_stripping(struct i40evf_adapter *adapter)
+{
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot disable stripping, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
+ adapter->current_op = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
+ NULL, 0);
+}
+
+/**
+ * i40evf_print_link_message - print link up or down
+ * @adapter: adapter structure
+ *
+ * Log a message telling the world of our wondrous link status
+ */
+static void i40evf_print_link_message(struct i40evf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ char *speed = "Unknown ";
+
+ if (!adapter->link_up) {
+ netdev_info(netdev, "NIC Link is Down\n");
+ return;
+ }
+
+ switch (adapter->link_speed) {
+ case I40E_LINK_SPEED_40GB:
+ speed = "40 G";
+ break;
+ case I40E_LINK_SPEED_25GB:
+ speed = "25 G";
+ break;
+ case I40E_LINK_SPEED_20GB:
+ speed = "20 G";
+ break;
+ case I40E_LINK_SPEED_10GB:
+ speed = "10 G";
+ break;
+ case I40E_LINK_SPEED_1GB:
+ speed = "1000 M";
+ break;
+ case I40E_LINK_SPEED_100MB:
+ speed = "100 M";
+ break;
+ default:
+ break;
+ }
+
+ netdev_info(netdev, "NIC Link is Up %sbps Full Duplex\n", speed);
+}
+
+/**
+ * i40evf_enable_channels
+ * @adapter: adapter structure
+ *
+ * Request that the PF enable channels as specified by
+ * the user via tc tool.
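+ * The per-TC queue counts and max Tx rates come from
+ * adapter->ch_config, filled in when mqprio is configured
+ * (see i40evf_setup_tc).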
+ **/
+void i40evf_enable_channels(struct i40evf_adapter *adapter)
+{
+ struct virtchnl_tc_info *vti = NULL;
+ u16 len;
+ int i;
+
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
+
+ len = (adapter->num_tc * sizeof(struct virtchnl_channel_info)) +
+ sizeof(struct virtchnl_tc_info);
+
+ vti = kzalloc(len, GFP_KERNEL);
+ if (!vti)
+ return;
+ vti->num_tc = adapter->num_tc;
+ for (i = 0; i < vti->num_tc; i++) {
+ vti->list[i].count = adapter->ch_config.ch_info[i].count;
+ vti->list[i].offset = adapter->ch_config.ch_info[i].offset;
+ vti->list[i].pad = 0;
+ vti->list[i].max_tx_rate =
+ adapter->ch_config.ch_info[i].max_tx_rate;
+ }
+
+ adapter->ch_config.state = __I40EVF_TC_RUNNING;
+ adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED;
+ adapter->current_op = VIRTCHNL_OP_ENABLE_CHANNELS;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_CHANNELS;
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_CHANNELS,
+ (u8 *)vti, len);
+ kfree(vti);
+}
+
+/**
+ * i40evf_disable_channels
+ * @adapter: adapter structure
+ *
+ * Request that the PF disable channels that are configured
+ **/
+void i40evf_disable_channels(struct i40evf_adapter *adapter)
+{
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
+
+ adapter->ch_config.state = __I40EVF_TC_INVALID;
+ adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED;
+ adapter->current_op = VIRTCHNL_OP_DISABLE_CHANNELS;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_CHANNELS;
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_CHANNELS,
+ NULL, 0);
+}
+
+/**
+ * i40evf_print_cloud_filter
+ * @adapter: adapter structure
+ * @f: cloud filter to print
+ *
+ * Print the cloud filter
+ **/
+static void i40evf_print_cloud_filter(struct i40evf_adapter *adapter,
+ struct virtchnl_filter *f)
+{
+ switch (f->flow_type) {
+ case VIRTCHNL_TCP_V4_FLOW:
+ dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI4 src_ip %pI4 dst_port %hu src_port %hu\n",
+ &f->data.tcp_spec.dst_mac,
+ &f->data.tcp_spec.src_mac,
+ ntohs(f->data.tcp_spec.vlan_id),
+ &f->data.tcp_spec.dst_ip[0],
+ &f->data.tcp_spec.src_ip[0],
+ ntohs(f->data.tcp_spec.dst_port),
+ ntohs(f->data.tcp_spec.src_port));
+ break;
+ case VIRTCHNL_TCP_V6_FLOW:
+ dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI6 src_ip %pI6 dst_port %hu src_port %hu\n",
+ &f->data.tcp_spec.dst_mac,
+ &f->data.tcp_spec.src_mac,
+ ntohs(f->data.tcp_spec.vlan_id),
+ &f->data.tcp_spec.dst_ip,
+ &f->data.tcp_spec.src_ip,
+ ntohs(f->data.tcp_spec.dst_port),
+ ntohs(f->data.tcp_spec.src_port));
+ break;
+ }
+}
+
+/**
+ * i40evf_add_cloud_filter
+ * @adapter: adapter structure
+ *
+ * Request that the PF add cloud filters as specified
+ * by the user via tc tool.
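+ * Each pending filter is sent in its own message and sits in
+ * __I40EVF_CF_ADD_PENDING until the PF replies.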
+ **/ +void i40evf_add_cloud_filter(struct i40evf_adapter *adapter) +{ + struct i40evf_cloud_filter *cf; + struct virtchnl_filter *f; + int len = 0, count = 0; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot add cloud filter, command %d pending\n", + adapter->current_op); + return; + } + list_for_each_entry(cf, &adapter->cloud_filter_list, list) { + if (cf->add) { + count++; + break; + } + } + if (!count) { + adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_CLOUD_FILTER; + return; + } + adapter->current_op = VIRTCHNL_OP_ADD_CLOUD_FILTER; + + len = sizeof(struct virtchnl_filter); + f = kzalloc(len, GFP_KERNEL); + if (!f) + return; + + list_for_each_entry(cf, &adapter->cloud_filter_list, list) { + if (cf->add) { + memcpy(f, &cf->f, sizeof(struct virtchnl_filter)); + cf->add = false; + cf->state = __I40EVF_CF_ADD_PENDING; + i40evf_send_pf_msg(adapter, + VIRTCHNL_OP_ADD_CLOUD_FILTER, + (u8 *)f, len); + } + } + kfree(f); +} + +/** + * i40evf_del_cloud_filter + * @adapter: adapter structure + * + * Request that the PF delete cloud filters as specified + * by the user via tc tool. + **/ +void i40evf_del_cloud_filter(struct i40evf_adapter *adapter) +{ + struct i40evf_cloud_filter *cf, *cftmp; + struct virtchnl_filter *f; + int len = 0, count = 0; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot remove cloud filter, command %d pending\n", + adapter->current_op); + return; + } + list_for_each_entry(cf, &adapter->cloud_filter_list, list) { + if (cf->del) { + count++; + break; + } + } + if (!count) { + adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_CLOUD_FILTER; + return; + } + adapter->current_op = VIRTCHNL_OP_DEL_CLOUD_FILTER; + + len = sizeof(struct virtchnl_filter); + f = kzalloc(len, GFP_KERNEL); + if (!f) + return; + + list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) { + if (cf->del) { + memcpy(f, &cf->f, sizeof(struct virtchnl_filter)); + cf->del = false; + cf->state = __I40EVF_CF_DEL_PENDING; + i40evf_send_pf_msg(adapter, + VIRTCHNL_OP_DEL_CLOUD_FILTER, + (u8 *)f, len); + } + } + kfree(f); +} + +/** + * i40evf_request_reset + * @adapter: adapter structure + * + * Request that the PF reset this VF. No response is expected. + **/ +void i40evf_request_reset(struct i40evf_adapter *adapter) +{ + /* Don't check CURRENT_OP - this is always higher priority */ + i40evf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0); + adapter->current_op = VIRTCHNL_OP_UNKNOWN; +} + +/** + * i40evf_virtchnl_completion + * @adapter: adapter structure + * @v_opcode: opcode sent by PF + * @v_retval: retval sent by PF + * @msg: message sent by PF + * @msglen: message length + * + * Asynchronous completion function for admin queue messages. Rather than busy + * wait, we fire off our requests and assume that no errors will be returned. + * This function handles the reply messages. 
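+ * adapter->current_op is cleared on exit so the next request can be
+ * issued.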
+ **/
+void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
+ enum virtchnl_ops v_opcode,
+ i40e_status v_retval,
+ u8 *msg, u16 msglen)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (v_opcode == VIRTCHNL_OP_EVENT) {
+ struct virtchnl_pf_event *vpe =
+ (struct virtchnl_pf_event *)msg;
+ bool link_up = vpe->event_data.link_event.link_status;
+ switch (vpe->event) {
+ case VIRTCHNL_EVENT_LINK_CHANGE:
+ adapter->link_speed =
+ vpe->event_data.link_event.link_speed;
+
+ /* we've already got the right link status, bail */
+ if (adapter->link_up == link_up)
+ break;
+
+ if (link_up) {
+ /* If we get link up message and start queues
+ * before our queues are configured it will
+ * trigger a TX hang. In that case, just ignore
+ * the link status message; we'll get another one
+ * after we enable queues and are actually ready
+ * to send traffic.
+ */
+ if (adapter->state != __I40EVF_RUNNING)
+ break;
+
+ /* For ADq enabled VF, we reconfigure VSIs and
+ * re-allocate queues. Hence wait till all
+ * queues are enabled.
+ */
+ if (adapter->flags &
+ I40EVF_FLAG_QUEUES_DISABLED)
+ break;
+ }
+
+ adapter->link_up = link_up;
+ if (link_up) {
+ netif_tx_start_all_queues(netdev);
+ netif_carrier_on(netdev);
+ } else {
+ netif_tx_stop_all_queues(netdev);
+ netif_carrier_off(netdev);
+ }
+ i40evf_print_link_message(adapter);
+ break;
+ case VIRTCHNL_EVENT_RESET_IMPENDING:
+ dev_info(&adapter->pdev->dev, "Reset warning received from the PF\n");
+ if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
+ adapter->flags |= I40EVF_FLAG_RESET_PENDING;
+ dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
+ schedule_work(&adapter->reset_task);
+ }
+ break;
+ default:
+ dev_err(&adapter->pdev->dev, "Unknown event %d from PF\n",
+ vpe->event);
+ break;
+ }
+ return;
+ }
+ if (v_retval) {
+ switch (v_opcode) {
+ case VIRTCHNL_OP_ADD_VLAN:
+ dev_err(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n",
+ i40evf_stat_str(&adapter->hw, v_retval));
+ break;
+ case VIRTCHNL_OP_ADD_ETH_ADDR:
+ dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n",
+ i40evf_stat_str(&adapter->hw, v_retval));
+ break;
+ case VIRTCHNL_OP_DEL_VLAN:
+ dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n",
+ i40evf_stat_str(&adapter->hw, v_retval));
+ break;
+ case VIRTCHNL_OP_DEL_ETH_ADDR:
+ dev_err(&adapter->pdev->dev, "Failed to delete MAC filter, error %s\n",
+ i40evf_stat_str(&adapter->hw, v_retval));
+ break;
+ case VIRTCHNL_OP_ENABLE_CHANNELS:
+ dev_err(&adapter->pdev->dev, "Failed to configure queue channels, error %s\n",
+ i40evf_stat_str(&adapter->hw, v_retval));
+ adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED;
+ adapter->ch_config.state = __I40EVF_TC_INVALID;
+ netdev_reset_tc(netdev);
+ netif_tx_start_all_queues(netdev);
+ break;
+ case VIRTCHNL_OP_DISABLE_CHANNELS:
+ dev_err(&adapter->pdev->dev, "Failed to disable queue channels, error %s\n",
+ i40evf_stat_str(&adapter->hw, v_retval));
+ adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED;
+ adapter->ch_config.state = __I40EVF_TC_RUNNING;
+ netif_tx_start_all_queues(netdev);
+ break;
+ case VIRTCHNL_OP_ADD_CLOUD_FILTER: {
+ struct i40evf_cloud_filter *cf, *cftmp;
+
+ list_for_each_entry_safe(cf, cftmp,
+ &adapter->cloud_filter_list,
+ list) {
+ if (cf->state == __I40EVF_CF_ADD_PENDING) {
+ cf->state = __I40EVF_CF_INVALID;
+ dev_info(&adapter->pdev->dev, "Failed to add cloud filter, error %s\n",
+ i40evf_stat_str(&adapter->hw,
+ v_retval));
+ i40evf_print_cloud_filter(adapter,
+ &cf->f);
+ list_del(&cf->list);
kfree(cf); + adapter->num_cloud_filters--; + } + } + } + break; + case VIRTCHNL_OP_DEL_CLOUD_FILTER: { + struct i40evf_cloud_filter *cf; + + list_for_each_entry(cf, &adapter->cloud_filter_list, + list) { + if (cf->state == __I40EVF_CF_DEL_PENDING) { + cf->state = __I40EVF_CF_ACTIVE; + dev_info(&adapter->pdev->dev, "Failed to del cloud filter, error %s\n", + i40evf_stat_str(&adapter->hw, + v_retval)); + i40evf_print_cloud_filter(adapter, + &cf->f); + } + } + } + break; + default: + dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n", + v_retval, + i40evf_stat_str(&adapter->hw, v_retval), + v_opcode); + } + } + switch (v_opcode) { + case VIRTCHNL_OP_GET_STATS: { + struct i40e_eth_stats *stats = + (struct i40e_eth_stats *)msg; + netdev->stats.rx_packets = stats->rx_unicast + + stats->rx_multicast + + stats->rx_broadcast; + netdev->stats.tx_packets = stats->tx_unicast + + stats->tx_multicast + + stats->tx_broadcast; + netdev->stats.rx_bytes = stats->rx_bytes; + netdev->stats.tx_bytes = stats->tx_bytes; + netdev->stats.tx_errors = stats->tx_errors; + netdev->stats.rx_dropped = stats->rx_discards; + netdev->stats.tx_dropped = stats->tx_discards; + adapter->current_stats = *stats; + } + break; + case VIRTCHNL_OP_GET_VF_RESOURCES: { + u16 len = sizeof(struct virtchnl_vf_resource) + + I40E_MAX_VF_VSI * + sizeof(struct virtchnl_vsi_resource); + memcpy(adapter->vf_res, msg, min(msglen, len)); + i40evf_validate_num_queues(adapter); + i40e_vf_parse_hw_config(&adapter->hw, adapter->vf_res); + if (is_zero_ether_addr(adapter->hw.mac.addr)) { + /* restore current mac address */ + ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); + } else { + /* refresh current mac address if changed */ + ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); + ether_addr_copy(netdev->perm_addr, + adapter->hw.mac.addr); + } + i40evf_process_config(adapter); + } + break; + case VIRTCHNL_OP_ENABLE_QUEUES: + /* enable transmits */ + i40evf_irq_enable(adapter, true); + adapter->flags &= ~I40EVF_FLAG_QUEUES_DISABLED; + break; + case VIRTCHNL_OP_DISABLE_QUEUES: + i40evf_free_all_tx_resources(adapter); + i40evf_free_all_rx_resources(adapter); + if (adapter->state == __I40EVF_DOWN_PENDING) { + adapter->state = __I40EVF_DOWN; + wake_up(&adapter->down_waitqueue); + } + break; + case VIRTCHNL_OP_VERSION: + case VIRTCHNL_OP_CONFIG_IRQ_MAP: + /* Don't display an error if we get these out of sequence. + * If the firmware needed to get kicked, we'll get these and + * it's no problem. + */ + if (v_opcode != adapter->current_op) + return; + break; + case VIRTCHNL_OP_IWARP: + /* Gobble zero-length replies from the PF. They indicate that + * a previous message was received OK, and the client doesn't + * care about that. 
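+ * Replies that do carry a payload are passed on to the client driver + * below, provided the client is enabled.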
+ */ + if (msglen && CLIENT_ENABLED(adapter)) + i40evf_notify_client_message(&adapter->vsi, + msg, msglen); + break; + + case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: + adapter->client_pending &= + ~(BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP)); + break; + case VIRTCHNL_OP_GET_RSS_HENA_CAPS: { + struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg; + if (msglen == sizeof(*vrh)) + adapter->hena = vrh->hena; + else + dev_warn(&adapter->pdev->dev, + "Invalid message %d from PF\n", v_opcode); + } + break; + case VIRTCHNL_OP_REQUEST_QUEUES: { + struct virtchnl_vf_res_request *vfres = + (struct virtchnl_vf_res_request *)msg; + if (vfres->num_queue_pairs != adapter->num_req_queues) { + dev_info(&adapter->pdev->dev, + "Requested %d queues, PF can support %d\n", + adapter->num_req_queues, + vfres->num_queue_pairs); + adapter->num_req_queues = 0; + adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED; + } + } + break; + case VIRTCHNL_OP_ADD_CLOUD_FILTER: { + struct i40evf_cloud_filter *cf; + + list_for_each_entry(cf, &adapter->cloud_filter_list, list) { + if (cf->state == __I40EVF_CF_ADD_PENDING) + cf->state = __I40EVF_CF_ACTIVE; + } + } + break; + case VIRTCHNL_OP_DEL_CLOUD_FILTER: { + struct i40evf_cloud_filter *cf, *cftmp; + + list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, + list) { + if (cf->state == __I40EVF_CF_DEL_PENDING) { + cf->state = __I40EVF_CF_INVALID; + list_del(&cf->list); + kfree(cf); + adapter->num_cloud_filters--; + } + } + } + break; + default: + if (adapter->current_op && (v_opcode != adapter->current_op)) + dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n", + adapter->current_op, v_opcode); + break; + } /* switch v_opcode */ + adapter->current_op = VIRTCHNL_OP_UNKNOWN; +} -- cgit v1.2.1 From 3783693381a58be3f20b487044d7984090919842 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Fri, 14 Sep 2018 17:37:45 -0700 Subject: iavf: diet and reformat [ Upstream commit ee61022acfffcd4468bc3c31f4fd61503f725999 ] Remove a bunch of unused code and reformat a few lines. Also remove some now unnecessary files.
Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher Stable-dep-of: 32d57f667f87 ("iavf: fix inverted Rx hash condition leading to disabled hash") Signed-off-by: Sasha Levin --- drivers/net/ethernet/intel/iavf/i40e_adminq.c | 27 - drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h | 2277 +-------------------- drivers/net/ethernet/intel/iavf/i40e_common.c | 338 --- drivers/net/ethernet/intel/iavf/i40e_hmc.h | 215 -- drivers/net/ethernet/intel/iavf/i40e_lan_hmc.h | 158 -- drivers/net/ethernet/intel/iavf/i40e_prototype.h | 65 +- drivers/net/ethernet/intel/iavf/i40e_register.h | 245 --- drivers/net/ethernet/intel/iavf/i40e_type.h | 783 +------ 8 files changed, 50 insertions(+), 4058 deletions(-) delete mode 100644 drivers/net/ethernet/intel/iavf/i40e_hmc.h delete mode 100644 drivers/net/ethernet/intel/iavf/i40e_lan_hmc.h diff --git a/drivers/net/ethernet/intel/iavf/i40e_adminq.c b/drivers/net/ethernet/intel/iavf/i40e_adminq.c index 21a0dbf6ccf6..32e0e2d9cdc5 100644 --- a/drivers/net/ethernet/intel/iavf/i40e_adminq.c +++ b/drivers/net/ethernet/intel/iavf/i40e_adminq.c @@ -7,16 +7,6 @@ #include "i40e_adminq.h" #include "i40e_prototype.h" -/** - * i40e_is_nvm_update_op - return true if this is an NVM update operation - * @desc: API request descriptor - **/ -static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc) -{ - return (desc->opcode == i40e_aqc_opc_nvm_erase) || - (desc->opcode == i40e_aqc_opc_nvm_update); -} - /** * i40e_adminq_init_regs - Initialize AdminQ registers * @hw: pointer to the hardware structure @@ -569,9 +559,6 @@ i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw) i40e_shutdown_asq(hw); i40e_shutdown_arq(hw); - if (hw->nvm_buff.va) - i40e_free_virt_mem(hw, &hw->nvm_buff); - return ret_code; } @@ -951,17 +938,3 @@ clean_arq_element_err: return ret_code; } - -void i40evf_resume_aq(struct i40e_hw *hw) -{ - /* Registers are reset after PF reset */ - hw->aq.asq.next_to_use = 0; - hw->aq.asq.next_to_clean = 0; - - i40e_config_asq_regs(hw); - - hw->aq.arq.next_to_use = 0; - hw->aq.arq.next_to_clean = 0; - - i40e_config_arq_regs(hw); -} diff --git a/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h index 5fd8529465d4..493bdc5331f7 100644 --- a/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h @@ -307,33 +307,6 @@ enum i40e_admin_queue_opc { */ #define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X) -/* internal (0x00XX) commands */ - -/* Get version (direct 0x0001) */ -struct i40e_aqc_get_version { - __le32 rom_ver; - __le32 fw_build; - __le16 fw_major; - __le16 fw_minor; - __le16 api_major; - __le16 api_minor; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version); - -/* Send driver version (indirect 0x0002) */ -struct i40e_aqc_driver_version { - u8 driver_major_ver; - u8 driver_minor_ver; - u8 driver_build_ver; - u8 driver_subbuild_ver; - u8 reserved[4]; - __le32 address_high; - __le32 address_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version); - /* Queue Shutdown (direct 0x0003) */ struct i40e_aqc_queue_shutdown { __le32 driver_unloading; @@ -343,490 +316,6 @@ struct i40e_aqc_queue_shutdown { I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown); -/* Set PF context (0x0004, direct) */ -struct i40e_aqc_set_pf_context { - u8 pf_id; - u8 reserved[15]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context); - -/* Request resource ownership (direct 0x0008) - * Release resource ownership (direct 0x0009) - */ -#define 
I40E_AQ_RESOURCE_NVM 1 -#define I40E_AQ_RESOURCE_SDP 2 -#define I40E_AQ_RESOURCE_ACCESS_READ 1 -#define I40E_AQ_RESOURCE_ACCESS_WRITE 2 -#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000 -#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000 - -struct i40e_aqc_request_resource { - __le16 resource_id; - __le16 access_type; - __le32 timeout; - __le32 resource_number; - u8 reserved[4]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource); - -/* Get function capabilities (indirect 0x000A) - * Get device capabilities (indirect 0x000B) - */ -struct i40e_aqc_list_capabilites { - u8 command_flags; -#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1 - u8 pf_index; - u8 reserved[2]; - __le32 count; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites); - -struct i40e_aqc_list_capabilities_element_resp { - __le16 id; - u8 major_rev; - u8 minor_rev; - __le32 number; - __le32 logical_id; - __le32 phys_id; - u8 reserved[16]; -}; - -/* list of caps */ - -#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001 -#define I40E_AQ_CAP_ID_MNG_MODE 0x0002 -#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003 -#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004 -#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005 -#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006 -#define I40E_AQ_CAP_ID_WOL_AND_PROXY 0x0008 -#define I40E_AQ_CAP_ID_SRIOV 0x0012 -#define I40E_AQ_CAP_ID_VF 0x0013 -#define I40E_AQ_CAP_ID_VMDQ 0x0014 -#define I40E_AQ_CAP_ID_8021QBG 0x0015 -#define I40E_AQ_CAP_ID_8021QBR 0x0016 -#define I40E_AQ_CAP_ID_VSI 0x0017 -#define I40E_AQ_CAP_ID_DCB 0x0018 -#define I40E_AQ_CAP_ID_FCOE 0x0021 -#define I40E_AQ_CAP_ID_ISCSI 0x0022 -#define I40E_AQ_CAP_ID_RSS 0x0040 -#define I40E_AQ_CAP_ID_RXQ 0x0041 -#define I40E_AQ_CAP_ID_TXQ 0x0042 -#define I40E_AQ_CAP_ID_MSIX 0x0043 -#define I40E_AQ_CAP_ID_VF_MSIX 0x0044 -#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045 -#define I40E_AQ_CAP_ID_1588 0x0046 -#define I40E_AQ_CAP_ID_IWARP 0x0051 -#define I40E_AQ_CAP_ID_LED 0x0061 -#define I40E_AQ_CAP_ID_SDP 0x0062 -#define I40E_AQ_CAP_ID_MDIO 0x0063 -#define I40E_AQ_CAP_ID_WSR_PROT 0x0064 -#define I40E_AQ_CAP_ID_NVM_MGMT 0x0080 -#define I40E_AQ_CAP_ID_FLEX10 0x00F1 -#define I40E_AQ_CAP_ID_CEM 0x00F2 - -/* Set CPPM Configuration (direct 0x0103) */ -struct i40e_aqc_cppm_configuration { - __le16 command_flags; -#define I40E_AQ_CPPM_EN_LTRC 0x0800 -#define I40E_AQ_CPPM_EN_DMCTH 0x1000 -#define I40E_AQ_CPPM_EN_DMCTLX 0x2000 -#define I40E_AQ_CPPM_EN_HPTC 0x4000 -#define I40E_AQ_CPPM_EN_DMARC 0x8000 - __le16 ttlx; - __le32 dmacr; - __le16 dmcth; - u8 hptc; - u8 reserved; - __le32 pfltrc; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration); - -/* Set ARP Proxy command / response (indirect 0x0104) */ -struct i40e_aqc_arp_proxy_data { - __le16 command_flags; -#define I40E_AQ_ARP_INIT_IPV4 0x0800 -#define I40E_AQ_ARP_UNSUP_CTL 0x1000 -#define I40E_AQ_ARP_ENA 0x2000 -#define I40E_AQ_ARP_ADD_IPV4 0x4000 -#define I40E_AQ_ARP_DEL_IPV4 0x8000 - __le16 table_id; - __le32 enabled_offloads; -#define I40E_AQ_ARP_DIRECTED_OFFLOAD_ENABLE 0x00000020 -#define I40E_AQ_ARP_OFFLOAD_ENABLE 0x00000800 - __le32 ip_addr; - u8 mac_addr[6]; - u8 reserved[2]; -}; - -I40E_CHECK_STRUCT_LEN(0x14, i40e_aqc_arp_proxy_data); - -/* Set NS Proxy Table Entry Command (indirect 0x0105) */ -struct i40e_aqc_ns_proxy_data { - __le16 table_idx_mac_addr_0; - __le16 table_idx_mac_addr_1; - __le16 table_idx_ipv6_0; - __le16 table_idx_ipv6_1; - __le16 control; -#define I40E_AQ_NS_PROXY_ADD_0 0x0001 -#define I40E_AQ_NS_PROXY_DEL_0 0x0002 -#define I40E_AQ_NS_PROXY_ADD_1 0x0004 -#define 
I40E_AQ_NS_PROXY_DEL_1 0x0008 -#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x0010 -#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x0020 -#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x0040 -#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x0080 -#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0100 -#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0200 -#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0400 -#define I40E_AQ_NS_PROXY_OFFLOAD_ENABLE 0x0800 -#define I40E_AQ_NS_PROXY_DIRECTED_OFFLOAD_ENABLE 0x1000 - u8 mac_addr_0[6]; - u8 mac_addr_1[6]; - u8 local_mac_addr[6]; - u8 ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */ - u8 ipv6_addr_1[16]; -}; - -I40E_CHECK_STRUCT_LEN(0x3c, i40e_aqc_ns_proxy_data); - -/* Manage LAA Command (0x0106) - obsolete */ -struct i40e_aqc_mng_laa { - __le16 command_flags; -#define I40E_AQ_LAA_FLAG_WR 0x8000 - u8 reserved[2]; - __le32 sal; - __le16 sah; - u8 reserved2[6]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_mng_laa); - -/* Manage MAC Address Read Command (indirect 0x0107) */ -struct i40e_aqc_mac_address_read { - __le16 command_flags; -#define I40E_AQC_LAN_ADDR_VALID 0x10 -#define I40E_AQC_SAN_ADDR_VALID 0x20 -#define I40E_AQC_PORT_ADDR_VALID 0x40 -#define I40E_AQC_WOL_ADDR_VALID 0x80 -#define I40E_AQC_MC_MAG_EN_VALID 0x100 -#define I40E_AQC_ADDR_VALID_MASK 0x3F0 - u8 reserved[6]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read); - -struct i40e_aqc_mac_address_read_data { - u8 pf_lan_mac[6]; - u8 pf_san_mac[6]; - u8 port_mac[6]; - u8 pf_wol_mac[6]; -}; - -I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data); - -/* Manage MAC Address Write Command (0x0108) */ -struct i40e_aqc_mac_address_write { - __le16 command_flags; -#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000 -#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000 -#define I40E_AQC_WRITE_TYPE_PORT 0x8000 -#define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG 0xC000 -#define I40E_AQC_WRITE_TYPE_MASK 0xC000 - - __le16 mac_sah; - __le32 mac_sal; - u8 reserved[8]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write); - -/* PXE commands (0x011x) */ - -/* Clear PXE Command and response (direct 0x0110) */ -struct i40e_aqc_clear_pxe { - u8 rx_cnt; - u8 reserved[15]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe); - -/* Set WoL Filter (0x0120) */ - -struct i40e_aqc_set_wol_filter { - __le16 filter_index; -#define I40E_AQC_MAX_NUM_WOL_FILTERS 8 -#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT 15 -#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_MASK (0x1 << \ - I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT) - -#define I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT 0 -#define I40E_AQC_SET_WOL_FILTER_INDEX_MASK (0x7 << \ - I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT) - __le16 cmd_flags; -#define I40E_AQC_SET_WOL_FILTER 0x8000 -#define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL 0x4000 -#define I40E_AQC_SET_WOL_FILTER_WOL_PRESERVE_ON_PFR 0x2000 -#define I40E_AQC_SET_WOL_FILTER_ACTION_CLEAR 0 -#define I40E_AQC_SET_WOL_FILTER_ACTION_SET 1 - __le16 valid_flags; -#define I40E_AQC_SET_WOL_FILTER_ACTION_VALID 0x8000 -#define I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID 0x4000 - u8 reserved[2]; - __le32 address_high; - __le32 address_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_set_wol_filter); - -struct i40e_aqc_set_wol_filter_data { - u8 filter[128]; - u8 mask[16]; -}; - -I40E_CHECK_STRUCT_LEN(0x90, i40e_aqc_set_wol_filter_data); - -/* Get Wake Reason (0x0121) */ - -struct i40e_aqc_get_wake_reason_completion { - u8 reserved_1[2]; - __le16 wake_reason; -#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT 0 -#define 
I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_MASK (0xFF << \ - I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT) -#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT 8 -#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_MASK (0xFF << \ - I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT) - u8 reserved_2[12]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_get_wake_reason_completion); - -/* Switch configuration commands (0x02xx) */ - -/* Used by many indirect commands that only pass an seid and a buffer in the - * command - */ -struct i40e_aqc_switch_seid { - __le16 seid; - u8 reserved[6]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid); - -/* Get Switch Configuration command (indirect 0x0200) - * uses i40e_aqc_switch_seid for the descriptor - */ -struct i40e_aqc_get_switch_config_header_resp { - __le16 num_reported; - __le16 num_total; - u8 reserved[12]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_config_header_resp); - -struct i40e_aqc_switch_config_element_resp { - u8 element_type; -#define I40E_AQ_SW_ELEM_TYPE_MAC 1 -#define I40E_AQ_SW_ELEM_TYPE_PF 2 -#define I40E_AQ_SW_ELEM_TYPE_VF 3 -#define I40E_AQ_SW_ELEM_TYPE_EMP 4 -#define I40E_AQ_SW_ELEM_TYPE_BMC 5 -#define I40E_AQ_SW_ELEM_TYPE_PV 16 -#define I40E_AQ_SW_ELEM_TYPE_VEB 17 -#define I40E_AQ_SW_ELEM_TYPE_PA 18 -#define I40E_AQ_SW_ELEM_TYPE_VSI 19 - u8 revision; -#define I40E_AQ_SW_ELEM_REV_1 1 - __le16 seid; - __le16 uplink_seid; - __le16 downlink_seid; - u8 reserved[3]; - u8 connection_type; -#define I40E_AQ_CONN_TYPE_REGULAR 0x1 -#define I40E_AQ_CONN_TYPE_DEFAULT 0x2 -#define I40E_AQ_CONN_TYPE_CASCADED 0x3 - __le16 scheduler_id; - __le16 element_info; -}; - -I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_config_element_resp); - -/* Get Switch Configuration (indirect 0x0200) - * an array of elements are returned in the response buffer - * the first in the array is the header, remainder are elements - */ -struct i40e_aqc_get_switch_config_resp { - struct i40e_aqc_get_switch_config_header_resp header; - struct i40e_aqc_switch_config_element_resp element[1]; -}; - -I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_switch_config_resp); - -/* Add Statistics (direct 0x0201) - * Remove Statistics (direct 0x0202) - */ -struct i40e_aqc_add_remove_statistics { - __le16 seid; - __le16 vlan; - __le16 stat_index; - u8 reserved[10]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics); - -/* Set Port Parameters command (direct 0x0203) */ -struct i40e_aqc_set_port_parameters { - __le16 command_flags; -#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1 -#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! 
*/ -#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4 - __le16 bad_frame_vsi; -#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_SHIFT 0x0 -#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_MASK 0x3FF - __le16 default_seid; /* reserved for command */ - u8 reserved[10]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters); - -/* Get Switch Resource Allocation (indirect 0x0204) */ -struct i40e_aqc_get_switch_resource_alloc { - u8 num_entries; /* reserved for command */ - u8 reserved[7]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc); - -/* expect an array of these structs in the response buffer */ -struct i40e_aqc_switch_resource_alloc_element_resp { - u8 resource_type; -#define I40E_AQ_RESOURCE_TYPE_VEB 0x0 -#define I40E_AQ_RESOURCE_TYPE_VSI 0x1 -#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2 -#define I40E_AQ_RESOURCE_TYPE_STAG 0x3 -#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4 -#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5 -#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6 -#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7 -#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8 -#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9 -#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA -#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB -#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC -#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD -#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF -#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10 -#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11 -#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12 -#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13 - u8 reserved1; - __le16 guaranteed; - __le16 total; - __le16 used; - __le16 total_unalloced; - u8 reserved2[6]; -}; - -I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp); - -/* Set Switch Configuration (direct 0x0205) */ -struct i40e_aqc_set_switch_config { - __le16 flags; -/* flags used for both fields below */ -#define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001 -#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002 - __le16 valid_flags; - /* The ethertype in switch_tag is dropped on ingress and used - * internally by the switch. Set this to zero for the default - * of 0x88a8 (802.1ad). Should be zero for firmware API - * versions lower than 1.7. - */ - __le16 switch_tag; - /* The ethertypes in first_tag and second_tag are used to - * match the outer and inner VLAN tags (respectively) when HW - * double VLAN tagging is enabled via the set port parameters - * AQ command. Otherwise these are both ignored. Set them to - * zero for their defaults of 0x8100 (802.1Q). Should be zero - * for firmware API versions lower than 1.7. 
- */ - __le16 first_tag; - __le16 second_tag; - u8 reserved[6]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_set_switch_config); - -/* Read Receive control registers (direct 0x0206) - * Write Receive control registers (direct 0x0207) - * used for accessing Rx control registers that can be - * slow and need special handling when under high Rx load - */ -struct i40e_aqc_rx_ctl_reg_read_write { - __le32 reserved1; - __le32 address; - __le32 reserved2; - __le32 value; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_rx_ctl_reg_read_write); - -/* Add VSI (indirect 0x0210) - * this indirect command uses struct i40e_aqc_vsi_properties_data - * as the indirect buffer (128 bytes) - * - * Update VSI (indirect 0x211) - * uses the same data structure as Add VSI - * - * Get VSI (indirect 0x0212) - * uses the same completion and data structure as Add VSI - */ -struct i40e_aqc_add_get_update_vsi { - __le16 uplink_seid; - u8 connection_type; -#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1 -#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2 -#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3 - u8 reserved1; - u8 vf_id; - u8 reserved2; - __le16 vsi_flags; -#define I40E_AQ_VSI_TYPE_SHIFT 0x0 -#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT) -#define I40E_AQ_VSI_TYPE_VF 0x0 -#define I40E_AQ_VSI_TYPE_VMDQ2 0x1 -#define I40E_AQ_VSI_TYPE_PF 0x2 -#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3 -#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4 - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi); - -struct i40e_aqc_add_get_update_vsi_completion { - __le16 seid; - __le16 vsi_number; - __le16 vsi_used; - __le16 vsi_free; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion); - struct i40e_aqc_vsi_properties_data { /* first 96 byte are written by SW */ __le16 valid_sections; @@ -952,87 +441,6 @@ struct i40e_aqc_vsi_properties_data { I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data); -/* Add Port Virtualizer (direct 0x0220) - * also used for update PV (direct 0x0221) but only flags are used - * (IS_CTRL_PORT only works on add PV) - */ -struct i40e_aqc_add_update_pv { - __le16 command_flags; -#define I40E_AQC_PV_FLAG_PV_TYPE 0x1 -#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2 -#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4 -#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8 - __le16 uplink_seid; - __le16 connected_seid; - u8 reserved[10]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv); - -struct i40e_aqc_add_update_pv_completion { - /* reserved for update; for add also encodes error if rc == ENOSPC */ - __le16 pv_seid; -#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1 -#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2 -#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4 -#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8 - u8 reserved[14]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion); - -/* Get PV Params (direct 0x0222) - * uses i40e_aqc_switch_seid for the descriptor - */ - -struct i40e_aqc_get_pv_params_completion { - __le16 seid; - __le16 default_stag; - __le16 pv_flags; /* same flags as add_pv */ -#define I40E_AQC_GET_PV_PV_TYPE 0x1 -#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2 -#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4 - u8 reserved[8]; - __le16 default_port_seid; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion); - -/* Add VEB (direct 0x0230) */ -struct i40e_aqc_add_veb { - __le16 uplink_seid; - __le16 downlink_seid; - __le16 veb_flags; -#define I40E_AQC_ADD_VEB_FLOATING 0x1 -#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1 -#define 
I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \ - I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT) -#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2 -#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4 -#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 /* deprecated */ -#define I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS 0x10 - u8 enable_tcs; - u8 reserved[9]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb); - -struct i40e_aqc_add_veb_completion { - u8 reserved[6]; - __le16 switch_seid; - /* also encodes error if rc == ENOSPC; codes are the same as add_pv */ - __le16 veb_seid; -#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1 -#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2 -#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4 -#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8 - __le16 statistic_index; - __le16 vebs_used; - __le16 vebs_free; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion); - /* Get VEB Parameters (direct 0x0232) * uses i40e_aqc_switch_seid for the descriptor */ @@ -1048,1670 +456,73 @@ struct i40e_aqc_get_veb_parameters_completion { I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion); -/* Delete Element (direct 0x0243) - * uses the generic i40e_aqc_switch_seid - */ - -/* Add MAC-VLAN (indirect 0x0250) */ - -/* used for the command for most vlan commands */ -struct i40e_aqc_macvlan { - __le16 num_addresses; - __le16 seid[3]; -#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \ - I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) -#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000 - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan); - -/* indirect data for command and response */ -struct i40e_aqc_add_macvlan_element_data { - u8 mac_addr[6]; - __le16 vlan_tag; - __le16 flags; -#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001 -#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002 -#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004 -#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008 -#define I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC 0x0010 - __le16 queue_number; -#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0 -#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \ - I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) - /* response section */ - u8 match_method; -#define I40E_AQC_MM_PERFECT_MATCH 0x01 -#define I40E_AQC_MM_HASH_MATCH 0x02 -#define I40E_AQC_MM_ERR_NO_RES 0xFF - u8 reserved1[3]; -}; - -struct i40e_aqc_add_remove_macvlan_completion { - __le16 perfect_mac_used; - __le16 perfect_mac_free; - __le16 unicast_hash_free; - __le16 multicast_hash_free; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion); - -/* Remove MAC-VLAN (indirect 0x0251) - * uses i40e_aqc_macvlan for the descriptor - * data points to an array of num_addresses of elements - */ +#define I40E_LINK_SPEED_100MB_SHIFT 0x1 +#define I40E_LINK_SPEED_1000MB_SHIFT 0x2 +#define I40E_LINK_SPEED_10GB_SHIFT 0x3 +#define I40E_LINK_SPEED_40GB_SHIFT 0x4 +#define I40E_LINK_SPEED_20GB_SHIFT 0x5 +#define I40E_LINK_SPEED_25GB_SHIFT 0x6 -struct i40e_aqc_remove_macvlan_element_data { - u8 mac_addr[6]; - __le16 vlan_tag; - u8 flags; -#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01 -#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02 -#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08 -#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10 - u8 reserved[3]; - /* reply section */ - u8 error_code; -#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0 -#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF - u8 reply_reserved[3]; +enum i40e_aq_link_speed { + I40E_LINK_SPEED_UNKNOWN = 0, + I40E_LINK_SPEED_100MB = BIT(I40E_LINK_SPEED_100MB_SHIFT), + 
I40E_LINK_SPEED_1GB = BIT(I40E_LINK_SPEED_1000MB_SHIFT), + I40E_LINK_SPEED_10GB = BIT(I40E_LINK_SPEED_10GB_SHIFT), + I40E_LINK_SPEED_40GB = BIT(I40E_LINK_SPEED_40GB_SHIFT), + I40E_LINK_SPEED_20GB = BIT(I40E_LINK_SPEED_20GB_SHIFT), + I40E_LINK_SPEED_25GB = BIT(I40E_LINK_SPEED_25GB_SHIFT), }; -/* Add VLAN (indirect 0x0252) - * Remove VLAN (indirect 0x0253) - * use the generic i40e_aqc_macvlan for the command +/* Send to PF command (indirect 0x0801) id is only used by PF + * Send to VF command (indirect 0x0802) id is only used by PF + * Send to Peer PF command (indirect 0x0803) */ -struct i40e_aqc_add_remove_vlan_element_data { - __le16 vlan_tag; - u8 vlan_flags; -/* flags for add VLAN */ -#define I40E_AQC_ADD_VLAN_LOCAL 0x1 -#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1 -#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << I40E_AQC_ADD_PVLAN_TYPE_SHIFT) -#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0 -#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2 -#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4 -#define I40E_AQC_VLAN_PTYPE_SHIFT 3 -#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT) -#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0 -#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8 -#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10 -#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18 -/* flags for remove VLAN */ -#define I40E_AQC_REMOVE_VLAN_ALL 0x1 - u8 reserved; - u8 result; -/* flags for add VLAN */ -#define I40E_AQC_ADD_VLAN_SUCCESS 0x0 -#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE -#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF -/* flags for remove VLAN */ -#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0 -#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF - u8 reserved1[3]; -}; - -struct i40e_aqc_add_remove_vlan_completion { +struct i40e_aqc_pf_vf_message { + __le32 id; u8 reserved[4]; - __le16 vlans_used; - __le16 vlans_free; __le32 addr_high; __le32 addr_low; }; -/* Set VSI Promiscuous Modes (direct 0x0254) */ -struct i40e_aqc_set_vsi_promiscuous_modes { - __le16 promiscuous_flags; - __le16 valid_flags; -/* flags used for both fields above */ -#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01 -#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02 -#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04 -#define I40E_AQC_SET_VSI_DEFAULT 0x08 -#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10 -#define I40E_AQC_SET_VSI_PROMISC_TX 0x8000 - __le16 seid; -#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF - __le16 vlan_tag; -#define I40E_AQC_SET_VSI_VLAN_MASK 0x0FFF -#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000 - u8 reserved[8]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes); - -/* Add S/E-tag command (direct 0x0255) - * Uses generic i40e_aqc_add_remove_tag_completion for completion - */ -struct i40e_aqc_add_tag { - __le16 flags; -#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001 - __le16 seid; -#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \ - I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT) - __le16 tag; - __le16 queue_number; - u8 reserved[8]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag); - -struct i40e_aqc_add_remove_tag_completion { - u8 reserved[12]; - __le16 tags_used; - __le16 tags_free; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion); - -/* Remove S/E-tag command (direct 0x0256) - * Uses generic i40e_aqc_add_remove_tag_completion for completion - */ -struct i40e_aqc_remove_tag { - __le16 seid; -#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ - I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT) - __le16 tag; - u8 reserved[12]; -}; - 
-I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_tag); - -/* Add multicast E-Tag (direct 0x0257) - * del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields - * and no external data - */ -struct i40e_aqc_add_remove_mcast_etag { - __le16 pv_seid; - __le16 etag; - u8 num_unicast_etags; - u8 reserved[3]; - __le32 addr_high; /* address of array of 2-byte s-tags */ - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag); +I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message); -struct i40e_aqc_add_remove_mcast_etag_completion { - u8 reserved[4]; - __le16 mcast_etags_used; - __le16 mcast_etags_free; +struct i40e_aqc_get_set_rss_key { +#define I40E_AQC_SET_RSS_KEY_VSI_VALID BIT(15) +#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0 +#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \ + I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) + __le16 vsi_id; + u8 reserved[6]; __le32 addr_high; __le32 addr_low; - }; -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion); +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key); -/* Update S/E-Tag (direct 0x0259) */ -struct i40e_aqc_update_tag { - __le16 seid; -#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ - I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT) - __le16 old_tag; - __le16 new_tag; - u8 reserved[10]; +struct i40e_aqc_get_set_rss_key_data { + u8 standard_rss_key[0x28]; + u8 extended_hash_key[0xc]; }; -I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag); - -struct i40e_aqc_update_tag_completion { - u8 reserved[12]; - __le16 tags_used; - __le16 tags_free; -}; +I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data); -I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion); +struct i40e_aqc_get_set_rss_lut { +#define I40E_AQC_SET_RSS_LUT_VSI_VALID BIT(15) +#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0 +#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \ + I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) + __le16 vsi_id; +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0 +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK \ + BIT(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) -/* Add Control Packet filter (direct 0x025A) - * Remove Control Packet filter (direct 0x025B) - * uses the i40e_aqc_add_oveb_cloud, - * and the generic direct completion structure - */ -struct i40e_aqc_add_remove_control_packet_filter { - u8 mac[6]; - __le16 etype; +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0 +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1 __le16 flags; -#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001 -#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002 -#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004 -#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008 -#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000 - __le16 seid; -#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \ - I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT) - __le16 queue; - u8 reserved[2]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter); - -struct i40e_aqc_add_remove_control_packet_filter_completion { - __le16 mac_etype_used; - __le16 etype_used; - __le16 mac_etype_free; - __le16 etype_free; - u8 reserved[8]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion); - -/* Add Cloud filters (indirect 0x025C) - * Remove Cloud filters (indirect 0x025D) - * uses the i40e_aqc_add_remove_cloud_filters, - * and the generic indirect completion structure - */ -struct i40e_aqc_add_remove_cloud_filters { - u8 num_filters; - u8 reserved; - __le16 
seid; -#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \ - I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT) - u8 big_buffer_flag; -#define I40E_AQC_ADD_CLOUD_CMD_BB 1 - u8 reserved2[3]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters); - -struct i40e_aqc_cloud_filters_element_data { - u8 outer_mac[6]; - u8 inner_mac[6]; - __le16 inner_vlan; - union { - struct { - u8 reserved[12]; - u8 data[4]; - } v4; - struct { - u8 data[16]; - } v6; - struct { - __le16 data[8]; - } raw_v6; - } ipaddr; - __le16 flags; -#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0 -#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \ - I40E_AQC_ADD_CLOUD_FILTER_SHIFT) -/* 0x0000 reserved */ -#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001 -/* 0x0002 reserved */ -#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003 -#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004 -/* 0x0005 reserved */ -#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006 -/* 0x0007 reserved */ -/* 0x0008 reserved */ -#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009 -#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A -#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B -#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C -/* 0x0010 to 0x0017 is for custom filters */ -#define I40E_AQC_ADD_CLOUD_FILTER_IP_PORT 0x0010 /* Dest IP + L4 Port */ -#define I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT 0x0011 /* Dest MAC + L4 Port */ -#define I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT 0x0012 /* Dest MAC + VLAN + L4 Port */ - -#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080 -#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6 -#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0 -#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0 -#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100 - -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN 0 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE 2 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_RESERVED 4 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN_GPE 5 - -#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_MAC 0x2000 -#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_INNER_MAC 0x4000 -#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_IP 0x8000 - - __le32 tenant_id; - u8 reserved[4]; - __le16 queue_number; -#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0 -#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x7FF << \ - I40E_AQC_ADD_CLOUD_QUEUE_SHIFT) - u8 reserved2[14]; - /* response section */ - u8 allocation_result; -#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0 -#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF - u8 response_reserved[7]; -}; - -I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_cloud_filters_element_data); - -/* i40e_aqc_cloud_filters_element_bb is used when - * I40E_AQC_ADD_CLOUD_CMD_BB flag is set. 
- */ -struct i40e_aqc_cloud_filters_element_bb { - struct i40e_aqc_cloud_filters_element_data element; - u16 general_fields[32]; -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0 0 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1 1 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2 2 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0 3 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1 4 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2 5 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0 6 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1 7 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2 8 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0 9 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1 10 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2 11 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD0 12 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD1 13 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD2 14 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0 15 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD1 16 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD2 17 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD3 18 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD4 19 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD5 20 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD6 21 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD7 22 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD0 23 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD1 24 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD2 25 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD3 26 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD4 27 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD5 28 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD6 29 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD7 30 -}; - -I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_cloud_filters_element_bb); - -struct i40e_aqc_remove_cloud_filters_completion { - __le16 perfect_ovlan_used; - __le16 perfect_ovlan_free; - __le16 vlan_used; - __le16 vlan_free; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion); - -/* Replace filter Command 0x025F - * uses the i40e_aqc_replace_cloud_filters, - * and the generic indirect completion structure - */ -struct i40e_filter_data { - u8 filter_type; - u8 input[3]; -}; - -I40E_CHECK_STRUCT_LEN(4, i40e_filter_data); - -struct i40e_aqc_replace_cloud_filters_cmd { - u8 valid_flags; -#define I40E_AQC_REPLACE_L1_FILTER 0x0 -#define I40E_AQC_REPLACE_CLOUD_FILTER 0x1 -#define I40E_AQC_GET_CLOUD_FILTERS 0x2 -#define I40E_AQC_MIRROR_CLOUD_FILTER 0x4 -#define I40E_AQC_HIGH_PRIORITY_CLOUD_FILTER 0x8 - u8 old_filter_type; - u8 new_filter_type; - u8 tr_bit; - u8 reserved[4]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_replace_cloud_filters_cmd); - -struct i40e_aqc_replace_cloud_filters_cmd_buf { - u8 data[32]; -/* Filter type INPUT codes*/ -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_ENTRIES_MAX 3 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED BIT(7) - -/* Field Vector offsets */ -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_MAC_DA 0 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_ETH 6 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG 7 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN 8 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_OVLAN 9 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN 10 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY 11 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC 12 -/* big FLU */ -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IP_DA 14 -/* big FLU */ -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_OIP_DA 15 - -#define 
I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN 37 - struct i40e_filter_data filters[8]; -}; - -I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_replace_cloud_filters_cmd_buf); - -/* Add Mirror Rule (indirect or direct 0x0260) - * Delete Mirror Rule (indirect or direct 0x0261) - * note: some rule types (4,5) do not use an external buffer. - * take care to set the flags correctly. - */ -struct i40e_aqc_add_delete_mirror_rule { - __le16 seid; - __le16 rule_type; -#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0 -#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \ - I40E_AQC_MIRROR_RULE_TYPE_SHIFT) -#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1 -#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2 -#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3 -#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4 -#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5 - __le16 num_entries; - __le16 destination; /* VSI for add, rule id for delete */ - __le32 addr_high; /* address of array of 2-byte VSI or VLAN ids */ - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule); - -struct i40e_aqc_add_delete_mirror_rule_completion { - u8 reserved[2]; - __le16 rule_id; /* only used on add */ - __le16 mirror_rules_used; - __le16 mirror_rules_free; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion); - -/* Dynamic Device Personalization */ -struct i40e_aqc_write_personalization_profile { - u8 flags; - u8 reserved[3]; - __le32 profile_track_id; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_write_personalization_profile); - -struct i40e_aqc_write_ddp_resp { - __le32 error_offset; - __le32 error_info; - __le32 addr_high; - __le32 addr_low; -}; - -struct i40e_aqc_get_applied_profiles { - u8 flags; -#define I40E_AQC_GET_DDP_GET_CONF 0x1 -#define I40E_AQC_GET_DDP_GET_RDPU_CONF 0x2 - u8 rsv[3]; - __le32 reserved; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_get_applied_profiles); - -/* DCB 0x03xx*/ - -/* PFC Ignore (direct 0x0301) - * the command and response use the same descriptor structure - */ -struct i40e_aqc_pfc_ignore { - u8 tc_bitmap; - u8 command_flags; /* unused on response */ -#define I40E_AQC_PFC_IGNORE_SET 0x80 -#define I40E_AQC_PFC_IGNORE_CLEAR 0x0 - u8 reserved[14]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore); - -/* DCB Update (direct 0x0302) uses the i40e_aq_desc structure - * with no parameters - */ - -/* TX scheduler 0x04xx */ - -/* Almost all the indirect commands use - * this generic struct to pass the SEID in param0 - */ -struct i40e_aqc_tx_sched_ind { - __le16 vsi_seid; - u8 reserved[6]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind); - -/* Several commands respond with a set of queue set handles */ -struct i40e_aqc_qs_handles_resp { - __le16 qs_handles[8]; -}; - -/* Configure VSI BW limits (direct 0x0400) */ -struct i40e_aqc_configure_vsi_bw_limit { - __le16 vsi_seid; - u8 reserved[2]; - __le16 credit; - u8 reserved1[2]; - u8 max_credit; /* 0-3, limit = 2^max */ - u8 reserved2[7]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit); - -/* Configure VSI Bandwidth Limit per Traffic Type (indirect 0x0406) - * responds with i40e_aqc_qs_handles_resp - */ -struct i40e_aqc_configure_vsi_ets_sla_bw_data { - u8 tc_valid_bits; - u8 reserved[15]; - __le16 tc_bw_credits[8]; /* FW writesback QS handles here */ - - /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ - __le16 tc_bw_max[2]; - u8 reserved1[28]; -}; - -I40E_CHECK_STRUCT_LEN(0x40, 
i40e_aqc_configure_vsi_ets_sla_bw_data); - -/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407) - * responds with i40e_aqc_qs_handles_resp - */ -struct i40e_aqc_configure_vsi_tc_bw_data { - u8 tc_valid_bits; - u8 reserved[3]; - u8 tc_bw_credits[8]; - u8 reserved1[4]; - __le16 qs_handles[8]; -}; - -I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_vsi_tc_bw_data); - -/* Query vsi bw configuration (indirect 0x0408) */ -struct i40e_aqc_query_vsi_bw_config_resp { - u8 tc_valid_bits; - u8 tc_suspended_bits; - u8 reserved[14]; - __le16 qs_handles[8]; - u8 reserved1[4]; - __le16 port_bw_limit; - u8 reserved2[2]; - u8 max_bw; /* 0-3, limit = 2^max */ - u8 reserved3[23]; -}; - -I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_vsi_bw_config_resp); - -/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */ -struct i40e_aqc_query_vsi_ets_sla_config_resp { - u8 tc_valid_bits; - u8 reserved[3]; - u8 share_credits[8]; - __le16 credits[8]; - - /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ - __le16 tc_bw_max[2]; -}; - -I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_vsi_ets_sla_config_resp); - -/* Configure Switching Component Bandwidth Limit (direct 0x0410) */ -struct i40e_aqc_configure_switching_comp_bw_limit { - __le16 seid; - u8 reserved[2]; - __le16 credit; - u8 reserved1[2]; - u8 max_bw; /* 0-3, limit = 2^max */ - u8 reserved2[7]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit); - -/* Enable Physical Port ETS (indirect 0x0413) - * Modify Physical Port ETS (indirect 0x0414) - * Disable Physical Port ETS (indirect 0x0415) - */ -struct i40e_aqc_configure_switching_comp_ets_data { - u8 reserved[4]; - u8 tc_valid_bits; - u8 seepage; -#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1 - u8 tc_strict_priority_flags; - u8 reserved1[17]; - u8 tc_bw_share_credits[8]; - u8 reserved2[96]; -}; - -I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_configure_switching_comp_ets_data); - -/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */ -struct i40e_aqc_configure_switching_comp_ets_bw_limit_data { - u8 tc_valid_bits; - u8 reserved[15]; - __le16 tc_bw_credit[8]; - - /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ - __le16 tc_bw_max[2]; - u8 reserved1[28]; -}; - -I40E_CHECK_STRUCT_LEN(0x40, - i40e_aqc_configure_switching_comp_ets_bw_limit_data); - -/* Configure Switching Component Bandwidth Allocation per Tc - * (indirect 0x0417) - */ -struct i40e_aqc_configure_switching_comp_bw_config_data { - u8 tc_valid_bits; - u8 reserved[2]; - u8 absolute_credits; /* bool */ - u8 tc_bw_share_credits[8]; - u8 reserved1[20]; -}; - -I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_switching_comp_bw_config_data); - -/* Query Switching Component Configuration (indirect 0x0418) */ -struct i40e_aqc_query_switching_comp_ets_config_resp { - u8 tc_valid_bits; - u8 reserved[35]; - __le16 port_bw_limit; - u8 reserved1[2]; - u8 tc_bw_max; /* 0-3, limit = 2^max */ - u8 reserved2[23]; -}; - -I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_switching_comp_ets_config_resp); - -/* Query PhysicalPort ETS Configuration (indirect 0x0419) */ -struct i40e_aqc_query_port_ets_config_resp { - u8 reserved[4]; - u8 tc_valid_bits; - u8 reserved1; - u8 tc_strict_priority_bits; - u8 reserved2; - u8 tc_bw_share_credits[8]; - __le16 tc_bw_limits[8]; - - /* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */ - __le16 tc_bw_max[2]; - u8 reserved3[32]; -}; - -I40E_CHECK_STRUCT_LEN(0x44, i40e_aqc_query_port_ets_config_resp); - -/* Query Switching Component Bandwidth Allocation per Traffic Type - 
* (indirect 0x041A) - */ -struct i40e_aqc_query_switching_comp_bw_config_resp { - u8 tc_valid_bits; - u8 reserved[2]; - u8 absolute_credits_enable; /* bool */ - u8 tc_bw_share_credits[8]; - __le16 tc_bw_limits[8]; - - /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ - __le16 tc_bw_max[2]; -}; - -I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_switching_comp_bw_config_resp); - -/* Suspend/resume port TX traffic - * (direct 0x041B and 0x041C) uses the generic SEID struct - */ - -/* Configure partition BW - * (indirect 0x041D) - */ -struct i40e_aqc_configure_partition_bw_data { - __le16 pf_valid_bits; - u8 min_bw[16]; /* guaranteed bandwidth */ - u8 max_bw[16]; /* bandwidth limit */ -}; - -I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data); - -/* Get and set the active HMC resource profile and status. - * (direct 0x0500) and (direct 0x0501) - */ -struct i40e_aq_get_set_hmc_resource_profile { - u8 pm_profile; - u8 pe_vf_enabled; - u8 reserved[14]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile); - -enum i40e_aq_hmc_profile { - /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */ - I40E_HMC_PROFILE_DEFAULT = 1, - I40E_HMC_PROFILE_FAVOR_VF = 2, - I40E_HMC_PROFILE_EQUAL = 3, -}; - -/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */ - -/* set in param0 for get phy abilities to report qualified modules */ -#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001 -#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002 - -enum i40e_aq_phy_type { - I40E_PHY_TYPE_SGMII = 0x0, - I40E_PHY_TYPE_1000BASE_KX = 0x1, - I40E_PHY_TYPE_10GBASE_KX4 = 0x2, - I40E_PHY_TYPE_10GBASE_KR = 0x3, - I40E_PHY_TYPE_40GBASE_KR4 = 0x4, - I40E_PHY_TYPE_XAUI = 0x5, - I40E_PHY_TYPE_XFI = 0x6, - I40E_PHY_TYPE_SFI = 0x7, - I40E_PHY_TYPE_XLAUI = 0x8, - I40E_PHY_TYPE_XLPPI = 0x9, - I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA, - I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB, - I40E_PHY_TYPE_10GBASE_AOC = 0xC, - I40E_PHY_TYPE_40GBASE_AOC = 0xD, - I40E_PHY_TYPE_UNRECOGNIZED = 0xE, - I40E_PHY_TYPE_UNSUPPORTED = 0xF, - I40E_PHY_TYPE_100BASE_TX = 0x11, - I40E_PHY_TYPE_1000BASE_T = 0x12, - I40E_PHY_TYPE_10GBASE_T = 0x13, - I40E_PHY_TYPE_10GBASE_SR = 0x14, - I40E_PHY_TYPE_10GBASE_LR = 0x15, - I40E_PHY_TYPE_10GBASE_SFPP_CU = 0x16, - I40E_PHY_TYPE_10GBASE_CR1 = 0x17, - I40E_PHY_TYPE_40GBASE_CR4 = 0x18, - I40E_PHY_TYPE_40GBASE_SR4 = 0x19, - I40E_PHY_TYPE_40GBASE_LR4 = 0x1A, - I40E_PHY_TYPE_1000BASE_SX = 0x1B, - I40E_PHY_TYPE_1000BASE_LX = 0x1C, - I40E_PHY_TYPE_1000BASE_T_OPTICAL = 0x1D, - I40E_PHY_TYPE_20GBASE_KR2 = 0x1E, - I40E_PHY_TYPE_25GBASE_KR = 0x1F, - I40E_PHY_TYPE_25GBASE_CR = 0x20, - I40E_PHY_TYPE_25GBASE_SR = 0x21, - I40E_PHY_TYPE_25GBASE_LR = 0x22, - I40E_PHY_TYPE_25GBASE_AOC = 0x23, - I40E_PHY_TYPE_25GBASE_ACC = 0x24, - I40E_PHY_TYPE_MAX, - I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP = 0xFD, - I40E_PHY_TYPE_EMPTY = 0xFE, - I40E_PHY_TYPE_DEFAULT = 0xFF, -}; - -#define I40E_LINK_SPEED_100MB_SHIFT 0x1 -#define I40E_LINK_SPEED_1000MB_SHIFT 0x2 -#define I40E_LINK_SPEED_10GB_SHIFT 0x3 -#define I40E_LINK_SPEED_40GB_SHIFT 0x4 -#define I40E_LINK_SPEED_20GB_SHIFT 0x5 -#define I40E_LINK_SPEED_25GB_SHIFT 0x6 - -enum i40e_aq_link_speed { - I40E_LINK_SPEED_UNKNOWN = 0, - I40E_LINK_SPEED_100MB = BIT(I40E_LINK_SPEED_100MB_SHIFT), - I40E_LINK_SPEED_1GB = BIT(I40E_LINK_SPEED_1000MB_SHIFT), - I40E_LINK_SPEED_10GB = BIT(I40E_LINK_SPEED_10GB_SHIFT), - I40E_LINK_SPEED_40GB = BIT(I40E_LINK_SPEED_40GB_SHIFT), - I40E_LINK_SPEED_20GB = BIT(I40E_LINK_SPEED_20GB_SHIFT), - I40E_LINK_SPEED_25GB = BIT(I40E_LINK_SPEED_25GB_SHIFT), -}; - 
-struct i40e_aqc_module_desc { - u8 oui[3]; - u8 reserved1; - u8 part_number[16]; - u8 revision[4]; - u8 reserved2[8]; -}; - -I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_module_desc); - -struct i40e_aq_get_phy_abilities_resp { - __le32 phy_type; /* bitmap using the above enum for offsets */ - u8 link_speed; /* bitmap using the above enum bit patterns */ - u8 abilities; -#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01 -#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02 -#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04 -#define I40E_AQ_PHY_LINK_ENABLED 0x08 -#define I40E_AQ_PHY_AN_ENABLED 0x10 -#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20 -#define I40E_AQ_PHY_FEC_ABILITY_KR 0x40 -#define I40E_AQ_PHY_FEC_ABILITY_RS 0x80 - __le16 eee_capability; -#define I40E_AQ_EEE_100BASE_TX 0x0002 -#define I40E_AQ_EEE_1000BASE_T 0x0004 -#define I40E_AQ_EEE_10GBASE_T 0x0008 -#define I40E_AQ_EEE_1000BASE_KX 0x0010 -#define I40E_AQ_EEE_10GBASE_KX4 0x0020 -#define I40E_AQ_EEE_10GBASE_KR 0x0040 - __le32 eeer_val; - u8 d3_lpan; -#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01 - u8 phy_type_ext; -#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0X01 -#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0X02 -#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04 -#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08 -#define I40E_AQ_PHY_TYPE_EXT_25G_AOC 0x10 -#define I40E_AQ_PHY_TYPE_EXT_25G_ACC 0x20 - u8 fec_cfg_curr_mod_ext_info; -#define I40E_AQ_ENABLE_FEC_KR 0x01 -#define I40E_AQ_ENABLE_FEC_RS 0x02 -#define I40E_AQ_REQUEST_FEC_KR 0x04 -#define I40E_AQ_REQUEST_FEC_RS 0x08 -#define I40E_AQ_ENABLE_FEC_AUTO 0x10 -#define I40E_AQ_FEC -#define I40E_AQ_MODULE_TYPE_EXT_MASK 0xE0 -#define I40E_AQ_MODULE_TYPE_EXT_SHIFT 5 - - u8 ext_comp_code; - u8 phy_id[4]; - u8 module_type[3]; - u8 qualified_module_count; -#define I40E_AQ_PHY_MAX_QMS 16 - struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS]; -}; - -I40E_CHECK_STRUCT_LEN(0x218, i40e_aq_get_phy_abilities_resp); - -/* Set PHY Config (direct 0x0601) */ -struct i40e_aq_set_phy_config { /* same bits as above in all */ - __le32 phy_type; - u8 link_speed; - u8 abilities; -/* bits 0-2 use the values from get_phy_abilities_resp */ -#define I40E_AQ_PHY_ENABLE_LINK 0x08 -#define I40E_AQ_PHY_ENABLE_AN 0x10 -#define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20 - __le16 eee_capability; - __le32 eeer; - u8 low_power_ctrl; - u8 phy_type_ext; -#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0X01 -#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0X02 -#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04 -#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08 - u8 fec_config; -#define I40E_AQ_SET_FEC_ABILITY_KR BIT(0) -#define I40E_AQ_SET_FEC_ABILITY_RS BIT(1) -#define I40E_AQ_SET_FEC_REQUEST_KR BIT(2) -#define I40E_AQ_SET_FEC_REQUEST_RS BIT(3) -#define I40E_AQ_SET_FEC_AUTO BIT(4) -#define I40E_AQ_PHY_FEC_CONFIG_SHIFT 0x0 -#define I40E_AQ_PHY_FEC_CONFIG_MASK (0x1F << I40E_AQ_PHY_FEC_CONFIG_SHIFT) - u8 reserved; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config); - -/* Set MAC Config command data structure (direct 0x0603) */ -struct i40e_aq_set_mac_config { - __le16 max_frame_size; - u8 params; -#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04 -#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78 -#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3 -#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0 -#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF -#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9 -#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8 -#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7 -#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6 -#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5 -#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4 
-#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3 -#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2 -#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1 - u8 tx_timer_priority; /* bitmap */ - __le16 tx_timer_value; - __le16 fc_refresh_threshold; - u8 reserved[8]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config); - -/* Restart Auto-Negotiation (direct 0x605) */ -struct i40e_aqc_set_link_restart_an { - u8 command; -#define I40E_AQ_PHY_RESTART_AN 0x02 -#define I40E_AQ_PHY_LINK_ENABLE 0x04 - u8 reserved[15]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an); - -/* Get Link Status cmd & response data structure (direct 0x0607) */ -struct i40e_aqc_get_link_status { - __le16 command_flags; /* only field set on command */ -#define I40E_AQ_LSE_MASK 0x3 -#define I40E_AQ_LSE_NOP 0x0 -#define I40E_AQ_LSE_DISABLE 0x2 -#define I40E_AQ_LSE_ENABLE 0x3 -/* only response uses this flag */ -#define I40E_AQ_LSE_IS_ENABLED 0x1 - u8 phy_type; /* i40e_aq_phy_type */ - u8 link_speed; /* i40e_aq_link_speed */ - u8 link_info; -#define I40E_AQ_LINK_UP 0x01 /* obsolete */ -#define I40E_AQ_LINK_UP_FUNCTION 0x01 -#define I40E_AQ_LINK_FAULT 0x02 -#define I40E_AQ_LINK_FAULT_TX 0x04 -#define I40E_AQ_LINK_FAULT_RX 0x08 -#define I40E_AQ_LINK_FAULT_REMOTE 0x10 -#define I40E_AQ_LINK_UP_PORT 0x20 -#define I40E_AQ_MEDIA_AVAILABLE 0x40 -#define I40E_AQ_SIGNAL_DETECT 0x80 - u8 an_info; -#define I40E_AQ_AN_COMPLETED 0x01 -#define I40E_AQ_LP_AN_ABILITY 0x02 -#define I40E_AQ_PD_FAULT 0x04 -#define I40E_AQ_FEC_EN 0x08 -#define I40E_AQ_PHY_LOW_POWER 0x10 -#define I40E_AQ_LINK_PAUSE_TX 0x20 -#define I40E_AQ_LINK_PAUSE_RX 0x40 -#define I40E_AQ_QUALIFIED_MODULE 0x80 - u8 ext_info; -#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01 -#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02 -#define I40E_AQ_LINK_TX_SHIFT 0x02 -#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT) -#define I40E_AQ_LINK_TX_ACTIVE 0x00 -#define I40E_AQ_LINK_TX_DRAINED 0x01 -#define I40E_AQ_LINK_TX_FLUSHED 0x03 -#define I40E_AQ_LINK_FORCED_40G 0x10 -/* 25G Error Codes */ -#define I40E_AQ_25G_NO_ERR 0X00 -#define I40E_AQ_25G_NOT_PRESENT 0X01 -#define I40E_AQ_25G_NVM_CRC_ERR 0X02 -#define I40E_AQ_25G_SBUS_UCODE_ERR 0X03 -#define I40E_AQ_25G_SERDES_UCODE_ERR 0X04 -#define I40E_AQ_25G_NIMB_UCODE_ERR 0X05 - u8 loopback; /* use defines from i40e_aqc_set_lb_mode */ -/* Since firmware API 1.7 loopback field keeps power class info as well */ -#define I40E_AQ_LOOPBACK_MASK 0x07 -#define I40E_AQ_PWR_CLASS_SHIFT_LB 6 -#define I40E_AQ_PWR_CLASS_MASK_LB (0x03 << I40E_AQ_PWR_CLASS_SHIFT_LB) - __le16 max_frame_size; - u8 config; -#define I40E_AQ_CONFIG_FEC_KR_ENA 0x01 -#define I40E_AQ_CONFIG_FEC_RS_ENA 0x02 -#define I40E_AQ_CONFIG_CRC_ENA 0x04 -#define I40E_AQ_CONFIG_PACING_MASK 0x78 - union { - struct { - u8 power_desc; -#define I40E_AQ_LINK_POWER_CLASS_1 0x00 -#define I40E_AQ_LINK_POWER_CLASS_2 0x01 -#define I40E_AQ_LINK_POWER_CLASS_3 0x02 -#define I40E_AQ_LINK_POWER_CLASS_4 0x03 -#define I40E_AQ_PWR_CLASS_MASK 0x03 - u8 reserved[4]; - }; - struct { - u8 link_type[4]; - u8 link_type_ext; - }; - }; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status); - -/* Set event mask command (direct 0x613) */ -struct i40e_aqc_set_phy_int_mask { - u8 reserved[8]; - __le16 event_mask; -#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002 -#define I40E_AQ_EVENT_MEDIA_NA 0x0004 -#define I40E_AQ_EVENT_LINK_FAULT 0x0008 -#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010 -#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020 -#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040 -#define I40E_AQ_EVENT_AN_COMPLETED 0x0080 -#define 
-#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200
- u8 reserved1[6];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask);
-
-/* Get Local AN advt register (direct 0x0614)
- * Set Local AN advt register (direct 0x0615)
- * Get Link Partner AN advt register (direct 0x0616)
- */
-struct i40e_aqc_an_advt_reg {
- __le32 local_an_reg0;
- __le16 local_an_reg1;
- u8 reserved[10];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg);
-
-/* Set Loopback mode (0x0618) */
-struct i40e_aqc_set_lb_mode {
- __le16 lb_mode;
-#define I40E_AQ_LB_PHY_LOCAL 0x01
-#define I40E_AQ_LB_PHY_REMOTE 0x02
-#define I40E_AQ_LB_MAC_LOCAL 0x04
- u8 reserved[14];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
-
-/* Set PHY Debug command (0x0622) */
-struct i40e_aqc_set_phy_debug {
- u8 command_flags;
-#define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02
-#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2
-#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \
- I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT)
-#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00
-#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01
-#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02
-#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10
- u8 reserved[15];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug);
-
-enum i40e_aq_phy_reg_type {
- I40E_AQC_PHY_REG_INTERNAL = 0x1,
- I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2,
- I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3
-};
-
-/* Run PHY Activity (0x0626) */
-struct i40e_aqc_run_phy_activity {
- __le16 activity_id;
- u8 flags;
- u8 reserved1;
- __le32 control;
- __le32 data;
- u8 reserved2[4];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_run_phy_activity);
-
-/* Set PHY Register command (0x0628) */
-/* Get PHY Register command (0x0629) */
-struct i40e_aqc_phy_register_access {
- u8 phy_interface;
-#define I40E_AQ_PHY_REG_ACCESS_INTERNAL 0
-#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL 1
-#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE 2
- u8 dev_address;
- u8 reserved1[2];
- __le32 reg_address;
- __le32 reg_value;
- u8 reserved2[4];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access);
-
-/* NVM Read command (indirect 0x0701)
- * NVM Erase commands (direct 0x0702)
- * NVM Update commands (indirect 0x0703)
- */
-struct i40e_aqc_nvm_update {
- u8 command_flags;
-#define I40E_AQ_NVM_LAST_CMD 0x01
-#define I40E_AQ_NVM_REARRANGE_TO_FLAT 0x20
-#define I40E_AQ_NVM_REARRANGE_TO_STRUCT 0x40
-#define I40E_AQ_NVM_FLASH_ONLY 0x80
-#define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT 1
-#define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK 0x03
-#define I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED 0x03
-#define I40E_AQ_NVM_PRESERVATION_FLAGS_ALL 0x01
- u8 module_pointer;
- __le16 length;
- __le32 offset;
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
-
-/* NVM Config Read (indirect 0x0704) */
-struct i40e_aqc_nvm_config_read {
- __le16 cmd_flags;
-#define I40E_AQ_ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1
-#define I40E_AQ_ANVM_READ_SINGLE_FEATURE 0
-#define I40E_AQ_ANVM_READ_MULTIPLE_FEATURES 1
- __le16 element_count;
- __le16 element_id; /* Feature/field ID */
- __le16 element_id_msw; /* MSWord of field ID */
- __le32 address_high;
- __le32 address_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read);
-
-/* NVM Config Write (indirect 0x0705) */
-struct i40e_aqc_nvm_config_write {
- __le16 cmd_flags;
- __le16 element_count;
- u8 reserved[4];
- __le32 address_high;
- __le32 address_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
-
-/* Used for 0x0704 as well as for 0x0705 commands */
-#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1
-#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \
- BIT(I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
-#define I40E_AQ_ANVM_FEATURE 0
-#define I40E_AQ_ANVM_IMMEDIATE_FIELD BIT(FEATURE_OR_IMMEDIATE_SHIFT)
-struct i40e_aqc_nvm_config_data_feature {
- __le16 feature_id;
-#define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01
-#define I40E_AQ_ANVM_FEATURE_OPTION_DWORD_MAP 0x08
-#define I40E_AQ_ANVM_FEATURE_OPTION_POR_CSR 0x10
- __le16 feature_options;
- __le16 feature_selection;
-};
-
-I40E_CHECK_STRUCT_LEN(0x6, i40e_aqc_nvm_config_data_feature);
-
-struct i40e_aqc_nvm_config_data_immediate_field {
- __le32 field_id;
- __le32 field_value;
- __le16 field_options;
- __le16 reserved;
-};
-
-I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field);
-
-/* OEM Post Update (indirect 0x0720)
- * no command data struct used
- */
- struct i40e_aqc_nvm_oem_post_update {
-#define I40E_AQ_NVM_OEM_POST_UPDATE_EXTERNAL_DATA 0x01
- u8 sel_data;
- u8 reserved[7];
-};
-
-I40E_CHECK_STRUCT_LEN(0x8, i40e_aqc_nvm_oem_post_update);
-
-struct i40e_aqc_nvm_oem_post_update_buffer {
- u8 str_len;
- u8 dev_addr;
- __le16 eeprom_addr;
- u8 data[36];
-};
-
-I40E_CHECK_STRUCT_LEN(0x28, i40e_aqc_nvm_oem_post_update_buffer);
-
-/* Thermal Sensor (indirect 0x0721)
- * read or set thermal sensor configs and values
- * takes a sensor and command specific data buffer, not detailed here
- */
-struct i40e_aqc_thermal_sensor {
- u8 sensor_action;
-#define I40E_AQ_THERMAL_SENSOR_READ_CONFIG 0
-#define I40E_AQ_THERMAL_SENSOR_SET_CONFIG 1
-#define I40E_AQ_THERMAL_SENSOR_READ_TEMP 2
- u8 reserved[7];
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_thermal_sensor);
-
-/* Send to PF command (indirect 0x0801) id is only used by PF
- * Send to VF command (indirect 0x0802) id is only used by PF
- * Send to Peer PF command (indirect 0x0803)
- */
-struct i40e_aqc_pf_vf_message {
- __le32 id;
- u8 reserved[4];
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message);
-
-/* Alternate structure */
-
-/* Direct write (direct 0x0900)
- * Direct read (direct 0x0902)
- */
-struct i40e_aqc_alternate_write {
- __le32 address0;
- __le32 data0;
- __le32 address1;
- __le32 data1;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write);
-
-/* Indirect write (indirect 0x0901)
- * Indirect read (indirect 0x0903)
- */
-
-struct i40e_aqc_alternate_ind_write {
- __le32 address;
- __le32 length;
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write);
-
-/* Done alternate write (direct 0x0904)
- * uses i40e_aq_desc
- */
-struct i40e_aqc_alternate_write_done {
- __le16 cmd_flags;
-#define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1
-#define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0
-#define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1
-#define I40E_AQ_ALTERNATE_RESET_NEEDED 2
- u8 reserved[14];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done);
-
-/* Set OEM mode (direct 0x0905) */
-struct i40e_aqc_alternate_set_mode {
- __le32 mode;
-#define I40E_AQ_ALTERNATE_MODE_NONE 0
-#define I40E_AQ_ALTERNATE_MODE_OEM 1
- u8 reserved[12];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode);
-
-/* Clear port Alternate RAM (direct 0x0906) uses i40e_aq_desc */
-
-/* async events 0x10xx */
-
-/* Lan Queue Overflow Event (direct, 0x1001) */
-struct i40e_aqc_lan_overflow {
- __le32 prtdcb_rupto;
- __le32 otx_ctl;
- u8 reserved[8];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow);
-
-/* Get LLDP MIB (indirect 0x0A00) */
-struct i40e_aqc_lldp_get_mib {
- u8 type;
- u8 reserved1;
-#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3
-#define I40E_AQ_LLDP_MIB_LOCAL 0x0
-#define I40E_AQ_LLDP_MIB_REMOTE 0x1
-#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2
-#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC
-#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2
-#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0
-#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1
-#define I40E_AQ_LLDP_TX_SHIFT 0x4
-#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT)
-/* TX pause flags use I40E_AQ_LINK_TX_* above */
- __le16 local_len;
- __le16 remote_len;
- u8 reserved2[2];
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib);
-
-/* Configure LLDP MIB Change Event (direct 0x0A01)
- * also used for the event (with type in the command field)
- */
-struct i40e_aqc_lldp_update_mib {
- u8 command;
-#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
-#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1
- u8 reserved[7];
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib);
-
-/* Add LLDP TLV (indirect 0x0A02)
- * Delete LLDP TLV (indirect 0x0A04)
- */
-struct i40e_aqc_lldp_add_tlv {
- u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
- u8 reserved1[1];
- __le16 len;
- u8 reserved2[4];
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv);
-
-/* Update LLDP TLV (indirect 0x0A03) */
-struct i40e_aqc_lldp_update_tlv {
- u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
- u8 reserved;
- __le16 old_len;
- __le16 new_offset;
- __le16 new_len;
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv);
-
-/* Stop LLDP (direct 0x0A05) */
-struct i40e_aqc_lldp_stop {
- u8 command;
-#define I40E_AQ_LLDP_AGENT_STOP 0x0
-#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1
- u8 reserved[15];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop);
-
-/* Start LLDP (direct 0x0A06) */
-
-struct i40e_aqc_lldp_start {
- u8 command;
-#define I40E_AQ_LLDP_AGENT_START 0x1
- u8 reserved[15];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
-
-/* Set DCB (direct 0x0303) */
-struct i40e_aqc_set_dcb_parameters {
- u8 command;
-#define I40E_AQ_DCB_SET_AGENT 0x1
-#define I40E_DCB_VALID 0x1
- u8 valid_flags;
- u8 reserved[14];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_dcb_parameters);
-
-/* Apply MIB changes (0x0A07)
- * uses the generic struc as it contains no data
- */
-
-/* Add Udp Tunnel command and completion (direct 0x0B00) */
-struct i40e_aqc_add_udp_tunnel {
- __le16 udp_port;
- u8 reserved0[3];
- u8 protocol_type;
-#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00
-#define I40E_AQC_TUNNEL_TYPE_NGE 0x01
-#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10
-#define I40E_AQC_TUNNEL_TYPE_VXLAN_GPE 0x11
- u8 reserved1[10];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
-
-struct i40e_aqc_add_udp_tunnel_completion {
- __le16 udp_port;
- u8 filter_entry_index;
- u8 multiple_pfs;
-#define I40E_AQC_SINGLE_PF 0x0
-#define I40E_AQC_MULTIPLE_PFS 0x1
- u8 total_filters;
- u8 reserved[11];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion);
-
-/* remove UDP Tunnel command (0x0B01) */
-struct i40e_aqc_remove_udp_tunnel {
- u8 reserved[2];
- u8 index; /* 0 to 15 */
- u8 reserved2[13];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel);
-
-struct i40e_aqc_del_udp_tunnel_completion {
- __le16 udp_port;
- u8 index; /* 0 to 15 */
- u8 multiple_pfs;
- u8 total_filters_used;
- u8 reserved1[11];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
-
-struct i40e_aqc_get_set_rss_key {
-#define I40E_AQC_SET_RSS_KEY_VSI_VALID BIT(15)
-#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0
-#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \
- I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
- __le16 vsi_id;
- u8 reserved[6];
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key);
-
-struct i40e_aqc_get_set_rss_key_data {
- u8 standard_rss_key[0x28];
- u8 extended_hash_key[0xc];
-};
-
-I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
-
-struct i40e_aqc_get_set_rss_lut {
-#define I40E_AQC_SET_RSS_LUT_VSI_VALID BIT(15)
-#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0
-#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \
- I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
- __le16 vsi_id;
-#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0
-#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK \
- BIT(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
-
-#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0
-#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1
- __le16 flags;
- u8 reserved[4];
+ u8 reserved[4];
 __le32 addr_high;
 __le32 addr_low;
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut);
-
-/* tunnel key structure 0x0B10 */
-
-struct i40e_aqc_tunnel_key_structure_A0 {
- __le16 key1_off;
- __le16 key1_len;
- __le16 key2_off;
- __le16 key2_len;
- __le16 flags;
-#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
-/* response flags */
-#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
-#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
-#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
- u8 resreved[6];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure_A0);
-
-struct i40e_aqc_tunnel_key_structure {
- u8 key1_off;
- u8 key2_off;
- u8 key1_len; /* 0 to 15 */
- u8 key2_len; /* 0 to 15 */
- u8 flags;
-#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
-/* response flags */
-#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
-#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
-#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
- u8 network_key_index;
-#define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0
-#define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1
-#define I40E_AQC_NETWORK_KEY_INDEX_FLEX_MAC_IN_UDP 0x2
-#define I40E_AQC_NETWORK_KEY_INDEX_GRE 0x3
- u8 reserved[10];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure);
-
-/* OEM mode commands (direct 0xFE0x) */
-struct i40e_aqc_oem_param_change {
- __le32 param_type;
-#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0
-#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1
-#define I40E_AQ_OEM_PARAM_MAC 2
- __le32 param_value1;
- __le16 param_value2;
- u8 reserved[6];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change);
-
-struct i40e_aqc_oem_state_change {
- __le32 state;
-#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0
-#define I40E_AQ_OEM_STATE_LINK_UP 0x1
- u8 reserved[12];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change);
-
-/* Initialize OCSD (0xFE02, direct) */
-struct i40e_aqc_opc_oem_ocsd_initialize {
- u8 type_status;
- u8 reserved1[3];
- __le32 ocsd_memory_block_addr_high;
- __le32 ocsd_memory_block_addr_low;
- __le32 requested_update_interval;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocsd_initialize);
-
-/* Initialize OCBB (0xFE03, direct) */
-struct i40e_aqc_opc_oem_ocbb_initialize {
- u8 type_status;
- u8 reserved1[3];
- __le32 ocbb_memory_block_addr_high;
- __le32 ocbb_memory_block_addr_low;
- u8 reserved2[4];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocbb_initialize);
-
-/* debug commands */
-
-/* get device id (0xFF00) uses the generic structure */
-
-/* set test more (0xFF01, internal) */
-
-struct i40e_acq_set_test_mode {
- u8 mode;
-#define I40E_AQ_TEST_PARTIAL 0
-#define I40E_AQ_TEST_FULL 1
-#define I40E_AQ_TEST_NVM 2
- u8 reserved[3];
- u8 command;
-#define I40E_AQ_TEST_OPEN 0
-#define I40E_AQ_TEST_CLOSE 1
-#define I40E_AQ_TEST_INC 2
- u8 reserved2[3];
- __le32 address_high;
- __le32 address_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode);
-
-/* Debug Read Register command (0xFF03)
- * Debug Write Register command (0xFF04)
- */
-struct i40e_aqc_debug_reg_read_write {
- __le32 reserved;
- __le32 address;
- __le32 value_high;
- __le32 value_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_reg_read_write);
-
-/* Scatter/gather Reg Read (indirect 0xFF05)
- * Scatter/gather Reg Write (indirect 0xFF06)
- */
-
-/* i40e_aq_desc is used for the command */
-struct i40e_aqc_debug_reg_sg_element_data {
- __le32 address;
- __le32 value;
-};
-
-/* Debug Modify register (direct 0xFF07) */
-struct i40e_aqc_debug_modify_reg {
- __le32 address;
- __le32 value;
- __le32 clear_mask;
- __le32 set_mask;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg);
-
-/* dump internal data (0xFF08, indirect) */
-
-#define I40E_AQ_CLUSTER_ID_AUX 0
-#define I40E_AQ_CLUSTER_ID_SWITCH_FLU 1
-#define I40E_AQ_CLUSTER_ID_TXSCHED 2
-#define I40E_AQ_CLUSTER_ID_HMC 3
-#define I40E_AQ_CLUSTER_ID_MAC0 4
-#define I40E_AQ_CLUSTER_ID_MAC1 5
-#define I40E_AQ_CLUSTER_ID_MAC2 6
-#define I40E_AQ_CLUSTER_ID_MAC3 7
-#define I40E_AQ_CLUSTER_ID_DCB 8
-#define I40E_AQ_CLUSTER_ID_EMP_MEM 9
-#define I40E_AQ_CLUSTER_ID_PKT_BUF 10
-#define I40E_AQ_CLUSTER_ID_ALTRAM 11
-
-struct i40e_aqc_debug_dump_internals {
- u8 cluster_id;
- u8 table_id;
- __le16 data_size;
- __le32 idx;
- __le32 address_high;
- __le32 address_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals);
-
-struct i40e_aqc_debug_modify_internals {
- u8 cluster_id;
- u8 cluster_specific_params[7];
- __le32 address_high;
- __le32 address_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals);
-
 #endif /* _I40E_ADMINQ_CMD_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/i40e_common.c b/drivers/net/ethernet/intel/iavf/i40e_common.c
index eea280ba411e..f34091d96f49 100644
--- a/drivers/net/ethernet/intel/iavf/i40e_common.c
+++ b/drivers/net/ethernet/intel/iavf/i40e_common.c
@@ -525,7 +525,6 @@ i40e_status i40evf_aq_set_rss_key(struct i40e_hw *hw,
 return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
 }
 
-
 /* The i40evf_ptype_lookup table is used to convert from the 8-bit ptype in the
 * hardware to a bit-field that can be used by SW to more easily determine the
 * packet type.
@@ -891,135 +890,6 @@ struct i40e_rx_ptype_decoded i40evf_ptype_lookup[] = {
 I40E_PTT_UNUSED_ENTRY(255)
 };
 
-/**
- * i40evf_aq_rx_ctl_read_register - use FW to read from an Rx control register
- * @hw: pointer to the hw struct
- * @reg_addr: register address
- * @reg_val: ptr to register value
- * @cmd_details: pointer to command details structure or NULL
- *
- * Use the firmware to read the Rx control register,
- * especially useful if the Rx unit is under heavy pressure
- **/
-i40e_status i40evf_aq_rx_ctl_read_register(struct i40e_hw *hw,
- u32 reg_addr, u32 *reg_val,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp =
- (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
- i40e_status status;
-
- if (!reg_val)
- return I40E_ERR_PARAM;
-
- i40evf_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_rx_ctl_reg_read);
-
- cmd_resp->address = cpu_to_le32(reg_addr);
-
- status = i40evf_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
- if (status == 0)
- *reg_val = le32_to_cpu(cmd_resp->value);
-
- return status;
-}
-
-/**
- * i40evf_read_rx_ctl - read from an Rx control register
- * @hw: pointer to the hw struct
- * @reg_addr: register address
- **/
-u32 i40evf_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
-{
- i40e_status status = 0;
- bool use_register;
- int retry = 5;
- u32 val = 0;
-
- use_register = (((hw->aq.api_maj_ver == 1) &&
- (hw->aq.api_min_ver < 5)) ||
- (hw->mac.type == I40E_MAC_X722));
- if (!use_register) {
-do_retry:
- status = i40evf_aq_rx_ctl_read_register(hw, reg_addr,
- &val, NULL);
- if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
- usleep_range(1000, 2000);
- retry--;
- goto do_retry;
- }
- }
-
- /* if the AQ access failed, try the old-fashioned way */
- if (status || use_register)
- val = rd32(hw, reg_addr);
-
- return val;
-}
-
-/**
- * i40evf_aq_rx_ctl_write_register
- * @hw: pointer to the hw struct
- * @reg_addr: register address
- * @reg_val: register value
- * @cmd_details: pointer to command details structure or NULL
- *
- * Use the firmware to write to an Rx control register,
- * especially useful if the Rx unit is under heavy pressure
- **/
-i40e_status i40evf_aq_rx_ctl_write_register(struct i40e_hw *hw,
- u32 reg_addr, u32 reg_val,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_rx_ctl_reg_read_write *cmd =
- (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
- i40e_status status;
-
- i40evf_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_rx_ctl_reg_write);
-
- cmd->address = cpu_to_le32(reg_addr);
- cmd->value = cpu_to_le32(reg_val);
-
- status = i40evf_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
- return status;
-}
-
-/**
- * i40evf_write_rx_ctl - write to an Rx control register
- * @hw: pointer to the hw struct
- * @reg_addr: register address
- * @reg_val: register value
- **/
-void i40evf_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
-{
- i40e_status status = 0;
- bool use_register;
- int retry = 5;
-
- use_register = (((hw->aq.api_maj_ver == 1) &&
- (hw->aq.api_min_ver < 5)) ||
- (hw->mac.type == I40E_MAC_X722));
- if (!use_register) {
-do_retry:
- status = i40evf_aq_rx_ctl_write_register(hw, reg_addr,
- reg_val, NULL);
- if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
- usleep_range(1000, 2000);
- retry--;
- goto do_retry;
- }
- }
-
- /* if the AQ access failed, try the old-fashioned way */
- if (status || use_register)
- wr32(hw, reg_addr, reg_val);
-}
-
 /**
 * i40e_aq_send_msg_to_pf
 * @hw: pointer to the hardware structure
@@ -1110,211 +980,3 @@ i40e_status i40e_vf_reset(struct i40e_hw *hw)
 return i40e_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF,
 0, NULL, 0, NULL);
 }
-
-/**
- * i40evf_aq_write_ddp - Write dynamic device personalization (ddp)
- * @hw: pointer to the hw struct
- * @buff: command buffer (size in bytes = buff_size)
- * @buff_size: buffer size in bytes
- * @track_id: package tracking id
- * @error_offset: returns error offset
- * @error_info: returns error information
- * @cmd_details: pointer to command details structure or NULL
- **/
-enum
-i40e_status_code i40evf_aq_write_ddp(struct i40e_hw *hw, void *buff,
- u16 buff_size, u32 track_id,
- u32 *error_offset, u32 *error_info,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_write_personalization_profile *cmd =
- (struct i40e_aqc_write_personalization_profile *)
- &desc.params.raw;
- struct i40e_aqc_write_ddp_resp *resp;
- i40e_status status;
-
- i40evf_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_write_personalization_profile);
-
- desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
- if (buff_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
-
- desc.datalen = cpu_to_le16(buff_size);
-
- cmd->profile_track_id = cpu_to_le32(track_id);
-
- status = i40evf_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
- if (!status) {
- resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
- if (error_offset)
- *error_offset = le32_to_cpu(resp->error_offset);
- if (error_info)
- *error_info = le32_to_cpu(resp->error_info);
- }
-
- return status;
-}
-
-/**
- * i40evf_aq_get_ddp_list - Read dynamic device personalization (ddp)
- * @hw: pointer to the hw struct
- * @buff: command buffer (size in bytes = buff_size)
- * @buff_size: buffer size in bytes
- * @flags: AdminQ command flags
- * @cmd_details: pointer to command details structure or NULL
- **/
-enum
-i40e_status_code i40evf_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
- u16 buff_size, u8 flags,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_get_applied_profiles *cmd =
- (struct i40e_aqc_get_applied_profiles *)&desc.params.raw;
- i40e_status status;
-
- i40evf_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_get_personalization_profile_list);
-
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
- if (buff_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
- desc.datalen = cpu_to_le16(buff_size);
-
- cmd->flags = flags;
-
- status = i40evf_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
-
- return status;
-}
-
-/**
- * i40evf_find_segment_in_package
- * @segment_type: the segment type to search for (i.e., SEGMENT_TYPE_I40E)
- * @pkg_hdr: pointer to the package header to be searched
- *
- * This function searches a package file for a particular segment type. On
- * success it returns a pointer to the segment header, otherwise it will
- * return NULL.
- **/
-struct i40e_generic_seg_header *
-i40evf_find_segment_in_package(u32 segment_type,
- struct i40e_package_header *pkg_hdr)
-{
- struct i40e_generic_seg_header *segment;
- u32 i;
-
- /* Search all package segments for the requested segment type */
- for (i = 0; i < pkg_hdr->segment_count; i++) {
- segment =
- (struct i40e_generic_seg_header *)((u8 *)pkg_hdr +
- pkg_hdr->segment_offset[i]);
-
- if (segment->type == segment_type)
- return segment;
- }
-
- return NULL;
-}
-
-/**
- * i40evf_write_profile
- * @hw: pointer to the hardware structure
- * @profile: pointer to the profile segment of the package to be downloaded
- * @track_id: package tracking id
- *
- * Handles the download of a complete package.
- */
-enum i40e_status_code
-i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
- u32 track_id)
-{
- i40e_status status = 0;
- struct i40e_section_table *sec_tbl;
- struct i40e_profile_section_header *sec = NULL;
- u32 dev_cnt;
- u32 vendor_dev_id;
- u32 *nvm;
- u32 section_size = 0;
- u32 offset = 0, info = 0;
- u32 i;
-
- dev_cnt = profile->device_table_count;
-
- for (i = 0; i < dev_cnt; i++) {
- vendor_dev_id = profile->device_table[i].vendor_dev_id;
- if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL)
- if (hw->device_id == (vendor_dev_id & 0xFFFF))
- break;
- }
- if (i == dev_cnt) {
- i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support DDP");
- return I40E_ERR_DEVICE_NOT_SUPPORTED;
- }
-
- nvm = (u32 *)&profile->device_table[dev_cnt];
- sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1];
-
- for (i = 0; i < sec_tbl->section_count; i++) {
- sec = (struct i40e_profile_section_header *)((u8 *)profile +
- sec_tbl->section_offset[i]);
-
- /* Skip 'AQ', 'note' and 'name' sections */
- if (sec->section.type != SECTION_TYPE_MMIO)
- continue;
-
- section_size = sec->section.size +
- sizeof(struct i40e_profile_section_header);
-
- /* Write profile */
- status = i40evf_aq_write_ddp(hw, (void *)sec, (u16)section_size,
- track_id, &offset, &info, NULL);
- if (status) {
- i40e_debug(hw, I40E_DEBUG_PACKAGE,
- "Failed to write profile: offset %d, info %d",
- offset, info);
- break;
- }
- }
- return status;
-}
-
-/**
- * i40evf_add_pinfo_to_list
- * @hw: pointer to the hardware structure
- * @profile: pointer to the profile segment of the package
- * @profile_info_sec: buffer for information section
- * @track_id: package tracking id
- *
- * Register a profile to the list of loaded profiles.
- */
-enum i40e_status_code
-i40evf_add_pinfo_to_list(struct i40e_hw *hw,
- struct i40e_profile_segment *profile,
- u8 *profile_info_sec, u32 track_id)
-{
- i40e_status status = 0;
- struct i40e_profile_section_header *sec = NULL;
- struct i40e_profile_info *pinfo;
- u32 offset = 0, info = 0;
-
- sec = (struct i40e_profile_section_header *)profile_info_sec;
- sec->tbl_size = 1;
- sec->data_end = sizeof(struct i40e_profile_section_header) +
- sizeof(struct i40e_profile_info);
- sec->section.type = SECTION_TYPE_INFO;
- sec->section.offset = sizeof(struct i40e_profile_section_header);
- sec->section.size = sizeof(struct i40e_profile_info);
- pinfo = (struct i40e_profile_info *)(profile_info_sec +
- sec->section.offset);
- pinfo->track_id = track_id;
- pinfo->version = profile->version;
- pinfo->op = I40E_DDP_ADD_TRACKID;
- memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
-
- status = i40evf_aq_write_ddp(hw, (void *)sec, sec->data_end,
- track_id, &offset, &info, NULL);
- return status;
-}
diff --git a/drivers/net/ethernet/intel/iavf/i40e_hmc.h b/drivers/net/ethernet/intel/iavf/i40e_hmc.h
deleted file mode 100644
index 1c78de838857..000000000000
--- a/drivers/net/ethernet/intel/iavf/i40e_hmc.h
+++ /dev/null
@@ -1,215 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-#ifndef _I40E_HMC_H_
-#define _I40E_HMC_H_
-
-#define I40E_HMC_MAX_BP_COUNT 512
-
-/* forward-declare the HW struct for the compiler */
-struct i40e_hw;
-
-#define I40E_HMC_INFO_SIGNATURE 0x484D5347 /* HMSG */
-#define I40E_HMC_PD_CNT_IN_SD 512
-#define I40E_HMC_DIRECT_BP_SIZE 0x200000 /* 2M */
-#define I40E_HMC_PAGED_BP_SIZE 4096
-#define I40E_HMC_PD_BP_BUF_ALIGNMENT 4096
-#define I40E_FIRST_VF_FPM_ID 16
-
-struct i40e_hmc_obj_info {
- u64 base; /* base addr in FPM */
- u32 max_cnt; /* max count available for this hmc func */
- u32 cnt; /* count of objects driver actually wants to create */
- u64 size; /* size in bytes of one object */
-};
-
-enum i40e_sd_entry_type {
- I40E_SD_TYPE_INVALID = 0,
- I40E_SD_TYPE_PAGED = 1,
- I40E_SD_TYPE_DIRECT = 2
-};
-
-struct i40e_hmc_bp {
- enum i40e_sd_entry_type entry_type;
- struct i40e_dma_mem addr; /* populate to be used by hw */
- u32 sd_pd_index;
- u32 ref_cnt;
-};
-
-struct i40e_hmc_pd_entry {
- struct i40e_hmc_bp bp;
- u32 sd_index;
- bool rsrc_pg;
- bool valid;
-};
-
-struct i40e_hmc_pd_table {
- struct i40e_dma_mem pd_page_addr; /* populate to be used by hw */
- struct i40e_hmc_pd_entry *pd_entry; /* [512] for sw book keeping */
- struct i40e_virt_mem pd_entry_virt_mem; /* virt mem for pd_entry */
-
- u32 ref_cnt;
- u32 sd_index;
-};
-
-struct i40e_hmc_sd_entry {
- enum i40e_sd_entry_type entry_type;
- bool valid;
-
- union {
- struct i40e_hmc_pd_table pd_table;
- struct i40e_hmc_bp bp;
- } u;
-};
-
-struct i40e_hmc_sd_table {
- struct i40e_virt_mem addr; /* used to track sd_entry allocations */
- u32 sd_cnt;
- u32 ref_cnt;
- struct i40e_hmc_sd_entry *sd_entry; /* (sd_cnt*512) entries max */
-};
-
-struct i40e_hmc_info {
- u32 signature;
- /* equals to pci func num for PF and dynamically allocated for VFs */
- u8 hmc_fn_id;
- u16 first_sd_index; /* index of the first available SD */
-
- /* hmc objects */
- struct i40e_hmc_obj_info *hmc_obj;
- struct i40e_virt_mem hmc_obj_virt_mem;
- struct i40e_hmc_sd_table sd_table;
-};
-
-#define I40E_INC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt++)
-#define I40E_INC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt++)
-#define I40E_INC_BP_REFCNT(bp) ((bp)->ref_cnt++)
-
-#define I40E_DEC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt--)
-#define I40E_DEC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt--)
-#define I40E_DEC_BP_REFCNT(bp) ((bp)->ref_cnt--)
-
-/**
- * I40E_SET_PF_SD_ENTRY - marks the sd entry as valid in the hardware
- * @hw: pointer to our hw struct
- * @pa: pointer to physical address
- * @sd_index: segment descriptor index
- * @type: if sd entry is direct or paged
- **/
-#define I40E_SET_PF_SD_ENTRY(hw, pa, sd_index, type) \
-{ \
- u32 val1, val2, val3; \
- val1 = (u32)(upper_32_bits(pa)); \
- val2 = (u32)(pa) | (I40E_HMC_MAX_BP_COUNT << \
- I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
- ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
- I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \
- BIT(I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \
- val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
- wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \
- wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
- wr32((hw), I40E_PFHMC_SDCMD, val3); \
-}
-
-/**
- * I40E_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware
- * @hw: pointer to our hw struct
- * @sd_index: segment descriptor index
- * @type: if sd entry is direct or paged
- **/
-#define I40E_CLEAR_PF_SD_ENTRY(hw, sd_index, type) \
-{ \
- u32 val2, val3; \
- val2 = (I40E_HMC_MAX_BP_COUNT << \
- I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
- ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
- I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \
- val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
- wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \
- wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
- wr32((hw), I40E_PFHMC_SDCMD, val3); \
-}
-
-/**
- * I40E_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware
- * @hw: pointer to our hw struct
- * @sd_idx: segment descriptor index
- * @pd_idx: page descriptor index
- **/
-#define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \
- wr32((hw), I40E_PFHMC_PDINV, \
- (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
- ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
-
-/**
- * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit
- * @hmc_info: pointer to the HMC configuration information structure
- * @type: type of HMC resources we're searching
- * @index: starting index for the object
- * @cnt: number of objects we're trying to create
- * @sd_idx: pointer to return index of the segment descriptor in question
- * @sd_limit: pointer to return the maximum number of segment descriptors
- *
- * This function calculates the segment descriptor index and index limit
- * for the resource defined by i40e_hmc_rsrc_type.
- **/
-#define I40E_FIND_SD_INDEX_LIMIT(hmc_info, type, index, cnt, sd_idx, sd_limit)\
-{ \
- u64 fpm_addr, fpm_limit; \
- fpm_addr = (hmc_info)->hmc_obj[(type)].base + \
- (hmc_info)->hmc_obj[(type)].size * (index); \
- fpm_limit = fpm_addr + (hmc_info)->hmc_obj[(type)].size * (cnt);\
- *(sd_idx) = (u32)(fpm_addr / I40E_HMC_DIRECT_BP_SIZE); \
- *(sd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_DIRECT_BP_SIZE); \
- /* add one more to the limit to correct our range */ \
- *(sd_limit) += 1; \
-}
-
-/**
- * I40E_FIND_PD_INDEX_LIMIT - finds page descriptor index limit
- * @hmc_info: pointer to the HMC configuration information struct
- * @type: HMC resource type we're examining
- * @idx: starting index for the object
- * @cnt: number of objects we're trying to create
- * @pd_index: pointer to return page descriptor index
- * @pd_limit: pointer to return page descriptor index limit
- *
- * Calculates the page descriptor index and index limit for the resource
- * defined by i40e_hmc_rsrc_type.
- **/
-#define I40E_FIND_PD_INDEX_LIMIT(hmc_info, type, idx, cnt, pd_index, pd_limit)\
-{ \
- u64 fpm_adr, fpm_limit; \
- fpm_adr = (hmc_info)->hmc_obj[(type)].base + \
- (hmc_info)->hmc_obj[(type)].size * (idx); \
- fpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt); \
- *(pd_index) = (u32)(fpm_adr / I40E_HMC_PAGED_BP_SIZE); \
- *(pd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_PAGED_BP_SIZE); \
- /* add one more to the limit to correct our range */ \
- *(pd_limit) += 1; \
-}
-i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 sd_index,
- enum i40e_sd_entry_type type,
- u64 direct_mode_sz);
-
-i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 pd_index,
- struct i40e_dma_mem *rsrc_pg);
-i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 idx);
-i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
- u32 idx);
-i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 idx, bool is_pf);
-i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
- u32 idx);
-i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 idx, bool is_pf);
-
-#endif /* _I40E_HMC_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/i40e_lan_hmc.h b/drivers/net/ethernet/intel/iavf/i40e_lan_hmc.h
deleted file mode 100644
index 82b00f70a632..000000000000
--- a/drivers/net/ethernet/intel/iavf/i40e_lan_hmc.h
+++ /dev/null
@@ -1,158 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-#ifndef _I40E_LAN_HMC_H_
-#define _I40E_LAN_HMC_H_
-
-/* forward-declare the HW struct for the compiler */
-struct i40e_hw;
-
-/* HMC element context information */
-
-/* Rx queue context data
- *
- * The sizes of the variables may be larger than needed due to crossing byte
- * boundaries. If we do not have the width of the variable set to the correct
- * size then we could end up shifting bits off the top of the variable when the
- * variable is at the top of a byte and crosses over into the next byte.
- */
-struct i40e_hmc_obj_rxq {
- u16 head;
- u16 cpuid; /* bigger than needed, see above for reason */
- u64 base;
- u16 qlen;
-#define I40E_RXQ_CTX_DBUFF_SHIFT 7
- u16 dbuff; /* bigger than needed, see above for reason */
-#define I40E_RXQ_CTX_HBUFF_SHIFT 6
- u16 hbuff; /* bigger than needed, see above for reason */
- u8 dtype;
- u8 dsize;
- u8 crcstrip;
- u8 fc_ena;
- u8 l2tsel;
- u8 hsplit_0;
- u8 hsplit_1;
- u8 showiv;
- u32 rxmax; /* bigger than needed, see above for reason */
- u8 tphrdesc_ena;
- u8 tphwdesc_ena;
- u8 tphdata_ena;
- u8 tphhead_ena;
- u16 lrxqthresh; /* bigger than needed, see above for reason */
- u8 prefena; /* NOTE: normally must be set to 1 at init */
-};
-
-/* Tx queue context data
-*
-* The sizes of the variables may be larger than needed due to crossing byte
-* boundaries. If we do not have the width of the variable set to the correct
-* size then we could end up shifting bits off the top of the variable when the
-* variable is at the top of a byte and crosses over into the next byte.
-*/
-struct i40e_hmc_obj_txq {
- u16 head;
- u8 new_context;
- u64 base;
- u8 fc_ena;
- u8 timesync_ena;
- u8 fd_ena;
- u8 alt_vlan_ena;
- u16 thead_wb;
- u8 cpuid;
- u8 head_wb_ena;
- u16 qlen;
- u8 tphrdesc_ena;
- u8 tphrpacket_ena;
- u8 tphwdesc_ena;
- u64 head_wb_addr;
- u32 crc;
- u16 rdylist;
- u8 rdylist_act;
-};
-
-/* for hsplit_0 field of Rx HMC context */
-enum i40e_hmc_obj_rx_hsplit_0 {
- I40E_HMC_OBJ_RX_HSPLIT_0_NO_SPLIT = 0,
- I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 = 1,
- I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP = 2,
- I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP = 4,
- I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP = 8,
-};
-
-/* fcoe_cntx and fcoe_filt are for debugging purpose only */
-struct i40e_hmc_obj_fcoe_cntx {
- u32 rsv[32];
-};
-
-struct i40e_hmc_obj_fcoe_filt {
- u32 rsv[8];
-};
-
-/* Context sizes for LAN objects */
-enum i40e_hmc_lan_object_size {
- I40E_HMC_LAN_OBJ_SZ_8 = 0x3,
- I40E_HMC_LAN_OBJ_SZ_16 = 0x4,
- I40E_HMC_LAN_OBJ_SZ_32 = 0x5,
- I40E_HMC_LAN_OBJ_SZ_64 = 0x6,
- I40E_HMC_LAN_OBJ_SZ_128 = 0x7,
- I40E_HMC_LAN_OBJ_SZ_256 = 0x8,
- I40E_HMC_LAN_OBJ_SZ_512 = 0x9,
-};
-
-#define I40E_HMC_L2OBJ_BASE_ALIGNMENT 512
-#define I40E_HMC_OBJ_SIZE_TXQ 128
-#define I40E_HMC_OBJ_SIZE_RXQ 32
-#define I40E_HMC_OBJ_SIZE_FCOE_CNTX 128
-#define I40E_HMC_OBJ_SIZE_FCOE_FILT 64
-
-enum i40e_hmc_lan_rsrc_type {
- I40E_HMC_LAN_FULL = 0,
- I40E_HMC_LAN_TX = 1,
- I40E_HMC_LAN_RX = 2,
- I40E_HMC_FCOE_CTX = 3,
- I40E_HMC_FCOE_FILT = 4,
- I40E_HMC_LAN_MAX = 5
-};
-
-enum i40e_hmc_model {
- I40E_HMC_MODEL_DIRECT_PREFERRED = 0,
- I40E_HMC_MODEL_DIRECT_ONLY = 1,
- I40E_HMC_MODEL_PAGED_ONLY = 2,
- I40E_HMC_MODEL_UNKNOWN,
-};
-
-struct i40e_hmc_lan_create_obj_info {
- struct i40e_hmc_info *hmc_info;
- u32 rsrc_type;
- u32 start_idx;
- u32 count;
- enum i40e_sd_entry_type entry_type;
- u64 direct_mode_sz;
-};
-
-struct i40e_hmc_lan_delete_obj_info {
- struct i40e_hmc_info *hmc_info;
- u32 rsrc_type;
- u32 start_idx;
- u32 count;
-};
-
-i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
- u32 rxq_num, u32 fcoe_cntx_num,
- u32 fcoe_filt_num);
-i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
- enum i40e_hmc_model model);
-i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw);
-
-i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
- u16 queue);
-i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
- u16 queue,
- struct i40e_hmc_obj_txq *s);
-i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
- u16 queue);
-i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
- u16 queue,
- struct i40e_hmc_obj_rxq *s);
-
-#endif /* _I40E_LAN_HMC_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/i40e_prototype.h b/drivers/net/ethernet/intel/iavf/i40e_prototype.h
index a358f4b9d5aa..ef7f74489bfc 100644
--- a/drivers/net/ethernet/intel/iavf/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/iavf/i40e_prototype.h
@@ -60,71 +60,12 @@ static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
 return i40evf_ptype_lookup[ptype];
 }
 
-/* prototype for functions used for SW locks */
-
 /* i40e_common for VF drivers*/
 void i40e_vf_parse_hw_config(struct i40e_hw *hw,
 struct virtchnl_vf_resource *msg);
 i40e_status i40e_vf_reset(struct i40e_hw *hw);
 i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
- enum virtchnl_ops v_opcode,
- i40e_status v_retval,
- u8 *msg, u16 msglen,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_set_filter_control(struct i40e_hw *hw,
- struct i40e_filter_control_settings *settings);
-i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
- u8 *mac_addr, u16 ethtype, u16 flags,
- u16 vsi_seid, u16 queue, bool is_add,
- struct i40e_control_filter_stats *stats,
- struct i40e_asq_cmd_details *cmd_details);
-void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
- u16 vsi_seid);
-i40e_status i40evf_aq_rx_ctl_read_register(struct i40e_hw *hw,
- u32 reg_addr, u32 *reg_val,
- struct i40e_asq_cmd_details *cmd_details);
-u32 i40evf_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr);
-i40e_status i40evf_aq_rx_ctl_write_register(struct i40e_hw *hw,
- u32 reg_addr, u32 reg_val,
- struct i40e_asq_cmd_details *cmd_details);
-void i40evf_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val);
-i40e_status i40e_aq_set_phy_register(struct i40e_hw *hw,
- u8 phy_select, u8 dev_addr,
- u32 reg_addr, u32 reg_val,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw,
- u8 phy_select, u8 dev_addr,
- u32 reg_addr, u32 *reg_val,
- struct i40e_asq_cmd_details *cmd_details);
-
-i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page,
- u16 reg, u8 phy_addr, u16 *value);
-i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page,
- u16 reg, u8 phy_addr, u16 value);
-i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
- u8 phy_addr, u16 *value);
-i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
- u8 phy_addr, u16 value);
-u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
-i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
- u32 time, u32 interval);
-i40e_status i40evf_aq_write_ddp(struct i40e_hw *hw, void *buff,
- u16 buff_size, u32 track_id,
- u32 *error_offset, u32 *error_info,
- struct i40e_asq_cmd_details *
- cmd_details);
-i40e_status i40evf_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
- u16 buff_size, u8 flags,
- struct i40e_asq_cmd_details *
- cmd_details);
-struct i40e_generic_seg_header *
-i40evf_find_segment_in_package(u32 segment_type,
- struct i40e_package_header *pkg_header);
-enum i40e_status_code
-i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
- u32 track_id);
-enum i40e_status_code
-i40evf_add_pinfo_to_list(struct i40e_hw *hw,
- struct i40e_profile_segment *profile,
- u8 *profile_info_sec, u32 track_id);
+ enum virtchnl_ops v_opcode,
+ i40e_status v_retval, u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details);
 
 #endif /* _I40E_PROTOTYPE_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/i40e_register.h b/drivers/net/ethernet/intel/iavf/i40e_register.h
index 49e1f57d99cc..20b464ac1542 100644
--- a/drivers/net/ethernet/intel/iavf/i40e_register.h
+++ b/drivers/net/ethernet/intel/iavf/i40e_register.h
@@ -4,40 +4,12 @@
 #ifndef _I40E_REGISTER_H_
 #define _I40E_REGISTER_H_
 
-#define I40E_VFMSIX_PBA1(_i) (0x00002000 + ((_i) * 4)) /* _i=0...19 */ /* Reset: VFLR */
-#define I40E_VFMSIX_PBA1_MAX_INDEX 19
-#define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0
-#define I40E_VFMSIX_PBA1_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA1_PENBIT_SHIFT)
-#define I40E_VFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
-#define I40E_VFMSIX_TADD1_MAX_INDEX 639
-#define I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT 0
-#define I40E_VFMSIX_TADD1_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT)
-#define I40E_VFMSIX_TADD1_MSIXTADD_SHIFT 2
-#define I40E_VFMSIX_TADD1_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD1_MSIXTADD_SHIFT)
-#define I40E_VFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
-#define I40E_VFMSIX_TMSG1_MAX_INDEX 639
-#define I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT 0
-#define I40E_VFMSIX_TMSG1_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT)
-#define I40E_VFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
-#define I40E_VFMSIX_TUADD1_MAX_INDEX 639
-#define I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT 0
-#define I40E_VFMSIX_TUADD1_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT)
-#define I40E_VFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
-#define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639
-#define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0
-#define I40E_VFMSIX_TVCTRL1_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL1_MASK_SHIFT)
 #define I40E_VF_ARQBAH1 0x00006000 /* Reset: EMPR */
-#define I40E_VF_ARQBAH1_ARQBAH_SHIFT 0
-#define I40E_VF_ARQBAH1_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH1_ARQBAH_SHIFT)
 #define I40E_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */
-#define I40E_VF_ARQBAL1_ARQBAL_SHIFT 0
-#define I40E_VF_ARQBAL1_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL1_ARQBAL_SHIFT)
 #define I40E_VF_ARQH1 0x00007400 /* Reset: EMPR */
 #define I40E_VF_ARQH1_ARQH_SHIFT 0
 #define I40E_VF_ARQH1_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH1_ARQH_SHIFT)
 #define I40E_VF_ARQLEN1 0x00008000 /* Reset: EMPR */
-#define I40E_VF_ARQLEN1_ARQLEN_SHIFT 0
-#define I40E_VF_ARQLEN1_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN1_ARQLEN_SHIFT)
 #define I40E_VF_ARQLEN1_ARQVFE_SHIFT 28
 #define I40E_VF_ARQLEN1_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQVFE_SHIFT)
 #define I40E_VF_ARQLEN1_ARQOVFL_SHIFT 29
@@ -47,20 +19,10 @@
 #define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31
 #define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
 #define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */
-#define I40E_VF_ARQT1_ARQT_SHIFT 0
-#define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT)
 #define I40E_VF_ATQBAH1 0x00007800 /* Reset: EMPR */
-#define I40E_VF_ATQBAH1_ATQBAH_SHIFT 0
-#define I40E_VF_ATQBAH1_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH1_ATQBAH_SHIFT)
 #define I40E_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */
-#define I40E_VF_ATQBAL1_ATQBAL_SHIFT 0
-#define I40E_VF_ATQBAL1_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL1_ATQBAL_SHIFT)
 #define I40E_VF_ATQH1 0x00006400 /* Reset: EMPR */
-#define I40E_VF_ATQH1_ATQH_SHIFT 0
-#define I40E_VF_ATQH1_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH1_ATQH_SHIFT)
 #define I40E_VF_ATQLEN1 0x00006800 /* Reset: EMPR */
-#define I40E_VF_ATQLEN1_ATQLEN_SHIFT 0
-#define I40E_VF_ATQLEN1_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN1_ATQLEN_SHIFT)
 #define I40E_VF_ATQLEN1_ATQVFE_SHIFT 28
 #define I40E_VF_ATQLEN1_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQVFE_SHIFT)
 #define I40E_VF_ATQLEN1_ATQOVFL_SHIFT 29
@@ -70,244 +32,37 @@
 #define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31
 #define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
 #define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */
-#define I40E_VF_ATQT1_ATQT_SHIFT 0
-#define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT)
 #define I40E_VFGEN_RSTAT 0x00008800 /* Reset: VFR */
 #define I40E_VFGEN_RSTAT_VFR_STATE_SHIFT 0
 #define I40E_VFGEN_RSTAT_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT_VFR_STATE_SHIFT)
 #define I40E_VFINT_DYN_CTL01 0x00005C00 /* Reset: VFR */
 #define I40E_VFINT_DYN_CTL01_INTENA_SHIFT 0
 #define I40E_VFINT_DYN_CTL01_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_SHIFT)
-#define I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT 1
-#define I40E_VFINT_DYN_CTL01_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT)
-#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2
-#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT)
 #define I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3
 #define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
-#define I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT 5
-#define I40E_VFINT_DYN_CTL01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT)
-#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT)
-#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25
-#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT)
-#define I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT 31
-#define I40E_VFINT_DYN_CTL01_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT)
 #define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */
-#define I40E_VFINT_DYN_CTLN1_MAX_INDEX 15
 #define I40E_VFINT_DYN_CTLN1_INTENA_SHIFT 0
 #define I40E_VFINT_DYN_CTLN1_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_SHIFT)
-#define I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT 1
-#define I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT)
 #define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2
 #define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT)
 #define I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3
 #define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT)
 #define I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5
-#define I40E_VFINT_DYN_CTLN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT)
 #define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24
 #define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT)
-#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25
-#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT)
-#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT 31
-#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT)
 #define I40E_VFINT_ICR0_ENA1 0x00005000 /* Reset: CORER */
-#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT)
 #define I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30
 #define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT)
 #define I40E_VFINT_ICR0_ENA1_RSVD_SHIFT 31
-#define I40E_VFINT_ICR0_ENA1_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_RSVD_SHIFT)
 #define I40E_VFINT_ICR01 0x00004800 /* Reset: CORER */
-#define I40E_VFINT_ICR01_INTEVENT_SHIFT 0
-#define I40E_VFINT_ICR01_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_INTEVENT_SHIFT)
-#define I40E_VFINT_ICR01_QUEUE_0_SHIFT 1
-#define I40E_VFINT_ICR01_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_0_SHIFT)
-#define I40E_VFINT_ICR01_QUEUE_1_SHIFT 2
-#define I40E_VFINT_ICR01_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_1_SHIFT)
-#define I40E_VFINT_ICR01_QUEUE_2_SHIFT 3
-#define I40E_VFINT_ICR01_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_2_SHIFT)
-#define I40E_VFINT_ICR01_QUEUE_3_SHIFT 4
-#define I40E_VFINT_ICR01_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_3_SHIFT)
-#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT)
-#define I40E_VFINT_ICR01_ADMINQ_SHIFT 30
-#define I40E_VFINT_ICR01_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_ADMINQ_SHIFT)
-#define I40E_VFINT_ICR01_SWINT_SHIFT 31
-#define I40E_VFINT_ICR01_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_SWINT_SHIFT)
-#define I40E_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */ /* Reset: VFR */
-#define I40E_VFINT_ITR01_MAX_INDEX 2
-#define I40E_VFINT_ITR01_INTERVAL_SHIFT 0
-#define I40E_VFINT_ITR01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR01_INTERVAL_SHIFT)
 #define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */
-#define I40E_VFINT_ITRN1_MAX_INDEX 2
-#define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0
-#define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT)
-#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */
-#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2
-#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT)
 #define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_QRX_TAIL1_MAX_INDEX 15
-#define I40E_QRX_TAIL1_TAIL_SHIFT 0
-#define I40E_QRX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL1_TAIL_SHIFT)
 #define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: PFR */
-#define I40E_QTX_TAIL1_MAX_INDEX 15
-#define I40E_QTX_TAIL1_TAIL_SHIFT 0
-#define I40E_QTX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL1_TAIL_SHIFT)
-#define I40E_VFMSIX_PBA 0x00002000 /* Reset: VFLR */
-#define I40E_VFMSIX_PBA_PENBIT_SHIFT 0
-#define I40E_VFMSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA_PENBIT_SHIFT)
-#define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
-#define I40E_VFMSIX_TADD_MAX_INDEX 16
-#define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0
-#define I40E_VFMSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD_MSIXTADD10_SHIFT)
-#define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2
-#define I40E_VFMSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD_MSIXTADD_SHIFT)
-#define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
-#define I40E_VFMSIX_TMSG_MAX_INDEX 16
-#define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0
-#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT)
-#define I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
-#define I40E_VFMSIX_TUADD_MAX_INDEX 16
-#define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0
-#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT)
-#define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
-#define I40E_VFMSIX_TVCTRL_MAX_INDEX 16
-#define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0
-#define I40E_VFMSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL_MASK_SHIFT)
-#define I40E_VFCM_PE_ERRDATA 0x0000DC00 /* Reset: VFR */
-#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
-#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
-#define I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
-#define I40E_VFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT)
-#define I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT 8
-#define I40E_VFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT)
-#define I40E_VFCM_PE_ERRINFO 0x0000D800 /* Reset: VFR */
-#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
-#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
-#define I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
-#define I40E_VFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT)
-#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
-#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
-#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
-#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
-#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
-#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
 #define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
-#define I40E_VFQF_HENA_MAX_INDEX 1
-#define I40E_VFQF_HENA_PTYPE_ENA_SHIFT 0
-#define I40E_VFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA_PTYPE_ENA_SHIFT)
 #define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
 #define I40E_VFQF_HKEY_MAX_INDEX 12
-#define I40E_VFQF_HKEY_KEY_0_SHIFT 0
-#define I40E_VFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_0_SHIFT)
-#define I40E_VFQF_HKEY_KEY_1_SHIFT 8
-#define I40E_VFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_1_SHIFT)
-#define I40E_VFQF_HKEY_KEY_2_SHIFT 16
-#define I40E_VFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_2_SHIFT)
-#define I40E_VFQF_HKEY_KEY_3_SHIFT 24
-#define I40E_VFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_3_SHIFT)
 #define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_VFQF_HLUT_MAX_INDEX 15
-#define I40E_VFQF_HLUT_LUT0_SHIFT 0
-#define I40E_VFQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT0_SHIFT)
-#define I40E_VFQF_HLUT_LUT1_SHIFT 8
-#define I40E_VFQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT1_SHIFT)
-#define I40E_VFQF_HLUT_LUT2_SHIFT 16
-#define I40E_VFQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT2_SHIFT)
-#define I40E_VFQF_HLUT_LUT3_SHIFT 24
-#define I40E_VFQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT3_SHIFT)
-#define I40E_VFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_VFQF_HREGION_MAX_INDEX 7
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
-#define I40E_VFQF_HREGION_REGION_0_SHIFT 1
-#define I40E_VFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_0_SHIFT)
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
-#define I40E_VFQF_HREGION_REGION_1_SHIFT 5
-#define I40E_VFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_1_SHIFT)
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
-#define I40E_VFQF_HREGION_REGION_2_SHIFT 9
-#define I40E_VFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_2_SHIFT)
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
-#define I40E_VFQF_HREGION_REGION_3_SHIFT 13
-#define I40E_VFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_3_SHIFT)
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
-#define I40E_VFQF_HREGION_REGION_4_SHIFT 17
-#define I40E_VFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_4_SHIFT)
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
-#define I40E_VFQF_HREGION_REGION_5_SHIFT 21
-#define I40E_VFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_5_SHIFT)
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
-#define I40E_VFQF_HREGION_REGION_6_SHIFT 25
-#define I40E_VFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_6_SHIFT)
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
-#define I40E_VFQF_HREGION_REGION_7_SHIFT 29
-#define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT)
-#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30
-#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT)
 #define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30
 #define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT)
-#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */
-#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0
-#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT)
-#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */
-#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
-#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
-#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */
-#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
-#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT)
-#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */
-#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0
-#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
-#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4
-#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT)
-#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16
-#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT)
I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT) -#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31 -#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT) -#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */ -#define I40E_VFPE_CQACK1_PECQID_SHIFT 0 -#define I40E_VFPE_CQACK1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK1_PECQID_SHIFT) -#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */ -#define I40E_VFPE_CQARM1_PECQID_SHIFT 0 -#define I40E_VFPE_CQARM1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM1_PECQID_SHIFT) -#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */ -#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0 -#define I40E_VFPE_CQPDB1_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB1_WQHEAD_SHIFT) -#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */ -#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0 -#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT) -#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16 -#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT) -#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */ -#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0 -#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT) -#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31 -#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT) -#define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */ -#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0 -#define I40E_VFPE_IPCONFIG01_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG01_PEIPID_SHIFT) -#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16 -#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT) -#define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */ -#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0 -#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT) -#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */ -#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0 -#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT) -#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */ -#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0 -#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT) -#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */ -#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0 -#define I40E_VFPE_WQEALLOC1_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC1_PEQPID_SHIFT) -#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20 -#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT) #endif /* _I40E_REGISTER_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/i40e_type.h b/drivers/net/ethernet/intel/iavf/i40e_type.h index 094387db3c11..8f1344094bc9 100644 --- a/drivers/net/ethernet/intel/iavf/i40e_type.h +++ b/drivers/net/ethernet/intel/iavf/i40e_type.h @@ -8,26 +8,16 @@ #include "i40e_osdep.h" #include "i40e_register.h" #include "i40e_adminq.h" -#include "i40e_hmc.h" -#include "i40e_lan_hmc.h" #include "i40e_devids.h" +#define I40E_RXQ_CTX_DBUFF_SHIFT 7 + /* I40E_MASK is a macro used on 32 bit registers */ #define I40E_MASK(mask, shift) ((u32)(mask) << (shift)) #define I40E_MAX_VSI_QP 16 #define 
I40E_MAX_VF_VSI 3 #define I40E_MAX_CHAINED_RX_BUFFERS 5 -#define I40E_MAX_PF_UDP_OFFLOAD_PORTS 16 - -/* Max default timeout in ms, */ -#define I40E_MAX_NVM_TIMEOUT 18000 - -/* Max timeout in ms for the phy to respond */ -#define I40E_MAX_PHY_TIMEOUT 500 - -/* Switch from ms to the 1usec global time (this is the GTIME resolution) */ -#define I40E_MS_TO_GTIME(time) ((time) * 1000) /* forward declaration */ struct i40e_hw; @@ -88,33 +78,6 @@ enum i40e_mac_type { I40E_MAC_GENERIC, }; -enum i40e_media_type { - I40E_MEDIA_TYPE_UNKNOWN = 0, - I40E_MEDIA_TYPE_FIBER, - I40E_MEDIA_TYPE_BASET, - I40E_MEDIA_TYPE_BACKPLANE, - I40E_MEDIA_TYPE_CX4, - I40E_MEDIA_TYPE_DA, - I40E_MEDIA_TYPE_VIRTUAL -}; - -enum i40e_fc_mode { - I40E_FC_NONE = 0, - I40E_FC_RX_PAUSE, - I40E_FC_TX_PAUSE, - I40E_FC_FULL, - I40E_FC_PFC, - I40E_FC_DEFAULT -}; - -enum i40e_set_fc_aq_failures { - I40E_SET_FC_AQ_FAIL_NONE = 0, - I40E_SET_FC_AQ_FAIL_GET = 1, - I40E_SET_FC_AQ_FAIL_SET = 2, - I40E_SET_FC_AQ_FAIL_UPDATE = 4, - I40E_SET_FC_AQ_FAIL_SET_UPDATE = 6 -}; - enum i40e_vsi_type { I40E_VSI_MAIN = 0, I40E_VSI_VMDQ1 = 1, @@ -134,162 +97,16 @@ enum i40e_queue_type { I40E_QUEUE_TYPE_UNKNOWN }; -struct i40e_link_status { - enum i40e_aq_phy_type phy_type; - enum i40e_aq_link_speed link_speed; - u8 link_info; - u8 an_info; - u8 req_fec_info; - u8 fec_info; - u8 ext_info; - u8 loopback; - /* is Link Status Event notification to SW enabled */ - bool lse_enable; - u16 max_frame_size; - bool crc_enable; - u8 pacing; - u8 requested_speeds; - u8 module_type[3]; - /* 1st byte: module identifier */ -#define I40E_MODULE_TYPE_SFP 0x03 -#define I40E_MODULE_TYPE_QSFP 0x0D - /* 2nd byte: ethernet compliance codes for 10/40G */ -#define I40E_MODULE_TYPE_40G_ACTIVE 0x01 -#define I40E_MODULE_TYPE_40G_LR4 0x02 -#define I40E_MODULE_TYPE_40G_SR4 0x04 -#define I40E_MODULE_TYPE_40G_CR4 0x08 -#define I40E_MODULE_TYPE_10G_BASE_SR 0x10 -#define I40E_MODULE_TYPE_10G_BASE_LR 0x20 -#define I40E_MODULE_TYPE_10G_BASE_LRM 0x40 -#define I40E_MODULE_TYPE_10G_BASE_ER 0x80 - /* 3rd byte: ethernet compliance codes for 1G */ -#define I40E_MODULE_TYPE_1000BASE_SX 0x01 -#define I40E_MODULE_TYPE_1000BASE_LX 0x02 -#define I40E_MODULE_TYPE_1000BASE_CX 0x04 -#define I40E_MODULE_TYPE_1000BASE_T 0x08 -}; - -struct i40e_phy_info { - struct i40e_link_status link_info; - struct i40e_link_status link_info_old; - bool get_link_info; - enum i40e_media_type media_type; - /* all the phy types the NVM is capable of */ - u64 phy_types; -}; - -#define I40E_CAP_PHY_TYPE_SGMII BIT_ULL(I40E_PHY_TYPE_SGMII) -#define I40E_CAP_PHY_TYPE_1000BASE_KX BIT_ULL(I40E_PHY_TYPE_1000BASE_KX) -#define I40E_CAP_PHY_TYPE_10GBASE_KX4 BIT_ULL(I40E_PHY_TYPE_10GBASE_KX4) -#define I40E_CAP_PHY_TYPE_10GBASE_KR BIT_ULL(I40E_PHY_TYPE_10GBASE_KR) -#define I40E_CAP_PHY_TYPE_40GBASE_KR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_KR4) -#define I40E_CAP_PHY_TYPE_XAUI BIT_ULL(I40E_PHY_TYPE_XAUI) -#define I40E_CAP_PHY_TYPE_XFI BIT_ULL(I40E_PHY_TYPE_XFI) -#define I40E_CAP_PHY_TYPE_SFI BIT_ULL(I40E_PHY_TYPE_SFI) -#define I40E_CAP_PHY_TYPE_XLAUI BIT_ULL(I40E_PHY_TYPE_XLAUI) -#define I40E_CAP_PHY_TYPE_XLPPI BIT_ULL(I40E_PHY_TYPE_XLPPI) -#define I40E_CAP_PHY_TYPE_40GBASE_CR4_CU BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4_CU) -#define I40E_CAP_PHY_TYPE_10GBASE_CR1_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1_CU) -#define I40E_CAP_PHY_TYPE_10GBASE_AOC BIT_ULL(I40E_PHY_TYPE_10GBASE_AOC) -#define I40E_CAP_PHY_TYPE_40GBASE_AOC BIT_ULL(I40E_PHY_TYPE_40GBASE_AOC) -#define I40E_CAP_PHY_TYPE_100BASE_TX BIT_ULL(I40E_PHY_TYPE_100BASE_TX) -#define 
I40E_CAP_PHY_TYPE_1000BASE_T BIT_ULL(I40E_PHY_TYPE_1000BASE_T) -#define I40E_CAP_PHY_TYPE_10GBASE_T BIT_ULL(I40E_PHY_TYPE_10GBASE_T) -#define I40E_CAP_PHY_TYPE_10GBASE_SR BIT_ULL(I40E_PHY_TYPE_10GBASE_SR) -#define I40E_CAP_PHY_TYPE_10GBASE_LR BIT_ULL(I40E_PHY_TYPE_10GBASE_LR) -#define I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_SFPP_CU) -#define I40E_CAP_PHY_TYPE_10GBASE_CR1 BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1) -#define I40E_CAP_PHY_TYPE_40GBASE_CR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4) -#define I40E_CAP_PHY_TYPE_40GBASE_SR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_SR4) -#define I40E_CAP_PHY_TYPE_40GBASE_LR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_LR4) -#define I40E_CAP_PHY_TYPE_1000BASE_SX BIT_ULL(I40E_PHY_TYPE_1000BASE_SX) -#define I40E_CAP_PHY_TYPE_1000BASE_LX BIT_ULL(I40E_PHY_TYPE_1000BASE_LX) -#define I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL \ - BIT_ULL(I40E_PHY_TYPE_1000BASE_T_OPTICAL) -#define I40E_CAP_PHY_TYPE_20GBASE_KR2 BIT_ULL(I40E_PHY_TYPE_20GBASE_KR2) -/* Defining the macro I40E_TYPE_OFFSET to implement a bit shift for some - * PHY types. There is an unused bit (31) in the I40E_CAP_PHY_TYPE_* bit - * fields but no corresponding gap in the i40e_aq_phy_type enumeration. So, - * a shift is needed to adjust for this with values larger than 31. The - * only affected values are I40E_PHY_TYPE_25GBASE_*. - */ -#define I40E_PHY_TYPE_OFFSET 1 -#define I40E_CAP_PHY_TYPE_25GBASE_KR BIT_ULL(I40E_PHY_TYPE_25GBASE_KR + \ - I40E_PHY_TYPE_OFFSET) -#define I40E_CAP_PHY_TYPE_25GBASE_CR BIT_ULL(I40E_PHY_TYPE_25GBASE_CR + \ - I40E_PHY_TYPE_OFFSET) -#define I40E_CAP_PHY_TYPE_25GBASE_SR BIT_ULL(I40E_PHY_TYPE_25GBASE_SR + \ - I40E_PHY_TYPE_OFFSET) -#define I40E_CAP_PHY_TYPE_25GBASE_LR BIT_ULL(I40E_PHY_TYPE_25GBASE_LR + \ - I40E_PHY_TYPE_OFFSET) -#define I40E_HW_CAP_MAX_GPIO 30 +#define I40E_HW_CAP_MAX_GPIO 30 /* Capabilities of a PF or a VF or the whole device */ struct i40e_hw_capabilities { - u32 switch_mode; -#define I40E_NVM_IMAGE_TYPE_EVB 0x0 -#define I40E_NVM_IMAGE_TYPE_CLOUD 0x2 -#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3 - - u32 management_mode; - u32 mng_protocols_over_mctp; -#define I40E_MNG_PROTOCOL_PLDM 0x2 -#define I40E_MNG_PROTOCOL_OEM_COMMANDS 0x4 -#define I40E_MNG_PROTOCOL_NCSI 0x8 - u32 npar_enable; - u32 os2bmc; - u32 valid_functions; - bool sr_iov_1_1; - bool vmdq; - bool evb_802_1_qbg; /* Edge Virtual Bridging */ - bool evb_802_1_qbh; /* Bridge Port Extension */ bool dcb; bool fcoe; - bool iscsi; /* Indicates iSCSI enabled */ - bool flex10_enable; - bool flex10_capable; - u32 flex10_mode; -#define I40E_FLEX10_MODE_UNKNOWN 0x0 -#define I40E_FLEX10_MODE_DCC 0x1 -#define I40E_FLEX10_MODE_DCI 0x2 - - u32 flex10_status; -#define I40E_FLEX10_STATUS_DCC_ERROR 0x1 -#define I40E_FLEX10_STATUS_VC_MODE 0x2 - - bool sec_rev_disabled; - bool update_disabled; -#define I40E_NVM_MGMT_SEC_REV_DISABLED 0x1 -#define I40E_NVM_MGMT_UPDATE_DISABLED 0x2 - - bool mgmt_cem; - bool ieee_1588; - bool iwarp; - bool fd; - u32 fd_filters_guaranteed; - u32 fd_filters_best_effort; - bool rss; - u32 rss_table_size; - u32 rss_table_entry_width; - bool led[I40E_HW_CAP_MAX_GPIO]; - bool sdp[I40E_HW_CAP_MAX_GPIO]; - u32 nvm_image_type; - u32 num_flow_director_filters; - u32 num_vfs; - u32 vf_base_id; u32 num_vsis; u32 num_rx_qp; u32 num_tx_qp; u32 base_queue; - u32 num_msix_vectors; u32 num_msix_vectors_vf; - u32 led_pin_num; - u32 sdp_pin_num; - u32 mdio_port_num; - u32 mdio_port_mode; - u8 rx_buf_chain_len; - u32 enabled_tcmap; - u32 maxtc; - u64 wr_csr_prot; }; struct i40e_mac_info { @@ -300,106 +117,6 @@ struct 
i40e_mac_info { u16 max_fcoeq; }; -enum i40e_aq_resources_ids { - I40E_NVM_RESOURCE_ID = 1 -}; - -enum i40e_aq_resource_access_type { - I40E_RESOURCE_READ = 1, - I40E_RESOURCE_WRITE -}; - -struct i40e_nvm_info { - u64 hw_semaphore_timeout; /* usec global time (GTIME resolution) */ - u32 timeout; /* [ms] */ - u16 sr_size; /* Shadow RAM size in words */ - bool blank_nvm_mode; /* is NVM empty (no FW present)*/ - u16 version; /* NVM package version */ - u32 eetrack; /* NVM data version */ - u32 oem_ver; /* OEM version info */ -}; - -/* definitions used in NVM update support */ - -enum i40e_nvmupd_cmd { - I40E_NVMUPD_INVALID, - I40E_NVMUPD_READ_CON, - I40E_NVMUPD_READ_SNT, - I40E_NVMUPD_READ_LCB, - I40E_NVMUPD_READ_SA, - I40E_NVMUPD_WRITE_ERA, - I40E_NVMUPD_WRITE_CON, - I40E_NVMUPD_WRITE_SNT, - I40E_NVMUPD_WRITE_LCB, - I40E_NVMUPD_WRITE_SA, - I40E_NVMUPD_CSUM_CON, - I40E_NVMUPD_CSUM_SA, - I40E_NVMUPD_CSUM_LCB, - I40E_NVMUPD_STATUS, - I40E_NVMUPD_EXEC_AQ, - I40E_NVMUPD_GET_AQ_RESULT, - I40E_NVMUPD_GET_AQ_EVENT, -}; - -enum i40e_nvmupd_state { - I40E_NVMUPD_STATE_INIT, - I40E_NVMUPD_STATE_READING, - I40E_NVMUPD_STATE_WRITING, - I40E_NVMUPD_STATE_INIT_WAIT, - I40E_NVMUPD_STATE_WRITE_WAIT, - I40E_NVMUPD_STATE_ERROR -}; - -/* nvm_access definition and its masks/shifts need to be accessible to - * application, core driver, and shared code. Where is the right file? - */ -#define I40E_NVM_READ 0xB -#define I40E_NVM_WRITE 0xC - -#define I40E_NVM_MOD_PNT_MASK 0xFF - -#define I40E_NVM_TRANS_SHIFT 8 -#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT) -#define I40E_NVM_PRESERVATION_FLAGS_SHIFT 12 -#define I40E_NVM_PRESERVATION_FLAGS_MASK \ - (0x3 << I40E_NVM_PRESERVATION_FLAGS_SHIFT) -#define I40E_NVM_PRESERVATION_FLAGS_SELECTED 0x01 -#define I40E_NVM_PRESERVATION_FLAGS_ALL 0x02 -#define I40E_NVM_CON 0x0 -#define I40E_NVM_SNT 0x1 -#define I40E_NVM_LCB 0x2 -#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB) -#define I40E_NVM_ERA 0x4 -#define I40E_NVM_CSUM 0x8 -#define I40E_NVM_AQE 0xe -#define I40E_NVM_EXEC 0xf - -#define I40E_NVM_ADAPT_SHIFT 16 -#define I40E_NVM_ADAPT_MASK (0xffff << I40E_NVM_ADAPT_SHIFT) - -#define I40E_NVMUPD_MAX_DATA 4096 -#define I40E_NVMUPD_IFACE_TIMEOUT 2 /* seconds */ - -struct i40e_nvm_access { - u32 command; - u32 config; - u32 offset; /* in bytes */ - u32 data_size; /* in bytes */ - u8 data[1]; -}; - -/* (Q)SFP module access definitions */ -#define I40E_I2C_EEPROM_DEV_ADDR 0xA0 -#define I40E_I2C_EEPROM_DEV_ADDR2 0xA2 -#define I40E_MODULE_TYPE_ADDR 0x00 -#define I40E_MODULE_REVISION_ADDR 0x01 -#define I40E_MODULE_SFF_8472_COMP 0x5E -#define I40E_MODULE_SFF_8472_SWAP 0x5C -#define I40E_MODULE_SFF_ADDR_MODE 0x04 -#define I40E_MODULE_TYPE_QSFP_PLUS 0x0D -#define I40E_MODULE_TYPE_QSFP28 0x11 -#define I40E_MODULE_QSFP_MAX_LEN 640 - /* PCI bus types */ enum i40e_bus_type { i40e_bus_type_unknown = 0, @@ -447,69 +164,16 @@ struct i40e_bus_info { u16 bus_id; }; -/* Flow control (FC) parameters */ -struct i40e_fc_info { - enum i40e_fc_mode current_mode; /* FC mode in effect */ - enum i40e_fc_mode requested_mode; /* FC mode requested by caller */ -}; - #define I40E_MAX_TRAFFIC_CLASS 8 #define I40E_MAX_USER_PRIORITY 8 -#define I40E_DCBX_MAX_APPS 32 -#define I40E_LLDPDU_SIZE 1500 - -/* IEEE 802.1Qaz ETS Configuration data */ -struct i40e_ieee_ets_config { - u8 willing; - u8 cbs; - u8 maxtcs; - u8 prioritytable[I40E_MAX_TRAFFIC_CLASS]; - u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS]; - u8 tsatable[I40E_MAX_TRAFFIC_CLASS]; -}; - -/* IEEE 802.1Qaz ETS Recommendation data */ -struct 
i40e_ieee_ets_recommend { - u8 prioritytable[I40E_MAX_TRAFFIC_CLASS]; - u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS]; - u8 tsatable[I40E_MAX_TRAFFIC_CLASS]; -}; - -/* IEEE 802.1Qaz PFC Configuration data */ -struct i40e_ieee_pfc_config { - u8 willing; - u8 mbc; - u8 pfccap; - u8 pfcenable; -}; - -/* IEEE 802.1Qaz Application Priority data */ -struct i40e_ieee_app_priority_table { - u8 priority; - u8 selector; - u16 protocolid; -}; - -struct i40e_dcbx_config { - u32 numapps; - u32 tlv_status; /* CEE mode TLV status */ - struct i40e_ieee_ets_config etscfg; - struct i40e_ieee_ets_recommend etsrec; - struct i40e_ieee_pfc_config pfc; - struct i40e_ieee_app_priority_table app[I40E_DCBX_MAX_APPS]; -}; - /* Port hardware description */ struct i40e_hw { u8 __iomem *hw_addr; void *back; /* subsystem structs */ - struct i40e_phy_info phy; struct i40e_mac_info mac; struct i40e_bus_info bus; - struct i40e_nvm_info nvm; - struct i40e_fc_info fc; /* pci info */ u16 device_id; @@ -517,58 +181,13 @@ struct i40e_hw { u16 subsystem_device_id; u16 subsystem_vendor_id; u8 revision_id; - u8 port; - bool adapter_stopped; /* capabilities for entire device and PCI func */ struct i40e_hw_capabilities dev_caps; - struct i40e_hw_capabilities func_caps; - - /* Flow Director shared filter space */ - u16 fdir_shared_filter_count; - - /* device profile info */ - u8 pf_id; - u16 main_vsi_seid; - - /* for multi-function MACs */ - u16 partition_id; - u16 num_partitions; - u16 num_ports; - - /* Closest numa node to the device */ - u16 numa_node; /* Admin Queue info */ struct i40e_adminq_info aq; - /* state of nvm update process */ - enum i40e_nvmupd_state nvmupd_state; - struct i40e_aq_desc nvm_wb_desc; - struct i40e_aq_desc nvm_aq_event_desc; - struct i40e_virt_mem nvm_buff; - bool nvm_release_on_done; - u16 nvm_wait_opcode; - - /* HMC info */ - struct i40e_hmc_info hmc; /* HMC info struct */ - - /* LLDP/DCBX Status */ - u16 dcbx_status; - -#define I40E_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1) -#define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2) - - /* DCBX info */ - struct i40e_dcbx_config local_dcbx_config; /* Oper/Local Cfg */ - struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */ - struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */ - - /* Used in set switch config AQ command */ - u16 switch_tag; - u16 first_tag; - u16 second_tag; - /* debug mask */ u32 debug_mask; char err_str[16]; @@ -962,9 +581,6 @@ struct i40e_tx_context_desc { __le64 type_cmd_tso_mss; }; -#define I40E_TXD_CTX_QW1_DTYPE_SHIFT 0 -#define I40E_TXD_CTX_QW1_DTYPE_MASK (0xFUL << I40E_TXD_CTX_QW1_DTYPE_SHIFT) - #define I40E_TXD_CTX_QW1_CMD_SHIFT 4 #define I40E_TXD_CTX_QW1_CMD_MASK (0xFFFFUL << I40E_TXD_CTX_QW1_CMD_SHIFT) @@ -1028,21 +644,6 @@ enum i40e_tx_ctx_desc_eipt_offload { #define I40E_TXD_CTX_QW0_L4T_CS_SHIFT 23 #define I40E_TXD_CTX_QW0_L4T_CS_MASK BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT) -struct i40e_filter_program_desc { - __le32 qindex_flex_ptype_vsi; - __le32 rsvd; - __le32 dtype_cmd_cntindex; - __le32 fd_id; -}; -#define I40E_TXD_FLTR_QW0_QINDEX_SHIFT 0 -#define I40E_TXD_FLTR_QW0_QINDEX_MASK (0x7FFUL << \ - I40E_TXD_FLTR_QW0_QINDEX_SHIFT) -#define I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT 11 -#define I40E_TXD_FLTR_QW0_FLEXOFF_MASK (0x7UL << \ - I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) -#define I40E_TXD_FLTR_QW0_PCTYPE_SHIFT 17 -#define I40E_TXD_FLTR_QW0_PCTYPE_MASK (0x3FUL << \ - I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) /* Packet Classifier Types for filters */ enum i40e_filter_pctype { @@ -1076,58 +677,6 @@ enum i40e_filter_pctype { I40E_FILTER_PCTYPE_L2_PAYLOAD 
= 63, }; -enum i40e_filter_program_desc_dest { - I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET = 0x0, - I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX = 0x1, - I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER = 0x2, -}; - -enum i40e_filter_program_desc_fd_status { - I40E_FILTER_PROGRAM_DESC_FD_STATUS_NONE = 0x0, - I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID = 0x1, - I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID_4FLEX_BYTES = 0x2, - I40E_FILTER_PROGRAM_DESC_FD_STATUS_8FLEX_BYTES = 0x3, -}; - -#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23 -#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \ - I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) - -#define I40E_TXD_FLTR_QW1_CMD_SHIFT 4 -#define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \ - I40E_TXD_FLTR_QW1_CMD_SHIFT) - -#define I40E_TXD_FLTR_QW1_PCMD_SHIFT (0x0ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) -#define I40E_TXD_FLTR_QW1_PCMD_MASK (0x7ULL << I40E_TXD_FLTR_QW1_PCMD_SHIFT) - -enum i40e_filter_program_desc_pcmd { - I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE = 0x1, - I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE = 0x2, -}; - -#define I40E_TXD_FLTR_QW1_DEST_SHIFT (0x3ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) -#define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT) - -#define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) -#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK BIT_ULL(I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT) - -#define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \ - I40E_TXD_FLTR_QW1_CMD_SHIFT) -#define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \ - I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) - -#define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20 -#define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \ - I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) - -enum i40e_filter_type { - I40E_FLOW_DIRECTOR_FLTR = 0, - I40E_PE_QUAD_HASH_FLTR = 1, - I40E_ETHERTYPE_FLTR, - I40E_FCOE_CTX_FLTR, - I40E_MAC_VLAN_FLTR, - I40E_HASH_FLTR -}; struct i40e_vsi_context { u16 seid; @@ -1167,330 +716,4 @@ struct i40e_eth_stats { u64 tx_discards; /* tdpc */ u64 tx_errors; /* tepc */ }; - -/* Statistics collected per VEB per TC */ -struct i40e_veb_tc_stats { - u64 tc_rx_packets[I40E_MAX_TRAFFIC_CLASS]; - u64 tc_rx_bytes[I40E_MAX_TRAFFIC_CLASS]; - u64 tc_tx_packets[I40E_MAX_TRAFFIC_CLASS]; - u64 tc_tx_bytes[I40E_MAX_TRAFFIC_CLASS]; -}; - -/* Statistics collected by the MAC */ -struct i40e_hw_port_stats { - /* eth stats collected by the port */ - struct i40e_eth_stats eth; - - /* additional port specific stats */ - u64 tx_dropped_link_down; /* tdold */ - u64 crc_errors; /* crcerrs */ - u64 illegal_bytes; /* illerrc */ - u64 error_bytes; /* errbc */ - u64 mac_local_faults; /* mlfc */ - u64 mac_remote_faults; /* mrfc */ - u64 rx_length_errors; /* rlec */ - u64 link_xon_rx; /* lxonrxc */ - u64 link_xoff_rx; /* lxoffrxc */ - u64 priority_xon_rx[8]; /* pxonrxc[8] */ - u64 priority_xoff_rx[8]; /* pxoffrxc[8] */ - u64 link_xon_tx; /* lxontxc */ - u64 link_xoff_tx; /* lxofftxc */ - u64 priority_xon_tx[8]; /* pxontxc[8] */ - u64 priority_xoff_tx[8]; /* pxofftxc[8] */ - u64 priority_xon_2_xoff[8]; /* pxon2offc[8] */ - u64 rx_size_64; /* prc64 */ - u64 rx_size_127; /* prc127 */ - u64 rx_size_255; /* prc255 */ - u64 rx_size_511; /* prc511 */ - u64 rx_size_1023; /* prc1023 */ - u64 rx_size_1522; /* prc1522 */ - u64 rx_size_big; /* prc9522 */ - u64 rx_undersize; /* ruc */ - u64 rx_fragments; /* rfc */ - u64 rx_oversize; /* roc */ - u64 rx_jabber; /* rjc */ - u64 tx_size_64; /* ptc64 */ - u64 tx_size_127; /* ptc127 */ - u64 tx_size_255; /* ptc255 */ - u64 tx_size_511; /* ptc511 */ - u64 tx_size_1023; /* ptc1023 */ - u64 
tx_size_1522; /* ptc1522 */ - u64 tx_size_big; /* ptc9522 */ - u64 mac_short_packet_dropped; /* mspdc */ - u64 checksum_error; /* xec */ - /* flow director stats */ - u64 fd_atr_match; - u64 fd_sb_match; - u64 fd_atr_tunnel_match; - u32 fd_atr_status; - u32 fd_sb_status; - /* EEE LPI */ - u32 tx_lpi_status; - u32 rx_lpi_status; - u64 tx_lpi_count; /* etlpic */ - u64 rx_lpi_count; /* erlpic */ -}; - -/* Checksum and Shadow RAM pointers */ -#define I40E_SR_NVM_CONTROL_WORD 0x00 -#define I40E_EMP_MODULE_PTR 0x0F -#define I40E_SR_EMP_MODULE_PTR 0x48 -#define I40E_NVM_OEM_VER_OFF 0x83 -#define I40E_SR_NVM_DEV_STARTER_VERSION 0x18 -#define I40E_SR_NVM_WAKE_ON_LAN 0x19 -#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27 -#define I40E_SR_NVM_EETRACK_LO 0x2D -#define I40E_SR_NVM_EETRACK_HI 0x2E -#define I40E_SR_VPD_PTR 0x2F -#define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E -#define I40E_SR_SW_CHECKSUM_WORD 0x3F - -/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */ -#define I40E_SR_VPD_MODULE_MAX_SIZE 1024 -#define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024 -#define I40E_SR_CONTROL_WORD_1_SHIFT 0x06 -#define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT) -#define I40E_SR_CONTROL_WORD_1_NVM_BANK_VALID BIT(5) -#define I40E_SR_NVM_MAP_STRUCTURE_TYPE BIT(12) -#define I40E_PTR_TYPE BIT(15) - -/* Shadow RAM related */ -#define I40E_SR_SECTOR_SIZE_IN_WORDS 0x800 -#define I40E_SR_WORDS_IN_1KB 512 -/* Checksum should be calculated such that after adding all the words, - * including the checksum word itself, the sum should be 0xBABA. - */ -#define I40E_SR_SW_CHECKSUM_BASE 0xBABA - -#define I40E_SRRD_SRCTL_ATTEMPTS 100000 - -enum i40e_switch_element_types { - I40E_SWITCH_ELEMENT_TYPE_MAC = 1, - I40E_SWITCH_ELEMENT_TYPE_PF = 2, - I40E_SWITCH_ELEMENT_TYPE_VF = 3, - I40E_SWITCH_ELEMENT_TYPE_EMP = 4, - I40E_SWITCH_ELEMENT_TYPE_BMC = 6, - I40E_SWITCH_ELEMENT_TYPE_PE = 16, - I40E_SWITCH_ELEMENT_TYPE_VEB = 17, - I40E_SWITCH_ELEMENT_TYPE_PA = 18, - I40E_SWITCH_ELEMENT_TYPE_VSI = 19, -}; - -/* Supported EtherType filters */ -enum i40e_ether_type_index { - I40E_ETHER_TYPE_1588 = 0, - I40E_ETHER_TYPE_FIP = 1, - I40E_ETHER_TYPE_OUI_EXTENDED = 2, - I40E_ETHER_TYPE_MAC_CONTROL = 3, - I40E_ETHER_TYPE_LLDP = 4, - I40E_ETHER_TYPE_EVB_PROTOCOL1 = 5, - I40E_ETHER_TYPE_EVB_PROTOCOL2 = 6, - I40E_ETHER_TYPE_QCN_CNM = 7, - I40E_ETHER_TYPE_8021X = 8, - I40E_ETHER_TYPE_ARP = 9, - I40E_ETHER_TYPE_RSV1 = 10, - I40E_ETHER_TYPE_RSV2 = 11, -}; - -/* Filter context base size is 1K */ -#define I40E_HASH_FILTER_BASE_SIZE 1024 -/* Supported Hash filter values */ -enum i40e_hash_filter_size { - I40E_HASH_FILTER_SIZE_1K = 0, - I40E_HASH_FILTER_SIZE_2K = 1, - I40E_HASH_FILTER_SIZE_4K = 2, - I40E_HASH_FILTER_SIZE_8K = 3, - I40E_HASH_FILTER_SIZE_16K = 4, - I40E_HASH_FILTER_SIZE_32K = 5, - I40E_HASH_FILTER_SIZE_64K = 6, - I40E_HASH_FILTER_SIZE_128K = 7, - I40E_HASH_FILTER_SIZE_256K = 8, - I40E_HASH_FILTER_SIZE_512K = 9, - I40E_HASH_FILTER_SIZE_1M = 10, -}; - -/* DMA context base size is 0.5K */ -#define I40E_DMA_CNTX_BASE_SIZE 512 -/* Supported DMA context values */ -enum i40e_dma_cntx_size { - I40E_DMA_CNTX_SIZE_512 = 0, - I40E_DMA_CNTX_SIZE_1K = 1, - I40E_DMA_CNTX_SIZE_2K = 2, - I40E_DMA_CNTX_SIZE_4K = 3, - I40E_DMA_CNTX_SIZE_8K = 4, - I40E_DMA_CNTX_SIZE_16K = 5, - I40E_DMA_CNTX_SIZE_32K = 6, - I40E_DMA_CNTX_SIZE_64K = 7, - I40E_DMA_CNTX_SIZE_128K = 8, - I40E_DMA_CNTX_SIZE_256K = 9, -}; - -/* Supported Hash look up table (LUT) sizes */ -enum i40e_hash_lut_size { - I40E_HASH_LUT_SIZE_128 = 0, - 
I40E_HASH_LUT_SIZE_512 = 1, -}; - -/* Structure to hold a per PF filter control settings */ -struct i40e_filter_control_settings { - /* number of PE Quad Hash filter buckets */ - enum i40e_hash_filter_size pe_filt_num; - /* number of PE Quad Hash contexts */ - enum i40e_dma_cntx_size pe_cntx_num; - /* number of FCoE filter buckets */ - enum i40e_hash_filter_size fcoe_filt_num; - /* number of FCoE DDP contexts */ - enum i40e_dma_cntx_size fcoe_cntx_num; - /* size of the Hash LUT */ - enum i40e_hash_lut_size hash_lut_size; - /* enable FDIR filters for PF and its VFs */ - bool enable_fdir; - /* enable Ethertype filters for PF and its VFs */ - bool enable_ethtype; - /* enable MAC/VLAN filters for PF and its VFs */ - bool enable_macvlan; -}; - -/* Structure to hold device level control filter counts */ -struct i40e_control_filter_stats { - u16 mac_etype_used; /* Used perfect match MAC/EtherType filters */ - u16 etype_used; /* Used perfect EtherType filters */ - u16 mac_etype_free; /* Un-used perfect match MAC/EtherType filters */ - u16 etype_free; /* Un-used perfect EtherType filters */ -}; - -enum i40e_reset_type { - I40E_RESET_POR = 0, - I40E_RESET_CORER = 1, - I40E_RESET_GLOBR = 2, - I40E_RESET_EMPR = 3, -}; - -/* IEEE 802.1AB LLDP Agent Variables from NVM */ -#define I40E_NVM_LLDP_CFG_PTR 0x06 -#define I40E_SR_LLDP_CFG_PTR 0x31 - -/* RSS Hash Table Size */ -#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000 - -/* INPUT SET MASK for RSS, flow director and flexible payload */ -#define I40E_FD_INSET_L3_SRC_SHIFT 47 -#define I40E_FD_INSET_L3_SRC_WORD_MASK (0x3ULL << \ - I40E_FD_INSET_L3_SRC_SHIFT) -#define I40E_FD_INSET_L3_DST_SHIFT 35 -#define I40E_FD_INSET_L3_DST_WORD_MASK (0x3ULL << \ - I40E_FD_INSET_L3_DST_SHIFT) -#define I40E_FD_INSET_L4_SRC_SHIFT 34 -#define I40E_FD_INSET_L4_SRC_WORD_MASK (0x1ULL << \ - I40E_FD_INSET_L4_SRC_SHIFT) -#define I40E_FD_INSET_L4_DST_SHIFT 33 -#define I40E_FD_INSET_L4_DST_WORD_MASK (0x1ULL << \ - I40E_FD_INSET_L4_DST_SHIFT) -#define I40E_FD_INSET_VERIFY_TAG_SHIFT 31 -#define I40E_FD_INSET_VERIFY_TAG_WORD_MASK (0x3ULL << \ - I40E_FD_INSET_VERIFY_TAG_SHIFT) - -#define I40E_FD_INSET_FLEX_WORD50_SHIFT 17 -#define I40E_FD_INSET_FLEX_WORD50_MASK (0x1ULL << \ - I40E_FD_INSET_FLEX_WORD50_SHIFT) -#define I40E_FD_INSET_FLEX_WORD51_SHIFT 16 -#define I40E_FD_INSET_FLEX_WORD51_MASK (0x1ULL << \ - I40E_FD_INSET_FLEX_WORD51_SHIFT) -#define I40E_FD_INSET_FLEX_WORD52_SHIFT 15 -#define I40E_FD_INSET_FLEX_WORD52_MASK (0x1ULL << \ - I40E_FD_INSET_FLEX_WORD52_SHIFT) -#define I40E_FD_INSET_FLEX_WORD53_SHIFT 14 -#define I40E_FD_INSET_FLEX_WORD53_MASK (0x1ULL << \ - I40E_FD_INSET_FLEX_WORD53_SHIFT) -#define I40E_FD_INSET_FLEX_WORD54_SHIFT 13 -#define I40E_FD_INSET_FLEX_WORD54_MASK (0x1ULL << \ - I40E_FD_INSET_FLEX_WORD54_SHIFT) -#define I40E_FD_INSET_FLEX_WORD55_SHIFT 12 -#define I40E_FD_INSET_FLEX_WORD55_MASK (0x1ULL << \ - I40E_FD_INSET_FLEX_WORD55_SHIFT) -#define I40E_FD_INSET_FLEX_WORD56_SHIFT 11 -#define I40E_FD_INSET_FLEX_WORD56_MASK (0x1ULL << \ - I40E_FD_INSET_FLEX_WORD56_SHIFT) -#define I40E_FD_INSET_FLEX_WORD57_SHIFT 10 -#define I40E_FD_INSET_FLEX_WORD57_MASK (0x1ULL << \ - I40E_FD_INSET_FLEX_WORD57_SHIFT) - -/* Version format for Dynamic Device Personalization(DDP) */ -struct i40e_ddp_version { - u8 major; - u8 minor; - u8 update; - u8 draft; -}; - -#define I40E_DDP_NAME_SIZE 32 - -/* Package header */ -struct i40e_package_header { - struct i40e_ddp_version version; - u32 segment_count; - u32 segment_offset[1]; -}; - -/* Generic segment header */ -struct 
i40e_generic_seg_header { -#define SEGMENT_TYPE_METADATA 0x00000001 -#define SEGMENT_TYPE_NOTES 0x00000002 -#define SEGMENT_TYPE_I40E 0x00000011 -#define SEGMENT_TYPE_X722 0x00000012 - u32 type; - struct i40e_ddp_version version; - u32 size; - char name[I40E_DDP_NAME_SIZE]; -}; -struct i40e_metadata_segment { - struct i40e_generic_seg_header header; - struct i40e_ddp_version version; - u32 track_id; - char name[I40E_DDP_NAME_SIZE]; -}; -struct i40e_device_id_entry { - u32 vendor_dev_id; - u32 sub_vendor_dev_id; -}; -struct i40e_profile_segment { - struct i40e_generic_seg_header header; - struct i40e_ddp_version version; - char name[I40E_DDP_NAME_SIZE]; - u32 device_table_count; - struct i40e_device_id_entry device_table[1]; -}; -struct i40e_section_table { - u32 section_count; - u32 section_offset[1]; -}; -struct i40e_profile_section_header { - u16 tbl_size; - u16 data_end; - struct { -#define SECTION_TYPE_INFO 0x00000010 -#define SECTION_TYPE_MMIO 0x00000800 -#define SECTION_TYPE_AQ 0x00000801 -#define SECTION_TYPE_NOTE 0x80000000 -#define SECTION_TYPE_NAME 0x80000001 - u32 type; - u32 offset; - u32 size; - } section; }; -struct i40e_profile_info { - u32 track_id; - struct i40e_ddp_version version; - u8 op; -#define I40E_DDP_ADD_TRACKID 0x01 -#define I40E_DDP_REMOVE_TRACKID 0x02 - u8 reserved[7]; - u8 name[I40E_DDP_NAME_SIZE]; -}; #endif /* _I40E_TYPE_H_ */ -- cgit v1.2.1 From a88903a09785b89574aa994d35db1590db5b99d0 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Wed, 1 Mar 2023 12:59:07 +0100 Subject: iavf: fix inverted Rx hash condition leading to disabled hash [ Upstream commit 32d57f667f871bc5a8babbe27ea4c5e668ee0ea8 ] The condition which checks whether the netdev has hashing enabled is inverted. Basically, the tagged commit effectively disabled passing the flow hash from descriptor to skb, unless the user *disables* it via Ethtool. Commit a876c3ba59a6 ("i40e/i40evf: properly report Rx packet hash") fixed this problem, but only for i40e. Invert the condition now in iavf and unblock passing hash to skbs again. Fixes: 857942fd1aa1 ("i40e: Fix Rx hash reported to the stack by our driver") Reviewed-by: Larysa Zaremba Reviewed-by: Michal Kubiak Signed-off-by: Alexander Lobakin Tested-by: Rafal Romanowski Reviewed-by: Leon Romanovsky Signed-off-by: Tony Nguyen Signed-off-by: Sasha Levin --- drivers/net/ethernet/intel/iavf/i40e_txrx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/iavf/i40e_txrx.c b/drivers/net/ethernet/intel/iavf/i40e_txrx.c index 1bf9734ae9cf..d4bd06adc145 100644 --- a/drivers/net/ethernet/intel/iavf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/iavf/i40e_txrx.c @@ -1062,7 +1062,7 @@ static inline void i40e_rx_hash(struct i40e_ring *ring, cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH << I40E_RX_DESC_STATUS_FLTSTAT_SHIFT); - if (ring->netdev->features & NETIF_F_RXHASH) + if (!(ring->netdev->features & NETIF_F_RXHASH)) return; if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) { -- cgit v1.2.1 From bceda4102382d167834381deb5784f838d318d67 Mon Sep 17 00:00:00 2001 From: Gaosheng Cui Date: Tue, 22 Nov 2022 10:28:52 +0800 Subject: intel/igbvf: free irq on the error path in igbvf_request_msix() [ Upstream commit 85eb39bb39cbb5c086df1e19ba67cc1366693a77 ] In igbvf_request_msix(), the IRQs that were already requested are not freed on the error path; we need to free them. Fix it.
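The fix below follows the standard kernel idiom for multi-IRQ setup: unwind every IRQ that was already requested, in reverse order, via labelled error paths. A minimal sketch of that idiom with hypothetical names (not the driver's exact code; assume the usual linux/interrupt.h and linux/pci.h declarations):

    /* Illustrative handler stub for the sketch. */
    static irqreturn_t example_isr(int irq, void *data)
    {
    	return IRQ_HANDLED;
    }

    static int example_request_irqs(struct msix_entry *entries, int n, void *data)
    {
    	int i, err;

    	for (i = 0; i < n; i++) {
    		err = request_irq(entries[i].vector, example_isr, 0,
    				  "example", data);
    		if (err)
    			goto unwind;
    	}
    	return 0;

    unwind:
    	while (--i >= 0)	/* free only what was requested, in reverse */
    		free_irq(entries[i].vector, data);
    	return err;
    }

The patch spells the same idea out with explicit free_irq_rx/free_irq_tx labels because each vector uses a different handler.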
Fixes: d4e0fe01a38a ("igbvf: add new driver to support 82576 virtual functions") Signed-off-by: Gaosheng Cui Reviewed-by: Maciej Fijalkowski Tested-by: Marek Szlosek Signed-off-by: Tony Nguyen Signed-off-by: Sasha Levin --- drivers/net/ethernet/intel/igbvf/netdev.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index df827c254162..70f5f28bfd9e 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -1070,7 +1070,7 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter) igbvf_intr_msix_rx, 0, adapter->rx_ring->name, netdev); if (err) - goto out; + goto free_irq_tx; adapter->rx_ring->itr_register = E1000_EITR(vector); adapter->rx_ring->itr_val = adapter->current_itr; @@ -1079,10 +1079,14 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter) err = request_irq(adapter->msix_entries[vector].vector, igbvf_msix_other, 0, netdev->name, netdev); if (err) - goto out; + goto free_irq_rx; igbvf_configure_msix(adapter); return 0; +free_irq_rx: + free_irq(adapter->msix_entries[--vector].vector, netdev); +free_irq_tx: + free_irq(adapter->msix_entries[--vector].vector, netdev); out: return err; } -- cgit v1.2.1 From bd04a5d2a62181017fb5c081d4fa5e0b46699278 Mon Sep 17 00:00:00 2001 From: Akihiko Odaki Date: Thu, 1 Dec 2022 19:20:03 +0900 Subject: igbvf: Regard vf reset nack as success [ Upstream commit 02c83791ef969c6a8a150b4927193d0d0e50fb23 ] A VF reset NACK actually indicates that the reset operation itself was performed but no address was assigned. Therefore, e1000_reset_hw_vf should fill the "perm_addr" with the zero address and return success on such an occasion. This prevents its callers in netdev.c from reporting that the PF is still resetting, and instead allows them to correctly report that no address is assigned. Fixes: 6ddbc4cf1f4d ("igb: Indicate failure on vf reset for empty mac address") Signed-off-by: Akihiko Odaki Reviewed-by: Leon Romanovsky Tested-by: Marek Szlosek Signed-off-by: Tony Nguyen Signed-off-by: Sasha Levin --- drivers/net/ethernet/intel/igbvf/vf.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c index b8ba3f94c363..a47a2e3e548c 100644 --- a/drivers/net/ethernet/intel/igbvf/vf.c +++ b/drivers/net/ethernet/intel/igbvf/vf.c @@ -1,6 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2009 - 2018 Intel Corporation. */ +#include <linux/etherdevice.h> + #include "vf.h" static s32 e1000_check_for_link_vf(struct e1000_hw *hw); @@ -131,11 +133,16 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw) /* set our "perm_addr" based on info provided by PF */ ret_val = mbx->ops.read_posted(hw, msgbuf, 3); if (!ret_val) { - if (msgbuf[0] == (E1000_VF_RESET | - E1000_VT_MSGTYPE_ACK)) + switch (msgbuf[0]) { + case E1000_VF_RESET | E1000_VT_MSGTYPE_ACK: memcpy(hw->mac.perm_addr, addr, ETH_ALEN); - else + break; + case E1000_VF_RESET | E1000_VT_MSGTYPE_NACK: + eth_zero_addr(hw->mac.perm_addr); + break; + default: ret_val = -E1000_ERR_MAC_INIT; + } } } -- cgit v1.2.1 From 6d2e42d94a89ff10b4219a04317d2a2616469ea5 Mon Sep 17 00:00:00 2001 From: Alexander Stein Date: Mon, 30 Jan 2023 16:32:47 +0100 Subject: i2c: imx-lpi2c: check only for enabled interrupt flags [ Upstream commit 1c7885004567e8951d65a983be095f254dd20bef ] When reading from I2C, the Tx watermark is set to 0. Unfortunately the TDF (transmit data flag) is set whenever the number of Tx FIFO entries is equal to or less than the watermark, so with a watermark of 0 it is set in every case (hence the hardware reset default of 1). This results in the MSR_RDF _and_ MSR_TDF flags being set, so the driver tries to send Tx data on a read message. Mask the IRQ status to filter for wanted flags only.
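The core of the fix is to intersect the raw status register with the set of interrupts that were actually enabled, so that spurious status bits (like an always-asserted TDF on reads) are ignored. A hedged sketch of the idea (LPI2C_MIER, LPI2C_MSR and the MSR_* flags are the driver's names; the helpers are illustrative):

    enabled = readl(base + LPI2C_MIER);	/* which interrupts we enabled */
    status = readl(base + LPI2C_MSR);	/* which events are pending */
    status &= enabled;			/* drop events nobody asked for */

    if (status & MSR_RDF)		/* Rx FIFO above watermark */
    	example_read_rxfifo();
    if (status & MSR_TDF)		/* Tx FIFO at/below watermark */
    	example_fill_txfifo();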
Fixes: a55fa9d0e42e ("i2c: imx-lpi2c: add low power i2c bus driver") Signed-off-by: Alexander Stein Tested-by: Emanuele Ghidoli Signed-off-by: Wolfram Sang Signed-off-by: Sasha Levin --- drivers/i2c/busses/i2c-imx-lpi2c.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c index 06c4c767af32..90c510d16651 100644 --- a/drivers/i2c/busses/i2c-imx-lpi2c.c +++ b/drivers/i2c/busses/i2c-imx-lpi2c.c @@ -508,10 +508,14 @@ disable: static irqreturn_t lpi2c_imx_isr(int irq, void *dev_id) { struct lpi2c_imx_struct *lpi2c_imx = dev_id; + unsigned int enabled; unsigned int temp; + enabled = readl(lpi2c_imx->base + LPI2C_MIER); + lpi2c_imx_intctrl(lpi2c_imx, 0); temp = readl(lpi2c_imx->base + LPI2C_MSR); + temp &= enabled; if (temp & MSR_RDF) lpi2c_imx_read_rxfifo(lpi2c_imx); -- cgit v1.2.1 From c110051d335ef7f62ad33474b0c23997fee5bfb5 Mon Sep 17 00:00:00 2001 From: Yu Kuai Date: Wed, 15 Mar 2023 14:21:54 +0800 Subject: scsi: scsi_dh_alua: Fix memleak for 'qdata' in alua_activate() [ Upstream commit a13faca032acbf2699293587085293bdfaafc8ae ] If alua_rtpg_queue() fails when called from alua_activate(), 'qdata' is not freed, which causes the following memleak: unreferenced object 0xffff88810b2c6980 (size 32): comm "kworker/u16:2", pid 635322, jiffies 4355801099 (age 1216426.076s) hex dump (first 32 bytes): 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................ 40 39 24 c1 ff ff ff ff 00 f8 ea 0a 81 88 ff ff @9$............. backtrace: [<0000000098f3a26d>] alua_activate+0xb0/0x320 [<000000003b529641>] scsi_dh_activate+0xb2/0x140 [<000000007b296db3>] activate_path_work+0xc6/0xe0 [dm_multipath] [<000000007adc9ace>] process_one_work+0x3c5/0x730 [<00000000c457a985>] worker_thread+0x93/0x650 [<00000000cb80e628>] kthread+0x1ba/0x210 [<00000000a1e61077>] ret_from_fork+0x22/0x30 Fix the problem by freeing 'qdata' in the error path.
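The ownership rule behind the fix: when a handoff function reports that it did not take the object, the caller still owns the allocation and must release it. A minimal sketch of the pattern (names other than kfree() and alua_rtpg_queue() are illustrative, and the return codes are examples):

    qdata = kzalloc(sizeof(*qdata), GFP_KERNEL);
    if (!qdata)
    	return SCSI_DH_RETRY;

    if (alua_rtpg_queue(pg, sdev, qdata, true)) {
    	/* queued: the workqueue item now owns qdata */
    } else {
    	kfree(qdata);	/* not queued: ownership never left us */
    	err = SCSI_DH_DEV_OFFLINED;
    }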
Fixes: 625fe857e4fa ("scsi: scsi_dh_alua: Check scsi_device_get() return value") Signed-off-by: Yu Kuai Link: https://lore.kernel.org/r/20230315062154.668812-1-yukuai1@huaweicloud.com Reviewed-by: Benjamin Block Reviewed-by: Bart Van Assche Signed-off-by: Martin K. Petersen Signed-off-by: Sasha Levin --- drivers/scsi/device_handler/scsi_dh_alua.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index 4cf7c3348bff..9be913c19a6e 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -1050,10 +1050,12 @@ static int alua_activate(struct scsi_device *sdev, rcu_read_unlock(); mutex_unlock(&h->init_mutex); - if (alua_rtpg_queue(pg, sdev, qdata, true)) + if (alua_rtpg_queue(pg, sdev, qdata, true)) { fn = NULL; - else + } else { + kfree(qdata); err = SCSI_DH_DEV_OFFLINED; + } kref_put(&pg->kref, release_port_group); out: if (fn) -- cgit v1.2.1 From d3c145a4d24b752c9a1314d5a595014d51471418 Mon Sep 17 00:00:00 2001 From: Szymon Heidrich Date: Thu, 16 Mar 2023 11:19:54 +0100 Subject: net: usb: smsc95xx: Limit packet length to skb->len [ Upstream commit ff821092cf02a70c2bccd2d19269f01e29aa52cf ] The packet length retrieved from the descriptor may be larger than the actual socket buffer length. In such a case the cloned skb passed up the network stack will leak kernel memory contents. Fixes: 2f7ca802bdae ("net: Add SMSC LAN9500 USB2.0 10/100 ethernet adapter driver") Signed-off-by: Szymon Heidrich Reviewed-by: Jakub Kicinski Link: https://lore.kernel.org/r/20230316101954.75836-1-szymon.heidrich@gmail.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- drivers/net/usb/smsc95xx.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 4f29010e1aef..085048686413 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -1950,6 +1950,12 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) size = (u16)((header & RX_STS_FL_) >> 16); align_count = (4 - ((size + NET_IP_ALIGN) % 4)) % 4; + if (unlikely(size > skb->len)) { + netif_dbg(dev, rx_err, dev->net, + "size err header=0x%08x\n", header); + return 0; + } + if (unlikely(header & RX_STS_ES_)) { netif_dbg(dev, rx_err, dev->net, "Error header=0x%08x\n", header); -- cgit v1.2.1 From 7742c08e012eb65405e8304d100641638c5ff882 Mon Sep 17 00:00:00 2001 From: Daniil Tatianin Date: Thu, 16 Mar 2023 13:29:21 +0300 Subject: qed/qed_sriov: guard against NULL derefs from qed_iov_get_vf_info [ Upstream commit 25143b6a01d0cc5319edd3de22ffa2578b045550 ] We have to make sure that the info returned by the helper is valid before using it. Found by Linux Verification Center (linuxtesting.org) with the SVACE static analysis tool.
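The pattern the fix below applies twice: a lookup helper that can return NULL must be checked before any field access, both in single-shot paths (return an error) and in iteration paths (skip the entry). A hedged sketch (qed_iov_get_vf_info() and the fields are as used in the patch; the surrounding code is illustrative):

    vf = qed_iov_get_vf_info(hwfn, (u16)vfid, true);
    if (!vf)
    	return -EINVAL;		/* single-shot path: bail out */

    for (i = 0; i < num_vfs; i++) {
    	vf = qed_iov_get_vf_info(hwfn, i, true);
    	if (!vf || !vf->vport_instance)
    		continue;	/* loop path: skip invalid entries */
    	/* ... only now is vf safe to dereference ... */
    }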
Fixes: f990c82c385b ("qed*: Add support for ndo_set_vf_trust") Fixes: 733def6a04bf ("qed*: IOV link control") Signed-off-by: Daniil Tatianin Reviewed-by: Michal Swiatkowski Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/qlogic/qed/qed_sriov.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 402c1c3d84ce..5c8eaded6b30 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -4403,6 +4403,9 @@ qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate) } vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true); + if (!vf) + return -EINVAL; + vport_id = vf->vport_id; return qed_configure_vport_wfq(cdev, vport_id, rate); @@ -5142,7 +5145,7 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn) /* Validate that the VF has a configured vport */ vf = qed_iov_get_vf_info(hwfn, i, true); - if (!vf->vport_instance) + if (!vf || !vf->vport_instance) continue; memset(&params, 0, sizeof(params)); -- cgit v1.2.1 From 526660c25d3b93b1232a525b75469048388f0928 Mon Sep 17 00:00:00 2001 From: Zheng Wang Date: Fri, 17 Mar 2023 00:15:26 +0800 Subject: xirc2ps_cs: Fix use after free bug in xirc2ps_detach [ Upstream commit e8d20c3ded59a092532513c9bd030d1ea66f5f44 ] In xirc2ps_probe, the local->tx_timeout_task was bound to xirc2ps_tx_timeout_task. When a timeout occurs, xirc_tx_timeout calls schedule_work to start the work. When we call xirc2ps_detach to remove the driver, the following sequence is possible:

CPU0                  CPU1

                      |xirc2ps_tx_timeout_task
xirc2ps_detach        |
  free_netdev         |
    kfree(dev);       |
                      |
                      | do_reset
                      |   //use dev

Stop responding to timeout tasks and complete scheduled tasks before cleanup in xirc2ps_detach, which will fix the problem. Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Signed-off-by: Zheng Wang Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/xircom/xirc2ps_cs.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c index fd5288ff53b5..e3438cef5f9c 100644 --- a/drivers/net/ethernet/xircom/xirc2ps_cs.c +++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c @@ -503,6 +503,11 @@ static void xirc2ps_detach(struct pcmcia_device *link) { struct net_device *dev = link->priv; + struct local_info *local = netdev_priv(dev); + + netif_carrier_off(dev); + netif_tx_disable(dev); + cancel_work_sync(&local->tx_timeout_task); dev_dbg(&link->dev, "detach\n"); -- cgit v1.2.1 From 4bbc59ec4feb1ea8d5cb3d9d38d4cb1317943ea4 Mon Sep 17 00:00:00 2001 From: Zheng Wang Date: Sat, 18 Mar 2023 16:05:26 +0800 Subject: net: qcom/emac: Fix use after free bug in emac_remove due to race condition [ Upstream commit 6b6bc5b8bd2d4ca9e1efa9ae0f98a0b0687ace75 ] In emac_probe, &adpt->work_thread is bound with emac_work_thread. It is then started by the timeout handler emac_tx_timeout or the IRQ handler emac_isr. If we remove the driver, which calls emac_remove for cleanup, unfinished work may still be pending. The possible sequence is as follows:

CPU0                  CPU1

                      |emac_work_thread
emac_remove           |
  free_netdev         |
    kfree(netdev);    |
                      |emac_reinit_locked
                      |emac_mac_down
                      |//use netdev

Fix it by finishing the work before cleanup in emac_remove and disabling the timeout response. Fixes: b9b17debc69d ("net: emac: emac gigabit ethernet controller driver") Signed-off-by: Zheng Wang Signed-off-by: David S.
Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/qualcomm/emac/emac.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index 76a9b37c8680..3c764c28d5db 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -752,9 +752,15 @@ static int emac_remove(struct platform_device *pdev) struct net_device *netdev = dev_get_drvdata(&pdev->dev); struct emac_adapter *adpt = netdev_priv(netdev); + netif_carrier_off(netdev); + netif_tx_disable(netdev); + unregister_netdev(netdev); netif_napi_del(&adpt->rx_q.napi); + free_irq(adpt->irq.irq, &adpt->irq); + cancel_work_sync(&adpt->work_thread); + emac_clks_teardown(adpt); put_device(&adpt->phydev->mdio.dev); -- cgit v1.2.1 From 6cd06bf60f69fd21ecd1a2c6061779da4d24f296 Mon Sep 17 00:00:00 2001 From: Geoff Levand Date: Sat, 18 Mar 2023 17:39:16 +0000 Subject: net/ps3_gelic_net: Fix RX sk_buff length [ Upstream commit 19b3bb51c3bc288b3f2c6f8c4450b0f548320625 ] The Gelic Ethernet device needs to have the RX sk_buffs aligned to GELIC_NET_RXBUF_ALIGN, and also the length of the RX sk_buffs must be a multiple of GELIC_NET_RXBUF_ALIGN. The current Gelic Ethernet driver was not allocating sk_buffs large enough to allow for this alignment. Also, correct the maximum and minimum MTU sizes, and add a new preprocessor macro for the maximum frame size, GELIC_NET_MAX_FRAME. Fixes various randomly occurring runtime network errors. Fixes: 02c1889166b4 ("ps3: gigabit ethernet driver for PS3, take3") Signed-off-by: Geoff Levand Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/toshiba/ps3_gelic_net.c | 19 ++++++++++--------- drivers/net/ethernet/toshiba/ps3_gelic_net.h | 5 +++-- 2 files changed, 13 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c index 75237c81c63d..67f61379ba67 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c @@ -378,28 +378,29 @@ iommu_error: * * allocates a new rx skb, iommu-maps it and attaches it to the descriptor. * Activate the descriptor state-wise + * + * Gelic RX sk_buffs must be aligned to GELIC_NET_RXBUF_ALIGN and the length + * must be a multiple of GELIC_NET_RXBUF_ALIGN. 
*/ static int gelic_descr_prepare_rx(struct gelic_card *card, struct gelic_descr *descr) { + static const unsigned int rx_skb_size = + ALIGN(GELIC_NET_MAX_FRAME, GELIC_NET_RXBUF_ALIGN) + + GELIC_NET_RXBUF_ALIGN - 1; int offset; - unsigned int bufsize; if (gelic_descr_get_status(descr) != GELIC_DESCR_DMA_NOT_IN_USE) dev_info(ctodev(card), "%s: ERROR status\n", __func__); - /* we need to round up the buffer size to a multiple of 128 */ - bufsize = ALIGN(GELIC_NET_MAX_MTU, GELIC_NET_RXBUF_ALIGN); - /* and we need to have it 128 byte aligned, therefore we allocate a - * bit more */ - descr->skb = dev_alloc_skb(bufsize + GELIC_NET_RXBUF_ALIGN - 1); + descr->skb = netdev_alloc_skb(*card->netdev, rx_skb_size); if (!descr->skb) { descr->buf_addr = 0; /* tell DMAC don't touch memory */ dev_info(ctodev(card), "%s:allocate skb failed !!\n", __func__); return -ENOMEM; } - descr->buf_size = cpu_to_be32(bufsize); + descr->buf_size = cpu_to_be32(rx_skb_size); descr->dmac_cmd_status = 0; descr->result_size = 0; descr->valid_size = 0; @@ -412,7 +413,7 @@ static int gelic_descr_prepare_rx(struct gelic_card *card, /* io-mmu-map the skb */ descr->buf_addr = cpu_to_be32(dma_map_single(ctodev(card), descr->skb->data, - GELIC_NET_MAX_MTU, + GELIC_NET_MAX_FRAME, DMA_FROM_DEVICE)); if (!descr->buf_addr) { dev_kfree_skb_any(descr->skb); @@ -930,7 +931,7 @@ static void gelic_net_pass_skb_up(struct gelic_descr *descr, data_error = be32_to_cpu(descr->data_error); /* unmap skb buffer */ dma_unmap_single(ctodev(card), be32_to_cpu(descr->buf_addr), - GELIC_NET_MAX_MTU, + GELIC_NET_MAX_FRAME, DMA_FROM_DEVICE); skb_put(skb, be32_to_cpu(descr->valid_size)? diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h index fbbf9b54b173..0e592fc19f6c 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h @@ -32,8 +32,9 @@ #define GELIC_NET_RX_DESCRIPTORS 128 /* num of descriptors */ #define GELIC_NET_TX_DESCRIPTORS 128 /* num of descriptors */ -#define GELIC_NET_MAX_MTU VLAN_ETH_FRAME_LEN -#define GELIC_NET_MIN_MTU VLAN_ETH_ZLEN +#define GELIC_NET_MAX_FRAME 2312 +#define GELIC_NET_MAX_MTU 2294 +#define GELIC_NET_MIN_MTU 64 #define GELIC_NET_RXBUF_ALIGN 128 #define GELIC_CARD_RX_CSUM_DEFAULT 1 /* hw chksum */ #define GELIC_NET_WATCHDOG_TIMEOUT 5*HZ -- cgit v1.2.1 From b6e6af5af072ef511327904aecc11666d6143a2a Mon Sep 17 00:00:00 2001 From: Geoff Levand Date: Sat, 18 Mar 2023 17:39:16 +0000 Subject: net/ps3_gelic_net: Use dma_mapping_error [ Upstream commit bebe933d35a63d4f042fbf4dce4f22e689ba0fcd ] The current Gelic Ethernet driver was checking the return value of its dma_map_single call, and not using the dma_mapping_error() routine. Fixes runtime problems like these: DMA-API: ps3_gelic_driver sb_05: device driver failed to check map error WARNING: CPU: 0 PID: 0 at kernel/dma/debug.c:1027 .check_unmap+0x888/0x8dc Fixes: 02c1889166b4 ("ps3: gigabit ethernet driver for PS3, take3") Reviewed-by: Alexander Duyck Signed-off-by: Geoff Levand Signed-off-by: David S.
Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/toshiba/ps3_gelic_net.c | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c index 67f61379ba67..572294678faf 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c @@ -330,15 +330,17 @@ static int gelic_card_init_chain(struct gelic_card *card, /* set up the hardware pointers in each descriptor */ for (i = 0; i < no; i++, descr++) { + dma_addr_t cpu_addr; + gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE); - descr->bus_addr = - dma_map_single(ctodev(card), descr, - GELIC_DESCR_SIZE, - DMA_BIDIRECTIONAL); - if (!descr->bus_addr) + cpu_addr = dma_map_single(ctodev(card), descr, + GELIC_DESCR_SIZE, DMA_BIDIRECTIONAL); + + if (dma_mapping_error(ctodev(card), cpu_addr)) goto iommu_error; + descr->bus_addr = cpu_to_be32(cpu_addr); descr->next = descr + 1; descr->prev = descr - 1; } @@ -388,6 +390,7 @@ static int gelic_descr_prepare_rx(struct gelic_card *card, static const unsigned int rx_skb_size = ALIGN(GELIC_NET_MAX_FRAME, GELIC_NET_RXBUF_ALIGN) + GELIC_NET_RXBUF_ALIGN - 1; + dma_addr_t cpu_addr; int offset; if (gelic_descr_get_status(descr) != GELIC_DESCR_DMA_NOT_IN_USE) @@ -411,11 +414,10 @@ static int gelic_descr_prepare_rx(struct gelic_card *card, if (offset) skb_reserve(descr->skb, GELIC_NET_RXBUF_ALIGN - offset); /* io-mmu-map the skb */ - descr->buf_addr = cpu_to_be32(dma_map_single(ctodev(card), - descr->skb->data, - GELIC_NET_MAX_FRAME, - DMA_FROM_DEVICE)); - if (!descr->buf_addr) { + cpu_addr = dma_map_single(ctodev(card), descr->skb->data, + GELIC_NET_MAX_FRAME, DMA_FROM_DEVICE); + descr->buf_addr = cpu_to_be32(cpu_addr); + if (dma_mapping_error(ctodev(card), cpu_addr)) { dev_kfree_skb_any(descr->skb); descr->skb = NULL; dev_info(ctodev(card), @@ -795,7 +797,7 @@ static int gelic_descr_prepare_tx(struct gelic_card *card, buf = dma_map_single(ctodev(card), skb->data, skb->len, DMA_TO_DEVICE); - if (!buf) { + if (dma_mapping_error(ctodev(card), buf)) { dev_err(ctodev(card), "dma map 2 failed (%p, %i). Dropping packet\n", skb->data, skb->len); -- cgit v1.2.1 From 42049e65d338870e93732b0b80c6c41faf6aa781 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Mon, 20 Mar 2023 15:37:25 +0100 Subject: bpf: Adjust insufficient default bpf_jit_limit [ Upstream commit 10ec8ca8ec1a2f04c4ed90897225231c58c124a7 ] We've seen recent AWS EKS (Kubernetes) user reports like the following: After upgrading EKS nodes from v20230203 to v20230217 on our 1.24 EKS clusters after a few days a number of the nodes have containers stuck in ContainerCreating state or liveness/readiness probes reporting the following error: Readiness probe errored: rpc error: code = Unknown desc = failed to exec in container: failed to start exec "4a11039f730203ffc003b7[...]": OCI runtime exec failed: exec failed: unable to start container process: unable to init seccomp: error loading seccomp filter into kernel: error loading seccomp filter: errno 524: unknown However, we had not been seeing this issue on previous AMIs and it only started to occur on v20230217 (following the upgrade from kernel 5.4 to 5.10) with no other changes to the underlying cluster or workloads. 
We tried the suggestions from that issue (sysctl net.core.bpf_jit_limit=452534528) which helped to immediately allow containers to be created and probes to execute but after approximately a day the issue returned and the value returned by cat /proc/vmallocinfo | grep bpf_jit | awk '{s+=$2} END {print s}' was steadily increasing. I tested the bpf tree to observe bpf_jit_charge_modmem and bpf_jit_uncharge_modmem, the sizes passed in, as well as bpf_jit_current, under a tcpdump BPF filter, seccomp BPF and native (e)BPF programs, and the behavior all looks sane and expected, that is, nothing is "leaking" from an upstream perspective. The bpf_jit_limit knob was originally added in order to avoid a situation where unprivileged applications loading BPF programs (e.g. seccomp BPF policies) consume all the module memory space via BPF JIT such that loading of kernel modules would be prevented. The default limit was defined back in 2018 and while good enough back then, we are generally seeing far more BPF consumers today. Adjust the limit for the BPF JIT pool from originally 1/4 to now 1/2 of the module memory space to better reflect today's needs and avoid more users running into potentially hard-to-debug issues. Fixes: fdadd04931c2 ("bpf: fix bpf_jit_limit knob for PAGE_SIZE >= 64K") Reported-by: Stephen Haynes Reported-by: Lefteris Alexakis Signed-off-by: Daniel Borkmann Link: https://github.com/awslabs/amazon-eks-ami/issues/1179 Link: https://github.com/awslabs/amazon-eks-ami/issues/1219 Reviewed-by: Kuniyuki Iwashima Link: https://lore.kernel.org/r/20230320143725.8394-1-daniel@iogearbox.net Signed-off-by: Alexei Starovoitov Signed-off-by: Sasha Levin --- kernel/bpf/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 24e16538e4d7..285101772c75 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -603,7 +603,7 @@ static int __init bpf_jit_charge_init(void) { /* Only used as heuristic here to derive limit. */ bpf_jit_limit_max = bpf_jit_alloc_exec_limit(); - bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 2, + bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 1, PAGE_SIZE), LONG_MAX); return 0; } -- cgit v1.2.1 From 204fa0b057cf4d4b0cc4504dc607b822c76f5d1b Mon Sep 17 00:00:00 2001 From: Maher Sanalla Date: Wed, 15 Mar 2023 11:04:38 +0200 Subject: net/mlx5: Read the TC mapping of all priorities on ETS query [ Upstream commit 44d553188c38ac74b799dfdcebafef2f7bb70942 ] When ETS configurations are queried by the user to get the mapping assignment between packet priority and traffic class, only priorities up to the maximum number of TCs are queried from the QTCT register in FW to retrieve their assigned TC, leaving the rest of the priorities mapped to the default TC #0, which might be misleading. Fix by querying the TC mapping of all priorities on each ETS query, regardless of the maximum number of TCs configured in FW.
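The shape of the fix: the priority-to-TC query must run over all eight IEEE 802.1Qaz priorities rather than being capped by the device's TC count, since prio_tc[] has one slot per priority. A hedged sketch of the corrected loop (identifiers as used in the patch; surrounding error handling abbreviated):

    /* every priority 0..7 gets its real TC, not a default of 0 */
    for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
    	err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]);
    	if (err)
    		return err;
    }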
Fixes: 820c2c5e773d ("net/mlx5e: Read ETS settings directly from firmware") Signed-off-by: Maher Sanalla Reviewed-by: Moshe Shemesh Signed-off-by: Saeed Mahameed Signed-off-by: Sasha Levin --- drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index 722998d68564..6f1f53f91ed8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -109,12 +109,14 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev, if (!MLX5_CAP_GEN(priv->mdev, ets)) return -EOPNOTSUPP; - ets->ets_cap = mlx5_max_tc(priv->mdev) + 1; - for (i = 0; i < ets->ets_cap; i++) { + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]); if (err) return err; + } + ets->ets_cap = mlx5_max_tc(priv->mdev) + 1; + for (i = 0; i < ets->ets_cap; i++) { err = mlx5_query_port_tc_group(mdev, i, &tc_group[i]); if (err) return err; -- cgit v1.2.1 From 944f944e040486007d6763d7110281e9ad2d1f71 Mon Sep 17 00:00:00 2001 From: Li Zetao Date: Mon, 20 Mar 2023 14:33:18 +0000 Subject: atm: idt77252: fix kmemleak when rmmod idt77252 [ Upstream commit 4fe3c88552a3fbe1944426a4506a18cdeb457b5a ] There are memory leaks reported by kmemleak: unreferenced object 0xffff888106500800 (size 128): comm "modprobe", pid 1017, jiffies 4297787785 (age 67.152s) hex dump (first 32 bytes): 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................ 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................ backtrace: [<00000000970ce626>] __kmem_cache_alloc_node+0x20c/0x380 [<00000000fb5f78d9>] kmalloc_trace+0x2f/0xb0 [<000000000e947e2a>] idt77252_init_one+0x2847/0x3c90 [idt77252] [<000000006efb048e>] local_pci_probe+0xeb/0x1a0 ... unreferenced object 0xffff888106500b00 (size 128): comm "modprobe", pid 1017, jiffies 4297787785 (age 67.152s) hex dump (first 32 bytes): 00 20 3d 01 80 88 ff ff 00 20 3d 01 80 88 ff ff . =...... =..... f0 23 3d 01 80 88 ff ff 00 20 3d 01 00 00 00 00 .#=...... =..... backtrace: [<00000000970ce626>] __kmem_cache_alloc_node+0x20c/0x380 [<00000000fb5f78d9>] kmalloc_trace+0x2f/0xb0 [<00000000f451c5be>] alloc_scq.constprop.0+0x4a/0x400 [idt77252] [<00000000e6313849>] idt77252_init_one+0x28cf/0x3c90 [idt77252] The root cause is traced to the vc_maps, which are allocated in open_card_oam() but not freed in close_card_oam(). The vc_maps are used to record open connections, so when a vc_map is closed in close_card_oam(), its memory should be freed. Moreover, ubr0 is not closed when an idt77252 device is closed, leading to the memory leak of vc_map and scq_info. Fix them by adding kfree in close_card_oam() and implementing a new close_card_ubr0() to close ubr0.
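Both leaks come down to open/close asymmetry: every object allocated on an open path needs a matching free on the corresponding close path, released in reverse order of setup. A hedged sketch of the pairing (vc_map, scq and free_scq() are the driver's names; the surrounding code is elided):

    /* open path: vc = kmalloc(sizeof(*vc), GFP_KERNEL);
     *            vc->scq = alloc_scq(card, class); ...
     */

    /* close path must mirror it: */
    free_scq(card, vc->scq);	/* per-connection queue first */
    kfree(vc);			/* then the vc_map itself */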
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Signed-off-by: Li Zetao Reviewed-by: Francois Romieu Link: https://lore.kernel.org/r/20230320143318.2644630-1-lizetao1@huawei.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- drivers/atm/idt77252.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c index bc06f5919839..3380322df98e 100644 --- a/drivers/atm/idt77252.c +++ b/drivers/atm/idt77252.c @@ -2915,6 +2915,7 @@ close_card_oam(struct idt77252_dev *card) recycle_rx_pool_skb(card, &vc->rcv.rx_pool); } + kfree(vc); } } } @@ -2958,6 +2959,15 @@ open_card_ubr0(struct idt77252_dev *card) return 0; } +static void +close_card_ubr0(struct idt77252_dev *card) +{ + struct vc_map *vc = card->vcs[0]; + + free_scq(card, vc->scq); + kfree(vc); +} + static int idt77252_dev_open(struct idt77252_dev *card) { @@ -3007,6 +3017,7 @@ static void idt77252_dev_close(struct atm_dev *dev) struct idt77252_dev *card = dev->dev_data; u32 conf; + close_card_ubr0(card); close_card_oam(card); conf = SAR_CFG_RXPTH | /* enable receive path */ -- cgit v1.2.1 From da149daf821a3c05cd04f7c60776c86c5ee9685c Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 20 Mar 2023 16:34:27 +0000 Subject: erspan: do not use skb_mac_header() in ndo_start_xmit() [ Upstream commit 8e50ed774554f93d55426039b27b1e38d7fa64d8 ] Drivers should not assume skb_mac_header(skb) == skb->data in their ndo_start_xmit(). Use skb_network_offset() and skb_transport_offset() which better describe what is needed in erspan_fb_xmit() and ip6erspan_tunnel_xmit() syzbot reported: WARNING: CPU: 0 PID: 5083 at include/linux/skbuff.h:2873 skb_mac_header include/linux/skbuff.h:2873 [inline] WARNING: CPU: 0 PID: 5083 at include/linux/skbuff.h:2873 ip6erspan_tunnel_xmit+0x1d9c/0x2d90 net/ipv6/ip6_gre.c:962 Modules linked in: CPU: 0 PID: 5083 Comm: syz-executor406 Not tainted 6.3.0-rc2-syzkaller-00866-gd4671cb96fa3 #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 03/02/2023 RIP: 0010:skb_mac_header include/linux/skbuff.h:2873 [inline] RIP: 0010:ip6erspan_tunnel_xmit+0x1d9c/0x2d90 net/ipv6/ip6_gre.c:962 Code: 04 02 41 01 de 84 c0 74 08 3c 03 0f 8e 1c 0a 00 00 45 89 b4 24 c8 00 00 00 c6 85 77 fe ff ff 01 e9 33 e7 ff ff e8 b4 27 a1 f8 <0f> 0b e9 b6 e7 ff ff e8 a8 27 a1 f8 49 8d bf f0 0c 00 00 48 b8 00 RSP: 0018:ffffc90003b2f830 EFLAGS: 00010293 RAX: 0000000000000000 RBX: 000000000000ffff RCX: 0000000000000000 RDX: ffff888021273a80 RSI: ffffffff88e1bd4c RDI: 0000000000000003 RBP: ffffc90003b2f9d8 R08: 0000000000000003 R09: 000000000000ffff R10: 000000000000ffff R11: 0000000000000000 R12: ffff88802b28da00 R13: 00000000000000d0 R14: ffff88807e25b6d0 R15: ffff888023408000 FS: 0000555556a61300(0000) GS:ffff8880b9800000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 000055e5b11eb6e8 CR3: 0000000027c1b000 CR4: 00000000003506f0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 Call Trace: __netdev_start_xmit include/linux/netdevice.h:4900 [inline] netdev_start_xmit include/linux/netdevice.h:4914 [inline] __dev_direct_xmit+0x504/0x730 net/core/dev.c:4300 dev_direct_xmit include/linux/netdevice.h:3088 [inline] packet_xmit+0x20a/0x390 net/packet/af_packet.c:285 packet_snd net/packet/af_packet.c:3075 [inline] packet_sendmsg+0x31a0/0x5150 net/packet/af_packet.c:3107 sock_sendmsg_nosec net/socket.c:724 [inline] sock_sendmsg+0xde/0x190 net/socket.c:747 
__sys_sendto+0x23a/0x340 net/socket.c:2142 __do_sys_sendto net/socket.c:2154 [inline] __se_sys_sendto net/socket.c:2150 [inline] __x64_sys_sendto+0xe1/0x1b0 net/socket.c:2150 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x39/0xb0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x63/0xcd RIP: 0033:0x7f123aaa1039 Code: 28 00 00 00 75 05 48 83 c4 28 c3 e8 b1 14 00 00 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 c0 ff ff ff f7 d8 64 89 01 48 RSP: 002b:00007ffc15d12058 EFLAGS: 00000246 ORIG_RAX: 000000000000002c RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007f123aaa1039 RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000003 RBP: 0000000000000000 R08: 0000000020000040 R09: 0000000000000014 R10: 0000000000000000 R11: 0000000000000246 R12: 00007f123aa648c0 R13: 431bde82d7b634db R14: 0000000000000000 R15: 0000000000000000 Fixes: 1baf5ebf8954 ("erspan: auto detect truncated packets.") Reported-by: syzbot Signed-off-by: Eric Dumazet Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/20230320163427.8096-1-edumazet@google.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- net/ipv4/ip_gre.c | 4 ++-- net/ipv6/ip6_gre.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index c72432ce9bf5..898753328c17 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -603,7 +603,7 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev) truncate = true; } - nhoff = skb_network_header(skb) - skb_mac_header(skb); + nhoff = skb_network_offset(skb); if (skb->protocol == htons(ETH_P_IP) && (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff)) truncate = true; @@ -612,7 +612,7 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev) int thoff; if (skb_transport_header_was_set(skb)) - thoff = skb_transport_header(skb) - skb_mac_header(skb); + thoff = skb_transport_offset(skb); else thoff = nhoff + sizeof(struct ipv6hdr); if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff) diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 00601bc4fdfa..166b7544e54a 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -961,7 +961,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb, truncate = true; } - nhoff = skb_network_header(skb) - skb_mac_header(skb); + nhoff = skb_network_offset(skb); if (skb->protocol == htons(ETH_P_IP) && (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff)) truncate = true; @@ -970,7 +970,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb, int thoff; if (skb_transport_header_was_set(skb)) - thoff = skb_transport_header(skb) - skb_mac_header(skb); + thoff = skb_transport_offset(skb); else thoff = nhoff + sizeof(struct ipv6hdr); if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff) -- cgit v1.2.1 From 5bf256726d3802932f23d5d7c6dc47337fca838f Mon Sep 17 00:00:00 2001 From: Zhang Changzhong Date: Tue, 21 Mar 2023 14:45:43 +1100 Subject: net/sonic: use dma_mapping_error() for error check [ Upstream commit 4107b8746d93ace135b8c4da4f19bbae81db785f ] The DMA address returned by dma_map_single() should be checked with dma_mapping_error(). Fix it accordingly. 
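[ ed. note: the canonical pattern, shown as a hedged sketch rather than driver code. A streaming DMA mapping must be checked with dma_mapping_error() instead of being compared against 0/NULL: on some platforms bus address 0 is a valid mapping, and the error cookie is not 0. ]

#include <linux/dma-mapping.h>

static int example_map_tx(struct device *dev, void *data, size_t len,
			  dma_addr_t *out)
{
	dma_addr_t addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr))
		return -ENOMEM;	/* nothing was mapped; do not unmap */

	*out = addr;
	return 0;
}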
Fixes: efcce839360f ("[PATCH] macsonic/jazzsonic network drivers update") Signed-off-by: Zhang Changzhong Tested-by: Stan Johnson Signed-off-by: Finn Thain Reviewed-by: Leon Romanovsky Link: https://lore.kernel.org/r/6645a4b5c1e364312103f48b7b36783b94e197a2.1679370343.git.fthain@linux-m68k.org Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- drivers/net/ethernet/natsemi/sonic.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c index 69282f31d519..fe54bcab705f 100644 --- a/drivers/net/ethernet/natsemi/sonic.c +++ b/drivers/net/ethernet/natsemi/sonic.c @@ -255,7 +255,7 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev) */ laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE); - if (!laddr) { + if (dma_mapping_error(lp->device, laddr)) { pr_err_ratelimited("%s: failed to map tx DMA buffer.\n", dev->name); dev_kfree_skb_any(skb); return NETDEV_TX_OK; @@ -473,7 +473,7 @@ static bool sonic_alloc_rb(struct net_device *dev, struct sonic_local *lp, *new_addr = dma_map_single(lp->device, skb_put(*new_skb, SONIC_RBSIZE), SONIC_RBSIZE, DMA_FROM_DEVICE); - if (!*new_addr) { + if (dma_mapping_error(lp->device, *new_addr)) { dev_kfree_skb(*new_skb); *new_skb = NULL; return false; -- cgit v1.2.1 From fb1950606afbd664b75f8285e9c541a548b8cd78 Mon Sep 17 00:00:00 2001 From: Roger Pau Monne Date: Wed, 30 Nov 2022 16:09:11 +0100 Subject: hvc/xen: prevent concurrent accesses to the shared ring MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 6214894f49a967c749ee6c07cb00f9cede748df4 ] The hvc machinery registers both a console and a tty device based on the hv ops provided by the specific implementation. Those two interfaces however have different locks, and there's no single lock that's shared between the tty and the console implementations, hence the driver needs to protect itself against concurrent accesses. Otherwise concurrent calls using the split interfaces are likely to corrupt the ring indexes, leaving the console unusable. Introduce a lock to xencons_info to serialize accesses to the shared ring. This is only required when using the shared memory console; concurrent accesses to the hypercall-based console implementation are not an issue. Note the conditional logic in domU_read_console() is slightly modified so the notify_daemon() call can be done outside of the locked region: it's a hypercall and there's no need for it to be done with the lock held.
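[ ed. note: the shape of the locking introduced here, as an illustrative sketch rather than the driver's code. All reads and updates of the ring indices happen under one spinlock, and the potentially slow peer notification is issued only after the lock is dropped: ]

#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_ring {
	spinlock_t lock;
	unsigned int cons, prod;
	unsigned char buf[256];
};

static bool demo_ring_put(struct demo_ring *r, unsigned char c)
{
	unsigned long flags;
	bool sent = false;

	spin_lock_irqsave(&r->lock, flags);
	if (r->prod - r->cons < sizeof(r->buf)) {
		r->buf[r->prod++ & (sizeof(r->buf) - 1)] = c;
		sent = true;
	}
	spin_unlock_irqrestore(&r->lock, flags);

	/* notify the consumer here, outside the lock (e.g. a hypercall) */
	return sent;
}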
Fixes: b536b4b96230 ('xen: use the hvc console infrastructure for Xen console') Signed-off-by: Roger Pau MonnΓ© Reviewed-by: Juergen Gross Link: https://lore.kernel.org/r/20221130150919.13935-1-roger.pau@citrix.com Signed-off-by: Juergen Gross Signed-off-by: Sasha Levin --- drivers/tty/hvc/hvc_xen.c | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c index 47ffb485ff34..59d85bdd132b 100644 --- a/drivers/tty/hvc/hvc_xen.c +++ b/drivers/tty/hvc/hvc_xen.c @@ -43,6 +43,7 @@ struct xencons_info { int irq; int vtermno; grant_ref_t gntref; + spinlock_t ring_lock; }; static LIST_HEAD(xenconsoles); @@ -89,12 +90,15 @@ static int __write_console(struct xencons_info *xencons, XENCONS_RING_IDX cons, prod; struct xencons_interface *intf = xencons->intf; int sent = 0; + unsigned long flags; + spin_lock_irqsave(&xencons->ring_lock, flags); cons = intf->out_cons; prod = intf->out_prod; mb(); /* update queue values before going on */ if ((prod - cons) > sizeof(intf->out)) { + spin_unlock_irqrestore(&xencons->ring_lock, flags); pr_err_once("xencons: Illegal ring page indices"); return -EINVAL; } @@ -104,6 +108,7 @@ static int __write_console(struct xencons_info *xencons, wmb(); /* write ring before updating pointer */ intf->out_prod = prod; + spin_unlock_irqrestore(&xencons->ring_lock, flags); if (sent) notify_daemon(xencons); @@ -146,16 +151,19 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len) int recv = 0; struct xencons_info *xencons = vtermno_to_xencons(vtermno); unsigned int eoiflag = 0; + unsigned long flags; if (xencons == NULL) return -EINVAL; intf = xencons->intf; + spin_lock_irqsave(&xencons->ring_lock, flags); cons = intf->in_cons; prod = intf->in_prod; mb(); /* get pointers before reading ring */ if ((prod - cons) > sizeof(intf->in)) { + spin_unlock_irqrestore(&xencons->ring_lock, flags); pr_err_once("xencons: Illegal ring page indices"); return -EINVAL; } @@ -179,10 +187,13 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len) xencons->out_cons = intf->out_cons; xencons->out_cons_same = 0; } + if (!recv && xencons->out_cons_same++ > 1) { + eoiflag = XEN_EOI_FLAG_SPURIOUS; + } + spin_unlock_irqrestore(&xencons->ring_lock, flags); + if (recv) { notify_daemon(xencons); - } else if (xencons->out_cons_same++ > 1) { - eoiflag = XEN_EOI_FLAG_SPURIOUS; } xen_irq_lateeoi(xencons->irq, eoiflag); @@ -239,6 +250,7 @@ static int xen_hvm_console_init(void) info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL); if (!info) return -ENOMEM; + spin_lock_init(&info->ring_lock); } else if (info->intf != NULL) { /* already configured */ return 0; @@ -275,6 +287,7 @@ err: static int xencons_info_pv_init(struct xencons_info *info, int vtermno) { + spin_lock_init(&info->ring_lock); info->evtchn = xen_start_info->console.domU.evtchn; /* GFN == MFN for PV guest */ info->intf = gfn_to_virt(xen_start_info->console.domU.mfn); @@ -325,6 +338,7 @@ static int xen_initial_domain_console_init(void) info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL); if (!info) return -ENOMEM; + spin_lock_init(&info->ring_lock); } info->irq = bind_virq_to_irq(VIRQ_CONSOLE, 0, false); @@ -482,6 +496,7 @@ static int xencons_probe(struct xenbus_device *dev, info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL); if (!info) return -ENOMEM; + spin_lock_init(&info->ring_lock); dev_set_drvdata(&dev->dev, info); info->xbdev = dev; info->vtermno = xenbus_devid_to_vtermno(devid); -- cgit v1.2.1 From 
95f068c247b33d6bf6b3a929448597cf3c4fdb23 Mon Sep 17 00:00:00 2001 From: Liang He Date: Wed, 22 Mar 2023 14:20:57 +0800 Subject: net: mdio: thunder: Add missing fwnode_handle_put() [ Upstream commit b1de5c78ebe9858ccec9d49af2f76724f1d47e3e ] In a device_for_each_child_node() iteration, we should call fwnode_handle_put() when breaking out of the loop, as the iterator automatically takes and drops references on the child nodes. Fixes: 379d7ac7ca31 ("phy: mdio-thunder: Add driver for Cavium Thunder SoC MDIO buses.") Signed-off-by: Liang He Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- drivers/net/phy/mdio-thunder.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/phy/mdio-thunder.c b/drivers/net/phy/mdio-thunder.c index c0c922eff760..959bf342133a 100644 --- a/drivers/net/phy/mdio-thunder.c +++ b/drivers/net/phy/mdio-thunder.c @@ -107,6 +107,7 @@ static int thunder_mdiobus_pci_probe(struct pci_dev *pdev, if (i >= ARRAY_SIZE(nexus->buses)) break; } + fwnode_handle_put(fwn); return 0; err_release_regions: -- cgit v1.2.1 From 709f207e542b8b54a9dd31e5ded316f199f01c58 Mon Sep 17 00:00:00 2001 From: Stephan Gerhold Date: Wed, 8 Mar 2023 14:31:55 +0100 Subject: Bluetooth: btqcomsmd: Fix command timeout after setting BD address [ Upstream commit 5d44ab9e204200a78ad55cdf185aa2bb109b5950 ] On most devices using the btqcomsmd driver (e.g. the DragonBoard 410c and other devices based on the Qualcomm MSM8916/MSM8909/... SoCs) the Bluetooth firmware seems to become unresponsive for a while after setting the BD address. On recent kernel versions (at least 5.17+) this often causes timeouts for subsequent commands, e.g. the HCI reset sent by the Bluetooth core during initialization: Bluetooth: hci0: Opcode 0x c03 failed: -110 Unfortunately this behavior does not seem to be documented anywhere. Experimentation suggests that the minimum necessary delay to avoid the problem is ~150us. However, to be sure, add a sleep for > 1ms in case it is a bit longer on other firmware versions. Older kernel versions are likely also affected, although perhaps with slightly different errors or less probability. Side effects can easily hide the issue in most cases, e.g. unrelated incoming interrupts that cause the necessary delay. Fixes: 1511cc750c3d ("Bluetooth: Introduce Qualcomm WCNSS SMD based HCI driver") Signed-off-by: Stephan Gerhold Signed-off-by: Luiz Augusto von Dentz Signed-off-by: Sasha Levin --- drivers/bluetooth/btqcomsmd.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c index 874172aa8e41..a698b1f6394b 100644 --- a/drivers/bluetooth/btqcomsmd.c +++ b/drivers/bluetooth/btqcomsmd.c @@ -146,6 +146,21 @@ static int btqcomsmd_setup(struct hci_dev *hdev) return 0; } +static int btqcomsmd_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr) +{ + int ret; + + ret = qca_set_bdaddr_rome(hdev, bdaddr); + if (ret) + return ret; + + /* The firmware stops responding for a while after setting the bdaddr, + * causing timeouts for subsequent commands. Sleep a bit to avoid this.
+ */ + usleep_range(1000, 10000); + return 0; +} + static int btqcomsmd_probe(struct platform_device *pdev) { struct btqcomsmd *btq; @@ -195,7 +210,7 @@ static int btqcomsmd_probe(struct platform_device *pdev) hdev->close = btqcomsmd_close; hdev->send = btqcomsmd_send; hdev->setup = btqcomsmd_setup; - hdev->set_bdaddr = qca_set_bdaddr_rome; + hdev->set_bdaddr = btqcomsmd_set_bdaddr; ret = hci_register_dev(hdev); if (ret < 0) -- cgit v1.2.1 From af4d48754d5517d33bac5e504ff1f1de0808e29e Mon Sep 17 00:00:00 2001 From: Zheng Wang Date: Thu, 9 Mar 2023 16:07:39 +0800 Subject: Bluetooth: btsdio: fix use after free bug in btsdio_remove due to unfinished work [ Upstream commit 1e9ac114c4428fdb7ff4635b45d4f46017e8916f ] In btsdio_probe, &data->work was bound to btsdio_work. In btsdio_send_frame, it was started by schedule_work. If btsdio_remove is called with an unfinished job, there may be a race condition, causing a use-after-free bug on hdev. Fixes: ddbaf13e3609 ("[Bluetooth] Add generic driver for Bluetooth SDIO devices") Signed-off-by: Zheng Wang Signed-off-by: Luiz Augusto von Dentz Signed-off-by: Sasha Levin --- drivers/bluetooth/btsdio.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c index 20142bc77554..bd55bf7a9914 100644 --- a/drivers/bluetooth/btsdio.c +++ b/drivers/bluetooth/btsdio.c @@ -353,6 +353,7 @@ static void btsdio_remove(struct sdio_func *func) BT_DBG("func %p", func); + cancel_work_sync(&data->work); if (!data) return; -- cgit v1.2.1 From 2b486ecfeb3918c575560ac2851173755752e6e0 Mon Sep 17 00:00:00 2001 From: Frank Crawford Date: Sat, 18 Mar 2023 19:05:42 +1100 Subject: hwmon (it87): Fix voltage scaling for chips with 10.9mV ADCs [ Upstream commit 968b66ffeb7956acc72836a7797aeb7b2444ec51 ] Fix voltage scaling for chips that have 10.9mV ADCs, where scaling was not performed. Fixes: ead8080351c9 ("hwmon: (it87) Add support for IT8732F") Signed-off-by: Frank Crawford Link: https://lore.kernel.org/r/20230318080543.1226700-2-frank@crawford.emu.id.au [groeck: Update subject and description to focus on bug fix] Signed-off-by: Guenter Roeck Signed-off-by: Sasha Levin --- drivers/hwmon/it87.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c index f8499cb95fec..4e4e151760db 100644 --- a/drivers/hwmon/it87.c +++ b/drivers/hwmon/it87.c @@ -495,6 +495,8 @@ static const struct it87_devices it87_devices[] = { #define has_pwm_freq2(data) ((data)->features & FEAT_PWM_FREQ2) #define has_six_temp(data) ((data)->features & FEAT_SIX_TEMP) #define has_vin3_5v(data) ((data)->features & FEAT_VIN3_5V) +#define has_scaling(data) ((data)->features & (FEAT_12MV_ADC | \ FEAT_10_9MV_ADC)) struct it87_sio_data { int sioaddr; @@ -3107,7 +3109,7 @@ static int it87_probe(struct platform_device *pdev) "Detected broken BIOS defaults, disabling PWM interface\n"); /* Starting with IT8721F, we handle scaling of internal voltages */ - if (has_12mv_adc(data)) { + if (has_scaling(data)) { if (sio_data->internal & BIT(0)) data->in_scaled |= BIT(3); /* in3 is AVCC */ if (sio_data->internal & BIT(1)) -- cgit v1.2.1 From 98a357d88eb3d8021e19ea5243b8c12cd5913a74 Mon Sep 17 00:00:00 2001 From: Yaroslav Furman Date: Sun, 12 Mar 2023 11:07:45 +0200 Subject: uas: Add US_FL_NO_REPORT_OPCODES for JMicron JMS583Gen 2 commit a37eb61b6ec064ac794b8a1e89fd33eb582fe51d upstream. Just like other JMicron JMS5xx enclosures, it chokes on report-opcodes, so let's avoid them.
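[ ed. note: for context, the flag takes effect in the uas SCSI-device setup path, roughly as below -- paraphrased from memory of drivers/usb/storage/uas.c, not part of this patch. With it set, sd skips the REPORT SUPPORTED OPERATION CODES query that these enclosures choke on: ]

	if (devinfo->flags & US_FL_NO_REPORT_OPCODES)
		sdev->no_report_opcodes = 1;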
Signed-off-by: Yaroslav Furman Cc: stable Link: https://lore.kernel.org/r/20230312090745.47962-1-yaro330@gmail.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/storage/unusual_uas.h | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index d4fa29b623ff..a4513dd931b2 100644 --- a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h @@ -111,6 +111,13 @@ UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_BROKEN_FUA), +/* Reported by: Yaroslav Furman */ +UNUSUAL_DEV(0x152d, 0x0583, 0x0000, 0x9999, + "JMicron", + "JMS583Gen 2", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_NO_REPORT_OPCODES), + /* Reported-by: Thinh Nguyen */ UNUSUAL_DEV(0x154b, 0xf00b, 0x0000, 0x9999, "PNY", -- cgit v1.2.1 From 1608a4000264a7125aa908fa69bad1d480cca110 Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Fri, 10 Mar 2023 11:20:49 -0600 Subject: thunderbolt: Use const qualifier for `ring_interrupt_index` commit 1716efdb07938bd6510e1127d02012799112c433 upstream. `ring_interrupt_index` doesn't change the data for `ring` so mark it as const. This is needed by the following patch that disables interrupt auto clear for rings. Cc: Sanju Mehta Cc: stable@vger.kernel.org Signed-off-by: Mario Limonciello Signed-off-by: Mika Westerberg Signed-off-by: Greg Kroah-Hartman --- drivers/thunderbolt/nhi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c index 384623c49cfe..d22c7216d68c 100644 --- a/drivers/thunderbolt/nhi.c +++ b/drivers/thunderbolt/nhi.c @@ -38,7 +38,7 @@ #define NHI_MAILBOX_TIMEOUT 500 /* ms */ -static int ring_interrupt_index(struct tb_ring *ring) +static int ring_interrupt_index(const struct tb_ring *ring) { int bit = ring->hop; if (!ring->is_tx) -- cgit v1.2.1 From fea400350d182744d35979870819b97864e1c8ca Mon Sep 17 00:00:00 2001 From: Alexandre Ghiti Date: Tue, 16 Mar 2021 15:34:20 -0400 Subject: riscv: Bump COMMAND_LINE_SIZE value to 1024 [ Upstream commit 61fc1ee8be26bc192d691932b0a67eabee45d12f ] Increase COMMAND_LINE_SIZE as the current default value is too low for syzbot kernel command line. There has been considerable discussion on this patch that has led to a larger patch set removing COMMAND_LINE_SIZE from the uapi headers on all ports. That's not quite done yet, but it's gotten far enough we're confident this is not a uABI change so this is safe. 
Reported-by: Dmitry Vyukov Signed-off-by: Alexandre Ghiti Link: https://lore.kernel.org/r/20210316193420.904-1-alex@ghiti.fr [Palmer: it's not uabi] Link: https://lore.kernel.org/linux-riscv/874b8076-b0d1-4aaa-bcd8-05d523060152@app.fastmail.com/#t Signed-off-by: Palmer Dabbelt Signed-off-by: Sasha Levin --- arch/riscv/include/uapi/asm/setup.h | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 arch/riscv/include/uapi/asm/setup.h diff --git a/arch/riscv/include/uapi/asm/setup.h b/arch/riscv/include/uapi/asm/setup.h new file mode 100644 index 000000000000..66b13a522880 --- /dev/null +++ b/arch/riscv/include/uapi/asm/setup.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ + +#ifndef _UAPI_ASM_RISCV_SETUP_H +#define _UAPI_ASM_RISCV_SETUP_H + +#define COMMAND_LINE_SIZE 1024 + +#endif /* _UAPI_ASM_RISCV_SETUP_H */ -- cgit v1.2.1 From 5da4469a7aa011de614c3e2ae383c35a353a382e Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Thu, 16 Feb 2023 23:25:04 -0500 Subject: ca8210: fix mac_len negative array access [ Upstream commit 6c993779ea1d0cccdb3a5d7d45446dd229e610a3 ] This patch fixes a buffer overflow access of skb->data if ieee802154_hdr_peek_addrs() fails. Reported-by: lianhui tang Signed-off-by: Alexander Aring Link: https://lore.kernel.org/r/20230217042504.3303396-1-aahringo@redhat.com Signed-off-by: Stefan Schmidt Signed-off-by: Sasha Levin --- drivers/net/ieee802154/ca8210.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c index 917edb3d04b7..2d4471b77fa7 100644 --- a/drivers/net/ieee802154/ca8210.c +++ b/drivers/net/ieee802154/ca8210.c @@ -1954,6 +1954,8 @@ static int ca8210_skb_tx( * packet */ mac_len = ieee802154_hdr_peek_addrs(skb, &header); + if (mac_len < 0) + return mac_len; secspec.security_level = header.sec.level; secspec.key_id_mode = header.sec.key_id_mode; -- cgit v1.2.1 From f55cb52ec98b22125f5bda36391edb8894f7e8cf Mon Sep 17 00:00:00 2001 From: Michael Schmitz Date: Wed, 1 Mar 2023 15:11:07 +1300 Subject: m68k: Only force 030 bus error if PC not in exception table [ Upstream commit e36a82bebbf7da814530d5a179bef9df5934b717 ] __get_kernel_nofault() does copy data in supervisor mode when forcing a task backtrace log through /proc/sysrq_trigger. This is expected to cause a bus error exception on e.g. a NULL pointer dereference when logging that a kernel task has no workqueue associated. This bus error ought to be ignored. Our 030 bus error handler is ill-equipped to deal with this: Whenever ssw indicates a kernel mode access on a data fault, we don't even attempt to handle the fault and instead always send a SEGV signal (or panic). As a result, the check for exception handling at the fault PC (buried in send_sig_fault() which gets called from do_page_fault() eventually) is never used. In contrast, both 040 and 060 access error handlers do not care whether a fault happened on supervisor mode access, and will call do_page_fault() on those, ultimately honoring the exception table. Add a check in bus_error030 to call do_page_fault() in case we do have an entry for the fault PC in our exception table. I had attempted a fix for this earlier in 2019 that did rely on testing pagefault_disabled() (see link below) to achieve the same thing, but this patch should be more generic. Tested on 030 Atari Falcon.
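[ ed. note: a sketch of the decision the patch adds, illustrative only. A kernel-mode fault is treated as genuinely fatal only when no exception-table fixup exists for the faulting PC: ]

#include <linux/extable.h>

static bool demo_kernel_fault_is_fatal(unsigned long pc)
{
	/* a fixup entry means the access was an expected, guarded one */
	return !search_exception_tables(pc);
}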
Reported-by: Eero Tamminen Link: https://lore.kernel.org/r/alpine.LNX.2.21.1904091023540.25@nippy.intranet Link: https://lore.kernel.org/r/63130691-1984-c423-c1f2-73bfd8d3dcd3@gmail.com Signed-off-by: Michael Schmitz Reviewed-by: Geert Uytterhoeven Link: https://lore.kernel.org/r/20230301021107.26307-1-schmitzmic@gmail.com Signed-off-by: Geert Uytterhoeven Signed-off-by: Sasha Levin --- arch/m68k/kernel/traps.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c index 9b70a7f5e705..35f706d836c5 100644 --- a/arch/m68k/kernel/traps.c +++ b/arch/m68k/kernel/traps.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include @@ -550,7 +551,8 @@ static inline void bus_error030 (struct frame *fp) errorcode |= 2; if (mmusr & (MMU_I | MMU_WP)) { - if (ssw & 4) { + /* We might have an exception table for this PC */ + if (ssw & 4 && !search_exception_tables(fp->ptregs.pc)) { pr_err("Data %s fault at %#010lx in %s (pc=%#lx)\n", ssw & RW ? "read" : "write", fp->un.fmtb.daddr, -- cgit v1.2.1 From 9bb3a10418c82927abf197052e41644dd27f1207 Mon Sep 17 00:00:00 2001 From: Maurizio Lombardi Date: Tue, 14 Feb 2023 15:15:56 +0100 Subject: scsi: target: iscsi: Fix an error message in iscsi_check_key() [ Upstream commit 6cc55c969b7ce8d85e09a636693d4126c3676c11 ] The first half of the error message is printed by pr_err(), the second half is printed by pr_debug(). The user will therefore see only the first part of the message and will miss some useful information. Link: https://lore.kernel.org/r/20230214141556.762047-1-mlombard@redhat.com Signed-off-by: Maurizio Lombardi Reviewed-by: Mike Christie Signed-off-by: Martin K. Petersen Signed-off-by: Sasha Levin --- drivers/target/iscsi/iscsi_target_parameters.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c index 29a37b242d30..01f93de93c8c 100644 --- a/drivers/target/iscsi/iscsi_target_parameters.c +++ b/drivers/target/iscsi/iscsi_target_parameters.c @@ -1270,18 +1270,20 @@ static struct iscsi_param *iscsi_check_key( return param; if (!(param->phase & phase)) { - pr_err("Key \"%s\" may not be negotiated during ", - param->name); + char *phase_name; + switch (phase) { case PHASE_SECURITY: - pr_debug("Security phase.\n"); + phase_name = "Security"; break; case PHASE_OPERATIONAL: - pr_debug("Operational phase.\n"); + phase_name = "Operational"; break; default: - pr_debug("Unknown phase.\n"); + phase_name = "Unknown"; } + pr_err("Key \"%s\" may not be negotiated during %s phase.\n", + param->name, phase_name); return NULL; } -- cgit v1.2.1 From 66084a02296c5edcc890cd23fa35f33bb5e4bfc2 Mon Sep 17 00:00:00 2001 From: Adrien Thierry Date: Mon, 20 Feb 2023 09:07:40 -0500 Subject: scsi: ufs: core: Add soft dependency on governor_simpleondemand [ Upstream commit 2ebe16155dc8bd4e602cad5b5f65458d2eaa1a75 ] The ufshcd driver uses simpleondemand governor for devfreq. Add it to the list of ufshcd softdeps to allow userspace initramfs tools like dracut to automatically pull the governor module into the initramfs together with UFS drivers. Link: https://lore.kernel.org/r/20230220140740.14379-1-athierry@redhat.com Signed-off-by: Adrien Thierry Signed-off-by: Martin K. 
Petersen Signed-off-by: Sasha Levin --- drivers/scsi/ufs/ufshcd.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index abc156cf05f6..b45cd6c98bad 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -8228,5 +8228,6 @@ EXPORT_SYMBOL_GPL(ufshcd_init); MODULE_AUTHOR("Santosh Yaragnavi "); MODULE_AUTHOR("Vinayak Holikatti "); MODULE_DESCRIPTION("Generic UFS host controller driver Core"); +MODULE_SOFTDEP("pre: governor_simpleondemand"); MODULE_LICENSE("GPL"); MODULE_VERSION(UFSHCD_DRIVER_VERSION); -- cgit v1.2.1 From dcfe63d378e9cfb919b67f1a7f9167672b53739a Mon Sep 17 00:00:00 2001 From: Enrico Sau Date: Mon, 6 Mar 2023 12:59:33 +0100 Subject: net: usb: cdc_mbim: avoid altsetting toggling for Telit FE990 [ Upstream commit 418383e6ed6b4624a54ec05c535f13d184fbf33b ] Add quirk CDC_MBIM_FLAG_AVOID_ALTSETTING_TOGGLE for Telit FE990 0x1081 composition in order to avoid bind error. Signed-off-by: Enrico Sau Link: https://lore.kernel.org/r/20230306115933.198259-1-enrico.sau@gmail.com Signed-off-by: Paolo Abeni Signed-off-by: Sasha Levin --- drivers/net/usb/cdc_mbim.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c index 41bac861ca99..72a93dc2df86 100644 --- a/drivers/net/usb/cdc_mbim.c +++ b/drivers/net/usb/cdc_mbim.c @@ -665,6 +665,11 @@ static const struct usb_device_id mbim_devs[] = { .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle, }, + /* Telit FE990 */ + { USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x1081, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), + .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle, + }, + /* default entry */ { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), .driver_info = (unsigned long)&cdc_mbim_info_zlp, -- cgit v1.2.1 From bcc029c728bfcc3c6dce17102224caf5c93e873d Mon Sep 17 00:00:00 2001 From: Enrico Sau Date: Mon, 6 Mar 2023 13:05:28 +0100 Subject: net: usb: qmi_wwan: add Telit 0x1080 composition [ Upstream commit 382e363d5bed0cec5807b35761d14e55955eee63 ] Add the following Telit FE990 composition: 0x1080: tty, adb, rmnet, tty, tty, tty, tty Signed-off-by: Enrico Sau Link: https://lore.kernel.org/r/20230306120528.198842-1-enrico.sau@gmail.com Signed-off-by: Paolo Abeni Signed-off-by: Sasha Levin --- drivers/net/usb/qmi_wwan.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 24ce49b311c4..5417932242e7 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1322,6 +1322,7 @@ static const struct usb_device_id products[] = { {QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)}, /* Telit LN920 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)}, /* Telit FN990 */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1080, 2)}, /* Telit FE990 */ {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */ {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ -- cgit v1.2.1 From a4e7fa8f8be4f09ac8d36d5505fac75fde053ecb Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 6 Mar 2023 01:20:30 +0000 Subject: sh: sanitize the flags on sigreturn [ Upstream commit 573b22ccb7ce9ab7f0539a2e11a9d3609a8783f5 ] We fetch %SR value from sigframe; it might have been modified by signal handler, so we can't trust it with any bits that are not modifiable in user mode. 
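[ ed. note: a standalone model of the sanitization, runnable in userspace. Only the user-modifiable M, Q, S and T bits may come from the signal frame; everything else is taken from the live kernel value: ]

#include <stdio.h>

#define SR_USER_MASK 0x00000303u	/* M, Q, S, T bits */

static unsigned int sanitize_sr(unsigned int kernel_sr, unsigned int frame_sr)
{
	return (kernel_sr & ~SR_USER_MASK) | (frame_sr & SR_USER_MASK);
}

int main(void)
{
	/* a hostile handler writes all ones; only 0x303 survives */
	printf("%#x\n", sanitize_sr(0x0u, 0xffffffffu));
	return 0;
}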
Signed-off-by: Al Viro Cc: Rich Felker Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- arch/sh/include/asm/processor_32.h | 1 + arch/sh/kernel/signal_32.c | 3 +++ 2 files changed, 4 insertions(+) diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h index 95100d8a0b7b..fc94603724b8 100644 --- a/arch/sh/include/asm/processor_32.h +++ b/arch/sh/include/asm/processor_32.h @@ -57,6 +57,7 @@ #define SR_FD 0x00008000 #define SR_MD 0x40000000 +#define SR_USER_MASK 0x00000303 // M, Q, S, T bits /* * DSP structure and data */ diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c index c46c0020ff55..ce93ae78c300 100644 --- a/arch/sh/kernel/signal_32.c +++ b/arch/sh/kernel/signal_32.c @@ -116,6 +116,7 @@ static int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p) { unsigned int err = 0; + unsigned int sr = regs->sr & ~SR_USER_MASK; #define COPY(x) err |= __get_user(regs->x, &sc->sc_##x) COPY(regs[1]); @@ -131,6 +132,8 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p COPY(sr); COPY(pc); #undef COPY + regs->sr = (regs->sr & SR_USER_MASK) | sr; + #ifdef CONFIG_SH_FPU if (boot_cpu_data.flags & CPU_HAS_FPU) { int owned_fp; -- cgit v1.2.1 From 8dd74ca1ccf2a74f8285f2301df8984765f9994b Mon Sep 17 00:00:00 2001 From: Shyam Prasad N Date: Thu, 9 Mar 2023 13:23:29 +0000 Subject: cifs: empty interface list when server doesn't support query interfaces commit 896cd316b841053f6df95ab77b5f1322c16a8e18 upstream. When querying server interfaces returns -EOPNOTSUPP, clear the list of interfaces. Assumption is that multichannel would be disabled too. Signed-off-by: Shyam Prasad N Reviewed-by: Paulo Alcantara (SUSE) Cc: stable@vger.kernel.org Signed-off-by: Steve French Signed-off-by: Greg Kroah-Hartman --- fs/cifs/smb2ops.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index f906984eb25b..118bcb351af9 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -475,7 +475,7 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon) if (rc == -EOPNOTSUPP) { cifs_dbg(FYI, "server does not support query network interfaces\n"); - goto out; + ret_data_len = 0; } else if (rc != 0) { cifs_dbg(VFS, "error %d on ioctl to get interface list\n", rc); goto out; -- cgit v1.2.1 From b5ea8bc322c6c3bdf20c43b4a247420cfa63b7dd Mon Sep 17 00:00:00 2001 From: Joel Selvaraj Date: Sun, 12 Mar 2023 23:14:02 -0500 Subject: scsi: core: Add BLIST_SKIP_VPD_PAGES for SKhynix H28U74301AMR commit a204b490595de71016b2360a1886ec8c12d0afac upstream. Xiaomi Poco F1 (qcom/sdm845-xiaomi-beryllium*.dts) comes with a SKhynix H28U74301AMR UFS. The sd_read_cpr() operation leads to a 120 second timeout, making the device bootup very slow: [ 121.457736] sd 0:0:0:1: [sdb] tag#23 timing out command, waited 120s Setting the BLIST_SKIP_VPD_PAGES allows the device to skip the failing sd_read_cpr operation and boot normally. Signed-off-by: Joel Selvaraj Link: https://lore.kernel.org/r/20230313041402.39330-1-joelselvaraj.oss@gmail.com Cc: stable@vger.kernel.org Signed-off-by: Martin K. 
Petersen Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/scsi_devinfo.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index 6a2a413cc97e..d8557a00e1ec 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c @@ -232,6 +232,7 @@ static struct { {"SGI", "RAID5", "*", BLIST_SPARSELUN}, {"SGI", "TP9100", "*", BLIST_REPORTLUN2}, {"SGI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, + {"SKhynix", "H28U74301AMR", NULL, BLIST_SKIP_VPD_PAGES}, {"IBM", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, {"SUN", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, {"DELL", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, -- cgit v1.2.1 From 3256e152b645fc1e788ba44c2d8ced690113e3e6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alvin=20=C5=A0ipraga?= Date: Thu, 2 Mar 2023 17:36:47 +0100 Subject: usb: gadget: u_audio: don't let userspace block driver unbind MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 6c67ed9ad9b83e453e808f9b31a931a20a25629b upstream. In the unbind callback for f_uac1 and f_uac2, a call to snd_card_free() via g_audio_cleanup() will disconnect the card and then wait for all resources to be released, which happens when the refcount falls to zero. Since userspace can keep the refcount incremented by not closing the relevant file descriptor, the call to unbind may block indefinitely. This can cause a deadlock during reboot, as evidenced by the following blocked task observed on my machine: task:reboot state:D stack:0 pid:2827 ppid:569 flags:0x0000000c Call trace: __switch_to+0xc8/0x140 __schedule+0x2f0/0x7c0 schedule+0x60/0xd0 schedule_timeout+0x180/0x1d4 wait_for_completion+0x78/0x180 snd_card_free+0x90/0xa0 g_audio_cleanup+0x2c/0x64 afunc_unbind+0x28/0x60 ... kernel_restart+0x4c/0xac __do_sys_reboot+0xcc/0x1ec __arm64_sys_reboot+0x28/0x30 invoke_syscall+0x4c/0x110 ... The issue can also be observed by opening the card with arecord and then stopping the process through the shell before unbinding: # arecord -D hw:UAC2Gadget -f S32_LE -c 2 -r 48000 /dev/null Recording WAVE '/dev/null' : Signed 32 bit Little Endian, Rate 48000 Hz, Stereo ^Z[1]+ Stopped arecord -D hw:UAC2Gadget -f S32_LE -c 2 -r 48000 /dev/null # echo gadget.0 > /sys/bus/gadget/drivers/configfs-gadget/unbind (observe that the unbind command never finishes) Fix the problem by using snd_card_free_when_closed() instead, which will still disconnect the card as desired, but defer the task of freeing the resources to the core once userspace closes its file descriptor. 
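[ ed. note: a standalone model of why deferred freeing unblocks teardown; refcounting is simplified to a plain integer, illustrative only: ]

#include <stdio.h>
#include <stdlib.h>

struct obj { int refs; };

static void obj_put(struct obj *o)
{
	if (--o->refs == 0) {	/* the last user frees; nobody waits */
		free(o);
		printf("freed\n");
	}
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	o->refs = 2;	/* the driver plus one open file descriptor */
	obj_put(o);	/* teardown drops its ref and returns at once */
	obj_put(o);	/* userspace closes later; the object dies here */
	return 0;
}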
Fixes: 132fcb460839 ("usb: gadget: Add Audio Class 2.0 Driver") Cc: stable@vger.kernel.org Signed-off-by: Alvin Ε ipraga Reviewed-by: Ruslan Bilovol Reviewed-by: John Keeping Link: https://lore.kernel.org/r/20230302163648.3349669-1-alvin@pqrs.dk Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/function/u_audio.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c index 168303f21bf4..3136a239e782 100644 --- a/drivers/usb/gadget/function/u_audio.c +++ b/drivers/usb/gadget/function/u_audio.c @@ -626,7 +626,7 @@ void g_audio_cleanup(struct g_audio *g_audio) uac = g_audio->uac; card = uac->card; if (card) - snd_card_free(card); + snd_card_free_when_closed(card); kfree(uac->p_prm.ureq); kfree(uac->c_prm.ureq); -- cgit v1.2.1 From 7d845e9a485f287181ff81567c3900a8e7ad1e28 Mon Sep 17 00:00:00 2001 From: Lin Ma Date: Tue, 7 Mar 2023 23:29:17 +0800 Subject: igb: revert rtnl_lock() that causes deadlock commit 65f69851e44d71248b952a687e44759a7abb5016 upstream. The commit 6faee3d4ee8b ("igb: Add lock to avoid data race") adds rtnl_lock to eliminate a false data race shown below (FREE from device detaching) | (USE from netdev core) igb_remove | igb_ndo_get_vf_config igb_disable_sriov | vf >= adapter->vfs_allocated_count? kfree(adapter->vf_data) | adapter->vfs_allocated_count = 0 | | memcpy(... adapter->vf_data[vf] The above race will never happen and the extra rtnl_lock causes deadlock below [ 141.420169] [ 141.420672] __schedule+0x2dd/0x840 [ 141.421427] schedule+0x50/0xc0 [ 141.422041] schedule_preempt_disabled+0x11/0x20 [ 141.422678] __mutex_lock.isra.13+0x431/0x6b0 [ 141.423324] unregister_netdev+0xe/0x20 [ 141.423578] igbvf_remove+0x45/0xe0 [igbvf] [ 141.423791] pci_device_remove+0x36/0xb0 [ 141.423990] device_release_driver_internal+0xc1/0x160 [ 141.424270] pci_stop_bus_device+0x6d/0x90 [ 141.424507] pci_stop_and_remove_bus_device+0xe/0x20 [ 141.424789] pci_iov_remove_virtfn+0xba/0x120 [ 141.425452] sriov_disable+0x2f/0xf0 [ 141.425679] igb_disable_sriov+0x4e/0x100 [igb] [ 141.426353] igb_remove+0xa0/0x130 [igb] [ 141.426599] pci_device_remove+0x36/0xb0 [ 141.426796] device_release_driver_internal+0xc1/0x160 [ 141.427060] driver_detach+0x44/0x90 [ 141.427253] bus_remove_driver+0x55/0xe0 [ 141.427477] pci_unregister_driver+0x2a/0xa0 [ 141.428296] __x64_sys_delete_module+0x141/0x2b0 [ 141.429126] ? mntput_no_expire+0x4a/0x240 [ 141.429363] ? syscall_trace_enter.isra.19+0x126/0x1a0 [ 141.429653] do_syscall_64+0x5b/0x80 [ 141.429847] ? exit_to_user_mode_prepare+0x14d/0x1c0 [ 141.430109] ? syscall_exit_to_user_mode+0x12/0x30 [ 141.430849] ? do_syscall_64+0x67/0x80 [ 141.431083] ? syscall_exit_to_user_mode_prepare+0x183/0x1b0 [ 141.431770] ? syscall_exit_to_user_mode+0x12/0x30 [ 141.432482] ? do_syscall_64+0x67/0x80 [ 141.432714] ? exc_page_fault+0x64/0x140 [ 141.432911] entry_SYSCALL_64_after_hwframe+0x72/0xdc Since the igb_disable_sriov() will call pci_disable_sriov() before releasing any resources, the netdev core will synchronize the cleanup to avoid any races. This patch removes the useless rtnl_(un)lock to guarantee correctness. 
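[ ed. note: the failure mode in miniature -- this userspace program deliberately hangs, modeling a thread that takes a non-recursive lock and then enters a path needing the same lock, which is what the extra rtnl_lock() did here: ]

#include <pthread.h>

static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;

static void unregister_vf(void)
{
	pthread_mutex_lock(&rtnl);	/* the netdev core needs the lock too */
	pthread_mutex_unlock(&rtnl);
}

int main(void)
{
	pthread_mutex_lock(&rtnl);	/* the lock the reverted patch took */
	unregister_vf();		/* never returns: self-deadlock */
	pthread_mutex_unlock(&rtnl);
	return 0;
}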
CC: stable@vger.kernel.org Fixes: 6faee3d4ee8b ("igb: Add lock to avoid data race") Reported-by: Corinna Vinschen Link: https://lore.kernel.org/intel-wired-lan/ZAcJvkEPqWeJHO2r@calimero.vinschen.de/ Signed-off-by: Lin Ma Tested-by: Corinna Vinschen Reviewed-by: Jacob Keller Reviewed-by: Simon Horman Tested-by: Rafal Romanowski Signed-off-by: Tony Nguyen Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/intel/igb/igb_main.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 87f98170ac93..6f9d563deb6b 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -3700,9 +3700,7 @@ static void igb_remove(struct pci_dev *pdev) igb_release_hw_control(adapter); #ifdef CONFIG_PCI_IOV - rtnl_lock(); igb_disable_sriov(pdev); - rtnl_unlock(); #endif unregister_netdev(netdev); -- cgit v1.2.1 From 84e13235e08941ce37aa9ee238b6dd007170f0fc Mon Sep 17 00:00:00 2001 From: Coly Li Date: Mon, 27 Feb 2023 23:23:17 +0800 Subject: dm thin: fix deadlock when swapping to thin device commit 9bbf5feecc7eab2c370496c1c161bbfe62084028 upstream. This is an already known issue that dm-thin volume cannot be used as swap, otherwise a deadlock may happen when dm-thin internal memory demand triggers swap I/O on the dm-thin volume itself. But thanks to commit a666e5c05e7c ("dm: fix deadlock when swapping to encrypted device"), the limit_swap_bios target flag can also be used for dm-thin to avoid the recursive I/O when it is used as swap. Fix is to simply set ti->limit_swap_bios to true in both pool_ctr() and thin_ctr(). In my test, I create a dm-thin volume /dev/vg/swap and use it as swap device. Then I run fio on another dm-thin volume /dev/vg/main and use large --blocksize to trigger swap I/O onto /dev/vg/swap. The following fio command line is used in my test, fio --name recursive-swap-io --lockmem 1 --iodepth 128 \ --ioengine libaio --filename /dev/vg/main --rw randrw \ --blocksize 1M --numjobs 32 --time_based --runtime=12h Without this fix, the whole system can be locked up within 15 seconds. With this fix, there is no any deadlock or hung task observed after 2 hours of running fio. Furthermore, if blocksize is changed from 1M to 128M, after around 30 seconds fio has no visible I/O, and the out-of-memory killer message shows up in kernel message. After around 20 minutes all fio processes are killed and the whole system is back to being alive. This is exactly what is expected when recursive I/O happens on dm-thin volume when it is used as swap. 
Depends-on: a666e5c05e7c ("dm: fix deadlock when swapping to encrypted device") Cc: stable@vger.kernel.org Signed-off-by: Coly Li Acked-by: Mikulas Patocka Signed-off-by: Mike Snitzer Signed-off-by: Greg Kroah-Hartman --- drivers/md/dm-thin.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 969ea013c74e..a1bbf00e60e5 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -3365,6 +3365,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) pt->low_water_blocks = low_water_blocks; pt->adjusted_pf = pt->requested_pf = pf; ti->num_flush_bios = 1; + ti->limit_swap_bios = true; /* * Only need to enable discards if the pool should pass @@ -4245,6 +4246,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv) goto bad; ti->num_flush_bios = 1; + ti->limit_swap_bios = true; ti->flush_supported = true; ti->per_io_data_size = sizeof(struct dm_thin_endio_hook); -- cgit v1.2.1 From 7a95f8c7a43a841d6104e0dd60a04c3f826f4095 Mon Sep 17 00:00:00 2001 From: Xu Yang Date: Fri, 17 Mar 2023 14:15:15 +0800 Subject: usb: chipidea: core: fix return -EINVAL if requested role is the same as current role commit 3670de80678961eda7fa2220883fc77c16868951 upstream. It should not return -EINVAL if the requested role is the same as the current role; instead, return success without doing anything. Fixes: a932a8041ff9 ("usb: chipidea: core: add sysfs group") cc: Acked-by: Peter Chen Signed-off-by: Xu Yang Link: https://lore.kernel.org/r/20230317061516.2451728-1-xu.yang_2@nxp.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/chipidea/core.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c index c13f9a153a5c..669416d90a48 100644 --- a/drivers/usb/chipidea/core.c +++ b/drivers/usb/chipidea/core.c @@ -872,9 +872,12 @@ static ssize_t role_store(struct device *dev, strlen(ci->roles[role]->name))) break; - if (role == CI_ROLE_END || role == ci->role) + if (role == CI_ROLE_END) return -EINVAL; + if (role == ci->role) + return n; + pm_runtime_get_sync(dev); disable_irq(ci->irq); ci_role_stop(ci); -- cgit v1.2.1 From 0752a5caecf47d008d1def0bcaf22d30984bb414 Mon Sep 17 00:00:00 2001 From: Xu Yang Date: Fri, 17 Mar 2023 14:15:16 +0800 Subject: usb: chipidea: core: fix possible concurrency when switching roles commit 451b15ed138ec15bffbebb58a00ebdd884c3e659 upstream. The user may call role_store() while the driver is handling ci_handle_id_switch(), which is triggered by an OTG event or a power-lost event. Unfortunately, the controller may go into chaos in this case. Fix this by protecting it with a mutex.
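[ ed. note: the generic shape of the fix, as a sketch with made-up demo_* names. The role check and the role switch must happen under one lock, otherwise an OTG event can interleave between them: ]

#include <linux/mutex.h>

struct demo_ctrl {
	struct mutex mutex;
	int role;
};

static void demo_switch_role(struct demo_ctrl *c, int role)
{
	c->role = role;		/* stop the old role, start the new one */
}

static int demo_set_role(struct demo_ctrl *c, int new_role)
{
	mutex_lock(&c->mutex);
	if (new_role != c->role)	/* same role: success, nothing to do */
		demo_switch_role(c, new_role);
	mutex_unlock(&c->mutex);
	return 0;
}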
Fixes: a932a8041ff9 ("usb: chipidea: core: add sysfs group") cc: Acked-by: Peter Chen Signed-off-by: Xu Yang Link: https://lore.kernel.org/r/20230317061516.2451728-2-xu.yang_2@nxp.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/chipidea/ci.h | 2 ++ drivers/usb/chipidea/core.c | 8 +++++++- drivers/usb/chipidea/otg.c | 5 ++++- 3 files changed, 13 insertions(+), 2 deletions(-) diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h index 6a2cc5cd0281..d0e9f3265f5a 100644 --- a/drivers/usb/chipidea/ci.h +++ b/drivers/usb/chipidea/ci.h @@ -202,6 +202,7 @@ struct hw_bank { * @in_lpm: if the core in low power mode * @wakeup_int: if wakeup interrupt occur * @rev: The revision number for controller + * @mutex: protect code from concorrent running when doing role switch */ struct ci_hdrc { struct device *dev; @@ -254,6 +255,7 @@ struct ci_hdrc { bool in_lpm; bool wakeup_int; enum ci_revision rev; + struct mutex mutex; }; static inline struct ci_role_driver *ci_role(struct ci_hdrc *ci) diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c index 669416d90a48..3fd1073a345d 100644 --- a/drivers/usb/chipidea/core.c +++ b/drivers/usb/chipidea/core.c @@ -875,8 +875,12 @@ static ssize_t role_store(struct device *dev, if (role == CI_ROLE_END) return -EINVAL; - if (role == ci->role) + mutex_lock(&ci->mutex); + + if (role == ci->role) { + mutex_unlock(&ci->mutex); return n; + } pm_runtime_get_sync(dev); disable_irq(ci->irq); @@ -886,6 +890,7 @@ static ssize_t role_store(struct device *dev, ci_handle_vbus_change(ci); enable_irq(ci->irq); pm_runtime_put_sync(dev); + mutex_unlock(&ci->mutex); return (ret == 0) ? n : ret; } @@ -924,6 +929,7 @@ static int ci_hdrc_probe(struct platform_device *pdev) return -ENOMEM; spin_lock_init(&ci->lock); + mutex_init(&ci->mutex); ci->dev = dev; ci->platdata = dev_get_platdata(dev); ci->imx28_write_fix = !!(ci->platdata->flags & diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c index f25d4827fd49..a714cf3f0ab7 100644 --- a/drivers/usb/chipidea/otg.c +++ b/drivers/usb/chipidea/otg.c @@ -164,8 +164,10 @@ static int hw_wait_vbus_lower_bsv(struct ci_hdrc *ci) static void ci_handle_id_switch(struct ci_hdrc *ci) { - enum ci_role role = ci_otg_role(ci); + enum ci_role role; + mutex_lock(&ci->mutex); + role = ci_otg_role(ci); if (role != ci->role) { dev_dbg(ci->dev, "switching from %s to %s\n", ci_role(ci)->name, ci->roles[role]->name); @@ -188,6 +190,7 @@ static void ci_handle_id_switch(struct ci_hdrc *ci) if (role == CI_ROLE_GADGET) ci_handle_vbus_change(ci); } + mutex_unlock(&ci->mutex); } /** * ci_otg_work - perform otg (vbus/id) event handle -- cgit v1.2.1 From 9c5034e9a0e03db8d5e9eabb176340259b5b97e4 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Tue, 7 Mar 2023 17:55:48 +0900 Subject: nilfs2: fix kernel-infoleak in nilfs_ioctl_wrap_copy() commit 003587000276f81d0114b5ce773d80c119d8cb30 upstream. The ioctl helper function nilfs_ioctl_wrap_copy(), which exchanges a metadata array to/from user space, may copy uninitialized buffer regions to user space memory for read-only ioctl commands NILFS_IOCTL_GET_SUINFO and NILFS_IOCTL_GET_CPINFO. This can occur when the element size of the user space metadata given by the v_size member of the argument nilfs_argv structure is larger than the size of the metadata element (nilfs_suinfo structure or nilfs_cpinfo structure) on the file system side. 
KMSAN-enabled kernels detect this issue as follows: BUG: KMSAN: kernel-infoleak in instrument_copy_to_user include/linux/instrumented.h:121 [inline] BUG: KMSAN: kernel-infoleak in _copy_to_user+0xc0/0x100 lib/usercopy.c:33 instrument_copy_to_user include/linux/instrumented.h:121 [inline] _copy_to_user+0xc0/0x100 lib/usercopy.c:33 copy_to_user include/linux/uaccess.h:169 [inline] nilfs_ioctl_wrap_copy+0x6fa/0xc10 fs/nilfs2/ioctl.c:99 nilfs_ioctl_get_info fs/nilfs2/ioctl.c:1173 [inline] nilfs_ioctl+0x2402/0x4450 fs/nilfs2/ioctl.c:1290 nilfs_compat_ioctl+0x1b8/0x200 fs/nilfs2/ioctl.c:1343 __do_compat_sys_ioctl fs/ioctl.c:968 [inline] __se_compat_sys_ioctl+0x7dd/0x1000 fs/ioctl.c:910 __ia32_compat_sys_ioctl+0x93/0xd0 fs/ioctl.c:910 do_syscall_32_irqs_on arch/x86/entry/common.c:112 [inline] __do_fast_syscall_32+0xa2/0x100 arch/x86/entry/common.c:178 do_fast_syscall_32+0x37/0x80 arch/x86/entry/common.c:203 do_SYSENTER_32+0x1f/0x30 arch/x86/entry/common.c:246 entry_SYSENTER_compat_after_hwframe+0x70/0x82 Uninit was created at: __alloc_pages+0x9f6/0xe90 mm/page_alloc.c:5572 alloc_pages+0xab0/0xd80 mm/mempolicy.c:2287 __get_free_pages+0x34/0xc0 mm/page_alloc.c:5599 nilfs_ioctl_wrap_copy+0x223/0xc10 fs/nilfs2/ioctl.c:74 nilfs_ioctl_get_info fs/nilfs2/ioctl.c:1173 [inline] nilfs_ioctl+0x2402/0x4450 fs/nilfs2/ioctl.c:1290 nilfs_compat_ioctl+0x1b8/0x200 fs/nilfs2/ioctl.c:1343 __do_compat_sys_ioctl fs/ioctl.c:968 [inline] __se_compat_sys_ioctl+0x7dd/0x1000 fs/ioctl.c:910 __ia32_compat_sys_ioctl+0x93/0xd0 fs/ioctl.c:910 do_syscall_32_irqs_on arch/x86/entry/common.c:112 [inline] __do_fast_syscall_32+0xa2/0x100 arch/x86/entry/common.c:178 do_fast_syscall_32+0x37/0x80 arch/x86/entry/common.c:203 do_SYSENTER_32+0x1f/0x30 arch/x86/entry/common.c:246 entry_SYSENTER_compat_after_hwframe+0x70/0x82 Bytes 16-127 of 3968 are uninitialized ... This eliminates the leak issue by initializing the page allocated as buffer using get_zeroed_page(). Link: https://lkml.kernel.org/r/20230307085548.6290-1-konishi.ryusuke@gmail.com Signed-off-by: Ryusuke Konishi Reported-by: syzbot+132fdd2f1e1805fdc591@syzkaller.appspotmail.com Link: https://lkml.kernel.org/r/000000000000a5bd2d05f63f04ae@google.com Tested-by: Ryusuke Konishi Cc: Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- fs/nilfs2/ioctl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c index 708aa1b92036..dfb2083b8ce1 100644 --- a/fs/nilfs2/ioctl.c +++ b/fs/nilfs2/ioctl.c @@ -70,7 +70,7 @@ static int nilfs_ioctl_wrap_copy(struct the_nilfs *nilfs, if (argv->v_index > ~(__u64)0 - argv->v_nmembs) return -EINVAL; - buf = (void *)__get_free_pages(GFP_NOFS, 0); + buf = (void *)get_zeroed_page(GFP_NOFS); if (unlikely(!buf)) return -ENOMEM; maxmembs = PAGE_SIZE / argv->v_size; -- cgit v1.2.1 From 5fc2b9485a8722c8350c3379992f5931ccfeaf98 Mon Sep 17 00:00:00 2001 From: Wei Chen Date: Tue, 14 Mar 2023 16:54:21 +0000 Subject: i2c: xgene-slimpro: Fix out-of-bounds bug in xgene_slimpro_i2c_xfer() commit 92fbb6d1296f81f41f65effd7f5f8c0f74943d15 upstream. The data->block[0] variable comes from user and is a number between 0-255. Without proper check, the variable may be very large to cause an out-of-bounds when performing memcpy in slimpro_i2c_blkwr. Fix this bug by checking the value of writelen. 
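[ ed. note: a standalone model of the bug class, runnable in userspace. A user-controlled length must be validated against the destination buffer before memcpy(): ]

#include <errno.h>
#include <string.h>

#define I2C_SMBUS_BLOCK_MAX 32

static char dma_buffer[I2C_SMBUS_BLOCK_MAX];

static int blkwr(const void *data, unsigned int writelen)
{
	if (writelen > sizeof(dma_buffer))	/* the check the patch adds */
		return -EINVAL;
	memcpy(dma_buffer, data, writelen);
	return 0;
}

int main(void)
{
	char block[255] = { 0 };	/* data->block[0] can be up to 255 */

	return blkwr(block, sizeof(block)) == -EINVAL ? 0 : 1;
}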
Fixes: f6505fbabc42 ("i2c: add SLIMpro I2C device driver on APM X-Gene platform") Signed-off-by: Wei Chen Cc: stable@vger.kernel.org Reviewed-by: Andi Shyti Signed-off-by: Wolfram Sang Signed-off-by: Greg Kroah-Hartman --- drivers/i2c/busses/i2c-xgene-slimpro.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c index a7ac746018ad..7a746f413535 100644 --- a/drivers/i2c/busses/i2c-xgene-slimpro.c +++ b/drivers/i2c/busses/i2c-xgene-slimpro.c @@ -321,6 +321,9 @@ static int slimpro_i2c_blkwr(struct slimpro_i2c_dev *ctx, u32 chip, u32 msg[3]; int rc; + if (writelen > I2C_SMBUS_BLOCK_MAX) + return -EINVAL; + memcpy(ctx->dma_buffer, data, writelen); paddr = dma_map_single(ctx->dev, ctx->dma_buffer, writelen, DMA_TO_DEVICE); -- cgit v1.2.1 From 0d96bd507ed7e7d565b6d53ebd3874686f123b2e Mon Sep 17 00:00:00 2001 From: Jiasheng Jiang Date: Thu, 16 Mar 2023 14:55:06 +0800 Subject: dm stats: check for and propagate alloc_percpu failure commit d3aa3e060c4a80827eb801fc448debc9daa7c46b upstream. Check alloc_percpu()'s return value and return an error from dm_stats_init() if it fails. Update alloc_dev() to fail if dm_stats_init() does. Otherwise, a NULL pointer dereference will occur in dm_stats_cleanup() even if dm-stats isn't being actively used. Fixes: fd2ed4d25270 ("dm: add statistics support") Cc: stable@vger.kernel.org Signed-off-by: Jiasheng Jiang Signed-off-by: Mike Snitzer Signed-off-by: Greg Kroah-Hartman --- drivers/md/dm-stats.c | 7 ++++++- drivers/md/dm-stats.h | 2 +- drivers/md/dm.c | 4 +++- 3 files changed, 10 insertions(+), 3 deletions(-) diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c index 3d59f3e208c5..0eb48e739f7e 100644 --- a/drivers/md/dm-stats.c +++ b/drivers/md/dm-stats.c @@ -188,7 +188,7 @@ static int dm_stat_in_flight(struct dm_stat_shared *shared) atomic_read(&shared->in_flight[WRITE]); } -void dm_stats_init(struct dm_stats *stats) +int dm_stats_init(struct dm_stats *stats) { int cpu; struct dm_stats_last_position *last; @@ -196,11 +196,16 @@ void dm_stats_init(struct dm_stats *stats) mutex_init(&stats->mutex); INIT_LIST_HEAD(&stats->list); stats->last = alloc_percpu(struct dm_stats_last_position); + if (!stats->last) + return -ENOMEM; + for_each_possible_cpu(cpu) { last = per_cpu_ptr(stats->last, cpu); last->last_sector = (sector_t)ULLONG_MAX; last->last_rw = UINT_MAX; } + + return 0; } void dm_stats_cleanup(struct dm_stats *stats) diff --git a/drivers/md/dm-stats.h b/drivers/md/dm-stats.h index 2ddfae678f32..dcac11fce03b 100644 --- a/drivers/md/dm-stats.h +++ b/drivers/md/dm-stats.h @@ -22,7 +22,7 @@ struct dm_stats_aux { unsigned long long duration_ns; }; -void dm_stats_init(struct dm_stats *st); +int dm_stats_init(struct dm_stats *st); void dm_stats_cleanup(struct dm_stats *st); struct mapped_device; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 3d9a77f4e20f..9a9b2adcf39e 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -2021,7 +2021,9 @@ static struct mapped_device *alloc_dev(int minor) bio_set_dev(&md->flush_bio, md->bdev); md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; - dm_stats_init(&md->stats); + r = dm_stats_init(&md->stats); + if (r < 0) + goto bad; /* Populate the mapping, nobody knows we exist yet */ spin_lock(&_minor_lock); -- cgit v1.2.1 From 7b9f8efb5fc888dd938d2964e705b8e00f1dc0f6 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Mon, 6 Mar 2023 11:17:58 -0500 Subject: dm crypt: add cond_resched() to dmcrypt_write() commit
fb294b1c0ba982144ca467a75e7d01ff26304e2b upstream. The loop in dmcrypt_write may be running for unbounded amount of time, thus we need cond_resched() in it. This commit fixes the following warning: [ 3391.153255][ C12] watchdog: BUG: soft lockup - CPU#12 stuck for 23s! [dmcrypt_write/2:2897] ... [ 3391.387210][ C12] Call trace: [ 3391.390338][ C12] blk_attempt_bio_merge.part.6+0x38/0x158 [ 3391.395970][ C12] blk_attempt_plug_merge+0xc0/0x1b0 [ 3391.401085][ C12] blk_mq_submit_bio+0x398/0x550 [ 3391.405856][ C12] submit_bio_noacct+0x308/0x380 [ 3391.410630][ C12] dmcrypt_write+0x1e4/0x208 [dm_crypt] [ 3391.416005][ C12] kthread+0x130/0x138 [ 3391.419911][ C12] ret_from_fork+0x10/0x18 Reported-by: yangerkun Fixes: dc2676210c42 ("dm crypt: offload writes to thread") Cc: stable@vger.kernel.org Signed-off-by: Mikulas Patocka Signed-off-by: Mike Snitzer Signed-off-by: Greg Kroah-Hartman --- drivers/md/dm-crypt.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index e38c713e882e..908bf0768827 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1661,6 +1661,7 @@ pop_from_list: io = crypt_io_from_node(rb_first(&write_tree)); rb_erase(&io->rb_node, &write_tree); kcryptd_io_write(io); + cond_resched(); } while (!RB_EMPTY_ROOT(&write_tree)); blk_finish_plug(&plug); } -- cgit v1.2.1 From a398059a739bf32d07858ff410aacccd4cf2041e Mon Sep 17 00:00:00 2001 From: Zhang Qiao Date: Mon, 30 Jan 2023 13:22:16 +0100 Subject: sched/fair: sanitize vruntime of entity being placed commit 829c1651e9c4a6f78398d3e67651cef9bb6b42cc upstream. When a scheduling entity is placed onto cfs_rq, its vruntime is pulled to the base level (around cfs_rq->min_vruntime), so that the entity doesn't gain extra boost when placed backwards. However, if the entity being placed wasn't executed for a long time, its vruntime may get too far behind (e.g. while cfs_rq was executing a low-weight hog), which can inverse the vruntime comparison due to s64 overflow. This results in the entity being placed with its original vruntime way forwards, so that it will effectively never get to the cpu. To prevent that, ignore the vruntime of the entity being placed if it didn't execute for much longer than the characteristic sheduler time scale. [rkagan: formatted, adjusted commit log, comments, cutoff value] Signed-off-by: Zhang Qiao Co-developed-by: Roman Kagan Signed-off-by: Roman Kagan Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20230130122216.3555094-1-rkagan@amazon.de Signed-off-by: Greg Kroah-Hartman --- kernel/sched/fair.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 84e7efda98da..304e7fa0ae87 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -3858,6 +3858,7 @@ static void place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) { u64 vruntime = cfs_rq->min_vruntime; + u64 sleep_time; /* * The 'current' period is already promised to the current tasks, @@ -3882,8 +3883,18 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) vruntime -= thresh; } - /* ensure we never gain time by being placed backwards. */ - se->vruntime = max_vruntime(se->vruntime, vruntime); + /* + * Pull vruntime of the entity being placed to the base level of + * cfs_rq, to prevent boosting it if placed backwards. 
If the entity + * slept for a long time, don't even try to compare its vruntime with + * the base as it may be too far off and the comparison may get + * inversed due to s64 overflow. + */ + sleep_time = rq_clock_task(rq_of(cfs_rq)) - se->exec_start; + if ((s64)sleep_time > 60LL * NSEC_PER_SEC) + se->vruntime = vruntime; + else + se->vruntime = max_vruntime(se->vruntime, vruntime); } static void check_enqueue_throttle(struct cfs_rq *cfs_rq); -- cgit v1.2.1 From 30d0a53d2a262ae942033e5d6be535f67484d99f Mon Sep 17 00:00:00 2001 From: Vincent Guittot Date: Fri, 17 Mar 2023 17:08:10 +0100 Subject: sched/fair: Sanitize vruntime of entity being migrated commit a53ce18cacb477dd0513c607f187d16f0fa96f71 upstream. Commit 829c1651e9c4 ("sched/fair: sanitize vruntime of entity being placed") fixes an overflowing bug, but ignore a case that se->exec_start is reset after a migration. For fixing this case, we delay the reset of se->exec_start after placing the entity which se->exec_start to detect long sleeping task. In order to take into account a possible divergence between the clock_task of 2 rqs, we increase the threshold to around 104 days. Fixes: 829c1651e9c4 ("sched/fair: sanitize vruntime of entity being placed") Originally-by: Zhang Qiao Signed-off-by: Vincent Guittot Signed-off-by: Peter Zijlstra (Intel) Tested-by: Zhang Qiao Link: https://lore.kernel.org/r/20230317160810.107988-1-vincent.guittot@linaro.org Signed-off-by: Greg Kroah-Hartman --- kernel/sched/core.c | 3 +++ kernel/sched/fair.c | 55 ++++++++++++++++++++++++++++++++++++++++++----------- 2 files changed, 47 insertions(+), 11 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 46227cc48124..207cd446b9d3 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -741,6 +741,9 @@ static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags) void activate_task(struct rq *rq, struct task_struct *p, int flags) { + if (task_on_rq_migrating(p)) + flags |= ENQUEUE_MIGRATED; + if (task_contributes_to_load(p)) rq->nr_uninterruptible--; diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 304e7fa0ae87..eb67f42fb96b 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -3854,11 +3854,33 @@ static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se) #endif } +static inline bool entity_is_long_sleeper(struct sched_entity *se) +{ + struct cfs_rq *cfs_rq; + u64 sleep_time; + + if (se->exec_start == 0) + return false; + + cfs_rq = cfs_rq_of(se); + + sleep_time = rq_clock_task(rq_of(cfs_rq)); + + /* Happen while migrating because of clock task divergence */ + if (sleep_time <= se->exec_start) + return false; + + sleep_time -= se->exec_start; + if (sleep_time > ((1ULL << 63) / scale_load_down(NICE_0_LOAD))) + return true; + + return false; +} + static void place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) { u64 vruntime = cfs_rq->min_vruntime; - u64 sleep_time; /* * The 'current' period is already promised to the current tasks, @@ -3885,13 +3907,24 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) /* * Pull vruntime of the entity being placed to the base level of - * cfs_rq, to prevent boosting it if placed backwards. If the entity - * slept for a long time, don't even try to compare its vruntime with - * the base as it may be too far off and the comparison may get - * inversed due to s64 overflow. 
- */ - sleep_time = rq_clock_task(rq_of(cfs_rq)) - se->exec_start; - if ((s64)sleep_time > 60LL * NSEC_PER_SEC) + * cfs_rq, to prevent boosting it if placed backwards. + * However, min_vruntime can advance much faster than real time, with + * the extreme being when an entity with the minimal weight always runs + * on the cfs_rq. If the waking entity slept for a long time, its + * vruntime difference from min_vruntime may overflow s64 and their + * comparison may get inversed, so ignore the entity's original + * vruntime in that case. + * The maximal vruntime speedup is given by the ratio of normal to + * minimal weight: scale_load_down(NICE_0_LOAD) / MIN_SHARES. + * When placing a migrated waking entity, its exec_start has been set + * from a different rq. In order to take into account a possible + * divergence between new and prev rq's clocks task because of irq and + * stolen time, we take an additional margin. + * So, cutting off on the sleep time of + * 2^63 / scale_load_down(NICE_0_LOAD) ~ 104 days + * should be safe. + */ + if (entity_is_long_sleeper(se)) se->vruntime = vruntime; else se->vruntime = max_vruntime(se->vruntime, vruntime); @@ -3989,6 +4022,9 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) if (flags & ENQUEUE_WAKEUP) place_entity(cfs_rq, se, 0); + /* Entity has migrated, no longer consider this task hot */ + if (flags & ENQUEUE_MIGRATED) + se->exec_start = 0; check_schedstat_required(); update_stats_enqueue(cfs_rq, se, flags); @@ -6555,9 +6591,6 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu) /* Tell new CPU we are migrated */ p->se.avg.last_update_time = 0; - /* We have migrated, no longer consider this task hot */ - p->se.exec_start = 0; - update_scan_period(p, new_cpu); } -- cgit v1.2.1 From 8eb43d635950e27c29f1e9e49a23b31637f37757 Mon Sep 17 00:00:00 2001 From: George Kennedy Date: Thu, 16 Dec 2021 13:25:32 -0500 Subject: tun: avoid double free in tun_free_netdev commit 158b515f703e75e7d68289bf4d98c664e1d632df upstream. Avoid double free in tun_free_netdev() by moving the dev->tstats and tun->security allocs to a new ndo_init routine (tun_net_init()) that will be called by register_netdevice(). ndo_init is paired with the destructor (tun_free_netdev()), so if there's an error in register_netdevice() the destructor will handle the frees.
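As an illustration of the ownership rule this patch enforces, here is a minimal user-space sketch (not the kernel code; obj_init, obj_register and obj_destroy are made-up stand-ins for ndo_init, register_netdevice and the destructor): once init has run, only the destructor may free what init allocated, so no error path frees those resources a second time.

#include <stdlib.h>

struct obj {
	int *stats;	/* stands in for dev->tstats */
	int *security;	/* stands in for tun->security */
};

/* Paired with obj_destroy(): after a successful init, the destructor
 * owns both allocations. */
static int obj_init(struct obj *o)
{
	o->stats = calloc(1, sizeof(*o->stats));
	if (!o->stats)
		return -1;
	o->security = calloc(1, sizeof(*o->security));
	if (!o->security) {
		free(o->stats);		/* undo only our own work */
		o->stats = NULL;
		return -1;
	}
	return 0;
}

static void obj_destroy(struct obj *o)
{
	free(o->security);
	free(o->stats);
}

/* A register step that fails after init must invoke the destructor
 * exactly once; the caller must not free anything itself, as that
 * double free is what the patch eliminates. */
static int obj_register(struct obj *o, int fail_late)
{
	if (obj_init(o))
		return -1;	/* init cleaned up after itself */
	if (fail_late) {
		obj_destroy(o);	/* the single point of cleanup */
		return -1;
	}
	return 0;
}

int main(void)
{
	struct obj o = { 0 };
	return obj_register(&o, 1) == -1 ? 0 : 1;
}

The syzkaller report that motivated the fix follows.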
BUG: KASAN: double-free or invalid-free in selinux_tun_dev_free_security+0x1a/0x20 security/selinux/hooks.c:5605 CPU: 0 PID: 25750 Comm: syz-executor416 Not tainted 5.16.0-rc2-syzk #1 Hardware name: Red Hat KVM, BIOS Call Trace: __dump_stack lib/dump_stack.c:88 [inline] dump_stack_lvl+0x89/0xb5 lib/dump_stack.c:106 print_address_description.constprop.9+0x28/0x160 mm/kasan/report.c:247 kasan_report_invalid_free+0x55/0x80 mm/kasan/report.c:372 ____kasan_slab_free mm/kasan/common.c:346 [inline] __kasan_slab_free+0x107/0x120 mm/kasan/common.c:374 kasan_slab_free include/linux/kasan.h:235 [inline] slab_free_hook mm/slub.c:1723 [inline] slab_free_freelist_hook mm/slub.c:1749 [inline] slab_free mm/slub.c:3513 [inline] kfree+0xac/0x2d0 mm/slub.c:4561 selinux_tun_dev_free_security+0x1a/0x20 security/selinux/hooks.c:5605 security_tun_dev_free_security+0x4f/0x90 security/security.c:2342 tun_free_netdev+0xe6/0x150 drivers/net/tun.c:2215 netdev_run_todo+0x4df/0x840 net/core/dev.c:10627 rtnl_unlock+0x13/0x20 net/core/rtnetlink.c:112 __tun_chr_ioctl+0x80c/0x2870 drivers/net/tun.c:3302 tun_chr_ioctl+0x2f/0x40 drivers/net/tun.c:3311 vfs_ioctl fs/ioctl.c:51 [inline] __do_sys_ioctl fs/ioctl.c:874 [inline] __se_sys_ioctl fs/ioctl.c:860 [inline] __x64_sys_ioctl+0x19d/0x220 fs/ioctl.c:860 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x3a/0x80 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x44/0xae Reported-by: syzkaller Signed-off-by: George Kennedy Suggested-by: Jakub Kicinski Link: https://lore.kernel.org/r/1639679132-19884-1-git-send-email-george.kennedy@oracle.com Signed-off-by: Jakub Kicinski [DP: adjusted context for 4.19 stable] Signed-off-by: Dragos-Marian Panait Signed-off-by: Greg Kroah-Hartman --- drivers/net/tun.c | 109 +++++++++++++++++++++++++++++------------------------- 1 file changed, 59 insertions(+), 50 deletions(-) diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 5194b2ccd4b7..e61f02f7642c 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -256,6 +256,9 @@ struct tun_struct { struct tun_prog __rcu *steering_prog; struct tun_prog __rcu *filter_prog; struct ethtool_link_ksettings link_ksettings; + /* init args */ + struct file *file; + struct ifreq *ifr; }; struct veth { @@ -281,6 +284,9 @@ void *tun_ptr_to_xdp(void *ptr) } EXPORT_SYMBOL(tun_ptr_to_xdp); +static void tun_flow_init(struct tun_struct *tun); +static void tun_flow_uninit(struct tun_struct *tun); + static int tun_napi_receive(struct napi_struct *napi, int budget) { struct tun_file *tfile = container_of(napi, struct tun_file, napi); @@ -1038,6 +1044,49 @@ static int check_filter(struct tap_filter *filter, const struct sk_buff *skb) static const struct ethtool_ops tun_ethtool_ops; +static int tun_net_init(struct net_device *dev) +{ + struct tun_struct *tun = netdev_priv(dev); + struct ifreq *ifr = tun->ifr; + int err; + + tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats); + if (!tun->pcpu_stats) + return -ENOMEM; + + spin_lock_init(&tun->lock); + + err = security_tun_dev_alloc_security(&tun->security); + if (err < 0) { + free_percpu(tun->pcpu_stats); + return err; + } + + tun_flow_init(tun); + + dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | + TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_STAG_TX; + dev->features = dev->hw_features | NETIF_F_LLTX; + dev->vlan_features = dev->features & + ~(NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_STAG_TX); + + tun->flags = (tun->flags & ~TUN_FEATURES) | + (ifr->ifr_flags & TUN_FEATURES); + + 
INIT_LIST_HEAD(&tun->disabled); + err = tun_attach(tun, tun->file, false, ifr->ifr_flags & IFF_NAPI, + ifr->ifr_flags & IFF_NAPI_FRAGS, false); + if (err < 0) { + tun_flow_uninit(tun); + security_tun_dev_free_security(tun->security); + free_percpu(tun->pcpu_stats); + return err; + } + return 0; +} + /* Net device detach from fd. */ static void tun_net_uninit(struct net_device *dev) { @@ -1268,6 +1317,7 @@ static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp) } static const struct net_device_ops tun_netdev_ops = { + .ndo_init = tun_net_init, .ndo_uninit = tun_net_uninit, .ndo_open = tun_net_open, .ndo_stop = tun_net_close, @@ -1347,6 +1397,7 @@ static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp) } static const struct net_device_ops tap_netdev_ops = { + .ndo_init = tun_net_init, .ndo_uninit = tun_net_uninit, .ndo_open = tun_net_open, .ndo_stop = tun_net_close, @@ -1386,7 +1437,7 @@ static void tun_flow_uninit(struct tun_struct *tun) #define MAX_MTU 65535 /* Initialize net device. */ -static void tun_net_init(struct net_device *dev) +static void tun_net_initialize(struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); @@ -2658,9 +2709,6 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) if (!dev) return -ENOMEM; - err = dev_get_valid_name(net, dev, name); - if (err < 0) - goto err_free_dev; dev_net_set(dev, net); dev->rtnl_link_ops = &tun_link_ops; @@ -2679,41 +2727,16 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) tun->rx_batched = 0; RCU_INIT_POINTER(tun->steering_prog, NULL); - tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats); - if (!tun->pcpu_stats) { - err = -ENOMEM; - goto err_free_dev; - } - - spin_lock_init(&tun->lock); - - err = security_tun_dev_alloc_security(&tun->security); - if (err < 0) - goto err_free_stat; - - tun_net_init(dev); - tun_flow_init(tun); - - dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | - TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_STAG_TX; - dev->features = dev->hw_features | NETIF_F_LLTX; - dev->vlan_features = dev->features & - ~(NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_STAG_TX); + tun->ifr = ifr; + tun->file = file; - tun->flags = (tun->flags & ~TUN_FEATURES) | - (ifr->ifr_flags & TUN_FEATURES); - - INIT_LIST_HEAD(&tun->disabled); - err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI, - ifr->ifr_flags & IFF_NAPI_FRAGS, false); - if (err < 0) - goto err_free_flow; + tun_net_initialize(dev); err = register_netdevice(tun->dev); - if (err < 0) - goto err_detach; + if (err < 0) { + free_netdev(dev); + return err; + } /* free_netdev() won't check refcnt, to aovid race * with dev_put() we need publish tun after registration. */ @@ -2732,20 +2755,6 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) strcpy(ifr->ifr_name, tun->dev->name); return 0; - -err_detach: - tun_detach_all(dev); - /* register_netdevice() already called tun_free_netdev() */ - goto err_free_dev; - -err_free_flow: - tun_flow_uninit(tun); - security_tun_dev_free_security(tun->security); -err_free_stat: - free_percpu(tun->pcpu_stats); -err_free_dev: - free_netdev(dev); - return err; } static void tun_get_iff(struct net *net, struct tun_struct *tun, -- cgit v1.2.1 From c26f3ff4c0be590c1250f945ac2e4fc5fcdc5f45 Mon Sep 17 00:00:00 2001 From: Jan Kara via Ocfs2-devel Date: Thu, 2 Mar 2023 16:38:43 +0100 Subject: ocfs2: fix data corruption after failed write commit 90410bcf873cf05f54a32183afff0161f44f9715 upstream. 
When a buffered write fails to copy data into the underlying page cache page, ocfs2_write_end_nolock() just zeroes out and dirties the page. This can leave a dirty page beyond EOF, and if page writeback tries to write this page before the write succeeds and expands i_size, the page gets into an inconsistent state where the page dirty bit is clear but the buffer dirty bits stay set, resulting in the page data never getting written, so the data copied to the page is lost. Fix the problem by invalidating the page beyond EOF after a failed write. Link: https://lkml.kernel.org/r/20230302153843.18499-1-jack@suse.cz Fixes: 6dbf7bb55598 ("fs: Don't invalidate page buffers in block_write_full_page()") Signed-off-by: Jan Kara Reviewed-by: Joseph Qi Cc: Mark Fasheh Cc: Joel Becker Cc: Junxiao Bi Cc: Changwei Ge Cc: Gang He Cc: Jun Piao Cc: Signed-off-by: Andrew Morton [ replace block_invalidate_folio to block_invalidatepage ] Signed-off-by: Joseph Qi Signed-off-by: Greg Kroah-Hartman --- fs/ocfs2/aops.c | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index b6948813eb06..1353db3f7f48 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -2003,11 +2003,25 @@ int ocfs2_write_end_nolock(struct address_space *mapping, } if (unlikely(copied < len) && wc->w_target_page) { + loff_t new_isize; + if (!PageUptodate(wc->w_target_page)) copied = 0; - ocfs2_zero_new_buffers(wc->w_target_page, start+copied, - start+len); + new_isize = max_t(loff_t, i_size_read(inode), pos + copied); + if (new_isize > page_offset(wc->w_target_page)) + ocfs2_zero_new_buffers(wc->w_target_page, start+copied, + start+len); + else { + /* + * When page is fully beyond new isize (data copy + * failed), do not bother zeroing the page. Invalidate + * it instead so that writeback does not get confused + * put page & buffer dirty bits into inconsistent + * state. + */ + block_invalidatepage(wc->w_target_page, 0, PAGE_SIZE); + } } if (wc->w_target_page) flush_dcache_page(wc->w_target_page); -- cgit v1.2.1 From 643170dac6572bda3f481e406e54911cbfd8237a Mon Sep 17 00:00:00 2001 From: Ivan Bornyakov Date: Mon, 6 Mar 2023 16:25:26 +0300 Subject: bus: imx-weim: fix branch condition evaluates to a garbage value [ Upstream commit 1adab2922c58e7ff4fa9f0b43695079402cce876 ] If the bus type is other than imx50_weim_devtype and there are no child devices, the variable 'ret' in weim_parse_dt() will not be initialized, but will be used as a branch condition and return value. Fix this by initializing 'ret' with 0. This was discovered with the help of clang-analyzer, but the situation is quite possible in real life.
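To see the bug class in isolation, consider this hedged user-space reduction (parse() and its arguments are invented stand-ins for weim_parse_dt(), imx_weim_gpr_setup() and the child-node loop): when no branch and no loop iteration assigns 'ret', the uninitialized value becomes the return value, so initializing it makes the empty path well-defined.

#include <stdio.h>

static int parse(int is_imx50, int gpr_setup_fails, int nchildren)
{
	int ret = 0, have_child = 0;	/* the fix: ret starts defined */

	if (is_imx50) {
		ret = gpr_setup_fails ? -1 : 0;	/* imx_weim_gpr_setup() */
		if (ret)
			return ret;
	}

	for (int i = 0; i < nchildren; i++) {
		ret = 0;		/* stands in for a child probe result */
		have_child = 1;
	}

	if (have_child)
		ret = 0;

	return ret;	/* without "= 0" this was garbage when no branch ran */
}

int main(void)
{
	printf("%d\n", parse(0, 0, 0));	/* deterministically 0 after the fix */
	return 0;
}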
Fixes: 52c47b63412b ("bus: imx-weim: improve error handling upon child probe-failure") Signed-off-by: Ivan Bornyakov Cc: stable@vger.kernel.org Reviewed-by: Fabio Estevam Signed-off-by: Shawn Guo Signed-off-by: Sasha Levin --- drivers/bus/imx-weim.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c index 6a94aa6a22c2..1a0f977904b6 100644 --- a/drivers/bus/imx-weim.c +++ b/drivers/bus/imx-weim.c @@ -146,8 +146,8 @@ static int __init weim_parse_dt(struct platform_device *pdev, const struct of_device_id *of_id = of_match_device(weim_id_table, &pdev->dev); const struct imx_weim_devtype *devtype = of_id->data; + int ret = 0, have_child = 0; struct device_node *child; - int ret, have_child = 0; if (devtype == &imx50_weim_devtype) { ret = imx_weim_gpr_setup(pdev); -- cgit v1.2.1 From b7de906cf91457c655edb0cb734df5a19f970ed3 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Mon, 6 Mar 2023 09:36:25 +1100 Subject: md: avoid signed overflow in slot_store() [ Upstream commit 3bc57292278a0b6ac4656cad94c14f2453344b57 ] slot_store() uses kstrtouint() to get a slot number, but stores the result in an "int" variable (by casting a pointer). This can result in a negative slot number if the unsigned int value is very large. A negative number means that the slot is empty, but setting a negative slot number this way will not remove the device from the array. I don't think this is a serious problem, but it could cause confusion and it is best to fix it. Reported-by: Dan Carpenter Signed-off-by: NeilBrown Signed-off-by: Song Liu Signed-off-by: Sasha Levin --- drivers/md/md.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/md/md.c b/drivers/md/md.c index 89d4dcc5253e..f8c111b36992 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -2991,6 +2991,9 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len) err = kstrtouint(buf, 10, (unsigned int *)&slot); if (err < 0) return err; + if (slot < 0) + /* overflow */ + return -ENOSPC; } if (rdev->mddev->pers && slot == -1) { /* Setting 'slot' on an active array requires also -- cgit v1.2.1 From 66925411ee9e1ad39ea4cecbc624305bfc566116 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Mon, 13 Mar 2023 00:49:24 +0000 Subject: ALSA: asihpi: check pao in control_message() [ Upstream commit 9026c0bf233db53b86f74f4c620715e94eb32a09 ] control_message() might be called with pao = NULL. control_message() is shown below as a sample. (B) static void control_message(struct hpi_adapter_obj *pao, ...) { ^^^ struct hpi_hw_obj *phw = pao->priv; ... ^^^ } (A) void _HPI_6205(struct hpi_adapter_obj *pao, ...) { ^^^ ... case HPI_OBJ_CONTROL: (B) control_message(pao, phm, phr); break; ^^^ ... } void HPI_6205(...) { ... (A) _HPI_6205(NULL, phm, phr); ... ^^^^ } Therefore, we will get too many warnings via cppcheck, like those below sound/pci/asihpi/hpi6205.c:238:27: warning: Possible null pointer dereference: pao [nullPointer] struct hpi_hw_obj *phw = pao->priv; ^ sound/pci/asihpi/hpi6205.c:433:13: note: Calling function '_HPI_6205', 1st argument 'NULL' value is 0 _HPI_6205(NULL, phm, phr); ^ sound/pci/asihpi/hpi6205.c:401:20: note: Calling function 'control_message', 1st argument 'pao' value is 0 control_message(pao, phm, phr); ^ Set phr->error, as many other functions do, and don't call _HPI_6205() with NULL.
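A minimal user-space sketch of the dispatch pattern the fix adopts (the struct layouts and the error value are illustrative, not the driver's real definitions): instead of passing a NULL object down to a handler that dereferences it unconditionally, the dispatcher fails the response early.

#include <stdio.h>

#define HPI_ERROR_INVALID_OBJ_INDEX 100	/* value is a stand-in */

struct adapter  { int priv; };
struct response { int error; };

static void control_message(struct adapter *pao, struct response *phr)
{
	int priv = pao->priv;	/* would fault if pao were NULL */

	phr->error = 0;
	(void)priv;
}

static void dispatch(struct adapter *pao, struct response *phr)
{
	if (!pao) {	/* the fix: report an error, don't dereference */
		phr->error = HPI_ERROR_INVALID_OBJ_INDEX;
		return;
	}
	control_message(pao, phr);
}

int main(void)
{
	struct response phr = { 0 };

	dispatch(NULL, &phr);
	printf("error=%d\n", phr.error);	/* 100, no NULL dereference */
	return 0;
}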
Signed-off-by: Kuninori Morimoto Link: https://lore.kernel.org/r/87ttypeaqz.wl-kuninori.morimoto.gx@renesas.com Signed-off-by: Takashi Iwai Signed-off-by: Sasha Levin --- sound/pci/asihpi/hpi6205.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/pci/asihpi/hpi6205.c b/sound/pci/asihpi/hpi6205.c index 2864698436a5..6a49f897c4d9 100644 --- a/sound/pci/asihpi/hpi6205.c +++ b/sound/pci/asihpi/hpi6205.c @@ -441,7 +441,7 @@ void HPI_6205(struct hpi_message *phm, struct hpi_response *phr) pao = hpi_find_adapter(phm->adapter_index); } else { /* subsys messages don't address an adapter */ - _HPI_6205(NULL, phm, phr); + phr->error = HPI_ERROR_INVALID_OBJ_INDEX; return; } -- cgit v1.2.1 From 3590498117a11aa1f92a97e8a04d95320e347ebd Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Mon, 13 Mar 2023 00:50:28 +0000 Subject: ALSA: hda/ca0132: fixup buffer overrun at tuning_ctl_set() [ Upstream commit 98e5eb110095ec77cb6d775051d181edbf9cd3cf ] tuning_ctl_set() might have a buffer overrun at (X) if it doesn't break from the loop by matching at (A). static int tuning_ctl_set(...) { for (i = 0; i < TUNING_CTLS_COUNT; i++) (A) if (nid == ca0132_tuning_ctls[i].nid) break; snd_hda_power_up(...); (X) dspio_set_param(..., ca0132_tuning_ctls[i].mid, ...); snd_hda_power_down(...); ^ return 1; } We will get the below error from cppcheck sound/pci/hda/patch_ca0132.c:4229:2: note: After for loop, i has value 12 for (i = 0; i < TUNING_CTLS_COUNT; i++) ^ sound/pci/hda/patch_ca0132.c:4234:43: note: Array index out of bounds dspio_set_param(codec, ca0132_tuning_ctls[i].mid, 0x20, ^ This patch handles the non-matching case. Signed-off-by: Kuninori Morimoto Link: https://lore.kernel.org/r/87sfe9eap7.wl-kuninori.morimoto.gx@renesas.com Signed-off-by: Takashi Iwai Signed-off-by: Sasha Levin --- sound/pci/hda/patch_ca0132.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c index ca8a37388d56..9f0e6bbc523c 100644 --- a/sound/pci/hda/patch_ca0132.c +++ b/sound/pci/hda/patch_ca0132.c @@ -3620,8 +3620,10 @@ static int tuning_ctl_set(struct hda_codec *codec, hda_nid_t nid, for (i = 0; i < TUNING_CTLS_COUNT; i++) if (nid == ca0132_tuning_ctls[i].nid) - break; + goto found; + return -EINVAL; +found: snd_hda_power_up(codec); dspio_set_param(codec, ca0132_tuning_ctls[i].mid, 0x20, ca0132_tuning_ctls[i].req, -- cgit v1.2.1 From 598dc990b3bd606524c28db522a79bf13f6416ca Mon Sep 17 00:00:00 2001 From: Wei Chen Date: Tue, 7 Mar 2023 13:08:56 +0000 Subject: fbdev: tgafb: Fix potential divide by zero [ Upstream commit f90bd245de82c095187d8c2cabb8b488a39eaecc ] fb_set_var would be called when a user invokes an ioctl with cmd FBIOPUT_VSCREENINFO. User-provided data would finally reach tgafb_check_var. If var->pixclock is assigned zero, a divide by zero would occur when checking whether the reciprocal of var->pixclock is too high. Similar crashes have happened in other fbdev drivers. There is no check or modification of var->pixclock along the call chain to tgafb_check_var. We believe it could also be triggered in the tgafb driver from user space.
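The guard added by this and the following fbdev patches is easy to demonstrate in a user-space sketch (struct var is a stand-in for fb_var_screeninfo and the limit is illustrative): reject pixclock == 0 before any division.

#include <stdio.h>

struct var { unsigned int pixclock; };	/* pixel clock in picoseconds */

static int check_var(const struct var *v)
{
	if (!v->pixclock)	/* the added guard: reject before dividing */
		return -22;	/* -EINVAL */
	/* reciprocal check, PICOS2KHZ-style: kHz = 10^9 / pixclock(ps) */
	unsigned long khz = 1000000000UL / v->pixclock;

	return khz > 1000000UL ? -22 : 0;	/* limit is made up here */
}

int main(void)
{
	struct var bad = { .pixclock = 0 };
	struct var ok  = { .pixclock = 39722 };	/* roughly 25.175 MHz */

	printf("%d %d\n", check_var(&bad), check_var(&ok));	/* -22 0 */
	return 0;
}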
Signed-off-by: Wei Chen Signed-off-by: Helge Deller Signed-off-by: Sasha Levin --- drivers/video/fbdev/tgafb.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/video/fbdev/tgafb.c b/drivers/video/fbdev/tgafb.c index 65ba9921506e..9d2912947eef 100644 --- a/drivers/video/fbdev/tgafb.c +++ b/drivers/video/fbdev/tgafb.c @@ -166,6 +166,9 @@ tgafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { struct tga_par *par = (struct tga_par *)info->par; + if (!var->pixclock) + return -EINVAL; + if (par->tga_type == TGA_TYPE_8PLANE) { if (var->bits_per_pixel != 8) return -EINVAL; -- cgit v1.2.1 From 178ff87d2a0c2d3d74081e1c2efbb33b3487267d Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Tue, 14 Mar 2023 19:32:38 -0700 Subject: sched_getaffinity: don't assume 'cpumask_size()' is fully initialized [ Upstream commit 6015b1aca1a233379625385feb01dd014aca60b5 ] The getaffinity() system call uses 'cpumask_size()' to decide how big the CPU mask is - so far so good. It is indeed the allocation size of a cpumask. But the code also assumes that the whole allocation is initialized without actually doing so itself. That's wrong, because we might have fixed-size allocations (making copying and clearing more efficient), but not all of it is then necessarily used if 'nr_cpu_ids' is smaller. Having checked other users of 'cpumask_size()', they all seem to be ok, either using it purely for the allocation size, or explicitly zeroing the cpumask before using the size in bytes to copy it. See for example the ublk_ctrl_get_queue_affinity() function that uses the proper 'zalloc_cpumask_var()' to make sure that the whole mask is cleared, whether the storage is on the stack or if it was an external allocation. Fix this by just zeroing the allocation before using it. Do the same for the compat version of sched_getaffinity(), which had the same logic. Also, for consistency, make sched_getaffinity() use 'cpumask_bits()' to access the bits. For a cpumask_var_t, it ends up being a pointer to the same data either way, but it's just a good idea to treat it like you would a 'cpumask_t'. The compat case already did that. 
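The bug class generalizes beyond cpumasks; here is a hedged user-space reduction (MASK_BYTES and USED_BYTES are invented stand-ins for cpumask_size() and the nr_cpu_ids prefix): a fixed-size allocation is only partially written, but the full allocation size is copied out, so a zeroing allocator makes the tail defined instead of leaking heap garbage.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MASK_BYTES 128	/* stands in for cpumask_size() */
#define USED_BYTES  16	/* bytes actually covered by nr_cpu_ids */

static long getaffinity(unsigned char *user_buf, size_t len)
{
	unsigned char *mask = calloc(1, MASK_BYTES);	/* was malloc-like */
	size_t retlen = len < MASK_BYTES ? len : MASK_BYTES;

	if (!mask)
		return -1;
	memset(mask, 0xff, USED_BYTES);	/* only this prefix is ever written */
	memcpy(user_buf, mask, retlen);	/* the tail is now defined zeroes */
	free(mask);
	return (long)retlen;
}

int main(void)
{
	unsigned char buf[MASK_BYTES];
	long n = getaffinity(buf, sizeof(buf));

	printf("copied %ld bytes, last byte %u\n", n, buf[MASK_BYTES - 1]);
	return 0;
}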
Reported-by: Ryan Roberts Link: https://lore.kernel.org/lkml/7d026744-6bd6-6827-0471-b5e8eae0be3f@arm.com/ Cc: Yury Norov Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- kernel/compat.c | 2 +- kernel/sched/core.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/kernel/compat.c b/kernel/compat.c index e4548a9e9c52..5f320b0db8d0 100644 --- a/kernel/compat.c +++ b/kernel/compat.c @@ -307,7 +307,7 @@ COMPAT_SYSCALL_DEFINE3(sched_getaffinity, compat_pid_t, pid, unsigned int, len, if (len & (sizeof(compat_ulong_t)-1)) return -EINVAL; - if (!alloc_cpumask_var(&mask, GFP_KERNEL)) + if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) return -ENOMEM; ret = sched_getaffinity(pid, mask); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 207cd446b9d3..8d5a9fa8a951 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4953,14 +4953,14 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, if (len & (sizeof(unsigned long)-1)) return -EINVAL; - if (!alloc_cpumask_var(&mask, GFP_KERNEL)) + if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) return -ENOMEM; ret = sched_getaffinity(pid, mask); if (ret == 0) { unsigned int retlen = min(len, cpumask_size()); - if (copy_to_user(user_mask_ptr, mask, retlen)) + if (copy_to_user(user_mask_ptr, cpumask_bits(mask), retlen)) ret = -EFAULT; else ret = retlen; -- cgit v1.2.1 From 06c7288521f58a70fd34fc27346b36e76a2f0bbf Mon Sep 17 00:00:00 2001 From: Wei Chen Date: Wed, 15 Mar 2023 07:18:31 +0000 Subject: fbdev: nvidia: Fix potential divide by zero [ Upstream commit 92e2a00f2987483e1f9253625828622edd442e61 ] The variable var->pixclock can be set by the user. If it equals zero, a divide by zero would occur in nvidiafb_set_par. Similar crashes have happened in other fbdev drivers. There is no check or modification of var->pixclock along the call chain to nvidia_check_var and nvidiafb_set_par. We believe it could also be triggered in the nvidia driver from user space. Signed-off-by: Wei Chen Signed-off-by: Helge Deller Signed-off-by: Sasha Levin --- drivers/video/fbdev/nvidia/nvidia.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c index fbeeed5afe35..aa502b3ba25a 100644 --- a/drivers/video/fbdev/nvidia/nvidia.c +++ b/drivers/video/fbdev/nvidia/nvidia.c @@ -766,6 +766,8 @@ static int nvidiafb_check_var(struct fb_var_screeninfo *var, int pitch, err = 0; NVTRACE_ENTER(); + if (!var->pixclock) + return -EINVAL; var->transp.offset = 0; var->transp.length = 0; -- cgit v1.2.1 From 2961c0bd7e877d2c92a0517eee3024aabb95f8cd Mon Sep 17 00:00:00 2001 From: Wei Chen Date: Wed, 15 Mar 2023 08:33:47 +0000 Subject: fbdev: intelfb: Fix potential divide by zero [ Upstream commit d823685486a3446d061fed7c7d2f80af984f119a ] The variable var->pixclock is controlled by the user and can be assigned zero. Without a proper check, a divide by zero would occur in intelfbhw_validate_mode and intelfbhw_mode_to_hw. Error out if var->pixclock is zero.
Signed-off-by: Wei Chen Signed-off-by: Helge Deller Signed-off-by: Sasha Levin --- drivers/video/fbdev/intelfb/intelfbdrv.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/video/fbdev/intelfb/intelfbdrv.c b/drivers/video/fbdev/intelfb/intelfbdrv.c index d7463a2a5d83..c97c0c851480 100644 --- a/drivers/video/fbdev/intelfb/intelfbdrv.c +++ b/drivers/video/fbdev/intelfb/intelfbdrv.c @@ -1215,6 +1215,9 @@ static int intelfb_check_var(struct fb_var_screeninfo *var, dinfo = GET_DINFO(info); + if (!var->pixclock) + return -EINVAL; + /* update the pitch */ if (intelfbhw_validate_mode(dinfo, var) != 0) return -EINVAL; -- cgit v1.2.1 From a7134de64ad7e62c439a18b4c79a8142974c3dbc Mon Sep 17 00:00:00 2001 From: Wei Chen Date: Wed, 15 Mar 2023 09:05:18 +0000 Subject: fbdev: lxfb: Fix potential divide by zero [ Upstream commit 61ac4b86a4c047c20d5cb423ddd87496f14d9868 ] var->pixclock can be assigned zero by the user. Without a proper check, a divide by zero would occur in lx_set_clock. Error out if var->pixclock is zero. Signed-off-by: Wei Chen Signed-off-by: Helge Deller Signed-off-by: Sasha Levin --- drivers/video/fbdev/geode/lxfb_core.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/video/fbdev/geode/lxfb_core.c b/drivers/video/fbdev/geode/lxfb_core.c index 138da6cb6cbc..4345246b4c79 100644 --- a/drivers/video/fbdev/geode/lxfb_core.c +++ b/drivers/video/fbdev/geode/lxfb_core.c @@ -247,6 +247,9 @@ static void get_modedb(struct fb_videomode **modedb, unsigned int *size) static int lxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { + if (!var->pixclock) + return -EINVAL; + if (var->xres > 1920 || var->yres > 1440) return -EINVAL; -- cgit v1.2.1 From d53b89fc7d95030192034e06e205d096d74dcd98 Mon Sep 17 00:00:00 2001 From: Wei Chen Date: Wed, 15 Mar 2023 09:22:54 +0000 Subject: fbdev: au1200fb: Fix potential divide by zero [ Upstream commit 44a3b36b42acfc433aaaf526191dd12fbb919fdb ] var->pixclock can be assigned zero by the user. Without a proper check, a divide by zero would occur when invoking the macro PICOS2KHZ in au1200fb_fb_check_var. Error out if var->pixclock is zero. Signed-off-by: Wei Chen Signed-off-by: Helge Deller Signed-off-by: Sasha Levin --- drivers/video/fbdev/au1200fb.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c index 3872ccef4cb2..f8e83a951918 100644 --- a/drivers/video/fbdev/au1200fb.c +++ b/drivers/video/fbdev/au1200fb.c @@ -1039,6 +1039,9 @@ static int au1200fb_fb_check_var(struct fb_var_screeninfo *var, u32 pixclock; int screen_size, plane; + if (!var->pixclock) + return -EINVAL; + plane = fbdev->plane; /* Make sure that the mode respect all LCD controller and -- cgit v1.2.1 From 590c09e046a0e5fc47b239d807dc2efaee753de0 Mon Sep 17 00:00:00 2001 From: Harshit Mogalapalli Date: Mon, 6 Mar 2023 11:18:24 -0800 Subject: ca8210: Fix unsigned mac_len comparison with zero in ca8210_skb_tx() [ Upstream commit 748b2f5e82d17480404b3e2895388fc2925f7caf ] mac_len is of type unsigned, which can never be less than zero. mac_len = ieee802154_hdr_peek_addrs(skb, &header); if (mac_len < 0) return mac_len; Change it to type int, as ieee802154_hdr_peek_addrs() can return negative integers; this was found by static analysis with smatch.
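This is a classic C pitfall worth seeing in isolation; the following small, runnable program (hdr_peek is an invented stand-in for ieee802154_hdr_peek_addrs) shows why the unsigned check can never fire - the negative return wraps to a huge positive value and the comparison is compile-time false (most compilers even warn about the tautology):

#include <stdio.h>

static int hdr_peek(void)	/* stands in for the real parser */
{
	return -22;		/* an error, e.g. -EINVAL */
}

int main(void)
{
	unsigned int mac_len_u = hdr_peek();	/* wraps to 4294967274 */
	int mac_len_s = hdr_peek();

	printf("unsigned < 0: %d\n", mac_len_u < 0 ? 1 : 0);	/* 0 */
	printf("int      < 0: %d\n", mac_len_s < 0 ? 1 : 0);	/* 1 */
	return 0;
}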
Fixes: 6c993779ea1d ("ca8210: fix mac_len negative array access") Signed-off-by: Harshit Mogalapalli Acked-by: Alexander Aring Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/20230306191824.4115839-1-harshit.m.mogalapalli@oracle.com Signed-off-by: Stefan Schmidt Signed-off-by: Sasha Levin --- drivers/net/ieee802154/ca8210.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c index 2d4471b77fa7..f75faec23cc9 100644 --- a/drivers/net/ieee802154/ca8210.c +++ b/drivers/net/ieee802154/ca8210.c @@ -1943,10 +1943,9 @@ static int ca8210_skb_tx( struct ca8210_priv *priv ) { - int status; struct ieee802154_hdr header = { }; struct secspec secspec; - unsigned int mac_len; + int mac_len, status; dev_dbg(&priv->spi->dev, "%s called\n", __func__); -- cgit v1.2.1 From 06ca4e9b78be04c6b2b66e83e6149ef361285f4c Mon Sep 17 00:00:00 2001 From: Tomas Henzl Date: Fri, 24 Mar 2023 16:01:34 +0100 Subject: scsi: megaraid_sas: Fix crash after a double completion [ Upstream commit 2309df27111a51734cb9240b4d3c25f2f3c6ab06 ] When a physical disk is attached directly "without JBOD MAP support" (see megasas_get_tm_devhandle()) then there is no real error handling in the driver. Return FAILED instead of SUCCESS. Fixes: 18365b138508 ("megaraid_sas: Task management support") Signed-off-by: Tomas Henzl Link: https://lore.kernel.org/r/20230324150134.14696-1-thenzl@redhat.com Signed-off-by: Martin K. Petersen Signed-off-by: Sasha Levin --- drivers/scsi/megaraid/megaraid_sas_fusion.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index bdb12bf0d5c7..b400167f9ad4 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -4367,7 +4367,7 @@ int megasas_task_abort_fusion(struct scsi_cmnd *scmd) devhandle = megasas_get_tm_devhandle(scmd->device); if (devhandle == (u16)ULONG_MAX) { - ret = SUCCESS; + ret = FAILED; sdev_printk(KERN_INFO, scmd->device, "task abort issued for invalid devhandle\n"); mutex_unlock(&instance->reset_mutex); @@ -4440,7 +4440,7 @@ int megasas_reset_target_fusion(struct scsi_cmnd *scmd) devhandle = megasas_get_tm_devhandle(scmd->device); if (devhandle == (u16)ULONG_MAX) { - ret = SUCCESS; + ret = FAILED; sdev_printk(KERN_INFO, scmd->device, "target reset issued for invalid devhandle\n"); mutex_unlock(&instance->reset_mutex); -- cgit v1.2.1 From 618b15d09fed6126356101543451d49860db4388 Mon Sep 17 00:00:00 2001 From: Ivan Orlov Date: Tue, 14 Mar 2023 16:04:45 +0400 Subject: can: bcm: bcm_tx_setup(): fix KMSAN uninit-value in vfs_write [ Upstream commit 2b4c99f7d9a57ecd644eda9b1fb0a1072414959f ] Syzkaller reported the following issue: ===================================================== BUG: KMSAN: uninit-value in aio_rw_done fs/aio.c:1520 [inline] BUG: KMSAN: uninit-value in aio_write+0x899/0x950 fs/aio.c:1600 aio_rw_done fs/aio.c:1520 [inline] aio_write+0x899/0x950 fs/aio.c:1600 io_submit_one+0x1d1c/0x3bf0 fs/aio.c:2019 __do_sys_io_submit fs/aio.c:2078 [inline] __se_sys_io_submit+0x293/0x770 fs/aio.c:2048 __x64_sys_io_submit+0x92/0xd0 fs/aio.c:2048 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x3d/0xb0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x63/0xcd Uninit was created at: slab_post_alloc_hook mm/slab.h:766 [inline] slab_alloc_node mm/slub.c:3452 [inline] __kmem_cache_alloc_node+0x71f/0xce0 mm/slub.c:3491 
__do_kmalloc_node mm/slab_common.c:967 [inline] __kmalloc+0x11d/0x3b0 mm/slab_common.c:981 kmalloc_array include/linux/slab.h:636 [inline] bcm_tx_setup+0x80e/0x29d0 net/can/bcm.c:930 bcm_sendmsg+0x3a2/0xce0 net/can/bcm.c:1351 sock_sendmsg_nosec net/socket.c:714 [inline] sock_sendmsg net/socket.c:734 [inline] sock_write_iter+0x495/0x5e0 net/socket.c:1108 call_write_iter include/linux/fs.h:2189 [inline] aio_write+0x63a/0x950 fs/aio.c:1600 io_submit_one+0x1d1c/0x3bf0 fs/aio.c:2019 __do_sys_io_submit fs/aio.c:2078 [inline] __se_sys_io_submit+0x293/0x770 fs/aio.c:2048 __x64_sys_io_submit+0x92/0xd0 fs/aio.c:2048 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x3d/0xb0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x63/0xcd CPU: 1 PID: 5034 Comm: syz-executor350 Not tainted 6.2.0-rc6-syzkaller-80422-geda666ff2276 #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/12/2023 ===================================================== We can follow the call chain and find that the 'bcm_tx_setup' function calls 'memcpy_from_msg' to copy some content to the newly allocated frame of 'op->frames'. After that, the 'len' field of the copied structure is compared with a constant value (64 or 8). However, if 'memcpy_from_msg' returns an error, we will compare some uninitialized memory. This triggers an 'uninit-value' issue. This patch adds handling of possible 'memcpy_from_msg' errors to avoid the uninit-value issue. Tested via syzkaller Reported-by: syzbot+c9bfd85eca611ebf5db1@syzkaller.appspotmail.com Link: https://syzkaller.appspot.com/bug?id=47f897f8ad958bbde5790ebf389b5e7e0a345089 Signed-off-by: Ivan Orlov Fixes: 6f3b911d5f29b ("can: bcm: add support for CAN FD frames") Acked-by: Oliver Hartkopp Link: https://lore.kernel.org/all/20230314120445.12407-1-ivan.orlov0322@gmail.com Signed-off-by: Marc Kleine-Budde Signed-off-by: Sasha Levin --- net/can/bcm.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/net/can/bcm.c b/net/can/bcm.c index 74e555a22de7..61269cc2fa82 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -935,6 +935,8 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, cf = op->frames + op->cfsiz * i; err = memcpy_from_msg((u8 *)cf, msg, op->cfsiz); + if (err < 0) + goto free_op; if (op->flags & CAN_FD_FRAME) { if (cf->len > 64) @@ -944,12 +946,8 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, err = -EINVAL; } - if (err < 0) { - if (op->frames != &op->sframe) - kfree(op->frames); - kfree(op); - return err; - } + if (err < 0) + goto free_op; if (msg_head->flags & TX_CP_CAN_ID) { /* copy can_id into frame */ @@ -1020,6 +1018,12 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, bcm_tx_start_timer(op); return msg_head->nframes * op->cfsiz + MHSIZ; + +free_op: + if (op->frames != &op->sframe) + kfree(op->frames); + kfree(op); + return err; } /* -- cgit v1.2.1 From 09119a9f1bbbbb92b8da25777bea943a01bcfd50 Mon Sep 17 00:00:00 2001 From: Radoslaw Tyl Date: Tue, 28 Mar 2023 10:26:59 -0700 Subject: i40e: fix registers dump after run ethtool adapter self test [ Upstream commit c5cff16f461a4a434a9915a7be7ac9ced861a8a4 ] Fix the invalid register dump from ethtool -d ethX after an adapter self test with ethtool -t ethY, which caused invalid data to be displayed. The problem was caused by overwriting i40e_reg_list[].elements, which is shared between the ethtool self test and the dump.
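The pattern of the fix is general: keep the shared table const and apply per-run adjustments to a local copy of the element count. A hedged user-space sketch (register offsets, counts and the printf are invented; only the const-table-plus-local-override shape mirrors the patch):

#include <stddef.h>
#include <stdio.h>

struct reg_test { unsigned int offset; unsigned int elements; };

/* const: the self test may no longer scribble over the shared table */
static const struct reg_test reg_list[] = {
	{ 0x100, 1 },
	{ 0x200, 4 },
};

static void self_test(unsigned int num_tx_qp)
{
	for (size_t i = 0; i < sizeof(reg_list) / sizeof(reg_list[0]); i++) {
		unsigned int elements = reg_list[i].elements;

		/* per-run override stays in a local, as in the fix */
		if (reg_list[i].offset == 0x100 && num_tx_qp)
			elements = num_tx_qp;

		for (unsigned int j = 0; j < elements; j++)
			printf("test reg 0x%x[%u]\n", reg_list[i].offset, j);
	}
}

int main(void)
{
	self_test(2);	/* a later dump still sees elements == 1 */
	return 0;
}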
Fixes: 22dd9ae8afcc ("i40e: Rework register diagnostic") Signed-off-by: Radoslaw Tyl Reviewed-by: Michal Swiatkowski Tested-by: Arpana Arland (A Contingent worker at Intel) Signed-off-by: Tony Nguyen Reviewed-by: Leon Romanovsky Link: https://lore.kernel.org/r/20230328172659.3906413-1-anthony.l.nguyen@intel.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- drivers/net/ethernet/intel/i40e/i40e_diag.c | 11 ++++++----- drivers/net/ethernet/intel/i40e/i40e_diag.h | 2 +- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c index ef4d3762bf37..ca229b0efeb6 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_diag.c +++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c @@ -44,7 +44,7 @@ static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw, return 0; } -struct i40e_diag_reg_test_info i40e_reg_list[] = { +const struct i40e_diag_reg_test_info i40e_reg_list[] = { /* offset mask elements stride */ {I40E_QTX_CTL(0), 0x0000FFBF, 1, I40E_QTX_CTL(1) - I40E_QTX_CTL(0)}, @@ -78,27 +78,28 @@ i40e_status i40e_diag_reg_test(struct i40e_hw *hw) { i40e_status ret_code = 0; u32 reg, mask; + u32 elements; u32 i, j; for (i = 0; i40e_reg_list[i].offset != 0 && !ret_code; i++) { + elements = i40e_reg_list[i].elements; /* set actual reg range for dynamically allocated resources */ if (i40e_reg_list[i].offset == I40E_QTX_CTL(0) && hw->func_caps.num_tx_qp != 0) - i40e_reg_list[i].elements = hw->func_caps.num_tx_qp; + elements = hw->func_caps.num_tx_qp; if ((i40e_reg_list[i].offset == I40E_PFINT_ITRN(0, 0) || i40e_reg_list[i].offset == I40E_PFINT_ITRN(1, 0) || i40e_reg_list[i].offset == I40E_PFINT_ITRN(2, 0) || i40e_reg_list[i].offset == I40E_QINT_TQCTL(0) || i40e_reg_list[i].offset == I40E_QINT_RQCTL(0)) && hw->func_caps.num_msix_vectors != 0) - i40e_reg_list[i].elements = - hw->func_caps.num_msix_vectors - 1; + elements = hw->func_caps.num_msix_vectors - 1; /* test register access */ mask = i40e_reg_list[i].mask; - for (j = 0; j < i40e_reg_list[i].elements && !ret_code; j++) { + for (j = 0; j < elements && !ret_code; j++) { reg = i40e_reg_list[i].offset + (j * i40e_reg_list[i].stride); ret_code = i40e_diag_reg_pattern_test(hw, reg, mask); diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.h b/drivers/net/ethernet/intel/i40e/i40e_diag.h index c3340f320a18..1db7c6d57231 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_diag.h +++ b/drivers/net/ethernet/intel/i40e/i40e_diag.h @@ -20,7 +20,7 @@ struct i40e_diag_reg_test_info { u32 stride; /* bytes between each element */ }; -extern struct i40e_diag_reg_test_info i40e_reg_list[]; +extern const struct i40e_diag_reg_test_info i40e_reg_list[]; i40e_status i40e_diag_reg_test(struct i40e_hw *hw); i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw); -- cgit v1.2.1 From 3b366c8e2f0ce89d2c4a310a0357f60ed2384ee6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Steffen=20B=C3=A4tz?= Date: Wed, 29 Mar 2023 12:01:40 -0300 Subject: net: dsa: mv88e6xxx: Enable IGMP snooping on user ports only MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 7bcad0f0e6fbc1d613e49e0ee35c8e5f2e685bb0 ] Do not set the MV88E6XXX_PORT_CTL0_IGMP_MLD_SNOOP bit on CPU or DSA ports. This allows the host CPU port to be a regular IGMP listener by sending out IGMP Membership Reports, which would otherwise not be forwarded by the mv88e6xxx chip, but directly looped back to the CPU port itself.
Fixes: 54d792f257c6 ("net: dsa: Centralise global and port setup code into mv88e6xxx.") Signed-off-by: Steffen BΓ€tz Signed-off-by: Fabio Estevam Reviewed-by: Andrew Lunn Reviewed-by: Vladimir Oltean Reviewed-by: Florian Fainelli Link: https://lore.kernel.org/r/20230329150140.701559-1-festevam@gmail.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- drivers/net/dsa/mv88e6xxx/chip.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index be064bcfd70a..6b310f723580 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -2237,9 +2237,14 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port) * If this is the upstream port for this switch, enable * forwarding of unknown unicasts and multicasts. */ - reg = MV88E6XXX_PORT_CTL0_IGMP_MLD_SNOOP | - MV88E6185_PORT_CTL0_USE_TAG | MV88E6185_PORT_CTL0_USE_IP | + reg = MV88E6185_PORT_CTL0_USE_TAG | MV88E6185_PORT_CTL0_USE_IP | MV88E6XXX_PORT_CTL0_STATE_FORWARDING; + /* Forward any IPv4 IGMP or IPv6 MLD frames received + * by a USER port to the CPU port to allow snooping. + */ + if (dsa_is_user_port(ds, port)) + reg |= MV88E6XXX_PORT_CTL0_IGMP_MLD_SNOOP; + err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg); if (err) return err; -- cgit v1.2.1 From 9f25e5cf19c3c858f1414c3ea70ff6828fe44896 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Sat, 19 Oct 2019 10:13:26 +0200 Subject: net: mvneta: make tx buffer array agnostic [ Upstream commit 9e58c8b410650b5a6eb5b8fad8474bd8425a4023 ] Allow tx buffer array to contain both skb and xdp buffers in order to enable xdp frame recycling adding XDP_TX verdict support Signed-off-by: Lorenzo Bianconi Signed-off-by: David S. 
Miller Stable-dep-of: 2960a2d33b02 ("net: mvneta: fix potential double-frees in mvneta_txq_sw_deinit()") Signed-off-by: Sasha Levin --- drivers/net/ethernet/marvell/mvneta.c | 66 +++++++++++++++++++++++------------ 1 file changed, 43 insertions(+), 23 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index fd1311681200..f1a4b11ce0d1 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -542,6 +542,20 @@ struct mvneta_rx_desc { }; #endif +enum mvneta_tx_buf_type { + MVNETA_TYPE_SKB, + MVNETA_TYPE_XDP_TX, + MVNETA_TYPE_XDP_NDO, +}; + +struct mvneta_tx_buf { + enum mvneta_tx_buf_type type; + union { + struct xdp_frame *xdpf; + struct sk_buff *skb; + }; +}; + struct mvneta_tx_queue { /* Number of this TX queue, in the range 0-7 */ u8 id; @@ -557,8 +571,8 @@ struct mvneta_tx_queue { int tx_stop_threshold; int tx_wake_threshold; - /* Array of transmitted skb */ - struct sk_buff **tx_skb; + /* Array of transmitted buffers */ + struct mvneta_tx_buf *buf; /* Index of last TX DMA descriptor that was inserted */ int txq_put_index; @@ -1767,14 +1781,9 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp, int i; for (i = 0; i < num; i++) { + struct mvneta_tx_buf *buf = &txq->buf[txq->txq_get_index]; struct mvneta_tx_desc *tx_desc = txq->descs + txq->txq_get_index; - struct sk_buff *skb = txq->tx_skb[txq->txq_get_index]; - - if (skb) { - bytes_compl += skb->len; - pkts_compl++; - } mvneta_txq_inc_get(txq); @@ -1782,9 +1791,12 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp, dma_unmap_single(pp->dev->dev.parent, tx_desc->buf_phys_addr, tx_desc->data_size, DMA_TO_DEVICE); - if (!skb) + if (!buf->skb) continue; - dev_kfree_skb_any(skb); + + bytes_compl += buf->skb->len; + pkts_compl++; + dev_kfree_skb_any(buf->skb); } netdev_tx_completed_queue(nq, pkts_compl, bytes_compl); @@ -2238,16 +2250,19 @@ static inline void mvneta_tso_put_hdr(struct sk_buff *skb, struct mvneta_port *pp, struct mvneta_tx_queue *txq) { - struct mvneta_tx_desc *tx_desc; int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; + struct mvneta_tx_desc *tx_desc; - txq->tx_skb[txq->txq_put_index] = NULL; tx_desc = mvneta_txq_next_desc_get(txq); tx_desc->data_size = hdr_len; tx_desc->command = mvneta_skb_tx_csum(pp, skb); tx_desc->command |= MVNETA_TXD_F_DESC; tx_desc->buf_phys_addr = txq->tso_hdrs_phys + txq->txq_put_index * TSO_HEADER_SIZE; + buf->type = MVNETA_TYPE_SKB; + buf->skb = NULL; + mvneta_txq_inc_put(txq); } @@ -2256,6 +2271,7 @@ mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq, struct sk_buff *skb, char *data, int size, bool last_tcp, bool is_last) { + struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; struct mvneta_tx_desc *tx_desc; tx_desc = mvneta_txq_next_desc_get(txq); @@ -2269,7 +2285,8 @@ mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq, } tx_desc->command = 0; - txq->tx_skb[txq->txq_put_index] = NULL; + buf->type = MVNETA_TYPE_SKB; + buf->skb = NULL; if (last_tcp) { /* last descriptor in the TCP packet */ @@ -2277,7 +2294,7 @@ mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq, /* last descriptor in SKB */ if (is_last) - txq->tx_skb[txq->txq_put_index] = skb; + buf->skb = skb; } mvneta_txq_inc_put(txq); return 0; @@ -2362,6 +2379,7 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb, int i, nr_frags = skb_shinfo(skb)->nr_frags; for (i = 0; i 
< nr_frags; i++) { + struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; void *addr = page_address(frag->page.p) + frag->page_offset; @@ -2381,12 +2399,13 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb, if (i == nr_frags - 1) { /* Last descriptor */ tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD; - txq->tx_skb[txq->txq_put_index] = skb; + buf->skb = skb; } else { /* Descriptor in the middle: Not First, Not Last */ tx_desc->command = 0; - txq->tx_skb[txq->txq_put_index] = NULL; + buf->skb = NULL; } + buf->type = MVNETA_TYPE_SKB; mvneta_txq_inc_put(txq); } @@ -2414,6 +2433,7 @@ static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev) struct mvneta_port *pp = netdev_priv(dev); u16 txq_id = skb_get_queue_mapping(skb); struct mvneta_tx_queue *txq = &pp->txqs[txq_id]; + struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; struct mvneta_tx_desc *tx_desc; int len = skb->len; int frags = 0; @@ -2446,16 +2466,17 @@ static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev) goto out; } + buf->type = MVNETA_TYPE_SKB; if (frags == 1) { /* First and Last descriptor */ tx_cmd |= MVNETA_TXD_FLZ_DESC; tx_desc->command = tx_cmd; - txq->tx_skb[txq->txq_put_index] = skb; + buf->skb = skb; mvneta_txq_inc_put(txq); } else { /* First but not Last */ tx_cmd |= MVNETA_TXD_F_DESC; - txq->tx_skb[txq->txq_put_index] = NULL; + buf->skb = NULL; mvneta_txq_inc_put(txq); tx_desc->command = tx_cmd; /* Continue with other skb fragments */ @@ -3000,9 +3021,8 @@ static int mvneta_txq_sw_init(struct mvneta_port *pp, txq->last_desc = txq->size - 1; - txq->tx_skb = kmalloc_array(txq->size, sizeof(*txq->tx_skb), - GFP_KERNEL); - if (!txq->tx_skb) { + txq->buf = kmalloc_array(txq->size, sizeof(*txq->buf), GFP_KERNEL); + if (!txq->buf) { dma_free_coherent(pp->dev->dev.parent, txq->size * MVNETA_DESC_ALIGNED_SIZE, txq->descs, txq->descs_phys); @@ -3014,7 +3034,7 @@ static int mvneta_txq_sw_init(struct mvneta_port *pp, txq->size * TSO_HEADER_SIZE, &txq->tso_hdrs_phys, GFP_KERNEL); if (!txq->tso_hdrs) { - kfree(txq->tx_skb); + kfree(txq->buf); dma_free_coherent(pp->dev->dev.parent, txq->size * MVNETA_DESC_ALIGNED_SIZE, txq->descs, txq->descs_phys); @@ -3069,7 +3089,7 @@ static void mvneta_txq_sw_deinit(struct mvneta_port *pp, { struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); - kfree(txq->tx_skb); + kfree(txq->buf); if (txq->tso_hdrs) dma_free_coherent(pp->dev->dev.parent, -- cgit v1.2.1 From 33e60ca0384a50d10d939c2f27888a354613656c Mon Sep 17 00:00:00 2001 From: msizanoen Date: Sun, 19 Mar 2023 23:02:56 -0700 Subject: Input: alps - fix compatibility with -funsigned-char commit 754ff5060daf5a1cf4474eff9b4edeb6c17ef7ab upstream. The AlpsPS/2 code previously relied on the assumption that `char` is a signed type, which was true on x86 platforms (the only place where this driver is used) before kernel 6.2. However, on 6.2 and later, this assumption is broken due to the introduction of -funsigned-char as a new global compiler flag. Fix this by explicitly specifying the signedness of `char` when sign extending the values received from the device. 
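The breakage is easy to reproduce outside the kernel; the following small program is runnable as-is - build it once with -fsigned-char and once with -funsigned-char and compare the output (the 0xfc packet byte is an invented example of a device reporting -4):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned char packet = 0xfc;	/* device encodes -4 */

	int via_char = (char)packet;	/* -4 with -fsigned-char,
					 * 252 with -funsigned-char */
	int via_s8 = (int8_t)packet;	/* always -4, flags don't matter */

	printf("(char): %d, (s8): %d\n", via_char, via_s8);
	return 0;
}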
Fixes: f3f33c677699 ("Input: alps - Rushmore and v7 resolution support") Signed-off-by: msizanoen Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20230320045228.182259-1-msizanoen@qtmlabs.xyz Signed-off-by: Dmitry Torokhov Signed-off-by: Greg Kroah-Hartman --- drivers/input/mouse/alps.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index dd80ff6cc427..b53da6360235 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c @@ -855,8 +855,8 @@ static void alps_process_packet_v6(struct psmouse *psmouse) x = y = z = 0; /* Divide 4 since trackpoint's speed is too fast */ - input_report_rel(dev2, REL_X, (char)x / 4); - input_report_rel(dev2, REL_Y, -((char)y / 4)); + input_report_rel(dev2, REL_X, (s8)x / 4); + input_report_rel(dev2, REL_Y, -((s8)y / 4)); psmouse_report_standard_buttons(dev2, packet[3]); @@ -1107,8 +1107,8 @@ static void alps_process_trackstick_packet_v7(struct psmouse *psmouse) ((packet[3] & 0x20) << 1); z = (packet[5] & 0x3f) | ((packet[3] & 0x80) >> 1); - input_report_rel(dev2, REL_X, (char)x); - input_report_rel(dev2, REL_Y, -((char)y)); + input_report_rel(dev2, REL_X, (s8)x); + input_report_rel(dev2, REL_Y, -((s8)y)); input_report_abs(dev2, ABS_PRESSURE, z); psmouse_report_standard_buttons(dev2, packet[1]); @@ -2297,20 +2297,20 @@ static int alps_get_v3_v7_resolution(struct psmouse *psmouse, int reg_pitch) if (reg < 0) return reg; - x_pitch = (char)(reg << 4) >> 4; /* sign extend lower 4 bits */ + x_pitch = (s8)(reg << 4) >> 4; /* sign extend lower 4 bits */ x_pitch = 50 + 2 * x_pitch; /* In 0.1 mm units */ - y_pitch = (char)reg >> 4; /* sign extend upper 4 bits */ + y_pitch = (s8)reg >> 4; /* sign extend upper 4 bits */ y_pitch = 36 + 2 * y_pitch; /* In 0.1 mm units */ reg = alps_command_mode_read_reg(psmouse, reg_pitch + 1); if (reg < 0) return reg; - x_electrode = (char)(reg << 4) >> 4; /* sign extend lower 4 bits */ + x_electrode = (s8)(reg << 4) >> 4; /* sign extend lower 4 bits */ x_electrode = 17 + x_electrode; - y_electrode = (char)reg >> 4; /* sign extend upper 4 bits */ + y_electrode = (s8)reg >> 4; /* sign extend upper 4 bits */ y_electrode = 13 + y_electrode; x_phys = x_pitch * (x_electrode - 1); /* In 0.1 mm units */ -- cgit v1.2.1 From 5262777cd23b5dc7ee64e0897d63c75a2f4c63fd Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Sun, 19 Mar 2023 21:36:36 -0700 Subject: Input: focaltech - use explicitly signed char type commit 8980f190947ba29f23110408e712444884b74251 upstream. The recent change of -funsigned-char causes additions of negative numbers to become additions of large positive numbers, leading to wrong calculations of mouse movement. Change these casts to be explicitly signed, to take into account negative offsets. Fixes: 3bc753c06dd0 ("kbuild: treat char as always unsigned") Signed-off-by: Jason A. 
Donenfeld Reviewed-by: Hans de Goede Cc: stable@vger.kernel.org Link: https://bugzilla.kernel.org/show_bug.cgi?id=217211 Link: https://lore.kernel.org/r/20230318133010.1285202-1-Jason@zx2c4.com Signed-off-by: Dmitry Torokhov Signed-off-by: Greg Kroah-Hartman --- drivers/input/mouse/focaltech.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/input/mouse/focaltech.c b/drivers/input/mouse/focaltech.c index a7d39689bbfb..4bd48b81ed98 100644 --- a/drivers/input/mouse/focaltech.c +++ b/drivers/input/mouse/focaltech.c @@ -206,8 +206,8 @@ static void focaltech_process_rel_packet(struct psmouse *psmouse, state->pressed = packet[0] >> 7; finger1 = ((packet[0] >> 4) & 0x7) - 1; if (finger1 < FOC_MAX_FINGERS) { - state->fingers[finger1].x += (char)packet[1]; - state->fingers[finger1].y += (char)packet[2]; + state->fingers[finger1].x += (s8)packet[1]; + state->fingers[finger1].y += (s8)packet[2]; } else { psmouse_err(psmouse, "First finger in rel packet invalid: %d\n", finger1); @@ -222,8 +222,8 @@ static void focaltech_process_rel_packet(struct psmouse *psmouse, */ finger2 = ((packet[3] >> 4) & 0x7) - 1; if (finger2 < FOC_MAX_FINGERS) { - state->fingers[finger2].x += (char)packet[4]; - state->fingers[finger2].y += (char)packet[5]; + state->fingers[finger2].x += (s8)packet[4]; + state->fingers[finger2].y += (s8)packet[5]; } } -- cgit v1.2.1 From 44b4c1390fa547fd79c9f90ad5d48f445e10f5b3 Mon Sep 17 00:00:00 2001 From: Paulo Alcantara Date: Wed, 29 Mar 2023 17:14:22 -0300 Subject: cifs: prevent infinite recursion in CIFSGetDFSRefer() commit 09ba47b44d26b475bbdf9c80db9e0193d2b58956 upstream. We can't call smb_init() in CIFSGetDFSRefer() as cifs_reconnect_tcon() may end up calling CIFSGetDFSRefer() again to get new DFS referrals and thus causing an infinite recursion. Signed-off-by: Paulo Alcantara (SUSE) Reviewed-by: Ronnie Sahlberg Cc: stable@vger.kernel.org # 6.2 Signed-off-by: Steve French Signed-off-by: Greg Kroah-Hartman --- fs/cifs/cifssmb.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index cb70f0c6aa1b..d16fd8d1f291 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -4895,8 +4895,13 @@ CIFSGetDFSRefer(const unsigned int xid, struct cifs_ses *ses, return -ENODEV; getDFSRetry: - rc = smb_init(SMB_COM_TRANSACTION2, 15, ses->tcon_ipc, (void **) &pSMB, - (void **) &pSMBr); + /* + * Use smb_init_no_reconnect() instead of smb_init() as + * CIFSGetDFSRefer() may be called from cifs_reconnect_tcon() and thus + * causing an infinite recursion. + */ + rc = smb_init_no_reconnect(SMB_COM_TRANSACTION2, 15, ses->tcon_ipc, + (void **)&pSMB, (void **)&pSMBr); if (rc) return rc; -- cgit v1.2.1 From 8afb1fabcec1929db46977e84baeee0cc0e79242 Mon Sep 17 00:00:00 2001 From: David Disseldorp Date: Wed, 29 Mar 2023 22:24:06 +0200 Subject: cifs: fix DFS traversal oops without CONFIG_CIFS_DFS_UPCALL commit 179a88a8558bbf42991d361595281f3e45d7edfc upstream. When compiled with CONFIG_CIFS_DFS_UPCALL disabled, cifs_dfs_d_automount is NULL. cifs.ko logic for mapping CIFS_FATTR_DFS_REFERRAL attributes to S_AUTOMOUNT and corresponding dentry flags is retained regardless of CONFIG_CIFS_DFS_UPCALL, leading to a NULL pointer dereference in VFS follow_automount() when traversing a DFS referral link: BUG: kernel NULL pointer dereference, address: 0000000000000000 ... Call Trace: __traverse_mounts+0xb5/0x220 ? cifs_revalidate_mapping+0x65/0xc0 [cifs] step_into+0x195/0x610 ? 
lookup_fast+0xe2/0xf0 path_lookupat+0x64/0x140 filename_lookup+0xc2/0x140 ? __create_object+0x299/0x380 ? kmem_cache_alloc+0x119/0x220 ? user_path_at_empty+0x31/0x50 user_path_at_empty+0x31/0x50 __x64_sys_chdir+0x2a/0xd0 ? exit_to_user_mode_prepare+0xca/0x100 do_syscall_64+0x42/0x90 entry_SYSCALL_64_after_hwframe+0x72/0xdc This fix adds an inline cifs_dfs_d_automount() {return -EREMOTE} handler when CONFIG_CIFS_DFS_UPCALL is disabled. An alternative would be to avoid flagging S_AUTOMOUNT, etc. without CONFIG_CIFS_DFS_UPCALL. This approach was chosen as it provides more control over the error path. Signed-off-by: David Disseldorp Cc: stable@vger.kernel.org Reviewed-by: Paulo Alcantara (SUSE) Reviewed-by: Ronnie Sahlberg Signed-off-by: Steve French Signed-off-by: Greg Kroah-Hartman --- fs/cifs/cifsfs.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index f047e87871a1..c1d5daa4b351 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -121,7 +121,10 @@ extern const struct dentry_operations cifs_ci_dentry_ops; #ifdef CONFIG_CIFS_DFS_UPCALL extern struct vfsmount *cifs_dfs_d_automount(struct path *path); #else -#define cifs_dfs_d_automount NULL +static inline struct vfsmount *cifs_dfs_d_automount(struct path *path) +{ + return ERR_PTR(-EREMOTE); +} #endif /* Functions related to symlinks */ -- cgit v1.2.1 From e14369897067183801aac7c14b03c4847ad3f683 Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Mon, 27 Mar 2023 10:36:45 +0200 Subject: xen/netback: don't do grant copy across page boundary commit 05310f31ca74673a96567fb14637b7d5d6c82ea5 upstream. Fix xenvif_get_requests() not to do grant copy operations across local page boundaries. This requires doubling the maximum number of copy operations per queue, as each copy could now be split into 2. Make sure that struct xenvif_tx_cb doesn't grow too large.
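The split arithmetic itself is simple; here is a hedged user-space sketch (clamp_copy is an invented helper, only XEN_PAGE_SIZE matches the real constant) of the rule the patch adds: a copy whose destination would cross a page boundary is clamped to the boundary and the remainder becomes a second copy op, which is why the tx_copy_ops array is doubled.

#include <stdio.h>

#define XEN_PAGE_SIZE 4096u

/* How much of 'amount' may be copied without crossing the page that
 * dest_off lives in; *split says a follow-up op is needed. */
static unsigned int clamp_copy(unsigned int dest_off, unsigned int amount,
			       int *split)
{
	if (dest_off + amount > XEN_PAGE_SIZE) {
		*split = 1;
		return XEN_PAGE_SIZE - dest_off;
	}
	*split = 0;
	return amount;
}

int main(void)
{
	int split;
	unsigned int first = clamp_copy(4000, 200, &split);

	/* 96 bytes now, 104 in a second op: one request, two copies */
	printf("first=%u split=%d rest=%u\n", first, split, 200 - first);
	return 0;
}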
Cc: stable@vger.kernel.org Fixes: ad7f402ae4f4 ("xen/netback: Ensure protocol headers don't fall in the non-linear area") Signed-off-by: Juergen Gross Reviewed-by: Paul Durrant Signed-off-by: Paolo Abeni Signed-off-by: Greg Kroah-Hartman --- drivers/net/xen-netback/common.h | 2 +- drivers/net/xen-netback/netback.c | 25 +++++++++++++++++++++++-- 2 files changed, 24 insertions(+), 3 deletions(-) diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index 92d30ebdb111..2b984c5bae24 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h @@ -166,7 +166,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */ struct pending_tx_info pending_tx_info[MAX_PENDING_REQS]; grant_handle_t grant_tx_handle[MAX_PENDING_REQS]; - struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS]; + struct gnttab_copy tx_copy_ops[2 * MAX_PENDING_REQS]; struct gnttab_map_grant_ref tx_map_ops[MAX_PENDING_REQS]; struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS]; /* passed to gnttab_[un]map_refs with pages under (un)mapping */ diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index fc389f2bba7a..ed644b6824ce 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -327,6 +327,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue, struct xenvif_tx_cb { u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1]; u8 copy_count; + u32 split_mask; }; #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb) @@ -354,6 +355,8 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size) struct sk_buff *skb = alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN, GFP_ATOMIC | __GFP_NOWARN); + + BUILD_BUG_ON(sizeof(*XENVIF_TX_CB(skb)) > sizeof(skb->cb)); if (unlikely(skb == NULL)) return NULL; @@ -389,11 +392,13 @@ static void xenvif_get_requests(struct xenvif_queue *queue, nr_slots = shinfo->nr_frags + 1; copy_count(skb) = 0; + XENVIF_TX_CB(skb)->split_mask = 0; /* Create copy ops for exactly data_len bytes into the skb head. */ __skb_put(skb, data_len); while (data_len > 0) { int amount = data_len > txp->size ? txp->size : data_len; + bool split = false; cop->source.u.ref = txp->gref; cop->source.domid = queue->vif->domid; @@ -406,6 +411,13 @@ static void xenvif_get_requests(struct xenvif_queue *queue, cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb) - data_len); + /* Don't cross local page boundary! */ + if (cop->dest.offset + amount > XEN_PAGE_SIZE) { + amount = XEN_PAGE_SIZE - cop->dest.offset; + XENVIF_TX_CB(skb)->split_mask |= 1U << copy_count(skb); + split = true; + } + cop->len = amount; cop->flags = GNTCOPY_source_gref; @@ -413,7 +425,8 @@ static void xenvif_get_requests(struct xenvif_queue *queue, pending_idx = queue->pending_ring[index]; callback_param(queue, pending_idx).ctx = NULL; copy_pending_idx(skb, copy_count(skb)) = pending_idx; - copy_count(skb)++; + if (!split) + copy_count(skb)++; cop++; data_len -= amount; @@ -434,7 +447,8 @@ static void xenvif_get_requests(struct xenvif_queue *queue, nr_slots--; } else { /* The copy op partially covered the tx_request. - * The remainder will be mapped. + * The remainder will be mapped or copied in the next + * iteration. */ txp->offset += amount; txp->size -= amount; @@ -532,6 +546,13 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue, pending_idx = copy_pending_idx(skb, i); newerr = (*gopp_copy)->status; + + /* Split copies need to be handled together. 
*/ + if (XENVIF_TX_CB(skb)->split_mask & (1U << i)) { + (*gopp_copy)++; + if (!newerr) + newerr = (*gopp_copy)->status; + } if (likely(!newerr)) { /* The first frag might still have this slot mapped */ if (i < copy_count(skb) - 1 || !sharedslot) -- cgit v1.2.1 From 628bbce0cee68f961953fc980d33bc50322b1f7e Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Fri, 24 Feb 2023 14:08:28 +0100 Subject: pinctrl: at91-pio4: fix domain name assignment commit 7bb97e360acdd38b68ad0a1defb89c6e89c85596 upstream. Since commit d59f6617eef0 ("genirq: Allow fwnode to carry name information only") an IRQ domain is always given a name during allocation (e.g. used for the debugfs entry). Drop the no longer valid name assignment, which would lead to an attempt to free a string constant when removing the domain on late probe failures (e.g. probe deferral). Fixes: d59f6617eef0 ("genirq: Allow fwnode to carry name information only") Cc: stable@vger.kernel.org # 4.13 Signed-off-by: Johan Hovold Reviewed-by: Claudiu Beznea Tested-by: Claudiu Beznea # on SAMA7G5 Link: https://lore.kernel.org/r/20230224130828.27985-1-johan+linaro@kernel.org Signed-off-by: Linus Walleij Signed-off-by: Greg Kroah-Hartman --- drivers/pinctrl/pinctrl-at91-pio4.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c index 89d88e447d44..5b883eb49ce9 100644 --- a/drivers/pinctrl/pinctrl-at91-pio4.c +++ b/drivers/pinctrl/pinctrl-at91-pio4.c @@ -1080,7 +1080,6 @@ static int atmel_pinctrl_probe(struct platform_device *pdev) dev_err(dev, "can't add the irq domain\n"); return -ENODEV; } - atmel_pioctrl->irq_domain->name = "atmel gpio"; for (i = 0; i < atmel_pioctrl->npins; i++) { int irq = irq_create_mapping(atmel_pioctrl->irq_domain, i); -- cgit v1.2.1 From 77cd8eda551dc8b1988d9bc9789b8806845edc20 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Mon, 20 Mar 2023 15:09:54 +0100 Subject: ALSA: hda/conexant: Partial revert of a quirk for Lenovo commit b871cb971c683f7f212e7ca3c9a6709a75785116 upstream. The recent commit f83bb2592482 ("ALSA: hda/conexant: Add quirk for LENOVO 20149 Notebook model") introduced a quirk for the device with 17aa:3977, but this caused a regression on another model (Lenovo Ideapad U31) with the very same PCI SSID. And, through skimming over the net, it seems that this PCI SSID is used for multiple different models, so it's not a good idea to apply the quirk based on the SSID. Although we could use a different ID check (e.g. the codec SSID instead of the PCI SSID), unfortunately, the original patch author couldn't identify the hardware details any longer as the machine was returned, so we can't develop a proper fix any further. In this patch, instead, we partially revert the change so that the quirk won't be applied by default, addressing the regression. Meanwhile, the quirk function itself is kept, and it can now be applied via the explicit model=lenovo-20149 option.
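[ Usage sketch, assuming the standard HD-audio model override mechanism rather than anything stated in the patch itself: users who still want the quirk can request it explicitly with a modprobe option, e.g.

    # /etc/modprobe.d/cxt-lenovo-20149.conf (hypothetical file name)
    options snd-hda-intel model=lenovo-20149

The model= parameter is matched against the .name strings in cxt5066_fixup_models[], so this selects CXT_PINCFG_LENOVO_NOTEBOOK regardless of the PCI SSID. ]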
Fixes: f83bb2592482 ("ALSA: hda/conexant: Add quirk for LENOVO 20149 Notebook model") Reported-by: Jetro Jormalainen Link: https://lore.kernel.org/r/20230308215009.4d3e58a6@mopti Cc: Link: https://lore.kernel.org/r/20230320140954.31154-1-tiwai@suse.de Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/pci/hda/patch_conexant.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 69f88d3abf50..cfa958dc2dd5 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -952,7 +952,10 @@ static const struct snd_pci_quirk cxt5066_fixups[] = { SND_PCI_QUIRK(0x17aa, 0x3905, "Lenovo G50-30", CXT_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x17aa, 0x390b, "Lenovo G50-80", CXT_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC), - SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_PINCFG_LENOVO_NOTEBOOK), + /* NOTE: we'd need to extend the quirk for 17aa:3977 as the same + * PCI SSID is used on multiple Lenovo models + */ + SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo G50-70", CXT_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC), SND_PCI_QUIRK_VENDOR(0x17aa, "Thinkpad", CXT_FIXUP_THINKPAD_ACPI), @@ -974,6 +977,7 @@ static const struct hda_model_fixup cxt5066_fixup_models[] = { { .id = CXT_FIXUP_HP_DOCK, .name = "hp-dock" }, { .id = CXT_FIXUP_MUTE_LED_GPIO, .name = "mute-led-gpio" }, { .id = CXT_FIXUP_HP_MIC_NO_PRESENCE, .name = "hp-mic-fix" }, + { .id = CXT_PINCFG_LENOVO_NOTEBOOK, .name = "lenovo-20149" }, {} }; -- cgit v1.2.1 From d3d1c1bb49bf43beff89d9abd6ba7f6eeacc635d Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Fri, 24 Mar 2023 08:50:05 +0100 Subject: ALSA: usb-audio: Fix regression on detection of Roland VS-100 commit fa4e7a6fa12b1132340785e14bd439cbe95b7a5a upstream. It's been reported that the recent kernel can't probe the PCM devices on Roland VS-100 properly, and it turned out to be a regression caused by the recent addition of the bit shift range check for the format bits. In the old code, we just did the bit shift and it resulted in zero, which was then corrected to the standard PCM format, while the new code explicitly returns an error in such a case. To address the regression, relax the check and fall back to the standard PCM type (with the info output).
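[ Context from the code visible in the diff below: the format value is used as a shift count to build a 64-bit format bitmap (format = 1ULL << format), so any value of 64 or larger would be an out-of-bounds shift; the fix keeps the range check but substitutes UAC_FORMAT_TYPE_I_PCM with an info message instead of rejecting the descriptor outright. ]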
Fixes: 43d5ca88dfcd ("ALSA: usb-audio: Fix potential out-of-bounds shift") Cc: Link: https://bugzilla.kernel.org/show_bug.cgi?id=217084 Link: https://lore.kernel.org/r/20230324075005.19403-1-tiwai@suse.de Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/usb/format.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/sound/usb/format.c b/sound/usb/format.c index 01ba7a939ac4..342d6edb06ad 100644 --- a/sound/usb/format.c +++ b/sound/usb/format.c @@ -53,8 +53,12 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip, case UAC_VERSION_1: default: { struct uac_format_type_i_discrete_descriptor *fmt = _fmt; - if (format >= 64) - return 0; /* invalid format */ + if (format >= 64) { + usb_audio_info(chip, + "%u:%d: invalid format type 0x%llx is detected, processed as PCM\n", + fp->iface, fp->altsetting, format); + format = UAC_FORMAT_TYPE_I_PCM; + } sample_width = fmt->bBitResolution; sample_bytes = fmt->bSubframeSize; format = 1ULL << format; -- cgit v1.2.1 From 0838cb217a5229a882f0c6da7e0739b27b44bd1f Mon Sep 17 00:00:00 2001 From: Lucas Stach Date: Fri, 24 Feb 2023 18:21:54 +0100 Subject: drm/etnaviv: fix reference leak when mmaping imported buffer commit 963b2e8c428f79489ceeb058e8314554ec9cbe6f upstream. drm_gem_prime_mmap() takes a reference on the GEM object, but before that drm_gem_mmap_obj() already takes a reference, which will be leaked as only one reference is dropped when the mapping is closed. Drop the extra reference when dma_buf_mmap() succeeds. Cc: stable@vger.kernel.org Signed-off-by: Lucas Stach Reviewed-by: Christian Gmeiner Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c index f21529e635e3..a9506a390f98 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c @@ -91,7 +91,15 @@ static void *etnaviv_gem_prime_vmap_impl(struct etnaviv_gem_object *etnaviv_obj) static int etnaviv_gem_prime_mmap_obj(struct etnaviv_gem_object *etnaviv_obj, struct vm_area_struct *vma) { - return dma_buf_mmap(etnaviv_obj->base.dma_buf, vma, 0); + int ret; + + ret = dma_buf_mmap(etnaviv_obj->base.dma_buf, vma, 0); + if (!ret) { + /* Drop the reference acquired by drm_gem_mmap_obj(). */ + drm_gem_object_put(&etnaviv_obj->base); + } + + return ret; } static const struct etnaviv_gem_ops etnaviv_gem_prime_ops = { -- cgit v1.2.1 From 76d41dc2622f4947066df1cd1fd35e4274ee4ecc Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Thu, 23 Mar 2023 13:09:16 +0100 Subject: s390/uaccess: add missing earlyclobber annotations to __clear_user() commit 89aba4c26fae4e459f755a18912845c348ee48f3 upstream. Add missing earlyclobber annotation to size, to, and tmp2 operands of the __clear_user() inline assembly since they are modified or written to before the last usage of all input operands. This can lead to incorrect register allocation for the inline assembly. 
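[ Background, with a generic x86 illustration that is not from the patch: the '&' modifier marks an output operand as earlyclobber, i.e. written before the inputs are last read, so the compiler must not assign it a register that an input still occupies:

    int in = 1, out;

    asm("xorl %0, %0\n\t"	/* writes 'out' early ... */
        "addl %1, %0"	/* ... before 'in' is read */
        : "=&r" (out)	/* '&' forbids sharing a register with 'in' */
        : "r" (in));

Without the '&', 'out' and 'in' may share a register; the xorl would then zero the input and the result would be 0 instead of 1. ]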
Fixes: 6c2a9e6df604 ("[S390] Use alternative user-copy operations for new hardware.") Reported-by: Mark Rutland Link: https://lore.kernel.org/all/20230321122514.1743889-3-mark.rutland@arm.com/ Cc: stable@vger.kernel.org Reviewed-by: Gerald Schaefer Signed-off-by: Heiko Carstens Signed-off-by: Vasily Gorbik Signed-off-by: Greg Kroah-Hartman --- arch/s390/lib/uaccess.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c index 0267405ab7c6..fcfd78f99cb4 100644 --- a/arch/s390/lib/uaccess.c +++ b/arch/s390/lib/uaccess.c @@ -339,7 +339,7 @@ static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size "4: slgr %0,%0\n" "5:\n" EX_TABLE(0b,2b) EX_TABLE(3b,5b) - : "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2) + : "+&a" (size), "+&a" (to), "+a" (tmp1), "=&a" (tmp2) : "a" (empty_zero_page), "d" (reg0) : "cc", "memory"); return size; } -- cgit v1.2.1 From b27dd264e67bc8b6883d169ab2da5dfc3398740d Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Sat, 17 Aug 2019 09:55:20 +0300 Subject: usb: host: ohci-pxa27x: Fix an & vs | typo commit 0709831a50d31b3caf2237e8d7fe89e15b0d919d upstream. The code is supposed to clear the RH_A_NPS and RH_A_PSM bits, but it's a no-op because of the & vs | typo. This bug predates git and it was only discovered using static analysis so it must not affect too many people in real life. Signed-off-by: Dan Carpenter Acked-by: Alan Stern Link: https://lore.kernel.org/r/20190817065520.GA29951@mwanda Signed-off-by: Nobuhiro Iwamatsu (CIP) Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/ohci-pxa27x.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c index 3e2474959735..7679fb583e41 100644 --- a/drivers/usb/host/ohci-pxa27x.c +++ b/drivers/usb/host/ohci-pxa27x.c @@ -148,7 +148,7 @@ static int pxa27x_ohci_select_pmm(struct pxa27x_ohci *pxa_ohci, int mode) uhcrhda |= RH_A_NPS; break; case PMM_GLOBAL_MODE: - uhcrhda &= ~(RH_A_NPS & RH_A_PSM); + uhcrhda &= ~(RH_A_NPS | RH_A_PSM); break; case PMM_PERPORT_MODE: uhcrhda &= ~(RH_A_NPS); -- cgit v1.2.1 From 53bb0d3e0a3dfc9649add8133f1ecd9c1bc2dd70 Mon Sep 17 00:00:00 2001 From: Ye Bin Date: Tue, 6 Dec 2022 22:41:34 +0800 Subject: ext4: fix kernel BUG in 'ext4_write_inline_data_end()' commit 5c099c4fdc438014d5893629e70a8ba934433ee8 upstream. Syzbot reported the following issue: ------------[ cut here ]------------ kernel BUG at fs/ext4/inline.c:227!
invalid opcode: 0000 [#1] PREEMPT SMP KASAN CPU: 1 PID: 3629 Comm: syz-executor212 Not tainted 6.1.0-rc5-syzkaller-00018-g59d0d52c30d4 #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 10/26/2022 RIP: 0010:ext4_write_inline_data+0x344/0x3e0 fs/ext4/inline.c:227 RSP: 0018:ffffc90003b3f368 EFLAGS: 00010293 RAX: 0000000000000000 RBX: ffff8880704e16c0 RCX: 0000000000000000 RDX: ffff888021763a80 RSI: ffffffff821e31a4 RDI: 0000000000000006 RBP: 000000000006818e R08: 0000000000000006 R09: 0000000000068199 R10: 0000000000000079 R11: 0000000000000000 R12: 000000000000000b R13: 0000000000068199 R14: ffffc90003b3f408 R15: ffff8880704e1c82 FS: 000055555723e3c0(0000) GS:ffff8880b9b00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007fffe8ac9080 CR3: 0000000079f81000 CR4: 0000000000350ee0 Call Trace: ext4_write_inline_data_end+0x2a3/0x12f0 fs/ext4/inline.c:768 ext4_write_end+0x242/0xdd0 fs/ext4/inode.c:1313 ext4_da_write_end+0x3ed/0xa30 fs/ext4/inode.c:3063 generic_perform_write+0x316/0x570 mm/filemap.c:3764 ext4_buffered_write_iter+0x15b/0x460 fs/ext4/file.c:285 ext4_file_write_iter+0x8bc/0x16e0 fs/ext4/file.c:700 call_write_iter include/linux/fs.h:2191 [inline] do_iter_readv_writev+0x20b/0x3b0 fs/read_write.c:735 do_iter_write+0x182/0x700 fs/read_write.c:861 vfs_iter_write+0x74/0xa0 fs/read_write.c:902 iter_file_splice_write+0x745/0xc90 fs/splice.c:686 do_splice_from fs/splice.c:764 [inline] direct_splice_actor+0x114/0x180 fs/splice.c:931 splice_direct_to_actor+0x335/0x8a0 fs/splice.c:886 do_splice_direct+0x1ab/0x280 fs/splice.c:974 do_sendfile+0xb19/0x1270 fs/read_write.c:1255 __do_sys_sendfile64 fs/read_write.c:1323 [inline] __se_sys_sendfile64 fs/read_write.c:1309 [inline] __x64_sys_sendfile64+0x1d0/0x210 fs/read_write.c:1309 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x39/0xb0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x63/0xcd ---[ end trace 0000000000000000 ]--- Above issue may happens as follows: ext4_da_write_begin ext4_da_write_inline_data_begin ext4_da_convert_inline_data_to_extent ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA); ext4_da_write_end ext4_run_li_request ext4_mb_prefetch ext4_read_block_bitmap_nowait ext4_validate_block_bitmap ext4_mark_group_bitmap_corrupted(sb, block_group, EXT4_GROUP_INFO_BBITMAP_CORRUPT) percpu_counter_sub(&sbi->s_freeclusters_counter,grp->bb_free); -> sbi->s_freeclusters_counter become zero ext4_da_write_begin if (ext4_nonda_switch(inode->i_sb)) -> As freeclusters_counter is zero will return true *fsdata = (void *)FALL_BACK_TO_NONDELALLOC; ext4_write_begin ext4_da_write_end if (write_mode == FALL_BACK_TO_NONDELALLOC) ext4_write_end if (inline_data) ext4_write_inline_data_end ext4_write_inline_data BUG_ON(pos + len > EXT4_I(inode)->i_inline_size); -> As inode is already convert to extent, so 'pos + len' > inline_size -> then trigger BUG. To solve this issue, instead of checking ext4_has_inline_data() which is only cleared after data has been written back, check the EXT4_STATE_MAY_INLINE_DATA flag in ext4_write_end(). 
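[ Why the state flag is the right guard, per the trace above: ext4_has_inline_data() stays set until the inline data has actually been written back, whereas EXT4_STATE_MAY_INLINE_DATA is cleared synchronously in ext4_da_convert_inline_data_to_extent(), so testing the latter closes the window in which the inode has already been converted to extents. ]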
Fixes: f19d5870cbf7 ("ext4: add normal write support for inline data") Reported-by: syzbot+4faa160fa96bfba639f8@syzkaller.appspotmail.com Reported-by: Jun Nie Signed-off-by: Ye Bin Link: https://lore.kernel.org/r/20221206144134.1919987-1-yebin@huaweicloud.com Signed-off-by: Theodore Ts'o Cc: stable@kernel.org [ta: Fix conflict in if expression and use the local variable inline_data as it is initialized with ext4_has_inline_data(inode) anyway.] Signed-off-by: Tudor Ambarus Signed-off-by: Greg Kroah-Hartman --- fs/ext4/inode.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index e844d91c461b..7aaf4dafd3e7 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -1428,7 +1428,8 @@ static int ext4_write_end(struct file *file, int inline_data = ext4_has_inline_data(inode); trace_ext4_write_end(inode, pos, len, copied); - if (inline_data) { + if (inline_data && + ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) { ret = ext4_write_inline_data_end(inode, pos, len, copied, page); if (ret < 0) { -- cgit v1.2.1 From 51a8f136adc99db604c50a545328e0fa5047e472 Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Tue, 7 Mar 2023 16:23:24 +0000 Subject: firmware: arm_scmi: Fix device node validation for mailbox transport commit 2ab4f4018cb6b8010ca5002c3bdc37783b5d28c2 upstream. When mailboxes are used as a transport it is possible to setup the SCMI transport layer, depending on the underlying channels configuration, to use one or two mailboxes, associated, respectively, to one or two, distinct, shared memory areas: any other combination should be treated as invalid. Add more strict checking of SCMI mailbox transport device node descriptors. Fixes: 5c8a47a5a91d ("firmware: arm_scmi: Make scmi core independent of the transport type") Cc: # 4.19 Signed-off-by: Cristian Marussi Link: https://lore.kernel.org/r/20230307162324.891866-1-cristian.marussi@arm.com Signed-off-by: Sudeep Holla [Cristian: backported to v4.19] Signed-off-by: Cristian Marussi Signed-off-by: Greg Kroah-Hartman --- drivers/firmware/arm_scmi/driver.c | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index e8cd66705ad7..5ccbbb3eb68e 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -705,6 +705,39 @@ static int scmi_remove(struct platform_device *pdev) return ret; } +static int scmi_mailbox_chan_validate(struct device *cdev) +{ + int num_mb, num_sh, ret = 0; + struct device_node *np = cdev->of_node; + + num_mb = of_count_phandle_with_args(np, "mboxes", "#mbox-cells"); + num_sh = of_count_phandle_with_args(np, "shmem", NULL); + /* Bail out if mboxes and shmem descriptors are inconsistent */ + if (num_mb <= 0 || num_sh > 2 || num_mb != num_sh) { + dev_warn(cdev, "Invalid channel descriptor for '%s'\n", + of_node_full_name(np)); + return -EINVAL; + } + + if (num_sh > 1) { + struct device_node *np_tx, *np_rx; + + np_tx = of_parse_phandle(np, "shmem", 0); + np_rx = of_parse_phandle(np, "shmem", 1); + /* SCMI Tx and Rx shared mem areas have to be distinct */ + if (!np_tx || !np_rx || np_tx == np_rx) { + dev_warn(cdev, "Invalid shmem descriptor for '%s'\n", + of_node_full_name(np)); + ret = -EINVAL; + } + + of_node_put(np_tx); + of_node_put(np_rx); + } + + return ret; +} + static inline int scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev, int prot_id) { @@ -720,6 +753,10 @@ scmi_mbox_chan_setup(struct scmi_info *info, 
struct device *dev, int prot_id) goto idr_alloc; } + ret = scmi_mailbox_chan_validate(dev); + if (ret) + return ret; + cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL); if (!cinfo) return -ENOMEM; -- cgit v1.2.1 From 45df749f827c286adbc951f2a4865b67f0442ba9 Mon Sep 17 00:00:00 2001 From: Andreas Gruenbacher Date: Sun, 4 Dec 2022 17:00:04 +0100 Subject: gfs2: Always check inode size of inline inodes commit 70376c7ff31221f1d21db5611d8209e677781d3a upstream. Check if the inode size of stuffed (inline) inodes is within the allowed range when reading inodes from disk (gfs2_dinode_in()). This protects us from on-disk corruption. The two checks in stuffed_readpage() and gfs2_unstuffer_page() that just truncate inline data to the maximum allowed size don't actually make sense, and they can be removed now as well. Reported-by: syzbot+7bb81dfa9cda07d9cd9d@syzkaller.appspotmail.com Signed-off-by: Andreas Gruenbacher [pchelkin@ispras.ru: adjust the inode variable inside gfs2_dinode_in with the format used before upstream commit 7db354444ad8 ("gfs2: Cosmetic gfs2_dinode_{in,out} cleanup")] Signed-off-by: Fedor Pchelkin Signed-off-by: Greg Kroah-Hartman --- fs/gfs2/aops.c | 2 -- fs/gfs2/bmap.c | 3 --- fs/gfs2/glops.c | 3 +++ 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c index c5390421cca2..d9866d89f2fb 100644 --- a/fs/gfs2/aops.c +++ b/fs/gfs2/aops.c @@ -480,8 +480,6 @@ int stuffed_readpage(struct gfs2_inode *ip, struct page *page) return error; kaddr = kmap_atomic(page); - if (dsize > gfs2_max_stuffed_size(ip)) - dsize = gfs2_max_stuffed_size(ip); memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize); memset(kaddr + dsize, 0, PAGE_SIZE - dsize); kunmap_atomic(kaddr); diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c index 150cec85c416..ccafd45b63f6 100644 --- a/fs/gfs2/bmap.c +++ b/fs/gfs2/bmap.c @@ -72,9 +72,6 @@ static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh, void *kaddr = kmap(page); u64 dsize = i_size_read(inode); - if (dsize > gfs2_max_stuffed_size(ip)) - dsize = gfs2_max_stuffed_size(ip); - memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize); memset(kaddr + dsize, 0, PAGE_SIZE - dsize); kunmap(page); diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c index 20f08f4391c9..a7a423adf7c8 100644 --- a/fs/gfs2/glops.c +++ b/fs/gfs2/glops.c @@ -388,6 +388,9 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf) ip->i_depth = (u8)depth; ip->i_entries = be32_to_cpu(str->di_entries); + if (gfs2_is_stuffed(ip) && ip->i_inode.i_size > gfs2_max_stuffed_size(ip)) + goto corrupt; + if (S_ISREG(ip->i_inode.i_mode)) gfs2_set_aops(&ip->i_inode); -- cgit v1.2.1 From 8ed4c82571d848d76877c4d70687686e607766e3 Mon Sep 17 00:00:00 2001 From: Jamal Hadi Salim Date: Sun, 1 Jan 2023 16:57:44 -0500 Subject: net: sched: cbq: don't interpret cls results when asked to drop commit caa4b35b4317d5147b3ab0fbdc9c075c7d2e9c12 upstream.
If asked to drop a packet via TC_ACT_SHOT it is unsafe to assume that res.class contains a valid pointer Sample splat reported by Kyle Zeng [ 5.405624] 0: reclassify loop, rule prio 0, protocol 800 [ 5.406326] ================================================================== [ 5.407240] BUG: KASAN: slab-out-of-bounds in cbq_enqueue+0x54b/0xea0 [ 5.407987] Read of size 1 at addr ffff88800e3122aa by task poc/299 [ 5.408731] [ 5.408897] CPU: 0 PID: 299 Comm: poc Not tainted 5.10.155+ #15 [ 5.409516] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.15.0-1 04/01/2014 [ 5.410439] Call Trace: [ 5.410764] dump_stack+0x87/0xcd [ 5.411153] print_address_description+0x7a/0x6b0 [ 5.411687] ? vprintk_func+0xb9/0xc0 [ 5.411905] ? printk+0x76/0x96 [ 5.412110] ? cbq_enqueue+0x54b/0xea0 [ 5.412323] kasan_report+0x17d/0x220 [ 5.412591] ? cbq_enqueue+0x54b/0xea0 [ 5.412803] __asan_report_load1_noabort+0x10/0x20 [ 5.413119] cbq_enqueue+0x54b/0xea0 [ 5.413400] ? __kasan_check_write+0x10/0x20 [ 5.413679] __dev_queue_xmit+0x9c0/0x1db0 [ 5.413922] dev_queue_xmit+0xc/0x10 [ 5.414136] ip_finish_output2+0x8bc/0xcd0 [ 5.414436] __ip_finish_output+0x472/0x7a0 [ 5.414692] ip_finish_output+0x5c/0x190 [ 5.414940] ip_output+0x2d8/0x3c0 [ 5.415150] ? ip_mc_finish_output+0x320/0x320 [ 5.415429] __ip_queue_xmit+0x753/0x1760 [ 5.415664] ip_queue_xmit+0x47/0x60 [ 5.415874] __tcp_transmit_skb+0x1ef9/0x34c0 [ 5.416129] tcp_connect+0x1f5e/0x4cb0 [ 5.416347] tcp_v4_connect+0xc8d/0x18c0 [ 5.416577] __inet_stream_connect+0x1ae/0xb40 [ 5.416836] ? local_bh_enable+0x11/0x20 [ 5.417066] ? lock_sock_nested+0x175/0x1d0 [ 5.417309] inet_stream_connect+0x5d/0x90 [ 5.417548] ? __inet_stream_connect+0xb40/0xb40 [ 5.417817] __sys_connect+0x260/0x2b0 [ 5.418037] __x64_sys_connect+0x76/0x80 [ 5.418267] do_syscall_64+0x31/0x50 [ 5.418477] entry_SYSCALL_64_after_hwframe+0x61/0xc6 [ 5.418770] RIP: 0033:0x473bb7 [ 5.418952] Code: 64 89 01 48 83 c8 ff c3 66 2e 0f 1f 84 00 00 00 00 00 90 f3 0f 1e fa 64 8b 04 25 18 00 00 00 85 c0 75 10 b8 2a 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 51 c3 48 83 ec 18 89 54 24 0c 48 89 34 24 89 [ 5.420046] RSP: 002b:00007fffd20eb0f8 EFLAGS: 00000246 ORIG_RAX: 000000000000002a [ 5.420472] RAX: ffffffffffffffda RBX: 00007fffd20eb578 RCX: 0000000000473bb7 [ 5.420872] RDX: 0000000000000010 RSI: 00007fffd20eb110 RDI: 0000000000000007 [ 5.421271] RBP: 00007fffd20eb150 R08: 0000000000000001 R09: 0000000000000004 [ 5.421671] R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000001 [ 5.422071] R13: 00007fffd20eb568 R14: 00000000004fc740 R15: 0000000000000002 [ 5.422471] [ 5.422562] Allocated by task 299: [ 5.422782] __kasan_kmalloc+0x12d/0x160 [ 5.423007] kasan_kmalloc+0x5/0x10 [ 5.423208] kmem_cache_alloc_trace+0x201/0x2e0 [ 5.423492] tcf_proto_create+0x65/0x290 [ 5.423721] tc_new_tfilter+0x137e/0x1830 [ 5.423957] rtnetlink_rcv_msg+0x730/0x9f0 [ 5.424197] netlink_rcv_skb+0x166/0x300 [ 5.424428] rtnetlink_rcv+0x11/0x20 [ 5.424639] netlink_unicast+0x673/0x860 [ 5.424870] netlink_sendmsg+0x6af/0x9f0 [ 5.425100] __sys_sendto+0x58d/0x5a0 [ 5.425315] __x64_sys_sendto+0xda/0xf0 [ 5.425539] do_syscall_64+0x31/0x50 [ 5.425764] entry_SYSCALL_64_after_hwframe+0x61/0xc6 [ 5.426065] [ 5.426157] The buggy address belongs to the object at ffff88800e312200 [ 5.426157] which belongs to the cache kmalloc-128 of size 128 [ 5.426955] The buggy address is located 42 bytes to the right of [ 5.426955] 128-byte region [ffff88800e312200, ffff88800e312280) [ 5.427688] The buggy address belongs to the page: [ 5.427992] 
page:000000009875fabc refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0xe312 [ 5.428562] flags: 0x100000000000200(slab) [ 5.428812] raw: 0100000000000200 dead000000000100 dead000000000122 ffff888007843680 [ 5.429325] raw: 0000000000000000 0000000000100010 00000001ffffffff ffff88800e312401 [ 5.429875] page dumped because: kasan: bad access detected [ 5.430214] page->mem_cgroup:ffff88800e312401 [ 5.430471] [ 5.430564] Memory state around the buggy address: [ 5.430846] ffff88800e312180: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc [ 5.431267] ffff88800e312200: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 fc [ 5.431705] >ffff88800e312280: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc [ 5.432123] ^ [ 5.432391] ffff88800e312300: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 fc [ 5.432810] ffff88800e312380: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc [ 5.433229] ================================================================== [ 5.433648] Disabling lock debugging due to kernel taint Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Reported-by: Kyle Zeng Signed-off-by: Jamal Hadi Salim Signed-off-by: David S. Miller Signed-off-by: Harshit Mogalapalli Signed-off-by: Greg Kroah-Hartman --- net/sched/sch_cbq.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 0a76ad05e5ae..2974f7262f88 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -236,6 +236,8 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) result = tcf_classify(skb, fl, &res, true); if (!fl || result < 0) goto fallback; + if (result == TC_ACT_SHOT) + return NULL; cl = (void *)res.class; if (!cl) { @@ -256,8 +258,6 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) case TC_ACT_TRAP: *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; /* fall through */ - case TC_ACT_SHOT: - return NULL; case TC_ACT_RECLASSIFY: return cbq_reclassify(skb, cl); } -- cgit v1.2.1 From 224262583fabf3b6bf2a29d033cf9a8f28fde843 Mon Sep 17 00:00:00 2001 From: Juri Lelli Date: Mon, 20 Mar 2023 01:15:05 +0000 Subject: cgroup/cpuset: Change cpuset_rwsem and hotplug lock order commit d74b27d63a8bebe2fe634944e4ebdc7b10db7a39 upstream. commit 1243dc518c9da ("cgroup/cpuset: Convert cpuset_mutex to percpu_rwsem") is a performance patch which has not been backported, so the backport converts percpu_rwsem back to cpuset_mutex. commit aa44002e7db25 ("cpuset: Fix unsafe lock order between cpuset lock and cpuslock") keeps the lock order cpuset_mutex -> cpu_hotplug_lock, so the lock order in cpuset_attach() needs to be changed. original commit message: cpuset_rwsem is going to be acquired from sched_setscheduler() with a following patch. There are however paths (e.g., spawn_ksoftirqd) in which sched_setscheduler() is eventually called while holding hotplug lock; this creates a dependency between hotplug lock (to be always acquired first) and cpuset_rwsem (to be always acquired after hotplug lock). Fix paths which currently take the two locks in the wrong order (after a following patch is applied).
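[ The resulting nesting rule, sketched as the pattern every write-side path in the diff below now follows:

    get_online_cpus();		/* cpu_hotplug_lock first ... */
    mutex_lock(&cpuset_mutex);	/* ... cpuset_mutex second */
    /* modify cpusets */
    mutex_unlock(&cpuset_mutex);
    put_online_cpus();

rebuild_sched_domains_locked() can then rely on lockdep_assert_cpus_held() instead of taking the cpus lock itself. ]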
Tested-by: Dietmar Eggemann Signed-off-by: Juri Lelli Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: bristot@redhat.com Cc: claudio@evidence.eu.com Cc: lizefan@huawei.com Cc: longman@redhat.com Cc: luca.abeni@santannapisa.it Cc: mathieu.poirier@linaro.org Cc: rostedt@goodmis.org Cc: tj@kernel.org Cc: tommaso.cucinotta@santannapisa.it Link: https://lkml.kernel.org/r/20190719140000.31694-7-juri.lelli@redhat.com Signed-off-by: Ingo Molnar Signed-off-by: Cai Xinchen Signed-off-by: Greg Kroah-Hartman --- include/linux/cpuset.h | 8 ++++---- kernel/cgroup/cpuset.c | 24 +++++++++++++++++------- 2 files changed, 21 insertions(+), 11 deletions(-) diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index 934633a05d20..7f1478c26a33 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -40,14 +40,14 @@ static inline bool cpusets_enabled(void) static inline void cpuset_inc(void) { - static_branch_inc(&cpusets_pre_enable_key); - static_branch_inc(&cpusets_enabled_key); + static_branch_inc_cpuslocked(&cpusets_pre_enable_key); + static_branch_inc_cpuslocked(&cpusets_enabled_key); } static inline void cpuset_dec(void) { - static_branch_dec(&cpusets_enabled_key); - static_branch_dec(&cpusets_pre_enable_key); + static_branch_dec_cpuslocked(&cpusets_enabled_key); + static_branch_dec_cpuslocked(&cpusets_pre_enable_key); } extern int cpuset_init(void); diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index dcd5755b1fe2..7169e47fb48b 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -830,8 +830,8 @@ static void rebuild_sched_domains_locked(void) cpumask_var_t *doms; int ndoms; + lockdep_assert_cpus_held(); lockdep_assert_held(&cpuset_mutex); - get_online_cpus(); /* * We have raced with CPU hotplug. Don't do anything to avoid @@ -839,15 +839,13 @@ static void rebuild_sched_domains_locked(void) * Anyways, hotplug work item will rebuild sched domains. */ if (!cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask)) - goto out; + return; /* Generate domain masks and attrs */ ndoms = generate_sched_domains(&doms, &attr); /* Have scheduler rebuild the domains */ partition_sched_domains(ndoms, doms, attr); -out: - put_online_cpus(); } #else /* !CONFIG_SMP */ static void rebuild_sched_domains_locked(void) @@ -857,9 +855,11 @@ static void rebuild_sched_domains_locked(void) void rebuild_sched_domains(void) { + get_online_cpus(); mutex_lock(&cpuset_mutex); rebuild_sched_domains_locked(); mutex_unlock(&cpuset_mutex); + put_online_cpus(); } /** @@ -1528,13 +1528,13 @@ static void cpuset_attach(struct cgroup_taskset *tset) cgroup_taskset_first(tset, &css); cs = css_cs(css); - mutex_lock(&cpuset_mutex); - /* * It should hold cpus lock because a cpu offline event can * cause set_cpus_allowed_ptr() failed. */ get_online_cpus(); + mutex_lock(&cpuset_mutex); + /* prepare for attach */ if (cs == &top_cpuset) cpumask_copy(cpus_attach, cpu_possible_mask); @@ -1553,7 +1553,6 @@ static void cpuset_attach(struct cgroup_taskset *tset) cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to); cpuset_update_task_spread_flag(cs, task); } - put_online_cpus(); /* * Change mm for all threadgroup leaders. 
This is expensive and may @@ -1589,6 +1588,7 @@ static void cpuset_attach(struct cgroup_taskset *tset) wake_up(&cpuset_attach_wq); mutex_unlock(&cpuset_mutex); + put_online_cpus(); } /* The various types of files and directories in a cpuset file system */ @@ -1617,6 +1617,7 @@ static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft, cpuset_filetype_t type = cft->private; int retval = 0; + get_online_cpus(); mutex_lock(&cpuset_mutex); if (!is_cpuset_online(cs)) { retval = -ENODEV; @@ -1654,6 +1655,7 @@ static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft, } out_unlock: mutex_unlock(&cpuset_mutex); + put_online_cpus(); return retval; } @@ -1664,6 +1666,7 @@ static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft, cpuset_filetype_t type = cft->private; int retval = -ENODEV; + get_online_cpus(); mutex_lock(&cpuset_mutex); if (!is_cpuset_online(cs)) goto out_unlock; @@ -1678,6 +1681,7 @@ static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft, } out_unlock: mutex_unlock(&cpuset_mutex); + put_online_cpus(); return retval; } @@ -1716,6 +1720,7 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of, kernfs_break_active_protection(of->kn); flush_work(&cpuset_hotplug_work); + get_online_cpus(); mutex_lock(&cpuset_mutex); if (!is_cpuset_online(cs)) goto out_unlock; @@ -1741,6 +1746,7 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of, free_trial_cpuset(trialcs); out_unlock: mutex_unlock(&cpuset_mutex); + put_online_cpus(); kernfs_unbreak_active_protection(of->kn); css_put(&cs->css); flush_workqueue(cpuset_migrate_mm_wq); @@ -1985,6 +1991,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css) if (!parent) return 0; + get_online_cpus(); mutex_lock(&cpuset_mutex); set_bit(CS_ONLINE, &cs->flags); @@ -2035,6 +2042,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css) spin_unlock_irq(&callback_lock); out_unlock: mutex_unlock(&cpuset_mutex); + put_online_cpus(); return 0; } @@ -2048,6 +2056,7 @@ static void cpuset_css_offline(struct cgroup_subsys_state *css) { struct cpuset *cs = css_cs(css); + get_online_cpus(); mutex_lock(&cpuset_mutex); if (is_sched_load_balance(cs)) @@ -2057,6 +2066,7 @@ static void cpuset_css_offline(struct cgroup_subsys_state *css) clear_bit(CS_ONLINE, &cs->flags); mutex_unlock(&cpuset_mutex); + put_online_cpus(); } static void cpuset_css_free(struct cgroup_subsys_state *css) -- cgit v1.2.1 From e446300968c6bd25d9cd6c33b9600780a39b3975 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 20 Mar 2023 01:15:06 +0000 Subject: cgroup: Fix threadgroup_rwsem <-> cpus_read_lock() deadlock commit 4f7e7236435ca0abe005c674ebd6892c6e83aeb3 upstream. Add #include <linux/cpu.h> to avoid a compile error on some architectures. commit 9a3284fad42f6 ("cgroup: Optimize single thread migration") and commit 671c11f0619e5 ("cgroup: Elide write-locking threadgroup_rwsem when updating csses on an empty subtree") have not been backported, so ignore the input parameter of cgroup_attach_lock()/cgroup_attach_unlock(). original commit message: Bringing up a CPU may involve creating and destroying tasks which requires read-locking threadgroup_rwsem, so threadgroup_rwsem nests inside cpus_read_lock(). However, cpuset's ->attach(), which may be called with threadgroup_rwsem write-locked, also wants to disable CPU hotplug and acquires cpus_read_lock(), leading to a deadlock.
Fix it by guaranteeing that ->attach() is always called with CPU hotplug disabled and removing cpus_read_lock() call from cpuset_attach(). Signed-off-by: Tejun Heo Reviewed-and-tested-by: Imran Khan Reported-and-tested-by: Xuewen Yan Fixes: 05c7b7a92cc8 ("cgroup/cpuset: Fix a race between cpuset_attach() and cpu hotplug") Cc: stable@vger.kernel.org # v5.17+ Signed-off-by: Cai Xinchen Signed-off-by: Greg Kroah-Hartman --- kernel/cgroup/cgroup.c | 50 +++++++++++++++++++++++++++++++++++++++++++++----- kernel/cgroup/cpuset.c | 7 +------ 2 files changed, 46 insertions(+), 11 deletions(-) diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index a892a99eb4bf..a8185cdb8587 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -55,6 +55,7 @@ #include #include #include +#include #include #define CREATE_TRACE_POINTS @@ -2209,6 +2210,45 @@ int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen) } EXPORT_SYMBOL_GPL(task_cgroup_path); +/** + * cgroup_attach_lock - Lock for ->attach() + * @lock_threadgroup: whether to down_write cgroup_threadgroup_rwsem + * + * cgroup migration sometimes needs to stabilize threadgroups against forks and + * exits by write-locking cgroup_threadgroup_rwsem. However, some ->attach() + * implementations (e.g. cpuset), also need to disable CPU hotplug. + * Unfortunately, letting ->attach() operations acquire cpus_read_lock() can + * lead to deadlocks. + * + * Bringing up a CPU may involve creating and destroying tasks which requires + * read-locking threadgroup_rwsem, so threadgroup_rwsem nests inside + * cpus_read_lock(). If we call an ->attach() which acquires the cpus lock while + * write-locking threadgroup_rwsem, the locking order is reversed and we end up + * waiting for an on-going CPU hotplug operation which in turn is waiting for + * the threadgroup_rwsem to be released to create new tasks. For more details: + * + * http://lkml.kernel.org/r/20220711174629.uehfmqegcwn2lqzu@wubuntu + * + * Resolve the situation by always acquiring cpus_read_lock() before optionally + * write-locking cgroup_threadgroup_rwsem. This allows ->attach() to assume that + * CPU hotplug is disabled on entry. 
+ */ +static void cgroup_attach_lock(void) +{ + get_online_cpus(); + percpu_down_write(&cgroup_threadgroup_rwsem); +} + +/** + * cgroup_attach_unlock - Undo cgroup_attach_lock() + * @lock_threadgroup: whether to up_write cgroup_threadgroup_rwsem + */ +static void cgroup_attach_unlock(void) +{ + percpu_up_write(&cgroup_threadgroup_rwsem); + put_online_cpus(); +} + /** * cgroup_migrate_add_task - add a migration target task to a migration context * @task: target task @@ -2694,7 +2734,7 @@ struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup) if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0) return ERR_PTR(-EINVAL); - percpu_down_write(&cgroup_threadgroup_rwsem); + cgroup_attach_lock(); rcu_read_lock(); if (pid) { @@ -2725,7 +2765,7 @@ struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup) goto out_unlock_rcu; out_unlock_threadgroup: - percpu_up_write(&cgroup_threadgroup_rwsem); + cgroup_attach_unlock(); out_unlock_rcu: rcu_read_unlock(); return tsk; @@ -2740,7 +2780,7 @@ void cgroup_procs_write_finish(struct task_struct *task) /* release reference from cgroup_procs_write_start() */ put_task_struct(task); - percpu_up_write(&cgroup_threadgroup_rwsem); + cgroup_attach_unlock(); for_each_subsys(ss, ssid) if (ss->post_attach) ss->post_attach(); @@ -2799,7 +2839,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp) lockdep_assert_held(&cgroup_mutex); - percpu_down_write(&cgroup_threadgroup_rwsem); + cgroup_attach_lock(); /* look up all csses currently attached to @cgrp's subtree */ spin_lock_irq(&css_set_lock); @@ -2830,7 +2870,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp) ret = cgroup_migrate_execute(&mgctx); out_finish: cgroup_migrate_finish(&mgctx); - percpu_up_write(&cgroup_threadgroup_rwsem); + cgroup_attach_unlock(); return ret; } diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index 7169e47fb48b..c6d412cebc43 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -1528,11 +1528,7 @@ static void cpuset_attach(struct cgroup_taskset *tset) cgroup_taskset_first(tset, &css); cs = css_cs(css); - /* - * It should hold cpus lock because a cpu offline event can - * cause set_cpus_allowed_ptr() failed. - */ - get_online_cpus(); + lockdep_assert_cpus_held(); /* see cgroup_attach_lock() */ mutex_lock(&cpuset_mutex); /* prepare for attach */ @@ -1588,7 +1584,6 @@ static void cpuset_attach(struct cgroup_taskset *tset) wake_up(&cpuset_attach_wq); mutex_unlock(&cpuset_mutex); - put_online_cpus(); } /* The various types of files and directories in a cpuset file system */ -- cgit v1.2.1 From 321488cfac7d0eb6d97de467015ff754f85813ff Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Mon, 20 Mar 2023 01:15:07 +0000 Subject: cgroup: Add missing cpus_read_lock() to cgroup_attach_task_all() commit 43626dade36fa74d3329046f4ae2d7fdefe401c6 upstream. syzbot is hitting percpu_rwsem_assert_held(&cpu_hotplug_lock) warning at cpuset_attach() [1], for commit 4f7e7236435ca0ab ("cgroup: Fix threadgroup_rwsem <-> cpus_read_lock() deadlock") missed that cpuset_attach() is also called from cgroup_attach_task_all(). Add cpus_read_lock() like what cgroup_procs_write_start() does. 
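[ Backport note: in 4.19 the upstream cpus_read_lock()/cpus_read_unlock() pair corresponds to get_online_cpus()/put_online_cpus(), which is what the diff below adds around the cgroup_threadgroup_rwsem section. ]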
Link: https://syzkaller.appspot.com/bug?extid=29d3a3b4d86c8136ad9e [1] Reported-by: syzbot Signed-off-by: Tetsuo Handa Fixes: 4f7e7236435ca0ab ("cgroup: Fix threadgroup_rwsem <-> cpus_read_lock() deadlock") Signed-off-by: Tejun Heo Signed-off-by: Cai Xinchen Signed-off-by: Greg Kroah-Hartman --- kernel/cgroup/cgroup-v1.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c index 61644976225a..c0ebb70808b6 100644 --- a/kernel/cgroup/cgroup-v1.c +++ b/kernel/cgroup/cgroup-v1.c @@ -13,6 +13,7 @@ #include #include #include +#include #include @@ -55,6 +56,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk) int retval = 0; mutex_lock(&cgroup_mutex); + get_online_cpus(); percpu_down_write(&cgroup_threadgroup_rwsem); for_each_root(root) { struct cgroup *from_cgrp; @@ -71,6 +73,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk) break; } percpu_up_write(&cgroup_threadgroup_rwsem); + put_online_cpus(); mutex_unlock(&cgroup_mutex); return retval; -- cgit v1.2.1 From 5c0966408dee90137adf2e96f949e50a2ba7e401 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Wed, 5 Apr 2023 11:15:43 +0200 Subject: Linux 4.19.280 Link: https://lore.kernel.org/r/20230403140353.406927418@linuxfoundation.org Tested-by: Shuah Khan Tested-by: Linux Kernel Functional Testing Tested-by: Jon Hunter Tested-by: Chris Paterson (CIP) Tested-by: Guenter Roeck Signed-off-by: Greg Kroah-Hartman --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index d6c4a53bf505..c70637ed93cd 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 19 -SUBLEVEL = 279 +SUBLEVEL = 280 EXTRAVERSION = NAME = "People's Front" -- cgit v1.2.1