-rw-r--r--  patches/0007-perf-bpf-Remove-preempt-disable-around-BPF-invocatio.patch |   4
-rw-r--r--  patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch      |   6
-rw-r--r--  patches/dev-Defer-free-of-skbs-in-flush_backlog.patch                   |  27
-rw-r--r--  patches/io_wq-Make-io_wqe-lock-a-raw_spinlock_t.patch                   | 252
-rw-r--r--  patches/localversion.patch                                              |   2
-rw-r--r--  patches/printk-fix-ifnullfree.cocci-warnings.patch                      |  46
-rw-r--r--  patches/series                                                          |   5
-rw-r--r--  patches/skbufhead-raw-lock.patch                                        | 107
8 files changed, 341 insertions, 108 deletions
diff --git a/patches/0007-perf-bpf-Remove-preempt-disable-around-BPF-invocatio.patch b/patches/0007-perf-bpf-Remove-preempt-disable-around-BPF-invocatio.patch
index a03399ea43c7..e1f1b1095347 100644
--- a/patches/0007-perf-bpf-Remove-preempt-disable-around-BPF-invocatio.patch
+++ b/patches/0007-perf-bpf-Remove-preempt-disable-around-BPF-invocatio.patch
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
-@@ -9202,7 +9202,6 @@ static void bpf_overflow_handler(struct
+@@ -9207,7 +9207,6 @@ static void bpf_overflow_handler(struct
int ret = 0;
ctx.regs = perf_arch_bpf_user_pt_regs(regs);
@@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1))
goto out;
rcu_read_lock();
-@@ -9210,7 +9209,6 @@ static void bpf_overflow_handler(struct
+@@ -9215,7 +9214,6 @@ static void bpf_overflow_handler(struct
rcu_read_unlock();
out:
__this_cpu_dec(bpf_prog_active);
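
The hunk above only refreshes context line numbers after an upstream rebase (9202 becomes 9207); the recursion guard visible in the context is untouched. For orientation, a minimal sketch of that guard pattern. The identifiers mirror kernel/events/core.c, but the body is an illustration, not the patched function:

#include <linux/percpu.h>
#include <linux/rcupdate.h>

static DEFINE_PER_CPU(int, bpf_prog_active);

static void overflow_handler_sketch(void)
{
	/* Refuse to recurse: at most one BPF program runs per CPU. */
	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1))
		goto out;

	rcu_read_lock();
	/* ... look up and run the attached BPF program ... */
	rcu_read_unlock();
out:
	/* Always rebalance the per-CPU counter. */
	__this_cpu_dec(bpf_prog_active);
}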
diff --git a/patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch b/patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
index 12ce2e5695c4..f9f08c636c31 100644
--- a/patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
+++ b/patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
@@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
-@@ -680,7 +680,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -700,7 +700,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
* involves poking the GIC, which must be done in a
* non-preemptible context.
*/
@@ -31,7 +31,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
kvm_pmu_flush_hwstate(vcpu);
-@@ -729,7 +729,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -749,7 +749,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
kvm_timer_sync_hwstate(vcpu);
kvm_vgic_sync_hwstate(vcpu);
local_irq_enable();
@@ -40,7 +40,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue;
}
-@@ -805,7 +805,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -825,7 +825,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
/* Exit types that need handling before we can be preempted */
handle_exit_early(vcpu, run, ret);
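
As with the previous file, only the context offsets move (680/729/805 become 700/749/825). The substance of the patch is the downgrade its title names: the preempt_disable()'d region around guest entry becomes migrate_disable()'d, which on PREEMPT_RT keeps the vCPU thread on its CPU (so the per-CPU GIC and PMU state it pokes stays valid) while leaving preemption enabled. A minimal sketch of that contract, assuming the straight preempt_* to migrate_* substitution the title describes; this is not the patched kvm_arch_vcpu_ioctl_run():

#include <linux/preempt.h>

static void vcpu_entry_sketch(void)
{
	/*
	 * Pin the task to this CPU so the per-CPU GIC/PMU state stays
	 * valid, but keep preemption (and therefore sleeping locks on
	 * PREEMPT_RT) available.
	 */
	migrate_disable();

	/* ... kvm_pmu_flush_hwstate(), enter the guest, sync hwstate ... */

	migrate_enable();
}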
diff --git a/patches/dev-Defer-free-of-skbs-in-flush_backlog.patch b/patches/dev-Defer-free-of-skbs-in-flush_backlog.patch
new file mode 100644
index 000000000000..619a94da0a75
--- /dev/null
+++ b/patches/dev-Defer-free-of-skbs-in-flush_backlog.patch
@@ -0,0 +1,27 @@
+From: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
+Date: Thu, 23 Jul 2020 11:31:48 -0600
+Subject: [PATCH] dev: Defer free of skbs in flush_backlog
+
+Upstream commit 7df5cb75cfb8acf96c7f2342530eb41e0c11f4c3
+
+IRQs are disabled when freeing skbs in the input queue.
+Use the IRQ-safe variant to free skbs here.
+
+Fixes: 145dd5f9c88f ("net: flush the softnet backlog in process context")
+Signed-off-by: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ net/core/dev.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -5506,7 +5506,7 @@ static void flush_backlog(struct work_st
+ skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
+ if (skb->dev->reg_state == NETREG_UNREGISTERING) {
+ __skb_unlink(skb, &sd->input_pkt_queue);
+- kfree_skb(skb);
++ dev_kfree_skb_irq(skb);
+ input_queue_head_incr(sd);
+ }
+ }
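
This is upstream commit 7df5cb75cfb8, imported verbatim: flush_backlog() walks input_pkt_queue with IRQs disabled, so the skbs must not be freed on the spot. dev_kfree_skb_irq() is safe there because it defers the actual free. A simplified sketch of what the IRQ-safe variant does; it approximates net/core/dev.c rather than quoting it (the real code also handles skb reference counts):

#include <linux/interrupt.h>
#include <linux/netdevice.h>

static void dev_kfree_skb_irq_sketch(struct sk_buff *skb)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = this_cpu_ptr(&softnet_data);

	/* Chain the skb onto the per-CPU completion queue ... */
	skb->next = sd->completion_queue;
	sd->completion_queue = skb;

	/* ... and let net_tx_action() kfree_skb() it later, in
	 * softirq context with interrupts enabled. */
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}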
diff --git a/patches/io_wq-Make-io_wqe-lock-a-raw_spinlock_t.patch b/patches/io_wq-Make-io_wqe-lock-a-raw_spinlock_t.patch
new file mode 100644
index 000000000000..b7cd685406c8
--- /dev/null
+++ b/patches/io_wq-Make-io_wqe-lock-a-raw_spinlock_t.patch
@@ -0,0 +1,252 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 19 Aug 2020 21:44:45 +0200
+Subject: [PATCH] io_wq: Make io_wqe::lock a raw_spinlock_t
+
+During a context switch the scheduler invokes wq_worker_sleeping() with
+disabled preemption. Disabling preemption is needed because it protects
+access to `worker->sleeping'. As an optimisation it avoids invoking
+schedule() within the schedule path as part of a possible wake-up (hence
+the preempt_enable_no_resched() afterwards).
+
+The io-wq has been added to the mix in the same section with disabled
+preemption. This breaks on PREEMPT_RT because io_wq_worker_sleeping()
+acquires a spinlock_t. Also, within schedule() the spinlock_t must be
+acquired after the tsk_is_pi_blocked() check, otherwise the task will
+block on the sleeping lock again while scheduling out.
+
+While playing with `io_uring-bench' I didn't notice a significant
+latency spike after converting io_wqe::lock to a raw_spinlock_t. The
+latency was more or less the same.
+
+In order to keep the spinlock_t it would have to be moved after the
+tsk_is_pi_blocked() check, which would introduce a branch instruction
+into the hot path.
+
+The lock is used to maintain the `work_list' and to wake up at most
+one task.
+Should io_wqe_cancel_pending_work() cause latency spikes while
+searching for a specific item, it would need to drop the lock
+during the iteration.
+revert_creds() is also invoked under the lock. According to debug
+output, cred::non_rcu is 0; otherwise it would have to be moved outside
+of the locked section because put_cred_rcu()->free_uid() acquires a sleeping lock.
+
+Convert io_wqe::lock to a raw_spinlock_t.
+
+Cc: stable-rt@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ fs/io-wq.c | 52 ++++++++++++++++++++++++++--------------------------
+ 1 file changed, 26 insertions(+), 26 deletions(-)
+
+--- a/fs/io-wq.c
++++ b/fs/io-wq.c
+@@ -85,7 +85,7 @@ enum {
+ */
+ struct io_wqe {
+ struct {
+- spinlock_t lock;
++ raw_spinlock_t lock;
+ struct io_wq_work_list work_list;
+ unsigned long hash_map;
+ unsigned flags;
+@@ -145,7 +145,7 @@ static bool __io_worker_unuse(struct io_
+
+ if (current->files != worker->restore_files) {
+ __acquire(&wqe->lock);
+- spin_unlock_irq(&wqe->lock);
++ raw_spin_unlock_irq(&wqe->lock);
+ dropped_lock = true;
+
+ task_lock(current);
+@@ -163,7 +163,7 @@ static bool __io_worker_unuse(struct io_
+ if (worker->mm) {
+ if (!dropped_lock) {
+ __acquire(&wqe->lock);
+- spin_unlock_irq(&wqe->lock);
++ raw_spin_unlock_irq(&wqe->lock);
+ dropped_lock = true;
+ }
+ __set_current_state(TASK_RUNNING);
+@@ -218,17 +218,17 @@ static void io_worker_exit(struct io_wor
+ worker->flags = 0;
+ preempt_enable();
+
+- spin_lock_irq(&wqe->lock);
++ raw_spin_lock_irq(&wqe->lock);
+ hlist_nulls_del_rcu(&worker->nulls_node);
+ list_del_rcu(&worker->all_list);
+ if (__io_worker_unuse(wqe, worker)) {
+ __release(&wqe->lock);
+- spin_lock_irq(&wqe->lock);
++ raw_spin_lock_irq(&wqe->lock);
+ }
+ acct->nr_workers--;
+ nr_workers = wqe->acct[IO_WQ_ACCT_BOUND].nr_workers +
+ wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers;
+- spin_unlock_irq(&wqe->lock);
++ raw_spin_unlock_irq(&wqe->lock);
+
+ /* all workers gone, wq exit can proceed */
+ if (!nr_workers && refcount_dec_and_test(&wqe->wq->refs))
+@@ -463,7 +463,7 @@ static void io_worker_handle_work(struct
+ else if (!wq_list_empty(&wqe->work_list))
+ wqe->flags |= IO_WQE_FLAG_STALLED;
+
+- spin_unlock_irq(&wqe->lock);
++ raw_spin_unlock_irq(&wqe->lock);
+ if (put_work && wq->put_work)
+ wq->put_work(old_work);
+ if (!work)
+@@ -514,14 +514,14 @@ static void io_worker_handle_work(struct
+ worker->cur_work = NULL;
+ spin_unlock_irq(&worker->lock);
+
+- spin_lock_irq(&wqe->lock);
++ raw_spin_lock_irq(&wqe->lock);
+
+ if (hash != -1U) {
+ wqe->hash_map &= ~BIT_ULL(hash);
+ wqe->flags &= ~IO_WQE_FLAG_STALLED;
+ }
+ if (work && work != old_work) {
+- spin_unlock_irq(&wqe->lock);
++ raw_spin_unlock_irq(&wqe->lock);
+
+ if (put_work && wq->put_work) {
+ wq->put_work(put_work);
+@@ -546,7 +546,7 @@ static int io_wqe_worker(void *data)
+ while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ loop:
+- spin_lock_irq(&wqe->lock);
++ raw_spin_lock_irq(&wqe->lock);
+ if (io_wqe_run_queue(wqe)) {
+ __set_current_state(TASK_RUNNING);
+ io_worker_handle_work(worker);
+@@ -557,7 +557,7 @@ static int io_wqe_worker(void *data)
+ __release(&wqe->lock);
+ goto loop;
+ }
+- spin_unlock_irq(&wqe->lock);
++ raw_spin_unlock_irq(&wqe->lock);
+ if (signal_pending(current))
+ flush_signals(current);
+ if (schedule_timeout(WORKER_IDLE_TIMEOUT))
+@@ -569,11 +569,11 @@ static int io_wqe_worker(void *data)
+ }
+
+ if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
+- spin_lock_irq(&wqe->lock);
++ raw_spin_lock_irq(&wqe->lock);
+ if (!wq_list_empty(&wqe->work_list))
+ io_worker_handle_work(worker);
+ else
+- spin_unlock_irq(&wqe->lock);
++ raw_spin_unlock_irq(&wqe->lock);
+ }
+
+ io_worker_exit(worker);
+@@ -613,9 +613,9 @@ void io_wq_worker_sleeping(struct task_s
+
+ worker->flags &= ~IO_WORKER_F_RUNNING;
+
+- spin_lock_irq(&wqe->lock);
++ raw_spin_lock_irq(&wqe->lock);
+ io_wqe_dec_running(wqe, worker);
+- spin_unlock_irq(&wqe->lock);
++ raw_spin_unlock_irq(&wqe->lock);
+ }
+
+ static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
+@@ -639,7 +639,7 @@ static bool create_io_worker(struct io_w
+ return false;
+ }
+
+- spin_lock_irq(&wqe->lock);
++ raw_spin_lock_irq(&wqe->lock);
+ hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
+ list_add_tail_rcu(&worker->all_list, &wqe->all_list);
+ worker->flags |= IO_WORKER_F_FREE;
+@@ -648,7 +648,7 @@ static bool create_io_worker(struct io_w
+ if (!acct->nr_workers && (worker->flags & IO_WORKER_F_BOUND))
+ worker->flags |= IO_WORKER_F_FIXED;
+ acct->nr_workers++;
+- spin_unlock_irq(&wqe->lock);
++ raw_spin_unlock_irq(&wqe->lock);
+
+ if (index == IO_WQ_ACCT_UNBOUND)
+ atomic_inc(&wq->user->processes);
+@@ -700,12 +700,12 @@ static int io_wq_manager(void *data)
+ if (!node_online(node))
+ continue;
+
+- spin_lock_irq(&wqe->lock);
++ raw_spin_lock_irq(&wqe->lock);
+ if (io_wqe_need_worker(wqe, IO_WQ_ACCT_BOUND))
+ fork_worker[IO_WQ_ACCT_BOUND] = true;
+ if (io_wqe_need_worker(wqe, IO_WQ_ACCT_UNBOUND))
+ fork_worker[IO_WQ_ACCT_UNBOUND] = true;
+- spin_unlock_irq(&wqe->lock);
++ raw_spin_unlock_irq(&wqe->lock);
+ if (fork_worker[IO_WQ_ACCT_BOUND])
+ create_io_worker(wq, wqe, IO_WQ_ACCT_BOUND);
+ if (fork_worker[IO_WQ_ACCT_UNBOUND])
+@@ -776,10 +776,10 @@ static void io_wqe_enqueue(struct io_wqe
+ }
+
+ work_flags = work->flags;
+- spin_lock_irqsave(&wqe->lock, flags);
++ raw_spin_lock_irqsave(&wqe->lock, flags);
+ wq_list_add_tail(&work->list, &wqe->work_list);
+ wqe->flags &= ~IO_WQE_FLAG_STALLED;
+- spin_unlock_irqrestore(&wqe->lock, flags);
++ raw_spin_unlock_irqrestore(&wqe->lock, flags);
+
+ if ((work_flags & IO_WQ_WORK_CONCURRENT) ||
+ !atomic_read(&acct->nr_running))
+@@ -897,7 +897,7 @@ static enum io_wq_cancel io_wqe_cancel_c
+ unsigned long flags;
+ bool found = false;
+
+- spin_lock_irqsave(&wqe->lock, flags);
++ raw_spin_lock_irqsave(&wqe->lock, flags);
+ wq_list_for_each(node, prev, &wqe->work_list) {
+ work = container_of(node, struct io_wq_work, list);
+
+@@ -907,7 +907,7 @@ static enum io_wq_cancel io_wqe_cancel_c
+ break;
+ }
+ }
+- spin_unlock_irqrestore(&wqe->lock, flags);
++ raw_spin_unlock_irqrestore(&wqe->lock, flags);
+
+ if (found) {
+ io_run_cancel(work);
+@@ -972,7 +972,7 @@ static enum io_wq_cancel io_wqe_cancel_w
+ * from there. CANCEL_OK means that the work is returned as-new,
+ * no completion will be posted for it.
+ */
+- spin_lock_irqsave(&wqe->lock, flags);
++ raw_spin_lock_irqsave(&wqe->lock, flags);
+ wq_list_for_each(node, prev, &wqe->work_list) {
+ work = container_of(node, struct io_wq_work, list);
+
+@@ -982,7 +982,7 @@ static enum io_wq_cancel io_wqe_cancel_w
+ break;
+ }
+ }
+- spin_unlock_irqrestore(&wqe->lock, flags);
++ raw_spin_unlock_irqrestore(&wqe->lock, flags);
+
+ if (found) {
+ io_run_cancel(work);
+@@ -1097,7 +1097,7 @@ struct io_wq *io_wq_create(unsigned boun
+ }
+ atomic_set(&wqe->acct[IO_WQ_ACCT_UNBOUND].nr_running, 0);
+ wqe->wq = wq;
+- spin_lock_init(&wqe->lock);
++ raw_spin_lock_init(&wqe->lock);
+ INIT_WQ_LIST(&wqe->work_list);
+ INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
+ INIT_LIST_HEAD(&wqe->all_list);
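
The conversion itself is mechanical (every spin_* call on io_wqe::lock becomes the raw_spin_* equivalent), but the type change is what matters: on PREEMPT_RT a spinlock_t is backed by a sleeping rtmutex, while a raw_spinlock_t keeps busy-wait semantics and may be taken from the preemption-disabled context-switch path. A trimmed sketch of the resulting discipline; the field names follow the patch, but the struct is reduced to what the lock protects:

#include <linux/list.h>
#include <linux/spinlock.h>

struct io_wqe_sketch {
	raw_spinlock_t lock;		/* stays a busy-wait lock on RT */
	struct list_head work_list;	/* protected by @lock */
};

static void io_wqe_sketch_init(struct io_wqe_sketch *wqe)
{
	raw_spin_lock_init(&wqe->lock);
	INIT_LIST_HEAD(&wqe->work_list);
}

static void io_wqe_sketch_enqueue(struct io_wqe_sketch *wqe,
				  struct list_head *work)
{
	unsigned long flags;

	/*
	 * Legal even where io_wq_worker_sleeping() runs, i.e. with
	 * preemption disabled, where an rtmutex-backed spinlock_t
	 * would attempt to sleep.
	 */
	raw_spin_lock_irqsave(&wqe->lock, flags);
	list_add_tail(work, &wqe->work_list);
	raw_spin_unlock_irqrestore(&wqe->lock, flags);
}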
diff --git a/patches/localversion.patch b/patches/localversion.patch
index 58842b503a27..12bd473a33f5 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt11
++-rt12
diff --git a/patches/printk-fix-ifnullfree.cocci-warnings.patch b/patches/printk-fix-ifnullfree.cocci-warnings.patch
new file mode 100644
index 000000000000..c27331b46bb7
--- /dev/null
+++ b/patches/printk-fix-ifnullfree.cocci-warnings.patch
@@ -0,0 +1,46 @@
+From: Julia Lawall <julia.lawall@inria.fr>
+Date: Fri, 24 Jul 2020 12:05:31 +0200
+Subject: [PATCH] printk: fix ifnullfree.cocci warnings
+
+Make the code a little simpler by dropping
+some unneeded tests.
+
+Generated by: scripts/coccinelle/free/ifnullfree.cocci
+
+Fixes: c406fbce2054 ("printk: implement syslog")
+CC: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: kernel test robot <lkp@intel.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Acked-by: John Ogness <john.ogness@linutronix.de>
+---
+ kernel/printk/printk.c | 12 ++++--------
+ 1 file changed, 4 insertions(+), 8 deletions(-)
+
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -1458,10 +1458,8 @@ static int syslog_print_all(char __user
+ if (clear && !seq)
+ syslog_clear();
+
+- if (text)
+- kfree(text);
+- if (msgbuf)
+- kfree(msgbuf);
++ kfree(text);
++ kfree(msgbuf);
+ return len;
+ }
+
+@@ -1614,10 +1612,8 @@ int do_syslog(int type, char __user *buf
+ break;
+ }
+ out:
+- if (msgbuf)
+- kfree(msgbuf);
+- if (text)
+- kfree(text);
++ kfree(msgbuf);
++ kfree(text);
+ return error;
+ }
+
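
The transformation is safe because kfree(NULL) is defined to be a no-op, so the guard only adds a redundant branch. The pattern in miniature:

	/* before: the test duplicates a check kfree() already performs */
	if (text)
		kfree(text);
	if (msgbuf)
		kfree(msgbuf);

	/* after: identical behaviour, including for NULL pointers */
	kfree(text);
	kfree(msgbuf);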
diff --git a/patches/series b/patches/series
index d0d6cad638b5..97430cc2c31c 100644
--- a/patches/series
+++ b/patches/series
@@ -8,6 +8,7 @@
workqueue-Use-rcuwait-for-wq_manager_wait.patch
workqueue-Convert-the-pool-lock-and-wq_mayday_lock-t.patch
Bluetooth-Acquire-sk_lock.slock-without-disabling-in.patch
+dev-Defer-free-of-skbs-in-flush_backlog.patch
############################################################
# POSTED by others
@@ -57,6 +58,7 @@ printk-hack-out-emergency-loglevel-usage.patch
printk-Force-a-line-break-on-pr_cont-n.patch
serial-8250-only-atomic-lock-for-console.patch
serial-8250-fsl-ingenic-mtk-fix-atomic-console.patch
+printk-fix-ifnullfree.cocci-warnings.patch
# 2020-01-31 16:07 Peter Zijlstra [PATCH -v2 0/7] locking: Percpu-rwsem rewrite
# https://lkml.kernel.org/r/20200131150703.194229898@infradead.org
@@ -130,6 +132,9 @@ mm-compaction-Disable-compact_unevictable_allowed-on.patch
# 20191015191821.11479-1-bigeasy@linutronix.de
Use-CONFIG_PREEMPTION.patch
+# 20200819194443.eabkhlkocvkgifyh@linutronix.de
+io_wq-Make-io_wqe-lock-a-raw_spinlock_t.patch
+
############################################################
# Ready for posting
############################################################
diff --git a/patches/skbufhead-raw-lock.patch b/patches/skbufhead-raw-lock.patch
index 21e5c7eac378..68908b2c099a 100644
--- a/patches/skbufhead-raw-lock.patch
+++ b/patches/skbufhead-raw-lock.patch
@@ -8,21 +8,10 @@ to the softirq and use the tofree_queue list for it (similar to process_queue).
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- include/linux/netdevice.h | 1 +
- include/linux/skbuff.h | 7 +++++++
- net/core/dev.c | 33 +++++++++++++++++++++++++--------
- 3 files changed, 33 insertions(+), 8 deletions(-)
+ include/linux/skbuff.h | 7 +++++++
+ net/core/dev.c | 6 +++---
+ 2 files changed, 10 insertions(+), 3 deletions(-)
---- a/include/linux/netdevice.h
-+++ b/include/linux/netdevice.h
-@@ -3090,6 +3090,7 @@ struct softnet_data {
- unsigned int dropped;
- struct sk_buff_head input_pkt_queue;
- struct napi_struct backlog;
-+ struct sk_buff_head tofree_queue;
-
- };
-
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -293,6 +293,7 @@ struct sk_buff_head {
@@ -65,98 +54,12 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
}
-@@ -5503,7 +5503,7 @@ static void flush_backlog(struct work_st
- skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
- if (skb->dev->reg_state == NETREG_UNREGISTERING) {
- __skb_unlink(skb, &sd->input_pkt_queue);
-- kfree_skb(skb);
-+ __skb_queue_tail(&sd->tofree_queue, skb);
- input_queue_head_incr(sd);
- }
- }
-@@ -5513,11 +5513,14 @@ static void flush_backlog(struct work_st
- skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
- if (skb->dev->reg_state == NETREG_UNREGISTERING) {
- __skb_unlink(skb, &sd->process_queue);
-- kfree_skb(skb);
-+ __skb_queue_tail(&sd->tofree_queue, skb);
- input_queue_head_incr(sd);
- }
- }
-+ if (!skb_queue_empty(&sd->tofree_queue))
-+ raise_softirq_irqoff(NET_RX_SOFTIRQ);
- local_bh_enable();
-+
- }
-
- static void flush_all_backlogs(void)
-@@ -6138,7 +6141,9 @@ static int process_backlog(struct napi_s
- while (again) {
- struct sk_buff *skb;
-
-+ local_irq_disable();
- while ((skb = __skb_dequeue(&sd->process_queue))) {
-+ local_irq_enable();
- rcu_read_lock();
- __netif_receive_skb(skb);
- rcu_read_unlock();
-@@ -6146,9 +6151,9 @@ static int process_backlog(struct napi_s
- if (++work >= quota)
- return work;
-
-+ local_irq_disable();
- }
-
-- local_irq_disable();
- rps_lock(sd);
- if (skb_queue_empty(&sd->input_pkt_queue)) {
- /*
-@@ -6630,13 +6635,21 @@ static __latent_entropy void net_rx_acti
- unsigned long time_limit = jiffies +
- usecs_to_jiffies(netdev_budget_usecs);
- int budget = netdev_budget;
-+ struct sk_buff_head tofree_q;
-+ struct sk_buff *skb;
- LIST_HEAD(list);
- LIST_HEAD(repoll);
-
-+ __skb_queue_head_init(&tofree_q);
-+
- local_irq_disable();
-+ skb_queue_splice_init(&sd->tofree_queue, &tofree_q);
- list_splice_init(&sd->poll_list, &list);
- local_irq_enable();
-
-+ while ((skb = __skb_dequeue(&tofree_q)))
-+ kfree_skb(skb);
-+
- for (;;) {
- struct napi_struct *n;
-
-@@ -10188,10 +10201,13 @@ static int dev_cpu_dead(unsigned int old
- netif_rx_ni(skb);
- input_queue_head_incr(oldsd);
- }
-- while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
-+ while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
- netif_rx_ni(skb);
- input_queue_head_incr(oldsd);
- }
-+ while ((skb = __skb_dequeue(&oldsd->tofree_queue))) {
-+ kfree_skb(skb);
-+ }
-
- return 0;
- }
-@@ -10504,8 +10520,9 @@ static int __init net_dev_init(void)
+@@ -10504,7 +10504,7 @@ static int __init net_dev_init(void)
INIT_WORK(flush, flush_backlog);
- skb_queue_head_init(&sd->input_pkt_queue);
-- skb_queue_head_init(&sd->process_queue);
+ skb_queue_head_init_raw(&sd->input_pkt_queue);
-+ skb_queue_head_init_raw(&sd->process_queue);
-+ skb_queue_head_init_raw(&sd->tofree_queue);
+ skb_queue_head_init(&sd->process_queue);
#ifdef CONFIG_XFRM_OFFLOAD
skb_queue_head_init(&sd->xfrm_backlog);
- #endif
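
After this refresh the patch no longer carries the softnet_data::tofree_queue machinery or the process_backlog()/net_rx_action() rework; the flush_backlog() part is superseded by the upstream dev_kfree_skb_irq() fix imported earlier in this commit. What remains is making input_pkt_queue safe to use from IRQ-disabled context via a raw lock. In sketch form, the helper the retained skbuff.h hunks add plausibly looks like this; the raw_lock field and the exact body are inferred from the hunk headers, not quoted:

static inline void skb_queue_head_init_raw(struct sk_buff_head *list)
{
	/* raw_lock would be the extra field the patch adds to sk_buff_head */
	raw_spin_lock_init(&list->raw_lock);
	__skb_queue_head_init(list);	/* next = prev = list, qlen = 0 */
}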