path: root/target/linux/lantiq/patches-5.10/0704-v5.17-net-lantiq_xrx200-add-ingress-SG-DMA-support.patch
From c3e6b2c35b34214c58c1e90d65dab5f5393608e7 Mon Sep 17 00:00:00 2001
From: Aleksander Jan Bajkowski <olek2@wp.pl>
Date: Mon, 3 Jan 2022 20:43:16 +0100
Subject: [PATCH] net: lantiq_xrx200: add ingress SG DMA support

This patch adds support for scatter-gather DMA. The DMA engine in the
PMAC splits a packet into several buffers when the MTU on the CPU port
is less than the MTU of the switch. The first buffer starts at an
offset of NET_IP_ALIGN; in subsequent buffers, the DMA ignores the
offset. With this patch, the user can still connect to the device in
such a situation. For normal configurations, the patch has no effect
on performance.
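
As a rough illustration of the receive-path change below, the following
minimal sketch shows how the fragments of one packet can be chained into
a single skb through skb_shinfo()->frag_list. The rx_chain structure and
the rx_chain_fragment() helper are illustrative only and are not part of
the driver; the real code keeps skb_head/skb_tail in struct xrx200_chan
and drives this from the SOP/EOP bits in the DMA descriptor.

/* Illustrative sketch only: rx_chain and rx_chain_fragment() do not
 * exist in the driver; they model the skb_head/skb_tail handling below.
 */
#include <linux/skbuff.h>

struct rx_chain {
	struct sk_buff *head;	/* first buffer of the packet (SOP) */
	struct sk_buff *tail;	/* most recently appended buffer */
};

static void rx_chain_fragment(struct rx_chain *c, struct sk_buff *skb,
			      bool first)
{
	if (first) {
		/* start of packet: this skb becomes the head */
		c->head = skb;
		c->tail = skb;
		return;
	}

	/* the second fragment starts the frag_list; later fragments
	 * are linked through ->next of the previous one
	 */
	if (c->head == c->tail)
		skb_shinfo(c->head)->frag_list = skb;
	else
		c->tail->next = skb;
	c->tail = skb;

	/* keep the head skb's length accounting consistent */
	c->head->len += skb->len;
	c->head->data_len += skb->len;
	c->head->truesize += skb->truesize;
}

Only the head skb is passed to the network stack (via eth_type_trans()
and netif_receive_skb()); the chained fragments travel with it.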

Signed-off-by: Aleksander Jan Bajkowski <olek2@wp.pl>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 drivers/net/ethernet/lantiq_xrx200.c | 47 +++++++++++++++++++++++-----
 1 file changed, 40 insertions(+), 7 deletions(-)

--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -26,6 +26,9 @@
 #define XRX200_DMA_RX		0
 #define XRX200_DMA_TX		1
 
+#define XRX200_DMA_PACKET_COMPLETE	0
+#define XRX200_DMA_PACKET_IN_PROGRESS	1
+
 /* cpu port mac */
 #define PMAC_RX_IPG		0x0024
 #define PMAC_RX_IPG_MASK	0xf
@@ -61,6 +64,9 @@ struct xrx200_chan {
 	struct ltq_dma_channel dma;
 	struct sk_buff *skb[LTQ_DESC_NUM];
 
+	struct sk_buff *skb_head;
+	struct sk_buff *skb_tail;
+
 	struct xrx200_priv *priv;
 };
 
@@ -204,7 +210,8 @@ static int xrx200_hw_receive(struct xrx2
 	struct xrx200_priv *priv = ch->priv;
 	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
 	struct sk_buff *skb = ch->skb[ch->dma.desc];
-	int len = (desc->ctl & LTQ_DMA_SIZE_MASK);
+	u32 ctl = desc->ctl;
+	int len = (ctl & LTQ_DMA_SIZE_MASK);
 	struct net_device *net_dev = priv->net_dev;
 	int ret;
 
@@ -220,12 +227,36 @@ static int xrx200_hw_receive(struct xrx2
 	}
 
 	skb_put(skb, len);
-	skb->protocol = eth_type_trans(skb, net_dev);
-	netif_receive_skb(skb);
-	net_dev->stats.rx_packets++;
-	net_dev->stats.rx_bytes += len;
 
-	return 0;
+	/* add buffers to skb via skb->frag_list */
+	if (ctl & LTQ_DMA_SOP) {
+		ch->skb_head = skb;
+		ch->skb_tail = skb;
+	} else if (ch->skb_head) {
+		if (ch->skb_head == ch->skb_tail)
+			skb_shinfo(ch->skb_tail)->frag_list = skb;
+		else
+			ch->skb_tail->next = skb;
+		ch->skb_tail = skb;
+		skb_reserve(ch->skb_tail, -NET_IP_ALIGN);
+		ch->skb_head->len += skb->len;
+		ch->skb_head->data_len += skb->len;
+		ch->skb_head->truesize += skb->truesize;
+	}
+
+	if (ctl & LTQ_DMA_EOP) {
+		ch->skb_head->protocol = eth_type_trans(ch->skb_head, net_dev);
+		netif_receive_skb(ch->skb_head);
+		net_dev->stats.rx_packets++;
+		net_dev->stats.rx_bytes += ch->skb_head->len;
+		ch->skb_head = NULL;
+		ch->skb_tail = NULL;
+		ret = XRX200_DMA_PACKET_COMPLETE;
+	} else {
+		ret = XRX200_DMA_PACKET_IN_PROGRESS;
+	}
+
+	return ret;
 }
 
 static int xrx200_poll_rx(struct napi_struct *napi, int budget)
@@ -240,7 +271,9 @@ static int xrx200_poll_rx(struct napi_st
 
 		if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
 			ret = xrx200_hw_receive(ch);
-			if (ret)
+			if (ret == XRX200_DMA_PACKET_IN_PROGRESS)
+				continue;
+			if (ret != XRX200_DMA_PACKET_COMPLETE)
 				return ret;
 			rx++;
 		} else {