-rw-r--r--  lib/netdev-bsd.c       |  23
-rw-r--r--  lib/netdev-dpdk.c      | 136
-rw-r--r--  lib/netdev-dummy.c     |  23
-rw-r--r--  lib/netdev-linux.c     |  23
-rw-r--r--  lib/netdev-provider.h  |  25
-rw-r--r--  lib/netdev.c           |   6
6 files changed, 106 insertions, 130 deletions
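
Every hunk below replaces the old (struct dp_packet **pkts, int cnt) argument pair with a single struct dp_packet_batch pointer. For reference while reading, the batch type is not defined in this patch; a rough sketch of its assumed shape (the authoritative definition lives with the dp_packet helpers, likely lib/dp-packet.h, and may carry additional fields such as truncation state) is:

    /* Sketch only, for orientation while reading the hunks below. */
    struct dp_packet_batch {
        int count;                                    /* Packets in use. */
        struct dp_packet *packets[NETDEV_MAX_BURST];  /* Owned by whoever
                                                       * holds the batch. */
    };
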
diff --git a/lib/netdev-bsd.c b/lib/netdev-bsd.c
index 2e92d9768..869d54d35 100644
--- a/lib/netdev-bsd.c
+++ b/lib/netdev-bsd.c
@@ -618,8 +618,7 @@ netdev_rxq_bsd_recv_tap(struct netdev_rxq_bsd *rxq, struct dp_packet *buffer)
}
static int
-netdev_bsd_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet **packets,
- int *c)
+netdev_bsd_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet_batch *batch)
{
struct netdev_rxq_bsd *rxq = netdev_rxq_bsd_cast(rxq_);
struct netdev *netdev = rxq->up.netdev;
@@ -641,8 +640,8 @@ netdev_bsd_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet **packets,
dp_packet_delete(packet);
} else {
dp_packet_pad(packet);
- packets[0] = packet;
- *c = 1;
+ batch->packets[0] = packet;
+ batch->count = 1;
}
return retval;
}
@@ -681,7 +680,7 @@ netdev_bsd_rxq_drain(struct netdev_rxq *rxq_)
*/
static int
netdev_bsd_send(struct netdev *netdev_, int qid OVS_UNUSED,
- struct dp_packet **pkts, int cnt, bool may_steal)
+ struct dp_packet_batch *batch, bool may_steal)
{
struct netdev_bsd *dev = netdev_bsd_cast(netdev_);
const char *name = netdev_get_name(netdev_);
@@ -695,12 +694,12 @@ netdev_bsd_send(struct netdev *netdev_, int qid OVS_UNUSED,
error = 0;
}
- for (i = 0; i < cnt; i++) {
- const void *data = dp_packet_data(pkts[i]);
- size_t size = dp_packet_size(pkts[i]);
+ for (i = 0; i < batch->count; i++) {
+ const void *data = dp_packet_data(batch->packets[i]);
+ size_t size = dp_packet_size(batch->packets[i]);
/* Truncate the packet if it is configured. */
- size -= dp_packet_get_cutlen(pkts[i]);
+ size -= dp_packet_get_cutlen(batch->packets[i]);
while (!error) {
ssize_t retval;
@@ -731,11 +730,7 @@ netdev_bsd_send(struct netdev *netdev_, int qid OVS_UNUSED,
}
ovs_mutex_unlock(&dev->mutex);
- if (may_steal) {
- for (i = 0; i < cnt; i++) {
- dp_packet_delete(pkts[i]);
- }
- }
+ dp_packet_delete_batch(batch, may_steal);
return error;
}
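
The open-coded may_steal cleanup loop in netdev_bsd_send() (and in the dummy and Linux netdevs further down) is replaced by dp_packet_delete_batch(). That helper's body is not part of this diff; judging from the loop it replaces, a minimal sketch would be:

    /* Sketch only; the real helper is provided with the dp_packet code. */
    static inline void
    dp_packet_delete_batch(struct dp_packet_batch *batch, bool may_steal)
    {
        if (may_steal) {
            for (int i = 0; i < batch->count; i++) {
                dp_packet_delete(batch->packets[i]);
            }
        }
    }
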
diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
index 85b18fd5e..7fb6457af 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -478,7 +478,8 @@ dpdk_mp_get(int socket_id, int mtu) OVS_REQUIRES(dpdk_mutex)
dmp->mtu = mtu;
dmp->refcount = 1;
mbp_priv.mbuf_data_room_size = MBUF_SIZE(mtu) - sizeof(struct dp_packet);
- mbp_priv.mbuf_priv_size = sizeof (struct dp_packet) - sizeof (struct rte_mbuf);
+ mbp_priv.mbuf_priv_size = sizeof (struct dp_packet) -
+ sizeof (struct rte_mbuf);
mp_size = MAX_NB_MBUF;
do {
@@ -1215,7 +1216,7 @@ netdev_dpdk_vhost_update_rx_counters(struct netdev_stats *stats,
*/
static int
netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq,
- struct dp_packet **packets, int *c)
+ struct dp_packet_batch *batch)
{
struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);
struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(dev);
@@ -1231,7 +1232,7 @@ netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq,
nb_rx = rte_vhost_dequeue_burst(virtio_dev, qid * VIRTIO_QNUM + VIRTIO_TXQ,
dev->dpdk_mp->mp,
- (struct rte_mbuf **)packets,
+ (struct rte_mbuf **) batch->packets,
NETDEV_MAX_BURST);
if (!nb_rx) {
return EAGAIN;
@@ -1239,21 +1240,23 @@ netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq,
if (policer) {
dropped = nb_rx;
- nb_rx = ingress_policer_run(policer, (struct rte_mbuf **)packets, nb_rx);
+ nb_rx = ingress_policer_run(policer,
+ (struct rte_mbuf **) batch->packets,
+ nb_rx);
dropped -= nb_rx;
}
rte_spinlock_lock(&dev->stats_lock);
- netdev_dpdk_vhost_update_rx_counters(&dev->stats, packets, nb_rx, dropped);
+ netdev_dpdk_vhost_update_rx_counters(&dev->stats, batch->packets,
+ nb_rx, dropped);
rte_spinlock_unlock(&dev->stats_lock);
- *c = (int) nb_rx;
+ batch->count = (int) nb_rx;
return 0;
}
static int
-netdev_dpdk_rxq_recv(struct netdev_rxq *rxq, struct dp_packet **packets,
- int *c)
+netdev_dpdk_rxq_recv(struct netdev_rxq *rxq, struct dp_packet_batch *batch)
{
struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);
struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);
@@ -1262,7 +1265,7 @@ netdev_dpdk_rxq_recv(struct netdev_rxq *rxq, struct dp_packet **packets,
int dropped = 0;
nb_rx = rte_eth_rx_burst(rx->port_id, rxq->queue_id,
- (struct rte_mbuf **) packets,
+ (struct rte_mbuf **) batch->packets,
NETDEV_MAX_BURST);
if (!nb_rx) {
return EAGAIN;
@@ -1270,7 +1273,9 @@ netdev_dpdk_rxq_recv(struct netdev_rxq *rxq, struct dp_packet **packets,
if (policer) {
dropped = nb_rx;
- nb_rx = ingress_policer_run(policer, (struct rte_mbuf **) packets, nb_rx);
+ nb_rx = ingress_policer_run(policer,
+ (struct rte_mbuf **)batch->packets,
+ nb_rx);
dropped -= nb_rx;
}
@@ -1281,7 +1286,7 @@ netdev_dpdk_rxq_recv(struct netdev_rxq *rxq, struct dp_packet **packets,
rte_spinlock_unlock(&dev->stats_lock);
}
- *c = nb_rx;
+ batch->count = nb_rx;
return 0;
}
@@ -1384,12 +1389,11 @@ out:
/* Tx function. Transmit packets indefinitely */
static void
-dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet **pkts,
- int cnt)
+dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet_batch *batch)
OVS_NO_THREAD_SAFETY_ANALYSIS
{
#if !defined(__CHECKER__) && !defined(_WIN32)
- const size_t PKT_ARRAY_SIZE = cnt;
+ const size_t PKT_ARRAY_SIZE = batch->count;
#else
/* Sparse or MSVC doesn't like variable length array. */
enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
@@ -1407,8 +1411,8 @@ dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet **pkts,
ovs_mutex_lock(&nonpmd_mempool_mutex);
}
- for (i = 0; i < cnt; i++) {
- int size = dp_packet_size(pkts[i]);
+ for (i = 0; i < batch->count; i++) {
+ int size = dp_packet_size(batch->packets[i]);
if (OVS_UNLIKELY(size > dev->max_packet_len)) {
VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
@@ -1421,16 +1425,17 @@ dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet **pkts,
mbufs[newcnt] = rte_pktmbuf_alloc(dev->dpdk_mp->mp);
if (!mbufs[newcnt]) {
- dropped += cnt - i;
+ dropped += batch->count - i;
break;
}
/* Cut the size so only the truncated size is copied. */
- size -= dp_packet_get_cutlen(pkts[i]);
- dp_packet_reset_cutlen(pkts[i]);
+ size -= dp_packet_get_cutlen(batch->packets[i]);
+ dp_packet_reset_cutlen(batch->packets[i]);
/* We have to do a copy for now */
- memcpy(rte_pktmbuf_mtod(mbufs[newcnt], void *), dp_packet_data(pkts[i]), size);
+ memcpy(rte_pktmbuf_mtod(mbufs[newcnt], void *),
+ dp_packet_data(batch->packets[i]), size);
rte_pktmbuf_data_len(mbufs[newcnt]) = size;
rte_pktmbuf_pkt_len(mbufs[newcnt]) = size;
@@ -1439,7 +1444,8 @@ dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet **pkts,
}
if (dev->type == DPDK_DEV_VHOST) {
- __netdev_dpdk_vhost_send(netdev, qid, (struct dp_packet **) mbufs, newcnt, true);
+ __netdev_dpdk_vhost_send(netdev, qid, (struct dp_packet **) mbufs,
+ newcnt, true);
} else {
unsigned int qos_pkts = newcnt;
@@ -1462,84 +1468,69 @@ dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet **pkts,
}
static int
-netdev_dpdk_vhost_send(struct netdev *netdev, int qid, struct dp_packet **pkts,
- int cnt, bool may_steal)
+netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
+ struct dp_packet_batch *batch,
+ bool may_steal)
{
- if (OVS_UNLIKELY(pkts[0]->source != DPBUF_DPDK)) {
- int i;
- dpdk_do_tx_copy(netdev, qid, pkts, cnt);
- if (may_steal) {
- for (i = 0; i < cnt; i++) {
- dp_packet_delete(pkts[i]);
- }
- }
+ if (OVS_UNLIKELY(batch->packets[0]->source != DPBUF_DPDK)) {
+ dpdk_do_tx_copy(netdev, qid, batch);
+ dp_packet_delete_batch(batch, may_steal);
} else {
- int i;
-
- for (i = 0; i < cnt; i++) {
- int cutlen = dp_packet_get_cutlen(pkts[i]);
-
- dp_packet_set_size(pkts[i], dp_packet_size(pkts[i]) - cutlen);
- dp_packet_reset_cutlen(pkts[i]);
- }
- __netdev_dpdk_vhost_send(netdev, qid, pkts, cnt, may_steal);
+ dp_packet_batch_apply_cutlen(batch);
+ __netdev_dpdk_vhost_send(netdev, qid, batch->packets, batch->count,
+ may_steal);
}
return 0;
}
static inline void
netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
- struct dp_packet **pkts, int cnt, bool may_steal)
+ struct dp_packet_batch *batch, bool may_steal)
{
- int i;
-
if (OVS_UNLIKELY(dev->txq_needs_locking)) {
qid = qid % dev->up.n_txq;
rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
}
if (OVS_UNLIKELY(!may_steal ||
- pkts[0]->source != DPBUF_DPDK)) {
+ batch->packets[0]->source != DPBUF_DPDK)) {
struct netdev *netdev = &dev->up;
- dpdk_do_tx_copy(netdev, qid, pkts, cnt);
-
- if (may_steal) {
- for (i = 0; i < cnt; i++) {
- dp_packet_delete(pkts[i]);
- }
- }
+ dpdk_do_tx_copy(netdev, qid, batch);
+ dp_packet_delete_batch(batch, may_steal);
} else {
int next_tx_idx = 0;
int dropped = 0;
unsigned int qos_pkts = 0;
unsigned int temp_cnt = 0;
+ int cnt = batch->count;
- for (i = 0; i < cnt; i++) {
- int size = dp_packet_size(pkts[i]);
+ for (int i = 0; i < cnt; i++) {
+ int size = dp_packet_size(batch->packets[i]);
- size -= dp_packet_get_cutlen(pkts[i]);
- dp_packet_set_size(pkts[i], size);
+ size -= dp_packet_get_cutlen(batch->packets[i]);
+ dp_packet_set_size(batch->packets[i], size);
if (OVS_UNLIKELY(size > dev->max_packet_len)) {
if (next_tx_idx != i) {
temp_cnt = i - next_tx_idx;
qos_pkts = temp_cnt;
- temp_cnt = netdev_dpdk_qos_run__(dev, (struct rte_mbuf**)pkts,
- temp_cnt);
+ temp_cnt = netdev_dpdk_qos_run__(dev,
+ (struct rte_mbuf**)batch->packets,
+ temp_cnt);
dropped += qos_pkts - temp_cnt;
netdev_dpdk_eth_tx_burst(dev, qid,
- (struct rte_mbuf **)&pkts[next_tx_idx],
- temp_cnt);
+ (struct rte_mbuf **)&batch->packets[next_tx_idx],
+ temp_cnt);
}
VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
(int)size , dev->max_packet_len);
- dp_packet_delete(pkts[i]);
+ dp_packet_delete(batch->packets[i]);
dropped++;
next_tx_idx = i + 1;
}
@@ -1548,11 +1539,12 @@ netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
cnt -= next_tx_idx;
qos_pkts = cnt;
- cnt = netdev_dpdk_qos_run__(dev, (struct rte_mbuf**)pkts, cnt);
+ cnt = netdev_dpdk_qos_run__(dev,
+ (struct rte_mbuf**)batch->packets, cnt);
dropped += qos_pkts - cnt;
netdev_dpdk_eth_tx_burst(dev, qid,
- (struct rte_mbuf **)&pkts[next_tx_idx],
- cnt);
+ (struct rte_mbuf **)&batch->packets[next_tx_idx],
+ cnt);
}
if (OVS_UNLIKELY(dropped)) {
@@ -1569,11 +1561,11 @@ netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
static int
netdev_dpdk_eth_send(struct netdev *netdev, int qid,
- struct dp_packet **pkts, int cnt, bool may_steal)
+ struct dp_packet_batch *batch, bool may_steal)
{
struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
- netdev_dpdk_send__(dev, qid, pkts, cnt, may_steal);
+ netdev_dpdk_send__(dev, qid, batch, may_steal);
return 0;
}
@@ -2011,7 +2003,8 @@ netdev_dpdk_set_miimon(struct netdev *netdev OVS_UNUSED,
static int
netdev_dpdk_update_flags__(struct netdev_dpdk *dev,
enum netdev_flags off, enum netdev_flags on,
- enum netdev_flags *old_flagsp) OVS_REQUIRES(dev->mutex)
+ enum netdev_flags *old_flagsp)
+ OVS_REQUIRES(dev->mutex)
{
int err;
@@ -2518,7 +2511,8 @@ dpdk_ring_create(const char dev_name[], unsigned int port_no,
}
static int
-dpdk_ring_open(const char dev_name[], unsigned int *eth_port_id) OVS_REQUIRES(dpdk_mutex)
+dpdk_ring_open(const char dev_name[], unsigned int *eth_port_id)
+ OVS_REQUIRES(dpdk_mutex)
{
struct dpdk_ring *ivshmem;
unsigned int port_no;
@@ -2544,7 +2538,7 @@ dpdk_ring_open(const char dev_name[], unsigned int *eth_port_id) OVS_REQUIRES(dp
static int
netdev_dpdk_ring_send(struct netdev *netdev, int qid,
- struct dp_packet **pkts, int cnt, bool may_steal)
+ struct dp_packet_batch *batch, bool may_steal)
{
struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
unsigned i;
@@ -2553,11 +2547,11 @@ netdev_dpdk_ring_send(struct netdev *netdev, int qid,
* rss hash field is clear. This is because the same mbuf may be modified by
* the consumer of the ring and return into the datapath without recalculating
* the RSS hash. */
- for (i = 0; i < cnt; i++) {
- dp_packet_rss_invalidate(pkts[i]);
+ for (i = 0; i < batch->count; i++) {
+ dp_packet_rss_invalidate(batch->packets[i]);
}
- netdev_dpdk_send__(dev, qid, pkts, cnt, may_steal);
+ netdev_dpdk_send__(dev, qid, batch, may_steal);
return 0;
}
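
netdev_dpdk_vhost_send() above now calls dp_packet_batch_apply_cutlen() instead of walking the packets and trimming each one by hand. The helper itself is not shown in this diff; inferred from the removed per-packet code, it is roughly:

    /* Sketch inferred from the loop it replaces; the real helper may also
     * track whether any packet in the batch was actually marked for
     * truncation. */
    static inline void
    dp_packet_batch_apply_cutlen(struct dp_packet_batch *batch)
    {
        for (int i = 0; i < batch->count; i++) {
            struct dp_packet *packet = batch->packets[i];
            int cutlen = dp_packet_get_cutlen(packet);

            dp_packet_set_size(packet, dp_packet_size(packet) - cutlen);
            dp_packet_reset_cutlen(packet);
        }
    }
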
diff --git a/lib/netdev-dummy.c b/lib/netdev-dummy.c
index 9ea765b33..37c6b0226 100644
--- a/lib/netdev-dummy.c
+++ b/lib/netdev-dummy.c
@@ -947,8 +947,7 @@ netdev_dummy_rxq_dealloc(struct netdev_rxq *rxq_)
}
static int
-netdev_dummy_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet **arr,
- int *c)
+netdev_dummy_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet_batch *batch)
{
struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
@@ -991,8 +990,8 @@ netdev_dummy_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet **arr,
dp_packet_pad(packet);
- arr[0] = packet;
- *c = 1;
+ batch->packets[0] = packet;
+ batch->count = 1;
return 0;
}
@@ -1030,17 +1029,17 @@ netdev_dummy_rxq_drain(struct netdev_rxq *rxq_)
static int
netdev_dummy_send(struct netdev *netdev, int qid OVS_UNUSED,
- struct dp_packet **pkts, int cnt, bool may_steal)
+ struct dp_packet_batch *batch, bool may_steal)
{
struct netdev_dummy *dev = netdev_dummy_cast(netdev);
int error = 0;
int i;
- for (i = 0; i < cnt; i++) {
- const void *buffer = dp_packet_data(pkts[i]);
- size_t size = dp_packet_size(pkts[i]);
+ for (i = 0; i < batch->count; i++) {
+ const void *buffer = dp_packet_data(batch->packets[i]);
+ size_t size = dp_packet_size(batch->packets[i]);
- size -= dp_packet_get_cutlen(pkts[i]);
+ size -= dp_packet_get_cutlen(batch->packets[i]);
if (size < ETH_HEADER_LEN) {
error = EMSGSIZE;
@@ -1096,11 +1095,7 @@ netdev_dummy_send(struct netdev *netdev, int qid OVS_UNUSED,
ovs_mutex_unlock(&dev->mutex);
}
- if (may_steal) {
- for (i = 0; i < cnt; i++) {
- dp_packet_delete(pkts[i]);
- }
- }
+ dp_packet_delete_batch(batch, may_steal);
return error;
}
diff --git a/lib/netdev-linux.c b/lib/netdev-linux.c
index 486910ab4..60ad38b9e 100644
--- a/lib/netdev-linux.c
+++ b/lib/netdev-linux.c
@@ -1091,8 +1091,7 @@ netdev_linux_rxq_recv_tap(int fd, struct dp_packet *buffer)
}
static int
-netdev_linux_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet **packets,
- int *c)
+netdev_linux_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet_batch *batch)
{
struct netdev_rxq_linux *rx = netdev_rxq_linux_cast(rxq_);
struct netdev *netdev = rx->up.netdev;
@@ -1118,8 +1117,8 @@ netdev_linux_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet **packets,
dp_packet_delete(buffer);
} else {
dp_packet_pad(buffer);
- packets[0] = buffer;
- *c = 1;
+ batch->packets[0] = buffer;
+ batch->count = 1;
}
return retval;
@@ -1161,19 +1160,19 @@ netdev_linux_rxq_drain(struct netdev_rxq *rxq_)
* expected to do additional queuing of packets. */
static int
netdev_linux_send(struct netdev *netdev_, int qid OVS_UNUSED,
- struct dp_packet **pkts, int cnt, bool may_steal)
+ struct dp_packet_batch *batch, bool may_steal)
{
int i;
int error = 0;
/* 'i' is incremented only if there's no error */
- for (i = 0; i < cnt;) {
- const void *data = dp_packet_data(pkts[i]);
- size_t size = dp_packet_size(pkts[i]);
+ for (i = 0; i < batch->count;) {
+ const void *data = dp_packet_data(batch->packets[i]);
+ size_t size = dp_packet_size(batch->packets[i]);
ssize_t retval;
/* Truncate the packet if it is configured. */
- size -= dp_packet_get_cutlen(pkts[i]);
+ size -= dp_packet_get_cutlen(batch->packets[i]);
if (!is_tap_netdev(netdev_)) {
/* Use our AF_PACKET socket to send to this device. */
@@ -1249,11 +1248,7 @@ netdev_linux_send(struct netdev *netdev_, int qid OVS_UNUSED,
i++;
}
- if (may_steal) {
- for (i = 0; i < cnt; i++) {
- dp_packet_delete(pkts[i]);
- }
- }
+ dp_packet_delete_batch(batch, may_steal);
if (error && error != EAGAIN) {
VLOG_WARN_RL(&rl, "error sending Ethernet packet on %s: %s",
diff --git a/lib/netdev-provider.h b/lib/netdev-provider.h
index 5da377f99..c62dbd5a1 100644
--- a/lib/netdev-provider.h
+++ b/lib/netdev-provider.h
@@ -340,8 +340,8 @@ struct netdev_class {
* network device from being usefully used by the netdev-based "userspace
* datapath". It will also prevent the OVS implementation of bonding from
* working properly over 'netdev'.) */
- int (*send)(struct netdev *netdev, int qid, struct dp_packet **buffers,
- int cnt, bool may_steal);
+ int (*send)(struct netdev *netdev, int qid, struct dp_packet_batch *batch,
+ bool may_steal);
/* Registers with the poll loop to wake up from the next call to
* poll_block() when the packet transmission queue for 'netdev' has
@@ -727,25 +727,24 @@ struct netdev_class {
void (*rxq_destruct)(struct netdev_rxq *);
void (*rxq_dealloc)(struct netdev_rxq *);
- /* Attempts to receive a batch of packets from 'rx'. The caller supplies
- * 'pkts' as the pointer to the beginning of an array of MAX_RX_BATCH
- * pointers to dp_packet. If successful, the implementation stores
- * pointers to up to MAX_RX_BATCH dp_packets into the array, transferring
- * ownership of the packets to the caller, stores the number of received
- * packets into '*cnt', and returns 0.
+ /* Attempts to receive a batch of packets from 'rx'. In 'batch', the
+ * caller supplies 'packets' as the pointer to the beginning of an array
+ * of MAX_RX_BATCH pointers to dp_packet. If successful, the
+ * implementation stores pointers to up to MAX_RX_BATCH dp_packets into
+ * the array, transferring ownership of the packets to the caller, stores
+ * the number of received packets into 'count', and returns 0.
*
* The implementation does not necessarily initialize any non-data members
- * of 'pkts'. That is, the caller must initialize layer pointers and
- * metadata itself, if desired, e.g. with pkt_metadata_init() and
- * miniflow_extract().
+ * of 'packets' in 'batch'. That is, the caller must initialize layer
+ * pointers and metadata itself, if desired, e.g. with pkt_metadata_init()
+ * and miniflow_extract().
*
* Implementations should allocate buffers with DP_NETDEV_HEADROOM bytes of
* headroom.
*
* Returns EAGAIN immediately if no packet is ready to be received or
* another positive errno value if an error was encountered. */
- int (*rxq_recv)(struct netdev_rxq *rx, struct dp_packet **pkts,
- int *cnt);
+ int (*rxq_recv)(struct netdev_rxq *rx, struct dp_packet_batch *batch);
/* Registers with the poll loop to wake up from the next call to
* poll_block() when a packet is ready to be received with
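
The rxq_recv contract described in the comment above boils down to: fill batch->packets with packets the caller now owns, set batch->count, and return 0; or return EAGAIN (or another positive errno) without handing anything over. A minimal provider honoring it could look like the sketch below; my_rxq_recv and my_grab_packet are purely illustrative names, not existing OVS code:

    /* Illustrative single-packet receive, mirroring the pattern used by the
     * bsd/dummy/linux providers earlier in this patch. */
    static int
    my_rxq_recv(struct netdev_rxq *rx, struct dp_packet_batch *batch)
    {
        struct dp_packet *packet = my_grab_packet(rx);  /* hypothetical source */

        if (!packet) {
            return EAGAIN;               /* Nothing ready; caller retries later. */
        }
        batch->packets[0] = packet;      /* Ownership passes to the caller. */
        batch->count = 1;
        return 0;
    }
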
diff --git a/lib/netdev.c b/lib/netdev.c
index 66511731d..405bf41ef 100644
--- a/lib/netdev.c
+++ b/lib/netdev.c
@@ -625,7 +625,7 @@ netdev_rxq_recv(struct netdev_rxq *rx, struct dp_packet_batch *batch)
{
int retval;
- retval = rx->netdev->netdev_class->rxq_recv(rx, batch->packets, &batch->count);
+ retval = rx->netdev->netdev_class->rxq_recv(rx, batch);
if (!retval) {
COVERAGE_INC(netdev_received);
} else {
@@ -711,9 +711,7 @@ netdev_send(struct netdev *netdev, int qid, struct dp_packet_batch *batch,
return EOPNOTSUPP;
}
- int error = netdev->netdev_class->send(netdev, qid,
- batch->packets, batch->count,
- may_steal);
+ int error = netdev->netdev_class->send(netdev, qid, batch, may_steal);
if (!error) {
COVERAGE_INC(netdev_sent);
if (!may_steal) {
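
On the caller side, the netdev.c facade now simply forwards the batch. A sketch of how a datapath-style consumer might drive the new signatures (poll_and_forward is an illustrative name, not OVS code; initialization and error handling are simplified):

    #include "netdev.h"
    #include "dp-packet.h"

    /* Receive one batch from 'rxq' and, if anything arrived, hand it to 'out'.
     * With may_steal=true the send path takes ownership and frees the packets. */
    static void
    poll_and_forward(struct netdev_rxq *rxq, struct netdev *out, int qid)
    {
        struct dp_packet_batch batch;

        batch.count = 0;
        if (!netdev_rxq_recv(rxq, &batch)) {
            netdev_send(out, qid, &batch, true);
        }
    }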