/* * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2016 Nicira, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include "dpif-netdev.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DPDK_NETDEV #include #endif #include "bitmap.h" #include "cmap.h" #include "conntrack.h" #include "coverage.h" #include "ct-dpif.h" #include "csum.h" #include "dp-packet.h" #include "dpif.h" #include "dpif-provider.h" #include "dummy.h" #include "fat-rwlock.h" #include "flow.h" #include "hmapx.h" #include "latch.h" #include "netdev.h" #include "netdev-vport.h" #include "netlink.h" #include "odp-execute.h" #include "odp-util.h" #include "openvswitch/dynamic-string.h" #include "openvswitch/list.h" #include "openvswitch/match.h" #include "openvswitch/ofp-print.h" #include "openvswitch/ofp-util.h" #include "openvswitch/ofpbuf.h" #include "openvswitch/shash.h" #include "openvswitch/vlog.h" #include "ovs-numa.h" #include "ovs-rcu.h" #include "packets.h" #include "poll-loop.h" #include "pvector.h" #include "random.h" #include "seq.h" #include "smap.h" #include "sset.h" #include "timeval.h" #include "tnl-neigh-cache.h" #include "tnl-ports.h" #include "unixctl.h" #include "util.h" VLOG_DEFINE_THIS_MODULE(dpif_netdev); #define FLOW_DUMP_MAX_BATCH 50 /* Use per thread recirc_depth to prevent recirculation loop. */ #define MAX_RECIRC_DEPTH 5 DEFINE_STATIC_PER_THREAD_DATA(uint32_t, recirc_depth, 0) /* Configuration parameters. */ enum { MAX_FLOWS = 65536 }; /* Maximum number of flows in flow table. */ /* Protects against changes to 'dp_netdevs'. */ static struct ovs_mutex dp_netdev_mutex = OVS_MUTEX_INITIALIZER; /* Contains all 'struct dp_netdev's. */ static struct shash dp_netdevs OVS_GUARDED_BY(dp_netdev_mutex) = SHASH_INITIALIZER(&dp_netdevs); static struct vlog_rate_limit upcall_rl = VLOG_RATE_LIMIT_INIT(600, 600); #define DP_NETDEV_CS_SUPPORTED_MASK (CS_NEW | CS_ESTABLISHED | CS_RELATED \ | CS_INVALID | CS_REPLY_DIR | CS_TRACKED) #define DP_NETDEV_CS_UNSUPPORTED_MASK (~(uint32_t)DP_NETDEV_CS_SUPPORTED_MASK) static struct odp_support dp_netdev_support = { .max_mpls_depth = SIZE_MAX, .recirc = true, .ct_state = true, .ct_zone = true, .ct_mark = true, .ct_label = true, }; /* Stores a miniflow with inline values */ struct netdev_flow_key { uint32_t hash; /* Hash function differs for different users. */ uint32_t len; /* Length of the following miniflow (incl. map). */ struct miniflow mf; uint64_t buf[FLOW_MAX_PACKET_U64S]; }; /* Exact match cache for frequently used flows * * The cache uses a 32-bit hash of the packet (which can be the RSS hash) to * search its entries for a miniflow that matches exactly the miniflow of the * packet. It stores the 'dpcls_rule' (rule) that matches the miniflow. * * A cache entry holds a reference to its 'dp_netdev_flow'. * * A miniflow with a given hash can be in one of EM_FLOW_HASH_SEGS different * entries. 
The 32-bit hash is split into EM_FLOW_HASH_SEGS values (each of * them is EM_FLOW_HASH_SHIFT bits wide and the remainder is thrown away). Each * value is the index of a cache entry where the miniflow could be. * * * Thread-safety * ============= * * Each pmd_thread has its own private exact match cache. * If dp_netdev_input is not called from a pmd thread, a mutex is used. */ #define EM_FLOW_HASH_SHIFT 13 #define EM_FLOW_HASH_ENTRIES (1u << EM_FLOW_HASH_SHIFT) #define EM_FLOW_HASH_MASK (EM_FLOW_HASH_ENTRIES - 1) #define EM_FLOW_HASH_SEGS 2 struct emc_entry { struct dp_netdev_flow *flow; struct netdev_flow_key key; /* key.hash used for emc hash value. */ }; struct emc_cache { struct emc_entry entries[EM_FLOW_HASH_ENTRIES]; int sweep_idx; /* For emc_cache_slow_sweep(). */ }; /* Iterate in the exact match cache through every entry that might contain a * miniflow with hash 'HASH'. */ #define EMC_FOR_EACH_POS_WITH_HASH(EMC, CURRENT_ENTRY, HASH) \ for (uint32_t i__ = 0, srch_hash__ = (HASH); \ (CURRENT_ENTRY) = &(EMC)->entries[srch_hash__ & EM_FLOW_HASH_MASK], \ i__ < EM_FLOW_HASH_SEGS; \ i__++, srch_hash__ >>= EM_FLOW_HASH_SHIFT) /* Simple non-wildcarding single-priority classifier. */ /* Time in ms between successive optimizations of the dpcls subtable vector */ #define DPCLS_OPTIMIZATION_INTERVAL 1000 struct dpcls { struct cmap_node node; /* Within dp_netdev_pmd_thread.classifiers */ odp_port_t in_port; struct cmap subtables_map; struct pvector subtables; }; /* A rule to be inserted to the classifier. */ struct dpcls_rule { struct cmap_node cmap_node; /* Within struct dpcls_subtable 'rules'. */ struct netdev_flow_key *mask; /* Subtable's mask. */ struct netdev_flow_key flow; /* Matching key. */ /* 'flow' must be the last field, additional space is allocated here. */ }; static void dpcls_init(struct dpcls *); static void dpcls_destroy(struct dpcls *); static void dpcls_sort_subtable_vector(struct dpcls *); static void dpcls_insert(struct dpcls *, struct dpcls_rule *, const struct netdev_flow_key *mask); static void dpcls_remove(struct dpcls *, struct dpcls_rule *); static bool dpcls_lookup(struct dpcls *cls, const struct netdev_flow_key keys[], struct dpcls_rule **rules, size_t cnt, int *num_lookups_p); /* Datapath based on the network device interface from netdev.h. * * * Thread-safety * ============= * * Some members, marked 'const', are immutable. Accessing other members * requires synchronization, as noted in more detail below. * * Acquisition order is, from outermost to innermost: * * dp_netdev_mutex (global) * port_mutex * non_pmd_mutex */ struct dp_netdev { const struct dpif_class *const class; const char *const name; struct dpif *dpif; struct ovs_refcount ref_cnt; atomic_flag destroyed; /* Ports. * * Any lookup into 'ports' or any access to the dp_netdev_ports found * through 'ports' requires taking 'port_mutex'. */ struct ovs_mutex port_mutex; struct hmap ports; struct seq *port_seq; /* Incremented whenever a port changes. */ /* Protects access to ofproto-dpif-upcall interface during revalidator * thread synchronization. */ struct fat_rwlock upcall_rwlock; upcall_callback *upcall_cb; /* Callback function for executing upcalls. */ void *upcall_aux; /* Callback function for notifying the purging of dp flows (during * reseting pmd deletion). */ dp_purge_callback *dp_purge_cb; void *dp_purge_aux; /* Stores all 'struct dp_netdev_pmd_thread's. */ struct cmap poll_threads; /* Protects the access of the 'struct dp_netdev_pmd_thread' * instance for non-pmd thread. 
 */
    struct ovs_mutex non_pmd_mutex;

    /* Each pmd thread will store its pointer to
     * 'struct dp_netdev_pmd_thread' in 'per_pmd_key'. */
    ovsthread_key_t per_pmd_key;

    struct seq *reconfigure_seq;
    uint64_t last_reconfigure_seq;

    /* CPU mask for pinning pmd threads. */
    char *pmd_cmask;

    uint64_t last_tnl_conf_seq;

    struct conntrack conntrack;
};

static struct dp_netdev_port *dp_netdev_lookup_port(const struct dp_netdev *dp,
                                                    odp_port_t)
    OVS_REQUIRES(dp->port_mutex);

enum dp_stat_type {
    DP_STAT_EXACT_HIT,          /* Packets that had an exact match (emc). */
    DP_STAT_MASKED_HIT,         /* Packets that matched in the flow table. */
    DP_STAT_MISS,               /* Packets that did not match. */
    DP_STAT_LOST,               /* Packets not passed up to the client. */
    DP_STAT_LOOKUP_HIT,         /* Number of subtable lookups for flow table
                                   hits. */
    DP_N_STATS
};

enum pmd_cycles_counter_type {
    PMD_CYCLES_POLLING,         /* Cycles spent polling NICs. */
    PMD_CYCLES_PROCESSING,      /* Cycles spent processing packets. */
    PMD_N_CYCLES
};

#define XPS_TIMEOUT_MS 500LL

/* Contained by struct dp_netdev_port's 'rxqs' member. */
struct dp_netdev_rxq {
    struct netdev_rxq *rxq;
    unsigned core_id;           /* Core to which this queue is pinned. */
};

/* A port in a netdev-based datapath. */
struct dp_netdev_port {
    odp_port_t port_no;
    struct netdev *netdev;
    struct hmap_node node;      /* Node in dp_netdev's 'ports'. */
    struct netdev_saved_flags *sf;
    struct dp_netdev_rxq *rxqs;
    unsigned n_rxq;             /* Number of elements in 'rxqs'. */
    bool dynamic_txqs;          /* If true XPS will be used. */
    unsigned *txq_used;         /* Number of threads that use each tx queue. */
    struct ovs_mutex txq_used_mutex;
    char *type;                 /* Port type as requested by user. */
    char *rxq_affinity_list;    /* Requested affinity of rx queues. */
};

/* Contained by struct dp_netdev_flow's 'stats' member. */
struct dp_netdev_flow_stats {
    atomic_llong used;             /* Last used time, in monotonic msecs. */
    atomic_ullong packet_count;    /* Number of packets matched. */
    atomic_ullong byte_count;      /* Number of bytes matched. */
    atomic_uint16_t tcp_flags;     /* Bitwise-OR of seen tcp_flags values. */
};

/* A flow in 'dp_netdev_pmd_thread's 'flow_table'.
 *
 *
 * Thread-safety
 * =============
 *
 * Except near the beginning or ending of its lifespan, rule 'rule' belongs to
 * its pmd thread's classifier.  The text below calls this classifier 'cls'.
 *
 * Motivation
 * ----------
 *
 * The thread safety rules described here for "struct dp_netdev_flow" are
 * motivated by two goals:
 *
 *    - Prevent threads that read members of "struct dp_netdev_flow" from
 *      reading bad data due to changes by some thread concurrently modifying
 *      those members.
 *
 *    - Prevent two threads making changes to members of a given "struct
 *      dp_netdev_flow" from interfering with each other.
 *
 *
 * Rules
 * -----
 *
 * A flow 'flow' may be accessed without a risk of being freed during an RCU
 * grace period.  Code that needs to hold onto a flow for a while
 * should try incrementing 'flow->ref_cnt' with dp_netdev_flow_ref().
 *
 * 'flow->ref_cnt' protects 'flow' from being freed.  It doesn't protect the
 * flow from being deleted from 'cls' and it doesn't protect members of 'flow'
 * from modification.
 *
 * Some members, marked 'const', are immutable.  Accessing other members
 * requires synchronization, as noted in more detail below.
 */
struct dp_netdev_flow {
    const struct flow flow;      /* Unmasked flow that created this entry. */
    /* Hash table index by unmasked flow. */
    const struct cmap_node node; /* In owning dp_netdev_pmd_thread's
                                  * 'flow_table'. */
    const ovs_u128 ufid;         /* Unique flow identifier. */
    const unsigned pmd_id;       /* The 'core_id' of pmd thread owning this
                                  * flow. */

    /* Number of references.
     * The classifier owns one reference.
     * Any thread trying to keep a rule from being freed should hold its own
     * reference. */
    struct ovs_refcount ref_cnt;

    bool dead;

    /* Statistics. */
    struct dp_netdev_flow_stats stats;

    /* Actions. */
    OVSRCU_TYPE(struct dp_netdev_actions *) actions;

    /* While processing a group of input packets, the datapath uses the next
     * member to store a pointer to the output batch for the flow.  It is
     * reset after the batch has been sent out (See dp_netdev_queue_batches(),
     * packet_batch_per_flow_init() and packet_batch_per_flow_execute()). */
    struct packet_batch_per_flow *batch;

    /* Packet classification. */
    struct dpcls_rule cr;        /* In owning dp_netdev's 'cls'. */
    /* 'cr' must be the last member. */
};

static void dp_netdev_flow_unref(struct dp_netdev_flow *);
static bool dp_netdev_flow_ref(struct dp_netdev_flow *);
static int dpif_netdev_flow_from_nlattrs(const struct nlattr *, uint32_t,
                                         struct flow *);

/* A set of datapath actions within a "struct dp_netdev_flow".
 *
 *
 * Thread-safety
 * =============
 *
 * A struct dp_netdev_actions 'actions' is protected with RCU. */
struct dp_netdev_actions {
    /* These members are immutable: they do not change during the struct's
     * lifetime. */
    unsigned int size;          /* Size of 'actions', in bytes. */
    struct nlattr actions[];    /* Sequence of OVS_ACTION_ATTR_* attributes. */
};

struct dp_netdev_actions *dp_netdev_actions_create(const struct nlattr *,
                                                   size_t);
struct dp_netdev_actions *dp_netdev_flow_get_actions(
    const struct dp_netdev_flow *);
static void dp_netdev_actions_free(struct dp_netdev_actions *);

/* Contained by struct dp_netdev_pmd_thread's 'stats' member. */
struct dp_netdev_pmd_stats {
    /* Indexed by DP_STAT_*. */
    atomic_ullong n[DP_N_STATS];
};

/* Contained by struct dp_netdev_pmd_thread's 'cycles' member. */
struct dp_netdev_pmd_cycles {
    /* Indexed by PMD_CYCLES_*. */
    atomic_ullong n[PMD_N_CYCLES];
};

/* Contained by struct dp_netdev_pmd_thread's 'poll_list' member. */
struct rxq_poll {
    struct dp_netdev_port *port;
    struct netdev_rxq *rx;
    struct ovs_list node;
};

/* Contained by struct dp_netdev_pmd_thread's 'port_cache' or 'tx_ports'. */
struct tx_port {
    struct dp_netdev_port *port;
    int qid;
    long long last_used;
    struct hmap_node node;
};

/* PMD: Poll mode drivers.  PMD accesses devices via polling to eliminate
 * the performance overhead of interrupt processing.  Therefore netdev can
 * not implement rx-wait for these devices.  dpif-netdev needs to poll
 * these devices to check for the recv buffer.  pmd-thread does polling for
 * devices assigned to itself.
 *
 * DPDK uses PMD for accessing NICs.
 *
 * Note: the instance with cpu core id NON_PMD_CORE_ID is reserved for
 * I/O of all non-pmd threads.  No actual thread is created for this
 * instance.
 *
 * Each struct dp_netdev_pmd_thread has its own flow table and classifier.
 * Packets received from managed ports are looked up in the corresponding
 * pmd thread's flow table, and are executed with the found actions.
 */
struct dp_netdev_pmd_thread {
    struct dp_netdev *dp;
    struct ovs_refcount ref_cnt;    /* Every reference must be refcount'ed. */
    struct cmap_node node;          /* In 'dp->poll_threads'. */

    pthread_cond_t cond;            /* For synchronizing pmd thread reload. */
    struct ovs_mutex cond_mutex;    /* Mutex for condition variable. */

    /* Per thread exact-match cache.  Note, the instance for cpu core
     * NON_PMD_CORE_ID can be accessed by multiple threads, and thus
     * needs to be protected by 'non_pmd_mutex'.  Every other instance
     * will only be accessed by its own pmd thread. */
    struct emc_cache flow_cache;

    /* Flow-Table and classifiers
     *
     * Writers of 'flow_table' must take the 'flow_mutex'.  Corresponding
     * changes to 'classifiers' must be made while still holding the
     * 'flow_mutex'. */
    struct ovs_mutex flow_mutex;
    struct cmap flow_table OVS_GUARDED; /* Flow table. */

    /* One classifier per in_port polled by the pmd. */
    struct cmap classifiers;
    /* Periodically sort subtable vectors according to hit frequencies. */
    long long int next_optimization;

    /* Statistics. */
    struct dp_netdev_pmd_stats stats;

    /* Cycles counters. */
    struct dp_netdev_pmd_cycles cycles;

    /* Used to count cycles.  See 'cycles_counter_end()'. */
    unsigned long long last_cycles;

    struct latch exit_latch;        /* For terminating the pmd thread. */
    atomic_uint change_seq;         /* For reloading pmd ports. */
    pthread_t thread;
    unsigned core_id;               /* CPU core id of this pmd thread. */
    int numa_id;                    /* numa node id of this pmd thread. */
    bool isolated;

    /* Queue id used by this pmd thread to send packets on all netdevs if
     * XPS disabled for this netdev.  All static_tx_qid's are unique and less
     * than 'ovs_numa_get_n_cores() + 1'. */
    atomic_int static_tx_qid;

    struct ovs_mutex port_mutex;    /* Mutex for 'poll_list' and 'tx_ports'. */
    /* List of rx queues to poll. */
    struct ovs_list poll_list OVS_GUARDED;
    /* Number of elements in 'poll_list'. */
    int poll_cnt;
    /* Map of 'tx_port's used for transmission.  Written by the main thread,
     * read by the pmd thread. */
    struct hmap tx_ports OVS_GUARDED;

    /* Map of 'tx_port' used in the fast path.  This is a thread-local copy of
     * 'tx_ports'.  The instance for cpu core NON_PMD_CORE_ID can be accessed
     * by multiple threads, and thus needs to be protected by 'non_pmd_mutex'.
     * Every other instance will only be accessed by its own pmd thread. */
    struct hmap port_cache;

    /* Only a pmd thread can write on its own 'cycles' and 'stats'.
     * The main thread keeps 'stats_zero' and 'cycles_zero' as base
     * values and subtracts them from 'stats' and 'cycles' before
     * reporting to the user. */
    unsigned long long stats_zero[DP_N_STATS];
    uint64_t cycles_zero[PMD_N_CYCLES];
};

#define PMD_INITIAL_SEQ 1

/* Interface to netdev-based datapath.
*/ struct dpif_netdev { struct dpif dpif; struct dp_netdev *dp; uint64_t last_port_seq; }; static int get_port_by_number(struct dp_netdev *dp, odp_port_t port_no, struct dp_netdev_port **portp) OVS_REQUIRES(dp->port_mutex); static int get_port_by_name(struct dp_netdev *dp, const char *devname, struct dp_netdev_port **portp) OVS_REQUIRES(dp->port_mutex); static void dp_netdev_free(struct dp_netdev *) OVS_REQUIRES(dp_netdev_mutex); static int do_add_port(struct dp_netdev *dp, const char *devname, const char *type, odp_port_t port_no) OVS_REQUIRES(dp->port_mutex); static void do_del_port(struct dp_netdev *dp, struct dp_netdev_port *) OVS_REQUIRES(dp->port_mutex); static int dpif_netdev_open(const struct dpif_class *, const char *name, bool create, struct dpif **); static void dp_netdev_execute_actions(struct dp_netdev_pmd_thread *pmd, struct dp_packet_batch *, bool may_steal, const struct flow *flow, const struct nlattr *actions, size_t actions_len, long long now); static void dp_netdev_input(struct dp_netdev_pmd_thread *, struct dp_packet_batch *, odp_port_t port_no); static void dp_netdev_recirculate(struct dp_netdev_pmd_thread *, struct dp_packet_batch *); static void dp_netdev_disable_upcall(struct dp_netdev *); static void dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread *pmd); static void dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev *dp, unsigned core_id, int numa_id); static void dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread *pmd); static void dp_netdev_set_nonpmd(struct dp_netdev *dp) OVS_REQUIRES(dp->port_mutex); static struct dp_netdev_pmd_thread *dp_netdev_get_pmd(struct dp_netdev *dp, unsigned core_id); static struct dp_netdev_pmd_thread * dp_netdev_pmd_get_next(struct dp_netdev *dp, struct cmap_position *pos); static void dp_netdev_destroy_all_pmds(struct dp_netdev *dp); static void dp_netdev_del_pmds_on_numa(struct dp_netdev *dp, int numa_id); static void dp_netdev_set_pmds_on_numa(struct dp_netdev *dp, int numa_id) OVS_REQUIRES(dp->port_mutex); static void dp_netdev_pmd_clear_ports(struct dp_netdev_pmd_thread *pmd); static void dp_netdev_del_port_from_all_pmds(struct dp_netdev *dp, struct dp_netdev_port *port); static void dp_netdev_add_port_to_pmds(struct dp_netdev *dp, struct dp_netdev_port *port); static void dp_netdev_add_port_tx_to_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev_port *port); static void dp_netdev_add_rxq_to_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev_port *port, struct netdev_rxq *rx); static struct dp_netdev_pmd_thread * dp_netdev_less_loaded_pmd_on_numa(struct dp_netdev *dp, int numa_id); static void dp_netdev_reset_pmd_threads(struct dp_netdev *dp) OVS_REQUIRES(dp->port_mutex); static void reconfigure_pmd_threads(struct dp_netdev *dp) OVS_REQUIRES(dp->port_mutex); static bool dp_netdev_pmd_try_ref(struct dp_netdev_pmd_thread *pmd); static void dp_netdev_pmd_unref(struct dp_netdev_pmd_thread *pmd); static void dp_netdev_pmd_flow_flush(struct dp_netdev_pmd_thread *pmd); static void pmd_load_cached_ports(struct dp_netdev_pmd_thread *pmd) OVS_REQUIRES(pmd->port_mutex); static inline void dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd); static void dpif_netdev_xps_revalidate_pmd(const struct dp_netdev_pmd_thread *pmd, long long now, bool purge); static int dpif_netdev_xps_get_tx_qid(const struct dp_netdev_pmd_thread *pmd, struct tx_port *tx, long long now); static inline bool emc_entry_alive(struct emc_entry *ce); static void emc_clear_entry(struct emc_entry *ce); static void 
emc_cache_init(struct emc_cache *flow_cache) { int i; flow_cache->sweep_idx = 0; for (i = 0; i < ARRAY_SIZE(flow_cache->entries); i++) { flow_cache->entries[i].flow = NULL; flow_cache->entries[i].key.hash = 0; flow_cache->entries[i].key.len = sizeof(struct miniflow); flowmap_init(&flow_cache->entries[i].key.mf.map); } } static void emc_cache_uninit(struct emc_cache *flow_cache) { int i; for (i = 0; i < ARRAY_SIZE(flow_cache->entries); i++) { emc_clear_entry(&flow_cache->entries[i]); } } /* Check and clear dead flow references slowly (one entry at each * invocation). */ static void emc_cache_slow_sweep(struct emc_cache *flow_cache) { struct emc_entry *entry = &flow_cache->entries[flow_cache->sweep_idx]; if (!emc_entry_alive(entry)) { emc_clear_entry(entry); } flow_cache->sweep_idx = (flow_cache->sweep_idx + 1) & EM_FLOW_HASH_MASK; } /* Returns true if 'dpif' is a netdev or dummy dpif, false otherwise. */ bool dpif_is_netdev(const struct dpif *dpif) { return dpif->dpif_class->open == dpif_netdev_open; } static struct dpif_netdev * dpif_netdev_cast(const struct dpif *dpif) { ovs_assert(dpif_is_netdev(dpif)); return CONTAINER_OF(dpif, struct dpif_netdev, dpif); } static struct dp_netdev * get_dp_netdev(const struct dpif *dpif) { return dpif_netdev_cast(dpif)->dp; } enum pmd_info_type { PMD_INFO_SHOW_STATS, /* Show how cpu cycles are spent. */ PMD_INFO_CLEAR_STATS, /* Set the cycles count to 0. */ PMD_INFO_SHOW_RXQ /* Show poll-lists of pmd threads. */ }; static void pmd_info_show_stats(struct ds *reply, struct dp_netdev_pmd_thread *pmd, unsigned long long stats[DP_N_STATS], uint64_t cycles[PMD_N_CYCLES]) { unsigned long long total_packets = 0; uint64_t total_cycles = 0; int i; /* These loops subtracts reference values ('*_zero') from the counters. * Since loads and stores are relaxed, it might be possible for a '*_zero' * value to be more recent than the current value we're reading from the * counter. This is not a big problem, since these numbers are not * supposed to be too accurate, but we should at least make sure that * the result is not negative. */ for (i = 0; i < DP_N_STATS; i++) { if (stats[i] > pmd->stats_zero[i]) { stats[i] -= pmd->stats_zero[i]; } else { stats[i] = 0; } if (i != DP_STAT_LOST) { /* Lost packets are already included in DP_STAT_MISS */ total_packets += stats[i]; } } for (i = 0; i < PMD_N_CYCLES; i++) { if (cycles[i] > pmd->cycles_zero[i]) { cycles[i] -= pmd->cycles_zero[i]; } else { cycles[i] = 0; } total_cycles += cycles[i]; } ds_put_cstr(reply, (pmd->core_id == NON_PMD_CORE_ID) ? "main thread" : "pmd thread"); if (pmd->numa_id != OVS_NUMA_UNSPEC) { ds_put_format(reply, " numa_id %d", pmd->numa_id); } if (pmd->core_id != OVS_CORE_UNSPEC && pmd->core_id != NON_PMD_CORE_ID) { ds_put_format(reply, " core_id %u", pmd->core_id); } ds_put_cstr(reply, ":\n"); ds_put_format(reply, "\temc hits:%llu\n\tmegaflow hits:%llu\n" "\tavg. subtable lookups per hit:%.2f\n" "\tmiss:%llu\n\tlost:%llu\n", stats[DP_STAT_EXACT_HIT], stats[DP_STAT_MASKED_HIT], stats[DP_STAT_MASKED_HIT] > 0 ? 
(1.0*stats[DP_STAT_LOOKUP_HIT])/stats[DP_STAT_MASKED_HIT] : 0, stats[DP_STAT_MISS], stats[DP_STAT_LOST]); if (total_cycles == 0) { return; } ds_put_format(reply, "\tpolling cycles:%"PRIu64" (%.02f%%)\n" "\tprocessing cycles:%"PRIu64" (%.02f%%)\n", cycles[PMD_CYCLES_POLLING], cycles[PMD_CYCLES_POLLING] / (double)total_cycles * 100, cycles[PMD_CYCLES_PROCESSING], cycles[PMD_CYCLES_PROCESSING] / (double)total_cycles * 100); if (total_packets == 0) { return; } ds_put_format(reply, "\tavg cycles per packet: %.02f (%"PRIu64"/%llu)\n", total_cycles / (double)total_packets, total_cycles, total_packets); ds_put_format(reply, "\tavg processing cycles per packet: " "%.02f (%"PRIu64"/%llu)\n", cycles[PMD_CYCLES_PROCESSING] / (double)total_packets, cycles[PMD_CYCLES_PROCESSING], total_packets); } static void pmd_info_clear_stats(struct ds *reply OVS_UNUSED, struct dp_netdev_pmd_thread *pmd, unsigned long long stats[DP_N_STATS], uint64_t cycles[PMD_N_CYCLES]) { int i; /* We cannot write 'stats' and 'cycles' (because they're written by other * threads) and we shouldn't change 'stats' (because they're used to count * datapath stats, which must not be cleared here). Instead, we save the * current values and subtract them from the values to be displayed in the * future */ for (i = 0; i < DP_N_STATS; i++) { pmd->stats_zero[i] = stats[i]; } for (i = 0; i < PMD_N_CYCLES; i++) { pmd->cycles_zero[i] = cycles[i]; } } static void pmd_info_show_rxq(struct ds *reply, struct dp_netdev_pmd_thread *pmd) { if (pmd->core_id != NON_PMD_CORE_ID) { struct rxq_poll *poll; const char *prev_name = NULL; ds_put_format(reply, "pmd thread numa_id %d core_id %u:\n\tisolated : %s\n", pmd->numa_id, pmd->core_id, (pmd->isolated) ? "true" : "false"); ovs_mutex_lock(&pmd->port_mutex); LIST_FOR_EACH (poll, node, &pmd->poll_list) { const char *name = netdev_get_name(poll->port->netdev); if (!prev_name || strcmp(name, prev_name)) { if (prev_name) { ds_put_cstr(reply, "\n"); } ds_put_format(reply, "\tport: %s\tqueue-id:", netdev_get_name(poll->port->netdev)); } ds_put_format(reply, " %d", netdev_rxq_get_queue_id(poll->rx)); prev_name = name; } ovs_mutex_unlock(&pmd->port_mutex); ds_put_cstr(reply, "\n"); } } static void dpif_netdev_pmd_info(struct unixctl_conn *conn, int argc, const char *argv[], void *aux) { struct ds reply = DS_EMPTY_INITIALIZER; struct dp_netdev_pmd_thread *pmd; struct dp_netdev *dp = NULL; enum pmd_info_type type = *(enum pmd_info_type *) aux; ovs_mutex_lock(&dp_netdev_mutex); if (argc == 2) { dp = shash_find_data(&dp_netdevs, argv[1]); } else if (shash_count(&dp_netdevs) == 1) { /* There's only one datapath */ dp = shash_first(&dp_netdevs)->data; } if (!dp) { ovs_mutex_unlock(&dp_netdev_mutex); unixctl_command_reply_error(conn, "please specify an existing datapath"); return; } CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { if (type == PMD_INFO_SHOW_RXQ) { pmd_info_show_rxq(&reply, pmd); } else { unsigned long long stats[DP_N_STATS]; uint64_t cycles[PMD_N_CYCLES]; int i; /* Read current stats and cycle counters */ for (i = 0; i < ARRAY_SIZE(stats); i++) { atomic_read_relaxed(&pmd->stats.n[i], &stats[i]); } for (i = 0; i < ARRAY_SIZE(cycles); i++) { atomic_read_relaxed(&pmd->cycles.n[i], &cycles[i]); } if (type == PMD_INFO_CLEAR_STATS) { pmd_info_clear_stats(&reply, pmd, stats, cycles); } else if (type == PMD_INFO_SHOW_STATS) { pmd_info_show_stats(&reply, pmd, stats, cycles); } } } ovs_mutex_unlock(&dp_netdev_mutex); unixctl_command_reply(conn, ds_cstr(&reply)); ds_destroy(&reply); } static int dpif_netdev_init(void) { 
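    /* The commands registered below are normally invoked through ovs-appctl
     * (assuming the usual unixctl/appctl setup), for example:
     *
     *     ovs-appctl dpif-netdev/pmd-stats-show [dp]
     *
     * where the optional [dp] argument names the datapath to inspect. */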
static enum pmd_info_type show_aux = PMD_INFO_SHOW_STATS, clear_aux = PMD_INFO_CLEAR_STATS, poll_aux = PMD_INFO_SHOW_RXQ; unixctl_command_register("dpif-netdev/pmd-stats-show", "[dp]", 0, 1, dpif_netdev_pmd_info, (void *)&show_aux); unixctl_command_register("dpif-netdev/pmd-stats-clear", "[dp]", 0, 1, dpif_netdev_pmd_info, (void *)&clear_aux); unixctl_command_register("dpif-netdev/pmd-rxq-show", "[dp]", 0, 1, dpif_netdev_pmd_info, (void *)&poll_aux); return 0; } static int dpif_netdev_enumerate(struct sset *all_dps, const struct dpif_class *dpif_class) { struct shash_node *node; ovs_mutex_lock(&dp_netdev_mutex); SHASH_FOR_EACH(node, &dp_netdevs) { struct dp_netdev *dp = node->data; if (dpif_class != dp->class) { /* 'dp_netdevs' contains both "netdev" and "dummy" dpifs. * If the class doesn't match, skip this dpif. */ continue; } sset_add(all_dps, node->name); } ovs_mutex_unlock(&dp_netdev_mutex); return 0; } static bool dpif_netdev_class_is_dummy(const struct dpif_class *class) { return class != &dpif_netdev_class; } static const char * dpif_netdev_port_open_type(const struct dpif_class *class, const char *type) { return strcmp(type, "internal") ? type : dpif_netdev_class_is_dummy(class) ? "dummy-internal" : "tap"; } static struct dpif * create_dpif_netdev(struct dp_netdev *dp) { uint16_t netflow_id = hash_string(dp->name, 0); struct dpif_netdev *dpif; ovs_refcount_ref(&dp->ref_cnt); dpif = xmalloc(sizeof *dpif); dpif_init(&dpif->dpif, dp->class, dp->name, netflow_id >> 8, netflow_id); dpif->dp = dp; dpif->last_port_seq = seq_read(dp->port_seq); return &dpif->dpif; } /* Choose an unused, non-zero port number and return it on success. * Return ODPP_NONE on failure. */ static odp_port_t choose_port(struct dp_netdev *dp, const char *name) OVS_REQUIRES(dp->port_mutex) { uint32_t port_no; if (dp->class != &dpif_netdev_class) { const char *p; int start_no = 0; /* If the port name begins with "br", start the number search at * 100 to make writing tests easier. */ if (!strncmp(name, "br", 2)) { start_no = 100; } /* If the port name contains a number, try to assign that port number. * This can make writing unit tests easier because port numbers are * predictable. */ for (p = name; *p != '\0'; p++) { if (isdigit((unsigned char) *p)) { port_no = start_no + strtol(p, NULL, 10); if (port_no > 0 && port_no != odp_to_u32(ODPP_NONE) && !dp_netdev_lookup_port(dp, u32_to_odp(port_no))) { return u32_to_odp(port_no); } break; } } } for (port_no = 1; port_no <= UINT16_MAX; port_no++) { if (!dp_netdev_lookup_port(dp, u32_to_odp(port_no))) { return u32_to_odp(port_no); } } return ODPP_NONE; } static int create_dp_netdev(const char *name, const struct dpif_class *class, struct dp_netdev **dpp) OVS_REQUIRES(dp_netdev_mutex) { struct dp_netdev *dp; int error; dp = xzalloc(sizeof *dp); shash_add(&dp_netdevs, name, dp); *CONST_CAST(const struct dpif_class **, &dp->class) = class; *CONST_CAST(const char **, &dp->name) = xstrdup(name); ovs_refcount_init(&dp->ref_cnt); atomic_flag_clear(&dp->destroyed); ovs_mutex_init(&dp->port_mutex); hmap_init(&dp->ports); dp->port_seq = seq_create(); fat_rwlock_init(&dp->upcall_rwlock); dp->reconfigure_seq = seq_create(); dp->last_reconfigure_seq = seq_read(dp->reconfigure_seq); /* Disable upcalls by default. 
*/ dp_netdev_disable_upcall(dp); dp->upcall_aux = NULL; dp->upcall_cb = NULL; conntrack_init(&dp->conntrack); cmap_init(&dp->poll_threads); ovs_mutex_init_recursive(&dp->non_pmd_mutex); ovsthread_key_create(&dp->per_pmd_key, NULL); ovs_mutex_lock(&dp->port_mutex); dp_netdev_set_nonpmd(dp); error = do_add_port(dp, name, dpif_netdev_port_open_type(dp->class, "internal"), ODPP_LOCAL); ovs_mutex_unlock(&dp->port_mutex); if (error) { dp_netdev_free(dp); return error; } dp->last_tnl_conf_seq = seq_read(tnl_conf_seq); *dpp = dp; return 0; } static void dp_netdev_request_reconfigure(struct dp_netdev *dp) { seq_change(dp->reconfigure_seq); } static bool dp_netdev_is_reconf_required(struct dp_netdev *dp) { return seq_read(dp->reconfigure_seq) != dp->last_reconfigure_seq; } static int dpif_netdev_open(const struct dpif_class *class, const char *name, bool create, struct dpif **dpifp) { struct dp_netdev *dp; int error; ovs_mutex_lock(&dp_netdev_mutex); dp = shash_find_data(&dp_netdevs, name); if (!dp) { error = create ? create_dp_netdev(name, class, &dp) : ENODEV; } else { error = (dp->class != class ? EINVAL : create ? EEXIST : 0); } if (!error) { *dpifp = create_dpif_netdev(dp); dp->dpif = *dpifp; } ovs_mutex_unlock(&dp_netdev_mutex); return error; } static void dp_netdev_destroy_upcall_lock(struct dp_netdev *dp) OVS_NO_THREAD_SAFETY_ANALYSIS { /* Check that upcalls are disabled, i.e. that the rwlock is taken */ ovs_assert(fat_rwlock_tryrdlock(&dp->upcall_rwlock)); /* Before freeing a lock we should release it */ fat_rwlock_unlock(&dp->upcall_rwlock); fat_rwlock_destroy(&dp->upcall_rwlock); } /* Requires dp_netdev_mutex so that we can't get a new reference to 'dp' * through the 'dp_netdevs' shash while freeing 'dp'. */ static void dp_netdev_free(struct dp_netdev *dp) OVS_REQUIRES(dp_netdev_mutex) { struct dp_netdev_port *port, *next; shash_find_and_delete(&dp_netdevs, dp->name); dp_netdev_destroy_all_pmds(dp); ovs_mutex_destroy(&dp->non_pmd_mutex); ovsthread_key_delete(dp->per_pmd_key); conntrack_destroy(&dp->conntrack); ovs_mutex_lock(&dp->port_mutex); HMAP_FOR_EACH_SAFE (port, next, node, &dp->ports) { do_del_port(dp, port); } ovs_mutex_unlock(&dp->port_mutex); cmap_destroy(&dp->poll_threads); seq_destroy(dp->reconfigure_seq); seq_destroy(dp->port_seq); hmap_destroy(&dp->ports); ovs_mutex_destroy(&dp->port_mutex); /* Upcalls must be disabled at this point */ dp_netdev_destroy_upcall_lock(dp); free(dp->pmd_cmask); free(CONST_CAST(char *, dp->name)); free(dp); } static void dp_netdev_unref(struct dp_netdev *dp) { if (dp) { /* Take dp_netdev_mutex so that, if dp->ref_cnt falls to zero, we can't * get a new reference to 'dp' through the 'dp_netdevs' shash. */ ovs_mutex_lock(&dp_netdev_mutex); if (ovs_refcount_unref_relaxed(&dp->ref_cnt) == 1) { dp_netdev_free(dp); } ovs_mutex_unlock(&dp_netdev_mutex); } } static void dpif_netdev_close(struct dpif *dpif) { struct dp_netdev *dp = get_dp_netdev(dpif); dp_netdev_unref(dp); free(dpif); } static int dpif_netdev_destroy(struct dpif *dpif) { struct dp_netdev *dp = get_dp_netdev(dpif); if (!atomic_flag_test_and_set(&dp->destroyed)) { if (ovs_refcount_unref_relaxed(&dp->ref_cnt) == 1) { /* Can't happen: 'dpif' still owns a reference to 'dp'. */ OVS_NOT_REACHED(); } } return 0; } /* Add 'n' to the atomic variable 'var' non-atomically and using relaxed * load/store semantics. While the increment is not atomic, the load and * store operations are, making it impossible to read inconsistent values. * * This is used to update thread local stats counters. 
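 *
 * For example (a hypothetical usage sketch, not code from this file), the
 * owning pmd thread could bump one of its counters with
 *
 *     non_atomic_ullong_add(&pmd->stats.n[DP_STAT_MASKED_HIT], 1);
 *
 * while other threads read the same counter with atomic_read_relaxed().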
*/ static void non_atomic_ullong_add(atomic_ullong *var, unsigned long long n) { unsigned long long tmp; atomic_read_relaxed(var, &tmp); tmp += n; atomic_store_relaxed(var, tmp); } static int dpif_netdev_get_stats(const struct dpif *dpif, struct dpif_dp_stats *stats) { struct dp_netdev *dp = get_dp_netdev(dpif); struct dp_netdev_pmd_thread *pmd; stats->n_flows = stats->n_hit = stats->n_missed = stats->n_lost = 0; CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { unsigned long long n; stats->n_flows += cmap_count(&pmd->flow_table); atomic_read_relaxed(&pmd->stats.n[DP_STAT_MASKED_HIT], &n); stats->n_hit += n; atomic_read_relaxed(&pmd->stats.n[DP_STAT_EXACT_HIT], &n); stats->n_hit += n; atomic_read_relaxed(&pmd->stats.n[DP_STAT_MISS], &n); stats->n_missed += n; atomic_read_relaxed(&pmd->stats.n[DP_STAT_LOST], &n); stats->n_lost += n; } stats->n_masks = UINT32_MAX; stats->n_mask_hit = UINT64_MAX; return 0; } static void dp_netdev_reload_pmd__(struct dp_netdev_pmd_thread *pmd) { int old_seq; if (pmd->core_id == NON_PMD_CORE_ID) { ovs_mutex_lock(&pmd->dp->non_pmd_mutex); ovs_mutex_lock(&pmd->port_mutex); pmd_load_cached_ports(pmd); ovs_mutex_unlock(&pmd->port_mutex); ovs_mutex_unlock(&pmd->dp->non_pmd_mutex); return; } ovs_mutex_lock(&pmd->cond_mutex); atomic_add_relaxed(&pmd->change_seq, 1, &old_seq); ovs_mutex_cond_wait(&pmd->cond, &pmd->cond_mutex); ovs_mutex_unlock(&pmd->cond_mutex); } static uint32_t hash_port_no(odp_port_t port_no) { return hash_int(odp_to_u32(port_no), 0); } static int port_create(const char *devname, const char *type, odp_port_t port_no, struct dp_netdev_port **portp) { struct netdev_saved_flags *sf; struct dp_netdev_port *port; enum netdev_flags flags; struct netdev *netdev; int n_open_rxqs = 0; int n_cores = 0; int i, error; bool dynamic_txqs = false; *portp = NULL; /* Open and validate network device. */ error = netdev_open(devname, type, &netdev); if (error) { return error; } /* XXX reject non-Ethernet devices */ netdev_get_flags(netdev, &flags); if (flags & NETDEV_LOOPBACK) { VLOG_ERR("%s: cannot add a loopback device", devname); error = EINVAL; goto out; } if (netdev_is_pmd(netdev)) { n_cores = ovs_numa_get_n_cores(); if (n_cores == OVS_CORE_UNSPEC) { VLOG_ERR("%s, cannot get cpu core info", devname); error = ENOENT; goto out; } /* There can only be ovs_numa_get_n_cores() pmd threads, * so creates a txq for each, and one extra for the non * pmd threads. 
*/ error = netdev_set_tx_multiq(netdev, n_cores + 1); if (error && (error != EOPNOTSUPP)) { VLOG_ERR("%s, cannot set multiq", devname); goto out; } } if (netdev_is_reconf_required(netdev)) { error = netdev_reconfigure(netdev); if (error) { goto out; } } if (netdev_is_pmd(netdev)) { if (netdev_n_txq(netdev) < n_cores + 1) { dynamic_txqs = true; } } port = xzalloc(sizeof *port); port->port_no = port_no; port->netdev = netdev; port->n_rxq = netdev_n_rxq(netdev); port->rxqs = xcalloc(port->n_rxq, sizeof *port->rxqs); port->txq_used = xcalloc(netdev_n_txq(netdev), sizeof *port->txq_used); port->type = xstrdup(type); ovs_mutex_init(&port->txq_used_mutex); port->dynamic_txqs = dynamic_txqs; for (i = 0; i < port->n_rxq; i++) { error = netdev_rxq_open(netdev, &port->rxqs[i].rxq, i); if (error) { VLOG_ERR("%s: cannot receive packets on this network device (%s)", devname, ovs_strerror(errno)); goto out_rxq_close; } port->rxqs[i].core_id = -1; n_open_rxqs++; } error = netdev_turn_flags_on(netdev, NETDEV_PROMISC, &sf); if (error) { goto out_rxq_close; } port->sf = sf; *portp = port; return 0; out_rxq_close: for (i = 0; i < n_open_rxqs; i++) { netdev_rxq_close(port->rxqs[i].rxq); } ovs_mutex_destroy(&port->txq_used_mutex); free(port->type); free(port->txq_used); free(port->rxqs); free(port); out: netdev_close(netdev); return error; } static int do_add_port(struct dp_netdev *dp, const char *devname, const char *type, odp_port_t port_no) OVS_REQUIRES(dp->port_mutex) { struct dp_netdev_port *port; int error; /* Reject devices already in 'dp'. */ if (!get_port_by_name(dp, devname, &port)) { return EEXIST; } error = port_create(devname, type, port_no, &port); if (error) { return error; } if (netdev_is_pmd(port->netdev)) { int numa_id = netdev_get_numa_id(port->netdev); ovs_assert(ovs_numa_numa_id_is_valid(numa_id)); dp_netdev_set_pmds_on_numa(dp, numa_id); } dp_netdev_add_port_to_pmds(dp, port); hmap_insert(&dp->ports, &port->node, hash_port_no(port_no)); seq_change(dp->port_seq); return 0; } static int dpif_netdev_port_add(struct dpif *dpif, struct netdev *netdev, odp_port_t *port_nop) { struct dp_netdev *dp = get_dp_netdev(dpif); char namebuf[NETDEV_VPORT_NAME_BUFSIZE]; const char *dpif_port; odp_port_t port_no; int error; ovs_mutex_lock(&dp->port_mutex); dpif_port = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf); if (*port_nop != ODPP_NONE) { port_no = *port_nop; error = dp_netdev_lookup_port(dp, *port_nop) ? EBUSY : 0; } else { port_no = choose_port(dp, dpif_port); error = port_no == ODPP_NONE ? 
EFBIG : 0; } if (!error) { *port_nop = port_no; error = do_add_port(dp, dpif_port, netdev_get_type(netdev), port_no); } ovs_mutex_unlock(&dp->port_mutex); return error; } static int dpif_netdev_port_del(struct dpif *dpif, odp_port_t port_no) { struct dp_netdev *dp = get_dp_netdev(dpif); int error; ovs_mutex_lock(&dp->port_mutex); if (port_no == ODPP_LOCAL) { error = EINVAL; } else { struct dp_netdev_port *port; error = get_port_by_number(dp, port_no, &port); if (!error) { do_del_port(dp, port); } } ovs_mutex_unlock(&dp->port_mutex); return error; } static bool is_valid_port_number(odp_port_t port_no) { return port_no != ODPP_NONE; } static struct dp_netdev_port * dp_netdev_lookup_port(const struct dp_netdev *dp, odp_port_t port_no) OVS_REQUIRES(dp->port_mutex) { struct dp_netdev_port *port; HMAP_FOR_EACH_WITH_HASH (port, node, hash_port_no(port_no), &dp->ports) { if (port->port_no == port_no) { return port; } } return NULL; } static int get_port_by_number(struct dp_netdev *dp, odp_port_t port_no, struct dp_netdev_port **portp) OVS_REQUIRES(dp->port_mutex) { if (!is_valid_port_number(port_no)) { *portp = NULL; return EINVAL; } else { *portp = dp_netdev_lookup_port(dp, port_no); return *portp ? 0 : ENOENT; } } static void port_destroy(struct dp_netdev_port *port) { if (!port) { return; } netdev_close(port->netdev); netdev_restore_flags(port->sf); for (unsigned i = 0; i < port->n_rxq; i++) { netdev_rxq_close(port->rxqs[i].rxq); } ovs_mutex_destroy(&port->txq_used_mutex); free(port->rxq_affinity_list); free(port->txq_used); free(port->rxqs); free(port->type); free(port); } static int get_port_by_name(struct dp_netdev *dp, const char *devname, struct dp_netdev_port **portp) OVS_REQUIRES(dp->port_mutex) { struct dp_netdev_port *port; HMAP_FOR_EACH (port, node, &dp->ports) { if (!strcmp(netdev_get_name(port->netdev), devname)) { *portp = port; return 0; } } return ENOENT; } static int get_n_pmd_threads(struct dp_netdev *dp) { /* There is one non pmd thread in dp->poll_threads */ return cmap_count(&dp->poll_threads) - 1; } static int get_n_pmd_threads_on_numa(struct dp_netdev *dp, int numa_id) { struct dp_netdev_pmd_thread *pmd; int n_pmds = 0; CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { if (pmd->numa_id == numa_id) { n_pmds++; } } return n_pmds; } /* Returns 'true' if there is a port with pmd netdev and the netdev * is on numa node 'numa_id'. */ static bool has_pmd_port_for_numa(struct dp_netdev *dp, int numa_id) OVS_REQUIRES(dp->port_mutex) { struct dp_netdev_port *port; HMAP_FOR_EACH (port, node, &dp->ports) { if (netdev_is_pmd(port->netdev) && netdev_get_numa_id(port->netdev) == numa_id) { return true; } } return false; } static void do_del_port(struct dp_netdev *dp, struct dp_netdev_port *port) OVS_REQUIRES(dp->port_mutex) { hmap_remove(&dp->ports, &port->node); seq_change(dp->port_seq); dp_netdev_del_port_from_all_pmds(dp, port); if (netdev_is_pmd(port->netdev)) { int numa_id = netdev_get_numa_id(port->netdev); /* PMD threads can not be on invalid numa node. */ ovs_assert(ovs_numa_numa_id_is_valid(numa_id)); /* If there is no netdev on the numa node, deletes the pmd threads * for that numa. 
*/ if (!has_pmd_port_for_numa(dp, numa_id)) { dp_netdev_del_pmds_on_numa(dp, numa_id); } } port_destroy(port); } static void answer_port_query(const struct dp_netdev_port *port, struct dpif_port *dpif_port) { dpif_port->name = xstrdup(netdev_get_name(port->netdev)); dpif_port->type = xstrdup(port->type); dpif_port->port_no = port->port_no; } static int dpif_netdev_port_query_by_number(const struct dpif *dpif, odp_port_t port_no, struct dpif_port *dpif_port) { struct dp_netdev *dp = get_dp_netdev(dpif); struct dp_netdev_port *port; int error; ovs_mutex_lock(&dp->port_mutex); error = get_port_by_number(dp, port_no, &port); if (!error && dpif_port) { answer_port_query(port, dpif_port); } ovs_mutex_unlock(&dp->port_mutex); return error; } static int dpif_netdev_port_query_by_name(const struct dpif *dpif, const char *devname, struct dpif_port *dpif_port) { struct dp_netdev *dp = get_dp_netdev(dpif); struct dp_netdev_port *port; int error; ovs_mutex_lock(&dp->port_mutex); error = get_port_by_name(dp, devname, &port); if (!error && dpif_port) { answer_port_query(port, dpif_port); } ovs_mutex_unlock(&dp->port_mutex); return error; } static void dp_netdev_flow_free(struct dp_netdev_flow *flow) { dp_netdev_actions_free(dp_netdev_flow_get_actions(flow)); free(flow); } static void dp_netdev_flow_unref(struct dp_netdev_flow *flow) { if (ovs_refcount_unref_relaxed(&flow->ref_cnt) == 1) { ovsrcu_postpone(dp_netdev_flow_free, flow); } } static uint32_t dp_netdev_flow_hash(const ovs_u128 *ufid) { return ufid->u32[0]; } static inline struct dpcls * dp_netdev_pmd_lookup_dpcls(struct dp_netdev_pmd_thread *pmd, odp_port_t in_port) { struct dpcls *cls; uint32_t hash = hash_port_no(in_port); CMAP_FOR_EACH_WITH_HASH (cls, node, hash, &pmd->classifiers) { if (cls->in_port == in_port) { /* Port classifier exists already */ return cls; } } return NULL; } static inline struct dpcls * dp_netdev_pmd_find_dpcls(struct dp_netdev_pmd_thread *pmd, odp_port_t in_port) OVS_REQUIRES(pmd->flow_mutex) { struct dpcls *cls = dp_netdev_pmd_lookup_dpcls(pmd, in_port); uint32_t hash = hash_port_no(in_port); if (!cls) { /* Create new classifier for in_port */ cls = xmalloc(sizeof(*cls)); dpcls_init(cls); cls->in_port = in_port; cmap_insert(&pmd->classifiers, &cls->node, hash); VLOG_DBG("Creating dpcls %p for in_port %d", cls, in_port); } return cls; } static void dp_netdev_pmd_remove_flow(struct dp_netdev_pmd_thread *pmd, struct dp_netdev_flow *flow) OVS_REQUIRES(pmd->flow_mutex) { struct cmap_node *node = CONST_CAST(struct cmap_node *, &flow->node); struct dpcls *cls; odp_port_t in_port = flow->flow.in_port.odp_port; cls = dp_netdev_pmd_lookup_dpcls(pmd, in_port); ovs_assert(cls != NULL); dpcls_remove(cls, &flow->cr); cmap_remove(&pmd->flow_table, node, dp_netdev_flow_hash(&flow->ufid)); flow->dead = true; dp_netdev_flow_unref(flow); } static void dp_netdev_pmd_flow_flush(struct dp_netdev_pmd_thread *pmd) { struct dp_netdev_flow *netdev_flow; ovs_mutex_lock(&pmd->flow_mutex); CMAP_FOR_EACH (netdev_flow, node, &pmd->flow_table) { dp_netdev_pmd_remove_flow(pmd, netdev_flow); } ovs_mutex_unlock(&pmd->flow_mutex); } static int dpif_netdev_flow_flush(struct dpif *dpif) { struct dp_netdev *dp = get_dp_netdev(dpif); struct dp_netdev_pmd_thread *pmd; CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { dp_netdev_pmd_flow_flush(pmd); } return 0; } struct dp_netdev_port_state { struct hmap_position position; char *name; }; static int dpif_netdev_port_dump_start(const struct dpif *dpif OVS_UNUSED, void **statep) { *statep = xzalloc(sizeof(struct 
dp_netdev_port_state)); return 0; } static int dpif_netdev_port_dump_next(const struct dpif *dpif, void *state_, struct dpif_port *dpif_port) { struct dp_netdev_port_state *state = state_; struct dp_netdev *dp = get_dp_netdev(dpif); struct hmap_node *node; int retval; ovs_mutex_lock(&dp->port_mutex); node = hmap_at_position(&dp->ports, &state->position); if (node) { struct dp_netdev_port *port; port = CONTAINER_OF(node, struct dp_netdev_port, node); free(state->name); state->name = xstrdup(netdev_get_name(port->netdev)); dpif_port->name = state->name; dpif_port->type = port->type; dpif_port->port_no = port->port_no; retval = 0; } else { retval = EOF; } ovs_mutex_unlock(&dp->port_mutex); return retval; } static int dpif_netdev_port_dump_done(const struct dpif *dpif OVS_UNUSED, void *state_) { struct dp_netdev_port_state *state = state_; free(state->name); free(state); return 0; } static int dpif_netdev_port_poll(const struct dpif *dpif_, char **devnamep OVS_UNUSED) { struct dpif_netdev *dpif = dpif_netdev_cast(dpif_); uint64_t new_port_seq; int error; new_port_seq = seq_read(dpif->dp->port_seq); if (dpif->last_port_seq != new_port_seq) { dpif->last_port_seq = new_port_seq; error = ENOBUFS; } else { error = EAGAIN; } return error; } static void dpif_netdev_port_poll_wait(const struct dpif *dpif_) { struct dpif_netdev *dpif = dpif_netdev_cast(dpif_); seq_wait(dpif->dp->port_seq, dpif->last_port_seq); } static struct dp_netdev_flow * dp_netdev_flow_cast(const struct dpcls_rule *cr) { return cr ? CONTAINER_OF(cr, struct dp_netdev_flow, cr) : NULL; } static bool dp_netdev_flow_ref(struct dp_netdev_flow *flow) { return ovs_refcount_try_ref_rcu(&flow->ref_cnt); } /* netdev_flow_key utilities. * * netdev_flow_key is basically a miniflow. We use these functions * (netdev_flow_key_clone, netdev_flow_key_equal, ...) instead of the miniflow * functions (miniflow_clone_inline, miniflow_equal, ...), because: * * - Since we are dealing exclusively with miniflows created by * miniflow_extract(), if the map is different the miniflow is different. * Therefore we can be faster by comparing the map and the miniflow in a * single memcmp(). * - These functions can be inlined by the compiler. */ /* Given the number of bits set in miniflow's maps, returns the size of the * 'netdev_flow_key.mf' */ static inline size_t netdev_flow_key_size(size_t flow_u64s) { return sizeof(struct miniflow) + MINIFLOW_VALUES_SIZE(flow_u64s); } static inline bool netdev_flow_key_equal(const struct netdev_flow_key *a, const struct netdev_flow_key *b) { /* 'b->len' may be not set yet. */ return a->hash == b->hash && !memcmp(&a->mf, &b->mf, a->len); } /* Used to compare 'netdev_flow_key' in the exact match cache to a miniflow. * The maps are compared bitwise, so both 'key->mf' and 'mf' must have been * generated by miniflow_extract. */ static inline bool netdev_flow_key_equal_mf(const struct netdev_flow_key *key, const struct miniflow *mf) { return !memcmp(&key->mf, mf, key->len); } static inline void netdev_flow_key_clone(struct netdev_flow_key *dst, const struct netdev_flow_key *src) { memcpy(dst, src, offsetof(struct netdev_flow_key, mf) + src->len); } /* Slow. 
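 * This composes a dummy packet with flow_compose() and runs
 * miniflow_extract() on it, so it is only suitable for slow-path callers
 * such as dpif_netdev_flow_put(), not for per-packet processing.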
*/ static void netdev_flow_key_from_flow(struct netdev_flow_key *dst, const struct flow *src) { struct dp_packet packet; uint64_t buf_stub[512 / 8]; dp_packet_use_stub(&packet, buf_stub, sizeof buf_stub); pkt_metadata_from_flow(&packet.md, src); flow_compose(&packet, src); miniflow_extract(&packet, &dst->mf); dp_packet_uninit(&packet); dst->len = netdev_flow_key_size(miniflow_n_values(&dst->mf)); dst->hash = 0; /* Not computed yet. */ } /* Initialize a netdev_flow_key 'mask' from 'match'. */ static inline void netdev_flow_mask_init(struct netdev_flow_key *mask, const struct match *match) { uint64_t *dst = miniflow_values(&mask->mf); struct flowmap fmap; uint32_t hash = 0; size_t idx; /* Only check masks that make sense for the flow. */ flow_wc_map(&match->flow, &fmap); flowmap_init(&mask->mf.map); FLOWMAP_FOR_EACH_INDEX(idx, fmap) { uint64_t mask_u64 = flow_u64_value(&match->wc.masks, idx); if (mask_u64) { flowmap_set(&mask->mf.map, idx, 1); *dst++ = mask_u64; hash = hash_add64(hash, mask_u64); } } map_t map; FLOWMAP_FOR_EACH_MAP (map, mask->mf.map) { hash = hash_add64(hash, map); } size_t n = dst - miniflow_get_values(&mask->mf); mask->hash = hash_finish(hash, n * 8); mask->len = netdev_flow_key_size(n); } /* Initializes 'dst' as a copy of 'flow' masked with 'mask'. */ static inline void netdev_flow_key_init_masked(struct netdev_flow_key *dst, const struct flow *flow, const struct netdev_flow_key *mask) { uint64_t *dst_u64 = miniflow_values(&dst->mf); const uint64_t *mask_u64 = miniflow_get_values(&mask->mf); uint32_t hash = 0; uint64_t value; dst->len = mask->len; dst->mf = mask->mf; /* Copy maps. */ FLOW_FOR_EACH_IN_MAPS(value, flow, mask->mf.map) { *dst_u64 = value & *mask_u64++; hash = hash_add64(hash, *dst_u64++); } dst->hash = hash_finish(hash, (dst_u64 - miniflow_get_values(&dst->mf)) * 8); } /* Iterate through netdev_flow_key TNL u64 values specified by 'FLOWMAP'. */ #define NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP(VALUE, KEY, FLOWMAP) \ MINIFLOW_FOR_EACH_IN_FLOWMAP(VALUE, &(KEY)->mf, FLOWMAP) /* Returns a hash value for the bits of 'key' where there are 1-bits in * 'mask'. 
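 * The hash is computed over the masked values in the same way that
 * netdev_flow_key_init_masked() hashes an installed rule, so the two
 * values can be compared during dpcls subtable lookups.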
*/ static inline uint32_t netdev_flow_key_hash_in_mask(const struct netdev_flow_key *key, const struct netdev_flow_key *mask) { const uint64_t *p = miniflow_get_values(&mask->mf); uint32_t hash = 0; uint64_t value; NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP(value, key, mask->mf.map) { hash = hash_add64(hash, value & *p++); } return hash_finish(hash, (p - miniflow_get_values(&mask->mf)) * 8); } static inline bool emc_entry_alive(struct emc_entry *ce) { return ce->flow && !ce->flow->dead; } static void emc_clear_entry(struct emc_entry *ce) { if (ce->flow) { dp_netdev_flow_unref(ce->flow); ce->flow = NULL; } } static inline void emc_change_entry(struct emc_entry *ce, struct dp_netdev_flow *flow, const struct netdev_flow_key *key) { if (ce->flow != flow) { if (ce->flow) { dp_netdev_flow_unref(ce->flow); } if (dp_netdev_flow_ref(flow)) { ce->flow = flow; } else { ce->flow = NULL; } } if (key) { netdev_flow_key_clone(&ce->key, key); } } static inline void emc_insert(struct emc_cache *cache, const struct netdev_flow_key *key, struct dp_netdev_flow *flow) { struct emc_entry *to_be_replaced = NULL; struct emc_entry *current_entry; EMC_FOR_EACH_POS_WITH_HASH(cache, current_entry, key->hash) { if (netdev_flow_key_equal(¤t_entry->key, key)) { /* We found the entry with the 'mf' miniflow */ emc_change_entry(current_entry, flow, NULL); return; } /* Replacement policy: put the flow in an empty (not alive) entry, or * in the first entry where it can be */ if (!to_be_replaced || (emc_entry_alive(to_be_replaced) && !emc_entry_alive(current_entry)) || current_entry->key.hash < to_be_replaced->key.hash) { to_be_replaced = current_entry; } } /* We didn't find the miniflow in the cache. * The 'to_be_replaced' entry is where the new flow will be stored */ emc_change_entry(to_be_replaced, flow, key); } static inline struct dp_netdev_flow * emc_lookup(struct emc_cache *cache, const struct netdev_flow_key *key) { struct emc_entry *current_entry; EMC_FOR_EACH_POS_WITH_HASH(cache, current_entry, key->hash) { if (current_entry->key.hash == key->hash && emc_entry_alive(current_entry) && netdev_flow_key_equal_mf(¤t_entry->key, &key->mf)) { /* We found the entry with the 'key->mf' miniflow */ return current_entry->flow; } } return NULL; } static struct dp_netdev_flow * dp_netdev_pmd_lookup_flow(struct dp_netdev_pmd_thread *pmd, const struct netdev_flow_key *key, int *lookup_num_p) { struct dpcls *cls; struct dpcls_rule *rule; odp_port_t in_port = u32_to_odp(MINIFLOW_GET_U32(&key->mf, in_port)); struct dp_netdev_flow *netdev_flow = NULL; cls = dp_netdev_pmd_lookup_dpcls(pmd, in_port); if (OVS_LIKELY(cls)) { dpcls_lookup(cls, key, &rule, 1, lookup_num_p); netdev_flow = dp_netdev_flow_cast(rule); } return netdev_flow; } static struct dp_netdev_flow * dp_netdev_pmd_find_flow(const struct dp_netdev_pmd_thread *pmd, const ovs_u128 *ufidp, const struct nlattr *key, size_t key_len) { struct dp_netdev_flow *netdev_flow; struct flow flow; ovs_u128 ufid; /* If a UFID is not provided, determine one based on the key. 
*/ if (!ufidp && key && key_len && !dpif_netdev_flow_from_nlattrs(key, key_len, &flow)) { dpif_flow_hash(pmd->dp->dpif, &flow, sizeof flow, &ufid); ufidp = &ufid; } if (ufidp) { CMAP_FOR_EACH_WITH_HASH (netdev_flow, node, dp_netdev_flow_hash(ufidp), &pmd->flow_table) { if (ovs_u128_equals(netdev_flow->ufid, *ufidp)) { return netdev_flow; } } } return NULL; } static void get_dpif_flow_stats(const struct dp_netdev_flow *netdev_flow_, struct dpif_flow_stats *stats) { struct dp_netdev_flow *netdev_flow; unsigned long long n; long long used; uint16_t flags; netdev_flow = CONST_CAST(struct dp_netdev_flow *, netdev_flow_); atomic_read_relaxed(&netdev_flow->stats.packet_count, &n); stats->n_packets = n; atomic_read_relaxed(&netdev_flow->stats.byte_count, &n); stats->n_bytes = n; atomic_read_relaxed(&netdev_flow->stats.used, &used); stats->used = used; atomic_read_relaxed(&netdev_flow->stats.tcp_flags, &flags); stats->tcp_flags = flags; } /* Converts to the dpif_flow format, using 'key_buf' and 'mask_buf' for * storing the netlink-formatted key/mask. 'key_buf' may be the same as * 'mask_buf'. Actions will be returned without copying, by relying on RCU to * protect them. */ static void dp_netdev_flow_to_dpif_flow(const struct dp_netdev_flow *netdev_flow, struct ofpbuf *key_buf, struct ofpbuf *mask_buf, struct dpif_flow *flow, bool terse) { if (terse) { memset(flow, 0, sizeof *flow); } else { struct flow_wildcards wc; struct dp_netdev_actions *actions; size_t offset; struct odp_flow_key_parms odp_parms = { .flow = &netdev_flow->flow, .mask = &wc.masks, .support = dp_netdev_support, }; miniflow_expand(&netdev_flow->cr.mask->mf, &wc.masks); /* Key */ offset = key_buf->size; flow->key = ofpbuf_tail(key_buf); odp_flow_key_from_flow(&odp_parms, key_buf); flow->key_len = key_buf->size - offset; /* Mask */ offset = mask_buf->size; flow->mask = ofpbuf_tail(mask_buf); odp_parms.key_buf = key_buf; odp_flow_key_from_mask(&odp_parms, mask_buf); flow->mask_len = mask_buf->size - offset; /* Actions */ actions = dp_netdev_flow_get_actions(netdev_flow); flow->actions = actions->actions; flow->actions_len = actions->size; } flow->ufid = netdev_flow->ufid; flow->ufid_present = true; flow->pmd_id = netdev_flow->pmd_id; get_dpif_flow_stats(netdev_flow, &flow->stats); } static int dpif_netdev_mask_from_nlattrs(const struct nlattr *key, uint32_t key_len, const struct nlattr *mask_key, uint32_t mask_key_len, const struct flow *flow, struct flow_wildcards *wc) { enum odp_key_fitness fitness; fitness = odp_flow_key_to_mask(mask_key, mask_key_len, wc, flow); if (fitness) { /* This should not happen: it indicates that * odp_flow_key_from_mask() and odp_flow_key_to_mask() * disagree on the acceptable form of a mask. Log the problem * as an error, with enough details to enable debugging. */ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); if (!VLOG_DROP_ERR(&rl)) { struct ds s; ds_init(&s); odp_flow_format(key, key_len, mask_key, mask_key_len, NULL, &s, true); VLOG_ERR("internal error parsing flow mask %s (%s)", ds_cstr(&s), odp_key_fitness_to_string(fitness)); ds_destroy(&s); } return EINVAL; } return 0; } static int dpif_netdev_flow_from_nlattrs(const struct nlattr *key, uint32_t key_len, struct flow *flow) { odp_port_t in_port; if (odp_flow_key_to_flow(key, key_len, flow)) { /* This should not happen: it indicates that odp_flow_key_from_flow() * and odp_flow_key_to_flow() disagree on the acceptable form of a * flow. Log the problem as an error, with enough details to enable * debugging. 
*/ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); if (!VLOG_DROP_ERR(&rl)) { struct ds s; ds_init(&s); odp_flow_format(key, key_len, NULL, 0, NULL, &s, true); VLOG_ERR("internal error parsing flow key %s", ds_cstr(&s)); ds_destroy(&s); } return EINVAL; } in_port = flow->in_port.odp_port; if (!is_valid_port_number(in_port) && in_port != ODPP_NONE) { return EINVAL; } if (flow->ct_state & DP_NETDEV_CS_UNSUPPORTED_MASK) { return EINVAL; } return 0; } static int dpif_netdev_flow_get(const struct dpif *dpif, const struct dpif_flow_get *get) { struct dp_netdev *dp = get_dp_netdev(dpif); struct dp_netdev_flow *netdev_flow; struct dp_netdev_pmd_thread *pmd; struct hmapx to_find = HMAPX_INITIALIZER(&to_find); struct hmapx_node *node; int error = EINVAL; if (get->pmd_id == PMD_ID_NULL) { CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { if (dp_netdev_pmd_try_ref(pmd) && !hmapx_add(&to_find, pmd)) { dp_netdev_pmd_unref(pmd); } } } else { pmd = dp_netdev_get_pmd(dp, get->pmd_id); if (!pmd) { goto out; } hmapx_add(&to_find, pmd); } if (!hmapx_count(&to_find)) { goto out; } HMAPX_FOR_EACH (node, &to_find) { pmd = (struct dp_netdev_pmd_thread *) node->data; netdev_flow = dp_netdev_pmd_find_flow(pmd, get->ufid, get->key, get->key_len); if (netdev_flow) { dp_netdev_flow_to_dpif_flow(netdev_flow, get->buffer, get->buffer, get->flow, false); error = 0; break; } else { error = ENOENT; } } HMAPX_FOR_EACH (node, &to_find) { pmd = (struct dp_netdev_pmd_thread *) node->data; dp_netdev_pmd_unref(pmd); } out: hmapx_destroy(&to_find); return error; } static struct dp_netdev_flow * dp_netdev_flow_add(struct dp_netdev_pmd_thread *pmd, struct match *match, const ovs_u128 *ufid, const struct nlattr *actions, size_t actions_len) OVS_REQUIRES(pmd->flow_mutex) { struct dp_netdev_flow *flow; struct netdev_flow_key mask; struct dpcls *cls; odp_port_t in_port = match->flow.in_port.odp_port; netdev_flow_mask_init(&mask, match); /* Make sure wc does not have metadata. */ ovs_assert(!FLOWMAP_HAS_FIELD(&mask.mf.map, metadata) && !FLOWMAP_HAS_FIELD(&mask.mf.map, regs)); /* Do not allocate extra space. */ flow = xmalloc(sizeof *flow - sizeof flow->cr.flow.mf + mask.len); memset(&flow->stats, 0, sizeof flow->stats); flow->dead = false; flow->batch = NULL; *CONST_CAST(unsigned *, &flow->pmd_id) = pmd->core_id; *CONST_CAST(struct flow *, &flow->flow) = match->flow; *CONST_CAST(ovs_u128 *, &flow->ufid) = *ufid; ovs_refcount_init(&flow->ref_cnt); ovsrcu_set(&flow->actions, dp_netdev_actions_create(actions, actions_len)); netdev_flow_key_init_masked(&flow->cr.flow, &match->flow, &mask); /* Select dpcls for in_port. 
Relies on in_port to be exact match */ ovs_assert(match->wc.masks.in_port.odp_port == ODP_PORT_C(UINT32_MAX)); cls = dp_netdev_pmd_find_dpcls(pmd, in_port); dpcls_insert(cls, &flow->cr, &mask); cmap_insert(&pmd->flow_table, CONST_CAST(struct cmap_node *, &flow->node), dp_netdev_flow_hash(&flow->ufid)); if (OVS_UNLIKELY(VLOG_IS_DBG_ENABLED())) { struct ds ds = DS_EMPTY_INITIALIZER; struct ofpbuf key_buf, mask_buf; struct odp_flow_key_parms odp_parms = { .flow = &match->flow, .mask = &match->wc.masks, .support = dp_netdev_support, }; ofpbuf_init(&key_buf, 0); ofpbuf_init(&mask_buf, 0); odp_flow_key_from_flow(&odp_parms, &key_buf); odp_parms.key_buf = &key_buf; odp_flow_key_from_mask(&odp_parms, &mask_buf); ds_put_cstr(&ds, "flow_add: "); odp_format_ufid(ufid, &ds); ds_put_cstr(&ds, " "); odp_flow_format(key_buf.data, key_buf.size, mask_buf.data, mask_buf.size, NULL, &ds, false); ds_put_cstr(&ds, ", actions:"); format_odp_actions(&ds, actions, actions_len); VLOG_DBG_RL(&upcall_rl, "%s", ds_cstr(&ds)); ofpbuf_uninit(&key_buf); ofpbuf_uninit(&mask_buf); ds_destroy(&ds); } return flow; } static int dpif_netdev_flow_put(struct dpif *dpif, const struct dpif_flow_put *put) { struct dp_netdev *dp = get_dp_netdev(dpif); struct dp_netdev_flow *netdev_flow; struct netdev_flow_key key; struct dp_netdev_pmd_thread *pmd; struct match match; ovs_u128 ufid; unsigned pmd_id = put->pmd_id == PMD_ID_NULL ? NON_PMD_CORE_ID : put->pmd_id; int error; error = dpif_netdev_flow_from_nlattrs(put->key, put->key_len, &match.flow); if (error) { return error; } error = dpif_netdev_mask_from_nlattrs(put->key, put->key_len, put->mask, put->mask_len, &match.flow, &match.wc); if (error) { return error; } pmd = dp_netdev_get_pmd(dp, pmd_id); if (!pmd) { return EINVAL; } /* Must produce a netdev_flow_key for lookup. * This interface is no longer performance critical, since it is not used * for upcall processing any more. */ netdev_flow_key_from_flow(&key, &match.flow); if (put->ufid) { ufid = *put->ufid; } else { dpif_flow_hash(dpif, &match.flow, sizeof match.flow, &ufid); } ovs_mutex_lock(&pmd->flow_mutex); netdev_flow = dp_netdev_pmd_lookup_flow(pmd, &key, NULL); if (!netdev_flow) { if (put->flags & DPIF_FP_CREATE) { if (cmap_count(&pmd->flow_table) < MAX_FLOWS) { if (put->stats) { memset(put->stats, 0, sizeof *put->stats); } dp_netdev_flow_add(pmd, &match, &ufid, put->actions, put->actions_len); error = 0; } else { error = EFBIG; } } else { error = ENOENT; } } else { if (put->flags & DPIF_FP_MODIFY && flow_equal(&match.flow, &netdev_flow->flow)) { struct dp_netdev_actions *new_actions; struct dp_netdev_actions *old_actions; new_actions = dp_netdev_actions_create(put->actions, put->actions_len); old_actions = dp_netdev_flow_get_actions(netdev_flow); ovsrcu_set(&netdev_flow->actions, new_actions); if (put->stats) { get_dpif_flow_stats(netdev_flow, put->stats); } if (put->flags & DPIF_FP_ZERO_STATS) { /* XXX: The userspace datapath uses thread local statistics * (for flows), which should be updated only by the owning * thread. Since we cannot write on stats memory here, * we choose not to support this flag. Please note: * - This feature is currently used only by dpctl commands with * option --clear. 
* - Should the need arise, this operation can be implemented * by keeping a base value (to be update here) for each * counter, and subtracting it before outputting the stats */ error = EOPNOTSUPP; } ovsrcu_postpone(dp_netdev_actions_free, old_actions); } else if (put->flags & DPIF_FP_CREATE) { error = EEXIST; } else { /* Overlapping flow. */ error = EINVAL; } } ovs_mutex_unlock(&pmd->flow_mutex); dp_netdev_pmd_unref(pmd); return error; } static int dpif_netdev_flow_del(struct dpif *dpif, const struct dpif_flow_del *del) { struct dp_netdev *dp = get_dp_netdev(dpif); struct dp_netdev_flow *netdev_flow; struct dp_netdev_pmd_thread *pmd; unsigned pmd_id = del->pmd_id == PMD_ID_NULL ? NON_PMD_CORE_ID : del->pmd_id; int error = 0; pmd = dp_netdev_get_pmd(dp, pmd_id); if (!pmd) { return EINVAL; } ovs_mutex_lock(&pmd->flow_mutex); netdev_flow = dp_netdev_pmd_find_flow(pmd, del->ufid, del->key, del->key_len); if (netdev_flow) { if (del->stats) { get_dpif_flow_stats(netdev_flow, del->stats); } dp_netdev_pmd_remove_flow(pmd, netdev_flow); } else { error = ENOENT; } ovs_mutex_unlock(&pmd->flow_mutex); dp_netdev_pmd_unref(pmd); return error; } struct dpif_netdev_flow_dump { struct dpif_flow_dump up; struct cmap_position poll_thread_pos; struct cmap_position flow_pos; struct dp_netdev_pmd_thread *cur_pmd; int status; struct ovs_mutex mutex; }; static struct dpif_netdev_flow_dump * dpif_netdev_flow_dump_cast(struct dpif_flow_dump *dump) { return CONTAINER_OF(dump, struct dpif_netdev_flow_dump, up); } static struct dpif_flow_dump * dpif_netdev_flow_dump_create(const struct dpif *dpif_, bool terse) { struct dpif_netdev_flow_dump *dump; dump = xzalloc(sizeof *dump); dpif_flow_dump_init(&dump->up, dpif_); dump->up.terse = terse; ovs_mutex_init(&dump->mutex); return &dump->up; } static int dpif_netdev_flow_dump_destroy(struct dpif_flow_dump *dump_) { struct dpif_netdev_flow_dump *dump = dpif_netdev_flow_dump_cast(dump_); ovs_mutex_destroy(&dump->mutex); free(dump); return 0; } struct dpif_netdev_flow_dump_thread { struct dpif_flow_dump_thread up; struct dpif_netdev_flow_dump *dump; struct odputil_keybuf keybuf[FLOW_DUMP_MAX_BATCH]; struct odputil_keybuf maskbuf[FLOW_DUMP_MAX_BATCH]; }; static struct dpif_netdev_flow_dump_thread * dpif_netdev_flow_dump_thread_cast(struct dpif_flow_dump_thread *thread) { return CONTAINER_OF(thread, struct dpif_netdev_flow_dump_thread, up); } static struct dpif_flow_dump_thread * dpif_netdev_flow_dump_thread_create(struct dpif_flow_dump *dump_) { struct dpif_netdev_flow_dump *dump = dpif_netdev_flow_dump_cast(dump_); struct dpif_netdev_flow_dump_thread *thread; thread = xmalloc(sizeof *thread); dpif_flow_dump_thread_init(&thread->up, &dump->up); thread->dump = dump; return &thread->up; } static void dpif_netdev_flow_dump_thread_destroy(struct dpif_flow_dump_thread *thread_) { struct dpif_netdev_flow_dump_thread *thread = dpif_netdev_flow_dump_thread_cast(thread_); free(thread); } static int dpif_netdev_flow_dump_next(struct dpif_flow_dump_thread *thread_, struct dpif_flow *flows, int max_flows) { struct dpif_netdev_flow_dump_thread *thread = dpif_netdev_flow_dump_thread_cast(thread_); struct dpif_netdev_flow_dump *dump = thread->dump; struct dp_netdev_flow *netdev_flows[FLOW_DUMP_MAX_BATCH]; int n_flows = 0; int i; ovs_mutex_lock(&dump->mutex); if (!dump->status) { struct dpif_netdev *dpif = dpif_netdev_cast(thread->up.dpif); struct dp_netdev *dp = get_dp_netdev(&dpif->dpif); struct dp_netdev_pmd_thread *pmd = dump->cur_pmd; int flow_limit = MIN(max_flows, FLOW_DUMP_MAX_BATCH); 
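        /* Note: 'dump->mutex' serializes concurrent dump threads while the
         * shared cursor ('poll_thread_pos' and 'flow_pos') advances; the
         * conversion to dpif_flow format further below runs outside the
         * lock, using this thread's own 'keybuf' and 'maskbuf'. */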
/* First call to dump_next(), extracts the first pmd thread. * If there is no pmd thread, returns immediately. */ if (!pmd) { pmd = dp_netdev_pmd_get_next(dp, &dump->poll_thread_pos); if (!pmd) { ovs_mutex_unlock(&dump->mutex); return n_flows; } } do { for (n_flows = 0; n_flows < flow_limit; n_flows++) { struct cmap_node *node; node = cmap_next_position(&pmd->flow_table, &dump->flow_pos); if (!node) { break; } netdev_flows[n_flows] = CONTAINER_OF(node, struct dp_netdev_flow, node); } /* When finishing dumping the current pmd thread, moves to * the next. */ if (n_flows < flow_limit) { memset(&dump->flow_pos, 0, sizeof dump->flow_pos); dp_netdev_pmd_unref(pmd); pmd = dp_netdev_pmd_get_next(dp, &dump->poll_thread_pos); if (!pmd) { dump->status = EOF; break; } } /* Keeps the reference to next caller. */ dump->cur_pmd = pmd; /* If the current dump is empty, do not exit the loop, since the * remaining pmds could have flows to be dumped. Just dumps again * on the new 'pmd'. */ } while (!n_flows); } ovs_mutex_unlock(&dump->mutex); for (i = 0; i < n_flows; i++) { struct odputil_keybuf *maskbuf = &thread->maskbuf[i]; struct odputil_keybuf *keybuf = &thread->keybuf[i]; struct dp_netdev_flow *netdev_flow = netdev_flows[i]; struct dpif_flow *f = &flows[i]; struct ofpbuf key, mask; ofpbuf_use_stack(&key, keybuf, sizeof *keybuf); ofpbuf_use_stack(&mask, maskbuf, sizeof *maskbuf); dp_netdev_flow_to_dpif_flow(netdev_flow, &key, &mask, f, dump->up.terse); } return n_flows; } static int dpif_netdev_execute(struct dpif *dpif, struct dpif_execute *execute) OVS_NO_THREAD_SAFETY_ANALYSIS { struct dp_netdev *dp = get_dp_netdev(dpif); struct dp_netdev_pmd_thread *pmd; struct dp_packet_batch pp; if (dp_packet_size(execute->packet) < ETH_HEADER_LEN || dp_packet_size(execute->packet) > UINT16_MAX) { return EINVAL; } /* Tries finding the 'pmd'. If NULL is returned, that means * the current thread is a non-pmd thread and should use * dp_netdev_get_pmd(dp, NON_PMD_CORE_ID). */ pmd = ovsthread_getspecific(dp->per_pmd_key); if (!pmd) { pmd = dp_netdev_get_pmd(dp, NON_PMD_CORE_ID); if (!pmd) { return EBUSY; } } /* If the current thread is non-pmd thread, acquires * the 'non_pmd_mutex'. */ if (pmd->core_id == NON_PMD_CORE_ID) { ovs_mutex_lock(&dp->non_pmd_mutex); } /* The action processing expects the RSS hash to be valid, because * it's always initialized at the beginning of datapath processing. * In this case, though, 'execute->packet' may not have gone through * the datapath at all, it may have been generated by the upper layer * (OpenFlow packet-out, BFD frame, ...). 
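     * If the hash is missing, a 5-tuple hash computed from 'execute->flow'
     * is installed below so that later processing sees a valid hash.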
*/ if (!dp_packet_rss_valid(execute->packet)) { dp_packet_set_rss_hash(execute->packet, flow_hash_5tuple(execute->flow, 0)); } packet_batch_init_packet(&pp, execute->packet); dp_netdev_execute_actions(pmd, &pp, false, execute->flow, execute->actions, execute->actions_len, time_msec()); if (pmd->core_id == NON_PMD_CORE_ID) { ovs_mutex_unlock(&dp->non_pmd_mutex); dp_netdev_pmd_unref(pmd); } return 0; } static void dpif_netdev_operate(struct dpif *dpif, struct dpif_op **ops, size_t n_ops) { size_t i; for (i = 0; i < n_ops; i++) { struct dpif_op *op = ops[i]; switch (op->type) { case DPIF_OP_FLOW_PUT: op->error = dpif_netdev_flow_put(dpif, &op->u.flow_put); break; case DPIF_OP_FLOW_DEL: op->error = dpif_netdev_flow_del(dpif, &op->u.flow_del); break; case DPIF_OP_EXECUTE: op->error = dpif_netdev_execute(dpif, &op->u.execute); break; case DPIF_OP_FLOW_GET: op->error = dpif_netdev_flow_get(dpif, &op->u.flow_get); break; } } } /* Changes the number or the affinity of pmd threads. The changes are actually * applied in dpif_netdev_run(). */ static int dpif_netdev_pmd_set(struct dpif *dpif, const char *cmask) { struct dp_netdev *dp = get_dp_netdev(dpif); if (!nullable_string_is_equal(dp->pmd_cmask, cmask)) { free(dp->pmd_cmask); dp->pmd_cmask = nullable_xstrdup(cmask); dp_netdev_request_reconfigure(dp); } return 0; } /* Parses affinity list and returns result in 'core_ids'. */ static int parse_affinity_list(const char *affinity_list, unsigned *core_ids, int n_rxq) { unsigned i; char *list, *copy, *key, *value; int error = 0; for (i = 0; i < n_rxq; i++) { core_ids[i] = -1; } if (!affinity_list) { return 0; } list = copy = xstrdup(affinity_list); while (ofputil_parse_key_value(&list, &key, &value)) { int rxq_id, core_id; if (!str_to_int(key, 0, &rxq_id) || rxq_id < 0 || !str_to_int(value, 0, &core_id) || core_id < 0) { error = EINVAL; break; } if (rxq_id < n_rxq) { core_ids[rxq_id] = core_id; } } free(copy); return error; } /* Parses 'affinity_list' and applies configuration if it is valid. */ static int dpif_netdev_port_set_rxq_affinity(struct dp_netdev_port *port, const char *affinity_list) { unsigned *core_ids, i; int error = 0; core_ids = xmalloc(port->n_rxq * sizeof *core_ids); if (parse_affinity_list(affinity_list, core_ids, port->n_rxq)) { error = EINVAL; goto exit; } for (i = 0; i < port->n_rxq; i++) { port->rxqs[i].core_id = core_ids[i]; } exit: free(core_ids); return error; } /* Changes the affinity of port's rx queues. The changes are actually applied * in dpif_netdev_run(). */ static int dpif_netdev_port_set_config(struct dpif *dpif, odp_port_t port_no, const struct smap *cfg) { struct dp_netdev *dp = get_dp_netdev(dpif); struct dp_netdev_port *port; int error = 0; const char *affinity_list = smap_get(cfg, "pmd-rxq-affinity"); ovs_mutex_lock(&dp->port_mutex); error = get_port_by_number(dp, port_no, &port); if (error || !netdev_is_pmd(port->netdev) || nullable_string_is_equal(affinity_list, port->rxq_affinity_list)) { goto unlock; } error = dpif_netdev_port_set_rxq_affinity(port, affinity_list); if (error) { goto unlock; } free(port->rxq_affinity_list); port->rxq_affinity_list = nullable_xstrdup(affinity_list); dp_netdev_request_reconfigure(dp); unlock: ovs_mutex_unlock(&dp->port_mutex); return error; } static int dpif_netdev_queue_to_priority(const struct dpif *dpif OVS_UNUSED, uint32_t queue_id, uint32_t *priority) { *priority = queue_id; return 0; } /* Creates and returns a new 'struct dp_netdev_actions', whose actions are * a copy of the 'ofpacts_len' bytes of 'ofpacts'. 
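 *
 * The result is normally published to readers with RCU and retired with
 * dp_netdev_actions_free() once no reader can still hold it, e.g. (the
 * names below are illustrative; see dpif_netdev_flow_put() for the real
 * code path):
 *
 *     new_actions = dp_netdev_actions_create(acts, acts_len);
 *     old_actions = dp_netdev_flow_get_actions(flow);
 *     ovsrcu_set(&flow->actions, new_actions);
 *     ovsrcu_postpone(dp_netdev_actions_free, old_actions);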
*/ struct dp_netdev_actions * dp_netdev_actions_create(const struct nlattr *actions, size_t size) { struct dp_netdev_actions *netdev_actions; netdev_actions = xmalloc(sizeof *netdev_actions + size); memcpy(netdev_actions->actions, actions, size); netdev_actions->size = size; return netdev_actions; } struct dp_netdev_actions * dp_netdev_flow_get_actions(const struct dp_netdev_flow *flow) { return ovsrcu_get(struct dp_netdev_actions *, &flow->actions); } static void dp_netdev_actions_free(struct dp_netdev_actions *actions) { free(actions); } static inline unsigned long long cycles_counter(void) { #ifdef DPDK_NETDEV return rte_get_tsc_cycles(); #else return 0; #endif } /* Fake mutex to make sure that the calls to cycles_count_* are balanced */ extern struct ovs_mutex cycles_counter_fake_mutex; /* Start counting cycles. Must be followed by 'cycles_count_end()' */ static inline void cycles_count_start(struct dp_netdev_pmd_thread *pmd) OVS_ACQUIRES(&cycles_counter_fake_mutex) OVS_NO_THREAD_SAFETY_ANALYSIS { pmd->last_cycles = cycles_counter(); } /* Stop counting cycles and add them to the counter 'type' */ static inline void cycles_count_end(struct dp_netdev_pmd_thread *pmd, enum pmd_cycles_counter_type type) OVS_RELEASES(&cycles_counter_fake_mutex) OVS_NO_THREAD_SAFETY_ANALYSIS { unsigned long long interval = cycles_counter() - pmd->last_cycles; non_atomic_ullong_add(&pmd->cycles.n[type], interval); } static void dp_netdev_process_rxq_port(struct dp_netdev_pmd_thread *pmd, struct dp_netdev_port *port, struct netdev_rxq *rxq) { struct dp_packet_batch batch; int error; dp_packet_batch_init(&batch); cycles_count_start(pmd); error = netdev_rxq_recv(rxq, &batch); cycles_count_end(pmd, PMD_CYCLES_POLLING); if (!error) { *recirc_depth_get() = 0; cycles_count_start(pmd); dp_netdev_input(pmd, &batch, port->port_no); cycles_count_end(pmd, PMD_CYCLES_PROCESSING); } else if (error != EAGAIN && error != EOPNOTSUPP) { static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); VLOG_ERR_RL(&rl, "error receiving data from %s: %s", netdev_get_name(port->netdev), ovs_strerror(error)); } } static int port_reconfigure(struct dp_netdev_port *port) { struct netdev *netdev = port->netdev; int i, err; if (!netdev_is_reconf_required(netdev)) { return 0; } /* Closes the existing 'rxq's. */ for (i = 0; i < port->n_rxq; i++) { netdev_rxq_close(port->rxqs[i].rxq); port->rxqs[i].rxq = NULL; } port->n_rxq = 0; /* Allows 'netdev' to apply the pending configuration changes. */ err = netdev_reconfigure(netdev); if (err && (err != EOPNOTSUPP)) { VLOG_ERR("Failed to set interface %s new configuration", netdev_get_name(netdev)); return err; } /* If the netdev_reconfigure() above succeeds, reopens the 'rxq's. */ port->rxqs = xrealloc(port->rxqs, sizeof *port->rxqs * netdev_n_rxq(netdev)); /* Realloc 'used' counters for tx queues. */ free(port->txq_used); port->txq_used = xcalloc(netdev_n_txq(netdev), sizeof *port->txq_used); for (i = 0; i < netdev_n_rxq(netdev); i++) { err = netdev_rxq_open(netdev, &port->rxqs[i].rxq, i); if (err) { return err; } port->n_rxq++; } /* Parse affinity list to apply configuration for new queues. */ dpif_netdev_port_set_rxq_affinity(port, port->rxq_affinity_list); return 0; } static void reconfigure_pmd_threads(struct dp_netdev *dp) OVS_REQUIRES(dp->port_mutex) { struct dp_netdev_port *port, *next; int n_cores; dp->last_reconfigure_seq = seq_read(dp->reconfigure_seq); dp_netdev_destroy_all_pmds(dp); /* Reconfigures the cpu mask. 
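     * All pmd threads were destroyed just above, so per-thread state (exact
     * match caches, flow tables, polled queues) is rebuilt from scratch when
     * the threads are recreated below.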
*/ ovs_numa_set_cpu_mask(dp->pmd_cmask); n_cores = ovs_numa_get_n_cores(); if (n_cores == OVS_CORE_UNSPEC) { VLOG_ERR("Cannot get cpu core info"); return; } HMAP_FOR_EACH_SAFE (port, next, node, &dp->ports) { int err; err = port_reconfigure(port); if (err) { hmap_remove(&dp->ports, &port->node); seq_change(dp->port_seq); port_destroy(port); } else { port->dynamic_txqs = netdev_n_txq(port->netdev) < n_cores + 1; } } /* Restores the non-pmd. */ dp_netdev_set_nonpmd(dp); /* Restores all pmd threads. */ dp_netdev_reset_pmd_threads(dp); } /* Returns true if one of the netdevs in 'dp' requires a reconfiguration */ static bool ports_require_restart(const struct dp_netdev *dp) OVS_REQUIRES(dp->port_mutex) { struct dp_netdev_port *port; HMAP_FOR_EACH (port, node, &dp->ports) { if (netdev_is_reconf_required(port->netdev)) { return true; } } return false; } /* Return true if needs to revalidate datapath flows. */ static bool dpif_netdev_run(struct dpif *dpif) { struct dp_netdev_port *port; struct dp_netdev *dp = get_dp_netdev(dpif); struct dp_netdev_pmd_thread *non_pmd; uint64_t new_tnl_seq; ovs_mutex_lock(&dp->port_mutex); non_pmd = dp_netdev_get_pmd(dp, NON_PMD_CORE_ID); if (non_pmd) { ovs_mutex_lock(&dp->non_pmd_mutex); HMAP_FOR_EACH (port, node, &dp->ports) { if (!netdev_is_pmd(port->netdev)) { int i; for (i = 0; i < port->n_rxq; i++) { dp_netdev_process_rxq_port(non_pmd, port, port->rxqs[i].rxq); } } } dpif_netdev_xps_revalidate_pmd(non_pmd, time_msec(), false); ovs_mutex_unlock(&dp->non_pmd_mutex); dp_netdev_pmd_unref(non_pmd); } if (dp_netdev_is_reconf_required(dp) || ports_require_restart(dp)) { reconfigure_pmd_threads(dp); } ovs_mutex_unlock(&dp->port_mutex); tnl_neigh_cache_run(); tnl_port_map_run(); new_tnl_seq = seq_read(tnl_conf_seq); if (dp->last_tnl_conf_seq != new_tnl_seq) { dp->last_tnl_conf_seq = new_tnl_seq; return true; } return false; } static void dpif_netdev_wait(struct dpif *dpif) { struct dp_netdev_port *port; struct dp_netdev *dp = get_dp_netdev(dpif); ovs_mutex_lock(&dp_netdev_mutex); ovs_mutex_lock(&dp->port_mutex); HMAP_FOR_EACH (port, node, &dp->ports) { netdev_wait_reconf_required(port->netdev); if (!netdev_is_pmd(port->netdev)) { int i; for (i = 0; i < port->n_rxq; i++) { netdev_rxq_wait(port->rxqs[i].rxq); } } } ovs_mutex_unlock(&dp->port_mutex); ovs_mutex_unlock(&dp_netdev_mutex); seq_wait(tnl_conf_seq, dp->last_tnl_conf_seq); } static void pmd_free_cached_ports(struct dp_netdev_pmd_thread *pmd) { struct tx_port *tx_port_cached; /* Free all used tx queue ids. 
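     * The XPS revalidation below runs with 'purge' set, so any dynamically
     * assigned tx queue ids held by this pmd are released before the cached
     * tx ports themselves are freed.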
*/ dpif_netdev_xps_revalidate_pmd(pmd, 0, true); HMAP_FOR_EACH_POP (tx_port_cached, node, &pmd->port_cache) { free(tx_port_cached); } } /* Copies ports from 'pmd->tx_ports' (shared with the main thread) to * 'pmd->port_cache' (thread local) */ static void pmd_load_cached_ports(struct dp_netdev_pmd_thread *pmd) OVS_REQUIRES(pmd->port_mutex) { struct tx_port *tx_port, *tx_port_cached; pmd_free_cached_ports(pmd); hmap_shrink(&pmd->port_cache); HMAP_FOR_EACH (tx_port, node, &pmd->tx_ports) { tx_port_cached = xmemdup(tx_port, sizeof *tx_port_cached); hmap_insert(&pmd->port_cache, &tx_port_cached->node, hash_port_no(tx_port_cached->port->port_no)); } } static int pmd_load_queues_and_ports(struct dp_netdev_pmd_thread *pmd, struct rxq_poll **ppoll_list) { struct rxq_poll *poll_list = *ppoll_list; struct rxq_poll *poll; int i; ovs_mutex_lock(&pmd->port_mutex); poll_list = xrealloc(poll_list, pmd->poll_cnt * sizeof *poll_list); i = 0; LIST_FOR_EACH (poll, node, &pmd->poll_list) { poll_list[i++] = *poll; } pmd_load_cached_ports(pmd); ovs_mutex_unlock(&pmd->port_mutex); *ppoll_list = poll_list; return i; } static void * pmd_thread_main(void *f_) { struct dp_netdev_pmd_thread *pmd = f_; unsigned int lc = 0; struct rxq_poll *poll_list; unsigned int port_seq = PMD_INITIAL_SEQ; bool exiting; int poll_cnt; int i; poll_list = NULL; /* Stores the pmd thread's 'pmd' to 'per_pmd_key'. */ ovsthread_setspecific(pmd->dp->per_pmd_key, pmd); ovs_numa_thread_setaffinity_core(pmd->core_id); dpdk_set_lcore_id(pmd->core_id); poll_cnt = pmd_load_queues_and_ports(pmd, &poll_list); reload: emc_cache_init(&pmd->flow_cache); /* List port/core affinity */ for (i = 0; i < poll_cnt; i++) { VLOG_DBG("Core %d processing port \'%s\' with queue-id %d\n", pmd->core_id, netdev_get_name(poll_list[i].port->netdev), netdev_rxq_get_queue_id(poll_list[i].rx)); } for (;;) { for (i = 0; i < poll_cnt; i++) { dp_netdev_process_rxq_port(pmd, poll_list[i].port, poll_list[i].rx); } if (lc++ > 1024) { unsigned int seq; lc = 0; coverage_try_clear(); dp_netdev_pmd_try_optimize(pmd); if (!ovsrcu_try_quiesce()) { emc_cache_slow_sweep(&pmd->flow_cache); } atomic_read_relaxed(&pmd->change_seq, &seq); if (seq != port_seq) { port_seq = seq; break; } } } poll_cnt = pmd_load_queues_and_ports(pmd, &poll_list); exiting = latch_is_set(&pmd->exit_latch); /* Signal here to make sure the pmd finishes * reloading the updated configuration. */ dp_netdev_pmd_reload_done(pmd); emc_cache_uninit(&pmd->flow_cache); if (!exiting) { goto reload; } free(poll_list); pmd_free_cached_ports(pmd); return NULL; } static void dp_netdev_disable_upcall(struct dp_netdev *dp) OVS_ACQUIRES(dp->upcall_rwlock) { fat_rwlock_wrlock(&dp->upcall_rwlock); } static void dpif_netdev_disable_upcall(struct dpif *dpif) OVS_NO_THREAD_SAFETY_ANALYSIS { struct dp_netdev *dp = get_dp_netdev(dpif); dp_netdev_disable_upcall(dp); } static void dp_netdev_enable_upcall(struct dp_netdev *dp) OVS_RELEASES(dp->upcall_rwlock) { fat_rwlock_unlock(&dp->upcall_rwlock); } static void dpif_netdev_enable_upcall(struct dpif *dpif) OVS_NO_THREAD_SAFETY_ANALYSIS { struct dp_netdev *dp = get_dp_netdev(dpif); dp_netdev_enable_upcall(dp); } static void dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread *pmd) { ovs_mutex_lock(&pmd->cond_mutex); xpthread_cond_signal(&pmd->cond); ovs_mutex_unlock(&pmd->cond_mutex); } /* Finds and refs the dp_netdev_pmd_thread on core 'core_id'. Returns * the pointer if succeeds, otherwise, NULL (it can return NULL even if * 'core_id' is NON_PMD_CORE_ID). 
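 *
 * A typical call site pairs it with dp_netdev_pmd_unref():
 *
 *     pmd = dp_netdev_get_pmd(dp, core_id);
 *     if (pmd) {
 *         ...use 'pmd'...
 *         dp_netdev_pmd_unref(pmd);
 *     }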
* * Caller must unrefs the returned reference. */ static struct dp_netdev_pmd_thread * dp_netdev_get_pmd(struct dp_netdev *dp, unsigned core_id) { struct dp_netdev_pmd_thread *pmd; const struct cmap_node *pnode; pnode = cmap_find(&dp->poll_threads, hash_int(core_id, 0)); if (!pnode) { return NULL; } pmd = CONTAINER_OF(pnode, struct dp_netdev_pmd_thread, node); return dp_netdev_pmd_try_ref(pmd) ? pmd : NULL; } /* Sets the 'struct dp_netdev_pmd_thread' for non-pmd threads. */ static void dp_netdev_set_nonpmd(struct dp_netdev *dp) OVS_REQUIRES(dp->port_mutex) { struct dp_netdev_pmd_thread *non_pmd; struct dp_netdev_port *port; non_pmd = xzalloc(sizeof *non_pmd); dp_netdev_configure_pmd(non_pmd, dp, NON_PMD_CORE_ID, OVS_NUMA_UNSPEC); HMAP_FOR_EACH (port, node, &dp->ports) { dp_netdev_add_port_tx_to_pmd(non_pmd, port); } dp_netdev_reload_pmd__(non_pmd); } /* Caller must have valid pointer to 'pmd'. */ static bool dp_netdev_pmd_try_ref(struct dp_netdev_pmd_thread *pmd) { return ovs_refcount_try_ref_rcu(&pmd->ref_cnt); } static void dp_netdev_pmd_unref(struct dp_netdev_pmd_thread *pmd) { if (pmd && ovs_refcount_unref(&pmd->ref_cnt) == 1) { ovsrcu_postpone(dp_netdev_destroy_pmd, pmd); } } /* Given cmap position 'pos', tries to ref the next node. If try_ref() * fails, keeps checking for next node until reaching the end of cmap. * * Caller must unrefs the returned reference. */ static struct dp_netdev_pmd_thread * dp_netdev_pmd_get_next(struct dp_netdev *dp, struct cmap_position *pos) { struct dp_netdev_pmd_thread *next; do { struct cmap_node *node; node = cmap_next_position(&dp->poll_threads, pos); next = node ? CONTAINER_OF(node, struct dp_netdev_pmd_thread, node) : NULL; } while (next && !dp_netdev_pmd_try_ref(next)); return next; } /* Configures the 'pmd' based on the input argument. */ static void dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev *dp, unsigned core_id, int numa_id) { pmd->dp = dp; pmd->core_id = core_id; pmd->numa_id = numa_id; pmd->poll_cnt = 0; atomic_init(&pmd->static_tx_qid, (core_id == NON_PMD_CORE_ID) ? ovs_numa_get_n_cores() : get_n_pmd_threads(dp)); ovs_refcount_init(&pmd->ref_cnt); latch_init(&pmd->exit_latch); atomic_init(&pmd->change_seq, PMD_INITIAL_SEQ); xpthread_cond_init(&pmd->cond, NULL); ovs_mutex_init(&pmd->cond_mutex); ovs_mutex_init(&pmd->flow_mutex); ovs_mutex_init(&pmd->port_mutex); cmap_init(&pmd->flow_table); cmap_init(&pmd->classifiers); pmd->next_optimization = time_msec() + DPCLS_OPTIMIZATION_INTERVAL; ovs_list_init(&pmd->poll_list); hmap_init(&pmd->tx_ports); hmap_init(&pmd->port_cache); /* init the 'flow_cache' since there is no * actual thread created for NON_PMD_CORE_ID. */ if (core_id == NON_PMD_CORE_ID) { emc_cache_init(&pmd->flow_cache); } cmap_insert(&dp->poll_threads, CONST_CAST(struct cmap_node *, &pmd->node), hash_int(core_id, 0)); } static void dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread *pmd) { struct dpcls *cls; dp_netdev_pmd_flow_flush(pmd); hmap_destroy(&pmd->port_cache); hmap_destroy(&pmd->tx_ports); /* All flows (including their dpcls_rules) have been deleted already */ CMAP_FOR_EACH (cls, node, &pmd->classifiers) { dpcls_destroy(cls); } cmap_destroy(&pmd->classifiers); cmap_destroy(&pmd->flow_table); ovs_mutex_destroy(&pmd->flow_mutex); latch_destroy(&pmd->exit_latch); xpthread_cond_destroy(&pmd->cond); ovs_mutex_destroy(&pmd->cond_mutex); ovs_mutex_destroy(&pmd->port_mutex); free(pmd); } /* Stops the pmd thread, removes it from the 'dp->poll_threads', * and unrefs the struct. 
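 * For the NON_PMD_CORE_ID entry there is no thread to stop or join; only
 * its exact match cache and cached tx ports are released.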
*/ static void dp_netdev_del_pmd(struct dp_netdev *dp, struct dp_netdev_pmd_thread *pmd) { /* NON_PMD_CORE_ID doesn't have a thread, so we don't have to synchronize, * but extra cleanup is necessary */ if (pmd->core_id == NON_PMD_CORE_ID) { emc_cache_uninit(&pmd->flow_cache); pmd_free_cached_ports(pmd); } else { latch_set(&pmd->exit_latch); dp_netdev_reload_pmd__(pmd); ovs_numa_unpin_core(pmd->core_id); xpthread_join(pmd->thread, NULL); } dp_netdev_pmd_clear_ports(pmd); /* Purges the 'pmd''s flows after stopping the thread, but before * destroying the flows, so that the flow stats can be collected. */ if (dp->dp_purge_cb) { dp->dp_purge_cb(dp->dp_purge_aux, pmd->core_id); } cmap_remove(&pmd->dp->poll_threads, &pmd->node, hash_int(pmd->core_id, 0)); dp_netdev_pmd_unref(pmd); } /* Destroys all pmd threads. */ static void dp_netdev_destroy_all_pmds(struct dp_netdev *dp) { struct dp_netdev_pmd_thread *pmd; struct dp_netdev_pmd_thread **pmd_list; size_t k = 0, n_pmds; n_pmds = cmap_count(&dp->poll_threads); pmd_list = xcalloc(n_pmds, sizeof *pmd_list); CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { /* We cannot call dp_netdev_del_pmd(), since it alters * 'dp->poll_threads' (while we're iterating it) and it * might quiesce. */ ovs_assert(k < n_pmds); pmd_list[k++] = pmd; } for (size_t i = 0; i < k; i++) { dp_netdev_del_pmd(dp, pmd_list[i]); } free(pmd_list); } /* Deletes all pmd threads on numa node 'numa_id' and * fixes static_tx_qids of other threads to keep them sequential. */ static void dp_netdev_del_pmds_on_numa(struct dp_netdev *dp, int numa_id) { struct dp_netdev_pmd_thread *pmd; int n_pmds_on_numa, n_pmds; int *free_idx, k = 0; struct dp_netdev_pmd_thread **pmd_list; n_pmds_on_numa = get_n_pmd_threads_on_numa(dp, numa_id); free_idx = xcalloc(n_pmds_on_numa, sizeof *free_idx); pmd_list = xcalloc(n_pmds_on_numa, sizeof *pmd_list); CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { /* We cannot call dp_netdev_del_pmd(), since it alters * 'dp->poll_threads' (while we're iterating it) and it * might quiesce. */ if (pmd->numa_id == numa_id) { atomic_read_relaxed(&pmd->static_tx_qid, &free_idx[k]); pmd_list[k] = pmd; ovs_assert(k < n_pmds_on_numa); k++; } } for (int i = 0; i < k; i++) { dp_netdev_del_pmd(dp, pmd_list[i]); } n_pmds = get_n_pmd_threads(dp); CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { int old_tx_qid; atomic_read_relaxed(&pmd->static_tx_qid, &old_tx_qid); if (old_tx_qid >= n_pmds) { int new_tx_qid = free_idx[--k]; atomic_store_relaxed(&pmd->static_tx_qid, new_tx_qid); } } free(pmd_list); free(free_idx); } /* Deletes all rx queues from pmd->poll_list and all the ports from * pmd->tx_ports. */ static void dp_netdev_pmd_clear_ports(struct dp_netdev_pmd_thread *pmd) { struct rxq_poll *poll; struct tx_port *port; ovs_mutex_lock(&pmd->port_mutex); LIST_FOR_EACH_POP (poll, node, &pmd->poll_list) { free(poll); } pmd->poll_cnt = 0; HMAP_FOR_EACH_POP (port, node, &pmd->tx_ports) { free(port); } ovs_mutex_unlock(&pmd->port_mutex); } static struct tx_port * tx_port_lookup(const struct hmap *hmap, odp_port_t port_no) { struct tx_port *tx; HMAP_FOR_EACH_IN_BUCKET (tx, node, hash_port_no(port_no), hmap) { if (tx->port->port_no == port_no) { return tx; } } return NULL; } /* Deletes all rx queues of 'port' from 'poll_list', and the 'port' from * 'tx_ports' of 'pmd' thread. Returns true if 'port' was found in 'pmd' * (therefore a restart is required). 
*/ static bool dp_netdev_del_port_from_pmd__(struct dp_netdev_port *port, struct dp_netdev_pmd_thread *pmd) { struct rxq_poll *poll, *next; struct tx_port *tx; bool found = false; ovs_mutex_lock(&pmd->port_mutex); LIST_FOR_EACH_SAFE (poll, next, node, &pmd->poll_list) { if (poll->port == port) { found = true; ovs_list_remove(&poll->node); pmd->poll_cnt--; free(poll); } } tx = tx_port_lookup(&pmd->tx_ports, port->port_no); if (tx) { hmap_remove(&pmd->tx_ports, &tx->node); free(tx); found = true; } ovs_mutex_unlock(&pmd->port_mutex); return found; } /* Deletes 'port' from the 'poll_list' and from the 'tx_ports' of all the pmd * threads. The pmd threads that need to be restarted are inserted in * 'to_reload'. */ static void dp_netdev_del_port_from_all_pmds__(struct dp_netdev *dp, struct dp_netdev_port *port, struct hmapx *to_reload) { struct dp_netdev_pmd_thread *pmd; CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { bool found; found = dp_netdev_del_port_from_pmd__(port, pmd); if (found) { hmapx_add(to_reload, pmd); } } } /* Deletes 'port' from the 'poll_list' and from the 'tx_ports' of all the pmd * threads. Reloads the threads if needed. */ static void dp_netdev_del_port_from_all_pmds(struct dp_netdev *dp, struct dp_netdev_port *port) { struct dp_netdev_pmd_thread *pmd; struct hmapx to_reload = HMAPX_INITIALIZER(&to_reload); struct hmapx_node *node; dp_netdev_del_port_from_all_pmds__(dp, port, &to_reload); HMAPX_FOR_EACH (node, &to_reload) { pmd = (struct dp_netdev_pmd_thread *) node->data; dp_netdev_reload_pmd__(pmd); } hmapx_destroy(&to_reload); } /* Returns non-isolated PMD thread from this numa node with fewer * rx queues to poll. Returns NULL if there is no non-isolated PMD threads * on this numa node. Can be called safely only by main thread. */ static struct dp_netdev_pmd_thread * dp_netdev_less_loaded_pmd_on_numa(struct dp_netdev *dp, int numa_id) { int min_cnt = -1; struct dp_netdev_pmd_thread *pmd, *res = NULL; CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { if (!pmd->isolated && pmd->numa_id == numa_id && (min_cnt > pmd->poll_cnt || res == NULL)) { min_cnt = pmd->poll_cnt; res = pmd; } } return res; } /* Adds rx queue to poll_list of PMD thread. */ static void dp_netdev_add_rxq_to_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev_port *port, struct netdev_rxq *rx) OVS_REQUIRES(pmd->port_mutex) { struct rxq_poll *poll = xmalloc(sizeof *poll); poll->port = port; poll->rx = rx; ovs_list_push_back(&pmd->poll_list, &poll->node); pmd->poll_cnt++; } /* Add 'port' to the tx port cache of 'pmd', which must be reloaded for the * changes to take effect. */ static void dp_netdev_add_port_tx_to_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev_port *port) { struct tx_port *tx = xzalloc(sizeof *tx); tx->port = port; tx->qid = -1; ovs_mutex_lock(&pmd->port_mutex); hmap_insert(&pmd->tx_ports, &tx->node, hash_port_no(tx->port->port_no)); ovs_mutex_unlock(&pmd->port_mutex); } /* Distribute all {pinned|non-pinned} rx queues of 'port' between PMD * threads in 'dp'. The pmd threads that need to be restarted are inserted * in 'to_reload'. PMD threads with pinned queues marked as isolated. 
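 * When 'pinned' is true, only queues with an explicit core affinity are
 * placed and the owning pmd is marked isolated; otherwise the remaining
 * unpinned queues go to the least loaded non-isolated pmd on the port's
 * numa node.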
*/ static void dp_netdev_add_port_rx_to_pmds(struct dp_netdev *dp, struct dp_netdev_port *port, struct hmapx *to_reload, bool pinned) { int numa_id = netdev_get_numa_id(port->netdev); struct dp_netdev_pmd_thread *pmd; int i; if (!netdev_is_pmd(port->netdev)) { return; } for (i = 0; i < port->n_rxq; i++) { if (pinned) { if (port->rxqs[i].core_id == -1) { continue; } pmd = dp_netdev_get_pmd(dp, port->rxqs[i].core_id); if (!pmd) { VLOG_WARN("There is no PMD thread on core %d. " "Queue %d on port \'%s\' will not be polled.", port->rxqs[i].core_id, i, netdev_get_name(port->netdev)); continue; } pmd->isolated = true; dp_netdev_pmd_unref(pmd); } else { if (port->rxqs[i].core_id != -1) { continue; } pmd = dp_netdev_less_loaded_pmd_on_numa(dp, numa_id); if (!pmd) { VLOG_WARN("There's no available pmd thread on numa node %d", numa_id); break; } } ovs_mutex_lock(&pmd->port_mutex); dp_netdev_add_rxq_to_pmd(pmd, port, port->rxqs[i].rxq); ovs_mutex_unlock(&pmd->port_mutex); hmapx_add(to_reload, pmd); } } /* Distributes all non-pinned rx queues of 'port' between all PMD threads * in 'dp' and inserts 'port' in the PMD threads 'tx_ports'. The pmd threads * that need to be restarted are inserted in 'to_reload'. */ static void dp_netdev_add_port_to_pmds__(struct dp_netdev *dp, struct dp_netdev_port *port, struct hmapx *to_reload) { struct dp_netdev_pmd_thread *pmd; dp_netdev_add_port_rx_to_pmds(dp, port, to_reload, false); CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { dp_netdev_add_port_tx_to_pmd(pmd, port); hmapx_add(to_reload, pmd); } } /* Distributes all non-pinned rx queues of 'port' between all PMD threads * in 'dp', inserts 'port' in the PMD threads 'tx_ports' and reloads them, * if needed. */ static void dp_netdev_add_port_to_pmds(struct dp_netdev *dp, struct dp_netdev_port *port) { struct dp_netdev_pmd_thread *pmd; struct hmapx to_reload = HMAPX_INITIALIZER(&to_reload); struct hmapx_node *node; dp_netdev_add_port_to_pmds__(dp, port, &to_reload); HMAPX_FOR_EACH (node, &to_reload) { pmd = (struct dp_netdev_pmd_thread *) node->data; dp_netdev_reload_pmd__(pmd); } hmapx_destroy(&to_reload); } /* Starts pmd threads for the numa node 'numa_id', if not already started. * The function takes care of filling the threads tx port cache. */ static void dp_netdev_set_pmds_on_numa(struct dp_netdev *dp, int numa_id) OVS_REQUIRES(dp->port_mutex) { int n_pmds; if (!ovs_numa_numa_id_is_valid(numa_id)) { VLOG_WARN("Cannot create pmd threads due to numa id (%d) invalid", numa_id); return; } n_pmds = get_n_pmd_threads_on_numa(dp, numa_id); /* If there are already pmd threads created for the numa node * in which 'netdev' is on, do nothing. Else, creates the * pmd threads for the numa node. */ if (!n_pmds) { int can_have, n_unpinned, i; n_unpinned = ovs_numa_get_n_unpinned_cores_on_numa(numa_id); if (!n_unpinned) { VLOG_WARN("Cannot create pmd threads due to out of unpinned " "cores on numa node %d", numa_id); return; } /* If cpu mask is specified, uses all unpinned cores, otherwise * tries creating NR_PMD_THREADS pmd threads. */ can_have = dp->pmd_cmask ? 
n_unpinned : MIN(n_unpinned, NR_PMD_THREADS); for (i = 0; i < can_have; i++) { unsigned core_id = ovs_numa_get_unpinned_core_on_numa(numa_id); struct dp_netdev_pmd_thread *pmd = xzalloc(sizeof *pmd); struct dp_netdev_port *port; dp_netdev_configure_pmd(pmd, dp, core_id, numa_id); HMAP_FOR_EACH (port, node, &dp->ports) { dp_netdev_add_port_tx_to_pmd(pmd, port); } pmd->thread = ovs_thread_create("pmd", pmd_thread_main, pmd); } VLOG_INFO("Created %d pmd threads on numa node %d", can_have, numa_id); } } /* Called after pmd threads config change. Restarts pmd threads with * new configuration. */ static void dp_netdev_reset_pmd_threads(struct dp_netdev *dp) OVS_REQUIRES(dp->port_mutex) { struct hmapx to_reload = HMAPX_INITIALIZER(&to_reload); struct dp_netdev_pmd_thread *pmd; struct dp_netdev_port *port; struct hmapx_node *node; HMAP_FOR_EACH (port, node, &dp->ports) { if (netdev_is_pmd(port->netdev)) { int numa_id = netdev_get_numa_id(port->netdev); dp_netdev_set_pmds_on_numa(dp, numa_id); } /* Distribute only pinned rx queues first to mark threads as isolated */ dp_netdev_add_port_rx_to_pmds(dp, port, &to_reload, true); } /* Distribute remaining non-pinned rx queues to non-isolated PMD threads. */ HMAP_FOR_EACH (port, node, &dp->ports) { dp_netdev_add_port_rx_to_pmds(dp, port, &to_reload, false); } HMAPX_FOR_EACH (node, &to_reload) { pmd = (struct dp_netdev_pmd_thread *) node->data; dp_netdev_reload_pmd__(pmd); } hmapx_destroy(&to_reload); } static char * dpif_netdev_get_datapath_version(void) { return xstrdup(""); } static void dp_netdev_flow_used(struct dp_netdev_flow *netdev_flow, int cnt, int size, uint16_t tcp_flags, long long now) { uint16_t flags; atomic_store_relaxed(&netdev_flow->stats.used, now); non_atomic_ullong_add(&netdev_flow->stats.packet_count, cnt); non_atomic_ullong_add(&netdev_flow->stats.byte_count, size); atomic_read_relaxed(&netdev_flow->stats.tcp_flags, &flags); flags |= tcp_flags; atomic_store_relaxed(&netdev_flow->stats.tcp_flags, flags); } static void dp_netdev_count_packet(struct dp_netdev_pmd_thread *pmd, enum dp_stat_type type, int cnt) { non_atomic_ullong_add(&pmd->stats.n[type], cnt); } static int dp_netdev_upcall(struct dp_netdev_pmd_thread *pmd, struct dp_packet *packet_, struct flow *flow, struct flow_wildcards *wc, ovs_u128 *ufid, enum dpif_upcall_type type, const struct nlattr *userdata, struct ofpbuf *actions, struct ofpbuf *put_actions) { struct dp_netdev *dp = pmd->dp; if (OVS_UNLIKELY(!dp->upcall_cb)) { return ENODEV; } if (OVS_UNLIKELY(!VLOG_DROP_DBG(&upcall_rl))) { struct ds ds = DS_EMPTY_INITIALIZER; char *packet_str; struct ofpbuf key; struct odp_flow_key_parms odp_parms = { .flow = flow, .mask = &wc->masks, .support = dp_netdev_support, }; ofpbuf_init(&key, 0); odp_flow_key_from_flow(&odp_parms, &key); packet_str = ofp_packet_to_string(dp_packet_data(packet_), dp_packet_size(packet_)); odp_flow_key_format(key.data, key.size, &ds); VLOG_DBG("%s: %s upcall:\n%s\n%s", dp->name, dpif_upcall_type_to_string(type), ds_cstr(&ds), packet_str); ofpbuf_uninit(&key); free(packet_str); ds_destroy(&ds); } return dp->upcall_cb(packet_, flow, ufid, pmd->core_id, type, userdata, actions, wc, put_actions, dp->upcall_aux); } static inline uint32_t dpif_netdev_packet_get_rss_hash(struct dp_packet *packet, const struct miniflow *mf) { uint32_t hash, recirc_depth; if (OVS_LIKELY(dp_packet_rss_valid(packet))) { hash = dp_packet_get_rss_hash(packet); } else { hash = miniflow_hash_5tuple(mf, 0); dp_packet_set_rss_hash(packet, hash); } /* The RSS hash must account for the 
recirculation depth to avoid * collisions in the exact match cache */ recirc_depth = *recirc_depth_get_unsafe(); if (OVS_UNLIKELY(recirc_depth)) { hash = hash_finish(hash, recirc_depth); dp_packet_set_rss_hash(packet, hash); } return hash; } struct packet_batch_per_flow { unsigned int byte_count; uint16_t tcp_flags; struct dp_netdev_flow *flow; struct dp_packet_batch array; }; static inline void packet_batch_per_flow_update(struct packet_batch_per_flow *batch, struct dp_packet *packet, const struct miniflow *mf) { batch->byte_count += dp_packet_size(packet); batch->tcp_flags |= miniflow_get_tcp_flags(mf); batch->array.packets[batch->array.count++] = packet; } static inline void packet_batch_per_flow_init(struct packet_batch_per_flow *batch, struct dp_netdev_flow *flow) { flow->batch = batch; batch->flow = flow; dp_packet_batch_init(&batch->array); batch->byte_count = 0; batch->tcp_flags = 0; } static inline void packet_batch_per_flow_execute(struct packet_batch_per_flow *batch, struct dp_netdev_pmd_thread *pmd, long long now) { struct dp_netdev_actions *actions; struct dp_netdev_flow *flow = batch->flow; dp_netdev_flow_used(flow, batch->array.count, batch->byte_count, batch->tcp_flags, now); actions = dp_netdev_flow_get_actions(flow); dp_netdev_execute_actions(pmd, &batch->array, true, &flow->flow, actions->actions, actions->size, now); } static inline void dp_netdev_queue_batches(struct dp_packet *pkt, struct dp_netdev_flow *flow, const struct miniflow *mf, struct packet_batch_per_flow *batches, size_t *n_batches) { struct packet_batch_per_flow *batch = flow->batch; if (OVS_UNLIKELY(!batch)) { batch = &batches[(*n_batches)++]; packet_batch_per_flow_init(batch, flow); } packet_batch_per_flow_update(batch, pkt, mf); } /* Try to process all ('cnt') the 'packets' using only the exact match cache * 'pmd->flow_cache'. If a flow is not found for a packet 'packets[i]', the * miniflow is copied into 'keys' and the packet pointer is moved at the * beginning of the 'packets' array. * * The function returns the number of packets that needs to be processed in the * 'packets' array (they have been moved to the beginning of the vector). * * If 'md_is_valid' is false, the metadata in 'packets' is not valid and must be * initialized by this function using 'port_no'. */ static inline size_t emc_processing(struct dp_netdev_pmd_thread *pmd, struct dp_packet_batch *packets_, struct netdev_flow_key *keys, struct packet_batch_per_flow batches[], size_t *n_batches, bool md_is_valid, odp_port_t port_no) { struct emc_cache *flow_cache = &pmd->flow_cache; struct netdev_flow_key *key = &keys[0]; size_t i, n_missed = 0, n_dropped = 0; struct dp_packet **packets = packets_->packets; int cnt = packets_->count; for (i = 0; i < cnt; i++) { struct dp_netdev_flow *flow; struct dp_packet *packet = packets[i]; if (OVS_UNLIKELY(dp_packet_size(packet) < ETH_HEADER_LEN)) { dp_packet_delete(packet); n_dropped++; continue; } if (i != cnt - 1) { /* Prefetch next packet data and metadata. */ OVS_PREFETCH(dp_packet_data(packets[i+1])); pkt_metadata_prefetch_init(&packets[i+1]->md); } if (!md_is_valid) { pkt_metadata_init(&packet->md, port_no); } miniflow_extract(packet, &key->mf); key->len = 0; /* Not computed yet. */ key->hash = dpif_netdev_packet_get_rss_hash(packet, &key->mf); flow = emc_lookup(flow_cache, key); if (OVS_LIKELY(flow)) { dp_netdev_queue_batches(packet, flow, &key->mf, batches, n_batches); } else { /* Exact match cache missed. Group missed packets together at * the beginning of the 'packets' array. 
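             * They are resolved afterwards by fast_path_processing(), which
             * consults the dpcls and, failing that, the upcall slow path.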
*/ packets[n_missed] = packet; /* 'key[n_missed]' contains the key of the current packet and it * must be returned to the caller. The next key should be extracted * to 'keys[n_missed + 1]'. */ key = &keys[++n_missed]; } } dp_netdev_count_packet(pmd, DP_STAT_EXACT_HIT, cnt - n_dropped - n_missed); return n_missed; } static inline void handle_packet_upcall(struct dp_netdev_pmd_thread *pmd, struct dp_packet *packet, const struct netdev_flow_key *key, struct ofpbuf *actions, struct ofpbuf *put_actions, int *lost_cnt, long long now) { struct ofpbuf *add_actions; struct dp_packet_batch b; struct match match; ovs_u128 ufid; int error; match.tun_md.valid = false; miniflow_expand(&key->mf, &match.flow); ofpbuf_clear(actions); ofpbuf_clear(put_actions); dpif_flow_hash(pmd->dp->dpif, &match.flow, sizeof match.flow, &ufid); error = dp_netdev_upcall(pmd, packet, &match.flow, &match.wc, &ufid, DPIF_UC_MISS, NULL, actions, put_actions); if (OVS_UNLIKELY(error && error != ENOSPC)) { dp_packet_delete(packet); (*lost_cnt)++; return; } /* The Netlink encoding of datapath flow keys cannot express * wildcarding the presence of a VLAN tag. Instead, a missing VLAN * tag is interpreted as exact match on the fact that there is no * VLAN. Unless we refactor a lot of code that translates between * Netlink and struct flow representations, we have to do the same * here. */ if (!match.wc.masks.vlan_tci) { match.wc.masks.vlan_tci = htons(0xffff); } /* We can't allow the packet batching in the next loop to execute * the actions. Otherwise, if there are any slow path actions, * we'll send the packet up twice. */ packet_batch_init_packet(&b, packet); dp_netdev_execute_actions(pmd, &b, true, &match.flow, actions->data, actions->size, now); add_actions = put_actions->size ? put_actions : actions; if (OVS_LIKELY(error != ENOSPC)) { struct dp_netdev_flow *netdev_flow; /* XXX: There's a race window where a flow covering this packet * could have already been installed since we last did the flow * lookup before upcall. This could be solved by moving the * mutex lock outside the loop, but that's an awful long time * to be locking everyone out of making flow installs. If we * move to a per-core classifier, it would be reasonable. */ ovs_mutex_lock(&pmd->flow_mutex); netdev_flow = dp_netdev_pmd_lookup_flow(pmd, key, NULL); if (OVS_LIKELY(!netdev_flow)) { netdev_flow = dp_netdev_flow_add(pmd, &match, &ufid, add_actions->data, add_actions->size); } ovs_mutex_unlock(&pmd->flow_mutex); emc_insert(&pmd->flow_cache, key, netdev_flow); } } static inline void fast_path_processing(struct dp_netdev_pmd_thread *pmd, struct dp_packet_batch *packets_, struct netdev_flow_key *keys, struct packet_batch_per_flow batches[], size_t *n_batches, odp_port_t in_port, long long now) { int cnt = packets_->count; #if !defined(__CHECKER__) && !defined(_WIN32) const size_t PKT_ARRAY_SIZE = cnt; #else /* Sparse or MSVC doesn't like variable length array. */ enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST }; #endif struct dp_packet **packets = packets_->packets; struct dpcls *cls; struct dpcls_rule *rules[PKT_ARRAY_SIZE]; struct dp_netdev *dp = pmd->dp; struct emc_cache *flow_cache = &pmd->flow_cache; int miss_cnt = 0, lost_cnt = 0; int lookup_cnt = 0, add_lookup_cnt; bool any_miss; size_t i; for (i = 0; i < cnt; i++) { /* Key length is needed in all the cases, hash computed on demand. 
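         * The length is used both by the dpcls lookup below and when the
         * matching flow is later inserted into the exact match cache.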
*/ keys[i].len = netdev_flow_key_size(miniflow_n_values(&keys[i].mf)); } /* Get the classifier for the in_port */ cls = dp_netdev_pmd_lookup_dpcls(pmd, in_port); if (OVS_LIKELY(cls)) { any_miss = !dpcls_lookup(cls, keys, rules, cnt, &lookup_cnt); } else { any_miss = true; memset(rules, 0, sizeof(rules)); } if (OVS_UNLIKELY(any_miss) && !fat_rwlock_tryrdlock(&dp->upcall_rwlock)) { uint64_t actions_stub[512 / 8], slow_stub[512 / 8]; struct ofpbuf actions, put_actions; ofpbuf_use_stub(&actions, actions_stub, sizeof actions_stub); ofpbuf_use_stub(&put_actions, slow_stub, sizeof slow_stub); for (i = 0; i < cnt; i++) { struct dp_netdev_flow *netdev_flow; if (OVS_LIKELY(rules[i])) { continue; } /* It's possible that an earlier slow path execution installed * a rule covering this flow. In this case, it's a lot cheaper * to catch it here than execute a miss. */ netdev_flow = dp_netdev_pmd_lookup_flow(pmd, &keys[i], &add_lookup_cnt); if (netdev_flow) { lookup_cnt += add_lookup_cnt; rules[i] = &netdev_flow->cr; continue; } miss_cnt++; handle_packet_upcall(pmd, packets[i], &keys[i], &actions, &put_actions, &lost_cnt, now); } ofpbuf_uninit(&actions); ofpbuf_uninit(&put_actions); fat_rwlock_unlock(&dp->upcall_rwlock); dp_netdev_count_packet(pmd, DP_STAT_LOST, lost_cnt); } else if (OVS_UNLIKELY(any_miss)) { for (i = 0; i < cnt; i++) { if (OVS_UNLIKELY(!rules[i])) { dp_packet_delete(packets[i]); lost_cnt++; miss_cnt++; } } } for (i = 0; i < cnt; i++) { struct dp_packet *packet = packets[i]; struct dp_netdev_flow *flow; if (OVS_UNLIKELY(!rules[i])) { continue; } flow = dp_netdev_flow_cast(rules[i]); emc_insert(flow_cache, &keys[i], flow); dp_netdev_queue_batches(packet, flow, &keys[i].mf, batches, n_batches); } dp_netdev_count_packet(pmd, DP_STAT_MASKED_HIT, cnt - miss_cnt); dp_netdev_count_packet(pmd, DP_STAT_LOOKUP_HIT, lookup_cnt); dp_netdev_count_packet(pmd, DP_STAT_MISS, miss_cnt); dp_netdev_count_packet(pmd, DP_STAT_LOST, lost_cnt); } /* Packets enter the datapath from a port (or from recirculation) here. * * For performance reasons a caller may choose not to initialize the metadata * in 'packets': in this case 'mdinit' is false and this function needs to * initialize it using 'port_no'. If the metadata in 'packets' is already * valid, 'md_is_valid' must be true and 'port_no' will be ignored. */ static void dp_netdev_input__(struct dp_netdev_pmd_thread *pmd, struct dp_packet_batch *packets, bool md_is_valid, odp_port_t port_no) { int cnt = packets->count; #if !defined(__CHECKER__) && !defined(_WIN32) const size_t PKT_ARRAY_SIZE = cnt; #else /* Sparse or MSVC doesn't like variable length array. */ enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST }; #endif OVS_ALIGNED_VAR(CACHE_LINE_SIZE) struct netdev_flow_key keys[PKT_ARRAY_SIZE]; struct packet_batch_per_flow batches[PKT_ARRAY_SIZE]; long long now = time_msec(); size_t newcnt, n_batches, i; odp_port_t in_port; n_batches = 0; newcnt = emc_processing(pmd, packets, keys, batches, &n_batches, md_is_valid, port_no); if (OVS_UNLIKELY(newcnt)) { packets->count = newcnt; /* Get ingress port from first packet's metadata. */ in_port = packets->packets[0]->md.in_port.odp_port; fast_path_processing(pmd, packets, keys, batches, &n_batches, in_port, now); } /* All the flow batches need to be reset before any call to * packet_batch_per_flow_execute() as it could potentially trigger * recirculation. When a packet matching flow ‘j’ happens to be * recirculated, the nested call to dp_netdev_input__() could potentially * classify the packet as matching another flow - say 'k'. 
It could happen * that in the previous call to dp_netdev_input__() that same flow 'k' had * already its own batches[k] still waiting to be served. So if its * ‘batch’ member is not reset, the recirculated packet would be wrongly * appended to batches[k] of the 1st call to dp_netdev_input__(). */ for (i = 0; i < n_batches; i++) { batches[i].flow->batch = NULL; } for (i = 0; i < n_batches; i++) { packet_batch_per_flow_execute(&batches[i], pmd, now); } } static void dp_netdev_input(struct dp_netdev_pmd_thread *pmd, struct dp_packet_batch *packets, odp_port_t port_no) { dp_netdev_input__(pmd, packets, false, port_no); } static void dp_netdev_recirculate(struct dp_netdev_pmd_thread *pmd, struct dp_packet_batch *packets) { dp_netdev_input__(pmd, packets, true, 0); } struct dp_netdev_execute_aux { struct dp_netdev_pmd_thread *pmd; long long now; const struct flow *flow; }; static void dpif_netdev_register_dp_purge_cb(struct dpif *dpif, dp_purge_callback *cb, void *aux) { struct dp_netdev *dp = get_dp_netdev(dpif); dp->dp_purge_aux = aux; dp->dp_purge_cb = cb; } static void dpif_netdev_register_upcall_cb(struct dpif *dpif, upcall_callback *cb, void *aux) { struct dp_netdev *dp = get_dp_netdev(dpif); dp->upcall_aux = aux; dp->upcall_cb = cb; } static void dpif_netdev_xps_revalidate_pmd(const struct dp_netdev_pmd_thread *pmd, long long now, bool purge) { struct tx_port *tx; struct dp_netdev_port *port; long long interval; HMAP_FOR_EACH (tx, node, &pmd->port_cache) { if (!tx->port->dynamic_txqs) { continue; } interval = now - tx->last_used; if (tx->qid >= 0 && (purge || interval >= XPS_TIMEOUT_MS)) { port = tx->port; ovs_mutex_lock(&port->txq_used_mutex); port->txq_used[tx->qid]--; ovs_mutex_unlock(&port->txq_used_mutex); tx->qid = -1; } } } static int dpif_netdev_xps_get_tx_qid(const struct dp_netdev_pmd_thread *pmd, struct tx_port *tx, long long now) { struct dp_netdev_port *port; long long interval; int i, min_cnt, min_qid; if (OVS_UNLIKELY(!now)) { now = time_msec(); } interval = now - tx->last_used; tx->last_used = now; if (OVS_LIKELY(tx->qid >= 0 && interval < XPS_TIMEOUT_MS)) { return tx->qid; } port = tx->port; ovs_mutex_lock(&port->txq_used_mutex); if (tx->qid >= 0) { port->txq_used[tx->qid]--; tx->qid = -1; } min_cnt = -1; min_qid = 0; for (i = 0; i < netdev_n_txq(port->netdev); i++) { if (port->txq_used[i] < min_cnt || min_cnt == -1) { min_cnt = port->txq_used[i]; min_qid = i; } } port->txq_used[min_qid]++; tx->qid = min_qid; ovs_mutex_unlock(&port->txq_used_mutex); dpif_netdev_xps_revalidate_pmd(pmd, now, false); VLOG_DBG("Core %d: New TX queue ID %d for port \'%s\'.", pmd->core_id, tx->qid, netdev_get_name(tx->port->netdev)); return min_qid; } static struct tx_port * pmd_tx_port_cache_lookup(const struct dp_netdev_pmd_thread *pmd, odp_port_t port_no) { return tx_port_lookup(&pmd->port_cache, port_no); } static int push_tnl_action(const struct dp_netdev_pmd_thread *pmd, const struct nlattr *attr, struct dp_packet_batch *batch) { struct tx_port *tun_port; const struct ovs_action_push_tnl *data; int err; data = nl_attr_get(attr); tun_port = pmd_tx_port_cache_lookup(pmd, u32_to_odp(data->tnl_port)); if (!tun_port) { err = -EINVAL; goto error; } err = netdev_push_header(tun_port->port->netdev, batch, data); if (!err) { return 0; } error: dp_packet_delete_batch(batch, true); return err; } static void dp_execute_userspace_action(struct dp_netdev_pmd_thread *pmd, struct dp_packet *packet, bool may_steal, struct flow *flow, ovs_u128 *ufid, struct ofpbuf *actions, const struct nlattr *userdata, 
long long now) { struct dp_packet_batch b; int error; ofpbuf_clear(actions); error = dp_netdev_upcall(pmd, packet, flow, NULL, ufid, DPIF_UC_ACTION, userdata, actions, NULL); if (!error || error == ENOSPC) { packet_batch_init_packet(&b, packet); dp_netdev_execute_actions(pmd, &b, may_steal, flow, actions->data, actions->size, now); } else if (may_steal) { dp_packet_delete(packet); } } static void dp_execute_cb(void *aux_, struct dp_packet_batch *packets_, const struct nlattr *a, bool may_steal) { struct dp_netdev_execute_aux *aux = aux_; uint32_t *depth = recirc_depth_get(); struct dp_netdev_pmd_thread *pmd = aux->pmd; struct dp_netdev *dp = pmd->dp; int type = nl_attr_type(a); long long now = aux->now; struct tx_port *p; switch ((enum ovs_action_attr)type) { case OVS_ACTION_ATTR_OUTPUT: p = pmd_tx_port_cache_lookup(pmd, u32_to_odp(nl_attr_get_u32(a))); if (OVS_LIKELY(p)) { int tx_qid; bool dynamic_txqs; dynamic_txqs = p->port->dynamic_txqs; if (dynamic_txqs) { tx_qid = dpif_netdev_xps_get_tx_qid(pmd, p, now); } else { atomic_read_relaxed(&pmd->static_tx_qid, &tx_qid); } netdev_send(p->port->netdev, tx_qid, packets_, may_steal, dynamic_txqs); return; } break; case OVS_ACTION_ATTR_TUNNEL_PUSH: if (*depth < MAX_RECIRC_DEPTH) { struct dp_packet_batch tnl_pkt; struct dp_packet_batch *orig_packets_ = packets_; int err; if (!may_steal) { dp_packet_batch_clone(&tnl_pkt, packets_); packets_ = &tnl_pkt; dp_packet_batch_reset_cutlen(orig_packets_); } dp_packet_batch_apply_cutlen(packets_); err = push_tnl_action(pmd, a, packets_); if (!err) { (*depth)++; dp_netdev_recirculate(pmd, packets_); (*depth)--; } return; } break; case OVS_ACTION_ATTR_TUNNEL_POP: if (*depth < MAX_RECIRC_DEPTH) { struct dp_packet_batch *orig_packets_ = packets_; odp_port_t portno = u32_to_odp(nl_attr_get_u32(a)); p = pmd_tx_port_cache_lookup(pmd, portno); if (p) { struct dp_packet_batch tnl_pkt; int i; if (!may_steal) { dp_packet_batch_clone(&tnl_pkt, packets_); packets_ = &tnl_pkt; dp_packet_batch_reset_cutlen(orig_packets_); } dp_packet_batch_apply_cutlen(packets_); netdev_pop_header(p->port->netdev, packets_); if (!packets_->count) { return; } for (i = 0; i < packets_->count; i++) { packets_->packets[i]->md.in_port.odp_port = portno; } (*depth)++; dp_netdev_recirculate(pmd, packets_); (*depth)--; return; } } break; case OVS_ACTION_ATTR_USERSPACE: if (!fat_rwlock_tryrdlock(&dp->upcall_rwlock)) { struct dp_packet_batch *orig_packets_ = packets_; struct dp_packet **packets = packets_->packets; const struct nlattr *userdata; struct dp_packet_batch usr_pkt; struct ofpbuf actions; struct flow flow; ovs_u128 ufid; bool clone = false; int i; userdata = nl_attr_find_nested(a, OVS_USERSPACE_ATTR_USERDATA); ofpbuf_init(&actions, 0); if (packets_->trunc) { if (!may_steal) { dp_packet_batch_clone(&usr_pkt, packets_); packets_ = &usr_pkt; packets = packets_->packets; clone = true; dp_packet_batch_reset_cutlen(orig_packets_); } dp_packet_batch_apply_cutlen(packets_); } for (i = 0; i < packets_->count; i++) { flow_extract(packets[i], &flow); dpif_flow_hash(dp->dpif, &flow, sizeof flow, &ufid); dp_execute_userspace_action(pmd, packets[i], may_steal, &flow, &ufid, &actions, userdata, now); } if (clone) { dp_packet_delete_batch(packets_, true); } ofpbuf_uninit(&actions); fat_rwlock_unlock(&dp->upcall_rwlock); return; } break; case OVS_ACTION_ATTR_RECIRC: if (*depth < MAX_RECIRC_DEPTH) { struct dp_packet_batch recirc_pkts; int i; if (!may_steal) { dp_packet_batch_clone(&recirc_pkts, packets_); packets_ = &recirc_pkts; } for (i = 0; i < 
packets_->count; i++) { packets_->packets[i]->md.recirc_id = nl_attr_get_u32(a); } (*depth)++; dp_netdev_recirculate(pmd, packets_); (*depth)--; return; } VLOG_WARN("Packet dropped. Max recirculation depth exceeded."); break; case OVS_ACTION_ATTR_CT: { const struct nlattr *b; bool commit = false; unsigned int left; uint16_t zone = 0; const char *helper = NULL; const uint32_t *setmark = NULL; const struct ovs_key_ct_labels *setlabel = NULL; NL_ATTR_FOR_EACH_UNSAFE (b, left, nl_attr_get(a), nl_attr_get_size(a)) { enum ovs_ct_attr sub_type = nl_attr_type(b); switch(sub_type) { case OVS_CT_ATTR_COMMIT: commit = true; break; case OVS_CT_ATTR_ZONE: zone = nl_attr_get_u16(b); break; case OVS_CT_ATTR_HELPER: helper = nl_attr_get_string(b); break; case OVS_CT_ATTR_MARK: setmark = nl_attr_get(b); break; case OVS_CT_ATTR_LABELS: setlabel = nl_attr_get(b); break; case OVS_CT_ATTR_NAT: case OVS_CT_ATTR_UNSPEC: case __OVS_CT_ATTR_MAX: OVS_NOT_REACHED(); } } conntrack_execute(&dp->conntrack, packets_, aux->flow->dl_type, commit, zone, setmark, setlabel, helper); break; } case OVS_ACTION_ATTR_PUSH_VLAN: case OVS_ACTION_ATTR_POP_VLAN: case OVS_ACTION_ATTR_PUSH_MPLS: case OVS_ACTION_ATTR_POP_MPLS: case OVS_ACTION_ATTR_SET: case OVS_ACTION_ATTR_SET_MASKED: case OVS_ACTION_ATTR_SAMPLE: case OVS_ACTION_ATTR_HASH: case OVS_ACTION_ATTR_UNSPEC: case OVS_ACTION_ATTR_TRUNC: case __OVS_ACTION_ATTR_MAX: OVS_NOT_REACHED(); } dp_packet_delete_batch(packets_, may_steal); } static void dp_netdev_execute_actions(struct dp_netdev_pmd_thread *pmd, struct dp_packet_batch *packets, bool may_steal, const struct flow *flow, const struct nlattr *actions, size_t actions_len, long long now) { struct dp_netdev_execute_aux aux = { pmd, now, flow }; odp_execute_actions(&aux, packets, may_steal, actions, actions_len, dp_execute_cb); } struct dp_netdev_ct_dump { struct ct_dpif_dump_state up; struct conntrack_dump dump; struct conntrack *ct; struct dp_netdev *dp; }; static int dpif_netdev_ct_dump_start(struct dpif *dpif, struct ct_dpif_dump_state **dump_, const uint16_t *pzone) { struct dp_netdev *dp = get_dp_netdev(dpif); struct dp_netdev_ct_dump *dump; dump = xzalloc(sizeof *dump); dump->dp = dp; dump->ct = &dp->conntrack; conntrack_dump_start(&dp->conntrack, &dump->dump, pzone); *dump_ = &dump->up; return 0; } static int dpif_netdev_ct_dump_next(struct dpif *dpif OVS_UNUSED, struct ct_dpif_dump_state *dump_, struct ct_dpif_entry *entry) { struct dp_netdev_ct_dump *dump; INIT_CONTAINER(dump, dump_, up); return conntrack_dump_next(&dump->dump, entry); } static int dpif_netdev_ct_dump_done(struct dpif *dpif OVS_UNUSED, struct ct_dpif_dump_state *dump_) { struct dp_netdev_ct_dump *dump; int err; INIT_CONTAINER(dump, dump_, up); err = conntrack_dump_done(&dump->dump); free(dump); return err; } static int dpif_netdev_ct_flush(struct dpif *dpif, const uint16_t *zone) { struct dp_netdev *dp = get_dp_netdev(dpif); return conntrack_flush(&dp->conntrack, zone); } const struct dpif_class dpif_netdev_class = { "netdev", dpif_netdev_init, dpif_netdev_enumerate, dpif_netdev_port_open_type, dpif_netdev_open, dpif_netdev_close, dpif_netdev_destroy, dpif_netdev_run, dpif_netdev_wait, dpif_netdev_get_stats, dpif_netdev_port_add, dpif_netdev_port_del, dpif_netdev_port_set_config, dpif_netdev_port_query_by_number, dpif_netdev_port_query_by_name, NULL, /* port_get_pid */ dpif_netdev_port_dump_start, dpif_netdev_port_dump_next, dpif_netdev_port_dump_done, dpif_netdev_port_poll, dpif_netdev_port_poll_wait, dpif_netdev_flow_flush, 
    dpif_netdev_flow_dump_create,
    dpif_netdev_flow_dump_destroy,
    dpif_netdev_flow_dump_thread_create,
    dpif_netdev_flow_dump_thread_destroy,
    dpif_netdev_flow_dump_next,
    dpif_netdev_operate,
    NULL,                       /* recv_set */
    NULL,                       /* handlers_set */
    dpif_netdev_pmd_set,
    dpif_netdev_queue_to_priority,
    NULL,                       /* recv */
    NULL,                       /* recv_wait */
    NULL,                       /* recv_purge */
    dpif_netdev_register_dp_purge_cb,
    dpif_netdev_register_upcall_cb,
    dpif_netdev_enable_upcall,
    dpif_netdev_disable_upcall,
    dpif_netdev_get_datapath_version,
    dpif_netdev_ct_dump_start,
    dpif_netdev_ct_dump_next,
    dpif_netdev_ct_dump_done,
    dpif_netdev_ct_flush,
};

static void
dpif_dummy_change_port_number(struct unixctl_conn *conn, int argc OVS_UNUSED,
                              const char *argv[], void *aux OVS_UNUSED)
{
    struct dp_netdev_port *port;
    struct dp_netdev *dp;
    odp_port_t port_no;

    ovs_mutex_lock(&dp_netdev_mutex);
    dp = shash_find_data(&dp_netdevs, argv[1]);
    if (!dp || !dpif_netdev_class_is_dummy(dp->class)) {
        ovs_mutex_unlock(&dp_netdev_mutex);
        unixctl_command_reply_error(conn, "unknown datapath or not a dummy");
        return;
    }
    ovs_refcount_ref(&dp->ref_cnt);
    ovs_mutex_unlock(&dp_netdev_mutex);

    ovs_mutex_lock(&dp->port_mutex);
    if (get_port_by_name(dp, argv[2], &port)) {
        unixctl_command_reply_error(conn, "unknown port");
        goto exit;
    }

    port_no = u32_to_odp(atoi(argv[3]));
    if (!port_no || port_no == ODPP_NONE) {
        unixctl_command_reply_error(conn, "bad port number");
        goto exit;
    }
    if (dp_netdev_lookup_port(dp, port_no)) {
        unixctl_command_reply_error(conn, "port number already in use");
        goto exit;
    }

    /* Remove port. */
    hmap_remove(&dp->ports, &port->node);
    dp_netdev_del_port_from_all_pmds(dp, port);

    /* Reinsert with new port number. */
    port->port_no = port_no;
    hmap_insert(&dp->ports, &port->node, hash_port_no(port_no));
    dp_netdev_add_port_to_pmds(dp, port);

    seq_change(dp->port_seq);
    unixctl_command_reply(conn, NULL);

exit:
    ovs_mutex_unlock(&dp->port_mutex);
    dp_netdev_unref(dp);
}

static void
dpif_dummy_register__(const char *type)
{
    struct dpif_class *class;

    class = xmalloc(sizeof *class);
    *class = dpif_netdev_class;
    class->type = xstrdup(type);
    dp_register_provider(class);
}

static void
dpif_dummy_override(const char *type)
{
    int error;

    /*
     * Ignore EAFNOSUPPORT to allow --enable-dummy=system with
     * a userland-only build.  It's useful for the testsuite.
     */
    error = dp_unregister_provider(type);
    if (error == 0 || error == EAFNOSUPPORT) {
        dpif_dummy_register__(type);
    }
}

void
dpif_dummy_register(enum dummy_level level)
{
    if (level == DUMMY_OVERRIDE_ALL) {
        struct sset types;
        const char *type;

        sset_init(&types);
        dp_enumerate_types(&types);
        SSET_FOR_EACH (type, &types) {
            dpif_dummy_override(type);
        }
        sset_destroy(&types);
    } else if (level == DUMMY_OVERRIDE_SYSTEM) {
        dpif_dummy_override("system");
    }

    dpif_dummy_register__("dummy");

    unixctl_command_register("dpif-dummy/change-port-number",
                             "dp port new-number",
                             3, 3, dpif_dummy_change_port_number, NULL);
}

/* Datapath Classifier. */

/* A set of rules that all have the same fields wildcarded. */
struct dpcls_subtable {
    /* The fields are only used by writers. */
    struct cmap_node cmap_node OVS_GUARDED; /* Within dpcls 'subtables_map'. */

    /* These fields are accessed by readers. */
    struct cmap rules;           /* Contains "struct dpcls_rule"s. */
    uint32_t hit_cnt;            /* Number of match hits in subtable in
                                    current optimization interval. */
    struct netdev_flow_key mask; /* Wildcards for fields (const). */
    /* 'mask' must be the last field, additional space is allocated here. */
};

/* Initializes 'cls' as a classifier that initially contains no classification
 * rules.
 */
static void
dpcls_init(struct dpcls *cls)
{
    cmap_init(&cls->subtables_map);
    pvector_init(&cls->subtables);
}

static void
dpcls_destroy_subtable(struct dpcls *cls, struct dpcls_subtable *subtable)
{
    VLOG_DBG("Destroying subtable %p for in_port %d", subtable, cls->in_port);
    pvector_remove(&cls->subtables, subtable);
    cmap_remove(&cls->subtables_map, &subtable->cmap_node,
                subtable->mask.hash);
    cmap_destroy(&subtable->rules);
    ovsrcu_postpone(free, subtable);
}

/* Destroys 'cls'.  Rules within 'cls', if any, are not freed; this is the
 * caller's responsibility.
 * May only be called after all the readers have been terminated. */
static void
dpcls_destroy(struct dpcls *cls)
{
    if (cls) {
        struct dpcls_subtable *subtable;

        CMAP_FOR_EACH (subtable, cmap_node, &cls->subtables_map) {
            ovs_assert(cmap_count(&subtable->rules) == 0);
            dpcls_destroy_subtable(cls, subtable);
        }
        cmap_destroy(&cls->subtables_map);
        pvector_destroy(&cls->subtables);
    }
}

static struct dpcls_subtable *
dpcls_create_subtable(struct dpcls *cls, const struct netdev_flow_key *mask)
{
    struct dpcls_subtable *subtable;

    /* Need to add one. */
    subtable = xmalloc(sizeof *subtable
                       - sizeof subtable->mask.mf + mask->len);
    cmap_init(&subtable->rules);
    subtable->hit_cnt = 0;
    netdev_flow_key_clone(&subtable->mask, mask);
    cmap_insert(&cls->subtables_map, &subtable->cmap_node, mask->hash);
    /* Add the new subtable at the end of the pvector (with no hits yet) */
    pvector_insert(&cls->subtables, subtable, 0);
    VLOG_DBG("Creating %"PRIuSIZE". subtable %p for in_port %d",
             cmap_count(&cls->subtables_map), subtable, cls->in_port);
    pvector_publish(&cls->subtables);

    return subtable;
}

static inline struct dpcls_subtable *
dpcls_find_subtable(struct dpcls *cls, const struct netdev_flow_key *mask)
{
    struct dpcls_subtable *subtable;

    CMAP_FOR_EACH_WITH_HASH (subtable, cmap_node, mask->hash,
                             &cls->subtables_map) {
        if (netdev_flow_key_equal(&subtable->mask, mask)) {
            return subtable;
        }
    }
    return dpcls_create_subtable(cls, mask);
}

/* Periodically sort the dpcls subtable vectors according to hit counts */
static void
dpcls_sort_subtable_vector(struct dpcls *cls)
{
    struct pvector *pvec = &cls->subtables;
    struct dpcls_subtable *subtable;

    PVECTOR_FOR_EACH (subtable, pvec) {
        pvector_change_priority(pvec, subtable, subtable->hit_cnt);
        subtable->hit_cnt = 0;
    }
    pvector_publish(pvec);
}

static inline void
dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd)
{
    struct dpcls *cls;
    long long int now = time_msec();

    if (now > pmd->next_optimization) {
        /* Try to obtain the flow lock to block out revalidator threads.
         * If not possible, just try next time. */
        if (!ovs_mutex_trylock(&pmd->flow_mutex)) {
            /* Optimize each classifier */
            CMAP_FOR_EACH (cls, node, &pmd->classifiers) {
                dpcls_sort_subtable_vector(cls);
            }
            ovs_mutex_unlock(&pmd->flow_mutex);
            /* Start new measuring interval */
            pmd->next_optimization = now + DPCLS_OPTIMIZATION_INTERVAL;
        }
    }
}

/* Insert 'rule' into 'cls'. */
static void
dpcls_insert(struct dpcls *cls, struct dpcls_rule *rule,
             const struct netdev_flow_key *mask)
{
    struct dpcls_subtable *subtable = dpcls_find_subtable(cls, mask);

    /* Refer to subtable's mask, also for later removal. */
    rule->mask = &subtable->mask;
    cmap_insert(&subtable->rules, &rule->cmap_node, rule->flow.hash);
}

/* Removes 'rule' from 'cls', also destructing the 'rule'. */
static void
dpcls_remove(struct dpcls *cls, struct dpcls_rule *rule)
{
    struct dpcls_subtable *subtable;

    ovs_assert(rule->mask);

    /* Get subtable from reference in rule->mask.
     */
    INIT_CONTAINER(subtable, rule->mask, mask);

    if (cmap_remove(&subtable->rules, &rule->cmap_node, rule->flow.hash)
        == 0) {
        /* Delete empty subtable. */
        dpcls_destroy_subtable(cls, subtable);
        pvector_publish(&cls->subtables);
    }
}

/* Returns true if 'target' satisfies 'key' in 'mask', that is, if for each
 * 1-bit in 'mask' the corresponding bits in 'key' and 'target' are the
 * same. */
static inline bool
dpcls_rule_matches_key(const struct dpcls_rule *rule,
                       const struct netdev_flow_key *target)
{
    const uint64_t *keyp = miniflow_get_values(&rule->flow.mf);
    const uint64_t *maskp = miniflow_get_values(&rule->mask->mf);
    uint64_t value;

    NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP(value, target, rule->flow.mf.map) {
        if (OVS_UNLIKELY((value & *maskp++) != *keyp++)) {
            return false;
        }
    }
    return true;
}

/* For each miniflow in 'keys' performs a classifier lookup writing the result
 * into the corresponding slot in 'rules'.  If a particular entry in 'keys' is
 * NULL it is skipped.
 *
 * This function is optimized for use in the userspace datapath and therefore
 * does not implement a lot of features available in the standard
 * classifier_lookup() function.  Specifically, it does not implement
 * priorities, instead returning any rule which matches the flow.
 *
 * Returns true if all miniflows found a corresponding rule. */
static bool
dpcls_lookup(struct dpcls *cls, const struct netdev_flow_key keys[],
             struct dpcls_rule **rules, const size_t cnt,
             int *num_lookups_p)
{
    /* The received 'cnt' miniflows are the search-keys that will be processed
     * in batches of 16 elements.  N_MAPS will contain the number of these
     * 16-element batches, i.e. for 'cnt' = 32, N_MAPS will be 2.  The batch
     * size 16 was experimentally found faster than 8 or 32. */
    typedef uint16_t map_type;
#define MAP_BITS (sizeof(map_type) * CHAR_BIT)

#if !defined(__CHECKER__) && !defined(_WIN32)
    const int N_MAPS = DIV_ROUND_UP(cnt, MAP_BITS);
#else
    enum { N_MAPS = DIV_ROUND_UP(NETDEV_MAX_BURST, MAP_BITS) };
#endif
    map_type maps[N_MAPS];
    struct dpcls_subtable *subtable;

    memset(maps, 0xff, sizeof maps);
    if (cnt % MAP_BITS) {
        maps[N_MAPS - 1] >>= MAP_BITS - cnt % MAP_BITS; /* Clear extra bits. */
    }
    memset(rules, 0, cnt * sizeof *rules);

    int lookups_match = 0, subtable_pos = 1;

    /* The Datapath classifier - aka dpcls - is composed of subtables.
     * Subtables are dynamically created as needed when new rules are
     * inserted.  Each subtable collects rules with matches on a specific
     * subset of packet fields as defined by the subtable's mask.  We proceed
     * to process every search-key against each subtable, but when a match is
     * found for a search-key, the search for that key can stop because the
     * rules are non-overlapping. */
    PVECTOR_FOR_EACH (subtable, &cls->subtables) {
        const struct netdev_flow_key *mkeys = keys;
        struct dpcls_rule **mrules = rules;
        map_type remains = 0;
        int m;

        BUILD_ASSERT_DECL(sizeof remains == sizeof *maps);

        /* Loops on each batch of 16 search-keys. */
        for (m = 0; m < N_MAPS; m++, mkeys += MAP_BITS, mrules += MAP_BITS) {
            uint32_t hashes[MAP_BITS];
            const struct cmap_node *nodes[MAP_BITS];
            unsigned long map = maps[m];
            int i;

            if (!map) {
                continue; /* Skip empty maps. */
            }

            /* Compute hashes for the remaining keys.  Each search-key is
             * masked with the subtable's mask to avoid hashing the wildcarded
             * bits. */
            ULLONG_FOR_EACH_1(i, map) {
                hashes[i] = netdev_flow_key_hash_in_mask(&mkeys[i],
                                                         &subtable->mask);
            }
            /* Lookup. */
            map = cmap_find_batch(&subtable->rules, map, hashes, nodes);
            /* Check results.
             * When the i-th bit of map is set, it means that a set of nodes
             * with a matching hash value was found for the i-th search-key.
             * Due to possible hash collisions we need to check which of the
             * found rules, if any, really matches our masked search-key. */
            ULLONG_FOR_EACH_1(i, map) {
                struct dpcls_rule *rule;

                CMAP_NODE_FOR_EACH (rule, cmap_node, nodes[i]) {
                    if (OVS_LIKELY(dpcls_rule_matches_key(rule, &mkeys[i]))) {
                        mrules[i] = rule;
                        /* Even at 20 Mpps the 32-bit hit_cnt cannot wrap
                         * within one second optimization interval. */
                        subtable->hit_cnt++;
                        lookups_match += subtable_pos;
                        goto next;
                    }
                }
                /* None of the found rules was a match.  Reset the i-th bit to
                 * keep searching in the next subtable. */
                ULLONG_SET0(map, i);  /* Did not match. */
            next:
                ;                     /* Keep Sparse happy. */
            }
            maps[m] &= ~map;          /* Clear the found rules. */
            remains |= maps[m];
        }
        if (!remains) {
            if (num_lookups_p) {
                *num_lookups_p = lookups_match;
            }
            return true;              /* All found. */
        }
        subtable_pos++;
    }
    if (num_lookups_p) {
        *num_lookups_p = lookups_match;
    }
    return false;                     /* Some misses. */
}
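
/* Usage sketch for the classifier above (illustrative only, kept in a
 * comment rather than compiled code): a wildcarded rule is added with
 * dpcls_insert() and a burst of packets is then classified in one call to
 * dpcls_lookup().  The 'example_*' helpers below are hypothetical and not
 * part of this file; in the real datapath the keys are the miniflows
 * extracted from the packets of a batch and the mask comes from the
 * installed megaflow entry.
 *
 *     struct dpcls cls;
 *     struct dpcls_rule *rule = example_alloc_rule();   // hypothetical
 *     struct netdev_flow_key mask = example_mask();     // hypothetical
 *     struct netdev_flow_key keys[NETDEV_MAX_BURST];    // one key/packet
 *     struct dpcls_rule *rules[NETDEV_MAX_BURST];       // results, or NULL
 *     int n_lookups;
 *
 *     dpcls_init(&cls);
 *     dpcls_insert(&cls, rule, &mask);        // creates/reuses a subtable
 *
 *     bool all_found = dpcls_lookup(&cls, keys, rules, cnt, &n_lookups);
 *
 * On a miss ('all_found' == false) the caller falls back to an upcall and
 * typically installs the resulting megaflow, which in turn creates or
 * reuses the subtable matching that flow's mask. */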