[ovs-dev] [v6 06/11] dpif-netdev: Add packet count and core id parameters for study

Cian Ferriter cian.ferriter at intel.com
Tue Jul 6 13:11:45 UTC 2021


From: Kumar Amber <kumar.amber at intel.com>

This commit introduces an additional command line parameter
for the mfex study function. If the user provides an additional packet
count, it is used in study as the minimum number of packets which must
be processed, else a default value is chosen.
It also introduces a third parameter for choosing a particular pmd core.

$ ovs-appctl dpif-netdev/miniflow-parser-set study 500 3
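
Assuming a PMD thread is running on core 3, the success reply built below in
dpif_miniflow_extract_impl_set() would read roughly:

    Miniflow implementation set to study, on pmd thread 3, studying 500 packets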

Signed-off-by: Kumar Amber <kumar.amber at intel.com>

---

v5:
- fix review comments (Ian, Flavio, Eelco)
- introduce pmd core id parameter
---
 Documentation/topics/dpdk/bridge.rst | 35 +++++++++++-
 lib/dpif-netdev-extract-study.c      | 23 +++++++-
 lib/dpif-netdev-private-extract.c    |  2 +-
 lib/dpif-netdev-private-extract.h    |  9 ++++
 lib/dpif-netdev.c                    | 79 ++++++++++++++++++++++++++--
 5 files changed, 139 insertions(+), 9 deletions(-)

diff --git a/Documentation/topics/dpdk/bridge.rst b/Documentation/topics/dpdk/bridge.rst
index c79e108b7..8495687e8 100644
--- a/Documentation/topics/dpdk/bridge.rst
+++ b/Documentation/topics/dpdk/bridge.rst
@@ -282,12 +282,43 @@ command also shows whether the CPU supports each implementation ::
 
 An implementation can be selected manually by the following command ::
 
-    $ ovs-appctl dpif-netdev/miniflow-parser-set study
+    $ ovs-appctl dpif-netdev/miniflow-parser-set [name] [study_cnt] [core_id]
+
+The above command has two optional parameters, study_cnt and core_id. The
+second parameter, study_cnt, is specific to study and sets how many packets
+must be processed before the best implementation is chosen. The third
+parameter, core_id, can be provided to set the miniflow extract function on
+the pmd thread running on that particular core. For any implementation other
+than study, the [study_cnt] parameter is ignored and can be given as any
+arbitrary number.
 
 Also user can select the study implementation which studies the traffic for
 a specific number of packets by applying all available implementaions of
 miniflow extract and than chooses the one with most optimal result for that
-traffic pattern.
+traffic pattern. A user can also provide an additional packet count parameter
+which is the minimum number of packets that OVS must study before choosing an
+optimal implementation. If no packet count is provided, the default value of
+128 is chosen. Note that, as there is no synchronization point between
+threads, a PMD thread might still be running a previous study round and may
+make its decision based on that earlier data.
+
+Study can be selected with packet count by the following command ::
+
+    $ ovs-appctl dpif-netdev/miniflow-parser-set study 1024
+
+Study can be selected with packet count and explicit PMD selection
+by the following command ::
+
+    $ ovs-appctl dpif-netdev/miniflow-parser-set study 1024 3
+
+In the above command the last parameter is the core id of the PMD
+thread, which can be used to explicitly set the miniflow extraction
+function pointer on a particular PMD thread.
+
+Scalar can be selected on core 3 by the following command, where the
+study count can be set to any arbitrary number ::
+
+    $ ovs-appctl dpif-netdev/miniflow-parser-set scalar 0 3
 
 Miniflow Extract Validation
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/lib/dpif-netdev-extract-study.c b/lib/dpif-netdev-extract-study.c
index 32b76bd03..9b36d1974 100644
--- a/lib/dpif-netdev-extract-study.c
+++ b/lib/dpif-netdev-extract-study.c
@@ -51,6 +51,27 @@ mfex_study_get_study_stats_ptr(void)
     return stats;
 }
 
+uint32_t
+mfex_set_study_pkt_cnt(uint32_t pkt_cmp_count, const char *name)
+{
+    struct dpif_miniflow_extract_impl *miniflow_funcs;
+    dpif_mfex_impl_info_get(&miniflow_funcs);
+
+    /* If the requested implementation is study and a packet count was
+     * provided, set the study packet counter to the requested number,
+     * else reset it to the default count.
+     */
+    if ((strcmp(miniflow_funcs[MFEX_IMPL_STUDY].name, name) == 0) &&
+        (pkt_cmp_count != 0)) {
+
+        mfex_study_pkts_count = pkt_cmp_count;
+        return 0;
+    }
+
+    mfex_study_pkts_count = MFEX_MAX_COUNT;
+    return -EINVAL;
+}
+
 uint32_t
 mfex_study_traffic(struct dp_packet_batch *packets,
                    struct netdev_flow_key *keys,
@@ -86,7 +107,7 @@ mfex_study_traffic(struct dp_packet_batch *packets,
     /* Choose the best implementation after a minimum packets have been
      * processed.
      */
-    if (stats->pkt_count >= MFEX_MAX_COUNT) {
+    if (stats->pkt_count >= mfex_study_pkts_count) {
         uint32_t best_func_index = MFEX_IMPL_MAX;
         uint32_t max_hits = 0;
         for (int i = MFEX_IMPL_MAX; i < impl_count; i++) {
diff --git a/lib/dpif-netdev-private-extract.c b/lib/dpif-netdev-private-extract.c
index 6ae91a24d..c1239b319 100644
--- a/lib/dpif-netdev-private-extract.c
+++ b/lib/dpif-netdev-private-extract.c
@@ -118,7 +118,7 @@ dp_mfex_impl_get(struct ds *reply, struct dp_netdev_pmd_thread **pmd_list,
 
     for (uint32_t i = 0; i < ARRAY_SIZE(mfex_impls); i++) {
 
-        ds_put_format(reply, "  %s (available: %s)(pmds: ",
+        ds_put_format(reply, "  %s (available: %s, pmds: ",
                       mfex_impls[i].name, mfex_impls[i].available ?
                       "True" : "False");
 
diff --git a/lib/dpif-netdev-private-extract.h b/lib/dpif-netdev-private-extract.h
index cd46c94dd..a1f48d870 100644
--- a/lib/dpif-netdev-private-extract.h
+++ b/lib/dpif-netdev-private-extract.h
@@ -135,4 +135,13 @@ mfex_study_traffic(struct dp_packet_batch *packets,
                    uint32_t keys_size, odp_port_t in_port,
                    struct dp_netdev_pmd_thread *pmd_handle);
 
+/* Sets the user-provided packet count that the study function uses as the
+ * minimum number of packets to classify before choosing the optimal
+ * implementation.
+ * On error, returns -EINVAL.
+ * On success, returns 0.
+ */
+uint32_t
+mfex_set_study_pkt_cnt(uint32_t pkt_cmp_count, const char *name);
+
 #endif /* MFEX_AVX512_EXTRACT */
diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 175d8699f..6bcb24a73 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -1103,9 +1103,13 @@ dpif_miniflow_extract_impl_set(struct unixctl_conn *conn, int argc,
                      const char *argv[], void *aux OVS_UNUSED)
 {
     /* This function requires just one parameter, the miniflow name.
+     * A second optional parameter can set the packet count to match in
+     * study. A third optional parameter, the PMD thread core id, sets the
+     * miniflow implementation only on that particular pmd.
      */
     const char *mfex_name = argv[1];
     struct shash_node *node;
+    struct ds reply = DS_EMPTY_INITIALIZER;
 
     static const char *error_description[2] = {
         "Unknown miniflow implementation",
@@ -1116,7 +1120,6 @@ dpif_miniflow_extract_impl_set(struct unixctl_conn *conn, int argc,
     int32_t err = dp_mfex_impl_set_default_by_name(mfex_name);
 
     if (err) {
-        struct ds reply = DS_EMPTY_INITIALIZER;
         ds_put_format(&reply,
                       "Miniflow implementation not available: %s %s.\n",
                       error_description[ (err == EINVAL) ], mfex_name);
@@ -1128,6 +1131,44 @@ dpif_miniflow_extract_impl_set(struct unixctl_conn *conn, int argc,
         return;
     }
 
+    /* argv[2] is the optional packet count, which the user can provide with
+     * the study function to set the minimum number of packets that must be
+     * matched in order to choose the optimal function. */
+    uint32_t pkt_cmp_count = 0;
+    uint32_t study_ret = 0;
+
+    if ((argc == 3) || (argc == 4)) {
+        if (str_to_uint(argv[2], 10, &pkt_cmp_count)) {
+            study_ret = mfex_set_study_pkt_cnt(pkt_cmp_count, mfex_name);
+        } else {
+            study_ret = -EINVAL;
+        }
+    } else {
+        /* Default packet compare count when packets count not provided. */
+        study_ret = mfex_set_study_pkt_cnt(0, mfex_name);
+    }
+
+    uint32_t pmd_thread_specified = 0;
+    uint32_t pmd_thread_to_change = 0;
+    uint32_t pmd_thread_update_ok = 0;
+    if (argc == 4) {
+        if (str_to_uint(argv[3], 10, &pmd_thread_to_change)) {
+            pmd_thread_specified = 1;
+        } else {
+            /* argv[3] is not a uint. Return without changing anything. */
+            ovs_mutex_unlock(&dp_netdev_mutex);
+            ds_put_format(&reply,
+                 "Error: Miniflow parser not changed, PMD thread argument"
+                 " passed is not valid: '%s'. Pass a valid pmd thread ID.\n",
+                 argv[3]);
+            const char *reply_str = ds_cstr(&reply);
+            VLOG_INFO("%s", reply_str);
+            unixctl_command_reply_error(conn, reply_str);
+            ds_destroy(&reply);
+            return;
+        }
+    }
+
     SHASH_FOR_EACH (node, &dp_netdevs) {
         struct dp_netdev *dp = node->data;
 
@@ -1142,8 +1183,14 @@ dpif_miniflow_extract_impl_set(struct unixctl_conn *conn, int argc,
                 continue;
             }
 
+            if ((pmd_thread_specified) &&
+                (pmd->core_id != pmd_thread_to_change)) {
+                continue;
+            }
+
             /* Initialize MFEX function pointer to the newly configured
              * default. */
+            pmd_thread_update_ok = 1;
             miniflow_extract_func default_func = dp_mfex_impl_get_default();
             atomic_uintptr_t *pmd_func = (void *) &pmd->miniflow_extract_opt;
             atomic_store_relaxed(pmd_func, (uintptr_t) default_func);
@@ -1152,9 +1199,30 @@ dpif_miniflow_extract_impl_set(struct unixctl_conn *conn, int argc,
 
     ovs_mutex_unlock(&dp_netdev_mutex);
 
+    /* If PMD thread was specified, but it wasn't found, return error. */
+    if (pmd_thread_specified && !pmd_thread_update_ok) {
+        ds_put_format(&reply,
+             "Error: Miniflow parser not changed, PMD thread %d not in use,"
+             " pass a valid pmd thread ID.\n",
+             pmd_thread_to_change);
+        const char *reply_str = ds_cstr(&reply);
+        VLOG_INFO("%s", reply_str);
+        unixctl_command_reply_error(conn, reply_str);
+        ds_destroy(&reply);
+        return;
+    }
+
     /* Reply with success to command. */
-    struct ds reply = DS_EMPTY_INITIALIZER;
-    ds_put_format(&reply, "Miniflow implementation set to %s.\n", mfex_name);
+    ds_put_format(&reply, "Miniflow implementation set to %s", mfex_name);
+    if (pmd_thread_specified) {
+        ds_put_format(&reply, ", on pmd thread %d", pmd_thread_to_change);
+    }
+    if (study_ret == 0) {
+        ds_put_format(&reply, ", studing %d packets", pkt_cmp_count);
+    }
+
+    ds_put_format(&reply, "\n");
+
     const char *reply_str = ds_cstr(&reply);
     VLOG_INFO("%s", reply_str);
     unixctl_command_reply(conn, reply_str);
@@ -1391,8 +1459,9 @@ dpif_netdev_init(void)
                              0, 0, dpif_netdev_impl_get,
                              NULL);
     unixctl_command_register("dpif-netdev/miniflow-parser-set",
-                             "miniflow implementation name",
-                             1, 1, dpif_miniflow_extract_impl_set,
+                             "miniflow_implementation_name [study_pkt_cnt]"
+                             " [pmd_core]",
+                             1, 3, dpif_miniflow_extract_impl_set,
                              NULL);
     unixctl_command_register("dpif-netdev/miniflow-parser-get", "",
                              0, 0, dpif_miniflow_extract_impl_get,
-- 
2.32.0
