[ovs-dev] [PATCH V3 10/19] dpif-netdev: Read hw stats during flow_dump_next() call
Ilya Maximets
i.maximets at ovn.org
Tue Dec 10 17:59:40 UTC 2019
On 08.12.2019 14:22, Eli Britstein wrote:
> From: Ophir Munk <ophirmu at mellanox.com>
>
> Use netdev flow get API in order to update the statistics of fully
> offloaded flows.
>
> Co-authored-by: Eli Britstein <elibr at mellanox.com>
> Signed-off-by: Ophir Munk <ophirmu at mellanox.com>
> Reviewed-by: Oz Shlomo <ozsh at mellanox.com>
> Signed-off-by: Eli Britstein <elibr at mellanox.com>
> ---
> lib/dpif-netdev.c | 47 +++++++++++++++++++++++++++++++++++++++++++++--
> 1 file changed, 45 insertions(+), 2 deletions(-)
>
> diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
> index 1e5493615..37e7e5c38 100644
> --- a/lib/dpif-netdev.c
> +++ b/lib/dpif-netdev.c
> @@ -527,6 +527,7 @@ struct dp_netdev_flow {
>
> bool dead;
> uint32_t mark; /* Unique flow mark assigned to a flow */
> + bool actions_offloaded; /* true if flow is fully offloaded. */
IIUC, this field is modified and read asynchronously from different threads,
so it should be atomic.
Also, it's better to put 'true' in single quotes in the comment.
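
E.g., something along these lines (just a rough sketch, not tested; the
'info.actions_offloaded' pointer plumbing would need to change accordingly,
since writers should go through the atomic store):

    /* In struct dp_netdev_flow ('atomic_bool' and the relaxed helpers come
     * from lib/ovs-atomic.h, already used in this file): */
    atomic_bool actions_offloaded; /* 'true' if flow is fully offloaded. */

    /* Writer side, e.g. in the offload thread: */
    atomic_store_relaxed(&flow->actions_offloaded, true);

    /* Reader side, e.g. in dp_netdev_flow_to_dpif_flow(): */
    bool offloaded;

    atomic_read_relaxed(&netdev_flow->actions_offloaded, &offloaded);
    flow->attrs.offloaded = offloaded;
    flow->attrs.dp_layer = offloaded ? "in_hw" : "ovs";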
>
> /* Statistics. */
> struct dp_netdev_flow_stats stats;
> @@ -2409,6 +2410,7 @@ dp_netdev_flow_offload_put(struct dp_flow_offload_item *offload)
> }
> }
> info.flow_mark = mark;
> + info.actions_offloaded = &flow->actions_offloaded;
>
> port = netdev_ports_get(in_port, pmd->dp->dpif->dpif_class);
> if (!port || netdev_vport_is_vport_class(port->netdev_class)) {
> @@ -3071,8 +3073,8 @@ dp_netdev_flow_to_dpif_flow(const struct dp_netdev_flow *netdev_flow,
> flow->pmd_id = netdev_flow->pmd_id;
> get_dpif_flow_stats(netdev_flow, &flow->stats);
>
> - flow->attrs.offloaded = false;
> - flow->attrs.dp_layer = "ovs";
> + flow->attrs.offloaded = netdev_flow->actions_offloaded;
> + flow->attrs.dp_layer = flow->attrs.offloaded ? "in_hw" : "ovs";
> }
>
> static int
> @@ -3242,6 +3244,7 @@ dp_netdev_flow_add(struct dp_netdev_pmd_thread *pmd,
> flow->dead = false;
> flow->batch = NULL;
> flow->mark = INVALID_FLOW_MARK;
> + flow->actions_offloaded = false;
> *CONST_CAST(unsigned *, &flow->pmd_id) = pmd->core_id;
> *CONST_CAST(struct flow *, &flow->flow) = match->flow;
> *CONST_CAST(ovs_u128 *, &flow->ufid) = *ufid;
> @@ -3596,6 +3599,42 @@ dpif_netdev_flow_dump_thread_destroy(struct dpif_flow_dump_thread *thread_)
> free(thread);
> }
>
> +static int
> +dpif_netdev_offload_used(struct dp_netdev_flow *netdev_flow,
> + struct dp_netdev_pmd_thread *pmd)
> +{
> + struct dpif_flow_stats stats;
> + struct netdev *netdev;
> + struct match match;
> + struct nlattr *actions;
> + struct dpif_flow_attrs attrs;
> + struct ofpbuf wbuffer;
> +
> + ovs_u128 ufid;
> + int ret = 0;
> +
> + netdev = netdev_ports_get(netdev_flow->flow.in_port.odp_port,
> + pmd->dp->class);
> + if (!netdev) {
> + return -1;
> + }
> + /* get offloaded stats */
Comment style.
> + ufid = netdev_flow->mega_ufid;
Why the copy?  The 'ufid' argument is constant, so '&netdev_flow->mega_ufid'
can be passed directly.
> + ret = netdev_flow_get(netdev, &match, &actions, &ufid, &stats, &attrs,
> + &wbuffer);
'dp->port_mutex' needs to be taken around the call above.  Also, please copy
the comment from my port_mutex patch.
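
I.e., something along these lines (just a rough sketch, not tested; it also
drops the 'ufid' copy mentioned above, and the placeholder comment should be
replaced with the actual wording from the port_mutex patch):

    /* Get offloaded stats.
     * XXX: Comment from the port_mutex patch goes here. */
    ovs_mutex_lock(&pmd->dp->port_mutex);
    ret = netdev_flow_get(netdev, &match, &actions, &netdev_flow->mega_ufid,
                          &stats, &attrs, &wbuffer);
    ovs_mutex_unlock(&pmd->dp->port_mutex);
    netdev_close(netdev);
    if (ret) {
        return -1;
    }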
> + netdev_close(netdev);
> + if (ret) {
> + return -1;
> + }
> + if (stats.n_packets) {
> + atomic_store_relaxed(&netdev_flow->stats.used, pmd->ctx.now / 1000);
This code should not be here, per the review of the previous patch.  But
regardless, you should not use the pmd context here, since this code doesn't
run in a pmd thread.
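
If the 'used' update ends up staying here, the timestamp should be taken in
the current thread instead, e.g. (sketch):

    /* time_msec() (lib/timeval.h) gives a monotonic timestamp in the
     * current thread, in the same units as 'stats.used'. */
    atomic_store_relaxed(&netdev_flow->stats.used, time_msec());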
> + non_atomic_ullong_add(&netdev_flow->stats.packet_count, stats.n_packets);
> + non_atomic_ullong_add(&netdev_flow->stats.byte_count, stats.n_bytes);
> + }
> +
> + return 0;
> +}
> +
> static int
> dpif_netdev_flow_dump_next(struct dpif_flow_dump_thread *thread_,
> struct dpif_flow *flows, int max_flows)
> @@ -3636,6 +3675,10 @@ dpif_netdev_flow_dump_next(struct dpif_flow_dump_thread *thread_,
> netdev_flows[n_flows] = CONTAINER_OF(node,
> struct dp_netdev_flow,
> node);
> + /* Read hardware stats in case of hardware offload */
Comment style.
> + if (netdev_flows[n_flows]->actions_offloaded) {
> + dpif_netdev_offload_used(netdev_flows[n_flows], pmd);
> + }
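The atomicity concern from above applies to this read as well, e.g.
(sketch):

    bool actions_offloaded;

    /* Read hardware stats in case the flow is offloaded to hardware. */
    atomic_read_relaxed(&netdev_flows[n_flows]->actions_offloaded,
                        &actions_offloaded);
    if (actions_offloaded) {
        dpif_netdev_offload_used(netdev_flows[n_flows], pmd);
    }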
> }
> /* When finishing dumping the current pmd thread, moves to
> * the next. */
>