[ovs-dev] [PATCH v4 3/6] dpif-netdev: Count the rxq processing cycles for an rxq.

Greg Rose gvrose8192 at gmail.com
Thu Aug 10 22:58:58 UTC 2017


On 08/09/2017 08:45 AM, Kevin Traynor wrote:
> Count the cycles used for processing an rxq during the
> pmd rxq interval. As this is an in-flight counter and
> pmds run independently, also store the total cycles used
> during the last full interval.
>
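An aside for readers of the series, not part of the patch: the model here is two counters per rxq, one accumulating while the current interval is in flight and one holding the total from the last completed interval, so a consumer always has a stable full-interval figure while the pmd keeps counting. A minimal self-contained sketch of that scheme follows; the enum values mirror the patch's names, while RXQ_N_CYCLES, the struct, and the helper are invented for illustration and assume the atomics from lib/ovs-atomic.h.

/* Sketch of the CURR/LAST two-counter scheme; not the patch itself. */
enum rxq_cycles_counter_type {
    RXQ_CYCLES_PROC_CURR,   /* Cycles used in the interval in progress. */
    RXQ_CYCLES_PROC_LAST,   /* Cycles used in the last full interval. */
    RXQ_N_CYCLES
};

struct rxq_cycles_sketch {
    atomic_ullong cycles[RXQ_N_CYCLES];
};

/* End of interval: publish CURR as LAST and restart CURR from zero. */
static void
rxq_cycles_rollover(struct rxq_cycles_sketch *rx)
{
    unsigned long long curr;

    atomic_read_relaxed(&rx->cycles[RXQ_CYCLES_PROC_CURR], &curr);
    atomic_store_relaxed(&rx->cycles[RXQ_CYCLES_PROC_LAST], curr);
    atomic_store_relaxed(&rx->cycles[RXQ_CYCLES_PROC_CURR], 0);
}
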
> Signed-off-by: Kevin Traynor <ktraynor at redhat.com>
> ---
>   lib/dpif-netdev.c | 65 ++++++++++++++++++++++++++++++++++++++++++++++++++-----
>   1 file changed, 60 insertions(+), 5 deletions(-)
>
> diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
> index 41f16b2..e344063 100644
> --- a/lib/dpif-netdev.c
> +++ b/lib/dpif-netdev.c
> @@ -182,4 +182,8 @@ struct emc_cache {
>   #define DPCLS_OPTIMIZATION_INTERVAL 1000
>
> +/* Time in ms of the interval in which rxq processing cycles used in
> + * rxq to pmd assignments are measured and stored. */
> +#define PMD_RXQ_INTERVAL 1000
> +
>   struct dpcls {
>       struct cmap_node node;      /* Within dp_netdev_pmd_thread.classifiers */
> @@ -558,4 +562,6 @@ struct dp_netdev_pmd_thread {
>       /* Periodically sort subtable vectors according to hit frequencies */
>       long long int next_optimization;
> +    /* Periodically store the processing cycles used for each rxq. */
> +    long long int rxq_interval;
>
>       /* Statistics. */
> @@ -684,5 +690,13 @@ static void pmd_load_cached_ports(struct dp_netdev_pmd_thread *pmd)
>       OVS_REQUIRES(pmd->port_mutex);
>   static inline void
> -dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd);
> +dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd,
> +                           struct polled_queue *poll_list, int poll_cnt);
> +static void
> +dp_netdev_rxq_set_cycles(struct dp_netdev_rxq *rx,
> +                         enum rxq_cycles_counter_type type,
> +                         unsigned long long cycles);
> +static uint64_t
> +dp_netdev_rxq_get_cycles(struct dp_netdev_rxq *rx,
> +                         enum rxq_cycles_counter_type type);
>   static void
>   dpif_netdev_xps_revalidate_pmd(const struct dp_netdev_pmd_thread *pmd,
> @@ -3114,4 +3128,21 @@ cycles_count_intermediate(struct dp_netdev_pmd_thread *pmd,
>   }
>
> +static void
> +dp_netdev_rxq_set_cycles(struct dp_netdev_rxq *rx,
> +                         enum rxq_cycles_counter_type type,
> +                         unsigned long long cycles)
> +{
> +    atomic_store_relaxed(&rx->cycles[type], cycles);
> +}
> +
> +static uint64_t
> +dp_netdev_rxq_get_cycles(struct dp_netdev_rxq *rx,
> +                         enum rxq_cycles_counter_type type)
> +{
> +    unsigned long long tmp;
> +    atomic_read_relaxed(&rx->cycles[type], &tmp);
> +    return tmp;
> +}
> +
>   static int
>   dp_netdev_process_rxq_port(struct dp_netdev_pmd_thread *pmd,
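Not part of the patch, just a note on the atomics: relaxed loads and stores look sufficient here, since the pmd owning the queue is the only writer and any other thread only needs a coherent 64-bit value, not ordering with other memory. For example, a hypothetical stats helper (the name is invented; only the getter it calls is from the patch) could report the last completed interval like this:

/* Hypothetical helper, not in the patch: cycles an rxq used during the
 * last full PMD_RXQ_INTERVAL.  A relaxed read is enough because the
 * owning pmd is the only writer and only the plain value is needed. */
static uint64_t
rxq_last_interval_cycles(struct dp_netdev_rxq *rx)
{
    return dp_netdev_rxq_get_cycles(rx, RXQ_CYCLES_PROC_LAST);
}
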
> @@ -3158,5 +3189,5 @@ port_reconfigure(struct dp_netdev_port *port)
>   {
>       struct netdev *netdev = port->netdev;
> -    int i, err;
> +    int i, err, last_nrxq;
>
>       port->need_reconfigure = false;
> @@ -3167,4 +3198,5 @@ port_reconfigure(struct dp_netdev_port *port)
>           port->rxqs[i].rx = NULL;
>       }
> +    last_nrxq = port->n_rxq;
>       port->n_rxq = 0;
>
> @@ -3187,4 +3219,9 @@ port_reconfigure(struct dp_netdev_port *port)
>       for (i = 0; i < netdev_n_rxq(netdev); i++) {
>           port->rxqs[i].port = port;
> +        if (i >= last_nrxq) {
> +            /* Only reset cycle stats for new queues. */
> +            dp_netdev_rxq_set_cycles(&port->rxqs[i], RXQ_CYCLES_PROC_CURR, 0);
> +            dp_netdev_rxq_set_cycles(&port->rxqs[i], RXQ_CYCLES_PROC_LAST, 0);
> +        }
>           err = netdev_rxq_open(netdev, &port->rxqs[i].rx, i);
>           if (err) {
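A reviewer's illustration, not part of the patch: the last_nrxq guard reads as intentional. If a port is reconfigured from, say, 2 to 4 rx queues, queues 0 and 1 keep their measured history and only the new queues 2 and 3 start from zero, so cycle-based decisions are not skewed by the reconfigure. A small sketch of that policy, with the helper name invented:

/* Sketch of the reset policy applied in port_reconfigure(): zero only
 * the counters of queues that did not exist before the reconfigure. */
static void
reset_new_rxq_cycles(struct dp_netdev_port *port, int last_nrxq, int n_rxq)
{
    int i;

    for (i = 0; i < n_rxq; i++) {
        if (i >= last_nrxq) {
            /* New queue: no history yet, start both counters at zero. */
            dp_netdev_rxq_set_cycles(&port->rxqs[i], RXQ_CYCLES_PROC_CURR, 0);
            dp_netdev_rxq_set_cycles(&port->rxqs[i], RXQ_CYCLES_PROC_LAST, 0);
        }
        /* i < last_nrxq: the previously measured cycles are kept. */
    }
}
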
> @@ -3867,5 +3904,5 @@ reload:
>                   dp_netdev_process_rxq_port(pmd, poll_list[i].rxq->rx,
>                                              poll_list[i].port_no);
> -            cycles_count_intermediate(pmd, NULL,
> +            cycles_count_intermediate(pmd, poll_list[i].rxq,
>                                         process_packets ? PMD_CYCLES_PROCESSING
>                                                         : PMD_CYCLES_IDLE);
> @@ -3878,5 +3915,5 @@ reload:
>
>               coverage_try_clear();
> -            dp_netdev_pmd_try_optimize(pmd);
> +            dp_netdev_pmd_try_optimize(pmd, poll_list, poll_cnt);
>               if (!ovsrcu_try_quiesce()) {
>                   emc_cache_slow_sweep(&pmd->flow_cache);
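Aside, not part of the patch: the change above routes each iteration's cycle count to the specific rxq that was just polled. The body of cycles_count_intermediate() is not in this excerpt, but presumably it adds the elapsed cycles into RXQ_CYCLES_PROC_CURR whenever an rxq is supplied. A rough sketch under that assumption, with an invented helper name:

/* Sketch: attribute an iteration's cycles to the polled rxq, if any
 * (rxq may be NULL for work not tied to a single queue).  Assumes a
 * single writer, so a plain read-modify-write is sufficient. */
static void
rxq_account_cycles(struct dp_netdev_rxq *rxq, uint64_t cycles)
{
    if (rxq) {
        uint64_t curr = dp_netdev_rxq_get_cycles(rxq, RXQ_CYCLES_PROC_CURR);

        dp_netdev_rxq_set_cycles(rxq, RXQ_CYCLES_PROC_CURR, curr + cycles);
    }
}
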
> @@ -4322,4 +4359,5 @@ dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev *dp,
>       cmap_init(&pmd->classifiers);
>       pmd->next_optimization = time_msec() + DPCLS_OPTIMIZATION_INTERVAL;
> +    pmd->rxq_interval = time_msec() + PMD_RXQ_INTERVAL;
>       hmap_init(&pmd->poll_list);
>       hmap_init(&pmd->tx_ports);
> @@ -5759,8 +5797,25 @@ dpcls_sort_subtable_vector(struct dpcls *cls)
>
>   static inline void
> -dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd)
> +dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd,
> +                           struct polled_queue *poll_list, int poll_cnt)
>   {
>       struct dpcls *cls;
>       long long int now = time_msec();
> +    int i;
> +    uint64_t rxq_cyc_curr;
> +
> +    if (now > pmd->rxq_interval) {
> +        /* Get the cycles that were used to process each queue and store. */
> +        for (i = 0; i < poll_cnt; i++) {
> +            rxq_cyc_curr = dp_netdev_rxq_get_cycles(poll_list[i].rxq,
> +                                                    RXQ_CYCLES_PROC_CURR);
> +            dp_netdev_rxq_set_cycles(poll_list[i].rxq, RXQ_CYCLES_PROC_LAST,
> +                                     rxq_cyc_curr);
> +            dp_netdev_rxq_set_cycles(poll_list[i].rxq, RXQ_CYCLES_PROC_CURR,
> +                                     0);
> +        }
> +        /* Start a new measuring interval. */
> +        pmd->rxq_interval = now + PMD_RXQ_INTERVAL;
> +    }
>
>       if (now > pmd->next_optimization) {
>
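Not part of this patch, but for context: the stored last-interval value is presumably what later patches in the series consume when balancing rxqs across pmds by measured load. As one possible consumer, a hypothetical helper (name invented) could total a pmd's last-interval cycles over its poll list:

/* Hypothetical helper: sum of the cycles a pmd's polled queues used in
 * the last full interval, e.g. for a stats command or for comparing
 * pmd load.  Illustration only. */
static uint64_t
pmd_last_interval_cycles(struct polled_queue *poll_list, int poll_cnt)
{
    uint64_t total = 0;
    int i;

    for (i = 0; i < poll_cnt; i++) {
        total += dp_netdev_rxq_get_cycles(poll_list[i].rxq,
                                          RXQ_CYCLES_PROC_LAST);
    }
    return total;
}
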
Tested-by: Greg Rose <gvrose8192 at gmail.com>
Reviewed-by: Greg Rose <gvrose8192 at gmail.com>

