[ovs-dev] [PATCH v10 1/5] dpif-netdev: Use microsecond granularity.

Stokes, Ian ian.stokes at intel.com
Sat Jan 13 13:34:34 UTC 2018


LGTM,

Acked.

> -----Original Message-----
> From: Ilya Maximets [mailto:i.maximets at samsung.com]
> Sent: Friday, January 12, 2018 11:17 AM
> To: ovs-dev at openvswitch.org
> Cc: Heetae Ahn <heetae82.ahn at samsung.com>; Bodireddy, Bhanuprakash
> <bhanuprakash.bodireddy at intel.com>; Fischetti, Antonio
> <antonio.fischetti at intel.com>; Eelco Chaudron <echaudro at redhat.com>;
> Loftus, Ciara <ciara.loftus at intel.com>; Kevin Traynor
> <ktraynor at redhat.com>; Jan Scheurich <jan.scheurich at ericsson.com>; O
> Mahony, Billy <billy.o.mahony at intel.com>; Stokes, Ian
> <ian.stokes at intel.com>; Ilya Maximets <i.maximets at samsung.com>
> Subject: [PATCH v10 1/5] dpif-netdev: Use microsecond granularity.
> 
> Upcoming time-based output batching will require microsecond granularity
> for its flexible configuration.
> 
> Signed-off-by: Ilya Maximets <i.maximets at samsung.com>
> ---
>  lib/dpif-netdev.c | 27 ++++++++++++++-------------
>  1 file changed, 14 insertions(+), 13 deletions(-)
> 
> diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
> index dc26026..b35700d 100644
> --- a/lib/dpif-netdev.c
> +++ b/lib/dpif-netdev.c
> @@ -180,12 +180,13 @@ struct emc_cache {
>  

>  /* Simple non-wildcarding single-priority classifier. */
> 
> -/* Time in ms between successive optimizations of the dpcls subtable vector */
> -#define DPCLS_OPTIMIZATION_INTERVAL 1000
> +/* Time in microseconds between successive optimizations of the dpcls
> + * subtable vector */
> +#define DPCLS_OPTIMIZATION_INTERVAL 1000000LL
> 
> -/* Time in ms of the interval in which rxq processing cycles used in
> - * rxq to pmd assignments is measured and stored. */
> -#define PMD_RXQ_INTERVAL_LEN 10000
> +/* Time in microseconds of the interval in which rxq processing cycles used
> + * in rxq to pmd assignments is measured and stored. */
> +#define PMD_RXQ_INTERVAL_LEN 10000000LL
> 
>  /* Number of intervals for which cycles are stored
>   * and used during rxq to pmd assignment. */
> @@ -341,7 +342,7 @@ enum rxq_cycles_counter_type {
>      RXQ_N_CYCLES
>  };
> 
> -#define XPS_TIMEOUT_MS 500LL
> +#define XPS_TIMEOUT 500000LL    /* In microseconds. */
> 
>  /* Contained by struct dp_netdev_port's 'rxqs' member.  */
>  struct dp_netdev_rxq {
> @@ -758,7 +759,7 @@ emc_cache_slow_sweep(struct emc_cache *flow_cache)
>  static inline void
>  pmd_thread_ctx_time_update(struct dp_netdev_pmd_thread *pmd)
>  {
> -    pmd->ctx.now = time_msec();
> +    pmd->ctx.now = time_usec();
>  }
> 
>  /* Returns true if 'dpif' is a netdev or dummy dpif, false otherwise. */
> @@ -4145,7 +4146,7 @@ dp_netdev_run_meter(struct dp_netdev *dp, struct
> dp_packet_batch *packets_,
>      memset(exceeded_rate, 0, cnt * sizeof *exceeded_rate);
> 
>      /* All packets will hit the meter at the same time. */
> -    long_delta_t = (now - meter->used); /* msec */
> +    long_delta_t = (now - meter->used) / 1000; /* msec */
> 
>      /* Make sure delta_t will not be too large, so that bucket will not
>       * wrap around below. */
> @@ -4301,7 +4302,7 @@ dpif_netdev_meter_set(struct dpif *dpif,
> ofproto_meter_id *meter_id,
>          meter->flags = config->flags;
>          meter->n_bands = config->n_bands;
>          meter->max_delta_t = 0;
> -        meter->used = time_msec();
> +        meter->used = time_usec();
> 
>          /* set up bands */
>          for (i = 0; i < config->n_bands; ++i) {
> @@ -4843,7 +4844,7 @@ packet_batch_per_flow_execute(struct packet_batch_per_flow *batch,
>      struct dp_netdev_flow *flow = batch->flow;
> 
>      dp_netdev_flow_used(flow, batch->array.count, batch->byte_count,
> -                        batch->tcp_flags, pmd->ctx.now);
> +                        batch->tcp_flags, pmd->ctx.now / 1000);
> 
>      actions = dp_netdev_flow_get_actions(flow);
> 
> @@ -5228,7 +5229,7 @@ dpif_netdev_xps_revalidate_pmd(const struct
> dp_netdev_pmd_thread *pmd,
>              continue;
>          }
>          interval = pmd->ctx.now - tx->last_used;
> -        if (tx->qid >= 0 && (purge || interval >= XPS_TIMEOUT_MS)) {
> +        if (tx->qid >= 0 && (purge || interval >= XPS_TIMEOUT)) {
>              port = tx->port;
>              ovs_mutex_lock(&port->txq_used_mutex);
>              port->txq_used[tx->qid]--;
> @@ -5249,7 +5250,7 @@ dpif_netdev_xps_get_tx_qid(const struct
> dp_netdev_pmd_thread *pmd,
>      interval = pmd->ctx.now - tx->last_used;
>      tx->last_used = pmd->ctx.now;
> 
> -    if (OVS_LIKELY(tx->qid >= 0 && interval < XPS_TIMEOUT_MS)) {
> +    if (OVS_LIKELY(tx->qid >= 0 && interval < XPS_TIMEOUT)) {
>          return tx->qid;
>      }
> 
> @@ -5628,7 +5629,7 @@ dp_execute_cb(void *aux_, struct dp_packet_batch
> *packets_,
>          conntrack_execute(&dp->conntrack, packets_, aux->flow->dl_type,
> force,
>                            commit, zone, setmark, setlabel, aux->flow-
> >tp_src,
>                            aux->flow->tp_dst, helper, nat_action_info_ref,
> -                          pmd->ctx.now);
> +                          pmd->ctx.now / 1000);
>          break;
>      }
> 
> --
> 2.7.4



More information about the dev mailing list