[ovs-dev] [PATCH v4] ofproto: Add support for OpenFlow group and bucket stats.

Andy Zhou azhou at nicira.com
Thu May 22 02:45:04 UTC 2014


Would you please make a separate patch for the TODO list change, and
resend this one without it? Thanks.

On Wed, May 21, 2014 at 7:29 PM, Ryan Wilson <wryan at nicira.com> wrote:
> Signed-off-by: Ryan Wilson <wryan at nicira.com>
> Acked-by: Andy Zhou <azhou at nicira.com>
>
> ---
> v2: Fixed bug with group stats for all buckets, cleaned up ofgroup unref code,
>     added Andy Zhou's test for more complete test coverage
> v3: Split group ref/unref, support for group and bucket stats, and Andy Zhou's
>     tests into 3 patches.
> v4: Fix rebase conflicts, remove OpenFlow group and bucket stats from TODO file
> ---
>  TODO                         |   17 ------------
>  lib/ofp-util.h               |   12 ++++----
>  ofproto/ofproto-dpif-xlate.c |   51 +++++++++++++++++++++++++++-------
>  ofproto/ofproto-dpif.c       |   62 ++++++++++++++++++++++++++++--------------
>  ofproto/ofproto-dpif.h       |    3 ++
>  5 files changed, 93 insertions(+), 52 deletions(-)
>
> diff --git a/TODO b/TODO
> index dabe49c..e11089a 100644
> --- a/TODO
> +++ b/TODO
> @@ -52,23 +52,6 @@ These changes might have backward-compatibility implications; one
>  would have to test the behavior of the reduced cipher list OVS against
>  older versions.
>
> -OpenFlow Group Bucket Stats
> ----------------------------
> -
> -When OpenFlow group support was added, we forgot to support statistics
> -for individual buckets.  xlate_group_bucket() in
> -ofproto/ofproto-dpif-xlate.c appears to be where we need to increment
> -the counters, in the case where ctx->xin->resubmit_stats is
> -nonnull. See the ovs-dev thread starting here:
> -http://openvswitch.org/pipermail/dev/2014-January/036107.html
> -
> -Joe Stringer adds: If this involves resubmit_stats, then it would also
> -need a new xc_type. The xlate_group_bucket() code would add an entry
> -to ctx->xin->xcache if it is nonnull.  This would also need to follow
> -the code in xlate_push_stats() and xlate_cache_clear() for the new
> -xc_type.
> -
> -
>  Bash Command Completion
>  -----------------------
>
> diff --git a/lib/ofp-util.h b/lib/ofp-util.h
> index ce6045b..552b006 100644
> --- a/lib/ofp-util.h
> +++ b/lib/ofp-util.h
> @@ -1050,6 +1050,11 @@ int ofputil_decode_queue_stats(struct ofputil_queue_stats *qs, struct ofpbuf *ms
>  void ofputil_append_queue_stat(struct list *replies,
>                                 const struct ofputil_queue_stats *oqs);
>
> +struct bucket_counter {
> +    uint64_t packet_count;   /* Number of packets processed by bucket. */
> +    uint64_t byte_count;     /* Number of bytes processed by bucket. */
> +};
> +
>  /* Bucket for use in groups. */
>  struct ofputil_bucket {
>      struct list list_node;
> @@ -1062,6 +1067,8 @@ struct ofputil_bucket {
>                                   * failover groups. */
>      struct ofpact *ofpacts;     /* Series of "struct ofpact"s. */
>      size_t ofpacts_len;         /* Length of ofpacts, in bytes. */
> +
> +    struct bucket_counter stats;
>  };
>
>  /* Protocol-independent group_mod. */
> @@ -1072,11 +1079,6 @@ struct ofputil_group_mod {
>      struct list buckets;          /* Contains "struct ofputil_bucket"s. */
>  };
>
> -struct bucket_counter {
> -    uint64_t packet_count;   /* Number of packets processed by bucket. */
> -    uint64_t byte_count;     /* Number of bytes processed by bucket. */
> -};
> -
>  /* Group stats reply, independent of protocol. */
>  struct ofputil_group_stats {
>      uint32_t group_id;    /* Group identifier. */
> diff --git a/ofproto/ofproto-dpif-xlate.c b/ofproto/ofproto-dpif-xlate.c
> index 2095f37..b6ba023 100644
> --- a/ofproto/ofproto-dpif-xlate.c
> +++ b/ofproto/ofproto-dpif-xlate.c
> @@ -235,6 +235,7 @@ enum xc_type {
>      XC_LEARN,
>      XC_NORMAL,
>      XC_FIN_TIMEOUT,
> +    XC_GROUP,
>  };
>
>  /* xlate_cache entries hold enough information to perform the side effects of
> @@ -279,6 +280,10 @@ struct xc_entry {
>              uint16_t idle;
>              uint16_t hard;
>          } fin;
> +        struct {
> +            struct group_dpif *group;
> +            struct ofputil_bucket *bucket;
> +        } group;
>      } u;
>  };
>
> @@ -837,7 +842,7 @@ odp_port_is_alive(const struct xlate_ctx *ctx, ofp_port_t ofp_port)
>      return true;
>  }
>
> -static const struct ofputil_bucket *
> +static struct ofputil_bucket *
>  group_first_live_bucket(const struct xlate_ctx *, const struct group_dpif *,
>                          int depth);
>
> @@ -862,7 +867,7 @@ group_is_alive(const struct xlate_ctx *ctx, uint32_t group_id, int depth)
>
>  static bool
>  bucket_is_alive(const struct xlate_ctx *ctx,
> -                const struct ofputil_bucket *bucket, int depth)
> +                struct ofputil_bucket *bucket, int depth)
>  {
>      if (depth >= MAX_LIVENESS_RECURSION) {
>          static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
> @@ -879,7 +884,7 @@ bucket_is_alive(const struct xlate_ctx *ctx,
>           group_is_alive(ctx, bucket->watch_group, depth + 1));
>  }
>
> -static const struct ofputil_bucket *
> +static struct ofputil_bucket *
>  group_first_live_bucket(const struct xlate_ctx *ctx,
>                          const struct group_dpif *group, int depth)
>  {
> @@ -896,16 +901,16 @@ group_first_live_bucket(const struct xlate_ctx *ctx,
>      return NULL;
>  }
>
> -static const struct ofputil_bucket *
> +static struct ofputil_bucket *
>  group_best_live_bucket(const struct xlate_ctx *ctx,
>                         const struct group_dpif *group,
>                         uint32_t basis)
>  {
> -    const struct ofputil_bucket *best_bucket = NULL;
> +    struct ofputil_bucket *best_bucket = NULL;
>      uint32_t best_score = 0;
>      int i = 0;
>
> -    const struct ofputil_bucket *bucket;
> +    struct ofputil_bucket *bucket;
>      const struct list *buckets;
>
>      group_dpif_get_buckets(group, &buckets);
> @@ -2119,7 +2124,23 @@ match:
>  }
>
>  static void
> -xlate_group_bucket(struct xlate_ctx *ctx, const struct ofputil_bucket *bucket)
> +xlate_group_stats(struct xlate_ctx *ctx, struct group_dpif *group,
> +                  struct ofputil_bucket *bucket)
> +{
> +    if (ctx->xin->resubmit_stats) {
> +        group_dpif_credit_stats(group, bucket, ctx->xin->resubmit_stats);
> +    }
> +    if (ctx->xin->xcache) {
> +        struct xc_entry *entry;
> +
> +        entry = xlate_cache_add_entry(ctx->xin->xcache, XC_GROUP);
> +        entry->u.group.group = group_dpif_ref(group);
> +        entry->u.group.bucket = bucket;
> +    }
> +}
> +
> +static void
> +xlate_group_bucket(struct xlate_ctx *ctx, struct ofputil_bucket *bucket)
>  {
>      uint64_t action_list_stub[1024 / 8];
>      struct ofpbuf action_list, action_set;
> @@ -2139,7 +2160,7 @@ xlate_group_bucket(struct xlate_ctx *ctx, const struct ofputil_bucket *bucket)
>  static void
>  xlate_all_group(struct xlate_ctx *ctx, struct group_dpif *group)
>  {
> -    const struct ofputil_bucket *bucket;
> +    struct ofputil_bucket *bucket;
>      const struct list *buckets;
>      struct flow old_flow = ctx->xin->flow;
>
> @@ -2155,16 +2176,18 @@ xlate_all_group(struct xlate_ctx *ctx, struct group_dpif *group)
>           * just before applying the all or indirect group. */
>          ctx->xin->flow = old_flow;
>      }
> +    xlate_group_stats(ctx, group, NULL);
>  }
>
>  static void
>  xlate_ff_group(struct xlate_ctx *ctx, struct group_dpif *group)
>  {
> -    const struct ofputil_bucket *bucket;
> +    struct ofputil_bucket *bucket;
>
>      bucket = group_first_live_bucket(ctx, group, 0);
>      if (bucket) {
>          xlate_group_bucket(ctx, bucket);
> +        xlate_group_stats(ctx, group, bucket);
>      }
>  }
>
> @@ -2172,7 +2195,7 @@ static void
>  xlate_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
>  {
>      struct flow_wildcards *wc = &ctx->xout->wc;
> -    const struct ofputil_bucket *bucket;
> +    struct ofputil_bucket *bucket;
>      uint32_t basis;
>
>      basis = hash_mac(ctx->xin->flow.dl_dst, 0, 0);
> @@ -2180,6 +2203,7 @@ xlate_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
>      if (bucket) {
>          memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
>          xlate_group_bucket(ctx, bucket);
> +        xlate_group_stats(ctx, group, bucket);
>      }
>  }
>
> @@ -3602,6 +3626,10 @@ xlate_push_stats(struct xlate_cache *xcache, bool may_learn,
>              xlate_fin_timeout__(entry->u.fin.rule, stats->tcp_flags,
>                                  entry->u.fin.idle, entry->u.fin.hard);
>              break;
> +        case XC_GROUP:
> +            group_dpif_credit_stats(entry->u.group.group, entry->u.group.bucket,
> +                                    stats);
> +            break;
>          default:
>              OVS_NOT_REACHED();
>          }
> @@ -3670,6 +3698,9 @@ xlate_cache_clear(struct xlate_cache *xcache)
>              /* 'u.fin.rule' is always already held as a XC_RULE, which
>               * has already released it's reference above. */
>              break;
> +        case XC_GROUP:
> +            group_dpif_unref(entry->u.group.group);
> +            break;
>          default:
>              OVS_NOT_REACHED();
>          }
> diff --git a/ofproto/ofproto-dpif.c b/ofproto/ofproto-dpif.c
> index c79602c..c34349a 100644
> --- a/ofproto/ofproto-dpif.c
> +++ b/ofproto/ofproto-dpif.c
> @@ -107,7 +107,6 @@ struct group_dpif {
>      struct ovs_mutex stats_mutex;
>      uint64_t packet_count OVS_GUARDED;  /* Number of packets received. */
>      uint64_t byte_count OVS_GUARDED;    /* Number of bytes received. */
> -    struct bucket_counter *bucket_stats OVS_GUARDED;  /* Bucket statistics. */
>  };
>
>  struct ofbundle {
> @@ -3541,15 +3540,40 @@ static void
>  group_construct_stats(struct group_dpif *group)
>      OVS_REQUIRES(group->stats_mutex)
>  {
> +    struct ofputil_bucket *bucket;
> +    const struct list *buckets;
> +
>      group->packet_count = 0;
>      group->byte_count = 0;
> -    if (!group->bucket_stats) {
> -        group->bucket_stats = xcalloc(group->up.n_buckets,
> -                                      sizeof *group->bucket_stats);
> -    } else {
> -        memset(group->bucket_stats, 0, group->up.n_buckets *
> -               sizeof *group->bucket_stats);
> +
> +    group_dpif_get_buckets(group, &buckets);
> +    LIST_FOR_EACH (bucket, list_node, buckets) {
> +        bucket->stats.packet_count = 0;
> +        bucket->stats.byte_count = 0;
> +    }
> +}
> +
> +void
> +group_dpif_credit_stats(struct group_dpif *group,
> +                        struct ofputil_bucket *bucket,
> +                        const struct dpif_flow_stats *stats)
> +{
> +    ovs_mutex_lock(&group->stats_mutex);
> +    group->packet_count += stats->n_packets;
> +    group->byte_count += stats->n_bytes;
> +    if (bucket) {
> +        bucket->stats.packet_count += stats->n_packets;
> +        bucket->stats.byte_count += stats->n_bytes;
> +    } else { /* Credit to all buckets */
> +        const struct list *buckets;
> +
> +        group_dpif_get_buckets(group, &buckets);
> +        LIST_FOR_EACH (bucket, list_node, buckets) {
> +            bucket->stats.packet_count += stats->n_packets;
> +            bucket->stats.byte_count += stats->n_bytes;
> +        }
>      }
> +    ovs_mutex_unlock(&group->stats_mutex);
>  }
>
>  static enum ofperr
> @@ -3578,20 +3602,9 @@ group_construct(struct ofgroup *group_)
>  }
>
>  static void
> -group_destruct__(struct group_dpif *group)
> -    OVS_REQUIRES(group->stats_mutex)
> -{
> -    free(group->bucket_stats);
> -    group->bucket_stats = NULL;
> -}
> -
> -static void
>  group_destruct(struct ofgroup *group_)
>  {
>      struct group_dpif *group = group_dpif_cast(group_);
> -    ovs_mutex_lock(&group->stats_mutex);
> -    group_destruct__(group);
> -    ovs_mutex_unlock(&group->stats_mutex);
>      ovs_mutex_destroy(&group->stats_mutex);
>  }
>
> @@ -3609,12 +3622,21 @@ static enum ofperr
>  group_get_stats(const struct ofgroup *group_, struct ofputil_group_stats *ogs)
>  {
>      struct group_dpif *group = group_dpif_cast(group_);
> +    struct ofputil_bucket *bucket;
> +    const struct list *buckets;
> +    struct bucket_counter *bucket_stats;
>
>      ovs_mutex_lock(&group->stats_mutex);
>      ogs->packet_count = group->packet_count;
>      ogs->byte_count = group->byte_count;
> -    memcpy(ogs->bucket_stats, group->bucket_stats,
> -           group->up.n_buckets * sizeof *group->bucket_stats);
> +
> +    group_dpif_get_buckets(group, &buckets);
> +    bucket_stats = ogs->bucket_stats;
> +    LIST_FOR_EACH (bucket, list_node, buckets) {
> +        bucket_stats->packet_count = bucket->stats.packet_count;
> +        bucket_stats->byte_count = bucket->stats.byte_count;
> +        bucket_stats++;
> +    }
>      ovs_mutex_unlock(&group->stats_mutex);
>
>      return 0;
> diff --git a/ofproto/ofproto-dpif.h b/ofproto/ofproto-dpif.h
> index 9542c5a..06c4854 100644
> --- a/ofproto/ofproto-dpif.h
> +++ b/ofproto/ofproto-dpif.h
> @@ -125,6 +125,9 @@ void choose_miss_rule(enum ofputil_port_config,
>                        struct rule_dpif *no_packet_in_rule,
>                        struct rule_dpif **rule, bool take_ref);
>
> +void group_dpif_credit_stats(struct group_dpif *,
> +                             struct ofputil_bucket *,
> +                             const struct dpif_flow_stats *);
>  bool group_dpif_lookup(struct ofproto_dpif *ofproto, uint32_t group_id,
>                         struct group_dpif **group);
>
> --
> 1.7.9.5
>
> _______________________________________________
> dev mailing list
> dev at openvswitch.org
> http://openvswitch.org/mailman/listinfo/dev


