[ovs-dev] [PATCH RFC] ovn-controller: Optimize processing for non-local datapath without patch ports.

Ryan Moats rmoats at us.ibm.com
Tue Mar 29 13:57:35 UTC 2016


Acked-by: Ryan Moats <rmoats at us.ibm.com>

"dev" <dev-bounces at openvswitch.org> wrote on 03/28/2016 02:10:35 AM:

> From: Han Zhou <zhouhan at gmail.com>
> To: dev at openvswitch.org
> Date: 03/28/2016 02:11 AM
> Subject: [ovs-dev] [PATCH RFC] ovn-controller: Optimize processing
> for non-local datapath without patch ports.
> Sent by: "dev" <dev-bounces at openvswitch.org>
>
> For non-local datapaths, if there are no patch ports attached, it
> means the lflows and port bindings would never be needed on the
> Chassis. Skipping the processing for such lflows and port bindings
> can save significant amount of CPU, at the same time largely reduce
> the number of rules in local openflow tables.
>
> Signed-off-by: Han Zhou <zhouhan at gmail.com>
> ---
>  ovn/controller/lflow.c          | 38 +++++++++++++++++++++++++++-----------
>  ovn/controller/lflow.h          |  3 ++-
>  ovn/controller/ovn-controller.c | 16 +++++++++++++---
>  ovn/controller/ovn-controller.h |  6 ++++++
>  ovn/controller/patch.c          | 22 +++++++++++++++++++---
>  ovn/controller/patch.h          |  2 +-
>  ovn/controller/physical.c       | 34 +++++++++++++++++++++++++++++++++-
>  ovn/controller/physical.h       |  2 +-
>  8 files changed, 102 insertions(+), 21 deletions(-)
>
> diff --git a/ovn/controller/lflow.c b/ovn/controller/lflow.c
> index 0614a54..be10d18 100644
> --- a/ovn/controller/lflow.c
> +++ b/ovn/controller/lflow.c
> @@ -198,6 +198,7 @@ static void
>  add_logical_flows(struct controller_ctx *ctx, const struct lport_index *lports,
>                    const struct mcgroup_index *mcgroups,
>                    const struct hmap *local_datapaths,
> +                  const struct hmap *patched_datapaths,
>                    const struct simap *ct_zones, struct hmap *flow_table)
>  {
>      uint32_t conj_id_ofs = 1;
> @@ -211,17 +212,18 @@ add_logical_flows(struct controller_ctx *ctx, const struct lport_index *lports,
>          if (!ldp) {
>              continue;
>          }
> -        if (!ingress && is_switch(ldp)) {
> +        if (is_switch(ldp)) {
>              /* For a logical switch datapath, local_datapaths tells us if there
> -             * are any local ports for this datapath.  If not, processing
> -             * logical flows for the egress pipeline of this datapath is
> -             * unnecessary.
> +             * are any local ports for this datapath.  If not, we can skip
> +             * processing logical flows if the flow belongs to egress pipeline
> +             * or if that logical switch datapath is not patched to any logical
> +             * router.
>               *
> -             * We still need the ingress pipeline because even if there are no
> -             * local ports, we still may need to execute the ingress pipeline
> -             * after a packet leaves a logical router.  Further optimization
> -             * is possible, but not based on what we know with local_datapaths
> -             * right now.
> +             * Otherwise, we still need the ingress pipeline because even if
> +             * there are no local ports, we still may need to execute the ingress
> +             * pipeline after a packet leaves a logical router.  Further
> +             * optimization is possible, but not based on what we know with
> +             * local_datapaths right now.
>               *
>               * A better approach would be a kind of "flood fill" algorithm:
>               *
> @@ -231,12 +233,25 @@ add_logical_flows(struct controller_ctx *ctx, const struct lport_index *lports,
>               *   2. For each patch port P in a logical datapath in S, add the
>               *      logical datapath of the remote end of P to S.  Iterate
>               *      until S reaches a fixed point.
> +             *
> +             * This can be implemented in northd, which can generate the sets and
> +             * save it on each port-binding record in SB, and ovn-controller can
> +             * use the information directly. However, there can be update storms
> +             * when a pair of patch ports are added/removed to connect/disconnect
> +             * large lrouters and lswitches. This needs to be studied further.
>               */
>
>              struct hmap_node *ld;
>              ld = hmap_first_with_hash(local_datapaths, ldp->tunnel_key);
>              if (!ld) {
> -                continue;
> +                if (!ingress) {
> +                    continue;
> +                }
> +                struct hmap_node *pd;
> +                pd = hmap_first_with_hash(patched_datapaths,
> +                                          ldp->tunnel_key);
> +                if (!pd) {
> +                    continue;
> +                }
>              }
>          }
>
> @@ -416,10 +431,11 @@ void
>  lflow_run(struct controller_ctx *ctx, const struct lport_index *lports,
>            const struct mcgroup_index *mcgroups,
>            const struct hmap *local_datapaths,
> +          const struct hmap *patched_datapaths,
>            const struct simap *ct_zones, struct hmap *flow_table)
>  {
>      add_logical_flows(ctx, lports, mcgroups, local_datapaths,
> -                      ct_zones, flow_table);
> +                      patched_datapaths, ct_zones, flow_table);
>      add_neighbor_flows(ctx, lports, flow_table);
>  }
>
> diff --git a/ovn/controller/lflow.h b/ovn/controller/lflow.h
> index ff823d4..a3fc50c 100644
> --- a/ovn/controller/lflow.h
> +++ b/ovn/controller/lflow.h
> @@ -61,7 +61,8 @@ struct uuid;
>  void lflow_init(void);
>  void lflow_run(struct controller_ctx *, const struct lport_index *,
>                 const struct mcgroup_index *,
> -               const struct hmap *local_datapaths,
> +               const struct hmap *local_datapaths,
> +               const struct hmap *patched_datapaths,
>                 const struct simap *ct_zones,
>                 struct hmap *flow_table);
>  void lflow_destroy(void);
> diff --git a/ovn/controller/ovn-controller.c b/ovn/controller/ovn-controller.c
> index e52b731..44ab8b3 100644
> --- a/ovn/controller/ovn-controller.c
> +++ b/ovn/controller/ovn-controller.c
> @@ -282,6 +282,8 @@ main(int argc, char *argv[])
>           * tunnel_key of datapaths with at least one local port binding. */
>          struct hmap local_datapaths = HMAP_INITIALIZER(&local_datapaths);
>
> +        struct hmap patched_datapaths = HMAP_INITIALIZER(&patched_datapaths);
> +
>          const struct ovsrec_bridge *br_int = get_br_int(&ctx);
>          const char *chassis_id = get_chassis_id(ctx.ovs_idl);
>
> @@ -293,7 +295,7 @@ main(int argc, char *argv[])
>          }
>
>          if (br_int) {
> -            patch_run(&ctx, br_int, &local_datapaths);
> +            patch_run(&ctx, br_int, &local_datapaths, &patched_datapaths);
>
>              struct lport_index lports;
>              struct mcgroup_index mcgroups;
> @@ -306,11 +308,11 @@ main(int argc, char *argv[])
>
>              struct hmap flow_table = HMAP_INITIALIZER(&flow_table);
>              lflow_run(&ctx, &lports, &mcgroups, &local_datapaths,
> -                      &ct_zones, &flow_table);
> +                      &patched_datapaths, &ct_zones, &flow_table);
>              if (chassis_id) {
>                  physical_run(&ctx, mff_ovn_geneve,
>                               br_int, chassis_id, &ct_zones, &flow_table,
> -                             &local_datapaths);
> +                             &local_datapaths, &patched_datapaths);
>              }
>              ofctrl_put(&flow_table);
>              hmap_destroy(&flow_table);
> @@ -325,6 +327,14 @@ main(int argc, char *argv[])
>          }
>          hmap_destroy(&local_datapaths);
>
> +        struct patched_datapath *pd_cur_node, *pd_next_node;
> +        HMAP_FOR_EACH_SAFE (pd_cur_node, pd_next_node, hmap_node,
> +                &patched_datapaths) {
> +            hmap_remove(&patched_datapaths, &pd_cur_node->hmap_node);
> +            free(pd_cur_node);
> +        }
> +        hmap_destroy(&patched_datapaths);
> +
>          unixctl_server_run(unixctl);
>
>          unixctl_server_wait(unixctl);
> diff --git a/ovn/controller/ovn-controller.h b/ovn/controller/ovn-controller.h
> index a3465eb..9955097 100644
> --- a/ovn/controller/ovn-controller.h
> +++ b/ovn/controller/ovn-controller.h
> @@ -41,6 +41,12 @@ struct local_datapath {
>      const struct sbrec_port_binding *localnet_port;
>  };
>
> +/* Contains hmap_node whose hash values are the tunnel_key of datapaths
> + * with at least one logical patch port binding. */
> +struct patched_datapath {
> +    struct hmap_node hmap_node;
> +};
> +
>  const struct ovsrec_bridge *get_bridge(struct ovsdb_idl *,
>                                         const char *br_name);
>
> diff --git a/ovn/controller/patch.c b/ovn/controller/patch.c
> index 753ce3e..be9418c 100644
> --- a/ovn/controller/patch.c
> +++ b/ovn/controller/patch.c
> @@ -229,6 +229,20 @@ add_bridge_mappings(struct controller_ctx *ctx,
>      shash_destroy(&bridge_mappings);
>  }
>
> +static void
> +add_patched_datapath(struct hmap *patched_datapaths,
> +                     const struct sbrec_port_binding *binding_rec)
> +{
> +    if (hmap_first_with_hash(patched_datapaths,
> +                             binding_rec->datapath->tunnel_key)) {
> +        return;
> +    }
> +
> +    struct patched_datapath *pd = xzalloc(sizeof *pd);
> +    hmap_insert(patched_datapaths, &pd->hmap_node,
> +                binding_rec->datapath->tunnel_key);
> +}
> +
>  /* Add one OVS patch port for each OVN logical patch port.
>   *
>   * This is suboptimal for several reasons.  First, it creates an OVS port for
> @@ -254,7 +268,8 @@ add_bridge_mappings(struct controller_ctx *ctx,
>  static void
>  add_logical_patch_ports(struct controller_ctx *ctx,
>                          const struct ovsrec_bridge *br_int,
> -                        struct shash *existing_ports)
> +                        struct shash *existing_ports,
> +                        struct hmap *patched_datapaths)
>  {
>      const struct sbrec_port_binding *binding;
>      SBREC_PORT_BINDING_FOR_EACH (binding, ctx->ovnsb_idl) {
> @@ -272,13 +287,14 @@ add_logical_patch_ports(struct controller_ctx *ctx,
>                                existing_ports);
>              free(dst_name);
>              free(src_name);
> +            add_patched_datapath(patched_datapaths, binding);
>          }
>      }
>  }
>
>  void
>  patch_run(struct controller_ctx *ctx, const struct ovsrec_bridge *br_int,
> -          struct hmap *local_datapaths)
> +          struct hmap *local_datapaths, struct hmap *patched_datapaths)
>  {
>      if (!ctx->ovs_idl_txn) {
>          return;
> @@ -298,7 +314,7 @@ patch_run(struct controller_ctx *ctx, const struct ovsrec_bridge *br_int,
>       * 'existing_ports' any patch ports that do exist in the database and
>       * should be there. */
>      add_bridge_mappings(ctx, br_int, &existing_ports, local_datapaths);
> -    add_logical_patch_ports(ctx, br_int, &existing_ports);
> +    add_logical_patch_ports(ctx, br_int, &existing_ports, patched_datapaths);
>
>      /* Now 'existing_ports' only still contains patch ports that exist in the
>       * database but shouldn't.  Delete them from the database. */
> diff --git a/ovn/controller/patch.h b/ovn/controller/patch.h
> index 38ee7a8..d5d842e 100644
> --- a/ovn/controller/patch.h
> +++ b/ovn/controller/patch.h
> @@ -27,6 +27,6 @@ struct hmap;
>  struct ovsrec_bridge;
>
>  void patch_run(struct controller_ctx *, const struct ovsrec_bridge *br_int,
> -               struct hmap *local_datapaths);
> +               struct hmap *local_datapaths, struct hmap *patched_datapaths);
>
>  #endif /* ovn/patch.h */
> diff --git a/ovn/controller/physical.c b/ovn/controller/physical.c
> index 657c3e2..9615142 100644
> --- a/ovn/controller/physical.c
> +++ b/ovn/controller/physical.c
> @@ -148,7 +148,7 @@ void
>  physical_run(struct controller_ctx *ctx, enum mf_field_id mff_ovn_geneve,
>               const struct ovsrec_bridge *br_int, const char *this_chassis_id,
>               const struct simap *ct_zones, struct hmap *flow_table,
> -             struct hmap *local_datapaths)
> +             struct hmap *local_datapaths, struct hmap *patched_datapaths)
>  {
>      struct simap localvif_to_ofport = SIMAP_INITIALIZER(&localvif_to_ofport);
>      struct hmap tunnels = HMAP_INITIALIZER(&tunnels);
> @@ -232,6 +232,38 @@ physical_run(struct controller_ctx *ctx, enum mf_field_id mff_ovn_geneve,
>       * 64 for logical-to-physical translation. */
>      const struct sbrec_port_binding *binding;
>      SBREC_PORT_BINDING_FOR_EACH (binding, ctx->ovnsb_idl) {
> +        /* Skip the port binding if the port is on a datapath that is neither
> +         * local nor with any logical patch port connected, because local ports
> +         * would never need to talk to those ports.
> +         *
> +         * Even with this approach there could still be unnecessary port
> +         * bindings processed. A better approach would be a kind of "flood
> +         * fill" algorithm:
> +         *
> +         *   1. Initialize set S to the logical datapaths that have a port
> +         *      located on the hypervisor.
> +         *
> +         *   2. For each patch port P in a logical datapath in S, add the
> +         *      logical datapath of the remote end of P to S.  Iterate
> +         *      until S reaches a fixed point.
> +         *
> +         * This can be implemented in northd, which can generate the sets and
> +         * save it on each port-binding record in SB, and ovn-controller can
> +         * use the information directly. However, there can be update storms
> +         * when a pair of patch ports are added/removed to connect/disconnect
> +         * large lrouters and lswitches. This needs to be studied further.
> +         */
> +        struct hmap_node *ld;
> +        ld = hmap_first_with_hash(local_datapaths,
> +                                  binding->datapath->tunnel_key);
> +        if (!ld) {
> +            struct hmap_node *pd;
> +            pd = hmap_first_with_hash(patched_datapaths,
> +                                      binding->datapath->tunnel_key);
> +            if (!pd) {
> +                continue;
> +            }
> +        }
> +
>          /* Find the OpenFlow port for the logical port, as 'ofport'.  This is
>           * one of:
>           *
> diff --git a/ovn/controller/physical.h b/ovn/controller/physical.h
> index 826b99b..9f40574 100644
> --- a/ovn/controller/physical.h
> +++ b/ovn/controller/physical.h
> @@ -44,6 +44,6 @@ void physical_register_ovs_idl(struct ovsdb_idl *);
>  void physical_run(struct controller_ctx *, enum mf_field_id mff_ovn_geneve,
>                    const struct ovsrec_bridge *br_int, const char *chassis_id,
>                    const struct simap *ct_zones, struct hmap *flow_table,
> -                  struct hmap *local_datapaths);
> +                  struct hmap *local_datapaths, struct hmap *patched_datapaths);
>
>  #endif /* ovn/physical.h */
> --
> 2.1.0
>
> _______________________________________________
> dev mailing list
> dev at openvswitch.org
> http://openvswitch.org/mailman/listinfo/dev



More information about the dev mailing list