[ovs-dev] [PATCH ovn v9 5/8] ovn-controller: I-P for ct zone and OVS interface changes in flow output stage.

Han Zhou hzhou at ovn.org
Wed Jun 3 05:36:33 UTC 2020


Thanks for the update. LGTM, except some minor changes related to the
comment in v9 4/8.

On Thu, May 28, 2020 at 4:05 AM <numans at ovn.org> wrote:
>
> From: Numan Siddique <numans at ovn.org>
>
> This patch handles ct zone changes and OVS interface changes incrementally
> in the flow output stage.
>
> Any changes to ct zone can be handled by running physical_run() instead
of running
> flow_output_run(). And any changes to OVS interfaces can be either handled
> incrementally (for OVS interfaces representing VIFs) or just running
> physical_run() (for tunnel and other types of interfaces).
>
> To better handle this, a new engine node 'physical_flow_changes' is added
which
> handles changes to ct zone and OVS interfaces.
>
> Acked-by: Mark Michelson <mmichels at redhat.com>
> Signed-off-by: Numan Siddique <numans at ovn.org>
> ---
>  controller/binding.c        |  23 +-----
>  controller/binding.h        |  24 +++++-
>  controller/ovn-controller.c | 146 +++++++++++++++++++++++++++++++++++-
>  controller/physical.c       |  51 +++++++++++++
>  controller/physical.h       |   5 +-
>  5 files changed, 224 insertions(+), 25 deletions(-)
>
> diff --git a/controller/binding.c b/controller/binding.c
> index 74e0e0710..63c7203bb 100644
> --- a/controller/binding.c
> +++ b/controller/binding.c
> @@ -503,7 +503,7 @@ remove_local_lport_ids(const struct
sbrec_port_binding *binding_rec,
>   * 'struct local_binding' is used. A shash of these local bindings is
>   * maintained with the 'external_ids:iface-id' as the key to the shash.
>   *
> - * struct local_binding has 3 main fields:
> + * struct local_binding (defined in binding.h) has 3 main fields:
>   *    - type
>   *    - OVS interface row object
>   *    - Port_Binding row object
> @@ -554,21 +554,6 @@ remove_local_lport_ids(const struct
sbrec_port_binding *binding_rec,
>   *   - For each 'virtual' Port Binding (of type BT_VIRTUAL) provided its
parent
>   *     is bound to this chassis.
>   */
> -enum local_binding_type {
> -    BT_VIF,
> -    BT_CONTAINER,
> -    BT_VIRTUAL
> -};
> -
> -struct local_binding {
> -    char *name;
> -    enum local_binding_type type;
> -    const struct ovsrec_interface *iface;
> -    const struct sbrec_port_binding *pb;
> -
> -    /* shash of 'struct local_binding' representing children. */
> -    struct shash children;
> -};
>
>  static struct local_binding *
>  local_binding_create(const char *name, const struct ovsrec_interface
*iface,
> @@ -590,12 +575,6 @@ local_binding_add(struct shash *local_bindings,
struct local_binding *lbinding)
>      shash_add(local_bindings, lbinding->name, lbinding);
>  }
>
> -static struct local_binding *
> -local_binding_find(struct shash *local_bindings, const char *name)
> -{
> -    return shash_find_data(local_bindings, name);
> -}
> -
>  static void
>  local_binding_destroy(struct local_binding *lbinding)
>  {
> diff --git a/controller/binding.h b/controller/binding.h
> index f7ace6faf..21118ecd4 100644
> --- a/controller/binding.h
> +++ b/controller/binding.h
> @@ -18,6 +18,7 @@
>  #define OVN_BINDING_H 1
>
>  #include <stdbool.h>
> +#include "openvswitch/shash.h"
>
>  struct hmap;
>  struct ovsdb_idl;
> @@ -32,7 +33,6 @@ struct sbrec_chassis;
>  struct sbrec_port_binding_table;
>  struct sset;
>  struct sbrec_port_binding;
> -struct shash;
>
>  struct binding_ctx_in {
>      struct ovsdb_idl_txn *ovnsb_idl_txn;
> @@ -60,6 +60,28 @@ struct binding_ctx_out {
>      struct smap *local_iface_ids;
>  };
>
> +enum local_binding_type {
> +    BT_VIF,
> +    BT_CONTAINER,
> +    BT_VIRTUAL
> +};
> +
> +struct local_binding {
> +    char *name;
> +    enum local_binding_type type;
> +    const struct ovsrec_interface *iface;
> +    const struct sbrec_port_binding *pb;
> +
> +    /* shash of 'struct local_binding' representing children. */
> +    struct shash children;
> +};
> +
> +static inline struct local_binding *
> +local_binding_find(struct shash *local_bindings, const char *name)
> +{
> +    return shash_find_data(local_bindings, name);
> +}
> +
>  void binding_register_ovs_idl(struct ovsdb_idl *);
>  void binding_run(struct binding_ctx_in *, struct binding_ctx_out *);
>  bool binding_cleanup(struct ovsdb_idl_txn *ovnsb_idl_txn,
> diff --git a/controller/ovn-controller.c b/controller/ovn-controller.c
> index 89a911b36..6381e3856 100644
> --- a/controller/ovn-controller.c
> +++ b/controller/ovn-controller.c
> @@ -1369,8 +1369,13 @@ static void init_physical_ctx(struct engine_node
*node,
>
>      ovs_assert(br_int && chassis);
>
> +    struct ovsrec_interface_table *iface_table =
> +        (struct ovsrec_interface_table *)EN_OVSDB_GET(
> +            engine_get_input("OVS_interface", node));
> +
>      struct ed_type_ct_zones *ct_zones_data =
>          engine_get_input_data("ct_zones", node);
> +
>      struct simap *ct_zones = &ct_zones_data->current;
>
>      p_ctx->sbrec_port_binding_by_name = sbrec_port_binding_by_name;
> @@ -1378,12 +1383,14 @@ static void init_physical_ctx(struct engine_node
*node,
>      p_ctx->mc_group_table = multicast_group_table;
>      p_ctx->br_int = br_int;
>      p_ctx->chassis_table = chassis_table;
> +    p_ctx->iface_table = iface_table;
>      p_ctx->chassis = chassis;
>      p_ctx->active_tunnels = &rt_data->active_tunnels;
>      p_ctx->local_datapaths = &rt_data->local_datapaths;
>      p_ctx->local_lports = &rt_data->local_lports;
>      p_ctx->ct_zones = ct_zones;
>      p_ctx->mff_ovn_geneve = ed_mff_ovn_geneve->mff_ovn_geneve;
> +    p_ctx->local_bindings = &rt_data->local_bindings;
>  }
>
>  static void init_lflow_ctx(struct engine_node *node,
> @@ -1791,6 +1798,127 @@ flow_output_port_groups_handler(struct
engine_node *node, void *data)
>      return _flow_output_resource_ref_handler(node, data,
REF_TYPE_PORTGROUP);
>  }
>
> +/* Engine node en_physical_flow_changes indicates whether
> + * there is a need to
> + *   - recompute only physical flows or
> + *   - we can incrementally process the physical flows.
> + *
> + * en_physical_flow_changes is an input to flow_output engine node.
> + * If the engine node 'en_physical_flow_changes' gets updated during
> + * engine run, it means the handler for this -
> + * flow_output_physical_flow_changes_handler() will either
> + *    - recompute the physical flows by calling 'physical_run() or
> + *    - incrementally process some of the changes for physical flow
> + *      calculation. Right now we handle OVS interfaces changes
> + *      for physical flow computation.
> + *
> + * Whenever a port binding happens, the follow-up
> + * activity is the zone id allocation for that port binding.
> + * With this intermediate engine node, we avoid full recomputation.
> + * Instead we do physical flow computation (either full recomputation
> + * by calling physical_run() or handling the changes incrementally).
> + *
> + * Hence this is an intermediate engine node to indicate the
> + * flow_output engine to recompute/compute the physical flows.
> + *
> + * TODO 1. Ideally this engine node should recompute/compute the physical
> + *      flows instead of relegating it to the flow_output node.
> + *      But this requires splitting the flow_output node to
> + *      logical_flow_output and physical_flow_output.
> + *
> + * TODO 2. We can further optimise the en_ct_zone changes to
> + *         compute the physical flows for changed zone ids.
> + *
> + * TODO 3: physical.c has a global simap -localvif_to_ofport which
stores the
> + *         local OVS interfaces and the ofport numbers. Ideally this
should be
> + *         part of the engine data.
> + */
> +struct ed_type_pfc_tracked_data {
> +    bool recompute_physical_flows;
> +    bool ovs_ifaces_changed;
> +};
> +
> +static void
> +en_physical_flow_changes_clear_data(void *tracked_data)
> +{
> +    struct ed_type_pfc_tracked_data *data = tracked_data;
> +    data->recompute_physical_flows = false;
> +    data->ovs_ifaces_changed = false;
> +}
> +
> +static void *
> +en_physical_flow_changes_init(struct engine_node *node,
> +                              struct engine_arg *arg OVS_UNUSED)
> +{
> +    struct ed_type_pfc_tracked_data *tdata = xzalloc(sizeof *tdata);
> +    node->tracked_data = tdata;
> +    node->clear_tracked_data = en_physical_flow_changes_clear_data;
> +    return NULL;
> +}
> +
> +static void
> +en_physical_flow_changes_cleanup(void *data OVS_UNUSED)
> +{
> +}
> +
> +/* Indicate to the flow_output engine that we need to recompute physical
> + * flows. */
> +static void
> +en_physical_flow_changes_run(struct engine_node *node, void *data
OVS_UNUSED)
> +{
> +    struct ed_type_pfc_tracked_data *pfc_tdata = node->tracked_data;
> +    pfc_tdata->recompute_physical_flows = true;
> +    engine_set_node_state(node, EN_UPDATED);
> +}
> +
> +/* There are OVS interface changes. Indicate to the flow_output engine
> + * to handle these OVS interface changes for physical flow computations.
*/
> +static bool
> +physical_flow_changes_ovs_iface_handler(struct engine_node *node,
> +                                        void *data OVS_UNUSED)
> +{
> +    struct ed_type_pfc_tracked_data *pfc_tdata = node->tracked_data;
> +    pfc_tdata->ovs_ifaces_changed = true;
> +    engine_set_node_state(node, EN_UPDATED);
> +    return true;
> +}
> +
> +static bool
> +flow_output_physical_flow_changes_handler(struct engine_node *node, void
*data)
> +{
> +    struct ed_type_runtime_data *rt_data =
> +        engine_get_input_data("runtime_data", node);
> +
> +    struct ed_type_flow_output *fo = data;
> +    struct physical_ctx p_ctx;
> +    init_physical_ctx(node, rt_data, &p_ctx);
> +
> +    engine_set_node_state(node, EN_UPDATED);
> +    struct ed_type_pfc_tracked_data *pfc_tdata =
> +        engine_get_input_tracked_data("physical_flow_changes", node);
> +
> +    if (pfc_tdata->recompute_physical_flows) {
> +        /* This indicates that we need to recompute the physical flows.
*/
> +        physical_run(&p_ctx, &fo->flow_table);
> +        return true;
> +    }
> +
> +    if (pfc_tdata->ovs_ifaces_changed) {
> +        /* There are OVS interface changes. Try to handle them
> +         * incrementally. */
> +        return physical_handle_ovs_iface_changes(&p_ctx,
&fo->flow_table);
> +    }
> +
> +    return true;
> +}
> +
> +static bool
> +flow_output_noop_handler(struct engine_node *node OVS_UNUSED,
> +                         void *data OVS_UNUSED)
> +{
> +    return true;
> +}
> +
>  struct ovn_controller_exit_args {
>      bool *exiting;
>      bool *restart;
> @@ -1915,6 +2043,7 @@ main(int argc, char *argv[])
>      ENGINE_NODE(runtime_data, "runtime_data");
>      ENGINE_NODE(mff_ovn_geneve, "mff_ovn_geneve");
>      ENGINE_NODE(ofctrl_is_connected, "ofctrl_is_connected");
> +    ENGINE_NODE(physical_flow_changes, "physical_flow_changes");
>      ENGINE_NODE(flow_output, "flow_output");
>      ENGINE_NODE(addr_sets, "addr_sets");
>      ENGINE_NODE(port_groups, "port_groups");
> @@ -1934,13 +2063,28 @@ main(int argc, char *argv[])
>      engine_add_input(&en_port_groups, &en_sb_port_group,
>                       port_groups_sb_port_group_handler);
>
> +    /* Engine node physical_flow_changes indicates whether
> +     * we need to recompute only the physical flows or whether
> +     * we can process the physical flow changes incrementally.
> +     */
> +    engine_add_input(&en_physical_flow_changes, &en_ct_zones,
> +                     NULL);
> +    engine_add_input(&en_physical_flow_changes, &en_ovs_interface,
> +                     physical_flow_changes_ovs_iface_handler);
> +
>      engine_add_input(&en_flow_output, &en_addr_sets,
>                       flow_output_addr_sets_handler);
>      engine_add_input(&en_flow_output, &en_port_groups,
>                       flow_output_port_groups_handler);
>      engine_add_input(&en_flow_output, &en_runtime_data, NULL);
> -    engine_add_input(&en_flow_output, &en_ct_zones, NULL);
>      engine_add_input(&en_flow_output, &en_mff_ovn_geneve, NULL);
> +    engine_add_input(&en_flow_output, &en_physical_flow_changes,
> +                     flow_output_physical_flow_changes_handler);
> +
> +    /* We need these input nodes only for their data. Hence the noop handler. */
> +    engine_add_input(&en_flow_output, &en_ct_zones,
flow_output_noop_handler);
> +    engine_add_input(&en_flow_output, &en_ovs_interface,
> +                     flow_output_noop_handler);
>
>      engine_add_input(&en_flow_output, &en_ovs_open_vswitch, NULL);
>      engine_add_input(&en_flow_output, &en_ovs_bridge, NULL);
> diff --git a/controller/physical.c b/controller/physical.c
> index f06313b9d..240ec158e 100644
> --- a/controller/physical.c
> +++ b/controller/physical.c
> @@ -1780,6 +1780,57 @@ physical_run(struct physical_ctx *p_ctx,
>      simap_destroy(&new_tunnel_to_ofport);
>  }
>
> +bool
> +physical_handle_ovs_iface_changes(struct physical_ctx *p_ctx,
> +                                  struct ovn_desired_flow_table
*flow_table)
> +{
> +    const struct ovsrec_interface *iface_rec;
> +    OVSREC_INTERFACE_TABLE_FOR_EACH_TRACKED (iface_rec,
p_ctx->iface_table) {
> +        if (!strcmp(iface_rec->type, "geneve") ||
> +            !strcmp(iface_rec->type, "patch")) {
> +            return false;
> +        }
> +    }
> +
> +    struct ofpbuf ofpacts;
> +    ofpbuf_init(&ofpacts, 0);
> +
> +    OVSREC_INTERFACE_TABLE_FOR_EACH_TRACKED (iface_rec,
p_ctx->iface_table) {
> +        const char *iface_id = smap_get(&iface_rec->external_ids,
"iface-id");
> +        if (!iface_id) {
> +            continue;
> +        }
> +
> +        const struct local_binding *lb =
> +            local_binding_find(p_ctx->local_bindings, iface_id);
> +
> +        if (!lb || !lb->pb) {
> +            continue;
> +        }
> +
> +        int64_t ofport = iface_rec->n_ofport ? *iface_rec->ofport : 0;
> +        if (ovsrec_interface_is_deleted(iface_rec)) {
> +            ofctrl_remove_flows(flow_table, &lb->pb->header_.uuid);
> +            simap_find_and_delete(&localvif_to_ofport, iface_id);
> +        } else {
> +            if (!ovsrec_interface_is_new(iface_rec)) {
> +                ofctrl_remove_flows(flow_table, &lb->pb->header_.uuid);
> +            }
> +
> +            simap_put(&localvif_to_ofport, iface_id, ofport);
> +            consider_port_binding(p_ctx->sbrec_port_binding_by_name,
> +                                  p_ctx->mff_ovn_geneve, p_ctx->ct_zones,
> +                                  p_ctx->active_tunnels,
> +                                  p_ctx->local_datapaths,
> +                                  lb->pb, p_ctx->chassis,
> +                                  flow_table, &ofpacts);
> +        }
> +    }
> +
> +    ofpbuf_uninit(&ofpacts);
> +    return true;
> +}
> +
>  bool
>  get_tunnel_ofport(const char *chassis_name, char *encap_ip, ofp_port_t
*ofport)
>  {
> diff --git a/controller/physical.h b/controller/physical.h
> index dadf84ea1..9ca34436a 100644
> --- a/controller/physical.h
> +++ b/controller/physical.h
> @@ -49,11 +49,13 @@ struct physical_ctx {
>      const struct ovsrec_bridge *br_int;
>      const struct sbrec_chassis_table *chassis_table;
>      const struct sbrec_chassis *chassis;
> +    const struct ovsrec_interface_table *iface_table;
>      const struct sset *active_tunnels;
>      struct hmap *local_datapaths;
>      struct sset *local_lports;
>      const struct simap *ct_zones;
>      enum mf_field_id mff_ovn_geneve;
> +    struct shash *local_bindings;
>  };
>
>  void physical_register_ovs_idl(struct ovsdb_idl *);
> @@ -63,7 +65,8 @@ void physical_handle_port_binding_changes(struct
physical_ctx *,
>                                            struct ovn_desired_flow_table
*);
>  void physical_handle_mc_group_changes(struct physical_ctx *,
>                                        struct ovn_desired_flow_table *);
> -
> +bool physical_handle_ovs_iface_changes(struct physical_ctx *,
> +                                       struct ovn_desired_flow_table *);
>  bool get_tunnel_ofport(const char *chassis_name, char *encap_ip,
>                         ofp_port_t *ofport);
>  #endif /* controller/physical.h */
> --
> 2.26.2
>
> _______________________________________________
> dev mailing list
> dev at openvswitch.org
> https://mail.openvswitch.org/mailman/listinfo/ovs-dev


More information about the dev mailing list