[ovs-dev] [PATCH v2 17/21] ovn: Rename Pipeline table to Rule table.

Justin Pettit jpettit at nicira.com
Thu Jul 30 23:33:23 UTC 2015


I think Pipeline is more descriptive of what the table actually is, and I find the new name confusing since we already use the term "rule" in the classifier.  I think Flow (or Logical_Flow) would be clearer than Rule, since we really are talking about flows, and people may look for a distinction between the two that isn't there.  That, combined with the fact that we use "rule" for a different purpose in other parts of the tree, will, I think, only make things more confusing.
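
For reference, this is roughly the overloading I have in mind (a sketch from memory, with simplified fields, not the exact definitions in the tree):

    /* Sketch only: paraphrased, not the real declarations. */

    /* lib/classifier.h: a "rule" is an entry in the flow classifier. */
    struct minimatch;                 /* Compressed flow match (reduced to a
                                       * pointer here for the sketch). */
    struct cls_rule {
        int priority;                 /* Larger numbers are higher priority. */
        struct minimatch *match;      /* Fields and values to match on. */
        /* ... */
    };

    /* ofproto/ofproto-provider.h: a "rule" is an installed OpenFlow flow. */
    struct ofproto;                   /* An OpenFlow switch. */
    struct rule {
        struct ofproto *ofproto;      /* The switch that owns this flow. */
        struct cls_rule cr;           /* Entry in that switch's classifier. */
        /* ... */
    };

A southbound table named Rule would add a third, unrelated use of the word.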

All that said, I haven't looked ahead at the other patches yet, so maybe this is the right choice.  I'll defer to you.

Acked-by: Justin Pettit <jpettit at nicira.com>

--Justin


> On Jul 28, 2015, at 8:44 AM, Ben Pfaff <blp at nicira.com> wrote:
> 
> The OVN pipeline is being split into two phases, which are most naturally
> called "pipelines".  I kept getting very confused trying to call them
> anything else, and in the end it seems to make more sense to just rename
> the Pipeline table.
> 
> It would be even better to call this table Flow or Logical_Flow, but I
> am worried that we already have far too many uses of the word "flow".
> "Rule" is slightly less overloaded in OVS.
> 
> Signed-off-by: Ben Pfaff <blp at nicira.com>
> ---
> ovn/TODO                              |   2 +-
> ovn/controller/automake.mk            |   6 +-
> ovn/controller/ovn-controller.c       |   8 +-
> ovn/controller/physical.c             |   2 +-
> ovn/controller/{pipeline.c => rule.c} |  50 +++++-----
> ovn/controller/{pipeline.h => rule.h} |  18 ++--
> ovn/lib/actions.c                     |   4 +-
> ovn/northd/ovn-northd.c               | 182 +++++++++++++++++-----------------
> ovn/ovn-architecture.7.xml            |  20 ++--
> ovn/ovn-nb.xml                        |   4 +-
> ovn/ovn-sb.ovsschema                  |   2 +-
> ovn/ovn-sb.xml                        |   6 +-
> 12 files changed, 152 insertions(+), 152 deletions(-)
> rename ovn/controller/{pipeline.c => rule.c} (89%)
> rename ovn/controller/{pipeline.h => rule.h} (79%)
> 
> diff --git a/ovn/TODO b/ovn/TODO
> index 07d66da..19c95ca 100644
> --- a/ovn/TODO
> +++ b/ovn/TODO
> @@ -48,7 +48,7 @@
>     Currently, clients monitor the entire contents of a table.  It
>     might make sense to allow clients to monitor only rows that
>     satisfy specific criteria, e.g. to allow an ovn-controller to
> -    receive only Pipeline rows for logical networks on its hypervisor.
> +    receive only Rule rows for logical networks on its hypervisor.
> 
> *** Reducing redundant data and code within ovsdb-server.
> 
> diff --git a/ovn/controller/automake.mk b/ovn/controller/automake.mk
> index 9ed6bec..55134a3 100644
> --- a/ovn/controller/automake.mk
> +++ b/ovn/controller/automake.mk
> @@ -10,10 +10,10 @@ ovn_controller_ovn_controller_SOURCES = \
> 	ovn/controller/ofctrl.h \
> 	ovn/controller/ovn-controller.c \
> 	ovn/controller/ovn-controller.h \
> -	ovn/controller/pipeline.c \
> -	ovn/controller/pipeline.h \
> 	ovn/controller/physical.c \
> -	ovn/controller/physical.h
> +	ovn/controller/physical.h \
> +	ovn/controller/rule.c \
> +	ovn/controller/rule.h
> ovn_controller_ovn_controller_LDADD = ovn/lib/libovn.la lib/libopenvswitch.la
> man_MANS += ovn/controller/ovn-controller.8
> EXTRA_DIST += ovn/controller/ovn-controller.8.xml
> diff --git a/ovn/controller/ovn-controller.c b/ovn/controller/ovn-controller.c
> index 12515c3..cfd6eb9 100644
> --- a/ovn/controller/ovn-controller.c
> +++ b/ovn/controller/ovn-controller.c
> @@ -44,7 +44,7 @@
> #include "chassis.h"
> #include "encaps.h"
> #include "physical.h"
> -#include "pipeline.h"
> +#include "rule.h"
> 
> VLOG_DEFINE_THIS_MODULE(main);
> 
> @@ -224,7 +224,7 @@ main(int argc, char *argv[])
>     sbrec_init();
> 
>     ofctrl_init();
> -    pipeline_init();
> +    rule_init();
> 
>     /* Connect to OVS OVSDB instance.  We do not monitor all tables by
>      * default, so modules must register their interest explicitly.  */
> @@ -266,7 +266,7 @@ main(int argc, char *argv[])
> 
>         if (br_int) {
>             struct hmap flow_table = HMAP_INITIALIZER(&flow_table);
> -            pipeline_run(&ctx, &flow_table);
> +            rule_run(&ctx, &flow_table);
>             if (chassis_id) {
>                 physical_run(&ctx, br_int, chassis_id, &flow_table);
>             }
> @@ -318,7 +318,7 @@ main(int argc, char *argv[])
>     }
> 
>     unixctl_server_destroy(unixctl);
> -    pipeline_destroy();
> +    rule_destroy();
>     ofctrl_destroy();
> 
>     idl_loop_destroy(&ovs_idl_loop);
> diff --git a/ovn/controller/physical.c b/ovn/controller/physical.c
> index 55d6107..2dc96ab 100644
> --- a/ovn/controller/physical.c
> +++ b/ovn/controller/physical.c
> @@ -21,7 +21,7 @@
> #include "ofpbuf.h"
> #include "ovn-controller.h"
> #include "ovn/lib/ovn-sb-idl.h"
> -#include "pipeline.h"
> +#include "rule.h"
> #include "simap.h"
> #include "vswitch-idl.h"
> 
> diff --git a/ovn/controller/pipeline.c b/ovn/controller/rule.c
> similarity index 89%
> rename from ovn/controller/pipeline.c
> rename to ovn/controller/rule.c
> index 1927ce4..0f5971b 100644
> --- a/ovn/controller/pipeline.c
> +++ b/ovn/controller/rule.c
> @@ -14,7 +14,7 @@
>  */
> 
> #include <config.h>
> -#include "pipeline.h"
> +#include "rule.h"
> #include "dynamic-string.h"
> #include "ofctrl.h"
> #include "ofp-actions.h"
> @@ -26,11 +26,11 @@
> #include "ovn/lib/ovn-sb-idl.h"
> #include "simap.h"
> 
> -VLOG_DEFINE_THIS_MODULE(pipeline);
> +VLOG_DEFINE_THIS_MODULE(rule);
> 
> /* Symbol table. */
> 
> -/* Contains "struct expr_symbol"s for fields supported by OVN pipeline. */
> +/* Contains "struct expr_symbol"s for fields supported by OVN rules. */
> static struct shash symtab;
> 
> static void
> @@ -244,31 +244,31 @@ ldp_destroy(void)
> }
> 
> void
> -pipeline_init(void)
> +rule_init(void)
> {
>     symtab_init();
> }
> 
> -/* Translates logical flows in the Pipeline table in the OVN_SB database
> - * into OpenFlow flows, adding the OpenFlow flows to 'flow_table'.
> +/* Translates logical flows in the Rule table in the OVN_SB database into
> + * OpenFlow flows, adding the OpenFlow flows to 'flow_table'.
>  *
> - * We put the Pipeline flows into OpenFlow tables 16 through 47 (inclusive). */
> + * We put the Rule flows into OpenFlow tables 16 through 47 (inclusive). */
> void
> -pipeline_run(struct controller_ctx *ctx, struct hmap *flow_table)
> +rule_run(struct controller_ctx *ctx, struct hmap *flow_table)
> {
>     struct hmap flows = HMAP_INITIALIZER(&flows);
>     uint32_t conj_id_ofs = 1;
> 
>     ldp_run(ctx);
> 
> -    const struct sbrec_pipeline *pipeline;
> -    SBREC_PIPELINE_FOR_EACH (pipeline, ctx->ovnsb_idl) {
> -        /* Find the "struct logical_datapath" asssociated with this Pipeline
> -         * row.  If there's no such struct, that must be because no logical
> -         * ports are bound to that logical datapath, so there's no point in
> -         * maintaining any flows for it anyway, so skip it. */
> +    const struct sbrec_rule *rule;
> +    SBREC_RULE_FOR_EACH (rule, ctx->ovnsb_idl) {
> +        /* Find the "struct logical_datapath" associated with this Rule row.
> +         * If there's no such struct, that must be because no logical ports are
> +         * bound to that logical datapath, so there's no point in maintaining
> +         * any flows for it anyway, so skip it. */
>         const struct logical_datapath *ldp;
> -        ldp = ldp_lookup(&pipeline->logical_datapath);
> +        ldp = ldp_lookup(&rule->logical_datapath);
>         if (!ldp) {
>             continue;
>         }
> @@ -281,13 +281,13 @@ pipeline_run(struct controller_ctx *ctx, struct hmap *flow_table)
>         char *error;
> 
>         ofpbuf_use_stub(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
> -        next_table_id = pipeline->table_id < 31 ? pipeline->table_id + 17 : 0;
> -        error = actions_parse_string(pipeline->actions, &symtab, &ldp->ports,
> +        next_table_id = rule->table_id < 31 ? rule->table_id + 17 : 0;
> +        error = actions_parse_string(rule->actions, &symtab, &ldp->ports,
>                                      next_table_id, &ofpacts, &prereqs);
>         if (error) {
>             static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
>             VLOG_WARN_RL(&rl, "error parsing actions \"%s\": %s",
> -                         pipeline->actions, error);
> +                         rule->actions, error);
>             free(error);
>             continue;
>         }
> @@ -296,7 +296,7 @@ pipeline_run(struct controller_ctx *ctx, struct hmap *flow_table)
>         struct hmap matches;
>         struct expr *expr;
> 
> -        expr = expr_parse_string(pipeline->match, &symtab, &error);
> +        expr = expr_parse_string(rule->match, &symtab, &error);
>         if (!error) {
>             if (prereqs) {
>                 expr = expr_combine(EXPR_T_AND, expr, prereqs);
> @@ -307,7 +307,7 @@ pipeline_run(struct controller_ctx *ctx, struct hmap *flow_table)
>         if (error) {
>             static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
>             VLOG_WARN_RL(&rl, "error parsing match \"%s\": %s",
> -                         pipeline->match, error);
> +                         rule->match, error);
>             expr_destroy(prereqs);
>             ofpbuf_uninit(&ofpacts);
>             free(error);
> @@ -327,8 +327,8 @@ pipeline_run(struct controller_ctx *ctx, struct hmap *flow_table)
>                 m->match.flow.conj_id += conj_id_ofs;
>             }
>             if (!m->n) {
> -                ofctrl_add_flow(flow_table, pipeline->table_id + 16,
> -                                pipeline->priority, &m->match, &ofpacts);
> +                ofctrl_add_flow(flow_table, rule->table_id + 16,
> +                                rule->priority, &m->match, &ofpacts);
>             } else {
>                 uint64_t conj_stubs[64 / 8];
>                 struct ofpbuf conj;
> @@ -343,8 +343,8 @@ pipeline_run(struct controller_ctx *ctx, struct hmap *flow_table)
>                     dst->clause = src->clause;
>                     dst->n_clauses = src->n_clauses;
>                 }
> -                ofctrl_add_flow(flow_table, pipeline->table_id + 16,
> -                                pipeline->priority, &m->match, &conj);
> +                ofctrl_add_flow(flow_table, rule->table_id + 16,
> +                                rule->priority, &m->match, &conj);
>                 ofpbuf_uninit(&conj);
>             }
>         }
> @@ -357,7 +357,7 @@ pipeline_run(struct controller_ctx *ctx, struct hmap *flow_table)
> }
> 
> void
> -pipeline_destroy(void)
> +rule_destroy(void)
> {
>     expr_symtab_destroy(&symtab);
>     ldp_destroy();
> diff --git a/ovn/controller/pipeline.h b/ovn/controller/rule.h
> similarity index 79%
> rename from ovn/controller/pipeline.h
> rename to ovn/controller/rule.h
> index 7d33341..3998994 100644
> --- a/ovn/controller/pipeline.h
> +++ b/ovn/controller/rule.h
> @@ -14,13 +14,13 @@
>  */
> 
> 
> -#ifndef OVN_PIPELINE_H
> -#define OVN_PIPELINE_H 1
> +#ifndef OVN_RULE_H
> +#define OVN_RULE_H 1
> 
> -/* Pipeline table translation to OpenFlow
> - * ======================================
> +/* Rule table translation to OpenFlow
> + * ==================================
>  *
> - * The Pipeline table obtained from the OVN_Southbound database works in terms
> + * The Rule table obtained from the OVN_Southbound database works in terms
>  * of logical entities, that is, logical flows among logical datapaths and
>  * logical ports.  This code translates these logical flows into OpenFlow flows
>  * that, again, work in terms of logical entities implemented through OpenFlow
> @@ -41,10 +41,10 @@ struct uuid;
> #define MFF_LOG_INPORT  MFF_REG6 /* Logical input port. */
> #define MFF_LOG_OUTPORT MFF_REG7 /* Logical output port. */
> 
> -void pipeline_init(void);
> -void pipeline_run(struct controller_ctx *, struct hmap *flow_table);
> -void pipeline_destroy(void);
> +void rule_init(void);
> +void rule_run(struct controller_ctx *, struct hmap *flow_table);
> +void rule_destroy(void);
> 
> uint32_t ldp_to_integer(const struct uuid *logical_datapath);
> 
> -#endif /* ovn/pipeline.h */
> +#endif /* ovn/rule.h */
> diff --git a/ovn/lib/actions.c b/ovn/lib/actions.c
> index 28be688..0aabdcf 100644
> --- a/ovn/lib/actions.c
> +++ b/ovn/lib/actions.c
> @@ -176,8 +176,8 @@ parse_actions(struct action_context *ctx)
> }
> 
> /* Parses OVN actions, in the format described for the "actions" column in the
> - * Pipeline table in ovn-sb(5), and appends the parsed versions of the actions
> - * to 'ofpacts' as "struct ofpact"s.
> + * Rule table in ovn-sb(5), and appends the parsed versions of the actions to
> + * 'ofpacts' as "struct ofpact"s.
>  *
>  * 'symtab' provides a table of "struct expr_symbol"s to support (as one would
>  * provide to expr_parse()).
> diff --git a/ovn/northd/ovn-northd.c b/ovn/northd/ovn-northd.c
> index 2ad727c..eac5546 100644
> --- a/ovn/northd/ovn-northd.c
> +++ b/ovn/northd/ovn-northd.c
> @@ -120,35 +120,35 @@ macs_equal(char **binding_macs_, size_t b_n_macs,
>     return (i == b_n_macs) ? true : false;
> }
> 
> -/* Pipeline generation.
> +/* Rule generation.
>  *
> - * This code generates the Pipeline table in the southbound database, as a
> + * This code generates the Rule table in the southbound database, as a
>  * function of most of the northbound database.
>  */
> 
> -/* Enough context to add a Pipeline row, using pipeline_add(). */
> -struct pipeline_ctx {
> +/* Enough context to add a Rule row, using rule_add(). */
> +struct rule_ctx {
>     /* From northd_context. */
>     struct ovsdb_idl *ovnsb_idl;
>     struct ovsdb_idl_txn *ovnsb_txn;
> 
> -    /* Contains "struct pipeline_hash_node"s.  Used to figure out what existing
> -     * Pipeline rows should be deleted: we index all of the Pipeline rows into
> -     * this data structure, then as existing rows are generated we remove them.
> -     * After generating all the rows, any remaining in 'pipeline_hmap' must be
> +    /* Contains "struct rule_hash_node"s.  Used to figure out what existing
> +     * Rule rows should be deleted: we index all of the Rule rows into this
> +     * data structure, then as existing rows are generated we remove them.
> +     * After generating all the rows, any remaining in 'rule_hmap' must be
>      * deleted from the database. */
> -    struct hmap pipeline_hmap;
> +    struct hmap rule_hmap;
> };
> 
> -/* A row in the Pipeline table, indexed by its full contents, */
> -struct pipeline_hash_node {
> +/* A row in the Rule table, indexed by its full contents. */
> +struct rule_hash_node {
>     struct hmap_node node;
> -    const struct sbrec_pipeline *pipeline;
> +    const struct sbrec_rule *rule;
> };
> 
> static size_t
> -pipeline_hash(const struct uuid *logical_datapath, uint8_t table_id,
> -              uint16_t priority, const char *match, const char *actions)
> +rule_hash(const struct uuid *logical_datapath, uint8_t table_id,
> +          uint16_t priority, const char *match, const char *actions)
> {
>     size_t hash = uuid_hash(logical_datapath);
>     hash = hash_2words((table_id << 16) | priority, hash);
> @@ -157,52 +157,52 @@ pipeline_hash(const struct uuid *logical_datapath, uint8_t table_id,
> }
> 
> static size_t
> -pipeline_hash_rec(const struct sbrec_pipeline *pipeline)
> +rule_hash_rec(const struct sbrec_rule *rule)
> {
> -    return pipeline_hash(&pipeline->logical_datapath, pipeline->table_id,
> -                         pipeline->priority, pipeline->match,
> -                         pipeline->actions);
> +    return rule_hash(&rule->logical_datapath, rule->table_id,
> +                         rule->priority, rule->match,
> +                         rule->actions);
> }
> 
> -/* Adds a row with the specified contents to the Pipeline table. */
> +/* Adds a row with the specified contents to the Rule table. */
> static void
> -pipeline_add(struct pipeline_ctx *ctx,
> -             const struct nbrec_logical_switch *logical_datapath,
> -             uint8_t table_id,
> -             uint16_t priority,
> -             const char *match,
> -             const char *actions)
> +rule_add(struct rule_ctx *ctx,
> +         const struct nbrec_logical_switch *logical_datapath,
> +         uint8_t table_id,
> +         uint16_t priority,
> +         const char *match,
> +         const char *actions)
> {
> -    struct pipeline_hash_node *hash_node;
> +    struct rule_hash_node *hash_node;
> 
> -    /* Check whether such a row already exists in the Pipeline table.  If so,
> -     * remove it from 'ctx->pipeline_hmap' and we're done. */
> +    /* Check whether such a row already exists in the Rule table.  If so,
> +     * remove it from 'ctx->rule_hmap' and we're done. */
>     HMAP_FOR_EACH_WITH_HASH (hash_node, node,
> -                             pipeline_hash(&logical_datapath->header_.uuid,
> -                                           table_id, priority, match, actions),
> -                             &ctx->pipeline_hmap) {
> -        const struct sbrec_pipeline *pipeline = hash_node->pipeline;
> -        if (uuid_equals(&pipeline->logical_datapath,
> +                             rule_hash(&logical_datapath->header_.uuid,
> +                                       table_id, priority, match, actions),
> +                             &ctx->rule_hmap) {
> +        const struct sbrec_rule *rule = hash_node->rule;
> +        if (uuid_equals(&rule->logical_datapath,
>                         &logical_datapath->header_.uuid)
> -            && pipeline->table_id == table_id
> -            && pipeline->priority == priority
> -            && !strcmp(pipeline->match, match)
> -            && !strcmp(pipeline->actions, actions)) {
> -            hmap_remove(&ctx->pipeline_hmap, &hash_node->node);
> +            && rule->table_id == table_id
> +            && rule->priority == priority
> +            && !strcmp(rule->match, match)
> +            && !strcmp(rule->actions, actions)) {
> +            hmap_remove(&ctx->rule_hmap, &hash_node->node);
>             free(hash_node);
>             return;
>         }
>     }
> 
> -    /* No such Pipeline row.  Add one. */
> -    const struct sbrec_pipeline *pipeline;
> -    pipeline = sbrec_pipeline_insert(ctx->ovnsb_txn);
> -    sbrec_pipeline_set_logical_datapath(pipeline,
> +    /* No such Rule row.  Add one. */
> +    const struct sbrec_rule *rule;
> +    rule = sbrec_rule_insert(ctx->ovnsb_txn);
> +    sbrec_rule_set_logical_datapath(rule,
>                                         logical_datapath->header_.uuid);
> -    sbrec_pipeline_set_table_id(pipeline, table_id);
> -    sbrec_pipeline_set_priority(pipeline, priority);
> -    sbrec_pipeline_set_match(pipeline, match);
> -    sbrec_pipeline_set_actions(pipeline, actions);
> +    sbrec_rule_set_table_id(rule, table_id);
> +    sbrec_rule_set_priority(rule, priority);
> +    sbrec_rule_set_match(rule, match);
> +    sbrec_rule_set_actions(rule, actions);
> }
> 
> /* Appends port security constraints on L2 address field 'eth_addr_field'
> @@ -241,43 +241,43 @@ lport_is_enabled(const struct nbrec_logical_port *lport)
>     return !lport->enabled || *lport->enabled;
> }
> 
> -/* Updates the Pipeline table in the OVN_SB database, constructing its contents
> +/* Updates the Rule table in the OVN_SB database, constructing its contents
>  * based on the OVN_NB database. */
> static void
> -build_pipeline(struct northd_context *ctx)
> +build_rule(struct northd_context *ctx)
> {
> -    struct pipeline_ctx pc = {
> +    struct rule_ctx pc = {
>         .ovnsb_idl = ctx->ovnsb_idl,
>         .ovnsb_txn = ctx->ovnsb_txn,
> -        .pipeline_hmap = HMAP_INITIALIZER(&pc.pipeline_hmap)
> +        .rule_hmap = HMAP_INITIALIZER(&pc.rule_hmap)
>     };
> 
> -    /* Add all the Pipeline entries currently in the southbound database to
> -     * 'pc.pipeline_hmap'.  We remove entries that we generate from the hmap,
> +    /* Add all the Rule entries currently in the southbound database to
> +     * 'pc.rule_hmap'.  We remove entries that we generate from the hmap,
>      * thus by the time we're done only entries that need to be removed
>      * remain. */
> -    const struct sbrec_pipeline *pipeline;
> -    SBREC_PIPELINE_FOR_EACH (pipeline, ctx->ovnsb_idl) {
> -        struct pipeline_hash_node *hash_node = xzalloc(sizeof *hash_node);
> -        hash_node->pipeline = pipeline;
> -        hmap_insert(&pc.pipeline_hmap, &hash_node->node,
> -                    pipeline_hash_rec(pipeline));
> +    const struct sbrec_rule *rule;
> +    SBREC_RULE_FOR_EACH (rule, ctx->ovnsb_idl) {
> +        struct rule_hash_node *hash_node = xzalloc(sizeof *hash_node);
> +        hash_node->rule = rule;
> +        hmap_insert(&pc.rule_hmap, &hash_node->node,
> +                    rule_hash_rec(rule));
>     }
> 
>     /* Table 0: Admission control framework. */
>     const struct nbrec_logical_switch *lswitch;
>     NBREC_LOGICAL_SWITCH_FOR_EACH (lswitch, ctx->ovnnb_idl) {
>         /* Logical VLANs not supported. */
> -        pipeline_add(&pc, lswitch, 0, 100, "vlan.present", "drop;");
> +        rule_add(&pc, lswitch, 0, 100, "vlan.present", "drop;");
> 
>         /* Broadcast/multicast source address is invalid. */
> -        pipeline_add(&pc, lswitch, 0, 100, "eth.src[40]", "drop;");
> +        rule_add(&pc, lswitch, 0, 100, "eth.src[40]", "drop;");
> 
>         /* Port security flows have priority 50 (see below) and will continue
>          * to the next table if packet source is acceptable. */
> 
>         /* Otherwise drop the packet. */
> -        pipeline_add(&pc, lswitch, 0, 0, "1", "drop;");
> +        rule_add(&pc, lswitch, 0, 0, "1", "drop;");
>     }
> 
>     /* Table 0: Ingress port security. */
> @@ -290,8 +290,8 @@ build_pipeline(struct northd_context *ctx)
>             build_port_security("eth.src",
>                                 lport->port_security, lport->n_port_security,
>                                 &match);
> -            pipeline_add(&pc, lswitch, 0, 50, ds_cstr(&match),
> -                         lport_is_enabled(lport) ? "next;" : "drop;");
> +            rule_add(&pc, lswitch, 0, 50, ds_cstr(&match),
> +                     lport_is_enabled(lport) ? "next;" : "drop;");
>             ds_destroy(&match);
>         }
>     }
> @@ -329,8 +329,8 @@ build_pipeline(struct northd_context *ctx)
>                     ds_put_cstr(&unicast, "outport = ");
>                     json_string_escape(lport->name, &unicast);
>                     ds_put_cstr(&unicast, "; next;");
> -                    pipeline_add(&pc, lswitch, 1, 50,
> -                                 ds_cstr(&match), ds_cstr(&unicast));
> +                    rule_add(&pc, lswitch, 1, 50,
> +                             ds_cstr(&match), ds_cstr(&unicast));
>                     ds_destroy(&unicast);
>                     ds_destroy(&match);
>                 } else if (!strcmp(s, "unknown")) {
> @@ -347,12 +347,12 @@ build_pipeline(struct northd_context *ctx)
>         }
> 
>         ds_chomp(&bcast, ' ');
> -        pipeline_add(&pc, lswitch, 1, 100, "eth.dst[40]", ds_cstr(&bcast));
> +        rule_add(&pc, lswitch, 1, 100, "eth.dst[40]", ds_cstr(&bcast));
>         ds_destroy(&bcast);
> 
>         if (unknown.length) {
>             ds_chomp(&unknown, ' ');
> -            pipeline_add(&pc, lswitch, 1, 0, "1", ds_cstr(&unknown));
> +            rule_add(&pc, lswitch, 1, 0, "1", ds_cstr(&unknown));
>         }
>         ds_destroy(&unknown);
>     }
> @@ -363,19 +363,19 @@ build_pipeline(struct northd_context *ctx)
>             const struct nbrec_acl *acl = lswitch->acls[i];
> 
>             NBREC_ACL_FOR_EACH (acl, ctx->ovnnb_idl) {
> -                pipeline_add(&pc, lswitch, 2, acl->priority, acl->match,
> -                             (!strcmp(acl->action, "allow") ||
> -                              !strcmp(acl->action, "allow-related")
> -                              ? "next;" : "drop;"));
> +                rule_add(&pc, lswitch, 2, acl->priority, acl->match,
> +                         (!strcmp(acl->action, "allow") ||
> +                          !strcmp(acl->action, "allow-related")
> +                          ? "next;" : "drop;"));
>             }
>         }
> 
> -        pipeline_add(&pc, lswitch, 2, 0, "1", "next;");
> +        rule_add(&pc, lswitch, 2, 0, "1", "next;");
>     }
> 
>     /* Table 3: Egress port security. */
>     NBREC_LOGICAL_SWITCH_FOR_EACH (lswitch, ctx->ovnnb_idl) {
> -        pipeline_add(&pc, lswitch, 3, 100, "eth.dst[40]", "output;");
> +        rule_add(&pc, lswitch, 3, 100, "eth.dst[40]", "output;");
> 
>         for (size_t i = 0; i < lswitch->n_ports; i++) {
>             const struct nbrec_logical_port *lport = lswitch->ports[i];
> @@ -388,21 +388,21 @@ build_pipeline(struct northd_context *ctx)
>                                 lport->port_security, lport->n_port_security,
>                                 &match);
> 
> -            pipeline_add(&pc, lswitch, 3, 50, ds_cstr(&match),
> +            rule_add(&pc, lswitch, 3, 50, ds_cstr(&match),
>                          lport_is_enabled(lport) ? "output;" : "drop;");
> 
>             ds_destroy(&match);
>         }
>     }
> 
> -    /* Delete any existing Pipeline rows that were not re-generated.  */
> -    struct pipeline_hash_node *hash_node, *next_hash_node;
> -    HMAP_FOR_EACH_SAFE (hash_node, next_hash_node, node, &pc.pipeline_hmap) {
> -        hmap_remove(&pc.pipeline_hmap, &hash_node->node);
> -        sbrec_pipeline_delete(hash_node->pipeline);
> +    /* Delete any existing Rule rows that were not re-generated.  */
> +    struct rule_hash_node *hash_node, *next_hash_node;
> +    HMAP_FOR_EACH_SAFE (hash_node, next_hash_node, node, &pc.rule_hmap) {
> +        hmap_remove(&pc.rule_hmap, &hash_node->node);
> +        sbrec_rule_delete(hash_node->rule);
>         free(hash_node);
>     }
> -    hmap_destroy(&pc.pipeline_hmap);
> +    hmap_destroy(&pc.rule_hmap);
> }
> 
> static bool
> @@ -610,7 +610,7 @@ ovnnb_db_changed(struct northd_context *ctx)
>     VLOG_DBG("ovn-nb db contents have changed.");
> 
>     set_port_bindings(ctx);
> -    build_pipeline(ctx);
> +    build_rule(ctx);
> }
> 
> /*
> @@ -804,16 +804,16 @@ main(int argc, char *argv[])
>     ovsdb_idl_add_column(ovnsb_idl, &sbrec_port_binding_col_parent_port);
>     ovsdb_idl_add_column(ovnsb_idl, &sbrec_port_binding_col_logical_datapath);
>     ovsdb_idl_add_column(ovnsb_idl, &sbrec_port_binding_col_tunnel_key);
> -    ovsdb_idl_add_column(ovnsb_idl, &sbrec_pipeline_col_logical_datapath);
> -    ovsdb_idl_omit_alert(ovnsb_idl, &sbrec_pipeline_col_logical_datapath);
> -    ovsdb_idl_add_column(ovnsb_idl, &sbrec_pipeline_col_table_id);
> -    ovsdb_idl_omit_alert(ovnsb_idl, &sbrec_pipeline_col_table_id);
> -    ovsdb_idl_add_column(ovnsb_idl, &sbrec_pipeline_col_priority);
> -    ovsdb_idl_omit_alert(ovnsb_idl, &sbrec_pipeline_col_priority);
> -    ovsdb_idl_add_column(ovnsb_idl, &sbrec_pipeline_col_match);
> -    ovsdb_idl_omit_alert(ovnsb_idl, &sbrec_pipeline_col_match);
> -    ovsdb_idl_add_column(ovnsb_idl, &sbrec_pipeline_col_actions);
> -    ovsdb_idl_omit_alert(ovnsb_idl, &sbrec_pipeline_col_actions);
> +    ovsdb_idl_add_column(ovnsb_idl, &sbrec_rule_col_logical_datapath);
> +    ovsdb_idl_omit_alert(ovnsb_idl, &sbrec_rule_col_logical_datapath);
> +    ovsdb_idl_add_column(ovnsb_idl, &sbrec_rule_col_table_id);
> +    ovsdb_idl_omit_alert(ovnsb_idl, &sbrec_rule_col_table_id);
> +    ovsdb_idl_add_column(ovnsb_idl, &sbrec_rule_col_priority);
> +    ovsdb_idl_omit_alert(ovnsb_idl, &sbrec_rule_col_priority);
> +    ovsdb_idl_add_column(ovnsb_idl, &sbrec_rule_col_match);
> +    ovsdb_idl_omit_alert(ovnsb_idl, &sbrec_rule_col_match);
> +    ovsdb_idl_add_column(ovnsb_idl, &sbrec_rule_col_actions);
> +    ovsdb_idl_omit_alert(ovnsb_idl, &sbrec_rule_col_actions);
> 
>     /*
>      * The loop here just runs the IDL in a loop waiting for the seqno to
> diff --git a/ovn/ovn-architecture.7.xml b/ovn/ovn-architecture.7.xml
> index 5d95e26..0334d82 100644
> --- a/ovn/ovn-architecture.7.xml
> +++ b/ovn/ovn-architecture.7.xml
> @@ -346,7 +346,7 @@
>       <code>ovn-northd</code> receives the OVN Northbound database update.
>       In turn, it makes the corresponding updates to the OVN Southbound
>       database, by adding rows to the OVN Southbound database
> -      <code>Pipeline</code> table to reflect the new port, e.g. add a
> +      <code>Rule</code> table to reflect the new port, e.g. add a
>       flow to recognize that packets destined to the new port's MAC
>       address should be delivered to it, and update the flow that
>       delivers broadcast and multicast packets to include the new port.
> @@ -357,7 +357,7 @@
> 
>     <li>
>       On every hypervisor, <code>ovn-controller</code> receives the
> -      <code>Pipeline</code> table updates that <code>ovn-northd</code> made
> +      <code>Rule</code> table updates that <code>ovn-northd</code> made
>       in the previous step.  As long as the VM that owns the VIF is powered off,
>       <code>ovn-controller</code> cannot do much; it cannot, for example,
>       arrange to send packets to or receive packets from the VIF, because the
> @@ -404,7 +404,7 @@
>       <code>Binding</code> table.  This provides <code>ovn-controller</code>
>       the physical location of the logical port, so each instance updates the
>       OpenFlow tables of its switch (based on logical datapath flows in the OVN
> -      DB <code>Pipeline</code> table) so that packets to and from the VIF can
> +      DB <code>Rule</code> table) so that packets to and from the VIF can
>       be properly handled via tunnels.
>     </li>
> 
> @@ -444,13 +444,13 @@
>       <code>ovn-northd</code> receives the OVN Northbound update and in turn
>       updates the OVN Southbound database accordingly, by removing or
>       updating the rows from the OVN Southbound database
> -      <code>Pipeline</code> table and <code>Binding</code> table that
> +      <code>Rule</code> table and <code>Binding</code> table that
>       were related to the now-destroyed VIF.
>     </li>
> 
>     <li>
>       On every hypervisor, <code>ovn-controller</code> receives the
> -      <code>Pipeline</code> table updates that <code>ovn-northd</code> made
> +      <code>Rule</code> table updates that <code>ovn-northd</code> made
>       in the previous step.  <code>ovn-controller</code> updates OpenFlow tables
>       to reflect the update, although there may not be much to do, since the VIF
>       had already become unreachable when it was removed from the
> @@ -541,7 +541,7 @@
>       <code>ovn-northd</code> receives the OVN Northbound database update.
>       In turn, it makes the corresponding updates to the OVN Southbound
>       database, by adding rows to the OVN Southbound database's
> -      <code>Pipeline</code> table to reflect the new port and also by
> +      <code>Rule</code> table to reflect the new port and also by
>       creating a new row in the <code>Binding</code> table and
>       populating all its columns except the column that identifies the
>       <code>chassis</code>.
> @@ -582,16 +582,16 @@
>       <code>ovn-northd</code> receives the OVN Northbound update and in turn
>       updates the OVN Southbound database accordingly, by removing or
>       updating the rows from the OVN Southbound database
> -      <code>Pipeline</code> table that were related to the now-destroyed
> +      <code>Rule</code> table that were related to the now-destroyed
>       CIF.  It also deletes the row in the <code>Binding</code> table
>       for that CIF.
>     </li>
> 
>     <li>
>       On every hypervisor, <code>ovn-controller</code> receives the
> -      <code>Pipeline</code> table updates that <code>ovn-northd</code> made
> -      in the previous step.  <code>ovn-controller</code> updates OpenFlow tables
> -      to reflect the update.
> +      <code>Rule</code> table updates that <code>ovn-northd</code> made in the
> +      previous step.  <code>ovn-controller</code> updates OpenFlow tables to
> +      reflect the update.
>     </li>
>   </ol>
> 
> diff --git a/ovn/ovn-nb.xml b/ovn/ovn-nb.xml
> index 032e23d..d953fa5 100644
> --- a/ovn/ovn-nb.xml
> +++ b/ovn/ovn-nb.xml
> @@ -202,9 +202,9 @@
> 
>     <column name="match">
>       The packets that the ACL should match, in the same expression
> -      language used for the <ref column="match" table="Pipeline"
> +      language used for the <ref column="match" table="Rule"
>       db="OVN_Southbound"/> column in the OVN Southbound database's <ref
> -      table="Pipeline" db="OVN_Southbound"/> table.  Match
> +      table="Rule" db="OVN_Southbound"/> table.  Match
>       <code>inport</code> and <code>outport</code> against names of
>       logical ports within <ref column="lswitch"/> to implement ingress
>       and egress ACLs, respectively.  In logical switches connected to
> diff --git a/ovn/ovn-sb.ovsschema b/ovn/ovn-sb.ovsschema
> index 4a2df47..add908b 100644
> --- a/ovn/ovn-sb.ovsschema
> +++ b/ovn/ovn-sb.ovsschema
> @@ -32,7 +32,7 @@
>                                               "value": {"type": "string"},
>                                               "min": 0,
>                                               "max": "unlimited"}}}},
> -        "Pipeline": {
> +        "Rule": {
>             "columns": {
>                 "logical_datapath": {"type": "uuid"},
>                 "table_id": {"type": {"key": {"type": "integer",
> diff --git a/ovn/ovn-sb.xml b/ovn/ovn-sb.xml
> index 13e5145..2f2a55e 100644
> --- a/ovn/ovn-sb.xml
> +++ b/ovn/ovn-sb.xml
> @@ -74,7 +74,7 @@
>   </p>
> 
>   <p>
> -    The <ref table="Pipeline"/> table is currently the only LN table.
> +    The <ref table="Rule"/> table is currently the only LN table.
>   </p>
> 
>   <h3>Bindings data</h3>
> @@ -198,7 +198,7 @@
>     </column>
>   </table>
> 
> -  <table name="Pipeline" title="Logical Network Pipeline">
> +  <table name="Rule" title="Logical Network Rule">
>     <p>
>       Each row in this table represents one logical flow.  The cloud management
>       system, via its OVN integration, populates this table with logical flows
> @@ -663,7 +663,7 @@
>     <column name="logical_datapath">
>       The logical datapath to which the logical port belongs.  A logical
>       datapath implements a logical pipeline via logical flows in the <ref
> -      table="Pipeline"/> table.  (No table represents a logical datapath.)
> +      table="Rule"/> table.  (No table represents a logical datapath.)
>     </column>
> 
>     <column name="logical_port">
> -- 
> 2.1.3
> 



