[ovs-dev] [PATCH v3 02/11] dpif-netdev: Keep count of elements in port->rxq[].
Daniele Di Proietto
diproiettod at vmware.com
Wed Mar 16 23:35:58 UTC 2016
On 16/03/2016 07:13, "Ilya Maximets" <i.maximets at samsung.com> wrote:
>IMHO, it's confusing to have so many similar variables in all
>structures.
>Without this patch we already have:
>
>netdev->n_rxq
>netdev_dpdk->requested_n_rxq
>netdev_dpdk->real_n_rxq
I understand your concern, and in general I agree, but in this case
I still think it's worth adding another variable for the following
reasons:
* 'port->n_rxq' is there to count the elements of the 'port->rxq'
array. dpif-netdev is responsible for freeing each element.
In my opinion it's cleaner to keep a local count than to rely
on netdev_n_rxq() not to change.
* Except for 'netdev->n_rxq' all these variables are local to some
module, therefore, IMHO, more manageable. 'netdev->requested_n_rxq'
was more confusing, and this series removes it.
>
>Another ways to get same variable:
>netdev_dpdk->up.n_rxq
>
>Plus, all netdev* variables are named in a complete chaos:
>'netdev', 'netdev_', 'dev', 'vhost_dev'...
This is mostly unrelated to this series, but now's as good a time as
any to fix it, so I included a patch to introduce a naming convention
in netdev-dpdk.
>
>Almost same situation with TX queues.
>So, is it really necessary to introduce 'yet_another_n_rxq'?
>
>As far as I understand, you introduced it to simplify do_destroy_port.
>Am I right?
>
>But, reconfigure function clears all removed rxqs:
> port->rxq[i] = NULL;
>So, there will be no problem to free all remaining queues.
Sure, but in general I'd prefer to be able to call do_destroy_port()
anywhere in the code and expect it to do its job properly.
>
>What do you think?
I left it as it is for now. If you feel strongly about it, I'd prefer
we tackle this problem by simplifying/clarifying the behavior of the
rxq/txq
variables in netdev-dpdk.
Thanks
>
>Best regards, Ilya Maximets.
>
>
>On 16.03.2016 01:29, Daniele Di Proietto wrote:
>> This will ease deleting a port with no open rxqs.
>>
>> Signed-off-by: Daniele Di Proietto <diproiettod at vmware.com>
>> ---
>> lib/dpif-netdev.c | 24 +++++++++++++-----------
>> 1 file changed, 13 insertions(+), 11 deletions(-)
>>
>> diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
>> index 9c30dad..a2281b8 100644
>> --- a/lib/dpif-netdev.c
>> +++ b/lib/dpif-netdev.c
>> @@ -250,6 +250,7 @@ struct dp_netdev_port {
>> struct netdev *netdev;
>> struct cmap_node node; /* Node in dp_netdev's 'ports'. */
>> struct netdev_saved_flags *sf;
>> + unsigned n_rxq; /* Number of elements in 'rxq' */
>> struct netdev_rxq **rxq;
>> struct ovs_refcount ref_cnt;
>> char *type; /* Port type as requested by user. */
>> @@ -1151,11 +1152,12 @@ do_add_port(struct dp_netdev *dp, const char
>>*devname, const char *type,
>> port = xzalloc(sizeof *port);
>> port->port_no = port_no;
>> port->netdev = netdev;
>> - port->rxq = xmalloc(sizeof *port->rxq * netdev_n_rxq(netdev));
>> + port->n_rxq = netdev_n_rxq(netdev);
>> + port->rxq = xmalloc(sizeof *port->rxq * port->n_rxq);
>> port->type = xstrdup(type);
>> port->latest_requested_n_rxq = netdev_requested_n_rxq(netdev);
>>
>> - for (i = 0; i < netdev_n_rxq(netdev); i++) {
>> + for (i = 0; i < port->n_rxq; i++) {
>> error = netdev_rxq_open(netdev, &port->rxq[i], i);
>> if (error) {
>> VLOG_ERR("%s: cannot receive packets on this network
>>device (%s)",
>> @@ -1288,13 +1290,12 @@ static void
>> port_unref(struct dp_netdev_port *port)
>> {
>> if (port && ovs_refcount_unref_relaxed(&port->ref_cnt) == 1) {
>> - int n_rxq = netdev_n_rxq(port->netdev);
>> int i;
>>
>> netdev_close(port->netdev);
>> netdev_restore_flags(port->sf);
>>
>> - for (i = 0; i < n_rxq; i++) {
>> + for (i = 0; i < port->n_rxq; i++) {
>> netdev_rxq_close(port->rxq[i]);
>> }
>> free(port->rxq);
>> @@ -2461,6 +2462,7 @@ dpif_netdev_pmd_set(struct dpif *dpif, const char
>>*cmask)
>> netdev_rxq_close(port->rxq[i]);
>> port->rxq[i] = NULL;
>> }
>> + port->n_rxq = 0;
>>
>> /* Sets the new rx queue config. */
>> err = netdev_set_multiq(port->netdev,
>> @@ -2474,9 +2476,9 @@ dpif_netdev_pmd_set(struct dpif *dpif, const char
>>*cmask)
>> }
>> port->latest_requested_n_rxq = requested_n_rxq;
>> /* If the set_multiq() above succeeds, reopens the
>>'rxq's. */
>> - port->rxq = xrealloc(port->rxq, sizeof *port->rxq
>> - * netdev_n_rxq(port->netdev));
>> - for (i = 0; i < netdev_n_rxq(port->netdev); i++) {
>> + port->n_rxq = netdev_n_rxq(port->netdev);
>> + port->rxq = xrealloc(port->rxq, sizeof *port->rxq *
>>port->n_rxq);
>> + for (i = 0; i < port->n_rxq; i++) {
>> netdev_rxq_open(port->netdev, &port->rxq[i], i);
>> }
>> }
>> @@ -2604,7 +2606,7 @@ dpif_netdev_run(struct dpif *dpif)
>> if (!netdev_is_pmd(port->netdev)) {
>> int i;
>>
>> - for (i = 0; i < netdev_n_rxq(port->netdev); i++) {
>> + for (i = 0; i < port->n_rxq; i++) {
>> dp_netdev_process_rxq_port(non_pmd, port,
>>port->rxq[i]);
>> }
>> }
>> @@ -2634,7 +2636,7 @@ dpif_netdev_wait(struct dpif *dpif)
>> if (!netdev_is_pmd(port->netdev)) {
>> int i;
>>
>> - for (i = 0; i < netdev_n_rxq(port->netdev); i++) {
>> + for (i = 0; i < port->n_rxq; i++) {
>> netdev_rxq_wait(port->rxq[i]);
>> }
>> }
>> @@ -3099,7 +3101,7 @@ dp_netdev_add_port_to_pmds(struct dp_netdev *dp,
>>struct dp_netdev_port *port)
>> /* Cannot create pmd threads for invalid numa node. */
>> ovs_assert(ovs_numa_numa_id_is_valid(numa_id));
>>
>> - for (i = 0; i < netdev_n_rxq(port->netdev); i++) {
>> + for (i = 0; i < port->n_rxq; i++) {
>> pmd = dp_netdev_less_loaded_pmd_on_numa(dp, numa_id);
>> if (!pmd) {
>> /* There is no pmd threads on this numa node. */
>> @@ -3167,7 +3169,7 @@ dp_netdev_set_pmds_on_numa(struct dp_netdev *dp,
>>int numa_id)
>> CMAP_FOR_EACH (port, node, &dp->ports) {
>> if (netdev_is_pmd(port->netdev)
>> && netdev_get_numa_id(port->netdev) == numa_id) {
>> - for (i = 0; i < netdev_n_rxq(port->netdev); i++) {
>> + for (i = 0; i < port->n_rxq; i++) {
>> /* Make thread-safety analyser happy. */
>> ovs_mutex_lock(&pmds[index]->poll_mutex);
>> dp_netdev_add_rxq_to_pmd(pmds[index], port,
>>port->rxq[i]);
>>
More information about the dev
mailing list