[ovs-dev] [PATCH v1] netdev-dpdk: Execute QoS Checking before copying to mbuf
Stokes, Ian
ian.stokes at intel.com
Tue Aug 8 09:08:13 UTC 2017
Hi Darrell,
I am. I've had a cursory look over it already, but I was planning to do a deeper dive later this week, as I have a few concerns about the approach.
Ian
> -----Original Message-----
> From: Darrell Ball [mailto:dball at vmware.com]
> Sent: Tuesday, August 8, 2017 3:07 AM
> To: Gao Zhenyu <sysugaozhenyu at gmail.com>; Ben Pfaff <blp at ovn.org>;
> Chandran, Sugesh <sugesh.chandran at intel.com>; ovs dev
> <dev at openvswitch.org>; Stokes, Ian <ian.stokes at intel.com>
> Subject: Re: [ovs-dev] [PATCH v1] netdev-dpdk: Execute QoS Checking before
> copying to mbuf
>
> Hi Ian
>
> Are you interested in reviewing this?
>
> Thanks Darrell
>
> -----Original Message-----
> From: <ovs-dev-bounces at openvswitch.org> on behalf of Gao Zhenyu
> <sysugaozhenyu at gmail.com>
> Date: Monday, August 7, 2017 at 6:36 PM
> To: Ben Pfaff <blp at ovn.org>, "Chandran, Sugesh"
> <sugesh.chandran at intel.com>, ovs dev <dev at openvswitch.org>
> Subject: Re: [ovs-dev] [PATCH v1] netdev-dpdk: Execute QoS Checking before
> copying to mbuf
>
> ping....
>
> Thanks
> Zhenyu Gao
>
> 2017-07-25 18:27 GMT+08:00 Zhenyu Gao <sysugaozhenyu at gmail.com>:
>
> > In the dpdk_do_tx_copy function, all packets were copied to mbuf first,
> > but QoS checking may drop some of them.
> > Move the QoS checking in front of copying data to mbuf; this helps to
> > reduce useless copying.
> >
> > Signed-off-by: Zhenyu Gao <sysugaozhenyu at gmail.com>
> > ---
> > lib/netdev-dpdk.c | 55 ++++++++++++++++++++++++++++++
> > ++++++-------------------
> > 1 file changed, 36 insertions(+), 19 deletions(-)
> >
> > diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
> > index ea17b97..bb8bd8f 100644
> > --- a/lib/netdev-dpdk.c
> > +++ b/lib/netdev-dpdk.c
> > @@ -258,7 +258,7 @@ struct dpdk_qos_ops {
> > * For all QoS implementations it should always be non-null.
> > */
> > int (*qos_run)(struct qos_conf *qos_conf, struct rte_mbuf
> **pkts,
> > - int pkt_cnt);
> > + int pkt_cnt, bool may_steal);
> > };
> >
> > /* dpdk_qos_ops for each type of user space QoS implementation */
> > @@ -1438,7 +1438,8 @@ netdev_dpdk_policer_pkt_handle(struct
> > rte_meter_srtcm *meter,
> >
> > static int
> > netdev_dpdk_policer_run(struct rte_meter_srtcm *meter,
> > - struct rte_mbuf **pkts, int pkt_cnt)
> > + struct rte_mbuf **pkts, int pkt_cnt,
> > + bool may_steal)
> > {
> > int i = 0;
> > int cnt = 0;
> > @@ -1454,7 +1455,9 @@ netdev_dpdk_policer_run(struct rte_meter_srtcm
> > *meter,
> > }
> > cnt++;
> > } else {
> > - rte_pktmbuf_free(pkt);
> > + if (may_steal) {
> > + rte_pktmbuf_free(pkt);
> > + }
> > }
> > }
> >
> > @@ -1463,12 +1466,13 @@ netdev_dpdk_policer_run(struct
> rte_meter_srtcm
> > *meter,
> >
> > static int
> > ingress_policer_run(struct ingress_policer *policer, struct
> rte_mbuf
> > **pkts,
> > - int pkt_cnt)
> > + int pkt_cnt, bool may_steal)
> > {
> > int cnt = 0;
> >
> > rte_spinlock_lock(&policer->policer_lock);
> > - cnt = netdev_dpdk_policer_run(&policer->in_policer, pkts,
> pkt_cnt);
> > + cnt = netdev_dpdk_policer_run(&policer->in_policer, pkts,
> > + pkt_cnt, may_steal);
> > rte_spinlock_unlock(&policer->policer_lock);
> >
> > return cnt;
> > @@ -1572,7 +1576,7 @@ netdev_dpdk_vhost_rxq_recv(struct netdev_rxq
> *rxq,
> > dropped = nb_rx;
> > nb_rx = ingress_policer_run(policer,
> > (struct rte_mbuf **) batch-
> >packets,
> > - nb_rx);
> > + nb_rx, true);
> > dropped -= nb_rx;
> > }
> >
> > @@ -1609,7 +1613,7 @@ netdev_dpdk_rxq_recv(struct netdev_rxq *rxq,
> struct
> > dp_packet_batch *batch)
> > dropped = nb_rx;
> > nb_rx = ingress_policer_run(policer,
> > (struct rte_mbuf **) batch-
> >packets,
> > - nb_rx);
> > + nb_rx, true);
> > dropped -= nb_rx;
> > }
> >
> > @@ -1627,13 +1631,13 @@ netdev_dpdk_rxq_recv(struct netdev_rxq *rxq,
> > struct dp_packet_batch *batch)
> >
> > static inline int
> > netdev_dpdk_qos_run(struct netdev_dpdk *dev, struct rte_mbuf
> **pkts,
> > - int cnt)
> > + int cnt, bool may_steal)
> > {
> > struct qos_conf *qos_conf = ovsrcu_get(struct qos_conf *,
> > &dev->qos_conf);
> >
> > if (qos_conf) {
> > rte_spinlock_lock(&qos_conf->lock);
> > - cnt = qos_conf->ops->qos_run(qos_conf, pkts, cnt);
> > + cnt = qos_conf->ops->qos_run(qos_conf, pkts, cnt,
> may_steal);
> > rte_spinlock_unlock(&qos_conf->lock);
> > }
> >
> > @@ -1707,7 +1711,7 @@ __netdev_dpdk_vhost_send(struct netdev
> *netdev, int
> > qid,
> >
> > cnt = netdev_dpdk_filter_packet_len(dev, cur_pkts, cnt);
> > /* Check has QoS has been configured for the netdev */
> > - cnt = netdev_dpdk_qos_run(dev, cur_pkts, cnt);
> > + cnt = netdev_dpdk_qos_run(dev, cur_pkts, cnt, true);
> > dropped = total_pkts - cnt;
> >
> > do {
> > @@ -1753,10 +1757,22 @@ dpdk_do_tx_copy(struct netdev *netdev, int
> qid,
> > struct dp_packet_batch *batch)
> > #endif
> > struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
> > struct rte_mbuf *pkts[PKT_ARRAY_SIZE];
> > + int txcnt_eth = batch->count;
> > int dropped = 0;
> > int newcnt = 0;
> > int i;
> >
> > + if (dev->type != DPDK_DEV_VHOST) {
> > + /* Check if QoS has been configured for this netdev. */
> > + txcnt_eth = netdev_dpdk_qos_run(dev,
> > + (struct rte_mbuf
> > **)batch->packets,
> > + txcnt_eth, false);
> > + if (txcnt_eth == 0) {
> > + dropped = batch->count;
> > + goto out;
> > + }
> > + }
> > +
> > dp_packet_batch_apply_cutlen(batch);
> >
> > for (i = 0; i < batch->count; i++) {
> > @@ -1785,21 +1801,20 @@ dpdk_do_tx_copy(struct netdev *netdev, int
> qid,
> > struct dp_packet_batch *batch)
> > rte_pktmbuf_pkt_len(pkts[newcnt]) = size;
> >
> > newcnt++;
> > + if (dev->type != DPDK_DEV_VHOST && newcnt >= txcnt_eth) {
> > + dropped += batch->count - i - 1;
> > + break;
> > + }
> > }
> >
> > if (dev->type == DPDK_DEV_VHOST) {
> > __netdev_dpdk_vhost_send(netdev, qid, (struct dp_packet **)
> pkts,
> > newcnt);
> > } else {
> > - unsigned int qos_pkts = newcnt;
> > -
> > - /* Check if QoS has been configured for this netdev. */
> > - newcnt = netdev_dpdk_qos_run(dev, pkts, newcnt);
> > -
> > - dropped += qos_pkts - newcnt;
> > dropped += netdev_dpdk_eth_tx_burst(dev, qid, pkts,
> newcnt);
> > }
> >
> > +out:
> > if (OVS_UNLIKELY(dropped)) {
> > rte_spinlock_lock(&dev->stats_lock);
> > dev->stats.tx_dropped += dropped;
> > @@ -1852,7 +1867,7 @@ netdev_dpdk_send__(struct netdev_dpdk *dev,
> int qid,
> > dp_packet_batch_apply_cutlen(batch);
> >
> > cnt = netdev_dpdk_filter_packet_len(dev, pkts, cnt);
> > - cnt = netdev_dpdk_qos_run(dev, pkts, cnt);
> > + cnt = netdev_dpdk_qos_run(dev, pkts, cnt, true);
> > dropped = batch->count - cnt;
> >
> > dropped += netdev_dpdk_eth_tx_burst(dev, qid, pkts, cnt);
> > @@ -3061,13 +3076,15 @@ egress_policer_qos_is_equal(const struct
> qos_conf
> > *conf,
> > }
> >
> > static int
> > -egress_policer_run(struct qos_conf *conf, struct rte_mbuf **pkts,
> int
> > pkt_cnt)
> > +egress_policer_run(struct qos_conf *conf, struct rte_mbuf **pkts,
> int
> > pkt_cnt,
> > + bool may_steal)
> > {
> > int cnt = 0;
> > struct egress_policer *policer =
> > CONTAINER_OF(conf, struct egress_policer, qos_conf);
> >
> > - cnt = netdev_dpdk_policer_run(&policer->egress_meter, pkts,
> pkt_cnt);
> > + cnt = netdev_dpdk_policer_run(&policer->egress_meter, pkts,
> > + pkt_cnt, may_steal);
> >
> > return cnt;
> > }
> > --
> > 1.8.3.1
> >
> >
> _______________________________________________
> dev mailing list
> dev at openvswitch.org
> https://urldefense.proofpoint.com/v2/url?u=https-
> 3A__mail.openvswitch.org_mailman_listinfo_ovs-
> 2Ddev&d=DwICAg&c=uilaK90D4TOVoH58JNXRgQ&r=BVhFA09CGX7JQ5Ih-
> uZnsw&m=roZOnbVuoZgmZiJ5Yl6u9BT9dmjoSQBFIScSFBX2pTE&s=F-
> jBBvlqt9jYLPkVbaiqJoGKjt3zYp1sTA1LtUK6AJ0&e=
>
More information about the dev
mailing list