[ovs-dev] [PATCH] poll-loop: Drop malloc() from every poll_block().

Jarno Rajahalme jrajahalme at nicira.com
Mon Apr 13 20:16:16 UTC 2015


I’ll let Ben have his say on this, but see some comments below:

  Jarno

> On Apr 11, 2015, at 6:59 PM, Russell Bryant <rbryant at redhat.com> wrote:
> 
> The poll_block() function already has some state in thread-specific
> storage that's used as a registry for all of the fds to poll() on the
> next time poll_block() gets called.  poll_block() was calling
> malloc() and free() every time it was called to create the pollfd and
> wevents arrays needed to pass to poll().  We can optimize this a bit
> by caching the allocation on the existing thread-specific storage and
> just reallocating it to a larger size if necessary.
> 
> Signed-off-by: Russell Bryant <rbryant at redhat.com>
> ---
> lib/poll-loop.c | 46 ++++++++++++++++++++++++++++++++++------------
> 1 file changed, 34 insertions(+), 12 deletions(-)
> 
> 
> Just to clarify, this was not done in response to any problem I was
> experiencing.  it's just something I thought of as I was reading poll-loop.c to
> see how it worked.  It's theoretically a minor performance improvement, but I
> haven't attempted to measure it or anything.
> 
> 
> diff --git a/lib/poll-loop.c b/lib/poll-loop.c
> index 3c4b55c..51f67f2 100644
> --- a/lib/poll-loop.c
> +++ b/lib/poll-loop.c
> @@ -53,6 +53,16 @@ struct poll_loop {
>      * wake up immediately, or LLONG_MAX to wait forever. */
>     long long int timeout_when; /* In msecs as returned by time_msec(). */
>     const char *timeout_where;  /* Where 'timeout_when' was set. */
> +
> +    /* A cached array of pollfds that gets filled in each time poll_block() is
> +     * called and is expanded as needed. */
> +    struct pollfd *pollfds;
> +    size_t n_pollfds;
> +
> +    /* A cached array of wevents that gets filled in each time poll_block() is
> +     * called and is expanded as needed. */
> +    HANDLE *wevents;
> +    size_t n_wevents;
> };
> 
> static struct poll_loop *poll_loop(void);
> @@ -314,8 +324,6 @@ poll_block(void)
> {
>     struct poll_loop *loop = poll_loop();
>     struct poll_node *node;
> -    struct pollfd *pollfds;
> -    HANDLE *wevents = NULL;
>     int elapsed;
>     int retval;
>     int i;
> @@ -329,18 +337,34 @@ poll_block(void)
>     }
> 
>     timewarp_run();
> -    pollfds = xmalloc(hmap_count(&loop->poll_nodes) * sizeof *pollfds);
> +    if (loop->n_pollfds < hmap_count(&loop->poll_nodes)) {
> +        size_t bytes = hmap_count(&loop->poll_nodes) * sizeof *loop->pollfds;
> +        if (loop->pollfds) {
> +            loop->pollfds = xrealloc(loop->pollfds, bytes);
> +        } else {
> +            loop->pollfds = xmalloc(bytes);
> +        }
> +        loop->n_pollfds = hmap_count(&loop->poll_nodes);
> +    }
> 

The separate xmalloc() call is not necessary: xrealloc() behaves like xmalloc() when the pointer passed in is NULL, so this could become:

    if (loop->n_pollfds < hmap_count(&loop->poll_nodes)) {
        loop->n_pollfds = hmap_count(&loop->poll_nodes);
        loop->pollfds = xrealloc(loop->pollfds,
                                 loop->n_pollfds * sizeof *loop->pollfds);
    }



> #ifdef _WIN32
> -    wevents = xmalloc(hmap_count(&loop->poll_nodes) * sizeof *wevents);
> +    if (loop->n_wevents < hmap_count(&loop->poll_nodes)) {
> +        size_t bytes = hmap_count(&loop->poll_nodes) * sizeof *loop->wevents;
> +        if (loop->wevents) {
> +            loop->wevents = xrealloc(loop->wevents, bytes);
> +        } else {
> +            loop->wevents = xmalloc(bytes);
> +        }
> +        loop->n_wevents = hmap_count(&loop->poll_nodes);
> +    }
> #endif
> 

Same here: drop the xmalloc()/xrealloc() branch and call xrealloc() unconditionally.

>     /* Populate with all the fds and events. */
>     i = 0;
>     HMAP_FOR_EACH (node, hmap_node, &loop->poll_nodes) {
> -        pollfds[i] = node->pollfd;
> +        loop->pollfds[i] = node->pollfd;
> #ifdef _WIN32
> -        wevents[i] = node->wevent;
> +        loop->wevents[i] = node->wevent;
>         if (node->pollfd.fd && node->wevent) {
>             short int wsa_events = 0;
>             if (node->pollfd.events & POLLIN) {
> @@ -355,8 +379,8 @@ poll_block(void)
>         i++;
>     }
> 
> -    retval = time_poll(pollfds, hmap_count(&loop->poll_nodes), wevents,
> -                       loop->timeout_when, &elapsed);
> +    retval = time_poll(loop->pollfds, hmap_count(&loop->poll_nodes),
> +                       loop->wevents, loop->timeout_when, &elapsed);
>     if (retval < 0) {
>         static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
>         VLOG_ERR_RL(&rl, "poll: %s", ovs_strerror(-retval));
> @@ -365,8 +389,8 @@ poll_block(void)
>     } else if (get_cpu_usage() > 50 || VLOG_IS_DBG_ENABLED()) {
>         i = 0;
>         HMAP_FOR_EACH (node, hmap_node, &loop->poll_nodes) {
> -            if (pollfds[i].revents) {
> -                log_wakeup(node->where, &pollfds[i], 0);
> +            if (loop->pollfds[i].revents) {
> +                log_wakeup(node->where, &loop->pollfds[i], 0);
>             }
>             i++;
>         }
> @@ -375,8 +399,6 @@ poll_block(void)
>     free_poll_nodes(loop);
>     loop->timeout_when = LLONG_MAX;
>     loop->timeout_where = NULL;
> -    free(pollfds);
> -    free(wevents);
> 
>     /* Handle any pending signals before doing anything else. */
>     fatal_signal_run();
> -- 
> 2.1.0
> 
> _______________________________________________
> dev mailing list
> dev at openvswitch.org
> http://openvswitch.org/mailman/listinfo/dev




More information about the dev mailing list