c++11, thread-pool, boost-asio

Boost asio thread_pool join does not wait for tasks to be finished


Consider the functions

#include <iostream>
#include <boost/bind.hpp>
#include <boost/asio.hpp>

void foo(const uint64_t begin, uint64_t *result)
{
    uint64_t prev[] = {begin, 0};
    for (uint64_t i = 0; i < 1000000000; ++i)
    {
        const auto tmp = (prev[0] + prev[1]) % 1000;
        prev[1] = prev[0];
        prev[0] = tmp;
    }
    *result = prev[0];
}

void batch(boost::asio::thread_pool &pool, const uint64_t a[])
{
    uint64_t r[] = {0, 0};
    boost::asio::post(pool, boost::bind(foo, a[0], &r[0]));
    boost::asio::post(pool, boost::bind(foo, a[1], &r[1]));

    pool.join();
    std::cerr << "foo(" << a[0] << "): " << r[0] << " foo(" << a[1] << "): " << r[1] << std::endl;
}

where foo is a simple "pure" function that performs a calculation on begin and writes the result to *result. It is called with different inputs from batch, and dispatching each call to another CPU core might be beneficial.

Now assume the batch function is called several tens of thousands of times, so a thread pool shared between all the sequential batch calls would be nice.

Trying this with (for the sake of simplicity, only 3 calls)

int main()
{
    boost::asio::thread_pool pool(2);

    const uint64_t a[] = {2, 4};
    batch(pool, a);

    const uint64_t b[] = {3, 5};
    batch(pool, b);

    const uint64_t c[] = {7, 9};
    batch(pool, c);
}

leads to the result

foo(2): 2 foo(4): 4
foo(3): 0 foo(5): 0
foo(7): 0 foo(9): 0

All three lines appear at the same time, although each computation of foo takes ~3 s. I assume that only the first join really waits for the pool to complete all jobs; the other batches print invalid results (the uninitialized values). What is the best practice for reusing the thread pool?


Solution

  • I just ran into this advanced executor example, which is hidden from the documentation:

    I realized just now that Asio comes with a fork_executor example which does exactly this: you can "group" tasks and join the executor (which represents that group) instead of the pool. I had missed this for the longest time since none of the executor examples are listed in the HTML documentation.

    So without further ado, here's that sample applied to your question:

    Live On Coliru

    #define BOOST_BIND_NO_PLACEHOLDERS
    #include <boost/asio/thread_pool.hpp>
    #include <boost/asio/ts/executor.hpp>
    #include <condition_variable>
    #include <memory>
    #include <mutex>
    #include <queue>
    #include <thread>
    
    // A fixed-size thread pool used to implement fork/join semantics. Functions
    // are scheduled using a simple FIFO queue. Implementing work stealing, or
    // using a queue based on atomic operations, are left as tasks for the reader.
    class fork_join_pool : public boost::asio::execution_context {
      public:
        // The constructor starts a thread pool with the specified number of
        // threads. Note that the thread_count is not a fixed limit on the pool's
        // concurrency. Additional threads may temporarily be added to the pool if
        // they join a fork_executor.
        explicit fork_join_pool(std::size_t thread_count = std::thread::hardware_concurrency()*2)
                : use_count_(1), threads_(thread_count)
        {
            try {
                // Ask each thread in the pool to dequeue and execute functions
                // until it is time to shut down, i.e. the use count is zero.
                for (thread_count_ = 0; thread_count_ < thread_count; ++thread_count_) {
                    boost::asio::dispatch(threads_, [&] {
                        std::unique_lock<std::mutex> lock(mutex_);
                        while (use_count_ > 0)
                            if (!execute_next(lock))
                                condition_.wait(lock);
                    });
                }
            } catch (...) {
                stop_threads();
                threads_.join();
                throw;
            }
        }
    
        // The destructor waits for the pool to finish executing functions.
        ~fork_join_pool() {
            stop_threads();
            threads_.join();
        }
    
      private:
        friend class fork_executor;
    
        // The base for all functions that are queued in the pool.
        struct function_base {
            std::shared_ptr<std::size_t> work_count_;
            void (*execute_)(std::shared_ptr<function_base>& p);
        };
    
        // Execute the next function from the queue, if any. Returns true if a
        // function was executed, and false if the queue was empty.
        bool execute_next(std::unique_lock<std::mutex>& lock) {
            if (queue_.empty())
                return false;
            auto p(queue_.front());
            queue_.pop();
            lock.unlock();
            execute(lock, p);
            return true;
        }
    
        // Execute a function and decrement the outstanding work.
        void execute(std::unique_lock<std::mutex>& lock,
                     std::shared_ptr<function_base>& p) {
            std::shared_ptr<std::size_t> work_count(std::move(p->work_count_));
            try {
                p->execute_(p);
                lock.lock();
                do_work_finished(work_count);
            } catch (...) {
                lock.lock();
                do_work_finished(work_count);
                throw;
            }
        }
    
        // Increment outstanding work.
        void
        do_work_started(const std::shared_ptr<std::size_t>& work_count) noexcept {
            if (++(*work_count) == 1)
                ++use_count_;
        }
    
        // Decrement outstanding work. Notify waiting threads if we run out.
        void
        do_work_finished(const std::shared_ptr<std::size_t>& work_count) noexcept {
            if (--(*work_count) == 0) {
                --use_count_;
                condition_.notify_all();
            }
        }
    
        // Dispatch a function, executing it immediately if the queue is already
        // loaded. Otherwise adds the function to the queue and wakes a thread.
        void do_dispatch(std::shared_ptr<function_base> p,
                         const std::shared_ptr<std::size_t>& work_count) {
            std::unique_lock<std::mutex> lock(mutex_);
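            // Heuristic: the queue counts as "loaded" once it holds more than
            // 16 pending functions per pool thread; in that case the function
            // runs inline in the calling thread instead of being queued.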
            if (queue_.size() > thread_count_ * 16) {
                do_work_started(work_count);
                lock.unlock();
                execute(lock, p);
            } else {
                queue_.push(p);
                do_work_started(work_count);
                condition_.notify_one();
            }
        }
    
        // Add a function to the queue and wake a thread.
        void do_post(std::shared_ptr<function_base> p,
                     const std::shared_ptr<std::size_t>& work_count) {
            std::lock_guard<std::mutex> lock(mutex_);
            queue_.push(p);
            do_work_started(work_count);
            condition_.notify_one();
        }
    
        // Ask all threads to shut down.
        void stop_threads() {
            std::lock_guard<std::mutex> lock(mutex_);
            --use_count_;
            condition_.notify_all();
        }
    
        std::mutex mutex_;
        std::condition_variable condition_;
        std::queue<std::shared_ptr<function_base>> queue_;
        std::size_t use_count_;
        std::size_t thread_count_;
        boost::asio::thread_pool threads_;
    };
    
    // A class that satisfies the Executor requirements. Every function or piece of
    // work associated with a fork_executor is part of a single, joinable group.
    class fork_executor {
      public:
        fork_executor(fork_join_pool& ctx)
                : context_(ctx), work_count_(std::make_shared<std::size_t>(0)) {}
    
        fork_join_pool& context() const noexcept { return context_; }
    
        void on_work_started() const noexcept {
            std::lock_guard<std::mutex> lock(context_.mutex_);
            context_.do_work_started(work_count_);
        }
    
        void on_work_finished() const noexcept {
            std::lock_guard<std::mutex> lock(context_.mutex_);
            context_.do_work_finished(work_count_);
        }
    
        template <class Func, class Alloc>
        void dispatch(Func&& f, const Alloc& a) const {
            auto p(std::allocate_shared<exFun<Func>>(
                typename std::allocator_traits<Alloc>::template rebind_alloc<char>(a),
                std::move(f), work_count_));
            context_.do_dispatch(p, work_count_);
        }
    
        template <class Func, class Alloc> void post(Func f, const Alloc& a) const {
            auto p(std::allocate_shared<exFun<Func>>(
                typename std::allocator_traits<Alloc>::template rebind_alloc<char>(a),
                std::move(f), work_count_));
            context_.do_post(p, work_count_);
        }
    
        template <class Func, class Alloc>
        void defer(Func&& f, const Alloc& a) const {
            post(std::forward<Func>(f), a);
        }
    
        friend bool operator==(const fork_executor& a, const fork_executor& b) noexcept {
            return a.work_count_ == b.work_count_;
        }
    
        friend bool operator!=(const fork_executor& a, const fork_executor& b) noexcept {
            return a.work_count_ != b.work_count_;
        }
    
        // Block until all work associated with the executor is complete. While it
        // is waiting, the thread may be borrowed to execute functions from the
        // queue.
        void join() const {
            std::unique_lock<std::mutex> lock(context_.mutex_);
            while (*work_count_ > 0)
                if (!context_.execute_next(lock))
                    context_.condition_.wait(lock);
        }
    
      private:
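        // Type-erasing wrapper: adapts a concrete callable Func to the
        // pool's function_base interface.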
        template <class Func> struct exFun : fork_join_pool::function_base {
            explicit exFun(Func f, const std::shared_ptr<std::size_t>& w)
                    : function_(std::move(f)) {
                work_count_ = w;
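                // Recover the concrete exFun type, move the callable out,
                // release the queued shared_ptr, then invoke the callable.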
                execute_ = [](std::shared_ptr<fork_join_pool::function_base>& p) {
                    Func tmp(std::move(static_cast<exFun*>(p.get())->function_));
                    p.reset();
                    tmp();
                };
            }
    
            Func function_;
        };
    
        fork_join_pool& context_;
        std::shared_ptr<std::size_t> work_count_;
    };
    
    // Helper class to automatically join a fork_executor when exiting a scope.
    class join_guard {
      public:
        explicit join_guard(const fork_executor& ex) : ex_(ex) {}
        join_guard(const join_guard&) = delete;
        join_guard(join_guard&&) = delete;
        ~join_guard() { ex_.join(); }
    
      private:
        fork_executor ex_;
    };
    
    //------------------------------------------------------------------------------
    
    #include <algorithm>
    #include <iostream>
    #include <random>
    #include <vector>
    #include <boost/bind.hpp>
    
    static void foo(const uint64_t begin, uint64_t *result)
    {
        uint64_t prev[] = {begin, 0};
        for (uint64_t i = 0; i < 1000000000; ++i) {
            const auto tmp = (prev[0] + prev[1]) % 1000;
            prev[1] = prev[0];
            prev[0] = tmp;
        }
        *result = prev[0];
    }
    
    void batch(fork_join_pool &pool, const uint64_t (&a)[2])
    {
        uint64_t r[] = {0, 0};
        {
            fork_executor fork(pool);
            join_guard join(fork);
            boost::asio::post(fork, boost::bind(foo, a[0], &r[0]));
            boost::asio::post(fork, boost::bind(foo, a[1], &r[1]));
            // fork.join(); // or let join_guard destructor run
        }
        std::cerr << "foo(" << a[0] << "): " << r[0] << " foo(" << a[1] << "): " << r[1] << std::endl;
    }
    
    int main() {
        fork_join_pool pool;
    
        batch(pool, {2, 4});
        batch(pool, {3, 5});
        batch(pool, {7, 9});
    }
    

    Prints:

    foo(2): 2 foo(4): 4
    foo(3): 503 foo(5): 505
    foo(7): 507 foo(9): 509
    

    Things to note:

    Executors can overlap/nest: you can use several joinable fork_executors on a single fork_join_pool, and joining one of them waits only for the group of tasks posted through that executor. You can get that sense easily when looking at the library example (which does a recursive divide-and-conquer merge sort).
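
    As a rough sketch of that idea (my own illustration, not part of the sample; the group names are hypothetical): two executors sharing one pool can be joined independently, because each join() only waits on its own executor's work count:

    fork_join_pool pool;
    fork_executor group_a(pool), group_b(pool);

    boost::asio::post(group_a, [] { /* work in group A */ });
    boost::asio::post(group_b, [] { /* work in group B */ });

    group_a.join(); // waits only for group A's tasks (may run others meanwhile)
    group_b.join(); // waits only for group B's tasks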