I was wondering whether, when we run multiple calls of async_resolve, the io_context in practice runs only one request at a time due to a known limitation.
Here's a sample where I'd expect each resolve request to yield while it waits for the DNS response, move on to the next request, and so on.
But from looking at Wireshark it appears that the next resolve is only performed after the previous one finishes.
boost::asio::ip::tcp::resolver resolver(io_ctx_);
const auto results1 = resolver.async_resolve(host1_, std::to_string(port1_), yield);
const auto results2 = resolver.async_resolve(host2_, std::to_string(port2_), yield);
const auto results3 = resolver.async_resolve(host3_, std::to_string(port3_), yield);
Does io_context handle only one resolve request at a time?
You specifically yield and resume the coroutine for each invocation: your own coroutine, not the io_context, is what explicitly serializes the invocations.
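For contrast, here's a minimal sketch (not in the original post) that initiates the same three resolves with plain completion handlers: nothing awaits between the initiations, so all three operations are started before any of them completes (whether the underlying lookups overlap on the wire still depends on the resolver's internal implementation):
#include <boost/asio.hpp>
#include <iostream>
#include <string>
namespace asio = boost::asio;
using asio::ip::tcp;
int main() {
    asio::io_context ioc;
    tcp::resolver resolver(ioc);

    std::pair<std::string, std::string> queries[]{
        {"www.example.com", "80"}, {"localhost", "53"}, {"httpbin.org", "443"}};

    // All three resolves are initiated here, back to back, before ioc.run()
    // dispatches a single completion handler.
    for (auto const& q : queries)
        resolver.async_resolve(q.first, q.second,
            [host = q.first](boost::system::error_code ec,
                             tcp::resolver::results_type results) {
                std::cout << host << ": "
                          << (ec ? ec.message()
                                 : std::to_string(results.size()) + " endpoints")
                          << "\n";
            });

    ioc.run();
}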
Here's a direct, self-contained repro of the serialized behaviour:
#include <boost/asio.hpp>
#include <boost/asio/spawn.hpp>
#include <iostream>
namespace asio = boost::asio;
using asio::ip::tcp;
void foo(asio::yield_context yield) {
    struct {
        std::string host;
        uint16_t    port;
        tcp::resolver::results_type results;
    } queries[]{
        {"www.example.com", 80, {}},
        {"localhost", 53, {}},
        {"httpbin.org", 443, {}},
    };

    tcp::resolver resolver(yield.get_executor());

    // Each async_resolve suspends the coroutine until that resolve completes,
    // so the three lookups run strictly one after another.
    for (auto& [h, p, r] : queries)
        r = resolver.async_resolve(h, std::to_string(p), yield);

    for (auto& q : queries)
        for (auto& ep : q.results)
            std::cout << ep.host_name() << ":" << ep.service_name() << " -> " << ep.endpoint() << std::endl;
}

int main() {
    asio::io_context ioc;
    asio::spawn(ioc, foo);
    ioc.run();
}
Printing e.g.
www.example.com:80 -> 93.184.216.34:80
www.example.com:80 -> [2606:2800:220:1:248:1893:25c8:1946]:80
localhost:53 -> 127.0.0.1:53
httpbin.org:443 -> 3.221.184.26:443
httpbin.org:443 -> 35.173.166.175:443
httpbin.org:443 -> 54.165.134.201:443
httpbin.org:443 -> 174.129.27.151:443
Using handler tracking, you can see that each resolve operation only starts after the previous one has completed.
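(If you want to reproduce such a trace yourself, Asio's handler tracking is enabled by defining BOOST_ASIO_ENABLE_HANDLER_TRACKING before any Asio header is included; the trace is written to stderr:)
// Must come before the first Asio include, or pass
// -DBOOST_ASIO_ENABLE_HANDLER_TRACKING on the compiler command line.
#define BOOST_ASIO_ENABLE_HANDLER_TRACKING
#include <boost/asio.hpp>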
There are many ways to fix it, but let me try the "new" use_promise approach here:
#include <boost/asio.hpp>
#include <boost/asio/experimental/promise.hpp>
#include <boost/asio/experimental/use_promise.hpp>
#include <boost/asio/spawn.hpp>
#include <iostream>
namespace asio = boost::asio;
using asio::experimental::promise;
using asio::experimental::use_promise;
using asio::ip::tcp;
using boost::system::error_code;
void foo(asio::yield_context yield) {
    struct {
        std::string host;
        uint16_t    port;
    } queries[]{
        {"www.example.com", 80},
        {"localhost", 53},
        {"httpbin.org", 443},
    };

    tcp::resolver resolver(yield.get_executor());

    // Eagerly start every resolve; the promises let us collect the results
    // later without serializing the initiations.
    std::vector<promise<void(error_code, tcp::resolver::results_type)>> pp;
    for (auto& [h, p] : queries)
        pp.emplace_back(resolver.async_resolve(h, std::to_string(p), use_promise));

    for (auto& p : pp)
        for (auto& ep : p(yield))
            std::cout << ep.host_name() << ":" << ep.service_name() << " -> " << ep.endpoint() << std::endl;
}

int main() {
    asio::io_context ioc;
    asio::spawn(ioc, foo);
    ioc.run();
}
Same output, but handler tracking now shows all three resolve operations being initiated before any of them completes.
Using awaitable operators with C++20 coroutines is usually friendlier, and doesn't require Boost Context (or Boost Coroutine):
#include <boost/asio.hpp>
#include <boost/asio/experimental/awaitable_operators.hpp>
#include <iostream>
namespace asio = boost::asio;
using namespace asio::experimental::awaitable_operators;
using asio::ip::tcp;
asio::awaitable<void> foo() {
    tcp::resolver resolver(co_await asio::this_coro::executor);

    // Create the three lazy operations first, then await them as a group so
    // they run concurrently instead of one after another.
    auto op1 = resolver.async_resolve("www.example.com", std::to_string(80), asio::use_awaitable);
    auto op2 = resolver.async_resolve("localhost", std::to_string(53), asio::use_awaitable);
    auto op3 = resolver.async_resolve("httpbin.org", std::to_string(443), asio::use_awaitable);

    auto [r1, r2, r3] = co_await (std::move(op1) && std::move(op2) && std::move(op3));

    for (auto&& eps : {r1, r2, r3})
        for (auto&& ep : eps)
            std::cout << ep.host_name() << ":" << ep.service_name() << " -> " << ep.endpoint() << std::endl;
}

int main() {
    asio::io_context ioc;
    co_spawn(ioc, foo, asio::detached);
    ioc.run();
}
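Note that operator&& completes when all of the operations have completed successfully; if any of them fails with an exception, the remaining operations are cancelled and the exception propagates out of the co_await.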
Or use parallel groups; specifically, a ranged parallel group makes the number of operations dynamic:
#include <boost/asio.hpp>
#include <boost/asio/experimental/parallel_group.hpp>
#include <iostream>
#include <ranges>
namespace r = std::ranges;
namespace v = r::views;
namespace asio = boost::asio;
using asio::ip::tcp;
asio::awaitable<void> foo() {
    struct {
        std::string host;
        uint16_t    port;
    } queries[]{
        {"www.example.com", 80},
        {"localhost", 53},
        {"httpbin.org", 443},
    };

    tcp::resolver resolver(co_await asio::this_coro::executor);

    // Turn every query into a deferred resolve operation, then launch them all
    // as one ranged parallel group and wait for the whole group to finish.
    auto start_resolve = [&resolver](auto& q) {
        return resolver.async_resolve(q.host, std::to_string(q.port), asio::deferred);
    };

    auto r  = queries | v::transform(start_resolve);
    auto pg = asio::experimental::make_parallel_group(std::vector(r.begin(), r.end()));

    auto results = co_await pg.async_wait(asio::experimental::wait_for_all{}, asio::deferred);

    for (auto&& eps : get<2>(results))
        for (auto&& ep : eps)
            std::cout << ep.host_name() << ":" << ep.service_name() << " -> " << ep.endpoint() << std::endl;
}

int main() {
    asio::io_context ioc;
    co_spawn(ioc, foo, asio::detached);
    ioc.run();
}
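For reference (not shown in the sample above): with a ranged parallel group and wait_for_all, async_wait completes with a tuple of (completion order, per-operation error codes, per-operation results), which is why the results are taken with get<2>. Inside foo you could inspect the error codes first, for example:
    auto& errors = get<1>(results); // std::vector<boost::system::error_code>, one per resolve
    for (std::size_t i = 0; i < errors.size(); ++i)
        if (errors[i])
            std::cout << queries[i].host << " failed: " << errors[i].message() << std::endl;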