asio
asio copied to clipboard
Data race reported by TSAN when using async_read_some_at
When I ran a TSAN test that reads a file at several different offsets using the async_read_some_at function, it reported a data race occurring in boost::asio::detail::io_uring_service::io_queue::set_result. Any idea what is wrong?
TSAN Report:
SUMMARY: ThreadSanitizer: data race x64-linux/include/boost/asio/detail/io_uring_service.hpp:66 in boost::asio::detail::io_uring_service::io_queue::set_result(int)
WARNING: ThreadSanitizer: data race (pid=19)
Write of size 4 at 0x7b4000000360 by thread T11:
#0 boost::asio::detail::io_uring_service::io_queue::set_result(int) /usr/vcpkg_installed/x64-linux/include/boost/asio/detail/io_uring_service.hpp:66 (unittests+0x56aea6) (BuildId: dda2fb272eb5a700df7c8aeed2328e009552bc6f)
#1 boost::asio::detail::io_uring_service::run(long, boost::asio::detail::op_queue<boost::asio::detail::scheduler_operation>&) /usr/vcpkg_installed/x64-linux/include/boost/asio/detail/impl/io_uring_service.ipp:480 (unittests+0x56aea6)
#2 boost::asio::detail::scheduler::do_run_one(boost::asio::detail::conditionally_enabled_mutex::scoped_lock&, boost::asio::detail::scheduler_thread_info&, boost::system::error_code const&) <null> (unittests+0x56dd5e) (BuildId: dda2fb272eb5a700df7c8aeed2328e009552bc6f)
#3 boost::asio::detail::scheduler::run(boost::system::error_code&) <null> (unittests+0x57747c) (BuildId: dda2fb272eb5a700df7c8aeed2328e009552bc6f)
#4 boost::asio::io_context::run() /usr/vcpkg_installed/x64-linux/include/boost/asio/impl/io_context.ipp:64 (unittests+0x50a671)
Previous write of size 8 at 0x7b4000000360 by main thread (mutexes: write M0):
#0 operator new(unsigned long) <null> (libtsan.so.2+0x895ca) (BuildId: 43a43f3c9aafc5756b2b836cf969881c9952df8c)
#1 boost::asio::detail::io_uring_service::register_io_object(boost::asio::detail::io_uring_service::io_object*&) <null> (unittests+0x57c662) (BuildId: dda2fb272eb5a700df7c8aeed2328e009552bc6f)
#2 boost::asio::detail::io_uring_descriptor_service::assign(boost::asio::detail::io_uring_descriptor_service::implementation_type&, int const&, boost::system::error_code&) /usr/vcpkg_installed/x64-linux/include/boost/asio/detail/impl/io_uring_descriptor_service.ipp:109 (unittests+0x57cd49) (BuildId: dda2fb272eb5a700df7c8aeed2328e009552bc6f)
#3 boost::asio::detail::io_uring_file_service::open(boost::asio::detail::io_uring_file_service::implementation_type&, char const*, boost::asio::file_base::flags, boost::system::error_code&) /usr/vcpkg_installed/x64-linux/include/boost/asio/detail/impl/io_uring_file_service.ipp:66 (unittests+0x57cd49)
#4 std::shared_ptr<boost::asio::basic_random_access_file<boost::asio::any_io_executor> > std::make_shared<boost::asio::basic_random_access_file<boost::asio::any_io_executor>, boost::asio::io_context::basic_executor_type<std::allocator<void>, 0ul>, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >&, boost::asio::file_base::flags>(boost::asio::io_context::basic_executor_type<std::allocator<void>, 0ul>&&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >&, boost::asio::file_base::flags&&) <null>
Code:
boost::asio::io_context context;
// Keeps run() from returning while no async work is pending.
// (io_context::work is deprecated; boost::asio::make_work_guard(context)
//  is the modern equivalent — kept as-is to match the original snippet.)
boost::asio::io_context::work work{context};

// Run the event loop on a dedicated thread.
// The original passed a dummy argument through an (auto) parameter;
// a plain no-arg lambda does the same thing.
auto io_thread = std::thread([&context]
{
    context.run();
});
std::this_thread::sleep_for(3s);

std::vector<off_t> offsets = {64_KB, 32_KB, 128_KB, 16_KB};
char buffer[4][4096];  // one 4 KiB destination buffer per outstanding read

// NOTE(review): this opens the file — and registers its descriptor with the
// io_uring service — on the main thread while io_thread is already inside
// context.run(). The TSAN report points at exactly this overlap; opening the
// file before starting io_thread (or posting the open to the context) avoids
// the concurrent registration.
auto rfile = std::make_shared<boost::asio::random_access_file>(context.get_executor(),
                                                               file_name,
                                                               boost::asio::random_access_file::read_write);

std::atomic<bool> all_tasks_completed{false};
std::atomic<size_t> outstanding_tasks{offsets.size()};

// Issue one positioned read per offset; each completion handler runs on
// io_thread (the only thread calling context.run()).
for(size_t i=0; i<offsets.size(); ++i)
{
    auto& offset = offsets[i];
    rfile->async_read_some_at(
        offset,
        boost::asio::buffer(buffer[i]),
        // offset captured by value — the loop variable dies before the
        // handler runs; the atomics outlive the wait below, so by-ref is fine.
        [offset, &outstanding_tasks, &all_tasks_completed](auto e, auto bytes)
        {
            log_info("read bytes of size:", bytes, ", at offset:", offset);
            if(e)
            {
                log_error("error:", e.message());
                // Do NOT return here: the original early-returned without
                // decrementing outstanding_tasks, so any failed read left
                // all_tasks_completed.wait(false) blocked forever.
            }
            // Decrement and test in one atomic step. The original did
            // `--outstanding_tasks;` then a separate `.load()`, which lets
            // two handlers both observe zero; fetch_sub guarantees exactly
            // one handler sees the count reach zero.
            if(--outstanding_tasks == 0)
            {
                all_tasks_completed = true;
                all_tasks_completed.notify_one();  // wake the waiter below
            }
        });
}

// Block until the last handler flips the flag (C++20 atomic wait).
all_tasks_completed.wait(false);
context.stop();
io_thread.join();