I want to write a C++ program that waits for a Linux signal with millisecond resolution, but I have not been able to find a way to achieve this.
The following test code should terminate after 500ms, but it doesn't.
#include <iostream>
#include <csignal>
#include <unistd.h>
#include <chrono>
#include <thread>
#include <future>

using namespace std::chrono_literals;

extern "C" void handler(int s) {
}

int main() {
    std::signal(SIGUSR1, handler);
    auto f = std::async(std::launch::async, [&] {
        auto start = std::chrono::high_resolution_clock::now();
        // None of these variants wakes up early when the signal is raised:
        //usleep(1000000);
        sleep(1);
        //std::this_thread::sleep_for(1s);
        std::cout << std::chrono::duration_cast<std::chrono::milliseconds>(
            std::chrono::high_resolution_clock::now() - start).count() << "ms";
    });
    std::this_thread::sleep_for(500ms);
    std::raise(SIGUSR1);
}
Does anybody know how to fix this behaviour?
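First, why the test code never wakes early: std::raise delivers the signal to the calling thread, i.e. main, so the handler runs on the main thread and the sleep in the std::async thread is never interrupted. A minimal sketch showing just that delivery problem (std::thread and pthread_kill here are for illustration only and are not part of the final solution; on Linux/glibc, sleep is based on nanosleep, which is not restarted after a handler runs):

#include <csignal>
#include <chrono>
#include <iostream>
#include <thread>
#include <pthread.h>
#include <unistd.h>

using namespace std::chrono_literals;

extern "C" void handler(int s) {
}

int main() {
    std::signal(SIGUSR1, handler);
    std::thread t([] {
        auto start = std::chrono::steady_clock::now();
        sleep(1);  // returns early only if the signal reaches *this* thread
        std::cout << std::chrono::duration_cast<std::chrono::milliseconds>(
            std::chrono::steady_clock::now() - start).count() << "ms\n";
    });
    std::this_thread::sleep_for(500ms);
    pthread_kill(t.native_handle(), SIGUSR1);  // target the sleeping thread, not main
    t.join();
}

This prints roughly 500ms, but it only demonstrates the delivery problem. Relying on an interrupted sleep is racy as a waiting mechanism (the signal can arrive before the thread even starts sleeping), which is why I went with the semaphore below.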
With the ideas from ti7 and user4581301, I finally found a solution.
Following the idea of a mutex in the signal handler, but restricted to the set of functions that may legally be called there, I use a semaphore instead: sem_post is one of the few synchronization calls that is async-signal-safe.
#include <iostream>
#include <csignal>
#include <chrono>
#include <future>
#include <thread>
#include <semaphore.h>

using namespace std::chrono_literals;

sem_t *sem_g = nullptr;

extern "C" void handler(int s) {
    if (sem_g)
        sem_post(sem_g);  // async-signal-safe, unlike mutex or condition-variable calls
}

int main() {
    sem_t sem = {};
    sem_init(&sem, 0, 0);
    sem_g = &sem;
    std::signal(SIGUSR1, handler);
    auto f = std::async(std::launch::async, [&] {
        auto start = std::chrono::high_resolution_clock::now();
        // sem_timedwait expects an absolute CLOCK_REALTIME deadline,
        // so build a timespec from system_clock.
        auto time_point = std::chrono::system_clock::now() + 10s;
        auto duration = time_point.time_since_epoch();
        auto secs = std::chrono::duration_cast<std::chrono::seconds>(duration);
        auto nanos = std::chrono::duration_cast<std::chrono::nanoseconds>(duration - secs);
        timespec t{secs.count(), nanos.count()};
        // 0: the handler posted; -1 with errno == ETIMEDOUT: the deadline passed
        // (a production version would also retry on EINTR).
        auto r = sem_timedwait(&sem, &t);
        std::cout << std::chrono::duration_cast<std::chrono::milliseconds>(
            std::chrono::high_resolution_clock::now() - start).count() << "ms";
    });
    std::this_thread::sleep_for(500ms);
    std::raise(SIGUSR1);
    f.wait();
    sem_destroy(&sem);
}
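For completeness: if the only goal is to block until a signal arrives or a sub-second timeout expires, POSIX sigtimedwait can do this without installing a handler at all. A rough sketch of that alternative (the 2-second timeout and the sender thread are just for demonstration; note that the signal must be blocked before waiting, and that a process-directed kill is used because std::raise is thread-directed):

#include <csignal>
#include <cerrno>
#include <chrono>
#include <iostream>
#include <thread>
#include <unistd.h>

using namespace std::chrono_literals;

int main() {
    // Block SIGUSR1 before any waiting; blocked signals stay pending
    // until sigtimedwait consumes them. The sender thread inherits this mask.
    sigset_t set;
    sigemptyset(&set);
    sigaddset(&set, SIGUSR1);
    pthread_sigmask(SIG_BLOCK, &set, nullptr);

    std::thread sender([] {
        std::this_thread::sleep_for(500ms);
        kill(getpid(), SIGUSR1);  // process-directed, unlike std::raise
    });

    timespec timeout{2, 0};  // relative timeout; sub-second values go in tv_nsec
    int sig = sigtimedwait(&set, nullptr, &timeout);
    if (sig == SIGUSR1)
        std::cout << "got SIGUSR1\n";
    else if (sig == -1 && errno == EAGAIN)
        std::cout << "timed out\n";

    sender.join();
}

Unlike sem_timedwait, which takes an absolute CLOCK_REALTIME deadline, sigtimedwait takes a relative timeout, and it returns the consumed signal number on success or -1 with errno == EAGAIN on timeout.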