Added lock_and_signal::signal_all(), and made the rust_kernel::join() use wait instead of yield.

Michael Bebenita 2010-09-09 16:01:49 -07:00
parent 79dc07d648
commit f985fded3e
4 changed files with 31 additions and 16 deletions

rust_kernel.cpp

@@ -22,7 +22,7 @@ rust_kernel::create_domain(const rust_crate *crate, const char *name) {
message_queue->associate(handle);
domains.append(dom);
message_queues.append(message_queue);
- _kernel_lock.signal();
+ _kernel_lock.signal_all();
_kernel_lock.unlock();
return handle;
}
@@ -37,7 +37,7 @@ rust_kernel::destroy_domain(rust_dom *dom) {
rust_srv *srv = dom->srv;
delete dom;
delete srv;
- _kernel_lock.signal();
+ _kernel_lock.signal_all();
_kernel_lock.unlock();
}
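
Both create_domain() and destroy_domain() now call signal_all(), which wakes every thread blocked on _kernel_lock rather than a single one; terminate_kernel_loop() below makes the same change. The following is a minimal C++11 sketch of the difference, not runtime code: notify_one() corresponds to the old signal(), notify_all() to signal_all(), and the ready flag and waiter threads are invented purely for illustration.

    // Minimal sketch (not runtime code): wake-one vs. wake-all semantics.
    #include <condition_variable>
    #include <iostream>
    #include <mutex>
    #include <thread>
    #include <vector>

    std::mutex m;
    std::condition_variable cv;
    bool ready = false;

    void waiter(int id) {
        std::unique_lock<std::mutex> lock(m);
        cv.wait(lock, [] { return ready; });   // like: lock(); while (!ready) wait();
        std::cout << "waiter " << id << " woke up\n";
    }

    int main() {
        std::vector<std::thread> threads;
        for (int i = 0; i < 3; ++i)
            threads.emplace_back(waiter, i);
        {
            std::lock_guard<std::mutex> lock(m);
            ready = true;
            // cv.notify_one() -- the old signal() -- would wake only one of the
            // three waiters; the others would sleep until notified again.
            cv.notify_all();                   // analogous to signal_all()
        }
        for (auto &t : threads)
            t.join();
    }
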
@@ -91,10 +91,11 @@ rust_kernel::get_port_handle(rust_port *port) {
void
rust_kernel::join_all_domains() {
- // TODO: Perhaps we can do this a little smarter. Just spin wait for now.
+ _kernel_lock.lock();
while (domains.length() > 0) {
- sync::yield();
+ _kernel_lock.wait();
}
+ _kernel_lock.unlock();
log(rust_log::KERN, "joined domains");
}
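
join_all_domains() no longer spins on sync::yield(); it takes _kernel_lock, sleeps in wait(), and is woken by the signal_all() calls above whenever a domain is created or destroyed. A hedged C++11 sketch of the same block-instead-of-spin pattern follows; the active_domains counter and the helper names are invented for illustration, not taken from the runtime.

    // Illustrative sketch of the wait-instead-of-spin pattern; names are made up.
    #include <condition_variable>
    #include <iostream>
    #include <mutex>
    #include <thread>
    #include <vector>

    std::mutex m;
    std::condition_variable cv;
    int active_domains = 3;

    // Each "domain" announces its exit and wakes the joiner.
    void domain_exit() {
        std::lock_guard<std::mutex> lock(m);
        --active_domains;
        cv.notify_all();               // counterpart of signal_all()
    }

    void join_all_domains() {
        std::unique_lock<std::mutex> lock(m);
        // Old approach: while (active_domains > 0) yield();  -- burns CPU.
        // New approach: sleep until woken, rechecking the predicate, because a
        // wake-up does not guarantee the condition holds.
        cv.wait(lock, [] { return active_domains == 0; });
        std::cout << "joined domains\n";
    }

    int main() {
        std::vector<std::thread> domains;
        for (int i = 0; i < 3; ++i)
            domains.emplace_back(domain_exit);
        join_all_domains();
        for (auto &t : domains)
            t.join();
    }
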
@@ -162,7 +163,7 @@ rust_kernel::terminate_kernel_loop() {
log(rust_log::KERN, "terminating kernel loop");
_interrupt_kernel_loop = true;
_kernel_lock.lock();
- _kernel_lock.signal();
+ _kernel_lock.signal_all();
_kernel_lock.unlock();
join();
}
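
terminate_kernel_loop() combines the two pieces: it raises _interrupt_kernel_loop, wakes every waiter with signal_all(), and then calls join(). Below is a generic, hedged sketch of that stop-flag / broadcast / join handshake; the stop flag and kernel_loop() function are stand-ins, not the runtime's actual loop.

    // Generic sketch of the stop-flag / broadcast / join shutdown handshake.
    #include <condition_variable>
    #include <iostream>
    #include <mutex>
    #include <thread>

    std::mutex m;
    std::condition_variable cv;
    bool stop = false;

    void kernel_loop() {
        std::unique_lock<std::mutex> lock(m);
        while (!stop) {
            // A real loop would also process work here; this sketch only waits.
            cv.wait(lock);
        }
        std::cout << "kernel loop terminated\n";
    }

    int main() {
        std::thread kernel(kernel_loop);
        {
            std::lock_guard<std::mutex> lock(m);
            stop = true;              // corresponds to _interrupt_kernel_loop = true
            cv.notify_all();          // corresponds to _kernel_lock.signal_all()
        }
        kernel.join();                // corresponds to join()
    }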

rust_kernel.h

@@ -75,8 +75,6 @@ public:
* live on after their associated domain has died. This way we can safely
* communicate with domains that may have died.
*
- * Although the message_queues list is synchronized, each individual
- * message queue is lock free.
*/
indexed_list<rust_message_queue> message_queues;

lock_and_signal.cpp

@@ -11,7 +11,12 @@
#if defined(__WIN32__)
lock_and_signal::lock_and_signal() {
- _event = CreateEvent(NULL, FALSE, FALSE, NULL);
+ // TODO: In order to match the behavior of pthread_cond_broadcast on
+ // Windows, we create manual reset events. This however breaks the
+ // behavior of pthread_cond_signal, fixing this is quite involved:
+ // refer to: http://www.cs.wustl.edu/~schmidt/win32-cv-1.html
+ _event = CreateEvent(NULL, TRUE, FALSE, NULL);
InitializeCriticalSection(&_cs);
}
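
The constructor now asks for a manual-reset event (second argument TRUE) because SetEvent() on a manual-reset event releases every thread currently waiting on it, the closest Win32 analogue of pthread_cond_broadcast; an auto-reset event releases only one waiter per SetEvent(), which is why the comment notes that the wake-one behavior of signal() is now broken. A small Windows-only sketch of the difference follows; it is illustrative, not runtime code, and event_handle and waiter are invented names.

    // Windows-only sketch: manual-reset vs. auto-reset event wake-up behavior.
    #include <windows.h>
    #include <cstdio>
    #include <thread>
    #include <vector>

    // TRUE  -> manual-reset: stays signaled, releases all current waiters.
    // FALSE -> auto-reset: releases exactly one waiter, then resets itself.
    static HANDLE event_handle = CreateEvent(NULL, TRUE, FALSE, NULL);

    void waiter(int id) {
        WaitForSingleObject(event_handle, INFINITE);
        std::printf("waiter %d released\n", id);
    }

    int main() {
        std::vector<std::thread> threads;
        for (int i = 0; i < 3; ++i)
            threads.emplace_back(waiter, i);

        Sleep(100);                 // crude: give the waiters time to block
        SetEvent(event_handle);     // manual-reset: all three waiters wake.
                                    // With an auto-reset event, only one would.
        for (auto &t : threads)
            t.join();

        ResetEvent(event_handle);   // manual-reset events stay signaled until reset
        CloseHandle(event_handle);
    }
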
@@ -33,21 +38,20 @@ lock_and_signal::~lock_and_signal() {
void lock_and_signal::lock() {
#if defined(__WIN32__)
EnterCriticalSection(&_cs);
#else
pthread_mutex_lock(&_mutex);
#endif
}
void lock_and_signal::unlock() {
#if defined(__WIN32__)
LeaveCriticalSection(&_cs);
#else
pthread_mutex_unlock(&_mutex);
#endif
}
/**
* Wait indefinitely until condition is signaled.
*/
@@ -57,9 +61,9 @@ void lock_and_signal::wait() {
void lock_and_signal::timed_wait(size_t timeout_in_ns) {
#if defined(__WIN32__)
LeaveCriticalSection(&_cs);
WaitForSingleObject(_event, INFINITE);
EnterCriticalSection(&_cs);
#else
if (timeout_in_ns == 0) {
pthread_cond_wait(&_cond, &_mutex);
@@ -85,6 +89,17 @@ void lock_and_signal::signal() {
#endif
}
+ /**
+  * Signal condition, and resume all waiting threads.
+  */
+ void lock_and_signal::signal_all() {
+ #if defined(__WIN32__)
+ SetEvent(_event);
+ #else
+ pthread_cond_broadcast(&_cond);
+ #endif
+ }
//
// Local Variables:

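On POSIX the new signal_all() is a thin wrapper around pthread_cond_broadcast(), and a woken waiter still has to hold the mutex and re-check its condition, the same pattern rust_kernel::join_all_domains() now follows. An illustrative raw-pthreads sketch of that contract; the pending counter and thread bodies are invented, not taken from the runtime.

    // Illustrative raw-pthreads version of the signal_all()/wait() contract.
    #include <cstdio>
    #include <pthread.h>

    static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static int pending = 2;

    void *worker(void *arg) {
        pthread_mutex_lock(&mutex);
        --pending;
        pthread_cond_broadcast(&cond);     // what signal_all() does on POSIX
        pthread_mutex_unlock(&mutex);
        return arg;
    }

    int main() {
        pthread_t threads[2];
        for (int i = 0; i < 2; ++i)
            pthread_create(&threads[i], NULL, worker, NULL);

        pthread_mutex_lock(&mutex);
        while (pending > 0)                // re-test after every wake-up
            pthread_cond_wait(&cond, &mutex);
        pthread_mutex_unlock(&mutex);
        std::printf("all workers finished\n");

        for (int i = 0; i < 2; ++i)
            pthread_join(threads[i], NULL);
        return 0;
    }
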
lock_and_signal.h

@@ -18,6 +18,7 @@ public:
void wait();
void timed_wait(size_t timeout_in_ns);
void signal();
+ void signal_all();
};
#endif /* LOCK_AND_SIGNAL_H */
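
With signal_all() declared here, the class exposes both wake-one (signal) and wake-all (signal_all) alongside lock/unlock and wait/timed_wait. A hedged usage sketch against this interface follows; it assumes lock_and_signal.h and its implementation are available in the build, and the done flag plus the two helper functions are purely illustrative.

    // Usage sketch against the lock_and_signal interface; assumes the header
    // and implementation above are part of the build.
    #include "lock_and_signal.h"
    #include <cstdio>
    #include <thread>

    static lock_and_signal state_lock;
    static bool done = false;

    // Waiting side: hold the lock and re-check the condition around every wait().
    void wait_until_done() {
        state_lock.lock();
        while (!done)
            state_lock.wait();
        state_lock.unlock();
    }

    // Signaling side: publish the state change, then wake every waiter.
    void mark_done() {
        state_lock.lock();
        done = true;
        state_lock.signal_all();   // signal() would wake at most one waiter
        state_lock.unlock();
    }

    int main() {
        std::thread waiter([] { wait_until_done(); std::printf("done observed\n"); });
        mark_done();
        waiter.join();
    }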