lock.h revision 9fc464631dc4a68fbb5eb6419d61fbe91b6b16bd
/* $Id$ */
/** @file
* Lock.h - Haiku, private locking internals.
*/
/*
* Copyright (C) 2012 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*/
/*
* This code is based on:
*
* VirtualBox Guest Additions for Haiku.
*
* Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
* Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the MIT License.
*/
/** @todo r=ramshankar: Eventually this file should be shipped by Haiku and
* should be removed from the VBox tree. */
#ifndef _KERNEL_LOCK_H
#define _KERNEL_LOCK_H
#include <OS.h>
struct mutex_waiter;
typedef struct mutex {
const char* name;
struct mutex_waiter* waiters;
#if KDEBUG
#else
#endif
} mutex;
#define MUTEX_FLAG_CLONE_NAME 0x1
typedef struct recursive_lock {
#if !KDEBUG
#endif
int recursion;
struct rw_lock_waiter;
typedef struct rw_lock {
const char* name;
struct rw_lock_waiter* waiters;
// Only > 0 while a writer is waiting: number
// of active readers when the first waiting
// writer started waiting.
// Number of readers that have already
// incremented "count", but have not yet started
// to wait at the time the last writer unlocked.
} rw_lock;
#define RW_LOCK_WRITER_COUNT_BASE 0x10000
#define RW_LOCK_FLAG_CLONE_NAME 0x1
#if KDEBUG
# define KDEBUG_RW_LOCK_DEBUG 0
// Define to 1 if you want to use ASSERT_READ_LOCKED_RW_LOCK().
// The rw_lock will just behave like a recursive locker then.
# define ASSERT_LOCKED_RECURSIVE(r) \
# define ASSERT_WRITE_LOCKED_RW_LOCK(l) \
# if KDEBUG_RW_LOCK_DEBUG
# define ASSERT_READ_LOCKED_RW_LOCK(l) \
# else
# define ASSERT_READ_LOCKED_RW_LOCK(l) do {} while (false)
# endif
#else
# define ASSERT_LOCKED_RECURSIVE(r) do {} while (false)
# define ASSERT_LOCKED_MUTEX(m) do {} while (false)
# define ASSERT_WRITE_LOCKED_RW_LOCK(m) do {} while (false)
# define ASSERT_READ_LOCKED_RW_LOCK(l) do {} while (false)
#endif
// static initializers
// NOTE(review): the initializer macro bodies were lost in this import,
// leaving two empty #if/#else/#endif pairs; restored from upstream Haiku
// headers/private/kernel/lock.h (field order must match the structs above)
// — confirm against the source revision.
#if KDEBUG
#	define MUTEX_INITIALIZER(name)			{ name, NULL, -1, 0 }
#	define RECURSIVE_LOCK_INITIALIZER(name)	{ MUTEX_INITIALIZER(name), 0 }
#else
#	define MUTEX_INITIALIZER(name)			{ name, NULL, 0, 0, 0 }
#	define RECURSIVE_LOCK_INITIALIZER(name)	{ MUTEX_INITIALIZER(name), -1, 0 }
#endif

#define RW_LOCK_INITIALIZER(name)			{ name, NULL, -1, 0, 0, 0, 0, 0 }

#if KDEBUG
#	define RECURSIVE_LOCK_HOLDER(recursiveLock)	((recursiveLock)->lock.holder)
#else
#	define RECURSIVE_LOCK_HOLDER(recursiveLock)	((recursiveLock)->holder)
#endif
#ifdef __cplusplus
extern "C" {
#endif
// name is *not* cloned nor freed in recursive_lock_destroy()
// name is *not* cloned nor freed in rw_lock_destroy()
// name is *not* cloned nor freed in mutex_destroy()
// Unlocks "from" and locks "to" such that unlocking and starting to wait
// for the lock is atomically. I.e. if "from" guards the object "to" belongs
// to, the operation is safe as long as "from" is held while destroying
// "to".
// Like mutex_switch_lock(), just for a switching from a read-locked
// rw_lock.
// implementation private:
static inline status_t
{
return rw_lock_write_lock(lock);
#else
if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
return _rw_lock_read_lock(lock);
return B_OK;
#endif
}
static inline status_t
{
#else
if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
return B_OK;
#endif
}
static inline void
{
#else
if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
_rw_lock_read_unlock(lock, false);
#endif
}
static inline void
{
_rw_lock_write_unlock(lock, false);
}
static inline status_t
{
#if KDEBUG
return _mutex_lock(lock, false);
#else
return _mutex_lock(lock, false);
return B_OK;
#endif
}
static inline status_t
{
#if KDEBUG
return _mutex_lock(lock, true);
#else
return _mutex_lock(lock, true);
return B_OK;
#endif
}
static inline status_t
{
#if KDEBUG
return _mutex_trylock(lock);
#else
return B_WOULD_BLOCK;
return B_OK;
#endif
}
static inline status_t
{
#if KDEBUG
#else
return B_OK;
#endif
}
static inline void
{
#if !KDEBUG
#endif
_mutex_unlock(lock, false);
}
static inline void
{
#if KDEBUG
#endif
}
// One-time initialization of the lock-debugging facilities.
// (void) makes this a full prototype — in C, empty parentheses declare a
// function with unspecified parameters, which disables argument checking.
extern void lock_debug_init(void);
#ifdef __cplusplus
}
#endif
#endif /* _KERNEL_LOCK_H */