ptrQueue.cpp revision 1111
/*
 * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_ptrQueue.cpp.incl"
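
// PtrQueue is a per-thread queue of pointers, backed by a buffer of _sz
// bytes obtained from the owning PtrQueueSet; it serves as the base for the
// collector's buffering write-barrier queues (such as G1's SATB and dirty
// card queues).  The buffer is filled from the top down: _index is the byte
// offset of the lowest filled slot, so _index == _sz means the buffer is
// empty and _index == 0 means it is full.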
PtrQueue::PtrQueue(PtrQueueSet* qset_, bool perm) :
  _qset(qset_), _buf(NULL), _index(0), _active(false),
  _perm(perm), _lock(NULL)
{}
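
// Flush a non-permanent queue: if the buffer was never written to, return it
// to the free list; otherwise NULL out the unfilled slots (those below
// _index) and hand the buffer to the set as a completed buffer.  A permanent
// (_perm) queue keeps its buffer across flush().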
void PtrQueue::flush() {
  if (!_perm && _buf != NULL) {
    if (_index == _sz) {
      // No work to do.
      qset()->deallocate_buffer(_buf);
    } else {
      // We must NULL out the unused entries, then enqueue.
      for (size_t i = 0; i < _index; i += oopSize) {
        _buf[byte_index_to_index((int)i)] = NULL;
      }
      qset()->enqueue_complete_buffer(_buf);
    }
    _buf = NULL;
    _index = 0;
  }
}
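
// Conversions between byte offsets within the buffer and element indices.
// For example, with oopSize == 8, a byte offset of 24 corresponds to slot 3.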
static int byte_index_to_index(int ind) {
  assert((ind % oopSize) == 0, "Invariant.");
  return ind / oopSize;
}

static int index_to_byte_index(int ind) {
  return ind * oopSize;
}
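
// Append ptr to the queue, assuming the queue is active.  If the buffer is
// already full (_index == 0), handle_zero_index() first retires it and
// installs a fresh or recycled one; then _index is decremented by oopSize
// and ptr is stored at the resulting offset.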
void PtrQueue::enqueue_known_active(void* ptr) {
  assert(0 <= _index && _index <= _sz, "Invariant.");
  assert(_index == 0 || _buf != NULL, "invariant");

  while (_index == 0) {
    handle_zero_index();
  }

  assert(_index > 0, "postcondition");
  _index -= oopSize;
  _buf[byte_index_to_index((int)_index)] = ptr;
  assert(0 <= _index && _index <= _sz, "Invariant.");
}
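
// Enqueue a completed buffer while the caller holds _lock.  The lock is
// released around the enqueue, presumably to avoid holding it while the
// completed-buffer-list monitor is taken inside enqueue_complete_buffer.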
void PtrQueue::locking_enqueue_completed_buffer(void** buf) {
  assert(_lock->owned_by_self(), "Required.");
  _lock->unlock();
  qset()->enqueue_complete_buffer(buf);
  // Relock before returning: the caller expects to still hold the lock and
  // will unlock it itself in the normal case.
  _lock->lock_without_safepoint_check();
}
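
// A PtrQueueSet aggregates the completed buffers produced by its PtrQueues
// and maintains a free list of buffers.  By default a set owns its own free
// list (_fl_owner == this); several sets may instead share one owner's list.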
PtrQueueSet::PtrQueueSet(bool notify_when_complete) :
_max_completed_queue(0),
_cbl_mon(NULL), _fl_lock(NULL),
_notify_when_complete(notify_when_complete),
_sz(0),
_completed_buffers_head(NULL),
_completed_buffers_tail(NULL),
_n_completed_buffers(0),
_process_completed_threshold(0), _process_completed(false),
_buf_free_list(NULL), _buf_free_list_sz(0)
{
_fl_owner = this;
}
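
// Return a buffer of _sz bytes, preferring the (possibly shared) free list.
// If the free list is empty, allocate a new block from the C heap with room
// for the BufferNode header in front of the buffer itself.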
void** PtrQueueSet::allocate_buffer() {
assert(_sz > 0, "Didn't set a buffer size.");
MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
if (_fl_owner->_buf_free_list != NULL) {
void** res = BufferNode::make_buffer_from_node(_fl_owner->_buf_free_list);
_fl_owner->_buf_free_list = _fl_owner->_buf_free_list->next();
_fl_owner->_buf_free_list_sz--;
return res;
} else {
// Allocate space for the BufferNode in front of the buffer.
char *b = NEW_C_HEAP_ARRAY(char, _sz + BufferNode::aligned_size());
return BufferNode::make_buffer_from_block(b);
}
}
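
// Return a buffer to the owner's free list by pushing its BufferNode, which
// lives in the block just in front of the buffer.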
void PtrQueueSet::deallocate_buffer(void** buf) {
assert(_sz > 0, "Didn't set a buffer size.");
MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
BufferNode *node = BufferNode::make_node_from_buffer(buf);
node->set_next(_fl_owner->_buf_free_list);
_fl_owner->_buf_free_list = node;
_fl_owner->_buf_free_list_sz++;
}
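
// Trim the buffer free list by releasing half of its blocks back to the
// C heap.  Only the owner of the free list may do this.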
void PtrQueueSet::reduce_free_list() {
assert(_fl_owner == this, "Free list reduction is allowed only for the owner");
// For now we'll adopt the strategy of deleting half.
MutexLockerEx x(_fl_lock, Mutex::_no_safepoint_check_flag);
size_t n = _buf_free_list_sz / 2;
while (n > 0) {
assert(_buf_free_list != NULL, "_buf_free_list_sz must be wrong.");
void* b = BufferNode::make_block_from_node(_buf_free_list);
_buf_free_list = _buf_free_list->next();
FREE_C_HEAP_ARRAY(char, b);
    _buf_free_list_sz--;
n--;
}
}
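
// Called when the current buffer has become full (_index == 0).  The full
// buffer is handed off: through locking_enqueue_completed_buffer() if this
// queue is protected by a lock, otherwise through the set, which may let a
// Java thread process the buffer in place and recycle it.  If the buffer was
// not recycled, a fresh one is allocated and the queue is reset to empty.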
void PtrQueue::handle_zero_index() {
assert(0 == _index, "Precondition.");
// This thread records the full buffer and allocates a new one (while
// holding the lock if there is one).
if (_buf != NULL) {
if (_lock) {
locking_enqueue_completed_buffer(_buf);
} else {
if (qset()->process_or_enqueue_complete_buffer(_buf)) {
// Recycle the buffer. No allocation.
_sz = qset()->buffer_size();
_index = _sz;
return;
}
}
}
// Reallocate the buffer
_buf = qset()->allocate_buffer();
_sz = qset()->buffer_size();
_index = _sz;
assert(0 <= _index && _index <= _sz, "Invariant.");
}
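
// Give a Java thread the chance to process its own full buffer when the
// completed-buffer list has grown past _max_completed_queue (plus padding),
// or when no completed buffers may be queued at all (_max_completed_queue
// == 0).  Returns true if the buffer was processed in place and may be
// reused; otherwise the buffer is enqueued and the caller must get a new one.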
bool PtrQueueSet::process_or_enqueue_complete_buffer(void** buf) {
if (Thread::current()->is_Java_thread()) {
// We don't lock. It is fine to be epsilon-precise here.
    if (_max_completed_queue == 0 ||
        (_max_completed_queue > 0 &&
         _n_completed_buffers >= _max_completed_queue + _completed_queue_padding)) {
bool b = mut_process_buffer(buf);
if (b) {
// True here means that the buffer hasn't been deallocated and the caller may reuse it.
return true;
}
}
}
// The buffer will be enqueued. The caller will have to get a new one.
enqueue_complete_buffer(buf);
return false;
}
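
// Append buf to the completed-buffer list, recording the caller's index in
// its BufferNode, and wake up a processing thread once the process-completed
// threshold has been reached (if notification is enabled for this set).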
void PtrQueueSet::enqueue_complete_buffer(void** buf, size_t index) {
MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
BufferNode* cbn = BufferNode::new_from_buffer(buf);
cbn->set_index(index);
if (_completed_buffers_tail == NULL) {
assert(_completed_buffers_head == NULL, "Well-formedness");
_completed_buffers_head = cbn;
_completed_buffers_tail = cbn;
} else {
_completed_buffers_tail->set_next(cbn);
_completed_buffers_tail = cbn;
}
_n_completed_buffers++;
if (!_process_completed && _process_completed_threshold >= 0 &&
_n_completed_buffers >= _process_completed_threshold) {
_process_completed = true;
if (_notify_when_complete)
_cbl_mon->notify();
}
debug_only(assert_completed_buffer_list_len_correct_locked());
}
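
// Count the buffers on the completed-buffer list; used by the debug-only
// consistency checks below.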
int PtrQueueSet::completed_buffers_list_length() {
int n = 0;
BufferNode* cbn = _completed_buffers_head;
while (cbn != NULL) {
n++;
cbn = cbn->next();
}
return n;
}
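
// Debug-only checks that _n_completed_buffers matches the actual length of
// the completed-buffer list.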
void PtrQueueSet::assert_completed_buffer_list_len_correct() {
MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
assert_completed_buffer_list_len_correct_locked();
}

void PtrQueueSet::assert_completed_buffer_list_len_correct_locked() {
guarantee(completed_buffers_list_length() == _n_completed_buffers,
"Completed buffer length is wrong.");
}
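
// Set the buffer capacity; sz is given in entries and stored in bytes.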
void PtrQueueSet::set_buffer_size(size_t sz) {
assert(_sz == 0 && sz > 0, "Should be called only once.");
_sz = sz * oopSize;
}

// Merge lists of buffers.  Notify the processing threads.
// The source queue is emptied as a result.  The queues
// must share the monitor.
void PtrQueueSet::merge_bufferlists(PtrQueueSet *src) {
assert(_cbl_mon == src->_cbl_mon, "Should share the same lock");
MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
if (_completed_buffers_tail == NULL) {
assert(_completed_buffers_head == NULL, "Well-formedness");
_completed_buffers_head = src->_completed_buffers_head;
_completed_buffers_tail = src->_completed_buffers_tail;
} else {
assert(_completed_buffers_head != NULL, "Well formedness");
if (src->_completed_buffers_head != NULL) {
_completed_buffers_tail->set_next(src->_completed_buffers_head);
_completed_buffers_tail = src->_completed_buffers_tail;
}
}
_n_completed_buffers += src->_n_completed_buffers;
src->_n_completed_buffers = 0;
src->_completed_buffers_head = NULL;
src->_completed_buffers_tail = NULL;
  assert((_completed_buffers_head == NULL && _completed_buffers_tail == NULL) ||
         (_completed_buffers_head != NULL && _completed_buffers_tail != NULL),
         "Sanity");
}
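
// Wake up a processing thread if the number of completed buffers has reached
// the processing threshold (or if completed buffers may not be queued at
// all), provided notification is enabled for this set.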
void PtrQueueSet::notify_if_necessary() {
MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
if (_n_completed_buffers >= _process_completed_threshold || _max_completed_queue == 0) {
_process_completed = true;
if (_notify_when_complete)
_cbl_mon->notify();
}
}