/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "runtime/thread.hpp"


// Lifecycle management for TSM ParkEvents.
// ParkEvents are type-stable (TSM).
// In our particular implementation they happen to be immortal.
//
// We manage concurrency on the FreeList with a CAS-based
// detach-modify-reattach idiom that avoids the ABA problems
// that would otherwise be present in a simple CAS-based
// push-pop implementation (push-one and pop-all); see the
// illustrative sketch below.
//
// Caveat: Allocate() and Release() may be called from threads
// other than the thread associated with the Event!
// If we need to call Allocate() when running as the thread in
// question then look for the PD calls to initialize native TLS.
// Native TLS (Win32/Linux/Solaris) can only be initialized or
// accessed by the associated thread.
// See also pd_initialize().
//
// Note that we could defer associating a ParkEvent with a thread
// until the first time the thread calls park().  unpark() calls to
// an unprovisioned thread would be ignored.  The first park() call
// for a thread would allocate and associate a ParkEvent and return
// immediately.
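
// Illustrative sketch of the detach-modify-reattach idiom used below
// (not part of the implementation; the node naming and Swap() are
// hypothetical shorthand):
//
//   1: Detach   -- privatize the entire list with one CAS that installs
//                  NULL at the head.  Tantamount to head = Swap(&FreeList, NULL).
//   2: Modify   -- the detached list is now thread-local, so we can pop,
//                  trim, or splice it without interference from other threads.
//   3: Reattach -- CAS the residual list back into FreeList.  If the CAS
//                  fails, other threads pushed nodes in the interim; detach
//                  those arrivals too, merge them into the residue, and retry.
//
// A naive CAS-based pop reads head->FreeNext *before* its CAS, so if the
// head node is popped, recycled, and pushed back in the interim the CAS can
// still succeed and publish a stale successor (the classic ABA hazard).
// With detach-all, successor pointers are read only after the list has been
// privatized, so a stale pointer can never be installed into FreeList.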

volatile int ParkEvent::ListLock = 0 ;
ParkEvent * volatile ParkEvent::FreeList = NULL ;

ParkEvent * ParkEvent::Allocate (Thread * t) {
  // In rare cases -- JVM_RawMonitor* operations -- we can find t == null.
  ParkEvent * ev ;

  // Start by trying to recycle an existing but unassociated
  // ParkEvent from the global free list.
  for (;;) {
    ev = FreeList ;
    if (ev == NULL) break ;
    // 1: Detach -- sequester or privatize the list.
    // Tantamount to ev = Swap (&FreeList, NULL)
    if (Atomic::cmpxchg_ptr (NULL, &FreeList, ev) != ev) {
      continue ;
    }

    // We've detached the list.  The list in-hand is now
    // local to this thread.  This thread can operate on the
    // list without risk of interference from other threads.
    // 2: Extract -- pop the 1st element from the list.
    ParkEvent * List = ev->FreeNext ;
    if (List == NULL) break ;
    for (;;) {
      // 3: Try to reattach the residual list.
      guarantee (List != NULL, "invariant") ;
      ParkEvent * Arv = (ParkEvent *) Atomic::cmpxchg_ptr (List, &FreeList, NULL) ;
      if (Arv == NULL) break ;

      // New nodes arrived.  Try to detach the recent arrivals.
      if (Atomic::cmpxchg_ptr (NULL, &FreeList, Arv) != Arv) {
        continue ;
      }
      guarantee (Arv != NULL, "invariant") ;
      // 4: Merge Arv into List.
      ParkEvent * Tail = List ;
      while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ;
      Tail->FreeNext = Arv ;
    }
    break ;
  }

  if (ev != NULL) {
    guarantee (ev->AssociatedWith == NULL, "invariant") ;
  } else {
    // Do this the hard way -- materialize a new ParkEvent.
    // In rare cases an allocating thread might detach a long list --
    // installing null into FreeList -- and then stall or be obstructed.
    // A 2nd thread calling Allocate() would see FreeList == null.
    // The list held privately by the 1st thread is unavailable to the 2nd thread.
    // In that case the 2nd thread would have to materialize a new ParkEvent,
    // even though free ParkEvents existed in the system.  In this case we end up
    // with more ParkEvents in circulation than we need, but the race is
    // rare and the outcome is benign.  Ideally, the # of extant ParkEvents
    // is equal to the maximum # of threads that existed at any one time.
    // Because of the race mentioned above, segments of the freelist
    // can be transiently inaccessible.  At worst we may end up with the
    // # of ParkEvents in circulation slightly above the ideal.
    // Note that if we didn't have the TSM/immortal constraint, then
    // when reattaching, above, we could trim the list.
    ev = new ParkEvent () ;
    guarantee ((intptr_t(ev) & 0xFF) == 0, "invariant") ;   // 256-byte alignment provided by the overridden operator new below
  }
  ev->reset() ;              // courtesy to caller
  ev->AssociatedWith = t ;   // Associate ev with t
  ev->FreeNext = NULL ;
  return ev ;
}

void ParkEvent::Release (ParkEvent * ev) {
  if (ev == NULL) return ;
  guarantee (ev->FreeNext == NULL, "invariant") ;
  ev->AssociatedWith = NULL ;
  for (;;) {
    // Push ev onto FreeList.
    // The mechanism is "half" lock-free.
    ParkEvent * List = FreeList ;
    ev->FreeNext = List ;
    if (Atomic::cmpxchg_ptr (ev, &FreeList, List) == List) break ;
  }
}
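
// Illustrative usage (hypothetical call site, not part of this file):
//
//   ParkEvent * ev = ParkEvent::Allocate (Thread::current()) ;  // recycle or materialize
//   ...
//   ParkEvent::Release (ev) ;   // disassociate and push back onto the FreeList;
//                               // the ParkEvent itself is immortal and never freed.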

// Override operator new and delete so we can ensure that the
// least significant byte of ParkEvent addresses is 0.
// Beware that excessive address alignment is undesirable
// as it can result in D$ index usage imbalance as
// well as bank access imbalance on Niagara-like platforms,
// although Niagara's hash function should help.
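//
// The arithmetic in operator new: AllocateHeap() returns a block of
// sz + 256 bytes; adding 256 to the raw address and masking with -256
// (i.e., ~0xFF) rounds it up to the next 256-byte boundary, advancing by at
// most 256 bytes, so the aligned object still fits within the allocation.
// The low 8 bits of the result are zero, which is exactly what the
// (intptr_t(ev) & 0xFF) == 0 guarantee in Allocate() checks.  The raw,
// unaligned pointer is never retained or freed -- acceptable only because
// ParkEvents are immortal and operator delete must never run.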

void * ParkEvent::operator new (size_t sz) {
  return (void *) ((intptr_t (AllocateHeap(sz + 256, mtInternal, CALLER_PC)) + 256) & -256) ;
}

void ParkEvent::operator delete (void * a) {
  // ParkEvents are type-stable and immortal ...
  ShouldNotReachHere();
}


// 6399321 As a temporary measure we copied & modified the ParkEvent::
// Allocate() and Release() code for use by Parkers.  The Parker:: forms
// will eventually be removed as we consolidate and shift over to ParkEvents
// for both builtin synchronization and JSR166 operations.

volatile int Parker::ListLock = 0 ;
Parker * volatile Parker::FreeList = NULL ;

Parker * Parker::Allocate (JavaThread * t) {
  guarantee (t != NULL, "invariant") ;
  Parker * p ;

  // Start by trying to recycle an existing but unassociated
  // Parker from the global free list.
  for (;;) {
    p = FreeList ;
    if (p == NULL) break ;
    // 1: Detach
    // Tantamount to p = Swap (&FreeList, NULL)
    if (Atomic::cmpxchg_ptr (NULL, &FreeList, p) != p) {
      continue ;
    }

    // We've detached the list.  The list in-hand is now
    // local to this thread.  This thread can operate on the
    // list without risk of interference from other threads.
    // 2: Extract -- pop the 1st element from the list.
    Parker * List = p->FreeNext ;
    if (List == NULL) break ;
    for (;;) {
      // 3: Try to reattach the residual list.
      guarantee (List != NULL, "invariant") ;
      Parker * Arv = (Parker *) Atomic::cmpxchg_ptr (List, &FreeList, NULL) ;
      if (Arv == NULL) break ;

      // New nodes arrived.  Try to detach the recent arrivals.
      if (Atomic::cmpxchg_ptr (NULL, &FreeList, Arv) != Arv) {
        continue ;
      }
      guarantee (Arv != NULL, "invariant") ;
      // 4: Merge Arv into List.
      Parker * Tail = List ;
      while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ;
      Tail->FreeNext = Arv ;
    }
    break ;
  }

  if (p != NULL) {
    guarantee (p->AssociatedWith == NULL, "invariant") ;
  } else {
    // Do this the hard way -- materialize a new Parker.
    // In rare cases an allocating thread might detach
    // a long list -- installing null into FreeList -- and
    // then stall.  Another thread calling Allocate() would see
    // FreeList == null and then invoke the ctor.  In this case we
    // end up with more Parkers in circulation than we need, but
    // the race is rare and the outcome is benign.
    // Ideally, the # of extant Parkers is equal to the
    // maximum # of threads that existed at any one time.
    // Because of the race mentioned above, segments of the
    // freelist can be transiently inaccessible.  At worst
    // we may end up with the # of Parkers in circulation
    // slightly above the ideal.
    p = new Parker() ;
  }
  p->AssociatedWith = t ;   // Associate p with t
  p->FreeNext = NULL ;
  return p ;
}


void Parker::Release (Parker * p) {
  if (p == NULL) return ;
  guarantee (p->AssociatedWith != NULL, "invariant") ;
  guarantee (p->FreeNext == NULL, "invariant") ;
  p->AssociatedWith = NULL ;
  for (;;) {
    // Push p onto FreeList.
    Parker * List = FreeList ;
    p->FreeNext = List ;
    if (Atomic::cmpxchg_ptr (p, &FreeList, List) == List) break ;
  }
}
