OpenJDK / portola / portola
changeset 50310:d11e87c8cd44
8202945: OopStorage should use GlobalCounter
Summary: Use GlobalCounter rather than private mechanism.
Reviewed-by: eosterlund, tschatzl, rehn
| author | kbarrett |
|---|---|
| date | Tue, 29 May 2018 18:10:09 -0400 |
| parents | 896486c6e357 |
| children | fe42de5250f3 |
| files | src/hotspot/share/gc/shared/oopStorage.cpp src/hotspot/share/gc/shared/oopStorage.hpp |
| diffstat | 2 files changed, 5 insertions(+), 63 deletions(-) |
line wrap: on
line diff
--- a/src/hotspot/share/gc/shared/oopStorage.cpp Tue May 29 17:21:31 2018 -0400 +++ b/src/hotspot/share/gc/shared/oopStorage.cpp Tue May 29 18:10:09 2018 -0400 @@ -40,6 +40,7 @@ #include "utilities/align.hpp" #include "utilities/count_trailing_zeros.hpp" #include "utilities/debug.hpp" +#include "utilities/globalCounter.inline.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/macros.hpp" #include "utilities/ostream.hpp" @@ -501,48 +502,6 @@ return true; } -OopStorage::ProtectActive::ProtectActive() : _enter(0), _exit() {} - -// Begin read-side critical section. -uint OopStorage::ProtectActive::read_enter() { - return Atomic::add(2u, &_enter); -} - -// End read-side critical section. -void OopStorage::ProtectActive::read_exit(uint enter_value) { - Atomic::add(2u, &_exit[enter_value & 1]); -} - -// Wait until all readers that entered the critical section before -// synchronization have exited that critical section. -void OopStorage::ProtectActive::write_synchronize() { - SpinYield spinner; - // Determine old and new exit counters, based on bit0 of the - // on-entry _enter counter. - uint value = OrderAccess::load_acquire(&_enter); - volatile uint* new_ptr = &_exit[(value + 1) & 1]; - // Atomically change the in-use exit counter to the new counter, by - // adding 1 to the _enter counter (flipping bit0 between 0 and 1) - // and initializing the new exit counter to that enter value. Note: - // The new exit counter is not being used by read operations until - // this change succeeds. - uint old; - do { - old = value; - *new_ptr = ++value; - value = Atomic::cmpxchg(value, &_enter, old); - } while (old != value); - // Readers that entered the critical section before we changed the - // selected exit counter will use the old exit counter. Readers - // entering after the change will use the new exit counter. Wait - // for all the critical sections started before the change to - // complete, e.g. for the value of old_ptr to catch up with old. 
- volatile uint* old_ptr = &_exit[old & 1]; - while (old != OrderAccess::load_acquire(old_ptr)) { - spinner.wait(); - } -} - // Make new_array the _active_array. Increments new_array's refcount // to account for the new reference. The assignment is atomic wrto // obtain_active_array; once this function returns, it is safe for the @@ -554,9 +513,9 @@ // Install new_array, ensuring its initialization is complete first. OrderAccess::release_store(&_active_array, new_array); // Wait for any readers that could read the old array from _active_array. - _protect_active.write_synchronize(); - // All obtain critical sections that could see the old array have - // completed, having incremented the refcount of the old array. The + GlobalCounter::write_synchronize(); + // All obtain_active_array critical sections that could see the old array + // have completed, having incremented the refcount of the old array. The // caller can now safely relinquish the old array. } @@ -566,10 +525,9 @@ // _active_array. The caller must relinquish the array when done // using it. OopStorage::ActiveArray* OopStorage::obtain_active_array() const { - uint enter_value = _protect_active.read_enter(); + GlobalCounter::CriticalSection cs(Thread::current()); ActiveArray* result = OrderAccess::load_acquire(&_active_array); result->increment_refcount(); - _protect_active.read_exit(enter_value); return result; }
--- a/src/hotspot/share/gc/shared/oopStorage.hpp Tue May 29 17:21:31 2018 -0400 +++ b/src/hotspot/share/gc/shared/oopStorage.hpp Tue May 29 18:10:09 2018 -0400 @@ -204,19 +204,6 @@ void unlink(const Block& block); }; - // RCU-inspired protection of access to _active_array. - class ProtectActive { - volatile uint _enter; - volatile uint _exit[2]; - - public: - ProtectActive(); - - uint read_enter(); - void read_exit(uint enter_value); - void write_synchronize(); - }; - private: const char* _name; ActiveArray* _active_array; @@ -229,9 +216,6 @@ // Volatile for racy unlocked accesses. volatile size_t _allocation_count; - // Protection for _active_array. - mutable ProtectActive _protect_active; - // mutable because this gets set even for const iteration. mutable bool _concurrent_iteration_active;