OpenJDK / jdk / jdk
changeset 58416:0daa6b52ba56
Merge
author | aph
---|---
date | Tue, 17 Mar 2020 14:13:52 +0000
parents | a4e2fc916323 56e78301b358
children | 0a2e12c3e6e3
files |
diffstat | 14 files changed, 226 insertions(+), 81 deletions(-)
--- a/src/hotspot/cpu/x86/assembler_x86.cpp	Mon Mar 16 17:10:26 2020 +0000
+++ b/src/hotspot/cpu/x86/assembler_x86.cpp	Tue Mar 17 14:13:52 2020 +0000
@@ -7900,7 +7900,7 @@
   // is allowed in legacy mode and has resources which will fit in it.
   // Pure EVEX instructions will have is_evex_instruction set in their definition.
   if (!attributes->is_legacy_mode()) {
-    if (UseAVX > 2 && !attributes->is_evex_instruction() && !_is_managed) {
+    if (UseAVX > 2 && !attributes->is_evex_instruction() && !is_managed()) {
       if ((attributes->get_vector_len() != AVX_512bit) && (nds_enc < 16) && (xreg_enc < 16)) {
         attributes->set_is_legacy_mode();
       }
@@ -7915,7 +7915,7 @@
     assert(((nds_enc < 16 && xreg_enc < 16) || (!attributes->is_legacy_mode())),"XMM register should be 0-15");
   }
 
-  _is_managed = false;
+  clear_managed();
 
   if (UseAVX > 2 && !attributes->is_legacy_mode()) {
     bool evex_r = (xreg_enc >= 16);
@@ -7947,7 +7947,7 @@
   // is allowed in legacy mode and has resources which will fit in it.
   // Pure EVEX instructions will have is_evex_instruction set in their definition.
   if (!attributes->is_legacy_mode()) {
-    if (UseAVX > 2 && !attributes->is_evex_instruction() && !_is_managed) {
+    if (UseAVX > 2 && !attributes->is_evex_instruction() && !is_managed()) {
       if ((!attributes->uses_vl() || (attributes->get_vector_len() != AVX_512bit)) &&
           (dst_enc < 16) && (nds_enc < 16) && (src_enc < 16)) {
         attributes->set_is_legacy_mode();
@@ -7969,7 +7969,7 @@
     assert(((dst_enc < 16 && nds_enc < 16 && src_enc < 16) || (!attributes->is_legacy_mode())),"XMM register should be 0-15");
   }
 
-  _is_managed = false;
+  clear_managed();
 
   if (UseAVX > 2 && !attributes->is_legacy_mode()) {
     bool evex_r = (dst_enc >= 16);
--- a/src/hotspot/cpu/x86/assembler_x86.hpp	Mon Mar 16 17:10:26 2020 +0000
+++ b/src/hotspot/cpu/x86/assembler_x86.hpp	Tue Mar 17 14:13:52 2020 +0000
@@ -339,15 +339,15 @@
  private:
   bool base_needs_rex() const {
-    return _base != noreg && _base->encoding() >= 8;
+    return _base->is_valid() && _base->encoding() >= 8;
   }
 
   bool index_needs_rex() const {
-    return _index != noreg &&_index->encoding() >= 8;
+    return _index->is_valid() &&_index->encoding() >= 8;
   }
 
   bool xmmindex_needs_rex() const {
-    return _xmmindex != xnoreg && _xmmindex->encoding() >= 8;
+    return _xmmindex->is_valid() && _xmmindex->encoding() >= 8;
   }
 
   relocInfo::relocType reloc() const { return _rspec.type(); }
@@ -659,7 +659,7 @@
   bool _legacy_mode_dq;
   bool _legacy_mode_vl;
   bool _legacy_mode_vlbw;
-  bool _is_managed;
+  NOT_LP64(bool _is_managed;)
 
   class InstructionAttr *_attributes;
@@ -870,16 +870,18 @@
     _legacy_mode_dq = (VM_Version::supports_avx512dq() == false);
     _legacy_mode_vl = (VM_Version::supports_avx512vl() == false);
     _legacy_mode_vlbw = (VM_Version::supports_avx512vlbw() == false);
-    _is_managed = false;
+    NOT_LP64(_is_managed = false;)
     _attributes = NULL;
   }
 
   void set_attributes(InstructionAttr *attributes) { _attributes = attributes; }
   void clear_attributes(void) { _attributes = NULL; }
 
-  void set_managed(void) { _is_managed = true; }
-  void clear_managed(void) { _is_managed = false; }
-  bool is_managed(void) { return _is_managed; }
+  void set_managed(void) { NOT_LP64(_is_managed = true;) }
+  void clear_managed(void) { NOT_LP64(_is_managed = false;) }
+  bool is_managed(void) {
+    NOT_LP64(return _is_managed;)
+    LP64_ONLY(return false;) }
 
   void lea(Register dst, Address src);
@@ -2280,22 +2282,20 @@
     bool no_reg_mask,   // when true, k0 is used when EVEX encoding is chosen, else embedded_opmask_register_specifier is used
     bool uses_vl)       // This instruction may have legacy constraints based on vector length for EVEX
     :
-    _avx_vector_len(vector_len),
     _rex_vex_w(rex_vex_w),
-    _rex_vex_w_reverted(false),
-    _legacy_mode(legacy_mode),
+    _legacy_mode(legacy_mode || UseAVX < 3),
     _no_reg_mask(no_reg_mask),
     _uses_vl(uses_vl),
+    _rex_vex_w_reverted(false),
+    _is_evex_instruction(false),
+    _is_clear_context(true),
+    _is_extended_context(false),
+    _avx_vector_len(vector_len),
     _tuple_type(Assembler::EVEX_ETUP),
     _input_size_in_bits(Assembler::EVEX_NObit),
-    _is_evex_instruction(false),
     _evex_encoding(0),
-    _is_clear_context(true),
-    _is_extended_context(false),
     _embedded_opmask_register_specifier(0), // hard code k0
-    _current_assembler(NULL) {
-    if (UseAVX < 3) _legacy_mode = true;
-  }
+    _current_assembler(NULL) { }
 
   ~InstructionAttr() {
     if (_current_assembler != NULL) {
@@ -2305,37 +2305,37 @@
   }
 
 private:
-  int  _avx_vector_len;
   bool _rex_vex_w;
-  bool _rex_vex_w_reverted;
   bool _legacy_mode;
   bool _no_reg_mask;
   bool _uses_vl;
+  bool _rex_vex_w_reverted;
+  bool _is_evex_instruction;
+  bool _is_clear_context;
+  bool _is_extended_context;
+  int  _avx_vector_len;
   int  _tuple_type;
   int  _input_size_in_bits;
-  bool _is_evex_instruction;
   int  _evex_encoding;
-  bool _is_clear_context;
-  bool _is_extended_context;
   int  _embedded_opmask_register_specifier;
 
   Assembler *_current_assembler;
 
 public:
   // query functions for field accessors
-  int  get_vector_len(void) const { return _avx_vector_len; }
   bool is_rex_vex_w(void) const { return _rex_vex_w; }
-  bool is_rex_vex_w_reverted(void) { return _rex_vex_w_reverted; }
   bool is_legacy_mode(void) const { return _legacy_mode; }
   bool is_no_reg_mask(void) const { return _no_reg_mask; }
   bool uses_vl(void) const { return _uses_vl; }
+  bool is_rex_vex_w_reverted(void) { return _rex_vex_w_reverted; }
+  bool is_evex_instruction(void) const { return _is_evex_instruction; }
+  bool is_clear_context(void) const { return _is_clear_context; }
+  bool is_extended_context(void) const { return _is_extended_context; }
+  int  get_vector_len(void) const { return _avx_vector_len; }
   int  get_tuple_type(void) const { return _tuple_type; }
   int  get_input_size(void) const { return _input_size_in_bits; }
-  int  is_evex_instruction(void) const { return _is_evex_instruction; }
   int  get_evex_encoding(void) const { return _evex_encoding; }
-  bool is_clear_context(void) const { return _is_clear_context; }
-  bool is_extended_context(void) const { return _is_extended_context; }
-  int get_embedded_opmask_register_specifier(void) const { return _embedded_opmask_register_specifier; }
+  int  get_embedded_opmask_register_specifier(void) const { return _embedded_opmask_register_specifier; }
 
   // Set the vector len manually
   void set_vector_len(int vector_len) { _avx_vector_len = vector_len; }
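The two hunks above make `_is_managed` exist only on 32-bit builds and route all access through accessors, so `is_managed()` constant-folds to `false` on LP64 and the callers' branches in assembler_x86.cpp disappear. A minimal standalone sketch of the `NOT_LP64`/`LP64_ONLY` pattern, assuming a plain `_LP64` define rather than HotSpot's globalDefinitions.hpp macros:

```cpp
// Sketch only: HotSpot defines these macros in globalDefinitions.hpp.
#ifdef _LP64
  #define LP64_ONLY(code) code
  #define NOT_LP64(code)
#else
  #define LP64_ONLY(code)
  #define NOT_LP64(code) code
#endif

class Emitter {
  NOT_LP64(bool _is_managed;)        // field is compiled out entirely on 64-bit
public:
  Emitter() { NOT_LP64(_is_managed = false;) }
  void set_managed()   { NOT_LP64(_is_managed = true;)  }
  void clear_managed() { NOT_LP64(_is_managed = false;) }
  bool is_managed() {
    NOT_LP64(return _is_managed;)
    LP64_ONLY(return false;)         // constant-folds; callers' checks vanish on LP64
  }
};
```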
--- a/src/hotspot/os/linux/os_linux.cpp	Mon Mar 16 17:10:26 2020 +0000
+++ b/src/hotspot/os/linux/os_linux.cpp	Tue Mar 17 14:13:52 2020 +0000
@@ -2302,6 +2302,19 @@
   st->print("\n/proc/meminfo:\n");
   _print_ascii_file("/proc/meminfo", st);
   st->cr();
+
+  // some information regarding THPs; for details see
+  // https://www.kernel.org/doc/Documentation/vm/transhuge.txt
+  st->print_cr("/sys/kernel/mm/transparent_hugepage/enabled:");
+  if (!_print_ascii_file("/sys/kernel/mm/transparent_hugepage/enabled", st)) {
+    st->print_cr("  <Not Available>");
+  }
+  st->cr();
+  st->print_cr("/sys/kernel/mm/transparent_hugepage/defrag (defrag/compaction efforts parameter):");
+  if (!_print_ascii_file("/sys/kernel/mm/transparent_hugepage/defrag", st)) {
+    st->print_cr("  <Not Available>");
+  }
+  st->cr();
 }
 
 void os::Linux::print_ld_preload_file(outputStream* st) {
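The two sysfs paths above are real kernel interfaces documented in transhuge.txt. A standalone sketch of the same read-or-report-unavailable pattern outside HotSpot (the helper name `print_sysfs_file` is invented for this example):

```cpp
#include <cstdio>

// Print the contents of a sysfs file, or "<Not Available>" if it cannot be
// opened (e.g. non-Linux system, or THP support not compiled into the kernel).
static void print_sysfs_file(const char* path) {
  std::printf("%s:\n", path);
  FILE* f = std::fopen(path, "r");
  if (f == nullptr) {
    std::printf("  <Not Available>\n");
    return;
  }
  char buf[256];
  while (std::fgets(buf, sizeof(buf), f) != nullptr) {
    std::fputs(buf, stdout);
  }
  std::fclose(f);
}

int main() {
  print_sysfs_file("/sys/kernel/mm/transparent_hugepage/enabled");
  print_sysfs_file("/sys/kernel/mm/transparent_hugepage/defrag");
  return 0;
}
```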
--- a/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp	Mon Mar 16 17:10:26 2020 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp	Tue Mar 17 14:13:52 2020 +0000
@@ -171,11 +171,54 @@
   }
 }
 
+class ShenandoahDisarmNMethodClosure : public NMethodClosure {
+private:
+  BarrierSetNMethod* const _bs;
+
+public:
+  ShenandoahDisarmNMethodClosure() :
+    _bs(BarrierSet::barrier_set()->barrier_set_nmethod()) {
+  }
+
+  virtual void do_nmethod(nmethod* nm) {
+    _bs->disarm(nm);
+  }
+};
+
+class ShenandoahDisarmNMethodsTask : public AbstractGangTask {
+private:
+  ShenandoahDisarmNMethodClosure      _cl;
+  ShenandoahConcurrentNMethodIterator _iterator;
+
+public:
+  ShenandoahDisarmNMethodsTask() :
+    AbstractGangTask("ShenandoahDisarmNMethodsTask"),
+    _iterator(ShenandoahCodeRoots::table()) {
+    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    _iterator.nmethods_do_begin();
+  }
+
+  ~ShenandoahDisarmNMethodsTask() {
+    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    _iterator.nmethods_do_end();
+  }
+
+  virtual void work(uint worker_id) {
+    _iterator.nmethods_do(&_cl);
+  }
+};
+
+void ShenandoahCodeRoots::disarm_nmethods() {
+  ShenandoahDisarmNMethodsTask task;
+  ShenandoahHeap::heap()->workers()->run_task(&task);
+}
+
 class ShenandoahNMethodUnlinkClosure : public NMethodClosure {
 private:
-  bool            _unloading_occurred;
-  volatile bool   _failed;
-  ShenandoahHeap* _heap;
+  bool                     _unloading_occurred;
+  volatile bool            _failed;
+  ShenandoahHeap* const    _heap;
+  BarrierSetNMethod* const _bs;
 
   void set_failed() {
     Atomic::store(&_failed, true);
@@ -201,7 +244,8 @@
   ShenandoahNMethodUnlinkClosure(bool unloading_occurred) :
       _unloading_occurred(unloading_occurred),
       _failed(false),
-      _heap(ShenandoahHeap::heap()) {}
+      _heap(ShenandoahHeap::heap()),
+      _bs(ShenandoahBarrierSet::barrier_set()->barrier_set_nmethod()) {}
 
   virtual void do_nmethod(nmethod* nm) {
     assert(_heap->is_concurrent_root_in_progress(), "Only this phase");
@@ -225,10 +269,10 @@
       ShenandoahReentrantLocker locker(nm_data->lock());
 
       // Heal oops and disarm
-      if (_heap->is_evacuation_in_progress()) {
+      if (_bs->is_armed(nm)) {
         ShenandoahNMethod::heal_nmethod(nm);
+        _bs->disarm(nm);
       }
-      ShenandoahNMethod::disarm_nmethod(nm);
 
       // Clear compiled ICs and exception caches
       if (!nm->unload_nmethod_caches(_unloading_occurred)) {
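The new disarm task follows HotSpot's closure-plus-gang-task idiom: a per-element closure, wrapped in a task whose `work()` each GC worker runs over a shared iterator. A minimal sketch of that shape with stand-in types (`Item`, `DisarmTask`) since AbstractGangTask, WorkGang, and the concurrent nmethod iterator are not reproduced here:

```cpp
#include <atomic>
#include <vector>

struct Item { bool armed = true; };        // stands in for an nmethod

class DisarmClosure {
public:
  void do_item(Item* it) { it->armed = false; }   // the per-element action
};

class DisarmTask {
  DisarmClosure       _cl;
  std::vector<Item*>& _items;
  std::atomic<size_t> _next{0};            // shared claim cursor
public:
  explicit DisarmTask(std::vector<Item*>& items) : _items(items) {}

  // Each worker claims indices until the cursor is exhausted, mirroring how
  // ShenandoahConcurrentNMethodIterator hands out nmethods to workers.
  void work(unsigned /*worker_id*/) {
    size_t i;
    while ((i = _next.fetch_add(1)) < _items.size()) {
      _cl.do_item(_items[i]);
    }
  }
};
```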
--- a/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.hpp	Mon Mar 16 17:10:26 2020 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.hpp	Tue Mar 17 14:13:52 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2019, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2017, 2020, Red Hat, Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -111,6 +111,7 @@
   static void unlink(WorkGang* workers, bool unloading_occurred);
   static void purge(WorkGang* workers);
   static void arm_nmethods();
+  static void disarm_nmethods();
 
   static int  disarmed_value()         { return _disarmed_value; }
   static int* disarmed_value_address() { return &_disarmed_value; }
--- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp	Mon Mar 16 17:10:26 2020 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp	Tue Mar 17 14:13:52 2020 +0000
@@ -180,19 +180,30 @@
   }
 };
 
-class ShenandoahSATBThreadsClosure : public ThreadClosure {
+class ShenandoahSATBAndRemarkCodeRootsThreadsClosure : public ThreadClosure {
 private:
   ShenandoahSATBBufferClosure* _satb_cl;
+  MarkingCodeBlobClosure*      _code_cl;
   uintx _claim_token;
 
 public:
-  ShenandoahSATBThreadsClosure(ShenandoahSATBBufferClosure* satb_cl) :
-    _satb_cl(satb_cl),
+  ShenandoahSATBAndRemarkCodeRootsThreadsClosure(ShenandoahSATBBufferClosure* satb_cl, MarkingCodeBlobClosure* code_cl) :
+    _satb_cl(satb_cl), _code_cl(code_cl),
     _claim_token(Threads::thread_claim_token()) {}
 
   void do_thread(Thread* thread) {
     if (thread->claim_threads_do(true, _claim_token)) {
       ShenandoahThreadLocalData::satb_mark_queue(thread).apply_closure_and_empty(_satb_cl);
+      if (_code_cl != NULL && thread->is_Java_thread()) {
+        // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
+        // however, oops reachable from nmethods have very complex lifecycles:
+        // * Alive if on the stack of an executing method
+        // * Weakly reachable otherwise
+        // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
+        // live by the SATB invariant, but other oops recorded in nmethods may behave differently.
+        JavaThread* jt = (JavaThread*)thread;
+        jt->nmethods_do(_code_cl);
+      }
     }
   }
 };
@@ -212,6 +223,14 @@
     ShenandoahHeap* heap = ShenandoahHeap::heap();
     ShenandoahParallelWorkerSession worker_session(worker_id);
 
+    ReferenceProcessor* rp;
+    if (heap->process_references()) {
+      rp = heap->ref_processor();
+      shenandoah_assert_rp_isalive_installed();
+    } else {
+      rp = NULL;
+    }
+
     // First drain remaining SATB buffers.
     // Notice that this is not strictly necessary for mark-compact. But since
     // it requires a StrongRootsScope around the task, we need to claim the
@@ -219,19 +238,27 @@
     // full-gc.
     {
       ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
+
       ShenandoahSATBBufferClosure cl(q);
       SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
       while (satb_mq_set.apply_closure_to_completed_buffer(&cl));
-      ShenandoahSATBThreadsClosure tc(&cl);
-      Threads::threads_do(&tc);
-    }
 
-    ReferenceProcessor* rp;
-    if (heap->process_references()) {
-      rp = heap->ref_processor();
-      shenandoah_assert_rp_isalive_installed();
-    } else {
-      rp = NULL;
+      if (heap->unload_classes() && !ShenandoahConcurrentRoots::can_do_concurrent_class_unloading()) {
+        if (heap->has_forwarded_objects()) {
+          ShenandoahMarkResolveRefsClosure resolve_mark_cl(q, rp);
+          MarkingCodeBlobClosure blobsCl(&resolve_mark_cl, !CodeBlobToOopClosure::FixRelocations);
+          ShenandoahSATBAndRemarkCodeRootsThreadsClosure tc(&cl, &blobsCl);
+          Threads::threads_do(&tc);
+        } else {
+          ShenandoahMarkRefsClosure mark_cl(q, rp);
+          MarkingCodeBlobClosure blobsCl(&mark_cl, !CodeBlobToOopClosure::FixRelocations);
+          ShenandoahSATBAndRemarkCodeRootsThreadsClosure tc(&cl, &blobsCl);
+          Threads::threads_do(&tc);
+        }
+      } else {
+        ShenandoahSATBAndRemarkCodeRootsThreadsClosure tc(&cl, NULL);
+        Threads::threads_do(&tc);
+      }
     }
 
     if (heap->is_degenerated_gc_in_progress()) {
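The thread closure above relies on `claim_threads_do` with a claim token so that, across all parallel workers, each thread is processed exactly once per pass. A sketch of the claim-token idea with stand-in types (not HotSpot's Threads API):

```cpp
#include <atomic>

// Bumped once per GC pass; workers all carry the current value.
static std::atomic<unsigned> g_claim_token{1};

struct ThreadStub {
  std::atomic<unsigned> claimed_token{0};

  // Returns true for exactly one caller per pass: the first worker to CAS the
  // thread's token up to the current pass token claims it.
  bool try_claim(unsigned token) {
    unsigned seen = claimed_token.load();
    return seen != token && claimed_token.compare_exchange_strong(seen, token);
  }
};

// Every worker runs this over all threads; the CAS guarantees each thread's
// SATB buffer (and, during remark, its on-stack nmethods) is visited once.
void worker_scan(ThreadStub* threads, size_t n, unsigned token) {
  for (size_t i = 0; i < n; i++) {
    if (threads[i].try_claim(token)) {
      /* drain SATB buffer, walk nmethods ... */
    }
  }
}
```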
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp	Mon Mar 16 17:10:26 2020 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp	Tue Mar 17 14:13:52 2020 +0000
@@ -1420,6 +1420,13 @@
   if (ShenandoahPacing) {
     pacer()->setup_for_mark();
   }
+
+  // Arm nmethods for concurrent marking. When an nmethod is about to be executed,
+  // we need to make sure that all its metadata are marked. The alternative is to
+  // remark thread roots at the final mark pause, but that can be a potential
+  // latency killer.
+  if (ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
+    ShenandoahCodeRoots::arm_nmethods();
+  }
 }
 
 void ShenandoahHeap::op_mark() {
@@ -1879,6 +1886,13 @@
         return;
       }
 
+      if (!has_forwarded_objects() && ShenandoahConcurrentRoots::can_do_concurrent_class_unloading()) {
+        // Disarm nmethods that were armed for concurrent mark. On a normal cycle,
+        // they would be disarmed while the conc-roots phase is running.
+        // TODO: Call op_conc_roots() here instead
+        ShenandoahCodeRoots::disarm_nmethods();
+      }
+
       op_cleanup();
 
     case _degenerated_evac:
@@ -2393,7 +2407,6 @@
       if (r->is_active() && !r->is_cset()) {
         _heap->marked_object_oop_iterate(r, &cl, update_watermark);
       }
-      r->set_update_watermark(r->bottom());
       if (ShenandoahPacing) {
         _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
       }
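Arming and disarming revolve around nmethod entry barriers: an "armed" guard value at the method entry forces the first caller into a slow path that fixes the method's embedded oops, after which the method is disarmed. A conceptual sketch with illustrative names (this is not HotSpot's BarrierSetNMethod API):

```cpp
#include <atomic>

struct NMethodStub {
  std::atomic<int> guard{1};         // armed: guard != disarmed value
};

static const int g_disarmed_value = 0;

inline bool is_armed(NMethodStub* nm) {
  return nm->guard.load(std::memory_order_acquire) != g_disarmed_value;
}

inline void disarm(NMethodStub* nm) {
  nm->guard.store(g_disarmed_value, std::memory_order_release);
}

// What the entry barrier conceptually does when an armed nmethod is entered:
void on_nmethod_entry(NMethodStub* nm) {
  if (is_armed(nm)) {
    /* heal/mark the oops embedded in the code ... */
    disarm(nm);                      // subsequent calls take the fast path
  }
}
```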
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp	Mon Mar 16 17:10:26 2020 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp	Tue Mar 17 14:13:52 2020 +0000
@@ -261,7 +261,7 @@
   volatile size_t _live_data;
   volatile size_t _critical_pins;
 
-  HeapWord* volatile _update_watermark;
+  HeapWord* _update_watermark;
 
   // Claim some space at the end to protect next region
   DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, 0);
@@ -431,13 +431,20 @@
   }
 
   HeapWord* get_update_watermark() const {
-    assert(bottom() <= _update_watermark && _update_watermark <= top(), "within bounds");
-    return Atomic::load_acquire(&_update_watermark);
+    // Updates to the update-watermark only happen at safepoints or, when pushing
+    // back the watermark for evacuation regions, under the Shenandoah heap-lock.
+    // Consequently, we should access the field under the same lock. However, since
+    // those updates are only monotonically increasing, possibly reading a stale value
+    // is merely conservative - we would not miss any updates.
+    HeapWord* watermark = _update_watermark;
+    assert(bottom() <= watermark && watermark <= top(), "within bounds");
+    return watermark;
   }
 
   void set_update_watermark(HeapWord* w) {
+    _heap->assert_heaplock_or_safepoint();
     assert(bottom() <= w && w <= top(), "within bounds");
-    Atomic::release_store(&_update_watermark, w);
+    _update_watermark = w;
   }
 
 private:
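The change trades acquire/release atomics for plain accesses because all writers hold the heap-lock (or run at a safepoint) and the value only grows, so a racy reader can at worst see an older-but-valid watermark. A sketch of that reasoning with stand-in types (HotSpot asserts lock ownership rather than taking the lock as shown here):

```cpp
#include <mutex>

struct Region {
  std::mutex heap_lock;                  // stands in for the Shenandoah heap-lock
  char* update_watermark = nullptr;

  void set_update_watermark(char* w) {
    std::lock_guard<std::mutex> g(heap_lock);  // writers are always serialized
    update_watermark = w;                // plain store; ordering comes from the lock
  }

  char* get_update_watermark() const {
    // Plain load: may be stale, but stale only means "scan a bit more",
    // never "skip something that needed scanning".
    return update_watermark;
  }
};
```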
--- a/src/hotspot/share/gc/shenandoah/shenandoahNMethod.cpp	Mon Mar 16 17:10:26 2020 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahNMethod.cpp	Tue Mar 17 14:13:52 2020 +0000
@@ -175,15 +175,54 @@
   return new ShenandoahNMethod(nm, oops, non_immediate_oops);
 }
 
+template <bool HAS_FWD>
+class ShenandoahKeepNMethodMetadataAliveClosure : public OopClosure {
+private:
+  ShenandoahBarrierSet* const _bs;
+
+public:
+  ShenandoahKeepNMethodMetadataAliveClosure() :
+    _bs(static_cast<ShenandoahBarrierSet*>(BarrierSet::barrier_set())) {
+  }
+
+  virtual void do_oop(oop* p) {
+    oop obj = RawAccess<>::oop_load(p);
+    if (!CompressedOops::is_null(obj)) {
+      if (HAS_FWD) {
+        obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
+      }
+      _bs->enqueue(obj);
+    }
+  }
+
+  virtual void do_oop(narrowOop* p) {
+    ShouldNotReachHere();
+  }
+};
+
 void ShenandoahNMethod::heal_nmethod(nmethod* nm) {
-  assert(ShenandoahHeap::heap()->is_concurrent_root_in_progress(), "Only this phase");
   ShenandoahNMethod* data = gc_data(nm);
   assert(data != NULL, "Sanity");
   assert(data->lock()->owned_by_self(), "Must hold the lock");
 
-  ShenandoahEvacOOMScope evac_scope;
-  ShenandoahEvacuateUpdateRootsClosure<> cl;
-  data->oops_do(&cl, true /*fix relocation*/);
+  ShenandoahHeap* const heap = ShenandoahHeap::heap();
+  if (heap->is_concurrent_mark_in_progress()) {
+    if (heap->has_forwarded_objects()) {
+      ShenandoahKeepNMethodMetadataAliveClosure<true> cl;
+      data->oops_do(&cl);
+    } else {
+      ShenandoahKeepNMethodMetadataAliveClosure<false> cl;
+      data->oops_do(&cl);
+    }
+  } else if (heap->is_concurrent_root_in_progress()) {
+    ShenandoahEvacOOMScope evac_scope;
+    ShenandoahEvacuateUpdateRootsClosure<> cl;
+    data->oops_do(&cl, true /*fix relocation*/);
+  } else {
+    // There is a possibility that GC is cancelled as it arrives at final mark.
+    // In this case, the concurrent root phase is skipped and a degenerated GC
+    // should follow, where nmethods are disarmed.
+    assert(heap->cancelled_gc(), "What else?");
+  }
 }
 
 #ifdef ASSERT
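The keep-alive closure uses a bool template parameter so the forwarding check is resolved at compile time and the hot per-oop loop carries no runtime flag. A sketch of that pattern with stand-in types for HotSpot's oop machinery:

```cpp
struct Obj { Obj* fwd = nullptr; };    // stands in for an oop with a forwarding pointer

inline Obj* resolve_forwarded(Obj* o) { return o->fwd != nullptr ? o->fwd : o; }

inline void enqueue_for_marking(Obj*) {
  /* in the real VM: push onto the SATB queue so the object is treated as live */
}

template <bool HAS_FWD>
void keep_alive(Obj** p) {
  Obj* obj = *p;
  if (obj != nullptr) {
    if (HAS_FWD) {                     // branch eliminated when HAS_FWD is false
      obj = resolve_forwarded(obj);
    }
    enqueue_for_marking(obj);
  }
}

// Callers pick the specialization once, outside the loop, e.g.:
//   has_forwarded ? walk_oops(keep_alive<true>) : walk_oops(keep_alive<false>);
```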
--- a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp	Mon Mar 16 17:10:26 2020 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp	Tue Mar 17 14:13:52 2020 +0000
@@ -181,12 +181,12 @@
 
 ShenandoahRootEvacuator::ShenandoahRootEvacuator(uint n_workers,
                                                  ShenandoahPhaseTimings::Phase phase,
-                                                 bool include_concurrent_roots,
-                                                 bool include_concurrent_code_roots) :
+                                                 bool stw_roots_processing,
+                                                 bool stw_class_unloading) :
   ShenandoahRootProcessor(phase),
   _thread_roots(n_workers > 1),
-  _include_concurrent_roots(include_concurrent_roots),
-  _include_concurrent_code_roots(include_concurrent_code_roots) {
+  _stw_roots_processing(stw_roots_processing),
+  _stw_class_unloading(stw_class_unloading) {
 }
 
 void ShenandoahRootEvacuator::roots_do(uint worker_id, OopClosure* oops) {
@@ -199,15 +199,15 @@
   _serial_roots.oops_do(oops, worker_id);
   _serial_weak_roots.weak_oops_do(oops, worker_id);
 
-  if (_include_concurrent_roots) {
-    CLDToOopClosure clds(oops, ClassLoaderData::_claim_strong);
+  if (_stw_roots_processing) {
     _vm_roots.oops_do<OopClosure>(oops, worker_id);
-    _cld_roots.cld_do(&clds, worker_id);
     _weak_roots.oops_do<OopClosure>(oops, worker_id);
     _dedup_roots.oops_do(&always_true, oops, worker_id);
   }
 
-  if (_include_concurrent_code_roots) {
+  if (_stw_class_unloading) {
+    CLDToOopClosure clds(oops, ClassLoaderData::_claim_strong);
+    _cld_roots.cld_do(&clds, worker_id);
     _code_roots.code_blobs_do(codes_cl, worker_id);
     _thread_roots.oops_do(oops, NULL, worker_id);
   } else {
--- a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.hpp	Mon Mar 16 17:10:26 2020 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.hpp	Tue Mar 17 14:13:52 2020 +0000
@@ -288,11 +288,11 @@
   ShenandoahWeakRoots<false /*concurrent*/> _weak_roots;
   ShenandoahStringDedupRoots                _dedup_roots;
   ShenandoahCodeCacheRoots<ShenandoahAllCodeRootsIterator> _code_roots;
-  bool _include_concurrent_roots;
-  bool _include_concurrent_code_roots;
+  bool _stw_roots_processing;
+  bool _stw_class_unloading;
 
 public:
   ShenandoahRootEvacuator(uint n_workers, ShenandoahPhaseTimings::Phase phase,
-                          bool include_concurrent_roots, bool _include_concurrent_code_roots);
+                          bool stw_roots_processing, bool stw_class_unloading);
 
   void roots_do(uint worker_id, OopClosure* oops);
 };
--- a/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.cpp	Mon Mar 16 17:10:26 2020 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.cpp	Tue Mar 17 14:13:52 2020 +0000
@@ -255,15 +255,16 @@
 
       // Step 1: Process GC roots.
       // For oops in code roots, they are marked, evacuated, enqueued for further traversal,
-      // and the references to the oops are updated during init pause. New nmethods are handled
-      // in similar way during nmethod-register process. Therefore, we don't need to rescan code
-      // roots here.
+      // and the references to the oops are updated during the init pause. We only need to
+      // rescan on-stack code roots when class unloading is enabled. Otherwise, code roots are
+      // scanned during the init traversal, or a degenerated GC will update them at the end.
       if (!_heap->is_degenerated_gc_in_progress()) {
         ShenandoahTraversalRootsClosure roots_cl(q, rp);
         ShenandoahTraversalSATBThreadsClosure tc(&satb_cl);
         if (unload_classes) {
           ShenandoahRemarkCLDClosure remark_cld_cl(&roots_cl);
-          _rp->strong_roots_do(worker_id, &roots_cl, &remark_cld_cl, NULL, &tc);
+          MarkingCodeBlobClosure code_cl(&roots_cl, CodeBlobToOopClosure::FixRelocations);
+          _rp->strong_roots_do(worker_id, &roots_cl, &remark_cld_cl, &code_cl, &tc);
         } else {
           CLDToOopClosure cld_cl(&roots_cl, ClassLoaderData::_claim_strong);
           _rp->roots_do(worker_id, &roots_cl, &cld_cl, NULL, &tc);
--- a/src/hotspot/share/gc/shenandoah/shenandoahUnload.cpp	Mon Mar 16 17:10:26 2020 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahUnload.cpp	Tue Mar 17 14:13:52 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2019, 2020, Red Hat, Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -45,12 +45,12 @@
 
 class ShenandoahIsUnloadingOopClosure : public OopClosure {
 private:
-  ShenandoahMarkingContext* _marking_context;
-  bool _is_unloading;
+  ShenandoahMarkingContext* const _marking_context;
+  bool                            _is_unloading;
 
 public:
   ShenandoahIsUnloadingOopClosure() :
-    _marking_context(ShenandoahHeap::heap()->marking_context()),
+    _marking_context(ShenandoahHeap::heap()->complete_marking_context()),
     _is_unloading(false) {
   }
 
@@ -61,7 +61,6 @@
     const oop o = RawAccess<>::oop_load(p);
     if (!CompressedOops::is_null(o) &&
-        _marking_context->is_complete() &&
         !_marking_context->is_marked(o)) {
       _is_unloading = true;
     }
@@ -80,7 +79,7 @@
 public:
   virtual bool is_unloading(CompiledMethod* method) const {
     nmethod* const nm = method->as_nmethod();
-    guarantee(ShenandoahHeap::heap()->is_concurrent_root_in_progress(), "Only this phase");
+    assert(ShenandoahHeap::heap()->is_concurrent_root_in_progress(), "Only for this phase");
     ShenandoahNMethod* data = ShenandoahNMethod::gc_data(nm);
     ShenandoahReentrantLocker locker(data->lock());
     ShenandoahIsUnloadingOopClosure cl;
--- a/src/hotspot/share/opto/constantTable.cpp	Mon Mar 16 17:10:26 2020 +0000
+++ b/src/hotspot/share/opto/constantTable.cpp	Tue Mar 17 14:13:52 2020 +0000
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "asm/codeBuffer.hpp"
+#include "asm/macroAssembler.hpp"
 #include "opto/block.hpp"
 #include "opto/constantTable.hpp"
 #include "opto/machnode.hpp"