changeset 57824:5a7864630f91
8237637: Remove dubious type conversions from oop
Reviewed-by: kbarrett, dholmes, mdoerr
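
The pattern throughout this changeset is mechanical: every C-style cast that turns an oop into a raw pointer or integral type ((HeapWord*)obj, (address)obj, (jobject)obj, ...) is replaced with the explicit cast_from_oop<T>() funnel, and the implicit conversion operators that let those casts compile are deleted from the oop wrapper class. A minimal standalone sketch of the idiom follows — simplified typedefs, not the real HotSpot headers; the actual definition is in the oopsHierarchy.hpp hunk below and also covers CHECK_UNHANDLED_OOPS builds, where oop is a wrapper class rather than a raw pointer:

// Standalone model of the cast_from_oop<T>() idiom -- just enough
// scaffolding to show the call-site transformation.
#include <cstdint>

class oopDesc;                 // opaque object header; never defined here
typedef oopDesc* oop;          // product builds: oop is a plain pointer
class HeapWordImpl;            // HotSpot models heap words as pointers to an opaque type
typedef HeapWordImpl* HeapWord;

// The single sanctioned funnel for oop -> pointer/integral conversions.
// The real version casts through oopDesc* (see the oopsHierarchy.hpp hunk),
// so it still works when oop is a wrapper class instead of a typedef.
template <class T>
inline T cast_from_oop(oop o) {
  return (T)(oopDesc*)o;
}

void example(oop obj) {
  // Before this changeset:  HeapWord* addr = (HeapWord*)obj;
  HeapWord* addr = cast_from_oop<HeapWord*>(obj);
  // Before this changeset:  uintptr_t bits = (uintptr_t)obj;
  uintptr_t bits = cast_from_oop<uintptr_t>(obj);
  (void)addr; (void)bits;
}
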
--- a/src/hotspot/cpu/aarch64/jvmciCodeInstaller_aarch64.cpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/cpu/aarch64/jvmciCodeInstaller_aarch64.cpp	Fri Jan 24 09:27:07 2020 +0100
@@ -61,7 +61,7 @@
 #endif // ASSERT
     Handle obj = jvmci_env()->asConstant(constant, JVMCI_CHECK);
     jobject value = JNIHandles::make_local(obj());
-    MacroAssembler::patch_oop(pc, (address)obj());
+    MacroAssembler::patch_oop(pc, cast_from_oop<address>(obj()));
     int oop_index = _oop_recorder->find_index(value);
     RelocationHolder rspec = oop_Relocation::spec(oop_index);
     _instructions->relocate(pc, rspec);
--- a/src/hotspot/cpu/s390/assembler_s390.hpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/cpu/s390/assembler_s390.hpp	Fri Jan 24 09:27:07 2020 +0100
@@ -351,14 +351,6 @@
     : _address((address) addr),
       _rspec(rspec_from_rtype(rtype, (address) addr)) {}
 
-  AddressLiteral(oop addr, relocInfo::relocType rtype = relocInfo::none)
-    : _address((address) addr),
-      _rspec(rspec_from_rtype(rtype, (address) addr)) {}
-
-  AddressLiteral(oop* addr, relocInfo::relocType rtype = relocInfo::none)
-    : _address((address) addr),
-      _rspec(rspec_from_rtype(rtype, (address) addr)) {}
-
   AddressLiteral(float* addr, relocInfo::relocType rtype = relocInfo::none)
     : _address((address) addr),
       _rspec(rspec_from_rtype(rtype, (address) addr)) {}
@@ -390,7 +382,6 @@
 
  public:
   ExternalAddress(address target) : AddressLiteral(target, reloc_for_target(          target)) {}
-  ExternalAddress(oop* target)    : AddressLiteral(target, reloc_for_target((address) target)) {}
 };
 
 // Argument is an abstraction used to represent an outgoing actual
--- a/src/hotspot/share/ci/ciInstanceKlass.cpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/ci/ciInstanceKlass.cpp	Fri Jan 24 09:27:07 2020 +0100
@@ -315,7 +315,7 @@
 // Implementation of the print method.
 void ciInstanceKlass::print_impl(outputStream* st) {
   ciKlass::print_impl(st);
-  GUARDED_VM_ENTRY(st->print(" loader=" INTPTR_FORMAT, p2i((address)loader()));)
+  GUARDED_VM_ENTRY(st->print(" loader=" INTPTR_FORMAT, p2i(loader()));)
   if (is_loaded()) {
     st->print(" loaded=true initialized=%s finalized=%s subklass=%s size=%d flags=",
               bool_to_str(is_initialized()),
--- a/src/hotspot/share/code/nmethod.cpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/code/nmethod.cpp	Fri Jan 24 09:27:07 2020 +0100
@@ -1047,7 +1047,7 @@
       oop_Relocation* reloc = iter.oop_reloc();
       if (initialize_immediates && reloc->oop_is_immediate()) {
         oop* dest = reloc->oop_addr();
-        initialize_immediate_oop(dest, (jobject) *dest);
+        initialize_immediate_oop(dest, cast_from_oop<jobject>(*dest));
       }
       // Refresh the oop-related bits of this instruction.
       reloc->fix_oop_relocation();
--- a/src/hotspot/share/code/relocInfo.hpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/code/relocInfo.hpp	Fri Jan 24 09:27:07 2020 +0100
@@ -935,7 +935,7 @@
 
   void verify_oop_relocation();
 
-  address value() { return (address) *oop_addr(); }
+  address value() { return cast_from_oop<address>(*oop_addr()); }
 
   bool oop_is_immediate() { return oop_index() == 0; }
 
--- a/src/hotspot/share/compiler/oopMap.cpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/compiler/oopMap.cpp	Fri Jan 24 09:27:07 2020 +0100
@@ -367,7 +367,7 @@
         omv.print();
         tty->print_cr("register r");
         omv.reg()->print();
-        tty->print_cr("loc = %p *loc = %p\n", loc, (address)*loc);
+        tty->print_cr("loc = %p *loc = %p\n", loc, cast_from_oop<address>(*loc));
         // do the real assert.
         assert(Universe::heap()->is_in_or_null(*loc), "found non oop pointer");
       }
@@ -770,7 +770,7 @@
       "Add derived pointer@" INTPTR_FORMAT
       " - Derived: " INTPTR_FORMAT " Base: " INTPTR_FORMAT " (@" INTPTR_FORMAT ") (Offset: " INTX_FORMAT ")",
-      p2i(derived_loc), p2i((address)*derived_loc), p2i((address)*base_loc), p2i(base_loc), offset
+      p2i(derived_loc), p2i(*derived_loc), p2i(*base_loc), p2i(base_loc), offset
     );
   }
   // Set derived oop location to point to base.
@@ -792,13 +792,13 @@
     oop base = **(oop**)derived_loc;
     assert(Universe::heap()->is_in_or_null(base), "must be an oop");
 
-    *derived_loc = (oop)(((address)base) + offset);
+    *derived_loc = (oop)(cast_from_oop<address>(base) + offset);
     assert(value_of_loc(derived_loc) - value_of_loc(&base) == offset, "sanity check");
 
     if (TraceDerivedPointers) {
       tty->print_cr("Updating derived pointer@" INTPTR_FORMAT
                     " - Derived: " INTPTR_FORMAT " Base: " INTPTR_FORMAT " (Offset: " INTX_FORMAT ")",
-                    p2i(derived_loc), p2i((address)*derived_loc), p2i((address)base), offset);
+                    p2i(derived_loc), p2i(*derived_loc), p2i(base), offset);
     }
 
     // Delete entry
--- a/src/hotspot/share/gc/g1/g1Allocator.inline.hpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/g1/g1Allocator.inline.hpp	Fri Jan 24 09:27:07 2020 +0100
@@ -158,11 +158,11 @@
 
 // Check if an object is in a closed archive region using the _archive_region_map.
 inline bool G1ArchiveAllocator::in_closed_archive_range(oop object) {
-  return _archive_region_map.get_by_address((HeapWord*)object) == G1ArchiveRegionMap::ClosedArchive;
+  return _archive_region_map.get_by_address(cast_from_oop<HeapWord*>(object)) == G1ArchiveRegionMap::ClosedArchive;
 }
 
 inline bool G1ArchiveAllocator::in_open_archive_range(oop object) {
-  return _archive_region_map.get_by_address((HeapWord*)object) == G1ArchiveRegionMap::OpenArchive;
+  return _archive_region_map.get_by_address(cast_from_oop<HeapWord*>(object)) == G1ArchiveRegionMap::OpenArchive;
 }
 
 // Check if archive object checking is enabled, to avoid calling in_open/closed_archive_range
@@ -181,7 +181,7 @@
 
 inline bool G1ArchiveAllocator::is_archived_object(oop object) {
   return archive_check_enabled() &&
-         (_archive_region_map.get_by_address((HeapWord*)object) != G1ArchiveRegionMap::NoArchive);
+         (_archive_region_map.get_by_address(cast_from_oop<HeapWord*>(object)) != G1ArchiveRegionMap::NoArchive);
 }
 
 #endif // SHARE_GC_G1_G1ALLOCATOR_INLINE_HPP
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp	Fri Jan 24 09:27:07 2020 +0100
@@ -33,6 +33,7 @@
 #include "gc/g1/heapRegionManager.inline.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "gc/g1/heapRegionSet.inline.hpp"
+#include "gc/shared/markBitMap.inline.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
 
 G1GCPhaseTimes* G1CollectedHeap::phase_times() const {
@@ -89,7 +90,7 @@
   assert(is_in_g1_reserved((const void*) addr),
          "Address " PTR_FORMAT " is outside of the heap ranging from [" PTR_FORMAT " to " PTR_FORMAT ")",
          p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end()));
-  return _hrm->addr_to_region((HeapWord*) addr);
+  return _hrm->addr_to_region((HeapWord*)(void*) addr);
 }
 
 template <class T>
@@ -143,11 +144,11 @@
 }
 
 inline bool G1CollectedHeap::is_marked_next(oop obj) const {
-  return _cm->next_mark_bitmap()->is_marked((HeapWord*)obj);
+  return _cm->next_mark_bitmap()->is_marked(obj);
 }
 
 inline bool G1CollectedHeap::is_in_cset(oop obj) {
-  return is_in_cset((HeapWord*)obj);
+  return is_in_cset(cast_from_oop<HeapWord*>(obj));
 }
 
 inline bool G1CollectedHeap::is_in_cset(HeapWord* addr) {
@@ -159,7 +160,7 @@
 }
 
 bool G1CollectedHeap::is_in_cset_or_humongous(const oop obj) {
-  return _region_attr.is_in_cset_or_humongous((HeapWord*)obj);
+  return _region_attr.is_in_cset_or_humongous(cast_from_oop<HeapWord*>(obj));
 }
 
 G1HeapRegionAttr G1CollectedHeap::region_attr(const void* addr) const {
@@ -303,7 +304,7 @@
 }
 
 inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
-  uint region = addr_to_region((HeapWord*)obj);
+  uint region = addr_to_region(cast_from_oop<HeapWord*>(obj));
   // Clear the flag in the humongous_reclaim_candidates table. Also
   // reset the entry in the region attribute table so that subsequent references
   // to the same humongous object do not go into the slow path again.
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp	Fri Jan 24 09:27:07 2020 +0100
@@ -1728,9 +1728,8 @@
   G1ObjectCountIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) { }
 
   bool do_object_b(oop obj) {
-    HeapWord* addr = (HeapWord*)obj;
-    return addr != NULL &&
-           (!_g1h->is_in_g1_reserved(addr) || !_g1h->is_obj_dead(obj));
+    return obj != NULL &&
+           (!_g1h->is_in_g1_reserved(obj) || !_g1h->is_obj_dead(obj));
   }
 };
 
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp	Fri Jan 24 09:27:07 2020 +0100
@@ -72,9 +72,7 @@
   // Can't assert that this is a valid object at this point, since it might be in the process of being copied by another thread.
   assert(!hr->is_continues_humongous(), "Should not try to mark object " PTR_FORMAT " in Humongous continues region %u above nTAMS " PTR_FORMAT, p2i(obj), hr->hrm_index(), p2i(hr->next_top_at_mark_start()));
 
-  HeapWord* const obj_addr = (HeapWord*)obj;
-
-  bool success = _next_mark_bitmap->par_mark(obj_addr);
+  bool success = _next_mark_bitmap->par_mark(obj);
   if (success) {
     add_to_liveness(worker_id, obj, obj->size());
   }
@@ -112,7 +110,7 @@
   assert(task_entry.is_array_slice() || !_g1h->is_on_master_free_list(
               _g1h->heap_region_containing(task_entry.obj())), "invariant");
   assert(task_entry.is_array_slice() || !_g1h->is_obj_ill(task_entry.obj()), "invariant");  // FIXME!!!
-  assert(task_entry.is_array_slice() || _next_mark_bitmap->is_marked((HeapWord*)task_entry.obj()), "invariant");
+  assert(task_entry.is_array_slice() || _next_mark_bitmap->is_marked(cast_from_oop<HeapWord*>(task_entry.obj())), "invariant");
 
   if (!_task_queue->push(task_entry)) {
     // The local task queue looks full. We need to push some entries
@@ -135,7 +133,7 @@
   // of checking both vs only checking the global finger is that the
   // local check will be more accurate and so result in fewer pushes,
   // but may also be a little slower.
-  HeapWord* objAddr = (HeapWord*)obj;
+  HeapWord* objAddr = cast_from_oop<HeapWord*>(obj);
   if (_finger != NULL) {
     // We have a current region.
 
@@ -160,7 +158,7 @@
 template<bool scan>
 inline void G1CMTask::process_grey_task_entry(G1TaskQueueEntry task_entry) {
   assert(scan || (task_entry.is_oop() && task_entry.obj()->is_typeArray()), "Skipping scan of grey non-typeArray");
-  assert(task_entry.is_array_slice() || _next_mark_bitmap->is_marked((HeapWord*)task_entry.obj()),
+  assert(task_entry.is_array_slice() || _next_mark_bitmap->is_marked(cast_from_oop<HeapWord*>(task_entry.obj())),
          "Any stolen object should be a slice or marked");
 
   if (scan) {
@@ -203,7 +201,7 @@
 }
 
 inline void G1CMTask::update_liveness(oop const obj, const size_t obj_size) {
-  _mark_stats_cache.add_live_words(_g1h->addr_to_region((HeapWord*)obj), obj_size);
+  _mark_stats_cache.add_live_words(_g1h->addr_to_region(cast_from_oop<HeapWord*>(obj)), obj_size);
 }
 
 inline void G1ConcurrentMark::add_to_liveness(uint worker_id, oop const obj, size_t size) {
@@ -270,18 +268,18 @@
 }
 
 inline void G1ConcurrentMark::mark_in_prev_bitmap(oop p) {
-  assert(!_prev_mark_bitmap->is_marked((HeapWord*) p), "sanity");
-  _prev_mark_bitmap->mark((HeapWord*) p);
+  assert(!_prev_mark_bitmap->is_marked(p), "sanity");
+  _prev_mark_bitmap->mark(p);
 }
 
 bool G1ConcurrentMark::is_marked_in_prev_bitmap(oop p) const {
   assert(p != NULL && oopDesc::is_oop(p), "expected an oop");
-  return _prev_mark_bitmap->is_marked((HeapWord*)p);
+  return _prev_mark_bitmap->is_marked(cast_from_oop<HeapWord*>(p));
 }
 
 bool G1ConcurrentMark::is_marked_in_next_bitmap(oop p) const {
   assert(p != NULL && oopDesc::is_oop(p), "expected an oop");
-  return _next_mark_bitmap->is_marked((HeapWord*)p);
+  return _next_mark_bitmap->is_marked(cast_from_oop<HeapWord*>(p));
 }
 
 inline bool G1ConcurrentMark::do_yield_check() {
--- a/src/hotspot/share/gc/g1/g1ConcurrentMarkObjArrayProcessor.cpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMarkObjArrayProcessor.cpp	Fri Jan 24 09:27:07 2020 +0100
@@ -45,7 +45,7 @@
 size_t G1CMObjArrayProcessor::process_obj(oop obj) {
   assert(should_be_sliced(obj), "Must be an array object %d and large " SIZE_FORMAT, obj->is_objArray(), (size_t)obj->size());
 
-  return process_array_slice(objArrayOop(obj), (HeapWord*)obj, (size_t)objArrayOop(obj)->size());
+  return process_array_slice(objArrayOop(obj), cast_from_oop<HeapWord*>(obj), (size_t)objArrayOop(obj)->size());
 }
 
 size_t G1CMObjArrayProcessor::process_slice(HeapWord* slice) {
--- a/src/hotspot/share/gc/g1/g1EvacFailure.cpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/g1/g1EvacFailure.cpp	Fri Jan 24 09:27:07 2020 +0100
@@ -106,7 +106,7 @@
   // as they have either been dead or evacuated (which are unreferenced now, i.e.
   // dead too) already.
   void do_object(oop obj) {
-    HeapWord* obj_addr = (HeapWord*) obj;
+    HeapWord* obj_addr = cast_from_oop<HeapWord*>(obj);
     assert(_hr->is_in(obj_addr), "sanity");
 
     if (obj->is_forwarded() && obj->forwardee() == obj) {
--- a/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp	Fri Jan 24 09:27:07 2020 +0100
@@ -61,14 +61,14 @@
 
 size_t G1FullGCCompactTask::G1CompactRegionClosure::apply(oop obj) {
   size_t size = obj->size();
-  HeapWord* destination = (HeapWord*)obj->forwardee();
+  HeapWord* destination = cast_from_oop<HeapWord*>(obj->forwardee());
   if (destination == NULL) {
     // Object not moving
     return size;
   }
 
   // copy object and reinit its mark
-  HeapWord* obj_addr = (HeapWord*) obj;
+  HeapWord* obj_addr = cast_from_oop<HeapWord*>(obj);
   assert(obj_addr != destination, "everything in this pass should be moving");
   Copy::aligned_conjoint_words(obj_addr, destination, size);
   oop(destination)->init_mark_raw();
--- a/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp	Fri Jan 24 09:27:07 2020 +0100
@@ -102,7 +102,7 @@
   }
 
   // Store a forwarding pointer if the object should be moved.
-  if ((HeapWord*)object != _compaction_top) {
+  if (cast_from_oop<HeapWord*>(object) != _compaction_top) {
     object->forward_to(oop(_compaction_top));
   } else {
     if (object->forwardee() != NULL) {
--- a/src/hotspot/share/gc/g1/g1FullGCOopClosures.cpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/g1/g1FullGCOopClosures.cpp	Fri Jan 24 09:27:07 2020 +0100
@@ -77,7 +77,7 @@
               p2i(obj));
     } else {
       HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
-      HeapRegion* to = _g1h->heap_region_containing((HeapWord*)obj);
+      HeapRegion* to = _g1h->heap_region_containing(obj);
       yy.print_cr("Field " PTR_FORMAT " of live obj " PTR_FORMAT " in region " HR_FORMAT,
                   p2i(p), p2i(_containing_obj), HR_FORMAT_PARAMS(from));
       print_object(&yy, _containing_obj);
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp	Fri Jan 24 09:27:07 2020 +0100
@@ -284,7 +284,7 @@
   const oop obj = oop(obj_ptr);
   const oop forward_ptr = old->forward_to_atomic(obj, old_mark, memory_order_relaxed);
   if (forward_ptr == NULL) {
-    Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
+    Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), obj_ptr, word_sz);
 
     const uint young_index = from_region->young_index_in_cset();
 
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp	Fri Jan 24 09:27:07 2020 +0100
@@ -129,7 +129,7 @@
     assert(!_g1h->heap_region_containing(p)->is_young(), "Should have filtered out from-young references already.");
 
 #ifdef ASSERT
-    HeapRegion* const hr_obj = _g1h->heap_region_containing((HeapWord*)o);
+    HeapRegion* const hr_obj = _g1h->heap_region_containing(o);
     assert(region_attr.needs_remset_update() == hr_obj->rem_set()->is_tracked(),
            "State flag indicating remset tracking disagrees (%s) with actual remembered set (%s) for region %u",
            BOOL_TO_STR(region_attr.needs_remset_update()),
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp	Fri Jan 24 09:27:07 2020 +0100
@@ -46,7 +46,7 @@
   // as they are not added to the collection set due to above precondition.
   assert(!region_attr.is_humongous(),
          "Obj " PTR_FORMAT " should not refer to humongous region %u from " PTR_FORMAT,
-         p2i(obj), _g1h->addr_to_region((HeapWord*)obj), p2i(p));
+         p2i(obj), _g1h->addr_to_region(cast_from_oop<HeapWord*>(obj)), p2i(p));
 
   if (!region_attr.is_in_cset()) {
     // In this case somebody else already did all the work.
--- a/src/hotspot/share/gc/g1/g1RemSet.cpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/g1/g1RemSet.cpp	Fri Jan 24 09:27:07 2020 +0100
@@ -1467,14 +1467,14 @@
     size_t const obj_size = obj->size();
     // All non-objArrays and objArrays completely within the mr
     // can be scanned without passing the mr.
-    if (!obj->is_objArray() || mr.contains(MemRegion((HeapWord*)obj, obj_size))) {
+    if (!obj->is_objArray() || mr.contains(MemRegion(cast_from_oop<HeapWord*>(obj), obj_size))) {
       obj->oop_iterate(&_update_cl);
       return obj_size;
     }
     // This path is for objArrays crossing the given MemRegion. Only scan the
     // area within the MemRegion.
     obj->oop_iterate(&_update_cl, mr);
-    return mr.intersection(MemRegion((HeapWord*)obj, obj_size)).word_size();
+    return mr.intersection(MemRegion(cast_from_oop<HeapWord*>(obj), obj_size)).word_size();
   }
 
   // A humongous object is live (with respect to the scanning) either
@@ -1579,7 +1579,7 @@
       assert(hr->top() == top_at_mark_start || hr->top() == top_at_rebuild_start,
              "More than one object in the humongous region?");
       humongous_obj->oop_iterate(&_update_cl, mr);
-      return top_at_mark_start != hr->bottom() ? mr.intersection(MemRegion((HeapWord*)humongous_obj, humongous_obj->size())).byte_size() : 0;
+      return top_at_mark_start != hr->bottom() ? mr.intersection(MemRegion(cast_from_oop<HeapWord*>(humongous_obj), humongous_obj->size())).byte_size() : 0;
     } else {
       return 0;
     }
@@ -1588,7 +1588,7 @@
   for (LiveObjIterator it(bitmap, top_at_mark_start, mr, hr->block_start(mr.start())); it.has_next(); it.move_to_next()) {
     oop obj = it.next();
     size_t scanned_size = scan_for_references(obj, mr);
-    if ((HeapWord*)obj < top_at_mark_start) {
+    if (cast_from_oop<HeapWord*>(obj) < top_at_mark_start) {
       marked_words += scanned_size;
     }
   }
--- a/src/hotspot/share/gc/g1/heapRegion.cpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/g1/heapRegion.cpp	Fri Jan 24 09:27:07 2020 +0100
@@ -357,7 +357,7 @@
     // current region. We only look at those which are.
     if (_hr->is_in(obj)) {
       // Object is in the region. Check that its less than top
-      if (_hr->top() <= (HeapWord*)obj) {
+      if (_hr->top() <= cast_from_oop<HeapWord*>(obj)) {
         // Object is above top
         log_error(gc, verify)("Object " PTR_FORMAT " in region " HR_FORMAT " is above top ",
                               p2i(obj), HR_FORMAT_PARAMS(_hr));
@@ -566,7 +566,7 @@
                   p2i(obj), HR_FORMAT_PARAMS(to), to->rem_set()->get_state_str());
       } else {
         HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
-        HeapRegion* to = _g1h->heap_region_containing((HeapWord*)obj);
+        HeapRegion* to = _g1h->heap_region_containing(obj);
         log.error("Field " PTR_FORMAT " of live obj " PTR_FORMAT " in region " HR_FORMAT,
                   p2i(p), p2i(_containing_obj), HR_FORMAT_PARAMS(from));
         LogStream ls(log.error());
@@ -737,7 +737,7 @@
 
   if (is_region_humongous) {
     oop obj = oop(this->humongous_start_region()->bottom());
-    if ((HeapWord*)obj > bottom() || (HeapWord*)obj + obj->size() < bottom()) {
+    if (cast_from_oop<HeapWord*>(obj) > bottom() || cast_from_oop<HeapWord*>(obj) + obj->size() < bottom()) {
       log_error(gc, verify)("this humongous region is not part of its' humongous object " PTR_FORMAT, p2i(obj));
       *failures = true;
       return;
--- a/src/hotspot/share/gc/g1/heapRegion.hpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/g1/heapRegion.hpp	Fri Jan 24 09:27:07 2020 +0100
@@ -554,10 +554,10 @@
   // mark performed by the collector. This returns true iff the object
   // is within the unmarked area of the region.
   bool obj_allocated_since_prev_marking(oop obj) const {
-    return (HeapWord *) obj >= prev_top_at_mark_start();
+    return cast_from_oop<HeapWord*>(obj) >= prev_top_at_mark_start();
   }
   bool obj_allocated_since_next_marking(oop obj) const {
-    return (HeapWord *) obj >= next_top_at_mark_start();
+    return cast_from_oop<HeapWord*>(obj) >= next_top_at_mark_start();
   }
 
   // Update the region state after a failed evacuation.
--- a/src/hotspot/share/gc/g1/heapRegion.inline.hpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/g1/heapRegion.inline.hpp	Fri Jan 24 09:27:07 2020 +0100
@@ -116,7 +116,7 @@
 }
 
 inline bool HeapRegion::is_obj_dead_with_size(const oop obj, const G1CMBitMap* const prev_bitmap, size_t* size) const {
-  HeapWord* addr = (HeapWord*) obj;
+  HeapWord* addr = cast_from_oop<HeapWord*>(obj);
   assert(addr < top(), "must be");
 
   assert(!is_closed_archive(),
@@ -165,7 +165,7 @@
 inline bool HeapRegion::is_obj_dead(const oop obj, const G1CMBitMap* const prev_bitmap) const {
   assert(is_in_reserved(obj), "Object " PTR_FORMAT " must be in region", p2i(obj));
   return !obj_allocated_since_prev_marking(obj) &&
-         !prev_bitmap->is_marked((HeapWord*)obj) &&
+         !prev_bitmap->is_marked(obj) &&
          !is_open_archive();
 }
 
@@ -299,7 +299,7 @@
       // We have scanned to the end of the object, but since there can be no objects
       // after this humongous object in the region, we can return the end of the
      // region if it is greater.
-      return MAX2((HeapWord*)obj + size, mr.end());
+      return MAX2(cast_from_oop<HeapWord*>(obj) + size, mr.end());
     }
   }
 
@@ -358,7 +358,7 @@
     // start, in which case we need to iterate over them in full.
     // objArrays are precisely marked, but can still be iterated
    // over in full if completely covered.
-    if (!obj->is_objArray() || (((HeapWord*)obj) >= start && cur <= end)) {
+    if (!obj->is_objArray() || (cast_from_oop<HeapWord*>(obj) >= start && cur <= end)) {
      obj->oop_iterate(cl);
     } else {
       obj->oop_iterate(cl, mr);
--- a/src/hotspot/share/gc/parallel/parMarkBitMap.cpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/parallel/parMarkBitMap.cpp	Fri Jan 24 09:27:07 2020 +0100
@@ -111,13 +111,13 @@
 size_t ParMarkBitMap::live_words_in_range_helper(HeapWord* beg_addr, oop end_obj) const
 {
-  assert(beg_addr <= (HeapWord*)end_obj, "bad range");
+  assert(beg_addr <= cast_from_oop<HeapWord*>(end_obj), "bad range");
   assert(is_marked(end_obj), "end_obj must be live");
 
   idx_t live_bits = 0;
 
   // The bitmap routines require the right boundary to be word-aligned.
-  const idx_t end_bit = addr_to_bit((HeapWord*)end_obj);
+  const idx_t end_bit = addr_to_bit(cast_from_oop<HeapWord*>(end_obj));
   const idx_t range_end = align_range_end(end_bit);
 
   idx_t beg_bit = find_obj_beg(addr_to_bit(beg_addr), range_end);
@@ -134,8 +134,8 @@
 ParMarkBitMap::live_words_in_range_use_cache(ParCompactionManager* cm, HeapWord* beg_addr, oop end_oop) const
 {
   HeapWord* last_beg = cm->last_query_begin();
-  HeapWord* last_obj = (HeapWord*)cm->last_query_object();
-  HeapWord* end_obj = (HeapWord*)end_oop;
+  HeapWord* last_obj = cast_from_oop<HeapWord*>(cm->last_query_object());
+  HeapWord* end_obj = cast_from_oop<HeapWord*>(end_oop);
 
   size_t last_ret = cm->last_query_return();
   if (end_obj > last_obj) {
--- a/src/hotspot/share/gc/parallel/parMarkBitMap.inline.hpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/parallel/parMarkBitMap.inline.hpp	Fri Jan 24 09:27:07 2020 +0100
@@ -81,7 +81,7 @@
 }
 
 inline bool ParMarkBitMap::is_marked(oop obj) const {
-  return is_marked((HeapWord*)obj);
+  return is_marked(cast_from_oop<HeapWord*>(obj));
 }
 
 inline bool ParMarkBitMap::is_unmarked(idx_t bit) const {
@@ -144,7 +144,7 @@
 }
 
 inline bool ParMarkBitMap::mark_obj(oop obj, int size) {
-  return mark_obj((HeapWord*)obj, (size_t)size);
+  return mark_obj(cast_from_oop<HeapWord*>(obj), (size_t)size);
 }
 
 inline ParMarkBitMap::idx_t ParMarkBitMap::addr_to_bit(HeapWord* addr) const {
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.inline.hpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.inline.hpp	Fri Jan 24 09:27:07 2020 +0100
@@ -44,7 +44,7 @@
 
 inline bool ParallelScavengeHeap::is_in_young(oop p) {
   // Assumes the the old gen address range is lower than that of the young gen.
-  bool result = ((HeapWord*)p) >= young_gen()->reserved().start();
+  bool result = cast_from_oop<HeapWord*>(p) >= young_gen()->reserved().start();
   assert(result == young_gen()->is_in_reserved(p),
          "incorrect test - result=%d, p=" PTR_FORMAT, result, p2i((void*)p));
   return result;
--- a/src/hotspot/share/gc/parallel/psOldGen.cpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/parallel/psOldGen.cpp	Fri Jan 24 09:27:07 2020 +0100
@@ -445,9 +445,9 @@
     _old_gen(old_gen), _start_array(start_array) { }
 
   virtual void do_object(oop obj) {
-    HeapWord* test_addr = (HeapWord*)obj + 1;
-    guarantee(_start_array->object_start(test_addr) == (HeapWord*)obj, "ObjectStartArray cannot find start of object");
-    guarantee(_start_array->is_block_allocated((HeapWord*)obj), "ObjectStartArray missing block allocation");
+    HeapWord* test_addr = cast_from_oop<HeapWord*>(obj) + 1;
+    guarantee(_start_array->object_start(test_addr) == cast_from_oop<HeapWord*>(obj), "ObjectStartArray cannot find start of object");
+    guarantee(_start_array->is_block_allocated(cast_from_oop<HeapWord*>(obj)), "ObjectStartArray missing block allocation");
   }
 };
 
--- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp	Fri Jan 24 09:27:07 2020 +0100
@@ -3383,7 +3383,7 @@
   assert(oopDesc::is_oop_or_null(moved_oop), "Expected an oop or NULL at " PTR_FORMAT, p2i(moved_oop));
 
   update_state(words);
-  assert(copy_destination() == (HeapWord*)moved_oop + moved_oop->size(), "sanity");
+  assert(copy_destination() == cast_from_oop<HeapWord*>(moved_oop) + moved_oop->size(), "sanity");
   return is_full() ? ParMarkBitMap::full : ParMarkBitMap::incomplete;
 }
 
--- a/src/hotspot/share/gc/parallel/psParallelCompact.hpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.hpp	Fri Jan 24 09:27:07 2020 +0100
@@ -427,7 +427,7 @@
   inline size_t block(const BlockData* block_ptr) const;
 
   void add_obj(HeapWord* addr, size_t len);
-  void add_obj(oop p, size_t len) { add_obj((HeapWord*)p, len); }
+  void add_obj(oop p, size_t len) { add_obj(cast_from_oop<HeapWord*>(p), len); }
 
   // Fill in the regions covering [beg, end) so that no data moves; i.e., the
   // destination of region n is simply the start of region n. The argument beg
@@ -468,7 +468,7 @@
   size_t block_offset(const HeapWord* addr) const;
   size_t addr_to_block_idx(const HeapWord* addr) const;
   size_t addr_to_block_idx(const oop obj) const {
-    return addr_to_block_idx((HeapWord*) obj);
+    return addr_to_block_idx(cast_from_oop<HeapWord*>(obj));
   }
   inline BlockData* addr_to_block_ptr(const HeapWord* addr) const;
   inline HeapWord* block_to_addr(size_t block) const;
@@ -485,7 +485,7 @@
 
   HeapWord* calc_new_pointer(HeapWord* addr, ParCompactionManager* cm);
   HeapWord* calc_new_pointer(oop p, ParCompactionManager* cm) {
-    return calc_new_pointer((HeapWord*) p, cm);
+    return calc_new_pointer(cast_from_oop<HeapWord*>(p), cm);
   }
 
 #ifdef ASSERT
--- a/src/hotspot/share/gc/parallel/psPromotionLAB.cpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/parallel/psPromotionLAB.cpp	Fri Jan 24 09:27:07 2020 +0100
@@ -92,7 +92,7 @@
 
 #ifdef ASSERT
       // Note that we actually DO NOT want to use the aligned header size!
-      HeapWord* elt_words = ((HeapWord*)filler_oop) + typeArrayOopDesc::header_size(T_INT);
+      HeapWord* elt_words = cast_from_oop<HeapWord*>(filler_oop) + typeArrayOopDesc::header_size(T_INT);
       Copy::fill_to_words(elt_words, array_length, 0xDEAABABE);
 #endif
 
--- a/src/hotspot/share/gc/parallel/psPromotionManager.hpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/parallel/psPromotionManager.hpp	Fri Jan 24 09:27:07 2020 +0100
@@ -124,7 +124,7 @@
   }
 
   oop* mask_chunked_array_oop(oop obj) {
-    assert(!is_oop_masked((oop*) obj), "invariant");
+    assert(!is_oop_masked(cast_from_oop<oop*>(obj)), "invariant");
     oop* ret = (oop*) (cast_from_oop<uintptr_t>(obj) | PS_CHUNKED_ARRAY_OOP_MASK);
     assert(is_oop_masked(ret), "invariant");
     return ret;
@@ -135,7 +135,7 @@
     assert(!p.is_narrow(), "chunked array oops cannot be narrow");
     oop *chunk = (oop*)p;  // cast p to oop (uses conversion operator)
     oop ret = oop((oop*)((uintptr_t)chunk & ~PS_CHUNKED_ARRAY_OOP_MASK));
-    assert(!is_oop_masked((oop*) ret), "invariant");
+    assert(!is_oop_masked(cast_from_oop<oop*>(ret)), "invariant");
     return ret;
   }
 
--- a/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp	Fri Jan 24 09:27:07 2020 +0100
@@ -251,7 +251,7 @@
     assert(new_obj != NULL, "allocation should have succeeded");
 
     // Copy obj
-    Copy::aligned_disjoint_words((HeapWord*)o, (HeapWord*)new_obj, new_obj_size);
+    Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(o), cast_from_oop<HeapWord*>(new_obj), new_obj_size);
 
     // Now we have to CAS in the header.
     // Make copy visible to threads reading the forwardee.
@@ -290,11 +290,11 @@
       // deallocate it, so we have to test. If the deallocation fails,
       // overwrite with a filler object.
       if (new_obj_is_tenured) {
-        if (!_old_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
-          CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
+        if (!_old_lab.unallocate_object(cast_from_oop<HeapWord*>(new_obj), new_obj_size)) {
+          CollectedHeap::fill_with_object(cast_from_oop<HeapWord*>(new_obj), new_obj_size);
         }
-      } else if (!_young_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
-        CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
+      } else if (!_young_lab.unallocate_object(cast_from_oop<HeapWord*>(new_obj), new_obj_size)) {
+        CollectedHeap::fill_with_object(cast_from_oop<HeapWord*>(new_obj), new_obj_size);
       }
 
       // don't update this before the unallocation!
--- a/src/hotspot/share/gc/parallel/psScavenge.hpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/parallel/psScavenge.hpp	Fri Jan 24 09:27:07 2020 +0100
@@ -141,7 +141,7 @@
   // so it only checks one side of the complete predicate.
 
   inline static bool is_obj_in_young(oop o) {
-    return (HeapWord*)o >= _young_generation_boundary;
+    return cast_from_oop<HeapWord*>(o) >= _young_generation_boundary;
   }
 
   inline static bool is_obj_in_young(narrowOop o) {
--- a/src/hotspot/share/gc/parallel/psScavenge.inline.hpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/parallel/psScavenge.inline.hpp	Fri Jan 24 09:27:07 2020 +0100
@@ -49,7 +49,7 @@
   if (should_scavenge(p)) {
     oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
     // Skip objects copied to to_space since the scavenge started.
-    HeapWord* const addr = (HeapWord*)obj;
+    HeapWord* const addr = cast_from_oop<HeapWord*>(obj);
     return addr < to_space_top_before_gc() || addr >= to_space->end();
   }
   return false;
--- a/src/hotspot/share/gc/serial/defNewGeneration.cpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/serial/defNewGeneration.cpp	Fri Jan 24 09:27:07 2020 +0100
@@ -69,7 +69,7 @@
 }
 
 bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
-  return (HeapWord*)p >= _young_gen->reserved().end() || p->is_forwarded();
+  return cast_from_oop<HeapWord*>(p) >= _young_gen->reserved().end() || p->is_forwarded();
 }
 
 DefNewGeneration::KeepAliveClosure::
@@ -757,7 +757,7 @@
     Prefetch::write(obj, interval);
 
     // Copy obj
-    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);
+    Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), cast_from_oop<HeapWord*>(obj), s);
 
     // Increment age if obj still in new generation
     obj->incr_age();
--- a/src/hotspot/share/gc/serial/defNewGeneration.inline.hpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/serial/defNewGeneration.inline.hpp	Fri Jan 24 09:27:07 2020 +0100
@@ -83,7 +83,7 @@
   // we set a younger_gen card if we have an older->youngest
   // generation pointer.
   oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
-  if (((HeapWord*)obj < _boundary) && GenCollectedHeap::heap()->is_in_reserved(p)) {
+  if ((cast_from_oop<HeapWord*>(obj) < _boundary) && GenCollectedHeap::heap()->is_in_reserved(p)) {
    _rs->inline_write_ref_field_gc(p, obj);
   }
 }
--- a/src/hotspot/share/gc/shared/cardTableBarrierSet.cpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/shared/cardTableBarrierSet.cpp	Fri Jan 24 09:27:07 2020 +0100
@@ -133,7 +133,7 @@
     // following the flush above.
     assert(thread->deferred_card_mark().is_empty(), "Error");
   } else {
-    MemRegion mr((HeapWord*)new_obj, new_obj->size());
+    MemRegion mr(cast_from_oop<HeapWord*>(new_obj), new_obj->size());
     assert(!mr.is_empty(), "Error");
     if (_defer_initial_card_mark) {
       // Defer the card mark
--- a/src/hotspot/share/gc/shared/cardTableRS.cpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/shared/cardTableRS.cpp	Fri Jan 24 09:27:07 2020 +0100
@@ -309,7 +309,7 @@
               "[_begin, _end) = [" PTR_FORMAT "," PTR_FORMAT ")",
               p2i(jp), p2i(_begin), p2i(_end));
     oop obj = RawAccess<>::oop_load(p);
-    guarantee(obj == NULL || (HeapWord*)obj >= _boundary,
+    guarantee(obj == NULL || cast_from_oop<HeapWord*>(obj) >= _boundary,
               "pointer " PTR_FORMAT " at " PTR_FORMAT " on "
               "clean card crosses boundary" PTR_FORMAT,
               p2i(obj), p2i(jp), p2i(_boundary));
--- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp	Fri Jan 24 09:27:07 2020 +0100
@@ -1004,7 +1004,7 @@
 }
 
 bool GenCollectedHeap::is_in_young(oop p) {
-  bool result = ((HeapWord*)p) < _old_gen->reserved().start();
+  bool result = cast_from_oop<HeapWord*>(p) < _old_gen->reserved().start();
   assert(result == _young_gen->is_in_reserved(p),
          "incorrect test - result=%d, p=" INTPTR_FORMAT, result, p2i((void*)p));
   return result;
@@ -1365,7 +1365,7 @@
     result = old_gen->expand_and_allocate(obj_size, false);
 
     if (result != NULL) {
-      Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
+      Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(obj), result, obj_size);
     }
     return oop(result);
   }
--- a/src/hotspot/share/gc/shared/genOopClosures.inline.hpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/shared/genOopClosures.inline.hpp	Fri Jan 24 09:27:07 2020 +0100
@@ -57,7 +57,7 @@
     assert(!CompressedOops::is_null(heap_oop), "expected non-null oop");
     oop obj = CompressedOops::decode_not_null(heap_oop);
     // If p points to a younger generation, mark the card.
-    if ((HeapWord*)obj < _gen_boundary) {
+    if (cast_from_oop<HeapWord*>(obj) < _gen_boundary) {
       _rs->inline_write_ref_field_gc(p, obj);
     }
   }
@@ -81,7 +81,7 @@
   // Should we copy the obj?
   if (!CompressedOops::is_null(heap_oop)) {
     oop obj = CompressedOops::decode_not_null(heap_oop);
-    if ((HeapWord*)obj < _boundary) {
+    if (cast_from_oop<HeapWord*>(obj) < _boundary) {
       assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
       oop new_obj = obj->is_forwarded() ? obj->forwardee()
                                         : _g->copy_to_survivor_space(obj);
@@ -107,7 +107,7 @@
   // Should we copy the obj?
   if (!CompressedOops::is_null(heap_oop)) {
     oop obj = CompressedOops::decode_not_null(heap_oop);
-    if ((HeapWord*)obj < _boundary) {
+    if (cast_from_oop<HeapWord*>(obj) < _boundary) {
       assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
       oop new_obj = obj->is_forwarded() ? obj->forwardee()
                                         : _g->copy_to_survivor_space(obj);
@@ -131,7 +131,7 @@
   T heap_oop = RawAccess<>::oop_load(p);
   if (!CompressedOops::is_null(heap_oop)) {
     oop obj = CompressedOops::decode_not_null(heap_oop);
-    if ((HeapWord*)obj < _boundary) {
+    if (cast_from_oop<HeapWord*>(obj) < _boundary) {
       _cl->do_oop(p);
     }
   }
@@ -148,7 +148,7 @@
   oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
   // weak references are sometimes scanned twice; must check
   // that to-space doesn't already contain this object
-  if ((HeapWord*)obj < _boundary && !_g->to()->is_in_reserved(obj)) {
+  if (cast_from_oop<HeapWord*>(obj) < _boundary && !_g->to()->is_in_reserved(obj)) {
     oop new_obj = obj->is_forwarded() ? obj->forwardee()
                                       : _g->copy_to_survivor_space(obj);
     RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
--- a/src/hotspot/share/gc/shared/generation.cpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/shared/generation.cpp	Fri Jan 24 09:27:07 2020 +0100
@@ -169,7 +169,7 @@
 
   HeapWord* result = allocate(obj_size, false);
   if (result != NULL) {
-    Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
+    Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(obj), result, obj_size);
     return oop(result);
   } else {
     GenCollectedHeap* gch = GenCollectedHeap::heap();
--- a/src/hotspot/share/gc/shared/locationPrinter.inline.hpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/shared/locationPrinter.inline.hpp	Fri Jan 24 09:27:07 2020 +0100
@@ -73,7 +73,7 @@
     narrowOop narrow_oop = (narrowOop)(uintptr_t)addr;
     oop o = CompressedOops::decode_raw(narrow_oop);
 
-    if (is_valid_obj((address)o)) {
+    if (is_valid_obj(o)) {
      st->print(UINT32_FORMAT " is a compressed pointer to object: ", narrow_oop);
       o->print_on(st);
       return true;
--- a/src/hotspot/share/gc/shared/markBitMap.hpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/shared/markBitMap.hpp	Fri Jan 24 09:27:07 2020 +0100
@@ -87,6 +87,7 @@
 
   // Write marks.
   inline void mark(HeapWord* addr);
+  inline void mark(oop obj);
   inline void clear(HeapWord* addr);
   inline void clear(oop obj);
   inline bool par_mark(HeapWord* addr);
--- a/src/hotspot/share/gc/shared/markBitMap.inline.hpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/shared/markBitMap.inline.hpp	Fri Jan 24 09:27:07 2020 +0100
@@ -46,6 +46,10 @@
   _bm.set_bit(addr_to_offset(addr));
 }
 
+inline void MarkBitMap::mark(oop obj) {
+  return mark(cast_from_oop<HeapWord*>(obj));
+}
+
 inline void MarkBitMap::clear(HeapWord* addr) {
   check_mark(addr);
   _bm.clear_bit(addr_to_offset(addr));
@@ -57,15 +61,15 @@
 }
 
 inline bool MarkBitMap::par_mark(oop obj) {
-  return par_mark((HeapWord*) obj);
+  return par_mark(cast_from_oop<HeapWord*>(obj));
 }
 
 inline bool MarkBitMap::is_marked(oop obj) const{
-  return is_marked((HeapWord*) obj);
+  return is_marked(cast_from_oop<HeapWord*>(obj));
 }
 
 inline void MarkBitMap::clear(oop obj) {
-  clear((HeapWord*) obj);
+  clear(cast_from_oop<HeapWord*>(obj));
 }
 
 #endif // SHARE_GC_SHARED_MARKBITMAP_INLINE_HPP
--- a/src/hotspot/share/gc/shared/memAllocator.cpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/shared/memAllocator.cpp	Fri Jan 24 09:27:07 2020 +0100
@@ -221,7 +221,7 @@
 }
 
 void MemAllocator::Allocation::notify_allocation_jfr_sampler() {
-  HeapWord* mem = (HeapWord*)obj();
+  HeapWord* mem = cast_from_oop<HeapWord*>(obj());
   size_t size_in_bytes = _allocator._word_size * HeapWordSize;
 
   if (_allocated_outside_tlab) {
@@ -406,7 +406,7 @@
   }
   ArrayKlass* array_klass = ArrayKlass::cast(_klass);
   const size_t hs = arrayOopDesc::header_size(array_klass->element_type());
-  return MemRegion(((HeapWord*)obj) + hs, _word_size - hs);
+  return MemRegion(cast_from_oop<HeapWord*>(obj) + hs, _word_size - hs);
 }
 
 oop ObjArrayAllocator::initialize(HeapWord* mem) const {
--- a/src/hotspot/share/gc/shared/memAllocator.hpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/shared/memAllocator.hpp	Fri Jan 24 09:27:07 2020 +0100
@@ -66,7 +66,7 @@
   HeapWord* mem_allocate(Allocation& allocation) const;
 
   virtual MemRegion obj_memory_range(oop obj) const {
-    return MemRegion((HeapWord*)obj, _word_size);
+    return MemRegion(cast_from_oop<HeapWord*>(obj), _word_size);
   }
 
 public:
--- a/src/hotspot/share/gc/shared/space.cpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/shared/space.cpp	Fri Jan 24 09:27:07 2020 +0100
@@ -373,7 +373,7 @@
     }
 
     // store the forwarding pointer into the mark word
-    if ((HeapWord*)q != compact_top) {
+    if (cast_from_oop<HeapWord*>(q) != compact_top) {
       q->forward_to(oop(compact_top));
       assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
     } else {
--- a/src/hotspot/share/gc/shared/space.hpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/shared/space.hpp	Fri Jan 24 09:27:07 2020 +0100
@@ -91,7 +91,7 @@
   // Returns true if this object has been allocated since a
   // generation's "save_marks" call.
   virtual bool obj_allocated_since_save_marks(const oop obj) const {
-    return (HeapWord*)obj >= saved_mark_word();
+    return cast_from_oop<HeapWord*>(obj) >= saved_mark_word();
   }
 
   // Returns a subregion of the space containing only the allocated objects in
--- a/src/hotspot/share/gc/shared/space.inline.hpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/shared/space.inline.hpp	Fri Jan 24 09:27:07 2020 +0100
@@ -332,7 +332,7 @@
 
     // size and destination
     size_t size = space->obj_size(cur_obj);
-    HeapWord* compaction_top = (HeapWord*)oop(cur_obj)->forwardee();
+    HeapWord* compaction_top = cast_from_oop<HeapWord*>(oop(cur_obj)->forwardee());
 
     // prefetch beyond compaction_top
     Prefetch::write(compaction_top, copy_interval);
--- a/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp	Fri Jan 24 09:27:07 2020 +0100
@@ -67,11 +67,11 @@
   ShenandoahMarkingContext* const ctx = heap->marking_context();
 
   msg.append("  " PTR_FORMAT " - klass " PTR_FORMAT " %s\n", p2i(obj), p2i(obj->klass()), obj->klass()->external_name());
-  msg.append("    %3s allocated after mark start\n", ctx->allocated_after_mark_start((HeapWord *) obj) ? "" : "not");
+  msg.append("    %3s allocated after mark start\n", ctx->allocated_after_mark_start(obj) ? "" : "not");
   msg.append("    %3s marked \n", ctx->is_marked(obj) ? "" : "not");
   msg.append("    %3s in collection set\n", heap->in_collection_set(obj) ? "" : "not");
   if (heap->traversal_gc() != NULL) {
-    msg.append("    %3s in traversal set\n", heap->traversal_gc()->traversal_set()->is_in((HeapWord*) obj) ? "" : "not");
+    msg.append("    %3s in traversal set\n", heap->traversal_gc()->traversal_set()->is_in(cast_from_oop<HeapWord*>(obj)) ? "" : "not");
   }
   msg.append("  mark:%s\n", mw_ss.as_string());
   msg.append("  region: %s", ss.as_string());
--- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp	Fri Jan 24 09:27:07 2020 +0100
@@ -168,7 +168,7 @@
   ShenandoahHeapRegion* r = _heap->heap_region_containing(obj);
   assert(r->is_cset(), "sanity");
 
-  HeapWord* cur = (HeapWord*)obj + obj->size();
+  HeapWord* cur = cast_from_oop<HeapWord*>(obj) + obj->size();
 
   size_t count = 0;
   while ((cur < r->top()) && ctx->is_marked(oop(cur)) && (count++ < max)) {
--- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp	Fri Jan 24 09:27:07 2020 +0100
@@ -42,7 +42,7 @@
 }
 
 inline oop ShenandoahBarrierSet::resolve_forwarded(oop p) {
-  if (((HeapWord*) p) != NULL) {
+  if (p != NULL) {
     return resolve_forwarded_not_null(p);
   } else {
     return p;
@@ -268,7 +268,7 @@
     T o = RawAccess<>::oop_load(elem_ptr);
     if (!CompressedOops::is_null(o)) {
       oop obj = CompressedOops::decode_not_null(o);
-      if (HAS_FWD && cset->is_in((HeapWord *) obj)) {
+      if (HAS_FWD && cset->is_in(cast_from_oop<HeapWord *>(obj))) {
        assert(_heap->has_forwarded_objects(), "only get here with forwarded objects");
         oop fwd = resolve_forwarded_not_null(obj);
         if (EVAC && obj == fwd) {
--- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSetClone.inline.hpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSetClone.inline.hpp	Fri Jan 24 09:27:07 2020 +0100
@@ -46,7 +46,7 @@
     T o = RawAccess<>::oop_load(p);
     if (!CompressedOops::is_null(o)) {
       oop obj = CompressedOops::decode_not_null(o);
-      if (_cset->is_in((HeapWord *)obj)) {
+      if (_cset->is_in(obj)) {
         oop fwd = _bs->resolve_forwarded_not_null(obj);
         if (EVAC && obj == fwd) {
           fwd = _heap->evacuate_object(obj, _thread);
--- a/src/hotspot/share/gc/shenandoah/shenandoahClosures.inline.hpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/shenandoah/shenandoahClosures.inline.hpp	Fri Jan 24 09:27:07 2020 +0100
@@ -94,7 +94,7 @@
   T o = RawAccess<>::oop_load(p);
   if (!CompressedOops::is_null(o)) {
     oop obj = CompressedOops::decode_not_null(o);
-    if (_heap->in_collection_set(obj) || _traversal_set->is_in((HeapWord*)obj)) {
+    if (_heap->in_collection_set(obj) || _traversal_set->is_in(cast_from_oop<HeapWord*>(obj))) {
      obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      RawAccess<IS_NOT_NULL>::oop_store(p, obj);
    } else {
--- a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.hpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.hpp	Fri Jan 24 09:27:07 2020 +0100
@@ -80,7 +80,7 @@
 
   inline bool is_in(ShenandoahHeapRegion* r) const;
   inline bool is_in(size_t region_number) const;
-  inline bool is_in(HeapWord* p) const;
+  inline bool is_in(oop obj) const;
 
   void print_on(outputStream* out) const;
 
--- a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.inline.hpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.inline.hpp	Fri Jan 24 09:27:07 2020 +0100
@@ -39,9 +39,9 @@
   return is_in(r->region_number());
 }
 
-bool ShenandoahCollectionSet::is_in(HeapWord* p) const {
+bool ShenandoahCollectionSet::is_in(oop p) const {
   assert(_heap->is_in(p), "Must be in the heap");
-  uintx index = ((uintx) p) >> _region_size_bytes_shift;
+  uintx index = (cast_from_oop<uintx>(p)) >> _region_size_bytes_shift;
   // no need to subtract the bottom of the heap from p,
   // _biased_cset_map is biased
   return _biased_cset_map[index] == 1;
--- a/src/hotspot/share/gc/shenandoah/shenandoahForwarding.inline.hpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/shenandoah/shenandoahForwarding.inline.hpp	Fri Jan 24 09:27:07 2020 +0100
@@ -39,7 +39,7 @@
   if (mark.is_marked()) {
     return (HeapWord*) mark.clear_lock_bits().to_pointer();
   } else {
-    return (HeapWord*) obj;
+    return cast_from_oop<HeapWord*>(obj);
   }
 }
 
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp	Fri Jan 24 09:27:07 2020 +0100
@@ -1252,8 +1252,8 @@
       obj = fwd;
     }
     assert(oopDesc::is_oop(obj), "must be a valid oop");
-    if (!_bitmap->is_marked((HeapWord*) obj)) {
-      _bitmap->mark((HeapWord*) obj);
+    if (!_bitmap->is_marked(obj)) {
+      _bitmap->mark(obj);
       _oop_stack->push(obj);
     }
   }
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp	Fri Jan 24 09:27:07 2020 +0100
@@ -283,7 +283,7 @@
   }
 
   // Copy the object:
-  Copy::aligned_disjoint_words((HeapWord*) p, copy, size);
+  Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);
 
   // Try to install the new forwarding pointer.
   oop copy_val = oop(copy);
@@ -326,7 +326,7 @@
 
 template <class T>
 inline bool ShenandoahHeap::in_collection_set(T p) const {
-  HeapWord* obj = (HeapWord*) p;
+  HeapWord* obj = cast_from_oop<HeapWord*>(p);
   assert(collection_set() != NULL, "Sanity");
   assert(is_in(obj), "should be in heap");
 
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.hpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.hpp	Fri Jan 24 09:27:07 2020 +0100
@@ -84,7 +84,7 @@
 
   inline bool is_in(ShenandoahHeapRegion* r) const;
   inline bool is_in(size_t region_number) const;
-  inline bool is_in(HeapWord* p) const;
+  inline bool is_in(oop p) const;
 
   void print_on(outputStream* out) const;
 
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.inline.hpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.inline.hpp	Fri Jan 24 09:27:07 2020 +0100
@@ -39,9 +39,9 @@
   return is_in(r->region_number());
 }
 
-bool ShenandoahHeapRegionSet::is_in(HeapWord* p) const {
+bool ShenandoahHeapRegionSet::is_in(oop p) const {
   assert(_heap->is_in(p), "Must be in the heap");
-  uintx index = ((uintx) p) >> _region_size_bytes_shift;
+  uintx index = (cast_from_oop<uintx>(p)) >> _region_size_bytes_shift;
   // no need to subtract the bottom of the heap from p,
   // _biased_set_map is biased
   return _biased_set_map[index] == 1;
--- a/src/hotspot/share/gc/shenandoah/shenandoahMarkCompact.cpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/shenandoah/shenandoahMarkCompact.cpp	Fri Jan 24 09:27:07 2020 +0100
@@ -297,7 +297,7 @@
   void do_object(oop p) {
     assert(_from_region != NULL, "must set before work");
     assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
-    assert(!_heap->complete_marking_context()->allocated_after_mark_start((HeapWord*) p), "must be truly marked");
+    assert(!_heap->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked");
 
     size_t obj_size = p->size();
     if (_compact_point + obj_size > _to_region->end()) {
@@ -664,8 +664,8 @@
     assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
     size_t size = (size_t)p->size();
     if (p->is_forwarded()) {
-      HeapWord* compact_from = (HeapWord*) p;
-      HeapWord* compact_to = (HeapWord*) p->forwardee();
+      HeapWord* compact_from = cast_from_oop<HeapWord*>(p);
+      HeapWord* compact_to = cast_from_oop<HeapWord*>(p->forwardee());
       Copy::aligned_conjoint_words(compact_from, compact_to, size);
       oop new_obj = oop(compact_to);
       new_obj->init_mark_raw();
--- a/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.hpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.hpp	Fri Jan 24 09:27:07 2020 +0100
@@ -55,7 +55,7 @@
 
   inline bool is_marked(oop obj) const;
 
-  inline bool allocated_after_mark_start(HeapWord* addr) const;
+  inline bool allocated_after_mark_start(oop obj) const;
 
   inline MarkBitMap* mark_bit_map();
 
--- a/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.inline.hpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.inline.hpp	Fri Jan 24 09:27:07 2020 +0100
@@ -33,16 +33,15 @@
 
 inline bool ShenandoahMarkingContext::mark(oop obj) {
   shenandoah_assert_not_forwarded(NULL, obj);
-  HeapWord* addr = (HeapWord*) obj;
-  return (! allocated_after_mark_start(addr)) && _mark_bit_map.par_mark(addr);
+  return (! allocated_after_mark_start(obj)) && _mark_bit_map.par_mark(obj);
 }
 
 inline bool ShenandoahMarkingContext::is_marked(oop obj) const {
-  HeapWord* addr = (HeapWord*) obj;
-  return allocated_after_mark_start(addr) || _mark_bit_map.is_marked(addr);
+  return allocated_after_mark_start(obj) || _mark_bit_map.is_marked(obj);
 }
 
-inline bool ShenandoahMarkingContext::allocated_after_mark_start(HeapWord* addr) const {
+inline bool ShenandoahMarkingContext::allocated_after_mark_start(oop obj) const {
+  HeapWord* addr = cast_from_oop<HeapWord*>(obj);
   uintx index = ((uintx) addr) >> ShenandoahHeapRegion::region_size_bytes_shift();
   HeapWord* top_at_mark_start = _top_at_mark_starts[index];
   bool alloc_after_mark_start = addr >= top_at_mark_start;
--- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp	Fri Jan 24 09:27:07 2020 +0100
@@ -88,8 +88,7 @@
 
     // For performance reasons, only fully verify non-marked field values.
     // We are here when the host object for *p is already marked.
-    HeapWord* addr = (HeapWord*) obj;
-    if (_map->par_mark(addr)) {
+    if (_map->par_mark(obj)) {
       verify_oop_at(p, obj);
       _stack->push(ShenandoahVerifierTask(obj));
     }
@@ -117,7 +116,7 @@
     check(ShenandoahAsserts::_safe_unknown, obj, Metaspace::contains(obj_klass),
            "Object klass pointer must go to metaspace");
 
-    HeapWord *obj_addr = (HeapWord *) obj;
+    HeapWord *obj_addr = cast_from_oop<HeapWord*>(obj);
     check(ShenandoahAsserts::_safe_unknown, obj, obj_addr < obj_reg->top(),
            "Object start should be within the region");
 
@@ -181,7 +180,7 @@
       check(ShenandoahAsserts::_safe_oop, obj, !fwd_reg->is_humongous(),
              "Should have no humongous forwardees");
 
-      HeapWord *fwd_addr = (HeapWord *) fwd;
+      HeapWord *fwd_addr = cast_from_oop<HeapWord *>(fwd);
       check(ShenandoahAsserts::_safe_oop, obj, fwd_addr < fwd_reg->top(),
              "Forwardee start should be within the region");
       check(ShenandoahAsserts::_safe_oop, obj, (fwd_addr + fwd->size()) <= fwd_reg->top(),
--- a/src/hotspot/share/interpreter/abstractInterpreter.hpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/interpreter/abstractInterpreter.hpp	Fri Jan 24 09:27:07 2020 +0100
@@ -289,34 +289,6 @@
       Bytes::put_native_u8((address)slot_addr, value);
     }
   }
-  static void get_jvalue_in_slot(intptr_t* slot_addr, BasicType type, jvalue* value) {
-    switch (type) {
-      case T_BOOLEAN: value->z = *int_addr_in_slot(slot_addr); break;
-      case T_CHAR:    value->c = *int_addr_in_slot(slot_addr); break;
-      case T_BYTE:    value->b = *int_addr_in_slot(slot_addr); break;
-      case T_SHORT:   value->s = *int_addr_in_slot(slot_addr); break;
-      case T_INT:     value->i = *int_addr_in_slot(slot_addr); break;
-      case T_LONG:    value->j = long_in_slot(slot_addr); break;
-      case T_FLOAT:   value->f = *(jfloat*)int_addr_in_slot(slot_addr); break;
-      case T_DOUBLE:  value->d = jdouble_cast(long_in_slot(slot_addr)); break;
-      case T_OBJECT:  value->l = (jobject)*oop_addr_in_slot(slot_addr); break;
-      default:        ShouldNotReachHere();
-    }
-  }
-  static void set_jvalue_in_slot(intptr_t* slot_addr, BasicType type, jvalue* value) {
-    switch (type) {
-      case T_BOOLEAN: *int_addr_in_slot(slot_addr) = (value->z != 0); break;
-      case T_CHAR:    *int_addr_in_slot(slot_addr) = value->c; break;
-      case T_BYTE:    *int_addr_in_slot(slot_addr) = value->b; break;
-      case T_SHORT:   *int_addr_in_slot(slot_addr) = value->s; break;
-      case T_INT:     *int_addr_in_slot(slot_addr) = value->i; break;
-      case T_LONG:    set_long_in_slot(slot_addr, value->j); break;
-      case T_FLOAT:   *(jfloat*)int_addr_in_slot(slot_addr) = value->f; break;
-      case T_DOUBLE:  set_long_in_slot(slot_addr, jlong_cast(value->d)); break;
-      case T_OBJECT:  *oop_addr_in_slot(slot_addr) = (oop) value->l; break;
-      default:        ShouldNotReachHere();
-    }
-  }
 
   static void initialize_method_handle_entries();
 };
--- a/src/hotspot/share/jfr/jni/jfrJavaCall.cpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/jfr/jni/jfrJavaCall.cpp	Fri Jan 24 09:27:07 2020 +0100
@@ -73,7 +73,7 @@
   assert(_storage != NULL, "invariant");
   assert(receiver != NULL, "invariant");
   JavaValue value(T_OBJECT);
-  value.set_jobject((jobject)receiver);
+  value.set_jobject(cast_from_oop<jobject>(receiver));
   _storage[0] = value;
 }
 
@@ -96,7 +96,7 @@
 
 void JfrJavaArguments::Parameters::push_oop(const oop obj) {
   JavaValue value(T_OBJECT);
-  value.set_jobject((jobject)obj);
+  value.set_jobject(cast_from_oop<jobject>(obj));
   push(value);
 }
 
--- a/src/hotspot/share/jfr/jni/jfrJavaSupport.cpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/jfr/jni/jfrJavaSupport.cpp	Fri Jan 24 09:27:07 2020 +0100
@@ -163,7 +163,7 @@
   result->set_type(T_VOID); // constructor result type
   JfrJavaSupport::call_special(args, CHECK);
   result->set_type(T_OBJECT); // set back to original result type
-  result->set_jobject((jobject)h_obj());
+  result->set_jobject(cast_from_oop<jobject>(h_obj()));
 }
 
 static void array_construction(JfrJavaArguments* args, JavaValue* result, InstanceKlass* klass, int array_length, TRAPS) {
@@ -176,7 +176,7 @@
   ObjArrayKlass::cast(ak)->initialize(THREAD);
   HandleMark hm(THREAD);
   objArrayOop arr = ObjArrayKlass::cast(ak)->allocate(array_length, CHECK);
-  result->set_jobject((jobject)arr);
+  result->set_jobject(cast_from_oop<jobject>(arr));
 }
 
 static void create_object(JfrJavaArguments* args, JavaValue* result, TRAPS) {
@@ -377,7 +377,7 @@
       result->set_jlong(h_oop->long_field(fd->offset()));
       break;
     case T_OBJECT:
-      result->set_jobject((jobject)h_oop->obj_field(fd->offset()));
+      result->set_jobject(cast_from_oop<jobject>(h_oop->obj_field(fd->offset())));
       break;
     default:
       ShouldNotReachHere();
--- a/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleWriter.cpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleWriter.cpp	Fri Jan 24 09:27:07 2020 +0100
@@ -206,7 +206,7 @@
   oop object = oosi->_data._object;
   assert(object != NULL, "invariant");
   writer->write(oosi->_id);
-  writer->write((u8)(const HeapWord*)object);
+  writer->write(cast_from_oop<u8>(object));
   writer->write(const_cast<const Klass*>(object->klass()));
   ObjectSampleDescription od(object);
   writer->write(od.description());
--- a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp	Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp	Fri Jan 24 09:27:07 2020 +0100
@@ -431,7 +431,7 @@
   } else if (JVMCIENV->isa_HotSpotObjectConstantImpl(base_object)) {
     Handle base_oop = JVMCIENV->asConstant(base_object, JVMCI_CHECK_NULL);
     if (base_oop->is_a(SystemDictionary::Class_klass())) {
-      base_address = (jlong) (address) base_oop();
+      base_address = cast_from_oop<jlong>(base_oop());
     }
   }
   if (base_address == 0) {
@@ -1534,7 +1534,7 @@
         StackValue* var = locals->at(i2);
         if (var->type() == T_OBJECT && scopeLocals->at(i2)->is_object()) {
           jvalue val;
-          val.l = (jobject) locals->at(i2)->get_obj()();
+          val.l = cast_from_oop<jobject>(locals->at(i2)->get_obj()());
           cvf->update_local(T_OBJECT, i2, val);
         }
       }
@@ -1547,7 +1547,7 @@
         StackValue* var = expressions->at(i2);
         if (var->type() == T_OBJECT && scopeExpressions->at(i2)->is_object()) {
           jvalue val;
-          val.l = (jobject) expressions->at(i2)->get_obj()();
+          val.l = cast_from_oop<jobject>(expressions->at(i2)->get_obj()());
           cvf->update_stack(T_OBJECT, i2, val);
         }
       }
--- a/src/hotspot/share/memory/filemap.cpp Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/memory/filemap.cpp Fri Jan 24 09:27:07 2020 +0100
@@ -1559,9 +1559,9 @@
   assert(offset == (size_t)(uint32_t)offset, "must be 32-bit only");
   uint n = (uint)offset;
   if (with_current_oop_encoding_mode) {
-    return (address)CompressedOops::decode_not_null(n);
+    return cast_from_oop<address>(CompressedOops::decode_not_null(n));
   } else {
-    return (address)HeapShared::decode_from_archive(n);
+    return cast_from_oop<address>(HeapShared::decode_from_archive(n));
   }
 }
 
--- a/src/hotspot/share/memory/heapShared.cpp Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/memory/heapShared.cpp Fri Jan 24 09:27:07 2020 +0100
@@ -139,7 +139,7 @@
 
   oop archived_oop = (oop)G1CollectedHeap::heap()->archive_mem_allocate(len);
   if (archived_oop != NULL) {
-    Copy::aligned_disjoint_words((HeapWord*)obj, (HeapWord*)archived_oop, len);
+    Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(obj), cast_from_oop<HeapWord*>(archived_oop), len);
     MetaspaceShared::relocate_klass_ptr(archived_oop);
     ArchivedObjectCache* cache = archived_object_cache();
     cache->put(obj, archived_oop);
@@ -553,7 +553,7 @@
            "original objects must not point to archived objects");
 
     size_t field_delta = pointer_delta(p, _orig_referencing_obj, sizeof(char));
-    T* new_p = (T*)(address(_archived_referencing_obj) + field_delta);
+    T* new_p = (T*)(cast_from_oop<address>(_archived_referencing_obj) + field_delta);
 
     Thread* THREAD = _thread;
     if (!_record_klasses_only && log_is_enabled(Debug, cds, heap)) {
--- a/src/hotspot/share/oops/oopsHierarchy.hpp Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/oops/oopsHierarchy.hpp Fri Jan 24 09:27:07 2020 +0100
@@ -70,7 +70,6 @@
 // instead, which generates less code anyway.
 
 class Thread;
-class PromotedObject;
 class oopDesc;
 
 extern "C" bool CheckUnhandledOops;
@@ -111,22 +110,7 @@
   volatile oop& operator=(const volatile oop& o) volatile { _o = o.obj(); return *this; }
 
   // Explict user conversions
-  operator void* () const { return (void *)obj(); }
-#ifndef SOLARIS
-  operator void* () const volatile { return (void *)obj(); }
-#endif
-  operator HeapWord* () const { return (HeapWord*)obj(); }
   operator oopDesc* () const volatile { return obj(); }
-  operator intptr_t* () const { return (intptr_t*)obj(); }
-  operator PromotedObject* () const { return (PromotedObject*)obj(); }
-  operator address () const { return (address)obj(); }
-
-  // from javaCalls.cpp
-  operator jobject () const { return (jobject)obj(); }
-
-  // from parNewGeneration and other things that want to get to the end of
-  // an oop for stuff (like ObjArrayKlass.cpp)
-  operator oop* () const { return (oop *)obj(); }
 };
 
 template<>
@@ -187,7 +171,7 @@
   return (oop)(CHECK_UNHANDLED_OOPS_ONLY((void *))(value));
 }
 template <class T> inline T cast_from_oop(oop o) {
-  return (T)(CHECK_UNHANDLED_OOPS_ONLY((void*))o);
+  return (T)(CHECK_UNHANDLED_OOPS_ONLY((oopDesc*))o);
 }
 
 // The metadata hierarchy is separate from the oop hierarchy
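The oopsHierarchy.hpp hunk above is the heart of the change: the oop wrapper class (used when CHECK_UNHANDLED_OOPS is defined) loses its grab-bag of implicit conversion operators, keeping only operator oopDesc*, and cast_from_oop<T> becomes the single explicit escape hatch. A reduced sketch of why that matters (hypothetical simplified types, not the real header; the real oop is a plain oopDesc* in release builds):

  // Reduced sketch of the CHECK_UNHANDLED_OOPS wrapper after this change.
  typedef unsigned char* address;       // HotSpot's byte-pointer typedef

  struct oopDesc {};

  class oop {
    oopDesc* _o;
  public:
    explicit oop(oopDesc* o) : _o(o) {}
    oopDesc* obj() const { return _o; }
    operator oopDesc* () const { return obj(); }   // the one conversion kept
  };

  template <class T>
  inline T cast_from_oop(oop o) {
    return (T)(oopDesc*)o;   // mirrors (T)(CHECK_UNHANDLED_OOPS_ONLY((oopDesc*))o)
  }

  void example(oop o) {
    // address a = o;                       // no longer compiles: operator address() is gone
    address a = cast_from_oop<address>(o);  // conversion is explicit and greppable
    (void)a;
  }

Presumably the switch from (void*) to (oopDesc*) inside cast_from_oop serves the same goal: the cast stays anchored to the actual object type instead of routing through an anonymous pointer.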
--- a/src/hotspot/share/opto/runtime.cpp Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/opto/runtime.cpp Fri Jan 24 09:27:07 2020 +0100
@@ -303,7 +303,7 @@
   const size_t hs = arrayOopDesc::header_size(elem_type);
   // Align to next 8 bytes to avoid trashing arrays's length.
   const size_t aligned_hs = align_object_offset(hs);
-  HeapWord* obj = (HeapWord*)result;
+  HeapWord* obj = cast_from_oop<HeapWord*>(result);
   if (aligned_hs > hs) {
     Copy::zero_to_words(obj+hs, aligned_hs-hs);
   }
--- a/src/hotspot/share/prims/jvm.cpp Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/prims/jvm.cpp Fri Jan 24 09:27:07 2020 +0100
@@ -2906,7 +2906,7 @@
   bool is_alive = tlh.cv_internal_thread_to_JavaThread(jthread, &receiver, &java_thread);
   Events::log_exception(thread,
                         "JVM_StopThread thread JavaThread " INTPTR_FORMAT " as oop " INTPTR_FORMAT " [exception " INTPTR_FORMAT "]",
-                        p2i(receiver), p2i((address)java_thread), p2i(throwable));
+                        p2i(receiver), p2i(java_thread), p2i(throwable));
 
   if (is_alive) {
     // jthread refers to a live JavaThread.
--- a/src/hotspot/share/prims/jvmtiImpl.cpp Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/prims/jvmtiImpl.cpp Fri Jan 24 09:27:07 2020 +0100
@@ -700,7 +700,7 @@
   // happens. The oop stored in the deferred local will be
   // gc'd on its own.
   if (_type == T_OBJECT) {
-    _value.l = (jobject) (JNIHandles::resolve_external_guard(_value.l));
+    _value.l = cast_from_oop<jobject>(JNIHandles::resolve_external_guard(_value.l));
   }
   // Re-read the vframe so we can see that it is deoptimized
   // [ Only need because of assert in update_local() ]
--- a/src/hotspot/share/prims/jvmtiTagMap.cpp Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/prims/jvmtiTagMap.cpp Fri Jan 24 09:27:07 2020 +0100
@@ -1182,7 +1182,7 @@
 
   // get offset and field value
   int offset = field->field_offset();
-  address addr = (address)klass->java_mirror() + offset;
+  address addr = cast_from_oop<address>(klass->java_mirror()) + offset;
   jvalue value;
   copy_to_jvalue(&value, addr, value_type);
 
@@ -1235,7 +1235,7 @@
 
   // get offset and field value
   int offset = field->field_offset();
-  address addr = (address)obj + offset;
+  address addr = cast_from_oop<address>(obj) + offset;
   jvalue value;
   copy_to_jvalue(&value, addr, value_type);
 
@@ -2808,7 +2808,7 @@
 
 // verify that a static oop field is in range
 static inline bool verify_static_oop(InstanceKlass* ik, oop mirror, int offset) {
-  address obj_p = (address)mirror + offset;
+  address obj_p = cast_from_oop<address>(mirror) + offset;
   address start = (address)InstanceMirrorKlass::start_of_static_fields(mirror);
   address end = start + (java_lang_Class::static_oop_field_count(mirror) * heapOopSize);
   assert(end >= start, "sanity check");
@@ -2936,7 +2936,7 @@
       }
     } else {
       if (is_reporting_primitive_fields()) {
-        address addr = (address)mirror + field->field_offset();
+        address addr = cast_from_oop<address>(mirror) + field->field_offset();
         int slot = field->field_index();
         if (!CallbackInvoker::report_primitive_static_field(mirror, slot, addr, type)) {
           delete field_map;
@@ -2981,7 +2981,7 @@
     } else {
       if (is_reporting_primitive_fields()) {
         // primitive instance field
-        address addr = (address)o + field->field_offset();
+        address addr = cast_from_oop<address>(o) + field->field_offset();
         int slot = field->field_index();
         if (!CallbackInvoker::report_primitive_instance_field(o, slot, addr, type)) {
           return false;
--- a/src/hotspot/share/prims/unsafe.cpp Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/prims/unsafe.cpp Fri Jan 24 09:27:07 2020 +0100
@@ -111,7 +111,7 @@
   if (p != NULL) {
     assert(byte_offset >= 0 && byte_offset <= (jlong)MAX_OBJECT_SIZE, "sane offset");
     if (byte_offset == (jint)byte_offset) {
-      void* ptr_plus_disp = (address)p + byte_offset;
+      void* ptr_plus_disp = cast_from_oop<address>(p) + byte_offset;
       assert(p->field_addr_raw((jint)byte_offset) == ptr_plus_disp,
              "raw [ptr+disp] must be consistent with oop::field_addr_raw");
     }
@@ -130,9 +130,9 @@
   }
 
   if (sizeof(char*) == sizeof(jint)) { // (this constant folds!)
-    return (address)p + (jint) byte_offset;
+    return cast_from_oop<address>(p) + (jint) byte_offset;
   } else {
-    return (address)p + byte_offset;
+    return cast_from_oop<address>(p) + byte_offset;
   }
 }
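The jvmtiTagMap.cpp and unsafe.cpp hunks all compute a raw field address the same way, as object base plus byte offset; only the spelling of the base conversion changes. A sketch of that arithmetic, with stand-in types (the offset would come from a field descriptor or an Unsafe caller, not from this code):

  // Field-address sketch; all types are simplified stand-ins.
  typedef unsigned char* address;

  struct oopDesc {};
  typedef oopDesc* oop;

  template <class T>
  inline T cast_from_oop(oop o) { return (T)o; }

  // A field at byte offset N starts N bytes past the start of the object,
  // so its raw address is plain byte arithmetic on the object's base.
  inline address field_addr_sketch(oop obj, int byte_offset) {
    return cast_from_oop<address>(obj) + byte_offset;
  }

Converting to address (a byte pointer) before adding is what makes the offset arithmetic operate in byte units rather than in units of the pointee type.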
--- a/src/hotspot/share/runtime/javaCalls.cpp Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/runtime/javaCalls.cpp Fri Jan 24 09:27:07 2020 +0100
@@ -460,7 +460,7 @@
 
   // Restore possible oop return
   if (oop_result_flag) {
-    result->set_jobject((jobject)thread->vm_result());
+    result->set_jobject(cast_from_oop<jobject>(thread->vm_result()));
     thread->set_vm_result(NULL);
   }
 }
--- a/src/hotspot/share/runtime/reflection.cpp Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/runtime/reflection.cpp Fri Jan 24 09:27:07 2020 +0100
@@ -113,7 +113,7 @@
 
 BasicType Reflection::unbox_for_regular_object(oop box, jvalue* value) {
   // Note: box is really the unboxed oop. It might even be a Short, etc.!
-  value->l = (jobject) box;
+  value->l = cast_from_oop<jobject>(box);
   return T_OBJECT;
 }
 
@@ -224,7 +224,7 @@
     THROW_(vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), T_ILLEGAL);
   }
   if (a->is_objArray()) {
-    value->l = (jobject) objArrayOop(a)->obj_at(index);
+    value->l = cast_from_oop<jobject>(objArrayOop(a)->obj_at(index));
     return T_OBJECT;
   } else {
     assert(a->is_typeArray(), "just checking");
--- a/src/hotspot/share/runtime/stackValue.cpp Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/runtime/stackValue.cpp Fri Jan 24 09:27:07 2020 +0100
@@ -221,7 +221,7 @@
     } else {
       st->print("NULL");
     }
-    st->print(" <" INTPTR_FORMAT ">", p2i((address)_handle_value()));
+    st->print(" <" INTPTR_FORMAT ">", p2i(_handle_value()));
     break;
 
   case T_CONFLICT:
--- a/src/hotspot/share/runtime/vframe_hp.cpp Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/runtime/vframe_hp.cpp Fri Jan 24 09:27:07 2020 +0100
@@ -97,7 +97,7 @@
 void compiledVFrame::update_monitor(int index, MonitorInfo* val) {
   assert(index >= 0, "out of bounds");
   jvalue value;
-  value.l = (jobject) val->owner();
+  value.l = cast_from_oop<jobject>(val->owner());
   update_deferred_value(T_OBJECT, index + method()->max_locals() + method()->max_stack(), value);
 }
 
--- a/src/hotspot/share/services/heapDumper.cpp Fri Jan 24 09:24:46 2020 +0100
+++ b/src/hotspot/share/services/heapDumper.cpp Fri Jan 24 09:27:07 2020 +0100
@@ -557,7 +557,7 @@
 }
 
 void DumpWriter::write_objectID(oop o) {
-  address a = (address)o;
+  address a = cast_from_oop<address>(o);
 #ifdef _LP64
   write_u8((u8)a);
 #else
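heapDumper.cpp uses the object's address itself as its heap-dump object ID, written at pointer width (u8 under _LP64, u4 otherwise); the hunk only changes how the address is obtained from the oop. A sketch of that logic, with stand-in typedefs for the HotSpot u4/u8 and oop types:

  // Object-ID sketch; the typedefs are stand-ins and the #ifdef mirrors
  // the hunk above.
  #include <stdint.h>

  typedef unsigned char* address;
  typedef uint32_t u4;
  typedef uint64_t u8;

  struct oopDesc {};
  typedef oopDesc* oop;

  template <class T>
  inline T cast_from_oop(oop o) { return (T)o; }

  inline u8 object_id_sketch(oop o) {
    address a = cast_from_oop<address>(o);
  #ifdef _LP64
    return (u8)a;                  // 64-bit VM: the full pointer value
  #else
    return (u8)(u4)(uintptr_t)a;   // 32-bit VM: the low 4 bytes
  #endif
  }

The objectSampleWriter.cpp hunk earlier in this changeset does the same thing in one step via cast_from_oop<u8>(object).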
--- a/test/hotspot/gtest/gc/g1/test_heapRegion.cpp Fri Jan 24 09:24:46 2020 +0100
+++ b/test/hotspot/gtest/gc/g1/test_heapRegion.cpp Fri Jan 24 09:27:07 2020 +0100
@@ -45,7 +45,7 @@
 
   virtual size_t apply(oop object) {
     _count++;
-    ensure_marked((HeapWord*) object);
+    ensure_marked(cast_from_oop<HeapWord*>(object));
     // Must return positive size to advance the iteration.
     return MinObjAlignment;
   }