FreeBSD Bugzilla – Attachment 203773 Details for
Bug 237370
java/openjdk12: Add powerpc64 support
Home
|
New
|
Browse
|
Search
|
[?]
|
Reports
|
Help
|
New Account
|
Log In
Remember
[x]
|
Forgot Password
Login:
[x]
[patch]
Patch to add powerpc64 support
patch-openjdk12_powerpc64 (text/plain), 86.38 KB, created by
Curtis Hamilton
on 2019-04-18 21:42:17 UTC
(
hide
)
Description:
Patch to add powerpc64 support
Filename:
MIME Type:
Creator:
Curtis Hamilton
Created:
2019-04-18 21:42:17 UTC
Size:
86.38 KB
patch
obsolete
>Index: Makefile >=================================================================== >--- Makefile (revision 498356) >+++ Makefile (working copy) >@@ -12,7 +12,7 @@ > > LICENSE= GPLv2 > >-ONLY_FOR_ARCHS= amd64 i386 >+ONLY_FOR_ARCHS= amd64 i386 powerpc64 > > BUILD_DEPENDS= zip:archivers/zip \ > autoconf>0:devel/autoconf \ >@@ -36,12 +36,12 @@ > CLASSPATH="" \ > JAVA_HOME="" \ > LD_LIBRARY_PATH="" \ >- CC=/usr/bin/cc \ >- CXX=/usr/bin/c++ \ >- MAKEFLAGS="" \ >- USE_CLANG=true >- >-JDK_OSARCH= bsd-${ARCH:S/amd64/x86_64/:S/i386/x86/} >+ CC=${CC} \ >+ CXX=${CXX} \ >+ CPP=${CPP} \ >+ MAKEFLAGS="" >+ >+JDK_OSARCH= bsd-${ARCH:S/amd64/x86_64/:S/i386/x86/:S/powerpc64/ppc64/} > JDK_BUILDDIR= ${WRKSRC}/build/${JDK_OSARCH}-${JDK_BUILD_JVM}-${JDK_BUILD_TYPE} > JDK_IMAGEDIR= ${JDK_BUILDDIR}/images/jdk > INSTALLDIR= ${PREFIX}/${PKGBASE} >@@ -55,8 +55,9 @@ > BSD_JDK_VERSION= 4 > > GNU_CONFIGURE= yes >-CONFIGURE_ENV= CC=/usr/bin/cc \ >- CXX=/usr/bin/c++ >+CONFIGURE_ENV= CC=${CC} \ >+ CXX=${CXX} \ >+ CPP=${CPP} > CONFIGURE_ARGS= --with-boot-jdk=${BOOTSTRAPJDKDIR} \ > --disable-ccache \ > --disable-javac-server \ >@@ -74,7 +75,6 @@ > --with-libpng=system \ > --with-zlib=system \ > --with-lcms=system \ >- --with-toolchain-type=clang \ > --x-includes=${LOCALBASE}/include \ > --x-libraries=${LOCALBASE}/lib \ > --with-cacerts-file=${FILESDIR}/cacerts \ >@@ -105,8 +105,28 @@ > BUILD_DEPENDS+= ${BOOTSTRAPJDKDIR}/bin/javac:java/bootstrap-openjdk11 > .endif > >+.if ${ARCH} == amd64 || ${ARCH} == i386 || ${ARCH} == powerpc64 > JDK_BUILD_JVM= server >+.else >+JDK_BUILD_JVM= zero >+LIB_DEPENDS+= libffi.so:devel/libffi >+.endif > >+.if ${COMPILER_TYPE} == clang >+MAKE_ENV+= --with-toolchain-type=clang >+.else >+MAKE_ENV+= --with-toolchain-type=gcc >+.endif >+ >+.if ${ARCH} == powerpc64 >+USE_GCC= yes >+MAKE_ENV+= --with-extra-ldflags="${LDFLAGS}" \ >+ --with-extra-cflags="${CFLAGS}" \ >+ --with-extra-cxxflags="${CXXFLAGS}" >+CONFIGURE_ARGS+= --disable-warnings-as-errors \ >+ --disable-dtrace 
>+.endif >+ > .if empty(ICONV_LIB) > ICONV_CFLAGS= -DLIBICONV_PLUG > .else >Index: files/patch-openjdk12_powerpc64 >=================================================================== >--- /dev/null >+++ files/patch-openjdk12_powerpc64 >@@ -0,0 +1,2200 @@ >+--- make/autoconf/flags-ldflags.m4.orig >++++ make/autoconf/flags-ldflags.m4 >+@@ -69,11 +69,10 @@ >+ LIBJSIG_HASHSTYLE_LDFLAGS="-Wl,--hash-style=both" >+ fi >+ >+- # Add -z defs, to forbid undefined symbols in object files. >+- BASIC_LDFLAGS="$BASIC_LDFLAGS -Wl,-z,defs" >+- >+- BASIC_LDFLAGS_JVM_ONLY="-Wl,-O1 -Wl,-z,relro" >+- >++ BASIC_LDFLAGS_JVM_ONLY="-Wl,-z,noexecstack -Wl,-O1 -Wl,-z,relro" >++ >++ BASIC_LDFLAGS_JDK_LIB_ONLY="-Wl,-z,noexecstack" >++ LIBJSIG_NOEXECSTACK_LDFLAGS="-Wl,-z,noexecstack" >+ >+ elif test "x$TOOLCHAIN_TYPE" = xclang; then >+ BASIC_LDFLAGS_JVM_ONLY="-mno-omit-leaf-frame-pointer -mstack-alignment=16 \ >+--- make/lib/Lib-jdk.hotspot.agent.gmk.orig >++++ make/lib/Lib-jdk.hotspot.agent.gmk >+@@ -36,7 +36,7 @@ >+ SA_LDFLAGS := -mt >+ >+ else ifeq ($(OPENJDK_TARGET_OS), bsd) >+- SA_CFLAGS := -Wno-error=format-nonliteral -Wno-sign-compare -Wno-error=tautological-pointer-compare >++ SA_CFLAGS := -Wno-format-nonliteral -Wno-sign-compare -Wno-tautological-pointer-compare >+ >+ else ifeq ($(OPENJDK_TARGET_OS), macosx) >+ SA_CFLAGS := -Damd64 -D_GNU_SOURCE -mno-omit-leaf-frame-pointer \ >+--- src/hotspot/cpu/ppc/macroAssembler_ppc.cpp.orig >++++ src/hotspot/cpu/ppc/macroAssembler_ppc.cpp >+@@ -1285,12 +1285,16 @@ >+ return true; // No ucontext given. Can't check value of ra. Assume true. >+ } >+ >+-#ifdef LINUX >++#if defined(LINUX) || defined(BSD) >+ // Ucontext given. Check that register ra contains the address of >+ // the safepoing polling page. >+ ucontext_t* uc = (ucontext_t*) ucontext; >+ // Set polling address. 
>++#ifdef LINUX >+ address addr = (address)uc->uc_mcontext.regs->gpr[ra] + (ssize_t)ds; >++#elif defined(BSD) >++ address addr = (address)uc->uc_mcontext.mc_gpr[ra] + (ssize_t)ds; >++#endif >+ if (polling_address_ptr != NULL) { >+ *polling_address_ptr = addr; >+ } >+@@ -1344,7 +1348,7 @@ >+ // or stdux R1_SP, Rx, R1_SP (see push_frame(), resize_frame()) >+ // return the banged address. Otherwise, return 0. >+ address MacroAssembler::get_stack_bang_address(int instruction, void *ucontext) { >+-#ifdef LINUX >++#if defined(LINUX) || defined(BSD) >+ ucontext_t* uc = (ucontext_t*) ucontext; >+ int rs = inv_rs_field(instruction); >+ int ra = inv_ra_field(instruction); >+@@ -1353,12 +1357,21 @@ >+ || (is_stdu(instruction) && rs == 1)) { >+ int ds = inv_ds_field(instruction); >+ // return banged address >++#ifdef LINUX >+ return ds+(address)uc->uc_mcontext.regs->gpr[ra]; >++#elif defined(BSD) >++ return ds+(address)uc->uc_mcontext.mc_gpr[ra]; >++#endif >+ } else if (is_stdux(instruction) && rs == 1) { >+ int rb = inv_rb_field(instruction); >++#ifdef LINUX >+ address sp = (address)uc->uc_mcontext.regs->gpr[1]; >+ long rb_val = (long)uc->uc_mcontext.regs->gpr[rb]; >+- return ra != 1 || rb_val >= 0 ? NULL // not a stack bang >++#elif defined(BSD) >++ address sp = (address)uc->uc_mcontext.mc_gpr[1]; >++ long rb_val = (long)uc->uc_mcontext.mc_gpr[rb]; >++#endif >++ return ra != 1 || rb_val >= 0 ? NULL // not a stack bang >+ : sp + rb_val; // banged address >+ } >+ return NULL; // not a stack bang >+--- src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp.orig >++++ src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp >+@@ -47,7 +47,9 @@ >+ #include "opto/runtime.hpp" >+ #endif >+ >++#ifndef __FreeBSD__ >+ #include <alloca.h> >++#endif >+ >+ #define __ masm-> >+ >+@@ -856,6 +858,8 @@ >+ // Although AIX runs on big endian CPU, float is in the most >+ // significant word of an argument slot. 
>+ #define FLOAT_WORD_OFFSET_IN_SLOT 0 >++#elif defined(BSD) >++#define FLOAT_WORD_OFFSET_IN_SLOT 1 >+ #else >+ #error "unknown OS" >+ #endif >+--- src/hotspot/cpu/ppc/templateInterpreterGenerator_ppc.cpp.orig >++++ src/hotspot/cpu/ppc/templateInterpreterGenerator_ppc.cpp >+@@ -296,6 +296,8 @@ >+ // Although AIX runs on big endian CPU, float is in most significant >+ // word of an argument slot. >+ __ stfs(floatSlot, 0, arg_c); >++#elif defined(BSD) >++ __ stfs(floatSlot, 4, arg_c); >+ #else >+ #error "unknown OS" >+ #endif >+--- src/hotspot/cpu/ppc/vm_version_ppc.cpp.orig >++++ src/hotspot/cpu/ppc/vm_version_ppc.cpp >+@@ -37,7 +37,9 @@ >+ #include "utilities/globalDefinitions.hpp" >+ #include "vm_version_ppc.hpp" >+ >++#ifndef __FreeBSD__ >+ #include <sys/sysinfo.h> >++#endif >+ >+ #if defined(LINUX) && defined(VM_LITTLE_ENDIAN) >+ #include <sys/auxv.h> >+--- src/hotspot/os/bsd/os_bsd.cpp.orig >++++ src/hotspot/os/bsd/os_bsd.cpp >+@@ -4086,3 +4086,112 @@ >+ } >+ return yes; >+ } >++ >++// Java thread: >++// >++// Low memory addresses >++// +------------------------+ >++// | |\ Java thread created by VM does not have glibc >++// | glibc guard page | - guard, attached Java thread usually has >++// | |/ 1 glibc guard page. 
>++// P1 +------------------------+ Thread::stack_base() - Thread::stack_size() >++// | |\ >++// | HotSpot Guard Pages | - red, yellow and reserved pages >++// | |/ >++// +------------------------+ JavaThread::stack_reserved_zone_base() >++// | |\ >++// | Normal Stack | - >++// | |/ >++// P2 +------------------------+ Thread::stack_base() >++// >++// Non-Java thread: >++// >++// Low memory addresses >++// +------------------------+ >++// | |\ >++// | glibc guard page | - usually 1 page >++// | |/ >++// P1 +------------------------+ Thread::stack_base() - Thread::stack_size() >++// | |\ >++// | Normal Stack | - >++// | |/ >++// P2 +------------------------+ Thread::stack_base() >++// >++// ** P1 (aka bottom) and size ( P2 = P1 - size) are the address and stack size returned from >++// pthread_attr_getstack() >++ >++static void current_stack_region(address * bottom, size_t * size) { >++#ifdef __APPLE__ >++ pthread_t self = pthread_self(); >++ void *stacktop = pthread_get_stackaddr_np(self); >++ *size = pthread_get_stacksize_np(self); >++ // workaround for OS X 10.9.0 (Mavericks) >++ // pthread_get_stacksize_np returns 128 pages even though the actual size is 2048 pages >++ if (pthread_main_np() == 1) { >++ // At least on Mac OS 10.12 we have observed stack sizes not aligned >++ // to pages boundaries. This can be provoked by e.g. setrlimit() (ulimit -s xxxx in the >++ // shell). Apparently Mac OS actually rounds upwards to next multiple of page size, >++ // however, we round downwards here to be on the safe side. 
>++ *size = align_down(*size, getpagesize()); >++ >++ if ((*size) < (DEFAULT_MAIN_THREAD_STACK_PAGES * (size_t)getpagesize())) { >++ char kern_osrelease[256]; >++ size_t kern_osrelease_size = sizeof(kern_osrelease); >++ int ret = sysctlbyname("kern.osrelease", kern_osrelease, &kern_osrelease_size, NULL, 0); >++ if (ret == 0) { >++ // get the major number, atoi will ignore the minor amd micro portions of the version string >++ if (atoi(kern_osrelease) >= OS_X_10_9_0_KERNEL_MAJOR_VERSION) { >++ *size = (DEFAULT_MAIN_THREAD_STACK_PAGES*getpagesize()); >++ } >++ } >++ } >++ } >++ *bottom = (address) stacktop - *size; >++#elif defined(__OpenBSD__) >++ stack_t ss; >++ int rslt = pthread_stackseg_np(pthread_self(), &ss); >++ >++ if (rslt != 0) >++ fatal("pthread_stackseg_np failed with error = %d", rslt); >++ >++ *bottom = (address)((char *)ss.ss_sp - ss.ss_size); >++ *size = ss.ss_size; >++#else >++ pthread_attr_t attr; >++ >++ int rslt = pthread_attr_init(&attr); >++ >++ // JVM needs to know exact stack location, abort if it fails >++ if (rslt != 0) >++ fatal("pthread_attr_init failed with error = %d", rslt); >++ >++ rslt = pthread_attr_get_np(pthread_self(), &attr); >++ >++ if (rslt != 0) >++ fatal("pthread_attr_get_np failed with error = %d", rslt); >++ >++ if (pthread_attr_getstackaddr(&attr, (void **)bottom) != 0 || >++ pthread_attr_getstacksize(&attr, size) != 0) { >++ fatal("Can not locate current stack attributes!"); >++ } >++ >++ pthread_attr_destroy(&attr); >++#endif >++ assert(os::current_stack_pointer() >= *bottom && >++ os::current_stack_pointer() < *bottom + *size, "just checking"); >++} >++ >++address os::current_stack_base() { >++ address bottom; >++ size_t size; >++ current_stack_region(&bottom, &size); >++ return (bottom + size); >++} >++ >++size_t os::current_stack_size() { >++ // stack size includes normal stack and HotSpot guard pages >++ address bottom; >++ size_t size; >++ current_stack_region(&bottom, &size); >++ return size; >++} >+--- /dev/null 
2019-04-10 22:31:08 UTC 2019-04-10 22:31:08 UTC >++++ src/hotspot/os_cpu/bsd_ppc/atomic_bsd_ppc.hpp >+@@ -0,0 +1,418 @@ >++/* >++ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. >++ * Copyright (c) 2012, 2018 SAP SE. All rights reserved. >++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. >++ * >++ * This code is free software; you can redistribute it and/or modify it >++ * under the terms of the GNU General Public License version 2 only, as >++ * published by the Free Software Foundation. >++ * >++ * This code is distributed in the hope that it will be useful, but WITHOUT >++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or >++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License >++ * version 2 for more details (a copy is included in the LICENSE file that >++ * accompanied this code). >++ * >++ * You should have received a copy of the GNU General Public License version >++ * 2 along with this work; if not, write to the Free Software Foundation, >++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. >++ * >++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA >++ * or visit www.oracle.com if you need additional information or have any >++ * questions. >++ * >++ */ >++ >++#ifndef OS_CPU_BSD_PPC_VM_ATOMIC_BSD_PPC_HPP >++#define OS_CPU_BSD_PPC_VM_ATOMIC_BSD_PPC_HPP >++ >++#ifndef PPC64 >++#error "Atomic currently only implemented for PPC64" >++#endif >++ >++// Implementation of class atomic >++ >++// >++// machine barrier instructions: >++// >++// - sync two-way memory barrier, aka fence >++// - lwsync orders Store|Store, >++// Load|Store, >++// Load|Load, >++// but not Store|Load >++// - eieio orders memory accesses for device memory (only) >++// - isync invalidates speculatively executed instructions >++// From the POWER ISA 2.06 documentation: >++// "[...] 
an isync instruction prevents the execution of >++// instructions following the isync until instructions >++// preceding the isync have completed, [...]" >++// From IBM's AIX assembler reference: >++// "The isync [...] instructions causes the processor to >++// refetch any instructions that might have been fetched >++// prior to the isync instruction. The instruction isync >++// causes the processor to wait for all previous instructions >++// to complete. Then any instructions already fetched are >++// discarded and instruction processing continues in the >++// environment established by the previous instructions." >++// >++// semantic barrier instructions: >++// (as defined in orderAccess.hpp) >++// >++// - release orders Store|Store, (maps to lwsync) >++// Load|Store >++// - acquire orders Load|Store, (maps to lwsync) >++// Load|Load >++// - fence orders Store|Store, (maps to sync) >++// Load|Store, >++// Load|Load, >++// Store|Load >++// >++ >++#define strasm_sync "\n sync \n" >++#define strasm_lwsync "\n lwsync \n" >++#define strasm_isync "\n isync \n" >++#define strasm_release strasm_lwsync >++#define strasm_acquire strasm_lwsync >++#define strasm_fence strasm_sync >++#define strasm_nobarrier "" >++#define strasm_nobarrier_clobber_memory "" >++ >++inline void pre_membar(atomic_memory_order order) { >++ switch (order) { >++ case memory_order_relaxed: >++ case memory_order_acquire: break; >++ case memory_order_release: >++ case memory_order_acq_rel: __asm__ __volatile__ (strasm_lwsync); break; >++ default /*conservative*/ : __asm__ __volatile__ (strasm_sync); break; >++ } >++} >++ >++inline void post_membar(atomic_memory_order order) { >++ switch (order) { >++ case memory_order_relaxed: >++ case memory_order_release: break; >++ case memory_order_acquire: >++ case memory_order_acq_rel: __asm__ __volatile__ (strasm_isync); break; >++ default /*conservative*/ : __asm__ __volatile__ (strasm_sync); break; >++ } >++} >++ >++ >++template<size_t byte_size> >++struct 
Atomic::PlatformAdd >++ : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> > >++{ >++ template<typename I, typename D> >++ D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const; >++}; >++ >++template<> >++template<typename I, typename D> >++inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest, >++ atomic_memory_order order) const { >++ STATIC_ASSERT(4 == sizeof(I)); >++ STATIC_ASSERT(4 == sizeof(D)); >++ >++ D result; >++ >++ pre_membar(order); >++ >++ __asm__ __volatile__ ( >++ "1: lwarx %0, 0, %2 \n" >++ " add %0, %0, %1 \n" >++ " stwcx. %0, 0, %2 \n" >++ " bne- 1b \n" >++ : /*%0*/"=&r" (result) >++ : /*%1*/"r" (add_value), /*%2*/"r" (dest) >++ : "cc", "memory" ); >++ >++ post_membar(order); >++ >++ return result; >++} >++ >++ >++template<> >++template<typename I, typename D> >++inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest, >++ atomic_memory_order order) const { >++ STATIC_ASSERT(8 == sizeof(I)); >++ STATIC_ASSERT(8 == sizeof(D)); >++ >++ D result; >++ >++ pre_membar(order); >++ >++ __asm__ __volatile__ ( >++ "1: ldarx %0, 0, %2 \n" >++ " add %0, %0, %1 \n" >++ " stdcx. %0, 0, %2 \n" >++ " bne- 1b \n" >++ : /*%0*/"=&r" (result) >++ : /*%1*/"r" (add_value), /*%2*/"r" (dest) >++ : "cc", "memory" ); >++ >++ post_membar(order); >++ >++ return result; >++} >++ >++template<> >++template<typename T> >++inline T Atomic::PlatformXchg<4>::operator()(T exchange_value, >++ T volatile* dest, >++ atomic_memory_order order) const { >++ // Note that xchg doesn't necessarily do an acquire >++ // (see synchronizer.cpp). >++ >++ T old_value; >++ const uint64_t zero = 0; >++ >++ pre_membar(order); >++ >++ __asm__ __volatile__ ( >++ /* atomic loop */ >++ "1: \n" >++ " lwarx %[old_value], %[dest], %[zero] \n" >++ " stwcx. 
%[exchange_value], %[dest], %[zero] \n" >++ " bne- 1b \n" >++ /* exit */ >++ "2: \n" >++ /* out */ >++ : [old_value] "=&r" (old_value), >++ "=m" (*dest) >++ /* in */ >++ : [dest] "b" (dest), >++ [zero] "r" (zero), >++ [exchange_value] "r" (exchange_value), >++ "m" (*dest) >++ /* clobber */ >++ : "cc", >++ "memory" >++ ); >++ >++ post_membar(order); >++ >++ return old_value; >++} >++ >++template<> >++template<typename T> >++inline T Atomic::PlatformXchg<8>::operator()(T exchange_value, >++ T volatile* dest, >++ atomic_memory_order order) const { >++ STATIC_ASSERT(8 == sizeof(T)); >++ // Note that xchg doesn't necessarily do an acquire >++ // (see synchronizer.cpp). >++ >++ T old_value; >++ const uint64_t zero = 0; >++ >++ pre_membar(order); >++ >++ __asm__ __volatile__ ( >++ /* atomic loop */ >++ "1: \n" >++ " ldarx %[old_value], %[dest], %[zero] \n" >++ " stdcx. %[exchange_value], %[dest], %[zero] \n" >++ " bne- 1b \n" >++ /* exit */ >++ "2: \n" >++ /* out */ >++ : [old_value] "=&r" (old_value), >++ "=m" (*dest) >++ /* in */ >++ : [dest] "b" (dest), >++ [zero] "r" (zero), >++ [exchange_value] "r" (exchange_value), >++ "m" (*dest) >++ /* clobber */ >++ : "cc", >++ "memory" >++ ); >++ >++ post_membar(order); >++ >++ return old_value; >++} >++ >++template<> >++template<typename T> >++inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value, >++ T volatile* dest, >++ T compare_value, >++ atomic_memory_order order) const { >++ STATIC_ASSERT(1 == sizeof(T)); >++ >++ // Note that cmpxchg guarantees a two-way memory barrier across >++ // the cmpxchg, so it's really a a 'fence_cmpxchg_fence' if not >++ // specified otherwise (see atomic.hpp). >++ >++ // Using 32 bit internally. 
>++ volatile int *dest_base = (volatile int*)((uintptr_t)dest & ~3); >++ >++#ifdef VM_LITTLE_ENDIAN >++ const unsigned int shift_amount = ((uintptr_t)dest & 3) * 8; >++#else >++ const unsigned int shift_amount = ((~(uintptr_t)dest) & 3) * 8; >++#endif >++ const unsigned int masked_compare_val = ((unsigned int)(unsigned char)compare_value), >++ masked_exchange_val = ((unsigned int)(unsigned char)exchange_value), >++ xor_value = (masked_compare_val ^ masked_exchange_val) << shift_amount; >++ >++ unsigned int old_value, value32; >++ >++ pre_membar(order); >++ >++ __asm__ __volatile__ ( >++ /* simple guard */ >++ " lbz %[old_value], 0(%[dest]) \n" >++ " cmpw %[masked_compare_val], %[old_value] \n" >++ " bne- 2f \n" >++ /* atomic loop */ >++ "1: \n" >++ " lwarx %[value32], 0, %[dest_base] \n" >++ /* extract byte and compare */ >++ " srd %[old_value], %[value32], %[shift_amount] \n" >++ " clrldi %[old_value], %[old_value], 56 \n" >++ " cmpw %[masked_compare_val], %[old_value] \n" >++ " bne- 2f \n" >++ /* replace byte and try to store */ >++ " xor %[value32], %[xor_value], %[value32] \n" >++ " stwcx. 
%[value32], 0, %[dest_base] \n" >++ " bne- 1b \n" >++ /* exit */ >++ "2: \n" >++ /* out */ >++ : [old_value] "=&r" (old_value), >++ [value32] "=&r" (value32), >++ "=m" (*dest), >++ "=m" (*dest_base) >++ /* in */ >++ : [dest] "b" (dest), >++ [dest_base] "b" (dest_base), >++ [shift_amount] "r" (shift_amount), >++ [masked_compare_val] "r" (masked_compare_val), >++ [xor_value] "r" (xor_value), >++ "m" (*dest), >++ "m" (*dest_base) >++ /* clobber */ >++ : "cc", >++ "memory" >++ ); >++ >++ post_membar(order); >++ >++ return PrimitiveConversions::cast<T>((unsigned char)old_value); >++} >++ >++template<> >++template<typename T> >++inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value, >++ T volatile* dest, >++ T compare_value, >++ atomic_memory_order order) const { >++ STATIC_ASSERT(4 == sizeof(T)); >++ >++ // Note that cmpxchg guarantees a two-way memory barrier across >++ // the cmpxchg, so it's really a a 'fence_cmpxchg_fence' if not >++ // specified otherwise (see atomic.hpp). >++ >++ T old_value; >++ const uint64_t zero = 0; >++ >++ pre_membar(order); >++ >++ __asm__ __volatile__ ( >++ /* simple guard */ >++ " lwz %[old_value], 0(%[dest]) \n" >++ " cmpw %[compare_value], %[old_value] \n" >++ " bne- 2f \n" >++ /* atomic loop */ >++ "1: \n" >++ " lwarx %[old_value], %[dest], %[zero] \n" >++ " cmpw %[compare_value], %[old_value] \n" >++ " bne- 2f \n" >++ " stwcx. 
%[exchange_value], %[dest], %[zero] \n" >++ " bne- 1b \n" >++ /* exit */ >++ "2: \n" >++ /* out */ >++ : [old_value] "=&r" (old_value), >++ "=m" (*dest) >++ /* in */ >++ : [dest] "b" (dest), >++ [zero] "r" (zero), >++ [compare_value] "r" (compare_value), >++ [exchange_value] "r" (exchange_value), >++ "m" (*dest) >++ /* clobber */ >++ : "cc", >++ "memory" >++ ); >++ >++ post_membar(order); >++ >++ return old_value; >++} >++ >++template<> >++template<typename T> >++inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value, >++ T volatile* dest, >++ T compare_value, >++ atomic_memory_order order) const { >++ STATIC_ASSERT(8 == sizeof(T)); >++ >++ // Note that cmpxchg guarantees a two-way memory barrier across >++ // the cmpxchg, so it's really a a 'fence_cmpxchg_fence' if not >++ // specified otherwise (see atomic.hpp). >++ >++ T old_value; >++ const uint64_t zero = 0; >++ >++ pre_membar(order); >++ >++ __asm__ __volatile__ ( >++ /* simple guard */ >++ " ld %[old_value], 0(%[dest]) \n" >++ " cmpd %[compare_value], %[old_value] \n" >++ " bne- 2f \n" >++ /* atomic loop */ >++ "1: \n" >++ " ldarx %[old_value], %[dest], %[zero] \n" >++ " cmpd %[compare_value], %[old_value] \n" >++ " bne- 2f \n" >++ " stdcx. 
%[exchange_value], %[dest], %[zero] \n" >++ " bne- 1b \n" >++ /* exit */ >++ "2: \n" >++ /* out */ >++ : [old_value] "=&r" (old_value), >++ "=m" (*dest) >++ /* in */ >++ : [dest] "b" (dest), >++ [zero] "r" (zero), >++ [compare_value] "r" (compare_value), >++ [exchange_value] "r" (exchange_value), >++ "m" (*dest) >++ /* clobber */ >++ : "cc", >++ "memory" >++ ); >++ >++ post_membar(order); >++ >++ return old_value; >++} >++ >++#undef strasm_sync >++#undef strasm_lwsync >++#undef strasm_isync >++#undef strasm_release >++#undef strasm_acquire >++#undef strasm_fence >++#undef strasm_nobarrier >++#undef strasm_nobarrier_clobber_memory >++ >++#endif // OS_CPU_BSD_PPC_VM_ATOMIC_BSD_PPC_HPP >+--- /dev/null 2019-04-10 22:31:08 UTC >++++ src/hotspot/os_cpu/bsd_ppc/bytes_bsd_ppc.inline.hpp >+@@ -0,0 +1,39 @@ >++/* >++ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved. >++ * Copyright 2014 Google Inc. All rights reserved. >++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. >++ * >++ * This code is free software; you can redistribute it and/or modify it >++ * under the terms of the GNU General Public License version 2 only, as >++ * published by the Free Software Foundation. >++ * >++ * This code is distributed in the hope that it will be useful, but WITHOUT >++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or >++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License >++ * version 2 for more details (a copy is included in the LICENSE file that >++ * accompanied this code). >++ * >++ * You should have received a copy of the GNU General Public License version >++ * 2 along with this work; if not, write to the Free Software Foundation, >++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. >++ * >++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA >++ * or visit www.oracle.com if you need additional information or have any >++ * questions. 
>++ * >++ */ >++ >++#ifndef OS_CPU_BSD_PPC_VM_BYTES_BSD_PPC_INLINE_HPP >++#define OS_CPU_BSD_PPC_VM_BYTES_BSD_PPC_INLINE_HPP >++ >++#if defined(VM_LITTLE_ENDIAN) >++#include <byteswap.h> >++ >++// Efficient swapping of data bytes from Java byte >++// ordering to native byte ordering and vice versa. >++inline u2 Bytes::swap_u2(u2 x) { return bswap_16(x); } >++inline u4 Bytes::swap_u4(u4 x) { return bswap_32(x); } >++inline u8 Bytes::swap_u8(u8 x) { return bswap_64(x); } >++#endif // VM_LITTLE_ENDIAN >++ >++#endif // OS_CPU_BSD_PPC_VM_BYTES_BSD_PPC_INLINE_HPP >+--- /dev/null 2019-04-10 22:31:08 UTC >++++ src/hotspot/os_cpu/bsd_ppc/globals_bsd_ppc.hpp >+@@ -0,0 +1,44 @@ >++/* >++ * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved. >++ * Copyright (c) 2012, 2015 SAP SE. All rights reserved. >++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. >++ * >++ * This code is free software; you can redistribute it and/or modify it >++ * under the terms of the GNU General Public License version 2 only, as >++ * published by the Free Software Foundation. >++ * >++ * This code is distributed in the hope that it will be useful, but WITHOUT >++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or >++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License >++ * version 2 for more details (a copy is included in the LICENSE file that >++ * accompanied this code). >++ * >++ * You should have received a copy of the GNU General Public License version >++ * 2 along with this work; if not, write to the Free Software Foundation, >++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. >++ * >++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA >++ * or visit www.oracle.com if you need additional information or have any >++ * questions. 
>++ * >++ */ >++ >++#ifndef OS_CPU_BSD_PPC_VM_GLOBALS_BSD_PPC_HPP >++#define OS_CPU_BSD_PPC_VM_GLOBALS_BSD_PPC_HPP >++ >++// Sets the default values for platform dependent flags used by the runtime system. >++// (see globals.hpp) >++ >++define_pd_global(bool, DontYieldALot, false); >++define_pd_global(intx, ThreadStackSize, 2048); // 0 => use system default >++define_pd_global(intx, VMThreadStackSize, 2048); >++ >++define_pd_global(intx, CompilerThreadStackSize, 4096); >++ >++// Allow extra space in DEBUG builds for asserts. >++define_pd_global(size_t, JVMInvokeMethodSlack, 8192); >++ >++// Only used on 64 bit platforms >++define_pd_global(size_t, HeapBaseMinAddress, 2*G); >++ >++#endif // OS_CPU_BSD_PPC_VM_GLOBALS_BSD_PPC_HPP >+--- /dev/null 2019-04-10 22:31:08 UTC >++++ src/hotspot/os_cpu/bsd_ppc/orderAccess_bsd_ppc.hpp >+@@ -0,0 +1,97 @@ >++/* >++ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. >++ * Copyright (c) 2012, 2014 SAP SE. All rights reserved. >++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. >++ * >++ * This code is free software; you can redistribute it and/or modify it >++ * under the terms of the GNU General Public License version 2 only, as >++ * published by the Free Software Foundation. >++ * >++ * This code is distributed in the hope that it will be useful, but WITHOUT >++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or >++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License >++ * version 2 for more details (a copy is included in the LICENSE file that >++ * accompanied this code). >++ * >++ * You should have received a copy of the GNU General Public License version >++ * 2 along with this work; if not, write to the Free Software Foundation, >++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
>++ * >++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA >++ * or visit www.oracle.com if you need additional information or have any >++ * questions. >++ * >++ */ >++ >++#ifndef OS_CPU_BSD_PPC_VM_ORDERACCESS_BSD_PPC_HPP >++#define OS_CPU_BSD_PPC_VM_ORDERACCESS_BSD_PPC_HPP >++ >++// Included in orderAccess.hpp header file. >++ >++#ifndef PPC64 >++#error "OrderAccess currently only implemented for PPC64" >++#endif >++ >++// Compiler version last used for testing: gcc 4.1.2 >++// Please update this information when this file changes >++ >++// Implementation of class OrderAccess. >++ >++// >++// Machine barrier instructions: >++// >++// - sync Two-way memory barrier, aka fence. >++// - lwsync orders Store|Store, >++// Load|Store, >++// Load|Load, >++// but not Store|Load >++// - eieio orders Store|Store >++// - isync Invalidates speculatively executed instructions, >++// but isync may complete before storage accesses >++// associated with instructions preceding isync have >++// been performed. >++// >++// Semantic barrier instructions: >++// (as defined in orderAccess.hpp) >++// >++// - release orders Store|Store, (maps to lwsync) >++// Load|Store >++// - acquire orders Load|Store, (maps to lwsync) >++// Load|Load >++// - fence orders Store|Store, (maps to sync) >++// Load|Store, >++// Load|Load, >++// Store|Load >++// >++ >++#define inlasm_sync() __asm__ __volatile__ ("sync" : : : "memory"); >++#define inlasm_lwsync() __asm__ __volatile__ ("lwsync" : : : "memory"); >++#define inlasm_eieio() __asm__ __volatile__ ("eieio" : : : "memory"); >++#define inlasm_isync() __asm__ __volatile__ ("isync" : : : "memory"); >++// Use twi-isync for load_acquire (faster than lwsync). 
>++#define inlasm_acquire_reg(X) __asm__ __volatile__ ("twi 0,%0,0\n isync\n" : : "r" (X) : "memory"); >++ >++inline void OrderAccess::loadload() { inlasm_lwsync(); } >++inline void OrderAccess::storestore() { inlasm_lwsync(); } >++inline void OrderAccess::loadstore() { inlasm_lwsync(); } >++inline void OrderAccess::storeload() { inlasm_sync(); } >++ >++inline void OrderAccess::acquire() { inlasm_lwsync(); } >++inline void OrderAccess::release() { inlasm_lwsync(); } >++inline void OrderAccess::fence() { inlasm_sync(); } >++ >++ >++template<size_t byte_size> >++struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE> >++{ >++ template <typename T> >++ T operator()(const volatile T* p) const { T t = Atomic::load(p); inlasm_acquire_reg(t); return t; } >++}; >++ >++#undef inlasm_sync >++#undef inlasm_lwsync >++#undef inlasm_eieio >++#undef inlasm_isync >++#undef inlasm_acquire_reg >++ >++#endif // OS_CPU_BSD_PPC_VM_ORDERACCESS_BSD_PPC_HPP >+--- /dev/null 2019-04-10 22:31:08 UTC >++++ src/hotspot/os_cpu/bsd_ppc/os_bsd_ppc.cpp >+@@ -0,0 +1,567 @@ >++/* >++ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. >++ * Copyright (c) 2012, 2018 SAP SE. All rights reserved. >++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. >++ * >++ * This code is free software; you can redistribute it and/or modify it >++ * under the terms of the GNU General Public License version 2 only, as >++ * published by the Free Software Foundation. >++ * >++ * This code is distributed in the hope that it will be useful, but WITHOUT >++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or >++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License >++ * version 2 for more details (a copy is included in the LICENSE file that >++ * accompanied this code). 
>++ * >++ * You should have received a copy of the GNU General Public License version >++ * 2 along with this work; if not, write to the Free Software Foundation, >++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. >++ * >++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA >++ * or visit www.oracle.com if you need additional information or have any >++ * questions. >++ * >++ */ >++ >++// no precompiled headers >++#include "jvm.h" >++#include "asm/assembler.inline.hpp" >++#include "classfile/classLoader.hpp" >++#include "classfile/systemDictionary.hpp" >++#include "classfile/vmSymbols.hpp" >++#include "code/codeCache.hpp" >++#include "code/icBuffer.hpp" >++#include "code/vtableStubs.hpp" >++#include "interpreter/interpreter.hpp" >++#include "memory/allocation.inline.hpp" >++#include "nativeInst_ppc.hpp" >++#include "os_share_bsd.hpp" >++#include "prims/jniFastGetField.hpp" >++#include "prims/jvm_misc.hpp" >++#include "runtime/arguments.hpp" >++#include "runtime/extendedPC.hpp" >++#include "runtime/frame.inline.hpp" >++#include "runtime/interfaceSupport.inline.hpp" >++#include "runtime/java.hpp" >++#include "runtime/javaCalls.hpp" >++#include "runtime/mutexLocker.hpp" >++#include "runtime/osThread.hpp" >++#include "runtime/safepointMechanism.hpp" >++#include "runtime/sharedRuntime.hpp" >++#include "runtime/stubRoutines.hpp" >++#include "runtime/thread.inline.hpp" >++#include "runtime/timer.hpp" >++#include "utilities/debug.hpp" >++#include "utilities/events.hpp" >++#include "utilities/vmError.hpp" >++ >++// put OS-includes here >++# include <sys/types.h> >++# include <sys/mman.h> >++# include <pthread.h> >++# include <signal.h> >++# include <errno.h> >++# include <dlfcn.h> >++# include <stdlib.h> >++# include <stdio.h> >++# include <unistd.h> >++# include <sys/resource.h> >++# include <pthread.h> >++# include <sys/stat.h> >++# include <sys/time.h> >++# include <sys/utsname.h> >++# include <sys/socket.h> >++# include 
<sys/wait.h> >++# include <pwd.h> >++# include <poll.h> >++# include <ucontext.h> >++ >++ >++address os::current_stack_pointer() { >++ intptr_t* csp; >++ >++ // inline assembly `mr regno(csp), R1_SP': >++ __asm__ __volatile__ ("mr %0, 1":"=r"(csp):); >++ >++ return (address) csp; >++} >++ >++char* os::non_memory_address_word() { >++ // Must never look like an address returned by reserve_memory, >++ // even in its subfields (as defined by the CPU immediate fields, >++ // if the CPU splits constants across multiple instructions). >++ >++ return (char*) -1; >++} >++ >++// Frame information (pc, sp, fp) retrieved via ucontext >++// always looks like a C-frame according to the frame >++// conventions in frame_ppc64.hpp. >++address os::Bsd::ucontext_get_pc(const ucontext_t * uc) { >++ // On powerpc64, ucontext_t is not selfcontained but contains >++ // a pointer to an optional substructure (mcontext_t.regs) containing the volatile >++ // registers - NIP, among others. >++ // This substructure may or may not be there depending where uc came from: >++ // - if uc was handed over as the argument to a sigaction handler, a pointer to the >++ // substructure was provided by the kernel when calling the signal handler, and >++ // regs->nip can be accessed. >++ // - if uc was filled by getcontext(), it is undefined - getcontext() does not fill >++ // it because the volatile registers are not needed to make setcontext() work. >++ // Hopefully it was zero'd out beforehand. >++ guarantee(uc->uc_mcontext.mc_gpr != NULL, "only use ucontext_get_pc in sigaction context"); >++ return (address)uc->uc_mcontext.mc_srr0; >++} >++ >++// modify PC in ucontext. >++// Note: Only use this for an ucontext handed down to a signal handler. See comment >++// in ucontext_get_pc. 
>++void os::Bsd::ucontext_set_pc(ucontext_t * uc, address pc) { >++ guarantee(uc->uc_mcontext.mc_gpr != NULL, "only use ucontext_set_pc in sigaction context"); >++ uc->uc_mcontext.mc_srr0 = (unsigned long)pc; >++} >++ >++static address ucontext_get_lr(const ucontext_t * uc) { >++ return (address)uc->uc_mcontext.mc_lr; >++} >++ >++intptr_t* os::Bsd::ucontext_get_sp(const ucontext_t * uc) { >++ return (intptr_t*)uc->uc_mcontext.mc_gpr[1/*REG_SP*/]; >++} >++ >++intptr_t* os::Bsd::ucontext_get_fp(const ucontext_t * uc) { >++ return NULL; >++} >++ >++ExtendedPC os::fetch_frame_from_context(const void* ucVoid, >++ intptr_t** ret_sp, intptr_t** ret_fp) { >++ >++ ExtendedPC epc; >++ const ucontext_t* uc = (const ucontext_t*)ucVoid; >++ >++ if (uc != NULL) { >++ epc = ExtendedPC(os::Bsd::ucontext_get_pc(uc)); >++ if (ret_sp) *ret_sp = os::Bsd::ucontext_get_sp(uc); >++ if (ret_fp) *ret_fp = os::Bsd::ucontext_get_fp(uc); >++ } else { >++ // construct empty ExtendedPC for return value checking >++ epc = ExtendedPC(NULL); >++ if (ret_sp) *ret_sp = (intptr_t *)NULL; >++ if (ret_fp) *ret_fp = (intptr_t *)NULL; >++ } >++ >++ return epc; >++} >++ >++frame os::fetch_frame_from_context(const void* ucVoid) { >++ intptr_t* sp; >++ intptr_t* fp; >++ ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp); >++ return frame(sp, epc.pc()); >++} >++ >++bool os::Bsd::get_frame_at_stack_banging_point(JavaThread* thread, ucontext_t* uc, frame* fr) { >++ address pc = (address) os::Bsd::ucontext_get_pc(uc); >++ if (Interpreter::contains(pc)) { >++ // Interpreter performs stack banging after the fixed frame header has >++ // been generated while the compilers perform it before. To maintain >++ // semantic consistency between interpreted and compiled frames, the >++ // method returns the Java sender of the current frame. 
>++ *fr = os::fetch_frame_from_context(uc); >++ if (!fr->is_first_java_frame()) { >++ assert(fr->safe_for_sender(thread), "Safety check"); >++ *fr = fr->java_sender(); >++ } >++ } else { >++ // More complex code with compiled code. >++ assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above"); >++ CodeBlob* cb = CodeCache::find_blob(pc); >++ if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) { >++ // Not sure where the pc points to, fallback to default >++ // stack overflow handling. In compiled code, we bang before >++ // the frame is complete. >++ return false; >++ } else { >++ intptr_t* sp = os::Bsd::ucontext_get_sp(uc); >++ address lr = ucontext_get_lr(uc); >++ *fr = frame(sp, lr); >++ if (!fr->is_java_frame()) { >++ assert(fr->safe_for_sender(thread), "Safety check"); >++ assert(!fr->is_first_frame(), "Safety check"); >++ *fr = fr->java_sender(); >++ } >++ } >++ } >++ assert(fr->is_java_frame(), "Safety check"); >++ return true; >++} >++ >++frame os::get_sender_for_C_frame(frame* fr) { >++ if (*fr->sp() == 0) { >++ // fr is the last C frame >++ return frame(NULL, NULL); >++ } >++ return frame(fr->sender_sp(), fr->sender_pc()); >++} >++ >++ >++frame os::current_frame() { >++ intptr_t* csp = (intptr_t*) *((intptr_t*) os::current_stack_pointer()); >++ // hack. >++ frame topframe(csp, (address)0x8); >++ // Return sender of sender of current topframe which hopefully >++ // both have pc != NULL. >++ frame tmp = os::get_sender_for_C_frame(&topframe); >++ return os::get_sender_for_C_frame(&tmp); >++} >++ >++// Utility functions >++ >++extern "C" JNIEXPORT int >++JVM_handle_bsd_signal(int sig, >++ siginfo_t* info, >++ void* ucVoid, >++ int abort_if_unrecognized) { >++ ucontext_t* uc = (ucontext_t*) ucVoid; >++ >++ Thread* t = Thread::current_or_null_safe(); >++ >++ SignalHandlerMark shm(t); >++ >++ // Note: it's not uncommon that JNI code uses signal/sigset to install >++ // then restore certain signal handler (e.g. 
to temporarily block SIGPIPE, >++ // or have a SIGILL handler when detecting CPU type). When that happens, >++ // JVM_handle_bsd_signal() might be invoked with junk info/ucVoid. To >++ // avoid unnecessary crash when libjsig is not preloaded, try handle signals >++ // that do not require siginfo/ucontext first. >++ >++ if (sig == SIGPIPE) { >++ if (os::Bsd::chained_handler(sig, info, ucVoid)) { >++ return true; >++ } else { >++ // Ignoring SIGPIPE - see bugs 4229104 >++ return true; >++ } >++ } >++ >++#ifdef CAN_SHOW_REGISTERS_ON_ASSERT >++ if ((sig == SIGSEGV || sig == SIGBUS) && info != NULL && info->si_addr == g_assert_poison) { >++ handle_assert_poison_fault(ucVoid, info->si_addr); >++ return 1; >++ } >++#endif >++ >++ JavaThread* thread = NULL; >++ VMThread* vmthread = NULL; >++ if (os::Bsd::signal_handlers_are_installed) { >++ if (t != NULL) { >++ if(t->is_Java_thread()) { >++ thread = (JavaThread*)t; >++ } else if(t->is_VM_thread()) { >++ vmthread = (VMThread *)t; >++ } >++ } >++ } >++ >++ // Moved SafeFetch32 handling outside thread!=NULL conditional block to make >++ // it work if no associated JavaThread object exists. >++ if (uc) { >++ address const pc = os::Bsd::ucontext_get_pc(uc); >++ if (pc && StubRoutines::is_safefetch_fault(pc)) { >++ os::Bsd::ucontext_set_pc(uc, StubRoutines::continuation_for_safefetch_fault(pc)); >++ return true; >++ } >++ } >++ >++ // decide if this trap can be handled by a stub >++ address stub = NULL; >++ address pc = NULL; >++ >++ //%note os_trap_1 >++ if (info != NULL && uc != NULL && thread != NULL) { >++ pc = (address) os::Bsd::ucontext_get_pc(uc); >++ >++ // Handle ALL stack overflow variations here >++ if (sig == SIGSEGV) { >++ // Si_addr may not be valid due to a bug in the bsd-ppc64 kernel (see >++ // comment below). Use get_stack_bang_address instead of si_addr. >++ address addr = ((NativeInstruction*)pc)->get_stack_bang_address(uc); >++ >++ // Check if fault address is within thread stack. 
>++      if (thread->on_local_stack(addr)) {
>++        // stack overflow
>++        if (thread->in_stack_yellow_reserved_zone(addr)) {
>++          if (thread->thread_state() == _thread_in_Java) {
>++            if (thread->in_stack_reserved_zone(addr)) {
>++              frame fr;
>++              if (os::Bsd::get_frame_at_stack_banging_point(thread, uc, &fr)) {
>++                assert(fr.is_java_frame(), "Must be a Java frame");
>++                frame activation =
>++                  SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
>++                if (activation.sp() != NULL) {
>++                  thread->disable_stack_reserved_zone();
>++                  if (activation.is_interpreted_frame()) {
>++                    thread->set_reserved_stack_activation((address)activation.fp());
>++                  } else {
>++                    thread->set_reserved_stack_activation((address)activation.unextended_sp());
>++                  }
>++                  return 1;
>++                }
>++              }
>++            }
>++            // Throw a stack overflow exception.
>++            // Guard pages will be reenabled while unwinding the stack.
>++            thread->disable_stack_yellow_reserved_zone();
>++            stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
>++          } else {
>++            // Thread was in the vm or native code. Return and try to finish.
>++            thread->disable_stack_yellow_reserved_zone();
>++            return 1;
>++          }
>++        } else if (thread->in_stack_red_zone(addr)) {
>++          // Fatal red zone violation. Disable the guard pages and fall through
>++          // to handle_unexpected_exception way down below.
>++          thread->disable_stack_red_zone();
>++          tty->print_raw_cr("An irrecoverable stack overflow has occurred.");
>++
>++          // This is a likely cause, but hard to verify. Let's just print
>++          // it as a hint.
>++          tty->print_raw_cr("Please check if any of your loaded .so files has "
>++                            "enabled executable stack (see man page execstack(8))");
>++        }
>++      }
>++    }
>++
>++    if (thread->thread_state() == _thread_in_Java) {
>++      // Java thread running in Java code => find exception handler if any
>++      // a fault inside compiled code, the interpreter, or a stub
>++
>++      // A VM-related SIGILL may only occur if we are not in the zero page.
>++ // On AIX, we get a SIGILL if we jump to 0x0 or to somewhere else >++ // in the zero page, because it is filled with 0x0. We ignore >++ // explicit SIGILLs in the zero page. >++ if (sig == SIGILL && (pc < (address) 0x200)) { >++ if (TraceTraps) { >++ tty->print_raw_cr("SIGILL happened inside zero page."); >++ } >++ goto report_and_die; >++ } >++ >++ CodeBlob *cb = NULL; >++ // Handle signal from NativeJump::patch_verified_entry(). >++ if (( TrapBasedNotEntrantChecks && sig == SIGTRAP && nativeInstruction_at(pc)->is_sigtrap_zombie_not_entrant()) || >++ (!TrapBasedNotEntrantChecks && sig == SIGILL && nativeInstruction_at(pc)->is_sigill_zombie_not_entrant())) { >++ if (TraceTraps) { >++ tty->print_cr("trap: zombie_not_entrant (%s)", (sig == SIGTRAP) ? "SIGTRAP" : "SIGILL"); >++ } >++ stub = SharedRuntime::get_handle_wrong_method_stub(); >++ } >++ >++ else if (sig == ((SafepointMechanism::uses_thread_local_poll() && USE_POLL_BIT_ONLY) ? SIGTRAP : SIGSEGV) && >++ // A bsd-ppc64 kernel before 2.6.6 doesn't set si_addr on some segfaults >++ // in 64bit mode (cf. http://www.kernel.org/pub/bsd/kernel/v2.6/ChangeLog-2.6.6), >++ // especially when we try to read from the safepoint polling page. So the check >++ // (address)info->si_addr == os::get_standard_polling_page() >++ // doesn't work for us. We use: >++ ((NativeInstruction*)pc)->is_safepoint_poll() && >++ CodeCache::contains((void*) pc) && >++ ((cb = CodeCache::find_blob(pc)) != NULL) && >++ cb->is_compiled()) { >++ if (TraceTraps) { >++ tty->print_cr("trap: safepoint_poll at " INTPTR_FORMAT " (%s)", p2i(pc), >++ (SafepointMechanism::uses_thread_local_poll() && USE_POLL_BIT_ONLY) ? "SIGTRAP" : "SIGSEGV"); >++ } >++ stub = SharedRuntime::get_poll_stub(pc); >++ } >++ >++ // SIGTRAP-based ic miss check in compiled code. 
>++ else if (sig == SIGTRAP && TrapBasedICMissChecks && >++ nativeInstruction_at(pc)->is_sigtrap_ic_miss_check()) { >++ if (TraceTraps) { >++ tty->print_cr("trap: ic_miss_check at " INTPTR_FORMAT " (SIGTRAP)", p2i(pc)); >++ } >++ stub = SharedRuntime::get_ic_miss_stub(); >++ } >++ >++ // SIGTRAP-based implicit null check in compiled code. >++ else if (sig == SIGTRAP && TrapBasedNullChecks && >++ nativeInstruction_at(pc)->is_sigtrap_null_check()) { >++ if (TraceTraps) { >++ tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGTRAP)", p2i(pc)); >++ } >++ stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL); >++ } >++ >++ // SIGSEGV-based implicit null check in compiled code. >++ else if (sig == SIGSEGV && ImplicitNullChecks && >++ CodeCache::contains((void*) pc) && >++ MacroAssembler::uses_implicit_null_check(info->si_addr)) { >++ if (TraceTraps) { >++ tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGSEGV)", p2i(pc)); >++ } >++ stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL); >++ } >++ >++#ifdef COMPILER2 >++ // SIGTRAP-based implicit range check in compiled code. >++ else if (sig == SIGTRAP && TrapBasedRangeChecks && >++ nativeInstruction_at(pc)->is_sigtrap_range_check()) { >++ if (TraceTraps) { >++ tty->print_cr("trap: range_check at " INTPTR_FORMAT " (SIGTRAP)", p2i(pc)); >++ } >++ stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL); >++ } >++#endif >++ else if (sig == SIGBUS) { >++ // BugId 4454115: A read from a MappedByteBuffer can fault here if the >++ // underlying file has been truncated. Do not crash the VM in such a case. >++ CodeBlob* cb = CodeCache::find_blob_unsafe(pc); >++ CompiledMethod* nm = (cb != NULL) ? 
cb->as_compiled_method_or_null() : NULL;
>++        if (nm != NULL && nm->has_unsafe_access()) {
>++          address next_pc = pc + 4;
>++          next_pc = SharedRuntime::handle_unsafe_access(thread, next_pc);
>++          os::Bsd::ucontext_set_pc(uc, next_pc);
>++          return true;
>++        }
>++      }
>++    }
>++
>++    else { // thread->thread_state() != _thread_in_Java
>++      if (sig == SIGILL && VM_Version::is_determine_features_test_running()) {
>++        // SIGILL must be caused by VM_Version::determine_features().
>++        *(int *)pc = 0; // patch instruction to 0 to indicate that it causes a SIGILL,
>++                        // flushing of icache is not necessary.
>++        stub = pc + 4; // continue with next instruction.
>++      }
>++      else if (thread->thread_state() == _thread_in_vm &&
>++               sig == SIGBUS && thread->doing_unsafe_access()) {
>++        address next_pc = pc + 4;
>++        next_pc = SharedRuntime::handle_unsafe_access(thread, next_pc);
>++        os::Bsd::ucontext_set_pc(uc, next_pc);
>++        return true;
>++      }
>++    }
>++  }
>++
>++  if (stub != NULL) {
>++    // Save all thread context in case we need to restore it.
>++    if (thread != NULL) thread->set_saved_exception_pc(pc);
>++    os::Bsd::ucontext_set_pc(uc, stub);
>++    return true;
>++  }
>++
>++  // signal-chaining
>++  if (os::Bsd::chained_handler(sig, info, ucVoid)) {
>++    return true;
>++  }
>++
>++  if (!abort_if_unrecognized) {
>++    // caller wants another chance, so give it to him
>++    return false;
>++  }
>++
>++  if (pc == NULL && uc != NULL) {
>++    pc = os::Bsd::ucontext_get_pc(uc);
>++  }
>++
>++report_and_die:
>++  // unmask current signal
>++  sigset_t newset;
>++  sigemptyset(&newset);
>++  sigaddset(&newset, sig);
>++  sigprocmask(SIG_UNBLOCK, &newset, NULL);
>++
>++  VMError::report_and_die(t, sig, pc, info, ucVoid);
>++
>++  ShouldNotReachHere();
>++  return false;
>++}
>++
>++void os::Bsd::init_thread_fpu_state(void) {
>++  // Disable FP exceptions.
>++ __asm__ __volatile__ ("mtfsfi 6,0"); >++} >++ >++//////////////////////////////////////////////////////////////////////////////// >++// thread stack >++ >++// Minimum usable stack sizes required to get to user code. Space for >++// HotSpot guard pages is added later. >++size_t os::Posix::_compiler_thread_min_stack_allowed = 64 * K; >++size_t os::Posix::_java_thread_min_stack_allowed = 64 * K; >++size_t os::Posix::_vm_internal_thread_min_stack_allowed = 64 * K; >++ >++// Return default stack size for thr_type. >++size_t os::Posix::default_stack_size(os::ThreadType thr_type) { >++ // Default stack size (compiler thread needs larger stack). >++ size_t s = (thr_type == os::compiler_thread ? 4 * M : 1024 * K); >++ return s; >++} >++ >++///////////////////////////////////////////////////////////////////////////// >++// helper functions for fatal error handler >++ >++void os::print_context(outputStream *st, const void *context) { >++ if (context == NULL) return; >++ >++ const ucontext_t* uc = (const ucontext_t*)context; >++ >++ st->print_cr("Registers:"); >++ st->print("pc =" INTPTR_FORMAT " ", uc->uc_mcontext.mc_srr0); >++ st->print("lr =" INTPTR_FORMAT " ", uc->uc_mcontext.mc_lr); >++ st->print("ctr=" INTPTR_FORMAT " ", uc->uc_mcontext.mc_ctr); >++ st->cr(); >++ for (int i = 0; i < 32; i++) { >++ st->print("r%-2d=" INTPTR_FORMAT " ", i, uc->uc_mcontext.mc_gpr[i]); >++ if (i % 3 == 2) st->cr(); >++ } >++ st->cr(); >++ st->cr(); >++ >++ intptr_t *sp = (intptr_t *)os::Bsd::ucontext_get_sp(uc); >++ st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", p2i(sp)); >++ print_hex_dump(st, (address)sp, (address)(sp + 128), sizeof(intptr_t)); >++ st->cr(); >++ >++ // Note: it may be unsafe to inspect memory near pc. For example, pc may >++ // point to garbage if entry point in an nmethod is corrupted. Leave >++ // this at the end, and hope for the best. 
>++ address pc = os::Bsd::ucontext_get_pc(uc); >++ st->print_cr("Instructions: (pc=" PTR_FORMAT ")", p2i(pc)); >++ print_hex_dump(st, pc - 64, pc + 64, /*instrsize=*/4); >++ st->cr(); >++} >++ >++void os::print_register_info(outputStream *st, const void *context) { >++ if (context == NULL) return; >++ >++ const ucontext_t *uc = (const ucontext_t*)context; >++ >++ st->print_cr("Register to memory mapping:"); >++ st->cr(); >++ >++ st->print("pc ="); print_location(st, (intptr_t)uc->uc_mcontext.mc_srr0); >++ st->print("lr ="); print_location(st, (intptr_t)uc->uc_mcontext.mc_lr); >++ st->print("ctr ="); print_location(st, (intptr_t)uc->uc_mcontext.mc_ctr); >++ for (int i = 0; i < 32; i++) { >++ st->print("r%-2d=", i); >++ print_location(st, uc->uc_mcontext.mc_gpr[i]); >++ } >++ st->cr(); >++} >++ >++extern "C" { >++ int SpinPause() { >++ return 0; >++ } >++} >++ >++#ifndef PRODUCT >++void os::verify_stack_alignment() { >++ assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment"); >++} >++#endif >++ >++int os::extra_bang_size_in_bytes() { >++ // PPC does not require the additional stack bang. >++ return 0; >++} >+--- /dev/null 2019-04-10 22:31:08 UTC >++++ src/hotspot/os_cpu/bsd_ppc/os_bsd_ppc.hpp >+@@ -0,0 +1,35 @@ >++/* >++ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved. >++ * Copyright (c) 2012, 2013 SAP SE. All rights reserved. >++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. >++ * >++ * This code is free software; you can redistribute it and/or modify it >++ * under the terms of the GNU General Public License version 2 only, as >++ * published by the Free Software Foundation. >++ * >++ * This code is distributed in the hope that it will be useful, but WITHOUT >++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or >++ * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License >++ * version 2 for more details (a copy is included in the LICENSE file that >++ * accompanied this code). >++ * >++ * You should have received a copy of the GNU General Public License version >++ * 2 along with this work; if not, write to the Free Software Foundation, >++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. >++ * >++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA >++ * or visit www.oracle.com if you need additional information or have any >++ * questions. >++ * >++ */ >++ >++#ifndef OS_CPU_BSD_PPC_VM_OS_BSD_PPC_HPP >++#define OS_CPU_BSD_PPC_VM_OS_BSD_PPC_HPP >++ >++ static void setup_fpu() {} >++ >++ // Used to register dynamic code cache area with the OS >++ // Note: Currently only used in 64 bit Windows implementations >++ static bool register_code_area(char *low, char *high) { return true; } >++ >++#endif // OS_CPU_BSD_PPC_VM_OS_BSD_PPC_HPP >+--- /dev/null 2019-04-10 22:31:08 UTC >++++ src/hotspot/os_cpu/bsd_ppc/prefetch_bsd_ppc.inline.hpp >+@@ -0,0 +1,50 @@ >++/* >++ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. >++ * Copyright (c) 2012, 2013 SAP SE. All rights reserved. >++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. >++ * >++ * This code is free software; you can redistribute it and/or modify it >++ * under the terms of the GNU General Public License version 2 only, as >++ * published by the Free Software Foundation. >++ * >++ * This code is distributed in the hope that it will be useful, but WITHOUT >++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or >++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License >++ * version 2 for more details (a copy is included in the LICENSE file that >++ * accompanied this code). 
>++ * >++ * You should have received a copy of the GNU General Public License version >++ * 2 along with this work; if not, write to the Free Software Foundation, >++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. >++ * >++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA >++ * or visit www.oracle.com if you need additional information or have any >++ * questions. >++ * >++ */ >++ >++#ifndef OS_CPU_BSD_PPC_VM_PREFETCH_BSD_PPC_INLINE_HPP >++#define OS_CPU_BSD_PPC_VM_PREFETCH_BSD_PPC_INLINE_HPP >++ >++#include "runtime/prefetch.hpp" >++ >++ >++inline void Prefetch::read(void *loc, intx interval) { >++ __asm__ __volatile__ ( >++ " dcbt 0, %0 \n" >++ : >++ : /*%0*/"r" ( ((address)loc) +((long)interval) ) >++ //: >++ ); >++} >++ >++inline void Prefetch::write(void *loc, intx interval) { >++ __asm__ __volatile__ ( >++ " dcbtst 0, %0 \n" >++ : >++ : /*%0*/"r" ( ((address)loc) +((long)interval) ) >++ //: >++ ); >++} >++ >++#endif // OS_CPU_BSD_PPC_VM_PREFETCH_BSD_PPC_INLINE_HPP >+--- /dev/null 2019-04-10 22:31:08 UTC >++++ src/hotspot/os_cpu/bsd_ppc/thread_bsd_ppc.cpp >+@@ -0,0 +1,101 @@ >++/* >++ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. >++ * Copyright (c) 2012, 2014 SAP SE. All rights reserved. >++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. >++ * >++ * This code is free software; you can redistribute it and/or modify it >++ * under the terms of the GNU General Public License version 2 only, as >++ * published by the Free Software Foundation. >++ * >++ * This code is distributed in the hope that it will be useful, but WITHOUT >++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or >++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License >++ * version 2 for more details (a copy is included in the LICENSE file that >++ * accompanied this code). 
>++ *
>++ * You should have received a copy of the GNU General Public License version
>++ * 2 along with this work; if not, write to the Free Software Foundation,
>++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
>++ *
>++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
>++ * or visit www.oracle.com if you need additional information or have any
>++ * questions.
>++ *
>++ */
>++
>++#include "precompiled.hpp"
>++#include "runtime/frame.inline.hpp"
>++#include "runtime/thread.hpp"
>++
>++frame JavaThread::pd_last_frame() {
>++  assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
>++
>++  intptr_t* sp = last_Java_sp();
>++  address pc = _anchor.last_Java_pc();
>++
>++  // Last_Java_pc is not set, if we come here from compiled code.
>++  if (pc == NULL) {
>++    pc = (address) *(sp + 2);
>++  }
>++
>++  return frame(sp, pc);
>++}
>++
>++bool JavaThread::pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava) {
>++  assert(this->is_Java_thread(), "must be JavaThread");
>++
>++  // If we have a last_Java_frame, then we should use it even if
>++  // isInJava == true.  It should be more reliable than ucontext info.
>++  if (has_last_Java_frame() && frame_anchor()->walkable()) {
>++    *fr_addr = pd_last_frame();
>++    return true;
>++  }
>++
>++  // At this point, we don't have a last_Java_frame, so
>++  // we try to glean some information out of the ucontext
>++  // if we were running Java code when SIGPROF came in.
>++ if (isInJava) { >++ ucontext_t* uc = (ucontext_t*) ucontext; >++ frame ret_frame((intptr_t*)uc->uc_mcontext.mc_gpr[1/*REG_SP*/], >++ (address)uc->uc_mcontext.mc_srr0); >++ >++ if (ret_frame.pc() == NULL) { >++ // ucontext wasn't useful >++ return false; >++ } >++ >++ if (ret_frame.is_interpreted_frame()) { >++ frame::ijava_state* istate = ret_frame.get_ijava_state(); >++ if (!((Method*)(istate->method))->is_metaspace_object()) { >++ return false; >++ } >++ uint64_t reg_bcp = uc->uc_mcontext.mc_gpr[14/*R14_bcp*/]; >++ uint64_t istate_bcp = istate->bcp; >++ uint64_t code_start = (uint64_t)(((Method*)(istate->method))->code_base()); >++ uint64_t code_end = (uint64_t)(((Method*)istate->method)->code_base() + ((Method*)istate->method)->code_size()); >++ if (istate_bcp >= code_start && istate_bcp < code_end) { >++ // we have a valid bcp, don't touch it, do nothing >++ } else if (reg_bcp >= code_start && reg_bcp < code_end) { >++ istate->bcp = reg_bcp; >++ } else { >++ return false; >++ } >++ } >++ if (!ret_frame.safe_for_sender(this)) { >++ // nothing else to try if the frame isn't good >++ return false; >++ } >++ *fr_addr = ret_frame; >++ return true; >++ } >++ // nothing else to try >++ return false; >++} >++ >++// Forte Analyzer AsyncGetCallTrace profiling support is not implemented on Linux/PPC. >++bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava) { >++ assert(this->is_Java_thread(), "must be JavaThread"); >++ return pd_get_top_frame_for_profiling(fr_addr, ucontext, isInJava); >++} >++ >++void JavaThread::cache_global_variables() { } >+--- /dev/null 2019-04-10 22:31:08 UTC >++++ src/hotspot/os_cpu/bsd_ppc/thread_bsd_ppc.hpp >+@@ -0,0 +1,54 @@ >++/* >++ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved. >++ * Copyright (c) 2012, 2013 SAP SE. All rights reserved. >++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
>++ * >++ * This code is free software; you can redistribute it and/or modify it >++ * under the terms of the GNU General Public License version 2 only, as >++ * published by the Free Software Foundation. >++ * >++ * This code is distributed in the hope that it will be useful, but WITHOUT >++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or >++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License >++ * version 2 for more details (a copy is included in the LICENSE file that >++ * accompanied this code). >++ * >++ * You should have received a copy of the GNU General Public License version >++ * 2 along with this work; if not, write to the Free Software Foundation, >++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. >++ * >++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA >++ * or visit www.oracle.com if you need additional information or have any >++ * questions. >++ * >++ */ >++ >++#ifndef OS_CPU_BSD_PPC_VM_THREAD_BSD_PPC_HPP >++#define OS_CPU_BSD_PPC_VM_THREAD_BSD_PPC_HPP >++ >++ private: >++ >++ void pd_initialize() { >++ _anchor.clear(); >++ } >++ >++ // The `last' frame is the youngest Java frame on the thread's stack. >++ frame pd_last_frame(); >++ >++ public: >++ >++ void set_base_of_stack_pointer(intptr_t* base_sp) {} >++ intptr_t* base_of_stack_pointer() { return NULL; } >++ void record_base_of_stack_pointer() {} >++ >++ // These routines are only used on cpu architectures that >++ // have separate register stacks (Itanium). 
>++ static bool register_stack_overflow() { return false; } >++ static void enable_register_stack_guard() {} >++ static void disable_register_stack_guard() {} >++ >++ bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava); >++ >++ bool pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava); >++ >++#endif // OS_CPU_BSD_PPC_VM_THREAD_BSD_PPC_HPP >+--- /dev/null 2019-04-10 22:31:08 UTC >++++ src/hotspot/os_cpu/bsd_ppc/vmStructs_bsd_ppc.hpp >+@@ -0,0 +1,55 @@ >++/* >++ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. >++ * Copyright (c) 2012, 2013 SAP SE. All rights reserved. >++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. >++ * >++ * This code is free software; you can redistribute it and/or modify it >++ * under the terms of the GNU General Public License version 2 only, as >++ * published by the Free Software Foundation. >++ * >++ * This code is distributed in the hope that it will be useful, but WITHOUT >++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or >++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License >++ * version 2 for more details (a copy is included in the LICENSE file that >++ * accompanied this code). >++ * >++ * You should have received a copy of the GNU General Public License version >++ * 2 along with this work; if not, write to the Free Software Foundation, >++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. >++ * >++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA >++ * or visit www.oracle.com if you need additional information or have any >++ * questions. >++ * >++ */ >++ >++#ifndef OS_CPU_BSD_PPC_VM_VMSTRUCTS_BSD_PPC_HPP >++#define OS_CPU_BSD_PPC_VM_VMSTRUCTS_BSD_PPC_HPP >++ >++// These are the OS and CPU-specific fields, types and integer >++// constants required by the Serviceability Agent. This file is >++// referenced by vmStructs.cpp. 
>++ >++#define VM_STRUCTS_OS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field) \ >++ \ >++ /******************************/ \ >++ /* Threads (NOTE: incomplete) */ \ >++ /******************************/ \ >++ nonstatic_field(OSThread, _thread_id, pid_t) \ >++ nonstatic_field(OSThread, _pthread_id, pthread_t) >++ >++ >++#define VM_TYPES_OS_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type) \ >++ \ >++ /**********************/ \ >++ /* Posix Thread IDs */ \ >++ /**********************/ \ >++ \ >++ declare_integer_type(pid_t) \ >++ declare_unsigned_integer_type(pthread_t) >++ >++#define VM_INT_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant) >++ >++#define VM_LONG_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant) >++ >++#endif // OS_CPU_BSD_PPC_VM_VMSTRUCTS_BSD_PPC_HPP >+--- src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp >++++ src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp >+@@ -854,240 +854,3 @@ >+ #endif // AMD64 >+ return s; >+ } >+- >+- >+-// Java thread: >+-// >+-// Low memory addresses >+-// +------------------------+ >+-// | |\ Java thread created by VM does not have glibc >+-// | glibc guard page | - guard, attached Java thread usually has >+-// | |/ 1 glibc guard page. 
>+-// P1 +------------------------+ Thread::stack_base() - Thread::stack_size() >+-// | |\ >+-// | HotSpot Guard Pages | - red, yellow and reserved pages >+-// | |/ >+-// +------------------------+ JavaThread::stack_reserved_zone_base() >+-// | |\ >+-// | Normal Stack | - >+-// | |/ >+-// P2 +------------------------+ Thread::stack_base() >+-// >+-// Non-Java thread: >+-// >+-// Low memory addresses >+-// +------------------------+ >+-// | |\ >+-// | glibc guard page | - usually 1 page >+-// | |/ >+-// P1 +------------------------+ Thread::stack_base() - Thread::stack_size() >+-// | |\ >+-// | Normal Stack | - >+-// | |/ >+-// P2 +------------------------+ Thread::stack_base() >+-// >+-// ** P1 (aka bottom) and size ( P2 = P1 - size) are the address and stack size returned from >+-// pthread_attr_getstack() >+- >+-static void current_stack_region(address * bottom, size_t * size) { >+-#ifdef __APPLE__ >+- pthread_t self = pthread_self(); >+- void *stacktop = pthread_get_stackaddr_np(self); >+- *size = pthread_get_stacksize_np(self); >+- // workaround for OS X 10.9.0 (Mavericks) >+- // pthread_get_stacksize_np returns 128 pages even though the actual size is 2048 pages >+- if (pthread_main_np() == 1) { >+- // At least on Mac OS 10.12 we have observed stack sizes not aligned >+- // to pages boundaries. This can be provoked by e.g. setrlimit() (ulimit -s xxxx in the >+- // shell). Apparently Mac OS actually rounds upwards to next multiple of page size, >+- // however, we round downwards here to be on the safe side. 
>+- *size = align_down(*size, getpagesize()); >+- >+- if ((*size) < (DEFAULT_MAIN_THREAD_STACK_PAGES * (size_t)getpagesize())) { >+- char kern_osrelease[256]; >+- size_t kern_osrelease_size = sizeof(kern_osrelease); >+- int ret = sysctlbyname("kern.osrelease", kern_osrelease, &kern_osrelease_size, NULL, 0); >+- if (ret == 0) { >+- // get the major number, atoi will ignore the minor amd micro portions of the version string >+- if (atoi(kern_osrelease) >= OS_X_10_9_0_KERNEL_MAJOR_VERSION) { >+- *size = (DEFAULT_MAIN_THREAD_STACK_PAGES*getpagesize()); >+- } >+- } >+- } >+- } >+- *bottom = (address) stacktop - *size; >+-#elif defined(__OpenBSD__) >+- stack_t ss; >+- int rslt = pthread_stackseg_np(pthread_self(), &ss); >+- >+- if (rslt != 0) >+- fatal("pthread_stackseg_np failed with error = %d", rslt); >+- >+- *bottom = (address)((char *)ss.ss_sp - ss.ss_size); >+- *size = ss.ss_size; >+-#else >+- pthread_attr_t attr; >+- >+- int rslt = pthread_attr_init(&attr); >+- >+- // JVM needs to know exact stack location, abort if it fails >+- if (rslt != 0) >+- fatal("pthread_attr_init failed with error = %d", rslt); >+- >+- rslt = pthread_attr_get_np(pthread_self(), &attr); >+- >+- if (rslt != 0) >+- fatal("pthread_attr_get_np failed with error = %d", rslt); >+- >+- if (pthread_attr_getstackaddr(&attr, (void **)bottom) != 0 || >+- pthread_attr_getstacksize(&attr, size) != 0) { >+- fatal("Can not locate current stack attributes!"); >+- } >+- >+- pthread_attr_destroy(&attr); >+-#endif >+- assert(os::current_stack_pointer() >= *bottom && >+- os::current_stack_pointer() < *bottom + *size, "just checking"); >+-} >+- >+-address os::current_stack_base() { >+- address bottom; >+- size_t size; >+- current_stack_region(&bottom, &size); >+- return (bottom + size); >+-} >+- >+-size_t os::current_stack_size() { >+- // stack size includes normal stack and HotSpot guard pages >+- address bottom; >+- size_t size; >+- current_stack_region(&bottom, &size); >+- return size; >+-} >+- 
>+-///////////////////////////////////////////////////////////////////////////// >+-// helper functions for fatal error handler >+- >+-void os::print_context(outputStream *st, const void *context) { >+- if (context == NULL) return; >+- >+- const ucontext_t *uc = (const ucontext_t*)context; >+- st->print_cr("Registers:"); >+-#ifdef AMD64 >+- st->print( "RAX=" INTPTR_FORMAT, (intptr_t)uc->context_rax); >+- st->print(", RBX=" INTPTR_FORMAT, (intptr_t)uc->context_rbx); >+- st->print(", RCX=" INTPTR_FORMAT, (intptr_t)uc->context_rcx); >+- st->print(", RDX=" INTPTR_FORMAT, (intptr_t)uc->context_rdx); >+- st->cr(); >+- st->print( "RSP=" INTPTR_FORMAT, (intptr_t)uc->context_rsp); >+- st->print(", RBP=" INTPTR_FORMAT, (intptr_t)uc->context_rbp); >+- st->print(", RSI=" INTPTR_FORMAT, (intptr_t)uc->context_rsi); >+- st->print(", RDI=" INTPTR_FORMAT, (intptr_t)uc->context_rdi); >+- st->cr(); >+- st->print( "R8 =" INTPTR_FORMAT, (intptr_t)uc->context_r8); >+- st->print(", R9 =" INTPTR_FORMAT, (intptr_t)uc->context_r9); >+- st->print(", R10=" INTPTR_FORMAT, (intptr_t)uc->context_r10); >+- st->print(", R11=" INTPTR_FORMAT, (intptr_t)uc->context_r11); >+- st->cr(); >+- st->print( "R12=" INTPTR_FORMAT, (intptr_t)uc->context_r12); >+- st->print(", R13=" INTPTR_FORMAT, (intptr_t)uc->context_r13); >+- st->print(", R14=" INTPTR_FORMAT, (intptr_t)uc->context_r14); >+- st->print(", R15=" INTPTR_FORMAT, (intptr_t)uc->context_r15); >+- st->cr(); >+- st->print( "RIP=" INTPTR_FORMAT, (intptr_t)uc->context_rip); >+- st->print(", EFLAGS=" INTPTR_FORMAT, (intptr_t)uc->context_flags); >+- st->print(", ERR=" INTPTR_FORMAT, (intptr_t)uc->context_err); >+- st->cr(); >+- st->print(" TRAPNO=" INTPTR_FORMAT, (intptr_t)uc->context_trapno); >+-#else >+- st->print( "EAX=" INTPTR_FORMAT, (intptr_t)uc->context_eax); >+- st->print(", EBX=" INTPTR_FORMAT, (intptr_t)uc->context_ebx); >+- st->print(", ECX=" INTPTR_FORMAT, (intptr_t)uc->context_ecx); >+- st->print(", EDX=" INTPTR_FORMAT, 
(intptr_t)uc->context_edx); >+- st->cr(); >+- st->print( "ESP=" INTPTR_FORMAT, (intptr_t)uc->context_esp); >+- st->print(", EBP=" INTPTR_FORMAT, (intptr_t)uc->context_ebp); >+- st->print(", ESI=" INTPTR_FORMAT, (intptr_t)uc->context_esi); >+- st->print(", EDI=" INTPTR_FORMAT, (intptr_t)uc->context_edi); >+- st->cr(); >+- st->print( "EIP=" INTPTR_FORMAT, (intptr_t)uc->context_eip); >+- st->print(", EFLAGS=" INTPTR_FORMAT, (intptr_t)uc->context_eflags); >+-#endif // AMD64 >+- st->cr(); >+- st->cr(); >+- >+- intptr_t *sp = (intptr_t *)os::Bsd::ucontext_get_sp(uc); >+- st->print_cr("Top of Stack: (sp=" INTPTR_FORMAT ")", (intptr_t)sp); >+- print_hex_dump(st, (address)sp, (address)(sp + 8*sizeof(intptr_t)), sizeof(intptr_t)); >+- st->cr(); >+- >+- // Note: it may be unsafe to inspect memory near pc. For example, pc may >+- // point to garbage if entry point in an nmethod is corrupted. Leave >+- // this at the end, and hope for the best. >+- address pc = os::Bsd::ucontext_get_pc(uc); >+- st->print_cr("Instructions: (pc=" INTPTR_FORMAT ")", (intptr_t)pc); >+- print_hex_dump(st, pc - 32, pc + 32, sizeof(char)); >+-} >+- >+-void os::print_register_info(outputStream *st, const void *context) { >+- if (context == NULL) return; >+- >+- const ucontext_t *uc = (const ucontext_t*)context; >+- >+- st->print_cr("Register to memory mapping:"); >+- st->cr(); >+- >+- // this is horrendously verbose but the layout of the registers in the >+- // context does not match how we defined our abstract Register set, so >+- // we can't just iterate through the gregs area >+- >+- // this is only for the "general purpose" registers >+- >+-#ifdef AMD64 >+- st->print("RAX="); print_location(st, uc->context_rax); >+- st->print("RBX="); print_location(st, uc->context_rbx); >+- st->print("RCX="); print_location(st, uc->context_rcx); >+- st->print("RDX="); print_location(st, uc->context_rdx); >+- st->print("RSP="); print_location(st, uc->context_rsp); >+- st->print("RBP="); print_location(st, 
uc->context_rbp); >+- st->print("RSI="); print_location(st, uc->context_rsi); >+- st->print("RDI="); print_location(st, uc->context_rdi); >+- st->print("R8 ="); print_location(st, uc->context_r8); >+- st->print("R9 ="); print_location(st, uc->context_r9); >+- st->print("R10="); print_location(st, uc->context_r10); >+- st->print("R11="); print_location(st, uc->context_r11); >+- st->print("R12="); print_location(st, uc->context_r12); >+- st->print("R13="); print_location(st, uc->context_r13); >+- st->print("R14="); print_location(st, uc->context_r14); >+- st->print("R15="); print_location(st, uc->context_r15); >+-#else >+- st->print("EAX="); print_location(st, uc->context_eax); >+- st->print("EBX="); print_location(st, uc->context_ebx); >+- st->print("ECX="); print_location(st, uc->context_ecx); >+- st->print("EDX="); print_location(st, uc->context_edx); >+- st->print("ESP="); print_location(st, uc->context_esp); >+- st->print("EBP="); print_location(st, uc->context_ebp); >+- st->print("ESI="); print_location(st, uc->context_esi); >+- st->print("EDI="); print_location(st, uc->context_edi); >+-#endif // AMD64 >+- >+- st->cr(); >+-} >+- >+-void os::setup_fpu() { >+-#ifndef AMD64 >+- address fpu_cntrl = StubRoutines::addr_fpu_cntrl_wrd_std(); >+- __asm__ volatile ( "fldcw (%0)" : >+- : "r" (fpu_cntrl) : "memory"); >+-#endif // !AMD64 >+-} >+- >+-#ifndef PRODUCT >+-void os::verify_stack_alignment() { >+-} >+-#endif >+- >+-int os::extra_bang_size_in_bytes() { >+- // JDK-8050147 requires the full cache line bang for x86. 
>+- return VM_Version::L1_line_size(); >+-} >+--- src/hotspot/share/runtime/os.cpp.orig >++++ src/hotspot/share/runtime/os.cpp >+@@ -1435,7 +1435,6 @@ >+ X(ENETUNREACH, "Network unreachable") \ >+ X(ENFILE, "Too many files open in system") \ >+ X(ENOBUFS, "No buffer space available") \ >+- X(ENODATA, "No message is available on the STREAM head read queue") \ >+ X(ENODEV, "No such device") \ >+ X(ENOENT, "No such file or directory") \ >+ X(ENOEXEC, "Executable file format error") \ >+@@ -1445,8 +1444,6 @@ >+ X(ENOMSG, "No message of the desired type") \ >+ X(ENOPROTOOPT, "Protocol not available") \ >+ X(ENOSPC, "No space left on device") \ >+- X(ENOSR, "No STREAM resources") \ >+- X(ENOSTR, "Not a STREAM") \ >+ X(ENOSYS, "Function not supported") \ >+ X(ENOTCONN, "The socket is not connected") \ >+ X(ENOTDIR, "Not a directory") \ >+@@ -1466,7 +1463,6 @@ >+ X(EROFS, "Read-only file system") \ >+ X(ESPIPE, "Invalid seek") \ >+ X(ESRCH, "No such process") \ >+- X(ETIME, "Stream ioctl() timeout") \ >+ X(ETIMEDOUT, "Connection timed out") \ >+ X(ETXTBSY, "Text file busy") \ >+ X(EWOULDBLOCK, "Operation would block") \ >+--- src/jdk.hotspot.agent/bsd/native/libsaproc/BsdDebuggerLocal.c.orig >++++ src/jdk.hotspot.agent/bsd/native/libsaproc/BsdDebuggerLocal.c >+@@ -38,6 +38,10 @@ >+ #include "sun_jvm_hotspot_debugger_amd64_AMD64ThreadContext.h" >+ #endif >+ >++#ifdef ppc64 >++#include "sun_jvm_hotspot_debugger_ppc64_PPC64ThreadContext.h" >++#endif >++ >+ #if defined(sparc) || defined(sparcv9) >+ #include "sun_jvm_hotspot_debugger_sparc_SPARCThreadContext.h" >+ #endif >+@@ -303,6 +307,9 @@ >+ #endif >+ #if defined(sparc) || defined(sparcv9) >+ #define NPRGREG sun_jvm_hotspot_debugger_sparc_SPARCThreadContext_NPRGREG >++#endif >++#ifdef ppc64 >++#define NPRGREG sun_jvm_hotspot_debugger_ppc64_PPC64ThreadContext_NPRGREG >+ #endif >+ >+ array = (*env)->NewLongArray(env, NPRGREG); >+@@ -395,6 +402,45 @@ >+ regs[REG_INDEX(R_O6)] = gregs.u_regs[13]; >+ regs[REG_INDEX(R_O7)] = 
gregs.u_regs[14]; >+ #endif /* sparc */ >++#if defined(ppc64) || defined(ppc64le) >++#define REG_INDEX(reg) sun_jvm_hotspot_debugger_ppc64_PPC64ThreadContext_##reg >++ >++ regs[REG_INDEX(LR)] = gregs.lr; >++ regs[REG_INDEX(PC)] = gregs.pc; >++ regs[REG_INDEX(R0)] = gregs.fixreg[0]; >++ regs[REG_INDEX(R1)] = gregs.fixreg[1]; >++ regs[REG_INDEX(R2)] = gregs.fixreg[2]; >++ regs[REG_INDEX(R3)] = gregs.fixreg[3]; >++ regs[REG_INDEX(R4)] = gregs.fixreg[4]; >++ regs[REG_INDEX(R5)] = gregs.fixreg[5]; >++ regs[REG_INDEX(R6)] = gregs.fixreg[6]; >++ regs[REG_INDEX(R7)] = gregs.fixreg[7]; >++ regs[REG_INDEX(R8)] = gregs.fixreg[8]; >++ regs[REG_INDEX(R9)] = gregs.fixreg[9]; >++ regs[REG_INDEX(R10)] = gregs.fixreg[10]; >++ regs[REG_INDEX(R11)] = gregs.fixreg[11]; >++ regs[REG_INDEX(R12)] = gregs.fixreg[12]; >++ regs[REG_INDEX(R13)] = gregs.fixreg[13]; >++ regs[REG_INDEX(R14)] = gregs.fixreg[14]; >++ regs[REG_INDEX(R15)] = gregs.fixreg[15]; >++ regs[REG_INDEX(R16)] = gregs.fixreg[16]; >++ regs[REG_INDEX(R17)] = gregs.fixreg[17]; >++ regs[REG_INDEX(R18)] = gregs.fixreg[18]; >++ regs[REG_INDEX(R19)] = gregs.fixreg[19]; >++ regs[REG_INDEX(R20)] = gregs.fixreg[20]; >++ regs[REG_INDEX(R21)] = gregs.fixreg[21]; >++ regs[REG_INDEX(R22)] = gregs.fixreg[22]; >++ regs[REG_INDEX(R23)] = gregs.fixreg[23]; >++ regs[REG_INDEX(R24)] = gregs.fixreg[24]; >++ regs[REG_INDEX(R25)] = gregs.fixreg[25]; >++ regs[REG_INDEX(R26)] = gregs.fixreg[26]; >++ regs[REG_INDEX(R27)] = gregs.fixreg[27]; >++ regs[REG_INDEX(R28)] = gregs.fixreg[28]; >++ regs[REG_INDEX(R29)] = gregs.fixreg[29]; >++ regs[REG_INDEX(R30)] = gregs.fixreg[30]; >++ regs[REG_INDEX(R31)] = gregs.fixreg[31]; >++ >++#endif >+ >+ >+ (*env)->ReleaseLongArrayElements(env, array, regs, JNI_COMMIT); >+--- /dev/null 2019-04-10 22:31:08 UTC >++++ src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/bsd/ppc64/BsdPPC64CFrame.java >+@@ -0,0 +1,79 @@ >++/* >++ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. 
>++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. >++ * >++ * This code is free software; you can redistribute it and/or modify it >++ * under the terms of the GNU General Public License version 2 only, as >++ * published by the Free Software Foundation. >++ * >++ * This code is distributed in the hope that it will be useful, but WITHOUT >++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or >++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License >++ * version 2 for more details (a copy is included in the LICENSE file that >++ * accompanied this code). >++ * >++ * You should have received a copy of the GNU General Public License version >++ * 2 along with this work; if not, write to the Free Software Foundation, >++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. >++ * >++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA >++ * or visit www.oracle.com if you need additional information or have any >++ * questions. >++ * >++ */ >++ >++package sun.jvm.hotspot.debugger.bsd.ppc64; >++ >++import sun.jvm.hotspot.debugger.*; >++import sun.jvm.hotspot.debugger.ppc64.*; >++import sun.jvm.hotspot.debugger.bsd.*; >++import sun.jvm.hotspot.debugger.cdbg.*; >++import sun.jvm.hotspot.debugger.cdbg.basic.*; >++ >++final public class BsdPPC64CFrame extends BasicCFrame { >++ // package/class internals only >++ >++ public BsdPPC64CFrame(BsdDebugger dbg, Address sp, Address pc, int address_size) { >++ super(dbg.getCDebugger()); >++ this.sp = sp; >++ this.pc = pc; >++ this.dbg = dbg; >++ this.address_size = address_size; >++ } >++ >++ // override base class impl to avoid ELF parsing >++ public ClosestSymbol closestSymbolToPC() { >++ // try native lookup in debugger. 
>++ return dbg.lookup(dbg.getAddressValue(pc())); >++ } >++ >++ public Address pc() { >++ return pc; >++ } >++ >++ public Address localVariableBase() { >++ return sp; >++ } >++ >++ public CFrame sender(ThreadProxy thread) { >++ if (sp == null) { >++ return null; >++ } >++ >++ Address nextSP = sp.getAddressAt(0); >++ if (nextSP == null) { >++ return null; >++ } >++ Address nextPC = sp.getAddressAt(2 * address_size); >++ if (nextPC == null) { >++ return null; >++ } >++ return new BsdPPC64CFrame(dbg, nextSP, nextPC, address_size); >++ } >++ >++ public static int PPC64_STACK_BIAS = 0; >++ private static int address_size; >++ private Address pc; >++ private Address sp; >++ private BsdDebugger dbg; >++} >+--- /dev/null 2019-04-10 22:31:08 UTC >++++ src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/bsd/ppc64/BsdPPC64ThreadContext.java >+@@ -0,0 +1,46 @@ >++/* >++ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. >++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. >++ * >++ * This code is free software; you can redistribute it and/or modify it >++ * under the terms of the GNU General Public License version 2 only, as >++ * published by the Free Software Foundation. >++ * >++ * This code is distributed in the hope that it will be useful, but WITHOUT >++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or >++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License >++ * version 2 for more details (a copy is included in the LICENSE file that >++ * accompanied this code). >++ * >++ * You should have received a copy of the GNU General Public License version >++ * 2 along with this work; if not, write to the Free Software Foundation, >++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. >++ * >++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA >++ * or visit www.oracle.com if you need additional information or have any >++ * questions. 
>++ * >++ */ >++ >++package sun.jvm.hotspot.debugger.bsd.ppc64; >++ >++import sun.jvm.hotspot.debugger.*; >++import sun.jvm.hotspot.debugger.ppc64.*; >++import sun.jvm.hotspot.debugger.bsd.*; >++ >++public class BsdPPC64ThreadContext extends PPC64ThreadContext { >++ private BsdDebugger debugger; >++ >++ public BsdPPC64ThreadContext(BsdDebugger debugger) { >++ super(); >++ this.debugger = debugger; >++ } >++ >++ public void setRegisterAsAddress(int index, Address value) { >++ setRegister(index, debugger.getAddressValue(value)); >++ } >++ >++ public Address getRegisterAsAddress(int index) { >++ return debugger.newAddress(getRegister(index)); >++ } >++}
You cannot view the attachment while viewing its details because your browser does not support IFRAMEs.
View the attachment on a separate page
.
View Attachment As Diff
View Attachment As Raw
Actions:
View
|
Diff
Attachments on
bug 237370
: 203773