/*
 * Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// no precompiled headers
#include "jvm.h"
#include "asm/macroAssembler.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "code/nativeInst.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/allocation.inline.hpp"
#include "os_share_linux.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/osThread.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/timer.hpp"
#include "signals_posix.hpp"
#include "utilities/debug.hpp"
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"
// put OS-includes here
# include <sys/types.h>
# include <sys/mman.h>
# include <pthread.h>
# include <signal.h>
# include <errno.h>
# include <dlfcn.h>
# include <stdlib.h>
# include <stdio.h>
# include <unistd.h>
# include <sys/resource.h>
# include <sys/stat.h>
# include <sys/time.h>
# include <sys/utsname.h>
# include <sys/socket.h>
# include <sys/wait.h>
# include <pwd.h>
# include <poll.h>
# include <ucontext.h>
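
// Indices into uc_mcontext.regs[]: on AArch64, x29 is the frame pointer and
// x30 the link register, so these pick the saved FP and return address out of
// a signal context.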
#define REG_FP 29
#define REG_LR 30

NOINLINE address os::current_stack_pointer() {
  return (address)__builtin_frame_address(0);
}

char* os::non_memory_address_word() {
  // Must never look like an address returned by reserve_memory,
  // even in its subfields (as defined by the CPU immediate fields,
  // if the CPU splits constants across multiple instructions).
  return (char*) 0xffffffffffff;
}

address os::Posix::ucontext_get_pc(const ucontext_t * uc) {
  return (address)uc->uc_mcontext.pc;
}

void os::Posix::ucontext_set_pc(ucontext_t * uc, address pc) {
  uc->uc_mcontext.pc = (intptr_t)pc;
}

intptr_t* os::Linux::ucontext_get_sp(const ucontext_t * uc) {
  return (intptr_t*)uc->uc_mcontext.sp;
}

intptr_t* os::Linux::ucontext_get_fp(const ucontext_t * uc) {
  return (intptr_t*)uc->uc_mcontext.regs[REG_FP];
}
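
// Pull an (sp, fp, pc) triple out of a signal context so the VM can construct
// a frame for stack walking, e.g. when reporting an error.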
address os::fetch_frame_from_context(const void* ucVoid,
                                     intptr_t** ret_sp, intptr_t** ret_fp) {
  address epc;
  const ucontext_t* uc = (const ucontext_t*)ucVoid;

  if (uc != NULL) {
    epc = os::Posix::ucontext_get_pc(uc);
    if (ret_sp) *ret_sp = os::Linux::ucontext_get_sp(uc);
    if (ret_fp) *ret_fp = os::Linux::ucontext_get_fp(uc);
  } else {
    epc = NULL;
    if (ret_sp) *ret_sp = (intptr_t *)NULL;
    if (ret_fp) *ret_fp = (intptr_t *)NULL;
  }

  return epc;
}

frame os::fetch_frame_from_context(const void* ucVoid) {
  intptr_t* sp;
  intptr_t* fp;
  address epc = fetch_frame_from_context(ucVoid, &sp, &fp);
  return frame(sp, fp, epc);
}

frame os::fetch_compiled_frame_from_context(const void* ucVoid) {
  const ucontext_t* uc = (const ucontext_t*)ucVoid;
  // In compiled code, the stack banging is performed before LR
  // has been saved in the frame.  LR is live, and SP and FP
  // belong to the caller.
  intptr_t* fp = os::Linux::ucontext_get_fp(uc);
  intptr_t* sp = os::Linux::ucontext_get_sp(uc);
  address pc = (address)(uc->uc_mcontext.regs[REG_LR]
                         - NativeInstruction::instruction_size);
  return frame(sp, fp, pc);
}

// By default, gcc always saves frame pointer rfp on this stack. This
// may get turned off by -fomit-frame-pointer.
frame os::get_sender_for_C_frame(frame* fr) {
  return frame(fr->link(), fr->link(), fr->sender_pc());
}

NOINLINE frame os::current_frame() {
  intptr_t *fp = *(intptr_t **)__builtin_frame_address(0);
  frame myframe((intptr_t*)os::current_stack_pointer(),
                (intptr_t*)fp,
                CAST_FROM_FN_PTR(address, os::current_frame));
  if (os::is_first_C_frame(&myframe)) {
    // stack is not walkable
    return frame();
  } else {
    return os::get_sender_for_C_frame(&myframe);
  }
}
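
// Platform-specific part of the HotSpot signal handler.  Returns true when the
// signal was recognized and handled, typically by redirecting the saved PC to a
// runtime stub via ucontext_set_pc; returns false so the shared POSIX layer can
// continue (signal chaining, error reporting).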
bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
                                             ucontext_t* uc, JavaThread* thread) {
  /*
  NOTE: does not seem to work on linux.
    if (info == NULL || info->si_code <= 0 || info->si_code == SI_NOINFO) {
      // can't decode this kind of signal
      info = NULL;
    } else {
      assert(sig == info->si_signo, "bad siginfo");
    }
  */

  // decide if this trap can be handled by a stub
  address stub = NULL;

  address pc = NULL;

  //%note os_trap_1
  if (info != NULL && uc != NULL && thread != NULL) {
    pc = (address) os::Posix::ucontext_get_pc(uc);

    address addr = (address) info->si_addr;

    // Make sure the high order byte is sign extended, as it may be masked away by the hardware.
    if ((uintptr_t(addr) & (uintptr_t(1) << 55)) != 0) {
      addr = address(uintptr_t(addr) | (uintptr_t(0xFF) << 56));
    }

    // Handle ALL stack overflow variations here
    if (sig == SIGSEGV) {
      // check if fault address is within thread stack
      if (thread->is_in_full_stack(addr)) {
        if (os::Posix::handle_stack_overflow(thread, addr, pc, uc, &stub)) {
          return true; // continue
        }
      }
    }

    if (thread->thread_state() == _thread_in_Java) {
      // Java thread running in Java code => find exception handler if any
      // a fault inside compiled code, the interpreter, or a stub

      // Handle signal from NativeJump::patch_verified_entry().
      if ((sig == SIGILL || sig == SIGTRAP)
          && nativeInstruction_at(pc)->is_sigill_zombie_not_entrant()) {
        if (TraceTraps) {
          tty->print_cr("trap: zombie_not_entrant (%s)", (sig == SIGTRAP) ? "SIGTRAP" : "SIGILL");
        }
        stub = SharedRuntime::get_handle_wrong_method_stub();
      } else if (sig == SIGSEGV && SafepointMechanism::is_poll_address((address)info->si_addr)) {
        stub = SharedRuntime::get_poll_stub(pc);
      } else if (sig == SIGBUS /* && info->si_code == BUS_OBJERR */) {
        // BugId 4454115: A read from a MappedByteBuffer can fault
        // here if the underlying file has been truncated.
        // Do not crash the VM in such a case.
        CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
        CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
        bool is_unsafe_arraycopy = (thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc));
        if ((nm != NULL && nm->has_unsafe_access()) || is_unsafe_arraycopy) {
          address next_pc = pc + NativeCall::instruction_size;
          if (is_unsafe_arraycopy) {
            next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
          }
          stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
        }
      } else if (sig == SIGILL && nativeInstruction_at(pc)->is_stop()) {
        // Pull a pointer to the error message out of the instruction
        // stream.
        const uint64_t *detail_msg_ptr
          = (uint64_t*)(pc + NativeInstruction::instruction_size);
        const char *detail_msg = (const char *)*detail_msg_ptr;
        const char *msg = "stop";
        if (TraceTraps) {
          tty->print_cr("trap: %s: (SIGILL)", msg);
        }

        // End life with a fatal error, message and detail message and the context.
        // Note: no need to do any post-processing here (e.g. signal chaining)
        va_list va_dummy;
        VMError::report_and_die(thread, uc, NULL, 0, msg, detail_msg, va_dummy);
        va_end(va_dummy);

        ShouldNotReachHere();
      } else if (sig == SIGFPE &&
                 (info->si_code == FPE_INTDIV || info->si_code == FPE_FLTDIV)) {
        stub = SharedRuntime::continuation_for_implicit_exception(thread, pc,
                                                                  SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO);
      } else if (sig == SIGSEGV &&
                 MacroAssembler::uses_implicit_null_check((void*)addr)) {
        // Determination of interpreter/vtable stub/compiled code null exception
        stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
      }
    } else if ((thread->thread_state() == _thread_in_vm ||
                thread->thread_state() == _thread_in_native) &&
               sig == SIGBUS && /* info->si_code == BUS_OBJERR && */
               thread->doing_unsafe_access()) {
      address next_pc = pc + NativeCall::instruction_size;
      if (UnsafeCopyMemory::contains_pc(pc)) {
        next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
      }
      stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
    }

    // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in
    // and the heap gets shrunk before the field access.
    if ((sig == SIGSEGV) || (sig == SIGBUS)) {
      address addr = JNI_FastGetField::find_slowcase_pc(pc);
      if (addr != (address)-1) {
        stub = addr;
      }
    }
  }

  if (stub != NULL) {
    // save all thread context in case we need to restore it
    if (thread != NULL) thread->set_saved_exception_pc(pc);

    os::Posix::ucontext_set_pc(uc, stub);
    return true;
  }

  return false; // Mute compiler
}
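
// AArch64 has no x87-style FPU control word to manage, so these hooks are
// no-ops and the control word is reported as 0.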
void os::Linux::init_thread_fpu_state(void) {
}

int os::Linux::get_fpu_control_word(void) {
  return 0;
}

void os::Linux::set_fpu_control_word(int fpu_control) {
}

////////////////////////////////////////////////////////////////////////////////
// thread stack

// Minimum usable stack sizes required to get to user code. Space for
// HotSpot guard pages is added later.
size_t os::Posix::_compiler_thread_min_stack_allowed = 72 * K;
size_t os::Posix::_java_thread_min_stack_allowed = 72 * K;
size_t os::Posix::_vm_internal_thread_min_stack_allowed = 72 * K;

// return default stack size for thr_type
size_t os::Posix::default_stack_size(os::ThreadType thr_type) {
  // default stack size (compiler thread needs larger stack)
  size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M);
  return s;
}

/////////////////////////////////////////////////////////////////////////////
// helper functions for fatal error handler

void os::print_context(outputStream *st, const void *context) {
  if (context == NULL) return;

  const ucontext_t *uc = (const ucontext_t*)context;
  st->print_cr("Registers:");
  for (int r = 0; r < 31; r++) {
    st->print("R%-2d=", r);
    print_location(st, uc->uc_mcontext.regs[r]);
  }
  st->cr();

  intptr_t *sp = (intptr_t *)os::Linux::ucontext_get_sp(uc);
  st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", p2i(sp));
  print_hex_dump(st, (address)sp, (address)(sp + 8*sizeof(intptr_t)), sizeof(intptr_t));
  st->cr();

  // Note: it may be unsafe to inspect memory near pc. For example, pc may
  // point to garbage if entry point in an nmethod is corrupted. Leave
  // this at the end, and hope for the best.
  address pc = os::Posix::ucontext_get_pc(uc);
  print_instructions(st, pc, 4/*native instruction size*/);
  st->cr();
}

void os::print_register_info(outputStream *st, const void *context) {
  if (context == NULL) return;

  const ucontext_t *uc = (const ucontext_t*)context;

  st->print_cr("Register to memory mapping:");
  st->cr();

  // this is horrendously verbose but the layout of the registers in the
  // context does not match how we defined our abstract Register set, so
  // we can't just iterate through the gregs area

  // this is only for the "general purpose" registers
  for (int r = 0; r < 31; r++)
    st->print_cr("R%d=" INTPTR_FORMAT, r, (uintptr_t)uc->uc_mcontext.regs[r]);
  st->cr();
}

void os::setup_fpu() {
}

#ifndef PRODUCT
void os::verify_stack_alignment() {
  assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment");
}
#endif

int os::extra_bang_size_in_bytes() {
  // AArch64 does not require the additional stack bang.
  return 0;
}
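
// The helpers below need C linkage: SpinPause is called from shared runtime
// spin loops, and the _Copy_* routines are the element-wise copy primitives
// behind the platform's Copy implementation.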
extern "C" {
int SpinPause() {
using spin_wait_func_ptr_t = void (*)();
spin_wait_func_ptr_t func = CAST_TO_FN_PTR(spin_wait_func_ptr_t, StubRoutines::aarch64::spin_wait());
assert(func != nullptr, "StubRoutines::aarch64::spin_wait must not be null.");
(*func)();
// If StubRoutines::aarch64::spin_wait consists of only a RET,
// SpinPause can be considered as implemented. There will be a sequence
// of instructions for:
// - call of SpinPause
// - load of StubRoutines::aarch64::spin_wait stub pointer
// - indirect call of the stub
// - return from the stub
// - return from SpinPause
// So '1' always is returned.
return 1;
}
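
  // Element-wise conjoint (possibly overlapping) copies.  The copy direction is
  // chosen from the relative order of 'from' and 'to' so overlap is handled
  // correctly, and each element is moved with a single access (os::atomic_copy64
  // for jlongs) so readers never see a torn element.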
  void _Copy_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) {
    if (from > to) {
      const jshort *end = from + count;
      while (from < end)
        *(to++) = *(from++);
    }
    else if (from < to) {
      const jshort *end = from;
      from += count - 1;
      to   += count - 1;
      while (from >= end)
        *(to--) = *(from--);
    }
  }

  void _Copy_conjoint_jints_atomic(const jint* from, jint* to, size_t count) {
    if (from > to) {
      const jint *end = from + count;
      while (from < end)
        *(to++) = *(from++);
    }
    else if (from < to) {
      const jint *end = from;
      from += count - 1;
      to   += count - 1;
      while (from >= end)
        *(to--) = *(from--);
    }
  }

  void _Copy_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) {
    if (from > to) {
      const jlong *end = from + count;
      while (from < end)
        os::atomic_copy64(from++, to++);
    }
    else if (from < to) {
      const jlong *end = from;
      from += count - 1;
      to   += count - 1;
      while (from >= end)
        os::atomic_copy64(from--, to--);
    }
  }
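
  // The 'arrayof' variants copy HeapWord-aligned array data, so elements do not
  // straddle word boundaries and a plain memmove keeps per-element atomicity;
  // 'count' is in elements, hence the scaling by element size.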
  void _Copy_arrayof_conjoint_bytes(const HeapWord* from,
                                    HeapWord* to,
                                    size_t count) {
    memmove(to, from, count);
  }

  void _Copy_arrayof_conjoint_jshorts(const HeapWord* from,
                                      HeapWord* to,
                                      size_t count) {
    memmove(to, from, count * 2);
  }

  void _Copy_arrayof_conjoint_jints(const HeapWord* from,
                                    HeapWord* to,
                                    size_t count) {
    memmove(to, from, count * 4);
  }

  void _Copy_arrayof_conjoint_jlongs(const HeapWord* from,
                                     HeapWord* to,
                                     size_t count) {
    memmove(to, from, count * 8);
  }
};