/*
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/
#ifdef _WIN64
// Must be at least Windows 2000 or XP to use VectoredExceptions
#define _WIN32_WINNT 0x500
#endif
// do not include precompiled header file
# include "incls/_os_windows.cpp.incl"
#ifdef _DEBUG
#include <crtdbg.h>
#endif
#include <windows.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/timeb.h>
#include <objidl.h>
#include <shlobj.h>
#include <malloc.h>
#include <signal.h>
#include <direct.h>
#include <errno.h>
#include <fcntl.h>
#include <io.h>
#include <process.h> // For _beginthreadex(), _endthreadex()
#include <imagehlp.h> // For os::dll_address_to_function_name
/* for enumerating dll libraries */
#include <tlhelp32.h>
#include <vdmdbg.h>
// for timer info max values which include all bits
#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
// For DLL loading/load error detection
// Values of PE COFF
#define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
#define IMAGE_FILE_SIGNATURE_LENGTH 4
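// In a PE image, the DWORD at file offset 0x3c (e_lfanew in the DOS header)
// holds the offset of the 4-byte "PE\0\0" signature; the COFF file header,
// which contains the target machine code, follows immediately after it.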
static HANDLE main_process;
static HANDLE main_thread;
static int main_thread_id;
static FILETIME process_creation_time;
static FILETIME process_exit_time;
static FILETIME process_user_time;
static FILETIME process_kernel_time;
#ifdef _WIN64
PVOID topLevelVectoredExceptionHandler = NULL;
#endif
#ifdef _M_IA64
#define __CPU__ ia64
#elif _M_AMD64
#define __CPU__ amd64
#else
#define __CPU__ i486
#endif
// save DLL module handle, used by GetModuleFileName
HINSTANCE vm_lib_handle;
static int getLastErrorString(char *buf, size_t len);
BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
switch (reason) {
case DLL_PROCESS_ATTACH:
vm_lib_handle = hinst;
if(ForceTimeHighResolution)
timeBeginPeriod(1L);
break;
case DLL_PROCESS_DETACH:
if(ForceTimeHighResolution)
timeEndPeriod(1L);
#ifdef _WIN64
if (topLevelVectoredExceptionHandler != NULL) {
RemoveVectoredExceptionHandler(topLevelVectoredExceptionHandler);
topLevelVectoredExceptionHandler = NULL;
}
#endif
break;
default:
break;
}
return true;
}
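// Convert a FILETIME (a 64-bit count of 100-nanosecond units) to seconds as a
// double: the low word divided by 1e7, plus the high word times (0xFFFFFFFF / 1e7).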
static inline double fileTimeAsDouble(FILETIME* time) {
const double high = (double) ((unsigned int) ~0);
const double split = 10000000.0;
double result = (time->dwLowDateTime / split) +
time->dwHighDateTime * (high/split);
return result;
}
// Implementation of os
bool os::getenv(const char* name, char* buffer, int len) {
int result = GetEnvironmentVariable(name, buffer, len);
return result > 0 && result < len;
}
// No setuid programs under Windows.
bool os::have_special_privileges() {
return false;
}
// This method is called periodically to check for misbehaving JNI applications
// under CheckJNI; any periodic checks can be added here.
// On Windows it currently does nothing.
void os::run_periodic_checks() {
return;
}
#ifndef _WIN64
LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);
#endif
void os::init_system_properties_values() {
/* sysclasspath, java_home, dll_dir */
{
char *home_path;
char *dll_path;
char *pslash;
char *bin = "\\bin";
char home_dir[MAX_PATH];
if (!getenv("_ALT_JAVA_HOME_DIR", home_dir, MAX_PATH)) {
os::jvm_path(home_dir, sizeof(home_dir));
// Found the full path to jvm[_g].dll.
// Now cut the path to <java_home>/jre if we can.
*(strrchr(home_dir, '\\')) = '\0'; /* get rid of \jvm.dll */
pslash = strrchr(home_dir, '\\');
if (pslash != NULL) {
*pslash = '\0'; /* get rid of \{client|server} */
pslash = strrchr(home_dir, '\\');
if (pslash != NULL)
*pslash = '\0'; /* get rid of \bin */
}
}
home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1);
if (home_path == NULL)
return;
strcpy(home_path, home_dir);
Arguments::set_java_home(home_path);
dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1);
if (dll_path == NULL)
return;
strcpy(dll_path, home_dir);
strcat(dll_path, bin);
Arguments::set_dll_dir(dll_path);
if (!set_boot_path('\\', ';'))
return;
}
/* library_path */
#define EXT_DIR "\\lib\\ext"
#define BIN_DIR "\\bin"
#define PACKAGE_DIR "\\Sun\\Java"
{
/* Win32 library search order (See the documentation for LoadLibrary):
*
* 1. The directory from which the application is loaded.
* 2. The current directory
* 3. The system-wide Java Extensions directory (Java only)
* 4. System directory (GetSystemDirectory)
* 5. Windows directory (GetWindowsDirectory)
* 6. The PATH environment variable
*/
char *library_path;
char tmp[MAX_PATH];
char *path_str = ::getenv("PATH");
library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10);
library_path[0] = '\0';
GetModuleFileName(NULL, tmp, sizeof(tmp));
*(strrchr(tmp, '\\')) = '\0';
strcat(library_path, tmp);
strcat(library_path, ";.");
GetWindowsDirectory(tmp, sizeof(tmp));
strcat(library_path, ";");
strcat(library_path, tmp);
strcat(library_path, PACKAGE_DIR BIN_DIR);
GetSystemDirectory(tmp, sizeof(tmp));
strcat(library_path, ";");
strcat(library_path, tmp);
GetWindowsDirectory(tmp, sizeof(tmp));
strcat(library_path, ";");
strcat(library_path, tmp);
if (path_str) {
strcat(library_path, ";");
strcat(library_path, path_str);
}
Arguments::set_library_path(library_path);
FREE_C_HEAP_ARRAY(char, library_path);
}
/* Default extensions directory */
{
char path[MAX_PATH];
char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
GetWindowsDirectory(path, MAX_PATH);
sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
path, PACKAGE_DIR, EXT_DIR);
Arguments::set_ext_dirs(buf);
}
#undef EXT_DIR
#undef BIN_DIR
#undef PACKAGE_DIR
/* Default endorsed standards directory. */
{
#define ENDORSED_DIR "\\lib\\endorsed"
size_t len = strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR);
char * buf = NEW_C_HEAP_ARRAY(char, len);
sprintf(buf, "%s%s", Arguments::get_java_home(), ENDORSED_DIR);
Arguments::set_endorsed_dirs(buf);
#undef ENDORSED_DIR
}
#ifndef _WIN64
SetUnhandledExceptionFilter(Handle_FLT_Exception);
#endif
// Done
return;
}
void os::breakpoint() {
DebugBreak();
}
// Invoked from the BREAKPOINT Macro
extern "C" void breakpoint() {
os::breakpoint();
}
// Returns an estimate of the current stack pointer. The result must be
// guaranteed to point into the calling thread's stack, and be no lower than
// the current stack pointer.
address os::current_stack_pointer() {
int dummy;
address sp = (address)&dummy;
return sp;
}
// os::current_stack_base()
//
// Returns the base of the stack, which is the stack's
// starting address. This function must be called
// while running on the stack of the thread being queried.
address os::current_stack_base() {
MEMORY_BASIC_INFORMATION minfo;
address stack_bottom;
size_t stack_size;
VirtualQuery(&minfo, &minfo, sizeof(minfo));
stack_bottom = (address)minfo.AllocationBase;
stack_size = minfo.RegionSize;
// Add up the sizes of all the regions with the same
// AllocationBase.
while( 1 )
{
VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo));
if ( stack_bottom == (address)minfo.AllocationBase )
stack_size += minfo.RegionSize;
else
break;
}
#ifdef _M_IA64
// IA64 has memory and register stacks
stack_size = stack_size / 2;
#endif
return stack_bottom + stack_size;
}
size_t os::current_stack_size() {
size_t sz;
MEMORY_BASIC_INFORMATION minfo;
VirtualQuery(&minfo, &minfo, sizeof(minfo));
sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase;
return sz;
}
struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
const struct tm* time_struct_ptr = localtime(clock);
if (time_struct_ptr != NULL) {
*res = *time_struct_ptr;
return res;
}
return NULL;
}
LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
// Thread start routine for all new Java threads
static unsigned __stdcall java_start(Thread* thread) {
// Try to randomize the cache line index of hot stack frames.
// This helps when threads with the same stack traces evict each other's
// cache lines. The threads can be either from the same JVM instance, or
// from different JVM instances. The benefit is especially significant for
// processors with hyperthreading technology.
static int counter = 0;
int pid = os::current_process_id();
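// (pid ^ counter++) & 7 yields a value 0..7, so _alloca shifts this frame's
// stack pointer down by a pseudo-random multiple of 128 bytes (at most 896 bytes).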
_alloca(((pid ^ counter++) & 7) * 128);
OSThread* osthr = thread->osthread();
assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
if (UseNUMA) {
int lgrp_id = os::numa_get_group_id();
if (lgrp_id != -1) {
thread->set_lgrp_id(lgrp_id);
}
}
if (UseVectoredExceptions) {
// If we are using vectored exceptions we don't need to set up a SEH
thread->run();
}
else {
// Install a win32 structured exception handler around every thread created
// by the VM, so the VM can generate an error dump when an exception occurs
// in a non-Java thread (e.g. the VM thread).
__try {
thread->run();
} __except(topLevelExceptionFilter(
(_EXCEPTION_POINTERS*)_exception_info())) {
// Nothing to do.
}
}
// One less thread is executing
// When the VMThread gets here, the main thread may have already exited
// which frees the CodeHeap containing the Atomic::add code
if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
Atomic::dec_ptr((intptr_t*)&os::win32::_os_thread_count);
}
return 0;
}
static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle, int thread_id) {
// Allocate the OSThread object
OSThread* osthread = new OSThread(NULL, NULL);
if (osthread == NULL) return NULL;
// Initialize support for Java interrupts
HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
if (interrupt_event == NULL) {
delete osthread;
return NULL;
}
osthread->set_interrupt_event(interrupt_event);
// Store info on the Win32 thread into the OSThread
osthread->set_thread_handle(thread_handle);
osthread->set_thread_id(thread_id);
if (UseNUMA) {
int lgrp_id = os::numa_get_group_id();
if (lgrp_id != -1) {
thread->set_lgrp_id(lgrp_id);
}
}
// Initial thread state is INITIALIZED, not SUSPENDED
osthread->set_state(INITIALIZED);
return osthread;
}
bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
thread->verify_not_published();
#endif
HANDLE thread_h;
if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(),
&thread_h, THREAD_ALL_ACCESS, false, 0)) {
fatal("DuplicateHandle failed\n");
}
OSThread* osthread = create_os_thread(thread, thread_h,
(int)current_thread_id());
if (osthread == NULL) {
return false;
}
// Initial thread state is RUNNABLE
osthread->set_state(RUNNABLE);
thread->set_osthread(osthread);
return true;
}
bool os::create_main_thread(JavaThread* thread) {
#ifdef ASSERT
thread->verify_not_published();
#endif
if (_starting_thread == NULL) {
_starting_thread = create_os_thread(thread, main_thread, main_thread_id);
if (_starting_thread == NULL) {
return false;
}
}
// The primordial thread is runnable from the start
_starting_thread->set_state(RUNNABLE);
thread->set_osthread(_starting_thread);
return true;
}
// Allocate and initialize a new OSThread
bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
unsigned thread_id;
// Allocate the OSThread object
OSThread* osthread = new OSThread(NULL, NULL);
if (osthread == NULL) {
return false;
}
// Initialize support for Java interrupts
HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
if (interrupt_event == NULL) {
delete osthread;
return false;
}
osthread->set_interrupt_event(interrupt_event);
osthread->set_interrupted(false);
thread->set_osthread(osthread);
if (stack_size == 0) {
switch (thr_type) {
case os::java_thread:
// Java threads use ThreadStackSize, whose default value can be changed with the flag -Xss
if (JavaThread::stack_size_at_create() > 0)
stack_size = JavaThread::stack_size_at_create();
break;
case os::compiler_thread:
if (CompilerThreadStackSize > 0) {
stack_size = (size_t)(CompilerThreadStackSize * K);
break;
} // else fall through:
// use VMThreadStackSize if CompilerThreadStackSize is not defined
case os::vm_thread:
case os::pgc_thread:
case os::cgc_thread:
case os::watcher_thread:
if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
break;
}
}
// Create the Win32 thread
//
// Contrary to what the MSDN documentation says, "stack_size" in _beginthreadex()
// does not specify the stack size. Instead, it specifies the size of the
// initially committed space. The stack size is determined by the
// PE header in the executable. If the committed "stack_size" is larger
// than the default value in the PE header, the stack is rounded up to the
// nearest multiple of 1MB. For example, if the launcher has a default
// stack size of 320k, specifying any size less than 320k does not
// affect the actual stack size at all; it only affects the initial
// commitment. On the other hand, specifying a 'stack_size' larger than the
// default value may cause a significant increase in memory usage, because
// not only is the stack space rounded up to a multiple of 1MB, but the
// entire space is also committed upfront.
//
// Finally, Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION'
// for CreateThread() that can treat 'stack_size' as the stack size. However, we
// are not supposed to call CreateThread() directly according to the MSDN
// documentation, because the JVM uses the C runtime library. The good news is
// that the flag appears to work with _beginthreadex() as well.
#ifndef STACK_SIZE_PARAM_IS_A_RESERVATION
#define STACK_SIZE_PARAM_IS_A_RESERVATION (0x10000)
#endif
HANDLE thread_handle =
(HANDLE)_beginthreadex(NULL,
(unsigned)stack_size,
(unsigned (__stdcall *)(void*)) java_start,
thread,
CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION,
&thread_id);
if (thread_handle == NULL) {
// perhaps STACK_SIZE_PARAM_IS_A_RESERVATION is not supported, try again
// without the flag.
thread_handle =
(HANDLE)_beginthreadex(NULL,
(unsigned)stack_size,
(unsigned (__stdcall *)(void*)) java_start,
thread,
CREATE_SUSPENDED,
&thread_id);
}
if (thread_handle == NULL) {
// Need to clean up stuff we've allocated so far
CloseHandle(osthread->interrupt_event());
thread->set_osthread(NULL);
delete osthread;
return false;
}
Atomic::inc_ptr((intptr_t*)&os::win32::_os_thread_count);
// Store info on the Win32 thread into the OSThread
osthread->set_thread_handle(thread_handle);
osthread->set_thread_id(thread_id);
// Initial thread state is INITIALIZED, not SUSPENDED
osthread->set_state(INITIALIZED);
// The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
return true;
}
// Free Win32 resources related to the OSThread
void os::free_thread(OSThread* osthread) {
assert(osthread != NULL, "osthread not set");
CloseHandle(osthread->thread_handle());
CloseHandle(osthread->interrupt_event());
delete osthread;
}
static int has_performance_count = 0;
static jlong first_filetime;
static jlong initial_performance_count;
static jlong performance_frequency;
jlong as_long(LARGE_INTEGER x) {
jlong result = 0; // initialization to avoid warning
set_high(&result, x.HighPart);
set_low(&result, x.LowPart);
return result;
}
jlong os::elapsed_counter() {
LARGE_INTEGER count;
if (has_performance_count) {
QueryPerformanceCounter(&count);
return as_long(count) - initial_performance_count;
} else {
FILETIME wt;
GetSystemTimeAsFileTime(&wt);
return (jlong_from(wt.dwHighDateTime, wt.dwLowDateTime) - first_filetime);
}
}
jlong os::elapsed_frequency() {
if (has_performance_count) {
return performance_frequency;
} else {
// the FILETIME time is the number of 100-nanosecond intervals since January 1, 1601.
return 10000000;
}
}
julong os::available_memory() {
return win32::available_memory();
}
julong os::win32::available_memory() {
// FIXME: GlobalMemoryStatus() may return an incorrect value if total memory
// is larger than 4GB
MEMORYSTATUS ms;
GlobalMemoryStatus(&ms);
return (julong)ms.dwAvailPhys;
}
julong os::physical_memory() {
return win32::physical_memory();
}
julong os::allocatable_physical_memory(julong size) {
#ifdef _LP64
return size;
#else
// Limit to 1400m because of the 2gb address space wall
return MIN2(size, (julong)1400*M);
#endif
}
// VC6 lacks DWORD_PTR
#if _MSC_VER < 1300
typedef UINT_PTR DWORD_PTR;
#endif
int os::active_processor_count() {
DWORD_PTR lpProcessAffinityMask = 0;
DWORD_PTR lpSystemAffinityMask = 0;
int proc_count = processor_count();
if (proc_count <= sizeof(UINT_PTR) * BitsPerByte &&
GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) {
// The number of active processors is the number of bits set in the process affinity mask
int bitcount = 0;
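// Kernighan's method: each iteration clears the lowest set bit of the mask.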
while (lpProcessAffinityMask != 0) {
lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1);
bitcount++;
}
return bitcount;
} else {
return proc_count;
}
}
bool os::distribute_processes(uint length, uint* distribution) {
// Not yet implemented.
return false;
}
bool os::bind_to_processor(uint processor_id) {
// Not yet implemented.
return false;
}
static void initialize_performance_counter() {
LARGE_INTEGER count;
if (QueryPerformanceFrequency(&count)) {
has_performance_count = 1;
performance_frequency = as_long(count);
QueryPerformanceCounter(&count);
initial_performance_count = as_long(count);
} else {
has_performance_count = 0;
FILETIME wt;
GetSystemTimeAsFileTime(&wt);
first_filetime = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
}
}
double os::elapsedTime() {
return (double) elapsed_counter() / (double) elapsed_frequency();
}
// Windows format:
// The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601.
// Java format:
// Java standards require the number of milliseconds since 1/1/1970
// Constant offset - calculated using offset()
static jlong _offset = 116444736000000000;
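// 116444736000000000 = 11,644,473,600 seconds (134,774 days between
// 1601-01-01 and 1970-01-01) expressed in 100-nanosecond units.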
// Fake time counter for reproducible results when debugging
static jlong fake_time = 0;
#ifdef ASSERT
// Just to be safe, recalculate the offset in debug mode
static jlong _calculated_offset = 0;
static int _has_calculated_offset = 0;
jlong offset() {
if (_has_calculated_offset) return _calculated_offset;
SYSTEMTIME java_origin;
java_origin.wYear = 1970;
java_origin.wMonth = 1;
java_origin.wDayOfWeek = 0; // ignored
java_origin.wDay = 1;
java_origin.wHour = 0;
java_origin.wMinute = 0;
java_origin.wSecond = 0;
java_origin.wMilliseconds = 0;
FILETIME jot;
if (!SystemTimeToFileTime(&java_origin, &jot)) {
fatal1("Error = %d\nWindows error", GetLastError());
}
_calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
_has_calculated_offset = 1;
assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal");
return _calculated_offset;
}
#else
jlong offset() {
return _offset;
}
#endif
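// FILETIME counts 100-ns units since 1601; Java time counts milliseconds since
// 1970. Convert by subtracting the epoch offset and dividing by 10,000
// (the number of 100-ns units per millisecond).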
jlong windows_to_java_time(FILETIME wt) {
jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
return (a - offset()) / 10000;
}
FILETIME java_to_windows_time(jlong l) {
jlong a = (l * 10000) + offset();
FILETIME result;
result.dwHighDateTime = high(a);
result.dwLowDateTime = low(a);
return result;
}
// For now, we say that Windows does not support vtime. I have no idea
// whether it can actually be made to (DLD, 9/13/05).
bool os::supports_vtime() { return false; }
bool os::enable_vtime() { return false; }
bool os::vtime_enabled() { return false; }
double os::elapsedVTime() {
// better than nothing, but not much
return elapsedTime();
}
jlong os::javaTimeMillis() {
if (UseFakeTimers) {
return fake_time++;
} else {
FILETIME wt;
GetSystemTimeAsFileTime(&wt);
return windows_to_java_time(wt);
}
}
#define NANOS_PER_SEC CONST64(1000000000)
#define NANOS_PER_MILLISEC 1000000
jlong os::javaTimeNanos() {
if (!has_performance_count) {
return javaTimeMillis() * NANOS_PER_MILLISEC; // the best we can do.
} else {
LARGE_INTEGER current_count;
QueryPerformanceCounter(&current_count);
double current = as_long(current_count);
double freq = performance_frequency;
jlong time = (jlong)((current/freq) * NANOS_PER_SEC);
return time;
}
}
void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
if (!has_performance_count) {
// javaTimeMillis() doesn't have much precision,
// but it is not going to wrap -- so all 64 bits
info_ptr->max_value = ALL_64_BITS;
// this is a wall clock timer, so may skip
info_ptr->may_skip_backward = true;
info_ptr->may_skip_forward = true;
} else {
jlong freq = performance_frequency;
if (freq < NANOS_PER_SEC) {
// the performance counter is 64 bits and we will
// be multiplying it -- so no wrap in 64 bits
info_ptr->max_value = ALL_64_BITS;
} else if (freq > NANOS_PER_SEC) {
// use the max value the counter can reach to
// determine the max value which could be returned
julong max_counter = (julong)ALL_64_BITS;
info_ptr->max_value = (jlong)(max_counter / (freq / NANOS_PER_SEC));
} else {
// the performance counter is 64 bits and we will
// be using it directly -- so no wrap in 64 bits
info_ptr->max_value = ALL_64_BITS;
}
// using a counter, so no skipping
info_ptr->may_skip_backward = false;
info_ptr->may_skip_forward = false;
}
info_ptr->kind = JVMTI_TIMER_ELAPSED; // elapsed not CPU time
}
char* os::local_time_string(char *buf, size_t buflen) {
SYSTEMTIME st;
GetLocalTime(&st);
jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond);
return buf;
}
bool os::getTimesSecs(double* process_real_time,
double* process_user_time,
double* process_system_time) {
HANDLE h_process = GetCurrentProcess();
FILETIME create_time, exit_time, kernel_time, user_time;
BOOL result = GetProcessTimes(h_process,
&create_time,
&exit_time,
&kernel_time,
&user_time);
if (result != 0) {
FILETIME wt;
GetSystemTimeAsFileTime(&wt);
jlong rtc_millis = windows_to_java_time(wt);
jlong user_millis = windows_to_java_time(user_time);
jlong system_millis = windows_to_java_time(kernel_time);
*process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS);
*process_user_time = ((double) user_millis) / ((double) MILLIUNITS);
*process_system_time = ((double) system_millis) / ((double) MILLIUNITS);
return true;
} else {
return false;
}
}
void os::shutdown() {
// allow PerfMemory to attempt cleanup of any persistent resources
perfMemory_exit();
// flush buffered output, finish log files
ostream_abort();
// Check for abort hook
abort_hook_t abort_hook = Arguments::abort_hook();
if (abort_hook != NULL) {
abort_hook();
}
}
void os::abort(bool dump_core)
{
os::shutdown();
// no core dump on Windows
::exit(1);
}
// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
_exit(-1);
}
// Directory routines copied from src/win32/native/java/io/dirent_md.c
// * dirent_md.c 1.15 00/02/02
//
// The declarations for DIR and struct dirent are in jvm_win32.h.
/* Caller must have already run dirname through JVM_NativePath, which removes
duplicate slashes and converts all instances of '/' into '\\'. */
DIR *
os::opendir(const char *dirname)
{
assert(dirname != NULL, "just checking"); // hotspot change
DIR *dirp = (DIR *)malloc(sizeof(DIR));
DWORD fattr; // hotspot change
char alt_dirname[4] = { 0, 0, 0, 0 };
if (dirp == 0) {
errno = ENOMEM;
return 0;
}
/*
* Win32 accepts "\" in its POSIX stat(), but refuses to treat it
* as a directory in FindFirstFile(). We detect this case here and
* prepend the current drive name.
*/
if (dirname[1] == '\0' && dirname[0] == '\\') {
alt_dirname[0] = _getdrive() + 'A' - 1;
alt_dirname[1] = ':';
alt_dirname[2] = '\\';
alt_dirname[3] = '\0';
dirname = alt_dirname;
}
dirp->path = (char *)malloc(strlen(dirname) + 5);
if (dirp->path == 0) {
free(dirp);
errno = ENOMEM;
return 0;
}
strcpy(dirp->path, dirname);
fattr = GetFileAttributes(dirp->path);
if (fattr == 0xffffffff) {
free(dirp->path);
free(dirp);
errno = ENOENT;
return 0;
} else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
free(dirp->path);
free(dirp);
errno = ENOTDIR;
return 0;
}
/* Append "*.*", or possibly "\\*.*", to path */
if (dirp->path[1] == ':'
&& (dirp->path[2] == '\0'
|| (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
/* No '\\' needed for cases like "Z:" or "Z:\" */
strcat(dirp->path, "*.*");
} else {
strcat(dirp->path, "\\*.*");
}
dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
if (dirp->handle == INVALID_HANDLE_VALUE) {
if (GetLastError() != ERROR_FILE_NOT_FOUND) {
free(dirp->path);
free(dirp);
errno = EACCES;
return 0;
}
}
return dirp;
}
/* parameter dbuf unused on Windows */
struct dirent *
os::readdir(DIR *dirp, dirent *dbuf)
{
assert(dirp != NULL, "just checking"); // hotspot change
if (dirp->handle == INVALID_HANDLE_VALUE) {
return 0;
}
strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);
if (!FindNextFile(dirp->handle, &dirp->find_data)) {
if (GetLastError() == ERROR_INVALID_HANDLE) {
errno = EBADF;
return 0;
}
FindClose(dirp->handle);
dirp->handle = INVALID_HANDLE_VALUE;
}
return &dirp->dirent;
}
int
os::closedir(DIR *dirp)
{
assert(dirp != NULL, "just checking"); // hotspot change
if (dirp->handle != INVALID_HANDLE_VALUE) {
if (!FindClose(dirp->handle)) {
errno = EBADF;
return -1;
}
dirp->handle = INVALID_HANDLE_VALUE;
}
free(dirp->path);
free(dirp);
return 0;
}
const char* os::dll_file_extension() { return ".dll"; }
const char * os::get_temp_directory()
{
static char path_buf[MAX_PATH];
if (GetTempPath(MAX_PATH, path_buf)>0)
return path_buf;
else{
path_buf[0]='\0';
return path_buf;
}
}
static bool file_exists(const char* filename) {
if (filename == NULL || strlen(filename) == 0) {
return false;
}
return GetFileAttributes(filename) != INVALID_FILE_ATTRIBUTES;
}
void os::dll_build_name(char *buffer, size_t buflen,
const char* pname, const char* fname) {
// Copied from libhpi
const size_t pnamelen = pname ? strlen(pname) : 0;
const char c = (pnamelen > 0) ? pname[pnamelen-1] : 0;
// Quietly truncates on buffer overflow. Should be an error.
if (pnamelen + strlen(fname) + 10 > buflen) {
*buffer = '\0';
return;
}
if (pnamelen == 0) {
jio_snprintf(buffer, buflen, "%s.dll", fname);
} else if (c == ':' || c == '\\') {
jio_snprintf(buffer, buflen, "%s%s.dll", pname, fname);
} else if (strchr(pname, *os::path_separator()) != NULL) {
int n;
char** pelements = split_path(pname, &n);
for (int i = 0 ; i < n ; i++) {
char* path = pelements[i];
// Really shouldn't be NULL, but check can't hurt
size_t plen = (path == NULL) ? 0 : strlen(path);
if (plen == 0) {
continue; // skip the empty path values
}
const char lastchar = path[plen - 1];
if (lastchar == ':' || lastchar == '\\') {
jio_snprintf(buffer, buflen, "%s%s.dll", path, fname);
} else {
jio_snprintf(buffer, buflen, "%s\\%s.dll", path, fname);
}
if (file_exists(buffer)) {
break;
}
}
// release the storage
for (int i = 0 ; i < n ; i++) {
if (pelements[i] != NULL) {
FREE_C_HEAP_ARRAY(char, pelements[i]);
}
}
if (pelements != NULL) {
FREE_C_HEAP_ARRAY(char*, pelements);
}
} else {
jio_snprintf(buffer, buflen, "%s\\%s.dll", pname, fname);
}
}
// Needs to be in the os-specific directory because Windows requires another
// header file <direct.h>
const char* os::get_current_directory(char *buf, int buflen) {
return _getcwd(buf, buflen);
}
//-----------------------------------------------------------
// Helper functions for fatal error handler
// The following library functions are resolved dynamically at runtime:
// PSAPI functions, for Windows NT, 2000, XP
// psapi.h doesn't come with Visual Studio 6; it can be downloaded as part of
// the Platform SDK from Microsoft. Here are the definitions copied from psapi.h
typedef struct _MODULEINFO {
LPVOID lpBaseOfDll;
DWORD SizeOfImage;
LPVOID EntryPoint;
} MODULEINFO, *LPMODULEINFO;
static BOOL (WINAPI *_EnumProcessModules) ( HANDLE, HMODULE *, DWORD, LPDWORD );
static DWORD (WINAPI *_GetModuleFileNameEx) ( HANDLE, HMODULE, LPTSTR, DWORD );
static BOOL (WINAPI *_GetModuleInformation)( HANDLE, HMODULE, LPMODULEINFO, DWORD );
// ToolHelp Functions, for Windows 95, 98 and ME
static HANDLE(WINAPI *_CreateToolhelp32Snapshot)(DWORD,DWORD) ;
static BOOL (WINAPI *_Module32First) (HANDLE,LPMODULEENTRY32) ;
static BOOL (WINAPI *_Module32Next) (HANDLE,LPMODULEENTRY32) ;
bool _has_psapi;
bool _psapi_init = false;
bool _has_toolhelp;
static bool _init_psapi() {
HINSTANCE psapi = LoadLibrary( "PSAPI.DLL" ) ;
if( psapi == NULL ) return false ;
_EnumProcessModules = CAST_TO_FN_PTR(
BOOL(WINAPI *)(HANDLE, HMODULE *, DWORD, LPDWORD),
GetProcAddress(psapi, "EnumProcessModules")) ;
_GetModuleFileNameEx = CAST_TO_FN_PTR(
DWORD (WINAPI *)(HANDLE, HMODULE, LPTSTR, DWORD),
GetProcAddress(psapi, "GetModuleFileNameExA"));
_GetModuleInformation = CAST_TO_FN_PTR(
BOOL (WINAPI *)(HANDLE, HMODULE, LPMODULEINFO, DWORD),
GetProcAddress(psapi, "GetModuleInformation"));
_has_psapi = (_EnumProcessModules && _GetModuleFileNameEx && _GetModuleInformation);
_psapi_init = true;
return _has_psapi;
}
static bool _init_toolhelp() {
HINSTANCE kernel32 = LoadLibrary("Kernel32.DLL") ;
if (kernel32 == NULL) return false ;
_CreateToolhelp32Snapshot = CAST_TO_FN_PTR(
HANDLE(WINAPI *)(DWORD,DWORD),
GetProcAddress(kernel32, "CreateToolhelp32Snapshot"));
_Module32First = CAST_TO_FN_PTR(
BOOL(WINAPI *)(HANDLE,LPMODULEENTRY32),
GetProcAddress(kernel32, "Module32First" ));
_Module32Next = CAST_TO_FN_PTR(
BOOL(WINAPI *)(HANDLE,LPMODULEENTRY32),
GetProcAddress(kernel32, "Module32Next" ));
_has_toolhelp = (_CreateToolhelp32Snapshot && _Module32First && _Module32Next);
return _has_toolhelp;
}
#ifdef _WIN64
// Helper routine which returns true if the address is
// within the NTDLL address space.
//
static bool _addr_in_ntdll( address addr )
{
HMODULE hmod;
MODULEINFO minfo;
hmod = GetModuleHandle("NTDLL.DLL");
if ( hmod == NULL ) return false;
if ( !_GetModuleInformation( GetCurrentProcess(), hmod,
&minfo, sizeof(MODULEINFO)) )
return false;
if ( (addr >= minfo.lpBaseOfDll) &&
(addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage)))
return true;
else
return false;
}
#endif
// Enumerate all modules for a given process ID
//
// Notice that Windows 95/98/Me and Windows NT/2000/XP have
// different APIs for doing this. We use PSAPI.DLL on NT-based
// Windows and ToolHelp on 95/98/Me.
// Callback function that is called by enumerate_modules() on
// every DLL module.
// Input parameters:
// int pid,
// char* module_file_name,
// address module_base_addr,
// unsigned module_size,
// void* param
typedef int (*EnumModulesCallbackFunc)(int, char *, address, unsigned, void *);
// enumerate_modules for Windows NT, using PSAPI
static int _enumerate_modules_winnt( int pid, EnumModulesCallbackFunc func, void * param)
{
HANDLE hProcess ;
# define MAX_NUM_MODULES 128
HMODULE modules[MAX_NUM_MODULES];
static char filename[ MAX_PATH ];
int result = 0;
if (!_has_psapi && (_psapi_init || !_init_psapi())) return 0;
hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
FALSE, pid ) ;
if (hProcess == NULL) return 0;
DWORD size_needed;
if (!_EnumProcessModules(hProcess, modules,
sizeof(modules), &size_needed)) {
CloseHandle( hProcess );
return 0;
}
// number of modules that are currently loaded
int num_modules = size_needed / sizeof(HMODULE);
for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
// Get Full pathname:
if(!_GetModuleFileNameEx(hProcess, modules[i],
filename, sizeof(filename))) {
filename[0] = '\0';
}
MODULEINFO modinfo;
if (!_GetModuleInformation(hProcess, modules[i],
&modinfo, sizeof(modinfo))) {
modinfo.lpBaseOfDll = NULL;
modinfo.SizeOfImage = 0;
}
// Invoke callback function
result = func(pid, filename, (address)modinfo.lpBaseOfDll,
modinfo.SizeOfImage, param);
if (result) break;
}
CloseHandle( hProcess ) ;
return result;
}
// enumerate_modules for Windows 95/98/ME, using TOOLHELP
static int _enumerate_modules_windows( int pid, EnumModulesCallbackFunc func, void *param)
{
HANDLE hSnapShot ;
static MODULEENTRY32 modentry ;
int result = 0;
if (!_has_toolhelp) return 0;
// Get a handle to a Toolhelp snapshot of the system
hSnapShot = _CreateToolhelp32Snapshot(TH32CS_SNAPMODULE, pid ) ;
if( hSnapShot == INVALID_HANDLE_VALUE ) {
return FALSE ;
}
// iterate through all modules
modentry.dwSize = sizeof(MODULEENTRY32) ;
bool not_done = _Module32First( hSnapShot, &modentry ) != 0;
while( not_done ) {
// invoke the callback
result=func(pid, modentry.szExePath, (address)modentry.modBaseAddr,
modentry.modBaseSize, param);
if (result) break;
modentry.dwSize = sizeof(MODULEENTRY32) ;
not_done = _Module32Next( hSnapShot, &modentry ) != 0;
}
CloseHandle(hSnapShot);
return result;
}
int enumerate_modules( int pid, EnumModulesCallbackFunc func, void * param )
{
// Get current process ID if caller doesn't provide it.
if (!pid) pid = os::current_process_id();
if (os::win32::is_nt()) return _enumerate_modules_winnt (pid, func, param);
else return _enumerate_modules_windows(pid, func, param);
}
struct _modinfo {
address addr;
char* full_path; // point to a char buffer
int buflen; // size of the buffer
address base_addr;
};
static int _locate_module_by_addr(int pid, char * mod_fname, address base_addr,
unsigned size, void * param) {
struct _modinfo *pmod = (struct _modinfo *)param;
if (!pmod) return -1;
if (base_addr <= pmod->addr &&
base_addr+size > pmod->addr) {
// if a buffer is provided, copy path name to the buffer
if (pmod->full_path) {
jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
}
pmod->base_addr = base_addr;
return 1;
}
return 0;
}
bool os::dll_address_to_library_name(address addr, char* buf,
int buflen, int* offset) {
// NOTE: the reason we don't use SymGetModuleInfo() is that it doesn't always
// return the full path to the DLL file; sometimes it returns the path
// to the corresponding PDB file (debug info), and sometimes it only
// returns a partial path, which makes life painful.
struct _modinfo mi;
mi.addr = addr;
mi.full_path = buf;
mi.buflen = buflen;
int pid = os::current_process_id();
if (enumerate_modules(pid, _locate_module_by_addr, (void *)&mi)) {
// buf already contains path name
if (offset) *offset = addr - mi.base_addr;
return true;
} else {
if (buf) buf[0] = '\0';
if (offset) *offset = -1;
return false;
}
}
bool os::dll_address_to_function_name(address addr, char *buf,
int buflen, int *offset) {
// Unimplemented on Windows - in order to use SymGetSymFromAddr(),
// we need to initialize imagehlp/dbghelp, then load symbol table
// for every module. That's too much work to do after a fatal error.
// For an example on how to implement this function, see 1.4.2.
if (offset) *offset = -1;
if (buf) buf[0] = '\0';
return false;
}
void* os::dll_lookup(void* handle, const char* name) {
return GetProcAddress((HMODULE)handle, name);
}
// save the start and end address of jvm.dll into param[0] and param[1]
static int _locate_jvm_dll(int pid, char* mod_fname, address base_addr,
unsigned size, void * param) {
if (!param) return -1;
if (base_addr <= (address)_locate_jvm_dll &&
base_addr+size > (address)_locate_jvm_dll) {
((address*)param)[0] = base_addr;
((address*)param)[1] = base_addr + size;
return 1;
}
return 0;
}
address vm_lib_location[2]; // start and end address of jvm.dll
// check if addr is inside jvm.dll
bool os::address_is_in_vm(address addr) {
if (!vm_lib_location[0] || !vm_lib_location[1]) {
int pid = os::current_process_id();
if (!enumerate_modules(pid, _locate_jvm_dll, (void *)vm_lib_location)) {
assert(false, "Can't find jvm module.");
return false;
}
}
return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
}
// print module info; param is outputStream*
static int _print_module(int pid, char* fname, address base,
unsigned size, void* param) {
if (!param) return -1;
outputStream* st = (outputStream*)param;
address end_addr = base + size;
st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base, end_addr, fname);
return 0;
}
// Loads a .dll/.so and,
// in case of error, checks whether the .dll/.so was built for the
// same architecture as the one Hotspot is running on
void * os::dll_load(const char *name, char *ebuf, int ebuflen)
{
void * result = LoadLibrary(name);
if (result != NULL)
{
return result;
}
long errcode = GetLastError();
if (errcode == ERROR_MOD_NOT_FOUND) {
strncpy(ebuf, "Can't find dependent libraries", ebuflen-1);
ebuf[ebuflen-1]='\0';
return NULL;
}
// Parsing the dll below
// If we can read the dll-info and find that the dll was built
// for an architecture other than the one Hotspot is running on,
// then print "DLL was built for a different architecture" to the buffer;
// else call getLastErrorString to obtain the system error message.
// Read the system error message into ebuf now.
// It may or may not be overwritten below (in the for loop and just above).
getLastErrorString(ebuf, (size_t) ebuflen);
ebuf[ebuflen-1]='\0';
int file_descriptor=::open(name, O_RDONLY | O_BINARY, 0);
if (file_descriptor<0)
{
return NULL;
}
uint32_t signature_offset;
uint16_t lib_arch=0;
bool failed_to_get_lib_arch=
(
// Go to position 0x3c in the dll
(os::seek_to_file_offset(file_descriptor,IMAGE_FILE_PTR_TO_SIGNATURE)<0)
||
// Read location of signature
(sizeof(signature_offset)!=
(os::read(file_descriptor, (void*)&signature_offset,sizeof(signature_offset))))
||
// Go to the COFF File Header in the dll,
// which is located after the "signature" (4 bytes long)
(os::seek_to_file_offset(file_descriptor,
signature_offset+IMAGE_FILE_SIGNATURE_LENGTH)<0)
||
// Read the field that contains the architecture code
// that the dll was built for
(sizeof(lib_arch)!=
(os::read(file_descriptor, (void*)&lib_arch,sizeof(lib_arch))))
);
::close(file_descriptor);
if (failed_to_get_lib_arch)
{
// file i/o error - report getLastErrorString(...) msg
return NULL;
}
typedef struct
{
uint16_t arch_code;
char* arch_name;
} arch_t;
static const arch_t arch_array[]={
{IMAGE_FILE_MACHINE_I386, (char*)"IA 32"},
{IMAGE_FILE_MACHINE_AMD64, (char*)"AMD 64"},
{IMAGE_FILE_MACHINE_IA64, (char*)"IA 64"}
};
#if (defined _M_IA64)
static const uint16_t running_arch=IMAGE_FILE_MACHINE_IA64;
#elif (defined _M_AMD64)
static const uint16_t running_arch=IMAGE_FILE_MACHINE_AMD64;
#elif (defined _M_IX86)
static const uint16_t running_arch=IMAGE_FILE_MACHINE_I386;
#else
#error Method os::dll_load requires that one of following \
is defined :_M_IA64,_M_AMD64 or _M_IX86
#endif
// Obtain strings for the printf operation:
// lib_arch_str shall contain the platform this .dll was built for,
// running_arch_str shall contain the platform Hotspot was built for
char *running_arch_str=NULL,*lib_arch_str=NULL;
for (unsigned int i=0;i<ARRAY_SIZE(arch_array);i++)
{
if (lib_arch==arch_array[i].arch_code)
lib_arch_str=arch_array[i].arch_name;
if (running_arch==arch_array[i].arch_code)
running_arch_str=arch_array[i].arch_name;
}
assert(running_arch_str,
"Didn't find running architecture code in arch_array");
// If the architecture is right
// but some other error took place - report getLastErrorString(...) msg
if (lib_arch == running_arch)
{
return NULL;
}
if (lib_arch_str!=NULL)
{
::_snprintf(ebuf, ebuflen-1,
"Can't load %s-bit .dll on a %s-bit platform",
lib_arch_str,running_arch_str);
}
else
{
// don't know what architecture this dll was built for
::_snprintf(ebuf, ebuflen-1,
"Can't load this .dll (machine code=0x%x) on a %s-bit platform",
lib_arch,running_arch_str);
}
return NULL;
}
void os::print_dll_info(outputStream *st) {
int pid = os::current_process_id();
st->print_cr("Dynamic libraries:");
enumerate_modules(pid, _print_module, (void *)st);
}
// function pointer to Windows API "GetNativeSystemInfo".
typedef void (WINAPI *GetNativeSystemInfo_func_type)(LPSYSTEM_INFO);
static GetNativeSystemInfo_func_type _GetNativeSystemInfo;
void os::print_os_info(outputStream* st) {
st->print("OS:");
OSVERSIONINFOEX osvi;
ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
st->print_cr("N/A");
return;
}
int os_vers = osvi.dwMajorVersion * 1000 + osvi.dwMinorVersion;
if (osvi.dwPlatformId == VER_PLATFORM_WIN32_NT) {
switch (os_vers) {
case 3051: st->print(" Windows NT 3.51"); break;
case 4000: st->print(" Windows NT 4.0"); break;
case 5000: st->print(" Windows 2000"); break;
case 5001: st->print(" Windows XP"); break;
case 5002:
case 6000: {
// Retrieve SYSTEM_INFO from a GetNativeSystemInfo call so that we can
// find out whether we are running on a 64-bit processor or not.
SYSTEM_INFO si;
ZeroMemory(&si, sizeof(SYSTEM_INFO));
// Check to see if _GetNativeSystemInfo has been initialized.
if (_GetNativeSystemInfo == NULL) {
HMODULE hKernel32 = GetModuleHandle(TEXT("kernel32.dll"));
_GetNativeSystemInfo =
CAST_TO_FN_PTR(GetNativeSystemInfo_func_type,
GetProcAddress(hKernel32,
"GetNativeSystemInfo"));
if (_GetNativeSystemInfo == NULL)
GetSystemInfo(&si);
} else {
_GetNativeSystemInfo(&si);
}
if (os_vers == 5002) {
if (osvi.wProductType == VER_NT_WORKSTATION &&
si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64)
st->print(" Windows XP x64 Edition");
else
st->print(" Windows Server 2003 family");
} else { // os_vers == 6000
if (osvi.wProductType == VER_NT_WORKSTATION)
st->print(" Windows Vista");
else
st->print(" Windows Server 2008");
if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64)
st->print(" , 64 bit");
}
break;
}
default: // future windows, print out its major and minor versions
st->print(" Windows NT %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
}
} else {
switch (os_vers) {
case 4000: st->print(" Windows 95"); break;
case 4010: st->print(" Windows 98"); break;
case 4090: st->print(" Windows Me"); break;
default: // future windows, print out its major and minor versions
st->print(" Windows %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
}
}
st->print(" Build %d", osvi.dwBuildNumber);
st->print(" %s", osvi.szCSDVersion); // service pack
st->cr();
}
void os::print_memory_info(outputStream* st) {
st->print("Memory:");
st->print(" %dk page", os::vm_page_size()>>10);
// FIXME: GlobalMemoryStatus() may return an incorrect value if total memory
// is larger than 4GB
MEMORYSTATUS ms;
GlobalMemoryStatus(&ms);
st->print(", physical %uk", os::physical_memory() >> 10);
st->print("(%uk free)", os::available_memory() >> 10);
st->print(", swap %uk", ms.dwTotalPageFile >> 10);
st->print("(%uk free)", ms.dwAvailPageFile >> 10);
st->cr();
}
void os::print_siginfo(outputStream *st, void *siginfo) {
EXCEPTION_RECORD* er = (EXCEPTION_RECORD*)siginfo;
st->print("siginfo:");
st->print(" ExceptionCode=0x%x", er->ExceptionCode);
if (er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
er->NumberParameters >= 2) {
switch (er->ExceptionInformation[0]) {
case 0: st->print(", reading address"); break;
case 1: st->print(", writing address"); break;
default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
er->ExceptionInformation[0]);
}
st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
} else if (er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR &&
er->NumberParameters >= 2 && UseSharedSpaces) {
FileMapInfo* mapinfo = FileMapInfo::current_info();
if (mapinfo->is_in_shared_space((void*)er->ExceptionInformation[1])) {
st->print("\n\nError accessing class data sharing archive." \
" Mapped file inaccessible during execution, " \
" possible disk/network problem.");
}
} else {
int num = er->NumberParameters;
if (num > 0) {
st->print(", ExceptionInformation=");
for (int i = 0; i < num; i++) {
st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
}
}
}
st->cr();
}
void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
// do nothing
}
static char saved_jvm_path[MAX_PATH] = {0};
// Find the full path to the current module, jvm.dll or jvm_g.dll
void os::jvm_path(char *buf, jint buflen) {
// Error checking.
if (buflen < MAX_PATH) {
assert(false, "must use a large-enough buffer");
buf[0] = '\0';
return;
}
// Lazy resolve the path to current module.
if (saved_jvm_path[0] != 0) {
strcpy(buf, saved_jvm_path);
return;
}
GetModuleFileName(vm_lib_handle, buf, buflen);
strcpy(saved_jvm_path, buf);
}
void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
#ifndef _WIN64
st->print("_");
#endif
}
void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
#ifndef _WIN64
st->print("@%d", args_size * sizeof(int));
#endif
}
// sun.misc.Signal
// NOTE that this is a workaround for an apparent kernel bug where if
// a signal handler for SIGBREAK is installed then that signal handler
// takes priority over the console control handler for CTRL_CLOSE_EVENT.
// See bug 4416763.
static void (*sigbreakHandler)(int) = NULL;
static void UserHandler(int sig, void *siginfo, void *context) {
os::signal_notify(sig);
// We need to reinstate the signal handler each time...
os::signal(sig, (void*)UserHandler);
}
void* os::user_handler() {
return (void*) UserHandler;
}
void* os::signal(int signal_number, void* handler) {
if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
void (*oldHandler)(int) = sigbreakHandler;
sigbreakHandler = (void (*)(int)) handler;
return (void*) oldHandler;
} else {
return (void*)::signal(signal_number, (void (*)(int))handler);
}
}
void os::signal_raise(int signal_number) {
raise(signal_number);
}
// The Win32 C runtime library maps all console control events other than ^C
// into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
// logoff, and shutdown events. We therefore install our own console handler
// that raises SIGTERM for the latter cases.
//
static BOOL WINAPI consoleHandler(DWORD event) {
switch(event) {
case CTRL_C_EVENT:
if (is_error_reported()) {
// Ctrl-C is pressed during error reporting, likely because the error
// handler fails to abort. Let VM die immediately.
os::die();
}
os::signal_raise(SIGINT);
return TRUE;
break;
case CTRL_BREAK_EVENT:
if (sigbreakHandler != NULL) {
(*sigbreakHandler)(SIGBREAK);
}
return TRUE;
break;
case CTRL_CLOSE_EVENT:
case CTRL_LOGOFF_EVENT:
case CTRL_SHUTDOWN_EVENT:
os::signal_raise(SIGTERM);
return TRUE;
break;
default:
break;
}
return FALSE;
}
/*
* The following code is moved from os.cpp for making this
* code platform specific, which it is by its very nature.
*/
// Return maximum OS signal used + 1 for internal use only
// Used as exit signal for signal_thread
int os::sigexitnum_pd(){
return NSIG;
}
// a counter for each possible signal value, including signal_thread exit signal
static volatile jint pending_signals[NSIG+1] = { 0 };
static HANDLE sig_sem;
void os::signal_init_pd() {
// Initialize signal structures
memset((void*)pending_signals, 0, sizeof(pending_signals));
sig_sem = ::CreateSemaphore(NULL, 0, NSIG+1, NULL);
// Programs embedding the VM do not want it to attempt to receive
// events like CTRL_LOGOFF_EVENT, which are used to implement the
// shutdown hooks mechanism introduced in 1.3. For example, when
// the VM is run as part of a Windows NT service (i.e., a servlet
// engine in a web server), the correct behavior is for any console
// control handler to return FALSE, not TRUE, because the OS's
// "final" handler for such events allows the process to continue if
// it is a service (while terminating it if it is not a service).
// To make this behavior uniform and the mechanism simpler, we
// completely disable the VM's usage of these console events if -Xrs
// (=ReduceSignalUsage) is specified. This means, for example, that
// the CTRL-BREAK thread dump mechanism is also disabled in this
// case. See bugs 4323062, 4345157, and related bugs.
if (!ReduceSignalUsage) {
// Add a CTRL-C handler
SetConsoleCtrlHandler(consoleHandler, TRUE);
}
}
void os::signal_notify(int signal_number) {
BOOL ret;
Atomic::inc(&pending_signals[signal_number]);
ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
assert(ret != 0, "ReleaseSemaphore() failed");
}
static int check_pending_signals(bool wait_for_signal) {
DWORD ret;
while (true) {
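// Atomically claim one pending occurrence of signal i: the cmpxchg succeeds
// only if pending_signals[i] still holds the value n we just read.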
for (int i = 0; i < NSIG + 1; i++) {
jint n = pending_signals[i];
if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
return i;
}
}
if (!wait_for_signal) {
return -1;
}
JavaThread *thread = JavaThread::current();
ThreadBlockInVM tbivm(thread);
bool threadIsSuspended;
do {
thread->set_suspend_equivalent();
// cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
ret = ::WaitForSingleObject(sig_sem, INFINITE);
assert(ret == WAIT_OBJECT_0, "WaitForSingleObject() failed");
// were we externally suspended while we were waiting?
threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
if (threadIsSuspended) {
//
// The semaphore has been incremented, but while we were waiting
// another thread suspended us. We don't want to continue running
// while suspended because that would surprise the thread that
// suspended us.
//
ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
assert(ret != 0, "ReleaseSemaphore() failed");
thread->java_suspend_self();
}
} while (threadIsSuspended);
}
}
int os::signal_lookup() {
return check_pending_signals(false);
}
int os::signal_wait() {
return check_pending_signals(true);
}
// Implicit OS exception handling
LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo, address handler) {
JavaThread* thread = JavaThread::current();
// Save pc in thread
#ifdef _M_IA64
thread->set_saved_exception_pc((address)exceptionInfo->ContextRecord->StIIP);
// Set pc to handler
exceptionInfo->ContextRecord->StIIP = (DWORD64)handler;
#elif _M_AMD64
thread->set_saved_exception_pc((address)exceptionInfo->ContextRecord->Rip);
// Set pc to handler
exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
#else
thread->set_saved_exception_pc((address)exceptionInfo->ContextRecord->Eip);
// Set pc to handler
exceptionInfo->ContextRecord->Eip = (LONG)handler;
#endif
// Continue the execution
return EXCEPTION_CONTINUE_EXECUTION;
}
// Used for PostMortemDump
extern "C" void safepoints();
extern "C" void find(int x);
extern "C" void events();
// According to Windows API documentation, an illegal instruction sequence should generate
// the 0xC000001C exception code. However, real world experience shows that occasionally
// the execution of an illegal instruction can generate the exception code 0xC000001E. This
// seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).
#define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E
// From "Execution Protection in the Windows Operating System" draft 0.35
// Once a system header becomes available, the "real" define should be
// included or copied here.
#define EXCEPTION_INFO_EXEC_VIOLATION 0x08
#define def_excpt(val) #val, val
struct siglabel {
char *name;
int number;
};
struct siglabel exceptlabels[] = {
def_excpt(EXCEPTION_ACCESS_VIOLATION),
def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT),
def_excpt(EXCEPTION_BREAKPOINT),
def_excpt(EXCEPTION_SINGLE_STEP),
def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED),
def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND),
def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO),
def_excpt(EXCEPTION_FLT_INEXACT_RESULT),
def_excpt(EXCEPTION_FLT_INVALID_OPERATION),
def_excpt(EXCEPTION_FLT_OVERFLOW),
def_excpt(EXCEPTION_FLT_STACK_CHECK),
def_excpt(EXCEPTION_FLT_UNDERFLOW),
def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO),
def_excpt(EXCEPTION_INT_OVERFLOW),
def_excpt(EXCEPTION_PRIV_INSTRUCTION),
def_excpt(EXCEPTION_IN_PAGE_ERROR),
def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION),
def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2),
def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION),
def_excpt(EXCEPTION_STACK_OVERFLOW),
def_excpt(EXCEPTION_INVALID_DISPOSITION),
def_excpt(EXCEPTION_GUARD_PAGE),
def_excpt(EXCEPTION_INVALID_HANDLE),
NULL, 0
};
const char* os::exception_name(int exception_code, char *buf, size_t size) {
for (int i = 0; exceptlabels[i].name != NULL; i++) {
if (exceptlabels[i].number == exception_code) {
jio_snprintf(buf, size, "%s", exceptlabels[i].name);
return buf;
}
}
return NULL;
}
//-----------------------------------------------------------------------------
LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
// handle exception caused by idiv; should only happen for -MinInt/-1
// (division by zero is handled explicitly)
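// With a register operand, idiv is encoded as opcode 0xF7 followed by a ModRM
// byte of the form 0xF8 | reg, giving the two-byte instruction length assumed below.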
#ifdef _M_IA64
assert(0, "Fix Handle_IDiv_Exception");
#elif _M_AMD64
PCONTEXT ctx = exceptionInfo->ContextRecord;
address pc = (address)ctx->Rip;
NOT_PRODUCT(Events::log("idiv overflow exception at " INTPTR_FORMAT , pc));
assert(pc[0] == 0xF7, "not an idiv opcode");
assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
assert(ctx->Rax == min_jint, "unexpected idiv exception");
// set correct result values and continue after idiv instruction
ctx->Rip = (DWORD)pc + 2; // idiv reg, reg is 2 bytes
ctx->Rax = (DWORD)min_jint; // result
ctx->Rdx = (DWORD)0; // remainder
// Continue the execution
#else
PCONTEXT ctx = exceptionInfo->ContextRecord;
address pc = (address)ctx->Eip;
NOT_PRODUCT(Events::log("idiv overflow exception at " INTPTR_FORMAT , pc));
assert(pc[0] == 0xF7, "not an idiv opcode");
assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
assert(ctx->Eax == min_jint, "unexpected idiv exception");
// set correct result values and continue after idiv instruction
ctx->Eip = (DWORD)pc + 2; // idiv reg, reg is 2 bytes
ctx->Eax = (DWORD)min_jint; // result
ctx->Edx = (DWORD)0; // remainder
// Continue the execution
#endif
return EXCEPTION_CONTINUE_EXECUTION;
}
#ifndef _WIN64
//-----------------------------------------------------------------------------
LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
// handle exception caused by a native method modifying the control word
PCONTEXT ctx = exceptionInfo->ContextRecord;
DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
switch (exception_code) {
case EXCEPTION_FLT_DENORMAL_OPERAND:
case EXCEPTION_FLT_DIVIDE_BY_ZERO:
case EXCEPTION_FLT_INEXACT_RESULT:
case EXCEPTION_FLT_INVALID_OPERATION:
case EXCEPTION_FLT_OVERFLOW:
case EXCEPTION_FLT_STACK_CHECK:
case EXCEPTION_FLT_UNDERFLOW:
jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std());
if (fp_control_word != ctx->FloatSave.ControlWord) {
// Restore FPCW and mask out FLT exceptions
ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
// Mask out pending FLT exceptions
ctx->FloatSave.StatusWord &= 0xffffff00;
return EXCEPTION_CONTINUE_EXECUTION;
}
}
return EXCEPTION_CONTINUE_SEARCH;
}
#else //_WIN64
/*
On Windows, the mxcsr control bits are non-volatile across calls
See also CR 6192333
If EXCEPTION_FLT_* happened after some native method modified
mxcsr - it is not a jvm fault.
However, should we decide to restore mxcsr after a faulty
native method, we can uncomment the following code
jint MxCsr = INITIAL_MXCSR;
// we can't use StubRoutines::addr_mxcsr_std()
// because in Win64 mxcsr is not saved there
if (MxCsr != ctx->MxCsr) {
ctx->MxCsr = MxCsr;
return EXCEPTION_CONTINUE_EXECUTION;
}
*/
#endif //_WIN64
// Fatal error reporting is single threaded so we can make this a
// static and preallocated. If it's more than MAX_PATH silently ignore
// it.
static char saved_error_file[MAX_PATH] = {0};
void os::set_error_file(const char *logfile) {
if (strlen(logfile) <= MAX_PATH) {
strncpy(saved_error_file, logfile, MAX_PATH);
}
}
static inline void report_error(Thread* t, DWORD exception_code,
address addr, void* siginfo, void* context) {
VMError err(t, exception_code, addr, siginfo, context);
err.report_and_die();
// If UseOsErrorReporting, this will return here and save the error file
// somewhere where we can find it in the minidump.
}
//-----------------------------------------------------------------------------
LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
#ifdef _M_IA64
address pc = (address) exceptionInfo->ContextRecord->StIIP;
#elif _M_AMD64
address pc = (address) exceptionInfo->ContextRecord->Rip;
#else
address pc = (address) exceptionInfo->ContextRecord->Eip;
#endif
Thread* t = ThreadLocalStorage::get_thread_slow(); // slow & steady
#ifndef _WIN64
// Execution protection violation - win32 running on AMD64 only
// Handled first to avoid misdiagnosis as a "normal" access violation;
// This is safe to do because we have a new/unique ExceptionInformation
// code for this condition.
if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
int exception_subcode = (int) exceptionRecord->ExceptionInformation[0];
address addr = (address) exceptionRecord->ExceptionInformation[1];
if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
int page_size = os::vm_page_size();
// Make sure the pc and the faulting address are sane.
//
// If an instruction spans a page boundary, and the page containing
// the beginning of the instruction is executable but the following
// page is not, the pc and the faulting address might be slightly
// different - we still want to unguard the 2nd page in this case.
//
// 15 bytes seems to be a (very) safe value for max instruction size.
bool pc_is_near_addr =
(pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
bool instr_spans_page_boundary =
(align_size_down((intptr_t) pc ^ (intptr_t) addr,
(intptr_t) page_size) > 0);
if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
static volatile address last_addr =
(address) os::non_memory_address_word();
// In conservative mode, don't unguard unless the address is in the VM
if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
(UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
// Set memory to RWX and retry
address page_start =
(address) align_size_down((intptr_t) addr, (intptr_t) page_size);
bool res = os::protect_memory((char*) page_start, page_size,
os::MEM_PROT_RWX);
if (PrintMiscellaneous && Verbose) {
char buf[256];
jio_snprintf(buf, sizeof(buf), "Execution protection violation "
"at " INTPTR_FORMAT
", unguarding " INTPTR_FORMAT ": %s", addr,
page_start, (res ? "success" : strerror(errno)));
tty->print_raw_cr(buf);
}
// Set last_addr so if we fault again at the same address, we don't
// end up in an endless loop.
//
// There are two potential complications here. Two threads trapping
// at the same address at the same time could cause one of the
// threads to think it already unguarded, and abort the VM. Likely
// very rare.
//
// The other race involves two threads alternately trapping at
// different addresses and failing to unguard the page, resulting in
// an endless loop. This condition is probably even more unlikely
// than the first.
//
// Although both cases could be avoided by using locks or thread
// local last_addr, these solutions are unnecessary complication:
// this handler is a best-effort safety net, not a complete solution.
// It is disabled by default and should only be used as a workaround
// in case we missed any no-execute-unsafe VM code.
last_addr = addr;
return EXCEPTION_CONTINUE_EXECUTION;
}
}
// Last unguard failed or not unguarding
tty->print_raw_cr("Execution protection violation");
report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord,
exceptionInfo->ContextRecord);
return EXCEPTION_CONTINUE_SEARCH;
}
}
#endif // !_WIN64
// Check to see if we caught the safepoint code in the
// process of write protecting the memory serialization page.
// It write enables the page immediately after protecting it
// so just return.
if ( exception_code == EXCEPTION_ACCESS_VIOLATION ) {
JavaThread* thread = (JavaThread*) t;
PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
address addr = (address) exceptionRecord->ExceptionInformation[1];
if ( os::is_memory_serialize_page(thread, addr) ) {
// Block current thread until the memory serialize page permission restored.
os::block_on_serialize_page_trap();
return EXCEPTION_CONTINUE_EXECUTION;
}
}
if (t != NULL && t->is_Java_thread()) {
JavaThread* thread = (JavaThread*) t;
bool in_java = thread->thread_state() == _thread_in_Java;
// Handle potential stack overflows up front.
if (exception_code == EXCEPTION_STACK_OVERFLOW) {
if (os::uses_stack_guard_pages()) {
#ifdef _M_IA64
//
// If it's a legal stack address continue, Windows will map it in.
//
PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
address addr = (address) exceptionRecord->ExceptionInformation[1];
if (addr > thread->stack_yellow_zone_base() && addr < thread->stack_base() )
return EXCEPTION_CONTINUE_EXECUTION;
// The register save area is the same size as the memory stack
// and starts at the page just above the start of the memory stack.
// If we get a fault in this area, we've run out of register
// stack. If we are in java, try throwing a stack overflow exception.
if (addr > thread->stack_base() &&
addr <= (thread->stack_base()+thread->stack_size()) ) {
char buf[256];
jio_snprintf(buf, sizeof(buf),
"Register stack overflow, addr:%p, stack_base:%p\n",
addr, thread->stack_base() );
tty->print_raw_cr(buf);
// If not in java code, return and hope for the best.
return in_java ? Handle_Exception(exceptionInfo,
SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
: EXCEPTION_CONTINUE_EXECUTION;
}
#endif
if (thread->stack_yellow_zone_enabled()) {
// Yellow zone violation. The o/s has unprotected the first yellow
// zone page for us. Note: must call disable_stack_yellow_zone to
// update the enabled status, even if the zone contains only one page.
thread->disable_stack_yellow_zone();
// If not in java code, return and hope for the best.
return in_java ? Handle_Exception(exceptionInfo,
SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
: EXCEPTION_CONTINUE_EXECUTION;
} else {
// Fatal red zone violation.
thread->disable_stack_red_zone();
tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
exceptionInfo->ContextRecord);
return EXCEPTION_CONTINUE_SEARCH;
}
} else if (in_java) {
// JVM-managed guard pages cannot be used on win95/98. The o/s provides
// a one-time-only guard page, which it has released to us. The next
// stack overflow on this thread will result in an ACCESS_VIOLATION.
return Handle_Exception(exceptionInfo,
SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
} else {
// Can only return and hope for the best. Further stack growth will
// result in an ACCESS_VIOLATION.
return EXCEPTION_CONTINUE_EXECUTION;
}
} else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
// Either stack overflow or null pointer exception.
if (in_java) {
PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
address addr = (address) exceptionRecord->ExceptionInformation[1];
address stack_end = thread->stack_base() - thread->stack_size();
if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
// Stack overflow.
assert(!os::uses_stack_guard_pages(),
"should be caught by red zone code above.");
return Handle_Exception(exceptionInfo,
SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
}
//
// Check for safepoint polling and implicit null
// We only expect null pointers in the stubs (vtable)
// the rest are checked explicitly now.
//
CodeBlob* cb = CodeCache::find_blob(pc);
if (cb != NULL) {
if (os::is_poll_address(addr)) {
address stub = SharedRuntime::get_poll_stub(pc);
return Handle_Exception(exceptionInfo, stub);
}
}
{
#ifdef _WIN64
//
// If it's a legal stack address map the entire region in
//
PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
address addr = (address) exceptionRecord->ExceptionInformation[1];
if (addr > thread->stack_yellow_zone_base() && addr < thread->stack_base() ) {
addr = (address)((uintptr_t)addr &
(~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
os::commit_memory((char *)addr, thread->stack_base() - addr,
false );
return EXCEPTION_CONTINUE_EXECUTION;
}
else
#endif
{
// Null pointer exception.
#ifdef _M_IA64
// We catch register stack overflows in compiled code by doing
// an explicit compare and executing a st8(G0, G0) if the
// BSP enters into our guard area. We test for the overflow
// condition and fall into the normal null pointer exception
// code if BSP hasn't overflowed.
if ( in_java ) {
if(thread->register_stack_overflow()) {
assert((address)exceptionInfo->ContextRecord->IntS3 ==
thread->register_stack_limit(),
"GR7 doesn't contain register_stack_limit");
// Disable the yellow zone which sets the state that
// we've got a stack overflow problem.
if (thread->stack_yellow_zone_enabled()) {
thread->disable_stack_yellow_zone();
}
// Give us some room to process the exception
thread->disable_register_stack_guard();
// Update GR7 with the new limit so we can continue running
// compiled code.
exceptionInfo->ContextRecord->IntS3 =
(ULONGLONG)thread->register_stack_limit();
return Handle_Exception(exceptionInfo,
SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
} else {
//
// Check for implicit null
// We only expect null pointers in the stubs (vtable)
// the rest are checked explicitly now.
//
if (((uintptr_t)addr) < os::vm_page_size() ) {
// an access to the first page of VM--assume it is a null pointer
address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
}
}
} // in_java
// IA64 doesn't use implicit null checking yet. So we shouldn't
// get here.
tty->print_raw_cr("Access violation, possible null pointer exception");
report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
exceptionInfo->ContextRecord);
return EXCEPTION_CONTINUE_SEARCH;
#else /* !IA64 */
// Windows 98 reports faulting addresses incorrectly
if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr) ||
!os::win32::is_nt()) {
address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
}
report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
exceptionInfo->ContextRecord);
return EXCEPTION_CONTINUE_SEARCH;
#endif
}
}
}
#ifdef _WIN64
// Special care for fast JNI field accessors.
// jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
// in and the heap gets shrunk before the field access.
if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
address addr = JNI_FastGetField::find_slowcase_pc(pc);
if (addr != (address)-1) {
return Handle_Exception(exceptionInfo, addr);
}
}
#endif
#ifdef _WIN64
// Windows will sometimes generate an access violation
// when we call malloc. Since we use VectoredExceptions
// on 64 bit platforms, we see this exception. We must
// pass this exception on so Windows can recover.
// We check to see if the pc of the fault is in NTDLL.DLL
// if so, we pass control on to Windows for handling.
if (UseVectoredExceptions && _addr_in_ntdll(pc)) return EXCEPTION_CONTINUE_SEARCH;
#endif
// Stack overflow or null pointer exception in native code.
report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
exceptionInfo->ContextRecord);
return EXCEPTION_CONTINUE_SEARCH;
}
if (in_java) {
switch (exception_code) {
case EXCEPTION_INT_DIVIDE_BY_ZERO:
return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));
case EXCEPTION_INT_OVERFLOW:
return Handle_IDiv_Exception(exceptionInfo);
} // switch
}
#ifndef _WIN64
if ((thread->thread_state() == _thread_in_Java) ||
(thread->thread_state() == _thread_in_native) )
{
LONG result=Handle_FLT_Exception(exceptionInfo);
if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
}
#endif // !_WIN64
}
if (exception_code != EXCEPTION_BREAKPOINT) {
#ifndef _WIN64
report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
exceptionInfo->ContextRecord);
#else
// Itanium Windows uses a VectoredExceptionHandler, which means that
// C++ programmatic exception handlers (try/except) will get here.
// Continue the search for the right except block if the exception
// code is not a fatal code.
switch ( exception_code ) {
case EXCEPTION_ACCESS_VIOLATION:
case EXCEPTION_STACK_OVERFLOW:
case EXCEPTION_ILLEGAL_INSTRUCTION:
case EXCEPTION_ILLEGAL_INSTRUCTION_2:
case EXCEPTION_INT_OVERFLOW:
case EXCEPTION_INT_DIVIDE_BY_ZERO:
{ report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
exceptionInfo->ContextRecord);
}
break;
default:
break;
}
#endif
}
return EXCEPTION_CONTINUE_SEARCH;
}
#ifndef _WIN64
// Special care for fast JNI accessors.
// jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
// the heap gets shrunk before the field access.
// Need to install our own structured exception handler since native code may
// install its own.
LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
address pc = (address) exceptionInfo->ContextRecord->Eip;
address addr = JNI_FastGetField::find_slowcase_pc(pc);
if (addr != (address)-1) {
return Handle_Exception(exceptionInfo, addr);
}
}
return EXCEPTION_CONTINUE_SEARCH;
}
#define DEFINE_FAST_GETFIELD(Return,Fieldname,Result) \
Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env, jobject obj, jfieldID fieldID) { \
__try { \
return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env, obj, fieldID); \
} __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)_exception_info())) { \
} \
return 0; \
}
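// Each invocation below defines one SEH-protected wrapper; for example,
// DEFINE_FAST_GETFIELD(jint, int, Int) expands to jni_fast_GetIntField_wrapper,
// which calls through JNI_FastGetField::jni_fast_GetIntField_fp under __try and
// falls back to returning 0 if the fast accessor faults.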
DEFINE_FAST_GETFIELD(jboolean, bool, Boolean)
DEFINE_FAST_GETFIELD(jbyte, byte, Byte)
DEFINE_FAST_GETFIELD(jchar, char, Char)
DEFINE_FAST_GETFIELD(jshort, short, Short)
DEFINE_FAST_GETFIELD(jint, int, Int)
DEFINE_FAST_GETFIELD(jlong, long, Long)
DEFINE_FAST_GETFIELD(jfloat, float, Float)
DEFINE_FAST_GETFIELD(jdouble, double, Double)
address os::win32::fast_jni_accessor_wrapper(BasicType type) {
switch (type) {
case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
case T_BYTE: return (address)jni_fast_GetByteField_wrapper;
case T_CHAR: return (address)jni_fast_GetCharField_wrapper;
case T_SHORT: return (address)jni_fast_GetShortField_wrapper;
case T_INT: return (address)jni_fast_GetIntField_wrapper;
case T_LONG: return (address)jni_fast_GetLongField_wrapper;
case T_FLOAT: return (address)jni_fast_GetFloatField_wrapper;
case T_DOUBLE: return (address)jni_fast_GetDoubleField_wrapper;
default: ShouldNotReachHere();
}
return (address)-1;
}
#endif
// Virtual Memory
int os::vm_page_size() { return os::win32::vm_page_size(); }
int os::vm_allocation_granularity() {
return os::win32::vm_allocation_granularity();
}
// Windows large page support is available on Windows 2003. In order to use
// large page memory, the administrator must first assign additional privilege
// to the user:
// + select Control Panel -> Administrative Tools -> Local Security Policy
// + select Local Policies -> User Rights Assignment
// + double click "Lock pages in memory", add users and/or groups
// + reboot
// Note the above steps are needed for administrator as well, as administrators
// by default do not have the privilege to lock pages in memory.
//
// Note about Windows 2003: although the API supports committing large page
// memory on a page-by-page basis and VirtualAlloc() returns success under this
// scenario, I found through experimentation that it only uses large pages if the entire
// memory region is reserved and committed in a single VirtualAlloc() call.
// This makes Windows large page support more or less like Solaris ISM, in
// that the entire heap must be committed upfront. This probably will change
// in the future, if so the code below needs to be revisited.
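// Illustrative sketch only (not the code below): to actually obtain large
// pages, the reservation and commit must happen in one call, e.g.
//   (char*) VirtualAlloc(NULL, bytes,
//                        MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES,
//                        PAGE_READWRITE);
// rather than reserving first and committing page by page afterwards.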
#ifndef MEM_LARGE_PAGES
#define MEM_LARGE_PAGES 0x20000000
#endif
// GetLargePageMinimum is only available on Windows 2003. The other functions
// are available on NT but not on Windows 98/Me. We have to resolve them at
// runtime.
typedef SIZE_T (WINAPI *GetLargePageMinimum_func_type) (void);
typedef BOOL (WINAPI *AdjustTokenPrivileges_func_type)
(HANDLE, BOOL, PTOKEN_PRIVILEGES, DWORD, PTOKEN_PRIVILEGES, PDWORD);
typedef BOOL (WINAPI *OpenProcessToken_func_type) (HANDLE, DWORD, PHANDLE);
typedef BOOL (WINAPI *LookupPrivilegeValue_func_type) (LPCTSTR, LPCTSTR, PLUID);
static GetLargePageMinimum_func_type _GetLargePageMinimum;
static AdjustTokenPrivileges_func_type _AdjustTokenPrivileges;
static OpenProcessToken_func_type _OpenProcessToken;
static LookupPrivilegeValue_func_type _LookupPrivilegeValue;
static HINSTANCE _kernel32;
static HINSTANCE _advapi32;
static HANDLE _hProcess;
static HANDLE _hToken;
static size_t _large_page_size = 0;
static bool resolve_functions_for_large_page_init() {
_kernel32 = LoadLibrary("kernel32.dll");
if (_kernel32 == NULL) return false;
_GetLargePageMinimum = CAST_TO_FN_PTR(GetLargePageMinimum_func_type,
GetProcAddress(_kernel32, "GetLargePageMinimum"));
if (_GetLargePageMinimum == NULL) return false;
_advapi32 = LoadLibrary("advapi32.dll");
if (_advapi32 == NULL) return false;
_AdjustTokenPrivileges = CAST_TO_FN_PTR(AdjustTokenPrivileges_func_type,
GetProcAddress(_advapi32, "AdjustTokenPrivileges"));
_OpenProcessToken = CAST_TO_FN_PTR(OpenProcessToken_func_type,
GetProcAddress(_advapi32, "OpenProcessToken"));
_LookupPrivilegeValue = CAST_TO_FN_PTR(LookupPrivilegeValue_func_type,
GetProcAddress(_advapi32, "LookupPrivilegeValueA"));
return _AdjustTokenPrivileges != NULL &&
_OpenProcessToken != NULL &&
_LookupPrivilegeValue != NULL;
}
static bool request_lock_memory_privilege() {
_hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
os::current_process_id());
LUID luid;
if (_hProcess != NULL &&
_OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) &&
_LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {
TOKEN_PRIVILEGES tp;
tp.PrivilegeCount = 1;
tp.Privileges[0].Luid = luid;
tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
// AdjustTokenPrivileges() may return TRUE even when it couldn't change the
// privilege. Check GetLastError() too. See MSDN document.
if (_AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) &&
(GetLastError() == ERROR_SUCCESS)) {
return true;
}
}
return false;
}
static void cleanup_after_large_page_init() {
_GetLargePageMinimum = NULL;
_AdjustTokenPrivileges = NULL;
_OpenProcessToken = NULL;
_LookupPrivilegeValue = NULL;
if (_kernel32) FreeLibrary(_kernel32);
_kernel32 = NULL;
if (_advapi32) FreeLibrary(_advapi32);
_advapi32 = NULL;
if (_hProcess) CloseHandle(_hProcess);
_hProcess = NULL;
if (_hToken) CloseHandle(_hToken);
_hToken = NULL;
}
bool os::large_page_init() {
if (!UseLargePages) return false;
// print a warning if any large page related flag is specified on command line
bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
!FLAG_IS_DEFAULT(LargePageSizeInBytes);
bool success = false;
# define WARN(msg) if (warn_on_failure) { warning(msg); }
if (resolve_functions_for_large_page_init()) {
if (request_lock_memory_privilege()) {
size_t s = _GetLargePageMinimum();
if (s) {
#if defined(IA32) || defined(AMD64)
if (s > 4*M || LargePageSizeInBytes > 4*M) {
WARN("JVM cannot use large pages bigger than 4mb.");
} else {
#endif
if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) {
_large_page_size = LargePageSizeInBytes;
} else {
_large_page_size = s;
}
success = true;
#if defined(IA32) || defined(AMD64)
}
#endif
} else {
WARN("Large page is not supported by the processor.");
}
} else {
WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
}
} else {
WARN("Large page is not supported by the operating system.");
}
#undef WARN
const size_t default_page_size = (size_t) vm_page_size();
if (success && _large_page_size > default_page_size) {
_page_sizes[0] = _large_page_size;
_page_sizes[1] = default_page_size;
_page_sizes[2] = 0;
}
cleanup_after_large_page_init();
return success;
}
// On win32, one cannot release just a part of reserved memory, it's an
// all or nothing deal. When we split a reservation, we must break the
// reservation into two reservations.
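// For example, splitting a reservation [base, base+size) at 'split' ends up
// as two independent reservations, [base, base+split) and [base+split, base+size),
// obtained by releasing the original reservation and reserving the halves again.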
void os::split_reserved_memory(char *base, size_t size, size_t split,
bool realloc) {
if (size > 0) {
release_memory(base, size);
if (realloc) {
reserve_memory(split, base);
}
if (size != split) {
reserve_memory(size - split, base + split);
}
}
}
char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
assert((size_t)addr % os::vm_allocation_granularity() == 0,
"reserve alignment");
assert(bytes % os::vm_allocation_granularity() == 0, "reserve block size");
char* res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
assert(res == NULL || addr == NULL || addr == res,
"Unexpected address from reserve.");
return res;
}
// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).
char* os::attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
// Windows os::reserve_memory() fails if the requested address range is
// not available.
return reserve_memory(bytes, requested_addr);
}
size_t os::large_page_size() {
return _large_page_size;
}
bool os::can_commit_large_page_memory() {
// Windows only uses large page memory when the entire region is reserved
// and committed in a single VirtualAlloc() call. This may change in the
// future, but with Windows 2003 it's not possible to commit on demand.
return false;
}
bool os::can_execute_large_page_memory() {
return true;
}
char* os::reserve_memory_special(size_t bytes, char* addr, bool exec) {
if (UseLargePagesIndividualAllocation) {
if (TracePageSizes && Verbose) {
tty->print_cr("Reserving large pages individually.");
}
char * p_buf;
// First reserve enough address space in advance, since we want to be
// able to break a single contiguous virtual address range into multiple
// large page commits, but WS2003 does not allow reserving large page space.
// So we just use 4K pages for the reserve; this gives us a legal contiguous
// address space. Then we deallocate that reservation and re-allocate
// using large pages.
const size_t size_of_reserve = bytes + _large_page_size;
if (bytes > size_of_reserve) {
// Overflowed.
warning("Individually allocated large pages failed, "
"use -XX:-UseLargePagesIndividualAllocation to turn off");
return NULL;
}
p_buf = (char *) VirtualAlloc(addr,
size_of_reserve, // size of Reserve
MEM_RESERVE,
PAGE_READWRITE);
// If reservation failed, return NULL
if (p_buf == NULL) return NULL;
release_memory(p_buf, bytes + _large_page_size);
// round up to page boundary. If the size_of_reserve did not
// overflow and the reservation did not fail, this align up
// should not overflow.
p_buf = (char *) align_size_up((size_t)p_buf, _large_page_size);
// now go through and allocate one page at a time until all bytes are
// allocated
size_t bytes_remaining = align_size_up(bytes, _large_page_size);
// An overflow of align_size_up() would have been caught above
// in the calculation of size_of_reserve.
char * next_alloc_addr = p_buf;
#ifdef ASSERT
// Variable for the failure injection
long ran_num = os::random();
size_t fail_after = ran_num % bytes;
#endif
while (bytes_remaining) {
size_t bytes_to_rq = MIN2(bytes_remaining, _large_page_size);
// Note allocate and commit
char * p_new;
#ifdef ASSERT
bool inject_error = LargePagesIndividualAllocationInjectError &&
(bytes_remaining <= fail_after);
#else
const bool inject_error = false;
#endif
if (inject_error) {
p_new = NULL;
} else {
p_new = (char *) VirtualAlloc(next_alloc_addr,
bytes_to_rq,
MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES,
PAGE_READWRITE);
if (p_new != NULL && exec) {
DWORD oldprot;
// Windows doc says to use VirtualProtect to get execute permissions
VirtualProtect(next_alloc_addr, bytes_to_rq,
PAGE_EXECUTE_READWRITE, &oldprot);
}
}
if (p_new == NULL) {
// Free any allocated pages
if (next_alloc_addr > p_buf) {
// Some memory was committed so release it.
size_t bytes_to_release = bytes - bytes_remaining;
release_memory(p_buf, bytes_to_release);
}
#ifdef ASSERT
if (UseLargePagesIndividualAllocation &&
LargePagesIndividualAllocationInjectError) {
if (TracePageSizes && Verbose) {
tty->print_cr("Reserving large pages individually failed.");
}
}
#endif
return NULL;
}
bytes_remaining -= bytes_to_rq;
next_alloc_addr += bytes_to_rq;
}
return p_buf;
} else {
// normal policy just allocate it all at once
DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
char * res = (char *)VirtualAlloc(NULL, bytes, flag, PAGE_READWRITE);
if (res != NULL && exec) {
DWORD oldprot;
// Windows doc says to use VirtualProtect to get execute permissions
VirtualProtect(res, bytes, PAGE_EXECUTE_READWRITE, &oldprot);
}
return res;
}
}
bool os::release_memory_special(char* base, size_t bytes) {
return release_memory(base, bytes);
}
void os::print_statistics() {
}
bool os::commit_memory(char* addr, size_t bytes, bool exec) {
if (bytes == 0) {
// Don't bother the OS with noops.
return true;
}
assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
// Don't attempt to print anything if the OS call fails. We're
// probably low on resources, so the print itself may cause crashes.
bool result = VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) != 0;
if (result && exec) {
DWORD oldprot;
// Windows doc says to use VirtualProtect to get execute permissions
return VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot) != 0;
} else {
return result;
}
}
bool os::commit_memory(char* addr, size_t size, size_t alignment_hint,
bool exec) {
return commit_memory(addr, size, exec);
}
bool os::uncommit_memory(char* addr, size_t bytes) {
if (bytes == 0) {
// Don't bother the OS with noops.
return true;
}
assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
return VirtualFree(addr, bytes, MEM_DECOMMIT) != 0;
}
bool os::release_memory(char* addr, size_t bytes) {
return VirtualFree(addr, 0, MEM_RELEASE) != 0;
}
// Set protections specified
bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
bool is_committed) {
unsigned int p = 0;
switch (prot) {
case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
case MEM_PROT_READ: p = PAGE_READONLY; break;
case MEM_PROT_RW: p = PAGE_READWRITE; break;
case MEM_PROT_RWX: p = PAGE_EXECUTE_READWRITE; break;
default:
ShouldNotReachHere();
}
DWORD old_status;
// Strangely enough, on Win32 one can change protection only for committed
// memory. Not a big deal anyway, as the region involved is at most 64K.
if (!is_committed && !commit_memory(addr, bytes, prot == MEM_PROT_RWX)) {
fatal("cannot commit protection page");
}
// One cannot use os::guard_memory() here, as on Win32 guard pages
// have different (one-shot) semantics; from MSDN on PAGE_GUARD:
//
// Pages in the region become guard pages. Any attempt to access a guard page
// causes the system to raise a STATUS_GUARD_PAGE exception and turn off
// the guard page status. Guard pages thus act as a one-time access alarm.
return VirtualProtect(addr, bytes, p, &old_status) != 0;
}
bool os::guard_memory(char* addr, size_t bytes) {
DWORD old_status;
return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
}
bool os::unguard_memory(char* addr, size_t bytes) {
DWORD old_status;
return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
}
void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::free_memory(char *addr, size_t bytes) { }
void os::numa_make_global(char *addr, size_t bytes) { }
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) { }
bool os::numa_topology_changed() { return false; }
size_t os::numa_get_groups_num() { return 1; }
int os::numa_get_group_id() { return 0; }
size_t os::numa_get_leaf_groups(int *ids, size_t size) {
if (size > 0) {
ids[0] = 0;
return 1;
}
return 0;
}
bool os::get_page_info(char *start, page_info* info) {
return false;
}
char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
return end;
}
char* os::non_memory_address_word() {
// Must never look like an address returned by reserve_memory,
// even in its subfields (as defined by the CPU immediate fields,
// if the CPU splits constants across multiple instructions).
return (char*)-1;
}
#define MAX_ERROR_COUNT 100
#define SYS_THREAD_ERROR 0xffffffffUL
void os::pd_start_thread(Thread* thread) {
DWORD ret = ResumeThread(thread->osthread()->thread_handle());
// Returns previous suspend state:
// 0: Thread was not suspended
// 1: Thread is running now
// >1: Thread is still suspended.
assert(ret != SYS_THREAD_ERROR, "ResumeThread failed"); // should propagate back
}
size_t os::read(int fd, void *buf, unsigned int nBytes) {
return ::read(fd, buf, nBytes);
}
class HighResolutionInterval {
// The default timer resolution seems to be 10 milliseconds.
// (Where is this written down?)
// If someone wants to sleep for only a fraction of the default,
// then we set the timer resolution down to 1 millisecond for
// the duration of their interval.
// We carefully set the resolution back, since otherwise we
// seem to incur an overhead (3%?) that we don't need.
// CONSIDER: if ms is small, say 3, then we should run with a high resolution timer.
// But if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
// Alternatively, we could compute the relative error (503/500 = .6%) and only use
// timeBeginPeriod() if the relative error exceeded some threshold.
// timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
// to decreased efficiency related to increased timer "tick" rates. We want to minimize
// (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
// resolution timers running.
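// Usage (see os::sleep below): construct a HighResolutionInterval around a
// timed wait; the constructor/destructor bracket the wait with
// timeBeginPeriod(1)/timeEndPeriod(1) only when the requested interval is
// not already a multiple of the default 10ms resolution.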
private:
jlong resolution;
public:
HighResolutionInterval(jlong ms) {
resolution = ms % 10L;
if (resolution != 0) {
MMRESULT result = timeBeginPeriod(1L);
}
}
~HighResolutionInterval() {
if (resolution != 0) {
MMRESULT result = timeEndPeriod(1L);
}
resolution = 0L;
}
};
int os::sleep(Thread* thread, jlong ms, bool interruptable) {
jlong limit = (jlong) MAXDWORD;
while(ms > limit) {
int res;
if ((res = sleep(thread, limit, interruptable)) != OS_TIMEOUT)
return res;
ms -= limit;
}
assert(thread == Thread::current(), "thread consistency check");
OSThread* osthread = thread->osthread();
OSThreadWaitState osts(osthread, false /* not Object.wait() */);
int result;
if (interruptable) {
assert(thread->is_Java_thread(), "must be java thread");
JavaThread *jt = (JavaThread *) thread;
ThreadBlockInVM tbivm(jt);
jt->set_suspend_equivalent();
// cleared by handle_special_suspend_equivalent_condition() or
// java_suspend_self() via check_and_wait_while_suspended()
HANDLE events[1];
events[0] = osthread->interrupt_event();
HighResolutionInterval *phri=NULL;
if(!ForceTimeHighResolution)
phri = new HighResolutionInterval( ms );
if (WaitForMultipleObjects(1, events, FALSE, (DWORD)ms) == WAIT_TIMEOUT) {
result = OS_TIMEOUT;
} else {
ResetEvent(osthread->interrupt_event());
osthread->set_interrupted(false);
result = OS_INTRPT;
}
delete phri; //if it is NULL, harmless
// were we externally suspended while we were waiting?
jt->check_and_wait_while_suspended();
} else {
assert(!thread->is_Java_thread(), "must not be java thread");
Sleep((long) ms);
result = OS_TIMEOUT;
}
return result;
}
// Sleep forever; naked call to OS-specific sleep; use with CAUTION
void os::infinite_sleep() {
while (true) { // sleep forever ...
Sleep(100000); // ... 100 seconds at a time
}
}
typedef BOOL (WINAPI * STTSignature)(void) ;
os::YieldResult os::NakedYield() {
// Use either SwitchToThread() or Sleep(0)
// Consider passing back the return value from SwitchToThread().
// We use GetProcAddress() as ancient Win9X versions of Windows don't support SwitchToThread.
// In that case we revert to Sleep(0).
static volatile STTSignature stt = (STTSignature) 1 ;
if (stt == ((STTSignature) 1)) {
stt = (STTSignature) ::GetProcAddress (LoadLibrary ("Kernel32.dll"), "SwitchToThread") ;
// It's OK if threads race during initialization as the operation above is idempotent.
}
if (stt != NULL) {
return (*stt)() ? os::YIELD_SWITCHED : os::YIELD_NONEREADY ;
} else {
Sleep (0) ;
}
return os::YIELD_UNKNOWN ;
}
void os::yield() { os::NakedYield(); }
void os::yield_all(int attempts) {
// Yields to all threads, including threads with lower priorities
Sleep(1);
}
// Win32 only gives you access to seven real priorities at a time,
// so we compress Java's ten down to seven. It would be better
// if we dynamically adjusted relative priorities.
int os::java_to_os_priority[MaxPriority + 1] = {
THREAD_PRIORITY_IDLE, // 0 Entry should never be used
THREAD_PRIORITY_LOWEST, // 1 MinPriority
THREAD_PRIORITY_LOWEST, // 2
THREAD_PRIORITY_BELOW_NORMAL, // 3
THREAD_PRIORITY_BELOW_NORMAL, // 4
THREAD_PRIORITY_NORMAL, // 5 NormPriority
THREAD_PRIORITY_NORMAL, // 6
THREAD_PRIORITY_ABOVE_NORMAL, // 7
THREAD_PRIORITY_ABOVE_NORMAL, // 8
THREAD_PRIORITY_HIGHEST, // 9 NearMaxPriority
THREAD_PRIORITY_HIGHEST // 10 MaxPriority
};
int prio_policy1[MaxPriority + 1] = {
THREAD_PRIORITY_IDLE, // 0 Entry should never be used
THREAD_PRIORITY_LOWEST, // 1 MinPriority
THREAD_PRIORITY_LOWEST, // 2
THREAD_PRIORITY_BELOW_NORMAL, // 3
THREAD_PRIORITY_BELOW_NORMAL, // 4
THREAD_PRIORITY_NORMAL, // 5 NormPriority
THREAD_PRIORITY_ABOVE_NORMAL, // 6
THREAD_PRIORITY_ABOVE_NORMAL, // 7
THREAD_PRIORITY_HIGHEST, // 8
THREAD_PRIORITY_HIGHEST, // 9 NearMaxPriority
THREAD_PRIORITY_TIME_CRITICAL // 10 MaxPriority
};
static int prio_init() {
// If ThreadPriorityPolicy is 1, switch tables
if (ThreadPriorityPolicy == 1) {
int i;
for (i = 0; i < MaxPriority + 1; i++) {
os::java_to_os_priority[i] = prio_policy1[i];
}
}
return 0;
}
OSReturn os::set_native_priority(Thread* thread, int priority) {
if (!UseThreadPriorities) return OS_OK;
bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
return ret ? OS_OK : OS_ERR;
}
OSReturn os::get_native_priority(const Thread* const thread, int* priority_ptr) {
if ( !UseThreadPriorities ) {
*priority_ptr = java_to_os_priority[NormPriority];
return OS_OK;
}
int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
assert(false, "GetThreadPriority failed");
return OS_ERR;
}
*priority_ptr = os_prio;
return OS_OK;
}
// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
void os::hint_no_preempt() {}
void os::interrupt(Thread* thread) {
assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
"possibility of dangling Thread pointer");
OSThread* osthread = thread->osthread();
osthread->set_interrupted(true);
// More than one thread can get here with the same value of osthread,
// resulting in multiple notifications. We do, however, want the store
// to interrupted() to be visible to other threads before we post
// the interrupt event.
OrderAccess::release();
SetEvent(osthread->interrupt_event());
// For JSR166: unpark after setting status
if (thread->is_Java_thread())
((JavaThread*)thread)->parker()->unpark();
ParkEvent * ev = thread->_ParkEvent ;
if (ev != NULL) ev->unpark() ;
}
bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
"possibility of dangling Thread pointer");
OSThread* osthread = thread->osthread();
bool interrupted;
interrupted = osthread->interrupted();
if (clear_interrupted == true) {
osthread->set_interrupted(false);
ResetEvent(osthread->interrupt_event());
} // Otherwise leave the interrupted state alone
return interrupted;
}
// Gets a pc (hint) for a running thread. Currently used only for profiling.
ExtendedPC os::get_thread_pc(Thread* thread) {
CONTEXT context;
context.ContextFlags = CONTEXT_CONTROL;
HANDLE handle = thread->osthread()->thread_handle();
#ifdef _M_IA64
assert(0, "Fix get_thread_pc");
return ExtendedPC(NULL);
#else
if (GetThreadContext(handle, &context)) {
#ifdef _M_AMD64
return ExtendedPC((address) context.Rip);
#else
return ExtendedPC((address) context.Eip);
#endif
} else {
return ExtendedPC(NULL);
}
#endif
}
// GetCurrentThreadId() returns DWORD
intx os::current_thread_id() { return GetCurrentThreadId(); }
static int _initial_pid = 0;
int os::current_process_id()
{
return (_initial_pid ? _initial_pid : _getpid());
}
int os::win32::_vm_page_size = 0;
int os::win32::_vm_allocation_granularity = 0;
int os::win32::_processor_type = 0;
// Processor level is not available on non-NT systems, use vm_version instead
int os::win32::_processor_level = 0;
julong os::win32::_physical_memory = 0;
size_t os::win32::_default_stack_size = 0;
intx os::win32::_os_thread_limit = 0;
volatile intx os::win32::_os_thread_count = 0;
bool os::win32::_is_nt = false;
bool os::win32::_is_windows_2003 = false;
void os::win32::initialize_system_info() {
SYSTEM_INFO si;
GetSystemInfo(&si);
_vm_page_size = si.dwPageSize;
_vm_allocation_granularity = si.dwAllocationGranularity;
_processor_type = si.dwProcessorType;
_processor_level = si.wProcessorLevel;
_processor_count = si.dwNumberOfProcessors;
MEMORYSTATUS ms;
// also returns dwAvailPhys (free physical memory bytes), dwTotalVirtual, dwAvailVirtual,
// dwMemoryLoad (% of memory in use)
GlobalMemoryStatus(&ms);
_physical_memory = ms.dwTotalPhys;
OSVERSIONINFO oi;
oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
GetVersionEx(&oi);
switch(oi.dwPlatformId) {
case VER_PLATFORM_WIN32_WINDOWS: _is_nt = false; break;
case VER_PLATFORM_WIN32_NT:
_is_nt = true;
{
int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
if (os_vers == 5002) {
_is_windows_2003 = true;
}
}
break;
default: fatal("Unknown platform");
}
_default_stack_size = os::current_stack_size();
assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
assert((_default_stack_size & (_vm_page_size - 1)) == 0,
"stack size not a multiple of page size");
initialize_performance_counter();
// Win95/Win98 scheduler bug work-around. The Win95/98 scheduler is
// known to deadlock the system if the VM issues thread operations at
// too high a frequency, e.g., changing priorities.
// The value 6000 seems to work well - no deadlocks have been noticed on the
// test programs that we have seen exhibit this problem.
if (!os::win32::is_nt()) {
StarvationMonitorInterval = 6000;
}
}
void os::win32::setmode_streams() {
_setmode(_fileno(stdin), _O_BINARY);
_setmode(_fileno(stdout), _O_BINARY);
_setmode(_fileno(stderr), _O_BINARY);
}
int os::message_box(const char* title, const char* message) {
int result = MessageBox(NULL, message, title,
MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
return result == IDYES;
}
int os::allocate_thread_local_storage() {
return TlsAlloc();
}
void os::free_thread_local_storage(int index) {
TlsFree(index);
}
void os::thread_local_storage_at_put(int index, void* value) {
TlsSetValue(index, value);
assert(thread_local_storage_at(index) == value, "Just checking");
}
void* os::thread_local_storage_at(int index) {
return TlsGetValue(index);
}
#ifndef PRODUCT
#ifndef _WIN64
// Helpers to check whether NX protection is enabled
int nx_exception_filter(_EXCEPTION_POINTERS *pex) {
if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
pex->ExceptionRecord->NumberParameters > 0 &&
pex->ExceptionRecord->ExceptionInformation[0] ==
EXCEPTION_INFO_EXEC_VIOLATION) {
return EXCEPTION_EXECUTE_HANDLER;
}
return EXCEPTION_CONTINUE_SEARCH;
}
void nx_check_protection() {
// If NX is enabled we'll get an exception calling into code on the stack
char code[] = { (char)0xC3 }; // ret
void *code_ptr = (void *)code;
__try {
__asm call code_ptr
} __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) {
tty->print_raw_cr("NX protection detected.");
}
}
#endif // !_WIN64
#endif // PRODUCT
// this is called _before_ the global arguments have been parsed
void os::init(void) {
_initial_pid = _getpid();
init_random(1234567);
win32::initialize_system_info();
win32::setmode_streams();
init_page_sizes((size_t) win32::vm_page_size());
// For better scalability on MP systems (must be called after initialize_system_info)
#ifndef PRODUCT
if (is_MP()) {
NoYieldsInMicrolock = true;
}
#endif
// This may be overridden later when argument processing is done.
FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation,
os::win32::is_windows_2003());
// Initialize main_process and main_thread
main_process = GetCurrentProcess(); // Remember main_process is a pseudo handle
if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
&main_thread, THREAD_ALL_ACCESS, false, 0)) {
fatal("DuplicateHandle failed\n");
}
main_thread_id = (int) GetCurrentThreadId();
}
// To install functions for atexit processing
extern "C" {
static void perfMemory_exit_helper() {
perfMemory_exit();
}
}
// this is called _after_ the global arguments have been parsed
jint os::init_2(void) {
// Allocate a single page and mark it as readable for safepoint polling
address polling_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READONLY);
guarantee( polling_page != NULL, "Reserve Failed for polling page");
address return_page = (address)VirtualAlloc(polling_page, os::vm_page_size(), MEM_COMMIT, PAGE_READONLY);
guarantee( return_page != NULL, "Commit Failed for polling page");
os::set_polling_page( polling_page );
#ifndef PRODUCT
if( Verbose && PrintMiscellaneous )
tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
#endif
if (!UseMembar) {
address mem_serialize_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READWRITE);
guarantee( mem_serialize_page != NULL, "Reserve Failed for memory serialize page");
return_page = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_READWRITE);
guarantee( return_page != NULL, "Commit Failed for memory serialize page");
os::set_memory_serialize_page( mem_serialize_page );
#ifndef PRODUCT
if(Verbose && PrintMiscellaneous)
tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
#endif
}
FLAG_SET_DEFAULT(UseLargePages, os::large_page_init());
// Setup Windows Exceptions
// On Itanium systems, Structured Exception Handling does not
// work since stack frames must be walkable by the OS. Since
// much of our code is dynamically generated, and we do not have
// proper unwind .xdata sections, the system simply exits
// rather than delivering the exception. To work around
// this we use Vectored Exceptions instead.
#ifdef _WIN64
if (UseVectoredExceptions) {
topLevelVectoredExceptionHandler = AddVectoredExceptionHandler( 1, topLevelExceptionFilter);
}
#endif
// for debugging float code generation bugs
if (ForceFloatExceptions) {
#ifndef _WIN64
static long fp_control_word = 0;
__asm { fstcw fp_control_word }
// see Intel PPro Manual, Vol. 2, p 7-16
const long precision = 0x20;
const long underflow = 0x10;
const long overflow = 0x08;
const long zero_div = 0x04;
const long denorm = 0x02;
const long invalid = 0x01;
fp_control_word |= invalid;
__asm { fldcw fp_control_word }
#endif
}
// Initialize HPI.
jint hpi_result = hpi::initialize();
if (hpi_result != JNI_OK) { return hpi_result; }
// If stack_commit_size is 0, windows will reserve the default size,
// but only commit a small portion of it.
size_t stack_commit_size = round_to(ThreadStackSize*K, os::vm_page_size());
size_t default_reserve_size = os::win32::default_stack_size();
size_t actual_reserve_size = stack_commit_size;
if (stack_commit_size < default_reserve_size) {
// If stack_commit_size == 0, we want this too
actual_reserve_size = default_reserve_size;
}
JavaThread::set_stack_size_at_create(stack_commit_size);
// Calculate the theoretical max. number of threads to guard against artificial
// out-of-memory situations, where all available address-space has been
// reserved by thread stacks.
assert(actual_reserve_size != 0, "Must have a stack");
// Calculate the thread limit at which we should start doing Virtual Memory
// banging. Currently this is when the threads have used all but 200Mb of space.
//
// TODO: consider performing a similar calculation for commit size instead
// of reserve size, since on a 64-bit platform we'll run into that more
// often than running out of virtual memory space. We can use the
// lower value of the two calculations as the os_thread_limit.
size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);
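// For example (illustrative numbers only): a 32-bit VM with a 1Mb per-thread
// reservation would get roughly (2048Mb - 200Mb) / 1Mb, i.e. about 1800
// threads, before the limit kicks in.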
// at exit methods are called in the reverse order of their registration.
// there is no limit to the number of functions registered. atexit does
// not set errno.
if (PerfAllowAtExitRegistration) {
// only register atexit functions if PerfAllowAtExitRegistration is set.
// atexit functions can be delayed until process exit time, which
// can be problematic for embedded VM situations. Embedded VMs should
// call DestroyJavaVM() to assure that VM resources are released.
// note: perfMemory_exit_helper atexit function may be removed in
// the future if the appropriate cleanup code can be added to the
// VM_Exit VMOperation's doit method.
if (atexit(perfMemory_exit_helper) != 0) {
warning("os::init_2 atexit(perfMemory_exit_helper) failed");
}
}
// initialize PSAPI or ToolHelp for fatal error handler
if (win32::is_nt()) _init_psapi();
else _init_toolhelp();
#ifndef _WIN64
// Print something if NX is enabled (win32 on AMD64)
NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
#endif
// initialize thread priority policy
prio_init();
if (UseNUMA && !ForceNUMA) {
UseNUMA = false; // Currently unsupported.
}
return JNI_OK;
}
// Mark the polling page as unreadable
void os::make_polling_page_unreadable(void) {
DWORD old_status;
if( !VirtualProtect((char *)_polling_page, os::vm_page_size(), PAGE_NOACCESS, &old_status) )
fatal("Could not disable polling page");
};
// Mark the polling page as readable
void os::make_polling_page_readable(void) {
DWORD old_status;
if( !VirtualProtect((char *)_polling_page, os::vm_page_size(), PAGE_READONLY, &old_status) )
fatal("Could not enable polling page");
};
int os::stat(const char *path, struct stat *sbuf) {
char pathbuf[MAX_PATH];
if (strlen(path) > MAX_PATH - 1) {
errno = ENAMETOOLONG;
return -1;
}
hpi::native_path(strcpy(pathbuf, path));
int ret = ::stat(pathbuf, sbuf);
if (sbuf != NULL && UseUTCFileTimestamp) {
// Fix for 6539723. st_mtime returned from stat() is dependent on
// the system timezone and so can return different values for the
// same file if/when daylight savings time changes. This adjustment
// makes sure the same timestamp is returned regardless of the TZ.
//
// See:
// http://msdn.microsoft.com/library/
// default.asp?url=/library/en-us/sysinfo/base/
// time_zone_information_str.asp
// and
// http://msdn.microsoft.com/library/default.asp?url=
// /library/en-us/sysinfo/base/settimezoneinformation.asp
//
// NOTE: there is an insidious bug here: If the timezone is changed
// after the call to stat() but before 'GetTimeZoneInformation()', then
// the adjustment we do here will be wrong and we'll return the wrong
// value (which will likely end up creating an invalid class data
// archive). Absent a better API for this, or some time zone locking
// mechanism, we'll have to live with this risk.
TIME_ZONE_INFORMATION tz;
DWORD tzid = GetTimeZoneInformation(&tz);
int daylightBias =
(tzid == TIME_ZONE_ID_DAYLIGHT) ? tz.DaylightBias : tz.StandardBias;
sbuf->st_mtime += (tz.Bias + daylightBias) * 60;
}
return ret;
}
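// Combine the two 32-bit halves of a FILETIME (100ns units) into one jlong.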
#define FT2INT64(ft) \
((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))
// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
// are used by JVM M&M and JVMTI to get user+sys or user CPU time
// of a thread.
//
// current_thread_cpu_time() and thread_cpu_time(Thread*) returns
// the fast estimate available on the platform.
// current_thread_cpu_time() is not optimized for Windows yet
jlong os::current_thread_cpu_time() {
// return user + sys since the cost is the same
return os::thread_cpu_time(Thread::current(), true /* user+sys */);
}
jlong os::thread_cpu_time(Thread* thread) {
// consistent with what current_thread_cpu_time() returns.
return os::thread_cpu_time(thread, true /* user+sys */);
}
jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
}
jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) {
// This code is copied from the classic VM -> hpi::sysThreadCPUTime
// If this function changes, os::is_thread_cpu_time_supported() should too
if (os::win32::is_nt()) {
FILETIME CreationTime;
FILETIME ExitTime;
FILETIME KernelTime;
FILETIME UserTime;
if ( GetThreadTimes(thread->osthread()->thread_handle(),
&CreationTime, &ExitTime, &KernelTime, &UserTime) == 0)
return -1;
else
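// GetThreadTimes reports time in 100ns units; multiply by 100 for nanoseconds.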
if (user_sys_cpu_time) {
return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
} else {
return FT2INT64(UserTime) * 100;
}
} else {
return (jlong) timeGetTime() * 1000000;
}
}
void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
info_ptr->max_value = ALL_64_BITS; // the max value -- all 64 bits
info_ptr->may_skip_backward = false; // GetThreadTimes returns absolute time
info_ptr->may_skip_forward = false; // GetThreadTimes returns absolute time
info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned
}
void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
info_ptr->max_value = ALL_64_BITS; // the max value -- all 64 bits
info_ptr->may_skip_backward = false; // GetThreadTimes returns absolute time
info_ptr->may_skip_forward = false; // GetThreadTimes returns absolute time
info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned
}
bool os::is_thread_cpu_time_supported() {
// see os::thread_cpu_time
if (os::win32::is_nt()) {
FILETIME CreationTime;
FILETIME ExitTime;
FILETIME KernelTime;
FILETIME UserTime;
if ( GetThreadTimes(GetCurrentThread(),
&CreationTime, &ExitTime, &KernelTime, &UserTime) == 0)
return false;
else
return true;
} else {
return false;
}
}
// Windows doesn't provide a loadavg primitive so this is stubbed out for now.
// It does have primitives (PDH API) to get CPU usage and run queue length.
// "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length"
// If we wanted to implement loadavg on Windows, we have a few options:
//
// a) Query CPU usage and run queue length and "fake" an answer by
// returning the CPU usage if it's under 100%, and the run queue
// length otherwise. It turns out that querying is pretty slow
// on Windows, on the order of 200 microseconds on a fast machine.
// Note that on Windows the CPU usage value is the % usage
// since the last time the API was called (and the first call
// returns 100%), so we'd have to deal with that as well.
//
// b) Sample the "fake" answer using a sampling thread and store
// the answer in a global variable. The call to loadavg would
// just return the value of the global, avoiding the slow query.
//
// c) Sample a better answer using exponential decay to smooth the
// value. This is basically the algorithm used by UNIX kernels.
//
// Note that sampling thread starvation could affect both (b) and (c).
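// Illustrative PDH sketch for option (a) (not used by the VM; assumes pdh.lib):
//   PDH_HQUERY query; PDH_HCOUNTER cpu;
//   PdhOpenQuery(NULL, 0, &query);
//   PdhAddCounter(query, "\\Processor(_Total)\\% Processor Time", 0, &cpu);
//   PdhCollectQueryData(query);   // first sample always reads as 100%
//   ...
//   PdhCollectQueryData(query);
//   PDH_FMT_COUNTERVALUE val;
//   PdhGetFormattedCounterValue(cpu, PDH_FMT_DOUBLE, NULL, &val);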
int os::loadavg(double loadavg[], int nelem) {
return -1;
}
// DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield()
bool os::dont_yield() {
return DontYieldALot;
}
// Is a (classpath) directory empty?
bool os::dir_is_empty(const char* path) {
WIN32_FIND_DATA fd;
HANDLE f = FindFirstFile(path, &fd);
if (f == INVALID_HANDLE_VALUE) {
return true;
}
FindClose(f);
return false;
}
// create binary file, rewriting existing file if required
int os::create_binary_file(const char* path, bool rewrite_existing) {
int oflags = _O_CREAT | _O_WRONLY | _O_BINARY;
if (!rewrite_existing) {
oflags |= _O_EXCL;
}
return ::open(path, oflags, _S_IREAD | _S_IWRITE);
}
// return current position of file pointer
jlong os::current_file_offset(int fd) {
return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
}
// move file pointer to the specified offset
jlong os::seek_to_file_offset(int fd, jlong offset) {
return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
}
// Map a block of memory.
char* os::map_memory(int fd, const char* file_name, size_t file_offset,
char *addr, size_t bytes, bool read_only,
bool allow_exec) {
HANDLE hFile;
char* base;
hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL,
OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
if (hFile == NULL) {
if (PrintMiscellaneous && Verbose) {
DWORD err = GetLastError();
tty->print_cr("CreateFile() failed: GetLastError->%ld.", err);
}
return NULL;
}
if (allow_exec) {
// CreateFileMapping/MapViewOfFileEx can't map executable memory
// unless it comes from a PE image (which the shared archive is not.)
// Even VirtualProtect refuses to give execute access to mapped memory
// that was not previously executable.
//
// Instead, stick the executable region in anonymous memory. Yuck.
// Penalty is that ~4 pages will not be shareable - in the future
// we might consider DLLizing the shared archive with a proper PE
// header so that mapping executable + sharing is possible.
base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE,
PAGE_READWRITE);
if (base == NULL) {
if (PrintMiscellaneous && Verbose) {
DWORD err = GetLastError();
tty->print_cr("VirtualAlloc() failed: GetLastError->%ld.", err);
}
CloseHandle(hFile);
return NULL;
}
DWORD bytes_read;
OVERLAPPED overlapped;
overlapped.Offset = (DWORD)file_offset;
overlapped.OffsetHigh = 0;
overlapped.hEvent = NULL;
// ReadFile guarantees that if the return value is true, the requested
// number of bytes were read before returning.
bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0;
if (!res) {
if (PrintMiscellaneous && Verbose) {
DWORD err = GetLastError();
tty->print_cr("ReadFile() failed: GetLastError->%ld.", err);
}
release_memory(base, bytes);
CloseHandle(hFile);
return NULL;
}
} else {
HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0,
NULL /*file_name*/);
if (hMap == NULL) {
if (PrintMiscellaneous && Verbose) {
DWORD err = GetLastError();
tty->print_cr("CreateFileMapping() failed: GetLastError->%ld.", err);
}
CloseHandle(hFile);
return NULL;
}
DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY;
base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset,
(DWORD)bytes, addr);
if (base == NULL) {
if (PrintMiscellaneous && Verbose) {
DWORD err = GetLastError();
tty->print_cr("MapViewOfFileEx() failed: GetLastError->%ld.", err);
}
CloseHandle(hMap);
CloseHandle(hFile);
return NULL;
}
if (CloseHandle(hMap) == 0) {
if (PrintMiscellaneous && Verbose) {
DWORD err = GetLastError();
tty->print_cr("CloseHandle(hMap) failed: GetLastError->%ld.", err);
}
CloseHandle(hFile);
return base;
}
}
if (allow_exec) {
DWORD old_protect;
DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;
if (!res) {
if (PrintMiscellaneous && Verbose) {
DWORD err = GetLastError();
tty->print_cr("VirtualProtect() failed: GetLastError->%ld.", err);
}
// Don't consider this a hard error, on IA32 even if the
// VirtualProtect fails, we should still be able to execute
CloseHandle(hFile);
return base;
}
}
if (CloseHandle(hFile) == 0) {
if (PrintMiscellaneous && Verbose) {
DWORD err = GetLastError();
tty->print_cr("CloseHandle(hFile) failed: GetLastError->%ld.", err);
}
return base;
}
return base;
}
// Remap a block of memory.
char* os::remap_memory(int fd, const char* file_name, size_t file_offset,
char *addr, size_t bytes, bool read_only,
bool allow_exec) {
// This OS does not allow existing memory maps to be remapped so we
// have to unmap the memory before we remap it.
if (!os::unmap_memory(addr, bytes)) {
return NULL;
}
// There is a very small theoretical window between the unmap_memory()
// call above and the map_memory() call below where a thread in native
// code may be able to access an address that is no longer mapped.
return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
allow_exec);
}
// Unmap a block of memory.
// Returns true=success, otherwise false.
bool os::unmap_memory(char* addr, size_t bytes) {
BOOL result = UnmapViewOfFile(addr);
if (result == 0) {
if (PrintMiscellaneous && Verbose) {
DWORD err = GetLastError();
tty->print_cr("UnmapViewOfFile() failed: GetLastError->%ld.", err);
}
return false;
}
return true;
}
void os::pause() {
char filename[MAX_PATH];
if (PauseAtStartupFile && PauseAtStartupFile[0]) {
jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
} else {
jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
}
int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
if (fd != -1) {
struct stat buf;
close(fd);
while (::stat(filename, &buf) == 0) {
Sleep(100);
}
} else {
jio_fprintf(stderr,
"Could not open pause file '%s', continuing immediately.\n", filename);
}
}
// An Event wraps a win32 "CreateEvent" kernel handle.
//
// We have a number of choices regarding "CreateEvent" win32 handle leakage:
//
// 1: When a thread dies return the Event to the EventFreeList, clear the ParkHandle
// field, and call CloseHandle() on the win32 event handle. Unpark() would
// need to be modified to tolerate finding a NULL (invalid) win32 event handle.
// In addition, an unpark() operation might fetch the handle field, but the
// event could recycle between the fetch and the SetEvent() operation.
// SetEvent() would either fail because the handle was invalid, or inadvertently work,
// as the win32 handle value had been recycled. In an ideal world calling SetEvent()
// on a stale but recycled handle would be harmless, but in practice this might
// confuse other non-Sun code, so it's not a viable approach.
//
// 2: Once a win32 event handle is associated with an Event, it remains associated
// with the Event. The event handle is never closed. This could be construed
// as handle leakage, but only up to the maximum # of threads that have been extant
// at any one time. This shouldn't be an issue, as Windows platforms typically
// permit a process to have hundreds of thousands of open handles.
//
// 3: Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
// and release unused handles.
//
// 4: Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
// It's not clear, however, that we wouldn't be trading one type of leak for another.
//
// 5. Use an RCU-like mechanism (Read-Copy Update).
// Or perhaps something similar to Maged Michael's "Hazard pointers".
//
// We use (2).
//
// TODO-FIXME:
// 1. Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
// 2. Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
// to recover from (or at least detect) the dreaded Windows 841176 bug.
// 3. Collapse the interrupt_event, the JSR166 parker event, and the objectmonitor ParkEvent
// into a single win32 CreateEvent() handle.
//
// _Event transitions in park()
// -1 => -1 : illegal
// 1 => 0 : pass - return immediately
// 0 => -1 : block
//
// _Event serves as a restricted-range semaphore :
// -1 : thread is blocked
// 0 : neutral - thread is running or ready
// 1 : signaled - thread is running or ready
//
// Another possible encoding of _Event would be
// with explicit "PARKED" and "SIGNALED" bits.
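// Illustrative walk-through of the encoding above (hedged; thread names T and
// U are hypothetical, with T the thread that owns the event):
//   initial state   : _Event == 0
//   T: park()       : CAS 0 -> -1, then block in WaitForSingleObject()
//   U: unpark()     : CAS -1 -> 0; v < 0 so SetEvent() wakes T, which then
//                     stores _Event = 0 and returns
//   U: unpark()     : CAS 0 -> 1 (no SetEvent() needed; T is running)
//   T: park()       : CAS 1 -> 0; v != 0 so return immediately ("pass")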
int os::PlatformEvent::park (jlong Millis) {
guarantee (_ParkHandle != NULL , "Invariant") ;
guarantee (Millis > 0 , "Invariant") ;
int v ;
// CONSIDER: defer assigning a CreateEvent() handle to the Event until
// the initial park() operation.
for (;;) {
v = _Event ;
if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
}
guarantee ((v == 0) || (v == 1), "invariant") ;
if (v != 0) return OS_OK ;
// Do this the hard way by blocking ...
// TODO: consider a brief spin here, gated on the success of recent
// spin attempts by this thread.
//
// We decompose long timeouts into series of shorter timed waits.
// Evidently large timeout values passed to WaitForSingleObject() are problematic on some
// versions of Windows. See EventWait() for details. This may be superstition. Or not.
// We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
// with os::javaTimeNanos(). Furthermore, we assume that spurious returns from
// ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
// to happen early in the wait interval. Specifically, after a spurious wakeup (rv ==
// WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
// for the already waited time. This policy does not admit any new outcomes.
// In the future, however, we might want to track the accumulated wait time and
// adjust Millis accordingly if we encounter a spurious wakeup.
const int MAXTIMEOUT = 0x10000000 ;
DWORD rv = WAIT_TIMEOUT ;
while (_Event < 0 && Millis > 0) {
DWORD prd = Millis ; // set prd = MIN (Millis, MAXTIMEOUT)
if (Millis > MAXTIMEOUT) {
prd = MAXTIMEOUT ;
}
rv = ::WaitForSingleObject (_ParkHandle, prd) ;
assert (rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed") ;
if (rv == WAIT_TIMEOUT) {
Millis -= prd ;
}
}
v = _Event ;
_Event = 0 ;
OrderAccess::fence() ;
// If we encounter a nearly simultaneous timeout expiry and unpark()
// we return OS_OK indicating we awoke via unpark().
// Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
return (v >= 0) ? OS_OK : OS_TIMEOUT ;
}
void os::PlatformEvent::park () {
guarantee (_ParkHandle != NULL, "Invariant") ;
// Invariant: Only the thread associated with the Event/PlatformEvent
// may call park().
int v ;
for (;;) {
v = _Event ;
if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
}
guarantee ((v == 0) || (v == 1), "invariant") ;
if (v != 0) return ;
// Do this the hard way by blocking ...
// TODO: consider a brief spin here, gated on the success of recent
// spin attempts by this thread.
while (_Event < 0) {
DWORD rv = ::WaitForSingleObject (_ParkHandle, INFINITE) ;
assert (rv == WAIT_OBJECT_0, "WaitForSingleObject failed") ;
}
// Usually we'll find _Event == 0 at this point, but as
// an optional optimization we clear it, just in case
// multiple unpark() operations drove _Event up to 1.
_Event = 0 ;
OrderAccess::fence() ;
guarantee (_Event >= 0, "invariant") ;
}
void os::PlatformEvent::unpark() {
guarantee (_ParkHandle != NULL, "Invariant") ;
int v ;
for (;;) {
v = _Event ; // Increment _Event if it's < 1.
if (v > 0) {
// If it's already signaled just return.
// The LD of _Event could have been reordered or be satisfied
// by a read-aside from this processor's write buffer.
// To avoid problems execute a barrier and then
// ratify the value. A degenerate CAS() would also work.
// Viz., CAS (v+0, &_Event, v) == v).
OrderAccess::fence() ;
if (_Event == v) return ;
continue ;
}
if (Atomic::cmpxchg (v+1, &_Event, v) == v) break ;
}
if (v < 0) {
::SetEvent (_ParkHandle) ;
}
}
// JSR166
// -------------------------------------------------------
/*
* The Windows implementation of Park is very straightforward: Basic
* operations on Win32 Events turn out to have the right semantics to
* use them directly. We opportunistically reuse the event inherited
* from Monitor.
*/
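// Worked examples of the time decoding below (hedged; the values are
// illustrative only):
//   park(false, 0)        -> time == 0, wait with no timeout (INFINITE)
//   park(false, 2500000)  -> relative 2.5 ms in nanos, coarsened to 2 ms
//   park(false, 500000)   -> relative 0.5 ms, rounded up to the 1 ms minimum
//   park(true,  deadline) -> absolute deadline in millis; converted to a
//                            relative wait by subtracting os::javaTimeMillis()
//   park(false, -1)       -> negative time, return without waiting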
void Parker::park(bool isAbsolute, jlong time) {
guarantee (_ParkEvent != NULL, "invariant") ;
// First, demultiplex/decode time arguments
if (time < 0) { // don't wait
return;
}
else if (time == 0) {
time = INFINITE;
}
else if (isAbsolute) {
time -= os::javaTimeMillis(); // convert to relative time
if (time <= 0) // already elapsed
return;
}
else { // relative
time /= 1000000; // Must coarsen from nanos to millis
if (time == 0) // Wait for the minimal time unit if zero
time = 1;
}
JavaThread* thread = (JavaThread*)(Thread::current());
assert(thread->is_Java_thread(), "Must be JavaThread");
JavaThread *jt = (JavaThread *)thread;
// Don't wait if interrupted or already triggered
if (Thread::is_interrupted(thread, false) ||
WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
ResetEvent(_ParkEvent);
return;
}
else {
ThreadBlockInVM tbivm(jt);
OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
jt->set_suspend_equivalent();
WaitForSingleObject(_ParkEvent, time);
ResetEvent(_ParkEvent);
// If externally suspended while waiting, re-suspend
if (jt->handle_special_suspend_equivalent_condition()) {
jt->java_suspend_self();
}
}
}
void Parker::unpark() {
guarantee (_ParkEvent != NULL, "invariant") ;
SetEvent(_ParkEvent);
}
// Run the specified command in a separate process. Return its exit value,
// or -1 on failure (e.g. can't create a new process).
int os::fork_and_exec(char* cmd) {
STARTUPINFO si;
PROCESS_INFORMATION pi;
memset(&si, 0, sizeof(si));
si.cb = sizeof(si);
memset(&pi, 0, sizeof(pi));
BOOL rslt = CreateProcess(NULL, // executable name - use command line
cmd, // command line
NULL, // process security attribute
NULL, // thread security attribute
TRUE, // inherits system handles
0, // no creation flags
NULL, // use parent's environment block
NULL, // use parent's starting directory
&si, // (in) startup information
&pi); // (out) process information
if (rslt) {
// Wait until child process exits.
WaitForSingleObject(pi.hProcess, INFINITE);
DWORD exit_code;
GetExitCodeProcess(pi.hProcess, &exit_code);
// Close process and thread handles.
CloseHandle(pi.hProcess);
CloseHandle(pi.hThread);
return (int)exit_code;
} else {
return -1;
}
}
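// Illustrative usage sketch (hedged): the error-reporting path can run a
// user-supplied command (for example the value of -XX:OnError) through
// fork_and_exec() and inspect its exit status. The command string is passed
// as a non-const char* because CreateProcess() takes a writable command line:
//
//   char cmd[] = "cmd /c echo hs_err";   // hypothetical command line
//   int status = os::fork_and_exec(cmd);
//   if (status == -1) {
//     // CreateProcess() failed; nothing was run
//   }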
//--------------------------------------------------------------------------------------------------
// Non-product code
static int mallocDebugIntervalCounter = 0;
static int mallocDebugCounter = 0;
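// check_heap() is throttled by the MallocVerifyStart and MallocVerifyInterval
// flags (tracked with the counters above): no verification is attempted until
// the call count reaches MallocVerifyStart, and thereafter a full heap walk
// runs only once every MallocVerifyInterval calls, or whenever force is true.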
bool os::check_heap(bool force) {
if (++mallocDebugCounter < MallocVerifyStart && !force) return true;
if (++mallocDebugIntervalCounter >= MallocVerifyInterval || force) {
// Note: HeapValidate executes two hardware breakpoints when it finds something
// wrong; at these points, eax contains the address of the offending block (I think).
// To get to the explicit error message(s) below, just continue twice.
HANDLE heap = GetProcessHeap();
{ HeapLock(heap);
PROCESS_HEAP_ENTRY phe;
phe.lpData = NULL;
while (HeapWalk(heap, &phe) != 0) {
if ((phe.wFlags & PROCESS_HEAP_ENTRY_BUSY) &&
!HeapValidate(heap, 0, phe.lpData)) {
tty->print_cr("C heap has been corrupted (time: %d allocations)", mallocDebugCounter);
tty->print_cr("corrupted block near address %#x, length %d", phe.lpData, phe.cbData);
fatal("corrupted C heap");
}
}
int err = GetLastError();
if (err != ERROR_NO_MORE_ITEMS && err != ERROR_CALL_NOT_IMPLEMENTED) {
fatal1("heap walk aborted with error %d", err);
}
HeapUnlock(heap);
}
mallocDebugIntervalCounter = 0;
}
return true;
}
#ifndef PRODUCT
bool os::find(address addr) {
// Nothing yet
return false;
}
#endif
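// Hedged background note: serialize_fault_filter() supports the cheap
// cross-thread memory-serialization scheme used when explicit membars are
// avoided. Java threads write to a shared "memory serialize page" during
// state transitions; when the VM protects that page to force serialization,
// a writing thread takes an access violation, which this filter recognizes
// (via os::is_memory_serialize_page) and simply resumes.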
LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) {
DWORD exception_code = e->ExceptionRecord->ExceptionCode;
if ( exception_code == EXCEPTION_ACCESS_VIOLATION ) {
JavaThread* thread = (JavaThread*)ThreadLocalStorage::get_thread_slow();
PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord;
address addr = (address) exceptionRecord->ExceptionInformation[1];
if (os::is_memory_serialize_page(thread, addr))
return EXCEPTION_CONTINUE_EXECUTION;
}
return EXCEPTION_CONTINUE_SEARCH;
}
static int getLastErrorString(char *buf, size_t len)
{
long errval;
if ((errval = GetLastError()) != 0)
{
/* DOS error */
size_t n = (size_t)FormatMessage(
FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
NULL,
errval,
0,
buf,
(DWORD)len,
NULL);
if (n > 3) {
/* Drop final '.', CR, LF */
if (buf[n - 1] == '\n') n--;
if (buf[n - 1] == '\r') n--;
if (buf[n - 1] == '.') n--;
buf[n] = '\0';
}
return (int)n;
}
if (errno != 0)
{
/* C runtime error that has no corresponding DOS error code */
const char *s = strerror(errno);
size_t n = strlen(s);
if (n >= len) n = len - 1;
strncpy(buf, s, n);
buf[n] = '\0';
return (int)n;
}
return 0;
}
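// Illustrative usage sketch (hedged; the failing call and buffer size are
// hypothetical). Callers typically invoke getLastErrorString() immediately
// after a failed Win32 or CRT call to obtain a printable message:
//
//   if (!DeleteFile(path)) {
//     char msg[256];
//     getLastErrorString(msg, sizeof(msg));
//     tty->print_cr("DeleteFile failed: %s", msg);
//   }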