| |
| /*--------------------------------------------------------------------*/ |
| /*--- Darwin-specific syscalls, etc. syswrap-darwin.c ---*/ |
| /*--------------------------------------------------------------------*/ |
| |
| /* |
| This file is part of Valgrind, a dynamic binary instrumentation |
| framework. |
| |
| Copyright (C) 2005-2013 Apple Inc. |
| Greg Parker gparker@apple.com |
| |
| This program is free software; you can redistribute it and/or |
| modify it under the terms of the GNU General Public License as |
| published by the Free Software Foundation; either version 2 of the |
| License, or (at your option) any later version. |
| |
| This program is distributed in the hope that it will be useful, but |
| WITHOUT ANY WARRANTY; without even the implied warranty of |
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| General Public License for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with this program; if not, write to the Free Software |
| Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA |
| 02111-1307, USA. |
| |
| The GNU General Public License is contained in the file COPYING. |
| */ |
| |
| #if defined(VGO_darwin) |
| |
| #include "pub_core_basics.h" |
| #include "pub_core_vki.h" |
| #include "pub_core_vkiscnums.h" |
| #include "pub_core_libcsetjmp.h" // to keep _threadstate.h happy |
| #include "pub_core_threadstate.h" |
| #include "pub_core_aspacemgr.h" |
| #include "pub_core_xarray.h" |
| #include "pub_core_clientstate.h" |
| #include "pub_core_debuglog.h" |
| #include "pub_core_debuginfo.h" // VG_(di_notify_*) |
| #include "pub_core_transtab.h" // VG_(discard_translations) |
| #include "pub_core_libcbase.h" |
| #include "pub_core_libcassert.h" |
| #include "pub_core_libcfile.h" |
| #include "pub_core_libcprint.h" |
| #include "pub_core_libcproc.h" |
| #include "pub_core_libcsignal.h" |
| #include "pub_core_machine.h" // VG_(get_SP) |
| #include "pub_core_mallocfree.h" |
| #include "pub_core_options.h" |
| #include "pub_core_oset.h" |
| #include "pub_core_scheduler.h" |
| #include "pub_core_sigframe.h" // For VG_(sigframe_destroy)() |
| #include "pub_core_signals.h" |
| #include "pub_core_syscall.h" |
| #include "pub_core_syswrap.h" |
| #include "pub_core_tooliface.h" |
| #include "pub_core_wordfm.h" |
| |
| #include "priv_types_n_macros.h" |
| #include "priv_syswrap-generic.h" /* for decls of generic wrappers */ |
| #include "priv_syswrap-darwin.h" /* for decls of darwin-ish wrappers */ |
| #include "priv_syswrap-main.h" |
| |
| /* --- !!! --- EXTERNAL HEADERS start --- !!! --- */ |
| #include <mach/mach.h> |
| #include <mach/mach_vm.h> |
| #include <semaphore.h> |
| /* --- !!! --- EXTERNAL HEADERS end --- !!! --- */ |
| |
// Mach message headers name ports from the sender's point of view; these
// aliases let the wrappers speak in terms of "request" and "reply" ports.
#define msgh_request_port msgh_remote_port
#define msgh_reply_port msgh_local_port
// Maximum bootstrap-server service-name length (128, presumably matching
// the system's bootstrap.h -- TODO confirm).
#define BOOTSTRAP_MAX_NAME_LEN 128
typedef HChar name_t[BOOTSTRAP_MAX_NAME_LEN];

// Address type used in MIG message layouts.
// NOTE(review): fixed at 64 bits regardless of VG_WORDSIZE -- confirm intended.
typedef uint64_t mig_addr_t;


// Saved ports: Mach ports stashed for Valgrind's own use.
static mach_port_t vg_host_port = 0;       // the host port
static mach_port_t vg_task_port = 0;       // our own task port
static mach_port_t vg_bootstrap_port = 0;  // the bootstrap server port
| |
| // Run a thread from beginning to end and return the thread's |
| // scheduler-return-code. |
/* Run a thread from beginning to end: acquire the big lock, hand the
   thread to the scheduler, and return the scheduler's return code.
   On exit the big lock is still held by this thread. */
static VgSchedReturnCode thread_wrapper(Word /*ThreadId*/ tidW)
{
   VgSchedReturnCode ret;
   ThreadId tid = (ThreadId)tidW;
   ThreadState* tst = VG_(get_ThreadState)(tid);

   VG_(debugLog)(1, "syswrap-darwin",
                 "thread_wrapper(tid=%lld): entry\n",
                 (ULong)tidW);

   // New threads must arrive in VgTs_Init; anything else is a stale slot.
   vg_assert(tst->status == VgTs_Init);

   /* make sure we get the CPU lock before doing anything significant */
   VG_(acquire_BigLock)(tid, "thread_wrapper");

   if (0)
      VG_(printf)("thread tid %d started: stack = %p\n",
                  tid, &tid);

   /* Make sure error reporting is enabled in the new thread. */
   tst->err_disablement_level = 0;

   VG_TRACK(pre_thread_first_insn, tid);

   // Record the kernel's identifiers for this thread and its process.
   tst->os_state.lwpid = VG_(gettid)();
   tst->os_state.threadgroup = VG_(getpid)();

   /* Thread created with all signals blocked; scheduler will set the
      appropriate mask */

   // Run the client thread's code until it exits or dies.
   ret = VG_(scheduler)(tid);

   vg_assert(VG_(is_exiting)(tid));

   vg_assert(tst->status == VgTs_Runnable);
   vg_assert(VG_(is_running_thread)(tid));

   VG_(debugLog)(1, "syswrap-darwin",
                 "thread_wrapper(tid=%lld): done\n",
                 (ULong)tidW);

   /* Return to caller, still holding the lock. */
   return ret;
}
| |
| |
| |
| /* Allocate a stack for this thread, if it doesn't already have one. |
| Returns the initial stack pointer value to use, or 0 if allocation |
| failed. */ |
| |
| Addr allocstack ( ThreadId tid ) |
| { |
| ThreadState* tst = VG_(get_ThreadState)(tid); |
| VgStack* stack; |
| Addr initial_SP; |
| |
| /* Either the stack_base and stack_init_SP are both zero (in which |
| case a stack hasn't been allocated) or they are both non-zero, |
| in which case it has. */ |
| |
| if (tst->os_state.valgrind_stack_base == 0) |
| vg_assert(tst->os_state.valgrind_stack_init_SP == 0); |
| |
| if (tst->os_state.valgrind_stack_base != 0) |
| vg_assert(tst->os_state.valgrind_stack_init_SP != 0); |
| |
| /* If no stack is present, allocate one. */ |
| |
| if (tst->os_state.valgrind_stack_base == 0) { |
| stack = VG_(am_alloc_VgStack)( &initial_SP ); |
| if (stack) { |
| tst->os_state.valgrind_stack_base = (Addr)stack; |
| tst->os_state.valgrind_stack_init_SP = initial_SP; |
| } |
| } |
| |
| VG_(debugLog)( 2, "syswrap-darwin", "stack for tid %d at %p; init_SP=%p\n", |
| tid, |
| (void*)tst->os_state.valgrind_stack_base, |
| (void*)tst->os_state.valgrind_stack_init_SP ); |
| |
| vg_assert(VG_IS_32_ALIGNED(tst->os_state.valgrind_stack_init_SP)); |
| |
| return tst->os_state.valgrind_stack_init_SP; |
| } |
| |
| |
| void find_stack_segment(ThreadId tid, Addr sp) |
| { |
| ML_(guess_and_register_stack) (sp, VG_(get_ThreadState)(tid)); |
| } |
| |
| |
| /* Run a thread all the way to the end, then do appropriate exit actions |
| (this is the last-one-out-turn-off-the-lights bit). |
| */ |
| static void run_a_thread_NORETURN ( Word tidW ) |
| { |
| Int c; |
| VgSchedReturnCode src; |
| ThreadId tid = (ThreadId)tidW; |
| ThreadState* tst; |
| |
| VG_(debugLog)(1, "syswrap-darwin", |
| "run_a_thread_NORETURN(tid=%lld): pre-thread_wrapper\n", |
| (ULong)tidW); |
| |
| tst = VG_(get_ThreadState)(tid); |
| vg_assert(tst); |
| |
| /* Run the thread all the way through. */ |
| src = thread_wrapper(tid); |
| |
| VG_(debugLog)(1, "syswrap-darwin", |
| "run_a_thread_NORETURN(tid=%lld): post-thread_wrapper\n", |
| (ULong)tidW); |
| |
| c = VG_(count_living_threads)(); |
| vg_assert(c >= 1); /* stay sane */ |
| |
| // Tell the tool this thread is exiting |
| VG_TRACK( pre_thread_ll_exit, tid ); |
| |
| /* If the thread is exiting with errors disabled, complain loudly; |
| doing so is bad (does the user know this has happened?) Also, |
| in all cases, be paranoid and clear the flag anyway so that the |
| thread slot is safe in this respect if later reallocated. This |
| should be unnecessary since the flag should be cleared when the |
| slot is reallocated, in thread_wrapper(). */ |
| if (tst->err_disablement_level > 0) { |
| VG_(umsg)( |
| "WARNING: exiting thread has error reporting disabled.\n" |
| "WARNING: possibly as a result of some mistake in the use\n" |
| "WARNING: of the VALGRIND_DISABLE_ERROR_REPORTING macros.\n" |
| ); |
| VG_(debugLog)( |
| 1, "syswrap-linux", |
| "run_a_thread_NORETURN(tid=%lld): " |
| "WARNING: exiting thread has err_disablement_level = %u\n", |
| (ULong)tidW, tst->err_disablement_level |
| ); |
| } |
| tst->err_disablement_level = 0; |
| |
| if (c == 1) { |
| |
| VG_(debugLog)(1, "syswrap-darwin", |
| "run_a_thread_NORETURN(tid=%lld): " |
| "last one standing\n", |
| (ULong)tidW); |
| |
| /* We are the last one standing. Keep hold of the lock and |
| carry on to show final tool results, then exit the entire system. |
| Use the continuation pointer set at startup in m_main. */ |
| ( * VG_(address_of_m_main_shutdown_actions_NORETURN) ) (tid, src); |
| |
| } else { |
| |
| mach_msg_header_t msg; |
| |
| VG_(debugLog)(1, "syswrap-darwin", |
| "run_a_thread_NORETURN(tid=%lld): " |
| "not last one standing\n", |
| (ULong)tidW); |
| |
| /* OK, thread is dead, but others still exist. Just exit. */ |
| |
| /* This releases the run lock */ |
| VG_(exit_thread)(tid); |
| vg_assert(tst->status == VgTs_Zombie); |
| |
| /* tid is now invalid. */ |
| |
| // GrP fixme exit race |
| msg.msgh_bits = MACH_MSGH_BITS(17, MACH_MSG_TYPE_MAKE_SEND_ONCE); |
| msg.msgh_request_port = VG_(gettid)(); |
| msg.msgh_reply_port = 0; |
| msg.msgh_id = 3600; // thread_terminate |
| |
| tst->status = VgTs_Empty; |
| // GrP fixme race here! new thread may claim this V thread stack |
| // before we get out here! |
| // GrP fixme use bsdthread_terminate for safe cleanup? |
| mach_msg(&msg, MACH_SEND_MSG|MACH_MSG_OPTION_NONE, |
| sizeof(msg), 0, 0, MACH_MSG_TIMEOUT_NONE, 0); |
| |
| // DDD: This is reached sometimes on none/tests/manythreads, maybe |
| // because of the race above. |
| VG_(core_panic)("Thread exit failed?\n"); |
| } |
| |
| /*NOTREACHED*/ |
| vg_assert(0); |
| } |
| |
| |
| /* Allocate a stack for the main thread, and run it all the way to the |
| end. Although we already have a working VgStack |
| (VG_(interim_stack)) it's better to allocate a new one, so that |
| overflow detection works uniformly for all threads. |
| */ |
| void VG_(main_thread_wrapper_NORETURN)(ThreadId tid) |
| { |
| Addr sp; |
| VG_(debugLog)(1, "syswrap-darwin", |
| "entering VG_(main_thread_wrapper_NORETURN)\n"); |
| |
| sp = allocstack(tid); |
| |
| /* If we can't even allocate the first thread's stack, we're hosed. |
| Give up. */ |
| vg_assert2(sp != 0, "Cannot allocate main thread's stack."); |
| |
| /* shouldn't be any other threads around yet */ |
| vg_assert( VG_(count_living_threads)() == 1 ); |
| |
| call_on_new_stack_0_1( |
| (Addr)sp, /* stack */ |
| 0, /*bogus return address*/ |
| run_a_thread_NORETURN, /* fn to call */ |
| (Word)tid /* arg to give it */ |
| ); |
| |
| /*NOTREACHED*/ |
| vg_assert(0); |
| } |
| |
| |
| void start_thread_NORETURN ( Word arg ) |
| { |
| ThreadState* tst = (ThreadState*)arg; |
| ThreadId tid = tst->tid; |
| |
| run_a_thread_NORETURN ( (Word)tid ); |
| /*NOTREACHED*/ |
| vg_assert(0); |
| } |
| |
| |
/* Architecture-specific per-thread cleanup at thread exit.  Nothing to
   do on Darwin. */
void VG_(cleanup_thread) ( ThreadArchState* arch )
{
}
| |
| |
| /* --------------------------------------------------------------------- |
| Message reporting, with duplicate removal |
| ------------------------------------------------------------------ */ |
| |
/* Maps message string -> number of times seen; used by log_decaying()
   to rate-limit duplicate messages.  Created lazily on first use. */
static WordFM* decaying_string_table = NULL; /* HChar* -> UWord */

/* Key comparator for the table: keys are NUL-terminated strings,
   compared by content. */
static Word decaying_string_table_cmp ( UWord s1, UWord s2 ) {
   return (Word)VG_(strcmp)( (HChar*)s1, (HChar*)s2 );
}
| |
/* Emit a formatted message via VG_(dmsg), with duplicate decay: the
   Nth occurrence of an identical message is only printed when N is a
   power of two (with a "repeated N times" suffix).  The formatted text
   is truncated to fit a 256-byte buffer. */
static void log_decaying ( const HChar* format, ... ) PRINTF_CHECK(1, 2);
static void log_decaying ( const HChar* format, ... )
{
   // get the message into a stack-allocated string.
   HChar buf[256];
   VG_(memset)(buf, 0, sizeof(buf));
   va_list vargs;
   va_start(vargs,format);
   (void) VG_(vsnprintf)(buf, sizeof(buf), format, vargs);
   va_end(vargs);
   buf[sizeof(buf)-1] = 0;

   // Now see if it already exists in the table of strings that we have.
   if (!decaying_string_table) {
      // First use: create the table.
      decaying_string_table
         = VG_(newFM)( VG_(malloc), "syswrap-darwin.pd.1",
                       VG_(free), decaying_string_table_cmp );
   }

   const HChar* key = NULL;
   UWord val = 0;
   if (!VG_(lookupFM)(decaying_string_table,
                      (UWord*)&key, &val, (UWord)&buf[0])) {
      // We haven't seen this string before, so strdup it and add
      // it to the table.
      vg_assert(key == NULL && val == 0);
      key = VG_(strdup)("syswrap-darwin.pd.2", buf);
      VG_(addToFM)(decaying_string_table, (UWord)key, (UWord)0);
   }

   // |key| now points at the table's own copy, never at our stack buffer.
   vg_assert(key != NULL && key != &buf[0]);

   // So, finally, |key| is in the tree, and |val| is what it is
   // currently associated with. Increment that counter.
   val++;
   // addToFM returns True when the key was already present -- which it
   // must be here, since we just ensured that.
   Bool b = VG_(addToFM)(decaying_string_table, (UWord)key, (UWord)val);
   vg_assert(b);

   // Print only when the count is a power of two (VG_(log2) returns -1
   // otherwise).
   if (-1 != VG_(log2)( (UInt)val )) {
      if (val == 1)
         VG_(dmsg)("%s\n", key);
      else
         VG_(dmsg)("%s (repeated %lu times)\n", key, val);
   }
}
| |
| |
| /* --------------------------------------------------------------------- |
| Mach port tracking (based on syswrap-generic's fd tracker) |
| ------------------------------------------------------------------ */ |
| |
/* One of these is allocated for each open port.  Records live in a
   doubly-linked list headed by |allocated_ports|. */
typedef struct OpenPort
{
   mach_port_t port;
   mach_port_type_t type; /* right type(s) */
   Int send_count; /* number of send rights */
   HChar *name; /* bootstrap name or NULL */
   ExeContext *where; /* first allocation only */
   struct OpenPort *next, *prev; /* list links */
} OpenPort;

// strlen("0x12345678")
#define PORT_STRLEN (2+2*sizeof(mach_port_t))

/* List of allocated ports (head of the doubly-linked list). */
static OpenPort *allocated_ports;

/* Count of open ports. */
static Int allocated_port_count = 0;
| |
| /* Create an entry for |port|, with no other info. Assumes it doesn't |
| already exist. */ |
| static void port_create_vanilla(mach_port_t port) |
| { |
| OpenPort* op |
| = VG_(calloc)("syswrap-darwin.port_create_vanilla", sizeof(OpenPort), 1); |
| op->port = port; |
| /* Add it to the list. */ |
| op->next = allocated_ports; |
| if (allocated_ports) allocated_ports->prev = op; |
| allocated_ports = op; |
| allocated_port_count++; |
| } |
| |
| __attribute__((unused)) |
| static Bool port_exists(mach_port_t port) |
| { |
| OpenPort *i; |
| |
| /* Check to see if this port is already open. */ |
| i = allocated_ports; |
| while (i) { |
| if (i->port == port) { |
| return True; |
| } |
| i = i->next; |
| } |
| |
| return False; |
| } |
| |
| static OpenPort *info_for_port(mach_port_t port) |
| { |
| OpenPort *i; |
| if (!port) return NULL; |
| |
| i = allocated_ports; |
| while (i) { |
| if (i->port == port) { |
| return i; |
| } |
| i = i->next; |
| } |
| |
| return NULL; |
| } |
| |
| |
| // Give a port a name, without changing its refcount |
| // GrP fixme don't override name if it already has a specific one |
| __private_extern__ void assign_port_name(mach_port_t port, const HChar *name) |
| { |
| OpenPort *i; |
| if (!port) return; |
| vg_assert(name); |
| |
| i = info_for_port(port); |
| vg_assert(i); |
| |
| if (i->name) VG_(free)(i->name); |
| i->name = |
| VG_(malloc)("syswrap-darwin.mach-port-name", |
| VG_(strlen)(name) + PORT_STRLEN + 1); |
| VG_(sprintf)(i->name, name, port); |
| } |
| |
| |
| // Return the name of the given port or "UNKNOWN 0x1234" if not known. |
| static const HChar *name_for_port(mach_port_t port) |
| { |
| static HChar buf[8 + PORT_STRLEN + 1]; |
| OpenPort *i; |
| |
| // hack |
| if (port == VG_(gettid)()) return "mach_thread_self()"; |
| if (port == 0) return "NULL"; |
| |
| i = allocated_ports; |
| while (i) { |
| if (i->port == port) { |
| return i->name; |
| } |
| i = i->next; |
| } |
| |
| VG_(sprintf)(buf, "NONPORT-%#x", port); |
| return buf; |
| } |
| |
/* Adjust our recorded rights for |port|.  Send rights are refcounted
   (adjusted by |delta|; delta == INT_MIN means "destroy", i.e. drop
   the whole send refcount at once).  All other rights are treated as
   simple flags, set when delta > 0 and cleared when delta < 0.  When a
   port ends up with no rights at all, its record is unlinked from the
   list and freed. */
static
void record_port_mod_refs(mach_port_t port, mach_port_type_t right, Int delta)
{
   OpenPort *i = allocated_ports;
   if (!port) return;

   while(i) {
      if(i->port == port) {
         vg_assert(right != MACH_PORT_TYPE_DEAD_NAME);
         if (right & MACH_PORT_TYPE_SEND) {
            // send rights are refcounted
            if (delta == INT_MIN) delta = -i->send_count; // INT_MIN == destroy
            i->send_count += delta;
            if (i->send_count > 0) i->type |= MACH_PORT_TYPE_SEND;
            else i->type &= ~MACH_PORT_TYPE_SEND;
         }
         right = right & ~MACH_PORT_TYPE_SEND;
         if (right) {
            // other rights are not refcounted
            if (delta > 0) {
               i->type |= right;
            } else if (delta < 0) {
               i->type &= ~right;
            }
         }

         // Still holds some right: keep the record.
         if (i->type != 0) return;

         // Port has no rights left. Kill it.
         // VG_(printf)("deleting port %p %s", i->port, i->name);
         if(i->prev)
            i->prev->next = i->next;
         else
            allocated_ports = i->next;
         if(i->next)
            i->next->prev = i->prev;
         if(i->name)
            VG_(free) (i->name);
         VG_(free) (i);
         allocated_port_count--;
         return;
      }
      i = i->next;
   }

   // Fell off the list: we were never told about this port.
   VG_(printf)("UNKNOWN Mach port modified (port %#x delta %d)\n", port, delta);
}
| |
| static |
| void record_port_insert_rights(mach_port_t port, mach_msg_type_name_t type) |
| { |
| switch (type) { |
| case MACH_MSG_TYPE_PORT_NAME: |
| // this task has no rights for the name |
| break; |
| case MACH_MSG_TYPE_PORT_RECEIVE: |
| // this task gets receive rights |
| record_port_mod_refs(port, MACH_PORT_TYPE_RECEIVE, 1); |
| break; |
| case MACH_MSG_TYPE_PORT_SEND: |
| // this task gets a send right |
| record_port_mod_refs(port, MACH_PORT_TYPE_SEND, 1); |
| break; |
| case MACH_MSG_TYPE_PORT_SEND_ONCE: |
| // this task gets send-once rights |
| record_port_mod_refs(port, MACH_PORT_TYPE_SEND_ONCE, 1); |
| break; |
| default: |
| vg_assert(0); |
| break; |
| } |
| } |
| |
| static |
| void record_port_dealloc(mach_port_t port) |
| { |
| // deletes 1 send or send-once right (port can't have both) |
| record_port_mod_refs(port, MACH_PORT_TYPE_SEND_RIGHTS, -1); |
| } |
| |
| static |
| void record_port_destroy(mach_port_t port) |
| { |
| // deletes all rights to port |
| record_port_mod_refs(port, MACH_PORT_TYPE_ALL_RIGHTS, INT_MIN); |
| } |
| |
| |
| /* Note the fact that a Mach port was just allocated or transferred. |
| If the port is already known, increment its reference count. */ |
| void record_named_port(ThreadId tid, mach_port_t port, |
| mach_port_right_t right, const HChar *name) |
| { |
| OpenPort *i; |
| if (!port) return; |
| |
| /* Check to see if this port is already open. */ |
| i = allocated_ports; |
| while (i) { |
| if (i->port == port) { |
| if (right != -1) record_port_mod_refs(port, MACH_PORT_TYPE(right), 1); |
| return; |
| } |
| i = i->next; |
| } |
| |
| /* Not already one: allocate an OpenPort */ |
| if (i == NULL) { |
| i = VG_(malloc)("syswrap-darwin.mach-port", sizeof(OpenPort)); |
| |
| i->prev = NULL; |
| i->next = allocated_ports; |
| if(allocated_ports) allocated_ports->prev = i; |
| allocated_ports = i; |
| allocated_port_count++; |
| |
| i->port = port; |
| i->where = (tid == -1) ? NULL : VG_(record_ExeContext)(tid, 0); |
| i->name = NULL; |
| if (right != -1) { |
| i->type = MACH_PORT_TYPE(right); |
| i->send_count = (right == MACH_PORT_RIGHT_SEND) ? 1 : 0; |
| } else { |
| i->type = 0; |
| i->send_count = 0; |
| } |
| |
| assign_port_name(port, name); |
| } |
| } |
| |
| |
| // Record opening of a nameless port. |
| static void record_unnamed_port(ThreadId tid, mach_port_t port, mach_port_right_t right) |
| { |
| record_named_port(tid, port, right, "unnamed-%p"); |
| } |
| |
| |
| /* Dump summary of open Mach ports, like VG_(show_open_fds) */ |
| void VG_(show_open_ports)(void) |
| { |
| OpenPort *i; |
| |
| VG_(message)(Vg_UserMsg, |
| "MACH PORTS: %d open at exit.\n", allocated_port_count); |
| |
| for (i = allocated_ports; i; i = i->next) { |
| if (i->name) { |
| VG_(message)(Vg_UserMsg, "Open Mach port 0x%x: %s\n", i->port, |
| i->name); |
| } else { |
| VG_(message)(Vg_UserMsg, "Open Mach port 0x%x\n", i->port); |
| } |
| |
| if (i->where) { |
| VG_(pp_ExeContext)(i->where); |
| VG_(message)(Vg_UserMsg, "\n"); |
| } |
| } |
| |
| VG_(message)(Vg_UserMsg, "\n"); |
| } |
| |
| |
| /* --------------------------------------------------------------------- |
| sync_mappings |
| ------------------------------------------------------------------ */ |
| |
/* How aggressively to honour a resync request from a given call site:
   every time, one time in 20, or never.  Used by the filter in
   ML_(sync_mappings). */
typedef
   enum { CheckAlways=1, CheckEvery20, CheckNever }
   CheckHowOften;
| |
/* Render a CheckHowOften as a short space-padded label for the stats
   display.  Falls through to vg_assert(0) on an out-of-range value. */
static const HChar* show_CheckHowOften ( CheckHowOften cho ) {
   switch (cho) {
      case CheckAlways: return "Always ";
      case CheckEvery20: return "Every20";
      case CheckNever: return "Never ";
      default: vg_assert(0);
   }
}
| |
/* Statistics for one particular resync-call set of arguments,
   as specified by key1, key2 and key3. */
typedef
   struct {
      CheckHowOften cho;         // filter decision for this call site
      const HChar* key1;         // the "when" string ("in"/"after")
      const HChar* key2;         // the "where" string (call-site name)
      UWord key3;                // site-specific number (e.g. msgh_id)
      ULong n_checks;            // how many times this site was hit
      ULong n_mappings_added;    // total segments added by syncs here
      ULong n_mappings_removed;  // total segments removed by syncs here
   }
   SyncStats;
| |
| static Bool cmp_eqkeys_SyncStats ( SyncStats* ss1, SyncStats* ss2 ) { |
| return ss1->key3 == ss2->key3 |
| && 0 == VG_(strcmp)(ss1->key1, ss2->key1) |
| && 0 == VG_(strcmp)(ss1->key2, ss2->key2); |
| } |
| |
/* The filter data: a fixed-size table of per-call-site stats. */
#define N_SYNCSTATS 1000
static Int syncstats_used = 0;  // number of live entries in syncstats[]
static SyncStats syncstats[N_SYNCSTATS];

/* Statistics overall, for the filter. */
static ULong n_syncsRequested = 0; // Total number requested
static ULong n_syncsPerformed = 0; // Number carried out (the rest skipped)
| |
| |
/* Record one performed resync in the stats table.  The
   (key1,key2,key3) triple identifies the call site; a new entry is
   created on first sight (cho is only set then).  Frequently-updated
   entries migrate towards the front of the array via a one-slot swap
   on every other update. */
static
void update_syncstats ( CheckHowOften cho,
                        const HChar* key1, const HChar* key2,
                        UWord key3,
                        UInt n_mappings_added, UInt n_mappings_removed )
{
   SyncStats dummy = { CheckAlways, key1, key2, key3, 0, 0, 0 };
   Int i;
   // Linear search for an existing entry with these keys.
   for (i = 0; i < syncstats_used; i++) {
      if (cmp_eqkeys_SyncStats(&syncstats[i], &dummy))
         break;
   }
   vg_assert(i >= 0 && i <= syncstats_used);
   if (i == syncstats_used) {
      // alloc new
      vg_assert(syncstats_used < N_SYNCSTATS);
      syncstats_used++;
      syncstats[i] = dummy;
      syncstats[i].cho = cho;
   }
   vg_assert(cmp_eqkeys_SyncStats(&syncstats[i], &dummy));
   syncstats[i].n_checks++;
   syncstats[i].n_mappings_added += (ULong)n_mappings_added;
   syncstats[i].n_mappings_removed += (ULong)n_mappings_removed;
   // reorder: every second update, bubble the entry one slot towards
   // the front so hot sites end up at the top of the display.
   static UInt reorder_ctr = 0;
   if (i > 0 && 0 == (1 & reorder_ctr++)) {
      SyncStats tmp = syncstats[i-1];
      syncstats[i-1] = syncstats[i];
      syncstats[i] = tmp;
   }
}
| |
| |
| static void maybe_show_syncstats ( void ) |
| { |
| Int i; |
| |
| // display |
| if (0 == (n_syncsRequested & 0xFF)) { |
| VG_(printf)("Resync filter: %'llu requested, %'llu performed (%llu%%)\n", |
| n_syncsRequested, n_syncsPerformed, |
| (100 * n_syncsPerformed) / |
| (n_syncsRequested == 0 ? 1 : n_syncsRequested)); |
| for (i = 0; i < syncstats_used; i++) { |
| if (i >= 40) break; // just show the top 40 |
| VG_(printf)(" [%3d] (%s) upd %6llu diff %4llu+,%3llu-" |
| " %s %s 0x%08llx\n", |
| i, show_CheckHowOften(syncstats[i].cho), |
| syncstats[i].n_checks, |
| syncstats[i].n_mappings_added, |
| syncstats[i].n_mappings_removed, |
| syncstats[i].key1, syncstats[i].key2, |
| (ULong)syncstats[i].key3); |
| } |
| if (i < syncstats_used) { |
| VG_(printf)(" and %d more entries not shown.\n", syncstats_used - i); |
| } |
| VG_(printf)("\n"); |
| } |
| } |
| |
| |
/* Resync Valgrind's view of the address space with the kernel's, after
   an event (typically a Mach message, workq operation or kevent) that
   may have changed mappings behind our back.  |when| and |where| name
   the call site and |num| is a site-specific discriminator; together
   they drive the filtering heuristic and the statistics.  Returns True
   iff any segment was added or removed. */
Bool ML_(sync_mappings)(const HChar* when, const HChar* where, UWord num)
{
   // If VG(clo_resync_filter) == 0, the filter is disabled, and
   // we must always honour the resync request.
   //
   // If VG(clo_resync_filter) == 1, the filter is enabled,
   // so we try to avoid doing the sync if possible, but keep
   // quiet.
   //
   // If VG(clo_resync_filter) == 2, the filter is enabled,
   // so we try to avoid doing the sync if possible, and also
   // periodically show stats, so that the filter can be updated.
   // (by hand).

   if (VG_(clo_resync_filter) >= 2)
      maybe_show_syncstats();

   n_syncsRequested++;

   // Usually the number of segments added/removed in a single call is very
   // small e.g. 1. But it sometimes gets up to at least 100 or so (eg. for
   // Quicktime). So we use a repeat-with-bigger-buffers-until-success model,
   // because we can't do dynamic allocation within VG_(get_changed_segments),
   // because it's in m_aspacemgr.
   ChangedSeg* css = NULL;
   Int css_size;
   Int css_used;
   Int i;
   Bool ok;

   // -------------- BEGIN resync-filter-kludge --------------
   //
   // Some kludges to try and avoid the worst case cost hit of doing
   // zillions of resyncs (huge). The idea is that many of the most
   // common resyncs never appear to cause a delta, so we just ignore
   // them (CheckNever). Then, a bunch of them also happen a lot, but
   // only very occasionally cause a delta. We resync after 20 of those
   // (CheckEvery20). Finally, the rest form a long tail, so we always
   // resync after those (CheckAlways).
   //
   // Assume this is kernel-version and word-size specific, so develop
   // filters accordingly. This might be overly conservative --
   // I don't know.

#  define STREQ(_s1, _s2) (0 == VG_(strcmp)((_s1),(_s2)))
   Bool when_in = STREQ(when, "in");
   Bool when_after = STREQ(when, "after");
   Bool where_mmr = STREQ(where, "mach_msg_receive");
   Bool where_mmrU = STREQ(where, "mach_msg_receive-UNHANDLED");
   Bool where_iuct = STREQ(where, "iokit_user_client_trap");
   Bool where_MwcN = STREQ(where, "ML_(wqthread_continue_NORETURN)");
   Bool where_woQR = STREQ(where, "workq_ops(QUEUE_REQTHREADS)");
   Bool where_woTR = STREQ(where, "workq_ops(THREAD_RETURN)");
   Bool where_ke64 = STREQ(where, "kevent64");
#  undef STREQ

   // At most one of the "where" strings may match.
   vg_assert(
      1 >= ( (where_mmr ? 1 : 0) + (where_mmrU ? 1 : 0)
             + (where_iuct ? 1 : 0) + (where_MwcN ? 1 : 0)
             + (where_woQR ? 1 : 0) + (where_woTR ? 1 : 0)
             + (where_ke64 ? 1 : 0)
      ));
   // merely to stop gcc complaining of non-use in the case where
   // there's no filter:
   vg_assert(when_in == True || when_in == False);
   vg_assert(when_after == True || when_after == False);

   CheckHowOften check = CheckAlways;

#  if DARWIN_VERS == DARWIN_10_9 && VG_WORDSIZE == 8
   /* ---------- BEGIN filter for 64-bit 10.9.x ---------- */
   if (when_after && where_mmr) {
      // "after mach_msg_receive <number>"
      switch (num) {
         case 0x00000000: // upd 12414 diff 36+,0-
            check = CheckEvery20;
            break;
         default:
            break;
      }
   }
   else
   if (when_after && where_mmrU) {
      // "after mach_msg_receive-UNHANDLED <number>"
      switch (num) {
         case 0x00000000: // upd 16687 diff 73+,0-
         case 0x00000001: // upd 5106 diff 89+,0-
         case 0x00000002: // upd 1609 diff 1+,0-
         case 0x00000003: // upd 1987 diff 6+,0-
         // case 0x00000b95: // upd 2894 diff 57+,1- <==dangerous
         case 0x000072d9: // upd 2616 diff 11+,0-
         case 0x000072cb: // upd 2616 diff 9+,0-
         case 0x000074d5: // upd 172 diff 0+,0-
            check = CheckEvery20;
            break;
         default:
            break;
      }
   }
   else
   if (when_in && where_MwcN && num == 0x00000000) {
      // in ML_(wqthread_continue_NORETURN) 0x00000000
      // upd 4346 diff 0+,0-
      check = CheckEvery20;
   }
   else
   if (when_after && where_woQR && num == 0x00000000) {
      // after workq_ops(QUEUE_REQTHREADS) 0x00000000
      // upd 14434 diff 102+,0-
      check = CheckEvery20;
   }
   else
   if (when_after && where_woTR && num == 0x00000000) {
      // after workq_ops(THREAD_RETURN) 0x00000000
      // upd 14434 diff 102+,0-
      check = CheckEvery20;
   }
   else
   if (when_after && where_ke64 && num == 0x00000000) {
      // after kevent64 0x00000000
      // upd 1736 diff 78+,0-
      check = CheckEvery20;
   }
   /* ----------- END filter for 64-bit 10.9.x ----------- */
#  endif /* DARWIN_VERS == DARWIN_10_9 && VG_WORDSIZE == 8 */

#  if DARWIN_VERS == DARWIN_10_10 && VG_WORDSIZE == 8
   /* ---------- BEGIN filter for 64-bit 10.10.x ---------- */
   if (when_after && where_mmr) {
      // "after mach_msg_receive <number>"
      switch (num) {
         case 0x00000000: // upd 2380 diff 23+,0-
            check = CheckEvery20;
            break;
         default:
            break;
      }
   }
   else
   if (when_after && where_mmrU) {
      // "after mach_msg_receive-UNHANDLED <number>"
      switch (num) {
         case 0x00000000: // upd 2370 diff 93+,1- <==dangerous
         case 0x0000004f: // upd 212 diff 2+,0-
         case 0x00000b95: // upd 9826 diff 163+,1- diff scale, dangerous
         case 0x00000ba5: // upd 304 diff 0+,0-
         case 0x0000157f: // upd 201 diff 2+,0-
         case 0x0000157d: // upd 197 diff 1+,0-
         case 0x0000333d: // upd 112 diff 0+,0-
         case 0x0000333f: // upd 223 diff 10+,0-
         case 0x000072cd: // upd 8286 diff 98+,0- diff scale
         case 0x000072ae: // upd 193 diff 10+,0-
         case 0x000072ec: // upd 319 diff 7+,0-
         case 0x77303074: // upd 113 diff 3+,0-
         case 0x10000000: // upd 314 diff 6+,0-
            check = CheckEvery20;
            break;
         default:
            break;
      }
   }
   else
   if (when_in && where_MwcN && num == 0x00000000) {
      // in ML_(wqthread_continue_NORETURN) 0x00000000
      // upd 1110 diff 37+,0-
      check = CheckEvery20;
   }
   else
   if (when_after && where_woQR && num == 0x00000000) {
      // after workq_ops(QUEUE_REQTHREADS) 0x00000000
      // upd 1099 diff 37+,0-
      check = CheckEvery20;
   }
   else
   if (when_after && where_woTR && num == 0x00000000) {
      // after workq_ops(THREAD_RETURN) 0x00000000
      // 1239 diff 53+,0-
      check = CheckEvery20;
   }
   else
   if (when_after && where_ke64 && num == 0x00000000) {
      // after kevent64 0x00000000
      // upd 1463 diff 15+,0-
      check = CheckEvery20;
   }
   /* ----------- END filter for 64-bit 10.10.x ----------- */
#  endif /* DARWIN_VERS == DARWIN_10_10 && VG_WORDSIZE == 8 */

   /* Regardless of what the filter says, force a sync every 1 time in
      1000, to stop things getting too far out of sync. */
   {
     static UInt ctr1k = 0;
     ctr1k++;
     if ((ctr1k % 1000) == 0)
        check = CheckAlways;
   }

   /* If the filter is disabled, we must always check. */
   if (VG_(clo_resync_filter) == 0)
      check = CheckAlways;

   switch (check) {
      case CheckAlways:
         break;
      case CheckEvery20: {
         // only resync once every 20th time
         static UInt ctr10 = 0;
         ctr10++;
         if ((ctr10 % 20) != 0) return False;
         break;
      }
      case CheckNever:
         return False;
      default:
         vg_assert(0);
   }
   //
   // --------------- END resync-filter-kludge ---------------

   if (0 || VG_(clo_trace_syscalls)) {
       VG_(debugLog)(0, "syswrap-darwin",
                     "sync_mappings (%s) (\"%s\", \"%s\", 0x%llx)\n",
                     show_CheckHowOften(check), when, where, (ULong)num);
   }

   // 16 is enough for most cases, but small enough that overflow happens
   // occasionally and thus the overflow path gets some test coverage.
   css_size = 16;
   ok = False;
   // Keep doubling the buffer until get_changed_segments fits its
   // answer in it.
   while (!ok) {
      VG_(free)(css);   // css is NULL on first iteration; that's ok.
      css = VG_(calloc)("sys_wrap.sync_mappings",
                        css_size, sizeof(ChangedSeg));
      ok = VG_(get_changed_segments)(when, where, css, css_size, &css_used);
      css_size *= 2;
   }

   UInt css_added = 0, css_removed = 0;

   // Now add/remove them.
   for (i = 0; i < css_used; i++) {
      ChangedSeg* cs = &css[i];
      if (cs->is_added) {
         css_added++;
         ML_(notify_core_and_tool_of_mmap)(
               cs->start, cs->end - cs->start + 1,
               cs->prot, VKI_MAP_PRIVATE, 0, cs->offset);
         // should this call VG_(di_notify_mmap) also?
      } else {
         css_removed++;
         ML_(notify_core_and_tool_of_munmap)(
               cs->start, cs->end - cs->start + 1);
      }
      if (VG_(clo_trace_syscalls)) {
          if (cs->is_added) {
             VG_(debugLog)(0, "syswrap-darwin",
                " added region 0x%010lx..0x%010lx prot %u at %s (%s)\n",
                cs->start, cs->end + 1, (UInt)cs->prot, where, when);
          } else {
             VG_(debugLog)(0, "syswrap-darwin",
                " removed region 0x%010lx..0x%010lx at %s (%s)\n",
                cs->start, cs->end + 1, where, when);
          }
      }
   }

   VG_(free)(css);

   if (0)
      VG_(debugLog)(0, "syswrap-darwin", "SYNC: %d  %s  %s\n",
                    css_used, when, where);

   // Update the stats, so we can derive the filter above.
   n_syncsPerformed++;
   update_syncstats(check, when, where, num, css_added, css_removed);

   return css_used > 0;
}
| |
| /* --------------------------------------------------------------------- |
| wrappers |
| ------------------------------------------------------------------ */ |
| |
// Shorthand for declaring Darwin PRE/POST syscall wrapper functions.
#define PRE(name) DEFN_PRE_TEMPLATE(darwin, name)
#define POST(name) DEFN_POST_TEMPLATE(darwin, name)

// Mangled names of the generated wrapper functions, for direct calls.
#define PRE_FN(name) vgSysWrap_darwin_##name##_before
#define POST_FN(name) vgSysWrap_darwin_##name##_after

// Invoke another syscall's wrapper from within a wrapper body
// (tid/layout/arrghs/status/flags must be in scope).
#define CALL_PRE(name) PRE_FN(name)(tid, layout, arrghs, status, flags)
#define CALL_POST(name) POST_FN(name)(tid, arrghs, status)

#if VG_WORDSIZE == 4
// Combine two 32-bit values into a 64-bit value
// Always use with low-numbered arg first (e.g. LOHI64(ARG1,ARG2) )
# if defined(VGA_x86)
# define LOHI64(lo,hi) ( ((ULong)(UInt)(lo)) | (((ULong)(UInt)(hi)) << 32) )
# else
# error unknown architecture
# endif
#endif

// Retrieve the current Mach thread
#define MACH_THREAD ((Addr)VG_(get_ThreadState)(tid)->os_state.lwpid)

// Set the POST handler for a mach_msg derivative
#define AFTER VG_(get_ThreadState)(tid)->os_state.post_mach_trap_fn

// Set or get values saved from Mach messages
#define MACH_ARG(x) VG_(get_ThreadState)(tid)->os_state.mach_args.x
#define MACH_REMOTE VG_(get_ThreadState)(tid)->os_state.remote_port
#define MACH_MSGH_ID VG_(get_ThreadState)(tid)->os_state.msgh_id
| |
| /* --------------------------------------------------------------------- |
| darwin ioctl wrapper |
| ------------------------------------------------------------------ */ |
| |
| PRE(ioctl) |
| { |
| *flags |= SfMayBlock; |
| |
| /* Handle ioctls that don't take an arg first */ |
| switch (ARG2 /* request */) { |
| case VKI_TIOCSCTTY: |
| case VKI_TIOCEXCL: |
| case VKI_TIOCPTYGRANT: |
| case VKI_TIOCPTYUNLK: |
| case VKI_DTRACEHIOC_REMOVE: |
| PRINT("ioctl ( %ld, 0x%lx )",ARG1,ARG2); |
| PRE_REG_READ2(long, "ioctl", |
| unsigned int, fd, unsigned int, request); |
| return; |
| default: |
| PRINT("ioctl ( %ld, 0x%lx, %#lx )",ARG1,ARG2,ARG3); |
| PRE_REG_READ3(long, "ioctl", |
| unsigned int, fd, unsigned int, request, unsigned long, arg); |
| } |
| |
| switch (ARG2 /* request */) { |
| case VKI_TIOCGWINSZ: |
| PRE_MEM_WRITE( "ioctl(TIOCGWINSZ)", ARG3, sizeof(struct vki_winsize) ); |
| break; |
| case VKI_TIOCSWINSZ: |
| PRE_MEM_READ( "ioctl(TIOCSWINSZ)", ARG3, sizeof(struct vki_winsize) ); |
| break; |
| case VKI_TIOCMBIS: |
| PRE_MEM_READ( "ioctl(TIOCMBIS)", ARG3, sizeof(unsigned int) ); |
| break; |
| case VKI_TIOCMBIC: |
| PRE_MEM_READ( "ioctl(TIOCMBIC)", ARG3, sizeof(unsigned int) ); |
| break; |
| case VKI_TIOCMSET: |
| PRE_MEM_READ( "ioctl(TIOCMSET)", ARG3, sizeof(unsigned int) ); |
| break; |
| case VKI_TIOCMGET: |
| PRE_MEM_WRITE( "ioctl(TIOCMGET)", ARG3, sizeof(unsigned int) ); |
| break; |
| case VKI_TIOCGPGRP: |
| /* Get process group ID for foreground processing group. */ |
| PRE_MEM_WRITE( "ioctl(TIOCGPGRP)", ARG3, sizeof(vki_pid_t) ); |
| break; |
| case VKI_TIOCSPGRP: |
| /* Set a process group ID? */ |
| PRE_MEM_WRITE( "ioctl(TIOCGPGRP)", ARG3, sizeof(vki_pid_t) ); |
| break; |
| case VKI_FIONBIO: |
| PRE_MEM_READ( "ioctl(FIONBIO)", ARG3, sizeof(int) ); |
| break; |
| case VKI_FIOASYNC: |
| PRE_MEM_READ( "ioctl(FIOASYNC)", ARG3, sizeof(int) ); |
| break; |
| case VKI_FIONREAD: /* identical to SIOCINQ */ |
| PRE_MEM_WRITE( "ioctl(FIONREAD)", ARG3, sizeof(int) ); |
| break; |
| |
| |
| /* These all use struct ifreq AFAIK */ |
| /* GrP fixme is sizeof(struct vki_if_req) correct if it's using a sockaddr? */ |
| case VKI_SIOCGIFFLAGS: /* get flags */ |
| PRE_MEM_RASCIIZ( "ioctl(SIOCGIFFLAGS)", |
| (Addr)((struct vki_ifreq *)ARG3)->vki_ifr_name ); |
| PRE_MEM_WRITE( "ioctl(SIOCGIFFLAGS)", ARG3, sizeof(struct vki_ifreq)); |
| break; |
| case VKI_SIOCGIFMTU: /* get MTU size */ |
| PRE_MEM_RASCIIZ( "ioctl(SIOCGIFMTU)", |
| (Addr)((struct vki_ifreq *)ARG3)->vki_ifr_name ); |
| PRE_MEM_WRITE( "ioctl(SIOCGIFMTU)", ARG3, sizeof(struct vki_ifreq)); |
| break; |
| case VKI_SIOCGIFADDR: /* get PA address */ |
| PRE_MEM_RASCIIZ( "ioctl(SIOCGIFADDR)", |
| (Addr)((struct vki_ifreq *)ARG3)->vki_ifr_name ); |
| PRE_MEM_WRITE( "ioctl(SIOCGIFADDR)", ARG3, sizeof(struct vki_ifreq)); |
| break; |
| case VKI_SIOCGIFNETMASK: /* get network PA mask */ |
| PRE_MEM_RASCIIZ( "ioctl(SIOCGIFNETMASK)", |
| (Addr)((struct vki_ifreq *)ARG3)->vki_ifr_name ); |
| PRE_MEM_WRITE( "ioctl(SIOCGIFNETMASK)", ARG3, sizeof(struct vki_ifreq)); |
| break; |
| case VKI_SIOCGIFMETRIC: /* get metric */ |
| PRE_MEM_RASCIIZ( "ioctl(SIOCGIFMETRIC)", |
| (Addr)((struct vki_ifreq *)ARG3)->vki_ifr_name ); |
| PRE_MEM_WRITE( "ioctl(SIOCGIFMETRIC)", ARG3, sizeof(struct vki_ifreq)); |
| break; |
| case VKI_SIOCGIFDSTADDR: /* get remote PA address */ |
| PRE_MEM_RASCIIZ( "ioctl(SIOCGIFDSTADDR)", |
| (Addr)((struct vki_ifreq *)ARG3)->vki_ifr_name ); |
| PRE_MEM_WRITE( "ioctl(SIOCGIFDSTADDR)", ARG3, sizeof(struct vki_ifreq)); |
| break; |
| case VKI_SIOCGIFBRDADDR: /* get broadcast PA address */ |
| PRE_MEM_RASCIIZ( "ioctl(SIOCGIFBRDADDR)", |
| (Addr)((struct vki_ifreq *)ARG3)->vki_ifr_name ); |
| PRE_MEM_WRITE( "ioctl(SIOCGIFBRDADDR)", ARG3, sizeof(struct vki_ifreq)); |
| break; |
| case VKI_SIOCGIFCONF: /* get iface list */ |
| /* WAS: |
| PRE_MEM_WRITE( "ioctl(SIOCGIFCONF)", ARG3, sizeof(struct ifconf)); |
| KERNEL_DO_SYSCALL(tid,RES); |
| if (!VG_(is_kerror)(RES) && RES == 0) |
| POST_MEM_WRITE(ARG3, sizeof(struct ifconf)); |
| */ |
| PRE_MEM_READ( "ioctl(SIOCGIFCONF)", |
| (Addr)&((struct vki_ifconf *)ARG3)->ifc_len, |
| sizeof(((struct vki_ifconf *)ARG3)->ifc_len)); |
| PRE_MEM_READ( "ioctl(SIOCGIFCONF)", |
| (Addr)&((struct vki_ifconf *)ARG3)->vki_ifc_buf, |
| sizeof(((struct vki_ifconf *)ARG3)->vki_ifc_buf)); |
| if ( ARG3 ) { |
| // TODO len must be readable and writable |
| // buf pointer only needs to be readable |
| struct vki_ifconf *ifc = (struct vki_ifconf *) ARG3; |
| PRE_MEM_WRITE( "ioctl(SIOCGIFCONF).ifc_buf", |
| (Addr)(ifc->vki_ifc_buf), ifc->ifc_len ); |
| } |
| break; |
| |
| case VKI_SIOCSIFFLAGS: /* set flags */ |
| PRE_MEM_RASCIIZ( "ioctl(SIOCSIFFLAGS)", |
| (Addr)((struct vki_ifreq *)ARG3)->vki_ifr_name ); |
| PRE_MEM_READ( "ioctl(SIOCSIFFLAGS)", |
| (Addr)&((struct vki_ifreq *)ARG3)->vki_ifr_flags, |
| sizeof(((struct vki_ifreq *)ARG3)->vki_ifr_flags) ); |
| break; |
| case VKI_SIOCSIFADDR: /* set PA address */ |
| case VKI_SIOCSIFDSTADDR: /* set remote PA address */ |
| case VKI_SIOCSIFBRDADDR: /* set broadcast PA address */ |
| case VKI_SIOCSIFNETMASK: /* set network PA mask */ |
| PRE_MEM_RASCIIZ( "ioctl(SIOCSIF*ADDR)", |
| (Addr)((struct vki_ifreq *)ARG3)->vki_ifr_name ); |
| PRE_MEM_READ( "ioctl(SIOCSIF*ADDR)", |
| (Addr)&((struct vki_ifreq *)ARG3)->ifr_addr, |
| sizeof(((struct vki_ifreq *)ARG3)->ifr_addr) ); |
| break; |
| case VKI_SIOCSIFMETRIC: /* set metric */ |
| PRE_MEM_RASCIIZ( "ioctl(SIOCSIFMETRIC)", |
| (Addr)((struct vki_ifreq *)ARG3)->vki_ifr_name ); |
| PRE_MEM_READ( "ioctl(SIOCSIFMETRIC)", |
| (Addr)&((struct vki_ifreq *)ARG3)->vki_ifr_metric, |
| sizeof(((struct vki_ifreq *)ARG3)->vki_ifr_metric) ); |
| break; |
| case VKI_SIOCSIFMTU: /* set MTU size */ |
| PRE_MEM_RASCIIZ( "ioctl(SIOCSIFMTU)", |
| (Addr)((struct vki_ifreq *)ARG3)->vki_ifr_name ); |
| PRE_MEM_READ( "ioctl(SIOCSIFMTU)", |
| (Addr)&((struct vki_ifreq *)ARG3)->vki_ifr_mtu, |
| sizeof(((struct vki_ifreq *)ARG3)->vki_ifr_mtu) ); |
| break; |
| /* Routing table calls. */ |
| #ifdef VKI_SIOCADDRT |
| case VKI_SIOCADDRT: /* add routing table entry */ |
| case VKI_SIOCDELRT: /* delete routing table entry */ |
| PRE_MEM_READ( "ioctl(SIOCADDRT/DELRT)", ARG3, |
| sizeof(struct vki_rtentry)); |
| break; |
| #endif |
| |
| case VKI_SIOCGPGRP: |
| PRE_MEM_WRITE( "ioctl(SIOCGPGRP)", ARG3, sizeof(int) ); |
| break; |
| case VKI_SIOCSPGRP: |
| PRE_MEM_READ( "ioctl(SIOCSPGRP)", ARG3, sizeof(int) ); |
| //tst->sys_flags &= ~SfMayBlock; |
| break; |
| |
| case VKI_FIODTYPE: |
| PRE_MEM_WRITE( "ioctl(FIONREAD)", ARG3, sizeof(int) ); |
| break; |
| |
| case VKI_DTRACEHIOC_ADDDOF: |
| break; |
| |
| // ttycom.h |
| case VKI_TIOCGETA: |
| PRE_MEM_WRITE( "ioctl(TIOCGETA)", ARG3, sizeof(struct vki_termios) ); |
| break; |
| case VKI_TIOCSETA: |
| PRE_MEM_READ( "ioctl(TIOCSETA)", ARG3, sizeof(struct vki_termios) ); |
| break; |
| case VKI_TIOCGETD: |
| PRE_MEM_WRITE( "ioctl(TIOCGETD)", ARG3, sizeof(int) ); |
| break; |
| case VKI_TIOCSETD: |
| PRE_MEM_READ( "ioctl(TIOCSETD)", ARG3, sizeof(int) ); |
| break; |
| case VKI_TIOCPTYGNAME: |
| PRE_MEM_WRITE( "ioctl(TIOCPTYGNAME)", ARG3, 128 ); |
| break; |
| |
| // filio.h |
| case VKI_FIOCLEX: |
| break; |
| case VKI_FIONCLEX: |
| break; |
| |
| default: |
| ML_(PRE_unknown_ioctl)(tid, ARG2, ARG3); |
| break; |
| } |
| } |
| |
| |
/* POST wrapper for Darwin ioctl().  After a successful syscall, marks as
   defined the memory the kernel wrote through ARG3, mirroring the request
   decoding done in PRE(ioctl). */
POST(ioctl)
{
   vg_assert(SUCCESS);
   switch (ARG2 /* request */) {
   case VKI_TIOCGWINSZ:
      POST_MEM_WRITE( ARG3, sizeof(struct vki_winsize) );
      break;
   case VKI_TIOCSWINSZ:
   case VKI_TIOCMBIS:
   case VKI_TIOCMBIC:
   case VKI_TIOCMSET:
      break;
   case VKI_TIOCMGET:
      POST_MEM_WRITE( ARG3, sizeof(unsigned int) );
      break;
   case VKI_TIOCGPGRP:
      /* Get process group ID for foreground processing group. */
      POST_MEM_WRITE( ARG3, sizeof(vki_pid_t) );
      break;
   case VKI_TIOCSPGRP:
      /* Set a process group ID? */
      /* NOTE(review): a SET request writing back through ARG3 looks odd;
         mirrors the PRE handler's PRE_MEM_WRITE — confirm direction. */
      POST_MEM_WRITE( ARG3, sizeof(vki_pid_t) );
      break;
   case VKI_TIOCSCTTY:
      break;
   case VKI_FIONBIO:
      break;
   case VKI_FIOASYNC:
      break;
   case VKI_FIONREAD: /* identical to SIOCINQ */
      POST_MEM_WRITE( ARG3, sizeof(int) );
      break;

   /* These all use struct ifreq AFAIK */
   case VKI_SIOCGIFFLAGS: /* get flags */
      POST_MEM_WRITE( (Addr)&((struct vki_ifreq *)ARG3)->vki_ifr_flags,
                      sizeof(((struct vki_ifreq *)ARG3)->vki_ifr_flags) );
      break;
   case VKI_SIOCGIFMTU: /* get MTU size */
      POST_MEM_WRITE( (Addr)&((struct vki_ifreq *)ARG3)->vki_ifr_mtu,
                      sizeof(((struct vki_ifreq *)ARG3)->vki_ifr_mtu) );
      break;
   case VKI_SIOCGIFADDR: /* get PA address */
   case VKI_SIOCGIFDSTADDR: /* get remote PA address */
   case VKI_SIOCGIFBRDADDR: /* get broadcast PA address */
   case VKI_SIOCGIFNETMASK: /* get network PA mask */
      POST_MEM_WRITE(
                (Addr)&((struct vki_ifreq *)ARG3)->ifr_addr,
                sizeof(((struct vki_ifreq *)ARG3)->ifr_addr) );
      break;
   case VKI_SIOCGIFMETRIC: /* get metric */
      POST_MEM_WRITE(
                (Addr)&((struct vki_ifreq *)ARG3)->vki_ifr_metric,
                sizeof(((struct vki_ifreq *)ARG3)->vki_ifr_metric) );
      break;
   case VKI_SIOCGIFCONF: /* get iface list */
      /* WAS:
         PRE_MEM_WRITE("ioctl(SIOCGIFCONF)", ARG3, sizeof(struct ifconf));
         KERNEL_DO_SYSCALL(tid,RES);
         if (!VG_(is_kerror)(RES) && RES == 0)
            POST_MEM_WRITE(ARG3, sizeof(struct ifconf));
      */
      // Kernel filled ifc_buf with up to ifc_len bytes of interface records.
      if (RES == 0 && ARG3 ) {
         struct vki_ifconf *ifc = (struct vki_ifconf *) ARG3;
         if (ifc->vki_ifc_buf != NULL)
            POST_MEM_WRITE( (Addr)(ifc->vki_ifc_buf), ifc->ifc_len );
      }
      break;

   case VKI_SIOCSIFFLAGS: /* set flags */
   case VKI_SIOCSIFDSTADDR: /* set remote PA address */
   case VKI_SIOCSIFBRDADDR: /* set broadcast PA address */
   case VKI_SIOCSIFNETMASK: /* set network PA mask */
   case VKI_SIOCSIFMETRIC: /* set metric */
   case VKI_SIOCSIFADDR: /* set PA address */
   case VKI_SIOCSIFMTU: /* set MTU size */
      break;

#ifdef VKI_SIOCADDRT
   /* Routing table calls. */
   case VKI_SIOCADDRT: /* add routing table entry */
   case VKI_SIOCDELRT: /* delete routing table entry */
      break;
#endif

   case VKI_SIOCGPGRP:
      POST_MEM_WRITE(ARG3, sizeof(int));
      break;
   case VKI_SIOCSPGRP:
      break;

   case VKI_FIODTYPE:
      POST_MEM_WRITE( ARG3, sizeof(int) );
      break;

   case VKI_DTRACEHIOC_REMOVE:
   case VKI_DTRACEHIOC_ADDDOF:
      break;

   // ttycom.h
   case VKI_TIOCGETA:
      POST_MEM_WRITE( ARG3, sizeof(struct vki_termios));
      break;
   case VKI_TIOCSETA:
      break;
   case VKI_TIOCGETD:
      POST_MEM_WRITE( ARG3, sizeof(int) );
      break;
   case VKI_TIOCSETD:
      break;
   case VKI_TIOCPTYGNAME:
      // Kernel wrote the pty name into a 128-byte buffer (see PRE handler).
      POST_MEM_WRITE( ARG3, 128);
      break;
   case VKI_TIOCPTYGRANT:
   case VKI_TIOCPTYUNLK:
      break;

   default:
      break;
   }
}
| |
| |
| /* --------------------------------------------------------------------- |
| darwin fcntl wrapper |
| ------------------------------------------------------------------ */ |
| static const HChar *name_for_fcntl(UWord cmd) { |
| #define F(n) case VKI_##n: return #n |
| switch (cmd) { |
| F(F_CHKCLEAN); |
| F(F_RDAHEAD); |
| F(F_NOCACHE); |
| F(F_FULLFSYNC); |
| F(F_FREEZE_FS); |
| F(F_THAW_FS); |
| F(F_GLOBAL_NOCACHE); |
| F(F_PREALLOCATE); |
| F(F_SETSIZE); |
| F(F_RDADVISE); |
| # if DARWIN_VERS < DARWIN_10_9 |
| F(F_READBOOTSTRAP); |
| F(F_WRITEBOOTSTRAP); |
| # endif |
| F(F_LOG2PHYS); |
| F(F_GETPATH); |
| F(F_PATHPKG_CHECK); |
| F(F_ADDSIGS); |
| # if DARWIN_VERS >= DARWIN_10_9 |
| F(F_ADDFILESIGS); |
| # endif |
| default: |
| return "UNKNOWN"; |
| } |
| #undef F |
| } |
| |
| PRE(fcntl) |
| { |
| switch (ARG2) { |
| // These ones ignore ARG3. |
| case VKI_F_GETFD: |
| case VKI_F_GETFL: |
| case VKI_F_GETOWN: |
| PRINT("fcntl ( %ld, %ld )", ARG1,ARG2); |
| PRE_REG_READ2(long, "fcntl", unsigned int, fd, unsigned int, cmd); |
| break; |
| |
| // These ones use ARG3 as "arg". |
| case VKI_F_DUPFD: |
| case VKI_F_SETFD: |
| case VKI_F_SETFL: |
| case VKI_F_SETOWN: |
| PRINT("fcntl[ARG3=='arg'] ( %ld, %ld, %ld )", ARG1,ARG2,ARG3); |
| PRE_REG_READ3(long, "fcntl", |
| unsigned int, fd, unsigned int, cmd, unsigned long, arg); |
| break; |
| |
| // These ones use ARG3 as "lock". |
| case VKI_F_GETLK: |
| case VKI_F_SETLK: |
| case VKI_F_SETLKW: |
| PRINT("fcntl[ARG3=='lock'] ( %ld, %ld, %#lx )", ARG1,ARG2,ARG3); |
| PRE_REG_READ3(long, "fcntl", |
| unsigned int, fd, unsigned int, cmd, |
| struct flock64 *, lock); |
| // GrP fixme mem read sizeof(flock64) |
| if (ARG2 == VKI_F_SETLKW) |
| *flags |= SfMayBlock; |
| break; |
| # if DARWIN_VERS >= DARWIN_10_10 |
| case VKI_F_SETLKWTIMEOUT: |
| PRINT("fcntl[ARG3=='locktimeout'] ( %ld, %ld, %#lx )", ARG1,ARG2,ARG3); |
| PRE_REG_READ3(long, "fcntl", |
| unsigned int, fd, unsigned int, cmd, |
| struct flocktimeout *, lock); |
| *flags |= SfMayBlock; |
| break; |
| # endif |
| |
| // none |
| case VKI_F_CHKCLEAN: |
| case VKI_F_RDAHEAD: |
| case VKI_F_NOCACHE: |
| case VKI_F_FULLFSYNC: |
| case VKI_F_FREEZE_FS: |
| case VKI_F_THAW_FS: |
| case VKI_F_GLOBAL_NOCACHE: |
| PRINT("fcntl ( %ld, %s )", ARG1, name_for_fcntl(ARG1)); |
| PRE_REG_READ2(long, "fcntl", unsigned int, fd, unsigned int, cmd); |
| break; |
| |
| // struct fstore |
| case VKI_F_PREALLOCATE: |
| PRINT("fcntl ( %ld, %s, %#lx )", ARG1, name_for_fcntl(ARG2), ARG3); |
| PRE_REG_READ3(long, "fcntl", |
| unsigned int, fd, unsigned int, cmd, |
| struct fstore *, fstore); |
| { |
| struct vki_fstore *fstore = (struct vki_fstore *)ARG3; |
| PRE_FIELD_READ( "fcntl(F_PREALLOCATE, fstore->fst_flags)", |
| fstore->fst_flags ); |
| PRE_FIELD_READ( "fcntl(F_PREALLOCATE, fstore->fst_flags)", |
| fstore->fst_posmode ); |
| PRE_FIELD_READ( "fcntl(F_PREALLOCATE, fstore->fst_flags)", |
| fstore->fst_offset ); |
| PRE_FIELD_READ( "fcntl(F_PREALLOCATE, fstore->fst_flags)", |
| fstore->fst_length ); |
| PRE_FIELD_WRITE( "fcntl(F_PREALLOCATE, fstore->fst_bytesalloc)", |
| fstore->fst_bytesalloc); |
| } |
| break; |
| |
| // off_t |
| case VKI_F_SETSIZE: |
| PRINT("fcntl ( %ld, %s, %#lx )", ARG1, name_for_fcntl(ARG2), ARG3); |
| PRE_REG_READ3(long, "fcntl", |
| unsigned int, fd, unsigned int, cmd, |
| vki_off_t *, offset); |
| break; |
| |
| // struct radvisory |
| case VKI_F_RDADVISE: |
| PRINT("fcntl ( %ld, %s, %#lx )", ARG1, name_for_fcntl(ARG2), ARG3); |
| PRE_REG_READ3(long, "fcntl", |
| unsigned int, fd, unsigned int, cmd, |
| struct vki_radvisory *, radvisory); |
| { |
| struct vki_radvisory *radvisory = (struct vki_radvisory *)ARG3; |
| PRE_FIELD_READ( "fcntl(F_PREALLOCATE, radvisory->ra_offset)", |
| radvisory->ra_offset ); |
| PRE_FIELD_READ( "fcntl(F_PREALLOCATE, radvisory->ra_count)", |
| radvisory->ra_count ); |
| } |
| break; |
| |
| # if DARWIN_VERS < DARWIN_10_9 |
| // struct fbootstraptransfer |
| case VKI_F_READBOOTSTRAP: |
| case VKI_F_WRITEBOOTSTRAP: |
| PRINT("fcntl ( %ld, %s, %#lx )", ARG1, name_for_fcntl(ARG2), ARG3); |
| PRE_REG_READ3(long, "fcntl", |
| unsigned int, fd, unsigned int, cmd, |
| struct fbootstraptransfer *, bootstrap); |
| PRE_MEM_READ( "fcntl(F_READ/WRITEBOOTSTRAP, bootstrap)", |
| ARG3, sizeof(struct vki_fbootstraptransfer) ); |
| break; |
| # endif |
| |
| // struct log2phys (out) |
| case VKI_F_LOG2PHYS: |
| PRINT("fcntl ( %ld, %s, %#lx )", ARG1, name_for_fcntl(ARG2), ARG3); |
| PRE_REG_READ3(long, "fcntl", |
| unsigned int, fd, unsigned int, cmd, |
| struct log2phys *, l2p); |
| PRE_MEM_WRITE( "fcntl(F_LOG2PHYS, l2p)", |
| ARG3, sizeof(struct vki_log2phys) ); |
| break; |
| |
| // char[maxpathlen] (out) |
| case VKI_F_GETPATH: |
| PRINT("fcntl ( %ld, %s, %#lx )", ARG1, name_for_fcntl(ARG2), ARG3); |
| PRE_REG_READ3(long, "fcntl", |
| unsigned int, fd, unsigned int, cmd, |
| char *, pathbuf); |
| PRE_MEM_WRITE( "fcntl(F_GETPATH, pathbuf)", |
| ARG3, VKI_MAXPATHLEN ); |
| break; |
| |
| // char[maxpathlen] (in) |
| case VKI_F_PATHPKG_CHECK: |
| PRINT("fcntl ( %ld, %s, %#lx '%s')", ARG1, name_for_fcntl(ARG2), ARG3, |
| (char *)ARG3); |
| PRE_REG_READ3(long, "fcntl", |
| unsigned int, fd, unsigned int, cmd, |
| char *, pathbuf); |
| PRE_MEM_RASCIIZ( "fcntl(F_PATHPKG_CHECK, pathbuf)", ARG3); |
| break; |
| |
| case VKI_F_ADDSIGS: /* Add detached signatures (for code signing) */ |
| PRINT("fcntl ( %ld, %s )", ARG1, name_for_fcntl(ARG2)); |
| PRE_REG_READ3(long, "fcntl", |
| unsigned int, fd, unsigned int, cmd, |
| vki_fsignatures_t *, sigs); |
| |
| { |
| vki_fsignatures_t *fsigs = (vki_fsignatures_t*)ARG3; |
| PRE_FIELD_READ( "fcntl(F_ADDSIGS, fsigs->fs_blob_start)", |
| fsigs->fs_blob_start); |
| PRE_FIELD_READ( "fcntl(F_ADDSIGS, fsigs->fs_blob_size)", |
| fsigs->fs_blob_size); |
| |
| if (fsigs->fs_blob_start) |
| PRE_MEM_READ( "fcntl(F_ADDSIGS, fsigs->fs_blob_start)", |
| (Addr)fsigs->fs_blob_start, fsigs->fs_blob_size); |
| } |
| break; |
| |
| case VKI_F_ADDFILESIGS: /* Add signature from same file (used by dyld for shared libs) */ |
| PRINT("fcntl ( %ld, %s )", ARG1, name_for_fcntl(ARG2)); |
| PRE_REG_READ3(long, "fcntl", |
| unsigned int, fd, unsigned int, cmd, |
| vki_fsignatures_t *, sigs); |
| |
| { |
| vki_fsignatures_t *fsigs = (vki_fsignatures_t*)ARG3; |
| PRE_FIELD_READ( "fcntl(F_ADDFILESIGS, fsigs->fs_blob_start)", |
| fsigs->fs_blob_start); |
| PRE_FIELD_READ( "fcntl(F_ADDFILESIGS, fsigs->fs_blob_size)", |
| fsigs->fs_blob_size); |
| } |
| break; |
| |
| default: |
| PRINT("fcntl ( %ld, %ld [??] )", ARG1, ARG2); |
| log_decaying("UNKNOWN fcntl %ld!", ARG2); |
| break; |
| } |
| } |
| |
| POST(fcntl) |
| { |
| vg_assert(SUCCESS); |
| switch (ARG2) { |
| case VKI_F_DUPFD: |
| if (!ML_(fd_allowed)(RES, "fcntl(DUPFD)", tid, True)) { |
| VG_(close)(RES); |
| SET_STATUS_Failure( VKI_EMFILE ); |
| } else { |
| if (VG_(clo_track_fds)) |
| ML_(record_fd_open_named)(tid, RES); |
| } |
| break; |
| |
| case VKI_F_GETFD: |
| case VKI_F_GETFL: |
| case VKI_F_GETOWN: |
| case VKI_F_SETFD: |
| case VKI_F_SETFL: |
| case VKI_F_SETOWN: |
| case VKI_F_GETLK: |
| case VKI_F_SETLK: |
| case VKI_F_SETLKW: |
| # if DARWIN_VERS >= DARWIN_10_10 |
| case VKI_F_SETLKWTIMEOUT: |
| break; |
| # endif |
| |
| case VKI_F_PREALLOCATE: |
| { |
| struct vki_fstore *fstore = (struct vki_fstore *)ARG3; |
| POST_FIELD_WRITE( fstore->fst_bytesalloc ); |
| } |
| break; |
| |
| case VKI_F_LOG2PHYS: |
| POST_MEM_WRITE( ARG3, sizeof(struct vki_log2phys) ); |
| break; |
| |
| case VKI_F_GETPATH: |
| POST_MEM_WRITE( ARG3, 1+VG_(strlen)((char *)ARG3) ); |
| PRINT("\"%s\"", (char*)ARG3); |
| break; |
| |
| default: |
| // DDD: ugh, missing lots of cases here, not nice |
| break; |
| } |
| } |
| |
| /* --------------------------------------------------------------------- |
| unix syscalls |
| ------------------------------------------------------------------ */ |
| |
/* PRE wrapper for futimes(fd, tvp).  Rejects disallowed fds; otherwise,
   if tvp is non-NULL, checks the two timevals (access, modification)
   are readable. */
PRE(futimes)
{
   PRINT("futimes ( %ld, %#lx )", ARG1,ARG2);
   PRE_REG_READ2(long, "futimes", int, fd, struct timeval *, tvp);
   if (!ML_(fd_allowed)(ARG1, "futimes", tid, False)) {
      SET_STATUS_Failure( VKI_EBADF );
   } else if (ARG2 != 0) {
      // tvp points to timeval[2]: check both elements.
      PRE_timeval_READ( "futimes(tvp[0])", ARG2 );
      PRE_timeval_READ( "futimes(tvp[1])", ARG2+sizeof(struct vki_timeval) );
   }
}
| |
/* PRE wrapper for semget(): register args only, no memory to check. */
PRE(semget)
{
   PRINT("semget ( %ld, %ld, %ld )",ARG1,ARG2,ARG3);
   PRE_REG_READ3(long, "semget", vki_key_t, key, int, nsems, int, semflg);
}
| |
| PRE(semop) |
| { |
| *flags |= SfMayBlock; |
| PRINT("semop ( %ld, %#lx, %lu )",ARG1,ARG2,ARG3); |
| PRE_REG_READ3(long, "semop", |
| int, semid, struct sembuf *, sops, vki_size_t, nsoops); |
| ML_(generic_PRE_sys_semop)(tid, ARG1,ARG2,ARG3); |
| } |
| |
/* PRE wrapper for semctl().  The register layout depends on the command
   in ARG3 (whether a 4th "arg" is present and what it points to); the
   actual memory checking is delegated to the generic handler. */
PRE(semctl)
{
   switch (ARG3) {
   case VKI_IPC_STAT:
   case VKI_IPC_SET:
      // arg is a struct semid_ds pointer.
      PRINT("semctl ( %ld, %ld, %ld, %#lx )",ARG1,ARG2,ARG3,ARG4);
      PRE_REG_READ4(long, "semctl",
                    int, semid, int, semnum, int, cmd, struct semid_ds *, arg);
      break;
   case VKI_GETALL:
   case VKI_SETALL:
      // arg is an array of unsigned short values.
      PRINT("semctl ( %ld, %ld, %ld, %#lx )",ARG1,ARG2,ARG3,ARG4);
      PRE_REG_READ4(long, "semctl",
                    int, semid, int, semnum, int, cmd, unsigned short *, arg);
      break;
   case VKI_SETVAL:
      // arg is a plain int value.
      PRINT("semctl ( %ld, %ld, %ld, %#lx )",ARG1,ARG2,ARG3,ARG4);
      PRE_REG_READ4(long, "semctl",
                    int, semid, int, semnum, int, cmd, int, arg);
      break;
   default:
      // Remaining commands take no 4th argument.
      PRINT("semctl ( %ld, %ld, %ld )",ARG1,ARG2,ARG3);
      PRE_REG_READ3(long, "semctl",
                    int, semid, int, semnum, int, cmd);
      break;
   }
   ML_(generic_PRE_sys_semctl)(tid, ARG1,ARG2,ARG3,ARG4);
}
/* POST wrapper for semctl(): delegated entirely to the generic handler. */
POST(semctl)
{
   ML_(generic_POST_sys_semctl)(tid, RES,ARG1,ARG2,ARG3,ARG4);
}
| |
/* PRE wrapper for sem_open().  The syscall has two forms: with O_CREAT
   set in oflag it takes mode and value arguments as well; without it,
   only name and oflag.  The name string must be readable either way. */
PRE(sem_open)
{
   if (ARG2 & VKI_O_CREAT) {
      // 4-arg version
      PRINT("sem_open ( %#lx(%s), %ld, %ld, %ld )",
            ARG1,(char*)ARG1,ARG2,ARG3,ARG4);
      PRE_REG_READ4(vki_sem_t *, "sem_open",
                    const char *, name, int, oflag, vki_mode_t, mode,
                    unsigned int, value);
   } else {
      // 2-arg version
      PRINT("sem_open ( %#lx(%s), %ld )",ARG1,(char*)ARG1,ARG2);
      PRE_REG_READ2(vki_sem_t *, "sem_open",
                    const char *, name, int, oflag);
   }
   PRE_MEM_RASCIIZ( "sem_open(name)", ARG1 );

   /* Otherwise handle normally */
   *flags |= SfMayBlock;
}
| |
/* PRE wrapper for sem_close(): register arg only. */
PRE(sem_close)
{
   PRINT("sem_close( %#lx )", ARG1);
   PRE_REG_READ1(int, "sem_close", vki_sem_t *, sem);
}
| |
/* PRE wrapper for sem_unlink(): the name string must be readable. */
PRE(sem_unlink)
{
   PRINT("sem_unlink( %#lx(%s) )", ARG1,(char*)ARG1);
   PRE_REG_READ1(int, "sem_unlink", const char *, name);
   PRE_MEM_RASCIIZ( "sem_unlink(name)", ARG1 );
}
| |
/* PRE wrapper for sem_post(): register arg only; may block. */
PRE(sem_post)
{
   PRINT("sem_post( %#lx )", ARG1);
   PRE_REG_READ1(int, "sem_post", vki_sem_t *, sem);
   *flags |= SfMayBlock;
}
| |
/* PRE wrapper for sem_destroy(): the semaphore object must be readable. */
PRE(sem_destroy)
{
  PRINT("sem_destroy( %#lx )", ARG1);
  PRE_REG_READ1(int, "sem_destroy", vki_sem_t *, sem);
  PRE_MEM_READ("sem_destroy(sem)", ARG1, sizeof(vki_sem_t));
}
| |
/* PRE wrapper for sem_init(): kernel writes the semaphore object. */
PRE(sem_init)
{
  PRINT("sem_init( %#lx, %ld, %ld )", ARG1, ARG2, ARG3);
  PRE_REG_READ3(int, "sem_init", vki_sem_t *, sem,
                int, pshared, unsigned int, value);
  PRE_MEM_WRITE("sem_init(sem)", ARG1, sizeof(vki_sem_t));
}
| |
/* POST wrapper for sem_init(): mark the initialized semaphore defined. */
POST(sem_init)
{
  POST_MEM_WRITE(ARG1, sizeof(vki_sem_t));
}
| |
/* PRE wrapper for sem_wait(): register arg only; may block. */
PRE(sem_wait)
{
   PRINT("sem_wait( %#lx )", ARG1);
   PRE_REG_READ1(int, "sem_wait", vki_sem_t *, sem);
   *flags |= SfMayBlock;
}
| |
/* PRE wrapper for sem_trywait(): register arg only; may block. */
PRE(sem_trywait)
{
   PRINT("sem_trywait( %#lx )", ARG1);
   PRE_REG_READ1(int, "sem_trywait", vki_sem_t *, sem);
   *flags |= SfMayBlock;
}
| |
/* PRE wrapper for kqueue(): no arguments to check. */
PRE(kqueue)
{
   PRINT("kqueue()");
}
| |
| POST(kqueue) |
| { |
| if (!ML_(fd_allowed)(RES, "kqueue", tid, True)) { |
| VG_(close)(RES); |
| SET_STATUS_Failure( VKI_EMFILE ); |
| } else { |
| if (VG_(clo_track_fds)) { |
| ML_(record_fd_open_with_given_name)(tid, RES, NULL); |
| } |
| } |
| } |
| |
/* PRE wrapper for fileport_makeport(): stub — traces only, no checking
   yet (hence the FIXME in the message). */
PRE(fileport_makeport)
{
    PRINT("fileport_makeport(fd:%#lx, portnamep:%#lx) FIXME",
      ARG1, ARG2);
}
| |
/* PRE wrapper for guarded_open_np(): stub — traces only, no checking
   yet (hence the FIXME in the message). */
PRE(guarded_open_np)
{
    PRINT("guarded_open_np(path:%#lx(%s), guard:%#lx, guardflags:%#lx, flags:%#lx) FIXME",
      ARG1, (char*)ARG1, ARG2, ARG3, ARG4);
}
| |
/* PRE wrapper for guarded_kqueue_np(): stub — traces only, no checking
   yet (hence the FIXME in the message). */
PRE(guarded_kqueue_np)
{
    PRINT("guarded_kqueue_np(guard:%#lx, guardflags:%#lx) FIXME",
      ARG1, ARG2);
}
| |
| POST(guarded_kqueue_np) |
| { |
| if (!ML_(fd_allowed)(RES, "guarded_kqueue_np", tid, True)) { |
| VG_(close)(RES); |
| SET_STATUS_Failure( VKI_EMFILE ); |
| } else { |
| if (VG_(clo_track_fds)) { |
| ML_(record_fd_open_with_given_name)(tid, RES, NULL); |
| } |
| } |
| } |
| |
/* PRE wrapper for guarded_close_np(): stub — traces only, no checking
   yet (hence the FIXME in the message). */
PRE(guarded_close_np)
{
    PRINT("guarded_close_np(fd:%#lx, guard:%#lx) FIXME",
      ARG1, ARG2);
}
| |
/* PRE wrapper for change_fdguard_np(): stub — traces only, no checking
   yet (hence the FIXME in the message). */
PRE(change_fdguard_np)
{
    PRINT("change_fdguard_np(fd:%#lx, guard:%#lx, guardflags:%#lx, nguard:%#lx, nguardflags:%#lx, fdflagsp:%#lx) FIXME",
      ARG1, ARG2, ARG3, ARG4, ARG5, ARG6);
}
| |
/* PRE wrapper for connectx(): stub — traces only, no checking yet
   (hence the FIXME in the message). */
PRE(connectx)
{
    PRINT("connectx(s:%#lx, src:%#lx, srclen:%#lx, dsts:%#lx, dstlen:%#lx, ifscope:%#lx, aid:%#lx, out_cid:%#lx) FIXME",
      ARG1, ARG2, ARG3, ARG4, ARG5, ARG6, ARG7, ARG8);
}
| |
/* PRE wrapper for disconnectx(): stub — traces only, no checking yet
   (hence the FIXME in the message). */
PRE(disconnectx)
{
    PRINT("disconnectx(s:%#lx, aid:%#lx, cid:%#lx) FIXME",
      ARG1, ARG2, ARG3);
}
| |
| |
/* PRE wrapper for kevent().  The changelist (nchanges entries) and the
   optional timeout are read by the kernel; the eventlist (up to nevents
   entries) is written.  May block waiting for events. */
PRE(kevent)
{
   PRINT("kevent( %ld, %#lx, %ld, %#lx, %ld, %#lx )",
         ARG1, ARG2, ARG3, ARG4, ARG5, ARG6);
   PRE_REG_READ6(int,"kevent", int,kq,
                 const struct vki_kevent *,changelist, int,nchanges,
                 struct vki_kevent *,eventlist, int,nevents,
                 const struct vki_timespec *,timeout);

   if (ARG3) PRE_MEM_READ ("kevent(changelist)",
                           ARG2, ARG3 * sizeof(struct vki_kevent));
   if (ARG5) PRE_MEM_WRITE("kevent(eventlist)",
                           ARG4, ARG5 * sizeof(struct vki_kevent));
   if (ARG6) PRE_MEM_READ ("kevent(timeout)",
                           ARG6, sizeof(struct vki_timespec));

   *flags |= SfMayBlock;
}
| |
/* POST wrapper for kevent(): RES is the number of events delivered; mark
   that many eventlist entries defined. */
POST(kevent)
{
   PRINT("kevent ret %ld dst %#lx (%zu)", RES, ARG4, sizeof(struct vki_kevent));
   if (RES > 0) POST_MEM_WRITE(ARG4, RES * sizeof(struct vki_kevent));
}
| |
| |
/* PRE wrapper for kevent64(): same shape as kevent() but with the wider
   struct kevent64 entries. */
PRE(kevent64)
{
   PRINT("kevent64( %ld, %#lx, %ld, %#lx, %ld, %#lx )",
         ARG1, ARG2, ARG3, ARG4, ARG5, ARG6);
   PRE_REG_READ6(int,"kevent64", int,kq,
                 const struct vki_kevent64 *,changelist, int,nchanges,
                 struct vki_kevent64 *,eventlist, int,nevents,
                 const struct vki_timespec *,timeout);

   if (ARG3) PRE_MEM_READ ("kevent64(changelist)",
                           ARG2, ARG3 * sizeof(struct vki_kevent64));
   if (ARG5) PRE_MEM_WRITE("kevent64(eventlist)",
                           ARG4, ARG5 * sizeof(struct vki_kevent64));
   if (ARG6) PRE_MEM_READ ("kevent64(timeout)",
                           ARG6, sizeof(struct vki_timespec));

   *flags |= SfMayBlock;
}
| |
/* POST wrapper for kevent64(): mark delivered entries defined and resync
   our view of the address space (the kernel may have changed mappings
   while we were blocked). */
POST(kevent64)
{
   PRINT("kevent64 ret %ld dst %#lx (%zu)", RES, ARG4, sizeof(struct vki_kevent64));
   if (RES > 0) {
      ML_(sync_mappings)("after", "kevent64", 0);
      POST_MEM_WRITE(ARG4, RES * sizeof(struct vki_kevent64));
   }
}
| |
| |
// Client's real thread entry points and pthread struct size, captured in
// PRE(bsdthread_register) before the args are redirected to our hijack
// trampolines.
Addr pthread_starter = 0;
Addr wqthread_starter = 0;
SizeT pthread_structsize = 0;
| |
/* PRE wrapper for __bsdthread_register().  Saves the client's thread and
   workqueue entry points, then rewrites ARG1/ARG2 so the kernel starts
   new threads in our hijack trampolines instead; the trampolines later
   transfer control to the saved client entry points. */
PRE(bsdthread_register)
{
   PRINT("bsdthread_register( %#lx, %#lx, %lu )", ARG1, ARG2, ARG3);
   PRE_REG_READ3(int,"__bsdthread_register", void *,"threadstart",
                 void *,"wqthread", size_t,"pthsize");

   pthread_starter = ARG1;
   wqthread_starter = ARG2;
   pthread_structsize = ARG3;
   ARG1 = (Word)&pthread_hijack_asm;
   ARG2 = (Word)&wqthread_hijack_asm;
}
| |
/* PRE wrapper for workq_open(): no arguments. */
PRE(workq_open)
{
   PRINT("workq_open()");
   PRE_REG_READ0(int, "workq_open");

   // This creates lots of threads and thread stacks under the covers,
   // but we ignore them all until some work item starts running on it.
}
| |
| static const HChar *workqop_name(int op) |
| { |
| switch (op) { |
| case VKI_WQOPS_QUEUE_ADD: return "QUEUE_ADD"; |
| case VKI_WQOPS_QUEUE_REMOVE: return "QUEUE_REMOVE"; |
| case VKI_WQOPS_THREAD_RETURN: return "THREAD_RETURN"; |
| case VKI_WQOPS_THREAD_SETCONC: return "THREAD_SETCONC"; |
| case VKI_WQOPS_QUEUE_NEWSPISUPP: return "QUEUE_NEWSPISUPP"; |
| case VKI_WQOPS_QUEUE_REQTHREADS: return "QUEUE_REQTHREADS"; |
| default: return "?"; |
| } |
| } |
| |
| |
/* PRE wrapper for workq_ops().  Dispatches on the operation in ARG1;
   the THREAD_RETURN case arms the workqueue longjmp machinery so the
   scheduler can recover if the kernel restarts the thread in
   wqthread_hijack rather than returning. */
PRE(workq_ops)
{
   PRINT("workq_ops( %ld(%s), %#lx, %ld )", ARG1, workqop_name(ARG1), ARG2,
      ARG3);
   PRE_REG_READ3(int,"workq_ops", int,"options", void *,"item",
                 int,"priority");

   switch (ARG1) {
   case VKI_WQOPS_QUEUE_ADD:
   case VKI_WQOPS_QUEUE_REMOVE:
      // GrP fixme need anything here?
      // GrP fixme may block?
      break;
   case VKI_WQOPS_QUEUE_NEWSPISUPP:
      // JRS don't think we need to do anything here -- this just checks
      // whether some newer functionality is supported
      break;
   case VKI_WQOPS_QUEUE_REQTHREADS:
      // JRS uh, looks like it queues up a bunch of threads, or some such?
      *flags |= SfMayBlock; // the kernel sources take a spinlock, so play safe
      break;
   case VKI_WQOPS_THREAD_RETURN: {
      // The interesting case. The kernel will do one of two things:
      // 1. Return normally. We continue; libc proceeds to stop the thread.
      //    V does nothing special here.
      // 2. Jump to wqthread_hijack. This wipes the stack and runs a
      //    new work item, and never returns from workq_ops.
      //    V handles this by longjmp() from wqthread_hijack back to the
      //    scheduler, which continues at the new client SP/IP/state.
      //    This works something like V's signal handling.
      //    To the tool, this looks like workq_ops() sometimes returns
      //    to a strange address.
      ThreadState *tst = VG_(get_ThreadState)(tid);
      tst->os_state.wq_jmpbuf_valid = True;
      *flags |= SfMayBlock; // GrP fixme true?
      break;
   }
   default:
      VG_(printf)("UNKNOWN workq_ops option %ld\n", ARG1);
      break;
   }
}
| POST(workq_ops) |
| { |
| ThreadState *tst = VG_(get_ThreadState)(tid); |
| tst->os_state.wq_jmpbuf_valid = False; |
| switch (ARG1) { |
| case VKI_WQOPS_THREAD_RETURN: |
| ML_(sync_mappings)("after", "workq_ops(THREAD_RETURN)", 0); |
| break; |
| case VKI_WQOPS_QUEUE_REQTHREADS: |
| ML_(sync_mappings)("after", "workq_ops(QUEUE_REQTHREADS)", 0); |
| break; |
| default: |
| break; |
| } |
| } |
| |
| |
| |
/* PRE wrapper for __mac_syscall(): registers only; the policy string and
   per-call argument are not yet checked. */
PRE(__mac_syscall)
{
   PRINT("__mac_syscall( %#lx(%s), %ld, %#lx )",
         ARG1, (HChar*)ARG1, ARG2, ARG3);
   PRE_REG_READ3(int,"__mac_syscall", char *,"policy",
                 int,"call", void *,"arg");

   // GrP fixme check call's arg?
   // GrP fixme check policy?
}
| |
| |
| /* Not like syswrap-generic's sys_exit, which exits only one thread. |
| More like syswrap-generic's sys_exit_group. */ |
| PRE(exit) |
| { |
| ThreadId t; |
| ThreadState* tst; |
| |
| PRINT("darwin exit( %ld )", ARG1); |
| PRE_REG_READ1(void, "exit", int, status); |
| |
| tst = VG_(get_ThreadState)(tid); |
| |
| /* A little complex; find all the threads with the same threadgroup |
| as this one (including this one), and mark them to exit */ |
| for (t = 1; t < VG_N_THREADS; t++) { |
| if ( /* not alive */ |
| VG_(threads)[t].status == VgTs_Empty |
| /* GrP fixme zombie? */ |
| ) |
| continue; |
| |
| VG_(threads)[t].exitreason = VgSrc_ExitProcess; |
| VG_(threads)[t].os_state.exitcode = ARG1; |
| |
| if (t != tid) |
| VG_(get_thread_out_of_syscall)(t); /* unblock it, if blocked */ |
| } |
| |
| /* We have to claim the syscall already succeeded. */ |
| SET_STATUS_Success(0); |
| } |
| |
| |
/* PRE wrapper for sigaction().  Checks the readable/writable fields,
   then performs the whole operation inside Valgrind via
   VG_(do_sys_sigaction) — the real syscall is never made, so the status
   is set here in the PRE handler. */
PRE(sigaction)
{
   PRINT("sigaction ( %ld, %#lx, %#lx )", ARG1,ARG2,ARG3);
   PRE_REG_READ3(long, "sigaction",
                 int, signum, vki_sigaction_toK_t *, act,
                 vki_sigaction_fromK_t *, oldact);

   if (ARG2 != 0) {
      // Check each field the kernel-side handler reads.
      vki_sigaction_toK_t *sa = (vki_sigaction_toK_t *)ARG2;
      PRE_MEM_READ( "sigaction(act->sa_handler)",
                    (Addr)&sa->ksa_handler, sizeof(sa->ksa_handler));
      PRE_MEM_READ( "sigaction(act->sa_mask)",
                    (Addr)&sa->sa_mask, sizeof(sa->sa_mask));
      PRE_MEM_READ( "sigaction(act->sa_flags)",
                    (Addr)&sa->sa_flags, sizeof(sa->sa_flags));
   }
   if (ARG3 != 0)
      PRE_MEM_WRITE( "sigaction(oldact)",
                     ARG3, sizeof(vki_sigaction_fromK_t));

   SET_STATUS_from_SysRes(
      VG_(do_sys_sigaction)(ARG1, (const vki_sigaction_toK_t *)ARG2,
                                  (vki_sigaction_fromK_t *)ARG3)
   );
}
| POST(sigaction) |
| { |
| vg_assert(SUCCESS); |
| if (RES == 0 && ARG3 != 0) |
| POST_MEM_WRITE( ARG3, sizeof(vki_sigaction_fromK_t)); |
| } |
| |
| |
/* __pthread_kill(thread, sig): both arguments are registers, so only
   PRE_REG_READ bookkeeping is needed here. */
PRE(__pthread_kill)
{
   PRINT("__pthread_kill ( %ld, %ld )", ARG1, ARG2);
   PRE_REG_READ2(long, "__pthread_kill", vki_pthread_t*, thread, int, sig);
}
| |
| |
/* __pthread_sigmask: not implemented.  We emit a (rate-limited)
   warning and claim success without changing any mask. */
PRE(__pthread_sigmask)
{
   // GrP fixme
   // JRS: arguments are identical to sigprocmask
   // (how, sigset_t*, sigset_t*).  Perhaps behave identically?
   log_decaying("UNKNOWN __pthread_sigmask is unsupported.");
   SET_STATUS_Success( 0 );
}
| |
| |
| PRE(__pthread_canceled) |
| { |
| *flags |= SfMayBlock; /* might kill this thread??? */ |
| /* I don't think so -- I think it just changes the cancellation |
| state. But taking no chances. */ |
| PRINT("__pthread_canceled ( %ld )", ARG1); |
| PRE_REG_READ1(long, "__pthread_canceled", void*, arg1); |
| } |
| |
| |
| PRE(__pthread_markcancel) |
| { |
| *flags |= SfMayBlock; /* might kill this thread??? */ |
| PRINT("__pthread_markcancel ( %#lx )", ARG1); |
| PRE_REG_READ1(long, "__pthread_markcancel", void*, arg1); |
| /* Just let it go through. No idea if this is correct. */ |
| } |
| |
| |
| PRE(__disable_threadsignal) |
| { |
| vki_sigset_t set; |
| PRINT("__disable_threadsignal(%ld, %ld, %ld)", ARG1, ARG2, ARG3); |
| /* I don't think this really looks at its arguments. So don't |
| bother to check them. */ |
| |
| VG_(sigfillset)( &set ); |
| SET_STATUS_from_SysRes( |
| VG_(do_sys_sigprocmask) ( tid, VKI_SIG_BLOCK, &set, NULL ) |
| ); |
| |
| /* We don't expect that blocking all signals for this thread could |
| cause any more to be delivered (how could it?), but just in case |
| .. */ |
| if (SUCCESS) |
| *flags |= SfPollAfter; |
| } |
| |
| |
/* kdebug_trace: kernel tracing hook.  Argument checking is
   deliberately disabled -- see the note below. */
PRE(kdebug_trace)
{
   PRINT("kdebug_trace(%ld, %ld, %ld, %ld, %ld, %ld)",
         ARG1, ARG2, ARG3, ARG4, ARG5, ARG6);
   /*
      Don't check anything - some clients pass fewer arguments.
      PRE_REG_READ6(long, "kdebug_trace",
                    int,"code", int,"arg1", int,"arg2",
                    int,"arg3", int,"arg4", int,"arg5");
   */
}
| |
| |
/* seteuid(uid): scalar register argument; nothing in memory to check. */
PRE(seteuid)
{
   PRINT("seteuid(%ld)", ARG1);
   PRE_REG_READ1(long, "seteuid", vki_uid_t, "uid");
}
| |
| |
| PRE(setegid) |
| { |
| PRINT("setegid(%ld)", ARG1); |
| PRE_REG_READ1(long, "setegid", vki_uid_t, "uid"); |
| } |
| |
/* settid(uid, gid): both arguments are scalars, so only register
   bookkeeping is needed.  (Presumably sets the thread's per-thread
   identity override -- see xnu; not relied on here.) */
PRE(settid)
{
   PRINT("settid(%ld, %ld)", ARG1, ARG2);
   PRE_REG_READ2(long, "settid", vki_uid_t, "uid", vki_gid_t, "gid");
}
| |
/* gettid(): no arguments; nothing to check. */
PRE(gettid)
{
   PRINT("gettid()");
   PRE_REG_READ0(long, gettid);
}
| |
| /* XXX need to check whether we need POST operations for |
| * waitevent, watchevent, modwatch -- jpeach |
| */ |
| PRE(watchevent) |
| { |
| PRINT("watchevent(%#lx, %#lx)", ARG1, ARG2); |
| PRE_REG_READ2(long, "watchevent", |
| vki_eventreq *, "event", unsigned int, "eventmask"); |
| |
| PRE_MEM_READ("watchevent(event)", ARG1, sizeof(vki_eventreq)); |
| PRE_MEM_READ("watchevent(eventmask)", ARG2, sizeof(unsigned int)); |
| *flags |= SfMayBlock; |
| } |
| |
| #define WAITEVENT_FAST_POLL ((Addr)(struct timeval *)-1) |
| PRE(waitevent) |
| { |
| PRINT("waitevent(%#lx, %#lx)", ARG1, ARG2); |
| PRE_REG_READ2(long, "waitevent", |
| vki_eventreq *, "event", struct timeval *, "timeout"); |
| PRE_MEM_WRITE("waitevent(event)", ARG1, sizeof(vki_eventreq)); |
| |
| if (ARG2 && ARG2 != WAITEVENT_FAST_POLL) { |
| PRE_timeval_READ("waitevent(timeout)", ARG2); |
| } |
| |
| /* XXX ((timeval*)-1) is valid for ARG2 -- jpeach */ |
| *flags |= SfMayBlock; |
| } |
| |
/* The kernel filled in the event record on return. */
POST(waitevent)
{
   POST_MEM_WRITE(ARG1, sizeof(vki_eventreq));
}
| |
| PRE(modwatch) |
| { |
| PRINT("modwatch(%#lx, %#lx)", ARG1, ARG2); |
| PRE_REG_READ2(long, "modwatch", |
| vki_eventreq *, "event", unsigned int, "eventmask"); |
| |
| PRE_MEM_READ("modwatch(event)", ARG1, sizeof(vki_eventreq)); |
| PRE_MEM_READ("modwatch(eventmask)", ARG2, sizeof(unsigned int)); |
| } |
| |
/* getxattr(path, name, value, size, position, options): read an
   extended attribute.  A NULL 'value' buffer is a size probe, hence
   the guard before marking the output buffer writable. */
PRE(getxattr)
{
   PRINT("getxattr(%#lx(%s), %#lx(%s), %#lx, %lu, %lu, %ld)",
         ARG1, (char *)ARG1, ARG2, (char *)ARG2, ARG3, ARG4, ARG5, ARG6);

   PRE_REG_READ6(vki_ssize_t, "getxattr",
                const char *, path, char *, name, void *, value,
                vki_size_t, size, uint32_t, position, int, options);
   PRE_MEM_RASCIIZ("getxattr(path)", ARG1);
   PRE_MEM_RASCIIZ("getxattr(name)", ARG2);
   if (ARG3)
      PRE_MEM_WRITE( "getxattr(value)", ARG3, ARG4);
}

/* On success RES is the number of bytes placed in 'value'. */
POST(getxattr)
{
   vg_assert((vki_ssize_t)RES >= 0);
   if (ARG3)
      POST_MEM_WRITE(ARG3, (vki_ssize_t)RES);
}
| |
| PRE(fgetxattr) |
| { |
| PRINT("fgetxattr(%ld, %#lx(%s), %#lx, %lu, %lu, %ld)", |
| ARG1, ARG2, (char *)ARG2, ARG3, ARG4, ARG5, ARG6); |
| |
| PRE_REG_READ6(vki_ssize_t, "fgetxattr", |
| int, fd, char *, name, void *, value, |
| vki_size_t, size, uint32_t, position, int, options); |
| PRE_MEM_RASCIIZ("getxattr(name)", ARG2); |
| PRE_MEM_WRITE( "getxattr(value)", ARG3, ARG4); |
| } |
| |
| POST(fgetxattr) |
| { |
| vg_assert((vki_ssize_t)RES >= 0); |
| POST_MEM_WRITE(ARG3, (vki_ssize_t)RES); |
| } |
| |
/* setxattr(path, name, value, size, position, options): both strings
   are NUL-terminated; 'value' supplies 'size' bytes of data. */
PRE(setxattr)
{
   PRINT("setxattr ( %#lx(%s), %#lx(%s), %#lx, %lu, %lu, %ld )",
         ARG1, (char *)ARG1, ARG2, (char*)ARG2, ARG3, ARG4, ARG5, ARG6 );
   PRE_REG_READ6(int, "setxattr",
                 const char *,"path", char *,"name", void *,"value",
                 vki_size_t,"size", uint32_t,"position", int,"options" );

   PRE_MEM_RASCIIZ( "setxattr(path)", ARG1 );
   PRE_MEM_RASCIIZ( "setxattr(name)", ARG2 );
   PRE_MEM_READ( "setxattr(value)", ARG3, ARG4 );
}
| |
| |
/* fsetxattr(fd, name, value, size, position, options): fd flavour of
   setxattr; 'value' supplies 'size' bytes of data. */
PRE(fsetxattr)
{
   PRINT( "fsetxattr ( %ld, %#lx(%s), %#lx, %lu, %lu, %ld )",
          ARG1, ARG2, (char*)ARG2, ARG3, ARG4, ARG5, ARG6 );
   PRE_REG_READ6(int, "fsetxattr",
                 int,"fd", char *,"name", void *,"value",
                 vki_size_t,"size", uint32_t,"position", int,"options" );

   PRE_MEM_RASCIIZ( "fsetxattr(name)", ARG2 );
   PRE_MEM_READ( "fsetxattr(value)", ARG3, ARG4 );
}
| |
| |
/* removexattr(path, attrname, options): both strings NUL-terminated. */
PRE(removexattr)
{
   PRINT( "removexattr ( %#lx(%s), %#lx(%s), %ld )",
          ARG1, (HChar*)ARG1, ARG2, (HChar*)ARG2, ARG3 );
   PRE_REG_READ3(int, "removexattr",
                 const char*, "path", char*, "attrname", int, "options");
   PRE_MEM_RASCIIZ( "removexattr(path)", ARG1 );
   PRE_MEM_RASCIIZ( "removexattr(attrname)", ARG2 );
}
| |
| |
| PRE(fremovexattr) |
| { |
| PRINT( "fremovexattr ( %ld, %#lx(%s), %ld )", |
| ARG1, ARG2, (HChar*)ARG2, ARG3 ); |
| PRE_REG_READ3(int, "fremovexattr", |
| int, "fd", char*, "attrname", int, "options"); |
| PRE_MEM_RASCIIZ( "removexattr(attrname)", ARG2 ); |
| } |
| |
| |
/* listxattr(path, namebuf, size, options): fill 'namebuf' with up to
   'size' bytes of attribute names. */
PRE(listxattr)
{
   PRINT( "listxattr ( %#lx(%s), %#lx, %lu, %ld )",
          ARG1, (char *)ARG1, ARG2, ARG3, ARG4 );
   PRE_REG_READ4 (long, "listxattr",
                  const char *,"path", char *,"namebuf",
                  vki_size_t,"size", int,"options" );

   PRE_MEM_RASCIIZ( "listxattr(path)", ARG1 );
   PRE_MEM_WRITE( "listxattr(namebuf)", ARG2, ARG3 );
   *flags |= SfMayBlock;
}
/* On success RES is the number of bytes actually written to namebuf. */
POST(listxattr)
{
   vg_assert(SUCCESS);
   vg_assert((vki_ssize_t)RES >= 0);
   POST_MEM_WRITE( ARG2, (vki_ssize_t)RES );
}
| |
| |
/* flistxattr(fd, namebuf, size, options): fd flavour of listxattr. */
PRE(flistxattr)
{
   PRINT( "flistxattr ( %ld, %#lx, %lu, %ld )",
          ARG1, ARG2, ARG3, ARG4 );
   PRE_REG_READ4 (long, "flistxattr",
                  int, "fd", char *,"namebuf",
                  vki_size_t,"size", int,"options" );
   PRE_MEM_WRITE( "flistxattr(namebuf)", ARG2, ARG3 );
   *flags |= SfMayBlock;
}
/* On success RES is the number of bytes actually written to namebuf. */
POST(flistxattr)
{
   vg_assert(SUCCESS);
   vg_assert((vki_ssize_t)RES >= 0);
   POST_MEM_WRITE( ARG2, (vki_ssize_t)RES );
}
| |
| |
| PRE(shmat) |
| { |
| UWord arg2tmp; |
| PRINT("shmat ( %ld, %#lx, %ld )",ARG1,ARG2,ARG3); |
| PRE_REG_READ3(long, "shmat", |
| int, shmid, const void *, shmaddr, int, shmflg); |
| arg2tmp = ML_(generic_PRE_sys_shmat)(tid, ARG1,ARG2,ARG3); |
| if (arg2tmp == 0) |
| SET_STATUS_Failure( VKI_EINVAL ); |
| else |
| ARG2 = arg2tmp; // used in POST |
| } |
/* Note: ARG2 here is the address chosen in PRE(shmat), not necessarily
   the caller's original shmaddr. */
POST(shmat)
{
   ML_(generic_POST_sys_shmat)(tid, RES,ARG1,ARG2,ARG3);
}
| |
/* shmctl: defer all checking and updating to the generic SysV shared
   memory helpers. */
PRE(shmctl)
{
   PRINT("shmctl ( %ld, %ld, %#lx )",ARG1,ARG2,ARG3);
   PRE_REG_READ3(long, "shmctl",
                 int, shmid, int, cmd, struct vki_shmid_ds *, buf);
   ML_(generic_PRE_sys_shmctl)(tid, ARG1,ARG2,ARG3);
}
POST(shmctl)
{
   ML_(generic_POST_sys_shmctl)(tid, RES,ARG1,ARG2,ARG3);
}
| |
/* shmdt: the generic helper validates the address; a zero return from
   it means the address is not an acceptable detach target. */
PRE(shmdt)
{
   PRINT("shmdt ( %#lx )",ARG1);
   PRE_REG_READ1(long, "shmdt", const void *, shmaddr);
   if (!ML_(generic_PRE_sys_shmdt)(tid, ARG1))
      SET_STATUS_Failure( VKI_EINVAL );
}
POST(shmdt)
{
   ML_(generic_POST_sys_shmdt)(tid, RES,ARG1);
}
| |
/* shmget: all three arguments are scalars; nothing in memory to check. */
PRE(shmget)
{
   PRINT("shmget ( %ld, %ld, %ld )",ARG1,ARG2,ARG3);
   PRE_REG_READ3(long, "shmget", vki_key_t, key, vki_size_t, size, int, shmflg);
}
| |
| PRE(shm_open) |
| { |
| PRINT("shm_open(%#lx(%s), %ld, %ld)", ARG1, (char *)ARG1, ARG2, ARG3); |
| PRE_REG_READ3(long, "shm_open", |
| const char *,"name", int,"flags", vki_mode_t,"mode"); |
| |
| PRE_MEM_RASCIIZ( "shm_open(filename)", ARG1 ); |
| |
| *flags |= SfMayBlock; |
| } |
| POST(shm_open) |
| { |
| vg_assert(SUCCESS); |
| if (!ML_(fd_allowed)(RES, "shm_open", tid, True)) { |
| VG_(close)(RES); |
| SET_STATUS_Failure( VKI_EMFILE ); |
| } else { |
| if (VG_(clo_track_fds)) |
| ML_(record_fd_open_with_given_name)(tid, RES, (char*)ARG1); |
| } |
| } |
| |
| PRE(shm_unlink) |
| { |
| *flags |= SfMayBlock; |
| PRINT("shm_unlink ( %#lx(%s) )", ARG1,(char*)ARG1); |
| PRE_REG_READ1(long, "shm_unlink", const char *, pathname); |
| PRE_MEM_RASCIIZ( "shm_unlink(pathname)", ARG1 ); |
| } |
POST(shm_unlink)
{
   /* My reading of the man page suggests that a call may cause memory
      mappings to change: "if no references exist at the time of the
      call to shm_unlink(), the resources are reclaimed immediately".
      So we need to resync here, sigh. */
   ML_(sync_mappings)("after", "shm_unlink", 0);
}
| |
/* stat_extended(file_name, buf, fsacl, fsacl_size): stat() plus ACL
   retrieval.  *fsacl_size is read before the call (capacity of
   'fsacl') and written back after it; we dereference ARG4 only when
   it is safely addressable. */
PRE(stat_extended)
{
   PRINT("stat_extended( %#lx(%s), %#lx, %#lx, %#lx )",
         ARG1, (char *)ARG1, ARG2, ARG3, ARG4);
   PRE_REG_READ4(int, "stat_extended", char *, file_name, struct stat *, buf,
                 void *, fsacl, vki_size_t *, fsacl_size);
   PRE_MEM_RASCIIZ( "stat_extended(file_name)", ARG1 );
   PRE_MEM_WRITE( "stat_extended(buf)", ARG2, sizeof(struct vki_stat) );
   if (ML_(safe_to_deref)( (void*)ARG4, sizeof(vki_size_t) ))
      PRE_MEM_WRITE("stat_extended(fsacl)", ARG3, *(vki_size_t *)ARG4 );
   PRE_MEM_READ( "stat_extended(fsacl_size)", ARG4, sizeof(vki_size_t) );
}
POST(stat_extended)
{
   POST_MEM_WRITE( ARG2, sizeof(struct vki_stat) );
   if (ML_(safe_to_deref)( (void*)ARG4, sizeof(vki_size_t) ))
      POST_MEM_WRITE( ARG3, *(vki_size_t *)ARG4 );
   POST_MEM_WRITE( ARG4, sizeof(vki_size_t) );
}
| |
| |
/* lstat_extended: as stat_extended, but with lstat() semantics for
   the path lookup. */
PRE(lstat_extended)
{
   PRINT("lstat_extended( %#lx(%s), %#lx, %#lx, %#lx )",
         ARG1, (char *)ARG1, ARG2, ARG3, ARG4);
   PRE_REG_READ4(int, "lstat_extended", char *, file_name, struct stat *, buf,
                 void *, fsacl, vki_size_t *, fsacl_size);
   PRE_MEM_RASCIIZ( "lstat_extended(file_name)", ARG1 );
   PRE_MEM_WRITE( "lstat_extended(buf)", ARG2, sizeof(struct vki_stat) );
   if (ML_(safe_to_deref)( (void*)ARG4, sizeof(vki_size_t) ))
      PRE_MEM_WRITE("lstat_extended(fsacl)", ARG3, *(vki_size_t *)ARG4 );
   PRE_MEM_READ( "lstat_extended(fsacl_size)", ARG4, sizeof(vki_size_t) );
}
POST(lstat_extended)
{
   POST_MEM_WRITE( ARG2, sizeof(struct vki_stat) );
   if (ML_(safe_to_deref)( (void*)ARG4, sizeof(vki_size_t) ))
      POST_MEM_WRITE( ARG3, *(vki_size_t *)ARG4 );
   POST_MEM_WRITE( ARG4, sizeof(vki_size_t) );
}
| |
| |
/* fstat_extended: as stat_extended, but on an open file descriptor. */
PRE(fstat_extended)
{
   PRINT("fstat_extended( %ld, %#lx, %#lx, %#lx )",
         ARG1, ARG2, ARG3, ARG4);
   PRE_REG_READ4(int, "fstat_extended", int, fd, struct stat *, buf,
                 void *, fsacl, vki_size_t *, fsacl_size);
   PRE_MEM_WRITE( "fstat_extended(buf)", ARG2, sizeof(struct vki_stat) );
   if (ML_(safe_to_deref)( (void*)ARG4, sizeof(vki_size_t) ))
      PRE_MEM_WRITE("fstat_extended(fsacl)", ARG3, *(vki_size_t *)ARG4 );
   PRE_MEM_READ( "fstat_extended(fsacl_size)", ARG4, sizeof(vki_size_t) );
}
POST(fstat_extended)
{
   POST_MEM_WRITE( ARG2, sizeof(struct vki_stat) );
   if (ML_(safe_to_deref)( (void*)ARG4, sizeof(vki_size_t) ))
      POST_MEM_WRITE( ARG3, *(vki_size_t *)ARG4 );
   POST_MEM_WRITE( ARG4, sizeof(vki_size_t) );
}
| |
| |
/* stat64_extended: as stat_extended, but fills a struct stat64. */
PRE(stat64_extended)
{
   PRINT("stat64_extended( %#lx(%s), %#lx, %#lx, %#lx )",
         ARG1, (char *)ARG1, ARG2, ARG3, ARG4);
   PRE_REG_READ4(int, "stat64_extended", char *, file_name, struct stat64 *, buf,
                 void *, fsacl, vki_size_t *, fsacl_size);
   PRE_MEM_RASCIIZ( "stat64_extended(file_name)", ARG1 );
   PRE_MEM_WRITE( "stat64_extended(buf)", ARG2, sizeof(struct vki_stat64) );
   if (ML_(safe_to_deref)( (void*)ARG4, sizeof(vki_size_t) ))
      PRE_MEM_WRITE("stat64_extended(fsacl)", ARG3, *(vki_size_t *)ARG4 );
   PRE_MEM_READ( "stat64_extended(fsacl_size)", ARG4, sizeof(vki_size_t) );
}
POST(stat64_extended)
{
   POST_MEM_WRITE( ARG2, sizeof(struct vki_stat64) );
   if (ML_(safe_to_deref)( (void*)ARG4, sizeof(vki_size_t) ))
      POST_MEM_WRITE( ARG3, *(vki_size_t *)ARG4 );
   POST_MEM_WRITE( ARG4, sizeof(vki_size_t) );
}
| |
| |
/* lstat64_extended: as stat64_extended, but with lstat() semantics. */
PRE(lstat64_extended)
{
   PRINT("lstat64_extended( %#lx(%s), %#lx, %#lx, %#lx )",
         ARG1, (char *)ARG1, ARG2, ARG3, ARG4);
   PRE_REG_READ4(int, "lstat64_extended", char *, file_name, struct stat64 *, buf,
                 void *, fsacl, vki_size_t *, fsacl_size);
   PRE_MEM_RASCIIZ( "lstat64_extended(file_name)", ARG1 );
   PRE_MEM_WRITE( "lstat64_extended(buf)", ARG2, sizeof(struct vki_stat64) );
   if (ML_(safe_to_deref)( (void*)ARG4, sizeof(vki_size_t) ))
      PRE_MEM_WRITE( "lstat64_extended(fsacl)", ARG3, *(vki_size_t *)ARG4 );
   PRE_MEM_READ( "lstat64_extended(fsacl_size)", ARG4, sizeof(vki_size_t) );
}
POST(lstat64_extended)
{
   POST_MEM_WRITE( ARG2, sizeof(struct vki_stat64) );
   if (ML_(safe_to_deref)( (void*)ARG4, sizeof(vki_size_t) ))
      POST_MEM_WRITE( ARG3, *(vki_size_t *)ARG4 );
   POST_MEM_WRITE( ARG4, sizeof(vki_size_t) );
}
| |
| |
/* fstat64_extended: as stat64_extended, but on an open descriptor. */
PRE(fstat64_extended)
{
   PRINT("fstat64_extended( %ld, %#lx, %#lx, %#lx )",
         ARG1, ARG2, ARG3, ARG4);
   PRE_REG_READ4(int, "fstat64_extended", int, fd, struct stat64 *, buf,
                 void *, fsacl, vki_size_t *, fsacl_size);
   PRE_MEM_WRITE( "fstat64_extended(buf)", ARG2, sizeof(struct vki_stat64) );
   if (ML_(safe_to_deref)( (void*)ARG4, sizeof(vki_size_t) ))
      PRE_MEM_WRITE("fstat64_extended(fsacl)", ARG3, *(vki_size_t *)ARG4 );
   PRE_MEM_READ( "fstat64_extended(fsacl_size)", ARG4, sizeof(vki_size_t) );
}
POST(fstat64_extended)
{
   POST_MEM_WRITE( ARG2, sizeof(struct vki_stat64) );
   if (ML_(safe_to_deref)( (void*)ARG4, sizeof(vki_size_t) ))
      POST_MEM_WRITE( ARG3, *(vki_size_t *)ARG4 );
   POST_MEM_WRITE( ARG4, sizeof(vki_size_t) );
}
| |
| |
/* fchmod_extended(fildes, uid, gid, mode, xsecurity): chmod-with-ACL
   on an open fd.  The xsecurity size check below is known-imprecise;
   see the DDD notes. */
PRE(fchmod_extended)
{
   /* DDD: Note: this is not really correct.  Handling of
      chmod_extended is broken in the same way. */
   PRINT("fchmod_extended ( %ld, %ld, %ld, %ld, %#lx )",
         ARG1, ARG2, ARG3, ARG4, ARG5);
   PRE_REG_READ5(long, "fchmod_extended",
                 unsigned int, fildes,
                 uid_t, uid,
                 gid_t, gid,
                 vki_mode_t, mode,
                 void* /*really,user_addr_t*/, xsecurity);
   /* DDD: relative to the xnu sources (kauth_copyinfilesec), this
      is just way wrong.  [The trouble is with the size, which depends on a
      non-trival kernel computation] */
   if (ARG5) {
      PRE_MEM_READ( "fchmod_extended(xsecurity)", ARG5,
                    sizeof(struct vki_kauth_filesec) );
   }
}
| |
| PRE(chmod_extended) |
| { |
| /* DDD: Note: this is not really correct. Handling of |
| fchmod_extended is broken in the same way. */ |
| PRINT("chmod_extended ( %#lx(%s), %ld, %ld, %ld, %#lx )", |
| ARG1, ARG1 ? (HChar*)ARG1 : "(null)", ARG2, ARG3, ARG4, ARG5); |
| PRE_REG_READ5(long, "chmod_extended", |
| unsigned int, fildes, |
| uid_t, uid, |
| gid_t, gid, |
| vki_mode_t, mode, |
| void* /*really,user_addr_t*/, xsecurity); |
| PRE_MEM_RASCIIZ("chmod_extended(path)", ARG1); |
| /* DDD: relative to the xnu sources (kauth_copyinfilesec), this |
| is just way wrong. [The trouble is with the size, which depends on a |
| non-trival kernel computation] */ |
| if (ARG5) { |
| PRE_MEM_READ( "chmod_extended(xsecurity)", ARG5, |
| sizeof(struct vki_kauth_filesec) ); |
| } |
| } |
| |
/* open_extended(path, flags, uid, gid, mode, xsecurity): open that
   also establishes ownership/ACL.  The xsecurity size check below is
   known-imprecise; see the DDD notes. */
PRE(open_extended)
{
   /* DDD: Note: this is not really correct.  Handling of
      {,f}chmod_extended is broken in the same way. */
   PRINT("open_extended ( %#lx(%s), 0x%lx, %ld, %ld, %ld, %#lx )",
         ARG1, ARG1 ? (HChar*)ARG1 : "(null)",
         ARG2, ARG3, ARG4, ARG5, ARG6);
   PRE_REG_READ6(long, "open_extended",
                 char*, path,
                 int, flags,
                 uid_t, uid,
                 gid_t, gid,
                 vki_mode_t, mode,
                 void* /*really,user_addr_t*/, xsecurity);
   PRE_MEM_RASCIIZ("open_extended(path)", ARG1);
   /* DDD: relative to the xnu sources (kauth_copyinfilesec), this
      is just way wrong.  [The trouble is with the size, which depends on a
      non-trival kernel computation] */
   if (ARG6)
      PRE_MEM_READ( "open_extended(xsecurity)", ARG6,
                    sizeof(struct vki_kauth_filesec) );
}
| |
| // This is a ridiculous syscall. Specifically, the 'entries' argument points |
| // to a buffer that contains one or more 'accessx_descriptor' structs followed |
| // by one or more strings. Each accessx_descriptor contains a field, |
| // 'ad_name_offset', which points to one of the strings (or it can contain |
| // zero which means "reuse the string from the previous accessx_descriptor"). |
| // |
| // What's really ridiculous is that we are only given the size of the overall |
| // buffer, not the number of accessx_descriptors, nor the number of strings. |
| // The kernel determines the number of accessx_descriptors by walking through |
| // them one by one, checking that the ad_name_offset points within the buffer, |
// past the current point (or that it's a zero, unless it's the first
| // descriptor); if so, we assume that this really is an accessx_descriptor, |
| // if not, we assume we've hit the strings section. Gah. |
| // |
| // This affects us here because number of entries in the 'results' buffer is |
| // determined by the number of accessx_descriptors. So we have to know that |
| // number in order to do PRE_MEM_WRITE/POST_MEM_WRITE of 'results'. In |
| // practice, we skip the PRE_MEM_WRITE step because it's easier to do the |
| // computation after the syscall has succeeded, because the kernel will have |
| // checked for all the zillion different ways this syscall can fail, and we'll |