/* blob: 4bb88b37224ff7e82ea9ded415d5e3c8424e45b6 */
/*--------------------------------------------------------------------*/
/*--- Darwin-specific syscalls, etc. syswrap-darwin.c ---*/
/*--------------------------------------------------------------------*/
/*
This file is part of Valgrind, a dynamic binary instrumentation
framework.
Copyright (C) 2005-2013 Apple Inc.
Greg Parker gparker@apple.com
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307, USA.
The GNU General Public License is contained in the file COPYING.
*/
#if defined(VGO_darwin)
#include "pub_core_basics.h"
#include "pub_core_vki.h"
#include "pub_core_vkiscnums.h"
#include "pub_core_libcsetjmp.h" // to keep _threadstate.h happy
#include "pub_core_threadstate.h"
#include "pub_core_aspacemgr.h"
#include "pub_core_xarray.h"
#include "pub_core_clientstate.h"
#include "pub_core_debuglog.h"
#include "pub_core_debuginfo.h" // VG_(di_notify_*)
#include "pub_core_transtab.h" // VG_(discard_translations)
#include "pub_core_libcbase.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcfile.h"
#include "pub_core_libcprint.h"
#include "pub_core_libcproc.h"
#include "pub_core_libcsignal.h"
#include "pub_core_machine.h" // VG_(get_SP)
#include "pub_core_mallocfree.h"
#include "pub_core_options.h"
#include "pub_core_oset.h"
#include "pub_core_scheduler.h"
#include "pub_core_sigframe.h" // For VG_(sigframe_destroy)()
#include "pub_core_signals.h"
#include "pub_core_syscall.h"
#include "pub_core_syswrap.h"
#include "pub_core_tooliface.h"
#include "priv_types_n_macros.h"
#include "priv_syswrap-generic.h" /* for decls of generic wrappers */
#include "priv_syswrap-darwin.h" /* for decls of darwin-ish wrappers */
#include "priv_syswrap-main.h"
/* --- !!! --- EXTERNAL HEADERS start --- !!! --- */
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <semaphore.h>
/* --- !!! --- EXTERNAL HEADERS end --- !!! --- */
/* mach_msg names header fields remote/local; alias them to the
   clearer request/reply naming used throughout this file. */
#define msgh_request_port      msgh_remote_port
#define msgh_reply_port        msgh_local_port
/* Maximum length of a bootstrap service name. */
#define BOOTSTRAP_MAX_NAME_LEN 128
typedef HChar name_t[BOOTSTRAP_MAX_NAME_LEN];

/* MIG address type: fixed 64-bit regardless of host word size. */
typedef uint64_t mig_addr_t;

// Saved ports
/* Special Mach ports remembered for later identification.
   NOTE(review): initialised to 0 here; assignment happens outside
   this view — confirm where they are filled in. */
static mach_port_t vg_host_port = 0;
static mach_port_t vg_task_port = 0;
static mach_port_t vg_bootstrap_port = 0;
// Run a thread from beginning to end and return the thread's
// scheduler-return-code.  |tidW| is a ThreadId widened to Word so it
// can be passed through generic thread-start plumbing.
static VgSchedReturnCode thread_wrapper(Word /*ThreadId*/ tidW)
{
   VgSchedReturnCode ret;
   ThreadId tid = (ThreadId)tidW;
   ThreadState* tst = VG_(get_ThreadState)(tid);

   VG_(debugLog)(1, "syswrap-darwin",
                    "thread_wrapper(tid=%lld): entry\n",
                    (ULong)tidW);

   /* Thread must still be in its just-created state. */
   vg_assert(tst->status == VgTs_Init);

   /* make sure we get the CPU lock before doing anything significant */
   VG_(acquire_BigLock)(tid, "thread_wrapper");

   if (0)
      VG_(printf)("thread tid %d started: stack = %p\n",
                  tid, &tid);

   /* Make sure error reporting is enabled in the new thread. */
   tst->err_disablement_level = 0;

   /* Tell the tool a new thread is about to run its first insn. */
   VG_TRACK(pre_thread_first_insn, tid);

   /* Record kernel-level identifiers for this thread. */
   tst->os_state.lwpid = VG_(gettid)();
   tst->os_state.threadgroup = VG_(getpid)();

   /* Thread created with all signals blocked; scheduler will set the
      appropriate mask */

   /* Run the client code until it exits; this blocks for the thread's
      entire lifetime. */
   ret = VG_(scheduler)(tid);

   /* On return the scheduler must have marked the thread as exiting,
      and we must still be the running thread holding the lock. */
   vg_assert(VG_(is_exiting)(tid));

   vg_assert(tst->status == VgTs_Runnable);
   vg_assert(VG_(is_running_thread)(tid));

   VG_(debugLog)(1, "syswrap-darwin",
                    "thread_wrapper(tid=%lld): done\n",
                    (ULong)tidW);

   /* Return to caller, still holding the lock. */
   return ret;
}
/* Allocate a Valgrind stack for this thread, unless it already has
   one.  Returns the initial stack pointer to use, or 0 if the
   allocation failed. */
Addr allocstack ( ThreadId tid )
{
   ThreadState* tst = VG_(get_ThreadState)(tid);

   /* Invariant: valgrind_stack_base and valgrind_stack_init_SP are
      either both zero (no stack yet) or both non-zero (stack
      present). */
   if (tst->os_state.valgrind_stack_base == 0) {
      vg_assert(tst->os_state.valgrind_stack_init_SP == 0);
   } else {
      vg_assert(tst->os_state.valgrind_stack_init_SP != 0);
   }

   /* Only allocate when no stack is present; on failure the fields
      stay zero and we return 0 below. */
   if (tst->os_state.valgrind_stack_base == 0) {
      Addr init_sp;
      VgStack* new_stack = VG_(am_alloc_VgStack)( &init_sp );
      if (new_stack != NULL) {
         tst->os_state.valgrind_stack_base    = (Addr)new_stack;
         tst->os_state.valgrind_stack_init_SP = init_sp;
      }
   }

   VG_(debugLog)( 2, "syswrap-darwin", "stack for tid %d at %p; init_SP=%p\n",
                  tid,
                  (void*)tst->os_state.valgrind_stack_base,
                  (void*)tst->os_state.valgrind_stack_init_SP );

   /* The initial SP must be 32-byte aligned (trivially true for 0). */
   vg_assert(VG_IS_32_ALIGNED(tst->os_state.valgrind_stack_init_SP));

   return tst->os_state.valgrind_stack_init_SP;
}
/* Locate and register the client stack containing |sp| for thread
   |tid|; thin wrapper around the generic stack-guessing helper. */
void find_stack_segment(ThreadId tid, Addr sp)
{
   ML_(guess_and_register_stack) (sp, VG_(get_ThreadState)(tid));
}
/* Run a thread all the way to the end, then do appropriate exit actions
   (this is the last-one-out-turn-off-the-lights bit).
*/
static void run_a_thread_NORETURN ( Word tidW )
{
   Int               c;
   VgSchedReturnCode src;
   ThreadId          tid = (ThreadId)tidW;
   ThreadState*      tst;

   VG_(debugLog)(1, "syswrap-darwin",
                    "run_a_thread_NORETURN(tid=%lld): pre-thread_wrapper\n",
                    (ULong)tidW);

   tst = VG_(get_ThreadState)(tid);
   vg_assert(tst);

   /* Run the thread all the way through. */
   src = thread_wrapper(tid);

   VG_(debugLog)(1, "syswrap-darwin",
                    "run_a_thread_NORETURN(tid=%lld): post-thread_wrapper\n",
                    (ULong)tidW);

   c = VG_(count_living_threads)();
   vg_assert(c >= 1); /* stay sane */

   // Tell the tool this thread is exiting
   VG_TRACK( pre_thread_ll_exit, tid );

   /* If the thread is exiting with errors disabled, complain loudly;
      doing so is bad (does the user know this has happened?)  Also,
      in all cases, be paranoid and clear the flag anyway so that the
      thread slot is safe in this respect if later reallocated.  This
      should be unnecessary since the flag should be cleared when the
      slot is reallocated, in thread_wrapper(). */
   if (tst->err_disablement_level > 0) {
      VG_(umsg)(
         "WARNING: exiting thread has error reporting disabled.\n"
         "WARNING: possibly as a result of some mistake in the use\n"
         "WARNING: of the VALGRIND_DISABLE_ERROR_REPORTING macros.\n"
      );
      /* Fixed: log tag was "syswrap-linux" — a copy-paste slip; every
         other debugLog in this file uses "syswrap-darwin". */
      VG_(debugLog)(
         1, "syswrap-darwin",
            "run_a_thread_NORETURN(tid=%lld): "
            "WARNING: exiting thread has err_disablement_level = %u\n",
            (ULong)tidW, tst->err_disablement_level
      );
   }
   tst->err_disablement_level = 0;

   if (c == 1) {

      VG_(debugLog)(1, "syswrap-darwin",
                       "run_a_thread_NORETURN(tid=%lld): "
                       "last one standing\n",
                       (ULong)tidW);

      /* We are the last one standing.  Keep hold of the lock and
         carry on to show final tool results, then exit the entire system.
         Use the continuation pointer set at startup in m_main. */
      ( * VG_(address_of_m_main_shutdown_actions_NORETURN) ) (tid, src);

   } else {

      mach_msg_header_t msg;

      VG_(debugLog)(1, "syswrap-darwin",
                       "run_a_thread_NORETURN(tid=%lld): "
                       "not last one standing\n",
                       (ULong)tidW);

      /* OK, thread is dead, but others still exist.  Just exit. */

      /* This releases the run lock */
      VG_(exit_thread)(tid);
      vg_assert(tst->status == VgTs_Zombie);

      /* tid is now invalid. */

      // GrP fixme exit race
      /* Hand-roll a thread_terminate message to our own thread port. */
      msg.msgh_bits = MACH_MSGH_BITS(17, MACH_MSG_TYPE_MAKE_SEND_ONCE);
      msg.msgh_request_port = VG_(gettid)();
      msg.msgh_reply_port = 0;
      msg.msgh_id = 3600;  // thread_terminate

      tst->status = VgTs_Empty;
      // GrP fixme race here! new thread may claim this V thread stack
      // before we get out here!
      // GrP fixme use bsdthread_terminate for safe cleanup?
      mach_msg(&msg, MACH_SEND_MSG|MACH_MSG_OPTION_NONE,
               sizeof(msg), 0, 0, MACH_MSG_TIMEOUT_NONE, 0);

      // DDD: This is reached sometimes on none/tests/manythreads, maybe
      // because of the race above.
      VG_(core_panic)("Thread exit failed?\n");
   }

   /*NOTREACHED*/
   vg_assert(0);
}
/* Allocate a stack for the main thread, and run it all the way to the
   end.  Although we already have a working VgStack
   (VG_(interim_stack)) it's better to allocate a new one, so that
   overflow detection works uniformly for all threads.
*/
void VG_(main_thread_wrapper_NORETURN)(ThreadId tid)
{
   Addr sp;
   VG_(debugLog)(1, "syswrap-darwin",
                    "entering VG_(main_thread_wrapper_NORETURN)\n");

   sp = allocstack(tid);

   /* If we can't even allocate the first thread's stack, we're hosed.
      Give up. */
   vg_assert2(sp != 0, "Cannot allocate main thread's stack.");

   /* shouldn't be any other threads around yet */
   vg_assert( VG_(count_living_threads)() == 1 );

   /* Switch to the freshly allocated stack and run the thread there;
      never returns. */
   call_on_new_stack_0_1(
      (Addr)sp,               /* stack */
      0,                      /*bogus return address*/
      run_a_thread_NORETURN,  /* fn to call */
      (Word)tid               /* arg to give it */
   );

   /*NOTREACHED*/
   vg_assert(0);
}
/* Entry point for newly created (non-main) threads: unpack the
   ThreadState pointer passed as |arg| and run the thread to
   completion.  Never returns. */
void start_thread_NORETURN ( Word arg )
{
   ThreadState* tst = (ThreadState*)arg;
   ThreadId     tid = tst->tid;

   run_a_thread_NORETURN ( (Word)tid );
   /*NOTREACHED*/
   vg_assert(0);
}
/* Per-arch thread cleanup hook; nothing to do on Darwin. */
void VG_(cleanup_thread) ( ThreadArchState* arch )
{
}
/* ---------------------------------------------------------------------
   Mach port tracking (based on syswrap-generic's fd tracker)
   ------------------------------------------------------------------ */

/* One of these is allocated for each open port.  Entries form a
   doubly-linked list headed by allocated_ports. */
typedef struct OpenPort
{
   mach_port_t port;
   mach_port_type_t type;         /* right type(s) */
   Int send_count;                /* number of send rights */
   HChar *name;                   /* bootstrap name or NULL */
   ExeContext *where;             /* first allocation only */
   struct OpenPort *next, *prev;
} OpenPort;

// strlen("0x12345678")
#define PORT_STRLEN (2+2*sizeof(mach_port_t))

/* List of allocated ports. */
static OpenPort *allocated_ports;

/* Count of open ports. */
static Int allocated_port_count = 0;
/* Create a tracking entry for |port| with no rights/name information
   recorded.  Assumes no entry for |port| exists yet. */
static void port_create_vanilla(mach_port_t port)
{
   OpenPort* entry
      = VG_(calloc)("syswrap-darwin.port_create_vanilla", sizeof(OpenPort), 1);
   entry->port = port;

   /* Push the new entry onto the head of the list. */
   entry->next = allocated_ports;
   if (allocated_ports != NULL)
      allocated_ports->prev = entry;
   allocated_ports = entry;
   allocated_port_count++;
}
__attribute__((unused))
/* Return True iff |port| has a tracking entry in allocated_ports. */
static Bool port_exists(mach_port_t port)
{
   const OpenPort *cur;

   /* Linear scan of the open-port list. */
   for (cur = allocated_ports; cur != NULL; cur = cur->next) {
      if (cur->port == port)
         return True;
   }
   return False;
}
/* Return the tracking entry for |port|, or NULL if |port| is 0 or
   unknown. */
static OpenPort *info_for_port(mach_port_t port)
{
   OpenPort *cur;

   if (!port)
      return NULL;

   for (cur = allocated_ports; cur != NULL; cur = cur->next) {
      if (cur->port == port)
         return cur;
   }
   return NULL;
}
// Give a port a name, without changing its refcount.
// |name| is used as a printf-style format string, with the port value
// as its sole argument (callers pass e.g. "unnamed-%p"); the buffer is
// sized as strlen(name) + PORT_STRLEN + 1 to leave room for the
// expanded port number.
// GrP fixme don't override name if it already has a specific one
__private_extern__ void assign_port_name(mach_port_t port, const HChar *name)
{
   OpenPort *i;
   if (!port) return;
   vg_assert(name);

   /* The port must already be tracked. */
   i = info_for_port(port);
   vg_assert(i);

   /* Replace any previous name. */
   if (i->name) VG_(free)(i->name);
   i->name =
       VG_(malloc)("syswrap-darwin.mach-port-name",
                   VG_(strlen)(name) + PORT_STRLEN + 1);
   VG_(sprintf)(i->name, name, port);
}
// Return the name of the given port, or "NONPORT-0x1234" if not known.
// The fallback is formatted into a static buffer, so the result is
// only valid until the next call.
static const HChar *name_for_port(mach_port_t port)
{
   static HChar buf[8 + PORT_STRLEN + 1];
   const OpenPort *cur;

   // hack
   if (port == VG_(gettid)()) return "mach_thread_self()";
   if (port == 0) return "NULL";

   for (cur = allocated_ports; cur != NULL; cur = cur->next) {
      if (cur->port == port)
         return cur->name;
   }

   VG_(sprintf)(buf, "NONPORT-%#x", port);
   return buf;
}
/* Adjust the recorded rights for |port|.  |right| is a mask of
   MACH_PORT_TYPE_* bits to adjust, |delta| the change to apply.
   Send rights are refcounted via send_count; other rights are simple
   flags.  delta == INT_MIN means "destroy": drop all send refs.
   When a port ends up with no rights at all, its entry is unlinked
   and freed. */
static
void record_port_mod_refs(mach_port_t port, mach_port_type_t right, Int delta)
{
   OpenPort *i = allocated_ports;
   if (!port) return;
   while(i) {
      if(i->port == port) {
         vg_assert(right != MACH_PORT_TYPE_DEAD_NAME);
         if (right & MACH_PORT_TYPE_SEND) {
            // send rights are refcounted
            if (delta == INT_MIN) delta = -i->send_count; // INT_MIN == destroy
            i->send_count += delta;
            if (i->send_count > 0) i->type |= MACH_PORT_TYPE_SEND;
            else i->type &= ~MACH_PORT_TYPE_SEND;
         }
         right = right & ~MACH_PORT_TYPE_SEND;
         if (right) {
            // other rights are not refcounted
            if (delta > 0) {
               i->type |= right;
            } else if (delta < 0) {
               i->type &= ~right;
            }
         }

         /* Still has some rights: entry stays. */
         if (i->type != 0) return;

         // Port has no rights left. Kill it.
         // VG_(printf)("deleting port %p %s", i->port, i->name);
         if(i->prev)
            i->prev->next = i->next;
         else
            allocated_ports = i->next;
         if(i->next)
            i->next->prev = i->prev;
         if(i->name)
            VG_(free) (i->name);
         VG_(free) (i);
         allocated_port_count--;
         return;
      }
      i = i->next;
   }

   /* Fell off the list: port was never recorded. */
   VG_(printf)("UNKNOWN Mach port modified (port %#x delta %d)\n", port, delta);
}
/* Record that this task gained a right to |port| via a message with
   disposition |type|.  MACH_MSG_TYPE_PORT_NAME conveys no rights. */
static
void record_port_insert_rights(mach_port_t port, mach_msg_type_name_t type)
{
   mach_port_type_t gained;

   /* Translate the message disposition into the right acquired. */
   switch (type) {
      case MACH_MSG_TYPE_PORT_NAME:
         // this task has no rights for the name
         return;
      case MACH_MSG_TYPE_PORT_RECEIVE:
         // this task gets receive rights
         gained = MACH_PORT_TYPE_RECEIVE;
         break;
      case MACH_MSG_TYPE_PORT_SEND:
         // this task gets a send right
         gained = MACH_PORT_TYPE_SEND;
         break;
      case MACH_MSG_TYPE_PORT_SEND_ONCE:
         // this task gets send-once rights
         gained = MACH_PORT_TYPE_SEND_ONCE;
         break;
      default:
         vg_assert(0);
         return;
   }

   record_port_mod_refs(port, gained, 1);
}
/* Record a mach_port_deallocate-style operation on |port|. */
static
void record_port_dealloc(mach_port_t port)
{
   // deletes 1 send or send-once right (port can't have both)
   record_port_mod_refs(port, MACH_PORT_TYPE_SEND_RIGHTS, -1);
}
/* Record a mach_port_destroy-style operation on |port|. */
static
void record_port_destroy(mach_port_t port)
{
   // deletes all rights to port (INT_MIN delta == destroy)
   record_port_mod_refs(port, MACH_PORT_TYPE_ALL_RIGHTS, INT_MIN);
}
/* Note the fact that a Mach port was just allocated or transferred.
   If the port is already known, increment its reference count.
   Otherwise create a new tracking entry.  |right| may be -1 meaning
   "no right information"; |tid| may be -1 meaning "no client context
   for an allocation ExeContext". */
void record_named_port(ThreadId tid, mach_port_t port,
                       mach_port_right_t right, const HChar *name)
{
   OpenPort *i;
   if (!port) return;

   /* Already tracked?  Just bump the refcount for the new right. */
   for (i = allocated_ports; i != NULL; i = i->next) {
      if (i->port == port) {
         if (right != -1) record_port_mod_refs(port, MACH_PORT_TYPE(right), 1);
         return;
      }
   }

   /* Not already one: allocate an OpenPort and push it on the list.
      (The original code re-tested i == NULL here, which is always
      true after the scan loop — dead guard removed.) */
   i = VG_(malloc)("syswrap-darwin.mach-port", sizeof(OpenPort));
   i->prev = NULL;
   i->next = allocated_ports;
   if(allocated_ports) allocated_ports->prev = i;
   allocated_ports = i;
   allocated_port_count++;

   i->port = port;
   i->where = (tid == -1) ? NULL : VG_(record_ExeContext)(tid, 0);
   i->name = NULL;
   if (right != -1) {
      i->type = MACH_PORT_TYPE(right);
      /* Send rights are refcounted; a fresh send right starts at 1. */
      i->send_count = (right == MACH_PORT_RIGHT_SEND) ? 1 : 0;
   } else {
      i->type = 0;
      i->send_count = 0;
   }

   assign_port_name(port, name);
}
// Record opening of a nameless port.  The "unnamed-%p" format is
// expanded with the port value by assign_port_name().
static void record_unnamed_port(ThreadId tid, mach_port_t port, mach_port_right_t right)
{
   record_named_port(tid, port, right, "unnamed-%p");
}
/* Dump summary of open Mach ports, like VG_(show_open_fds) */
void VG_(show_open_ports)(void)
{
OpenPort *i;
VG_(message)(Vg_UserMsg,
"MACH PORTS: %d open at exit.", allocated_port_count);
for (i = allocated_ports; i; i = i->next) {
if (i->name) {
VG_(message)(Vg_UserMsg, "Open Mach port 0x%x: %s", i->port, i->name);
} else {
VG_(message)(Vg_UserMsg, "Open Mach port 0x%x", i->port);
}
if (i->where) {
VG_(pp_ExeContext)(i->where);
VG_(message)(Vg_UserMsg, "");
}
}
VG_(message)(Vg_UserMsg, "");
}
/* ---------------------------------------------------------------------
   sync_mappings
   ------------------------------------------------------------------ */

/* Resynchronise Valgrind's view of the address space with the kernel's
   after a syscall that may have changed mappings behind our back.
   |when|/|where|/|num| only label trace output.  Returns True iff any
   segment changes were found and applied. */
Bool ML_(sync_mappings)(const HChar *when, const HChar *where, Int num)
{
   // Usually the number of segments added/removed in a single calls is very
   // small e.g. 1.  But it sometimes gets up to at least 100 or so (eg. for
   // Quicktime).  So we use a repeat-with-bigger-buffers-until-success model,
   // because we can't do dynamic allocation within VG_(get_changed_segments),
   // because it's in m_aspacemgr.
   ChangedSeg* css = NULL;
   Int        css_size;
   Int        css_used;
   Int        i;
   Bool       ok;

   if (VG_(clo_trace_syscalls)) {
       VG_(debugLog)(0, "syswrap-darwin",
                     "sync_mappings(\"%s\", \"%s\", %d)\n",
                     when, where, num);
   }

   // 16 is enough for most cases, but small enough that overflow happens
   // occasionally and thus the overflow path gets some test coverage.
   css_size = 16;
   ok = False;
   while (!ok) {
      VG_(free)(css);   // css is NULL on first iteration;  that's ok.
      css = VG_(calloc)("sys_wrap.sync_mappings", css_size, sizeof(ChangedSeg));
      ok = VG_(get_changed_segments)(when, where, css, css_size, &css_used);
      css_size *= 2;    // grow for the retry (harmlessly doubled on success)
   }

   // Now add/remove them.
   for (i = 0; i < css_used; i++) {
      ChangedSeg* cs = &css[i];
      if (cs->is_added) {
         ML_(notify_core_and_tool_of_mmap)(
               cs->start, cs->end - cs->start + 1,
               cs->prot, VKI_MAP_PRIVATE, 0, cs->offset);
         // should this call VG_(di_notify_mmap) also?
      } else {
         ML_(notify_core_and_tool_of_munmap)(
               cs->start, cs->end - cs->start + 1);
      }
      if (VG_(clo_trace_syscalls)) {
          if (cs->is_added) {
             VG_(debugLog)(0, "syswrap-darwin",
                "  added region 0x%010lx..0x%010lx prot %u at %s (%s)\n",
                cs->start, cs->end + 1, (UInt)cs->prot, where, when);
	  } else {
             VG_(debugLog)(0, "syswrap-darwin",
                "  removed region 0x%010lx..0x%010lx at %s (%s)\n",
                cs->start, cs->end + 1, where, when);
	  }
      }
   }

   VG_(free)(css);
   return css_used > 0;
}
/* ---------------------------------------------------------------------
   wrappers
   ------------------------------------------------------------------ */

/* Declare a Darwin-family PRE/POST syscall handler. */
#define PRE(name)       DEFN_PRE_TEMPLATE(darwin, name)
#define POST(name)      DEFN_POST_TEMPLATE(darwin, name)

/* Name of a handler function, and the call with standard args. */
#define PRE_FN(name)    vgSysWrap_darwin_##name##_before
#define POST_FN(name)   vgSysWrap_darwin_##name##_after

#define CALL_PRE(name) PRE_FN(name)(tid, layout, arrghs, status, flags)
#define CALL_POST(name) POST_FN(name)(tid, arrghs, status)

#if VG_WORDSIZE == 4
// Combine two 32-bit values into a 64-bit value
// Always use with low-numbered arg first (e.g. LOHI64(ARG1,ARG2) )
# if defined(VGA_x86)
#  define LOHI64(lo,hi)   ( ((ULong)(UInt)(lo)) | (((ULong)(UInt)(hi)) << 32) )
# else
#  error unknown architecture
# endif
#endif

// Retrieve the current Mach thread
#define MACH_THREAD ((Addr)VG_(get_ThreadState)(tid)->os_state.lwpid)

// Set the POST handler for a mach_msg derivative
#define AFTER VG_(get_ThreadState)(tid)->os_state.post_mach_trap_fn

// Set or get values saved from Mach messages
#define MACH_ARG(x) VG_(get_ThreadState)(tid)->os_state.mach_args.x
#define MACH_REMOTE VG_(get_ThreadState)(tid)->os_state.remote_port
#define MACH_MSGH_ID VG_(get_ThreadState)(tid)->os_state.msgh_id
/* ---------------------------------------------------------------------
   darwin ioctl wrapper
   ------------------------------------------------------------------ */

/* PRE handler for Darwin ioctl(2): mark client memory that each
   request reads or writes.  Requests that take no third argument are
   dispatched first and return early. */
PRE(ioctl)
{
   *flags |= SfMayBlock;

   /* Handle ioctls that don't take an arg first */
   switch (ARG2 /* request */) {
   case VKI_TIOCSCTTY:
   case VKI_TIOCEXCL:
   case VKI_TIOCPTYGRANT:
   case VKI_TIOCPTYUNLK:
   case VKI_DTRACEHIOC_REMOVE:
      PRINT("ioctl ( %ld, 0x%lx )",ARG1,ARG2);
      PRE_REG_READ2(long, "ioctl",
                    unsigned int, fd, unsigned int, request);
      return;
   default:
      PRINT("ioctl ( %ld, 0x%lx, %#lx )",ARG1,ARG2,ARG3);
      PRE_REG_READ3(long, "ioctl",
                    unsigned int, fd, unsigned int, request, unsigned long, arg);
   }

   switch (ARG2 /* request */) {
   case VKI_TIOCGWINSZ:
      PRE_MEM_WRITE( "ioctl(TIOCGWINSZ)", ARG3, sizeof(struct vki_winsize) );
      break;
   case VKI_TIOCSWINSZ:
      PRE_MEM_READ( "ioctl(TIOCSWINSZ)",  ARG3, sizeof(struct vki_winsize) );
      break;
   case VKI_TIOCMBIS:
      PRE_MEM_READ( "ioctl(TIOCMBIS)",    ARG3, sizeof(unsigned int) );
      break;
   case VKI_TIOCMBIC:
      PRE_MEM_READ( "ioctl(TIOCMBIC)",    ARG3, sizeof(unsigned int) );
      break;
   case VKI_TIOCMSET:
      PRE_MEM_READ( "ioctl(TIOCMSET)",    ARG3, sizeof(unsigned int) );
      break;
   case VKI_TIOCMGET:
      PRE_MEM_WRITE( "ioctl(TIOCMGET)",   ARG3, sizeof(unsigned int) );
      break;
   case VKI_TIOCGPGRP:
      /* Get process group ID for foreground processing group. */
      PRE_MEM_WRITE( "ioctl(TIOCGPGRP)", ARG3, sizeof(vki_pid_t) );
      break;
   case VKI_TIOCSPGRP:
      /* Set the foreground process group: the kernel READS the pid_t
         the client passes in (tcsetpgrp semantics).  Fixed from a
         copy-paste of the TIOCGPGRP case, which used PRE_MEM_WRITE
         and the wrong label. */
      PRE_MEM_READ( "ioctl(TIOCSPGRP)", ARG3, sizeof(vki_pid_t) );
      break;
   case VKI_FIONBIO:
      PRE_MEM_READ( "ioctl(FIONBIO)",    ARG3, sizeof(int) );
      break;
   case VKI_FIOASYNC:
      PRE_MEM_READ( "ioctl(FIOASYNC)",   ARG3, sizeof(int) );
      break;
   case VKI_FIONREAD:                /* identical to SIOCINQ */
      PRE_MEM_WRITE( "ioctl(FIONREAD)",  ARG3, sizeof(int) );
      break;

   /* These all use struct ifreq AFAIK */
   /* GrP fixme is sizeof(struct vki_if_req) correct if it's using a sockaddr? */
   case VKI_SIOCGIFFLAGS:        /* get flags                    */
      PRE_MEM_RASCIIZ( "ioctl(SIOCGIFFLAGS)",
                     (Addr)((struct vki_ifreq *)ARG3)->vki_ifr_name );
      PRE_MEM_WRITE( "ioctl(SIOCGIFFLAGS)", ARG3, sizeof(struct vki_ifreq));
      break;
   case VKI_SIOCGIFMTU:          /* get MTU size                 */
      PRE_MEM_RASCIIZ( "ioctl(SIOCGIFMTU)",
                     (Addr)((struct vki_ifreq *)ARG3)->vki_ifr_name );
      PRE_MEM_WRITE( "ioctl(SIOCGIFMTU)", ARG3, sizeof(struct vki_ifreq));
      break;
   case VKI_SIOCGIFADDR:         /* get PA address               */
      PRE_MEM_RASCIIZ( "ioctl(SIOCGIFADDR)",
                     (Addr)((struct vki_ifreq *)ARG3)->vki_ifr_name );
      PRE_MEM_WRITE( "ioctl(SIOCGIFADDR)", ARG3, sizeof(struct vki_ifreq));
      break;
   case VKI_SIOCGIFNETMASK:      /* get network PA mask          */
      PRE_MEM_RASCIIZ( "ioctl(SIOCGIFNETMASK)",
                     (Addr)((struct vki_ifreq *)ARG3)->vki_ifr_name );
      PRE_MEM_WRITE( "ioctl(SIOCGIFNETMASK)", ARG3, sizeof(struct vki_ifreq));
      break;
   case VKI_SIOCGIFMETRIC:       /* get metric                   */
      PRE_MEM_RASCIIZ( "ioctl(SIOCGIFMETRIC)",
                     (Addr)((struct vki_ifreq *)ARG3)->vki_ifr_name );
      PRE_MEM_WRITE( "ioctl(SIOCGIFMETRIC)", ARG3, sizeof(struct vki_ifreq));
      break;
   case VKI_SIOCGIFDSTADDR:      /* get remote PA address        */
      PRE_MEM_RASCIIZ( "ioctl(SIOCGIFDSTADDR)",
                     (Addr)((struct vki_ifreq *)ARG3)->vki_ifr_name );
      PRE_MEM_WRITE( "ioctl(SIOCGIFDSTADDR)", ARG3, sizeof(struct vki_ifreq));
      break;
   case VKI_SIOCGIFBRDADDR:      /* get broadcast PA address     */
      PRE_MEM_RASCIIZ( "ioctl(SIOCGIFBRDADDR)",
                     (Addr)((struct vki_ifreq *)ARG3)->vki_ifr_name );
      PRE_MEM_WRITE( "ioctl(SIOCGIFBRDADDR)", ARG3, sizeof(struct vki_ifreq));
      break;
   case VKI_SIOCGIFCONF:         /* get iface list               */
      /* WAS:
         PRE_MEM_WRITE( "ioctl(SIOCGIFCONF)", ARG3, sizeof(struct ifconf));
         KERNEL_DO_SYSCALL(tid,RES);
         if (!VG_(is_kerror)(RES) && RES == 0)
         POST_MEM_WRITE(ARG3, sizeof(struct ifconf));
      */
      PRE_MEM_READ( "ioctl(SIOCGIFCONF)",
                    (Addr)&((struct vki_ifconf *)ARG3)->ifc_len,
                    sizeof(((struct vki_ifconf *)ARG3)->ifc_len));
      PRE_MEM_READ( "ioctl(SIOCGIFCONF)",
                    (Addr)&((struct vki_ifconf *)ARG3)->vki_ifc_buf,
                    sizeof(((struct vki_ifconf *)ARG3)->vki_ifc_buf));
      if ( ARG3 ) {
         // TODO len must be readable and writable
         // buf pointer only needs to be readable
         struct vki_ifconf *ifc = (struct vki_ifconf *) ARG3;
         PRE_MEM_WRITE( "ioctl(SIOCGIFCONF).ifc_buf",
                        (Addr)(ifc->vki_ifc_buf), ifc->ifc_len );
      }
      break;
   case VKI_SIOCSIFFLAGS:        /* set flags                    */
      PRE_MEM_RASCIIZ( "ioctl(SIOCSIFFLAGS)",
                     (Addr)((struct vki_ifreq *)ARG3)->vki_ifr_name );
      PRE_MEM_READ( "ioctl(SIOCSIFFLAGS)",
                     (Addr)&((struct vki_ifreq *)ARG3)->vki_ifr_flags,
                     sizeof(((struct vki_ifreq *)ARG3)->vki_ifr_flags) );
      break;
   case VKI_SIOCSIFADDR:         /* set PA address               */
   case VKI_SIOCSIFDSTADDR:      /* set remote PA address        */
   case VKI_SIOCSIFBRDADDR:      /* set broadcast PA address     */
   case VKI_SIOCSIFNETMASK:      /* set network PA mask          */
      PRE_MEM_RASCIIZ( "ioctl(SIOCSIF*ADDR)",
                     (Addr)((struct vki_ifreq *)ARG3)->vki_ifr_name );
      PRE_MEM_READ( "ioctl(SIOCSIF*ADDR)",
                     (Addr)&((struct vki_ifreq *)ARG3)->ifr_addr,
                     sizeof(((struct vki_ifreq *)ARG3)->ifr_addr) );
      break;
   case VKI_SIOCSIFMETRIC:       /* set metric                   */
      PRE_MEM_RASCIIZ( "ioctl(SIOCSIFMETRIC)",
                     (Addr)((struct vki_ifreq *)ARG3)->vki_ifr_name );
      PRE_MEM_READ( "ioctl(SIOCSIFMETRIC)",
                     (Addr)&((struct vki_ifreq *)ARG3)->vki_ifr_metric,
                     sizeof(((struct vki_ifreq *)ARG3)->vki_ifr_metric) );
      break;
   case VKI_SIOCSIFMTU:          /* set MTU size                 */
      PRE_MEM_RASCIIZ( "ioctl(SIOCSIFMTU)",
                     (Addr)((struct vki_ifreq *)ARG3)->vki_ifr_name );
      PRE_MEM_READ( "ioctl(SIOCSIFMTU)",
                     (Addr)&((struct vki_ifreq *)ARG3)->vki_ifr_mtu,
                     sizeof(((struct vki_ifreq *)ARG3)->vki_ifr_mtu) );
      break;
      /* Routing table calls.  */
#ifdef VKI_SIOCADDRT
   case VKI_SIOCADDRT:           /* add routing table entry      */
   case VKI_SIOCDELRT:           /* delete routing table entry   */
      PRE_MEM_READ( "ioctl(SIOCADDRT/DELRT)", ARG3,
                    sizeof(struct vki_rtentry));
      break;
#endif

   case VKI_SIOCGPGRP:
      PRE_MEM_WRITE( "ioctl(SIOCGPGRP)", ARG3, sizeof(int) );
      break;
   case VKI_SIOCSPGRP:
      PRE_MEM_READ( "ioctl(SIOCSPGRP)", ARG3, sizeof(int) );
      //tst->sys_flags &= ~SfMayBlock;
      break;

   case VKI_FIODTYPE:
      /* Label fixed: this is FIODTYPE, not FIONREAD. */
      PRE_MEM_WRITE( "ioctl(FIODTYPE)", ARG3, sizeof(int) );
      break;

   case VKI_DTRACEHIOC_ADDDOF:
      break;

   // ttycom.h
   case VKI_TIOCGETA:
      PRE_MEM_WRITE( "ioctl(TIOCGETA)", ARG3, sizeof(struct vki_termios) );
      break;
   case VKI_TIOCSETA:
      PRE_MEM_READ( "ioctl(TIOCSETA)", ARG3, sizeof(struct vki_termios) );
      break;
   case VKI_TIOCGETD:
      PRE_MEM_WRITE( "ioctl(TIOCGETD)", ARG3, sizeof(int) );
      break;
   case VKI_TIOCSETD:
      PRE_MEM_READ( "ioctl(TIOCSETD)", ARG3, sizeof(int) );
      break;
   case VKI_TIOCPTYGNAME:
      PRE_MEM_WRITE( "ioctl(TIOCPTYGNAME)", ARG3, 128 );
      break;

   // filio.h
   case VKI_FIOCLEX:
      break;
   case VKI_FIONCLEX:
      break;

   default:
      ML_(PRE_unknown_ioctl)(tid, ARG2, ARG3);
      break;
   }
}
/* POST handler for Darwin ioctl(2): mark client memory the kernel
   wrote, for requests that succeeded. */
POST(ioctl)
{
   vg_assert(SUCCESS);
   switch (ARG2 /* request */) {
   case VKI_TIOCGWINSZ:
      POST_MEM_WRITE( ARG3, sizeof(struct vki_winsize) );
      break;
   case VKI_TIOCSWINSZ:
   case VKI_TIOCMBIS:
   case VKI_TIOCMBIC:
   case VKI_TIOCMSET:
      break;
   case VKI_TIOCMGET:
      POST_MEM_WRITE( ARG3, sizeof(unsigned int) );
      break;
   case VKI_TIOCGPGRP:
      /* Get process group ID for foreground processing group. */
      POST_MEM_WRITE( ARG3, sizeof(vki_pid_t) );
      break;
   case VKI_TIOCSPGRP:
      /* Set the foreground process group: the kernel only READS the
         pid_t argument and writes nothing back.  The previous
         POST_MEM_WRITE here (copy-pasted from TIOCGPGRP) could mark
         uninitialised client memory as defined and hide errors. */
      break;
   case VKI_TIOCSCTTY:
      break;
   case VKI_FIONBIO:
      break;
   case VKI_FIOASYNC:
      break;
   case VKI_FIONREAD:                /* identical to SIOCINQ */
      POST_MEM_WRITE( ARG3, sizeof(int) );
      break;

   /* These all use struct ifreq AFAIK */
   case VKI_SIOCGIFFLAGS:        /* get flags                    */
      POST_MEM_WRITE( (Addr)&((struct vki_ifreq *)ARG3)->vki_ifr_flags,
                      sizeof(((struct vki_ifreq *)ARG3)->vki_ifr_flags) );
      break;
   case VKI_SIOCGIFMTU:          /* get MTU size                 */
      POST_MEM_WRITE( (Addr)&((struct vki_ifreq *)ARG3)->vki_ifr_mtu,
                      sizeof(((struct vki_ifreq *)ARG3)->vki_ifr_mtu) );
      break;
   case VKI_SIOCGIFADDR:         /* get PA address               */
   case VKI_SIOCGIFDSTADDR:      /* get remote PA address        */
   case VKI_SIOCGIFBRDADDR:      /* get broadcast PA address     */
   case VKI_SIOCGIFNETMASK:      /* get network PA mask          */
      POST_MEM_WRITE(
                (Addr)&((struct vki_ifreq *)ARG3)->ifr_addr,
                sizeof(((struct vki_ifreq *)ARG3)->ifr_addr) );
      break;
   case VKI_SIOCGIFMETRIC:       /* get metric                   */
      POST_MEM_WRITE(
                (Addr)&((struct vki_ifreq *)ARG3)->vki_ifr_metric,
                sizeof(((struct vki_ifreq *)ARG3)->vki_ifr_metric) );
      break;
   case VKI_SIOCGIFCONF:         /* get iface list               */
      /* WAS:
         PRE_MEM_WRITE("ioctl(SIOCGIFCONF)", ARG3, sizeof(struct ifconf));
         KERNEL_DO_SYSCALL(tid,RES);
         if (!VG_(is_kerror)(RES) && RES == 0)
         POST_MEM_WRITE(ARG3, sizeof(struct ifconf));
      */
      if (RES == 0 && ARG3 ) {
         struct vki_ifconf *ifc = (struct vki_ifconf *) ARG3;
         if (ifc->vki_ifc_buf != NULL)
            POST_MEM_WRITE( (Addr)(ifc->vki_ifc_buf), ifc->ifc_len );
      }
      break;

   case VKI_SIOCSIFFLAGS:        /* set flags                    */
   case VKI_SIOCSIFDSTADDR:      /* set remote PA address        */
   case VKI_SIOCSIFBRDADDR:      /* set broadcast PA address     */
   case VKI_SIOCSIFNETMASK:      /* set network PA mask          */
   case VKI_SIOCSIFMETRIC:       /* set metric                   */
   case VKI_SIOCSIFADDR:         /* set PA address               */
   case VKI_SIOCSIFMTU:          /* set MTU size                 */
      break;

#ifdef VKI_SIOCADDRT
      /* Routing table calls.  */
   case VKI_SIOCADDRT:           /* add routing table entry      */
   case VKI_SIOCDELRT:           /* delete routing table entry   */
      break;
#endif

   case VKI_SIOCGPGRP:
      POST_MEM_WRITE(ARG3, sizeof(int));
      break;
   case VKI_SIOCSPGRP:
      break;

   case VKI_FIODTYPE:
      POST_MEM_WRITE( ARG3, sizeof(int) );
      break;

   case VKI_DTRACEHIOC_REMOVE:
   case VKI_DTRACEHIOC_ADDDOF:
      break;

   // ttycom.h
   case VKI_TIOCGETA:
      POST_MEM_WRITE( ARG3, sizeof(struct vki_termios));
      break;
   case VKI_TIOCSETA:
      break;
   case VKI_TIOCGETD:
      POST_MEM_WRITE( ARG3, sizeof(int) );
      break;
   case VKI_TIOCSETD:
      break;
   case VKI_TIOCPTYGNAME:
      POST_MEM_WRITE( ARG3, 128);
      break;
   case VKI_TIOCPTYGRANT:
   case VKI_TIOCPTYUNLK:
      break;

   default:
      break;
   }
}
/* ---------------------------------------------------------------------
   darwin fcntl wrapper
   ------------------------------------------------------------------ */

/* Map a Darwin-specific fcntl command number to its symbolic name for
   tracing; returns "UNKNOWN" for anything not listed here. */
static const HChar *name_for_fcntl(UWord cmd) {
   switch (cmd) {
      case VKI_F_CHKCLEAN:       return "F_CHKCLEAN";
      case VKI_F_RDAHEAD:        return "F_RDAHEAD";
      case VKI_F_NOCACHE:        return "F_NOCACHE";
      case VKI_F_FULLFSYNC:      return "F_FULLFSYNC";
      case VKI_F_FREEZE_FS:      return "F_FREEZE_FS";
      case VKI_F_THAW_FS:        return "F_THAW_FS";
      case VKI_F_GLOBAL_NOCACHE: return "F_GLOBAL_NOCACHE";
      case VKI_F_PREALLOCATE:    return "F_PREALLOCATE";
      case VKI_F_SETSIZE:        return "F_SETSIZE";
      case VKI_F_RDADVISE:       return "F_RDADVISE";
#     if DARWIN_VERS < DARWIN_10_9
      case VKI_F_READBOOTSTRAP:  return "F_READBOOTSTRAP";
      case VKI_F_WRITEBOOTSTRAP: return "F_WRITEBOOTSTRAP";
#     endif
      case VKI_F_LOG2PHYS:       return "F_LOG2PHYS";
      case VKI_F_GETPATH:        return "F_GETPATH";
      case VKI_F_PATHPKG_CHECK:  return "F_PATHPKG_CHECK";
      case VKI_F_ADDSIGS:        return "F_ADDSIGS";
#     if DARWIN_VERS >= DARWIN_10_9
      case VKI_F_ADDFILESIGS:    return "F_ADDFILESIGS";
#     endif
      default:                   return "UNKNOWN";
   }
}
/* PRE handler for Darwin fcntl(2): per-command argument checking.
   Commands fall into groups by what ARG3 means (nothing, an integer
   "arg", a lock pointer, or a command-specific struct). */
PRE(fcntl)
{
   switch (ARG2) {
   // These ones ignore ARG3.
   case VKI_F_GETFD:
   case VKI_F_GETFL:
   case VKI_F_GETOWN:
      PRINT("fcntl ( %ld, %ld )", ARG1,ARG2);
      PRE_REG_READ2(long, "fcntl", unsigned int, fd, unsigned int, cmd);
      break;

   // These ones use ARG3 as "arg".
   case VKI_F_DUPFD:
   case VKI_F_SETFD:
   case VKI_F_SETFL:
   case VKI_F_SETOWN:
      PRINT("fcntl[ARG3=='arg'] ( %ld, %ld, %ld )", ARG1,ARG2,ARG3);
      PRE_REG_READ3(long, "fcntl",
                    unsigned int, fd, unsigned int, cmd, unsigned long, arg);
      break;

   // These ones use ARG3 as "lock".
   case VKI_F_GETLK:
   case VKI_F_SETLK:
   case VKI_F_SETLKW:
      PRINT("fcntl[ARG3=='lock'] ( %ld, %ld, %#lx )", ARG1,ARG2,ARG3);
      PRE_REG_READ3(long, "fcntl",
                    unsigned int, fd, unsigned int, cmd,
                    struct flock64 *, lock);
      // GrP fixme mem read sizeof(flock64)
      if (ARG2 == VKI_F_SETLKW)
         *flags |= SfMayBlock;
      break;

   // none
   case VKI_F_CHKCLEAN:
   case VKI_F_RDAHEAD:
   case VKI_F_NOCACHE:
   case VKI_F_FULLFSYNC:
   case VKI_F_FREEZE_FS:
   case VKI_F_THAW_FS:
   case VKI_F_GLOBAL_NOCACHE:
      /* Fixed: name_for_fcntl() was being passed ARG1 (the fd) instead
         of ARG2 (the command), so the trace printed "UNKNOWN". */
      PRINT("fcntl ( %ld, %s )", ARG1, name_for_fcntl(ARG2));
      PRE_REG_READ2(long, "fcntl", unsigned int, fd, unsigned int, cmd);
      break;

   // struct fstore
   case VKI_F_PREALLOCATE:
      PRINT("fcntl ( %ld, %s, %#lx )", ARG1, name_for_fcntl(ARG2), ARG3);
      PRE_REG_READ3(long, "fcntl",
                    unsigned int, fd, unsigned int, cmd,
                    struct fstore *, fstore);
      {
         struct vki_fstore *fstore = (struct vki_fstore *)ARG3;
         /* Field labels fixed: all four read-checks said "fst_flags". */
         PRE_FIELD_READ( "fcntl(F_PREALLOCATE, fstore->fst_flags)",
                         fstore->fst_flags );
         PRE_FIELD_READ( "fcntl(F_PREALLOCATE, fstore->fst_posmode)",
                         fstore->fst_posmode );
         PRE_FIELD_READ( "fcntl(F_PREALLOCATE, fstore->fst_offset)",
                         fstore->fst_offset );
         PRE_FIELD_READ( "fcntl(F_PREALLOCATE, fstore->fst_length)",
                         fstore->fst_length );
         PRE_FIELD_WRITE( "fcntl(F_PREALLOCATE, fstore->fst_bytesalloc)",
                          fstore->fst_bytesalloc);
      }
      break;

   // off_t
   case VKI_F_SETSIZE:
      PRINT("fcntl ( %ld, %s, %#lx )", ARG1, name_for_fcntl(ARG2), ARG3);
      PRE_REG_READ3(long, "fcntl",
                    unsigned int, fd, unsigned int, cmd,
                    vki_off_t *, offset);
      break;

   // struct radvisory
   case VKI_F_RDADVISE:
      PRINT("fcntl ( %ld, %s, %#lx )", ARG1, name_for_fcntl(ARG2), ARG3);
      PRE_REG_READ3(long, "fcntl",
                    unsigned int, fd, unsigned int, cmd,
                    struct vki_radvisory *, radvisory);
      {
         struct vki_radvisory *radvisory = (struct vki_radvisory *)ARG3;
         /* Labels fixed: these said "F_PREALLOCATE". */
         PRE_FIELD_READ( "fcntl(F_RDADVISE, radvisory->ra_offset)",
                         radvisory->ra_offset );
         PRE_FIELD_READ( "fcntl(F_RDADVISE, radvisory->ra_count)",
                         radvisory->ra_count );
      }
      break;

#  if DARWIN_VERS < DARWIN_10_9
   // struct fbootstraptransfer
   case VKI_F_READBOOTSTRAP:
   case VKI_F_WRITEBOOTSTRAP:
      PRINT("fcntl ( %ld, %s, %#lx )", ARG1, name_for_fcntl(ARG2), ARG3);
      PRE_REG_READ3(long, "fcntl",
                    unsigned int, fd, unsigned int, cmd,
                    struct fbootstraptransfer *, bootstrap);
      PRE_MEM_READ( "fcntl(F_READ/WRITEBOOTSTRAP, bootstrap)",
                    ARG3, sizeof(struct vki_fbootstraptransfer) );
      break;
#  endif

   // struct log2phys (out)
   case VKI_F_LOG2PHYS:
      PRINT("fcntl ( %ld, %s, %#lx )", ARG1, name_for_fcntl(ARG2), ARG3);
      PRE_REG_READ3(long, "fcntl",
                    unsigned int, fd, unsigned int, cmd,
                    struct log2phys *, l2p);
      PRE_MEM_WRITE( "fcntl(F_LOG2PHYS, l2p)",
                     ARG3, sizeof(struct vki_log2phys) );
      break;

   // char[maxpathlen] (out)
   case VKI_F_GETPATH:
      PRINT("fcntl ( %ld, %s, %#lx )", ARG1, name_for_fcntl(ARG2), ARG3);
      PRE_REG_READ3(long, "fcntl",
                    unsigned int, fd, unsigned int, cmd,
                    char *, pathbuf);
      PRE_MEM_WRITE( "fcntl(F_GETPATH, pathbuf)",
                     ARG3, VKI_MAXPATHLEN );
      break;

   // char[maxpathlen] (in)
   case VKI_F_PATHPKG_CHECK:
      PRINT("fcntl ( %ld, %s, %#lx '%s')", ARG1, name_for_fcntl(ARG2), ARG3,
          (char *)ARG3);
      PRE_REG_READ3(long, "fcntl",
                    unsigned int, fd, unsigned int, cmd,
                    char *, pathbuf);
      PRE_MEM_RASCIIZ( "fcntl(F_PATHPKG_CHECK, pathbuf)", ARG3);
      break;

   case VKI_F_ADDSIGS: /* Add detached signatures (for code signing) */
      PRINT("fcntl ( %ld, %s )", ARG1, name_for_fcntl(ARG2));
      PRE_REG_READ3(long, "fcntl",
                    unsigned int, fd, unsigned int, cmd,
                    vki_fsignatures_t *, sigs);
      {
         vki_fsignatures_t *fsigs = (vki_fsignatures_t*)ARG3;
         PRE_FIELD_READ( "fcntl(F_ADDSIGS, fsigs->fs_blob_start)",
                         fsigs->fs_blob_start);
         PRE_FIELD_READ( "fcntl(F_ADDSIGS, fsigs->fs_blob_size)",
                         fsigs->fs_blob_size);

         if (fsigs->fs_blob_start)
            PRE_MEM_READ( "fcntl(F_ADDSIGS, fsigs->fs_blob_start)",
                          (Addr)fsigs->fs_blob_start, fsigs->fs_blob_size);
      }
      break;

   case VKI_F_ADDFILESIGS: /* Add signature from same file (used by dyld for shared libs) */
      PRINT("fcntl ( %ld, %s )", ARG1, name_for_fcntl(ARG2));
      PRE_REG_READ3(long, "fcntl",
                    unsigned int, fd, unsigned int, cmd,
                    vki_fsignatures_t *, sigs);
      {
         vki_fsignatures_t *fsigs = (vki_fsignatures_t*)ARG3;
         PRE_FIELD_READ( "fcntl(F_ADDFILESIGS, fsigs->fs_blob_start)",
                         fsigs->fs_blob_start);
         PRE_FIELD_READ( "fcntl(F_ADDFILESIGS, fsigs->fs_blob_size)",
                         fsigs->fs_blob_size);
      }
      break;

   default:
      PRINT("fcntl ( %ld, %ld [??] )", ARG1, ARG2);
      VG_(printf)("UNKNOWN fcntl %ld!", ARG2);
      break;
   }
}
/* POST handler for fcntl(): mark kernel-written buffers as defined and
   track newly created fds.  Only commands whose results need attention
   are handled; the default case knowingly skips many commands (see the
   DDD note below). */
POST(fcntl)
{
   vg_assert(SUCCESS);
   switch (ARG2) {
   case VKI_F_DUPFD:
      /* RES is a freshly duplicated fd: reject it if outside the range
         allowed to the client, otherwise record it for --track-fds. */
      if (!ML_(fd_allowed)(RES, "fcntl(DUPFD)", tid, True)) {
         VG_(close)(RES);
         SET_STATUS_Failure( VKI_EMFILE );
      } else {
         if (VG_(clo_track_fds))
            ML_(record_fd_open_named)(tid, RES);
      }
      break;
   /* These commands either return scalars or were fully handled in the
      PRE; nothing to mark afterwards. */
   case VKI_F_GETFD:
   case VKI_F_GETFL:
   case VKI_F_GETOWN:
   case VKI_F_SETFD:
   case VKI_F_SETFL:
   case VKI_F_SETOWN:
   case VKI_F_GETLK:
   case VKI_F_SETLK:
   case VKI_F_SETLKW:
      break;
   case VKI_F_PREALLOCATE:
      /* The kernel reports the number of bytes actually allocated. */
      {
      struct vki_fstore *fstore = (struct vki_fstore *)ARG3;
      POST_FIELD_WRITE( fstore->fst_bytesalloc );
      }
      break;
   case VKI_F_LOG2PHYS:
      POST_MEM_WRITE( ARG3, sizeof(struct vki_log2phys) );
      break;
   case VKI_F_GETPATH:
      /* ARG3 now holds a NUL-terminated path; mark exactly that much. */
      POST_MEM_WRITE( ARG3, 1+VG_(strlen)((char *)ARG3) );
      PRINT("\"%s\"", (char*)ARG3);
      break;
   default:
      // DDD: ugh, missing lots of cases here, not nice
      break;
   }
}
/* ---------------------------------------------------------------------
unix syscalls
------------------------------------------------------------------ */
/* futimes(fd, tvp): set access/modification times of an open file.
   tvp may be NULL (use current time); otherwise it points to an array
   of two timevals, both of which the kernel reads. */
PRE(futimes)
{
   PRINT("futimes ( %ld, %#lx )", ARG1,ARG2);
   PRE_REG_READ2(long, "futimes", int, fd, struct timeval *, tvp);
   if (!ML_(fd_allowed)(ARG1, "futimes", tid, False)) {
      SET_STATUS_Failure( VKI_EBADF );
   } else if (ARG2 != 0) {
      /* Check both elements of the tvp[2] array. */
      PRE_timeval_READ( "futimes(tvp[0])", ARG2 );
      PRE_timeval_READ( "futimes(tvp[1])", ARG2+sizeof(struct vki_timeval) );
   }
}
/* semget(key, nsems, semflg): scalar arguments only, nothing to
   dereference. */
PRE(semget)
{
   PRINT("semget ( %ld, %ld, %ld )",ARG1,ARG2,ARG3);
   PRE_REG_READ3(long, "semget", vki_key_t, key, int, nsems, int, semflg);
}
/* semop(semid, sops, nsops): may block waiting on the semaphore set.
   Checking of the 'sops' buffer is delegated to the generic handler. */
PRE(semop)
{
   *flags |= SfMayBlock;
   PRINT("semop ( %ld, %#lx, %lu )",ARG1,ARG2,ARG3);
   /* Fix: the third argument's name was misspelt "nsoops". */
   PRE_REG_READ3(long, "semop",
                 int, semid, struct sembuf *, sops, vki_size_t, nsops);
   ML_(generic_PRE_sys_semop)(tid, ARG1,ARG2,ARG3);
}
/* semctl(semid, semnum, cmd, [arg]): the type (and presence) of the
   fourth argument depends on cmd, hence the per-command PRE_REG_READs.
   Buffer checking is delegated to the generic semctl handler. */
PRE(semctl)
{
   switch (ARG3) {
   case VKI_IPC_STAT:
   case VKI_IPC_SET:
      /* arg is a struct semid_ds pointer. */
      PRINT("semctl ( %ld, %ld, %ld, %#lx )",ARG1,ARG2,ARG3,ARG4);
      PRE_REG_READ4(long, "semctl",
                    int, semid, int, semnum, int, cmd, struct semid_ds *, arg);
      break;
   case VKI_GETALL:
   case VKI_SETALL:
      /* arg is an array of unsigned shorts, one per semaphore. */
      PRINT("semctl ( %ld, %ld, %ld, %#lx )",ARG1,ARG2,ARG3,ARG4);
      PRE_REG_READ4(long, "semctl",
                    int, semid, int, semnum, int, cmd, unsigned short *, arg);
      break;
   case VKI_SETVAL:
      /* arg is a plain int value. */
      PRINT("semctl ( %ld, %ld, %ld, %#lx )",ARG1,ARG2,ARG3,ARG4);
      PRE_REG_READ4(long, "semctl",
                    int, semid, int, semnum, int, cmd, int, arg);
      break;
   default:
      /* Remaining commands take no fourth argument. */
      PRINT("semctl ( %ld, %ld, %ld )",ARG1,ARG2,ARG3);
      PRE_REG_READ3(long, "semctl",
                    int, semid, int, semnum, int, cmd);
      break;
   }
   ML_(generic_PRE_sys_semctl)(tid, ARG1,ARG2,ARG3,ARG4);
}
/* Mark whatever the generic handler knows the kernel wrote. */
POST(semctl)
{
   ML_(generic_POST_sys_semctl)(tid, RES,ARG1,ARG2,ARG3,ARG4);
}
/* sem_open(name, oflag, [mode, value]): open/create a POSIX named
   semaphore.  mode and value are only read by the kernel when O_CREAT
   is set, hence the two PRE_REG_READ variants. */
PRE(sem_open)
{
   if (ARG2 & VKI_O_CREAT) {
      // 4-arg version
      PRINT("sem_open ( %#lx(%s), %ld, %ld, %ld )",
            ARG1,(char*)ARG1,ARG2,ARG3,ARG4);
      PRE_REG_READ4(vki_sem_t *, "sem_open",
                    const char *, name, int, oflag, vki_mode_t, mode,
                    unsigned int, value);
   } else {
      // 2-arg version
      PRINT("sem_open ( %#lx(%s), %ld )",ARG1,(char*)ARG1,ARG2);
      PRE_REG_READ2(vki_sem_t *, "sem_open",
                    const char *, name, int, oflag);
   }
   PRE_MEM_RASCIIZ( "sem_open(name)", ARG1 );
   /* Otherwise handle normally */
   *flags |= SfMayBlock;
}
/* sem_close(sem): only the pointer value is passed; no buffer check. */
PRE(sem_close)
{
   PRINT("sem_close( %#lx )", ARG1);
   PRE_REG_READ1(int, "sem_close", vki_sem_t *, sem);
}
/* sem_unlink(name): remove a named semaphore; name is a C string. */
PRE(sem_unlink)
{
   PRINT("sem_unlink( %#lx(%s) )", ARG1,(char*)ARG1);
   PRE_REG_READ1(int, "sem_unlink", const char *, name);
   PRE_MEM_RASCIIZ( "sem_unlink(name)", ARG1 );
}
/* sem_post(sem): may unblock a waiter, so may itself block briefly. */
PRE(sem_post)
{
   PRINT("sem_post( %#lx )", ARG1);
   PRE_REG_READ1(int, "sem_post", vki_sem_t *, sem);
   *flags |= SfMayBlock;
}
/* sem_destroy(sem): the semaphore object itself is read. */
PRE(sem_destroy)
{
   PRINT("sem_destroy( %#lx )", ARG1);
   PRE_REG_READ1(int, "sem_destroy", vki_sem_t *, sem);
   PRE_MEM_READ("sem_destroy(sem)", ARG1, sizeof(vki_sem_t));
}
/* sem_init(sem, pshared, value): the semaphore object is written. */
PRE(sem_init)
{
   PRINT("sem_init( %#lx, %ld, %ld )", ARG1, ARG2, ARG3);
   PRE_REG_READ3(int, "sem_init", vki_sem_t *, sem,
                 int, pshared, unsigned int, value);
   PRE_MEM_WRITE("sem_init(sem)", ARG1, sizeof(vki_sem_t));
}
POST(sem_init)
{
   POST_MEM_WRITE(ARG1, sizeof(vki_sem_t));
}
/* sem_wait(sem): blocks until the semaphore can be decremented. */
PRE(sem_wait)
{
   PRINT("sem_wait( %#lx )", ARG1);
   PRE_REG_READ1(int, "sem_wait", vki_sem_t *, sem);
   *flags |= SfMayBlock;
}
/* sem_trywait(sem): non-blocking variant; SfMayBlock kept for safety. */
PRE(sem_trywait)
{
   PRINT("sem_trywait( %#lx )", ARG1);
   PRE_REG_READ1(int, "sem_trywait", vki_sem_t *, sem);
   *flags |= SfMayBlock;
}
/* kqueue(): no arguments; returns a new kqueue fd. */
PRE(kqueue)
{
   PRINT("kqueue()");
}
POST(kqueue)
{
   /* Vet the new fd against the client's allowed range and record it
      for --track-fds. */
   if (!ML_(fd_allowed)(RES, "kqueue", tid, True)) {
      VG_(close)(RES);
      SET_STATUS_Failure( VKI_EMFILE );
   } else {
      if (VG_(clo_track_fds)) {
         ML_(record_fd_open_with_given_name)(tid, RES, NULL);
      }
   }
}
/* fileport_makeport(fd, portnamep): create a Mach send right backed by
   a file descriptor.  FIXME: arguments are not checked yet; in
   particular the port name written through ARG2 should get
   PRE_MEM_WRITE/POST_MEM_WRITE treatment.
   Fix: the trace message previously said "guarded_open_np", a
   copy-paste from the handler below; it now names this syscall. */
PRE(fileport_makeport)
{
   PRINT("fileport_makeport(fd:%#lx, portnamep:%#lx) FIXME",
         ARG1, ARG2);
}
/* guarded_open_np(path, guard, guardflags, flags): open with a guard
   value that protects the fd against stray close().  FIXME: argument
   buffers are not checked yet. */
PRE(guarded_open_np)
{
   PRINT("guarded_open_np(path:%#lx(%s), guard:%#lx, guardflags:%#lx, flags:%#lx) FIXME",
         ARG1, (char*)ARG1, ARG2, ARG3, ARG4);
}
/* guarded_kqueue_np(guard, guardflags): kqueue() with a guard.
   FIXME: arguments are not checked yet. */
PRE(guarded_kqueue_np)
{
   PRINT("guarded_kqueue_np(guard:%#lx, guardflags:%#lx) FIXME",
         ARG1, ARG2);
}
POST(guarded_kqueue_np)
{
   /* Same fd bookkeeping as POST(kqueue). */
   if (!ML_(fd_allowed)(RES, "guarded_kqueue_np", tid, True)) {
      VG_(close)(RES);
      SET_STATUS_Failure( VKI_EMFILE );
   } else {
      if (VG_(clo_track_fds)) {
         ML_(record_fd_open_with_given_name)(tid, RES, NULL);
      }
   }
}
/* guarded_close_np(fd, guard): close a guarded fd.  FIXME: unchecked. */
PRE(guarded_close_np)
{
   PRINT("guarded_close_np(fd:%#lx, guard:%#lx) FIXME",
         ARG1, ARG2);
}
/* change_fdguard_np: swap the guard value on an fd.  FIXME: unchecked;
   fdflagsp in particular is written by the kernel. */
PRE(change_fdguard_np)
{
   PRINT("change_fdguard_np(fd:%#lx, guard:%#lx, guardflags:%#lx, nguard:%#lx, nguardflags:%#lx, fdflagsp:%#lx) FIXME",
         ARG1, ARG2, ARG3, ARG4, ARG5, ARG6);
}
/* connectx: multipath-capable connect.  FIXME: unchecked. */
PRE(connectx)
{
   PRINT("connectx(s:%#lx, src:%#lx, srclen:%#lx, dsts:%#lx, dstlen:%#lx, ifscope:%#lx, aid:%#lx, out_cid:%#lx) FIXME",
         ARG1, ARG2, ARG3, ARG4, ARG5, ARG6, ARG7, ARG8);
}
/* disconnectx: companion teardown for connectx.  FIXME: unchecked. */
PRE(disconnectx)
{
   PRINT("disconnectx(s:%#lx, aid:%#lx, cid:%#lx) FIXME",
         ARG1, ARG2, ARG3);
}
/* kevent(kq, changelist, nchanges, eventlist, nevents, timeout):
   changelist is read, eventlist is written; both are sized in units of
   struct kevent, not bytes. */
PRE(kevent)
{
   PRINT("kevent( %ld, %#lx, %ld, %#lx, %ld, %#lx )",
         ARG1, ARG2, ARG3, ARG4, ARG5, ARG6);
   PRE_REG_READ6(int,"kevent", int,kq,
                 const struct vki_kevent *,changelist, int,nchanges,
                 struct vki_kevent *,eventlist, int,nevents,
                 const struct vki_timespec *,timeout);
   if (ARG3) PRE_MEM_READ ("kevent(changelist)",
                           ARG2, ARG3 * sizeof(struct vki_kevent));
   if (ARG5) PRE_MEM_WRITE("kevent(eventlist)",
                           ARG4, ARG5 * sizeof(struct vki_kevent));
   if (ARG6) PRE_MEM_READ ("kevent(timeout)",
                           ARG6, sizeof(struct vki_timespec));
   *flags |= SfMayBlock;
}
POST(kevent)
{
   PRINT("kevent ret %ld dst %#lx (%zu)", RES, ARG4, sizeof(struct vki_kevent));
   /* RES is the number of events delivered into eventlist. */
   if (RES > 0) POST_MEM_WRITE(ARG4, RES * sizeof(struct vki_kevent));
}
/* kevent64: like kevent but with struct kevent64 entries. */
PRE(kevent64)
{
   PRINT("kevent64( %ld, %#lx, %ld, %#lx, %ld, %#lx )",
         ARG1, ARG2, ARG3, ARG4, ARG5, ARG6);
   PRE_REG_READ6(int,"kevent64", int,kq,
                 const struct vki_kevent64 *,changelist, int,nchanges,
                 struct vki_kevent64 *,eventlist, int,nevents,
                 const struct vki_timespec *,timeout);
   if (ARG3) PRE_MEM_READ ("kevent64(changelist)",
                           ARG2, ARG3 * sizeof(struct vki_kevent64));
   if (ARG5) PRE_MEM_WRITE("kevent64(eventlist)",
                           ARG4, ARG5 * sizeof(struct vki_kevent64));
   if (ARG6) PRE_MEM_READ ("kevent64(timeout)",
                           ARG6, sizeof(struct vki_timespec));
   *flags |= SfMayBlock;
}
POST(kevent64)
{
   PRINT("kevent64 ret %ld dst %#lx (%zu)", RES, ARG4, sizeof(struct vki_kevent64));
   if (RES > 0) {
      /* NOTE(review): mappings are resynced here (unlike POST(kevent));
         presumably the kernel can remap memory behind kevent64. */
      ML_(sync_mappings)("after", "kevent64", 0);
      POST_MEM_WRITE(ARG4, RES * sizeof(struct vki_kevent64));
   }
}
/* Client-side thread entry points captured by PRE(bsdthread_register),
   consumed by the pthread/wqthread hijack machinery. */
Addr pthread_starter = 0;
Addr wqthread_starter = 0;
/* Size of the client library's pthread structure, as registered. */
SizeT pthread_structsize = 0;
/* __bsdthread_register(threadstart, wqthread, pthsize): libc registers
   its thread entry points with the kernel.  We save the client's real
   entry points and substitute Valgrind's hijack stubs so that new
   threads start under our control. */
PRE(bsdthread_register)
{
   PRINT("bsdthread_register( %#lx, %#lx, %lu )", ARG1, ARG2, ARG3);
   PRE_REG_READ3(int,"__bsdthread_register", void *,"threadstart",
                 void *,"wqthread", size_t,"pthsize");
   pthread_starter = ARG1;
   wqthread_starter = ARG2;
   pthread_structsize = ARG3;
   /* Redirect the kernel to our stubs instead of the client's. */
   ARG1 = (Word)&pthread_hijack_asm;
   ARG2 = (Word)&wqthread_hijack_asm;
}
/* workq_open(): create the process's workqueue. */
PRE(workq_open)
{
   PRINT("workq_open()");
   PRE_REG_READ0(int, "workq_open");
   // This creates lots of threads and thread stacks under the covers,
   // but we ignore them all until some work item starts running on it.
}
/* Map a workq_ops() operation code to a human-readable name for
   tracing.  Unknown codes come back as "?". */
static const HChar *workqop_name(int op)
{
   if (op == VKI_WQOPS_QUEUE_ADD)        return "QUEUE_ADD";
   if (op == VKI_WQOPS_QUEUE_REMOVE)     return "QUEUE_REMOVE";
   if (op == VKI_WQOPS_THREAD_RETURN)    return "THREAD_RETURN";
   if (op == VKI_WQOPS_THREAD_SETCONC)   return "THREAD_SETCONC";
   if (op == VKI_WQOPS_QUEUE_NEWSPISUPP) return "QUEUE_NEWSPISUPP";
   if (op == VKI_WQOPS_QUEUE_REQTHREADS) return "QUEUE_REQTHREADS";
   return "?";
}
/* workq_ops(options, item, priority): workqueue control.  The
   THREAD_RETURN case interacts with Valgrind's wqthread hijack — see
   the inline comments. */
PRE(workq_ops)
{
   PRINT("workq_ops( %ld(%s), %#lx, %ld )", ARG1, workqop_name(ARG1), ARG2,
      ARG3);
   PRE_REG_READ3(int,"workq_ops", int,"options", void *,"item",
                 int,"priority");
   switch (ARG1) {
   case VKI_WQOPS_QUEUE_ADD:
   case VKI_WQOPS_QUEUE_REMOVE:
      // GrP fixme need anything here?
      // GrP fixme may block?
      break;
   case VKI_WQOPS_QUEUE_NEWSPISUPP:
      // JRS don't think we need to do anything here -- this just checks
      // whether some newer functionality is supported
      break;
   case VKI_WQOPS_QUEUE_REQTHREADS:
      // JRS uh, looks like it queues up a bunch of threads, or some such?
      *flags |= SfMayBlock; // the kernel sources take a spinlock, so play safe
      break;
   case VKI_WQOPS_THREAD_RETURN: {
      // The interesting case. The kernel will do one of two things:
      // 1. Return normally. We continue; libc proceeds to stop the thread.
      // V does nothing special here.
      // 2. Jump to wqthread_hijack. This wipes the stack and runs a
      // new work item, and never returns from workq_ops.
      // V handles this by longjmp() from wqthread_hijack back to the
      // scheduler, which continues at the new client SP/IP/state.
      // This works something like V's signal handling.
      // To the tool, this looks like workq_ops() sometimes returns
      // to a strange address.
      ThreadState *tst = VG_(get_ThreadState)(tid);
      tst->os_state.wq_jmpbuf_valid = True;
      *flags |= SfMayBlock; // GrP fixme true?
      break;
   }
   default:
      VG_(printf)("UNKNOWN workq_ops option %ld\n", ARG1);
      break;
   }
}
POST(workq_ops)
{
   /* The longjmp escape hatch set up in the PRE is no longer needed
      once the syscall has returned normally. */
   ThreadState *tst = VG_(get_ThreadState)(tid);
   tst->os_state.wq_jmpbuf_valid = False;
   switch (ARG1) {
   case VKI_WQOPS_THREAD_RETURN:
      /* The kernel may have created/recycled thread stacks; resync. */
      ML_(sync_mappings)("after", "workq_ops(THREAD_RETURN)", 0);
      break;
   case VKI_WQOPS_QUEUE_REQTHREADS:
      ML_(sync_mappings)("after", "workq_ops(QUEUE_REQTHREADS)", 0);
      break;
   default:
      break;
   }
}
/* __mac_syscall(policy, call, arg): Mandatory Access Control entry
   point; the arg buffer's layout depends on (policy, call) so it is
   not checked. */
PRE(__mac_syscall)
{
   PRINT("__mac_syscall( %#lx, %ld, %#lx )", ARG1, ARG2, ARG3);
   PRE_REG_READ3(int,"__mac_syscall", char *,"policy",
                 int,"call", void *,"arg");
   // GrP fixme check call's arg?
   // GrP fixme check policy?
}
/* Not like syswrap-generic's sys_exit, which exits only one thread.
   More like syswrap-generic's sys_exit_group. */
PRE(exit)
{
   ThreadId t;
   ThreadState* tst;
   PRINT("darwin exit( %ld )", ARG1);
   PRE_REG_READ1(void, "exit", int, status);
   tst = VG_(get_ThreadState)(tid);
   /* A little complex; find all the threads with the same threadgroup
      as this one (including this one), and mark them to exit */
   for (t = 1; t < VG_N_THREADS; t++) {
      if ( /* not alive */
           VG_(threads)[t].status == VgTs_Empty
           /* GrP fixme zombie? */
         )
         continue;
      /* Mark the thread for process exit with this exit code, and kick
         it out of any syscall it is blocked in. */
      VG_(threads)[t].exitreason = VgSrc_ExitProcess;
      VG_(threads)[t].os_state.exitcode = ARG1;
      if (t != tid)
         VG_(get_thread_out_of_syscall)(t); /* unblock it, if blocked */
   }
   /* We have to claim the syscall already succeeded. */
   SET_STATUS_Success(0);
}
/* sigaction(signum, act, oldact): handled entirely inside Valgrind via
   VG_(do_sys_sigaction); the kernel never sees the client's handler. */
PRE(sigaction)
{
   PRINT("sigaction ( %ld, %#lx, %#lx )", ARG1,ARG2,ARG3);
   PRE_REG_READ3(long, "sigaction",
                 int, signum, vki_sigaction_toK_t *, act,
                 vki_sigaction_fromK_t *, oldact);
   if (ARG2 != 0) {
      /* Check the individual fields we actually consume. */
      vki_sigaction_toK_t *sa = (vki_sigaction_toK_t *)ARG2;
      PRE_MEM_READ( "sigaction(act->sa_handler)",
                    (Addr)&sa->ksa_handler, sizeof(sa->ksa_handler));
      PRE_MEM_READ( "sigaction(act->sa_mask)",
                    (Addr)&sa->sa_mask, sizeof(sa->sa_mask));
      PRE_MEM_READ( "sigaction(act->sa_flags)",
                    (Addr)&sa->sa_flags, sizeof(sa->sa_flags));
   }
   if (ARG3 != 0)
      PRE_MEM_WRITE( "sigaction(oldact)",
                     ARG3, sizeof(vki_sigaction_fromK_t));
   SET_STATUS_from_SysRes(
      VG_(do_sys_sigaction)(ARG1, (const vki_sigaction_toK_t *)ARG2,
                            (vki_sigaction_fromK_t *)ARG3)
   );
}
POST(sigaction)
{
   vg_assert(SUCCESS);
   if (RES == 0 && ARG3 != 0)
      POST_MEM_WRITE( ARG3, sizeof(vki_sigaction_fromK_t));
}
/* __pthread_kill(thread, sig): deliver a signal to a specific thread. */
PRE(__pthread_kill)
{
   PRINT("__pthread_kill ( %ld, %ld )", ARG1, ARG2);
   PRE_REG_READ2(long, "__pthread_kill", vki_pthread_t*, thread, int, sig);
}
/* __pthread_sigmask: not implemented — warn once and pretend success. */
PRE(__pthread_sigmask)
{
   // GrP fixme
   // JRS: arguments are identical to sigprocmask
   // (how, sigset_t*, sigset_t*). Perhaps behave identically?
   static Bool warned;
   if (!warned) {
      VG_(printf)("UNKNOWN __pthread_sigmask is unsupported. "
                  "This warning will not be repeated.\n");
      warned = True;
   }
   SET_STATUS_Success( 0 );
}
PRE(__pthread_canceled)
{
   *flags |= SfMayBlock; /* might kill this thread??? */
   /* I don't think so -- I think it just changes the cancellation
      state. But taking no chances. */
   PRINT("__pthread_canceled ( %ld )", ARG1);
   PRE_REG_READ1(long, "__pthread_canceled", void*, arg1);
}
PRE(__pthread_markcancel)
{
   *flags |= SfMayBlock; /* might kill this thread??? */
   PRINT("__pthread_markcancel ( %#lx )", ARG1);
   PRE_REG_READ1(long, "__pthread_markcancel", void*, arg1);
   /* Just let it go through. No idea if this is correct. */
}
/* __disable_threadsignal: implemented locally by blocking every signal
   for this thread via Valgrind's own sigprocmask machinery. */
PRE(__disable_threadsignal)
{
   vki_sigset_t set;
   PRINT("__disable_threadsignal(%ld, %ld, %ld)", ARG1, ARG2, ARG3);
   /* I don't think this really looks at its arguments. So don't
      bother to check them. */
   VG_(sigfillset)( &set );
   SET_STATUS_from_SysRes(
      VG_(do_sys_sigprocmask) ( tid, VKI_SIG_BLOCK, &set, NULL )
   );
   /* We don't expect that blocking all signals for this thread could
      cause any more to be delivered (how could it?), but just in case
      .. */
   if (SUCCESS)
      *flags |= SfPollAfter;
}
/* kdebug_trace: kernel trace-point injection; argument count varies by
   caller, so no register checking is done (see comment below). */
PRE(kdebug_trace)
{
   PRINT("kdebug_trace(%ld, %ld, %ld, %ld, %ld, %ld)",
         ARG1, ARG2, ARG3, ARG4, ARG5, ARG6);
   /*
     Don't check anything - some clients pass fewer arguments.
     PRE_REG_READ6(long, "kdebug_trace",
                   int,"code", int,"arg1", int,"arg2",
                   int,"arg3", int,"arg4", int,"arg5");
   */
}
/* seteuid(uid): scalar argument only. */
PRE(seteuid)
{
   PRINT("seteuid(%ld)", ARG1);
   PRE_REG_READ1(long, "seteuid", vki_uid_t, "uid");
}
/* setegid(gid): scalar argument only.
   Fix: the argument was previously declared as vki_uid_t "uid"
   (copied from seteuid); setegid takes a group id. */
PRE(setegid)
{
   PRINT("setegid(%ld)", ARG1);
   PRE_REG_READ1(long, "setegid", vki_gid_t, "gid");
}
/* settid(uid, gid): assume per-thread identity. */
PRE(settid)
{
   PRINT("settid(%ld, %ld)", ARG1, ARG2);
   PRE_REG_READ2(long, "settid", vki_uid_t, "uid", vki_gid_t, "gid");
}
/* gettid(): no arguments. */
PRE(gettid)
{
   PRINT("gettid()");
   PRE_REG_READ0(long, gettid);
}
/* XXX need to check whether we need POST operations for
 * waitevent, watchevent, modwatch -- jpeach
 */
/* watchevent(event, eventmask): register interest in an event. */
PRE(watchevent)
{
   PRINT("watchevent(%#lx, %#lx)", ARG1, ARG2);
   PRE_REG_READ2(long, "watchevent",
                 vki_eventreq *, "event", unsigned int, "eventmask");
   PRE_MEM_READ("watchevent(event)", ARG1, sizeof(vki_eventreq));
   PRE_MEM_READ("watchevent(eventmask)", ARG2, sizeof(unsigned int));
   *flags |= SfMayBlock;
}
/* The kernel accepts (struct timeval *)-1 as "poll, don't block". */
#define WAITEVENT_FAST_POLL ((Addr)(struct timeval *)-1)
/* waitevent(event, timeout): wait for a watched event to fire. */
PRE(waitevent)
{
   PRINT("waitevent(%#lx, %#lx)", ARG1, ARG2);
   PRE_REG_READ2(long, "waitevent",
                 vki_eventreq *, "event", struct timeval *, "timeout");
   PRE_MEM_WRITE("waitevent(event)", ARG1, sizeof(vki_eventreq));
   if (ARG2 && ARG2 != WAITEVENT_FAST_POLL) {
      PRE_timeval_READ("waitevent(timeout)", ARG2);
   }
   /* XXX ((timeval*)-1) is valid for ARG2 -- jpeach */
   *flags |= SfMayBlock;
}
POST(waitevent)
{
   POST_MEM_WRITE(ARG1, sizeof(vki_eventreq));
}
/* modwatch(event, eventmask): modify an existing event watch. */
PRE(modwatch)
{
   PRINT("modwatch(%#lx, %#lx)", ARG1, ARG2);
   PRE_REG_READ2(long, "modwatch",
                 vki_eventreq *, "event", unsigned int, "eventmask");
   PRE_MEM_READ("modwatch(event)", ARG1, sizeof(vki_eventreq));
   PRE_MEM_READ("modwatch(eventmask)", ARG2, sizeof(unsigned int));
}
/* getxattr(path, name, value, size, position, options): read an
   extended attribute.  value may be NULL to query the needed size. */
PRE(getxattr)
{
   PRINT("getxattr(%#lx(%s), %#lx(%s), %#lx, %lu, %lu, %ld)",
         ARG1, (char *)ARG1, ARG2, (char *)ARG2, ARG3, ARG4, ARG5, ARG6);
   PRE_REG_READ6(vki_ssize_t, "getxattr",
                 const char *, path, char *, name, void *, value,
                 vki_size_t, size, uint32_t, position, int, options);
   PRE_MEM_RASCIIZ("getxattr(path)", ARG1);
   PRE_MEM_RASCIIZ("getxattr(name)", ARG2);
   if (ARG3)
      PRE_MEM_WRITE( "getxattr(value)", ARG3, ARG4);
}
POST(getxattr)
{
   /* RES is the number of bytes placed in value (when non-NULL). */
   vg_assert((vki_ssize_t)RES >= 0);
   if (ARG3)
      POST_MEM_WRITE(ARG3, (vki_ssize_t)RES);
}
/* fgetxattr(fd, name, value, size, position, options): fd-based
   variant of getxattr.
   Fix: the buffer-check messages previously said "getxattr", copied
   from PRE(getxattr); they now report the actual syscall name. */
PRE(fgetxattr)
{
   PRINT("fgetxattr(%ld, %#lx(%s), %#lx, %lu, %lu, %ld)",
      ARG1, ARG2, (char *)ARG2, ARG3, ARG4, ARG5, ARG6);
   PRE_REG_READ6(vki_ssize_t, "fgetxattr",
                 int, fd, char *, name, void *, value,
                 vki_size_t, size, uint32_t, position, int, options);
   PRE_MEM_RASCIIZ("fgetxattr(name)", ARG2);
   PRE_MEM_WRITE( "fgetxattr(value)", ARG3, ARG4);
}
POST(fgetxattr)
{
   /* RES is the number of bytes placed in value. */
   vg_assert((vki_ssize_t)RES >= 0);
   POST_MEM_WRITE(ARG3, (vki_ssize_t)RES);
}
/* setxattr(path, name, value, size, position, options): write an
   extended attribute from the value buffer. */
PRE(setxattr)
{
   PRINT("setxattr ( %#lx(%s), %#lx(%s), %#lx, %lu, %lu, %ld )",
         ARG1, (char *)ARG1, ARG2, (char*)ARG2, ARG3, ARG4, ARG5, ARG6 );
   PRE_REG_READ6(int, "setxattr",
                 const char *,"path", char *,"name", void *,"value",
                 vki_size_t,"size", uint32_t,"position", int,"options" );
   PRE_MEM_RASCIIZ( "setxattr(path)", ARG1 );
   PRE_MEM_RASCIIZ( "setxattr(name)", ARG2 );
   PRE_MEM_READ( "setxattr(value)", ARG3, ARG4 );
}
/* fsetxattr: fd-based variant of setxattr. */
PRE(fsetxattr)
{
   PRINT( "fsetxattr ( %ld, %#lx(%s), %#lx, %lu, %lu, %ld )",
          ARG1, ARG2, (char*)ARG2, ARG3, ARG4, ARG5, ARG6 );
   PRE_REG_READ6(int, "fsetxattr",
                 int,"fd", char *,"name", void *,"value",
                 vki_size_t,"size", uint32_t,"position", int,"options" );
   PRE_MEM_RASCIIZ( "fsetxattr(name)", ARG2 );
   PRE_MEM_READ( "fsetxattr(value)", ARG3, ARG4 );
}
/* removexattr(path, attrname, options): delete an extended attribute. */
PRE(removexattr)
{
   PRINT( "removexattr ( %#lx(%s), %#lx(%s), %ld )",
          ARG1, (HChar*)ARG1, ARG2, (HChar*)ARG2, ARG3 );
   PRE_REG_READ3(int, "removexattr",
                 const char*, "path", char*, "attrname", int, "options");
   PRE_MEM_RASCIIZ( "removexattr(path)", ARG1 );
   PRE_MEM_RASCIIZ( "removexattr(attrname)", ARG2 );
}
/* fremovexattr(fd, attrname, options): fd-based variant of removexattr.
   Fix: the attrname check message previously said "removexattr",
   copied from PRE(removexattr); it now names this syscall. */
PRE(fremovexattr)
{
   PRINT( "fremovexattr ( %ld, %#lx(%s), %ld )",
          ARG1, ARG2, (HChar*)ARG2, ARG3 );
   PRE_REG_READ3(int, "fremovexattr",
                 int, "fd", char*, "attrname", int, "options");
   PRE_MEM_RASCIIZ( "fremovexattr(attrname)", ARG2 );
}
/* listxattr(path, namebuf, size, options): list attribute names into
   namebuf; RES is the number of bytes written. */
PRE(listxattr)
{
   PRINT( "listxattr ( %#lx(%s), %#lx, %lu, %ld )",
          ARG1, (char *)ARG1, ARG2, ARG3, ARG4 );
   PRE_REG_READ4 (long, "listxattr",
                 const char *,"path", char *,"namebuf",
                 vki_size_t,"size", int,"options" );
   PRE_MEM_RASCIIZ( "listxattr(path)", ARG1 );
   PRE_MEM_WRITE( "listxattr(namebuf)", ARG2, ARG3 );
   *flags |= SfMayBlock;
}
POST(listxattr)
{
   vg_assert(SUCCESS);
   vg_assert((vki_ssize_t)RES >= 0);
   POST_MEM_WRITE( ARG2, (vki_ssize_t)RES );
}
/* flistxattr: fd-based variant of listxattr. */
PRE(flistxattr)
{
   PRINT( "flistxattr ( %ld, %#lx, %lu, %ld )",
          ARG1, ARG2, ARG3, ARG4 );
   PRE_REG_READ4 (long, "flistxattr",
                  int, "fd", char *,"namebuf",
                  vki_size_t,"size", int,"options" );
   PRE_MEM_WRITE( "flistxattr(namebuf)", ARG2, ARG3 );
   *flags |= SfMayBlock;
}
POST(flistxattr)
{
   vg_assert(SUCCESS);
   vg_assert((vki_ssize_t)RES >= 0);
   POST_MEM_WRITE( ARG2, (vki_ssize_t)RES );
}
/* shmat(shmid, shmaddr, shmflg): attach SysV shared memory.  Address
   selection/validation is done by the generic handler, which may
   rewrite ARG2 with the address actually chosen. */
PRE(shmat)
{
   UWord arg2tmp;
   PRINT("shmat ( %ld, %#lx, %ld )",ARG1,ARG2,ARG3);
   PRE_REG_READ3(long, "shmat",
                 int, shmid, const void *, shmaddr, int, shmflg);
   arg2tmp = ML_(generic_PRE_sys_shmat)(tid, ARG1,ARG2,ARG3);
   if (arg2tmp == 0)
      SET_STATUS_Failure( VKI_EINVAL );
   else
      ARG2 = arg2tmp; // used in POST
}
POST(shmat)
{
   ML_(generic_POST_sys_shmat)(tid, RES,ARG1,ARG2,ARG3);
}
/* shmctl(shmid, cmd, buf): delegated to the generic handlers. */
PRE(shmctl)
{
   PRINT("shmctl ( %ld, %ld, %#lx )",ARG1,ARG2,ARG3);
   PRE_REG_READ3(long, "shmctl",
                 int, shmid, int, cmd, struct vki_shmid_ds *, buf);
   ML_(generic_PRE_sys_shmctl)(tid, ARG1,ARG2,ARG3);
}
POST(shmctl)
{
   ML_(generic_POST_sys_shmctl)(tid, RES,ARG1,ARG2,ARG3);
}
/* shmdt(shmaddr): detach; the generic PRE validates the address. */
PRE(shmdt)
{
   PRINT("shmdt ( %#lx )",ARG1);
   PRE_REG_READ1(long, "shmdt", const void *, shmaddr);
   if (!ML_(generic_PRE_sys_shmdt)(tid, ARG1))
      SET_STATUS_Failure( VKI_EINVAL );
}
POST(shmdt)
{
   ML_(generic_POST_sys_shmdt)(tid, RES,ARG1);
}
/* shmget(key, size, shmflg): scalar arguments only. */
PRE(shmget)
{
   PRINT("shmget ( %ld, %ld, %ld )",ARG1,ARG2,ARG3);
   PRE_REG_READ3(long, "shmget", vki_key_t, key, vki_size_t, size, int, shmflg);
}
/* shm_open(name, flags, mode): open POSIX shared memory; returns fd. */
PRE(shm_open)
{
   PRINT("shm_open(%#lx(%s), %ld, %ld)", ARG1, (char *)ARG1, ARG2, ARG3);
   PRE_REG_READ3(long, "shm_open",
                 const char *,"name", int,"flags", vki_mode_t,"mode");
   PRE_MEM_RASCIIZ( "shm_open(filename)", ARG1 );
   *flags |= SfMayBlock;
}
POST(shm_open)
{
   vg_assert(SUCCESS);
   /* Vet and record the new fd, naming it after the shm object. */
   if (!ML_(fd_allowed)(RES, "shm_open", tid, True)) {
      VG_(close)(RES);
      SET_STATUS_Failure( VKI_EMFILE );
   } else {
      if (VG_(clo_track_fds))
         ML_(record_fd_open_with_given_name)(tid, RES, (char*)ARG1);
   }
}
PRE(shm_unlink)
{
   *flags |= SfMayBlock;
   PRINT("shm_unlink ( %#lx(%s) )", ARG1,(char*)ARG1);
   PRE_REG_READ1(long, "shm_unlink", const char *, pathname);
   PRE_MEM_RASCIIZ( "shm_unlink(pathname)", ARG1 );
}
POST(shm_unlink)
{
   /* My reading of the man page suggests that a call may cause memory
      mappings to change: "if no references exist at the time of the
      call to shm_unlink(), the resources are reclaimed immediately".
      So we need to resync here, sigh. */
   ML_(sync_mappings)("after", "shm_unlink", 0);
}
/* stat_extended(path, buf, fsacl, fsacl_size): stat plus ACL retrieval.
   *fsacl_size is an in/out value: on entry the capacity of fsacl, on
   exit the size written — hence the safe_to_deref guard before using
   it to size the fsacl checks. */
PRE(stat_extended)
{
   PRINT("stat_extended( %#lx(%s), %#lx, %#lx, %#lx )",
      ARG1, (char *)ARG1, ARG2, ARG3, ARG4);
   PRE_REG_READ4(int, "stat_extended", char *, file_name, struct stat *, buf,
                 void *, fsacl, vki_size_t *, fsacl_size);
   PRE_MEM_RASCIIZ( "stat_extended(file_name)",  ARG1 );
   PRE_MEM_WRITE( "stat_extended(buf)", ARG2, sizeof(struct vki_stat) );
   if (ML_(safe_to_deref)( (void*)ARG4, sizeof(vki_size_t) ))
      PRE_MEM_WRITE("stat_extended(fsacl)", ARG3, *(vki_size_t *)ARG4 );
   PRE_MEM_READ( "stat_extended(fsacl_size)", ARG4, sizeof(vki_size_t) );
}
POST(stat_extended)
{
   POST_MEM_WRITE( ARG2, sizeof(struct vki_stat) );
   if (ML_(safe_to_deref)( (void*)ARG4, sizeof(vki_size_t) ))
      POST_MEM_WRITE( ARG3, *(vki_size_t *)ARG4 );
   POST_MEM_WRITE( ARG4, sizeof(vki_size_t) );
}
/* lstat_extended: as stat_extended but does not follow symlinks. */
PRE(lstat_extended)
{
   PRINT("lstat_extended( %#lx(%s), %#lx, %#lx, %#lx )",
      ARG1, (char *)ARG1, ARG2, ARG3, ARG4);
   PRE_REG_READ4(int, "lstat_extended", char *, file_name, struct stat *, buf,
                 void *, fsacl, vki_size_t *, fsacl_size);
   PRE_MEM_RASCIIZ( "lstat_extended(file_name)",  ARG1 );
   PRE_MEM_WRITE( "lstat_extended(buf)", ARG2, sizeof(struct vki_stat) );
   if (ML_(safe_to_deref)( (void*)ARG4, sizeof(vki_size_t) ))
      PRE_MEM_WRITE("lstat_extended(fsacl)", ARG3, *(vki_size_t *)ARG4 );
   PRE_MEM_READ( "lstat_extended(fsacl_size)", ARG4, sizeof(vki_size_t) );
}
POST(lstat_extended)
{
   POST_MEM_WRITE( ARG2, sizeof(struct vki_stat) );
   if (ML_(safe_to_deref)( (void*)ARG4, sizeof(vki_size_t) ))
      POST_MEM_WRITE( ARG3, *(vki_size_t *)ARG4 );
   POST_MEM_WRITE( ARG4, sizeof(vki_size_t) );
}
/* fstat_extended: fd-based variant of stat_extended. */
PRE(fstat_extended)
{
   PRINT("fstat_extended( %ld, %#lx, %#lx, %#lx )",
      ARG1, ARG2, ARG3, ARG4);
   PRE_REG_READ4(int, "fstat_extended", int, fd, struct stat *, buf,
                 void *, fsacl, vki_size_t *, fsacl_size);
   PRE_MEM_WRITE( "fstat_extended(buf)", ARG2, sizeof(struct vki_stat) );
   if (ML_(safe_to_deref)( (void*)ARG4, sizeof(vki_size_t) ))
      PRE_MEM_WRITE("fstat_extended(fsacl)", ARG3, *(vki_size_t *)ARG4 );
   PRE_MEM_READ( "fstat_extended(fsacl_size)", ARG4, sizeof(vki_size_t) );
}
POST(fstat_extended)
{
   POST_MEM_WRITE( ARG2, sizeof(struct vki_stat) );
   if (ML_(safe_to_deref)( (void*)ARG4, sizeof(vki_size_t) ))
      POST_MEM_WRITE( ARG3, *(vki_size_t *)ARG4 );
   POST_MEM_WRITE( ARG4, sizeof(vki_size_t) );
}
/* stat64_extended family: identical to the *stat_extended handlers
   above but using struct stat64.  *fsacl_size is in/out, hence the
   safe_to_deref guard before sizing the fsacl checks. */
PRE(stat64_extended)
{
   PRINT("stat64_extended( %#lx(%s), %#lx, %#lx, %#lx )",
      ARG1, (char *)ARG1, ARG2, ARG3, ARG4);
   PRE_REG_READ4(int, "stat64_extended", char *, file_name, struct stat64 *, buf,
                 void *, fsacl, vki_size_t *, fsacl_size);
   PRE_MEM_RASCIIZ( "stat64_extended(file_name)",  ARG1 );
   PRE_MEM_WRITE( "stat64_extended(buf)", ARG2, sizeof(struct vki_stat64) );
   if (ML_(safe_to_deref)( (void*)ARG4, sizeof(vki_size_t) ))
      PRE_MEM_WRITE("stat64_extended(fsacl)", ARG3, *(vki_size_t *)ARG4 );
   PRE_MEM_READ( "stat64_extended(fsacl_size)", ARG4, sizeof(vki_size_t) );
}
POST(stat64_extended)
{
   POST_MEM_WRITE( ARG2, sizeof(struct vki_stat64) );
   if (ML_(safe_to_deref)( (void*)ARG4, sizeof(vki_size_t) ))
      POST_MEM_WRITE( ARG3, *(vki_size_t *)ARG4 );
   POST_MEM_WRITE( ARG4, sizeof(vki_size_t) );
}
/* lstat64_extended: does not follow symlinks. */
PRE(lstat64_extended)
{
   PRINT("lstat64_extended( %#lx(%s), %#lx, %#lx, %#lx )",
      ARG1, (char *)ARG1, ARG2, ARG3, ARG4);
   PRE_REG_READ4(int, "lstat64_extended", char *, file_name, struct stat64 *, buf,
                 void *, fsacl, vki_size_t *, fsacl_size);
   PRE_MEM_RASCIIZ( "lstat64_extended(file_name)",  ARG1 );
   PRE_MEM_WRITE( "lstat64_extended(buf)", ARG2, sizeof(struct vki_stat64) );
   if (ML_(safe_to_deref)( (void*)ARG4, sizeof(vki_size_t) ))
      PRE_MEM_WRITE( "lstat64_extended(fsacl)", ARG3, *(vki_size_t *)ARG4 );
   PRE_MEM_READ( "lstat64_extended(fsacl_size)", ARG4, sizeof(vki_size_t) );
}
POST(lstat64_extended)
{
   POST_MEM_WRITE( ARG2, sizeof(struct vki_stat64) );
   if (ML_(safe_to_deref)( (void*)ARG4, sizeof(vki_size_t) ))
      POST_MEM_WRITE( ARG3, *(vki_size_t *)ARG4 );
   POST_MEM_WRITE( ARG4, sizeof(vki_size_t) );
}
/* fstat64_extended: fd-based variant. */
PRE(fstat64_extended)
{
   PRINT("fstat64_extended( %ld, %#lx, %#lx, %#lx )",
      ARG1, ARG2, ARG3, ARG4);
   PRE_REG_READ4(int, "fstat64_extended", int, fd, struct stat64 *, buf,
                 void *, fsacl, vki_size_t *, fsacl_size);
   PRE_MEM_WRITE( "fstat64_extended(buf)", ARG2, sizeof(struct vki_stat64) );
   if (ML_(safe_to_deref)( (void*)ARG4, sizeof(vki_size_t) ))
      PRE_MEM_WRITE("fstat64_extended(fsacl)", ARG3, *(vki_size_t *)ARG4 );
   PRE_MEM_READ( "fstat64_extended(fsacl_size)", ARG4, sizeof(vki_size_t) );
}
POST(fstat64_extended)
{
   POST_MEM_WRITE( ARG2, sizeof(struct vki_stat64) );
   if (ML_(safe_to_deref)( (void*)ARG4, sizeof(vki_size_t) ))
      POST_MEM_WRITE( ARG3, *(vki_size_t *)ARG4 );
   POST_MEM_WRITE( ARG4, sizeof(vki_size_t) );
}
/* fchmod_extended(fd, uid, gid, mode, xsecurity): chmod plus
   ownership/ACL in one call. */
PRE(fchmod_extended)
{
   /* DDD: Note: this is not really correct. Handling of
      chmod_extended is broken in the same way. */
   PRINT("fchmod_extended ( %ld, %ld, %ld, %ld, %#lx )",
         ARG1, ARG2, ARG3, ARG4, ARG5);
   PRE_REG_READ5(long, "fchmod_extended",
                 unsigned int, fildes,
                 uid_t, uid,
                 gid_t, gid,
                 vki_mode_t, mode,
                 void* /*really,user_addr_t*/, xsecurity);
   /* DDD: relative to the xnu sources (kauth_copyinfilesec), this
      is just way wrong. [The trouble is with the size, which depends on a
      non-trivial kernel computation] */
   if (ARG5) {
      PRE_MEM_READ( "fchmod_extended(xsecurity)", ARG5,
                    sizeof(struct vki_kauth_filesec) );
   }
}
/* chmod_extended(path, uid, gid, mode, xsecurity): path-based variant
   of fchmod_extended. */
PRE(chmod_extended)
{
   /* DDD: Note: this is not really correct. Handling of
      fchmod_extended is broken in the same way. */
   PRINT("chmod_extended ( %#lx(%s), %ld, %ld, %ld, %#lx )",
         ARG1, ARG1 ? (HChar*)ARG1 : "(null)", ARG2, ARG3, ARG4, ARG5);
   PRE_REG_READ5(long, "chmod_extended",
                 unsigned int, fildes,
                 uid_t, uid,
                 gid_t, gid,
                 vki_mode_t, mode,
                 void* /*really,user_addr_t*/, xsecurity);
   PRE_MEM_RASCIIZ("chmod_extended(path)", ARG1);
   /* DDD: relative to the xnu sources (kauth_copyinfilesec), this
      is just way wrong. [The trouble is with the size, which depends on a
      non-trivial kernel computation] */
   if (ARG5) {
      PRE_MEM_READ( "chmod_extended(xsecurity)", ARG5,
                    sizeof(struct vki_kauth_filesec) );
   }
}
/* open_extended(path, flags, uid, gid, mode, xsecurity): open with
   explicit ownership and ACL. */
PRE(open_extended)
{
   /* DDD: Note: this is not really correct. Handling of
      {,f}chmod_extended is broken in the same way. */
   PRINT("open_extended ( %#lx(%s), 0x%lx, %ld, %ld, %ld, %#lx )",
         ARG1, ARG1 ? (HChar*)ARG1 : "(null)",
         ARG2, ARG3, ARG4, ARG5, ARG6);
   PRE_REG_READ6(long, "open_extended",
                 char*, path,
                 int, flags,
                 uid_t, uid,
                 gid_t, gid,
                 vki_mode_t, mode,
                 void* /*really,user_addr_t*/, xsecurity);
   PRE_MEM_RASCIIZ("open_extended(path)", ARG1);
   /* DDD: relative to the xnu sources (kauth_copyinfilesec), this
      is just way wrong. [The trouble is with the size, which depends on a
      non-trivial kernel computation] */
   if (ARG6)
      PRE_MEM_READ( "open_extended(xsecurity)", ARG6,
                    sizeof(struct vki_kauth_filesec) );
}
// This is a ridiculous syscall. Specifically, the 'entries' argument points
// to a buffer that contains one or more 'accessx_descriptor' structs followed
// by one or more strings. Each accessx_descriptor contains a field,
// 'ad_name_offset', which points to one of the strings (or it can contain
// zero which means "reuse the string from the previous accessx_descriptor").
//
// What's really ridiculous is that we are only given the size of the overall
// buffer, not the number of accessx_descriptors, nor the number of strings.
// The kernel determines the number of accessx_descriptors by walking through
// them one by one, checking that the ad_name_offset points within the buffer,
// past the current point (or that it's a zero, unless it's the first
// descriptor); if so, we assume that this really is an accessx_descriptor,
// if not, we assume we've hit the strings section. Gah.
//
// This affects us here because the number of entries in the 'results' buffer
// determined by the number of accessx_descriptors. So we have to know that
// number in order to do PRE_MEM_WRITE/POST_MEM_WRITE of 'results'. In
// practice, we skip the PRE_MEM_WRITE step because it's easier to do the
// computation after the syscall has succeeded, because the kernel will have
// checked for all the zillion different ways this syscall can fail, and we'll
// know we have a well-formed 'entries' buffer. This means we might miss some
// uses of unaddressable memory but oh well.
//
/* access_extended(entries, size, results, uid): batched access checks.
   See the long comment above for why 'results' cannot be sized here;
   its write-check is deferred to the POST. */
PRE(access_extended)
{
   PRINT("access_extended( %#lx(%s), %lu, %#lx, %lu )",
      ARG1, (char *)ARG1, ARG2, ARG3, ARG4);
   // XXX: the accessx_descriptor struct contains padding, so this can cause
   // unnecessary undefined value errors. But you arguably shouldn't be
   // passing undefined values to the kernel anyway...
   PRE_REG_READ4(int, "access_extended", void *, entries, vki_size_t, size,
                 vki_errno_t *, results, vki_uid_t *, uid);
   PRE_MEM_READ("access_extended(entries)", ARG1, ARG2 );
   // XXX: as mentioned above, this check is too hard to do before the
   // syscall.
   //PRE_MEM_WRITE("access_extended(results)", ARG3, ??? );
}
/* POST handler for access_extended(): deduce how many
   accessx_descriptor entries the 'entries' buffer held (see the long
   comment above PRE(access_extended)) so the right number of
   vki_errno_t results can be marked as written.
   Fix: the n_descs computation divided by
   sizeof(struct accessx_descriptor); every other use in this handler
   (and the file's convention) is the vki_-prefixed kernel-interface
   type, struct vki_accessx_descriptor. */
POST(access_extended)
{
   // 'n_descs' is the number of descriptors we think are in the buffer. We
   // start with the maximum possible value, which occurs if we have the
   // shortest possible string section. The shortest string section allowed
   // consists of a single one-char string (plus the NUL char). Hence the
   // '2'.
   struct vki_accessx_descriptor* entries = (struct vki_accessx_descriptor*)ARG1;
   SizeT size = ARG2;
   Int n_descs = (size - 2) / sizeof(struct vki_accessx_descriptor);
   Int i;         // Current position in the descriptors section array.
   Int u;         // Upper bound on the length of the descriptors array
                  // (recomputed each time around the loop)
   vg_assert(n_descs > 0);
   // Step through the descriptors, lowering 'n_descs' until we know we've
   // reached the string section.
   for (i = 0; True; i++) {
      // If we're past our estimate, we must be one past the end of the
      // descriptors section (ie. at the start of the string section). Stop.
      if (i >= n_descs)
         break;
      // Get the array index for the string, but pretend momentarily that it
      // is actually another accessx_descriptor. That gives us an upper bound
      // on the length of the descriptors section. (Unless the index is zero,
      // in which case we have no new info.)
      u = entries[i].ad_name_offset / sizeof(struct vki_accessx_descriptor);
      if (u == 0) {
         vg_assert(i != 0);
         continue;
      }
      // If the upper bound is below our current estimate, revise that
      // estimate downwards.
      if (u < n_descs)
         n_descs = u;
   }
   // Sanity check.
   vg_assert(n_descs <= VKI_ACCESSX_MAX_DESCRIPTORS);
   POST_MEM_WRITE( ARG3, n_descs * sizeof(vki_errno_t) );
}
PRE(chflags)
{
   // chflags(2): change the file flags of the file named by 'path'.
   PRINT("chflags ( %#lx(%s), %lu )", ARG1, (char *)ARG1, ARG2);
   PRE_REG_READ2(int, "chflags", const char *,path, unsigned int,flags);
   // 'path' must be a readable NUL-terminated string.
   PRE_MEM_RASCIIZ("chflags(path)", ARG1);

   // GrP fixme sanity-check flags value?
}
PRE(fchflags)
{
   // fchflags(2): like chflags() but operates on an open descriptor,
   // so there is no user memory to check beyond the register args.
   PRINT("fchflags ( %ld, %lu )", ARG1, ARG2);
   PRE_REG_READ2(int, "fchflags", int,fd, unsigned int,flags);

   // GrP fixme sanity-check flags value?
}
PRE(stat64)
{
   // stat64(2): fetch file status (64-bit inode variant) for 'path'.
   PRINT("stat64 ( %#lx(%s), %#lx )", ARG1, (char *)ARG1, ARG2);
   // Use the real syscall name in the register-read description; it
   // previously said "stat", which made "Syscall param" error messages
   // inconsistent with the "stat64(...)" labels below.
   PRE_REG_READ2(long, "stat64", const char *,path, struct stat64 *,buf);
   PRE_MEM_RASCIIZ("stat64(path)", ARG1);
   // The kernel fills in a struct stat64 at 'buf'.
   PRE_MEM_WRITE( "stat64(buf)", ARG2, sizeof(struct vki_stat64) );
}
POST(stat64)
{
   // On success the kernel has written the whole stat64 buffer.
   POST_MEM_WRITE( ARG2, sizeof(struct vki_stat64) );
}
PRE(lstat64)
{
   // lstat64(2): like stat64() but does not follow a final symlink.
   PRINT("lstat64 ( %#lx(%s), %#lx )", ARG1, (char *)ARG1, ARG2);
   // Use the real syscall name in the register-read description; it
   // previously said "stat", which made "Syscall param" error messages
   // inconsistent with the "lstat64(...)" labels below.
   PRE_REG_READ2(long, "lstat64", const char *,path, struct stat64 *,buf);
   PRE_MEM_RASCIIZ("lstat64(path)", ARG1);
   // The kernel fills in a struct stat64 at 'buf'.
   PRE_MEM_WRITE( "lstat64(buf)", ARG2, sizeof(struct vki_stat64) );
}
POST(lstat64)
{
   // On success the kernel has written the whole stat64 buffer.
   POST_MEM_WRITE( ARG2, sizeof(struct vki_stat64) );
}
PRE(fstat64)
{
   // fstat64(2): fetch file status for an open descriptor.
   PRINT("fstat64 ( %ld, %#lx )", ARG1,ARG2);
   // Use the real syscall name in the register-read description; it
   // previously said "fstat", which made "Syscall param" error messages
   // inconsistent with the "fstat64(buf)" label below.
   PRE_REG_READ2(long, "fstat64", unsigned int, fd, struct stat64 *, buf);
   // The kernel fills in a struct stat64 at 'buf'.
   PRE_MEM_WRITE( "fstat64(buf)", ARG2, sizeof(struct vki_stat64) );
}
POST(fstat64)
{
   // On success the kernel has written the whole stat64 buffer.
   POST_MEM_WRITE( ARG2, sizeof(struct vki_stat64) );
}
PRE(getfsstat)
{
   // getfsstat(2): fetch statfs information for all mounted filesystems.
   // A NULL buf is legal (the call then just returns the mount count).
   PRINT("getfsstat(%#lx, %ld, %ld)", ARG1, ARG2, ARG3);
   PRE_REG_READ3(int, "getfsstat",
                 struct vki_statfs *, buf, int, bufsize, int, flags);
   if (ARG1) {
      // ARG2 is a BYTE SIZE
      PRE_MEM_WRITE("getfsstat(buf)", ARG1, ARG2);
   }
}
POST(getfsstat)
{
   if (ARG1) {
      // RES is a STRUCT COUNT (number of statfs records filled in),
      // unlike ARG2 which was a byte size.
      POST_MEM_WRITE(ARG1, RES * sizeof(struct vki_statfs));
   }
}
PRE(getfsstat64)
{
   // getfsstat64(2): same as getfsstat() but fills statfs64 records.
   // A NULL buf is legal (the call then just returns the mount count).
   PRINT("getfsstat64(%#lx, %ld, %ld)", ARG1, ARG2, ARG3);
   PRE_REG_READ3(int, "getfsstat64",
                 struct vki_statfs64 *, buf, int, bufsize, int, flags);
   if (ARG1) {
      // ARG2 is a BYTE SIZE
      PRE_MEM_WRITE("getfsstat64(buf)", ARG1, ARG2);
   }
}
POST(getfsstat64)
{
   if (ARG1) {
      // RES is a STRUCT COUNT (number of statfs64 records filled in),
      // unlike ARG2 which was a byte size.
      POST_MEM_WRITE(ARG1, RES * sizeof(struct vki_statfs64));
   }
}
PRE(mount)
{
   // Nb: depending on 'flags', the 'type' and 'data' args may be ignored.
   // We are conservative and check everything, except the memory pointed to
   // by 'data'.
   *flags |= SfMayBlock;
   PRINT("sys_mount( %#lx(%s), %#lx(%s), %#lx, %#lx )",
         ARG1,(char*)ARG1, ARG2,(char*)ARG2, ARG3, ARG4);
   PRE_REG_READ4(long, "mount",
                 const char *, type, const char *, dir,
                 int, flags, void *, data);
   // Both strings must be readable and NUL-terminated.
   PRE_MEM_RASCIIZ( "mount(type)", ARG1);
   PRE_MEM_RASCIIZ( "mount(dir)", ARG2);
}
/* Walk the attribute buffer produced/consumed by getattrlist()-family
   syscalls.  'attrList' says which attributes were requested; the
   attributes appear in 'attrBuf' in a fixed, documented order, packed one
   after another with 4-byte alignment.  For each attribute present, call
   'fn' on its data region — the caller supplies either a "mark written"
   or "check readable" callback, so this single scanner serves both the
   PRE (setattrlist) and POST (getattrlist) directions.  Variable-length
   attributes are represented in the buffer by an attrreference_t whose
   offset/length point at the out-of-line data; 'fn' is applied to both
   the reference and the data it points to. */
static void scan_attrlist(ThreadId tid, struct vki_attrlist *attrList,
                          void *attrBuf, SizeT attrBufSize,
                          void (*fn)(ThreadId, void *attrData, SizeT size)
                          )
{
   // One table entry per possible attribute bit: the bit, and the fixed
   // size of its buffer payload, or -1 for variable-length attributes
   // (which appear in the buffer as an attrreference_t).
   typedef struct {
      uint32_t attrBit;
      int32_t attrSize;
   } attrspec;
   static const attrspec commonattr[] = {
      // This order is important.
#if DARWIN_VERS >= DARWIN_10_6
      { ATTR_CMN_RETURNED_ATTRS,  sizeof(attribute_set_t) },
#endif
      { ATTR_CMN_NAME,            -1 },
      { ATTR_CMN_DEVID,           sizeof(dev_t) },
      { ATTR_CMN_FSID,            sizeof(fsid_t) },
      { ATTR_CMN_OBJTYPE,         sizeof(fsobj_type_t) },
      { ATTR_CMN_OBJTAG,          sizeof(fsobj_tag_t) },
      { ATTR_CMN_OBJID,           sizeof(fsobj_id_t) },
      { ATTR_CMN_OBJPERMANENTID,  sizeof(fsobj_id_t) },
      { ATTR_CMN_PAROBJID,        sizeof(fsobj_id_t) },
      { ATTR_CMN_SCRIPT,          sizeof(text_encoding_t) },
      { ATTR_CMN_CRTIME,          sizeof(struct timespec) },
      { ATTR_CMN_MODTIME,         sizeof(struct timespec) },
      { ATTR_CMN_CHGTIME,         sizeof(struct timespec) },
      { ATTR_CMN_ACCTIME,         sizeof(struct timespec) },
      { ATTR_CMN_BKUPTIME,        sizeof(struct timespec) },
      { ATTR_CMN_FNDRINFO,        32 /*FileInfo+ExtendedFileInfo, or FolderInfo+ExtendedFolderInfo*/ },
      { ATTR_CMN_OWNERID,         sizeof(uid_t) },
      { ATTR_CMN_GRPID,           sizeof(gid_t) },
      { ATTR_CMN_ACCESSMASK,      sizeof(uint32_t) },
      { ATTR_CMN_NAMEDATTRCOUNT,  sizeof(uint32_t) },
      { ATTR_CMN_NAMEDATTRLIST,   -1 },
      { ATTR_CMN_FLAGS,           sizeof(uint32_t) },
      { ATTR_CMN_USERACCESS,      sizeof(uint32_t) },
      { ATTR_CMN_EXTENDED_SECURITY, -1 },
      { ATTR_CMN_UUID,            sizeof(guid_t) },
      { ATTR_CMN_GRPUUID,         sizeof(guid_t) },
      { ATTR_CMN_FILEID,          sizeof(uint64_t) },
      { ATTR_CMN_PARENTID,        sizeof(uint64_t) },
#if DARWIN_VERS >= DARWIN_10_6
      { ATTR_CMN_FULLPATH,        -1 },
#endif
      { 0,                        0 }
   };
   static const attrspec volattr[] = {
      // This order is important.
      { ATTR_VOL_INFO,            0 },
      { ATTR_VOL_FSTYPE,          sizeof(uint32_t) },
      { ATTR_VOL_SIGNATURE,       sizeof(uint32_t) },
      { ATTR_VOL_SIZE,            sizeof(off_t) },
      { ATTR_VOL_SPACEFREE,       sizeof(off_t) },
      { ATTR_VOL_SPACEAVAIL,      sizeof(off_t) },
      { ATTR_VOL_MINALLOCATION,   sizeof(off_t) },
      { ATTR_VOL_ALLOCATIONCLUMP, sizeof(off_t) },
      { ATTR_VOL_IOBLOCKSIZE,     sizeof(uint32_t) },
      { ATTR_VOL_OBJCOUNT,        sizeof(uint32_t) },
      { ATTR_VOL_FILECOUNT,       sizeof(uint32_t) },
      { ATTR_VOL_DIRCOUNT,        sizeof(uint32_t) },
      { ATTR_VOL_MAXOBJCOUNT,     sizeof(uint32_t) },
      { ATTR_VOL_MOUNTPOINT,      -1 },
      { ATTR_VOL_NAME,            -1 },
      { ATTR_VOL_MOUNTFLAGS,      sizeof(uint32_t) },
      { ATTR_VOL_MOUNTEDDEVICE,   -1 },
      { ATTR_VOL_ENCODINGSUSED,   sizeof(uint64_t) },
      { ATTR_VOL_CAPABILITIES,    sizeof(vol_capabilities_attr_t) },
#if DARWIN_VERS >= DARWIN_10_6
      { ATTR_VOL_UUID,            sizeof(uuid_t) },
#endif
      { ATTR_VOL_ATTRIBUTES,      sizeof(vol_attributes_attr_t) },
      { 0,                        0 }
   };
   static const attrspec dirattr[] = {
      // This order is important.
      { ATTR_DIR_LINKCOUNT,       sizeof(uint32_t) },
      { ATTR_DIR_ENTRYCOUNT,      sizeof(uint32_t) },
      { ATTR_DIR_MOUNTSTATUS,     sizeof(uint32_t) },
      { 0,                        0 }
   };
   static const attrspec fileattr[] = {
      // This order is important.
      { ATTR_FILE_LINKCOUNT,      sizeof(uint32_t) },
      { ATTR_FILE_TOTALSIZE,      sizeof(off_t) },
      { ATTR_FILE_ALLOCSIZE,      sizeof(off_t) },
      { ATTR_FILE_IOBLOCKSIZE,    sizeof(uint32_t) },
      { ATTR_FILE_CLUMPSIZE,      sizeof(uint32_t) },
      { ATTR_FILE_DEVTYPE,        sizeof(uint32_t) },
      { ATTR_FILE_FILETYPE,       sizeof(uint32_t) },
      { ATTR_FILE_FORKCOUNT,      sizeof(uint32_t) },
      { ATTR_FILE_FORKLIST,       -1 },
      { ATTR_FILE_DATALENGTH,     sizeof(off_t) },
      { ATTR_FILE_DATAALLOCSIZE,  sizeof(off_t) },
      { ATTR_FILE_DATAEXTENTS,    sizeof(extentrecord) },
      { ATTR_FILE_RSRCLENGTH,     sizeof(off_t) },
      { ATTR_FILE_RSRCALLOCSIZE,  sizeof(off_t) },
      { ATTR_FILE_RSRCEXTENTS,    sizeof(extentrecord) },
      { 0,                        0 }
   };
   static const attrspec forkattr[] = {
      // This order is important.
      { ATTR_FORK_TOTALSIZE,      sizeof(off_t) },
      { ATTR_FORK_ALLOCSIZE,      sizeof(off_t) },
      { 0,                        0 }
   };

   // Groups in the order they appear in the buffer: common, volume,
   // directory, file, fork — matching struct attrlist's five bitmaps.
   static const attrspec *attrdefs[5] = {
      commonattr, volattr, dirattr, fileattr, forkattr
   };
   attrgroup_t a[5];
   uint8_t *d, *dend;
   int g, i;

   vg_assert(attrList->bitmapcount == 5);
   // Copy the five requested-attribute bitmaps out of the attrlist.
   VG_(memcpy)(a, &attrList->commonattr, sizeof(a));
   d = attrBuf;
   dend = d + attrBufSize;

#if DARWIN_VERS >= DARWIN_10_6
   // ATTR_CMN_RETURNED_ATTRS tells us what's really here, if set
   if (a[0] & ATTR_CMN_RETURNED_ATTRS) {
      // fixme range check this?
      a[0] &= ~ATTR_CMN_RETURNED_ATTRS;
      fn(tid, d, sizeof(attribute_set_t));
      // Replace the requested bitmaps with the returned ones from the
      // start of the buffer; subsequent attributes follow those.
      VG_(memcpy)(a, d, sizeof(a));
   }
#endif

   // For each group, step through its table in buffer order and apply
   // 'fn' to every attribute whose bit is set.
   for (g = 0; g < 5; g++) {
      for (i = 0; attrdefs[g][i].attrBit; i++) {
         uint32_t bit = attrdefs[g][i].attrBit;
         int32_t size = attrdefs[g][i].attrSize;
         if (a[g] & bit) {
            a[g] &= ~bit;  // clear bit for error check later
            if (size == -1) {
               // Variable-length attribute: the buffer holds an
               // attrreference_t; its offset is relative to the
               // reference itself.  Clamp everything to the buffer end.
               attrreference_t *ref = (attrreference_t *)d;
               size = MIN(sizeof(attrreference_t), dend - d);
               fn(tid, d, size);
               if (size >= sizeof(attrreference_t)  &&
                   d + ref->attr_dataoffset < dend)
               {
                  fn(tid, d + ref->attr_dataoffset,
                     MIN(ref->attr_length, dend - (d + ref->attr_dataoffset)));
               }
               d += size;
            }
            else {
               // Fixed-length attribute, stored inline (clamped to the
               // end of the buffer).
               size = MIN(size, dend - d);
               fn(tid, d, size);
               d += size;
            }
            // Attributes are 4-byte aligned within the buffer.
            if ((uintptr_t)d % 4) d += 4 - ((uintptr_t)d % 4);
            if (d > dend) d = dend;
         }
      }

      // Known bits are cleared. Die if any bits are left.
      if (a[g] != 0) {
         VG_(message)(Vg_UserMsg, "UNKNOWN attrlist flags %d:0x%x\n", g, a[g]);
      }
   }
}
/* scan_attrlist() callback for getattrlist()-style syscalls: the kernel
   wrote this attribute's data, so mark it defined. */
static void get1attr(ThreadId tid, void *attrData, SizeT attrDataSize)
{
   POST_MEM_WRITE((Addr)attrData, attrDataSize);
}
/* scan_attrlist() callback for setattrlist()-style syscalls: the kernel
   will read this attribute's data, so check it is addressable/defined. */
static void set1attr(ThreadId tid, void *attrData, SizeT attrDataSize)
{
   PRE_MEM_READ("setattrlist(attrBuf value)", (Addr)attrData, attrDataSize);
}
PRE(getattrlist)
{
   // getattrlist(2): fetch the attributes named by 'attrList' for 'path'
   // into 'attrBuf'.  The exact portion of attrBuf actually written is
   // computed in the POST handler via scan_attrlist().
   PRINT("getattrlist(%#lx(%s), %#lx, %#lx, %lu, %lu)",
         ARG1, (char *)ARG1, ARG2, ARG3, ARG4, ARG5);
   PRE_REG_READ5(int, "getattrlist",
                 const char *,path, struct vki_attrlist *,attrList,
                 void *,attrBuf, vki_size_t,attrBufSize, unsigned int,options);
   PRE_MEM_RASCIIZ("getattrlist(path)", ARG1);
   PRE_MEM_READ("getattrlist(attrList)", ARG2, sizeof(struct vki_attrlist));
   // Conservatively require the whole output buffer to be addressable.
   PRE_MEM_WRITE("getattrlist(attrBuf)", ARG3, ARG4);
}
POST(getattrlist)
{
if (ARG4 > sizeof(vki_uint32_t)) {