/*--------------------------------------------------------------------*/
/*--- Linux-specific syscalls, etc. syswrap-linux.c ---*/
/*--------------------------------------------------------------------*/
/*
This file is part of Valgrind, a dynamic binary instrumentation
framework.
Copyright (C) 2000-2013 Nicholas Nethercote
njn@valgrind.org
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307, USA.
The GNU General Public License is contained in the file COPYING.
*/
#if defined(VGO_linux)
#include "pub_core_basics.h"
#include "pub_core_vki.h"
#include "pub_core_vkiscnums.h"
#include "pub_core_libcsetjmp.h" // to keep _threadstate.h happy
#include "pub_core_threadstate.h"
#include "pub_core_aspacemgr.h"
#include "pub_core_debuginfo.h" // VG_(di_notify_*)
#include "pub_core_transtab.h" // VG_(discard_translations)
#include "pub_core_xarray.h"
#include "pub_core_clientstate.h"
#include "pub_core_debuglog.h"
#include "pub_core_libcbase.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcfile.h"
#include "pub_core_libcprint.h"
#include "pub_core_libcproc.h"
#include "pub_core_libcsignal.h"
#include "pub_core_machine.h" // VG_(get_SP)
#include "pub_core_mallocfree.h"
#include "pub_core_tooliface.h"
#include "pub_core_options.h"
#include "pub_core_scheduler.h"
#include "pub_core_signals.h"
#include "pub_core_syscall.h"
#include "pub_core_syswrap.h"
#include "pub_core_inner.h"
#if defined(ENABLE_INNER_CLIENT_REQUEST)
#include "pub_core_clreq.h"
#endif
#include "priv_types_n_macros.h"
#include "priv_syswrap-generic.h"
#include "priv_syswrap-linux.h"
#include "priv_syswrap-xen.h"
// Run a thread from beginning to end and return the thread's
// scheduler-return-code.
static VgSchedReturnCode thread_wrapper(Word /*ThreadId*/ tidW)
{
VgSchedReturnCode ret;
ThreadId tid = (ThreadId)tidW;
ThreadState* tst = VG_(get_ThreadState)(tid);
VG_(debugLog)(1, "syswrap-linux",
"thread_wrapper(tid=%lld): entry\n",
(ULong)tidW);
vg_assert(tst->status == VgTs_Init);
/* make sure we get the CPU lock before doing anything significant */
VG_(acquire_BigLock)(tid, "thread_wrapper(starting new thread)");
if (0)
VG_(printf)("thread tid %d started: stack = %p\n",
tid, &tid);
/* Make sure error reporting is enabled in the new thread. */
tst->err_disablement_level = 0;
VG_TRACK(pre_thread_first_insn, tid);
tst->os_state.lwpid = VG_(gettid)();
/* Set the threadgroup for real.  This overwrites the provisional
value set in do_clone() in syswrap-*-linux.c.  See comments in
do_clone for background, also #226116. */
tst->os_state.threadgroup = VG_(getpid)();
/* Thread created with all signals blocked; scheduler will set the
appropriate mask */
ret = VG_(scheduler)(tid);
vg_assert(VG_(is_exiting)(tid));
vg_assert(tst->status == VgTs_Runnable);
vg_assert(VG_(is_running_thread)(tid));
VG_(debugLog)(1, "syswrap-linux",
"thread_wrapper(tid=%lld): exit, schedreturncode %s\n",
(ULong)tidW, VG_(name_of_VgSchedReturnCode)(ret));
/* Return to caller, still holding the lock. */
return ret;
}
/* ---------------------------------------------------------------------
clone-related stuff
------------------------------------------------------------------ */
/* Run a thread all the way to the end, then do appropriate exit actions
(this is the last-one-out-turn-off-the-lights bit). */
static void run_a_thread_NORETURN ( Word tidW )
{
ThreadId tid = (ThreadId)tidW;
VgSchedReturnCode src;
Int c;
ThreadState* tst;
#ifdef ENABLE_INNER_CLIENT_REQUEST
Int registered_vgstack_id;
#endif
VG_(debugLog)(1, "syswrap-linux",
"run_a_thread_NORETURN(tid=%lld): pre-thread_wrapper\n",
(ULong)tidW);
tst = VG_(get_ThreadState)(tid);
vg_assert(tst);
/* A thread has two stacks:
* the simulated stack (used by the synthetic cpu; the guest
process runs on this stack).
* the valgrind stack (used by the real cpu; Valgrind code runs
on this stack).
When Valgrind runs as an inner, it must signal that its (real) stack
is the one the outer should use, e.g. to do stacktraces.
*/
INNER_REQUEST
(registered_vgstack_id
= VALGRIND_STACK_REGISTER (tst->os_state.valgrind_stack_base,
tst->os_state.valgrind_stack_init_SP));
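/* (The id returned by VALGRIND_STACK_REGISTER is kept in
registered_vgstack_id so that the stack can be deregistered again
with VALGRIND_STACK_DEREGISTER just before the thread finally exits,
further down in this function.) */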
/* Run the thread all the way through. */
src = thread_wrapper(tid);
VG_(debugLog)(1, "syswrap-linux",
"run_a_thread_NORETURN(tid=%lld): post-thread_wrapper\n",
(ULong)tidW);
c = VG_(count_living_threads)();
vg_assert(c >= 1); /* stay sane */
// Tell the tool this thread is exiting
VG_TRACK( pre_thread_ll_exit, tid );
/* If the thread is exiting with errors disabled, complain loudly;
that is bad (does the user even know this has happened?).  Also,
in all cases, be paranoid and clear the flag anyway so that the
thread slot is safe in this respect if later reallocated.  This
should be unnecessary since the flag should be cleared when the
slot is reallocated, in thread_wrapper(). */
if (tst->err_disablement_level > 0) {
VG_(umsg)(
"WARNING: exiting thread has error reporting disabled.\n"
"WARNING: possibly as a result of some mistake in the use\n"
"WARNING: of the VALGRIND_DISABLE_ERROR_REPORTING macros.\n"
);
VG_(debugLog)(
1, "syswrap-linux",
"run_a_thread_NORETURN(tid=%lld): "
"WARNING: exiting thread has err_disablement_level = %u\n",
(ULong)tidW, tst->err_disablement_level
);
}
tst->err_disablement_level = 0;
if (c == 1) {
VG_(debugLog)(1, "syswrap-linux",
"run_a_thread_NORETURN(tid=%lld): "
"last one standing\n",
(ULong)tidW);
/* We are the last one standing. Keep hold of the lock and
carry on to show final tool results, then exit the entire system.
Use the continuation pointer set at startup in m_main. */
( * VG_(address_of_m_main_shutdown_actions_NORETURN) ) (tid, src);
} else {
VG_(debugLog)(1, "syswrap-linux",
"run_a_thread_NORETURN(tid=%lld): "
"not last one standing\n",
(ULong)tidW);
/* OK, thread is dead, but others still exist. Just exit. */
/* This releases the run lock */
VG_(exit_thread)(tid);
vg_assert(tst->status == VgTs_Zombie);
vg_assert(sizeof(tst->status) == 4);
vg_assert(sizeof(tst->os_state.exitcode) == sizeof(Word));
INNER_REQUEST (VALGRIND_STACK_DEREGISTER (registered_vgstack_id));
/* We have to use this sequence to terminate the thread to
prevent a subtle race. If VG_(exit_thread)() had left the
ThreadState as Empty, then it could have been reallocated,
reusing the stack while we're doing these last cleanups.
Instead, VG_(exit_thread) leaves it as Zombie to prevent
reallocation. We need to make sure we don't touch the stack
between marking it Empty and exiting. Hence the
assembler. */
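/* Conceptually, each per-platform block below is just the following
two lines, fused into one uninterruptible asm sequence so that
nothing can touch the (possibly already-recycled) stack in between
-- a sketch, not meant to compile:

tst->status = VgTs_Empty;
syscall(__NR_exit, tst->os_state.exitcode);   // never returns
*/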
#if defined(VGP_x86_linux)
asm volatile (
"pushl %%ebx\n"
"movl %1, %0\n" /* set tst->status = VgTs_Empty */
"movl %2, %%eax\n" /* set %eax = __NR_exit */
"movl %3, %%ebx\n" /* set %ebx = tst->os_state.exitcode */
"int $0x80\n" /* exit(tst->os_state.exitcode) */
"popl %%ebx\n"
: "=m" (tst->status)
: "n" (VgTs_Empty), "n" (__NR_exit), "m" (tst->os_state.exitcode)
: "eax"
);
#elif defined(VGP_amd64_linux)
asm volatile (
"movl %1, %0\n" /* set tst->status = VgTs_Empty */
"movq %2, %%rax\n" /* set %rax = __NR_exit */
"movq %3, %%rdi\n" /* set %rdi = tst->os_state.exitcode */
"syscall\n" /* exit(tst->os_state.exitcode) */
: "=m" (tst->status)
: "n" (VgTs_Empty), "n" (__NR_exit), "m" (tst->os_state.exitcode)
: "rax", "rdi"
);
#elif defined(VGP_ppc32_linux) || defined(VGP_ppc64_linux)
{ UInt vgts_empty = (UInt)VgTs_Empty;
asm volatile (
"stw %1,%0\n\t" /* set tst->status = VgTs_Empty */
"li 0,%2\n\t" /* set r0 = __NR_exit */
"lwz 3,%3\n\t" /* set r3 = tst->os_state.exitcode */
"sc\n\t" /* exit(tst->os_state.exitcode) */
: "=m" (tst->status)
: "r" (vgts_empty), "n" (__NR_exit), "m" (tst->os_state.exitcode)
: "r0", "r3"
);
}
#elif defined(VGP_arm_linux)
asm volatile (
"str %1, %0\n" /* set tst->status = VgTs_Empty */
"mov r7, %2\n" /* set %r7 = __NR_exit */
"ldr r0, %3\n" /* set %r0 = tst->os_state.exitcode */
"svc 0x00000000\n" /* exit(tst->os_state.exitcode) */
: "=m" (tst->status)
: "r" (VgTs_Empty), "n" (__NR_exit), "m" (tst->os_state.exitcode)
: "r0", "r7"
);
#elif defined(VGP_arm64_linux)
asm volatile (
"str %w1, %0\n" /* set tst->status = VgTs_Empty (32-bit store) */
"mov x8, %2\n" /* set %r7 = __NR_exit */
"ldr x0, %3\n" /* set %r0 = tst->os_state.exitcode */
"svc 0x00000000\n" /* exit(tst->os_state.exitcode) */
: "=m" (tst->status)
: "r" (VgTs_Empty), "n" (__NR_exit), "m" (tst->os_state.exitcode)
: "r0", "r7"
);
#elif defined(VGP_s390x_linux)
asm volatile (
"st %1, %0\n" /* set tst->status = VgTs_Empty */
"lg 2, %3\n" /* set r2 = tst->os_state.exitcode */
"svc %2\n" /* exit(tst->os_state.exitcode) */
: "=m" (tst->status)
: "d" (VgTs_Empty), "n" (__NR_exit), "m" (tst->os_state.exitcode)
: "2"
);
#elif defined(VGP_mips32_linux) || defined(VGP_mips64_linux)
asm volatile (
"sw %1, %0\n\t" /* set tst->status = VgTs_Empty */
"li $2, %2\n\t" /* set v0 = __NR_exit */
"lw $4, %3\n\t" /* set a0 = tst->os_state.exitcode */
"syscall\n\t" /* exit(tst->os_state.exitcode) */
"nop"
: "=m" (tst->status)
: "r" (VgTs_Empty), "n" (__NR_exit), "m" (tst->os_state.exitcode)
: "cc", "memory" , "v0", "a0"
);
#else
# error Unknown platform
#endif
VG_(core_panic)("Thread exit failed?\n");
}
/*NOTREACHED*/
vg_assert(0);
}
Word ML_(start_thread_NORETURN) ( void* arg )
{
ThreadState* tst = (ThreadState*)arg;
ThreadId tid = tst->tid;
run_a_thread_NORETURN ( (Word)tid );
/*NOTREACHED*/
vg_assert(0);
}
/* Allocate a stack for this thread, if it doesn't already have one.
They're allocated lazily, and never freed. Returns the initial stack
pointer value to use, or 0 if allocation failed. */
Addr ML_(allocstack)(ThreadId tid)
{
ThreadState* tst = VG_(get_ThreadState)(tid);
VgStack* stack;
Addr initial_SP;
/* Either the stack_base and stack_init_SP are both zero (in which
case a stack hasn't been allocated) or they are both non-zero,
in which case it has. */
if (tst->os_state.valgrind_stack_base == 0)
vg_assert(tst->os_state.valgrind_stack_init_SP == 0);
if (tst->os_state.valgrind_stack_base != 0)
vg_assert(tst->os_state.valgrind_stack_init_SP != 0);
/* If no stack is present, allocate one. */
if (tst->os_state.valgrind_stack_base == 0) {
stack = VG_(am_alloc_VgStack)( &initial_SP );
if (stack) {
tst->os_state.valgrind_stack_base = (Addr)stack;
tst->os_state.valgrind_stack_init_SP = initial_SP;
}
}
if (0)
VG_(printf)( "stack for tid %d at %p; init_SP=%p\n",
tid,
(void*)tst->os_state.valgrind_stack_base,
(void*)tst->os_state.valgrind_stack_init_SP );
return tst->os_state.valgrind_stack_init_SP;
}
/* Allocate a stack for the main thread, and run it all the way to the
end. Although we already have a working VgStack
(VG_(interim_stack)) it's better to allocate a new one, so that
overflow detection works uniformly for all threads.
*/
void VG_(main_thread_wrapper_NORETURN)(ThreadId tid)
{
Addr sp;
VG_(debugLog)(1, "syswrap-linux",
"entering VG_(main_thread_wrapper_NORETURN)\n");
sp = ML_(allocstack)(tid);
#if defined(ENABLE_INNER_CLIENT_REQUEST)
{
// We must register the main thread stack before the call
// to ML_(call_on_new_stack_0_1), otherwise the outer valgrind
// reports 'write error' on the non-registered stack.
ThreadState* tst = VG_(get_ThreadState)(tid);
INNER_REQUEST
((void)
VALGRIND_STACK_REGISTER (tst->os_state.valgrind_stack_base,
tst->os_state.valgrind_stack_init_SP));
}
#endif
#if defined(VGP_ppc32_linux)
/* make a stack frame */
sp -= 16;
sp &= ~0xF;
*(UWord *)sp = 0;
#elif defined(VGP_ppc64_linux)
/* make a stack frame */
sp -= 112;
sp &= ~((Addr)0xF);
*(UWord *)sp = 0;
#elif defined(VGP_s390x_linux)
/* make a stack frame */
sp -= 160;
sp &= ~((Addr)0xF);
*(UWord *)sp = 0;
#endif
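/* The 16-, 112- and 160-byte frames above presumably match the
minimum stack frame sizes required by the ppc32, ppc64 (ELFv1) and
s390x ABIs respectively; the other platforms need no frame built
here. */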
/* If we can't even allocate the first thread's stack, we're hosed.
Give up. */
vg_assert2(sp != 0, "Cannot allocate main thread's stack.");
/* shouldn't be any other threads around yet */
vg_assert( VG_(count_living_threads)() == 1 );
ML_(call_on_new_stack_0_1)(
(Addr)sp, /* stack */
0, /* bogus return address */
run_a_thread_NORETURN, /* fn to call */
(Word)tid /* arg to give it */
);
/*NOTREACHED*/
vg_assert(0);
}
/* Do a clone which is really a fork() */
SysRes ML_(do_fork_clone) ( ThreadId tid, UInt flags,
Int* parent_tidptr, Int* child_tidptr )
{
vki_sigset_t fork_saved_mask;
vki_sigset_t mask;
SysRes res;
if (flags & (VKI_CLONE_SETTLS | VKI_CLONE_FS | VKI_CLONE_VM
| VKI_CLONE_FILES | VKI_CLONE_VFORK))
return VG_(mk_SysRes_Error)( VKI_EINVAL );
/* Block all signals during fork, so that we can fix things up in
the child without being interrupted. */
VG_(sigfillset)(&mask);
VG_(sigprocmask)(VKI_SIG_SETMASK, &mask, &fork_saved_mask);
VG_(do_atfork_pre)(tid);
/* Since this is the fork() form of clone, we don't need all that
VG_(clone) stuff */
#if defined(VGP_x86_linux) \
|| defined(VGP_ppc32_linux) || defined(VGP_ppc64_linux) \
|| defined(VGP_arm_linux) || defined(VGP_mips32_linux) \
|| defined(VGP_mips64_linux) || defined(VGP_arm64_linux)
res = VG_(do_syscall5)( __NR_clone, flags,
(UWord)NULL, (UWord)parent_tidptr,
(UWord)NULL, (UWord)child_tidptr );
#elif defined(VGP_amd64_linux)
/* note that the last two arguments are the opposite way round to x86 and
ppc32 as the amd64 kernel expects the arguments in a different order */
res = VG_(do_syscall5)( __NR_clone, flags,
(UWord)NULL, (UWord)parent_tidptr,
(UWord)child_tidptr, (UWord)NULL );
#elif defined(VGP_s390x_linux)
/* Note that s390 has the stack first and then the flags */
res = VG_(do_syscall4)( __NR_clone, (UWord) NULL, flags,
(UWord)parent_tidptr, (UWord)child_tidptr);
#else
# error Unknown platform
#endif
if (!sr_isError(res) && sr_Res(res) == 0) {
/* child */
VG_(do_atfork_child)(tid);
/* restore signal mask */
VG_(sigprocmask)(VKI_SIG_SETMASK, &fork_saved_mask, NULL);
/* If --child-silent-after-fork=yes was specified, set the
output file descriptors to 'impossible' values. This is
noticed by send_bytes_to_logging_sink in m_libcprint.c, which
duly stops writing any further output. */
if (VG_(clo_child_silent_after_fork)) {
if (!VG_(log_output_sink).is_socket)
VG_(log_output_sink).fd = -1;
if (!VG_(xml_output_sink).is_socket)
VG_(xml_output_sink).fd = -1;
}
}
else
if (!sr_isError(res) && sr_Res(res) > 0) {
/* parent */
VG_(do_atfork_parent)(tid);
if (VG_(clo_trace_syscalls))
VG_(printf)(" clone(fork): process %d created child %ld\n",
VG_(getpid)(), sr_Res(res));
/* restore signal mask */
VG_(sigprocmask)(VKI_SIG_SETMASK, &fork_saved_mask, NULL);
}
return res;
}
/* ---------------------------------------------------------------------
PRE/POST wrappers for arch-generic, Linux-specific syscalls
------------------------------------------------------------------ */
// Nb: See the comment above the generic PRE/POST wrappers in
// m_syswrap/syswrap-generic.c for notes about how they work.
#define PRE(name) DEFN_PRE_TEMPLATE(linux, name)
#define POST(name) DEFN_POST_TEMPLATE(linux, name)
// Macros to support 64-bit syscall args split into two 32 bit values
#define LOHI64(lo,hi) ( ((ULong)(lo)) | (((ULong)(hi)) << 32) )
#if defined(VG_LITTLEENDIAN)
#define MERGE64(lo,hi) ( ((ULong)(lo)) | (((ULong)(hi)) << 32) )
#define MERGE64_FIRST(name) name##_low
#define MERGE64_SECOND(name) name##_high
#elif defined(VG_BIGENDIAN)
#define MERGE64(hi,lo) ( ((ULong)(lo)) | (((ULong)(hi)) << 32) )
#define MERGE64_FIRST(name) name##_high
#define MERGE64_SECOND(name) name##_low
#else
#error Unknown endianness
#endif
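// Illustrative use (mirrors sys_fallocate below): on a 32-bit target
// a 64-bit 'offset' argument arrives as two registers, so a wrapper
// both prints and declares it via these macros, e.g.
//
//    PRINT("... %lld ...", MERGE64(ARG3,ARG4));
//    PRE_REG_READ6(long, "fallocate", int, fd, int, mode,
//                  unsigned, MERGE64_FIRST(offset),
//                  unsigned, MERGE64_SECOND(offset),
//                  unsigned, MERGE64_FIRST(len),
//                  unsigned, MERGE64_SECOND(len));
//
// On big-endian targets MERGE64_FIRST(offset) expands to offset_high
// rather than offset_low, so the declared order still matches how
// the kernel receives the two halves.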
/* ---------------------------------------------------------------------
*mount wrappers
------------------------------------------------------------------ */
PRE(sys_mount)
{
// Nb: depending on 'flags', the 'type' and 'data' args may be ignored.
// We are conservative and check everything, except the memory pointed to
// by 'data'.
*flags |= SfMayBlock;
PRINT("sys_mount( %#lx(%s), %#lx(%s), %#lx(%s), %#lx, %#lx )",
ARG1,(HChar*)ARG1, ARG2,(HChar*)ARG2, ARG3,(HChar*)ARG3, ARG4, ARG5);
PRE_REG_READ5(long, "mount",
char *, source, char *, target, char *, type,
unsigned long, flags, void *, data);
if (ARG1)
PRE_MEM_RASCIIZ( "mount(source)", ARG1);
PRE_MEM_RASCIIZ( "mount(target)", ARG2);
PRE_MEM_RASCIIZ( "mount(type)", ARG3);
}
PRE(sys_oldumount)
{
PRINT("sys_oldumount( %#lx )", ARG1);
PRE_REG_READ1(long, "umount", char *, path);
PRE_MEM_RASCIIZ( "umount(path)", ARG1);
}
PRE(sys_umount)
{
PRINT("sys_umount( %#lx, %ld )", ARG1, ARG2);
PRE_REG_READ2(long, "umount2", char *, path, int, flags);
PRE_MEM_RASCIIZ( "umount2(path)", ARG1);
}
/* ---------------------------------------------------------------------
16- and 32-bit uid/gid wrappers
------------------------------------------------------------------ */
PRE(sys_setfsuid16)
{
PRINT("sys_setfsuid16 ( %ld )", ARG1);
PRE_REG_READ1(long, "setfsuid16", vki_old_uid_t, uid);
}
PRE(sys_setfsuid)
{
PRINT("sys_setfsuid ( %ld )", ARG1);
PRE_REG_READ1(long, "setfsuid", vki_uid_t, uid);
}
PRE(sys_setfsgid16)
{
PRINT("sys_setfsgid16 ( %ld )", ARG1);
PRE_REG_READ1(long, "setfsgid16", vki_old_gid_t, gid);
}
PRE(sys_setfsgid)
{
PRINT("sys_setfsgid ( %ld )", ARG1);
PRE_REG_READ1(long, "setfsgid", vki_gid_t, gid);
}
PRE(sys_setresuid16)
{
PRINT("sys_setresuid16 ( %ld, %ld, %ld )", ARG1, ARG2, ARG3);
PRE_REG_READ3(long, "setresuid16",
vki_old_uid_t, ruid, vki_old_uid_t, euid, vki_old_uid_t, suid);
}
PRE(sys_setresuid)
{
PRINT("sys_setresuid ( %ld, %ld, %ld )", ARG1, ARG2, ARG3);
PRE_REG_READ3(long, "setresuid",
vki_uid_t, ruid, vki_uid_t, euid, vki_uid_t, suid);
}
PRE(sys_getresuid16)
{
PRINT("sys_getresuid16 ( %#lx, %#lx, %#lx )", ARG1,ARG2,ARG3);
PRE_REG_READ3(long, "getresuid16",
vki_old_uid_t *, ruid, vki_old_uid_t *, euid,
vki_old_uid_t *, suid);
PRE_MEM_WRITE( "getresuid16(ruid)", ARG1, sizeof(vki_old_uid_t) );
PRE_MEM_WRITE( "getresuid16(euid)", ARG2, sizeof(vki_old_uid_t) );
PRE_MEM_WRITE( "getresuid16(suid)", ARG3, sizeof(vki_old_uid_t) );
}
POST(sys_getresuid16)
{
vg_assert(SUCCESS);
if (RES == 0) {
POST_MEM_WRITE( ARG1, sizeof(vki_old_uid_t) );
POST_MEM_WRITE( ARG2, sizeof(vki_old_uid_t) );
POST_MEM_WRITE( ARG3, sizeof(vki_old_uid_t) );
}
}
PRE(sys_getresuid)
{
PRINT("sys_getresuid ( %#lx, %#lx, %#lx )", ARG1,ARG2,ARG3);
PRE_REG_READ3(long, "getresuid",
vki_uid_t *, ruid, vki_uid_t *, euid, vki_uid_t *, suid);
PRE_MEM_WRITE( "getresuid(ruid)", ARG1, sizeof(vki_uid_t) );
PRE_MEM_WRITE( "getresuid(euid)", ARG2, sizeof(vki_uid_t) );
PRE_MEM_WRITE( "getresuid(suid)", ARG3, sizeof(vki_uid_t) );
}
POST(sys_getresuid)
{
vg_assert(SUCCESS);
if (RES == 0) {
POST_MEM_WRITE( ARG1, sizeof(vki_uid_t) );
POST_MEM_WRITE( ARG2, sizeof(vki_uid_t) );
POST_MEM_WRITE( ARG3, sizeof(vki_uid_t) );
}
}
PRE(sys_setresgid16)
{
PRINT("sys_setresgid16 ( %ld, %ld, %ld )", ARG1, ARG2, ARG3);
PRE_REG_READ3(long, "setresgid16",
vki_old_gid_t, rgid,
vki_old_gid_t, egid, vki_old_gid_t, sgid);
}
PRE(sys_setresgid)
{
PRINT("sys_setresgid ( %ld, %ld, %ld )", ARG1, ARG2, ARG3);
PRE_REG_READ3(long, "setresgid",
vki_gid_t, rgid, vki_gid_t, egid, vki_gid_t, sgid);
}
PRE(sys_getresgid16)
{
PRINT("sys_getresgid16 ( %#lx, %#lx, %#lx )", ARG1,ARG2,ARG3);
PRE_REG_READ3(long, "getresgid16",
vki_old_gid_t *, rgid, vki_old_gid_t *, egid,
vki_old_gid_t *, sgid);
PRE_MEM_WRITE( "getresgid16(rgid)", ARG1, sizeof(vki_old_gid_t) );
PRE_MEM_WRITE( "getresgid16(egid)", ARG2, sizeof(vki_old_gid_t) );
PRE_MEM_WRITE( "getresgid16(sgid)", ARG3, sizeof(vki_old_gid_t) );
}
POST(sys_getresgid16)
{
vg_assert(SUCCESS);
if (RES == 0) {
POST_MEM_WRITE( ARG1, sizeof(vki_old_gid_t) );
POST_MEM_WRITE( ARG2, sizeof(vki_old_gid_t) );
POST_MEM_WRITE( ARG3, sizeof(vki_old_gid_t) );
}
}
PRE(sys_getresgid)
{
PRINT("sys_getresgid ( %#lx, %#lx, %#lx )", ARG1,ARG2,ARG3);
PRE_REG_READ3(long, "getresgid",
vki_gid_t *, rgid, vki_gid_t *, egid, vki_gid_t *, sgid);
PRE_MEM_WRITE( "getresgid(rgid)", ARG1, sizeof(vki_gid_t) );
PRE_MEM_WRITE( "getresgid(egid)", ARG2, sizeof(vki_gid_t) );
PRE_MEM_WRITE( "getresgid(sgid)", ARG3, sizeof(vki_gid_t) );
}
POST(sys_getresgid)
{
vg_assert(SUCCESS);
if (RES == 0) {
POST_MEM_WRITE( ARG1, sizeof(vki_gid_t) );
POST_MEM_WRITE( ARG2, sizeof(vki_gid_t) );
POST_MEM_WRITE( ARG3, sizeof(vki_gid_t) );
}
}
/* ---------------------------------------------------------------------
miscellaneous wrappers
------------------------------------------------------------------ */
PRE(sys_exit_group)
{
ThreadId t;
ThreadState* tst;
PRINT("exit_group( %ld )", ARG1);
PRE_REG_READ1(void, "exit_group", int, status);
tst = VG_(get_ThreadState)(tid);
/* A little complex; find all the threads with the same threadgroup
as this one (including this one), and mark them to exit */
/* It is unclear how one can get a threadgroup in this process which
is not the threadgroup of the calling thread:
The assignments to threadgroups are:
= 0; /// scheduler.c os_state_clear
= getpid(); /// scheduler.c in child after fork
= getpid(); /// this file, in thread_wrapper
= ptst->os_state.threadgroup; /// syswrap-*-linux.c,
copying the thread group of the thread doing clone
So, the only case where the threadgroup might differ from the getpid
value is in the child, just after fork. But at that point the fork
syscall is still in progress, so the forked child has had no chance
yet to make this syscall. */
for (t = 1; t < VG_N_THREADS; t++) {
if ( /* not alive */
VG_(threads)[t].status == VgTs_Empty
||
/* not our group */
VG_(threads)[t].os_state.threadgroup != tst->os_state.threadgroup
)
continue;
/* Assign the exit code, VG_(nuke_all_threads_except) will assign
the exitreason. */
VG_(threads)[t].os_state.exitcode = ARG1;
}
/* Indicate in all other threads that the process is exiting.
Then wait using VG_(reap_threads) for these threads to disappear.
Could this deadlock if another thread is calling exit in parallel
and would then wait for this thread to disappear?
The answer is no:
Other threads are either blocked in a syscall or have yielded the CPU.
A thread that has yielded the CPU is trying to get the big lock in
VG_(scheduler). This thread will get the CPU thanks to the call
to VG_(reap_threads). The scheduler will then check for signals,
kill the process if this is a fatal signal, and otherwise prepare
the thread for handling this signal. After this preparation, if
the thread status is VG_(is_exiting), the scheduler exits the thread.
So, a thread that has yielded the CPU does not have a chance to
call exit => no deadlock for this thread.
VG_(nuke_all_threads_except) will send the VG_SIGVGKILL signal
to all threads blocked in a syscall.
The syscall will be interrupted, and the control will go to the
scheduler. The scheduler will then return, as the thread is in
exiting state. */
VG_(nuke_all_threads_except)( tid, VgSrc_ExitProcess );
VG_(reap_threads)(tid);
VG_(threads)[tid].exitreason = VgSrc_ExitThread;
/* we do assign VgSrc_ExitThread and not VgSrc_ExitProcess, as this thread
is the thread calling exit_group and so its registers must be considered
as not reachable. See pub_tool_machine.h VG_(apply_to_GP_regs). */
/* We have to claim the syscall already succeeded. */
SET_STATUS_Success(0);
}
PRE(sys_llseek)
{
PRINT("sys_llseek ( %ld, 0x%lx, 0x%lx, %#lx, %ld )", ARG1,ARG2,ARG3,ARG4,ARG5);
PRE_REG_READ5(long, "llseek",
unsigned int, fd, unsigned long, offset_high,
unsigned long, offset_low, vki_loff_t *, result,
unsigned int, whence);
if (!ML_(fd_allowed)(ARG1, "llseek", tid, False))
SET_STATUS_Failure( VKI_EBADF );
else
PRE_MEM_WRITE( "llseek(result)", ARG4, sizeof(vki_loff_t));
}
POST(sys_llseek)
{
vg_assert(SUCCESS);
if (RES == 0)
POST_MEM_WRITE( ARG4, sizeof(vki_loff_t) );
}
PRE(sys_adjtimex)
{
struct vki_timex *tx = (struct vki_timex *)ARG1;
PRINT("sys_adjtimex ( %#lx )", ARG1);
PRE_REG_READ1(long, "adjtimex", struct timex *, buf);
PRE_MEM_READ( "adjtimex(timex->modes)", ARG1, sizeof(tx->modes));
#define ADJX(bits,field) \
if (tx->modes & (bits)) \
PRE_MEM_READ( "adjtimex(timex->"#field")", \
(Addr)&tx->field, sizeof(tx->field))
if (tx->modes & VKI_ADJ_ADJTIME) {
if (!(tx->modes & VKI_ADJ_OFFSET_READONLY))
PRE_MEM_READ( "adjtimex(timex->offset)", (Addr)&tx->offset, sizeof(tx->offset));
} else {
ADJX(VKI_ADJ_OFFSET, offset);
ADJX(VKI_ADJ_FREQUENCY, freq);
ADJX(VKI_ADJ_MAXERROR, maxerror);
ADJX(VKI_ADJ_ESTERROR, esterror);
ADJX(VKI_ADJ_STATUS, status);
ADJX(VKI_ADJ_TIMECONST|VKI_ADJ_TAI, constant);
ADJX(VKI_ADJ_TICK, tick);
}
#undef ADJX
PRE_MEM_WRITE( "adjtimex(timex)", ARG1, sizeof(struct vki_timex));
}
POST(sys_adjtimex)
{
POST_MEM_WRITE( ARG1, sizeof(struct vki_timex) );
}
PRE(sys_clock_adjtime)
{
struct vki_timex *tx = (struct vki_timex *)ARG2;
PRINT("sys_clock_adjtime ( %ld, %#lx )", ARG1,ARG2);
PRE_REG_READ2(long, "clock_adjtime", vki_clockid_t, id, struct timex *, buf);
PRE_MEM_READ( "clock_adjtime(timex->modes)", ARG2, sizeof(tx->modes));
#define ADJX(bits,field) \
if (tx->modes & (bits)) \
PRE_MEM_READ( "clock_adjtime(timex->"#field")", \
(Addr)&tx->field, sizeof(tx->field))
if (tx->modes & VKI_ADJ_ADJTIME) {
if (!(tx->modes & VKI_ADJ_OFFSET_READONLY))
PRE_MEM_READ( "clock_adjtime(timex->offset)", (Addr)&tx->offset, sizeof(tx->offset));
} else {
ADJX(VKI_ADJ_OFFSET, offset);
ADJX(VKI_ADJ_FREQUENCY, freq);
ADJX(VKI_ADJ_MAXERROR, maxerror);
ADJX(VKI_ADJ_ESTERROR, esterror);
ADJX(VKI_ADJ_STATUS, status);
ADJX(VKI_ADJ_TIMECONST|VKI_ADJ_TAI, constant);
ADJX(VKI_ADJ_TICK, tick);
}
#undef ADJX
PRE_MEM_WRITE( "adjtimex(timex)", ARG2, sizeof(struct vki_timex));
}
POST(sys_clock_adjtime)
{
POST_MEM_WRITE( ARG2, sizeof(struct vki_timex) );
}
PRE(sys_ioperm)
{
PRINT("sys_ioperm ( %ld, %ld, %ld )", ARG1, ARG2, ARG3 );
PRE_REG_READ3(long, "ioperm",
unsigned long, from, unsigned long, num, int, turn_on);
}
PRE(sys_syslog)
{
*flags |= SfMayBlock;
PRINT("sys_syslog (%ld, %#lx, %ld)", ARG1,ARG2,ARG3);
PRE_REG_READ3(long, "syslog", int, type, char *, bufp, int, len);
switch (ARG1) {
// The kernel uses magic numbers here, rather than named constants,
// therefore so do we.
case 2: case 3: case 4:
PRE_MEM_WRITE( "syslog(bufp)", ARG2, ARG3);
break;
default:
break;
}
}
POST(sys_syslog)
{
switch (ARG1) {
case 2: case 3: case 4:
POST_MEM_WRITE( ARG2, ARG3 );
break;
default:
break;
}
}
PRE(sys_vhangup)
{
PRINT("sys_vhangup ( )");
PRE_REG_READ0(long, "vhangup");
}
PRE(sys_sysinfo)
{
PRINT("sys_sysinfo ( %#lx )",ARG1);
PRE_REG_READ1(long, "sysinfo", struct sysinfo *, info);
PRE_MEM_WRITE( "sysinfo(info)", ARG1, sizeof(struct vki_sysinfo) );
}
POST(sys_sysinfo)
{
POST_MEM_WRITE( ARG1, sizeof(struct vki_sysinfo) );
}
PRE(sys_personality)
{
PRINT("sys_personality ( %llu )", (ULong)ARG1);
PRE_REG_READ1(long, "personality", vki_u_long, persona);
}
PRE(sys_sysctl)
{
struct __vki_sysctl_args *args;
PRINT("sys_sysctl ( %#lx )", ARG1 );
args = (struct __vki_sysctl_args *)ARG1;
PRE_REG_READ1(long, "sysctl", struct __sysctl_args *, args);
PRE_MEM_WRITE( "sysctl(args)", ARG1, sizeof(struct __vki_sysctl_args) );
if (!VG_(am_is_valid_for_client)(ARG1, sizeof(struct __vki_sysctl_args),
VKI_PROT_READ)) {
SET_STATUS_Failure( VKI_EFAULT );
return;
}
PRE_MEM_READ("sysctl(name)", (Addr)args->name, args->nlen * sizeof(*args->name));
if (args->newval != NULL)
PRE_MEM_READ("sysctl(newval)", (Addr)args->newval, args->newlen);
if (args->oldlenp != NULL) {
PRE_MEM_READ("sysctl(oldlenp)", (Addr)args->oldlenp, sizeof(*args->oldlenp));
PRE_MEM_WRITE("sysctl(oldval)", (Addr)args->oldval, *args->oldlenp);
}
}
POST(sys_sysctl)
{
struct __vki_sysctl_args *args;
args = (struct __vki_sysctl_args *)ARG1;
if (args->oldlenp != NULL) {
POST_MEM_WRITE((Addr)args->oldlenp, sizeof(*args->oldlenp));
POST_MEM_WRITE((Addr)args->oldval, 1 + *args->oldlenp);
}
}
PRE(sys_prctl)
{
*flags |= SfMayBlock;
PRINT( "sys_prctl ( %ld, %ld, %ld, %ld, %ld )", ARG1, ARG2, ARG3, ARG4, ARG5 );
switch (ARG1) {
case VKI_PR_SET_PDEATHSIG:
PRE_REG_READ2(int, "prctl", int, option, int, signal);
break;
case VKI_PR_GET_PDEATHSIG:
PRE_REG_READ2(int, "prctl", int, option, int *, signal);
PRE_MEM_WRITE("prctl(get-death-signal)", ARG2, sizeof(Int));
break;
case VKI_PR_GET_DUMPABLE:
PRE_REG_READ1(int, "prctl", int, option);
break;
case VKI_PR_SET_DUMPABLE:
PRE_REG_READ2(int, "prctl", int, option, int, dump);
break;
case VKI_PR_GET_UNALIGN:
PRE_REG_READ2(int, "prctl", int, option, int *, value);
PRE_MEM_WRITE("prctl(get-unalign)", ARG2, sizeof(Int));
break;
case VKI_PR_SET_UNALIGN:
PRE_REG_READ2(int, "prctl", int, option, int, value);
break;
case VKI_PR_GET_KEEPCAPS:
PRE_REG_READ1(int, "prctl", int, option);
break;
case VKI_PR_SET_KEEPCAPS:
PRE_REG_READ2(int, "prctl", int, option, int, keepcaps);
break;
case VKI_PR_GET_FPEMU:
PRE_REG_READ2(int, "prctl", int, option, int *, value);
PRE_MEM_WRITE("prctl(get-fpemu)", ARG2, sizeof(Int));
break;
case VKI_PR_SET_FPEMU:
PRE_REG_READ2(int, "prctl", int, option, int, value);
break;
case VKI_PR_GET_FPEXC:
PRE_REG_READ2(int, "prctl", int, option, int *, value);
PRE_MEM_WRITE("prctl(get-fpexc)", ARG2, sizeof(Int));
break;
case VKI_PR_SET_FPEXC:
PRE_REG_READ2(int, "prctl", int, option, int, value);
break;
case VKI_PR_GET_TIMING:
PRE_REG_READ1(int, "prctl", int, option);
break;
case VKI_PR_SET_TIMING:
PRE_REG_READ2(int, "prctl", int, option, int, timing);
break;
case VKI_PR_SET_NAME:
PRE_REG_READ2(int, "prctl", int, option, char *, name);
PRE_MEM_RASCIIZ("prctl(set-name)", ARG2);
break;
case VKI_PR_GET_NAME:
PRE_REG_READ2(int, "prctl", int, option, char *, name);
PRE_MEM_WRITE("prctl(get-name)", ARG2, VKI_TASK_COMM_LEN);
break;
case VKI_PR_GET_ENDIAN:
PRE_REG_READ2(int, "prctl", int, option, int *, value);
PRE_MEM_WRITE("prctl(get-endian)", ARG2, sizeof(Int));
break;
case VKI_PR_SET_ENDIAN:
PRE_REG_READ2(int, "prctl", int, option, int, value);
break;
default:
PRE_REG_READ5(long, "prctl",
int, option, unsigned long, arg2, unsigned long, arg3,
unsigned long, arg4, unsigned long, arg5);
break;
}
}
POST(sys_prctl)
{
switch (ARG1) {
case VKI_PR_GET_PDEATHSIG:
POST_MEM_WRITE(ARG2, sizeof(Int));
break;
case VKI_PR_GET_UNALIGN:
POST_MEM_WRITE(ARG2, sizeof(Int));
break;
case VKI_PR_GET_FPEMU:
POST_MEM_WRITE(ARG2, sizeof(Int));
break;
case VKI_PR_GET_FPEXC:
POST_MEM_WRITE(ARG2, sizeof(Int));
break;
case VKI_PR_GET_NAME:
POST_MEM_WRITE(ARG2, VKI_TASK_COMM_LEN);
break;
case VKI_PR_GET_ENDIAN:
POST_MEM_WRITE(ARG2, sizeof(Int));
break;
case VKI_PR_SET_NAME:
{
const HChar* new_name = (const HChar*) ARG2;
if (new_name) { // Paranoia
ThreadState* tst = VG_(get_ThreadState)(tid);
SizeT new_len = VG_(strlen)(new_name);
/* Don't bother reusing the memory. This is a rare event. */
tst->thread_name =
VG_(arena_realloc)(VG_AR_CORE, "syswrap.prctl",
tst->thread_name, new_len + 1);
VG_(strcpy)(tst->thread_name, new_name);
}
}
break;
}
}
PRE(sys_sendfile)
{
*flags |= SfMayBlock;
PRINT("sys_sendfile ( %ld, %ld, %#lx, %lu )", ARG1,ARG2,ARG3,ARG4);
PRE_REG_READ4(ssize_t, "sendfile",
int, out_fd, int, in_fd, vki_off_t *, offset,
vki_size_t, count);
if (ARG3 != 0)
PRE_MEM_WRITE( "sendfile(offset)", ARG3, sizeof(vki_off_t) );
}
POST(sys_sendfile)
{
if (ARG3 != 0 ) {
POST_MEM_WRITE( ARG3, sizeof( vki_off_t ) );
}
}
PRE(sys_sendfile64)
{
*flags |= SfMayBlock;
PRINT("sendfile64 ( %ld, %ld, %#lx, %lu )",ARG1,ARG2,ARG3,ARG4);
PRE_REG_READ4(ssize_t, "sendfile64",
int, out_fd, int, in_fd, vki_loff_t *, offset,
vki_size_t, count);
if (ARG3 != 0)
PRE_MEM_WRITE( "sendfile64(offset)", ARG3, sizeof(vki_loff_t) );
}
POST(sys_sendfile64)
{
if (ARG3 != 0 ) {
POST_MEM_WRITE( ARG3, sizeof(vki_loff_t) );
}
}
PRE(sys_futex)
{
/*
arg    param                          used by ops
ARG1 - u32 *futex                     all
ARG2 - int op
ARG3 - int val                        WAIT,WAKE,FD,REQUEUE,CMP_REQUEUE
ARG4 - struct timespec *utime         WAIT: time*; REQUEUE,CMP_REQUEUE: val2
ARG5 - u32 *uaddr2                    REQUEUE,CMP_REQUEUE
ARG6 - int val3                       CMP_REQUEUE
*/
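/* For example, a client call such as futex(&f, FUTEX_WAIT, val, &ts)
supplies only four arguments, so for FUTEX_WAIT we declare just
ARG1..ARG4 as read (PRE_REG_READ4 below); declaring all six could
provoke spurious complaints about the unused ARG5/ARG6 registers,
which the kernel never looks at for this op. */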
PRINT("sys_futex ( %#lx, %ld, %ld, %#lx, %#lx )", ARG1,ARG2,ARG3,ARG4,ARG5);
switch(ARG2 & ~(VKI_FUTEX_PRIVATE_FLAG|VKI_FUTEX_CLOCK_REALTIME)) {
case VKI_FUTEX_CMP_REQUEUE:
case VKI_FUTEX_WAKE_OP:
case VKI_FUTEX_CMP_REQUEUE_PI:
PRE_REG_READ6(long, "futex",
vki_u32 *, futex, int, op, int, val,
struct timespec *, utime, vki_u32 *, uaddr2, int, val3);
break;
case VKI_FUTEX_REQUEUE:
case VKI_FUTEX_WAIT_REQUEUE_PI:
PRE_REG_READ5(long, "futex",
vki_u32 *, futex, int, op, int, val,
struct timespec *, utime, vki_u32 *, uaddr2);
break;
case VKI_FUTEX_WAIT_BITSET:
/* Check that the address at least begins in client-accessible area. */
if (!VG_(am_is_valid_for_client)( ARG1, 1, VKI_PROT_READ )) {
SET_STATUS_Failure( VKI_EFAULT );
return;
}
if (*(vki_u32 *)ARG1 != ARG3) {
PRE_REG_READ5(long, "futex",
vki_u32 *, futex, int, op, int, val,
struct timespec *, utime, int, dummy);
} else {
PRE_REG_READ6(long, "futex",
vki_u32 *, futex, int, op, int, val,
struct timespec *, utime, int, dummy, int, val3);
}
break;
case VKI_FUTEX_WAKE_BITSET:
PRE_REG_READ6(long, "futex",
vki_u32 *, futex, int, op, int, val,
int, dummy, int, dummy2, int, val3);
break;
case VKI_FUTEX_WAIT:
case VKI_FUTEX_LOCK_PI:
PRE_REG_READ4(long, "futex",
vki_u32 *, futex, int, op, int, val,
struct timespec *, utime);
break;
case VKI_FUTEX_WAKE:
case VKI_FUTEX_FD:
case VKI_FUTEX_TRYLOCK_PI:
PRE_REG_READ3(long, "futex",
vki_u32 *, futex, int, op, int, val);
break;
case VKI_FUTEX_UNLOCK_PI:
default:
PRE_REG_READ2(long, "futex", vki_u32 *, futex, int, op);
break;
}
*flags |= SfMayBlock;
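/* The switch above declares which argument registers the kernel
reads for this op; the switch below checks the memory those
arguments point at. */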
switch(ARG2 & ~(VKI_FUTEX_PRIVATE_FLAG|VKI_FUTEX_CLOCK_REALTIME)) {
case VKI_FUTEX_WAIT:
case VKI_FUTEX_LOCK_PI:
case VKI_FUTEX_WAIT_BITSET:
case VKI_FUTEX_WAIT_REQUEUE_PI:
PRE_MEM_READ( "futex(futex)", ARG1, sizeof(Int) );
if (ARG4 != 0)
PRE_MEM_READ( "futex(timeout)", ARG4, sizeof(struct vki_timespec) );
break;
case VKI_FUTEX_REQUEUE:
case VKI_FUTEX_CMP_REQUEUE:
case VKI_FUTEX_CMP_REQUEUE_PI:
case VKI_FUTEX_WAKE_OP:
PRE_MEM_READ( "futex(futex)", ARG1, sizeof(Int) );
PRE_MEM_READ( "futex(futex2)", ARG5, sizeof(Int) );
break;
case VKI_FUTEX_FD:
case VKI_FUTEX_TRYLOCK_PI:
case VKI_FUTEX_UNLOCK_PI:
PRE_MEM_READ( "futex(futex)", ARG1, sizeof(Int) );
break;
case VKI_FUTEX_WAKE:
case VKI_FUTEX_WAKE_BITSET:
/* no additional pointers */
break;
default:
SET_STATUS_Failure( VKI_ENOSYS ); // some futex function we don't understand
break;
}
}
POST(sys_futex)
{
vg_assert(SUCCESS);
POST_MEM_WRITE( ARG1, sizeof(int) );
if (ARG2 == VKI_FUTEX_FD) {
if (!ML_(fd_allowed)(RES, "futex", tid, True)) {
VG_(close)(RES);
SET_STATUS_Failure( VKI_EMFILE );
} else {
if (VG_(clo_track_fds))
ML_(record_fd_open_nameless)(tid, RES);
}
}
}
PRE(sys_set_robust_list)
{
PRINT("sys_set_robust_list ( %#lx, %ld )", ARG1,ARG2);
PRE_REG_READ2(long, "set_robust_list",
struct vki_robust_list_head *, head, vki_size_t, len);
/* Just check that the robust_list_head structure is readable - don't
try to chase the list, as the kernel will only read it when the
thread exits, so its current contents are irrelevant. */
if (ARG1 != 0)
PRE_MEM_READ("set_robust_list(head)", ARG1, ARG2);
}
PRE(sys_get_robust_list)
{
PRINT("sys_get_robust_list ( %ld, %#lx, %ld )", ARG1,ARG2,ARG3);
PRE_REG_READ3(long, "get_robust_list",
int, pid,
struct vki_robust_list_head **, head_ptr,
vki_size_t *, len_ptr);
PRE_MEM_WRITE("get_robust_list(head_ptr)",
ARG2, sizeof(struct vki_robust_list_head *));
PRE_MEM_WRITE("get_robust_list(len_ptr)",
ARG3, sizeof(struct vki_size_t *));
}
POST(sys_get_robust_list)
{
POST_MEM_WRITE(ARG2, sizeof(struct vki_robust_list_head *));
POST_MEM_WRITE(ARG3, sizeof(struct vki_size_t *));
}
PRE(sys_pselect6)
{
*flags |= SfMayBlock;
PRINT("sys_pselect6 ( %ld, %#lx, %#lx, %#lx, %#lx, %#lx )", ARG1,ARG2,ARG3,ARG4,ARG5,ARG6);
PRE_REG_READ6(long, "pselect6",
int, n, vki_fd_set *, readfds, vki_fd_set *, writefds,
vki_fd_set *, exceptfds, struct vki_timeval *, timeout,
void *, sig);
// XXX: this possibly understates how much memory is read.
if (ARG2 != 0)
PRE_MEM_READ( "pselect6(readfds)",
ARG2, ARG1/8 /* __FD_SETSIZE/8 */ );
if (ARG3 != 0)
PRE_MEM_READ( "pselect6(writefds)",
ARG3, ARG1/8 /* __FD_SETSIZE/8 */ );
if (ARG4 != 0)
PRE_MEM_READ( "pselect6(exceptfds)",
ARG4, ARG1/8 /* __FD_SETSIZE/8 */ );
if (ARG5 != 0)
PRE_MEM_READ( "pselect6(timeout)", ARG5, sizeof(struct vki_timeval) );
if (ARG6 != 0)
PRE_MEM_READ( "pselect6(sig)", ARG6, sizeof(void *)+sizeof(vki_size_t) );
}
PRE(sys_ppoll)
{
UInt i;
struct vki_pollfd* ufds = (struct vki_pollfd *)ARG1;
*flags |= SfMayBlock;
PRINT("sys_ppoll ( %#lx, %ld, %#lx, %#lx, %llu )\n", ARG1,ARG2,ARG3,ARG4,(ULong)ARG5);
PRE_REG_READ5(long, "ppoll",
struct vki_pollfd *, ufds, unsigned int, nfds,
struct vki_timespec *, tsp, vki_sigset_t *, sigmask,
vki_size_t, sigsetsize);
for (i = 0; i < ARG2; i++) {
PRE_MEM_READ( "ppoll(ufds.fd)",
(Addr)(&ufds[i].fd), sizeof(ufds[i].fd) );
PRE_MEM_READ( "ppoll(ufds.events)",
(Addr)(&ufds[i].events), sizeof(ufds[i].events) );
PRE_MEM_WRITE( "ppoll(ufd.revents)",
(Addr)(&ufds[i].revents), sizeof(ufds[i].revents) );
}
if (ARG3)
PRE_MEM_READ( "ppoll(tsp)", ARG3, sizeof(struct vki_timespec) );
if (ARG4)
PRE_MEM_READ( "ppoll(sigmask)", ARG4, sizeof(vki_sigset_t) );
}
POST(sys_ppoll)
{
if (RES > 0) {
UInt i;
struct vki_pollfd* ufds = (struct vki_pollfd *)ARG1;
for (i = 0; i < ARG2; i++)
POST_MEM_WRITE( (Addr)(&ufds[i].revents), sizeof(ufds[i].revents) );
}
}
/* ---------------------------------------------------------------------
epoll_* wrappers
------------------------------------------------------------------ */
PRE(sys_epoll_create)
{
PRINT("sys_epoll_create ( %ld )", ARG1);
PRE_REG_READ1(long, "epoll_create", int, size);
}
POST(sys_epoll_create)
{
vg_assert(SUCCESS);
if (!ML_(fd_allowed)(RES, "epoll_create", tid, True)) {
VG_(close)(RES);
SET_STATUS_Failure( VKI_EMFILE );
} else {
if (VG_(clo_track_fds))
ML_(record_fd_open_nameless) (tid, RES);
}
}
PRE(sys_epoll_create1)
{
PRINT("sys_epoll_create1 ( %ld )", ARG1);
PRE_REG_READ1(long, "epoll_create1", int, flags);
}
POST(sys_epoll_create1)
{
vg_assert(SUCCESS);
if (!ML_(fd_allowed)(RES, "epoll_create1", tid, True)) {
VG_(close)(RES);
SET_STATUS_Failure( VKI_EMFILE );
} else {
if (VG_(clo_track_fds))
ML_(record_fd_open_nameless) (tid, RES);
}
}
PRE(sys_epoll_ctl)
{
static const HChar* epoll_ctl_s[3] = {
"EPOLL_CTL_ADD",
"EPOLL_CTL_DEL",
"EPOLL_CTL_MOD"
};
PRINT("sys_epoll_ctl ( %ld, %s, %ld, %#lx )",
ARG1, ( ARG2<3 ? epoll_ctl_s[ARG2] : "?" ), ARG3, ARG4);
PRE_REG_READ4(long, "epoll_ctl",
int, epfd, int, op, int, fd, struct vki_epoll_event *, event);
if (ARG2 != VKI_EPOLL_CTL_DEL)
PRE_MEM_READ( "epoll_ctl(event)", ARG4, sizeof(struct vki_epoll_event) );
}
PRE(sys_epoll_wait)
{
*flags |= SfMayBlock;
PRINT("sys_epoll_wait ( %ld, %#lx, %ld, %ld )", ARG1, ARG2, ARG3, ARG4);
PRE_REG_READ4(long, "epoll_wait",
int, epfd, struct vki_epoll_event *, events,
int, maxevents, int, timeout);
PRE_MEM_WRITE( "epoll_wait(events)", ARG2, sizeof(struct vki_epoll_event)*ARG3);
}
POST(sys_epoll_wait)
{
vg_assert(SUCCESS);
if (RES > 0)
POST_MEM_WRITE( ARG2, sizeof(struct vki_epoll_event)*RES ) ;
}
PRE(sys_epoll_pwait)
{
*flags |= SfMayBlock;
PRINT("sys_epoll_pwait ( %ld, %#lx, %ld, %ld, %#lx, %llu )", ARG1,ARG2,ARG3,ARG4,ARG5,(ULong)ARG6);
PRE_REG_READ6(long, "epoll_pwait",
int, epfd, struct vki_epoll_event *, events,
int, maxevents, int, timeout, vki_sigset_t *, sigmask,
vki_size_t, sigsetsize);
PRE_MEM_WRITE( "epoll_pwait(events)", ARG2, sizeof(struct vki_epoll_event)*ARG3);
if (ARG5)
PRE_MEM_READ( "epoll_pwait(sigmask)", ARG5, sizeof(vki_sigset_t) );
}
POST(sys_epoll_pwait)
{
vg_assert(SUCCESS);
if (RES > 0)
POST_MEM_WRITE( ARG2, sizeof(struct vki_epoll_event)*RES ) ;
}
PRE(sys_eventfd)
{
PRINT("sys_eventfd ( %lu )", ARG1);
PRE_REG_READ1(long, "sys_eventfd", unsigned int, count);
}
POST(sys_eventfd)
{
if (!ML_(fd_allowed)(RES, "eventfd", tid, True)) {
VG_(close)(RES);
SET_STATUS_Failure( VKI_EMFILE );
} else {
if (VG_(clo_track_fds))
ML_(record_fd_open_nameless) (tid, RES);
}
}
PRE(sys_eventfd2)
{
PRINT("sys_eventfd2 ( %lu, %ld )", ARG1,ARG2);
PRE_REG_READ2(long, "sys_eventfd2", unsigned int, count, int, flags);
}
POST(sys_eventfd2)
{
if (!ML_(fd_allowed)(RES, "eventfd2", tid, True)) {
VG_(close)(RES);
SET_STATUS_Failure( VKI_EMFILE );
} else {
if (VG_(clo_track_fds))
ML_(record_fd_open_nameless) (tid, RES);
}
}
PRE(sys_fallocate)
{
*flags |= SfMayBlock;
#if VG_WORDSIZE == 4
PRINT("sys_fallocate ( %ld, %ld, %lld, %lld )",
ARG1, ARG2, MERGE64(ARG3,ARG4), MERGE64(ARG5,ARG6));
PRE_REG_READ6(long, "fallocate",
int, fd, int, mode,
unsigned, MERGE64_FIRST(offset), unsigned, MERGE64_SECOND(offset),
unsigned, MERGE64_FIRST(len), unsigned, MERGE64_SECOND(len));
#elif VG_WORDSIZE == 8
PRINT("sys_fallocate ( %ld, %ld, %lld, %lld )",
ARG1, ARG2, (Long)ARG3, (Long)ARG4);
PRE_REG_READ4(long, "fallocate",
int, fd, int, mode, vki_loff_t, offset, vki_loff_t, len);
#else
# error Unexpected word size
#endif
if (!ML_(fd_allowed)(ARG1, "fallocate", tid, False))
SET_STATUS_Failure( VKI_EBADF );
}
PRE(sys_prlimit64)
{
PRINT("sys_prlimit64 ( %ld, %ld, %#lx, %#lx )", ARG1,ARG2,ARG3,ARG4);
PRE_REG_READ4(long, "prlimit64",
vki_pid_t, pid, unsigned int, resource,
const struct rlimit64 *, new_rlim,
struct rlimit64 *, old_rlim);
if (ARG3)
PRE_MEM_READ( "rlimit64(new_rlim)", ARG3, sizeof(struct vki_rlimit64) );
if (ARG4)
PRE_MEM_WRITE( "rlimit64(old_rlim)", ARG4, sizeof(struct vki_rlimit64) );
if (ARG3 &&
((struct vki_rlimit64 *)ARG3)->rlim_cur > ((struct vki_rlimit64 *)ARG3)->rlim_max) {
SET_STATUS_Failure( VKI_EINVAL );
}
else if (ARG1 == 0 || ARG1 == VG_(getpid)()) {
switch (ARG2) {
case VKI_RLIMIT_NOFILE:
SET_STATUS_Success( 0 );
if (ARG4) {
((struct vki_rlimit64 *)ARG4)->rlim_cur = VG_(fd_soft_limit);
((struct vki_rlimit64 *)ARG4)->rlim_max = VG_(fd_hard_limit);
}
if (ARG3) {
if (((struct vki_rlimit64 *)ARG3)->rlim_cur > VG_(fd_hard_limit) ||
((struct vki_rlimit64 *)ARG3)->rlim_max != VG_(fd_hard_limit)) {
SET_STATUS_Failure( VKI_EPERM );
}
else {
VG_(fd_soft_limit) = ((struct vki_rlimit64 *)ARG3)->rlim_cur;
}
}
break;
case VKI_RLIMIT_DATA:
SET_STATUS_Success( 0 );
if (ARG4) {
((struct vki_rlimit64 *)ARG4)->rlim_cur = VG_(client_rlimit_data).rlim_cur;
((struct vki_rlimit64 *)ARG4)->rlim_max = VG_(client_rlimit_data).rlim_max;
}
if (ARG3) {
if (((struct vki_rlimit64 *)ARG3)->rlim_cur > VG_(client_rlimit_data).rlim_max ||
((struct vki_rlimit64 *)ARG3)->rlim_max > VG_(client_rlimit_data).rlim_max) {
SET_STATUS_Failure( VKI_EPERM );
}
else {
VG_(client_rlimit_data).rlim_cur = ((struct vki_rlimit64 *)ARG3)->rlim_cur;
VG_(client_rlimit_data).rlim_max = ((struct vki_rlimit64 *)ARG3)->rlim_max;
}
}
break;
case VKI_RLIMIT_STACK:
SET_STATUS_Success( 0 );
if (ARG4) {
((struct vki_rlimit64 *)ARG4)->rlim_cur = VG_(client_rlimit_stack).rlim_cur;
((struct vki_rlimit64 *)ARG4)->rlim_max = VG_(client_rlimit_stack).rlim_max;
}
if (ARG3) {
if (((struct vki_rlimit64 *)ARG3)->rlim_cur > VG_(client_rlimit_stack).rlim_max ||
((struct vki_rlimit64 *)ARG3)->rlim_max > VG_(client_rlimit_stack).rlim_max) {
SET_STATUS_Failure( VKI_EPERM );
}
else {
VG_(threads)[tid].client_stack_szB = ((struct vki_rlimit64 *)ARG3)->rlim_cur;
VG_(client_rlimit_stack).rlim_cur = ((struct vki_rlimit64 *)ARG3)->rlim_cur;
VG_(client_rlimit_stack).rlim_max = ((struct vki_rlimit64 *)ARG3)->rlim_max;
}
}
break;
}
}
}
POST(sys_prlimit64)
{
if (ARG4)
POST_MEM_WRITE( ARG4, sizeof(struct vki_rlimit64) );
}
/* ---------------------------------------------------------------------
tid-related wrappers
------------------------------------------------------------------ */
PRE(sys_gettid)
{
PRINT("sys_gettid ()");
PRE_REG_READ0(long, "gettid");
}
PRE(sys_set_tid_address)
{
PRINT("sys_set_tid_address ( %#lx )", ARG1);
PRE_REG_READ1(long, "set_tid_address", int *, tidptr);
}
PRE(sys_tkill)
{
PRINT("sys_tgkill ( %ld, %ld )", ARG1,ARG2);
PRE_REG_READ2(long, "tkill", int, tid, int, sig);
if (!ML_(client_signal_OK)(ARG2)) {
SET_STATUS_Failure( VKI_EINVAL );
return;
}
/* Check to see if this kill gave us a pending signal */
*flags |= SfPollAfter;
if (VG_(clo_trace_signals))
VG_(message)(Vg_DebugMsg, "tkill: sending signal %ld to pid %ld\n",
ARG2, ARG1);
/* If we're sending SIGKILL, check to see if the target is one of
our threads and handle it specially. */
if (ARG2 == VKI_SIGKILL && ML_(do_sigkill)(ARG1, -1)) {
SET_STATUS_Success(0);
return;
}
/* Ask to handle this syscall via the slow route, since that's the
only one that sets tst->status to VgTs_WaitSys. If the result
of doing the syscall is an immediate run of
async_signalhandler() in m_signals, then we need the thread to
be properly tidied away. I have the impression the previous
version of this wrapper worked on x86/amd64 only because the
kernel did not immediately deliver the async signal to this
thread (on ppc it did, which broke the assertion re tst->status
at the top of async_signalhandler()). */
*flags |= SfMayBlock;
}
POST(sys_tkill)
{
if (VG_(clo_trace_signals))
VG_(message)(Vg_DebugMsg, "tkill: sent signal %ld to pid %ld\n",
ARG2, ARG1);
}
PRE(sys_tgkill)
{
PRINT("sys_tgkill ( %ld, %ld, %ld )", ARG1,ARG2,ARG3);
PRE_REG_READ3(long, "tgkill", int, tgid, int, tid, int, sig);
if (!ML_(client_signal_OK)(ARG3)) {
SET_STATUS_Failure( VKI_EINVAL );
return;
}
/* Check to see if this kill gave us a pending signal */
*flags |= SfPollAfter;
if (VG_(clo_trace_signals))
VG_(message)(Vg_DebugMsg,
"tgkill: sending signal %ld to pid %ld/%ld\n",
ARG3, ARG1, ARG2);
/* If we're sending SIGKILL, check to see if the target is one of
our threads and handle it specially. */
if (ARG3 == VKI_SIGKILL && ML_(do_sigkill)(ARG2, ARG1)) {
SET_STATUS_Success(0);
return;
}
/* Ask to handle this syscall via the slow route, since that's the
only one that sets tst->status to VgTs_WaitSys. If the result
of doing the syscall is an immediate run of
async_signalhandler() in m_signals, then we need the thread to
be properly tidied away. I have the impression the previous
version of this wrapper worked on x86/amd64 only because the
kernel did not immediately deliver the async signal to this
thread (on ppc it did, which broke the assertion re tst->status
at the top of async_signalhandler()). */
*flags |= SfMayBlock;
}
POST(sys_tgkill)
{
if (VG_(clo_trace_signals))
VG_(message)(Vg_DebugMsg,
"tgkill: sent signal %ld to pid %ld/%ld\n",
ARG3, ARG1, ARG2);
}
/* ---------------------------------------------------------------------
fadvise64* wrappers
------------------------------------------------------------------ */
PRE(sys_fadvise64)
{
PRINT("sys_fadvise64 ( %ld, %lld, %lu, %ld )",
ARG1, MERGE64(ARG2,ARG3), ARG4, ARG5);
PRE_REG_READ5(long, "fadvise64",
int, fd, vki_u32, MERGE64_FIRST(offset), vki_u32, MERGE64_SECOND(offset),
vki_size_t, len, int, advice);
}
PRE(sys_fadvise64_64)
{
PRINT("sys_fadvise64_64 ( %ld, %lld, %lld, %ld )",
ARG1, MERGE64(ARG2,ARG3), MERGE64(ARG4,ARG5), ARG6);
PRE_REG_READ6(long, "fadvise64_64",
int, fd, vki_u32, MERGE64_FIRST(offset), vki_u32, MERGE64_SECOND(offset),
vki_u32, MERGE64_FIRST(len), vki_u32, MERGE64_SECOND(len), int, advice);
}
/* ---------------------------------------------------------------------
io_* wrappers
------------------------------------------------------------------ */
// Nb: this wrapper has to pad/unpad memory around the syscall itself,
// and this allows us to control exactly the code that gets run while
// the padding is in place.
PRE(sys_io_setup)
{
PRINT("sys_io_setup ( %lu, %#lx )", ARG1,ARG2);
PRE_REG_READ2(long, "io_setup",
unsigned, nr_events, vki_aio_context_t *, ctxp);
PRE_MEM_WRITE( "io_setup(ctxp)", ARG2, sizeof(vki_aio_context_t) );
}
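// On success the kernel has mapped an aio ring (a vki_aio_ring header
// followed by ARG1 io_event slots) into the client address space, at
// the address written to *ctxp; the POST below therefore notifies
// aspacem and the tool of that mapping as well as of the write to
// *ctxp.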
POST(sys_io_setup)
{
SizeT size;
struct vki_aio_ring *r;
size = VG_PGROUNDUP(sizeof(struct vki_aio_ring) +
ARG1*sizeof(struct vki_io_event));
r = *(struct vki_aio_ring **)ARG2;
vg_assert(ML_(valid_client_addr)((Addr)r, size, tid, "io_setup"));
ML_(notify_core_and_tool_of_mmap)( (Addr)r, size,
VKI_PROT_READ | VKI_PROT_WRITE,
VKI_MAP_ANONYMOUS, -1, 0 );
POST_MEM_WRITE( ARG2, sizeof(vki_aio_context_t) );
}
// Nb: This wrapper is "Special" because we need 'size' to do the unmap
// after the syscall. We must get 'size' from the aio_ring structure,
// before the syscall, while the aio_ring structure still exists. (And we
// know that we must look at the aio_ring structure because Tom inspected the
// kernel and glibc sources to see what they do, yuk.)
//
// XXX This segment can be implicitly unmapped when aio
// file-descriptors are closed...
PRE(sys_io_destroy)
{
SizeT size = 0;
PRINT("sys_io_destroy ( %llu )", (ULong)ARG1);
PRE_REG_READ1(long, "io_destroy", vki_aio_context_t, ctx);
// If we are going to seg fault (due to a bogus ARG1) do it as late as
// possible...
if (ML_(safe_to_deref)( (void*)ARG1, sizeof(struct vki_aio_ring))) {
struct vki_aio_ring *r = (struct vki_aio_ring *)ARG1;
size = VG_PGROUNDUP(sizeof(struct vki_aio_ring) +
r->nr*sizeof(struct vki_io_event));
}
SET_STATUS_from_SysRes( VG_(do_syscall1)(SYSNO, ARG1) );
if (SUCCESS && RES == 0) {
Bool d = VG_(am_notify_munmap)( ARG1, size );
VG_TRACK( die_mem_munmap, ARG1, size );
if (d)
VG_(discard_translations)( (Addr64)ARG1, (ULong)size,
"PRE(sys_io_destroy)" );
}
}
PRE(sys_io_getevents)
{
*flags |= SfMayBlock;
PRINT("sys_io_getevents ( %llu, %lld, %lld, %#lx, %#lx )",
(ULong)ARG1,(Long)ARG2,(Long)ARG3,ARG4,ARG5);
PRE_REG_READ5(long, "io_getevents",
vki_aio_context_t, ctx_id, long, min_nr, long, nr,
struct io_event *, events,
struct timespec *, timeout);
if (ARG3 > 0)
PRE_MEM_WRITE( "io_getevents(events)",
ARG4, sizeof(struct vki_io_event)*ARG3 );
if (ARG5 != 0)
PRE_MEM_READ( "io_getevents(timeout)",
ARG5, sizeof(struct vki_timespec));
}
POST(sys_io_getevents)
{
Int i;
vg_assert(SUCCESS);
if (RES > 0) {
POST_MEM_WRITE( ARG4, sizeof(struct vki_io_event)*RES );
for (i = 0; i < RES; i++) {
const struct vki_io_event *vev = ((struct vki_io_event *)ARG4) + i;
const struct vki_iocb *cb = (struct vki_iocb *)(Addr)vev->obj;
switch (cb->aio_lio_opcode) {
case VKI_IOCB_CMD_PREAD:
if (vev->result > 0)
POST_MEM_WRITE( cb->aio_buf, vev->result );
break;
case VKI_IOCB_CMD_PWRITE:
break;
case VKI_IOCB_CMD_FSYNC:
break;
case VKI_IOCB_CMD_FDSYNC:
break;
case VKI_IOCB_CMD_PREADV:
if (vev->result > 0) {
struct vki_iovec * vec = (struct vki_iovec *)(Addr)cb->aio_buf;
Int remains = vev->result;
Int j;
for (j = 0; j < cb->aio_nbytes; j++) {
Int nReadThisBuf = vec[j].iov_len;
if (nReadThisBuf > remains) nReadThisBuf = remains;
POST_MEM_WRITE( (Addr)vec[j].iov_base, nReadThisBuf );
remains -= nReadThisBuf;
if (remains < 0) VG_(core_panic)("io_getevents(PREADV): remains < 0");
}
}
break;
case VKI_IOCB_CMD_PWRITEV:
break;
default:
VG_(message)(Vg_DebugMsg,
"Warning: unhandled io_getevents opcode: %u\n",
cb->aio_lio_opcode);
break;
}
}
}
}
PRE(sys_io_submit)
{
Int i, j;
PRINT("sys_io_submit ( %llu, %ld, %#lx )", (ULong)ARG1,ARG2,ARG3);
PRE_REG_READ3(long, "io_submit",
vki_aio_context_t, ctx_id, long, nr,
struct iocb **, iocbpp);
PRE_MEM_READ( "io_submit(iocbpp)", ARG3, ARG2*sizeof(struct vki_iocb *) );
if (ARG3 != 0) {
for (i = 0; i < ARG2; i++) {
struct vki_iocb *cb = ((struct vki_iocb **)ARG3)[i];
struct vki_iovec *iov;
PRE_MEM_READ( "io_submit(iocb)", (Addr)cb, sizeof(struct vki_iocb) );
switch (cb->aio_lio_opcode) {
case VKI_IOCB_CMD_PREAD:
PRE_MEM_WRITE( "io_submit(PREAD)", cb->aio_buf, cb->aio_nbytes );
break;
case VKI_IOCB_CMD_PWRITE:
PRE_MEM_READ( "io_submit(PWRITE)", cb->aio_buf, cb->aio_nbytes );
break;
case VKI_IOCB_CMD_FSYNC:
break;
case VKI_IOCB_CMD_FDSYNC:
break;
case VKI_IOCB_CMD_PREADV:
iov = (struct vki_iovec *)(Addr)cb->aio_buf;
PRE_MEM_READ( "io_submit(PREADV)", cb->aio_buf, cb->aio_nbytes * sizeof(struct vki_iovec) );
for (j = 0; j < cb->aio_nbytes; j++)
PRE_MEM_WRITE( "io_submit(PREADV(iov[i]))", (Addr)iov[j].iov_base, iov[j].iov_len );
break;
case VKI_IOCB_CMD_PWRITEV:
iov = (struct vki_iovec *)(Addr)cb->aio_buf;
PRE_MEM_READ( "io_submit(PWRITEV)", cb->aio_buf, cb->aio_nbytes * sizeof(struct vki_iovec) );
for (j = 0; j < cb->aio_nbytes; j++)
PRE_MEM_READ( "io_submit(PWRITEV(iov[i]))", (Addr)iov[j].iov_base, iov[j].iov_len );
break;
default:
VG_(message)(Vg_DebugMsg,"Warning: unhandled io_submit opcode: %u\n",
cb->aio_lio_opcode);
break;
}
}
}
}
PRE(sys_io_cancel)
{
PRINT("sys_io_cancel ( %llu, %#lx, %#lx )", (ULong)ARG1,ARG2,ARG3);
PRE_REG_READ3(long, "io_cancel",
vki_aio_context_t, ctx_id, struct iocb *, iocb,
struct io_event *, result);
PRE_MEM_READ( "io_cancel(iocb)", ARG2, sizeof(struct vki_iocb) );
PRE_MEM_WRITE( "io_cancel(result)", ARG3, sizeof(struct vki_io_event) );
}
POST(sys_io_cancel)
{
POST_MEM_WRITE( ARG3, sizeof(struct vki_io_event) );
}
/* ---------------------------------------------------------------------
*_mempolicy wrappers
------------------------------------------------------------------ */
PRE(sys_mbind)
{
PRINT("sys_mbind ( %#lx, %lu, %ld, %#lx, %lu, %lu )", ARG1,ARG2,ARG3,ARG4,ARG5,ARG6);
PRE_REG_READ6(long, "mbind",
unsigned long, start, unsigned long, len,
unsigned long, policy, unsigned long *, nodemask,
unsigned long, maxnode, unsigned, flags);
if (ARG1 != 0)
PRE_MEM_READ( "mbind(nodemask)", ARG4,
VG_ROUNDUP( ARG5-1, sizeof(UWord) * 8 ) / 8 );
}
PRE(sys_set_mempolicy)
{
PRINT("sys_set_mempolicy ( %ld, %#lx, %ld )", ARG1,ARG2,ARG3);
PRE_REG_READ3(long, "set_mempolicy",
int, policy, unsigned long *, nodemask,
unsigned long, maxnode);
PRE_MEM_READ( "set_mempolicy(nodemask)", ARG2,
VG_ROUNDUP( ARG3-1, sizeof(UWord) * 8 ) / 8 );
}
PRE(sys_get_mempolicy)
{
PRINT("sys_get_mempolicy ( %#lx, %#lx, %ld, %#lx, %lx )", ARG1,ARG2,ARG3,ARG4,ARG5);
PRE_REG_READ5(long, "get_mempolicy",
int *, policy, unsigned long *, nodemask,
unsigned long, maxnode, unsigned long, addr,
unsigned long, flags);
if (ARG1 != 0)
PRE_MEM_WRITE( "get_mempolicy(policy)", ARG1, sizeof(Int) );
if (ARG2 != 0)
PRE_MEM_WRITE( "get_mempolicy(nodemask)", ARG2,
VG_ROUNDUP( ARG3-1, sizeof(UWord) * 8 ) / 8 );
}
POST(sys_get_mempolicy)
{
if (ARG1 != 0)
POST_MEM_WRITE( ARG1, sizeof(Int) );
if (ARG2 != 0)
POST_MEM_WRITE( ARG2, VG_ROUNDUP( ARG3-1, sizeof(UWord) * 8 ) / 8 );
}
/* ---------------------------------------------------------------------
fanotify_* wrappers
------------------------------------------------------------------ */
PRE(sys_fanotify_init)
{
PRINT("sys_fanotify_init ( %lu, %lu )", ARG1,ARG2);
PRE_REG_READ2(long, "fanotify_init",
unsigned int, flags, unsigned int, event_f_flags);
}
POST(sys_fanotify_init)
{
vg_assert(SUCCESS);
if (!ML_(fd_allowed)(RES, "fanotify_init", tid, True)) {
VG_(close)(RES);
SET_STATUS_Failure( VKI_EMFILE );
} else {
if (VG_(clo_track_fds))
ML_(record_fd_open_nameless) (tid, RES);
}
}
PRE(sys_fanotify_mark)
{
#if VG_WORDSIZE == 4
PRINT( "sys_fanotify_mark ( %ld, %lu, %llu, %ld, %#lx(%s))",
ARG1,ARG2,MERGE64(ARG3,ARG4),ARG5,ARG6,(char *)ARG6);
PRE_REG_READ6(long, "sys_fanotify_mark",
int, fanotify_fd, unsigned int, flags,
__vki_u32, mask0, __vki_u32, mask1,
int, dfd, const char *, pathname);
if (ARG6)
PRE_MEM_RASCIIZ( "fanotify_mark(path)", ARG6);
#elif VG_WORDSIZE == 8
PRINT( "sys_fanotify_mark ( %ld, %lu, %llu, %ld, %#lx(%s))",
ARG1,ARG2,(ULong)ARG3,ARG4,ARG5,(char *)ARG5);
PRE_REG_READ5(long, "sys_fanotify_mark",
int, fanotify_fd, unsigned int, flags,
__vki_u64, mask,
int, dfd, const char *, pathname);
if (ARG5)
PRE_MEM_RASCIIZ( "fanotify_mark(path)", ARG5);
#else
# error Unexpected word size
#endif
}
/* ---------------------------------------------------------------------
inotify_* wrappers
------------------------------------------------------------------ */
PRE(sys_inotify_init)
{
PRINT("sys_inotify_init ( )");
PRE_REG_READ0(long, "inotify_init");
}
POST(sys_inotify_init)
{
vg_assert(SUCCESS);
if (!ML_(fd_allowed)(RES, "inotify_init", tid, True)) {
VG_(close)(RES);
SET_STATUS_Failure( VKI_EMFILE );
} else {
if (VG_(clo_track_fds))
ML_(record_fd_open_nameless) (tid, RES);
}
}
PRE(sys_inotify_init1)
{
PRINT("sys_inotify_init ( %ld )", ARG1);
PRE_REG_READ1(long, "inotify_init", int, flag);
}
POST(sys_inotify_init1)
{
vg_assert(SUCCESS);
if (!ML_(fd_allowed)(RES, "inotify_init", tid, True)) {
VG_(close)(RES);
SET_STATUS_Failure( VKI_EMFILE );
} else {
if (VG_(clo_track_fds))
ML_(record_fd_open_nameless) (tid, RES);
}
}
PRE(sys_inotify_add_watch)
{
PRINT( "sys_inotify_add_watch ( %ld, %#lx, %lx )", ARG1,ARG2,ARG3);
PRE_REG_READ3(long, "inotify_add_watch", int, fd, char *, path, int, mask);
PRE_MEM_RASCIIZ( "inotify_add_watch(path)", ARG2 );
}
PRE(sys_inotify_rm_watch)
{
PRINT( "sys_inotify_rm_watch ( %ld, %lx )", ARG1,ARG2);
PRE_REG_READ2(long, "inotify_rm_watch", int, fd, int, wd);
}
/* ---------------------------------------------------------------------
mq_* wrappers
------------------------------------------------------------------ */
PRE(sys_mq_open)
{
PRINT("sys_mq_open( %#lx(%s), %ld, %lld, %#lx )",
ARG1,(char*)ARG1,ARG2,(ULong)ARG3,ARG4);
PRE_REG_READ4(long, "mq_open",
const char *, name, int, oflag, vki_mode_t, mode,
struct mq_attr *, attr);
PRE_MEM_RASCIIZ( "mq_open(name)", ARG1 );
if ((ARG2 & VKI_O_CREAT) != 0 && ARG4 != 0) {
const struct vki_mq_attr *attr = (struct vki_mq_attr *)ARG4;
PRE_MEM_READ( "mq_open(attr->mq_maxmsg)",
(Addr)&attr->mq_maxmsg, sizeof(attr->mq_maxmsg) );
PRE_MEM_READ( "mq_open(attr->mq_msgsize)",
(Addr)&attr->mq_msgsize, sizeof(attr->mq_msgsize) );
}
}
POST(sys_mq_open)
{
vg_assert(SUCCESS);
if (!ML_(fd_allowed)(RES, "mq_open", tid, True)) {
VG_(close)(RES);
SET_STATUS_Failure( VKI_EMFILE );
} else {
if (VG_(clo_track_fds))
ML_(record_fd_open_with_given_name)(tid, RES, (HChar*)ARG1);
}
}
PRE(sys_mq_unlink)
{
PRINT("sys_mq_unlink ( %#lx(%s) )", ARG1,(char*)ARG1);
PRE_REG_READ1(long, "mq_unlink", const char *, name);
PRE_MEM_RASCIIZ( "mq_unlink(name)", ARG1 );
}
PRE(sys_mq_timedsend)
{
*flags |= SfMayBlock;
PRINT("sys_mq_timedsend ( %ld, %#lx, %llu, %ld, %#lx )",
ARG1,ARG2,(ULong)ARG3,ARG4,ARG5);
PRE_REG_READ5(long, "mq_timedsend",
vki_mqd_t, mqdes, const char *, msg_ptr, vki_size_t, msg_len,
unsigned int, msg_prio, const struct timespec *, abs_timeout);
if (!ML_(fd_allowed)(ARG1, "mq_timedsend", tid, False)) {
SET_STATUS_Failure( VKI_EBADF );
} else {
PRE_MEM_READ( "mq_timedsend(msg_ptr)", ARG2, ARG3 );
if (ARG5 != 0)
PRE_MEM_READ( "mq_timedsend(abs_timeout)", ARG5,
sizeof(struct vki_timespec) );
}
}
PRE(sys_mq_timedreceive)
{
*flags |= SfMayBlock;
PRINT("sys_mq_timedreceive( %ld, %#lx, %llu, %#lx, %#lx )",
ARG1,ARG2,(ULong)ARG3,ARG4,ARG5);
PRE_REG_READ5(ssize_t, "mq_timedreceive",
vki_mqd_t, mqdes, char *, msg_ptr, vki_size_t, msg_len,
unsigned int *, msg_prio,
const struct timespec *, abs_timeout);
if (!ML_(fd_allowed)(ARG1, "mq_timedreceive", tid, False)) {
SET_STATUS_Failure( VKI_EBADF );
} else {
PRE_MEM_WRITE( "mq_timedreceive(msg_ptr)", ARG2, ARG3 );
if (ARG4 != 0)
PRE_MEM_WRITE( "mq_timedreceive(msg_prio)",
ARG4, sizeof(unsigned int) );
if (ARG5 != 0)
PRE_MEM_READ( "mq_timedreceive(abs_timeout)",
ARG5, sizeof(struct vki_timespec) );
}
}
POST(sys_mq_timedreceive)
{
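/* RES is the number of bytes actually received, so only that much
of the receive buffer is now defined. */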
POST_MEM_WRITE( ARG2, RES );
if (ARG4 != 0)
POST_MEM_WRITE( ARG4, sizeof(unsigned int) );
}
PRE(sys_mq_notify)
{
PRINT("sys_mq_notify( %ld, %#lx )", ARG1,ARG2 );
PRE_REG_READ2(long, "mq_notify",
vki_mqd_t, mqdes, const struct sigevent *, notification);
if (!ML_(fd_allowed)(ARG1, "mq_notify", tid, False))
SET_STATUS_Failure( VKI_EBADF );
else if (ARG2 != 0)
PRE_MEM_READ( "mq_notify(notification)",
ARG2, sizeof(struct vki_sigevent) );
}
PRE(sys_mq_getsetattr)
{
PRINT("sys_mq_getsetattr( %ld, %#lx, %#lx )", ARG1,ARG2,ARG3 );
PRE_REG_READ3(long, "mq_getsetattr",
vki_mqd_t, mqdes, const struct mq_attr *, mqstat,
struct mq_attr *, omqstat);
if (!ML_(fd_allowed)(ARG1, "mq_getsetattr", tid, False)) {
SET_STATUS_Failure( VKI_EBADF );
} else {
if (ARG2 != 0) {
const struct vki_mq_attr *attr = (struct vki_mq_attr *)ARG2;
PRE_MEM_READ( "mq_getsetattr(mqstat->mq_flags)",
(Addr)&attr->mq_flags, sizeof(attr->mq_flags) );
}
if (ARG3 != 0)
PRE_MEM_WRITE( "mq_getsetattr(omqstat)", ARG3,
sizeof(struct vki_mq_attr) );
}
}
POST(sys_mq_getsetattr)
{
if (ARG3 != 0)
POST_MEM_WRITE( ARG3, sizeof(struct vki_mq_attr) );
}
/* ---------------------------------------------------------------------
clock_* wrappers
------------------------------------------------------------------ */
PRE(sys_clock_settime)
{
PRINT("sys_clock_settime( %ld, %#lx )", ARG1,ARG2);
PRE_REG_READ2(long, "clock_settime",
vki_clockid_t, clk_id, const struct timespec *, tp);
PRE_MEM_READ( "clock_settime(tp)", ARG2, sizeof(struct vki_timespec) );
}
PRE(sys_clock_gettime)
{
PRINT("sys_clock_gettime( %ld, %#lx )" , ARG1,ARG2);
PRE_REG_READ2(long, "clock_gettime",
vki_clockid_t, clk_id, struct timespec *, tp);
PRE_MEM_WRITE( "clock_gettime(tp)", ARG2, sizeof(struct vki_timespec) );
}
POST(sys_clock_gettime)
{
POST_MEM_WRITE( ARG2, sizeof(struct vki_timespec) );
}
PRE(sys_clock_getres)
{
PRINT("sys_clock_getres( %ld, %#lx )" , ARG1,ARG2);
// Nb: we can't use "RES" as the param name because that's a macro
// defined above!
PRE_REG_READ2(long, "clock_getres",
vki_clockid_t, clk_id, struct timespec *, res);
if (ARG2 != 0)
PRE_MEM_WRITE( "clock_getres(res)", ARG2, sizeof(struct vki_timespec) );
}
POST(sys_clock_getres)
{
if (ARG2 != 0)
POST_MEM_WRITE( ARG2, sizeof(struct vki_timespec) );
}
PRE(sys_clock_nanosleep)
{
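/* SfPostOnFail: the post-handler must run even on failure, because
rmtp is written back when the sleep is interrupted (EINTR). */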
*flags |= SfMayBlock|SfPostOnFail;
PRINT("sys_clock_nanosleep( %ld, %ld, %#lx, %#lx )", ARG1,ARG2,ARG3,ARG4);
PRE_REG_READ4(int32_t, "clock_nanosleep",
vki_clockid_t, clkid, int, flags,
const struct timespec *, rqtp, struct timespec *, rmtp);
PRE_MEM_READ( "clock_nanosleep(rqtp)", ARG3, sizeof(struct vki_timespec) );
if (ARG4 != 0)
PRE_MEM_WRITE( "clock_nanosleep(rmtp)", ARG4, sizeof(struct vki_timespec) );
}
POST(sys_clock_nanosleep)
{
if (ARG4 != 0 && FAILURE && ERR == VKI_EINTR)
POST_MEM_WRITE( ARG4, sizeof(struct vki_timespec) );
}
/* ---------------------------------------------------------------------
timer_* wrappers
------------------------------------------------------------------ */
PRE(sys_timer_create)
{
PRINT("sys_timer_create( %ld, %#lx, %#lx )", ARG1,ARG2,ARG3);
PRE_REG_READ3(long, "timer_create",
vki_clockid_t, clockid, struct sigevent *, evp,
vki_timer_t *, timerid);
if (ARG2 != 0) {
struct vki_sigevent *evp = (struct vki_sigevent *) ARG2;
PRE_MEM_READ( "timer_create(evp.sigev_value)", (Addr)&evp->sigev_value,
sizeof(vki_sigval_t) );
PRE_MEM_READ( "timer_create(evp.sigev_signo)", (Addr)&evp->sigev_signo,
sizeof(int) );
PRE_MEM_READ( "timer_create(evp.sigev_notify)", (Addr)&evp->sigev_notify,
sizeof(int) );
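/* Only look at the thread-id field if sigev_notify is readable and
requests thread-directed notification (SIGEV_THREAD_ID). */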
if (ML_(safe_to_deref)(&evp->sigev_notify, sizeof(int))
&& (evp->sigev_notify & VKI_SIGEV_THREAD_ID) != 0)
PRE_MEM_READ( "timer_create(evp.sigev_notify_thread_id)",
(Addr)&evp->vki_sigev_notify_thread_id, sizeof(int) );
}
PRE_MEM_WRITE( "timer_create(timerid)", ARG3, sizeof(vki_timer_t) );
}
POST(sys_timer_create)
{
POST_MEM_WRITE( ARG3, sizeof(vki_timer_t) );
}
PRE(sys_timer_settime)
{
PRINT("sys_timer_settime( %lld, %ld, %#lx, %#lx )", (ULong)ARG1,ARG2,ARG3,ARG4);
PRE_REG_READ4(long, "timer_settime",
vki_timer_t, timerid, int, flags,
const struct itimerspec *, value,
struct itimerspec *, ovalue);
PRE_MEM_READ( "timer_settime(value)", ARG3,
sizeof(struct vki_itimerspec) );
if (ARG4 != 0)
PRE_MEM_WRITE( "timer_settime(ovalue)", ARG4,
sizeof(struct vki_itimerspec) );
}
POST(sys_timer_settime)
{
if (ARG4 != 0)
POST_MEM_WRITE( ARG4, sizeof(struct vki_itimerspec) );
}
PRE(sys_timer_gettime)
{
PRINT("sys_timer_gettime( %lld, %#lx )", (ULong)ARG1,ARG2);
PRE_REG_READ2(long, "timer_gettime",
vki_timer_t, timerid, struct itimerspec *, value);
PRE_MEM_WRITE( "timer_gettime(value)", ARG2,
sizeof(struct vki_itimerspec));
}
POST(sys_timer_gettime)
{
POST_MEM_WRITE( ARG2, sizeof(struct vki_itimerspec) );
}
PRE(sys_timer_getoverrun)
{
PRINT("sys_timer_getoverrun( %#lx )", ARG1);
PRE_REG_READ1(long, "timer_getoverrun", vki_timer_t, timerid);
}
PRE(sys_timer_delete)
{
PRINT("sys_timer_delete( %#lx )", ARG1);
PRE_REG_READ1(long, "timer_delete", vki_timer_t, timerid);
}
/* ---------------------------------------------------------------------
timerfd* wrappers
See also http://lwn.net/Articles/260172/ for an overview.
See also /usr/src/linux/fs/timerfd.c for the implementation.
------------------------------------------------------------------ */
/* Returns True if running on a 2.6.22 kernel, else False (also
False if the kernel version cannot be determined). */
static Bool linux_kernel_2_6_22(void)
{
static Int result = -1;
Int fd, read;
HChar release[64];
SysRes res;
if (result == -1) {
res = VG_(open)("/proc/sys/kernel/osrelease", 0, 0);
if (sr_isError(res))
return False;
fd = sr_Res(res);
read = VG_(read)(fd, release, sizeof(release) - 1);
vg_assert(read >= 0);
release[read] = 0;
VG_(close)(fd);
//VG_(printf)("kernel release = %s\n", release);
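/* Accept "2.6.22" followed by a non-digit (e.g. "2.6.22.1"), but
reject versions such as "2.6.220". Cache the answer in 'result'. */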
result = (VG_(strncmp)(release, "2.6.22", 6) == 0
&& (release[6] < '0' || release[6] > '9'));
}
vg_assert(result == 0 || result == 1);
return result == 1;
}
PRE(sys_timerfd_create)
{
if (linux_kernel_2_6_22()) {
/* 2.6.22 kernel: timerfd system call. */
PRINT("sys_timerfd ( %ld, %ld, %#lx )", ARG1, ARG2, ARG3);
PRE_REG_READ3(long, "sys_timerfd",
int, fd, int, clockid, const struct itimerspec *, tmr);
PRE_MEM_READ("timerfd(tmr)", ARG3,
sizeof(struct vki_itimerspec) );
if ((Word)ARG1 != -1L && !ML_(fd_allowed)(ARG1, "timerfd", tid, False))
SET_STATUS_Failure( VKI_EBADF );
} else {
/* 2.6.24 and later kernels: timerfd_create system call. */
PRINT("sys_timerfd_create (%ld, %ld )", ARG1, ARG2);
PRE_REG_READ2(long, "timerfd_create", int, clockid, int, flags);
}
}
POST(sys_timerfd_create)
{
if (linux_kernel_2_6_22())
{
/* 2.6.22 kernel: timerfd system call. */
if (!ML_(fd_allowed)(RES, "timerfd", tid, True)) {
VG_(close)(RES);
SET_STATUS_Failure( VKI_EMFILE );
} else {
if (VG_(clo_track_fds))
ML_(record_fd_open_nameless) (tid, RES);
}
}
else
{
/* 2.6.24 and later kernels: timerfd_create system call. */
if (!ML_(fd_allowed)(RES, "timerfd_create", tid, True)) {
VG_(close)(RES);
SET_STATUS_Failure( VKI_EMFILE );
} else {
if (VG_(clo_track_fds))
ML_(record_fd_open_nameless) (tid, RES);
}
}
}
PRE(sys_timerfd_gettime)
{
PRINT("sys_timerfd_gettime ( %ld, %#lx )", ARG1, ARG2);
PRE_REG_READ2(long, "timerfd_gettime",
int, ufd,
struct vki_itimerspec*, otmr);
if (!ML_(fd_allowed)(ARG1, "timerfd_gettime", tid, False))
SET_STATUS_Failure(VKI_EBADF);
else
PRE_MEM_WRITE("timerfd_gettime(result)",
ARG2, sizeof(struct vki_itimerspec));
}
POST(sys_timerfd_gettime)
{
if (RES == 0)
POST_MEM_WRITE(ARG2, sizeof(struct vki_itimerspec));
}
PRE(sys_timerfd_settime)
{
PRINT("sys_timerfd_settime ( %ld, %ld, %#lx, %#lx )", ARG1, ARG2, ARG3, ARG4);
PRE_REG_READ4(long, "timerfd_settime",
int, ufd,
int, flags,
const struct vki_itimerspec*, utmr,
struct vki_itimerspec*, otmr);
if (!ML_(fd_allowed)(ARG1, "timerfd_settime", tid, False))
SET_STATUS_Failure(VKI_EBADF);
else
{
PRE_MEM_READ("timerfd_settime(result)",
ARG3, sizeof(struct vki_itimerspec));
if (ARG4)
{
PRE_MEM_WRITE("timerfd_settime(result)",
ARG4, sizeof(struct vki_itimerspec));
}
}
}
POST(sys_timerfd_settime)
{
if (RES == 0 && ARG4 != 0)
POST_MEM_WRITE(ARG4, sizeof(struct vki_itimerspec));
}
/* ---------------------------------------------------------------------
capabilities wrappers
------------------------------------------------------------------ */
PRE(sys_capget)
{
PRINT("sys_capget ( %#lx, %#lx )", ARG1, ARG2 );
PRE_REG_READ2(long, "capget",
vki_cap_user_header_t, header, vki_cap_user_data_t, data);
PRE_MEM_READ( "capget(header)", ARG1,
sizeof(struct __vki_user_cap_header_struct) );
if (ARG2 != (Addr)NULL)
PRE_MEM_WRITE( "capget(data)", ARG2,
sizeof(struct __vki_user_cap_data_struct) );
}
POST(sys_capget)
{
if (ARG2 != (Addr)NULL)
POST_MEM_WRITE( ARG2, sizeof(struct __vki_user_cap_data_struct) );
}
PRE(sys_capset)
{
PRINT("sys_capset ( %#lx, %#lx )", ARG1, ARG2 );
PRE_REG_READ2(long, "capset",
vki_cap_user_header_t, header,
const vki_cap_user_data_t, data);
PRE_MEM_READ( "capset(header)",
ARG1, sizeof(struct __vki_user_cap_header_struct) );
PRE_MEM_READ( "capset(data)",
ARG2, sizeof(struct __vki_user_cap_data_struct) );
}
/* ---------------------------------------------------------------------
16-bit uid/gid/groups wrappers
------------------------------------------------------------------ */
PRE(sys_getuid16)
{
PRINT("sys_getuid16 ( )");
PRE_REG_READ0(long, "getuid16");
}
PRE(sys_setuid16)
{
PRINT("sys_setuid16 ( %ld )", ARG1);
PRE_REG_READ1(long, "setuid16", vki_old_uid_t, uid);
}
PRE(sys_getgid16)
{
PRINT("sys_getgid16 ( )");
PRE_REG_READ0(long, "getgid16");
}
PRE(sys_setgid16)
{
PRINT("sys_setgid16 ( %ld )", ARG1);
PRE_REG_READ1(long, "setgid16", vki_old_gid_t, gid);
}
PRE(sys_geteuid16)
{
PRINT("sys_geteuid16 ( )");
PRE_REG_READ0(long, "geteuid16");
}
PRE(sys_getegid16)
{
PRINT("sys_getegid16 ( )");
PRE_REG_READ0(long, "getegid16");
}
PRE(sys_setreuid16)
{
PRINT("setreuid16 ( 0x%lx, 0x%lx )", ARG1, ARG2);
PRE_REG_READ2(long, "setreuid16", vki_old_uid_t, ruid, vki_old_uid_t, euid);
}
PRE(sys_setregid16)
{
PRINT("sys_setregid16 ( %ld, %ld )", ARG1, ARG2);
PRE_REG_READ2(long, "setregid16", vki_old_gid_t, rgid, vki_old_gid_t, egid);
}
PRE(sys_getgroups16)
{
PRINT("sys_getgroups16 ( %ld, %#lx )", ARG1, ARG2);
PRE_REG_READ2(long, "getgroups16", int, size, vki_old_gid_t *, list);
if (ARG1 > 0)
PRE_MEM_WRITE( "getgroups16(list)", ARG2, ARG1 * sizeof(vki_old_gid_t) );
}
POST(sys_getgroups16)
{
vg_assert(SUCCESS);
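/* RES is the number of group entries the kernel wrote to the list. */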
if (ARG1 > 0 && RES > 0)
POST_MEM_WRITE( ARG2, RES * sizeof(vki_old_gid_t) );
}
PRE(sys_setgroups16)
{
PRINT("sys_setgroups16 ( %llu, %#lx )", (ULong)ARG1, ARG2);
PRE_REG_READ2(long, "setgroups16", int, size, vki_old_gid_t *, list);
if (ARG1 > 0)
PRE_MEM_READ( "setgroups16(list)", ARG2, ARG1 * sizeof(vki_old_gid_t) );
}
/* ---------------------------------------------------------------------
*chown16 wrappers
------------------------------------------------------------------ */
PRE(sys_chown16)
{
PRINT("sys_chown16 ( %#lx, 0x%lx, 0x%lx )", ARG1,ARG2,ARG3);
PRE_REG_READ3(long, "chown16",
const char *, path,
vki_old_uid_t, owner, vki_old_gid_t, group);
PRE_MEM_RASCIIZ( "chown16(path)", ARG1 );
}
PRE(sys_fchown16)
{
PRINT("sys_fchown16 ( %ld, %ld, %ld )", ARG1,ARG2,ARG3);
PRE_REG_READ3(long, "fchown16",
unsigned int, fd, vki_old_uid_t, owner, vki_old_gid_t, group);
}
/* ---------------------------------------------------------------------
*xattr wrappers
------------------------------------------------------------------ */
PRE(sys_setxattr)
{
*flags |= SfMayBlock;
PRINT("sys_setxattr ( %#lx, %#lx, %#lx, %llu, %ld )",
ARG1, ARG2, ARG3, (ULong)ARG4, ARG5);
PRE_REG_READ5(long, "setxattr",
char *, path, char *, name,
void *, value, vki_size_t, size, int, flags);
PRE_MEM_RASCIIZ( "setxattr(path)", ARG1 );
PRE_MEM_RASCIIZ( "setxattr(name)", ARG2 );
PRE_MEM_READ( "setxattr(value)", ARG3, ARG4 );
}
PRE(sys_lsetxattr)
{
*flags |= SfMayBlock;
PRINT("sys_lsetxattr ( %#lx, %#lx, %#lx, %llu, %ld )",
ARG1, ARG2, ARG3, (ULong)ARG4, ARG5);
PRE_REG_READ5(long, "lsetxattr",
char *, path, char *, name,
void *, value, vki_size_t, size, int, flags);
PRE_MEM_RASCIIZ( "lsetxattr(path)", ARG1 );
PRE_MEM_RASCIIZ( "lsetxattr(name)", ARG2 );
PRE_MEM_READ( "lsetxattr(value)", ARG3, ARG4 );
}
PRE(sys_fsetxattr)
{
*flags |= SfMayBlock;
PRINT("sys_fsetxattr ( %ld, %#lx, %#lx, %llu, %ld )",
ARG1, ARG2, ARG3, (ULong)ARG4, ARG5);
PRE_REG_READ5(long, "fsetxattr",
int, fd, char *, name, void *, value,
vki_size_t, size, int, flags);
PRE_MEM_RASCIIZ( "fsetxattr(name)", ARG2 );
PRE_MEM_READ( "fsetxattr(value)", ARG3, ARG4 );
}
PRE(sys_getxattr)
{
*flags |= SfMayBlock;
PRINT("sys_getxattr ( %#lx, %#lx, %#lx, %llu )", ARG1,ARG2,ARG3, (ULong)ARG4);
PRE_REG_READ4(ssize_t, "getxattr",
char *, path, char *, name, void *, value, vki_size_t, size);
PRE_MEM_RASCIIZ( "getxattr(path)", ARG1 );
PRE_MEM_RASCIIZ( "getxattr(name)", ARG2 );
PRE_MEM_WRITE( "getxattr(value)", ARG3, ARG4 );
}
POST(sys_getxattr)
{
vg_assert(SUCCESS);
if (RES > 0 && ARG3 != (Addr)NULL) {
POST_MEM_WRITE( ARG3, RES );
}
}
PRE(sys_lgetxattr)
{
*flags |= SfMayBlock;
PRINT("sys_lgetxattr ( %#lx, %#lx, %#lx, %llu )", ARG1,ARG2,ARG3, (ULong)ARG4);
PRE_REG_READ4(ssize_t, "lgetxattr",
char *, path, char *, name, void *, value, vki_size_t, size);
PRE_MEM_RASCIIZ( "lgetxattr(path)", ARG1 );
PRE_MEM_RASCIIZ( "lgetxattr(name)", ARG2 );
PRE_MEM_WRITE( "lgetxattr(value)", ARG3, ARG4 );
}
POST(sys_lgetxattr)
{
vg_assert(SUCCESS);
if (RES > 0 && ARG3 != (Addr)NULL) {
POST_MEM_WRITE( ARG3, RES );
}
}
PRE(sys_fgetxattr)
{
*flags |= SfMayBlock;
PRINT("sys_fgetxattr ( %ld, %#lx, %#lx, %llu )", ARG1, ARG2, ARG3, (ULong)ARG4);
PRE_REG_READ4(ssize_t, "fgetxattr",
int, fd, char *, name, void *, value, vki_size_t, size);
PRE_MEM_RASCIIZ( "fgetxattr(name)", ARG2 );
PRE_MEM_WRITE( "fgetxattr(value)", ARG3, ARG4 );
}
POST(sys_fgetxattr)
{
if (RES > 0 && ARG3 != (Addr)NULL)
POST_MEM_WRITE( ARG3, RES );
}
PRE(sys_listxattr)
{
*flags |= SfMayBlock;
PRINT("sys_listxattr ( %#lx, %#lx, %llu )", ARG1, ARG2, (ULong)ARG3);
PRE_REG_READ3(ssize_t, "listxattr",
char *, path, char *, list, vki_size_t, size);
PRE_MEM_RASCIIZ( "listxattr(path)", ARG1 );
PRE_MEM_WRITE( "listxattr(list)", ARG2, ARG3 );
}
POST(sys_listxattr)
{
if (RES > 0 && ARG2 != (Addr)NULL)
POST_MEM_WRITE( ARG2, RES );
}
PRE(sys_llistxattr)
{
*flags |= SfMayBlock;
PRINT("sys_llistxattr ( %#lx, %#lx, %llu )", ARG1, ARG2, (ULong)ARG3);
PRE_REG_READ3(ssize_t, "llistxattr",