/* blob: 1c1983c8b67eaace6503166db500d7b2d3e6c2fe [file] [log] [blame] */
/*
* Linux syscalls
*
* Copyright (c) 2003 Fabrice Bellard
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#define _ATFILE_SOURCE
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/path.h"
#include <elf.h>
#include <endian.h>
#include <grp.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/wait.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/swap.h>
#include <linux/capability.h>
#include <sched.h>
#include <sys/timex.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/uio.h>
#include <poll.h>
#include <sys/times.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/statfs.h>
#include <utime.h>
#include <sys/sysinfo.h>
#include <sys/signalfd.h>
//#include <sys/user.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/errqueue.h>
#include <linux/random.h>
#include "qemu-common.h"
#ifdef CONFIG_TIMERFD
#include <sys/timerfd.h>
#endif
#ifdef TARGET_GPROF
#include <sys/gmon.h>
#endif
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif
#ifdef CONFIG_EPOLL
#include <sys/epoll.h>
#endif
#ifdef CONFIG_ATTR
#include "qemu/xattr.h"
#endif
#ifdef CONFIG_SENDFILE
#include <sys/sendfile.h>
#endif
#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */
#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/kd.h>
#include <linux/mtio.h>
#include <linux/fs.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/fb.h>
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include <linux/route.h>
#include <linux/filter.h>
#include <linux/blkpg.h>
#include <netpacket/packet.h>
#include <linux/netlink.h>
#ifdef CONFIG_RTNETLINK
#include <linux/rtnetlink.h>
#include <linux/if_bridge.h>
#endif
#include <linux/audit.h>
#include "linux_loop.h"
#include "uname.h"
#include "qemu.h"
#ifndef CLONE_IO
#define CLONE_IO 0x80000000 /* Clone io context */
#endif
/* We can't directly call the host clone syscall, because this will
* badly confuse libc (breaking mutexes, for example). So we must
* divide clone flags into:
* * flag combinations that look like pthread_create()
* * flag combinations that look like fork()
* * flags we can implement within QEMU itself
* * flags we can't support and will return an error for
*/
/* For thread creation, all these flags must be present; for
* fork, none must be present.
*/
#define CLONE_THREAD_FLAGS \
(CLONE_VM | CLONE_FS | CLONE_FILES | \
CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
/* These flags are ignored:
* CLONE_DETACHED is now ignored by the kernel;
* CLONE_IO is just an optimisation hint to the I/O scheduler
*/
#define CLONE_IGNORED_FLAGS \
(CLONE_DETACHED | CLONE_IO)
/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS \
(CLONE_SETTLS | CLONE_PARENT_SETTID | \
CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS \
(CLONE_SETTLS | CLONE_PARENT_SETTID | \
CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
#define CLONE_INVALID_FORK_FLAGS \
(~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
#define CLONE_INVALID_THREAD_FLAGS \
(~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
CLONE_IGNORED_FLAGS))
/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
* have almost all been allocated. We cannot support any of
* CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
* CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
* The checks against the invalid thread masks above will catch these.
* (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
*/
//#define DEBUG
/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
* once. This exercises the codepaths for restart.
*/
//#define DEBUG_ERESTARTSYS
//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
/* Direct syscall(2) wrappers: _syscallN(type, name, typeN, argN...) emits a
 * static function 'name' that invokes __NR_name with N arguments and returns
 * the raw libc syscall() result (-1 with host errno on failure).  Undefine
 * any variants that system headers may have defined first. */
#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6
#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}
#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}
#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}
#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}
#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}
#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}
#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
/* Several wrappers are named sys_* to avoid clashing with the libc
 * declarations of the functions of the same name. */
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
/* These 64-bit hosts only have the plain lseek number. */
#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif
/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif
/* gettid(2): glibc historically provided no wrapper, so call it directly.
 * NOTE(review): glibc >= 2.30 declares its own gettid(); this definition may
 * clash with that declaration on newer toolchains — verify against the build
 * environment. */
#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    return -ENOSYS;
}
#endif
/* Directory enumeration: only instantiated when both the guest needs the
 * syscall and the host provides it (getdents64 is also needed as the
 * fallback when the host lacks plain getdents). */
#if defined(TARGET_NR_getdents) && defined(__NR_getdents)
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if !defined(__NR_getdents) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
/* Signal queueing, syslog, and thread/process management. */
_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
          siginfo_t *, uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(TARGET_NR_futex) && defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
/* Scheduler affinity and CPU placement. */
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
/* Miscellaneous host syscalls with no (or unusable) libc wrappers. */
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#endif
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
          unsigned long, idx1, unsigned long, idx2)
#endif
/* Bidirectional translation table between target and host open(2)/fcntl(2)
 * flag encodings.  Each row is {target_mask, target_bits, host_mask,
 * host_bits}; a zero-filled row terminates the list. */
static bitmask_transtbl fcntl_flags_tbl[] = {
    { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
    { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
    { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
    { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
    { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
    { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
    { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
    { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
    { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
    { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
    { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
    { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
    { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
    { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
    { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
    { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
    { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
    { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
    /* Don't terminate the list prematurely on 64-bit host+guest. */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
    { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
    { 0, 0, 0, 0 }
};
/* QEMU-local copy of the kernel's IFLA_BR_* bridge attribute numbering
 * (linux/if_link.h).  Carried here so the values are stable regardless of
 * how old the host kernel headers are; order must match the kernel's. */
enum {
    QEMU_IFLA_BR_UNSPEC,
    QEMU_IFLA_BR_FORWARD_DELAY,
    QEMU_IFLA_BR_HELLO_TIME,
    QEMU_IFLA_BR_MAX_AGE,
    QEMU_IFLA_BR_AGEING_TIME,
    QEMU_IFLA_BR_STP_STATE,
    QEMU_IFLA_BR_PRIORITY,
    QEMU_IFLA_BR_VLAN_FILTERING,
    QEMU_IFLA_BR_VLAN_PROTOCOL,
    QEMU_IFLA_BR_GROUP_FWD_MASK,
    QEMU_IFLA_BR_ROOT_ID,
    QEMU_IFLA_BR_BRIDGE_ID,
    QEMU_IFLA_BR_ROOT_PORT,
    QEMU_IFLA_BR_ROOT_PATH_COST,
    QEMU_IFLA_BR_TOPOLOGY_CHANGE,
    QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
    QEMU_IFLA_BR_HELLO_TIMER,
    QEMU_IFLA_BR_TCN_TIMER,
    QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER,
    QEMU_IFLA_BR_GC_TIMER,
    QEMU_IFLA_BR_GROUP_ADDR,
    QEMU_IFLA_BR_FDB_FLUSH,
    QEMU_IFLA_BR_MCAST_ROUTER,
    QEMU_IFLA_BR_MCAST_SNOOPING,
    QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR,
    QEMU_IFLA_BR_MCAST_QUERIER,
    QEMU_IFLA_BR_MCAST_HASH_ELASTICITY,
    QEMU_IFLA_BR_MCAST_HASH_MAX,
    QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT,
    QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT,
    QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL,
    QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL,
    QEMU_IFLA_BR_MCAST_QUERIER_INTVL,
    QEMU_IFLA_BR_MCAST_QUERY_INTVL,
    QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
    QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
    QEMU_IFLA_BR_NF_CALL_IPTABLES,
    QEMU_IFLA_BR_NF_CALL_IP6TABLES,
    QEMU_IFLA_BR_NF_CALL_ARPTABLES,
    QEMU_IFLA_BR_VLAN_DEFAULT_PVID,
    QEMU_IFLA_BR_PAD,
    QEMU_IFLA_BR_VLAN_STATS_ENABLED,
    QEMU_IFLA_BR_MCAST_STATS_ENABLED,
    QEMU___IFLA_BR_MAX,
};
/* QEMU-local copy of the kernel's IFLA_* link attribute numbering
 * (linux/if_link.h); order must match the kernel's. */
enum {
    QEMU_IFLA_UNSPEC,
    QEMU_IFLA_ADDRESS,
    QEMU_IFLA_BROADCAST,
    QEMU_IFLA_IFNAME,
    QEMU_IFLA_MTU,
    QEMU_IFLA_LINK,
    QEMU_IFLA_QDISC,
    QEMU_IFLA_STATS,
    QEMU_IFLA_COST,
    QEMU_IFLA_PRIORITY,
    QEMU_IFLA_MASTER,
    QEMU_IFLA_WIRELESS,
    QEMU_IFLA_PROTINFO,
    QEMU_IFLA_TXQLEN,
    QEMU_IFLA_MAP,
    QEMU_IFLA_WEIGHT,
    QEMU_IFLA_OPERSTATE,
    QEMU_IFLA_LINKMODE,
    QEMU_IFLA_LINKINFO,
    QEMU_IFLA_NET_NS_PID,
    QEMU_IFLA_IFALIAS,
    QEMU_IFLA_NUM_VF,
    QEMU_IFLA_VFINFO_LIST,
    QEMU_IFLA_STATS64,
    QEMU_IFLA_VF_PORTS,
    QEMU_IFLA_PORT_SELF,
    QEMU_IFLA_AF_SPEC,
    QEMU_IFLA_GROUP,
    QEMU_IFLA_NET_NS_FD,
    QEMU_IFLA_EXT_MASK,
    QEMU_IFLA_PROMISCUITY,
    QEMU_IFLA_NUM_TX_QUEUES,
    QEMU_IFLA_NUM_RX_QUEUES,
    QEMU_IFLA_CARRIER,
    QEMU_IFLA_PHYS_PORT_ID,
    QEMU_IFLA_CARRIER_CHANGES,
    QEMU_IFLA_PHYS_SWITCH_ID,
    QEMU_IFLA_LINK_NETNSID,
    QEMU_IFLA_PHYS_PORT_NAME,
    QEMU_IFLA_PROTO_DOWN,
    QEMU_IFLA_GSO_MAX_SEGS,
    QEMU_IFLA_GSO_MAX_SIZE,
    QEMU_IFLA_PAD,
    QEMU_IFLA_XDP,
    QEMU___IFLA_MAX
};
/* QEMU-local copy of the kernel's IFLA_BRPORT_* bridge-port attribute
 * numbering (linux/if_link.h); order must match the kernel's. */
enum {
    QEMU_IFLA_BRPORT_UNSPEC,
    QEMU_IFLA_BRPORT_STATE,
    QEMU_IFLA_BRPORT_PRIORITY,
    QEMU_IFLA_BRPORT_COST,
    QEMU_IFLA_BRPORT_MODE,
    QEMU_IFLA_BRPORT_GUARD,
    QEMU_IFLA_BRPORT_PROTECT,
    QEMU_IFLA_BRPORT_FAST_LEAVE,
    QEMU_IFLA_BRPORT_LEARNING,
    QEMU_IFLA_BRPORT_UNICAST_FLOOD,
    QEMU_IFLA_BRPORT_PROXYARP,
    QEMU_IFLA_BRPORT_LEARNING_SYNC,
    QEMU_IFLA_BRPORT_PROXYARP_WIFI,
    QEMU_IFLA_BRPORT_ROOT_ID,
    QEMU_IFLA_BRPORT_BRIDGE_ID,
    QEMU_IFLA_BRPORT_DESIGNATED_PORT,
    QEMU_IFLA_BRPORT_DESIGNATED_COST,
    QEMU_IFLA_BRPORT_ID,
    QEMU_IFLA_BRPORT_NO,
    QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
    QEMU_IFLA_BRPORT_CONFIG_PENDING,
    QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER,
    QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER,
    QEMU_IFLA_BRPORT_HOLD_TIMER,
    QEMU_IFLA_BRPORT_FLUSH,
    QEMU_IFLA_BRPORT_MULTICAST_ROUTER,
    QEMU_IFLA_BRPORT_PAD,
    QEMU___IFLA_BRPORT_MAX
};
/* QEMU-local copies of IFLA_INFO_*, IFLA_INET_* and IFLA_INET6_* attribute
 * numberings (linux/if_link.h); order must match the kernel's. */
enum {
    QEMU_IFLA_INFO_UNSPEC,
    QEMU_IFLA_INFO_KIND,
    QEMU_IFLA_INFO_DATA,
    QEMU_IFLA_INFO_XSTATS,
    QEMU_IFLA_INFO_SLAVE_KIND,
    QEMU_IFLA_INFO_SLAVE_DATA,
    QEMU___IFLA_INFO_MAX,
};
enum {
    QEMU_IFLA_INET_UNSPEC,
    QEMU_IFLA_INET_CONF,
    QEMU___IFLA_INET_MAX,
};
enum {
    QEMU_IFLA_INET6_UNSPEC,
    QEMU_IFLA_INET6_FLAGS,
    QEMU_IFLA_INET6_CONF,
    QEMU_IFLA_INET6_STATS,
    QEMU_IFLA_INET6_MCAST,
    QEMU_IFLA_INET6_CACHEINFO,
    QEMU_IFLA_INET6_ICMP6STATS,
    QEMU_IFLA_INET6_TOKEN,
    QEMU_IFLA_INET6_ADDR_GEN_MODE,
    QEMU___IFLA_INET6_MAX
};
/* Per-fd translation hooks, used for fds whose payload needs byte-swapping
 * or structure conversion between guest and host (e.g. netlink, signalfd).
 * The table is a lazily-grown array indexed by host fd; a NULL entry means
 * "no translation needed". */
typedef abi_long (*TargetFdDataFunc)(void *, size_t);
typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
typedef struct TargetFdTrans {
    TargetFdDataFunc host_to_target_data; /* convert data read from host fd */
    TargetFdDataFunc target_to_host_data; /* convert data written by guest */
    TargetFdAddrFunc target_to_host_addr; /* convert sockaddr from guest */
} TargetFdTrans;
static TargetFdTrans **target_fd_trans;
static unsigned int target_fd_max;
static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
{
if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
return target_fd_trans[fd]->target_to_host_data;
}
return NULL;
}
static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
{
if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
return target_fd_trans[fd]->host_to_target_data;
}
return NULL;
}
static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
{
if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
return target_fd_trans[fd]->target_to_host_addr;
}
return NULL;
}
/* Attach translation hooks to @fd, growing the lookup table in slices of 64
 * entries; newly added slots are zeroed so they read as "no hooks".
 * NOTE(review): there is no fd >= 0 guard here.  'fd >= target_fd_max'
 * compares int against unsigned, so a negative fd would be promoted to a
 * huge value and trigger an enormous reallocation — callers are presumed to
 * pass only valid host fds; confirm at call sites. */
static void fd_trans_register(int fd, TargetFdTrans *trans)
{
    unsigned int oldmax;
    if (fd >= target_fd_max) {
        oldmax = target_fd_max;
        target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
        target_fd_trans = g_renew(TargetFdTrans *,
                                  target_fd_trans, target_fd_max);
        /* zero the newly-created tail so unregistered fds stay NULL */
        memset((void *)(target_fd_trans + oldmax), 0,
               (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
    }
    target_fd_trans[fd] = trans;
}
/* Drop any translation hooks attached to @fd; out-of-range fds are a no-op. */
static void fd_trans_unregister(int fd)
{
    if (fd < 0 || fd >= target_fd_max) {
        return;
    }
    target_fd_trans[fd] = NULL;
}
/* Propagate oldfd's translation hooks onto newfd (for dup/dup2/F_DUPFD).
 * Any hooks previously attached to newfd are always dropped first.
 * A negative oldfd is harmless: the int-vs-unsigned comparison against
 * target_fd_max promotes it to a huge value, so the branch is not taken. */
static void fd_trans_dup(int oldfd, int newfd)
{
    fd_trans_unregister(newfd);
    if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
        fd_trans_register(newfd, target_fd_trans[oldfd]);
    }
}
/* getcwd() wrapper matching the kernel syscall convention: on success the
 * buffer is filled and the length including the trailing NUL is returned;
 * on failure -1 is returned with errno set by getcwd(). */
static int sys_getcwd1(char *buf, size_t size)
{
    char *cwd = getcwd(buf, size);

    if (cwd == NULL) {
        return -1; /* errno already set by getcwd() */
    }
    return strlen(cwd) + 1;
}
/* utimensat(2): use the host syscall when available, otherwise provide a
 * stub that fails with ENOSYS so the guest sees a consistent error. */
#ifdef TARGET_NR_utimensat
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
/* renameat2(2): use the host syscall when available.  The fallback can only
 * emulate the flags==0 case (plain renameat()); any non-zero flag is
 * reported as ENOSYS. */
#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
/* 'flags' is unsigned int to match both the _syscall5 variant above and the
 * kernel prototype (previously declared int here, which was inconsistent). */
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, unsigned int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */
/* inotify wrappers: thin shims over the libc functions, compiled only when
 * both the host supports inotify and the guest ABI defines the syscalls.
 * When CONFIG_INOTIFY is absent the TARGET_NR_* defines are removed so the
 * main syscall switch falls through to ENOSYS. */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
    return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
    return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
    return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
/* prlimit64(2): invoked directly because glibc's prlimit() may translate
 * through its own rlimit structure.  __NR_prlimit64 is defined to -1 on
 * hosts that lack the syscall so the call simply fails with ENOSYS. */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maxiumum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;
/* Find and claim a free slot in g_posix_timers, returning its index, or -1
 * if all slots are in use.  The slot is claimed by writing a sentinel (1)
 * before the real timer_t is stored by the caller. */
static inline int next_free_host_timer(void)
{
    int k ;
    /* FIXME: Does finding the next free slot require a lock?
     * The test-and-claim below is not atomic, so two threads could race
     * for the same slot. */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
/* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers.
 * regpairs_aligned() returns non-zero when the guest ABI requires 64-bit
 * syscall arguments to start on an even-numbered register; @num is the
 * target syscall number, consulted only by ABIs (SH4) where the rule is
 * per-syscall. */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env, int num)
{
    /* Only the EABI calling convention aligns register pairs. */
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_SH4)
/* SH4 doesn't align register pairs, except for p{read,write}64 */
static inline int regpairs_aligned(void *cpu_env, int num)
{
    switch (num) {
    case TARGET_NR_pread64:
    case TARGET_NR_pwrite64:
        return 1;
    default:
        return 0;
    }
}
#elif defined(TARGET_XTENSA)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
#endif
/* Size bound for both errno translation tables; errno values at or above
 * this are passed through unchanged. */
#define ERRNO_TABLE_SIZE 1200
/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};
/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 * Entries left at 0 (i.e. host errnos not listed) translate to themselves.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EAGAIN]           = TARGET_EAGAIN,
    [EIDRM]            = TARGET_EIDRM,
    [ECHRNG]           = TARGET_ECHRNG,
    [EL2NSYNC]         = TARGET_EL2NSYNC,
    [EL3HLT]           = TARGET_EL3HLT,
    [EL3RST]           = TARGET_EL3RST,
    [ELNRNG]           = TARGET_ELNRNG,
    [EUNATCH]          = TARGET_EUNATCH,
    [ENOCSI]           = TARGET_ENOCSI,
    [EL2HLT]           = TARGET_EL2HLT,
    [EDEADLK]          = TARGET_EDEADLK,
    [ENOLCK]           = TARGET_ENOLCK,
    [EBADE]            = TARGET_EBADE,
    [EBADR]            = TARGET_EBADR,
    [EXFULL]           = TARGET_EXFULL,
    [ENOANO]           = TARGET_ENOANO,
    [EBADRQC]          = TARGET_EBADRQC,
    [EBADSLT]          = TARGET_EBADSLT,
    [EBFONT]           = TARGET_EBFONT,
    [ENOSTR]           = TARGET_ENOSTR,
    [ENODATA]          = TARGET_ENODATA,
    [ETIME]            = TARGET_ETIME,
    [ENOSR]            = TARGET_ENOSR,
    [ENONET]           = TARGET_ENONET,
    [ENOPKG]           = TARGET_ENOPKG,
    [EREMOTE]          = TARGET_EREMOTE,
    [ENOLINK]          = TARGET_ENOLINK,
    [EADV]             = TARGET_EADV,
    [ESRMNT]           = TARGET_ESRMNT,
    [ECOMM]            = TARGET_ECOMM,
    [EPROTO]           = TARGET_EPROTO,
    [EDOTDOT]          = TARGET_EDOTDOT,
    [EMULTIHOP]        = TARGET_EMULTIHOP,
    [EBADMSG]          = TARGET_EBADMSG,
    [ENAMETOOLONG]     = TARGET_ENAMETOOLONG,
    [EOVERFLOW]        = TARGET_EOVERFLOW,
    [ENOTUNIQ]         = TARGET_ENOTUNIQ,
    [EBADFD]           = TARGET_EBADFD,
    [EREMCHG]          = TARGET_EREMCHG,
    [ELIBACC]          = TARGET_ELIBACC,
    [ELIBBAD]          = TARGET_ELIBBAD,
    [ELIBSCN]          = TARGET_ELIBSCN,
    [ELIBMAX]          = TARGET_ELIBMAX,
    [ELIBEXEC]         = TARGET_ELIBEXEC,
    [EILSEQ]           = TARGET_EILSEQ,
    [ENOSYS]           = TARGET_ENOSYS,
    [ELOOP]            = TARGET_ELOOP,
    [ERESTART]         = TARGET_ERESTART,
    [ESTRPIPE]         = TARGET_ESTRPIPE,
    [ENOTEMPTY]        = TARGET_ENOTEMPTY,
    [EUSERS]           = TARGET_EUSERS,
    [ENOTSOCK]         = TARGET_ENOTSOCK,
    [EDESTADDRREQ]     = TARGET_EDESTADDRREQ,
    [EMSGSIZE]         = TARGET_EMSGSIZE,
    [EPROTOTYPE]       = TARGET_EPROTOTYPE,
    [ENOPROTOOPT]      = TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT]  = TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT]  = TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP]       = TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT]     = TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT]     = TARGET_EAFNOSUPPORT,
    [EADDRINUSE]       = TARGET_EADDRINUSE,
    [EADDRNOTAVAIL]    = TARGET_EADDRNOTAVAIL,
    [ENETDOWN]         = TARGET_ENETDOWN,
    [ENETUNREACH]      = TARGET_ENETUNREACH,
    [ENETRESET]        = TARGET_ENETRESET,
    [ECONNABORTED]     = TARGET_ECONNABORTED,
    [ECONNRESET]       = TARGET_ECONNRESET,
    [ENOBUFS]          = TARGET_ENOBUFS,
    [EISCONN]          = TARGET_EISCONN,
    [ENOTCONN]         = TARGET_ENOTCONN,
    [EUCLEAN]          = TARGET_EUCLEAN,
    [ENOTNAM]          = TARGET_ENOTNAM,
    [ENAVAIL]          = TARGET_ENAVAIL,
    [EISNAM]           = TARGET_EISNAM,
    [EREMOTEIO]        = TARGET_EREMOTEIO,
    [EDQUOT]           = TARGET_EDQUOT,
    [ESHUTDOWN]        = TARGET_ESHUTDOWN,
    [ETOOMANYREFS]     = TARGET_ETOOMANYREFS,
    [ETIMEDOUT]        = TARGET_ETIMEDOUT,
    [ECONNREFUSED]     = TARGET_ECONNREFUSED,
    [EHOSTDOWN]        = TARGET_EHOSTDOWN,
    [EHOSTUNREACH]     = TARGET_EHOSTUNREACH,
    [EALREADY]         = TARGET_EALREADY,
    [EINPROGRESS]      = TARGET_EINPROGRESS,
    [ESTALE]           = TARGET_ESTALE,
    [ECANCELED]        = TARGET_ECANCELED,
    [ENOMEDIUM]        = TARGET_ENOMEDIUM,
    [EMEDIUMTYPE]      = TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY]           = TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED]      = TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED]      = TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED]     = TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD]       = TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE]  = TARGET_ENOTRECOVERABLE,
#endif
#ifdef ENOMSG
    [ENOMSG]           = TARGET_ENOMSG,
#endif
/* Fixed: the guard previously tested the misspelled ERKFILL, so the ERFKILL
 * entry was never compiled in and ERFKILL passed through untranslated. */
#ifdef ERFKILL
    [ERFKILL]          = TARGET_ERFKILL,
#endif
#ifdef EHWPOISON
    [EHWPOISON]        = TARGET_EHWPOISON,
#endif
};
/* Translate a host errno value to the guest's numbering; values outside the
 * table, or with no override entry, pass through unchanged. */
static inline int host_to_target_errno(int err)
{
    int mapped = 0;

    if (err >= 0 && err < ERRNO_TABLE_SIZE) {
        mapped = host_to_target_errno_table[err];
    }
    return mapped ? mapped : err;
}
/* Translate a guest errno value to the host's numbering; values outside the
 * table, or with no override entry, pass through unchanged. */
static inline int target_to_host_errno(int err)
{
    int mapped = 0;

    if (err >= 0 && err < ERRNO_TABLE_SIZE) {
        mapped = target_to_host_errno_table[err];
    }
    return mapped ? mapped : err;
}
/* Convert a libc-style result (-1 with errno set) into the kernel-style
 * result the guest expects (negative target errno); successful results are
 * passed through unchanged. */
static inline abi_long get_errno(abi_long ret)
{
    return ret == -1 ? -host_to_target_errno(errno) : ret;
}
/* Kernel convention: return values in [-4095, -1] encode errnos; anything
 * else is a successful result (mirrors the kernel's IS_ERR_VALUE). */
static inline int is_error(abi_long ret)
{
    return (abi_ulong)ret >= (abi_ulong)(-4096);
}
/* Human-readable description of a target errno value, for logging and
 * strace output.  Handles QEMU's two pseudo-errnos specially; returns NULL
 * for values outside the translatable range. */
const char *target_strerror(int err)
{
    if (err == TARGET_ERESTARTSYS) {
        return "To be restarted";
    }
    if (err == TARGET_QEMU_ESIGRETURN) {
        return "Successful exit from sigreturn";
    }
    if (err < 0 || err >= ERRNO_TABLE_SIZE) {
        return NULL;
    }
    return strerror(target_to_host_errno(err));
}
/* safe_syscallN(type, name, ...) emits a static function safe_name() that
 * dispatches through safe_syscall() (defined elsewhere) instead of libc
 * syscall(), so blocking calls can be interrupted and restarted for guest
 * signal delivery — see the DEBUG_ERESTARTSYS note near the top of the
 * file. */
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}
#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}
#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}
#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}
#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}
#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}
#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
/* Restartable wrappers for every potentially-blocking syscall used below. */
/* File and process I/O. */
safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
    int, flags, mode_t, mode)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
    struct rusage *, rusage)
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
    int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
/* fd multiplexing. */
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
    fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
/* Synchronisation and signals. */
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
    const struct timespec *,timeout,int *,uaddr2,int,val3)
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
/* Vectored and positioned I/O. */
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
/* Sockets. */
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
/* Sleeping. */
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#ifdef TARGET_NR_clock_nanosleep
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
/* SysV IPC: hosts either expose the individual syscalls or a single ipc(2)
 * multiplexer.  Either way the rest of the file calls safe_msgsnd(),
 * safe_msgrcv() and safe_semtimedop(). */
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#else
/* This host kernel architecture uses a single ipc syscall; fake up
 * wrappers for the sub-operations to hide this implementation detail.
 * Annoyingly we can't include linux/ipc.h to get the constant definitions
 * for the call parameter because some structs in there conflict with the
 * sys/ipc.h ones. So we just define them here, and rely on them being
 * the same for all host architectures.
 */
#define Q_SEMTIMEDOP 4
#define Q_MSGSND 11
#define Q_MSGRCV 12
#define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
{
    return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
}
static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
{
    /* version 1 of MSGRCV passes msgtype in the 'fifth' slot directly */
    return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
}
static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
                           const struct timespec *timeout)
{
    return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
                    (long)timeout);
}
#endif
/* POSIX message queue blocking operations. */
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
/* Convert a host socket type value (base type plus SOCK_CLOEXEC /
 * SOCK_NONBLOCK modifier bits) to the guest encoding.  Unrecognised base
 * types are passed through unchanged. */
static inline int host_to_target_sock_type(int host_type)
{
    int base = host_type & 0xf; /* SOCK_TYPE_MASK */
    int target_type;

    if (base == SOCK_DGRAM) {
        target_type = TARGET_SOCK_DGRAM;
    } else if (base == SOCK_STREAM) {
        target_type = TARGET_SOCK_STREAM;
    } else {
        target_type = base;
    }
#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif
#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif
    return target_type;
}
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;
void target_set_brk(abi_ulong new_brk)
{
target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
brk_page = HOST_PAGE_ALIGN(target_brk);
}
//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
/* Tracing hook for the brk emulation; expands to nothing by default
 * (swap in the commented-out definition above to enable it). */
#define DEBUGF_BRK(message, args...)

/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    /* brk(0) is a query: report the current break unchanged. */
    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    /* The break is never allowed below its initial value. */
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done... */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages. */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contains garbage data due to a previous heap usage (grown
         * then shrunken). */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value. */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
/* Unpack a guest fd_set bitmap covering @n descriptors into a host
 * fd_set.  Returns 0 on success or -TARGET_EFAULT if the guest memory
 * cannot be read.
 */
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    abi_ulong *target_fds;
    int word, bit, fd = 0;

    target_fds = lock_user(VERIFY_READ, target_fds_addr,
                           sizeof(abi_ulong) * nw, 1);
    if (!target_fds) {
        return -TARGET_EFAULT;
    }

    FD_ZERO(fds);
    for (word = 0; word < nw; word++) {
        abi_ulong bits;

        /* Fetch one guest word, then test each bit within it. */
        __get_user(bits, &target_fds[word]);
        for (bit = 0; bit < TARGET_ABI_BITS; bit++, fd++) {
            if ((bits >> bit) & 1) {
                FD_SET(fd, fds);
            }
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);
    return 0;
}
static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
abi_ulong target_fds_addr,
int n)
{
if (target_fds_addr) {
if (copy_from_user_fdset(fds, target_fds_addr, n))
return -TARGET_EFAULT;
*fds_ptr = fds;
} else {
*fds_ptr = NULL;
}
return 0;
}
/* Pack a host fd_set back into the guest's fd_set bitmap layout.
 * Returns 0 on success or -TARGET_EFAULT if guest memory is unwritable.
 */
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    /* The accumulator must be unsigned: the top bit of each word is
     * descriptor data, not a sign bit (previously this was abi_long,
     * mixing signed and unsigned in the OR below). */
    abi_ulong v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0))) {
        return -TARGET_EFAULT;
    }

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
    return 0;
}
/* Ticks-per-second the host kernel reports clock_t values in. */
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

/* Rescale a clock_t tick count from the host's HZ to the target's HZ.
 * The 64-bit intermediate avoids overflow during the multiply. */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
/* Copy a host struct rusage out to guest memory at @target_addr,
 * byte-swapping every field for the target.  Returns 0 or
 * -TARGET_EFAULT.
 */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    /* Every field is a target long; swap each one individually. */
#define COPY_RU_FIELD(f) (target_rusage->f = tswapal(rusage->f))
    COPY_RU_FIELD(ru_utime.tv_sec);
    COPY_RU_FIELD(ru_utime.tv_usec);
    COPY_RU_FIELD(ru_stime.tv_sec);
    COPY_RU_FIELD(ru_stime.tv_usec);
    COPY_RU_FIELD(ru_maxrss);
    COPY_RU_FIELD(ru_ixrss);
    COPY_RU_FIELD(ru_idrss);
    COPY_RU_FIELD(ru_isrss);
    COPY_RU_FIELD(ru_minflt);
    COPY_RU_FIELD(ru_majflt);
    COPY_RU_FIELD(ru_nswap);
    COPY_RU_FIELD(ru_inblock);
    COPY_RU_FIELD(ru_oublock);
    COPY_RU_FIELD(ru_msgsnd);
    COPY_RU_FIELD(ru_msgrcv);
    COPY_RU_FIELD(ru_nsignals);
    COPY_RU_FIELD(ru_nvcsw);
    COPY_RU_FIELD(ru_nivcsw);
#undef COPY_RU_FIELD

    unlock_user_struct(target_rusage, target_addr, 1);
    return 0;
}
/* Convert a guest rlimit value to host representation.  Guest infinity,
 * and any value that does not survive the round trip through the host
 * rlim_t, becomes RLIM_INFINITY.
 */
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong swapped = tswapal(target_rlim);
    rlim_t host_val;

    if (swapped == TARGET_RLIM_INFINITY) {
        return RLIM_INFINITY;
    }

    host_val = swapped;
    if (swapped != (rlim_t)host_val) {
        /* Value does not fit in the host type: treat as unlimited. */
        return RLIM_INFINITY;
    }
    return host_val;
}
/* Convert a host rlimit value to the guest's byte order and width.
 * Host infinity, and anything too large for the target's signed range,
 * is reported as TARGET_RLIM_INFINITY.
 */
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_val;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim) {
        target_val = TARGET_RLIM_INFINITY;
    } else {
        target_val = rlim;
    }
    return tswapal(target_val);
}
/* Map a target RLIMIT_* code onto the host's numbering.  Codes we do
 * not recognise are passed through unchanged, matching the previous
 * switch's default case.
 */
static inline int target_to_host_resource(int code)
{
    static const struct {
        int target;
        int host;
    } rlimit_map[] = {
        { TARGET_RLIMIT_AS,         RLIMIT_AS },
        { TARGET_RLIMIT_CORE,       RLIMIT_CORE },
        { TARGET_RLIMIT_CPU,        RLIMIT_CPU },
        { TARGET_RLIMIT_DATA,       RLIMIT_DATA },
        { TARGET_RLIMIT_FSIZE,      RLIMIT_FSIZE },
        { TARGET_RLIMIT_LOCKS,      RLIMIT_LOCKS },
        { TARGET_RLIMIT_MEMLOCK,    RLIMIT_MEMLOCK },
        { TARGET_RLIMIT_MSGQUEUE,   RLIMIT_MSGQUEUE },
        { TARGET_RLIMIT_NICE,       RLIMIT_NICE },
        { TARGET_RLIMIT_NOFILE,     RLIMIT_NOFILE },
        { TARGET_RLIMIT_NPROC,      RLIMIT_NPROC },
        { TARGET_RLIMIT_RSS,        RLIMIT_RSS },
        { TARGET_RLIMIT_RTPRIO,     RLIMIT_RTPRIO },
        { TARGET_RLIMIT_SIGPENDING, RLIMIT_SIGPENDING },
        { TARGET_RLIMIT_STACK,      RLIMIT_STACK },
    };
    size_t i;

    for (i = 0; i < ARRAY_SIZE(rlimit_map); i++) {
        if (rlimit_map[i].target == code) {
            return rlimit_map[i].host;
        }
    }
    return code;
}
/* Read a guest struct timeval into host form.  Returns 0 or
 * -TARGET_EFAULT. */
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *ttv;

    if (!lock_user_struct(VERIFY_READ, ttv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &ttv->tv_sec);
    __get_user(tv->tv_usec, &ttv->tv_usec);

    unlock_user_struct(ttv, target_tv_addr, 0);
    return 0;
}
/* Write a host struct timeval out to guest memory.  Returns 0 or
 * -TARGET_EFAULT. */
static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *ttv;

    if (!lock_user_struct(VERIFY_WRITE, ttv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &ttv->tv_sec);
    __put_user(tv->tv_usec, &ttv->tv_usec);

    unlock_user_struct(ttv, target_tv_addr, 1);
    return 0;
}
/* Read a guest struct timezone into host form.  Returns 0 or
 * -TARGET_EFAULT. */
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *ttz;

    if (!lock_user_struct(VERIFY_READ, ttz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &ttz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &ttz->tz_dsttime);

    unlock_user_struct(ttz, target_tz_addr, 0);
    return 0;
}
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>
/* Read a guest struct mq_attr into host form.  Returns 0 or
 * -TARGET_EFAULT. */
static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *tattr;

    if (!lock_user_struct(VERIFY_READ, tattr, target_mq_attr_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(attr->mq_flags, &tattr->mq_flags);
    __get_user(attr->mq_maxmsg, &tattr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &tattr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &tattr->mq_curmsgs);

    unlock_user_struct(tattr, target_mq_attr_addr, 0);
    return 0;
}
/* Write a host struct mq_attr out to guest memory.  Returns 0 or
 * -TARGET_EFAULT. */
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *tattr;

    if (!lock_user_struct(VERIFY_WRITE, tattr, target_mq_attr_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(attr->mq_flags, &tattr->mq_flags);
    __put_user(attr->mq_maxmsg, &tattr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &tattr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &tattr->mq_curmsgs);

    unlock_user_struct(tattr, target_mq_attr_addr, 1);
    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos.
 * Emulated on top of pselect6 so the timeout survives as a timespec;
 * on success the modified fd sets and remaining time are copied back
 * to the guest, mirroring Linux select() semantics.
 */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /* Pull in the three guest fd sets (read, write, except); a fault
     * in any of them aborts the whole call. */
    if ((ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n)) != 0 ||
        (ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n)) != 0 ||
        (ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n)) != 0) {
        return ret;
    }

    /* select()'s timeval becomes pselect6's timespec. */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr)) {
            return -TARGET_EFAULT;
        }
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        /* Write back the (possibly modified) sets and remaining time. */
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
            return -TARGET_EFAULT;
        }
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
            return -TARGET_EFAULT;
        }
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
            return -TARGET_EFAULT;
        }

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
#if defined(TARGET_WANT_OLD_SYS_SELECT)
/* The old sys_select ABI passes a single guest pointer to a block
 * containing all five arguments; unpack it and forward to do_select().
 */
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *args;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, args, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(args->n);
    inp = tswapal(args->inp);
    outp = tswapal(args->outp);
    exp = tswapal(args->exp);
    tvp = tswapal(args->tvp);

    unlock_user_struct(args, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#endif
/* Create a pipe with flags via pipe2(2).  Returns the raw host result
 * (-1 with errno set on failure), or -ENOSYS when the host has no
 * pipe2() support compiled in. */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}
/* Emulate pipe()/pipe2() for the guest.  @pipedes is the guest address
 * of the two-int result array; @is_pipe2 distinguishes the two entry
 * points because several targets return the original pipe's fds in
 * registers instead of through memory. */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        /* Second fd goes in a4; first is the syscall return value. */
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    /* Everyone else gets the fds written to guest memory. */
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
/* Convert a guest ip_mreq/ip_mreqn (IP_ADD_MEMBERSHIP and friends).
 * The multicast and interface addresses are already in network byte
 * order and copy straight across; only the optional ifindex (present
 * when the guest passed a full ip_mreqn) needs byte-swapping.
 */
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *tmreqn;

    tmreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!tmreqn) {
        return -TARGET_EFAULT;
    }

    mreqn->imr_multiaddr.s_addr = tmreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = tmreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn)) {
        mreqn->imr_ifindex = tswapal(tmreqn->imr_ifindex);
    }

    unlock_user(tmreqn, target_addr, 0);
    return 0;
}
/* Copy a sockaddr from guest memory into host form for use with @fd.
 * Handles the AF_UNIX unterminated-sun_path quirk and byte-swaps the
 * AF_NETLINK / AF_PACKET fields that are kept in host byte order.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    /* Some fd types register their own address converter; delegate
     * entirely when one is present. */
    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;
    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
/* Copy a host sockaddr out to guest memory, converting the fields that
 * live in host byte order (AF_NETLINK pid/groups, AF_PACKET ifindex and
 * hatype, AF_INET6 scope id).  Fields defined to be in network byte
 * order (ports, IP addresses) go through the memcpy unchanged.
 * Returns 0 or -TARGET_EFAULT; a zero @len is a successful no-op. */
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    /* The kernel may hand back a truncated sockaddr; only patch the
     * family field if it was copied in full. */
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
        struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
            (struct target_sockaddr_in6 *)target_saddr;
        /* sin6_scope_id is a 32-bit field (see ipv6(7)); the previous
         * tswap16() truncated it to 16 bits on every host. */
        target_in6->sin6_scope_id = tswap32(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
/* Convert the ancillary (control) data of a guest sendmsg() into the
 * host msghdr, whose control buffer the caller has already allocated.
 * Supported payloads: SCM_RIGHTS fd arrays and SCM_CREDENTIALS; any
 * other type is copied through unconverted with a warning.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length: guest cmsg_len minus the guest header size. */
        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            gemu_log("Host cmsg overflow\n");
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* An array of 32-bit file descriptors. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
/* Convert the ancillary (control) data of a host recvmsg() back into
 * the guest msghdr.  Truncation here is the guest's problem and is
 * reported via MSG_CTRUNC, mirroring the kernel's put_cmsg().
 * Returns 0 or -TARGET_EFAULT.
 *
 * Bug fix: the tgt_len computation below used to let "case SOL_SOCKET:"
 * fall out of its inner switch straight into the outer "default:", so
 * "tgt_len = len" unconditionally clobbered the SO_TIMESTAMP value of
 * tgt_len.  The inner switch now has its own default and a break.
 */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);
        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                tgt_len = len;
                break;
            }
            break;
        default:
            tgt_len = len;
            break;
        }

        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                struct errhdr_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                struct errhdr6_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
/* Byte-swap every field of a netlink message header in place. */
static void tswap_nlmsghdr(struct nlmsghdr *hdr)
{
    hdr->nlmsg_len = tswap32(hdr->nlmsg_len);
    hdr->nlmsg_type = tswap16(hdr->nlmsg_type);
    hdr->nlmsg_flags = tswap16(hdr->nlmsg_flags);
    hdr->nlmsg_seq = tswap32(hdr->nlmsg_seq);
    hdr->nlmsg_pid = tswap32(hdr->nlmsg_pid);
}
/* Walk the netlink messages in a host buffer of @len bytes, calling
 * @host_to_target_nlmsg to convert each payload while the header is
 * still in host byte order, then swapping the header for the target.
 * NLMSG_DONE and NLMSG_ERROR terminate the walk (end of a multipart
 * stream).  Returns 0 or the first negative error from the callback. */
static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*host_to_target_nlmsg)
                                              (struct nlmsghdr *))
{
    uint32_t nlmsg_len;
    abi_long ret;

    while (len > sizeof(struct nlmsghdr)) {
        /* Save the host-order length: nlh->nlmsg_len is byteswapped
         * below, before we advance to the next message. */
        nlmsg_len = nlh->nlmsg_len;
        if (nlmsg_len < sizeof(struct nlmsghdr) ||
            nlmsg_len > len) {
            break;
        }

        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            tswap_nlmsghdr(nlh);
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            tswap_nlmsghdr(&e->msg);
            tswap_nlmsghdr(nlh);
            return 0;
        }
        default:
            ret = host_to_target_nlmsg(nlh);
            if (ret < 0) {
                tswap_nlmsghdr(nlh);
                return ret;
            }
            break;
        }
        tswap_nlmsghdr(nlh);
        len -= NLMSG_ALIGN(nlmsg_len);
        nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
    }
    return 0;
}
/* Walk the netlink messages in a guest buffer of @len bytes: first
 * byte-swap each header to host order, then let @target_to_host_nlmsg
 * convert the payload.  NLMSG_DONE and NLMSG_ERROR terminate the walk.
 * Returns 0 or the first negative error from the callback. */
static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*target_to_host_nlmsg)
                                              (struct nlmsghdr *))
{
    int ret;

    while (len > sizeof(struct nlmsghdr)) {
        /* Validate the length while it is still in target byte order. */
        if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
            tswap32(nlh->nlmsg_len) > len) {
            break;
        }
        tswap_nlmsghdr(nlh);
        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            tswap_nlmsghdr(&e->msg);
            return 0;
        }
        default:
            ret = target_to_host_nlmsg(nlh);
            if (ret < 0) {
                return ret;
            }
        }
        /* Header is in host order now, so nlmsg_len can be used as-is. */
        len -= NLMSG_ALIGN(nlh->nlmsg_len);
        nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
    }
    return 0;
}
#ifdef CONFIG_RTNETLINK
/* Walk a run of netlink attributes (host byte order) spanning @len
 * bytes, calling @host_to_target_nlattr on each payload before the
 * attribute header is byte-swapped for the target.  @context is passed
 * through to the callback (used e.g. by the linkinfo parser).
 * Returns 0 or the first negative error from the callback. */
static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
                                               size_t len, void *context,
                                               abi_long (*host_to_target_nlattr)
                                               (struct nlattr *,
                                                void *context))
{
    unsigned short nla_len;
    abi_long ret;

    while (len > sizeof(struct nlattr)) {
        /* Save the host-order length before the header gets swapped. */
        nla_len = nlattr->nla_len;
        if (nla_len < sizeof(struct nlattr) ||
            nla_len > len) {
            break;
        }
        ret = host_to_target_nlattr(nlattr, context);
        nlattr->nla_len = tswap16(nlattr->nla_len);
        nlattr->nla_type = tswap16(nlattr->nla_type);
        if (ret < 0) {
            return ret;
        }
        len -= NLA_ALIGN(nla_len);
        nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
    }
    return 0;
}
/* Walk a run of rtnetlink attributes (host byte order) spanning @len
 * bytes, converting each payload via @host_to_target_rtattr and then
 * byte-swapping the attribute header for the target.
 * Returns 0 or the first negative error from the callback. */
static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
                                               size_t len,
                                               abi_long (*host_to_target_rtattr)
                                               (struct rtattr *))
{
    while (len > sizeof(struct rtattr)) {
        /* Keep the host-order length: the header is swapped below,
         * before we step to the next attribute. */
        unsigned short rta_len = rtattr->rta_len;
        abi_long ret;

        if (rta_len < sizeof(struct rtattr) || rta_len > len) {
            break;
        }

        /* Convert the payload first - the callback needs host-order
         * header fields to know what it is looking at. */
        ret = host_to_target_rtattr(rtattr);
        rtattr->rta_len = tswap16(rtattr->rta_len);
        rtattr->rta_type = tswap16(rtattr->rta_type);
        if (ret < 0) {
            return ret;
        }

        len -= RTA_ALIGN(rta_len);
        rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
    }
    return 0;
}
/* Pointer to the payload of a netlink attribute (header skipped).
 * The byte offset is applied to the char * BEFORE the cast back to
 * void *, so the arithmetic is well-defined ISO C; the old grouping
 * added NLA_HDRLEN to a void *, a GNU extension (same result on GCC). */
#define NLA_DATA(nla) ((void *)((char *)(nla) + NLA_HDRLEN))
/* Byte-swap the payload of one IFLA_BR_* (bridge device) attribute in
 * place, according to the payload width that attribute carries.
 * 8-bit and raw-binary payloads need no conversion.  Unknown types are
 * logged and left untouched.  Always returns 0. */
static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
                                                  void *context)
{
    uint16_t *u16;
    uint32_t *u32;
    uint64_t *u64;

    switch (nlattr->nla_type) {
    /* no data */
    case QEMU_IFLA_BR_FDB_FLUSH:
        break;
    /* binary */
    case QEMU_IFLA_BR_GROUP_ADDR:
        break;
    /* uint8_t */
    case QEMU_IFLA_BR_VLAN_FILTERING:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
    case QEMU_IFLA_BR_MCAST_ROUTER:
    case QEMU_IFLA_BR_MCAST_SNOOPING:
    case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
    case QEMU_IFLA_BR_MCAST_QUERIER:
    case QEMU_IFLA_BR_NF_CALL_IPTABLES:
    case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
    case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
        break;
    /* uint16_t */
    case QEMU_IFLA_BR_PRIORITY:
    case QEMU_IFLA_BR_VLAN_PROTOCOL:
    case QEMU_IFLA_BR_GROUP_FWD_MASK:
    case QEMU_IFLA_BR_ROOT_PORT:
    case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
        u16 = NLA_DATA(nlattr);
        *u16 = tswap16(*u16);
        break;
    /* uint32_t */
    case QEMU_IFLA_BR_FORWARD_DELAY:
    case QEMU_IFLA_BR_HELLO_TIME:
    case QEMU_IFLA_BR_MAX_AGE:
    case QEMU_IFLA_BR_AGEING_TIME:
    case QEMU_IFLA_BR_STP_STATE:
    case QEMU_IFLA_BR_ROOT_PATH_COST:
    case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
    case QEMU_IFLA_BR_MCAST_HASH_MAX:
    case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
    case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint64_t */
    case QEMU_IFLA_BR_HELLO_TIMER:
    case QEMU_IFLA_BR_TCN_TIMER:
    case QEMU_IFLA_BR_GC_TIMER:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
    case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
    case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
    case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
        u64 = NLA_DATA(nlattr);
        *u64 = tswap64(*u64);
        break;
    /* ifla_bridge_id: uint8_t[] */
    case QEMU_IFLA_BR_ROOT_ID:
    case QEMU_IFLA_BR_BRIDGE_ID:
        break;
    default:
        gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}
/* Byte-swap the payload of one IFLA_BRPORT_* (bridge port) attribute
 * in place, according to the payload width that attribute carries.
 * Unknown types are logged and left untouched.  Always returns 0. */
static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
                                                        void *context)
{
    uint16_t *u16;
    uint32_t *u32;
    uint64_t *u64;

    switch (nlattr->nla_type) {
    /* uint8_t */
    case QEMU_IFLA_BRPORT_STATE:
    case QEMU_IFLA_BRPORT_MODE:
    case QEMU_IFLA_BRPORT_GUARD:
    case QEMU_IFLA_BRPORT_PROTECT:
    case QEMU_IFLA_BRPORT_FAST_LEAVE:
    case QEMU_IFLA_BRPORT_LEARNING:
    case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
    case QEMU_IFLA_BRPORT_PROXYARP:
    case QEMU_IFLA_BRPORT_LEARNING_SYNC:
    case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
    case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
    case QEMU_IFLA_BRPORT_CONFIG_PENDING:
    case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
        break;
    /* uint16_t */
    case QEMU_IFLA_BRPORT_PRIORITY:
    case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
    case QEMU_IFLA_BRPORT_DESIGNATED_COST:
    case QEMU_IFLA_BRPORT_ID:
    case QEMU_IFLA_BRPORT_NO:
        u16 = NLA_DATA(nlattr);
        *u16 = tswap16(*u16);
        break;
    /* uint32_t */
    case QEMU_IFLA_BRPORT_COST:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint64_t */
    case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
    case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
    case QEMU_IFLA_BRPORT_HOLD_TIMER:
        u64 = NLA_DATA(nlattr);
        *u64 = tswap64(*u64);
        break;
    /* ifla_bridge_id: uint8_t[] */
    case QEMU_IFLA_BRPORT_ROOT_ID:
    case QEMU_IFLA_BRPORT_BRIDGE_ID:
        break;
    default:
        gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}
struct linkinfo_context {
int len;
char *name;
int slave_len;
char *slave_name;
};
/* Convert one attribute inside an IFLA_LINKINFO nest.  The KIND string
 * attributes record pointers into the buffer in @context; the nested
 * DATA attributes are then dispatched by kind (only "bridge" is
 * handled).  Always returns 0 except for errors from the nested walk.
 *
 * NOTE(review): this assumes the kernel emits INFO_KIND before
 * INFO_DATA (li_context->name is read without a NULL check) - TODO
 * confirm that ordering is guaranteed for all kernel versions. */
static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
                                                    void *context)
{
    struct linkinfo_context *li_context = context;

    switch (nlattr->nla_type) {
    /* string */
    case QEMU_IFLA_INFO_KIND:
        li_context->name = NLA_DATA(nlattr);
        li_context->len = nlattr->nla_len - NLA_HDRLEN;
        break;
    case QEMU_IFLA_INFO_SLAVE_KIND:
        li_context->slave_name = NLA_DATA(nlattr);
        li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
        break;
    /* stats */
    case QEMU_IFLA_INFO_XSTATS:
        /* FIXME: only used by CAN */
        break;
    /* nested */
    case QEMU_IFLA_INFO_DATA:
        if (strncmp(li_context->name, "bridge",
                    li_context->len) == 0) {
            return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
                                                  nlattr->nla_len,
                                                  NULL,
                                                  host_to_target_data_bridge_nlattr);
        } else {
            gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
        }
        break;
    case QEMU_IFLA_INFO_SLAVE_DATA:
        if (strncmp(li_context->slave_name, "bridge",
                    li_context->slave_len) == 0) {
            return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
                                                  nlattr->nla_len,
                                                  NULL,
                                                  host_to_target_slave_data_bridge_nlattr);
        } else {
            gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
                     li_context->slave_name);
        }
        break;
    default:
        gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}
/* Byte-swap the payload of one AF_INET IFLA attribute in place.
 * Only the per-device configuration array is handled; anything else is
 * logged and left untouched.  Always returns 0. */
static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
                                                void *context)
{
    switch (nlattr->nla_type) {
    case QEMU_IFLA_INET_CONF: {
        /* An array of 32-bit configuration values. */
        uint32_t *conf = NLA_DATA(nlattr);
        size_t count = (nlattr->nla_len - NLA_HDRLEN) / sizeof(*conf);
        size_t idx;

        for (idx = 0; idx < count; idx++) {
            conf[idx] = tswap32(conf[idx]);
        }
        break;
    }
    default:
        gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
    }
    return 0;
}
/* Byte-swap the payload of one AF_INET6 IFLA attribute in place,
 * according to the payload layout that attribute carries (scalar,
 * array, or ifla_cacheinfo struct).  Unknown types are logged and
 * left untouched.  Always returns 0. */
static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
                                                 void *context)
{
    uint32_t *u32;
    uint64_t *u64;
    struct ifla_cacheinfo *ci;
    int i;

    switch (nlattr->nla_type) {
    /* binaries */
    case QEMU_IFLA_INET6_TOKEN:
        break;
    /* uint8_t */
    case QEMU_IFLA_INET6_ADDR_GEN_MODE:
        break;
    /* uint32_t */
    case QEMU_IFLA_INET6_FLAGS:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint32_t[] */
    case QEMU_IFLA_INET6_CONF:
        u32 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
             i++) {
            u32[i] = tswap32(u32[i]);
        }
        break;
    /* ifla_cacheinfo */
    case QEMU_IFLA_INET6_CACHEINFO:
        ci = NLA_DATA(nlattr);
        ci->max_reasm_len = tswap32(ci->max_reasm_len);
        ci->tstamp = tswap32(ci->tstamp);
        ci->reachable_time = tswap32(ci->reachable_time);
        ci->retrans_time = tswap32(ci->retrans_time);
        break;
    /* uint64_t[] */
    case QEMU_IFLA_INET6_STATS:
    case QEMU_IFLA_INET6_ICMP6STATS:
        u64 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
             i++) {
            u64[i] = tswap64(u64[i]);
        }
        break;
    default:
        gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
    }
    return 0;
}
/*
 * Dispatch one IFLA_AF_SPEC entry on its address family and byte-swap its
 * nested attributes in place.  AF_INET and AF_INET6 nests are walked with
 * their family-specific converters; other families are logged and left
 * untouched.  Returns the nested walker's result, or 0.
 */
static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
                                                void *context)
{
    if (nlattr->nla_type == AF_INET) {
        return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
                                              nlattr->nla_len, NULL,
                                              host_to_target_data_inet_nlattr);
    }
    if (nlattr->nla_type == AF_INET6) {
        return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
                                              nlattr->nla_len, NULL,
                                              host_to_target_data_inet6_nlattr);
    }
    gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
    return 0;
}
/*
 * Byte-swap the payload of one IFLA_* attribute of an RTM_*LINK message
 * from host to target endianness, in place.  Binary and string payloads
 * are passed through; fixed-size scalars and structs are swapped field by
 * field; the nested IFLA_LINKINFO and IFLA_AF_SPEC sets are walked with
 * host_to_target_for_each_nlattr().  Unknown attribute types are logged
 * and left untouched.  Returns 0, or the nested walker's result.
 */
static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct rtnl_link_stats *st;
    struct rtnl_link_stats64 *st64;
    struct rtnl_link_ifmap *map;
    struct linkinfo_context li_context;

    switch (rtattr->rta_type) {
    /* binary stream: endian-neutral, no conversion needed */
    case QEMU_IFLA_ADDRESS:
    case QEMU_IFLA_BROADCAST:
    /* string */
    case QEMU_IFLA_IFNAME:
    case QEMU_IFLA_QDISC:
        break;
    /* uint8_t: endian-neutral */
    case QEMU_IFLA_OPERSTATE:
    case QEMU_IFLA_LINKMODE:
    case QEMU_IFLA_CARRIER:
    case QEMU_IFLA_PROTO_DOWN:
        break;
    /* uint32_t */
    case QEMU_IFLA_MTU:
    case QEMU_IFLA_LINK:
    case QEMU_IFLA_WEIGHT:
    case QEMU_IFLA_TXQLEN:
    case QEMU_IFLA_CARRIER_CHANGES:
    case QEMU_IFLA_NUM_RX_QUEUES:
    case QEMU_IFLA_NUM_TX_QUEUES:
    case QEMU_IFLA_PROMISCUITY:
    case QEMU_IFLA_EXT_MASK:
    case QEMU_IFLA_LINK_NETNSID:
    case QEMU_IFLA_GROUP:
    case QEMU_IFLA_MASTER:
    case QEMU_IFLA_NUM_VF:
    case QEMU_IFLA_GSO_MAX_SEGS:
    case QEMU_IFLA_GSO_MAX_SIZE:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct rtnl_link_stats: every member is a 32-bit counter */
    case QEMU_IFLA_STATS:
        st = RTA_DATA(rtattr);
        st->rx_packets = tswap32(st->rx_packets);
        st->tx_packets = tswap32(st->tx_packets);
        st->rx_bytes = tswap32(st->rx_bytes);
        st->tx_bytes = tswap32(st->tx_bytes);
        st->rx_errors = tswap32(st->rx_errors);
        st->tx_errors = tswap32(st->tx_errors);
        st->rx_dropped = tswap32(st->rx_dropped);
        st->tx_dropped = tswap32(st->tx_dropped);
        st->multicast = tswap32(st->multicast);
        st->collisions = tswap32(st->collisions);
        /* detailed rx_errors: */
        st->rx_length_errors = tswap32(st->rx_length_errors);
        st->rx_over_errors = tswap32(st->rx_over_errors);
        st->rx_crc_errors = tswap32(st->rx_crc_errors);
        st->rx_frame_errors = tswap32(st->rx_frame_errors);
        st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
        st->rx_missed_errors = tswap32(st->rx_missed_errors);
        /* detailed tx_errors */
        st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
        st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
        st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
        st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
        st->tx_window_errors = tswap32(st->tx_window_errors);
        /* for cslip etc */
        st->rx_compressed = tswap32(st->rx_compressed);
        st->tx_compressed = tswap32(st->tx_compressed);
        break;
    /* struct rtnl_link_stats64: same counters, 64-bit wide */
    case QEMU_IFLA_STATS64:
        st64 = RTA_DATA(rtattr);
        st64->rx_packets = tswap64(st64->rx_packets);
        st64->tx_packets = tswap64(st64->tx_packets);
        st64->rx_bytes = tswap64(st64->rx_bytes);
        st64->tx_bytes = tswap64(st64->tx_bytes);
        st64->rx_errors = tswap64(st64->rx_errors);
        st64->tx_errors = tswap64(st64->tx_errors);
        st64->rx_dropped = tswap64(st64->rx_dropped);
        st64->tx_dropped = tswap64(st64->tx_dropped);
        st64->multicast = tswap64(st64->multicast);
        st64->collisions = tswap64(st64->collisions);
        /* detailed rx_errors: */
        st64->rx_length_errors = tswap64(st64->rx_length_errors);
        st64->rx_over_errors = tswap64(st64->rx_over_errors);
        st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
        st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
        st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
        st64->rx_missed_errors = tswap64(st64->rx_missed_errors);
        /* detailed tx_errors */
        st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
        st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
        st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
        st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
        st64->tx_window_errors = tswap64(st64->tx_window_errors);
        /* for cslip etc */
        st64->rx_compressed = tswap64(st64->rx_compressed);
        st64->tx_compressed = tswap64(st64->tx_compressed);
        break;
    /* struct rtnl_link_ifmap: 64-bit addresses, 16-bit irq;
     * the trailing dma/port members are uint8_t and need no swap */
    case QEMU_IFLA_MAP:
        map = RTA_DATA(rtattr);
        map->mem_start = tswap64(map->mem_start);
        map->mem_end = tswap64(map->mem_end);
        map->base_addr = tswap64(map->base_addr);
        map->irq = tswap16(map->irq);
        break;
    /* nested attribute sets: recurse with a fresh linkinfo context */
    case QEMU_IFLA_LINKINFO:
        memset(&li_context, 0, sizeof(li_context));
        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
                                              &li_context,
                                           host_to_target_data_linkinfo_nlattr);
    case QEMU_IFLA_AF_SPEC:
        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
                                              NULL,
                                              host_to_target_data_spec_nlattr);
    default:
        gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
/*
 * Byte-swap the payload of one IFA_* attribute of an RTM_*ADDR message
 * from host to target endianness, in place.  Unknown attribute types are
 * logged and left untouched.  Always returns 0.
 *
 * Bug fix: IFA_BROADCAST carries a binary broadcast address (e.g. a
 * struct in_addr for AF_INET) which, like IFA_ADDRESS and IFA_LOCAL, is
 * kept in network byte order on every host.  Treating it as a host-endian
 * u32 and swapping it corrupted the address on cross-endian host/target
 * combinations; it must be passed through unchanged.
 */
static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct ifa_cacheinfo *ci;

    switch (rtattr->rta_type) {
    /* binary: depends on family type; addresses stay in network order */
    case IFA_ADDRESS:
    case IFA_LOCAL:
    case IFA_BROADCAST:
        break;
    /* string */
    case IFA_LABEL:
        break;
    /* u32 */
    case IFA_FLAGS:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct ifa_cacheinfo: four uint32_t fields */
    case IFA_CACHEINFO:
        ci = RTA_DATA(rtattr);
        ci->ifa_prefered = tswap32(ci->ifa_prefered);
        ci->ifa_valid = tswap32(ci->ifa_valid);
        ci->cstamp = tswap32(ci->cstamp);
        ci->tstamp = tswap32(ci->tstamp);
        break;
    default:
        gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
/*
 * Byte-swap the payload of one RTA_* attribute of an RTM_*ROUTE message
 * from host to target endianness, in place.  Address payloads are
 * endian-neutral and passed through; u32 scalars are swapped.  Unknown
 * attribute types are logged and left untouched.  Always returns 0.
 */
static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
{
    switch (rtattr->rta_type) {
    /* binary payload: depends on family type, left untouched */
    case RTA_GATEWAY:
    case RTA_DST:
    case RTA_PREFSRC:
        break;
    /* single u32 value */
    case RTA_PRIORITY:
    case RTA_TABLE:
    case RTA_OIF: {
        uint32_t *val = RTA_DATA(rtattr);

        *val = tswap32(*val);
        break;
    }
    default:
        gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
/* Walk the IFLA_* attribute chain of an RTM_*LINK message (rtattr_len
 * bytes starting at rtattr) and byte-swap each attribute in place. */
static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
                                           uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_link_rtattr);
}
/* Walk the IFA_* attribute chain of an RTM_*ADDR message (rtattr_len
 * bytes starting at rtattr) and byte-swap each attribute in place. */
static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
                                           uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_addr_rtattr);
}
/* Walk the RTA_* attribute chain of an RTM_*ROUTE message (rtattr_len
 * bytes starting at rtattr) and byte-swap each attribute in place. */
static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
                                            uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_route_rtattr);
}
static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
{
uint32_t nlmsg_len;
struct ifinfomsg *ifi;
struct ifaddrmsg *ifa;
struct rtmsg *rtm;
nlmsg_len = nlh->nlmsg_len;
switch (nlh->nlmsg_type) {
case RTM_NEWLINK:
case RTM_DELLINK:
case RTM_GETLINK:
if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
ifi = NLMSG_DATA(nlh);
ifi->ifi_type = tswap16(ifi->ifi_type);
ifi->ifi_index = tswap32(ifi->ifi_index);
ifi->ifi_flags = tswap32(ifi->ifi_flags);
ifi->ifi_change = tswap32(ifi->ifi_change);
host_to_target_link_rtattr(IFLA_RTA(ifi),
nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
}
break;
case RTM_NEWADDR:
case RTM_DELADDR:
case RTM_GETADDR:
if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
ifa = NLMSG_DATA(nlh);
ifa->ifa_index = tswap32(ifa->ifa_index);
host_to_target_addr_rtattr(IFA_RTA(ifa),
nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
}
break;
case RTM_NEWROUTE:
case RTM_DELROUTE:
case RTM_GETROUTE:
if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
rtm =