/* SPDX-License-Identifier: MIT */
// https://syzkaller.appspot.com/bug?id=5f5a44abb4cba056fe24255c4fcb7e7bbe13de7a
// autogenerated by syzkaller (https://github.com/google/syzkaller)
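/* Each forked child races four calls across worker threads:
 * (0) create a signalfd, (1) set up an io_uring instance,
 * (2) submit an IORING_OP_POLL_ADD SQE targeting the signalfd,
 * (3) call io_uring_enter() on the ring. */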
#include <dirent.h>
#include <endian.h>
#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <signal.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>
#include <linux/futex.h>
#ifdef __NR_futex
static void sleep_ms(uint64_t ms)
{
usleep(ms * 1000);
}
static uint64_t current_time_ms(void)
{
struct timespec ts;
if (clock_gettime(CLOCK_MONOTONIC, &ts))
exit(1);
return (uint64_t)ts.tv_sec * 1000 + (uint64_t)ts.tv_nsec / 1000000;
}
static void thread_start(void* (*fn)(void*), void* arg)
{
pthread_t th;
pthread_attr_t attr;
pthread_attr_init(&attr);
pthread_attr_setstacksize(&attr, 128 << 10);
for (int i = 0; i < 100; i++) {
if (pthread_create(&th, &attr, fn, arg) == 0) {
pthread_attr_destroy(&attr);
return;
}
if (errno == EAGAIN) {
usleep(50);
continue;
}
break;
}
exit(1);
}
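/* Minimal one-shot event built on FUTEX_WAIT/FUTEX_WAKE, used to hand
 * calls to the worker threads and to wait for their completion. */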
typedef struct {
int state;
} event_t;
static void event_init(event_t* ev)
{
ev->state = 0;
}
static void event_reset(event_t* ev)
{
ev->state = 0;
}
static void event_set(event_t* ev)
{
if (ev->state)
exit(1);
__atomic_store_n(&ev->state, 1, __ATOMIC_RELEASE);
syscall(__NR_futex, &ev->state, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1000000);
}
static void event_wait(event_t* ev)
{
while (!__atomic_load_n(&ev->state, __ATOMIC_ACQUIRE))
syscall(__NR_futex, &ev->state, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, 0, 0);
}
static int event_isset(event_t* ev)
{
return __atomic_load_n(&ev->state, __ATOMIC_ACQUIRE);
}
static int event_timedwait(event_t* ev, uint64_t timeout)
{
uint64_t start = current_time_ms();
uint64_t now = start;
for (;;) {
uint64_t remain = timeout - (now - start);
struct timespec ts;
ts.tv_sec = remain / 1000;
ts.tv_nsec = (remain % 1000) * 1000 * 1000;
syscall(__NR_futex, &ev->state, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, 0, &ts);
if (__atomic_load_n(&ev->state, __ATOMIC_ACQUIRE))
return 1;
now = current_time_ms();
if (now - start > timeout)
return 0;
}
}
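/* Byte offsets of ring fields within the region mapped at
 * IORING_OFF_SQ_RING, hardcoded to match the layout reported by
 * io_uring_setup() on the kernel this reproducer targets. */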
#define SIZEOF_IO_URING_SQE 64
#define SIZEOF_IO_URING_CQE 16
#define SQ_HEAD_OFFSET 0
#define SQ_TAIL_OFFSET 64
#define SQ_RING_MASK_OFFSET 256
#define SQ_RING_ENTRIES_OFFSET 264
#define SQ_FLAGS_OFFSET 276
#define SQ_DROPPED_OFFSET 272
#define CQ_HEAD_OFFSET 128
#define CQ_TAIL_OFFSET 192
#define CQ_RING_MASK_OFFSET 260
#define CQ_RING_ENTRIES_OFFSET 268
#define CQ_RING_OVERFLOW_OFFSET 284
#define CQ_FLAGS_OFFSET 280
#define CQ_CQES_OFFSET 320
struct io_sqring_offsets {
uint32_t head;
uint32_t tail;
uint32_t ring_mask;
uint32_t ring_entries;
uint32_t flags;
uint32_t dropped;
uint32_t array;
uint32_t resv1;
uint64_t resv2;
};
struct io_cqring_offsets {
uint32_t head;
uint32_t tail;
uint32_t ring_mask;
uint32_t ring_entries;
uint32_t overflow;
uint32_t cqes;
uint64_t resv[2];
};
struct io_uring_params {
uint32_t sq_entries;
uint32_t cq_entries;
uint32_t flags;
uint32_t sq_thread_cpu;
uint32_t sq_thread_idle;
uint32_t features;
uint32_t resv[4];
struct io_sqring_offsets sq_off;
struct io_cqring_offsets cq_off;
};
#define IORING_OFF_SQ_RING 0
#define IORING_OFF_SQES 0x10000000ULL
#ifndef __NR_io_uring_setup
#define __NR_io_uring_setup 425
#endif
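/* Wrapper around io_uring_setup(2): creates a ring, then maps the
 * SQ/CQ ring region and the SQE array at the caller-supplied fixed
 * addresses. As usual for syzkaller reproducers, errors are ignored. */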
static long syz_io_uring_setup(volatile long a0, volatile long a1,
volatile long a2, volatile long a3,
volatile long a4, volatile long a5)
{
uint32_t entries = (uint32_t)a0;
struct io_uring_params* setup_params = (struct io_uring_params*)a1;
void* vma1 = (void*)a2;
void* vma2 = (void*)a3;
void** ring_ptr_out = (void**)a4;
void** sqes_ptr_out = (void**)a5;
uint32_t fd_io_uring = syscall(__NR_io_uring_setup, entries, setup_params);
uint32_t sq_ring_sz =
setup_params->sq_off.array + setup_params->sq_entries * sizeof(uint32_t);
uint32_t cq_ring_sz = setup_params->cq_off.cqes +
setup_params->cq_entries * SIZEOF_IO_URING_CQE;
uint32_t ring_sz = sq_ring_sz > cq_ring_sz ? sq_ring_sz : cq_ring_sz;
*ring_ptr_out = mmap(vma1, ring_sz, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_POPULATE | MAP_FIXED, fd_io_uring,
IORING_OFF_SQ_RING);
uint32_t sqes_sz = setup_params->sq_entries * SIZEOF_IO_URING_SQE;
*sqes_ptr_out =
mmap(vma2, sqes_sz, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_POPULATE | MAP_FIXED, fd_io_uring, IORING_OFF_SQES);
return fd_io_uring;
}
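/* Copies one SQE into the mapped SQE array, publishes its index in the
 * SQ index array (which follows the CQEs in the ring mapping) and
 * advances the SQ tail with a release store, as a userspace io_uring
 * library would before calling io_uring_enter(2). */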
static long syz_io_uring_submit(volatile long a0, volatile long a1,
volatile long a2, volatile long a3)
{
char* ring_ptr = (char*)a0;
char* sqes_ptr = (char*)a1;
char* sqe = (char*)a2;
uint32_t sqes_index = (uint32_t)a3;
uint32_t sq_ring_entries = *(uint32_t*)(ring_ptr + SQ_RING_ENTRIES_OFFSET);
uint32_t cq_ring_entries = *(uint32_t*)(ring_ptr + CQ_RING_ENTRIES_OFFSET);
uint32_t sq_array_off =
(CQ_CQES_OFFSET + cq_ring_entries * SIZEOF_IO_URING_CQE + 63) & ~63;
if (sq_ring_entries)
sqes_index %= sq_ring_entries;
char* sqe_dest = sqes_ptr + sqes_index * SIZEOF_IO_URING_SQE;
memcpy(sqe_dest, sqe, SIZEOF_IO_URING_SQE);
uint32_t sq_ring_mask = *(uint32_t*)(ring_ptr + SQ_RING_MASK_OFFSET);
uint32_t* sq_tail_ptr = (uint32_t*)(ring_ptr + SQ_TAIL_OFFSET);
uint32_t sq_tail = *sq_tail_ptr & sq_ring_mask;
uint32_t sq_tail_next = *sq_tail_ptr + 1;
uint32_t* sq_array = (uint32_t*)(ring_ptr + sq_array_off);
*(sq_array + sq_tail) = sqes_index;
__atomic_store_n(sq_tail_ptr, sq_tail_next, __ATOMIC_RELEASE);
return 0;
}
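/* Kills the test process group and reaps the child. A process stuck on
 * a FUSE request cannot die until the connection is aborted, so if the
 * child does not exit promptly, abort all FUSE connections and then
 * wait for it indefinitely. */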
static void kill_and_wait(int pid, int* status)
{
kill(-pid, SIGKILL);
kill(pid, SIGKILL);
for (int i = 0; i < 100; i++) {
if (waitpid(-1, status, WNOHANG | __WALL) == pid)
return;
usleep(1000);
}
DIR* dir = opendir("/sys/fs/fuse/connections");
if (dir) {
for (;;) {
struct dirent* ent = readdir(dir);
if (!ent)
break;
if (strcmp(ent->d_name, ".") == 0 || strcmp(ent->d_name, "..") == 0)
continue;
char abort[300];
snprintf(abort, sizeof(abort), "/sys/fs/fuse/connections/%s/abort",
ent->d_name);
int fd = open(abort, O_WRONLY);
if (fd == -1) {
continue;
}
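/* Any write to the abort file aborts the connection; the byte written
 * is irrelevant, and the empty branch silences unused-result warnings. */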
if (write(fd, abort, 1) < 0) {
}
close(fd);
}
closedir(dir);
}
while (waitpid(-1, status, __WALL) != pid) {
}
}
static void setup_test(void)
{
prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0);
setpgrp();
}
struct thread_t {
int created, call;
event_t ready, done;
};
static struct thread_t threads[16];
static void execute_call(int call);
static int running;
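/* Worker thread: waits for a call to be handed over, executes it and
 * signals completion. */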
static void* thr(void* arg)
{
struct thread_t* th = (struct thread_t*)arg;
for (;;) {
event_wait(&th->ready);
event_reset(&th->ready);
execute_call(th->call);
__atomic_fetch_sub(&running, 1, __ATOMIC_RELAXED);
event_set(&th->done);
}
return 0;
}
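/* Issues the reproducer's four calls, each on its own worker thread,
 * waiting at most 50 ms per call so that slow calls keep running and
 * can race with the later ones. */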
static void execute_one(void)
{
int i, call, thread;
for (call = 0; call < 4; call++) {
for (thread = 0; thread < (int)(sizeof(threads) / sizeof(threads[0]));
thread++) {
struct thread_t* th = &threads[thread];
if (!th->created) {
th->created = 1;
event_init(&th->ready);
event_init(&th->done);
event_set(&th->done);
thread_start(thr, th);
}
if (!event_isset(&th->done))
continue;
event_reset(&th->done);
th->call = call;
__atomic_fetch_add(&running, 1, __ATOMIC_RELAXED);
event_set(&th->ready);
event_timedwait(&th->done, 50);
break;
}
}
for (i = 0; i < 100 && __atomic_load_n(&running, __ATOMIC_RELAXED); i++)
sleep_ms(1);
}
#define WAIT_FLAGS __WALL
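/* Top-level loop: fork a child per iteration (up to 5000), run the
 * calls in it, and kill the child if it has not exited within 5 s. */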
static void loop(void)
{
int iter = 0;
for (; iter < 5000; iter++) {
int pid = fork();
if (pid < 0)
exit(1);
if (pid == 0) {
setup_test();
execute_one();
exit(0);
}
int status = 0;
uint64_t start = current_time_ms();
for (;;) {
if (waitpid(-1, &status, WNOHANG | WAIT_FLAGS) == pid)
break;
sleep_ms(1);
if (current_time_ms() - start < 5000)
continue;
kill_and_wait(pid, &status);
break;
}
}
}
#ifndef __NR_io_uring_enter
#define __NR_io_uring_enter 426
#endif
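/* Results shared across calls: r[0] = signalfd, r[1] = io_uring fd,
 * r[2] = mapped ring pointer, r[3] = mapped SQE array pointer. */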
uint64_t r[4] = {0xffffffffffffffff, 0xffffffffffffffff, 0x0, 0x0};
static void execute_call(int call)
{
intptr_t res = 0;
switch (call) {
case 0:
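/* signalfd4(-1, mask at 0x200000c0, 8, 0): create a new signalfd. */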
*(uint64_t*)0x200000c0 = 0;
res = syscall(__NR_signalfd4, -1, 0x200000c0ul, 8ul, 0ul);
if (res != -1)
r[0] = res;
break;
case 1:
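/* io_uring_setup(0x87, params at 0x20000a80) via the wrapper above;
 * save the ring fd and both mapped addresses for the later calls. */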
*(uint32_t*)0x20000a84 = 0;
*(uint32_t*)0x20000a88 = 0;
*(uint32_t*)0x20000a8c = 0;
*(uint32_t*)0x20000a90 = 0;
*(uint32_t*)0x20000a98 = -1;
memset((void*)0x20000a9c, 0, 12);
res = syz_io_uring_setup(0x87, 0x20000a80, 0x206d6000, 0x206d7000,
0x20000000, 0x20000040);
if (res != -1) {
r[1] = res;
r[2] = *(uint64_t*)0x20000000;
r[3] = *(uint64_t*)0x20000040;
}
break;
case 2:
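/* Build an SQE with opcode 6 (IORING_OP_POLL_ADD) targeting the
 * signalfd from call 0, then publish it to the SQ ring. */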
*(uint8_t*)0x20002240 = 6;
*(uint8_t*)0x20002241 = 0;
*(uint16_t*)0x20002242 = 0;
*(uint32_t*)0x20002244 = r[0];
*(uint64_t*)0x20002248 = 0;
*(uint64_t*)0x20002250 = 0;
*(uint32_t*)0x20002258 = 0;
*(uint16_t*)0x2000225c = 0;
*(uint16_t*)0x2000225e = 0;
*(uint64_t*)0x20002260 = 0;
*(uint16_t*)0x20002268 = 0;
*(uint16_t*)0x2000226a = 0;
memset((void*)0x2000226c, 0, 20);
syz_io_uring_submit(r[2], r[3], 0x20002240, 0);
break;
case 3:
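/* io_uring_enter(fd, to_submit = 0x1523a, min_complete = 0, flags = 0,
 * sig = NULL, sigsz = 0xa): submit the queued SQE(s). */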
syscall(__NR_io_uring_enter, r[1], 0x1523a, 0, 0ul, 0ul, 0xaul);
break;
}
}
int main(int argc, char *argv[])
{
#if !defined(__i386__) && !defined(__x86_64__)
/* The fixed mmap addresses below assume an x86 address-space layout. */
return 0;
#endif
void *ret;
if (argc > 1)
return 0;
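/* Map the fixed regions the hardcoded pointers above refer to:
 * 0x1ffff000: PROT_NONE guard page;
 * 0x20000000: 16 MiB data area, prot 7 = PROT_READ|PROT_WRITE|PROT_EXEC,
 *             flags 0x32 = MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS;
 * 0x21000000: trailing PROT_NONE guard page. */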
ret = mmap((void *)0x1ffff000ul, 0x1000ul, 0ul, 0x32ul, -1, 0ul);
if (ret == MAP_FAILED)
return 0;
ret = mmap((void *)0x20000000ul, 0x1000000ul, 7ul, 0x32ul, -1, 0ul);
if (ret == MAP_FAILED)
return 0;
ret = mmap((void *)0x21000000ul, 0x1000ul, 0ul, 0x32ul, -1, 0ul);
if (ret == MAP_FAILED)
return 0;
loop();
return 0;
}
#else /* __NR_futex */
int main(int argc, char *argv[])
{
return 0;
}
#endif /* __NR_futex */