#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <ctype.h>
#include <string.h>
#include <assert.h>
#include <libgen.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <netinet/in.h>
#include "fio.h"
#include "verify.h"
#include "parse.h"
#include "lib/fls.h"
#include "lib/pattern.h"
#include "options.h"
#include "optgroup.h"
char client_sockaddr_str[INET6_ADDRSTRLEN] = { 0 };
#define cb_data_to_td(data) container_of(data, struct thread_data, o)
static struct pattern_fmt_desc fmt_desc[] = {
{
.fmt = "%o",
.len = FIELD_SIZE(struct io_u *, offset),
.paste = paste_blockoff
}
};
/*
* Check if mmap/mmaphuge has a :/foo/bar/file at the end. If so, return that.
*/
static char *get_opt_postfix(const char *str)
{
char *p = strstr(str, ":");
if (!p)
return NULL;
p++;
strip_blank_front(&p);
strip_blank_end(p);
return strdup(p);
}
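/*
 * qsort() comparator: order bssplit entries by ascending percentage.
 */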
static int bs_cmp(const void *p1, const void *p2)
{
const struct bssplit *bsp1 = p1;
const struct bssplit *bsp2 = p2;
return (int) bsp1->perc - (int) bsp2->perc;
}
struct split {
unsigned int nr;
unsigned int val1[100];
unsigned int val2[100];
};
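/*
 * Parse a ':'-separated list of "value[/percentage]" entries into @split.
 * A missing or zero percentage is stored as -1U so the caller can
 * distribute the remainder later. At most 100 entries are accepted.
 */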
static int split_parse_ddir(struct thread_options *o, struct split *split,
enum fio_ddir ddir, char *str)
{
unsigned int i, perc;
long long val;
char *fname;
split->nr = 0;
i = 0;
while ((fname = strsep(&str, ":")) != NULL) {
char *perc_str;
if (!strlen(fname))
break;
perc_str = strstr(fname, "/");
if (perc_str) {
*perc_str = '\0';
perc_str++;
perc = atoi(perc_str);
if (perc > 100)
perc = 100;
else if (!perc)
perc = -1U;
} else
perc = -1U;
if (str_to_decimal(fname, &val, 1, o, 0, 0)) {
log_err("fio: bssplit conversion failed\n");
return 1;
}
split->val1[i] = val;
split->val2[i] = perc;
i++;
if (i == 100)
break;
}
split->nr = i;
return 0;
}
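/*
 * Build the per-direction block size split table, e.g.
 * "bssplit=4k/10:64k/50:32k/40". Missing percentages are spread evenly
 * over the remainder, min/max block sizes are updated, and the table is
 * sorted by percentage for fast lookup.
 */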
static int bssplit_ddir(struct thread_options *o, enum fio_ddir ddir, char *str)
{
unsigned int i, perc, perc_missing;
unsigned int max_bs, min_bs;
struct split split;
memset(&split, 0, sizeof(split));
if (split_parse_ddir(o, &split, ddir, str))
return 1;
if (!split.nr)
return 0;
max_bs = 0;
min_bs = -1;
o->bssplit[ddir] = malloc(split.nr * sizeof(struct bssplit));
o->bssplit_nr[ddir] = split.nr;
for (i = 0; i < split.nr; i++) {
if (split.val1[i] > max_bs)
max_bs = split.val1[i];
if (split.val1[i] < min_bs)
min_bs = split.val1[i];
o->bssplit[ddir][i].bs = split.val1[i];
o->bssplit[ddir][i].perc = split.val2[i];
}
/*
* Now check if the percentages add up, and how much is missing
*/
perc = perc_missing = 0;
for (i = 0; i < o->bssplit_nr[ddir]; i++) {
struct bssplit *bsp = &o->bssplit[ddir][i];
if (bsp->perc == -1U)
perc_missing++;
else
perc += bsp->perc;
}
if (perc > 100 && perc_missing > 1) {
log_err("fio: bssplit percentages add to more than 100%%\n");
free(o->bssplit[ddir]);
o->bssplit[ddir] = NULL;
return 1;
}
/*
* If values didn't have a percentage set, divide the remains between
* them.
*/
if (perc_missing) {
if (perc_missing == 1 && o->bssplit_nr[ddir] == 1)
perc = 100;
for (i = 0; i < o->bssplit_nr[ddir]; i++) {
struct bssplit *bsp = &o->bssplit[ddir][i];
if (bsp->perc == -1U)
bsp->perc = (100 - perc) / perc_missing;
}
}
o->min_bs[ddir] = min_bs;
o->max_bs[ddir] = max_bs;
/*
* now sort based on percentages, for ease of lookup
*/
qsort(o->bssplit[ddir], o->bssplit_nr[ddir], sizeof(struct bssplit), bs_cmp);
return 0;
}
typedef int (split_parse_fn)(struct thread_options *, enum fio_ddir, char *);
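/*
 * Apply @fn to each data direction of a (possibly comma-separated) option
 * string: one value covers read, write and trim; two values give read and
 * then write+trim; three values give read, write and trim individually.
 */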
static int str_split_parse(struct thread_data *td, char *str, split_parse_fn *fn)
{
char *odir, *ddir;
int ret = 0;
odir = strchr(str, ',');
if (odir) {
ddir = strchr(odir + 1, ',');
if (ddir) {
ret = fn(&td->o, DDIR_TRIM, ddir + 1);
if (!ret)
*ddir = '\0';
} else {
char *op;
op = strdup(odir + 1);
ret = fn(&td->o, DDIR_TRIM, op);
free(op);
}
if (!ret)
ret = fn(&td->o, DDIR_WRITE, odir + 1);
if (!ret) {
*odir = '\0';
ret = fn(&td->o, DDIR_READ, str);
}
} else {
char *op;
op = strdup(str);
ret = fn(&td->o, DDIR_WRITE, op);
free(op);
if (!ret) {
op = strdup(str);
ret = fn(&td->o, DDIR_TRIM, op);
free(op);
}
if (!ret)
ret = fn(&td->o, DDIR_READ, str);
}
return ret;
}
static int str_bssplit_cb(void *data, const char *input)
{
struct thread_data *td = cb_data_to_td(data);
char *str, *p;
int ret = 0;
p = str = strdup(input);
strip_blank_front(&str);
strip_blank_end(str);
ret = str_split_parse(td, str, bssplit_ddir);
if (parse_dryrun()) {
int i;
for (i = 0; i < DDIR_RWDIR_CNT; i++) {
free(td->o.bssplit[i]);
td->o.bssplit[i] = NULL;
td->o.bssplit_nr[i] = 0;
}
}
free(p);
return ret;
}
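/*
 * Map an errno symbol name (e.g. "ENOENT") to its 1-based index in the
 * table below, which matches the usual errno numbering. Returns 0 if the
 * name is not recognized.
 */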
static int str2error(char *str)
{
const char *err[] = { "EPERM", "ENOENT", "ESRCH", "EINTR", "EIO",
"ENXIO", "E2BIG", "ENOEXEC", "EBADF",
"ECHILD", "EAGAIN", "ENOMEM", "EACCES",
"EFAULT", "ENOTBLK", "EBUSY", "EEXIST",
"EXDEV", "ENODEV", "ENOTDIR", "EISDIR",
"EINVAL", "ENFILE", "EMFILE", "ENOTTY",
"ETXTBSY","EFBIG", "ENOSPC", "ESPIPE",
"EROFS","EMLINK", "EPIPE", "EDOM", "ERANGE" };
int i = 0, num = sizeof(err) / sizeof(char *);
while (i < num) {
if (!strcmp(err[i], str))
return i + 1;
i++;
}
return 0;
}
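/*
 * Parse a ':'-separated list of errno names or numbers for one error type
 * and store it in td->o.ignore_error[etype], growing the array as needed.
 */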
static int ignore_error_type(struct thread_data *td, int etype, char *str)
{
unsigned int i;
int *error;
char *fname;
if (etype >= ERROR_TYPE_CNT) {
log_err("Illegal error type\n");
return 1;
}
td->o.ignore_error_nr[etype] = 4;
error = malloc(4 * sizeof(int));
i = 0;
while ((fname = strsep(&str, ":")) != NULL) {
if (!strlen(fname))
break;
/*
* grow struct buffer, if needed
*/
if (i == td->o.ignore_error_nr[etype]) {
td->o.ignore_error_nr[etype] <<= 1;
error = realloc(error, td->o.ignore_error_nr[etype]
* sizeof(int));
}
if (fname[0] == 'E') {
error[i] = str2error(fname);
} else {
error[i] = atoi(fname);
if (error[i] < 0)
error[i] = -error[i];
}
if (!error[i]) {
log_err("Unknown error %s, please use number value \n",
fname);
free(error);
return 1;
}
i++;
}
if (i) {
td->o.continue_on_error |= 1 << etype;
td->o.ignore_error_nr[etype] = i;
td->o.ignore_error[etype] = error;
} else
free(error);
return 0;
}
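/*
 * ignore_error is a comma-separated set of lists, one per error type
 * (read, write, verify); each list is handled by ignore_error_type().
 */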
static int str_ignore_error_cb(void *data, const char *input)
{
struct thread_data *td = cb_data_to_td(data);
char *str, *p, *n;
int type = 0, ret = 1;
if (parse_dryrun())
return 0;
p = str = strdup(input);
strip_blank_front(&str);
strip_blank_end(str);
while (p) {
n = strchr(p, ',');
if (n)
*n++ = '\0';
ret = ignore_error_type(td, type, p);
if (ret)
break;
p = n;
type++;
}
free(str);
return ret;
}
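/*
 * rw/readwrite callback: an optional ":<nr>" postfix is, for random
 * workloads, the number of sequential IOs to issue before generating a new
 * random offset (ddir_seq_nr); for sequential workloads it is an extra
 * offset added between IOs (ddir_seq_add).
 */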
static int str_rw_cb(void *data, const char *str)
{
struct thread_data *td = cb_data_to_td(data);
struct thread_options *o = &td->o;
char *nr;
if (parse_dryrun())
return 0;
o->ddir_seq_nr = 1;
o->ddir_seq_add = 0;
nr = get_opt_postfix(str);
if (!nr)
return 0;
if (td_random(td))
o->ddir_seq_nr = atoi(nr);
else {
long long val;
if (str_to_decimal(nr, &val, 1, o, 0, 0)) {
log_err("fio: rw postfix parsing failed\n");
free(nr);
return 1;
}
o->ddir_seq_add = val;
}
free(nr);
return 0;
}
static int str_mem_cb(void *data, const char *mem)
{
struct thread_data *td = cb_data_to_td(data);
if (td->o.mem_type == MEM_MMAPHUGE || td->o.mem_type == MEM_MMAP ||
td->o.mem_type == MEM_MMAPSHARED)
td->o.mmapfile = get_opt_postfix(mem);
return 0;
}
static int fio_clock_source_cb(void *data, const char *str)
{
struct thread_data *td = cb_data_to_td(data);
fio_clock_source = td->o.clocksource;
fio_clock_source_set = 1;
fio_clock_init();
return 0;
}
static int str_rwmix_read_cb(void *data, unsigned long long *val)
{
struct thread_data *td = cb_data_to_td(data);
td->o.rwmix[DDIR_READ] = *val;
td->o.rwmix[DDIR_WRITE] = 100 - *val;
return 0;
}
static int str_rwmix_write_cb(void *data, unsigned long long *val)
{
struct thread_data *td = cb_data_to_td(data);
td->o.rwmix[DDIR_WRITE] = *val;
td->o.rwmix[DDIR_READ] = 100 - *val;
return 0;
}
static int str_exitall_cb(void)
{
exitall_on_terminate = 1;
return 0;
}
#ifdef FIO_HAVE_CPU_AFFINITY
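/*
 * Reduce @mask to the single CPU selected by @cpu_index (taken modulo the
 * number of CPUs currently set in the mask). Returns the resulting count.
 */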
int fio_cpus_split(os_cpu_mask_t *mask, unsigned int cpu_index)
{
unsigned int i, index, cpus_in_mask;
const long max_cpu = cpus_online();
cpus_in_mask = fio_cpu_count(mask);
cpu_index = cpu_index % cpus_in_mask;
index = 0;
for (i = 0; i < max_cpu; i++) {
if (!fio_cpu_isset(mask, i))
continue;
if (cpu_index != index)
fio_cpu_clear(mask, i);
index++;
}
return fio_cpu_count(mask);
}
static int str_cpumask_cb(void *data, unsigned long long *val)
{
struct thread_data *td = cb_data_to_td(data);
unsigned int i;
long max_cpu;
int ret;
if (parse_dryrun())
return 0;
ret = fio_cpuset_init(&td->o.cpumask);
if (ret < 0) {
log_err("fio: cpuset_init failed\n");
td_verror(td, ret, "fio_cpuset_init");
return 1;
}
max_cpu = cpus_online();
for (i = 0; i < sizeof(int) * 8; i++) {
if ((1 << i) & *val) {
if (i >= max_cpu) {
log_err("fio: CPU %d too large (max=%ld)\n", i,
max_cpu - 1);
return 1;
}
dprint(FD_PARSE, "set cpu allowed %d\n", i);
fio_cpu_set(&td->o.cpumask, i);
}
}
return 0;
}
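/*
 * Parse a comma-separated CPU list, where each element may be a single CPU
 * or a "low-high" range, and set the corresponding bits in @mask.
 */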
static int set_cpus_allowed(struct thread_data *td, os_cpu_mask_t *mask,
const char *input)
{
char *cpu, *str, *p;
long max_cpu;
int ret = 0;
ret = fio_cpuset_init(mask);
if (ret < 0) {
log_err("fio: cpuset_init failed\n");
td_verror(td, ret, "fio_cpuset_init");
return 1;
}
p = str = strdup(input);
strip_blank_front(&str);
strip_blank_end(str);
max_cpu = cpus_online();
while ((cpu = strsep(&str, ",")) != NULL) {
char *str2, *cpu2;
int icpu, icpu2;
if (!strlen(cpu))
break;
str2 = cpu;
icpu2 = -1;
while ((cpu2 = strsep(&str2, "-")) != NULL) {
if (!strlen(cpu2))
break;
icpu2 = atoi(cpu2);
}
icpu = atoi(cpu);
if (icpu2 == -1)
icpu2 = icpu;
while (icpu <= icpu2) {
if (icpu >= FIO_MAX_CPUS) {
log_err("fio: your OS only supports up to"
" %d CPUs\n", (int) FIO_MAX_CPUS);
ret = 1;
break;
}
if (icpu >= max_cpu) {
log_err("fio: CPU %d too large (max=%ld)\n",
icpu, max_cpu - 1);
ret = 1;
break;
}
dprint(FD_PARSE, "set cpu allowed %d\n", icpu);
fio_cpu_set(mask, icpu);
icpu++;
}
if (ret)
break;
}
free(p);
return ret;
}
static int str_cpus_allowed_cb(void *data, const char *input)
{
struct thread_data *td = cb_data_to_td(data);
if (parse_dryrun())
return 0;
return set_cpus_allowed(td, &td->o.cpumask, input);
}
static int str_verify_cpus_allowed_cb(void *data, const char *input)
{
struct thread_data *td = cb_data_to_td(data);
if (parse_dryrun())
return 0;
return set_cpus_allowed(td, &td->o.verify_cpumask, input);
}
#ifdef CONFIG_ZLIB
static int str_log_cpus_allowed_cb(void *data, const char *input)
{
struct thread_data *td = cb_data_to_td(data);
if (parse_dryrun())
return 0;
return set_cpus_allowed(td, &td->o.log_gz_cpumask, input);
}
#endif /* CONFIG_ZLIB */
#endif /* FIO_HAVE_CPU_AFFINITY */
#ifdef CONFIG_LIBNUMA
static int str_numa_cpunodes_cb(void *data, char *input)
{
struct thread_data *td = cb_data_to_td(data);
struct bitmask *verify_bitmask;
if (parse_dryrun())
return 0;
/* numa_parse_nodestring() parses a character string list
* of nodes into a bit mask. The bit mask is allocated by
* numa_allocate_nodemask(), so it should be freed by
* numa_free_nodemask().
*/
verify_bitmask = numa_parse_nodestring(input);
if (verify_bitmask == NULL) {
log_err("fio: numa_parse_nodestring failed\n");
td_verror(td, 1, "str_numa_cpunodes_cb");
return 1;
}
numa_free_nodemask(verify_bitmask);
td->o.numa_cpunodes = strdup(input);
return 0;
}
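/*
 * Parse "numa_mem_policy=<mode>[:<nodelist>]". "prefer" requires a single
 * node, "bind" requires a nodelist, "interleave" defaults to all nodes,
 * and "default"/"local" must not have a nodelist.
 */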
static int str_numa_mpol_cb(void *data, char *input)
{
struct thread_data *td = cb_data_to_td(data);
const char * const policy_types[] =
{ "default", "prefer", "bind", "interleave", "local", NULL };
int i;
char *nodelist;
struct bitmask *verify_bitmask;
if (parse_dryrun())
return 0;
nodelist = strchr(input, ':');
if (nodelist) {
/* NUL-terminate mode */
*nodelist++ = '\0';
}
for (i = 0; i <= MPOL_LOCAL; i++) {
if (!strcmp(input, policy_types[i])) {
td->o.numa_mem_mode = i;
break;
}
}
if (i > MPOL_LOCAL) {
log_err("fio: memory policy should be: default, prefer, bind, interleave, local\n");
goto out;
}
switch (td->o.numa_mem_mode) {
case MPOL_PREFERRED:
/*
* Insist on a nodelist of one node only
*/
if (nodelist) {
char *rest = nodelist;
while (isdigit(*rest))
rest++;
if (*rest) {
log_err("fio: one node only for \'prefer\'\n");
goto out;
}
} else {
log_err("fio: one node is needed for \'prefer\'\n");
goto out;
}
break;
case MPOL_INTERLEAVE:
/*
* Default to online nodes with memory if no nodelist
*/
if (!nodelist)
nodelist = strdup("all");
break;
case MPOL_LOCAL:
case MPOL_DEFAULT:
/*
* Don't allow a nodelist
*/
if (nodelist) {
log_err("fio: NO nodelist for \'local\'\n");
goto out;
}
break;
case MPOL_BIND:
/*
* Insist on a nodelist
*/
if (!nodelist) {
log_err("fio: a nodelist is needed for \'bind\'\n");
goto out;
}
break;
}
/* numa_parse_nodestring() parses a character string list
* of nodes into a bit mask. The bit mask is allocated by
* numa_allocate_nodemask(), so it should be freed by
* numa_free_nodemask().
*/
switch (td->o.numa_mem_mode) {
case MPOL_PREFERRED:
td->o.numa_mem_prefer_node = atoi(nodelist);
break;
case MPOL_INTERLEAVE:
case MPOL_BIND:
verify_bitmask = numa_parse_nodestring(nodelist);
if (verify_bitmask == NULL) {
log_err("fio: numa_parse_nodestring failed\n");
td_verror(td, 1, "str_numa_memnodes_cb");
return 1;
}
td->o.numa_memnodes = strdup(nodelist);
numa_free_nodemask(verify_bitmask);
break;
case MPOL_LOCAL:
case MPOL_DEFAULT:
default:
break;
}
return 0;
out:
return 1;
}
#endif
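/*
 * file_service_type callback: the optional ":<postfix>" is a file switch
 * count for random/roundrobin/sequential, or a distribution parameter
 * (zipf theta, pareto h, or normal deviation) for zipf/pareto/gauss.
 */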
static int str_fst_cb(void *data, const char *str)
{
struct thread_data *td = cb_data_to_td(data);
double val;
bool done = false;
char *nr;
td->file_service_nr = 1;
switch (td->o.file_service_type) {
case FIO_FSERVICE_RANDOM:
case FIO_FSERVICE_RR:
case FIO_FSERVICE_SEQ:
nr = get_opt_postfix(str);
if (nr) {
td->file_service_nr = atoi(nr);
free(nr);
}
done = true;
break;
case FIO_FSERVICE_ZIPF:
val = FIO_DEF_ZIPF;
break;
case FIO_FSERVICE_PARETO:
val = FIO_DEF_PARETO;
break;
case FIO_FSERVICE_GAUSS:
val = 0.0;
break;
default:
log_err("fio: bad file service type: %d\n", td->o.file_service_type);
return 1;
}
if (done)
return 0;
nr = get_opt_postfix(str);
if (nr && !str_to_float(nr, &val, 0)) {
log_err("fio: file service type random postfix parsing failed\n");
free(nr);
return 1;
}
free(nr);
switch (td->o.file_service_type) {
case FIO_FSERVICE_ZIPF:
if (val == 1.00) {
log_err("fio: zipf theta must be different than 1.0\n");
return 1;
}
if (parse_dryrun())
return 0;
td->zipf_theta = val;
break;
case FIO_FSERVICE_PARETO:
if (val <= 0.00 || val >= 1.00) {
log_err("fio: pareto input out of range (0 < input < 1.0)\n");
return 1;
}
if (parse_dryrun())
return 0;
td->pareto_h = val;
break;
case FIO_FSERVICE_GAUSS:
if (val < 0.00 || val >= 100.00) {
log_err("fio: normal deviation out of range (0 <= input < 100.0)\n");
return 1;
}
if (parse_dryrun())
return 0;
td->gauss_dev = val;
break;
}
return 0;
}
#ifdef CONFIG_SYNC_FILE_RANGE
static int str_sfr_cb(void *data, const char *str)
{
struct thread_data *td = cb_data_to_td(data);
char *nr = get_opt_postfix(str);
td->sync_file_range_nr = 1;
if (nr) {
td->sync_file_range_nr = atoi(nr);
free(nr);
}
return 0;
}
#endif
static int zone_cmp(const void *p1, const void *p2)
{
const struct zone_split *zsp1 = p1;
const struct zone_split *zsp2 = p2;
return (int) zsp2->access_perc - (int) zsp1->access_perc;
}
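/*
 * Build the zoned random distribution table for one direction. Each entry
 * is "access%/size%"; the access percentages must add up to 100, any unset
 * size percentages are spread over the remainder, and the table is sorted
 * by access percentage.
 */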
static int zone_split_ddir(struct thread_options *o, enum fio_ddir ddir,
char *str)
{
unsigned int i, perc, perc_missing, sperc, sperc_missing;
struct split split;
memset(&split, 0, sizeof(split));
if (split_parse_ddir(o, &split, ddir, str))
return 1;
if (!split.nr)
return 0;
o->zone_split[ddir] = malloc(split.nr * sizeof(struct zone_split));
o->zone_split_nr[ddir] = split.nr;
for (i = 0; i < split.nr; i++) {
o->zone_split[ddir][i].access_perc = split.val1[i];
o->zone_split[ddir][i].size_perc = split.val2[i];
}
/*
* Now check if the percentages add up, and how much is missing
*/
perc = perc_missing = 0;
sperc = sperc_missing = 0;
for (i = 0; i < o->zone_split_nr[ddir]; i++) {
struct zone_split *zsp = &o->zone_split[ddir][i];
if (zsp->access_perc == (uint8_t) -1U)
perc_missing++;
else
perc += zsp->access_perc;
if (zsp->size_perc == (uint8_t) -1U)
sperc_missing++;
else
sperc += zsp->size_perc;
}
if (perc > 100 || sperc > 100) {
log_err("fio: zone_split percentages add to more than 100%%\n");
free(o->zone_split[ddir]);
o->zone_split[ddir] = NULL;
return 1;
}
if (perc < 100) {
log_err("fio: access percentage don't add up to 100 for zoned "
"random distribution (got=%u)\n", perc);
free(o->zone_split[ddir]);
o->zone_split[ddir] = NULL;
return 1;
}
/*
* If values didn't have a percentage set, divide the remains between
* them.
*/
if (perc_missing) {
if (perc_missing == 1 && o->zone_split_nr[ddir] == 1)
perc = 100;
for (i = 0; i < o->zone_split_nr[ddir]; i++) {
struct zone_split *zsp = &o->zone_split[ddir][i];
if (zsp->access_perc == (uint8_t) -1U)
zsp->access_perc = (100 - perc) / perc_missing;
}
}
if (sperc_missing) {
if (sperc_missing == 1 && o->zone_split_nr[ddir] == 1)
sperc = 100;
for (i = 0; i < o->zone_split_nr[ddir]; i++) {
struct zone_split *zsp = &o->zone_split[ddir][i];
if (zsp->size_perc == (uint8_t) -1U)
zsp->size_perc = (100 - sperc) / sperc_missing;
}
}
/*
* now sort based on percentages, for ease of lookup
*/
qsort(o->zone_split[ddir], o->zone_split_nr[ddir], sizeof(struct zone_split), zone_cmp);
return 0;
}
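/*
 * Expand the zone_split table for one direction into a 100-slot lookup
 * table indexed by access percentage, recording cumulative size ranges.
 */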
static void __td_zone_gen_index(struct thread_data *td, enum fio_ddir ddir)
{
unsigned int i, j, sprev, aprev;
td->zone_state_index[ddir] = malloc(sizeof(struct zone_split_index) * 100);
sprev = aprev = 0;
for (i = 0; i < td->o.zone_split_nr[ddir]; i++) {
struct zone_split *zsp = &td->o.zone_split[ddir][i];
for (j = aprev; j < aprev + zsp->access_perc; j++) {
struct zone_split_index *zsi = &td->zone_state_index[ddir][j];
zsi->size_perc = sprev + zsp->size_perc;
zsi->size_perc_prev = sprev;
}
aprev += zsp->access_perc;
sprev += zsp->size_perc;
}
}
/*
* Generate state table for indexes, so we don't have to do it inline from
* the hot IO path
*/
static void td_zone_gen_index(struct thread_data *td)
{
int i;
td->zone_state_index = malloc(DDIR_RWDIR_CNT *
sizeof(struct zone_split_index *));
for (i = 0; i < DDIR_RWDIR_CNT; i++)
__td_zone_gen_index(td, i);
}
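/*
 * Parse "random_distribution=zoned:..." input, e.g.
 * "zoned:60/10:30/20:8/30:2/40" directs 60% of accesses to the first 10%
 * of the space, 30% to the next 20%, and so on.
 */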
static int parse_zoned_distribution(struct thread_data *td, const char *input)
{
char *str, *p;
int i, ret = 0;
p = str = strdup(input);
strip_blank_front(&str);
strip_blank_end(str);
/* We expect it to start like that, bail if not */
if (strncmp(str, "zoned:", 6)) {
log_err("fio: mismatch in zoned input <%s>\n", str);
free(p);
return 1;
}
str += strlen("zoned:");
ret = str_split_parse(td, str, zone_split_ddir);
free(p);
for (i = 0; i < DDIR_RWDIR_CNT; i++) {
int j;
dprint(FD_PARSE, "zone ddir %d (nr=%u): \n", i, td->o.zone_split_nr[i]);
for (j = 0; j < td->o.zone_split_nr[i]; j++) {
struct zone_split *zsp = &td->o.zone_split[i][j];
dprint(FD_PARSE, "\t%d: %u/%u\n", j, zsp->access_perc,
zsp->size_perc);
}
}
if (parse_dryrun()) {
int i;
for (i = 0; i < DDIR_RWDIR_CNT; i++) {
free(td->o.zone_split[i]);
td->o.zone_split[i] = NULL;
td->o.zone_split_nr[i] = 0;
}
return ret;
}
if (!ret)
td_zone_gen_index(td);
else {
for (i = 0; i < DDIR_RWDIR_CNT; i++)
td->o.zone_split_nr[i] = 0;
}
return ret;
}
static int str_random_distribution_cb(void *data, const char *str)
{
struct thread_data *td = cb_data_to_td(data);
double val;
char *nr;
if (td->o.random_distribution == FIO_RAND_DIST_ZIPF)
val = FIO_DEF_ZIPF;
else if (td->o.random_distribution == FIO_RAND_DIST_PARETO)
val = FIO_DEF_PARETO;
else if (td->o.random_distribution == FIO_RAND_DIST_GAUSS)
val = 0.0;
else if (td->o.random_distribution == FIO_RAND_DIST_ZONED)
return parse_zoned_distribution(td, str);
else
return 0;
nr = get_opt_postfix(str);
if (nr && !str_to_float(nr, &val, 0)) {
log_err("fio: random postfix parsing failed\n");
free(nr);
return 1;
}
free(nr);
if (td->o.random_distribution == FIO_RAND_DIST_ZIPF) {
if (val == 1.00) {
log_err("fio: zipf theta must different than 1.0\n");
return 1;
}
if (parse_dryrun())
return 0;
td->o.zipf_theta.u.f = val;
} else if (td->o.random_distribution == FIO_RAND_DIST_PARETO) {
if (val <= 0.00 || val >= 1.00) {
log_err("fio: pareto input out of range (0 < input < 1.0)\n");
return 1;
}
if (parse_dryrun())
return 0;
td->o.pareto_h.u.f = val;
} else {
if (val < 0.00 || val >= 100.0) {
log_err("fio: normal deviation out of range (0 <= input < 100.0)\n");
return 1;
}
if (parse_dryrun())
return 0;
td->o.gauss_dev.u.f = val;
}
return 0;
}
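/*
 * steadystate callback: parse the threshold that follows the criterion,
 * either as a percentage, an IOPS count, or a bandwidth value, depending
 * on the selected steady state criterion.
 */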
static int str_steadystate_cb(void *data, const char *str)
{
struct thread_data *td = cb_data_to_td(data);
double val;
char *nr;
char *pct;
long long ll;
if (td->o.ss_state != FIO_SS_IOPS && td->o.ss_state != FIO_SS_IOPS_SLOPE &&
td->o.ss_state != FIO_SS_BW && td->o.ss_state != FIO_SS_BW_SLOPE) {
/* should be impossible to get here */
log_err("fio: unknown steady state criterion\n");
return 1;
}
nr = get_opt_postfix(str);
if (!nr) {
log_err("fio: steadystate threshold must be specified in addition to criterion\n");
free(nr);
return 1;
}
/* ENHANCEMENT Allow fio to understand size=10.2% and use here */
pct = strstr(nr, "%");
if (pct) {
*pct = '\0';
strip_blank_end(nr);
if (!str_to_float(nr, &val, 0)) {
log_err("fio: could not parse steadystate threshold percentage\n");
free(nr);
return 1;
}
dprint(FD_PARSE, "set steady state threshold to %f%%\n", val);
free(nr);
if (parse_dryrun())
return 0;
td->o.ss_state |= __FIO_SS_PCT;
td->o.ss_limit.u.f = val;
} else if (td->o.ss_state & __FIO_SS_IOPS) {
if (!str_to_float(nr, &val, 0)) {
log_err("fio: steadystate IOPS threshold postfix parsing failed\n");
free(nr);
return 1;
}
dprint(FD_PARSE, "set steady state IOPS threshold to %f\n", val);
free(nr);
if (parse_dryrun())
return 0;
td->o.ss_limit.u.f = val;
} else { /* bandwidth criterion */
if (str_to_decimal(nr, &ll, 1, td, 0, 0)) {
log_err("fio: steadystate BW threshold postfix parsing failed\n");
free(nr);
return 1;
}
dprint(FD_PARSE, "set steady state BW threshold to %lld\n", ll);
free(nr);
if (parse_dryrun())
return 0;
td->o.ss_limit.u.f = (double) ll;
}
td->ss.state = td->o.ss_state;
return 0;
}
/*
* Return next name in the string. Files are separated with ':'. If the ':'
* is escaped with a '\', then that ':' is part of the filename and does not
* indicate a new file.
*/
static char *get_next_name(char **ptr)
{
char *str = *ptr;
char *p, *start;
if (!str || !strlen(str))
return NULL;
start = str;
do {
/*
* No colon, we are done
*/
p = strchr(str, ':');
if (!p) {
*ptr = NULL;
break;
}
/*
* We got a colon, but it's the first character. Skip and
* continue
*/
if (p == start) {
str = ++start;
continue;
}
if (*(p - 1) != '\\') {
*p = '\0';
*ptr = p + 1;
break;
}
memmove(p - 1, p, strlen(p) + 1);
str = p;
} while (1);
return start;
}
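/*
 * Count the number of ':'-separated names in @input.
 */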
static int get_max_name_idx(char *input)
{
unsigned int cur_idx;
char *str, *p;
p = str = strdup(input);
for (cur_idx = 0; ; cur_idx++)
if (get_next_name(&str) == NULL)
break;
free(p);
return cur_idx;
}
/*
* Returns the directory at the index; indexes greater than the number of
* entries wrap around via modulo division of the index.
*/
int set_name_idx(char *target, size_t tlen, char *input, int index,
bool unique_filename)
{
unsigned int cur_idx;
int len;
char *fname, *str, *p;
p = str = strdup(input);
index %= get_max_name_idx(input);
for (cur_idx = 0; cur_idx <= index; cur_idx++)
fname = get_next_name(&str);
if (client_sockaddr_str[0] && unique_filename) {
len = snprintf(target, tlen, "%s/%s.", fname,
client_sockaddr_str);
} else
len = snprintf(target, tlen, "%s/", fname);
target[tlen - 1] = '\0';
free(p);
return len;
}
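/*
 * filename callback: add each ':'-separated name in the option string as a
 * file for this job.
 */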
static int str_filename_cb(void *data, const char *input)
{
struct thread_data *td = cb_data_to_td(data);
char *fname, *str, *p;
p = str = strdup(input);
strip_blank_front(&str);
strip_blank_end(str);
/*
* Ignore what we may already have from nrfiles option.
*/
if (!td->files_index)
td->o.nr_files = 0;
while ((fname = get_next_name(&str)) != NULL) {
if (!strlen(fname))
break;
add_file(td, fname, 0, 1);
}
free(p);
return 0;
}
static int str_directory_cb(void *data, const char fio_unused *unused)
{
struct thread_data *td = cb_data_to_td(data);
struct stat sb;
char *dirname, *str, *p;
int ret = 0;
if (parse_dryrun())
return 0;
p = str = strdup(td->o.directory);
while ((dirname = get_next_name(&str)) != NULL) {
if (lstat(dirname, &sb) < 0) {
ret = errno;
log_err("fio: %s is not a directory\n", dirname);
td_verror(td, ret, "lstat");
goto out;
}
if (!S_ISDIR(sb.st_mode)) {
log_err("fio: %s is not a directory\n", dirname);
ret = 1;
goto out;
}
}
out:
free(p);
return ret;
}
static int str_opendir_cb(void *data, const char fio_unused *str)
{
struct thread_data *td = cb_data_to_td(data);
if (parse_dryrun())
return 0;
if (!td->files_index)
td->o.nr_files = 0;
return add_dir_files(td, td->o.opendir);
}
static int str_buffer_pattern_cb(void *data, const char *input)
{
struct thread_data *td = cb_data_to_td(data);
int ret;
/* FIXME: for now buffer pattern does not support formats */
ret = parse_and_fill_pattern(input, strlen(input), td->o.buffer_pattern,
MAX_PATTERN_SIZE, NULL, 0, NULL, NULL);
if (ret < 0)
return 1;
assert(ret != 0);
td->o.buffer_pattern_bytes = ret;
/*
* If this job is doing any reading or has compression set,
* ensure that we refill buffers for writes or we could be
* invalidating the pattern through reads.
*/
if (!td->o.compress_percentage && !td_read(td))
td->o.refill_buffers = 0;
else
td->o.refill_buffers = 1;
td->o.scramble_buffers = 0;
td->o.zero_buffers = 0;
return 0;
}
static int str_buffer_compress_cb(void *data, unsigned long long *il)
{
struct thread_data *td = cb_data_to_td(data);
td->flags |= TD_F_COMPRESS;
td->o.compress_percentage = *il;
return 0;
}
static int str_dedupe_cb(void *data, unsigned long long *il)
{
struct thread_data *td = cb_data_to_td(data);
td->flags |= TD_F_COMPRESS;
td->o.dedupe_percentage = *il;
td->o.refill_buffers = 1;
return 0;
}
static int str_verify_pattern_cb(void *data, const char *input)
{
struct thread_data *td = cb_data_to_td(data);
int ret;
td->o.verify_fmt_sz = ARRAY_SIZE(td->o.verify_fmt);
ret = parse_and_fill_pattern(input, strlen(input), td->o.verify_pattern,
MAX_PATTERN_SIZE, fmt_desc, sizeof(fmt_desc),
td->o.verify_fmt, &td->o.verify_fmt_sz);
if (ret < 0)
return 1;
assert(ret != 0);
td->o.verify_pattern_bytes = ret;
/*
* VERIFY_* could already be set
*/
if (!fio_option_is_set(&td->o, verify))
td->o.verify = VERIFY_PATTERN;
return 0;
}
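/*
 * gtod_reduce callback: when enabled, disable lat/slat/clat/bw accounting
 * and clat percentiles, and allow cached time stamps via tv_cache_mask.
 */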
static int str_gtod_reduce_cb(void *data, int *il)
{
struct thread_data *td = cb_data_to_td(data);
int val = *il;
td->o.disable_lat = !!val;
td->o.disable_clat = !!val;
td->o.disable_slat = !!val;
td->o.disable_bw = !!val;
td->o.clat_percentiles = !val;
if (val)
td->tv_cache_mask = 63;
return 0;
}
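/*
 * size callback: the option parser encodes a percentage as (-1ULL - pct);
 * decode that into size_percent, otherwise store the absolute size.
 */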
static int str_size_cb(void *data, unsigned long long *__val)
{
struct thread_data *td = cb_data_to_td(data);
unsigned long long v = *__val;
if (parse_is_percent(v)) {
td->o.size = 0;
td->o.size_percent = -1ULL - v;
} else
td->o.size = v;
return 0;
}
static int str_write_bw_log_cb(void *data, const char *str)
{
struct thread_data *td = cb_data_to_td(data);
if (str)
td->o.bw_log_file = strdup(str);
td->o.write_bw_log = 1;
return 0;
}
static int str_write_lat_log_cb(void *data, const char *str)
{
struct thread_data *td = cb_data_to_td(data);
if (str)
td->o.lat_log_file = strdup(str);
td->o.write_lat_log = 1;
return 0;
}
static int str_write_iops_log_cb(void *data, const char *str)
{
struct thread_data *td = cb_data_to_td(data);
if (str)
td->o.iops_log_file = strdup(str);
td->o.write_iops_log = 1;
return 0;
}
static int str_write_hist_log_cb(void *data, const char *str)
{
struct thread_data *td = cb_data_to_td(data);
if (str)
td->o.hist_log_file = strdup(str);
td->o.write_hist_log = 1;
return 0;
}
static int rw_verify(struct fio_option *o, void *data)
{
struct thread_data *td = cb_data_to_td(data);
if (read_only && td_write(td)) {
log_err("fio: job <%s> has write bit set, but fio is in"
" read-only mode\n", td->o.name);
return 1;
}
return 0;
}
static int gtod_cpu_verify(struct fio_option *o, void *data)
{
#ifndef FIO_HAVE_CPU_AFFINITY
struct thread_data *td = cb_data_to_td(data);
if (td->o.gtod_cpu) {
log_err("fio: platform must support CPU affinity for"
"gettimeofday() offloading\n");
return 1;
}
#endif
return 0;
}
/*
* Map of job/command line options
*/
struct fio_option fio_options[FIO_MAX_OPTS] = {
{
.name = "description",
.lname = "Description of job",
.type = FIO_OPT_STR_STORE,
.off1 = offsetof(struct thread_options, description),
.help = "Text job description",
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_DESC,
},
{
.name = "name",
.lname = "Job name",
.type = FIO_OPT_STR_STORE,
.off1 = offsetof(struct thread_options, name),
.help = "Name of this job",
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_DESC,
},
{
.name = "wait_for",
.lname = "Waitee name",
.type = FIO_OPT_STR_STORE,
.off1 = offsetof(struct thread_options, wait_for),
.help = "Name of the job this one wants to wait for before starting",
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_DESC,
},
{
.name = "filename",
.lname = "Filename(s)",
.type = FIO_OPT_STR_STORE,
.off1 = offsetof(struct thread_options, filename),
.cb = str_filename_cb,
.prio = -1, /* must come after "directory" */
.help = "File(s) to use for the workload",
.category = FIO_OPT_C_FILE,
.group = FIO_OPT_G_FILENAME,
},
{
.name = "directory",
.lname = "Directory",
.type = FIO_OPT_STR_STORE,
.off1 = offsetof(struct thread_options, directory),
.cb = str_directory_cb,
.help = "Directory to store files in",
.category = FIO_OPT_C_FILE,
.group = FIO_OPT_G_FILENAME,
},
{
.name = "filename_format",
.lname = "Filename Format",
.type = FIO_OPT_STR_STORE,
.off1 = offsetof(struct thread_options, filename_format),
.prio = -1, /* must come after "directory" */
.help = "Override default $jobname.$jobnum.$filenum naming",
.def = "$jobname.$jobnum.$filenum",
.category = FIO_OPT_C_FILE,
.group = FIO_OPT_G_FILENAME,
},
{
.name = "unique_filename",
.lname = "Unique Filename",
.type = FIO_OPT_BOOL,
.off1 = offsetof(struct thread_options, unique_filename),
.help = "For network clients, prefix file with source IP",
.def = "1",
.category = FIO_OPT_C_FILE,
.group = FIO_OPT_G_FILENAME,
},
{
.name = "lockfile",
.lname = "Lockfile",
.type = FIO_OPT_STR,
.off1 = offsetof(struct thread_options, file_lock_mode),
.help = "Lock file when doing IO to it",
.prio = 1,
.parent = "filename",
.hide = 0,
.def = "none",
.category = FIO_OPT_C_FILE,
.group = FIO_OPT_G_FILENAME,
.posval = {
{ .ival = "none",
.oval = FILE_LOCK_NONE,
.help = "No file locking",
},
{ .ival = "exclusive",
.oval = FILE_LOCK_EXCLUSIVE,
.help = "Exclusive file lock",
},
{
.ival = "readwrite",
.oval = FILE_LOCK_READWRITE,
.help = "Read vs write lock",
},
},
},
{
.name = "opendir",
.lname = "Open directory",
.type = FIO_OPT_STR_STORE,
.off1 = offsetof(struct thread_options, opendir),
.cb = str_opendir_cb,
.help = "Recursively add files from this directory and down",
.category = FIO_OPT_C_FILE,
.group = FIO_OPT_G_FILENAME,
},
{
.name = "rw",
.lname = "Read/write",
.alias = "readwrite",
.type = FIO_OPT_STR,
.cb = str_rw_cb,
.off1 = offsetof(struct thread_options, td_ddir),
.help = "IO direction",
.def = "read",
.verify = rw_verify,
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_IO_BASIC,
.posval = {
{ .ival = "read",
.oval = TD_DDIR_READ,
.help = "Sequential read",
},
{ .ival = "write",
.oval = TD_DDIR_WRITE,
.help = "Sequential write",
},
{ .ival = "trim",
.oval = TD_DDIR_TRIM,
.help = "Sequential trim",
},
{ .ival = "randread",
.oval = TD_DDIR_RANDREAD,
.help = "Random read",
},
{ .ival = "randwrite",
.oval = TD_DDIR_RANDWRITE,
.help = "Random write",
},
{ .ival = "randtrim",
.oval = TD_DDIR_RANDTRIM,
.help = "Random trim",
},
{ .ival = "rw",
.oval = TD_DDIR_RW,
.help = "Sequential read and write mix",
},
{ .ival = "readwrite",
.oval = TD_DDIR_RW,
.help = "Sequential read and write mix",
},
{ .ival = "randrw",
.oval = TD_DDIR_RANDRW,
.help = "Random read and write mix"
},
{ .ival = "trimwrite",
.oval = TD_DDIR_TRIMWRITE,
.help = "Trim and write mix, trims preceding writes"
},
},
},
{
.name = "rw_sequencer",
.lname = "RW Sequencer",
.type = FIO_OPT_STR,
.off1 = offsetof(struct thread_options, rw_seq),
.help = "IO offset generator modifier",
.def = "sequential",
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_IO_BASIC,
.posval = {
{ .ival = "sequential",
.oval = RW_SEQ_SEQ,
.help = "Generate sequential offsets",
},
{ .ival = "identical",
.oval = RW_SEQ_IDENT,
.help = "Generate identical offsets",
},
},
},
{
.name = "ioengine",
.lname = "IO Engine",
.type = FIO_OPT_STR_STORE,
.off1 = offsetof(struct thread_options, ioengine),
.help = "IO engine to use",
.def = FIO_PREFERRED_ENGINE,
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_IO_BASIC,
.posval = {
{ .ival = "sync",
.help = "Use read/write",
},
{ .ival = "psync",
.help = "Use pread/pwrite",
},
{ .ival = "vsync",
.help = "Use readv/writev",
},
#ifdef CONFIG_PWRITEV
{ .ival = "pvsync",
.help = "Use preadv/pwritev",
},
#endif
#ifdef FIO_HAVE_PWRITEV2
{ .ival = "pvsync2",
.help = "Use preadv2/pwritev2",
},
#endif
#ifdef CONFIG_LIBAIO
{ .ival = "libaio",
.help = "Linux native asynchronous IO",
},
#endif
#ifdef CONFIG_POSIXAIO
{ .ival = "posixaio",
.help = "POSIX asynchronous IO",
},
#endif
#ifdef CONFIG_SOLARISAIO
{ .ival = "solarisaio",
.help = "Solaris native asynchronous IO",
},
#endif
#ifdef CONFIG_WINDOWSAIO
{ .ival = "windowsaio",
.help = "Windows native asynchronous IO"
},
#endif
#ifdef CONFIG_RBD
{ .ival = "rbd",
.help = "Rados Block Device asynchronous IO"
},
#endif
{ .ival = "mmap",
.help = "Memory mapped IO"
},
#ifdef CONFIG_LINUX_SPLICE
{ .ival = "splice",
.help = "splice/vmsplice based IO",
},
{ .ival = "netsplice",
.help = "splice/vmsplice to/from the network",
},
#endif
#ifdef FIO_HAVE_SGIO
{ .ival = "sg",
.help = "SCSI generic v3 IO",
},
#endif
{ .ival = "null",
.help = "Testing engine (no data transfer)",
},
{ .ival = "net",
.help = "Network IO",
},
{ .ival = "cpuio",
.help = "CPU cycle burner engine",
},
#ifdef CONFIG_GUASI
{ .ival = "guasi",
.help = "GUASI IO engine",
},
#endif
#ifdef FIO_HAVE_BINJECT
{ .ival = "binject",
.help = "binject direct inject block engine",
},
#endif
#ifdef CONFIG_RDMA
{ .ival = "rdma",
.help = "RDMA IO engine",
},
#endif
#ifdef CONFIG_FUSION_AW
{ .ival = "fusion-aw-sync",
.help = "Fusion-io atomic write engine",
},
#endif
#ifdef CONFIG_LINUX_EXT4_MOVE_EXTENT
{ .ival = "e4defrag",
.help = "ext4 defrag engine",
},
#endif
#ifdef CONFIG_LINUX_FALLOCATE
{ .ival = "falloc",
.help = "fallocate() file based engine",
},
#endif
#ifdef CONFIG_GFAPI
{ .ival = "gfapi",
.help = "Glusterfs libgfapi(sync) based engine"
},
{ .ival = "gfapi_async",
.help = "Glusterfs libgfapi(async) based engine"
},
#endif
#ifdef CONFIG_LIBHDFS
{ .ival = "libhdfs",
.help = "Hadoop Distributed Filesystem (HDFS) engine"
},
#endif
#ifdef CONFIG_PMEMBLK
{ .ival = "pmemblk",
.help = "NVML libpmemblk based IO engine",
},
#endif
#ifdef CONFIG_LINUX_DEVDAX
{ .ival = "dev-dax",
.help = "DAX Device based IO engine",
},
#endif
{ .ival = "external",
.help = "Load external engine (append name)",
},
},
},
{
.name = "iodepth",
.lname = "IO Depth",
.type = FIO_OPT_INT,
.off1 = offsetof(struct thread_options, iodepth),
.help = "Number of IO buffers to keep in flight",
.minval = 1,
.interval = 1,
.def = "1",
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_IO_BASIC,
},
{
.name = "iodepth_batch",
.lname = "IO Depth batch",
.alias = "iodepth_batch_submit",
.type = FIO_OPT_INT,
.off1 = offsetof(struct thread_options, iodepth_batch),
.help = "Number of IO buffers to submit in one go",
.parent = "iodepth",
.hide = 1,
.interval = 1,
.def = "1",
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_IO_BASIC,
},
{
.name = "iodepth_batch_complete_min",
.lname = "Min IO depth batch complete",
.alias = "iodepth_batch_complete",
.type = FIO_OPT_INT,
.off1 = offsetof(struct thread_options, iodepth_batch_complete_min),
.help = "Min number of IO buffers to retrieve in one go",
.parent = "iodepth",
.hide = 1,
.minval = 0,
.interval = 1,
.def = "1",
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_IO_BASIC,
},
{
.name = "iodepth_batch_complete_max",
.lname = "Max IO depth batch complete",
.type = FIO_OPT_INT,
.off1 = offsetof(struct thread_options, iodepth_batch_complete_max),
.help = "Max number of IO buffers to retrieve in one go",
.parent = "iodepth",
.hide = 1,
.minval = 0,
.interval = 1,
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_IO_BASIC,
},
{
.name = "iodepth_low",
.lname = "IO Depth batch low",
.type = FIO_OPT_INT,
.off1 = offsetof(struct thread_options, iodepth_low),
.help = "Low water mark for queuing depth",
.parent = "iodepth",
.hide = 1,
.interval = 1,
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_IO_BASIC,
},
{
.name = "io_submit_mode",
.lname = "IO submit mode",
.type = FIO_OPT_STR,
.off1 = offsetof(struct thread_options, io_submit_mode),
.help = "How IO submissions and completions are done",
.def = "inline",
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_IO_BASIC,
.posval = {
{ .ival = "inline",
.oval = IO_MODE_INLINE,
.help = "Submit and complete IO inline",
},
{ .ival = "offload",
.oval = IO_MODE_OFFLOAD,
.help = "Offload submit and complete to threads",
},
},
},
{
.name = "size",
.lname = "Size",
.type = FIO_OPT_STR_VAL,
.cb = str_size_cb,
.off1 = offsetof(struct thread_options, size),
.help = "Total size of device or files",
.interval = 1024 * 1024,
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_INVALID,
},
{
.name = "io_size",
.alias = "io_limit",
.lname = "IO Size",
.type = FIO_OPT_STR_VAL,
.off1 = offsetof(struct thread_options, io_size),
.help = "Total size of I/O to be performed",
.interval = 1024 * 1024,
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_INVALID,
},
{
.name = "fill_device",
.lname = "Fill device",
.alias = "fill_fs",
.type = FIO_OPT_BOOL,
.off1 = offsetof(struct thread_options, fill_device),
.help = "Write until an ENOSPC error occurs",
.def = "0",
.category = FIO_OPT_C_FILE,
.group = FIO_OPT_G_INVALID,
},
{
.name = "filesize",
.lname = "File size",
.type = FIO_OPT_STR_VAL,
.off1 = offsetof(struct thread_options, file_size_low),
.off2 = offsetof(struct thread_options, file_size_high),
.minval = 1,
.help = "Size of individual files",
.interval = 1024 * 1024,
.category = FIO_OPT_C_FILE,
.group = FIO_OPT_G_INVALID,
},
{
.name = "file_append",
.lname = "File append",
.type = FIO_OPT_BOOL,
.off1 = offsetof(struct thread_options, file_append),
.help = "IO will start at the end of the file(s)",
.def = "0",
.category = FIO_OPT_C_FILE,
.group = FIO_OPT_G_INVALID,
},
{
.name = "offset",
.lname = "IO offset",
.alias = "fileoffset",
.type = FIO_OPT_STR_VAL,
.off1 = offsetof(struct thread_options, start_offset),
.help = "Start IO from this offset",
.def = "0",
.interval = 1024 * 1024,
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_INVALID,
},
{
.name = "offset_increment",
.lname = "IO offset increment",
.type = FIO_OPT_STR_VAL,
.off1 = offsetof(struct thread_options, offset_increment),
.help = "What is the increment from one offset to the next",
.parent = "offset",
.hide = 1,
.def = "0",
.interval = 1024 * 1024,
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_INVALID,
},
{
.name = "number_ios",
.lname = "Number of IOs to perform",
.type = FIO_OPT_STR_VAL,
.off1 = offsetof(struct thread_options, number_ios),
.help = "Force job completion after this number of IOs",
.def = "0",
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_INVALID,
},
{
.name = "bs",
.lname = "Block size",
.alias = "blocksize",
.type = FIO_OPT_INT,
.off1 = offsetof(struct thread_options, bs[DDIR_READ]),
.off2 = offsetof(struct thread_options, bs[DDIR_WRITE]),
.off3 = offsetof(struct thread_options, bs[DDIR_TRIM]),
.minval = 1,
.help = "Block size unit",
.def = "4096",
.parent = "rw",
.hide = 1,
.interval = 512,
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_INVALID,
},
{
.name = "ba",
.lname = "Block size align",
.alias = "blockalign",
.type = FIO_OPT_INT,
.off1 = offsetof(struct thread_options, ba[DDIR_READ]),
.off2 = offsetof(struct thread_options, ba[DDIR_WRITE]),
.off3 = offsetof(struct thread_options, ba[DDIR_TRIM]),
.minval = 1,
.help = "IO block offset alignment",
.parent = "rw",
.hide = 1,
.interval = 512,
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_INVALID,
},
{
.name = "bsrange",
.lname = "Block size range",
.alias = "blocksize_range",
.type = FIO_OPT_RANGE,
.off1 = offsetof(struct thread_options, min_bs[DDIR_READ]),
.off2 = offsetof(struct thread_options, max_bs[DDIR_READ]),
.off3 = offsetof(struct thread_options, min_bs[DDIR_WRITE]),
.off4 = offsetof(struct thread_options, max_bs[DDIR_WRITE]),
.off5 = offsetof(struct thread_options, min_bs[DDIR_TRIM]),
.off6 = offsetof(struct thread_options, max_bs[DDIR_TRIM]),
.minval = 1,
.help = "Set block size range (in more detail than bs)",
.parent = "rw",
.hide = 1,
.interval = 4096,
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_INVALID,
},
{
.name = "bssplit",
.lname = "Block size split",
.type = FIO_OPT_STR,
.cb = str_bssplit_cb,
.off1 = offsetof(struct thread_options, bssplit),
.help = "Set a specific mix of block sizes",
.parent = "rw",
.hide = 1,
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_INVALID,
},
{
.name = "bs_unaligned",
.lname = "Block size unaligned",
.alias = "blocksize_unaligned",
.type = FIO_OPT_STR_SET,
.off1 = offsetof(struct thread_options, bs_unaligned),
.help = "Don't sector align IO buffer sizes",
.parent = "rw",
.hide = 1,
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_INVALID,
},
{
.name = "bs_is_seq_rand",
.lname = "Block size division is seq/random (not read/write)",
.type = FIO_OPT_BOOL,
.off1 = offsetof(struct thread_options, bs_is_seq_rand),
.help = "Consider any blocksize setting to be sequential,random",
.def = "0",
.parent = "blocksize",
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_INVALID,
},
{
.name = "randrepeat",
.lname = "Random repeatable",
.type = FIO_OPT_BOOL,
.off1 = offsetof(struct thread_options, rand_repeatable),
.help = "Use repeatable random IO pattern",
.def = "1",
.parent = "rw",
.hide = 1,
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_RANDOM,
},
{
.name = "randseed",
.lname = "The random generator seed",
.type = FIO_OPT_STR_VAL,
.off1 = offsetof(struct thread_options, rand_seed),
.help = "Set the random generator seed value",
.def = "0x89",
.parent = "rw",
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_RANDOM,
},
{
.name = "use_os_rand",
.lname = "Use OS random",
.type = FIO_OPT_DEPRECATED,
.off1 = offsetof(struct thread_options, dep_use_os_rand),
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_RANDOM,
},
{
.name = "norandommap",
.lname = "No randommap",
.type = FIO_OPT_STR_SET,
.off1 = offsetof(struct thread_options, norandommap),
.help = "Accept potential duplicate random blocks",
.parent = "rw",
.hide = 1,
.hide_on_set = 1,
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_RANDOM,
},
{
.name = "softrandommap",
.lname = "Soft randommap",
.type = FIO_OPT_BOOL,
.off1 = offsetof(struct thread_options, softrandommap),
.help = "Set norandommap if randommap allocation fails",
.parent = "norandommap",
.hide = 1,
.def = "0",
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_RANDOM,
},
{
.name = "random_generator",
.lname = "Random Generator",
.type = FIO_OPT_STR,
.off1 = offsetof(struct thread_options, random_generator),
.help = "Type of random number generator to use",
.def = "tausworthe",
.posval = {
{ .ival = "tausworthe",
.oval = FIO_RAND_GEN_TAUSWORTHE,
.help = "Strong Tausworthe generator",
},
{ .ival = "lfsr",
.oval = FIO_RAND_GEN_LFSR,
.help = "Variable length LFSR",
},
{
.ival = "tausworthe64",
.oval = FIO_RAND_GEN_TAUSWORTHE64,
.help = "64-bit Tausworthe variant",
},
},
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_RANDOM,
},
{
.name = "random_distribution",
.lname = "Random Distribution",
.type = FIO_OPT_STR,
.off1 = offsetof(struct thread_options, random_distribution),
.cb = str_random_distribution_cb,
.help = "Random offset distribution generator",
.def = "random",
.posval = {
{ .ival = "random",
.oval = FIO_RAND_DIST_RANDOM,
.help = "Completely random",
},
{ .ival = "zipf",
.oval = FIO_RAND_DIST_ZIPF,
.help = "Zipf distribution",
},
{ .ival = "pareto",
.oval = FIO_RAND_DIST_PARETO,
.help = "Pareto distribution",
},
{ .ival = "normal",
.oval = FIO_RAND_DIST_GAUSS,
.help = "Normal (Gaussian) distribution",
},
{ .ival = "zoned",
.oval = FIO_RAND_DIST_ZONED,
.help = "Zoned random distribution",
},
},
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_RANDOM,
},
{
.name = "percentage_random",
.lname = "Percentage Random",
.type = FIO_OPT_INT,
.off1 = offsetof(struct thread_options, perc_rand[DDIR_READ]),
.off2 = offsetof(struct thread_options, perc_rand[DDIR_WRITE]),
.off3 = offsetof(struct thread_options, perc_rand[DDIR_TRIM]),
.maxval = 100,
.help = "Percentage of seq/random mix that should be random",
.def = "100,100,100",
.interval = 5,
.inverse = "percentage_sequential",
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_RANDOM,
},
{
.name = "percentage_sequential",
.lname = "Percentage Sequential",
.type = FIO_OPT_DEPRECATED,
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_RANDOM,
},
{
.name = "allrandrepeat",
.lname = "All Random Repeat",
.type = FIO_OPT_BOOL,
.off1 = offsetof(struct thread_options, allrand_repeatable),
.help = "Use repeatable random numbers for everything",
.def = "0",
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_RANDOM,
},
{
.name = "nrfiles",
.lname = "Number of files",
.alias = "nr_files",
.type = FIO_OPT_INT,
.off1 = offsetof(struct thread_options, nr_files),
.help = "Split job workload between this number of files",
.def = "1",
.interval = 1,
.category = FIO_OPT_C_FILE,
.group = FIO_OPT_G_INVALID,
},
{
.name = "openfiles",
.lname = "Number of open files",
.type = FIO_OPT_INT,
.off1 = offsetof(struct thread_options, open_files),
.help = "Number of files to keep open at the same time",
.category = FIO_OPT_C_FILE,
.group = FIO_OPT_G_INVALID,
},
{
.name = "file_service_type",
.lname = "File service type",
.type = FIO_OPT_STR,
.cb = str_fst_cb,
.off1 = offsetof(struct thread_options, file_service_type),
.help = "How to select which file to service next",
.def = "roundrobin",
.category = FIO_OPT_C_FILE,
.group = FIO_OPT_G_INVALID,
.posval = {
{ .ival = "random",
.oval = FIO_FSERVICE_RANDOM,
.help = "Choose a file at random (uniform)",
},
{ .ival = "zipf",
.oval = FIO_FSERVICE_ZIPF,
.help = "Zipf randomized",
},
{ .ival = "pareto",
.oval = FIO_FSERVICE_PARETO,
.help = "Pareto randomized",
},
{ .ival = "gauss",
.oval = FIO_FSERVICE_GAUSS,
.help = "Normal (Gaussian) distribution",
},
{ .ival = "roundrobin",
.oval = FIO_FSERVICE_RR,
.help = "Round robin select files",
},
{ .ival = "sequential",
.oval = FIO_FSERVICE_SEQ,
.help = "Finish one file before moving to the next",
},
},
.parent = "nrfiles",
.hide = 1,
},
#ifdef CONFIG_POSIX_FALLOCATE
{
.name = "fallocate",
.lname = "Fallocate",
.type = FIO_OPT_STR,
.off1 = offsetof(struct thread_options, fallocate_mode),
.help = "Whether pre-allocation is performed when laying out files",
.def = "posix",
.category = FIO_OPT_C_FILE,
.group = FIO_OPT_G_INVALID,
.posval = {
{ .ival = "none",
.oval = FIO_FALLOCATE_NONE,
.help = "Do not pre-allocate space",
},
{ .ival = "posix",
.oval = FIO_FALLOCATE_POSIX,
.help = "Use posix_fallocate()",
},
#ifdef CONFIG_LINUX_FALLOCATE
{ .ival = "keep",
.oval = FIO_FALLOCATE_KEEP_SIZE,
.help = "Use fallocate(..., FALLOC_FL_KEEP_SIZE, ...)",
},
#endif
/* Compatibility with former boolean values */
{ .ival = "0",
.oval = FIO_FALLOCATE_NONE,
.help = "Alias for 'none'",
},
{ .ival = "1",
.oval = FIO_FALLOCATE_POSIX,
.help = "Alias for 'posix'",
},
},
},
#else /* CONFIG_POSIX_FALLOCATE */
{
.name = "fallocate",
.lname = "Fallocate",
.type = FIO_OPT_UNSUPPORTED,
.help = "Your platform does not support fallocate",
},
#endif /* CONFIG_POSIX_FALLOCATE */
{
.name = "fadvise_hint",
.lname = "Fadvise hint",
.type = FIO_OPT_STR,
.off1 = offsetof(struct thread_options, fadvise_hint),
.posval = {
{ .ival = "0",
.oval = F_ADV_NONE,
.help = "Don't issue fadvise",
},
{ .ival = "1",
.oval = F_ADV_TYPE,
.help = "Advise using fio IO pattern",
},
{ .ival = "random",
.oval = F_ADV_RANDOM,
.help = "Advise using FADV_RANDOM",
},
{ .ival = "sequential",
.oval = F_ADV_SEQUENTIAL,
.help = "Advise using FADV_SEQUENTIAL",
},
},
.help = "Use fadvise() to advise the kernel on IO pattern",
.def = "1",
.category = FIO_OPT_C_FILE,
.group = FIO_OPT_G_INVALID,
},
#ifdef FIO_HAVE_STREAMID
{
.name = "fadvise_stream",
.lname = "Fadvise stream",
.type = FIO_OPT_INT,
.off1 = offsetof(struct thread_options, fadvise_stream),
.help = "Use fadvise() to set stream ID",
.category = FIO_OPT_C_FILE,
.group = FIO_OPT_G_INVALID,
},
#else
{
.name = "fadvise_stream",
.lname = "Fadvise stream",
.type = FIO_OPT_UNSUPPORTED,
.help = "Your platform does not support fadvise stream ID",
},
#endif
{
.name = "fsync",
.lname = "Fsync",
.type = FIO_OPT_INT,
.off1 = offsetof(struct thread_options, fsync_blocks),
.help = "Issue fsync for writes every given number of blocks",
.def = "0",
.interval = 1,
.category = FIO_OPT_C_FILE,
.group = FIO_OPT_G_INVALID,
},
{
.name = "fdatasync",
.lname = "Fdatasync",
.type = FIO_OPT_INT,
.off1 = offsetof(struct thread_options, fdatasync_blocks),
.help = "Issue fdatasync for writes every given number of blocks",
.def = "0",
.interval = 1,
.category = FIO_OPT_C_FILE,
.group = FIO_OPT_G_INVALID,
},
{
.name = "write_barrier",
.lname = "Write barrier",
.type = FIO_OPT_INT,
.off1 = offsetof(struct thread_options, barrier_blocks),
.help = "Make every Nth write a barrier write",
.def = "0",
.interval = 1,
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_INVALID,
},
#ifdef CONFIG_SYNC_FILE_RANGE
{
.name = "sync_file_range",
.lname = "Sync file range",
.posval = {
{ .ival = "wait_before",
.oval = SYNC_FILE_RANGE_WAIT_BEFORE,
.help = "SYNC_FILE_RANGE_WAIT_BEFORE",
.orval = 1,
},
{ .ival = "write",
.oval = SYNC_FILE_RANGE_WRITE,
.help = "SYNC_FILE_RANGE_WRITE",
.orval = 1,
},
{
.ival = "wait_after",
.oval = SYNC_FILE_RANGE_WAIT_AFTER,
.help = "SYNC_FILE_RANGE_WAIT_AFTER",
.orval = 1,
},
},
.type = FIO_OPT_STR_MULTI,
.cb = str_sfr_cb,
.off1 = offsetof(struct thread_options, sync_file_range),
.help = "Use sync_file_range()",
.category = FIO_OPT_C_FILE,
.group = FIO_OPT_G_INVALID,
},
#else
{
.name = "sync_file_range",
.lname = "Sync file range",
.type = FIO_OPT_UNSUPPORTED,
.help = "Your platform does not support sync_file_range",
},
#endif
{
.name = "direct",
.lname = "Direct I/O",
.type = FIO_OPT_BOOL,
.off1 = offsetof(struct thread_options, odirect),
.help = "Use O_DIRECT IO (negates buffered)",
.def = "0",
.inverse = "buffered",
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_IO_TYPE,
},
{
.name = "atomic",
.lname = "Atomic I/O",
.type = FIO_OPT_BOOL,
.off1 = offsetof(struct thread_options, oatomic),
.help = "Use Atomic IO with O_DIRECT (implies O_DIRECT)",
.def = "0",
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_IO_TYPE,
},
{
.name = "buffered",
.lname = "Buffered I/O",
.type = FIO_OPT_BOOL,
.off1 = offsetof(struct thread_options, odirect),
.neg = 1,
.help = "Use buffered IO (negates direct)",
.def = "1",
.inverse = "direct",
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_IO_TYPE,
},
{
.name = "overwrite",
.lname = "Overwrite",
.type = FIO_OPT_BOOL,
.off1 = offsetof(struct thread_options, overwrite),
.help = "When writing, set whether to overwrite current data",
.def = "0",
.category = FIO_OPT_C_FILE,
.group = FIO_OPT_G_INVALID,
},
{
.name = "loops",
.lname = "Loops",
.type = FIO_OPT_INT,
.off1 = offsetof(struct thread_options, loops),
.help = "Number of times to run the job",
.def = "1",
.interval = 1,
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_RUNTIME,
},
{
.name = "numjobs",
.lname = "Number of jobs",
.type = FIO_OPT_INT,
.off1 = offsetof(struct thread_options, numjobs),
.help = "Duplicate this job this many times",
.def = "1",
.interval = 1,
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_RUNTIME,
},
{
.name = "startdelay",
.lname = "Start delay",
.type = FIO_OPT_STR_VAL_TIME,
.off1 = offsetof(struct thread_options, start_delay),
.off2 = offsetof(struct thread_options, start_delay_high),
.help = "Only start job when this period has passed",
.def = "0",
.is_seconds = 1,
.is_time = 1,
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_RUNTIME,
},
{
.name = "runtime",
.lname = "Runtime",
.alias = "timeout",
.type = FIO_OPT_STR_VAL_TIME,
.off1 = offsetof(struct thread_options, timeout),
.help = "Stop workload when this amount of time has passed",
.def = "0",
.is_seconds = 1,
.is_time = 1,
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_RUNTIME,
},
{
.name = "time_based",
.lname = "Time based",
.type = FIO_OPT_STR_SET,
.off1 = offsetof(struct thread_options, time_based),
.help = "Keep running until runtime/timeout is met",
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_RUNTIME,
},
{
.name = "verify_only",
.lname = "Verify only",
.type = FIO_OPT_STR_SET,
.off1 = offsetof(struct thread_options, verify_only),
.help = "Verifies previously written data is still valid",
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_RUNTIME,
},
{
.name = "ramp_time",
.lname = "Ramp time",
.type = FIO_OPT_STR_VAL_TIME,
.off1 = offsetof(struct thread_options, ramp_time),
.help = "Ramp up time before measuring performance",
.is_seconds = 1,
.is_time = 1,
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_RUNTIME,
},
{
.name = "clocksource",
.lname = "Clock source",
.type = FIO_OPT_STR,
.cb = fio_clock_source_cb,
.off1 = offsetof(struct thread_options, clocksource),
.help = "What type of timing source to use",
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_CLOCK,
.posval = {
#ifdef CONFIG_GETTIMEOFDAY
{ .ival = "gettimeofday",
.oval = CS_GTOD,
.help = "Use gettimeofday(2) for timing",
},
#endif
#ifdef CONFIG_CLOCK_GETTIME
{ .ival = "clock_gettime",
.oval = CS_CGETTIME,
.help = "Use clock_gettime(2) for timing",
},
#endif
#ifdef ARCH_HAVE_CPU_CLOCK
{ .ival = "cpu",
.oval = CS_CPUCLOCK,
.help = "Use CPU private clock",
},
#endif
},
},
{
.name = "mem",
.alias = "iomem",
.lname = "I/O Memory",
.type = FIO_OPT_STR,
.cb = str_mem_cb,
.off1 = offsetof(struct thread_options, mem_type),
.help = "Backing type for IO buffers",
.def = "malloc",
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_INVALID,
.posval = {
{ .ival = "malloc",
.oval = MEM_MALLOC,
.help = "Use malloc(3) for IO buffers",
},
#ifndef CONFIG_NO_SHM
{ .ival = "shm",
.oval = MEM_SHM,
.help = "Use shared memory segments for IO buffers",
},
#ifdef FIO_HAVE_HUGETLB
{ .ival = "shmhuge",
.oval = MEM_SHMHUGE,
.help = "Like shm, but use huge pages",
},
#endif
#endif
{ .ival = "mmap",
.oval = MEM_MMAP,
.help = "Use mmap(2) (file or anon) for IO buffers",
},
{ .ival = "mmapshared",
.oval = MEM_MMAPSHARED,
.help = "Like mmap, but use the shared flag",
},
#ifdef FIO_HAVE_HUGETLB
{ .ival = "mmaphuge",
.oval = MEM_MMAPHUGE,
.help = "Like mmap, but use huge pages",
},
#endif
#ifdef CONFIG_CUDA
{ .ival = "cudamalloc",
.oval = MEM_CUDA_MALLOC,
.help = "Allocate GPU device memory for GPUDirect RDMA",
},
#endif
},
},
{
.name = "iomem_align",
.alias = "mem_align",
.lname = "I/O memory alignment",
.type = FIO_OPT_INT,
.off1 = offsetof(struct thread_options, mem_align),
.minval = 0,
.help = "IO memory buffer offset alignment",
.def = "0",
.parent = "iomem",
.hide = 1,
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_INVALID,
},
{
.name = "verify",
.lname = "Verify",
.type = FIO_OPT_STR,
.off1 = offsetof(struct thread_options, verify),
.help = "Verify data written",
.def = "0",
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_VERIFY,
.posval = {
{ .ival = "0",
.oval = VERIFY_NONE,
.help = "Don't do IO verification",
},
{ .ival =