| /* |
| * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. |
| * |
| * This software is available to you under a choice of one of two |
| * licenses. You may choose to be licensed under the terms of the GNU |
| * General Public License (GPL) Version 2, available from the file |
| * COPYING in the main directory of this source tree, or the |
| * OpenIB.org BSD license below: |
| * |
| * Redistribution and use in source and binary forms, with or |
| * without modification, are permitted provided that the following |
| * conditions are met: |
| * |
| * - Redistributions of source code must retain the above |
| * copyright notice, this list of conditions and the following |
| * disclaimer. |
| * |
| * - Redistributions in binary form must reproduce the above |
| * copyright notice, this list of conditions and the following |
| * disclaimer in the documentation and/or other materials |
| * provided with the distribution. |
| * |
| * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
| * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
| * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
| * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS |
| * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN |
| * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN |
| * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
| * SOFTWARE. |
| */ |
| |
| #include <linux/interrupt.h> |
| #include <linux/module.h> |
| #include <linux/mlx5/driver.h> |
| #include <linux/mlx5/eq.h> |
| #include <linux/mlx5/cmd.h> |
| #ifdef CONFIG_RFS_ACCEL |
| #include <linux/cpu_rmap.h> |
| #endif |
| #include "mlx5_core.h" |
| #include "lib/eq.h" |
| #include "fpga/core.h" |
| #include "eswitch.h" |
| #include "lib/clock.h" |
| #include "diag/fw_tracer.h" |
| |
| enum { |
| MLX5_EQE_SIZE = sizeof(struct mlx5_eqe), |
| MLX5_EQE_OWNER_INIT_VAL = 0x1, |
| }; |
| |
| enum { |
| MLX5_EQ_STATE_ARMED = 0x9, |
| MLX5_EQ_STATE_FIRED = 0xa, |
| MLX5_EQ_STATE_ALWAYS_ARMED = 0xb, |
| }; |
| |
| enum { |
| MLX5_NUM_SPARE_EQE = 0x80, |
| MLX5_NUM_ASYNC_EQE = 0x1000, |
| MLX5_NUM_CMD_EQE = 32, |
| MLX5_NUM_PF_DRAIN = 64, |
| }; |
| |
| enum { |
| MLX5_EQ_DOORBEL_OFFSET = 0x40, |
| }; |
| |
| struct mlx5_irq_info { |
| cpumask_var_t mask; |
| char name[MLX5_MAX_IRQ_NAME]; |
| void *context; /* dev_id provided to request_irq */ |
| }; |
| |
| struct mlx5_eq_table { |
| struct list_head comp_eqs_list; |
| struct mlx5_eq pages_eq; |
| struct mlx5_eq async_eq; |
| struct mlx5_eq cmd_eq; |
| |
| #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING |
| struct mlx5_eq_pagefault pfault_eq; |
| #endif |
| struct mutex lock; /* serialize async EQ creation and destruction */ |
| int num_comp_vectors; |
| struct mlx5_irq_info *irq_info; |
| #ifdef CONFIG_RFS_ACCEL |
| struct cpu_rmap *rmap; |
| #endif |
| }; |
| |
| #define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG) | \ |
| (1ull << MLX5_EVENT_TYPE_COMM_EST) | \ |
| (1ull << MLX5_EVENT_TYPE_SQ_DRAINED) | \ |
| (1ull << MLX5_EVENT_TYPE_CQ_ERROR) | \ |
| (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR) | \ |
| (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED) | \ |
| (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \ |
| (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR) | \ |
| (1ull << MLX5_EVENT_TYPE_PORT_CHANGE) | \ |
| (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR) | \ |
| (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE) | \ |
| (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT)) |
| |
| static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn) |
| { |
| u32 out[MLX5_ST_SZ_DW(destroy_eq_out)] = {0}; |
| u32 in[MLX5_ST_SZ_DW(destroy_eq_in)] = {0}; |
| |
| MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ); |
| MLX5_SET(destroy_eq_in, in, eq_number, eqn); |
| return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); |
| } |
| |
| static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry) |
| { |
| return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE); |
| } |
| |
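| /* An EQE belongs to software when its owner bit matches the consumer's |
| * current "pass": eq->nent is a power of two, so (cons_index & eq->nent) |
| * toggles on every wrap of the queue, and hardware flips the owner bit it |
| * writes with each pass, which is what next_eqe_sw() checks below. |
| */ |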
| static struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq) |
| { |
| struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1)); |
| |
| return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe; |
| } |
| |
| static const char *eqe_type_str(u8 type) |
| { |
| switch (type) { |
| case MLX5_EVENT_TYPE_COMP: |
| return "MLX5_EVENT_TYPE_COMP"; |
| case MLX5_EVENT_TYPE_PATH_MIG: |
| return "MLX5_EVENT_TYPE_PATH_MIG"; |
| case MLX5_EVENT_TYPE_COMM_EST: |
| return "MLX5_EVENT_TYPE_COMM_EST"; |
| case MLX5_EVENT_TYPE_SQ_DRAINED: |
| return "MLX5_EVENT_TYPE_SQ_DRAINED"; |
| case MLX5_EVENT_TYPE_SRQ_LAST_WQE: |
| return "MLX5_EVENT_TYPE_SRQ_LAST_WQE"; |
| case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT: |
| return "MLX5_EVENT_TYPE_SRQ_RQ_LIMIT"; |
| case MLX5_EVENT_TYPE_CQ_ERROR: |
| return "MLX5_EVENT_TYPE_CQ_ERROR"; |
| case MLX5_EVENT_TYPE_WQ_CATAS_ERROR: |
| return "MLX5_EVENT_TYPE_WQ_CATAS_ERROR"; |
| case MLX5_EVENT_TYPE_PATH_MIG_FAILED: |
| return "MLX5_EVENT_TYPE_PATH_MIG_FAILED"; |
| case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR: |
| return "MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR"; |
| case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR: |
| return "MLX5_EVENT_TYPE_WQ_ACCESS_ERROR"; |
| case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR: |
| return "MLX5_EVENT_TYPE_SRQ_CATAS_ERROR"; |
| case MLX5_EVENT_TYPE_INTERNAL_ERROR: |
| return "MLX5_EVENT_TYPE_INTERNAL_ERROR"; |
| case MLX5_EVENT_TYPE_PORT_CHANGE: |
| return "MLX5_EVENT_TYPE_PORT_CHANGE"; |
| case MLX5_EVENT_TYPE_GPIO_EVENT: |
| return "MLX5_EVENT_TYPE_GPIO_EVENT"; |
| case MLX5_EVENT_TYPE_PORT_MODULE_EVENT: |
| return "MLX5_EVENT_TYPE_PORT_MODULE_EVENT"; |
| case MLX5_EVENT_TYPE_TEMP_WARN_EVENT: |
| return "MLX5_EVENT_TYPE_TEMP_WARN_EVENT"; |
| case MLX5_EVENT_TYPE_REMOTE_CONFIG: |
| return "MLX5_EVENT_TYPE_REMOTE_CONFIG"; |
| case MLX5_EVENT_TYPE_DB_BF_CONGESTION: |
| return "MLX5_EVENT_TYPE_DB_BF_CONGESTION"; |
| case MLX5_EVENT_TYPE_STALL_EVENT: |
| return "MLX5_EVENT_TYPE_STALL_EVENT"; |
| case MLX5_EVENT_TYPE_CMD: |
| return "MLX5_EVENT_TYPE_CMD"; |
| case MLX5_EVENT_TYPE_PAGE_REQUEST: |
| return "MLX5_EVENT_TYPE_PAGE_REQUEST"; |
| case MLX5_EVENT_TYPE_PAGE_FAULT: |
| return "MLX5_EVENT_TYPE_PAGE_FAULT"; |
| case MLX5_EVENT_TYPE_PPS_EVENT: |
| return "MLX5_EVENT_TYPE_PPS_EVENT"; |
| case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE: |
| return "MLX5_EVENT_TYPE_NIC_VPORT_CHANGE"; |
| case MLX5_EVENT_TYPE_FPGA_ERROR: |
| return "MLX5_EVENT_TYPE_FPGA_ERROR"; |
| case MLX5_EVENT_TYPE_FPGA_QP_ERROR: |
| return "MLX5_EVENT_TYPE_FPGA_QP_ERROR"; |
| case MLX5_EVENT_TYPE_GENERAL_EVENT: |
| return "MLX5_EVENT_TYPE_GENERAL_EVENT"; |
| case MLX5_EVENT_TYPE_DEVICE_TRACER: |
| return "MLX5_EVENT_TYPE_DEVICE_TRACER"; |
| default: |
| return "Unrecognized event"; |
| } |
| } |
| |
| static enum mlx5_dev_event port_subtype_event(u8 subtype) |
| { |
| switch (subtype) { |
| case MLX5_PORT_CHANGE_SUBTYPE_DOWN: |
| return MLX5_DEV_EVENT_PORT_DOWN; |
| case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE: |
| return MLX5_DEV_EVENT_PORT_UP; |
| case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED: |
| return MLX5_DEV_EVENT_PORT_INITIALIZED; |
| case MLX5_PORT_CHANGE_SUBTYPE_LID: |
| return MLX5_DEV_EVENT_LID_CHANGE; |
| case MLX5_PORT_CHANGE_SUBTYPE_PKEY: |
| return MLX5_DEV_EVENT_PKEY_CHANGE; |
| case MLX5_PORT_CHANGE_SUBTYPE_GUID: |
| return MLX5_DEV_EVENT_GUID_CHANGE; |
| case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG: |
| return MLX5_DEV_EVENT_CLIENT_REREG; |
| } |
| return -1; |
| } |
| |
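| /* Ring the EQ doorbell: the arming register sits at the start of the EQ |
| * doorbell area, and the consumer-index-only update register 8 bytes after |
| * it (the "+ 2" below is in __be32 units). The written word carries the EQ |
| * number in its top byte and the 24-bit consumer index below it. |
| */ |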
| static void eq_update_ci(struct mlx5_eq *eq, int arm) |
| { |
| __be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2); |
| u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24); |
| |
| __raw_writel((__force u32)cpu_to_be32(val), addr); |
| /* We still want ordering, just not swabbing, so add a barrier */ |
| mb(); |
| } |
| |
| #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING |
| static void eqe_pf_action(struct work_struct *work) |
| { |
| struct mlx5_pagefault *pfault = container_of(work, |
| struct mlx5_pagefault, |
| work); |
| struct mlx5_eq_pagefault *eq = pfault->eq; |
| |
| mlx5_core_page_fault(eq->core->dev, pfault); |
| mempool_free(pfault, eq->pool); |
| } |
| |
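| /* Page fault EQEs are drained here from IRQ or work context: each one is |
| * copied into a struct mlx5_pagefault taken from the EQ's mempool and |
| * queued to eq->wq, where eqe_pf_action() hands it to mlx5_core_page_fault() |
| * and returns it to the pool. If the pool runs dry, processing is deferred |
| * to eq_pf_action(), which refills the pool before draining again. |
| */ |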
| static void eq_pf_process(struct mlx5_eq_pagefault *eq) |
| { |
| struct mlx5_core_dev *dev = eq->core->dev; |
| struct mlx5_eqe_page_fault *pf_eqe; |
| struct mlx5_pagefault *pfault; |
| struct mlx5_eqe *eqe; |
| int set_ci = 0; |
| |
| while ((eqe = next_eqe_sw(eq->core))) { |
| pfault = mempool_alloc(eq->pool, GFP_ATOMIC); |
| if (!pfault) { |
| schedule_work(&eq->work); |
| break; |
| } |
| |
| dma_rmb(); |
| pf_eqe = &eqe->data.page_fault; |
| pfault->event_subtype = eqe->sub_type; |
| pfault->bytes_committed = be32_to_cpu(pf_eqe->bytes_committed); |
| |
| mlx5_core_dbg(dev, |
| "PAGE_FAULT: subtype: 0x%02x, bytes_committed: 0x%06x\n", |
| eqe->sub_type, pfault->bytes_committed); |
| |
| switch (eqe->sub_type) { |
| case MLX5_PFAULT_SUBTYPE_RDMA: |
| /* RDMA based event */ |
| pfault->type = |
| be32_to_cpu(pf_eqe->rdma.pftype_token) >> 24; |
| pfault->token = |
| be32_to_cpu(pf_eqe->rdma.pftype_token) & |
| MLX5_24BIT_MASK; |
| pfault->rdma.r_key = |
| be32_to_cpu(pf_eqe->rdma.r_key); |
| pfault->rdma.packet_size = |
| be16_to_cpu(pf_eqe->rdma.packet_length); |
| pfault->rdma.rdma_op_len = |
| be32_to_cpu(pf_eqe->rdma.rdma_op_len); |
| pfault->rdma.rdma_va = |
| be64_to_cpu(pf_eqe->rdma.rdma_va); |
| mlx5_core_dbg(dev, |
| "PAGE_FAULT: type:0x%x, token: 0x%06x, r_key: 0x%08x\n", |
| pfault->type, pfault->token, |
| pfault->rdma.r_key); |
| mlx5_core_dbg(dev, |
| "PAGE_FAULT: rdma_op_len: 0x%08x, rdma_va: 0x%016llx\n", |
| pfault->rdma.rdma_op_len, |
| pfault->rdma.rdma_va); |
| break; |
| |
| case MLX5_PFAULT_SUBTYPE_WQE: |
| /* WQE based event */ |
| pfault->type = |
| (be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24) & 0x7; |
| pfault->token = |
| be32_to_cpu(pf_eqe->wqe.token); |
| pfault->wqe.wq_num = |
| be32_to_cpu(pf_eqe->wqe.pftype_wq) & |
| MLX5_24BIT_MASK; |
| pfault->wqe.wqe_index = |
| be16_to_cpu(pf_eqe->wqe.wqe_index); |
| pfault->wqe.packet_size = |
| be16_to_cpu(pf_eqe->wqe.packet_length); |
| mlx5_core_dbg(dev, |
| "PAGE_FAULT: type:0x%x, token: 0x%06x, wq_num: 0x%06x, wqe_index: 0x%04x\n", |
| pfault->type, pfault->token, |
| pfault->wqe.wq_num, |
| pfault->wqe.wqe_index); |
| break; |
| |
| default: |
| mlx5_core_warn(dev, |
| "Unsupported page fault event sub-type: 0x%02hhx\n", |
| eqe->sub_type); |
| /* Unsupported page faults should still be |
| * resolved by the page fault handler |
| */ |
| } |
| |
| pfault->eq = eq; |
| INIT_WORK(&pfault->work, eqe_pf_action); |
| queue_work(eq->wq, &pfault->work); |
| |
| ++eq->core->cons_index; |
| ++set_ci; |
| |
| if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) { |
| eq_update_ci(eq->core, 0); |
| set_ci = 0; |
| } |
| } |
| |
| eq_update_ci(eq->core, 1); |
| } |
| |
| static irqreturn_t mlx5_eq_pf_int(int irq, void *eq_ptr) |
| { |
| struct mlx5_eq_pagefault *eq = eq_ptr; |
| unsigned long flags; |
| |
| if (spin_trylock_irqsave(&eq->lock, flags)) { |
| eq_pf_process(eq); |
| spin_unlock_irqrestore(&eq->lock, flags); |
| } else { |
| schedule_work(&eq->work); |
| } |
| |
| return IRQ_HANDLED; |
| } |
| |
| /* A mempool_refill() helper was proposed upstream but unfortunately wasn't |
| * accepted: http://lkml.iu.edu/hypermail/linux/kernel/1512.1/05073.html |
| * Refill the pool by hand as a cheap workaround. |
| */ |
| static void mempool_refill(mempool_t *pool) |
| { |
| while (pool->curr_nr < pool->min_nr) |
| mempool_free(mempool_alloc(pool, GFP_KERNEL), pool); |
| } |
| |
| static void eq_pf_action(struct work_struct *work) |
| { |
| struct mlx5_eq_pagefault *eq = |
| container_of(work, struct mlx5_eq_pagefault, work); |
| |
| mempool_refill(eq->pool); |
| |
| spin_lock_irq(&eq->lock); |
| eq_pf_process(eq); |
| spin_unlock_irq(&eq->lock); |
| } |
| |
| static int |
| create_pf_eq(struct mlx5_core_dev *dev, struct mlx5_eq_pagefault *eq) |
| { |
| struct mlx5_eq_param param = {}; |
| int err; |
| |
| spin_lock_init(&eq->lock); |
| INIT_WORK(&eq->work, eq_pf_action); |
| |
| eq->pool = mempool_create_kmalloc_pool(MLX5_NUM_PF_DRAIN, |
| sizeof(struct mlx5_pagefault)); |
| if (!eq->pool) |
| return -ENOMEM; |
| |
| eq->wq = alloc_workqueue("mlx5_page_fault", |
| WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM, |
| MLX5_NUM_CMD_EQE); |
| if (!eq->wq) { |
| err = -ENOMEM; |
| goto err_mempool; |
| } |
| |
| param = (struct mlx5_eq_param) { |
| .index = MLX5_EQ_PFAULT_IDX, |
| .mask = 1 << MLX5_EVENT_TYPE_PAGE_FAULT, |
| .nent = MLX5_NUM_ASYNC_EQE, |
| .context = eq, |
| .handler = mlx5_eq_pf_int |
| }; |
| |
| eq->core = mlx5_eq_create_generic(dev, "mlx5_page_fault_eq", ¶m); |
| if (IS_ERR(eq->core)) { |
| err = PTR_ERR(eq->core); |
| goto err_wq; |
| } |
| |
| return 0; |
| err_wq: |
| destroy_workqueue(eq->wq); |
| err_mempool: |
| mempool_destroy(eq->pool); |
| return err; |
| } |
| |
| static int destroy_pf_eq(struct mlx5_core_dev *dev, struct mlx5_eq_pagefault *eq) |
| { |
| int err; |
| |
| err = mlx5_eq_destroy_generic(dev, eq->core); |
| cancel_work_sync(&eq->work); |
| destroy_workqueue(eq->wq); |
| mempool_destroy(eq->pool); |
| |
| return err; |
| } |
| |
| int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token, |
| u32 wq_num, u8 type, int error) |
| { |
| u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = {0}; |
| u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = {0}; |
| |
| MLX5_SET(page_fault_resume_in, in, opcode, |
| MLX5_CMD_OP_PAGE_FAULT_RESUME); |
| MLX5_SET(page_fault_resume_in, in, error, !!error); |
| MLX5_SET(page_fault_resume_in, in, page_fault_type, type); |
| MLX5_SET(page_fault_resume_in, in, wq_number, wq_num); |
| MLX5_SET(page_fault_resume_in, in, token, token); |
| |
| return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); |
| } |
| EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume); |
| #endif |
| |
| static void general_event_handler(struct mlx5_core_dev *dev, |
| struct mlx5_eqe *eqe) |
| { |
| switch (eqe->sub_type) { |
| case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT: |
| if (dev->event) |
| dev->event(dev, MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT, 0); |
| break; |
| default: |
| mlx5_core_dbg(dev, "General event with unrecognized subtype: sub_type %d\n", |
| eqe->sub_type); |
| } |
| } |
| |
| static void mlx5_temp_warning_event(struct mlx5_core_dev *dev, |
| struct mlx5_eqe *eqe) |
| { |
| u64 value_lsb; |
| u64 value_msb; |
| |
| value_lsb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_lsb); |
| value_msb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_msb); |
| |
| mlx5_core_warn(dev, |
| "High temperature on sensors with bit set %llx %llx\n", |
| value_msb, value_lsb); |
| } |
| |
| /* Caller must eventually call mlx5_cq_put() on the returned cq */ |
| static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn) |
| { |
| struct mlx5_cq_table *table = &eq->cq_table; |
| struct mlx5_core_cq *cq = NULL; |
| |
| spin_lock(&table->lock); |
| cq = radix_tree_lookup(&table->tree, cqn); |
| if (likely(cq)) |
| mlx5_cq_hold(cq); |
| spin_unlock(&table->lock); |
| |
| return cq; |
| } |
| |
| static void mlx5_eq_cq_event(struct mlx5_eq *eq, u32 cqn, int event_type) |
| { |
| struct mlx5_core_cq *cq = mlx5_eq_cq_get(eq, cqn); |
| |
| if (unlikely(!cq)) { |
| mlx5_core_warn(eq->dev, "Async event for bogus CQ 0x%x\n", cqn); |
| return; |
| } |
| |
| cq->event(cq, event_type); |
| |
| mlx5_cq_put(cq); |
| } |
| |
| static irqreturn_t mlx5_eq_comp_int(int irq, void *eq_ptr) |
| { |
| struct mlx5_eq_comp *eq_comp = eq_ptr; |
| struct mlx5_eq *eq = eq_ptr; |
| struct mlx5_eqe *eqe; |
| int set_ci = 0; |
| u32 cqn = -1; |
| |
| while ((eqe = next_eqe_sw(eq))) { |
| struct mlx5_core_cq *cq; |
| /* Make sure we read EQ entry contents after we've |
| * checked the ownership bit. |
| */ |
| dma_rmb(); |
| /* Assume (eqe->type) is always MLX5_EVENT_TYPE_COMP */ |
| cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff; |
| |
| cq = mlx5_eq_cq_get(eq, cqn); |
| if (likely(cq)) { |
| ++cq->arm_sn; |
| cq->comp(cq); |
| mlx5_cq_put(cq); |
| } else { |
| mlx5_core_warn(eq->dev, "Completion event for bogus CQ 0x%x\n", cqn); |
| } |
| |
| ++eq->cons_index; |
| ++set_ci; |
| |
| /* The HCA will think the queue has overflowed if we |
| * don't tell it we've been processing events. We |
| * create our EQs with MLX5_NUM_SPARE_EQE extra |
| * entries, so we must update our consumer index at |
| * least that often. |
| */ |
| if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) { |
| eq_update_ci(eq, 0); |
| set_ci = 0; |
| } |
| } |
| |
| eq_update_ci(eq, 1); |
| |
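| /* If at least one completion was consumed, kick the EQ's tasklet so CQ |
| * handlers that deferred work to it run outside hard IRQ context. |
| */ |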
| if (cqn != -1) |
| tasklet_schedule(&eq_comp->tasklet_ctx.task); |
| |
| return IRQ_HANDLED; |
| } |
| |
| /* Some architectures don't latch interrupts while they are disabled, so |
| * polling with mlx5_eq_poll_irq_disabled() can itself lose interrupts while |
| * trying to avoid losing them. Use it only as a last resort. |
| */ |
| u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq) |
| { |
| u32 count_eqe; |
| |
| disable_irq(eq->core.irqn); |
| count_eqe = eq->core.cons_index; |
| mlx5_eq_comp_int(eq->core.irqn, eq); |
| count_eqe = eq->core.cons_index - count_eqe; |
| enable_irq(eq->core.irqn); |
| |
| return count_eqe; |
| } |
| |
| static irqreturn_t mlx5_eq_async_int(int irq, void *eq_ptr) |
| { |
| struct mlx5_eq *eq = eq_ptr; |
| struct mlx5_core_dev *dev = eq->dev; |
| struct mlx5_eqe *eqe; |
| int set_ci = 0; |
| u32 cqn = -1; |
| u32 rsn; |
| u8 port; |
| |
| while ((eqe = next_eqe_sw(eq))) { |
| /* |
| * Make sure we read EQ entry contents after we've |
| * checked the ownership bit. |
| */ |
| dma_rmb(); |
| |
| mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n", |
| eq->eqn, eqe_type_str(eqe->type)); |
| switch (eqe->type) { |
| case MLX5_EVENT_TYPE_DCT_DRAINED: |
| rsn = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff; |
| rsn |= (MLX5_RES_DCT << MLX5_USER_INDEX_LEN); |
| mlx5_rsc_event(dev, rsn, eqe->type); |
| break; |
| case MLX5_EVENT_TYPE_PATH_MIG: |
| case MLX5_EVENT_TYPE_COMM_EST: |
| case MLX5_EVENT_TYPE_SQ_DRAINED: |
| case MLX5_EVENT_TYPE_SRQ_LAST_WQE: |
| case MLX5_EVENT_TYPE_WQ_CATAS_ERROR: |
| case MLX5_EVENT_TYPE_PATH_MIG_FAILED: |
| case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR: |
| case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR: |
| rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff; |
| rsn |= (eqe->data.qp_srq.type << MLX5_USER_INDEX_LEN); |
| mlx5_core_dbg(dev, "event %s(%d) arrived on resource 0x%x\n", |
| eqe_type_str(eqe->type), eqe->type, rsn); |
| mlx5_rsc_event(dev, rsn, eqe->type); |
| break; |
| |
| case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT: |
| case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR: |
| rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff; |
| mlx5_core_dbg(dev, "SRQ event %s(%d): srqn 0x%x\n", |
| eqe_type_str(eqe->type), eqe->type, rsn); |
| mlx5_srq_event(dev, rsn, eqe->type); |
| break; |
| |
| case MLX5_EVENT_TYPE_CMD: |
| mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false); |
| break; |
| |
| case MLX5_EVENT_TYPE_PORT_CHANGE: |
| port = (eqe->data.port.port >> 4) & 0xf; |
| switch (eqe->sub_type) { |
| case MLX5_PORT_CHANGE_SUBTYPE_DOWN: |
| case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE: |
| case MLX5_PORT_CHANGE_SUBTYPE_LID: |
| case MLX5_PORT_CHANGE_SUBTYPE_PKEY: |
| case MLX5_PORT_CHANGE_SUBTYPE_GUID: |
| case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG: |
| case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED: |
| if (dev->event) |
| dev->event(dev, port_subtype_event(eqe->sub_type), |
| (unsigned long)port); |
| break; |
| default: |
| mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n", |
| port, eqe->sub_type); |
| } |
| break; |
| case MLX5_EVENT_TYPE_CQ_ERROR: |
| cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff; |
| mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n", |
| cqn, eqe->data.cq_err.syndrome); |
| mlx5_eq_cq_event(eq, cqn, eqe->type); |
| break; |
| |
| case MLX5_EVENT_TYPE_PAGE_REQUEST: |
| { |
| u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id); |
| s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages); |
| |
| mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n", |
| func_id, npages); |
| mlx5_core_req_pages_handler(dev, func_id, npages); |
| } |
| break; |
| |
| case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE: |
| mlx5_eswitch_vport_event(dev->priv.eswitch, eqe); |
| break; |
| |
| case MLX5_EVENT_TYPE_PORT_MODULE_EVENT: |
| mlx5_port_module_event(dev, eqe); |
| break; |
| |
| case MLX5_EVENT_TYPE_PPS_EVENT: |
| mlx5_pps_event(dev, eqe); |
| break; |
| |
| case MLX5_EVENT_TYPE_FPGA_ERROR: |
| case MLX5_EVENT_TYPE_FPGA_QP_ERROR: |
| mlx5_fpga_event(dev, eqe->type, &eqe->data.raw); |
| break; |
| |
| case MLX5_EVENT_TYPE_TEMP_WARN_EVENT: |
| mlx5_temp_warning_event(dev, eqe); |
| break; |
| |
| case MLX5_EVENT_TYPE_GENERAL_EVENT: |
| general_event_handler(dev, eqe); |
| break; |
| |
| case MLX5_EVENT_TYPE_DEVICE_TRACER: |
| mlx5_fw_tracer_event(dev, eqe); |
| break; |
| |
| default: |
| mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n", |
| eqe->type, eq->eqn); |
| break; |
| } |
| |
| ++eq->cons_index; |
| ++set_ci; |
| |
| /* The HCA will think the queue has overflowed if we |
| * don't tell it we've been processing events. We |
| * create our EQs with MLX5_NUM_SPARE_EQE extra |
| * entries, so we must update our consumer index at |
| * least that often. |
| */ |
| if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) { |
| eq_update_ci(eq, 0); |
| set_ci = 0; |
| } |
| } |
| |
| eq_update_ci(eq, 1); |
| |
| return IRQ_HANDLED; |
| } |
| |
| static void init_eq_buf(struct mlx5_eq *eq) |
| { |
| struct mlx5_eqe *eqe; |
| int i; |
| |
| for (i = 0; i < eq->nent; i++) { |
| eqe = get_eqe(eq, i); |
| eqe->owner = MLX5_EQE_OWNER_INIT_VAL; |
| } |
| } |
| |
| static int |
| create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, const char *name, |
| struct mlx5_eq_param *param) |
| { |
| struct mlx5_eq_table *eq_table = dev->priv.eq_table; |
| struct mlx5_cq_table *cq_table = &eq->cq_table; |
| u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0}; |
| struct mlx5_priv *priv = &dev->priv; |
| u8 vecidx = param->index; |
| __be64 *pas; |
| void *eqc; |
| int inlen; |
| u32 *in; |
| int err; |
| |
| if (eq_table->irq_info[vecidx].context) |
| return -EEXIST; |
| |
| /* Init CQ table */ |
| memset(cq_table, 0, sizeof(*cq_table)); |
| spin_lock_init(&cq_table->lock); |
| INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC); |
| |
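| /* Pad the requested size with spare entries, so the consumer index can lag |
| * behind without the HCA seeing an overflow, and round up to a power of |
| * two, which the owner-bit test in next_eqe_sw() relies on. |
| */ |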
| eq->nent = roundup_pow_of_two(param->nent + MLX5_NUM_SPARE_EQE); |
| eq->cons_index = 0; |
| err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf); |
| if (err) |
| return err; |
| |
| init_eq_buf(eq); |
| |
| inlen = MLX5_ST_SZ_BYTES(create_eq_in) + |
| MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->buf.npages; |
| |
| in = kvzalloc(inlen, GFP_KERNEL); |
| if (!in) { |
| err = -ENOMEM; |
| goto err_buf; |
| } |
| |
| pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas); |
| mlx5_fill_page_array(&eq->buf, pas); |
| |
| MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ); |
| MLX5_SET64(create_eq_in, in, event_bitmask, param->mask); |
| |
| eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry); |
| MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent)); |
| MLX5_SET(eqc, eqc, uar_page, priv->uar->index); |
| MLX5_SET(eqc, eqc, intr, vecidx); |
| MLX5_SET(eqc, eqc, log_page_size, |
| eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); |
| |
| err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); |
| if (err) |
| goto err_in; |
| |
| snprintf(eq_table->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s", |
| name, pci_name(dev->pdev)); |
| eq_table->irq_info[vecidx].context = param->context; |
| |
| eq->vecidx = vecidx; |
| eq->eqn = MLX5_GET(create_eq_out, out, eq_number); |
| eq->irqn = pci_irq_vector(dev->pdev, vecidx); |
| eq->dev = dev; |
| eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET; |
| err = request_irq(eq->irqn, param->handler, 0, |
| eq_table->irq_info[vecidx].name, param->context); |
| if (err) |
| goto err_eq; |
| |
| err = mlx5_debug_eq_add(dev, eq); |
| if (err) |
| goto err_irq; |
| |
| /* EQs are created in ARMED state */ |
| eq_update_ci(eq, 1); |
| |
| kvfree(in); |
| return 0; |
| |
| err_irq: |
| free_irq(eq->irqn, eq); |
| |
| err_eq: |
| mlx5_cmd_destroy_eq(dev, eq->eqn); |
| |
| err_in: |
| kvfree(in); |
| |
| err_buf: |
| mlx5_buf_free(dev, &eq->buf); |
| return err; |
| } |
| |
| static int destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq) |
| { |
| struct mlx5_eq_table *eq_table = dev->priv.eq_table; |
| struct mlx5_irq_info *irq_info; |
| int err; |
| |
| irq_info = &eq_table->irq_info[eq->vecidx]; |
| |
| mlx5_debug_eq_remove(dev, eq); |
| |
| free_irq(eq->irqn, irq_info->context); |
| irq_info->context = NULL; |
| |
| err = mlx5_cmd_destroy_eq(dev, eq->eqn); |
| if (err) |
| mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n", |
| eq->eqn); |
| synchronize_irq(eq->irqn); |
| |
| mlx5_buf_free(dev, &eq->buf); |
| |
| return err; |
| } |
| |
| int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq) |
| { |
| struct mlx5_cq_table *table = &eq->cq_table; |
| int err; |
| |
| spin_lock_irq(&table->lock); |
| err = radix_tree_insert(&table->tree, cq->cqn, cq); |
| spin_unlock_irq(&table->lock); |
| |
| return err; |
| } |
| |
| int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq) |
| { |
| struct mlx5_cq_table *table = &eq->cq_table; |
| struct mlx5_core_cq *tmp; |
| |
| spin_lock_irq(&table->lock); |
| tmp = radix_tree_delete(&table->tree, cq->cqn); |
| spin_unlock_irq(&table->lock); |
| |
| if (!tmp) { |
| mlx5_core_warn(eq->dev, "cq 0x%x not found in eq 0x%x tree\n", cq->cqn, eq->eqn); |
| return -ENOENT; |
| } |
| |
| if (tmp != cq) { |
| mlx5_core_warn(eq->dev, "corruption on cqn 0x%x in eq 0x%x\n", cq->cqn, eq->eqn); |
| return -EINVAL; |
| } |
| |
| return 0; |
| } |
| |
| int mlx5_eq_table_init(struct mlx5_core_dev *dev) |
| { |
| struct mlx5_eq_table *eq_table; |
| int err; |
| |
| eq_table = kvzalloc(sizeof(*eq_table), GFP_KERNEL); |
| if (!eq_table) |
| return -ENOMEM; |
| |
| dev->priv.eq_table = eq_table; |
| |
| err = mlx5_eq_debugfs_init(dev); |
| if (err) |
| goto kvfree_eq_table; |
| |
| mutex_init(&eq_table->lock); |
| |
| return 0; |
| |
| kvfree_eq_table: |
| kvfree(eq_table); |
| dev->priv.eq_table = NULL; |
| return err; |
| } |
| |
| void mlx5_eq_table_cleanup(struct mlx5_core_dev *dev) |
| { |
| mlx5_eq_debugfs_cleanup(dev); |
| kvfree(dev->priv.eq_table); |
| } |
| |
| /* Async EQs */ |
| |
| static int create_async_eq(struct mlx5_core_dev *dev, const char *name, |
| struct mlx5_eq *eq, struct mlx5_eq_param *param) |
| { |
| struct mlx5_eq_table *eq_table = dev->priv.eq_table; |
| int err; |
| |
| mutex_lock(&eq_table->lock); |
| if (param->index >= MLX5_EQ_MAX_ASYNC_EQS) { |
| err = -ENOSPC; |
| goto unlock; |
| } |
| |
| err = create_map_eq(dev, eq, name, param); |
| unlock: |
| mutex_unlock(&eq_table->lock); |
| return err; |
| } |
| |
| static int destroy_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq) |
| { |
| struct mlx5_eq_table *eq_table = dev->priv.eq_table; |
| int err; |
| |
| mutex_lock(&eq_table->lock); |
| err = destroy_unmap_eq(dev, eq); |
| mutex_unlock(&eq_table->lock); |
| return err; |
| } |
| |
| static u64 gather_async_events_mask(struct mlx5_core_dev *dev) |
| { |
| u64 async_event_mask = MLX5_ASYNC_EVENT_MASK; |
| |
| if (MLX5_VPORT_MANAGER(dev)) |
| async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE); |
| |
| if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH && |
| MLX5_CAP_GEN(dev, general_notification_event)) |
| async_event_mask |= (1ull << MLX5_EVENT_TYPE_GENERAL_EVENT); |
| |
| if (MLX5_CAP_GEN(dev, port_module_event)) |
| async_event_mask |= (1ull << MLX5_EVENT_TYPE_PORT_MODULE_EVENT); |
| else |
| mlx5_core_dbg(dev, "port_module_event is not set\n"); |
| |
| if (MLX5_PPS_CAP(dev)) |
| async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT); |
| |
| if (MLX5_CAP_GEN(dev, fpga)) |
| async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR) | |
| (1ull << MLX5_EVENT_TYPE_FPGA_QP_ERROR); |
| if (MLX5_CAP_GEN_MAX(dev, dct)) |
| async_event_mask |= (1ull << MLX5_EVENT_TYPE_DCT_DRAINED); |
| |
| if (MLX5_CAP_GEN(dev, temp_warn_event)) |
| async_event_mask |= (1ull << MLX5_EVENT_TYPE_TEMP_WARN_EVENT); |
| |
| if (MLX5_CAP_MCAM_REG(dev, tracer_registers)) |
| async_event_mask |= (1ull << MLX5_EVENT_TYPE_DEVICE_TRACER); |
| |
| return async_event_mask; |
| } |
| |
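| /* Async EQs come up in a fixed order: the command EQ is created first so |
| * that mlx5_cmd_use_events() can switch command completions from polling |
| * to event-driven mode before the remaining async EQs, whose creation |
| * itself goes through the command interface, are set up. |
| */ |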
| static int create_async_eqs(struct mlx5_core_dev *dev) |
| { |
| struct mlx5_eq_table *table = dev->priv.eq_table; |
| struct mlx5_eq_param param = {}; |
| int err; |
| |
| param = (struct mlx5_eq_param) { |
| .index = MLX5_EQ_CMD_IDX, |
| .mask = 1ull << MLX5_EVENT_TYPE_CMD, |
| .nent = MLX5_NUM_CMD_EQE, |
| .context = &table->cmd_eq, |
| .handler = mlx5_eq_async_int, |
| }; |
| err = create_async_eq(dev, "mlx5_cmd_eq", &table->cmd_eq, ¶m); |
| if (err) { |
| mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err); |
| return err; |
| } |
| |
| mlx5_cmd_use_events(dev); |
| |
| param = (struct mlx5_eq_param) { |
| .index = MLX5_EQ_ASYNC_IDX, |
| .mask = gather_async_events_mask(dev), |
| .nent = MLX5_NUM_ASYNC_EQE, |
| .context = &table->async_eq, |
| .handler = mlx5_eq_async_int, |
| }; |
| err = create_async_eq(dev, "mlx5_async_eq", &table->async_eq, ¶m); |
| if (err) { |
| mlx5_core_warn(dev, "failed to create async EQ %d\n", err); |
| goto err1; |
| } |
| |
| param = (struct mlx5_eq_param) { |
| .index = MLX5_EQ_PAGEREQ_IDX, |
| .mask = 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, |
| .nent = /* TODO: sriov max_vf + */ 1, |
| .context = &table->pages_eq, |
| .handler = mlx5_eq_async_int, |
| }; |
| err = create_async_eq(dev, "mlx5_pages_eq", &table->pages_eq, ¶m); |
| if (err) { |
| mlx5_core_warn(dev, "failed to create pages EQ %d\n", err); |
| goto err2; |
| } |
| |
| #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING |
| if (MLX5_CAP_GEN(dev, pg)) { |
| err = create_pf_eq(dev, &table->pfault_eq); |
| if (err) { |
| mlx5_core_warn(dev, "failed to create page fault EQ %d\n", |
| err); |
| goto err3; |
| } |
| } |
| |
| return err; |
| err3: |
| destroy_async_eq(dev, &table->pages_eq); |
| #else |
| return err; |
| #endif |
| |
| err2: |
| destroy_async_eq(dev, &table->async_eq); |
| |
| err1: |
| mlx5_cmd_use_polling(dev); |
| destroy_async_eq(dev, &table->cmd_eq); |
| return err; |
| } |
| |
| static void destroy_async_eqs(struct mlx5_core_dev *dev) |
| { |
| struct mlx5_eq_table *table = dev->priv.eq_table; |
| int err; |
| |
| #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING |
| if (MLX5_CAP_GEN(dev, pg)) { |
| err = destroy_pf_eq(dev, &table->pfault_eq); |
| if (err) |
| mlx5_core_err(dev, "failed to destroy page fault eq, err(%d)\n", |
| err); |
| } |
| #endif |
| |
| err = destroy_async_eq(dev, &table->pages_eq); |
| if (err) |
| mlx5_core_err(dev, "failed to destroy pages eq, err(%d)\n", |
| err); |
| |
| err = destroy_async_eq(dev, &table->async_eq); |
| if (err) |
| mlx5_core_err(dev, "failed to destroy async eq, err(%d)\n", |
| err); |
| mlx5_cmd_use_polling(dev); |
| |
| err = destroy_async_eq(dev, &table->cmd_eq); |
| if (err) |
| mlx5_core_err(dev, "failed to destroy command eq, err(%d)\n", |
| err); |
| } |
| |
| struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev) |
| { |
| return &dev->priv.eq_table->async_eq; |
| } |
| |
| void mlx5_eq_synchronize_async_irq(struct mlx5_core_dev *dev) |
| { |
| synchronize_irq(dev->priv.eq_table->async_eq.irqn); |
| } |
| |
| void mlx5_eq_synchronize_cmd_irq(struct mlx5_core_dev *dev) |
| { |
| synchronize_irq(dev->priv.eq_table->cmd_eq.irqn); |
| } |
| |
| /* Generic EQ API for mlx5_core consumers. |
| * Currently needed only by the RDMA ODP EQ. |
| */ |
| struct mlx5_eq * |
| mlx5_eq_create_generic(struct mlx5_core_dev *dev, const char *name, |
| struct mlx5_eq_param *param) |
| { |
| struct mlx5_eq *eq = kvzalloc(sizeof(*eq), GFP_KERNEL); |
| int err; |
| |
| if (!eq) |
| return ERR_PTR(-ENOMEM); |
| |
| err = create_async_eq(dev, name, eq, param); |
| if (err) { |
| kvfree(eq); |
| eq = ERR_PTR(err); |
| } |
| |
| return eq; |
| } |
| EXPORT_SYMBOL(mlx5_eq_create_generic); |
| |
| int mlx5_eq_destroy_generic(struct mlx5_core_dev *dev, struct mlx5_eq *eq) |
| { |
| int err; |
| |
| if (IS_ERR(eq)) |
| return -EINVAL; |
| |
| err = destroy_async_eq(dev, eq); |
| if (err) |
| goto out; |
| |
| kvfree(eq); |
| out: |
| return err; |
| } |
| EXPORT_SYMBOL(mlx5_eq_destroy_generic); |
| |
| struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc) |
| { |
| u32 ci = eq->cons_index + cc; |
| struct mlx5_eqe *eqe; |
| |
| eqe = get_eqe(eq, ci & (eq->nent - 1)); |
| eqe = ((eqe->owner & 1) ^ !!(ci & eq->nent)) ? NULL : eqe; |
| /* Make sure we read EQ entry contents after we've |
| * checked the ownership bit. |
| */ |
| if (eqe) |
| dma_rmb(); |
| |
| return eqe; |
| } |
| EXPORT_SYMBOL(mlx5_eq_get_eqe); |
| |
| void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm) |
| { |
| __be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2); |
| u32 val; |
| |
| eq->cons_index += cc; |
| val = (eq->cons_index & 0xffffff) | (eq->eqn << 24); |
| |
| __raw_writel((__force u32)cpu_to_be32(val), addr); |
| /* We still want ordering, just not swabbing, so add a barrier */ |
| mb(); |
| } |
| EXPORT_SYMBOL(mlx5_eq_update_ci); |
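| |
| /* Illustrative polling loop for the generic EQ API above (the consumer's |
| * handle_eqe() helper is hypothetical): mlx5_eq_get_eqe() peeks at |
| * cons_index + cc without advancing the index, so the caller counts the |
| * entries it consumes and commits them once via mlx5_eq_update_ci(): |
| * |
| * struct mlx5_eqe *eqe; |
| * u32 cc = 0; |
| * |
| * while ((eqe = mlx5_eq_get_eqe(eq, cc))) { |
| * handle_eqe(eqe); |
| * cc++; |
| * } |
| * mlx5_eq_update_ci(eq, cc, true); |
| */ |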
| |
| /* Completion EQs */ |
| |
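| /* Spread the completion vectors' IRQ affinity hints over the CPUs local to |
| * the device's NUMA node, one CPU per vector. |
| */ |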
| static int set_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i) |
| { |
| struct mlx5_priv *priv = &mdev->priv; |
| int vecidx = MLX5_EQ_VEC_COMP_BASE + i; |
| int irq = pci_irq_vector(mdev->pdev, vecidx); |
| struct mlx5_irq_info *irq_info = &priv->eq_table->irq_info[vecidx]; |
| |
| if (!zalloc_cpumask_var(&irq_info->mask, GFP_KERNEL)) { |
| mlx5_core_warn(mdev, "zalloc_cpumask_var failed\n"); |
| return -ENOMEM; |
| } |
| |
| cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node), |
| irq_info->mask); |
| |
| if (IS_ENABLED(CONFIG_SMP) && |
| irq_set_affinity_hint(irq, irq_info->mask)) |
| mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x\n", irq); |
| |
| return 0; |
| } |
| |
| static void clear_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i) |
| { |
| int vecidx = MLX5_EQ_VEC_COMP_BASE + i; |
| struct mlx5_priv *priv = &mdev->priv; |
| int irq = pci_irq_vector(mdev->pdev, vecidx); |
| struct mlx5_irq_info *irq_info = &priv->eq_table->irq_info[vecidx]; |
| |
| irq_set_affinity_hint(irq, NULL); |
| free_cpumask_var(irq_info->mask); |
| } |
| |
| static int set_comp_irq_affinity_hints(struct mlx5_core_dev *mdev) |
| { |
| int err; |
| int i; |
| |
| for (i = 0; i < mdev->priv.eq_table->num_comp_vectors; i++) { |
| err = set_comp_irq_affinity_hint(mdev, i); |
| if (err) |
| goto err_out; |
| } |
| |
| return 0; |
| |
| err_out: |
| for (i--; i >= 0; i--) |
| clear_comp_irq_affinity_hint(mdev, i); |
| |
| return err; |
| } |
| |
| static void clear_comp_irqs_affinity_hints(struct mlx5_core_dev *mdev) |
| { |
| int i; |
| |
| for (i = 0; i < mdev->priv.eq_table->num_comp_vectors; i++) |
| clear_comp_irq_affinity_hint(mdev, i); |
| } |
| |
| static void destroy_comp_eqs(struct mlx5_core_dev *dev) |
| { |
| struct mlx5_eq_table *table = dev->priv.eq_table; |
| struct mlx5_eq_comp *eq, *n; |
| |
| clear_comp_irqs_affinity_hints(dev); |
| |
| #ifdef CONFIG_RFS_ACCEL |
| if (table->rmap) { |
| free_irq_cpu_rmap(table->rmap); |
| table->rmap = NULL; |
| } |
| #endif |
| list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) { |
| list_del(&eq->list); |
| if (destroy_unmap_eq(dev, &eq->core)) |
| mlx5_core_warn(dev, "failed to destroy comp EQ 0x%x\n", |
| eq->core.eqn); |
| tasklet_disable(&eq->tasklet_ctx.task); |
| kfree(eq); |
| } |
| } |
| |
| static int create_comp_eqs(struct mlx5_core_dev *dev) |
| { |
| struct mlx5_eq_table *table = dev->priv.eq_table; |
| char name[MLX5_MAX_IRQ_NAME]; |
| struct mlx5_eq_comp *eq; |
| int ncomp_vec; |
| int nent; |
| int err; |
| int i; |
| |
| INIT_LIST_HEAD(&table->comp_eqs_list); |
| ncomp_vec = table->num_comp_vectors; |
| nent = MLX5_COMP_EQ_SIZE; |
| #ifdef CONFIG_RFS_ACCEL |
| table->rmap = alloc_irq_cpu_rmap(ncomp_vec); |
| if (!table->rmap) |
| return -ENOMEM; |
| #endif |
| for (i = 0; i < ncomp_vec; i++) { |
| int vecidx = i + MLX5_EQ_VEC_COMP_BASE; |
| struct mlx5_eq_param param = {}; |
| |
| eq = kzalloc(sizeof(*eq), GFP_KERNEL); |
| if (!eq) { |
| err = -ENOMEM; |
| goto clean; |
| } |
| |
| INIT_LIST_HEAD(&eq->tasklet_ctx.list); |
| INIT_LIST_HEAD(&eq->tasklet_ctx.process_list); |
| spin_lock_init(&eq->tasklet_ctx.lock); |
| tasklet_init(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb, |
| (unsigned long)&eq->tasklet_ctx); |
| |
| #ifdef CONFIG_RFS_ACCEL |
| irq_cpu_rmap_add(table->rmap, pci_irq_vector(dev->pdev, vecidx)); |
| #endif |
| snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i); |
| param = (struct mlx5_eq_param) { |
| .index = vecidx, |
| .mask = 0, |
| .nent = nent, |
| .context = &eq->core, |
| .handler = mlx5_eq_comp_int |
| }; |
| err = create_map_eq(dev, &eq->core, name, ¶m); |
| if (err) { |
| kfree(eq); |
| goto clean; |
| } |
| mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->core.eqn); |
| /* Add to tail to keep the list ordered; mlx5_vector2eqn() relies on it */ |
| list_add_tail(&eq->list, &table->comp_eqs_list); |
| } |
| |
| err = set_comp_irq_affinity_hints(dev); |
| if (err) { |
| mlx5_core_err(dev, "Failed to alloc affinity hint cpumask\n"); |
| goto clean; |
| } |
| |
| return 0; |
| |
| clean: |
| destroy_comp_eqs(dev); |
| return err; |
| } |
| |
| int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, |
| unsigned int *irqn) |
| { |
| struct mlx5_eq_table *table = dev->priv.eq_table; |
| struct mlx5_eq_comp *eq, *n; |
| int err = -ENOENT; |
| int i = 0; |
| |
| list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) { |
| if (i++ == vector) { |
| *eqn = eq->core.eqn; |
| *irqn = eq->core.irqn; |
| err = 0; |
| break; |
| } |
| } |
| |
| return err; |
| } |
| EXPORT_SYMBOL(mlx5_vector2eqn); |
| |
| unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev) |
| { |
| return dev->priv.eq_table->num_comp_vectors; |
| } |
| EXPORT_SYMBOL(mlx5_comp_vectors_count); |
| |
| struct cpumask * |
| mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector) |
| { |
| /* TODO: consider irq_get_affinity_mask(irq) */ |
| return dev->priv.eq_table->irq_info[vector + MLX5_EQ_VEC_COMP_BASE].mask; |
| } |
| EXPORT_SYMBOL(mlx5_comp_irq_get_affinity_mask); |
| |
| struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev) |
| { |
| #ifdef CONFIG_RFS_ACCEL |
| return dev->priv.eq_table->rmap; |
| #else |
| return NULL; |
| #endif |
| } |
| |
| struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn) |
| { |
| struct mlx5_eq_table *table = dev->priv.eq_table; |
| struct mlx5_eq_comp *eq; |
| |
| list_for_each_entry(eq, &table->comp_eqs_list, list) { |
| if (eq->core.eqn == eqn) |
| return eq; |
| } |
| |
| return ERR_PTR(-ENOENT); |
| } |
| |
| /* This function should only be called after mlx5_cmd_force_teardown_hca */ |
| void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev) |
| { |
| struct mlx5_eq_table *table = dev->priv.eq_table; |
| int i, max_eqs; |
| |
| clear_comp_irqs_affinity_hints(dev); |
| |
| #ifdef CONFIG_RFS_ACCEL |
| if (table->rmap) { |
| free_irq_cpu_rmap(table->rmap); |
| table->rmap = NULL; |
| } |
| #endif |
| |
| mutex_lock(&table->lock); /* sync with create/destroy_async_eq */ |
| max_eqs = table->num_comp_vectors + MLX5_EQ_VEC_COMP_BASE; |
| for (i = max_eqs - 1; i >= 0; i--) { |
| if (!table->irq_info[i].context) |
| continue; |
| free_irq(pci_irq_vector(dev->pdev, i), table->irq_info[i].context); |
| table->irq_info[i].context = NULL; |
| } |
| mutex_unlock(&table->lock); |
| pci_free_irq_vectors(dev->pdev); |
| } |
| |
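| /* MSI-X layout: the first MLX5_EQ_VEC_COMP_BASE vectors are reserved for |
| * the async EQs (cmd, async, pages and, with ODP, page fault), and |
| * everything above that goes to the completion EQs, so at least |
| * MLX5_EQ_VEC_COMP_BASE + 1 vectors are required. |
| */ |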
| static int alloc_irq_vectors(struct mlx5_core_dev *dev) |
| { |
| struct mlx5_priv *priv = &dev->priv; |
| struct mlx5_eq_table *table = priv->eq_table; |
| int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ? |
| MLX5_CAP_GEN(dev, max_num_eqs) : |
| 1 << MLX5_CAP_GEN(dev, log_max_eq); |
| int nvec; |
| int err; |
| |
| nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() + |
| MLX5_EQ_VEC_COMP_BASE; |
| nvec = min_t(int, nvec, num_eqs); |
| if (nvec <= MLX5_EQ_VEC_COMP_BASE) |
| return -ENOMEM; |
| |
| table->irq_info = kcalloc(nvec, sizeof(*table->irq_info), GFP_KERNEL); |
| if (!table->irq_info) |
| return -ENOMEM; |
| |
| nvec = pci_alloc_irq_vectors(dev->pdev, MLX5_EQ_VEC_COMP_BASE + 1, |
| nvec, PCI_IRQ_MSIX); |
| if (nvec < 0) { |
| err = nvec; |
| goto err_free_irq_info; |
| } |
| |
| table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE; |
| |
| return 0; |
| |
| err_free_irq_info: |
| kfree(table->irq_info); |
| return err; |
| } |
| |
| static void free_irq_vectors(struct mlx5_core_dev *dev) |
| { |
| struct mlx5_priv *priv = &dev->priv; |
| |
| pci_free_irq_vectors(dev->pdev); |
| kfree(priv->eq_table->irq_info); |
| } |
| |
| int mlx5_eq_table_create(struct mlx5_core_dev *dev) |
| { |
| int err; |
| |
| err = alloc_irq_vectors(dev); |
| if (err) { |
| mlx5_core_err(dev, "alloc irq vectors failed\n"); |
| return err; |
| } |
| |
| err = create_async_eqs(dev); |
| if (err) { |
| mlx5_core_err(dev, "Failed to create async EQs\n"); |
| goto err_async_eqs; |
| } |
| |
| err = create_comp_eqs(dev); |
| if (err) { |
| mlx5_core_err(dev, "Failed to create completion EQs\n"); |
| goto err_comp_eqs; |
| } |
| |
| return 0; |
| err_comp_eqs: |
| destroy_async_eqs(dev); |
| err_async_eqs: |
| free_irq_vectors(dev); |
| return err; |
| } |
| |
| void mlx5_eq_table_destroy(struct mlx5_core_dev *dev) |
| { |
| destroy_comp_eqs(dev); |
| destroy_async_eqs(dev); |
| free_irq_vectors(dev); |
| } |