// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*/
#include "cam_req_mgr_workq.h"
#include "cam_debug_util.h"
#define WORKQ_ACQUIRE_LOCK(workq, flags) do { \
	if ((workq)->in_irq) \
		spin_lock_irqsave(&(workq)->lock_bh, (flags)); \
	else \
		spin_lock_bh(&(workq)->lock_bh); \
} while (0)

#define WORKQ_RELEASE_LOCK(workq, flags) do { \
	if ((workq)->in_irq) \
		spin_unlock_irqrestore(&(workq)->lock_bh, (flags)); \
	else \
		spin_unlock_bh(&(workq)->lock_bh); \
} while (0)
struct crm_workq_task *cam_req_mgr_workq_get_task(
struct cam_req_mgr_core_workq *workq)
{
struct crm_workq_task *task = NULL;
unsigned long flags = 0;
if (!workq)
return NULL;
WORKQ_ACQUIRE_LOCK(workq, flags);
if (list_empty(&workq->task.empty_head))
goto end;
	task = list_first_entry(&workq->task.empty_head,
		struct crm_workq_task, entry);
	atomic_dec(&workq->task.free_cnt);
	list_del_init(&task->entry);
end:
WORKQ_RELEASE_LOCK(workq, flags);
return task;
}
EXPORT_SYMBOL_GPL(cam_req_mgr_workq_get_task);
static void cam_req_mgr_workq_put_task(struct crm_workq_task *task)
{
struct cam_req_mgr_core_workq *workq =
(struct cam_req_mgr_core_workq *)task->parent;
	unsigned long flags = 0;

	/*
	 * The task is already off every workq list when it reaches this
	 * point (its entry is either freshly initialized or was removed
	 * under the lock), so this list_del_init() is a no-op safety
	 * measure; only the re-add to the free pool must be serialized.
	 */
	list_del_init(&task->entry);
	task->cancel = 0;
	task->process_cb = NULL;
	task->priv = NULL;
	WORKQ_ACQUIRE_LOCK(workq, flags);
	list_add_tail(&task->entry, &workq->task.empty_head);
	atomic_inc(&workq->task.free_cnt);
WORKQ_RELEASE_LOCK(workq, flags);
}
/**
 * cam_req_mgr_process_task() - Process one dequeued task and return it
 * to the free pool
 * @task: pointer to the task the workq thread shall process
 *
 * Returns 0 on success or -EINVAL if @task is NULL.
 */
static int cam_req_mgr_process_task(struct crm_workq_task *task)
{
	if (!task)
		return -EINVAL;

if (task->process_cb)
task->process_cb(task->priv, task->payload);
else
CAM_WARN(CAM_CRM, "FATAL:no task handler registered for workq");
cam_req_mgr_workq_put_task(task);
return 0;
}
/**
 * cam_req_mgr_process_workq() - main loop draining enqueued tasks
 * @w: kthread_work embedded in the workq
 *
 * Drains the per-priority pending lists in order, CRM_TASK_PRIORITY_0
 * first, dropping the workq lock around each task callback.
 */
static void cam_req_mgr_process_workq(struct kthread_work *w)
{
struct cam_req_mgr_core_workq *workq = NULL;
struct crm_workq_task *task;
int32_t i = CRM_TASK_PRIORITY_0;
unsigned long flags = 0;
if (!w) {
CAM_ERR(CAM_CRM, "NULL task pointer can not schedule");
return;
}
	workq = container_of(w, struct cam_req_mgr_core_workq, work);
while (i < CRM_TASK_PRIORITY_MAX) {
WORKQ_ACQUIRE_LOCK(workq, flags);
while (!list_empty(&workq->task.process_head[i])) {
task = list_first_entry(&workq->task.process_head[i],
struct crm_workq_task, entry);
atomic_sub(1, &workq->task.pending_cnt);
list_del_init(&task->entry);
WORKQ_RELEASE_LOCK(workq, flags);
cam_req_mgr_process_task(task);
CAM_DBG(CAM_CRM, "processed task %pK free_cnt %d",
task, atomic_read(&workq->task.free_cnt));
WORKQ_ACQUIRE_LOCK(workq, flags);
}
WORKQ_RELEASE_LOCK(workq, flags);
i++;
}
}
int cam_req_mgr_workq_enqueue_task(struct crm_workq_task *task,
void *priv, int32_t prio)
{
int rc = 0;
struct cam_req_mgr_core_workq *workq = NULL;
unsigned long flags = 0;
if (!task) {
CAM_WARN(CAM_CRM, "NULL task pointer can not schedule");
rc = -EINVAL;
goto end;
}
workq = (struct cam_req_mgr_core_workq *)task->parent;
if (!workq) {
CAM_DBG(CAM_CRM, "NULL workq pointer suspect mem corruption");
rc = -EINVAL;
goto end;
}
if (task->cancel == 1) {
cam_req_mgr_workq_put_task(task);
CAM_WARN(CAM_CRM, "task aborted and queued back to pool");
rc = 0;
goto end;
}
task->priv = priv;
task->priority =
(prio < CRM_TASK_PRIORITY_MAX && prio >= CRM_TASK_PRIORITY_0)
? prio : CRM_TASK_PRIORITY_0;
WORKQ_ACQUIRE_LOCK(workq, flags);
list_add_tail(&task->entry,
&workq->task.process_head[task->priority]);
atomic_add(1, &workq->task.pending_cnt);
CAM_DBG(CAM_CRM, "enq task %pK pending_cnt %d",
task, atomic_read(&workq->task.pending_cnt));
kthread_queue_work(&workq->job_worker, &workq->work);
WORKQ_RELEASE_LOCK(workq, flags);
end:
return rc;
}
EXPORT_SYMBOL_GPL(cam_req_mgr_workq_enqueue_task);
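
/*
 * A minimal caller-side sketch of the get/enqueue pair above; my_workq,
 * my_task_cb, and my_dev are hypothetical names, not part of this file:
 *
 *	struct crm_workq_task *task;
 *
 *	task = cam_req_mgr_workq_get_task(my_workq);
 *	if (!task)
 *		return -EBUSY;
 *	task->process_cb = my_task_cb;
 *	rc = cam_req_mgr_workq_enqueue_task(task, my_dev,
 *		CRM_TASK_PRIORITY_0);
 *
 * On success the workq thread invokes my_task_cb(my_dev, task->payload)
 * and returns the task to the free pool; the caller must not reuse the
 * task pointer afterwards.
 */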
int cam_req_mgr_workq_create(char *name, int32_t num_tasks,
struct cam_req_mgr_core_workq **workq, enum crm_workq_context in_irq,
int flags)
{
	int32_t i;
struct crm_workq_task *task;
struct cam_req_mgr_core_workq *crm_workq = NULL;
char buf[128] = "crm_workq-";
struct sched_param param = { .sched_priority = 1 };
int error;
	if (!workq)
		return -EINVAL;

	if (!*workq) {
crm_workq = kzalloc(sizeof(struct cam_req_mgr_core_workq),
GFP_KERNEL);
		if (!crm_workq)
return -ENOMEM;
if (flags & CAM_WORKQ_FLAG_SERIAL)
max_active_tasks = 1;
strlcat(buf, name, sizeof(buf));
CAM_DBG(CAM_CRM, "create workque crm_workq-%s", name);
kthread_init_worker(&crm_workq->job_worker);
		crm_workq->job_worker_thread = kthread_run(kthread_worker_fn,
			&crm_workq->job_worker, "%s", buf);
		if (IS_ERR(crm_workq->job_worker_thread)) {
			error = PTR_ERR(crm_workq->job_worker_thread);
			kfree(crm_workq);
			return error;
		}
/* non-fatal error, ignore if occurs */
error = sched_setscheduler(crm_workq->job_worker_thread,
SCHED_FIFO,
&param);
		if (error) {
CAM_WARN(CAM_CRM, "Unable to set SCHED_FIFO, error %d",
error);
}
/* Workq attributes initialization */
kthread_init_work(&crm_workq->work, cam_req_mgr_process_workq);
spin_lock_init(&crm_workq->lock_bh);
CAM_DBG(CAM_CRM, "LOCK_DBG workq %s lock %pK",
name, &crm_workq->lock_bh);
/* Task attributes initialization */
atomic_set(&crm_workq->task.pending_cnt, 0);
atomic_set(&crm_workq->task.free_cnt, 0);
for (i = CRM_TASK_PRIORITY_0; i < CRM_TASK_PRIORITY_MAX; i++)
INIT_LIST_HEAD(&crm_workq->task.process_head[i]);
INIT_LIST_HEAD(&crm_workq->task.empty_head);
crm_workq->in_irq = in_irq;
crm_workq->task.num_task = num_tasks;
crm_workq->task.pool = kcalloc(crm_workq->task.num_task,
sizeof(struct crm_workq_task), GFP_KERNEL);
		if (!crm_workq->task.pool) {
			CAM_WARN(CAM_CRM, "Insufficient memory %zu",
				sizeof(struct crm_workq_task) *
				crm_workq->task.num_task);
			/* The worker thread is already running and holds a
			 * reference into crm_workq; stop it before freeing.
			 */
			kthread_stop(crm_workq->job_worker_thread);
			kfree(crm_workq);
			return -ENOMEM;
		}
for (i = 0; i < crm_workq->task.num_task; i++) {
task = &crm_workq->task.pool[i];
task->parent = (void *)crm_workq;
/* Put all tasks in free pool */
INIT_LIST_HEAD(&task->entry);
cam_req_mgr_workq_put_task(task);
}
*workq = crm_workq;
CAM_DBG(CAM_CRM, "free tasks %d",
atomic_read(&crm_workq->task.free_cnt));
}
return 0;
}
EXPORT_SYMBOL_GPL(cam_req_mgr_workq_create);
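
/*
 * Lifecycle sketch, assuming a hypothetical owner struct my_dev holding
 * a NULL-initialized workq handle; the in_irq enum value shown is
 * assumed from the workq header and stands for the non-IRQ usage:
 *
 *	rc = cam_req_mgr_workq_create("my_ctx", 10, &my_dev->workq,
 *		CRM_WORKQ_USAGE_NON_IRQ, 0);
 *	if (rc)
 *		return rc;
 *	...
 *	cam_req_mgr_workq_destroy(&my_dev->workq);
 *
 * create only allocates when *workq is NULL, and destroy NULLs the
 * handle back out, so the pair is safe across repeated rebinds.
 */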
void cam_req_mgr_workq_destroy(struct cam_req_mgr_core_workq **crm_workq)
{
unsigned long flags = 0;
struct task_struct *job;
CAM_DBG(CAM_CRM, "destroy workque %pK", crm_workq);
	if (crm_workq && *crm_workq) {
WORKQ_ACQUIRE_LOCK(*crm_workq, flags);
if ((*crm_workq)->job_worker_thread) {
job = (*crm_workq)->job_worker_thread;
(*crm_workq)->job_worker_thread = NULL;
WORKQ_RELEASE_LOCK(*crm_workq, flags);
kthread_stop(job);
} else {
WORKQ_RELEASE_LOCK(*crm_workq, flags);
}
/* Destroy workq payload data */
kfree((*crm_workq)->task.pool[0].payload);
(*crm_workq)->task.pool[0].payload = NULL;
kfree((*crm_workq)->task.pool);
kfree(*crm_workq);
*crm_workq = NULL;
}
}
EXPORT_SYMBOL_GPL(cam_req_mgr_workq_destroy);