| /* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved. |
| * |
| * This program is free software; you can redistribute it and/or modify |
| * it under the terms of the GNU General Public License version 2 and |
| * only version 2 as published by the Free Software Foundation. |
| * |
| * This program is distributed in the hope that it will be useful, |
| * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| * GNU General Public License for more details. |
| * |
| */ |
| |
| #define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__ |
| |
| #include <linux/kernel.h> |
| #include <linux/init.h> |
| #include <linux/module.h> |
| #include <linux/kthread.h> |
| #include <linux/mutex.h> |
| #include <linux/msm_tsens.h> |
| #include <linux/workqueue.h> |
| #include <linux/completion.h> |
| #include <linux/cpu.h> |
| #include <linux/cpufreq.h> |
| #include <linux/msm_thermal.h> |
| #include <linux/platform_device.h> |
| #include <linux/of.h> |
| #include <linux/err.h> |
| #include <linux/slab.h> |
| #include <linux/sysfs.h> |
| #include <linux/types.h> |
| #include <linux/thermal.h> |
| #include <linux/regulator/rpm-smd-regulator.h> |
| #include <linux/regulator/consumer.h> |
| #include <linux/regulator/driver.h> |
| #include <linux/msm_thermal_ioctl.h> |
| #include <soc/qcom/rpm-smd.h> |
| #include <soc/qcom/scm.h> |
| #include <linux/debugfs.h> |
| #include <linux/pm_opp.h> |
| #include <linux/sched/rt.h> |
| #include <linux/notifier.h> |
| #include <linux/reboot.h> |
| #include <linux/suspend.h> |
| #include <soc/qcom/msm-core.h> |
| #include <linux/cpumask.h> |
| #include <linux/qpnp/power-on.h> |
| |
| #define CREATE_TRACE_POINTS |
| #define TRACE_MSM_THERMAL |
| #include <trace/trace_thermal.h> |
| |
| #define MAX_CURRENT_UA 100000 |
| #define MAX_RAILS 5 |
| #define TSENS_NAME_FORMAT "tsens_tz_sensor%d" |
| #define THERM_SECURE_BITE_CMD 8 |
| #define SENSOR_SCALING_FACTOR 1 |
| #define MSM_THERMAL_NAME "msm_thermal" |
| #define MSM_TSENS_PRINT "log_tsens_temperature" |
| #define CPU_BUF_SIZE 64 |
| #define CPU_DEVICE "cpu%d" |
| |
| #define THERM_CREATE_DEBUGFS_DIR(_node, _name, _parent, _ret) \ |
| do { \ |
| _node = debugfs_create_dir(_name, _parent); \ |
| if (IS_ERR(_node)) { \ |
| _ret = PTR_ERR(_node); \ |
| pr_err("Error creating debugfs dir:%s. err:%d\n", \ |
| _name, _ret); \ |
| } \ |
| } while (0) |
| |
| #define UPDATE_THRESHOLD_SET(_val, _trip) do { \ |
| if (_trip == THERMAL_TRIP_CONFIGURABLE_HI) \ |
| _val |= 1; \ |
| else if (_trip == THERMAL_TRIP_CONFIGURABLE_LOW)\ |
| _val |= 2; \ |
| } while (0) |
| |
| static struct msm_thermal_data msm_thermal_info; |
| static struct delayed_work check_temp_work; |
| static bool core_control_enabled; |
| static uint32_t cpus_offlined; |
| static cpumask_var_t cpus_previously_online; |
| static DEFINE_MUTEX(core_control_mutex); |
| static struct kobject *cc_kobj; |
| static struct kobject *mx_kobj; |
| static struct task_struct *hotplug_task; |
| static struct task_struct *freq_mitigation_task; |
| static struct task_struct *thermal_monitor_task; |
| static struct completion hotplug_notify_complete; |
| static struct completion freq_mitigation_complete; |
| static struct completion thermal_monitor_complete; |
| |
| static int enabled; |
| static int polling_enabled; |
| static int rails_cnt; |
| static int sensor_cnt; |
| static int psm_rails_cnt; |
| static int ocr_rail_cnt; |
| static int limit_idx; |
| static int limit_idx_low; |
| static int limit_idx_high; |
| static int max_tsens_num; |
| static struct cpufreq_frequency_table *table; |
| static uint32_t usefreq; |
| static int freq_table_get; |
| static bool vdd_rstr_enabled; |
| static bool vdd_rstr_nodes_called; |
| static bool vdd_rstr_probed; |
| static bool sensor_info_nodes_called; |
| static bool sensor_info_probed; |
| static bool psm_enabled; |
| static bool psm_nodes_called; |
| static bool psm_probed; |
| static bool freq_mitigation_enabled; |
| static bool ocr_enabled; |
| static bool ocr_nodes_called; |
| static bool ocr_probed; |
| static bool ocr_reg_init_defer; |
| static bool hotplug_enabled; |
| static bool interrupt_mode_enable; |
| static bool msm_thermal_probed; |
| static bool gfx_crit_phase_ctrl_enabled; |
| static bool gfx_warm_phase_ctrl_enabled; |
| static bool cx_phase_ctrl_enabled; |
| static bool vdd_mx_enabled; |
| static bool therm_reset_enabled; |
| static bool online_core; |
| static bool cluster_info_probed; |
| static bool cluster_info_nodes_called; |
| static int *tsens_id_map; |
| static DEFINE_MUTEX(vdd_rstr_mutex); |
| static DEFINE_MUTEX(psm_mutex); |
| static DEFINE_MUTEX(cx_mutex); |
| static DEFINE_MUTEX(gfx_mutex); |
| static DEFINE_MUTEX(ocr_mutex); |
| static DEFINE_MUTEX(vdd_mx_mutex); |
| static DEFINE_MUTEX(threshold_mutex); |
| static uint32_t min_freq_limit; |
| static uint32_t curr_gfx_band; |
| static uint32_t curr_cx_band; |
| static struct kobj_attribute cx_mode_attr; |
| static struct kobj_attribute gfx_mode_attr; |
| static struct kobj_attribute mx_enabled_attr; |
| static struct attribute_group cx_attr_gp; |
| static struct attribute_group gfx_attr_gp; |
| static struct attribute_group mx_attr_group; |
| static struct regulator *vdd_mx; |
| static struct cpufreq_frequency_table *pending_freq_table_ptr; |
| static int pending_cpu_freq = -1; |
| static long *tsens_temp_at_panic; |
| static u32 tsens_temp_print; |
| static uint32_t bucket; |
| static cpumask_t throttling_mask; |
| |
| static LIST_HEAD(devices_list); |
| static LIST_HEAD(thresholds_list); |
| enum thermal_threshold { |
| HOTPLUG_THRESHOLD_HIGH, |
| HOTPLUG_THRESHOLD_LOW, |
| FREQ_THRESHOLD_HIGH, |
| FREQ_THRESHOLD_LOW, |
| THRESHOLD_MAX_NR, |
| }; |
| |
| struct cluster_info { |
| int cluster_id; |
| uint32_t entity_count; |
| struct cluster_info *child_entity_ptr; |
| struct cluster_info *parent_ptr; |
| struct cpufreq_frequency_table *freq_table; |
| int freq_idx; |
| int freq_idx_low; |
| int freq_idx_high; |
| cpumask_t cluster_cores; |
| bool sync_cluster; |
| uint32_t limited_max_freq; |
| uint32_t limited_min_freq; |
| }; |
| |
| struct cpu_info { |
| uint32_t cpu; |
| const char *sensor_type; |
| enum sensor_id_type id_type; |
| uint32_t sensor_id; |
| bool offline; |
| bool user_offline; |
| bool hotplug_thresh_clear; |
| struct sensor_threshold threshold[THRESHOLD_MAX_NR]; |
| bool max_freq; |
| uint32_t user_max_freq; |
| uint32_t shutdown_max_freq; |
| uint32_t user_min_freq; |
| uint32_t limited_max_freq; |
| uint32_t limited_min_freq; |
| bool freq_thresh_clear; |
| struct cluster_info *parent_ptr; |
| }; |
| |
| struct rail { |
| const char *name; |
| uint32_t freq_req; |
| uint32_t min_level; |
| uint32_t num_levels; |
| int32_t curr_level; |
| uint32_t levels[3]; |
| struct kobj_attribute value_attr; |
| struct kobj_attribute level_attr; |
| struct regulator *reg; |
| struct attribute_group attr_gp; |
| }; |
| |
| struct msm_sensor_info { |
| const char *name; |
| const char *alias; |
| const char *type; |
| uint32_t scaling_factor; |
| }; |
| |
| struct psm_rail { |
| const char *name; |
| uint8_t init; |
| uint8_t mode; |
| struct kobj_attribute mode_attr; |
| struct rpm_regulator *reg; |
| struct regulator *phase_reg; |
| struct attribute_group attr_gp; |
| }; |
| |
| struct devmgr_devices { |
| struct device_manager_data *hotplug_dev; |
| struct device_manager_data *cpufreq_dev[NR_CPUS]; |
| }; |
| |
| enum msm_thresh_list { |
| MSM_THERM_RESET, |
| MSM_VDD_RESTRICTION, |
| MSM_CX_PHASE_CTRL_HOT, |
| MSM_GFX_PHASE_CTRL_WARM, |
| MSM_GFX_PHASE_CTRL_HOT, |
| MSM_OCR, |
| MSM_VDD_MX_RESTRICTION, |
| MSM_LIST_MAX_NR, |
| }; |
| |
| enum msm_thermal_phase_ctrl { |
| MSM_CX_PHASE_CTRL, |
| MSM_GFX_PHASE_CTRL, |
| MSM_PHASE_CTRL_NR, |
| }; |
| |
| enum msm_temp_band { |
| MSM_COLD_CRITICAL = 1, |
| MSM_COLD, |
| MSM_COOL, |
| MSM_NORMAL, |
| MSM_WARM, |
| MSM_HOT, |
| MSM_HOT_CRITICAL, |
| MSM_TEMP_MAX_NR, |
| }; |
| |
| struct msm_thermal_debugfs_entry { |
| struct dentry *parent; |
| struct dentry *tsens_print; |
| }; |
| |
| static struct psm_rail *psm_rails; |
| static struct psm_rail *ocr_rails; |
| static struct rail *rails; |
| static struct msm_sensor_info *sensors; |
| static struct cpu_info cpus[NR_CPUS]; |
| static struct threshold_info *thresh; |
| static bool mx_restr_applied; |
| static struct cluster_info *core_ptr; |
| static struct msm_thermal_debugfs_entry *msm_therm_debugfs; |
| static struct devmgr_devices *devices; |
| |
| struct vdd_rstr_enable { |
| struct kobj_attribute ko_attr; |
| uint32_t enabled; |
| }; |
| |
| /* For SMPS only */ |
| enum PMIC_SW_MODE { |
| PMIC_AUTO_MODE = RPM_REGULATOR_MODE_AUTO, |
| PMIC_IPEAK_MODE = RPM_REGULATOR_MODE_IPEAK, |
| PMIC_PWM_MODE = RPM_REGULATOR_MODE_HPM, |
| }; |
| |
| enum ocr_request { |
| OPTIMUM_CURRENT_MIN, |
| OPTIMUM_CURRENT_MAX, |
| OPTIMUM_CURRENT_NR, |
| }; |
| |
| #define __ATTR_RW(attr) __ATTR(attr, 0644, attr##_show, attr##_store) |
| |
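| /* True when cluster info exists and _cpu belongs to a synchronous cluster */ |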
| #define SYNC_CORE(_cpu) \ |
| (core_ptr && cpus[_cpu].parent_ptr->sync_cluster) |
| |
| #define VDD_RES_RO_ATTRIB(_rail, ko_attr, j, _name) \ |
| ko_attr.attr.name = __stringify(_name); \ |
| ko_attr.attr.mode = 0444; \ |
| ko_attr.show = vdd_rstr_reg_##_name##_show; \ |
| ko_attr.store = NULL; \ |
| sysfs_attr_init(&ko_attr.attr); \ |
| _rail.attr_gp.attrs[j] = &ko_attr.attr; |
| |
| #define VDD_RES_RW_ATTRIB(_rail, ko_attr, j, _name) \ |
| ko_attr.attr.name = __stringify(_name); \ |
| ko_attr.attr.mode = 0644; \ |
| ko_attr.show = vdd_rstr_reg_##_name##_show; \ |
| ko_attr.store = vdd_rstr_reg_##_name##_store; \ |
| sysfs_attr_init(&ko_attr.attr); \ |
| _rail.attr_gp.attrs[j] = &ko_attr.attr; |
| |
| #define VDD_RSTR_ENABLE_FROM_ATTRIBS(attr) \ |
| (container_of(attr, struct vdd_rstr_enable, ko_attr)); |
| |
| #define VDD_RSTR_REG_VALUE_FROM_ATTRIBS(attr) \ |
| (container_of(attr, struct rail, value_attr)); |
| |
| #define VDD_RSTR_REG_LEVEL_FROM_ATTRIBS(attr) \ |
| (container_of(attr, struct rail, level_attr)); |
| |
| #define OCR_RW_ATTRIB(_rail, ko_attr, j, _name) \ |
| ko_attr.attr.name = __stringify(_name); \ |
| ko_attr.attr.mode = 0644; \ |
| ko_attr.show = ocr_reg_##_name##_show; \ |
| ko_attr.store = ocr_reg_##_name##_store; \ |
| sysfs_attr_init(&ko_attr.attr); \ |
| _rail.attr_gp.attrs[j] = &ko_attr.attr; |
| |
| #define PSM_RW_ATTRIB(_rail, ko_attr, j, _name) \ |
| ko_attr.attr.name = __stringify(_name); \ |
| ko_attr.attr.mode = 0644; \ |
| ko_attr.show = psm_reg_##_name##_show; \ |
| ko_attr.store = psm_reg_##_name##_store; \ |
| sysfs_attr_init(&ko_attr.attr); \ |
| _rail.attr_gp.attrs[j] = &ko_attr.attr; |
| |
| #define PSM_REG_MODE_FROM_ATTRIBS(attr) \ |
| (container_of(attr, struct psm_rail, mode_attr)); |
| |
| #define PHASE_RW_ATTR(_phase, _name, _attr, j, _attr_gr) \ |
| _attr.attr.name = __stringify(_name); \ |
| _attr.attr.mode = 0644; \ |
| _attr.show = _phase##_phase_show; \ |
| _attr.store = _phase##_phase_store; \ |
| sysfs_attr_init(&_attr.attr); \ |
| _attr_gr.attrs[j] = &_attr.attr; |
| |
| #define MX_RW_ATTR(ko_attr, _name, _attr_gp) \ |
| ko_attr.attr.name = __stringify(_name); \ |
| ko_attr.attr.mode = 0644; \ |
| ko_attr.show = show_mx_##_name; \ |
| ko_attr.store = store_mx_##_name; \ |
| sysfs_attr_init(&ko_attr.attr); \ |
| _attr_gp.attrs[0] = &ko_attr.attr; |
| |
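| /* |
| * Populate @mask with @cpu and, when cluster info is available and @cpu |
| * belongs to a cluster, with all of that cluster's cores. |
| */ |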
| void get_cluster_mask(uint32_t cpu, cpumask_t *mask) |
| { |
| int i; |
| |
| cpumask_set_cpu(cpu, mask); |
| if (core_ptr) { |
| for (i = 0; i < core_ptr->entity_count; i++) { |
| struct cluster_info *cluster_ptr = |
| &core_ptr->child_entity_ptr[i]; |
| if (*cluster_ptr->cluster_cores.bits & BIT(cpu)) { |
| cpumask_copy(mask, |
| &cluster_ptr->cluster_cores); |
| break; |
| } |
| } |
| } |
| } |
| |
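| /* |
| * Return the highest frequency in @cpu's cluster frequency table (or the |
| * global table when no cluster info exists), or 0 if no table is available. |
| */ |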
| uint32_t get_core_max_freq(uint32_t cpu) |
| { |
| int i; |
| uint32_t max_freq = 0; |
| |
| if (core_ptr) { |
| for (i = 0; i < core_ptr->entity_count; i++) { |
| struct cluster_info *cluster_ptr = |
| &core_ptr->child_entity_ptr[i]; |
| if (*cluster_ptr->cluster_cores.bits & BIT(cpu)) { |
| if (cluster_ptr->freq_table) |
| max_freq = |
| cluster_ptr->freq_table |
| [cluster_ptr->freq_idx_high].frequency; |
| break; |
| } |
| } |
| } else { |
| if (table) |
| max_freq = table[limit_idx_high].frequency; |
| } |
| |
| return max_freq; |
| } |
| |
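| /* Record every CPU that has ever been online in cpus_previously_online */ |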
| static void cpus_previously_online_update(void) |
| { |
| char buf[CPU_BUF_SIZE]; |
| |
| get_online_cpus(); |
| cpumask_or(cpus_previously_online, cpus_previously_online, |
| cpu_online_mask); |
| put_online_cpus(); |
| cpulist_scnprintf(buf, sizeof(buf), cpus_previously_online); |
| pr_debug("%s\n", buf); |
| } |
| |
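| /* |
| * Return the lowest frequency in @cpu's cluster frequency table (or the |
| * global table), or UINT_MAX if no table is available. |
| */ |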
| uint32_t get_core_min_freq(uint32_t cpu) |
| { |
| int i; |
| uint32_t min_freq = UINT_MAX; |
| |
| if (core_ptr) { |
| for (i = 0; i < core_ptr->entity_count; i++) { |
| struct cluster_info *cluster_ptr = |
| &core_ptr->child_entity_ptr[i]; |
| if (*cluster_ptr->cluster_cores.bits & BIT(cpu)) { |
| if (cluster_ptr->freq_table) |
| min_freq = |
| cluster_ptr->freq_table[0].frequency; |
| break; |
| } |
| } |
| } else { |
| if (table) |
| min_freq = table[0].frequency; |
| } |
| |
| return min_freq; |
| } |
| |
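| /* |
| * On restart, power off or halt, cap every frequency-mitigated CPU to its |
| * minimum frequency and wake the frequency mitigation task to apply it. |
| */ |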
| static int msm_thermal_reboot_callback( |
| struct notifier_block *nfb, unsigned long val, void *data) |
| { |
| if (val == SYS_RESTART || val == SYS_POWER_OFF || val == SYS_HALT) { |
| uint32_t cpu; |
| |
| for_each_possible_cpu(cpu) { |
| if (msm_thermal_info.freq_mitig_control_mask |
| & BIT(cpu)) { |
| cpus[cpu].shutdown_max_freq = |
| get_core_min_freq(cpu); |
| if (cpus[cpu].shutdown_max_freq == UINT_MAX) |
| continue; |
| if (freq_mitigation_task) { |
| pr_debug("Mitigate CPU%u to %u\n", cpu, |
| cpus[cpu].shutdown_max_freq); |
| complete(&freq_mitigation_complete); |
| } else { |
| pr_err( |
| "Frequency mitigation task is not initialized\n"); |
| } |
| } |
| } |
| } |
| |
| return NOTIFY_DONE; |
| } |
| |
| static int msm_thermal_suspend_callback( |
| struct notifier_block *nfb, unsigned long action, void *data) |
| { |
| switch (action) { |
| case PM_POST_HIBERNATION: |
| case PM_POST_SUSPEND: |
| if (hotplug_task) |
| complete(&hotplug_notify_complete); |
| else |
| pr_debug("Hotplug task not initialized\n"); |
| break; |
| |
| default: |
| return NOTIFY_DONE; |
| } |
| |
| return NOTIFY_OK; |
| } |
| |
| static struct notifier_block msm_thermal_reboot_notifier = { |
| .notifier_call = msm_thermal_reboot_callback, |
| }; |
| |
| static struct device_manager_data *find_device_by_name(const char *device_name) |
| { |
| struct device_manager_data *dev_mgr = NULL; |
| |
| list_for_each_entry(dev_mgr, &devices_list, dev_ptr) { |
| if (strcmp(dev_mgr->device_name, device_name) == 0) |
| return dev_mgr; |
| } |
| |
| return NULL; |
| } |
| |
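| /* |
| * Verify that @clnt is registered with a device manager on devices_list |
| * and is present in that device manager's client list. |
| */ |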
| static int validate_client(struct device_clnt_data *clnt) |
| { |
| int ret = 0; |
| struct device_manager_data *dev_mgr = NULL; |
| struct device_clnt_data *client_ptr = NULL; |
| |
| if (!clnt || !clnt->dev_mgr) { |
| pr_err("Invalid client\n"); |
| ret = -EINVAL; |
| goto validate_exit; |
| } |
| |
| list_for_each_entry(dev_mgr, &devices_list, dev_ptr) { |
| if (dev_mgr == clnt->dev_mgr) |
| break; |
| } |
| if (dev_mgr != clnt->dev_mgr) { |
| pr_err("Invalid device manager\n"); |
| ret = -EINVAL; |
| goto validate_exit; |
| } |
| |
| mutex_lock(&dev_mgr->clnt_lock); |
| list_for_each_entry(client_ptr, &dev_mgr->client_list, clnt_ptr) { |
| if (clnt == client_ptr) |
| break; |
| } |
| if (clnt != client_ptr) { |
| pr_err("Invalid client\n"); |
| ret = -EINVAL; |
| goto validate_unlock; |
| } |
| validate_unlock: |
| mutex_unlock(&dev_mgr->clnt_lock); |
| |
| validate_exit: |
| return ret; |
| } |
| |
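| /* |
| * Aggregate the frequency requests of all active clients of @dev_mgr |
| * (lowest max, highest min) and wake the frequency mitigation task if |
| * the aggregate request changed. |
| */ |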
| static int devmgr_client_cpufreq_update(struct device_manager_data *dev_mgr) |
| { |
| int ret = 0; |
| struct device_clnt_data *clnt = NULL; |
| uint32_t max_freq = UINT_MAX; |
| uint32_t min_freq = 0; |
| |
| mutex_lock(&dev_mgr->clnt_lock); |
| list_for_each_entry(clnt, &dev_mgr->client_list, clnt_ptr) { |
| if (!clnt->req_active) |
| continue; |
| max_freq = min(max_freq, clnt->request.freq.max_freq); |
| min_freq = max(min_freq, clnt->request.freq.min_freq); |
| } |
| if (dev_mgr->active_req.freq.max_freq == max_freq && |
| dev_mgr->active_req.freq.min_freq == min_freq) { |
| goto update_exit; |
| } |
| dev_mgr->active_req.freq.max_freq = max_freq; |
| dev_mgr->active_req.freq.min_freq = min_freq; |
| |
| if (freq_mitigation_task) { |
| complete(&freq_mitigation_complete); |
| } else { |
| pr_err("Frequency mitigation task is not initialized\n"); |
| ret = -ESRCH; |
| } |
| |
| update_exit: |
| mutex_unlock(&dev_mgr->clnt_lock); |
| return ret; |
| } |
| |
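| /* |
| * OR together the offline masks requested by all active clients of |
| * @dev_mgr and wake the hotplug task if the aggregate mask changed. |
| */ |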
| static int devmgr_client_hotplug_update(struct device_manager_data *dev_mgr) |
| { |
| int ret = 0; |
| struct device_clnt_data *clnt = NULL; |
| cpumask_t offline_mask = CPU_MASK_NONE; |
| |
| mutex_lock(&dev_mgr->clnt_lock); |
| list_for_each_entry(clnt, &dev_mgr->client_list, clnt_ptr) { |
| if (!clnt->req_active) |
| continue; |
| cpumask_or(&offline_mask, &offline_mask, |
| &clnt->request.offline_mask); |
| } |
| if (cpumask_equal(&dev_mgr->active_req.offline_mask, &offline_mask)) |
| goto update_exit; |
| |
| cpumask_copy(&dev_mgr->active_req.offline_mask, &offline_mask); |
| |
| if (hotplug_task) { |
| complete(&hotplug_notify_complete); |
| } else { |
| pr_err("Hotplug task is not initialized\n"); |
| ret = -ESRCH; |
| } |
| |
| update_exit: |
| mutex_unlock(&dev_mgr->clnt_lock); |
| return ret; |
| } |
| |
| static int devmgr_hotplug_client_request_validate_and_update( |
| struct device_clnt_data *clnt, |
| union device_request *req, |
| enum device_req_type type) |
| { |
| if (type != HOTPLUG_MITIGATION_REQ) |
| return -EINVAL; |
| |
| cpumask_copy(&clnt->request.offline_mask, &req->offline_mask); |
| |
| if (!cpumask_empty(&req->offline_mask)) |
| clnt->req_active = true; |
| else |
| clnt->req_active = false; |
| |
| return 0; |
| } |
| |
| static int devmgr_cpufreq_client_request_validate_and_update( |
| struct device_clnt_data *clnt, |
| union device_request *req, |
| enum device_req_type type) |
| { |
| if (type != CPUFREQ_MITIGATION_REQ) |
| return -EINVAL; |
| |
| if (req->freq.max_freq < req->freq.min_freq) { |
| pr_err("Invalid Max and Min freq req. max:%u min:%u\n", |
| req->freq.max_freq, req->freq.min_freq); |
| return -EINVAL; |
| } |
| |
| clnt->request.freq.max_freq = req->freq.max_freq; |
| clnt->request.freq.min_freq = req->freq.min_freq; |
| |
| if ((req->freq.max_freq == CPUFREQ_MAX_NO_MITIGATION) && |
| (req->freq.min_freq == CPUFREQ_MIN_NO_MITIGATION)) |
| clnt->req_active = false; |
| else |
| clnt->req_active = true; |
| |
| return 0; |
| } |
| |
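| /* |
| * devmgr_client_request_mitigation() - submit a mitigation request on |
| * behalf of a registered client. The request is validated and stored |
| * under the device manager's client lock and then applied through the |
| * device-specific update callback. Illustrative client usage (the |
| * frequency value is only an example): |
| * |
| *	union device_request req; |
| * |
| *	req.freq.max_freq = 1036800; |
| *	req.freq.min_freq = CPUFREQ_MIN_NO_MITIGATION; |
| *	ret = devmgr_client_request_mitigation(clnt, |
| *			CPUFREQ_MITIGATION_REQ, &req); |
| */ |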
| int devmgr_client_request_mitigation(struct device_clnt_data *clnt, |
| enum device_req_type type, |
| union device_request *req) |
| { |
| int ret = 0; |
| struct device_manager_data *dev_mgr = NULL; |
| |
| if (!clnt || !req) { |
| pr_err("Invalid inputs for mitigation.\n"); |
| ret = -EINVAL; |
| goto req_exit; |
| } |
| |
| ret = validate_client(clnt); |
| if (ret) { |
| pr_err("Invalid mitigation client. ret:%d\n", ret); |
| goto req_exit; |
| } |
| |
| if (!clnt->dev_mgr->request_validate) { |
| pr_err("Invalid dev mgr request update\n"); |
| ret = -EINVAL; |
| goto req_exit; |
| } |
| |
| dev_mgr = clnt->dev_mgr; |
| mutex_lock(&dev_mgr->clnt_lock); |
| ret = dev_mgr->request_validate(clnt, req, type); |
| if (ret) { |
| pr_err("Invalid client request\n"); |
| goto req_unlock; |
| } |
| |
| req_unlock: |
| mutex_unlock(&dev_mgr->clnt_lock); |
| if (!ret && dev_mgr->update) |
| dev_mgr->update(dev_mgr); |
| |
| req_exit: |
| return ret; |
| } |
| |
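| /* |
| * devmgr_register_mitigation_client() - allocate a client handle for the |
| * named mitigation device and add it to that device manager's client |
| * list. Returns an ERR_PTR on invalid input, an unknown device name or |
| * allocation failure. |
| */ |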
| struct device_clnt_data *devmgr_register_mitigation_client(struct device *dev, |
| const char *device_name, |
| void (*callback)(struct device_clnt_data *, |
| union device_request *, void *)) |
| { |
| struct device_clnt_data *client = NULL; |
| struct device_manager_data *dev_mgr = NULL; |
| |
| if (!dev || !device_name) { |
| pr_err("Invalid input\n"); |
| return ERR_PTR(-EINVAL); |
| } |
| |
| dev_mgr = find_device_by_name(device_name); |
| if (!dev_mgr) { |
| pr_err("Invalid device %s\n", device_name); |
| return ERR_PTR(-EINVAL); |
| } |
| |
| client = devm_kzalloc(dev, |
| sizeof(struct device_clnt_data), GFP_KERNEL); |
| if (!client) { |
| pr_err("Memory alloc failed\n"); |
| return ERR_PTR(-ENOMEM); |
| } |
| |
| mutex_lock(&dev_mgr->clnt_lock); |
| client->dev_mgr = dev_mgr; |
| client->callback = callback; |
| list_add_tail(&client->clnt_ptr, &dev_mgr->client_list); |
| mutex_unlock(&dev_mgr->clnt_lock); |
| |
| return client; |
| } |
| |
| void devmgr_unregister_mitigation_client(struct device *dev, |
| struct device_clnt_data *clnt) |
| { |
| int ret = 0; |
| struct device_manager_data *dev_mgr = NULL; |
| |
| if (!clnt) { |
| pr_err("Invalid input\n"); |
| return; |
| } |
| |
| ret = validate_client(clnt); |
| if (ret) |
| return; |
| |
| dev_mgr = clnt->dev_mgr; |
| mutex_lock(&dev_mgr->clnt_lock); |
| list_del(&clnt->clnt_ptr); |
| mutex_unlock(&dev_mgr->clnt_lock); |
| devm_kfree(dev, clnt); |
| if (dev_mgr->update) |
| dev_mgr->update(dev_mgr); |
| } |
| |
| static int msm_thermal_cpufreq_callback(struct notifier_block *nfb, |
| unsigned long event, void *data) |
| { |
| struct cpufreq_policy *policy = data; |
| uint32_t max_freq_req, min_freq_req; |
| |
| switch (event) { |
| case CPUFREQ_INCOMPATIBLE: |
| if (SYNC_CORE(policy->cpu)) { |
| max_freq_req = |
| cpus[policy->cpu].parent_ptr->limited_max_freq; |
| min_freq_req = |
| cpus[policy->cpu].parent_ptr->limited_min_freq; |
| } else { |
| max_freq_req = cpus[policy->cpu].limited_max_freq; |
| min_freq_req = cpus[policy->cpu].limited_min_freq; |
| } |
| pr_debug("mitigating CPU%d to freq max: %u min: %u\n", |
| policy->cpu, max_freq_req, min_freq_req); |
| |
| cpufreq_verify_within_limits(policy, min_freq_req, |
| max_freq_req); |
| |
| if (max_freq_req < min_freq_req) |
| pr_err("Invalid frequency request Max:%u Min:%u\n", |
| max_freq_req, min_freq_req); |
| break; |
| |
| case CPUFREQ_CREATE_POLICY: |
| if (pending_cpu_freq != -1 && |
| (cpumask_first(policy->related_cpus) == |
| pending_cpu_freq)) { |
| pr_debug("Updating freq plan for cpu: %d\n", |
| policy->cpu); |
| pending_freq_table_ptr = cpufreq_frequency_get_table( |
| policy->cpu); |
| pending_cpu_freq = -1; |
| } |
| break; |
| } |
| return NOTIFY_OK; |
| } |
| |
| static struct notifier_block msm_thermal_cpufreq_notifier = { |
| .notifier_call = msm_thermal_cpufreq_callback, |
| }; |
| |
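| /* |
| * Refresh the throttling mask for @cpu's cluster and call |
| * cpufreq_update_policy() so the limits in cpus[cpu] are re-evaluated |
| * through the cpufreq notifier. |
| */ |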
| static void update_cpu_freq(int cpu) |
| { |
| int ret = 0; |
| cpumask_t mask; |
| |
| get_cluster_mask(cpu, &mask); |
| if (cpu_online(cpu)) { |
| if ((cpumask_intersects(&mask, &throttling_mask)) |
| && (cpus[cpu].limited_max_freq |
| >= get_core_max_freq(cpu))) { |
| cpumask_xor(&throttling_mask, &mask, &throttling_mask); |
| set_cpu_throttled(&mask, false); |
| } else if (!cpumask_intersects(&mask, &throttling_mask)) { |
| cpumask_or(&throttling_mask, &mask, &throttling_mask); |
| set_cpu_throttled(&mask, true); |
| } |
| trace_thermal_pre_frequency_mit(cpu, |
| cpus[cpu].limited_max_freq, |
| cpus[cpu].limited_min_freq); |
| ret = cpufreq_update_policy(cpu); |
| trace_thermal_post_frequency_mit(cpu, |
| cpufreq_quick_get_max(cpu), |
| cpus[cpu].limited_min_freq); |
| if (ret) |
| pr_err("Unable to update policy for cpu:%d. err:%d\n", |
| cpu, ret); |
| } |
| } |
| |
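| /* |
| * Read the "qcom,synchronous-cluster-id" property and return a |
| * devm-allocated array of synchronous cluster IDs, with *cnt set to the |
| * number of entries. Returns NULL if the property is absent or invalid. |
| */ |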
| static int * __init get_sync_cluster(struct device *dev, int *cnt) |
| { |
| int *sync_cluster = NULL, cluster_cnt = 0, ret = 0; |
| char *key = "qcom,synchronous-cluster-id"; |
| |
| if (!of_get_property(dev->of_node, key, &cluster_cnt) |
| || cluster_cnt <= 0 || !core_ptr) |
| return NULL; |
| |
| cluster_cnt /= sizeof(__be32); |
| if (cluster_cnt > core_ptr->entity_count) { |
| pr_err("Invalid cluster count:%d\n", cluster_cnt); |
| return NULL; |
| } |
| sync_cluster = devm_kzalloc(dev, sizeof(int) * cluster_cnt, GFP_KERNEL); |
| if (!sync_cluster) { |
| pr_err("Memory alloc failed\n"); |
| return NULL; |
| } |
| |
| ret = of_property_read_u32_array(dev->of_node, key, sync_cluster, |
| cluster_cnt); |
| if (ret) { |
| pr_err("Error in reading property:%s. err:%d\n", key, ret); |
| devm_kfree(dev, sync_cluster); |
| return NULL; |
| } |
| *cnt = cluster_cnt; |
| |
| return sync_cluster; |
| } |
| |
| static void update_cpu_datastructure(struct cluster_info *cluster_ptr, |
| int *sync_cluster, int sync_cluster_cnt) |
| { |
| int i = 0; |
| bool is_sync_cluster = false; |
| |
| for (i = 0; (sync_cluster) && (i < sync_cluster_cnt); i++) { |
| if (cluster_ptr->cluster_id != sync_cluster[i]) |
| continue; |
| is_sync_cluster = true; |
| break; |
| } |
| |
| cluster_ptr->sync_cluster = is_sync_cluster; |
| pr_debug("Cluster ID:%d Sync cluster:%s Sibling mask:%lu\n", |
| cluster_ptr->cluster_id, is_sync_cluster ? "Yes" : "No", |
| *cluster_ptr->cluster_cores.bits); |
| for_each_cpu_mask(i, cluster_ptr->cluster_cores) { |
| cpus[i].parent_ptr = cluster_ptr; |
| } |
| } |
| |
| static ssize_t cluster_info_show( |
| struct kobject *kobj, struct kobj_attribute *attr, char *buf) |
| { |
| uint32_t i = 0; |
| ssize_t tot_size = 0, size = 0; |
| |
| for (; i < core_ptr->entity_count; i++) { |
| struct cluster_info *cluster_ptr = |
| &core_ptr->child_entity_ptr[i]; |
| |
| size = snprintf(&buf[tot_size], PAGE_SIZE - tot_size, |
| "%d:%lu:%d ", cluster_ptr->cluster_id, |
| *cluster_ptr->cluster_cores.bits, |
| cluster_ptr->sync_cluster); |
| if ((tot_size + size) >= PAGE_SIZE) { |
| pr_err("Not enough buffer size\n"); |
| break; |
| } |
| tot_size += size; |
| } |
| |
| return tot_size; |
| } |
| |
| static int create_thermal_debugfs(void) |
| { |
| int ret = 0; |
| |
| if (msm_therm_debugfs) |
| return ret; |
| |
| msm_therm_debugfs = devm_kzalloc(&msm_thermal_info.pdev->dev, |
| sizeof(struct msm_thermal_debugfs_entry), GFP_KERNEL); |
| if (!msm_therm_debugfs) { |
| ret = -ENOMEM; |
| pr_err("Memory alloc failed. err:%d\n", ret); |
| return ret; |
| } |
| |
| THERM_CREATE_DEBUGFS_DIR(msm_therm_debugfs->parent, MSM_THERMAL_NAME, |
| NULL, ret); |
| if (ret) |
| goto create_exit; |
| |
| msm_therm_debugfs->tsens_print = debugfs_create_bool(MSM_TSENS_PRINT, |
| 0600, msm_therm_debugfs->parent, &tsens_temp_print); |
| if (IS_ERR(msm_therm_debugfs->tsens_print)) { |
| ret = PTR_ERR(msm_therm_debugfs->tsens_print); |
| pr_err("Error creating debugfs:[%s]. err:%d\n", |
| MSM_TSENS_PRINT, ret); |
| goto create_exit; |
| } |
| |
| create_exit: |
| if (ret) { |
| debugfs_remove_recursive(msm_therm_debugfs->parent); |
| devm_kfree(&msm_thermal_info.pdev->dev, msm_therm_debugfs); |
| } |
| return ret; |
| } |
| |
| static struct kobj_attribute cluster_info_attr = __ATTR_RO(cluster_info); |
| static int create_cpu_topology_sysfs(void) |
| { |
| int ret = 0; |
| struct kobject *module_kobj = NULL; |
| |
| if (!cluster_info_probed) { |
| cluster_info_nodes_called = true; |
| return ret; |
| } |
| if (!core_ptr) |
| return ret; |
| |
| module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME); |
| if (!module_kobj) { |
| pr_err("cannot find kobject\n"); |
| return -ENODEV; |
| } |
| |
| sysfs_attr_init(&cluster_info_attr.attr); |
| ret = sysfs_create_file(module_kobj, &cluster_info_attr.attr); |
| if (ret) { |
| pr_err("cannot create cluster info attr group. err:%d\n", ret); |
| return ret; |
| } |
| |
| return ret; |
| } |
| |
| static int get_device_tree_cluster_info(struct device *dev, int *cluster_id, |
| cpumask_t *cluster_cpus) |
| { |
| int i, cluster_cnt = 0, ret = 0; |
| uint32_t val = 0; |
| char *key = "qcom,synchronous-cluster-map"; |
| |
| if (!of_get_property(dev->of_node, key, &cluster_cnt) |
| || cluster_cnt <= 0) { |
| pr_debug("Property %s not defined.\n", key); |
| return -ENODEV; |
| } |
| if (cluster_cnt % (sizeof(__be32) * 2)) { |
| pr_err("Invalid number(%d) of entry for %s\n", |
| cluster_cnt, key); |
| return -EINVAL; |
| } |
| cluster_cnt /= (sizeof(__be32) * 2); |
| |
| for (i = 0; i < cluster_cnt; i++) { |
| ret = of_property_read_u32_index(dev->of_node, key, |
| i * 2, &val); |
| if (ret) { |
| pr_err("Error reading index%d\n", i * 2); |
| return -EINVAL; |
| } |
| cluster_id[i] = val; |
| |
| ret = of_property_read_u32_index(dev->of_node, key, |
| i * 2 + 1, &val); |
| if (ret) { |
| pr_err("Error reading index%d\n", i * 2 + 1); |
| return -EINVAL; |
| } |
| *cluster_cpus[i].bits = val; |
| } |
| |
| return cluster_cnt; |
| } |
| |
| static int get_kernel_cluster_info(int *cluster_id, cpumask_t *cluster_cpus) |
| { |
| uint32_t _cpu, cluster_index, cluster_cnt; |
| |
| for (_cpu = 0, cluster_cnt = 0; _cpu < num_possible_cpus(); _cpu++) { |
| if (topology_physical_package_id(_cpu) < 0) { |
| pr_err("CPU%d topology not initialized.\n", _cpu); |
| return -ENODEV; |
| } |
| /* |
| * Do not use the sibling cpumask from the topology module. The kernel |
| * topology module updates the sibling cpumask only when the cores are |
| * brought online for the first time. KTM instead derives the sibling |
| * cpumask from the cluster and core ID mapping. |
| */ |
| for (cluster_index = 0; cluster_index < num_possible_cpus(); |
| cluster_index++) { |
| if (cluster_id[cluster_index] == -1) { |
| cluster_id[cluster_index] = |
| topology_physical_package_id(_cpu); |
| *cluster_cpus[cluster_index].bits = 0; |
| cpumask_set_cpu(_cpu, |
| &cluster_cpus[cluster_index]); |
| cluster_cnt++; |
| break; |
| } |
| if (cluster_id[cluster_index] == |
| topology_physical_package_id(_cpu)) { |
| cpumask_set_cpu(_cpu, |
| &cluster_cpus[cluster_index]); |
| break; |
| } |
| } |
| } |
| |
| return cluster_cnt; |
| } |
| |
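| /* |
| * Build the cluster_info hierarchy (core_ptr and its children) from the |
| * kernel topology, or from the "qcom,synchronous-cluster-map" property |
| * as a fallback, and mark which clusters are synchronous. |
| */ |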
| static void update_cpu_topology(struct device *dev) |
| { |
| int cluster_id[NR_CPUS] = {[0 ... NR_CPUS-1] = -1}; |
| cpumask_t cluster_cpus[NR_CPUS]; |
| uint32_t i, j; |
| int cluster_cnt, cpu, sync_cluster_cnt = 0; |
| struct cluster_info *temp_ptr = NULL; |
| int *sync_cluster_id = NULL; |
| |
| cluster_info_probed = true; |
| cluster_cnt = get_kernel_cluster_info(cluster_id, cluster_cpus); |
| if (cluster_cnt <= 0) { |
| cluster_cnt = get_device_tree_cluster_info(dev, cluster_id, |
| cluster_cpus); |
| if (cluster_cnt <= 0) { |
| core_ptr = NULL; |
| pr_debug("Cluster Info not defined. KTM continues.\n"); |
| return; |
| } |
| } |
| |
| core_ptr = devm_kzalloc(dev, sizeof(struct cluster_info), GFP_KERNEL); |
| if (!core_ptr) { |
| pr_err("Memory alloc failed\n"); |
| return; |
| } |
| core_ptr->parent_ptr = NULL; |
| core_ptr->entity_count = cluster_cnt; |
| core_ptr->cluster_id = -1; |
| core_ptr->sync_cluster = false; |
| |
| sync_cluster_id = get_sync_cluster(dev, &sync_cluster_cnt); |
| if (!sync_cluster_id) { |
| devm_kfree(dev, core_ptr); |
| core_ptr = NULL; |
| return; |
| } |
| temp_ptr = devm_kzalloc(dev, sizeof(struct cluster_info) * cluster_cnt, |
| GFP_KERNEL); |
| if (!temp_ptr) { |
| pr_err("Memory alloc failed\n"); |
| devm_kfree(dev, core_ptr); |
| core_ptr = NULL; |
| return; |
| } |
| |
| for (i = 0; i < cluster_cnt; i++) { |
| pr_debug("Cluster_ID:%d CPU's:%lu\n", cluster_id[i], |
| *cluster_cpus[i].bits); |
| temp_ptr[i].cluster_id = cluster_id[i]; |
| temp_ptr[i].parent_ptr = core_ptr; |
| temp_ptr[i].cluster_cores = cluster_cpus[i]; |
| temp_ptr[i].limited_max_freq = UINT_MAX; |
| temp_ptr[i].limited_min_freq = 0; |
| temp_ptr[i].freq_idx = 0; |
| temp_ptr[i].freq_idx_low = 0; |
| temp_ptr[i].freq_idx_high = 0; |
| temp_ptr[i].freq_table = NULL; |
| j = 0; |
| for_each_cpu_mask(cpu, cluster_cpus[i]) |
| j++; |
| temp_ptr[i].entity_count = j; |
| temp_ptr[i].child_entity_ptr = NULL; |
| update_cpu_datastructure(&temp_ptr[i], sync_cluster_id, |
| sync_cluster_cnt); |
| } |
| core_ptr->child_entity_ptr = temp_ptr; |
| } |
| |
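| /* |
| * For each cluster without a cached frequency table, read the cpufreq |
| * table from one of its cores (briefly onlining a core if none is |
| * online) and keep a private copy with the low/high indices initialized. |
| */ |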
| static int __ref init_cluster_freq_table(void) |
| { |
| uint32_t _cluster = 0, _cpu = 0, table_len = 0, idx = 0; |
| int ret = 0, cpu_set; |
| char buf[CPU_BUF_SIZE]; |
| struct cluster_info *cluster_ptr = NULL; |
| struct cpufreq_policy *policy = NULL; |
| struct cpufreq_frequency_table *freq_table_ptr = NULL; |
| |
| for (; _cluster < core_ptr->entity_count; _cluster++, table_len = 0, |
| (policy && freq_table_ptr) ? cpufreq_cpu_put(policy) : 0, |
| policy = NULL, freq_table_ptr = NULL) { |
| cluster_ptr = &core_ptr->child_entity_ptr[_cluster]; |
| if (cluster_ptr->freq_table) |
| continue; |
| |
| for_each_cpu_mask(_cpu, cluster_ptr->cluster_cores) { |
| policy = cpufreq_cpu_get(_cpu); |
| if (!policy) |
| continue; |
| freq_table_ptr = cpufreq_frequency_get_table( |
| policy->cpu); |
| if (!freq_table_ptr) { |
| cpufreq_cpu_put(policy); |
| continue; |
| } else { |
| break; |
| } |
| } |
| if (!freq_table_ptr) { |
| _cpu = first_cpu(cluster_ptr->cluster_cores); |
| pr_debug( |
| "Online cpu%d in cluster%d to read cpufreq table\n", |
| _cpu, cluster_ptr->cluster_id); |
| pending_cpu_freq = _cpu; |
| if (!cpu_online(_cpu)) { |
| cpu_set = cpumask_test_cpu(_cpu, |
| cpus_previously_online); |
| #ifdef CONFIG_SMP |
| cpu_up(_cpu); |
| cpu_down(_cpu); |
| #endif |
| /* Remove prev online bit if we are first to |
| * put it online */ |
| if (!cpu_set) { |
| cpumask_clear_cpu(_cpu, |
| cpus_previously_online); |
| cpumask_scnprintf(buf, sizeof(buf), |
| cpus_previously_online); |
| pr_debug("Reset prev online to %s\n", |
| buf); |
| } |
| } |
| freq_table_ptr = pending_freq_table_ptr; |
| } |
| if (!freq_table_ptr) { |
| pr_debug("Error reading cluster%d cpufreq table\n", |
| cluster_ptr->cluster_id); |
| ret = -EAGAIN; |
| continue; |
| } |
| |
| while (freq_table_ptr[table_len].frequency |
| != CPUFREQ_TABLE_END) |
| table_len++; |
| |
| cluster_ptr->freq_idx_low = 0; |
| cluster_ptr->freq_idx_high = cluster_ptr->freq_idx = |
| table_len - 1; |
| if (cluster_ptr->freq_idx_high < 0 |
| || (cluster_ptr->freq_idx_high |
| < cluster_ptr->freq_idx_low)) { |
| cluster_ptr->freq_idx = cluster_ptr->freq_idx_low = |
| cluster_ptr->freq_idx_high = 0; |
| WARN(1, "Cluster%d frequency table length:%d\n", |
| cluster_ptr->cluster_id, table_len); |
| ret = -EINVAL; |
| goto release_and_exit; |
| } |
| cluster_ptr->freq_table = devm_kzalloc( |
| &msm_thermal_info.pdev->dev, |
| sizeof(struct cpufreq_frequency_table) * table_len, |
| GFP_KERNEL); |
| if (!cluster_ptr->freq_table) { |
| pr_err("memory alloc failed\n"); |
| cluster_ptr->freq_idx = cluster_ptr->freq_idx_low = |
| cluster_ptr->freq_idx_high = 0; |
| ret = -ENOMEM; |
| goto release_and_exit; |
| } |
| for (idx = 0; idx < table_len; idx++) |
| cluster_ptr->freq_table[idx].frequency = |
| freq_table_ptr[idx].frequency; |
| } |
| |
| return ret; |
| release_and_exit: |
| cpufreq_cpu_put(policy); |
| return ret; |
| } |
| |
| static void update_cluster_freq(void) |
| { |
| int online_cpu = -1; |
| struct cluster_info *cluster_ptr = NULL; |
| uint32_t _cluster = 0, _cpu = 0, max = UINT_MAX, min = 0; |
| |
| if (!core_ptr) |
| return; |
| |
| for (; _cluster < core_ptr->entity_count; _cluster++, _cpu = 0, |
| online_cpu = -1, max = UINT_MAX, min = 0) { |
| /* |
| * If a cluster is synchronous, go over the frequency limits of |
| * each core in that cluster and aggregate the minimum and |
| * maximum frequencies. After aggregating, request a frequency |
| * update on the first online core in that cluster. The cpufreq |
| * driver takes care of updating the frequency of the other |
| * cores in a synchronous cluster. |
| */ |
| cluster_ptr = &core_ptr->child_entity_ptr[_cluster]; |
| |
| if (!cluster_ptr->sync_cluster) |
| continue; |
| for_each_cpu_mask(_cpu, cluster_ptr->cluster_cores) { |
| if (online_cpu == -1 && cpu_online(_cpu)) |
| online_cpu = _cpu; |
| max = min(max, cpus[_cpu].limited_max_freq); |
| min = max(min, cpus[_cpu].limited_min_freq); |
| } |
| if (cluster_ptr->limited_max_freq == max |
| && cluster_ptr->limited_min_freq == min) |
| continue; |
| cluster_ptr->limited_max_freq = max; |
| cluster_ptr->limited_min_freq = min; |
| if (online_cpu != -1) |
| update_cpu_freq(online_cpu); |
| } |
| } |
| |
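| /* |
| * Polling-mode frequency control per cluster: step the frequency index |
| * down by bootup_freq_step once @temp reaches limit_temp_degC, step it |
| * back up below the hysteresis band, and propagate the new per-CPU |
| * maximum frequencies. |
| */ |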
| static void do_cluster_freq_ctrl(long temp) |
| { |
| uint32_t _cluster = 0; |
| int _cpu = -1, freq_idx = 0; |
| bool mitigate = false; |
| struct cluster_info *cluster_ptr = NULL; |
| |
| if (temp >= msm_thermal_info.limit_temp_degC) |
| mitigate = true; |
| else if (temp < msm_thermal_info.limit_temp_degC - |
| msm_thermal_info.temp_hysteresis_degC) |
| mitigate = false; |
| else |
| return; |
| |
| get_online_cpus(); |
| for (; _cluster < core_ptr->entity_count; _cluster++) { |
| cluster_ptr = &core_ptr->child_entity_ptr[_cluster]; |
| if (!cluster_ptr->freq_table) |
| continue; |
| |
| if (mitigate) |
| freq_idx = max_t(int, cluster_ptr->freq_idx_low, |
| (cluster_ptr->freq_idx |
| - msm_thermal_info.bootup_freq_step)); |
| else |
| freq_idx = min_t(int, cluster_ptr->freq_idx_high, |
| (cluster_ptr->freq_idx |
| + msm_thermal_info.bootup_freq_step)); |
| if (freq_idx == cluster_ptr->freq_idx) |
| continue; |
| |
| cluster_ptr->freq_idx = freq_idx; |
| for_each_cpu_mask(_cpu, cluster_ptr->cluster_cores) { |
| if (!(msm_thermal_info.bootup_freq_control_mask |
| & BIT(_cpu))) |
| continue; |
| pr_info("Limiting CPU%d max frequency to %u. Temp:%ld\n", |
| _cpu, cluster_ptr->freq_table[freq_idx].frequency, |
| temp); |
| cpus[_cpu].limited_max_freq = |
| cluster_ptr->freq_table[freq_idx].frequency; |
| } |
| } |
| if (_cpu != -1) |
| update_cluster_freq(); |
| put_online_cpus(); |
| } |
| |
| /* If freq table exists, then we can send freq request */ |
| static int check_freq_table(void) |
| { |
| int ret = 0; |
| uint32_t i = 0; |
| static bool invalid_table; |
| |
| if (invalid_table) |
| return -EINVAL; |
| if (freq_table_get) |
| return 0; |
| |
| if (core_ptr) { |
| ret = init_cluster_freq_table(); |
| if (!ret) |
| freq_table_get = 1; |
| else if (ret == -EINVAL) |
| invalid_table = true; |
| return ret; |
| } |
| |
| table = cpufreq_frequency_get_table(0); |
| if (!table) { |
| pr_debug("error reading cpufreq table\n"); |
| return -EINVAL; |
| } |
| while (table[i].frequency != CPUFREQ_TABLE_END) |
| i++; |
| |
| limit_idx_low = 0; |
| limit_idx_high = limit_idx = i - 1; |
| if (limit_idx_high < 0 || limit_idx_high < limit_idx_low) { |
| invalid_table = true; |
| table = NULL; |
| limit_idx_low = limit_idx_high = limit_idx = 0; |
| WARN(1, "CPU0 frequency table length:%d\n", i); |
| return -EINVAL; |
| } |
| freq_table_get = 1; |
| |
| return 0; |
| } |
| |
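| /* |
| * Request a minimum frequency of @min (clamped to the highest allowed |
| * maximum) for all CPUs, either through the frequency mitigation task or |
| * directly via cpufreq when that task is not running. |
| */ |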
| static int update_cpu_min_freq_all(uint32_t min) |
| { |
| uint32_t cpu = 0, _cluster = 0; |
| int ret = 0; |
| struct cluster_info *cluster_ptr = NULL; |
| bool valid_table = false; |
| |
| if (!freq_table_get) { |
| ret = check_freq_table(); |
| if (ret && !core_ptr) { |
| pr_err("Fail to get freq table. err:%d\n", ret); |
| return ret; |
| } |
| } |
| /* Clamp min if it is larger than the allowed max frequency */ |
| if (core_ptr) { |
| for (; _cluster < core_ptr->entity_count; _cluster++) { |
| cluster_ptr = &core_ptr->child_entity_ptr[_cluster]; |
| if (!cluster_ptr->freq_table) |
| continue; |
| valid_table = true; |
| min = min(min, |
| cluster_ptr->freq_table[ |
| cluster_ptr->freq_idx_high].frequency); |
| } |
| if (!valid_table) |
| return ret; |
| } else { |
| min = min(min, table[limit_idx_high].frequency); |
| } |
| |
| pr_debug("Requesting min freq:%u for all CPU's\n", min); |
| if (freq_mitigation_task) { |
| min_freq_limit = min; |
| complete(&freq_mitigation_complete); |
| } else { |
| get_online_cpus(); |
| for_each_possible_cpu(cpu) { |
| cpus[cpu].limited_min_freq = min; |
| if (!SYNC_CORE(cpu)) |
| update_cpu_freq(cpu); |
| } |
| update_cluster_freq(); |
| put_online_cpus(); |
| } |
| |
| return ret; |
| } |
| |
| static int vdd_restriction_apply_freq(struct rail *r, int level) |
| { |
| int ret = 0; |
| |
| if (level == r->curr_level) |
| return ret; |
| |
| /* level = -1: disable, level = 0,1,2..n: enable */ |
| if (level == -1) { |
| ret = update_cpu_min_freq_all(r->min_level); |
| if (ret) |
| return ret; |
| else |
| r->curr_level = -1; |
| } else if (level >= 0 && level < (r->num_levels)) { |
| ret = update_cpu_min_freq_all(r->levels[level]); |
| if (ret) |
| return ret; |
| else |
| r->curr_level = level; |
| } else { |
| pr_err("level input:%d is not within range\n", level); |
| return -EINVAL; |
| } |
| |
| return ret; |
| } |
| |
| static int vdd_restriction_apply_voltage(struct rail *r, int level) |
| { |
| int ret = 0; |
| |
| if (r->reg == NULL) { |
| pr_err("%s does not have a regulator handle, can't apply vdd\n", |
| r->name); |
| return -EFAULT; |
| } |
| if (level == r->curr_level) |
| return ret; |
| |
| /* level = -1: disable, level = 0,1,2..n: enable */ |
| if (level == -1) { |
| ret = regulator_set_voltage(r->reg, r->min_level, |
| r->levels[r->num_levels - 1]); |
| if (!ret) |
| r->curr_level = -1; |
| pr_debug("Requested min level for %s. curr level: %d\n", |
| r->name, r->curr_level); |
| } else if (level >= 0 && level < (r->num_levels)) { |
| ret = regulator_set_voltage(r->reg, r->levels[level], |
| r->levels[r->num_levels - 1]); |
| if (!ret) |
| r->curr_level = level; |
| pr_debug("Requesting level %d for %s. curr level: %d\n", |
| level, r->name, r->curr_level); |
| } else { |
| pr_err("level input:%d is not within range\n", level); |
| return -EINVAL; |
| } |
| |
| return ret; |
| } |
| |
| /* Set all rails to the same mode */ |
| static int psm_set_mode_all(int mode) |
| { |
| int i = 0; |
| int fail_cnt = 0; |
| int ret = 0; |
| |
| pr_debug("Requesting PMIC Mode: %d\n", mode); |
| for (i = 0; i < psm_rails_cnt; i++) { |
| if (psm_rails[i].mode != mode) { |
| ret = rpm_regulator_set_mode(psm_rails[i].reg, mode); |
| if (ret) { |
| pr_err("Cannot set mode:%d for %s. err:%d", |
| mode, psm_rails[i].name, ret); |
| fail_cnt++; |
| } else |
| psm_rails[i].mode = mode; |
| } |
| } |
| |
| return fail_cnt ? (-EFAULT) : ret; |
| } |
| |
| static ssize_t vdd_rstr_en_show( |
| struct kobject *kobj, struct kobj_attribute *attr, char *buf) |
| { |
| struct vdd_rstr_enable *en = VDD_RSTR_ENABLE_FROM_ATTRIBS(attr); |
| |
| return snprintf(buf, PAGE_SIZE, "%d\n", en->enabled); |
| } |
| |
| static ssize_t vdd_rstr_en_store(struct kobject *kobj, |
| struct kobj_attribute *attr, const char *buf, size_t count) |
| { |
| int ret = 0; |
| int i = 0; |
| uint8_t en_cnt = 0; |
| uint8_t dis_cnt = 0; |
| uint32_t val = 0; |
| struct kernel_param kp; |
| struct vdd_rstr_enable *en = VDD_RSTR_ENABLE_FROM_ATTRIBS(attr); |
| |
| mutex_lock(&vdd_rstr_mutex); |
| kp.arg = &val; |
| ret = param_set_bool(buf, &kp); |
| if (ret) { |
| pr_err("Invalid input %s for enabled\n", buf); |
| goto done_vdd_rstr_en; |
| } |
| |
| if ((val == 0) && (en->enabled == 0)) |
| goto done_vdd_rstr_en; |
| |
| for (i = 0; i < rails_cnt; i++) { |
| if (rails[i].freq_req == 1 && freq_table_get) |
| ret = vdd_restriction_apply_freq(&rails[i], |
| (val) ? 0 : -1); |
| else |
| ret = vdd_restriction_apply_voltage(&rails[i], |
| (val) ? 0 : -1); |
| |
| /* |
| * Even if one rail fails, still try to set the |
| * others; continue the loop. |
| */ |
| if (ret) |
| pr_err("Set vdd restriction for %s failed\n", |
| rails[i].name); |
| else { |
| if (val) |
| en_cnt++; |
| else |
| dis_cnt++; |
| } |
| } |
| /* As long as one rail is enabled, vdd rstr is enabled */ |
| if (val && en_cnt) |
| en->enabled = 1; |
| else if (!val && (dis_cnt == rails_cnt)) |
| en->enabled = 0; |
| pr_debug("%s vdd restriction. curr: %d\n", |
| (val) ? "Enable" : "Disable", en->enabled); |
| |
| done_vdd_rstr_en: |
| mutex_unlock(&vdd_rstr_mutex); |
| return count; |
| } |
| |
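| /* |
| * Send the requested CX/GFX temperature band to the RPM as a KVP on the |
| * phase control resource and wait for the acknowledgement. |
| */ |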
| static int send_temperature_band(enum msm_thermal_phase_ctrl phase, |
| enum msm_temp_band req_band) |
| { |
| int ret = 0; |
| uint32_t msg_id; |
| struct msm_rpm_request *rpm_req; |
| unsigned int band = req_band; |
| uint32_t key, resource, resource_id; |
| |
| if (phase < 0 || phase >= MSM_PHASE_CTRL_NR || |
| req_band <= 0 || req_band >= MSM_TEMP_MAX_NR) { |
| pr_err("Invalid input\n"); |
| ret = -EINVAL; |
| goto phase_ctrl_exit; |
| } |
| switch (phase) { |
| case MSM_CX_PHASE_CTRL: |
| key = msm_thermal_info.cx_phase_request_key; |
| break; |
| case MSM_GFX_PHASE_CTRL: |
| key = msm_thermal_info.gfx_phase_request_key; |
| break; |
| default: |
| goto phase_ctrl_exit; |
| break; |
| } |
| |
| resource = msm_thermal_info.phase_rpm_resource_type; |
| resource_id = msm_thermal_info.phase_rpm_resource_id; |
| pr_debug("Sending %s temperature band %d\n", |
| (phase == MSM_CX_PHASE_CTRL) ? "CX" : "GFX", |
| req_band); |
| rpm_req = msm_rpm_create_request(MSM_RPM_CTX_ACTIVE_SET, |
| resource, resource_id, 1); |
| if (!rpm_req) { |
| pr_err("Creating RPM request failed\n"); |
| ret = -ENXIO; |
| goto phase_ctrl_exit; |
| } |
| |
| ret = msm_rpm_add_kvp_data(rpm_req, key, (const uint8_t *)&band, |
| (int)sizeof(band)); |
| if (ret) { |
| pr_err("Adding KVP data failed. err:%d\n", ret); |
| goto free_rpm_handle; |
| } |
| |
| msg_id = msm_rpm_send_request(rpm_req); |
| if (!msg_id) { |
| pr_err("RPM send request failed\n"); |
| ret = -ENXIO; |
| goto free_rpm_handle; |
| } |
| |
| ret = msm_rpm_wait_for_ack(msg_id); |
| if (ret) { |
| pr_err("RPM wait for ACK failed. err:%d\n", ret); |
| goto free_rpm_handle; |
| } |
| |
| free_rpm_handle: |
| msm_rpm_free_request(rpm_req); |
| phase_ctrl_exit: |
| return ret; |
| } |
| |
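| /* Pack up to the first four characters of @inp into a u32, LSB first */ |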
| static uint32_t msm_thermal_str_to_int(const char *inp) |
| { |
| int i, len; |
| uint32_t output = 0; |
| |
| len = strnlen(inp, sizeof(uint32_t)); |
| for (i = 0; i < len; i++) |
| output |= inp[i] << (i * 8); |
| |
| return output; |
| } |
| |
| static ssize_t sensor_info_show( |
| struct kobject *kobj, struct kobj_attribute *attr, char *buf) |
| { |
| int i; |
| ssize_t tot_size = 0, size = 0; |
| |
| for (i = 0; i < sensor_cnt; i++) { |
| size = snprintf(&buf[tot_size], PAGE_SIZE - tot_size, |
| "%s:%s:%s:%d ", |
| sensors[i].type, sensors[i].name, |
| sensors[i].alias ? : "", |
| sensors[i].scaling_factor); |
| if (tot_size + size >= PAGE_SIZE) { |
| pr_err("Not enough buffer size\n"); |
| break; |
| } |
| tot_size += size; |
| } |
| if (tot_size) |
| buf[tot_size - 1] = '\n'; |
| |
| return tot_size; |
| } |
| |
| static struct vdd_rstr_enable vdd_rstr_en = { |
| .ko_attr.attr.name = __stringify(enabled), |
| .ko_attr.attr.mode = 0644, |
| .ko_attr.show = vdd_rstr_en_show, |
| .ko_attr.store = vdd_rstr_en_store, |
| .enabled = 1, |
| }; |
| |
| static struct attribute *vdd_rstr_en_attribs[] = { |
| &vdd_rstr_en.ko_attr.attr, |
| NULL, |
| }; |
| |
| static struct attribute_group vdd_rstr_en_attribs_gp = { |
| .attrs = vdd_rstr_en_attribs, |
| }; |
| |
| static ssize_t vdd_rstr_reg_value_show( |
| struct kobject *kobj, struct kobj_attribute *attr, char *buf) |
| { |
| int val = 0; |
| struct rail *reg = VDD_RSTR_REG_VALUE_FROM_ATTRIBS(attr); |
| /* -1: disabled, -2: failed to get regulator handle */ |
| if (reg->curr_level < 0) |
| val = reg->curr_level; |
| else |
| val = reg->levels[reg->curr_level]; |
| |
| return snprintf(buf, PAGE_SIZE, "%d\n", val); |
| } |
| |
| static ssize_t vdd_rstr_reg_level_show( |
| struct kobject *kobj, struct kobj_attribute *attr, char *buf) |
| { |
| struct rail *reg = VDD_RSTR_REG_LEVEL_FROM_ATTRIBS(attr); |
| return snprintf(buf, PAGE_SIZE, "%d\n", reg->curr_level); |
| } |
| |
| static ssize_t vdd_rstr_reg_level_store(struct kobject *kobj, |
| struct kobj_attribute *attr, const char *buf, size_t count) |
| { |
| int ret = 0; |
| int val = 0; |
| |
| struct rail *reg = VDD_RSTR_REG_LEVEL_FROM_ATTRIBS(attr); |
| |
| mutex_lock(&vdd_rstr_mutex); |
| if (vdd_rstr_en.enabled == 0) |
| goto done_store_level; |
| |
| ret = kstrtouint(buf, 10, &val); |
| if (ret) { |
| pr_err("Invalid input %s for level\n", buf); |
| goto done_store_level; |
| } |
| |
| if (val < 0 || val > reg->num_levels - 1) { |
| pr_err("Invalid number %d for level\n", val); |
| goto done_store_level; |
| } |
| |
| if (val != reg->curr_level) { |
| if (reg->freq_req == 1 && freq_table_get) |
| update_cpu_min_freq_all(reg->levels[val]); |
| else { |
| ret = vdd_restriction_apply_voltage(reg, val); |
| if (ret) { |
| pr_err( |
| "Set vdd restriction for regulator %s failed. err:%d\n", |
| reg->name, ret); |
| goto done_store_level; |
| } |
| } |
| reg->curr_level = val; |
| pr_debug("Request level %d for %s\n", |
| reg->curr_level, reg->name); |
| } |
| |
| done_store_level: |
| mutex_unlock(&vdd_rstr_mutex); |
| return count; |
| } |
| |
| static int request_optimum_current(struct psm_rail *rail, enum ocr_request req) |
| { |
| int ret = 0; |
| |
| if ((!rail) || (req >= OPTIMUM_CURRENT_NR) || |
| (req < 0)) { |
| pr_err("Invalid input %d\n", req); |
| ret = -EINVAL; |
| goto request_ocr_exit; |
| } |
| |
| ret = regulator_set_optimum_mode(rail->phase_reg, |
| (req == OPTIMUM_CURRENT_MAX) ? MAX_CURRENT_UA : 0); |
| if (ret < 0) { |
| pr_err("Optimum current request failed. err:%d\n", ret); |
| goto request_ocr_exit; |
| } |
| ret = 0; /* regulator_set_optimum_mode returns the mode on success */ |
| pr_debug("Requested optimum current mode: %d\n", req); |
| |
| request_ocr_exit: |
| return ret; |
| } |
| |
| static int ocr_set_mode_all(enum ocr_request req) |
| { |
| int ret = 0, i; |
| |
| for (i = 0; i < ocr_rail_cnt; i++) { |
| if (ocr_rails[i].mode == req) |
| continue; |
| ret = request_optimum_current(&ocr_rails[i], req); |
| if (ret) |
| goto ocr_set_mode_exit; |
| ocr_rails[i].mode = req; |
| } |
| |
| ocr_set_mode_exit: |
| return ret; |
| } |
| |
| static ssize_t ocr_reg_mode_show(struct kobject *kobj, |
| struct kobj_attribute *attr, char *buf) |
| { |
| struct psm_rail *reg = PSM_REG_MODE_FROM_ATTRIBS(attr); |
| return snprintf(buf, PAGE_SIZE, "%d\n", reg->mode); |
| } |
| |
| static ssize_t ocr_reg_mode_store(struct kobject *kobj, |
| struct kobj_attribute *attr, const char *buf, size_t count) |
| { |
| int ret = 0; |
| int val = 0; |
| struct psm_rail *reg = PSM_REG_MODE_FROM_ATTRIBS(attr); |
| |
| if (!ocr_enabled) |
| return count; |
| |
| mutex_lock(&ocr_mutex); |
| ret = kstrtoint(buf, 10, &val); |
| if (ret) { |
| pr_err("Invalid input %s for mode. err:%d\n", |
| buf, ret); |
| goto done_ocr_store; |
| } |
| |
| if ((val != OPTIMUM_CURRENT_MAX) && |
| (val != OPTIMUM_CURRENT_MIN)) { |
| pr_err("Invalid value %d for mode\n", val); |
| goto done_ocr_store; |
| } |
| |
| if (val != reg->mode) { |
| ret = request_optimum_current(reg, val); |
| if (ret) |
| goto done_ocr_store; |
| reg->mode = val; |
| } |
| |
| done_ocr_store: |
| mutex_unlock(&ocr_mutex); |
| return count; |
| } |
| |
| static ssize_t store_phase_request(const char *buf, size_t count, bool is_cx) |
| { |
| int ret = 0, val; |
| struct mutex *phase_mutex = (is_cx) ? (&cx_mutex) : (&gfx_mutex); |
| enum msm_thermal_phase_ctrl phase_req = (is_cx) ? MSM_CX_PHASE_CTRL : |
| MSM_GFX_PHASE_CTRL; |
| |
| ret = kstrtoint(buf, 10, &val); |
| if (ret) { |
| pr_err("Invalid input %s for %s temperature band\n", |
| buf, (is_cx) ? "CX" : "GFX"); |
| goto phase_store_exit; |
| } |
| if ((val <= 0) || (val >= MSM_TEMP_MAX_NR)) { |
| pr_err("Invalid input %d for %s temperature band\n", |
| val, (is_cx) ? "CX" : "GFX"); |
| ret = -EINVAL; |
| goto phase_store_exit; |
| } |
| mutex_lock(phase_mutex); |
| if (val != ((is_cx) ? curr_cx_band : curr_gfx_band)) { |
| ret = send_temperature_band(phase_req, val); |
| if (!ret) { |
| *((is_cx) ? &curr_cx_band : &curr_gfx_band) = val; |
| } else { |
| pr_err("Failed to send %d temp. band to %s rail\n", val, |
| (is_cx) ? "CX" : "GFX"); |
| goto phase_store_unlock_exit; |
| } |
| } |
| ret = count; |
| phase_store_unlock_exit: |
| mutex_unlock(phase_mutex); |
| phase_store_exit: |
| return ret; |
| } |
| |
| #define show_phase(_name, _variable) \ |
| static ssize_t _name##_phase_show(struct kobject *kobj, \ |
| struct kobj_attribute *attr, char *buf) \ |
| { \ |
| return snprintf(buf, PAGE_SIZE, "%u\n", _variable); \ |
| } |
| |
| #define store_phase(_name, _variable, _iscx) \ |
| static ssize_t _name##_phase_store(struct kobject *kobj, \ |
| struct kobj_attribute *attr, const char *buf, size_t count) \ |
| { \ |
| return store_phase_request(buf, count, _iscx); \ |
| } |
| |
| show_phase(gfx, curr_gfx_band) |
| show_phase(cx, curr_cx_band) |
| store_phase(gfx, curr_gfx_band, false) |
| store_phase(cx, curr_cx_band, true) |
| |
| static ssize_t psm_reg_mode_show( |
| struct kobject *kobj, struct kobj_attribute *attr, char *buf) |
| { |
| struct psm_rail *reg = PSM_REG_MODE_FROM_ATTRIBS(attr); |
| return snprintf(buf, PAGE_SIZE, "%d\n", reg->mode); |
| } |
| |
| static ssize_t psm_reg_mode_store(struct kobject *kobj, |
| struct kobj_attribute *attr, const char *buf, size_t count) |
| { |
| int ret = 0; |
| int val = 0; |
| struct psm_rail *reg = PSM_REG_MODE_FROM_ATTRIBS(attr); |
| |
| mutex_lock(&psm_mutex); |
| ret = kstrtoint(buf, 10, &val); |
| if (ret) { |
| pr_err("Invalid input %s for mode\n", buf); |
| goto done_psm_store; |
| } |
| |
| if ((val != PMIC_PWM_MODE) && (val != PMIC_AUTO_MODE)) { |
| pr_err("Invalid number %d for mode\n", val); |
| goto done_psm_store; |
| } |
| |
| if (val != reg->mode) { |
| ret = rpm_regulator_set_mode(reg->reg, val); |
| if (ret) { |
| pr_err("Fail to set Mode:%d for %s. err:%d\n", |
| val, reg->name, ret); |
| goto done_psm_store; |
| } |
| reg->mode = val; |
| } |
| |
| done_psm_store: |
| mutex_unlock(&psm_mutex); |
| return count; |
| } |
| |
| static int check_sensor_id(int sensor_id) |
| { |
| int i = 0; |
| bool hw_id_found = false; |
| int ret = 0; |
| |
| for (i = 0; i < max_tsens_num; i++) { |
| if (sensor_id == tsens_id_map[i]) { |
| hw_id_found = true; |
| break; |
| } |
| } |
| if (!hw_id_found) { |
| pr_err("Invalid sensor hw id:%d\n", sensor_id); |
| return -EINVAL; |
| } |
| |
| return ret; |
| } |
| |
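| /* |
| * Allocate tsens_id_map and fill it with the TSENS hardware ID of each |
| * sensor index, falling back to an identity mapping when the TSENS |
| * driver returns -ENXIO. |
| */ |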
| static int create_sensor_id_map(void) |
| { |
| int i = 0; |
| int ret = 0; |
| |
| tsens_id_map = kzalloc(sizeof(int) * max_tsens_num, |
| GFP_KERNEL); |
| if (!tsens_id_map) { |
| pr_err("Cannot allocate memory for tsens_id_map\n"); |
| return -ENOMEM; |
| } |
| |
| for (i = 0; i < max_tsens_num; i++) { |
| ret = tsens_get_hw_id_mapping(i, &tsens_id_map[i]); |
| /* If it returns -ENXIO, the hw_id defaults to the sequence index */ |
| if (ret) { |
| if (ret == -ENXIO) { |
| tsens_id_map[i] = i; |
| ret = 0; |
| } else { |
| pr_err("Failed to get hw id for id:%d.err:%d\n", |
| i, ret); |
| goto fail; |
| } |
| } |
| } |
| |
| return ret; |
| fail: |
| kfree(tsens_id_map); |
| return ret; |
| } |
| |
| /* 1:enable, 0:disable */ |
| static int vdd_restriction_apply_all(int en) |
| { |
| int i = 0; |
| int en_cnt = 0; |
| int dis_cnt = 0; |
| int fail_cnt = 0; |
| int ret = 0; |
| |
| for (i = 0; i < rails_cnt; i++) { |
| if (rails[i].freq_req == 1) |
| if (freq_table_get) |
| ret = vdd_restriction_apply_freq(&rails[i], |
| en ? 0 : -1); |
| else |
| continue; |
| else |
| ret = vdd_restriction_apply_voltage(&rails[i], |
| en ? 0 : -1); |
| if (ret) { |
| pr_err("Failed to %s for %s. err:%d", |
| (en) ? "enable" : "disable", |
| rails[i].name, ret); |
| fail_cnt++; |
| } else { |
| if (en) |
| en_cnt++; |
| else |
| dis_cnt++; |
| } |
| } |
| |
| /* As long as one rail is enabled, vdd rstr is enabled */ |
| if (en && en_cnt) |
| vdd_rstr_en.enabled = 1; |
| else if (!en && (dis_cnt == rails_cnt)) |
| vdd_rstr_en.enabled = 0; |
| |
| /* |
| * Check fail_cnt to make sure the restriction was applied to |
| * all of the rails successfully |
| */ |
| if (fail_cnt) |
| return -EFAULT; |
| return ret; |
| } |
| |
| static int set_and_activate_threshold(uint32_t sensor_id, |
| struct sensor_threshold *threshold) |
| { |
| int ret = 0; |
| |
| ret = sensor_set_trip(sensor_id, threshold); |
| if (ret != 0) { |
| pr_err("sensor:%u Error in setting trip:%d. err:%d\n", |
| sensor_id, threshold->trip, ret); |
| goto set_done; |
| } |
| |
| ret = sensor_activate_trip(sensor_id, threshold, true); |
| if (ret != 0) { |
| pr_err("sensor:%u Error in enabling trip:%d. err:%d\n", |
| sensor_id, threshold->trip, ret); |
| goto set_done; |
| } |
| |
| set_done: |
| return ret; |
| } |
| |
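| /* |
| * Read a temperature either through the thermal zone sensor framework or |
| * directly from a TSENS sensor, depending on @type. |
| */ |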
| static int therm_get_temp(uint32_t id, enum sensor_id_type type, long *temp) |
| { |
| int ret = 0; |
| struct tsens_device tsens_dev; |
| |
| if (!temp) { |
| pr_err("Invalid value\n"); |
| ret = -EINVAL; |
| goto get_temp_exit; |
| } |
| |
| switch (type) { |
| case THERM_ZONE_ID: |
| ret = sensor_get_temp(id, temp); |
| if (ret) { |
| pr_err("Unable to read thermal zone sensor:%d\n", id); |
| goto get_temp_exit; |
| } |
| break; |
| case THERM_TSENS_ID: |
| tsens_dev.sensor_num = id; |
| ret = tsens_get_temp(&tsens_dev, temp); |
| if (ret) { |
| pr_err("Unable to read TSENS sensor:%d\n", |
| tsens_dev.sensor_num); |
| goto get_temp_exit; |
| } |
| break; |
| default: |
| pr_err("Invalid type\n"); |
| ret = -EINVAL; |
| goto get_temp_exit; |
| } |
| |
| get_temp_exit: |
| return ret; |
| } |
| |
| static int msm_thermal_panic_callback(struct notifier_block *nfb, |
| unsigned long event, void *data) |
| { |
| int i; |
| |
| for (i = 0; i < max_tsens_num; i++) { |
| therm_get_temp(tsens_id_map[i], |
| THERM_TSENS_ID, |
| &tsens_temp_at_panic[i]); |
| if (tsens_temp_print) |
| pr_err("tsens%d temperature:%ldC\n", |
| tsens_id_map[i], tsens_temp_at_panic[i]); |
| } |
| |
| return NOTIFY_OK; |
| } |
| |
| static struct notifier_block msm_thermal_panic_notifier = { |
| .notifier_call = msm_thermal_panic_callback, |
| }; |
| |
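/*
 * Arm the configurable high/low trips for a thermal zone based on the
 * current temperature. A negative return is an error; otherwise the
 * return encodes which trips were set (see UPDATE_THRESHOLD_SET).
 */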
| int sensor_mgr_set_threshold(uint32_t zone_id, |
| struct sensor_threshold *threshold) |
| { |
| int i = 0, ret = 0; |
| long temp; |
| |
| if (!threshold) { |
| pr_err("Invalid input\n"); |
| ret = -EINVAL; |
| goto set_threshold_exit; |
| } |
| |
| ret = therm_get_temp(zone_id, THERM_ZONE_ID, &temp); |
| if (ret) { |
| pr_err("Unable to read temperature for zone:%d. err:%d\n", |
| zone_id, ret); |
| goto set_threshold_exit; |
| } |
| pr_debug("Sensor:[%d] temp:[%ld]\n", zone_id, temp); |
| while (i < MAX_THRESHOLD) { |
| switch (threshold[i].trip) { |
| case THERMAL_TRIP_CONFIGURABLE_HI: |
| if (threshold[i].temp >= temp) { |
| ret = set_and_activate_threshold(zone_id, |
| &threshold[i]); |
| if (ret) |
| goto set_threshold_exit; |
| UPDATE_THRESHOLD_SET(ret, |
| THERMAL_TRIP_CONFIGURABLE_HI); |
| } |
| break; |
| case THERMAL_TRIP_CONFIGURABLE_LOW: |
| if (threshold[i].temp <= temp) { |
| ret = set_and_activate_threshold(zone_id, |
| &threshold[i]); |
| if (ret) |
| goto set_threshold_exit; |
| UPDATE_THRESHOLD_SET(ret, |
| THERMAL_TRIP_CONFIGURABLE_LOW); |
| } |
| break; |
| default: |
| pr_err("zone:%u Invalid trip:%d\n", zone_id, |
| threshold[i].trip); |
| break; |
| } |
| i++; |
| } |
| set_threshold_exit: |
| return ret; |
| } |
| |
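/* Vote for the VDD_MX floor voltage. Called with vdd_mx_mutex held. */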
| static int apply_vdd_mx_restriction(void) |
| { |
| int ret = 0; |
| |
| if (mx_restr_applied) |
| goto done; |
| |
| ret = regulator_set_voltage(vdd_mx, msm_thermal_info.vdd_mx_min, |
| INT_MAX); |
| if (ret) { |
| pr_err("Failed to add mx vote, error %d\n", ret); |
| goto done; |
| } |
| |
| ret = regulator_enable(vdd_mx); |
| if (ret) |
| pr_err("Failed to vote for mx voltage %d, error %d\n", |
| msm_thermal_info.vdd_mx_min, ret); |
| else |
| mx_restr_applied = true; |
| |
| done: |
| return ret; |
| } |
| |
| static int remove_vdd_mx_restriction(void) |
| { |
| int ret = 0; |
| |
| if (!mx_restr_applied) |
| goto done; |
| |
| ret = regulator_disable(vdd_mx); |
| if (ret) { |
| pr_err("Failed to disable mx voting, error %d\n", ret); |
| goto done; |
| } |
| |
| ret = regulator_set_voltage(vdd_mx, 0, INT_MAX); |
| if (ret) |
| pr_err("Failed to remove mx vote, error %d\n", ret); |
| else |
| mx_restr_applied = false; |
| |
| done: |
| return ret; |
| } |
| |
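/*
 * Polling-mode VDD_MX mitigation: apply the MX floor vote when any sensor is
 * at or below the low threshold, and remove it once all sensors are at or
 * above the threshold plus hysteresis.
 */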
| static int do_vdd_mx(void) |
| { |
| long temp = 0; |
| int ret = 0; |
| int i = 0; |
| int dis_cnt = 0; |
| |
| if (!vdd_mx_enabled) |
| return ret; |
| |
| mutex_lock(&vdd_mx_mutex); |
| for (i = 0; i < thresh[MSM_VDD_MX_RESTRICTION].thresh_ct; i++) { |
| ret = therm_get_temp( |
| thresh[MSM_VDD_MX_RESTRICTION].thresh_list[i].sensor_id, |
| thresh[MSM_VDD_MX_RESTRICTION].thresh_list[i].id_type, |
| &temp); |
| if (ret) { |
| pr_err("Unable to read TSENS sensor:%d, err:%d\n", |
| thresh[MSM_VDD_MX_RESTRICTION].thresh_list[i]. |
| sensor_id, ret); |
| dis_cnt++; |
| continue; |
| } |
| if (temp <= msm_thermal_info.vdd_mx_temp_degC) { |
| ret = apply_vdd_mx_restriction(); |
| if (ret) |
| pr_err( |
| "Failed to apply mx restriction\n"); |
| goto exit; |
| } else if (temp >= (msm_thermal_info.vdd_mx_temp_degC + |
| msm_thermal_info.vdd_mx_temp_hyst_degC)) { |
| dis_cnt++; |
| } |
| } |
| |
| if ((dis_cnt == thresh[MSM_VDD_MX_RESTRICTION].thresh_ct)) { |
| ret = remove_vdd_mx_restriction(); |
| if (ret) |
| pr_err("Failed to remove vdd mx restriction\n"); |
| } |
| |
| exit: |
| mutex_unlock(&vdd_mx_mutex); |
| return ret; |
| } |
| |
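/*
 * Threshold-trigger handler for VDD_MX: track which sensors are below the
 * low trip, apply or remove the MX vote accordingly and re-arm the trips.
 */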
| static void vdd_mx_notify(struct therm_threshold *trig_thresh) |
| { |
| static uint32_t mx_sens_status; |
| int ret; |
| |
| pr_debug("Sensor%d trigger recevied for type %d\n", |
| trig_thresh->sensor_id, |
| trig_thresh->trip_triggered); |
| |
| if (!vdd_mx_enabled) |
| return; |
| |
| mutex_lock(&vdd_mx_mutex); |
| |
| switch (trig_thresh->trip_triggered) { |
| case THERMAL_TRIP_CONFIGURABLE_LOW: |
| mx_sens_status |= BIT(trig_thresh->sensor_id); |
| break; |
| case THERMAL_TRIP_CONFIGURABLE_HI: |
| if (mx_sens_status & BIT(trig_thresh->sensor_id)) |
| mx_sens_status ^= BIT(trig_thresh->sensor_id); |
| break; |
| default: |
| pr_err("Unsupported trip type\n"); |
| break; |
| } |
| |
| if (mx_sens_status) { |
| ret = apply_vdd_mx_restriction(); |
| if (ret) |
| pr_err("Failed to apply mx restriction\n"); |
| } else if (!mx_sens_status) { |
| ret = remove_vdd_mx_restriction(); |
| if (ret) |
| pr_err("Failed to remove vdd mx restriction\n"); |
| } |
| mutex_unlock(&vdd_mx_mutex); |
| sensor_mgr_set_threshold(trig_thresh->sensor_id, |
| trig_thresh->threshold); |
| } |
| |
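/*
 * Last-resort mitigation: record the thermal restart reason and request a
 * secure watchdog bite to reset the system.
 */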
| static void msm_thermal_bite(int tsens_id, long temp) |
| { |
| struct scm_desc desc; |
| |
| pr_err("TSENS:%d reached temperature:%ld. System reset\n", |
| tsens_id, temp); |
| |
| qpnp_pon_set_restart_reason(PON_RESTART_REASON_THERMAL); |
| |
| if (!is_scm_armv8()) { |
| scm_call_atomic1(SCM_SVC_BOOT, THERM_SECURE_BITE_CMD, 0); |
| } else { |
| desc.args[0] = 0; |
| desc.arginfo = SCM_ARGS(1); |
| scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_BOOT, |
| THERM_SECURE_BITE_CMD), &desc); |
| } |
| } |
| |
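/* Reset the system if any sensor reaches the thermal-reset temperature. */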
| static int do_therm_reset(void) |
| { |
| int ret = 0, i; |
| long temp = 0; |
| |
| if (!therm_reset_enabled) |
| return ret; |
| |
| for (i = 0; i < thresh[MSM_THERM_RESET].thresh_ct; i++) { |
| ret = therm_get_temp( |
| thresh[MSM_THERM_RESET].thresh_list[i].sensor_id, |
| thresh[MSM_THERM_RESET].thresh_list[i].id_type, |
| &temp); |
| if (ret) { |
| pr_err("Unable to read TSENS sensor:%d. err:%d\n", |
| thresh[MSM_THERM_RESET].thresh_list[i].sensor_id, |
| ret); |
| continue; |
| } |
| |
| if (temp >= msm_thermal_info.therm_reset_temp_degC) |
| msm_thermal_bite( |
| thresh[MSM_THERM_RESET].thresh_list[i].sensor_id, temp); |
| } |
| |
| return ret; |
| } |
| |
| static void therm_reset_notify(struct therm_threshold *thresh_data) |
| { |
	long temp = 0;
| int ret = 0; |
| |
| if (!therm_reset_enabled) |
| return; |
| |
| if (!thresh_data) { |
| pr_err("Invalid input\n"); |
| return; |
| } |
| |
| switch (thresh_data->trip_triggered) { |
| case THERMAL_TRIP_CONFIGURABLE_HI: |
| ret = therm_get_temp(thresh_data->sensor_id, |
| thresh_data->id_type, &temp); |
| if (ret) |
| pr_err("Unable to read TSENS sensor:%d. err:%d\n", |
| thresh_data->sensor_id, ret); |
| msm_thermal_bite(tsens_id_map[thresh_data->sensor_id], |
| temp); |
| break; |
| case THERMAL_TRIP_CONFIGURABLE_LOW: |
| break; |
| default: |
| pr_err("Invalid trip type\n"); |
| break; |
| } |
| sensor_mgr_set_threshold(thresh_data->sensor_id, |
| thresh_data->threshold); |
| } |
| |
| #ifdef CONFIG_SMP |
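/*
 * Polling-mode core control: offline one masked core per pass while the
 * temperature is at or above the core limit, and bring previously offlined
 * cores back online, one per pass, once it drops below the limit minus
 * hysteresis.
 */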
| static void __ref do_core_control(long temp) |
| { |
| int i = 0; |
| int ret = 0; |
| |
| if (!core_control_enabled) |
| return; |
| |
| mutex_lock(&core_control_mutex); |
| if (msm_thermal_info.core_control_mask && |
| temp >= msm_thermal_info.core_limit_temp_degC) { |
		for (i = num_possible_cpus() - 1; i > 0; i--) {
| if (!(msm_thermal_info.core_control_mask & BIT(i))) |
| continue; |
| if (cpus_offlined & BIT(i) && !cpu_online(i)) |
| continue; |
| pr_info("Set Offline: CPU%d Temp: %ld\n", |
| i, temp); |
| if (cpu_online(i)) { |
| trace_thermal_pre_core_offline(i); |
| ret = cpu_down(i); |
| if (ret) |
| pr_err("Error %d offline core %d\n", |
| ret, i); |
| trace_thermal_post_core_offline(i, |
| cpumask_test_cpu(i, cpu_online_mask)); |
| } |
| cpus_offlined |= BIT(i); |
| break; |
| } |
| } else if (msm_thermal_info.core_control_mask && cpus_offlined && |
| temp <= (msm_thermal_info.core_limit_temp_degC - |
| msm_thermal_info.core_temp_hysteresis_degC)) { |
| for (i = 0; i < num_possible_cpus(); i++) { |
| if (!(cpus_offlined & BIT(i))) |
| continue; |
| cpus_offlined &= ~BIT(i); |
| pr_info("Allow Online CPU%d Temp: %ld\n", |
| i, temp); |
| /* |
| * If this core is already online, then bring up the |
| * next offlined core. |
| */ |
| if (cpu_online(i)) |
| continue; |
			/*
			 * If this core wasn't previously online, don't put
			 * it online now.
			 */
| if (!(cpumask_test_cpu(i, cpus_previously_online))) |
| continue; |
| trace_thermal_pre_core_online(i); |
| ret = cpu_up(i); |
| if (ret) |
| pr_err("Error %d online core %d\n", |
| ret, i); |
| trace_thermal_post_core_online(i, |
| cpumask_test_cpu(i, cpu_online_mask)); |
| break; |
| } |
| } |
| mutex_unlock(&core_control_mutex); |
| } |
| /* Call with core_control_mutex locked */ |
| static int __ref update_offline_cores(int val) |
| { |
| uint32_t cpu = 0; |
| int ret = 0; |
| uint32_t previous_cpus_offlined = 0; |
| |
| if (!core_control_enabled) |
| return 0; |
| |
| previous_cpus_offlined = cpus_offlined; |
| cpus_offlined = msm_thermal_info.core_control_mask & val; |
| |
| for_each_possible_cpu(cpu) { |
| if (cpus_offlined & BIT(cpu)) { |
| if (!cpu_online(cpu)) |
| continue; |
| trace_thermal_pre_core_offline(cpu); |
| ret = cpu_down(cpu); |
| if (ret) |
| pr_err("Unable to offline CPU%d. err:%d\n", |
| cpu, ret); |
| else |
| pr_debug("Offlined CPU%d\n", cpu); |
| trace_thermal_post_core_offline(cpu, |
| cpumask_test_cpu(cpu, cpu_online_mask)); |
| } else if (online_core && (previous_cpus_offlined & BIT(cpu))) { |
| if (cpu_online(cpu)) |
| continue; |
			/*
			 * If this core wasn't previously online, don't put
			 * it online now.
			 */
| if (!(cpumask_test_cpu(cpu, cpus_previously_online))) |
| continue; |
| trace_thermal_pre_core_online(cpu); |
| ret = cpu_up(cpu); |
| if (ret && ret == notifier_to_errno(NOTIFY_BAD)) { |
| pr_debug("Onlining CPU%d is vetoed\n", cpu); |
| } else if (ret) { |
| cpus_offlined |= BIT(cpu); |
| pr_err("Unable to online CPU%d. err:%d\n", |
| cpu, ret); |
| } else { |
| pr_debug("Onlined CPU%d\n", cpu); |
| } |
| trace_thermal_post_core_online(cpu, |
| cpumask_test_cpu(cpu, cpu_online_mask)); |
| } |
| } |
| return ret; |
| } |
| |
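/*
 * Hotplug mitigation thread: waits on hotplug_notify_complete, re-arms any
 * cleared thresholds, merges thermal, user and device-manager offline
 * requests into one mask and applies it with update_offline_cores().
 */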
| static __ref int do_hotplug(void *data) |
| { |
| int ret = 0; |
| uint32_t cpu = 0, mask = 0; |
| struct device_clnt_data *clnt = NULL; |
| struct sched_param param = {.sched_priority = MAX_RT_PRIO-2}; |
| |
| if (!core_control_enabled) { |
| pr_debug("Core control disabled\n"); |
| return -EINVAL; |
| } |
| |
| sched_setscheduler(current, SCHED_FIFO, ¶m); |
| while (!kthread_should_stop()) { |
| while (wait_for_completion_interruptible( |
| &hotplug_notify_complete) != 0) |
| ; |
| INIT_COMPLETION(hotplug_notify_complete); |
| mask = 0; |
| |
| mutex_lock(&core_control_mutex); |
| for_each_possible_cpu(cpu) { |
| if (hotplug_enabled && |
| cpus[cpu].hotplug_thresh_clear) { |
| ret = |
| sensor_mgr_set_threshold(cpus[cpu].sensor_id, |
| &cpus[cpu].threshold[HOTPLUG_THRESHOLD_HIGH]); |
| |
| if (cpus[cpu].offline |
| && !IS_LOW_THRESHOLD_SET(ret)) |
| cpus[cpu].offline = 0; |
| cpus[cpu].hotplug_thresh_clear = false; |
| } |
| if (cpus[cpu].offline || cpus[cpu].user_offline) |
| mask |= BIT(cpu); |
| } |
| if (devices && devices->hotplug_dev) { |
| mutex_lock(&devices->hotplug_dev->clnt_lock); |
| for_each_cpu_mask(cpu, |
| devices->hotplug_dev->active_req.offline_mask) |
| mask |= BIT(cpu); |
| mutex_unlock(&devices->hotplug_dev->clnt_lock); |
| } |
| update_offline_cores(mask); |
| mutex_unlock(&core_control_mutex); |
| |
| if (devices && devices->hotplug_dev) { |
| union device_request req; |
| |
| req.offline_mask = CPU_MASK_NONE; |
| mutex_lock(&devices->hotplug_dev->clnt_lock); |
| for_each_cpu_mask(cpu, |
| devices->hotplug_dev->active_req.offline_mask) |
| if (mask & BIT(cpu)) |
| cpumask_test_and_set_cpu(cpu, |
| &req.offline_mask); |
| |
| list_for_each_entry(clnt, |
| &devices->hotplug_dev->client_list, |
| clnt_ptr) { |
| if (clnt->callback) |
| clnt->callback(clnt, &req, |
| clnt->usr_data); |
| } |
| mutex_unlock(&devices->hotplug_dev->clnt_lock); |
| } |
| sysfs_notify(cc_kobj, NULL, "cpus_offlined"); |
| } |
| |
| return ret; |
| } |
| #else |
| static void __ref do_core_control(long temp) |
| { |
| return; |
| } |
| |
| static __ref int do_hotplug(void *data) |
| { |
| return 0; |
| } |
| |
| static int __ref update_offline_cores(int val) |
| { |
| return 0; |
| } |
| #endif |
| |
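/*
 * Evaluate the GFX rail temperature and move between the NORMAL, WARM and
 * HOT_CRITICAL phase bands with hysteresis.
 */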
| static int do_gfx_phase_cond(void) |
| { |
| long temp = 0; |
| int ret = 0; |
| uint32_t new_req_band = curr_gfx_band; |
| |
| if (!gfx_warm_phase_ctrl_enabled && !gfx_crit_phase_ctrl_enabled) |
| return ret; |
| |
| mutex_lock(&gfx_mutex); |
| if (gfx_warm_phase_ctrl_enabled) { |
| ret = therm_get_temp( |
| thresh[MSM_GFX_PHASE_CTRL_WARM].thresh_list->sensor_id, |
| thresh[MSM_GFX_PHASE_CTRL_WARM].thresh_list->id_type, |
| &temp); |
| if (ret) { |
| pr_err("Unable to read TSENS sensor:%d. err:%d\n", |
| thresh[MSM_GFX_PHASE_CTRL_WARM].thresh_list->sensor_id, |
| ret); |
| goto gfx_phase_cond_exit; |
| } |
| } else { |
| ret = therm_get_temp( |
| thresh[MSM_GFX_PHASE_CTRL_HOT].thresh_list->sensor_id, |
| thresh[MSM_GFX_PHASE_CTRL_HOT].thresh_list->id_type, |
| &temp); |
| if (ret) { |
| pr_err("Unable to read TSENS sensor:%d. err:%d\n", |
| thresh[MSM_GFX_PHASE_CTRL_HOT].thresh_list->sensor_id, |
| ret); |
| goto gfx_phase_cond_exit; |
| } |
| } |
| |
| switch (curr_gfx_band) { |
| case MSM_HOT_CRITICAL: |
| if (temp < (msm_thermal_info.gfx_phase_hot_temp_degC - |
| msm_thermal_info.gfx_phase_hot_temp_hyst_degC)) |
| new_req_band = MSM_WARM; |
| break; |
| case MSM_WARM: |
| if (temp >= msm_thermal_info.gfx_phase_hot_temp_degC) |
| new_req_band = MSM_HOT_CRITICAL; |
| else if (temp < (msm_thermal_info.gfx_phase_warm_temp_degC - |
| msm_thermal_info.gfx_phase_warm_temp_hyst_degC)) |
| new_req_band = MSM_NORMAL; |
| break; |
| case MSM_NORMAL: |
| if (temp >= msm_thermal_info.gfx_phase_warm_temp_degC) |
| new_req_band = MSM_WARM; |
| break; |
| default: |
| if (temp >= msm_thermal_info.gfx_phase_hot_temp_degC) |
| new_req_band = MSM_HOT_CRITICAL; |
| else if (temp >= msm_thermal_info.gfx_phase_warm_temp_degC) |
| new_req_band = MSM_WARM; |
| else |
| new_req_band = MSM_NORMAL; |
| break; |
| } |
| |
| if (new_req_band != curr_gfx_band) { |
| ret = send_temperature_band(MSM_GFX_PHASE_CTRL, new_req_band); |
| if (!ret) { |
| pr_debug("Reached %d band. Temp:%ld\n", new_req_band, |
| temp); |
| curr_gfx_band = new_req_band; |
| } else { |
| pr_err("Error sending temp. band:%d. Temp:%ld. err:%d", |
| new_req_band, temp, ret); |
| } |
| } |
| |
| gfx_phase_cond_exit: |
| mutex_unlock(&gfx_mutex); |
| return ret; |
| } |
| |
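/*
 * Evaluate the CX rail sensors: request the HOT_CRITICAL band if any sensor
 * is hot, and fall back to WARM once all sensors have cooled below the
 * threshold minus hysteresis.
 */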
| static int do_cx_phase_cond(void) |
| { |
| long temp = 0; |
| int i, ret = 0, dis_cnt = 0; |
| |
| if (!cx_phase_ctrl_enabled) |
| return ret; |
| |
| mutex_lock(&cx_mutex); |
| for (i = 0; i < thresh[MSM_CX_PHASE_CTRL_HOT].thresh_ct; i++) { |
| ret = therm_get_temp( |
| thresh[MSM_CX_PHASE_CTRL_HOT].thresh_list[i].sensor_id, |
| thresh[MSM_CX_PHASE_CTRL_HOT].thresh_list[i].id_type, |
| &temp); |
| if (ret) { |
| pr_err("Unable to read TSENS sensor:%d. err:%d\n", |
| thresh[MSM_CX_PHASE_CTRL_HOT].thresh_list[i].sensor_id, |
| ret); |
| dis_cnt++; |
| continue; |
| } |
| |
| if (temp >= msm_thermal_info.cx_phase_hot_temp_degC) { |
| if (curr_cx_band != MSM_HOT_CRITICAL) { |
| ret = send_temperature_band(MSM_CX_PHASE_CTRL, |
| MSM_HOT_CRITICAL); |
| if (!ret) { |
| pr_debug("band:HOT_CRITICAL Temp:%ld\n", |
| temp); |
| curr_cx_band = MSM_HOT_CRITICAL; |
| } else { |
| pr_err("Error %d sending HOT_CRITICAL", |
| ret); |
| } |
| } |
| goto cx_phase_cond_exit; |
| } else if (temp < (msm_thermal_info.cx_phase_hot_temp_degC - |
| msm_thermal_info.cx_phase_hot_temp_hyst_degC)) |
| dis_cnt++; |
| } |
| if (dis_cnt == max_tsens_num && curr_cx_band != MSM_WARM) { |
| ret = send_temperature_band(MSM_CX_PHASE_CTRL, MSM_WARM); |
| if (!ret) { |
| pr_debug("band:WARM Temp:%ld\n", temp); |
| curr_cx_band = MSM_WARM; |
| } else { |
| pr_err("Error sending WARM temp band. err:%d", |
| ret); |
| } |
| } |
| cx_phase_cond_exit: |
| mutex_unlock(&cx_mutex); |
| return ret; |
| } |
| |
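/*
 * Optimum current request: ask for maximum current on all OCR rails when any
 * sensor exceeds the OCR temperature, and return to minimum once every
 * sensor is at or below the threshold minus hysteresis.
 */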
| static int do_ocr(void) |
| { |
| long temp = 0; |
| int ret = 0; |
| int i = 0, j = 0; |
| int pfm_cnt = 0; |
| |
| if (!ocr_enabled) |
| return ret; |
| |
| mutex_lock(&ocr_mutex); |
| for (i = 0; i < thresh[MSM_OCR].thresh_ct; i++) { |
| ret = therm_get_temp( |
| thresh[MSM_OCR].thresh_list[i].sensor_id, |
| thresh[MSM_OCR].thresh_list[i].id_type, |
| &temp); |
| if (ret) { |
| pr_err("Unable to read TSENS sensor %d. err:%d\n", |
| thresh[MSM_OCR].thresh_list[i].sensor_id, |
| ret); |
| pfm_cnt++; |
| continue; |
| } |
| |
| if (temp > msm_thermal_info.ocr_temp_degC) { |
| if (ocr_rails[0].init != OPTIMUM_CURRENT_NR) |
| for (j = 0; j < ocr_rail_cnt; j++) |
| ocr_rails[j].init = OPTIMUM_CURRENT_NR; |
| ret = ocr_set_mode_all(OPTIMUM_CURRENT_MAX); |
| if (ret) |
| pr_err("Error setting max ocr. err:%d\n", |
| ret); |
| else |
| pr_debug("Requested MAX OCR. tsens:%d Temp:%ld", |
| thresh[MSM_OCR].thresh_list[i].sensor_id, temp); |
| goto do_ocr_exit; |
| } else if (temp <= (msm_thermal_info.ocr_temp_degC - |
| msm_thermal_info.ocr_temp_hyst_degC)) |
| pfm_cnt++; |
| } |
| |
| if (pfm_cnt == thresh[MSM_OCR].thresh_ct || |
| ocr_rails[0].init != OPTIMUM_CURRENT_NR) { |
		/*
		 * 'init' not equal to OPTIMUM_CURRENT_NR means this is the
		 * first polling iteration after device probe. On that first
		 * iteration, if the temperature is below the set point, clear
		 * the max current request and reset 'init'.
		 */
| if (ocr_rails[0].init != OPTIMUM_CURRENT_NR) |
| for (j = 0; j < ocr_rail_cnt; j++) |
| ocr_rails[j].init = OPTIMUM_CURRENT_NR; |
| ret = ocr_set_mode_all(OPTIMUM_CURRENT_MIN); |
| if (ret) { |
| pr_err("Error setting min ocr. err:%d\n", |
| ret); |
| goto do_ocr_exit; |
| } else { |
| pr_debug("Requested MIN OCR. Temp:%ld", temp); |
| } |
| } |
| do_ocr_exit: |
| mutex_unlock(&ocr_mutex); |
| return ret; |
| } |
| |
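/*
 * Apply the VDD restriction on all rails when any sensor is at or below the
 * restriction temperature, and remove it once all sensors read warm again.
 */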
| static int do_vdd_restriction(void) |
| { |
| long temp = 0; |
| int ret = 0; |
| int i = 0; |
| int dis_cnt = 0; |
| |
| if (!vdd_rstr_enabled) |
| return ret; |
| |
| if (usefreq && !freq_table_get) { |
| if (check_freq_table() && !core_ptr) |
| return ret; |
| } |
| |
| mutex_lock(&vdd_rstr_mutex); |
| for (i = 0; i < thresh[MSM_VDD_RESTRICTION].thresh_ct; i++) { |
| ret = therm_get_temp( |
| thresh[MSM_VDD_RESTRICTION].thresh_list[i].sensor_id, |
| thresh[MSM_VDD_RESTRICTION].thresh_list[i].id_type, |
| &temp); |
| if (ret) { |
| pr_err("Unable to read TSENS sensor:%d. err:%d\n", |
| thresh[MSM_VDD_RESTRICTION].thresh_list[i].sensor_id, |
| ret); |
| dis_cnt++; |
| continue; |
| } |
| if (temp <= msm_thermal_info.vdd_rstr_temp_degC) { |
| ret = vdd_restriction_apply_all(1); |
| if (ret) { |
				pr_err(
				"Enable vdd rstr for all failed. err:%d\n",
					ret);
| goto exit; |
| } |
| pr_debug("Enabled Vdd Restriction tsens:%d. Temp:%ld\n", |
| thresh[MSM_VDD_RESTRICTION].thresh_list[i].sensor_id, |
| temp); |
| goto exit; |
| } else if (temp > msm_thermal_info.vdd_rstr_temp_hyst_degC) |
| dis_cnt++; |
| } |
| if (dis_cnt == max_tsens_num) { |
| ret = vdd_restriction_apply_all(0); |
| if (ret) { |
| pr_err("Disable vdd rstr for all failed. err:%d\n", |
| ret); |
| goto exit; |
| } |
| pr_debug("Disabled Vdd Restriction\n"); |
| } |
| exit: |
| mutex_unlock(&vdd_rstr_mutex); |
| return ret; |
| } |
| |
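/*
 * PMIC software mode control: force PWM mode when any sensor is hot and
 * restore AUTO mode once all sensors are at or below the hysteresis level.
 */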
| static int do_psm(void) |
| { |
| long temp = 0; |
| int ret = 0; |
| int i = 0; |
| int auto_cnt = 0; |
| |
| mutex_lock(&psm_mutex); |
| for (i = 0; i < max_tsens_num; i++) { |
| ret = therm_get_temp(tsens_id_map[i], THERM_TSENS_ID, &temp); |
| if (ret) { |
| pr_err("Unable to read TSENS sensor:%d. err:%d\n", |
| tsens_id_map[i], ret); |
| auto_cnt++; |
| continue; |
| } |
| |
| /* |
| * As long as one sensor is above the threshold, set PWM mode |
| * on all rails, and loop stops. Set auto mode when all rails |
| * are below thershold |
| */ |
| if (temp > msm_thermal_info.psm_temp_degC) { |
| ret = psm_set_mode_all(PMIC_PWM_MODE); |
| if (ret) { |
| pr_err("Set pwm mode for all failed. err:%d\n", |
| ret); |
| goto exit; |
| } |
| pr_debug("Requested PMIC PWM Mode tsens:%d. Temp:%ld\n", |
| tsens_id_map[i], temp); |
| break; |
| } else if (temp <= msm_thermal_info.psm_temp_hyst_degC) |
| auto_cnt++; |
| } |
| |
| if (auto_cnt == max_tsens_num) { |
| ret = psm_set_mode_all(PMIC_AUTO_MODE); |
| if (ret) { |
| pr_err("Set auto mode for all failed. err:%d\n", ret); |
| goto exit; |
| } |
| pr_debug("Requested PMIC AUTO Mode\n"); |
| } |
| |
| exit: |
| mutex_unlock(&psm_mutex); |
| return ret; |
| } |
| |
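/*
 * Polling-mode frequency mitigation: step the frequency limit index down
 * when hot and back up when cool, then apply the new maximum to every CPU in
 * the bootup frequency control mask.
 */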
| static void do_freq_control(long temp) |
| { |
| uint32_t cpu = 0; |
| uint32_t max_freq = cpus[cpu].limited_max_freq; |
| |
| if (core_ptr) |
| return do_cluster_freq_ctrl(temp); |
| if (!freq_table_get) |
| return; |
| |
| if (temp >= msm_thermal_info.limit_temp_degC) { |
| if (limit_idx == limit_idx_low) |
| return; |
| |
| limit_idx -= msm_thermal_info.bootup_freq_step; |
| if (limit_idx < limit_idx_low) |
| limit_idx = limit_idx_low; |
| max_freq = table[limit_idx].frequency; |
| } else if (temp < msm_thermal_info.limit_temp_degC - |
| msm_thermal_info.temp_hysteresis_degC) { |
| if (limit_idx == limit_idx_high) |
| return; |
| |
| limit_idx += msm_thermal_info.bootup_freq_step; |
| if (limit_idx >= limit_idx_high) { |
| limit_idx = limit_idx_high; |
| max_freq = UINT_MAX; |
| } else |
| max_freq = table[limit_idx].frequency; |
| } |
| |
| if (max_freq == cpus[cpu].limited_max_freq) |
| return; |
| |
| /* Update new limits */ |
| get_online_cpus(); |
| for_each_possible_cpu(cpu) { |
| if (!(msm_thermal_info.bootup_freq_control_mask & BIT(cpu))) |
| continue; |
| pr_info("Limiting CPU%d max frequency to %u. Temp:%ld\n", |
| cpu, max_freq, temp); |
| cpus[cpu].limited_max_freq = max_freq; |
| if (!SYNC_CORE(cpu)) |
| update_cpu_freq(cpu); |
| } |
| update_cluster_freq(); |
| put_online_cpus(); |
| } |
| |
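/*
 * Main polling work: check for thermal reset, read the primary sensor and
 * run each polling-based mitigation, then reschedule while polling is
 * enabled.
 */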
| static void check_temp(struct work_struct *work) |
| { |
| long temp = 0; |
| int ret = 0; |
| |
| do_therm_reset(); |
| |
| ret = therm_get_temp(msm_thermal_info.sensor_id, THERM_TSENS_ID, &temp); |
| if (ret) { |
| pr_err("Unable to read TSENS sensor:%d. err:%d\n", |
| msm_thermal_info.sensor_id, ret); |
| goto reschedule; |
| } |
| do_core_control(temp); |
| do_vdd_mx(); |
| do_psm(); |
| do_gfx_phase_cond(); |
| do_cx_phase_cond(); |
| do_ocr(); |
| |
| /* |
| ** All mitigation involving CPU frequency should be |
| ** placed below this check. The mitigation following this |
| ** frequency table check, should be able to handle the failure case. |
| */ |
| if (!freq_table_get) |
| check_freq_table(); |
| |
| do_vdd_restriction(); |
| do_freq_control(temp); |
| |
| reschedule: |
| if (polling_enabled) |
| schedule_delayed_work(&check_temp_work, |
| msecs_to_jiffies(msm_thermal_info.poll_ms)); |
| } |
| |
| static int __ref msm_thermal_cpu_callback(struct notifier_block *nfb, |
| unsigned long action, void *hcpu) |
| { |
| uint32_t cpu = (uintptr_t)hcpu; |
| |
| if (action == CPU_UP_PREPARE || action == CPU_UP_PREPARE_FROZEN) { |
| if (!cpumask_test_and_set_cpu(cpu, cpus_previously_online)) |
| pr_debug("Total prev cores online tracked %u\n", |
| cpumask_weight(cpus_previously_online)); |
| if (core_control_enabled && |
| (msm_thermal_info.core_control_mask & BIT(cpu)) && |
| (cpus_offlined & BIT(cpu))) { |
| pr_debug("Preventing CPU%d from coming online.\n", |
| cpu); |
| return NOTIFY_BAD; |
| } |
| } else if (action == CPU_DOWN_PREPARE || |
| action == CPU_DOWN_PREPARE_FROZEN) { |
| if (!cpumask_test_and_set_cpu(cpu, cpus_previously_online)) |
| pr_debug("Total prev cores online tracked %u\n", |
| cpumask_weight(cpus_previously_online)); |
| } |
| |
| pr_debug("voting for CPU%d to be online\n", cpu); |
| return NOTIFY_OK; |
| } |
| |
| static struct notifier_block __refdata msm_thermal_cpu_notifier = { |
| .notifier_call = msm_thermal_cpu_callback, |
| }; |
| static int hotplug_notify(enum thermal_trip_type type, int temp, void *data) |
| { |
| struct cpu_info *cpu_node = (struct cpu_info *)data; |
| |
| pr_info_ratelimited("%s reach temp threshold: %d\n", |
| cpu_node->sensor_type, temp); |
| |
| if (!(msm_thermal_info.core_control_mask & BIT(cpu_node->cpu))) |
| return 0; |
| switch (type) { |
| case THERMAL_TRIP_CONFIGURABLE_HI: |
| if (!(cpu_node->offline)) |
| cpu_node->offline = 1; |
| break; |
| case THERMAL_TRIP_CONFIGURABLE_LOW: |
| if (cpu_node->offline) |
| cpu_node->offline = 0; |
| break; |
| default: |
| break; |
| } |
| if (hotplug_task) { |
| cpu_node->hotplug_thresh_clear = true; |
| complete(&hotplug_notify_complete); |
| } else |
| pr_err("Hotplug task is not initialized\n"); |
| return 0; |
| } |
| /* Adjust cpus offlined bit based on temperature reading. */ |
| static int hotplug_init_cpu_offlined(void) |
| { |
| long temp = 0; |
| uint32_t cpu = 0; |
| |
| if (!hotplug_enabled) |
| return 0; |
| |
| mutex_lock(&core_control_mutex); |
| for_each_possible_cpu(cpu) { |
| if (!(msm_thermal_info.core_control_mask & BIT(cpus[cpu].cpu))) |
| continue; |
| if (therm_get_temp(cpus[cpu].sensor_id, cpus[cpu].id_type, |
| &temp)) { |
| pr_err("Unable to read TSENS sensor:%d.\n", |
| cpus[cpu].sensor_id); |
| mutex_unlock(&core_control_mutex); |
| return -EINVAL; |
| } |
| |
| if (temp >= msm_thermal_info.hotplug_temp_degC) |
| cpus[cpu].offline = 1; |
| else if (temp <= (msm_thermal_info.hotplug_temp_degC - |
| msm_thermal_info.hotplug_temp_hysteresis_degC)) |
| cpus[cpu].offline = 0; |
| } |
| mutex_unlock(&core_control_mutex); |
| |
| if (hotplug_task) |
| complete(&hotplug_notify_complete); |
| else { |
| pr_err("Hotplug task is not initialized\n"); |
| return -EINVAL; |
| } |
| return 0; |
| } |
| |
| static void hotplug_init(void) |
| { |
| uint32_t cpu = 0; |
| struct sensor_threshold *hi_thresh = NULL, *low_thresh = NULL; |
| |
| if (hotplug_task) |
|