From 84e9c58c21663b4008ddec3c431aef191c993ffd Mon Sep 17 00:00:00 2001
From: Ulf Hansson
Date: Wed, 5 Jul 2023 17:23:33 +0200
Subject: soc: qcom: Move power-domain drivers to the genpd dir

To simplify maintenance, let's move the qcom power-domain drivers to the
new genpd directory. Going forward, patches are intended to be managed
through a separate git tree, according to MAINTAINERS.

Cc: Bjorn Andersson
Cc: Konrad Dybcio
Cc: Andy Gross
Cc:
Acked-by: Bjorn Andersson
Signed-off-by: Ulf Hansson
---
 MAINTAINERS                 |    2 +-
 drivers/genpd/Makefile      |    1 +
 drivers/genpd/qcom/Makefile |    4 +
 drivers/genpd/qcom/cpr.c    | 1757 +++++++++++++++++++++++++++++++++++++++++++
 drivers/genpd/qcom/rpmhpd.c |  870 +++++++++++++++++++++
 drivers/genpd/qcom/rpmpd.c  |  992 ++++++++++++++++++++++++
 drivers/soc/qcom/Makefile   |    3 -
 drivers/soc/qcom/cpr.c      | 1757 -------------------------------------------
 drivers/soc/qcom/rpmhpd.c   |  870 ---------------------
 drivers/soc/qcom/rpmpd.c    |  992 ------------------------
 10 files changed, 3625 insertions(+), 3623 deletions(-)
 create mode 100644 drivers/genpd/qcom/Makefile
 create mode 100644 drivers/genpd/qcom/cpr.c
 create mode 100644 drivers/genpd/qcom/rpmhpd.c
 create mode 100644 drivers/genpd/qcom/rpmpd.c
 delete mode 100644 drivers/soc/qcom/cpr.c
 delete mode 100644 drivers/soc/qcom/rpmhpd.c
 delete mode 100644 drivers/soc/qcom/rpmpd.c

diff --git a/MAINTAINERS b/MAINTAINERS
index 77629ab4a5f0..9abd868abfc8 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -17524,7 +17524,7 @@ L:	linux-pm@vger.kernel.org
 L:	linux-arm-msm@vger.kernel.org
 S:	Maintained
 F:	Documentation/devicetree/bindings/power/avs/qcom,cpr.yaml
-F:	drivers/soc/qcom/cpr.c
+F:	drivers/genpd/qcom/cpr.c
 
 QUALCOMM CPUFREQ DRIVER MSM8996/APQ8096
 M:	Ilia Lin
diff --git a/drivers/genpd/Makefile b/drivers/genpd/Makefile
index 1a0a56925756..dfdea14e2a8a 100644
--- a/drivers/genpd/Makefile
+++ b/drivers/genpd/Makefile
@@ -4,3 +4,4 @@ obj-y	+= amlogic/
 obj-y	+= apple/
 obj-y	+= bcm/
 obj-y	+= mediatek/
+obj-y	+= qcom/
diff --git a/drivers/genpd/qcom/Makefile b/drivers/genpd/qcom/Makefile
new file mode 100644
index 000000000000..403dfc5af095
--- /dev/null
+++ b/drivers/genpd/qcom/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_QCOM_CPR)		+= cpr.o
+obj-$(CONFIG_QCOM_RPMPD)	+= rpmpd.o
+obj-$(CONFIG_QCOM_RPMHPD)	+= rpmhpd.o
diff --git a/drivers/genpd/qcom/cpr.c b/drivers/genpd/qcom/cpr.c
new file mode 100644
index 000000000000..144ea68e0920
--- /dev/null
+++ b/drivers/genpd/qcom/cpr.c
@@ -0,0 +1,1757 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2019, Linaro Limited + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Register Offsets for RB-CPR and Bit Definitions */ + +/* RBCPR Version Register */ +#define REG_RBCPR_VERSION 0 +#define RBCPR_VER_2 0x02 +#define FLAGS_IGNORE_1ST_IRQ_STATUS BIT(0) + +/* RBCPR Gate Count and Target Registers */ +#define REG_RBCPR_GCNT_TARGET(n) (0x60 + 4 * (n)) + +#define RBCPR_GCNT_TARGET_TARGET_SHIFT 0 +#define RBCPR_GCNT_TARGET_TARGET_MASK GENMASK(11, 0) +#define RBCPR_GCNT_TARGET_GCNT_SHIFT 12 +#define RBCPR_GCNT_TARGET_GCNT_MASK GENMASK(9, 0) + +/* RBCPR Timer Control */ +#define REG_RBCPR_TIMER_INTERVAL 0x44 +#define REG_RBIF_TIMER_ADJUST 0x4c + +#define RBIF_TIMER_ADJ_CONS_UP_MASK GENMASK(3, 0) +#define RBIF_TIMER_ADJ_CONS_UP_SHIFT 0 +#define RBIF_TIMER_ADJ_CONS_DOWN_MASK GENMASK(3, 0) +#define RBIF_TIMER_ADJ_CONS_DOWN_SHIFT 4 +#define RBIF_TIMER_ADJ_CLAMP_INT_MASK GENMASK(7, 0) +#define RBIF_TIMER_ADJ_CLAMP_INT_SHIFT 8 + +/* RBCPR Config Register */ +#define REG_RBIF_LIMIT 0x48 +#define RBIF_LIMIT_CEILING_MASK GENMASK(5, 0) +#define RBIF_LIMIT_CEILING_SHIFT 6 +#define RBIF_LIMIT_FLOOR_BITS 6 +#define RBIF_LIMIT_FLOOR_MASK GENMASK(5, 0) + +#define RBIF_LIMIT_CEILING_DEFAULT RBIF_LIMIT_CEILING_MASK +#define RBIF_LIMIT_FLOOR_DEFAULT 0 + +#define REG_RBIF_SW_VLEVEL 0x94 +#define RBIF_SW_VLEVEL_DEFAULT 0x20 + +#define REG_RBCPR_STEP_QUOT 0x80 +#define RBCPR_STEP_QUOT_STEPQUOT_MASK GENMASK(7, 0) +#define RBCPR_STEP_QUOT_IDLE_CLK_MASK GENMASK(3, 0) +#define RBCPR_STEP_QUOT_IDLE_CLK_SHIFT 8 + +/* RBCPR Control Register */ +#define REG_RBCPR_CTL 0x90 + +#define RBCPR_CTL_LOOP_EN BIT(0) +#define RBCPR_CTL_TIMER_EN BIT(3) +#define RBCPR_CTL_SW_AUTO_CONT_ACK_EN BIT(5) +#define RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN BIT(6) +#define RBCPR_CTL_COUNT_MODE BIT(10) +#define RBCPR_CTL_UP_THRESHOLD_MASK GENMASK(3, 0) +#define RBCPR_CTL_UP_THRESHOLD_SHIFT 24 +#define RBCPR_CTL_DN_THRESHOLD_MASK GENMASK(3, 0) +#define RBCPR_CTL_DN_THRESHOLD_SHIFT 28 + +/* RBCPR Ack/Nack Response */ +#define REG_RBIF_CONT_ACK_CMD 0x98 +#define REG_RBIF_CONT_NACK_CMD 0x9c + +/* RBCPR Result status Register */ +#define REG_RBCPR_RESULT_0 0xa0 + +#define RBCPR_RESULT0_BUSY_SHIFT 19 +#define RBCPR_RESULT0_BUSY_MASK BIT(RBCPR_RESULT0_BUSY_SHIFT) +#define RBCPR_RESULT0_ERROR_LT0_SHIFT 18 +#define RBCPR_RESULT0_ERROR_SHIFT 6 +#define RBCPR_RESULT0_ERROR_MASK GENMASK(11, 0) +#define RBCPR_RESULT0_ERROR_STEPS_SHIFT 2 +#define RBCPR_RESULT0_ERROR_STEPS_MASK GENMASK(3, 0) +#define RBCPR_RESULT0_STEP_UP_SHIFT 1 + +/* RBCPR Interrupt Control Register */ +#define REG_RBIF_IRQ_EN(n) (0x100 + 4 * (n)) +#define REG_RBIF_IRQ_CLEAR 0x110 +#define REG_RBIF_IRQ_STATUS 0x114 + +#define CPR_INT_DONE BIT(0) +#define CPR_INT_MIN BIT(1) +#define CPR_INT_DOWN BIT(2) +#define CPR_INT_MID BIT(3) +#define CPR_INT_UP BIT(4) +#define CPR_INT_MAX BIT(5) +#define CPR_INT_CLAMP BIT(6) +#define CPR_INT_ALL (CPR_INT_DONE | CPR_INT_MIN | CPR_INT_DOWN | \ + CPR_INT_MID | CPR_INT_UP | CPR_INT_MAX | CPR_INT_CLAMP) +#define CPR_INT_DEFAULT (CPR_INT_UP | CPR_INT_DOWN) + +#define CPR_NUM_RING_OSC 8 + +/* CPR eFuse parameters */ +#define CPR_FUSE_TARGET_QUOT_BITS_MASK GENMASK(11, 0) + +#define CPR_FUSE_MIN_QUOT_DIFF 50 + +#define FUSE_REVISION_UNKNOWN (-1) + +enum voltage_change_dir { + NO_CHANGE, + DOWN, + UP, +}; + +struct cpr_fuse { + char *ring_osc; + char *init_voltage; + char *quotient; + 
char *quotient_offset; +}; + +struct fuse_corner_data { + int ref_uV; + int max_uV; + int min_uV; + int max_volt_scale; + int max_quot_scale; + /* fuse quot */ + int quot_offset; + int quot_scale; + int quot_adjust; + /* fuse quot_offset */ + int quot_offset_scale; + int quot_offset_adjust; +}; + +struct cpr_fuses { + int init_voltage_step; + int init_voltage_width; + struct fuse_corner_data *fuse_corner_data; +}; + +struct corner_data { + unsigned int fuse_corner; + unsigned long freq; +}; + +struct cpr_desc { + unsigned int num_fuse_corners; + int min_diff_quot; + int *step_quot; + + unsigned int timer_delay_us; + unsigned int timer_cons_up; + unsigned int timer_cons_down; + unsigned int up_threshold; + unsigned int down_threshold; + unsigned int idle_clocks; + unsigned int gcnt_us; + unsigned int vdd_apc_step_up_limit; + unsigned int vdd_apc_step_down_limit; + unsigned int clamp_timer_interval; + + struct cpr_fuses cpr_fuses; + bool reduce_to_fuse_uV; + bool reduce_to_corner_uV; +}; + +struct acc_desc { + unsigned int enable_reg; + u32 enable_mask; + + struct reg_sequence *config; + struct reg_sequence *settings; + int num_regs_per_fuse; +}; + +struct cpr_acc_desc { + const struct cpr_desc *cpr_desc; + const struct acc_desc *acc_desc; +}; + +struct fuse_corner { + int min_uV; + int max_uV; + int uV; + int quot; + int step_quot; + const struct reg_sequence *accs; + int num_accs; + unsigned long max_freq; + u8 ring_osc_idx; +}; + +struct corner { + int min_uV; + int max_uV; + int uV; + int last_uV; + int quot_adjust; + u32 save_ctl; + u32 save_irq; + unsigned long freq; + struct fuse_corner *fuse_corner; +}; + +struct cpr_drv { + unsigned int num_corners; + unsigned int ref_clk_khz; + + struct generic_pm_domain pd; + struct device *dev; + struct device *attached_cpu_dev; + struct mutex lock; + void __iomem *base; + struct corner *corner; + struct regulator *vdd_apc; + struct clk *cpu_clk; + struct regmap *tcsr; + bool loop_disabled; + u32 gcnt; + unsigned long flags; + + struct fuse_corner *fuse_corners; + struct corner *corners; + + const struct cpr_desc *desc; + const struct acc_desc *acc_desc; + const struct cpr_fuse *cpr_fuses; + + struct dentry *debugfs; +}; + +static bool cpr_is_allowed(struct cpr_drv *drv) +{ + return !drv->loop_disabled; +} + +static void cpr_write(struct cpr_drv *drv, u32 offset, u32 value) +{ + writel_relaxed(value, drv->base + offset); +} + +static u32 cpr_read(struct cpr_drv *drv, u32 offset) +{ + return readl_relaxed(drv->base + offset); +} + +static void +cpr_masked_write(struct cpr_drv *drv, u32 offset, u32 mask, u32 value) +{ + u32 val; + + val = readl_relaxed(drv->base + offset); + val &= ~mask; + val |= value & mask; + writel_relaxed(val, drv->base + offset); +} + +static void cpr_irq_clr(struct cpr_drv *drv) +{ + cpr_write(drv, REG_RBIF_IRQ_CLEAR, CPR_INT_ALL); +} + +static void cpr_irq_clr_nack(struct cpr_drv *drv) +{ + cpr_irq_clr(drv); + cpr_write(drv, REG_RBIF_CONT_NACK_CMD, 1); +} + +static void cpr_irq_clr_ack(struct cpr_drv *drv) +{ + cpr_irq_clr(drv); + cpr_write(drv, REG_RBIF_CONT_ACK_CMD, 1); +} + +static void cpr_irq_set(struct cpr_drv *drv, u32 int_bits) +{ + cpr_write(drv, REG_RBIF_IRQ_EN(0), int_bits); +} + +static void cpr_ctl_modify(struct cpr_drv *drv, u32 mask, u32 value) +{ + cpr_masked_write(drv, REG_RBCPR_CTL, mask, value); +} + +static void cpr_ctl_enable(struct cpr_drv *drv, struct corner *corner) +{ + u32 val, mask; + const struct cpr_desc *desc = drv->desc; + + /* Program Consecutive Up & Down */ + val = desc->timer_cons_down << 
RBIF_TIMER_ADJ_CONS_DOWN_SHIFT; + val |= desc->timer_cons_up << RBIF_TIMER_ADJ_CONS_UP_SHIFT; + mask = RBIF_TIMER_ADJ_CONS_UP_MASK | RBIF_TIMER_ADJ_CONS_DOWN_MASK; + cpr_masked_write(drv, REG_RBIF_TIMER_ADJUST, mask, val); + cpr_masked_write(drv, REG_RBCPR_CTL, + RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN | + RBCPR_CTL_SW_AUTO_CONT_ACK_EN, + corner->save_ctl); + cpr_irq_set(drv, corner->save_irq); + + if (cpr_is_allowed(drv) && corner->max_uV > corner->min_uV) + val = RBCPR_CTL_LOOP_EN; + else + val = 0; + cpr_ctl_modify(drv, RBCPR_CTL_LOOP_EN, val); +} + +static void cpr_ctl_disable(struct cpr_drv *drv) +{ + cpr_irq_set(drv, 0); + cpr_ctl_modify(drv, RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN | + RBCPR_CTL_SW_AUTO_CONT_ACK_EN, 0); + cpr_masked_write(drv, REG_RBIF_TIMER_ADJUST, + RBIF_TIMER_ADJ_CONS_UP_MASK | + RBIF_TIMER_ADJ_CONS_DOWN_MASK, 0); + cpr_irq_clr(drv); + cpr_write(drv, REG_RBIF_CONT_ACK_CMD, 1); + cpr_write(drv, REG_RBIF_CONT_NACK_CMD, 1); + cpr_ctl_modify(drv, RBCPR_CTL_LOOP_EN, 0); +} + +static bool cpr_ctl_is_enabled(struct cpr_drv *drv) +{ + u32 reg_val; + + reg_val = cpr_read(drv, REG_RBCPR_CTL); + return reg_val & RBCPR_CTL_LOOP_EN; +} + +static bool cpr_ctl_is_busy(struct cpr_drv *drv) +{ + u32 reg_val; + + reg_val = cpr_read(drv, REG_RBCPR_RESULT_0); + return reg_val & RBCPR_RESULT0_BUSY_MASK; +} + +static void cpr_corner_save(struct cpr_drv *drv, struct corner *corner) +{ + corner->save_ctl = cpr_read(drv, REG_RBCPR_CTL); + corner->save_irq = cpr_read(drv, REG_RBIF_IRQ_EN(0)); +} + +static void cpr_corner_restore(struct cpr_drv *drv, struct corner *corner) +{ + u32 gcnt, ctl, irq, ro_sel, step_quot; + struct fuse_corner *fuse = corner->fuse_corner; + const struct cpr_desc *desc = drv->desc; + int i; + + ro_sel = fuse->ring_osc_idx; + gcnt = drv->gcnt; + gcnt |= fuse->quot - corner->quot_adjust; + + /* Program the step quotient and idle clocks */ + step_quot = desc->idle_clocks << RBCPR_STEP_QUOT_IDLE_CLK_SHIFT; + step_quot |= fuse->step_quot & RBCPR_STEP_QUOT_STEPQUOT_MASK; + cpr_write(drv, REG_RBCPR_STEP_QUOT, step_quot); + + /* Clear the target quotient value and gate count of all ROs */ + for (i = 0; i < CPR_NUM_RING_OSC; i++) + cpr_write(drv, REG_RBCPR_GCNT_TARGET(i), 0); + + cpr_write(drv, REG_RBCPR_GCNT_TARGET(ro_sel), gcnt); + ctl = corner->save_ctl; + cpr_write(drv, REG_RBCPR_CTL, ctl); + irq = corner->save_irq; + cpr_irq_set(drv, irq); + dev_dbg(drv->dev, "gcnt = %#08x, ctl = %#08x, irq = %#08x\n", gcnt, + ctl, irq); +} + +static void cpr_set_acc(struct regmap *tcsr, struct fuse_corner *f, + struct fuse_corner *end) +{ + if (f == end) + return; + + if (f < end) { + for (f += 1; f <= end; f++) + regmap_multi_reg_write(tcsr, f->accs, f->num_accs); + } else { + for (f -= 1; f >= end; f--) + regmap_multi_reg_write(tcsr, f->accs, f->num_accs); + } +} + +static int cpr_pre_voltage(struct cpr_drv *drv, + struct fuse_corner *fuse_corner, + enum voltage_change_dir dir) +{ + struct fuse_corner *prev_fuse_corner = drv->corner->fuse_corner; + + if (drv->tcsr && dir == DOWN) + cpr_set_acc(drv->tcsr, prev_fuse_corner, fuse_corner); + + return 0; +} + +static int cpr_post_voltage(struct cpr_drv *drv, + struct fuse_corner *fuse_corner, + enum voltage_change_dir dir) +{ + struct fuse_corner *prev_fuse_corner = drv->corner->fuse_corner; + + if (drv->tcsr && dir == UP) + cpr_set_acc(drv->tcsr, prev_fuse_corner, fuse_corner); + + return 0; +} + +static int cpr_scale_voltage(struct cpr_drv *drv, struct corner *corner, + int new_uV, enum voltage_change_dir dir) +{ + int ret; + struct fuse_corner 
*fuse_corner = corner->fuse_corner; + + ret = cpr_pre_voltage(drv, fuse_corner, dir); + if (ret) + return ret; + + ret = regulator_set_voltage(drv->vdd_apc, new_uV, new_uV); + if (ret) { + dev_err_ratelimited(drv->dev, "failed to set apc voltage %d\n", + new_uV); + return ret; + } + + ret = cpr_post_voltage(drv, fuse_corner, dir); + if (ret) + return ret; + + return 0; +} + +static unsigned int cpr_get_cur_perf_state(struct cpr_drv *drv) +{ + return drv->corner ? drv->corner - drv->corners + 1 : 0; +} + +static int cpr_scale(struct cpr_drv *drv, enum voltage_change_dir dir) +{ + u32 val, error_steps, reg_mask; + int last_uV, new_uV, step_uV, ret; + struct corner *corner; + const struct cpr_desc *desc = drv->desc; + + if (dir != UP && dir != DOWN) + return 0; + + step_uV = regulator_get_linear_step(drv->vdd_apc); + if (!step_uV) + return -EINVAL; + + corner = drv->corner; + + val = cpr_read(drv, REG_RBCPR_RESULT_0); + + error_steps = val >> RBCPR_RESULT0_ERROR_STEPS_SHIFT; + error_steps &= RBCPR_RESULT0_ERROR_STEPS_MASK; + last_uV = corner->last_uV; + + if (dir == UP) { + if (desc->clamp_timer_interval && + error_steps < desc->up_threshold) { + /* + * Handle the case where another measurement started + * after the interrupt was triggered due to a core + * exiting from power collapse. + */ + error_steps = max(desc->up_threshold, + desc->vdd_apc_step_up_limit); + } + + if (last_uV >= corner->max_uV) { + cpr_irq_clr_nack(drv); + + /* Maximize the UP threshold */ + reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK; + reg_mask <<= RBCPR_CTL_UP_THRESHOLD_SHIFT; + val = reg_mask; + cpr_ctl_modify(drv, reg_mask, val); + + /* Disable UP interrupt */ + cpr_irq_set(drv, CPR_INT_DEFAULT & ~CPR_INT_UP); + + return 0; + } + + if (error_steps > desc->vdd_apc_step_up_limit) + error_steps = desc->vdd_apc_step_up_limit; + + /* Calculate new voltage */ + new_uV = last_uV + error_steps * step_uV; + new_uV = min(new_uV, corner->max_uV); + + dev_dbg(drv->dev, + "UP: -> new_uV: %d last_uV: %d perf state: %u\n", + new_uV, last_uV, cpr_get_cur_perf_state(drv)); + } else { + if (desc->clamp_timer_interval && + error_steps < desc->down_threshold) { + /* + * Handle the case where another measurement started + * after the interrupt was triggered due to a core + * exiting from power collapse. 
+ */ + error_steps = max(desc->down_threshold, + desc->vdd_apc_step_down_limit); + } + + if (last_uV <= corner->min_uV) { + cpr_irq_clr_nack(drv); + + /* Enable auto nack down */ + reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN; + val = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN; + + cpr_ctl_modify(drv, reg_mask, val); + + /* Disable DOWN interrupt */ + cpr_irq_set(drv, CPR_INT_DEFAULT & ~CPR_INT_DOWN); + + return 0; + } + + if (error_steps > desc->vdd_apc_step_down_limit) + error_steps = desc->vdd_apc_step_down_limit; + + /* Calculate new voltage */ + new_uV = last_uV - error_steps * step_uV; + new_uV = max(new_uV, corner->min_uV); + + dev_dbg(drv->dev, + "DOWN: -> new_uV: %d last_uV: %d perf state: %u\n", + new_uV, last_uV, cpr_get_cur_perf_state(drv)); + } + + ret = cpr_scale_voltage(drv, corner, new_uV, dir); + if (ret) { + cpr_irq_clr_nack(drv); + return ret; + } + drv->corner->last_uV = new_uV; + + if (dir == UP) { + /* Disable auto nack down */ + reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN; + val = 0; + } else { + /* Restore default threshold for UP */ + reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK; + reg_mask <<= RBCPR_CTL_UP_THRESHOLD_SHIFT; + val = desc->up_threshold; + val <<= RBCPR_CTL_UP_THRESHOLD_SHIFT; + } + + cpr_ctl_modify(drv, reg_mask, val); + + /* Re-enable default interrupts */ + cpr_irq_set(drv, CPR_INT_DEFAULT); + + /* Ack */ + cpr_irq_clr_ack(drv); + + return 0; +} + +static irqreturn_t cpr_irq_handler(int irq, void *dev) +{ + struct cpr_drv *drv = dev; + const struct cpr_desc *desc = drv->desc; + irqreturn_t ret = IRQ_HANDLED; + u32 val; + + mutex_lock(&drv->lock); + + val = cpr_read(drv, REG_RBIF_IRQ_STATUS); + if (drv->flags & FLAGS_IGNORE_1ST_IRQ_STATUS) + val = cpr_read(drv, REG_RBIF_IRQ_STATUS); + + dev_dbg(drv->dev, "IRQ_STATUS = %#02x\n", val); + + if (!cpr_ctl_is_enabled(drv)) { + dev_dbg(drv->dev, "CPR is disabled\n"); + ret = IRQ_NONE; + } else if (cpr_ctl_is_busy(drv) && !desc->clamp_timer_interval) { + dev_dbg(drv->dev, "CPR measurement is not ready\n"); + } else if (!cpr_is_allowed(drv)) { + val = cpr_read(drv, REG_RBCPR_CTL); + dev_err_ratelimited(drv->dev, + "Interrupt broken? 
RBCPR_CTL = %#02x\n", + val); + ret = IRQ_NONE; + } else { + /* + * Following sequence of handling is as per each IRQ's + * priority + */ + if (val & CPR_INT_UP) { + cpr_scale(drv, UP); + } else if (val & CPR_INT_DOWN) { + cpr_scale(drv, DOWN); + } else if (val & CPR_INT_MIN) { + cpr_irq_clr_nack(drv); + } else if (val & CPR_INT_MAX) { + cpr_irq_clr_nack(drv); + } else if (val & CPR_INT_MID) { + /* RBCPR_CTL_SW_AUTO_CONT_ACK_EN is enabled */ + dev_dbg(drv->dev, "IRQ occurred for Mid Flag\n"); + } else { + dev_dbg(drv->dev, + "IRQ occurred for unknown flag (%#08x)\n", val); + } + + /* Save register values for the corner */ + cpr_corner_save(drv, drv->corner); + } + + mutex_unlock(&drv->lock); + + return ret; +} + +static int cpr_enable(struct cpr_drv *drv) +{ + int ret; + + ret = regulator_enable(drv->vdd_apc); + if (ret) + return ret; + + mutex_lock(&drv->lock); + + if (cpr_is_allowed(drv) && drv->corner) { + cpr_irq_clr(drv); + cpr_corner_restore(drv, drv->corner); + cpr_ctl_enable(drv, drv->corner); + } + + mutex_unlock(&drv->lock); + + return 0; +} + +static int cpr_disable(struct cpr_drv *drv) +{ + mutex_lock(&drv->lock); + + if (cpr_is_allowed(drv)) { + cpr_ctl_disable(drv); + cpr_irq_clr(drv); + } + + mutex_unlock(&drv->lock); + + return regulator_disable(drv->vdd_apc); +} + +static int cpr_config(struct cpr_drv *drv) +{ + int i; + u32 val, gcnt; + struct corner *corner; + const struct cpr_desc *desc = drv->desc; + + /* Disable interrupt and CPR */ + cpr_write(drv, REG_RBIF_IRQ_EN(0), 0); + cpr_write(drv, REG_RBCPR_CTL, 0); + + /* Program the default HW ceiling, floor and vlevel */ + val = (RBIF_LIMIT_CEILING_DEFAULT & RBIF_LIMIT_CEILING_MASK) + << RBIF_LIMIT_CEILING_SHIFT; + val |= RBIF_LIMIT_FLOOR_DEFAULT & RBIF_LIMIT_FLOOR_MASK; + cpr_write(drv, REG_RBIF_LIMIT, val); + cpr_write(drv, REG_RBIF_SW_VLEVEL, RBIF_SW_VLEVEL_DEFAULT); + + /* + * Clear the target quotient value and gate count of all + * ring oscillators + */ + for (i = 0; i < CPR_NUM_RING_OSC; i++) + cpr_write(drv, REG_RBCPR_GCNT_TARGET(i), 0); + + /* Init and save gcnt */ + gcnt = (drv->ref_clk_khz * desc->gcnt_us) / 1000; + gcnt = gcnt & RBCPR_GCNT_TARGET_GCNT_MASK; + gcnt <<= RBCPR_GCNT_TARGET_GCNT_SHIFT; + drv->gcnt = gcnt; + + /* Program the delay count for the timer */ + val = (drv->ref_clk_khz * desc->timer_delay_us) / 1000; + cpr_write(drv, REG_RBCPR_TIMER_INTERVAL, val); + dev_dbg(drv->dev, "Timer count: %#0x (for %d us)\n", val, + desc->timer_delay_us); + + /* Program Consecutive Up & Down */ + val = desc->timer_cons_down << RBIF_TIMER_ADJ_CONS_DOWN_SHIFT; + val |= desc->timer_cons_up << RBIF_TIMER_ADJ_CONS_UP_SHIFT; + val |= desc->clamp_timer_interval << RBIF_TIMER_ADJ_CLAMP_INT_SHIFT; + cpr_write(drv, REG_RBIF_TIMER_ADJUST, val); + + /* Program the control register */ + val = desc->up_threshold << RBCPR_CTL_UP_THRESHOLD_SHIFT; + val |= desc->down_threshold << RBCPR_CTL_DN_THRESHOLD_SHIFT; + val |= RBCPR_CTL_TIMER_EN | RBCPR_CTL_COUNT_MODE; + val |= RBCPR_CTL_SW_AUTO_CONT_ACK_EN; + cpr_write(drv, REG_RBCPR_CTL, val); + + for (i = 0; i < drv->num_corners; i++) { + corner = &drv->corners[i]; + corner->save_ctl = val; + corner->save_irq = CPR_INT_DEFAULT; + } + + cpr_irq_set(drv, CPR_INT_DEFAULT); + + val = cpr_read(drv, REG_RBCPR_VERSION); + if (val <= RBCPR_VER_2) + drv->flags |= FLAGS_IGNORE_1ST_IRQ_STATUS; + + return 0; +} + +static int cpr_set_performance_state(struct generic_pm_domain *domain, + unsigned int state) +{ + struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd); + struct corner *corner, 
*end; + enum voltage_change_dir dir; + int ret = 0, new_uV; + + mutex_lock(&drv->lock); + + dev_dbg(drv->dev, "%s: setting perf state: %u (prev state: %u)\n", + __func__, state, cpr_get_cur_perf_state(drv)); + + /* + * Determine new corner we're going to. + * Remove one since lowest performance state is 1. + */ + corner = drv->corners + state - 1; + end = &drv->corners[drv->num_corners - 1]; + if (corner > end || corner < drv->corners) { + ret = -EINVAL; + goto unlock; + } + + /* Determine direction */ + if (drv->corner > corner) + dir = DOWN; + else if (drv->corner < corner) + dir = UP; + else + dir = NO_CHANGE; + + if (cpr_is_allowed(drv)) + new_uV = corner->last_uV; + else + new_uV = corner->uV; + + if (cpr_is_allowed(drv)) + cpr_ctl_disable(drv); + + ret = cpr_scale_voltage(drv, corner, new_uV, dir); + if (ret) + goto unlock; + + if (cpr_is_allowed(drv)) { + cpr_irq_clr(drv); + if (drv->corner != corner) + cpr_corner_restore(drv, corner); + cpr_ctl_enable(drv, corner); + } + + drv->corner = corner; + +unlock: + mutex_unlock(&drv->lock); + + return ret; +} + +static int +cpr_populate_ring_osc_idx(struct cpr_drv *drv) +{ + struct fuse_corner *fuse = drv->fuse_corners; + struct fuse_corner *end = fuse + drv->desc->num_fuse_corners; + const struct cpr_fuse *fuses = drv->cpr_fuses; + u32 data; + int ret; + + for (; fuse < end; fuse++, fuses++) { + ret = nvmem_cell_read_variable_le_u32(drv->dev, fuses->ring_osc, &data); + if (ret) + return ret; + fuse->ring_osc_idx = data; + } + + return 0; +} + +static int cpr_read_fuse_uV(const struct cpr_desc *desc, + const struct fuse_corner_data *fdata, + const char *init_v_efuse, + int step_volt, + struct cpr_drv *drv) +{ + int step_size_uV, steps, uV; + u32 bits = 0; + int ret; + + ret = nvmem_cell_read_variable_le_u32(drv->dev, init_v_efuse, &bits); + if (ret) + return ret; + + steps = bits & ~BIT(desc->cpr_fuses.init_voltage_width - 1); + /* Not two's complement.. instead highest bit is sign bit */ + if (bits & BIT(desc->cpr_fuses.init_voltage_width - 1)) + steps = -steps; + + step_size_uV = desc->cpr_fuses.init_voltage_step; + + uV = fdata->ref_uV + steps * step_size_uV; + return DIV_ROUND_UP(uV, step_volt) * step_volt; +} + +static int cpr_fuse_corner_init(struct cpr_drv *drv) +{ + const struct cpr_desc *desc = drv->desc; + const struct cpr_fuse *fuses = drv->cpr_fuses; + const struct acc_desc *acc_desc = drv->acc_desc; + int i; + unsigned int step_volt; + struct fuse_corner_data *fdata; + struct fuse_corner *fuse, *end; + int uV; + const struct reg_sequence *accs; + int ret; + + accs = acc_desc->settings; + + step_volt = regulator_get_linear_step(drv->vdd_apc); + if (!step_volt) + return -EINVAL; + + /* Populate fuse_corner members */ + fuse = drv->fuse_corners; + end = &fuse[desc->num_fuse_corners - 1]; + fdata = desc->cpr_fuses.fuse_corner_data; + + for (i = 0; fuse <= end; fuse++, fuses++, i++, fdata++) { + /* + * Update SoC voltages: platforms might choose a different + * regulators than the one used to characterize the algorithms + * (ie, init_voltage_step). 
+ */ + fdata->min_uV = roundup(fdata->min_uV, step_volt); + fdata->max_uV = roundup(fdata->max_uV, step_volt); + + /* Populate uV */ + uV = cpr_read_fuse_uV(desc, fdata, fuses->init_voltage, + step_volt, drv); + if (uV < 0) + return uV; + + fuse->min_uV = fdata->min_uV; + fuse->max_uV = fdata->max_uV; + fuse->uV = clamp(uV, fuse->min_uV, fuse->max_uV); + + if (fuse == end) { + /* + * Allow the highest fuse corner's PVS voltage to + * define the ceiling voltage for that corner in order + * to support SoC's in which variable ceiling values + * are required. + */ + end->max_uV = max(end->max_uV, end->uV); + } + + /* Populate target quotient by scaling */ + ret = nvmem_cell_read_variable_le_u32(drv->dev, fuses->quotient, &fuse->quot); + if (ret) + return ret; + + fuse->quot *= fdata->quot_scale; + fuse->quot += fdata->quot_offset; + fuse->quot += fdata->quot_adjust; + fuse->step_quot = desc->step_quot[fuse->ring_osc_idx]; + + /* Populate acc settings */ + fuse->accs = accs; + fuse->num_accs = acc_desc->num_regs_per_fuse; + accs += acc_desc->num_regs_per_fuse; + } + + /* + * Restrict all fuse corner PVS voltages based upon per corner + * ceiling and floor voltages. + */ + for (fuse = drv->fuse_corners, i = 0; fuse <= end; fuse++, i++) { + if (fuse->uV > fuse->max_uV) + fuse->uV = fuse->max_uV; + else if (fuse->uV < fuse->min_uV) + fuse->uV = fuse->min_uV; + + ret = regulator_is_supported_voltage(drv->vdd_apc, + fuse->min_uV, + fuse->min_uV); + if (!ret) { + dev_err(drv->dev, + "min uV: %d (fuse corner: %d) not supported by regulator\n", + fuse->min_uV, i); + return -EINVAL; + } + + ret = regulator_is_supported_voltage(drv->vdd_apc, + fuse->max_uV, + fuse->max_uV); + if (!ret) { + dev_err(drv->dev, + "max uV: %d (fuse corner: %d) not supported by regulator\n", + fuse->max_uV, i); + return -EINVAL; + } + + dev_dbg(drv->dev, + "fuse corner %d: [%d %d %d] RO%hhu quot %d squot %d\n", + i, fuse->min_uV, fuse->uV, fuse->max_uV, + fuse->ring_osc_idx, fuse->quot, fuse->step_quot); + } + + return 0; +} + +static int cpr_calculate_scaling(const char *quot_offset, + struct cpr_drv *drv, + const struct fuse_corner_data *fdata, + const struct corner *corner) +{ + u32 quot_diff = 0; + unsigned long freq_diff; + int scaling; + const struct fuse_corner *fuse, *prev_fuse; + int ret; + + fuse = corner->fuse_corner; + prev_fuse = fuse - 1; + + if (quot_offset) { + ret = nvmem_cell_read_variable_le_u32(drv->dev, quot_offset, "_diff); + if (ret) + return ret; + + quot_diff *= fdata->quot_offset_scale; + quot_diff += fdata->quot_offset_adjust; + } else { + quot_diff = fuse->quot - prev_fuse->quot; + } + + freq_diff = fuse->max_freq - prev_fuse->max_freq; + freq_diff /= 1000000; /* Convert to MHz */ + scaling = 1000 * quot_diff / freq_diff; + return min(scaling, fdata->max_quot_scale); +} + +static int cpr_interpolate(const struct corner *corner, int step_volt, + const struct fuse_corner_data *fdata) +{ + unsigned long f_high, f_low, f_diff; + int uV_high, uV_low, uV; + u64 temp, temp_limit; + const struct fuse_corner *fuse, *prev_fuse; + + fuse = corner->fuse_corner; + prev_fuse = fuse - 1; + + f_high = fuse->max_freq; + f_low = prev_fuse->max_freq; + uV_high = fuse->uV; + uV_low = prev_fuse->uV; + f_diff = fuse->max_freq - corner->freq; + + /* + * Don't interpolate in the wrong direction. This could happen + * if the adjusted fuse voltage overlaps with the previous fuse's + * adjusted voltage. 
+ */ + if (f_high <= f_low || uV_high <= uV_low || f_high <= corner->freq) + return corner->uV; + + temp = f_diff * (uV_high - uV_low); + temp = div64_ul(temp, f_high - f_low); + + /* + * max_volt_scale has units of uV/MHz while freq values + * have units of Hz. Divide by 1000000 to convert to. + */ + temp_limit = f_diff * fdata->max_volt_scale; + do_div(temp_limit, 1000000); + + uV = uV_high - min(temp, temp_limit); + return roundup(uV, step_volt); +} + +static unsigned int cpr_get_fuse_corner(struct dev_pm_opp *opp) +{ + struct device_node *np; + unsigned int fuse_corner = 0; + + np = dev_pm_opp_get_of_node(opp); + if (of_property_read_u32(np, "qcom,opp-fuse-level", &fuse_corner)) + pr_err("%s: missing 'qcom,opp-fuse-level' property\n", + __func__); + + of_node_put(np); + + return fuse_corner; +} + +static unsigned long cpr_get_opp_hz_for_req(struct dev_pm_opp *ref, + struct device *cpu_dev) +{ + u64 rate = 0; + struct device_node *ref_np; + struct device_node *desc_np; + struct device_node *child_np = NULL; + struct device_node *child_req_np = NULL; + + desc_np = dev_pm_opp_of_get_opp_desc_node(cpu_dev); + if (!desc_np) + return 0; + + ref_np = dev_pm_opp_get_of_node(ref); + if (!ref_np) + goto out_ref; + + do { + of_node_put(child_req_np); + child_np = of_get_next_available_child(desc_np, child_np); + child_req_np = of_parse_phandle(child_np, "required-opps", 0); + } while (child_np && child_req_np != ref_np); + + if (child_np && child_req_np == ref_np) + of_property_read_u64(child_np, "opp-hz", &rate); + + of_node_put(child_req_np); + of_node_put(child_np); + of_node_put(ref_np); +out_ref: + of_node_put(desc_np); + + return (unsigned long) rate; +} + +static int cpr_corner_init(struct cpr_drv *drv) +{ + const struct cpr_desc *desc = drv->desc; + const struct cpr_fuse *fuses = drv->cpr_fuses; + int i, level, scaling = 0; + unsigned int fnum, fc; + const char *quot_offset; + struct fuse_corner *fuse, *prev_fuse; + struct corner *corner, *end; + struct corner_data *cdata; + const struct fuse_corner_data *fdata; + bool apply_scaling; + unsigned long freq_diff, freq_diff_mhz; + unsigned long freq; + int step_volt = regulator_get_linear_step(drv->vdd_apc); + struct dev_pm_opp *opp; + + if (!step_volt) + return -EINVAL; + + corner = drv->corners; + end = &corner[drv->num_corners - 1]; + + cdata = devm_kcalloc(drv->dev, drv->num_corners, + sizeof(struct corner_data), + GFP_KERNEL); + if (!cdata) + return -ENOMEM; + + /* + * Store maximum frequency for each fuse corner based on the frequency + * plan + */ + for (level = 1; level <= drv->num_corners; level++) { + opp = dev_pm_opp_find_level_exact(&drv->pd.dev, level); + if (IS_ERR(opp)) + return -EINVAL; + fc = cpr_get_fuse_corner(opp); + if (!fc) { + dev_pm_opp_put(opp); + return -EINVAL; + } + fnum = fc - 1; + freq = cpr_get_opp_hz_for_req(opp, drv->attached_cpu_dev); + if (!freq) { + dev_pm_opp_put(opp); + return -EINVAL; + } + cdata[level - 1].fuse_corner = fnum; + cdata[level - 1].freq = freq; + + fuse = &drv->fuse_corners[fnum]; + dev_dbg(drv->dev, "freq: %lu level: %u fuse level: %u\n", + freq, dev_pm_opp_get_level(opp) - 1, fnum); + if (freq > fuse->max_freq) + fuse->max_freq = freq; + dev_pm_opp_put(opp); + } + + /* + * Get the quotient adjustment scaling factor, according to: + * + * scaling = min(1000 * (QUOT(corner_N) - QUOT(corner_N-1)) + * / (freq(corner_N) - freq(corner_N-1)), max_factor) + * + * QUOT(corner_N): quotient read from fuse for fuse corner N + * QUOT(corner_N-1): quotient read from fuse for fuse corner (N - 1) + * 
freq(corner_N): max frequency in MHz supported by fuse corner N + * freq(corner_N-1): max frequency in MHz supported by fuse corner + * (N - 1) + * + * Then walk through the corners mapped to each fuse corner + * and calculate the quotient adjustment for each one using the + * following formula: + * + * quot_adjust = (freq_max - freq_corner) * scaling / 1000 + * + * freq_max: max frequency in MHz supported by the fuse corner + * freq_corner: frequency in MHz corresponding to the corner + * scaling: calculated from above equation + * + * + * + + + * | v | + * q | f c o | f c + * u | c l | c + * o | f t | f + * t | c a | c + * | c f g | c f + * | e | + * +--------------- +---------------- + * 0 1 2 3 4 5 6 0 1 2 3 4 5 6 + * corner corner + * + * c = corner + * f = fuse corner + * + */ + for (apply_scaling = false, i = 0; corner <= end; corner++, i++) { + fnum = cdata[i].fuse_corner; + fdata = &desc->cpr_fuses.fuse_corner_data[fnum]; + quot_offset = fuses[fnum].quotient_offset; + fuse = &drv->fuse_corners[fnum]; + if (fnum) + prev_fuse = &drv->fuse_corners[fnum - 1]; + else + prev_fuse = NULL; + + corner->fuse_corner = fuse; + corner->freq = cdata[i].freq; + corner->uV = fuse->uV; + + if (prev_fuse && cdata[i - 1].freq == prev_fuse->max_freq) { + scaling = cpr_calculate_scaling(quot_offset, drv, + fdata, corner); + if (scaling < 0) + return scaling; + + apply_scaling = true; + } else if (corner->freq == fuse->max_freq) { + /* This is a fuse corner; don't scale anything */ + apply_scaling = false; + } + + if (apply_scaling) { + freq_diff = fuse->max_freq - corner->freq; + freq_diff_mhz = freq_diff / 1000000; + corner->quot_adjust = scaling * freq_diff_mhz / 1000; + + corner->uV = cpr_interpolate(corner, step_volt, fdata); + } + + corner->max_uV = fuse->max_uV; + corner->min_uV = fuse->min_uV; + corner->uV = clamp(corner->uV, corner->min_uV, corner->max_uV); + corner->last_uV = corner->uV; + + /* Reduce the ceiling voltage if needed */ + if (desc->reduce_to_corner_uV && corner->uV < corner->max_uV) + corner->max_uV = corner->uV; + else if (desc->reduce_to_fuse_uV && fuse->uV < corner->max_uV) + corner->max_uV = max(corner->min_uV, fuse->uV); + + dev_dbg(drv->dev, "corner %d: [%d %d %d] quot %d\n", i, + corner->min_uV, corner->uV, corner->max_uV, + fuse->quot - corner->quot_adjust); + } + + return 0; +} + +static const struct cpr_fuse *cpr_get_fuses(struct cpr_drv *drv) +{ + const struct cpr_desc *desc = drv->desc; + struct cpr_fuse *fuses; + int i; + + fuses = devm_kcalloc(drv->dev, desc->num_fuse_corners, + sizeof(struct cpr_fuse), + GFP_KERNEL); + if (!fuses) + return ERR_PTR(-ENOMEM); + + for (i = 0; i < desc->num_fuse_corners; i++) { + char tbuf[32]; + + snprintf(tbuf, 32, "cpr_ring_osc%d", i + 1); + fuses[i].ring_osc = devm_kstrdup(drv->dev, tbuf, GFP_KERNEL); + if (!fuses[i].ring_osc) + return ERR_PTR(-ENOMEM); + + snprintf(tbuf, 32, "cpr_init_voltage%d", i + 1); + fuses[i].init_voltage = devm_kstrdup(drv->dev, tbuf, + GFP_KERNEL); + if (!fuses[i].init_voltage) + return ERR_PTR(-ENOMEM); + + snprintf(tbuf, 32, "cpr_quotient%d", i + 1); + fuses[i].quotient = devm_kstrdup(drv->dev, tbuf, GFP_KERNEL); + if (!fuses[i].quotient) + return ERR_PTR(-ENOMEM); + + snprintf(tbuf, 32, "cpr_quotient_offset%d", i + 1); + fuses[i].quotient_offset = devm_kstrdup(drv->dev, tbuf, + GFP_KERNEL); + if (!fuses[i].quotient_offset) + return ERR_PTR(-ENOMEM); + } + + return fuses; +} + +static void cpr_set_loop_allowed(struct cpr_drv *drv) +{ + drv->loop_disabled = false; +} + +static int 
cpr_init_parameters(struct cpr_drv *drv) +{ + const struct cpr_desc *desc = drv->desc; + struct clk *clk; + + clk = clk_get(drv->dev, "ref"); + if (IS_ERR(clk)) + return PTR_ERR(clk); + + drv->ref_clk_khz = clk_get_rate(clk) / 1000; + clk_put(clk); + + if (desc->timer_cons_up > RBIF_TIMER_ADJ_CONS_UP_MASK || + desc->timer_cons_down > RBIF_TIMER_ADJ_CONS_DOWN_MASK || + desc->up_threshold > RBCPR_CTL_UP_THRESHOLD_MASK || + desc->down_threshold > RBCPR_CTL_DN_THRESHOLD_MASK || + desc->idle_clocks > RBCPR_STEP_QUOT_IDLE_CLK_MASK || + desc->clamp_timer_interval > RBIF_TIMER_ADJ_CLAMP_INT_MASK) + return -EINVAL; + + dev_dbg(drv->dev, "up threshold = %u, down threshold = %u\n", + desc->up_threshold, desc->down_threshold); + + return 0; +} + +static int cpr_find_initial_corner(struct cpr_drv *drv) +{ + unsigned long rate; + const struct corner *end; + struct corner *iter; + unsigned int i = 0; + + if (!drv->cpu_clk) { + dev_err(drv->dev, "cannot get rate from NULL clk\n"); + return -EINVAL; + } + + end = &drv->corners[drv->num_corners - 1]; + rate = clk_get_rate(drv->cpu_clk); + + /* + * Some bootloaders set a CPU clock frequency that is not defined + * in the OPP table. When running at an unlisted frequency, + * cpufreq_online() will change to the OPP which has the lowest + * frequency, at or above the unlisted frequency. + * Since cpufreq_online() always "rounds up" in the case of an + * unlisted frequency, this function always "rounds down" in case + * of an unlisted frequency. That way, when cpufreq_online() + * triggers the first ever call to cpr_set_performance_state(), + * it will correctly determine the direction as UP. + */ + for (iter = drv->corners; iter <= end; iter++) { + if (iter->freq > rate) + break; + i++; + if (iter->freq == rate) { + drv->corner = iter; + break; + } + if (iter->freq < rate) + drv->corner = iter; + } + + if (!drv->corner) { + dev_err(drv->dev, "boot up corner not found\n"); + return -EINVAL; + } + + dev_dbg(drv->dev, "boot up perf state: %u\n", i); + + return 0; +} + +static const struct cpr_desc qcs404_cpr_desc = { + .num_fuse_corners = 3, + .min_diff_quot = CPR_FUSE_MIN_QUOT_DIFF, + .step_quot = (int []){ 25, 25, 25, }, + .timer_delay_us = 5000, + .timer_cons_up = 0, + .timer_cons_down = 2, + .up_threshold = 1, + .down_threshold = 3, + .idle_clocks = 15, + .gcnt_us = 1, + .vdd_apc_step_up_limit = 1, + .vdd_apc_step_down_limit = 1, + .cpr_fuses = { + .init_voltage_step = 8000, + .init_voltage_width = 6, + .fuse_corner_data = (struct fuse_corner_data[]){ + /* fuse corner 0 */ + { + .ref_uV = 1224000, + .max_uV = 1224000, + .min_uV = 1048000, + .max_volt_scale = 0, + .max_quot_scale = 0, + .quot_offset = 0, + .quot_scale = 1, + .quot_adjust = 0, + .quot_offset_scale = 5, + .quot_offset_adjust = 0, + }, + /* fuse corner 1 */ + { + .ref_uV = 1288000, + .max_uV = 1288000, + .min_uV = 1048000, + .max_volt_scale = 2000, + .max_quot_scale = 1400, + .quot_offset = 0, + .quot_scale = 1, + .quot_adjust = -20, + .quot_offset_scale = 5, + .quot_offset_adjust = 0, + }, + /* fuse corner 2 */ + { + .ref_uV = 1352000, + .max_uV = 1384000, + .min_uV = 1088000, + .max_volt_scale = 2000, + .max_quot_scale = 1400, + .quot_offset = 0, + .quot_scale = 1, + .quot_adjust = 0, + .quot_offset_scale = 5, + .quot_offset_adjust = 0, + }, + }, + }, +}; + +static const struct acc_desc qcs404_acc_desc = { + .settings = (struct reg_sequence[]){ + { 0xb120, 0x1041040 }, + { 0xb124, 0x41 }, + { 0xb120, 0x0 }, + { 0xb124, 0x0 }, + { 0xb120, 0x0 }, + { 0xb124, 0x0 }, + }, + .config = (struct 
reg_sequence[]){ + { 0xb138, 0xff }, + { 0xb130, 0x5555 }, + }, + .num_regs_per_fuse = 2, +}; + +static const struct cpr_acc_desc qcs404_cpr_acc_desc = { + .cpr_desc = &qcs404_cpr_desc, + .acc_desc = &qcs404_acc_desc, +}; + +static unsigned int cpr_get_performance_state(struct generic_pm_domain *genpd, + struct dev_pm_opp *opp) +{ + return dev_pm_opp_get_level(opp); +} + +static int cpr_power_off(struct generic_pm_domain *domain) +{ + struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd); + + return cpr_disable(drv); +} + +static int cpr_power_on(struct generic_pm_domain *domain) +{ + struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd); + + return cpr_enable(drv); +} + +static int cpr_pd_attach_dev(struct generic_pm_domain *domain, + struct device *dev) +{ + struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd); + const struct acc_desc *acc_desc = drv->acc_desc; + int ret = 0; + + mutex_lock(&drv->lock); + + dev_dbg(drv->dev, "attach callback for: %s\n", dev_name(dev)); + + /* + * This driver only supports scaling voltage for a CPU cluster + * where all CPUs in the cluster share a single regulator. + * Therefore, save the struct device pointer only for the first + * CPU device that gets attached. There is no need to do any + * additional initialization when further CPUs get attached. + */ + if (drv->attached_cpu_dev) + goto unlock; + + /* + * cpr_scale_voltage() requires the direction (if we are changing + * to a higher or lower OPP). The first time + * cpr_set_performance_state() is called, there is no previous + * performance state defined. Therefore, we call + * cpr_find_initial_corner() that gets the CPU clock frequency + * set by the bootloader, so that we can determine the direction + * the first time cpr_set_performance_state() is called. + */ + drv->cpu_clk = devm_clk_get(dev, NULL); + if (IS_ERR(drv->cpu_clk)) { + ret = PTR_ERR(drv->cpu_clk); + if (ret != -EPROBE_DEFER) + dev_err(drv->dev, "could not get cpu clk: %d\n", ret); + goto unlock; + } + drv->attached_cpu_dev = dev; + + dev_dbg(drv->dev, "using cpu clk from: %s\n", + dev_name(drv->attached_cpu_dev)); + + /* + * Everything related to (virtual) corners has to be initialized + * here, when attaching to the power domain, since we need to know + * the maximum frequency for each fuse corner, and this is only + * available after the cpufreq driver has attached to us. + * The reason for this is that we need to know the highest + * frequency associated with each fuse corner. 
+ */ + ret = dev_pm_opp_get_opp_count(&drv->pd.dev); + if (ret < 0) { + dev_err(drv->dev, "could not get OPP count\n"); + goto unlock; + } + drv->num_corners = ret; + + if (drv->num_corners < 2) { + dev_err(drv->dev, "need at least 2 OPPs to use CPR\n"); + ret = -EINVAL; + goto unlock; + } + + drv->corners = devm_kcalloc(drv->dev, drv->num_corners, + sizeof(*drv->corners), + GFP_KERNEL); + if (!drv->corners) { + ret = -ENOMEM; + goto unlock; + } + + ret = cpr_corner_init(drv); + if (ret) + goto unlock; + + cpr_set_loop_allowed(drv); + + ret = cpr_init_parameters(drv); + if (ret) + goto unlock; + + /* Configure CPR HW but keep it disabled */ + ret = cpr_config(drv); + if (ret) + goto unlock; + + ret = cpr_find_initial_corner(drv); + if (ret) + goto unlock; + + if (acc_desc->config) + regmap_multi_reg_write(drv->tcsr, acc_desc->config, + acc_desc->num_regs_per_fuse); + + /* Enable ACC if required */ + if (acc_desc->enable_mask) + regmap_update_bits(drv->tcsr, acc_desc->enable_reg, + acc_desc->enable_mask, + acc_desc->enable_mask); + + dev_info(drv->dev, "driver initialized with %u OPPs\n", + drv->num_corners); + +unlock: + mutex_unlock(&drv->lock); + + return ret; +} + +static int cpr_debug_info_show(struct seq_file *s, void *unused) +{ + u32 gcnt, ro_sel, ctl, irq_status, reg, error_steps; + u32 step_dn, step_up, error, error_lt0, busy; + struct cpr_drv *drv = s->private; + struct fuse_corner *fuse_corner; + struct corner *corner; + + corner = drv->corner; + fuse_corner = corner->fuse_corner; + + seq_printf(s, "corner, current_volt = %d uV\n", + corner->last_uV); + + ro_sel = fuse_corner->ring_osc_idx; + gcnt = cpr_read(drv, REG_RBCPR_GCNT_TARGET(ro_sel)); + seq_printf(s, "rbcpr_gcnt_target (%u) = %#02X\n", ro_sel, gcnt); + + ctl = cpr_read(drv, REG_RBCPR_CTL); + seq_printf(s, "rbcpr_ctl = %#02X\n", ctl); + + irq_status = cpr_read(drv, REG_RBIF_IRQ_STATUS); + seq_printf(s, "rbcpr_irq_status = %#02X\n", irq_status); + + reg = cpr_read(drv, REG_RBCPR_RESULT_0); + seq_printf(s, "rbcpr_result_0 = %#02X\n", reg); + + step_dn = reg & 0x01; + step_up = (reg >> RBCPR_RESULT0_STEP_UP_SHIFT) & 0x01; + seq_printf(s, " [step_dn = %u", step_dn); + + seq_printf(s, ", step_up = %u", step_up); + + error_steps = (reg >> RBCPR_RESULT0_ERROR_STEPS_SHIFT) + & RBCPR_RESULT0_ERROR_STEPS_MASK; + seq_printf(s, ", error_steps = %u", error_steps); + + error = (reg >> RBCPR_RESULT0_ERROR_SHIFT) & RBCPR_RESULT0_ERROR_MASK; + seq_printf(s, ", error = %u", error); + + error_lt0 = (reg >> RBCPR_RESULT0_ERROR_LT0_SHIFT) & 0x01; + seq_printf(s, ", error_lt_0 = %u", error_lt0); + + busy = (reg >> RBCPR_RESULT0_BUSY_SHIFT) & 0x01; + seq_printf(s, ", busy = %u]\n", busy); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(cpr_debug_info); + +static void cpr_debugfs_init(struct cpr_drv *drv) +{ + drv->debugfs = debugfs_create_dir("qcom_cpr", NULL); + + debugfs_create_file("debug_info", 0444, drv->debugfs, + drv, &cpr_debug_info_fops); +} + +static int cpr_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct cpr_drv *drv; + int irq, ret; + const struct cpr_acc_desc *data; + struct device_node *np; + u32 cpr_rev = FUSE_REVISION_UNKNOWN; + + data = of_device_get_match_data(dev); + if (!data || !data->cpr_desc || !data->acc_desc) + return -EINVAL; + + drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL); + if (!drv) + return -ENOMEM; + drv->dev = dev; + drv->desc = data->cpr_desc; + drv->acc_desc = data->acc_desc; + + drv->fuse_corners = devm_kcalloc(dev, drv->desc->num_fuse_corners, + sizeof(*drv->fuse_corners), + 
GFP_KERNEL); + if (!drv->fuse_corners) + return -ENOMEM; + + np = of_parse_phandle(dev->of_node, "acc-syscon", 0); + if (!np) + return -ENODEV; + + drv->tcsr = syscon_node_to_regmap(np); + of_node_put(np); + if (IS_ERR(drv->tcsr)) + return PTR_ERR(drv->tcsr); + + drv->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(drv->base)) + return PTR_ERR(drv->base); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return -EINVAL; + + drv->vdd_apc = devm_regulator_get(dev, "vdd-apc"); + if (IS_ERR(drv->vdd_apc)) + return PTR_ERR(drv->vdd_apc); + + /* + * Initialize fuse corners, since it simply depends + * on data in efuses. + * Everything related to (virtual) corners has to be + * initialized after attaching to the power domain, + * since it depends on the CPU's OPP table. + */ + ret = nvmem_cell_read_variable_le_u32(dev, "cpr_fuse_revision", &cpr_rev); + if (ret) + return ret; + + drv->cpr_fuses = cpr_get_fuses(drv); + if (IS_ERR(drv->cpr_fuses)) + return PTR_ERR(drv->cpr_fuses); + + ret = cpr_populate_ring_osc_idx(drv); + if (ret) + return ret; + + ret = cpr_fuse_corner_init(drv); + if (ret) + return ret; + + mutex_init(&drv->lock); + + ret = devm_request_threaded_irq(dev, irq, NULL, + cpr_irq_handler, + IRQF_ONESHOT | IRQF_TRIGGER_RISING, + "cpr", drv); + if (ret) + return ret; + + drv->pd.name = devm_kstrdup_const(dev, dev->of_node->full_name, + GFP_KERNEL); + if (!drv->pd.name) + return -EINVAL; + + drv->pd.power_off = cpr_power_off; + drv->pd.power_on = cpr_power_on; + drv->pd.set_performance_state = cpr_set_performance_state; + drv->pd.opp_to_performance_state = cpr_get_performance_state; + drv->pd.attach_dev = cpr_pd_attach_dev; + + ret = pm_genpd_init(&drv->pd, NULL, true); + if (ret) + return ret; + + ret = of_genpd_add_provider_simple(dev->of_node, &drv->pd); + if (ret) + goto err_remove_genpd; + + platform_set_drvdata(pdev, drv); + cpr_debugfs_init(drv); + + return 0; + +err_remove_genpd: + pm_genpd_remove(&drv->pd); + return ret; +} + +static int cpr_remove(struct platform_device *pdev) +{ + struct cpr_drv *drv = platform_get_drvdata(pdev); + + if (cpr_is_allowed(drv)) { + cpr_ctl_disable(drv); + cpr_irq_set(drv, 0); + } + + of_genpd_del_provider(pdev->dev.of_node); + pm_genpd_remove(&drv->pd); + + debugfs_remove_recursive(drv->debugfs); + + return 0; +} + +static const struct of_device_id cpr_match_table[] = { + { .compatible = "qcom,qcs404-cpr", .data = &qcs404_cpr_acc_desc }, + { } +}; +MODULE_DEVICE_TABLE(of, cpr_match_table); + +static struct platform_driver cpr_driver = { + .probe = cpr_probe, + .remove = cpr_remove, + .driver = { + .name = "qcom-cpr", + .of_match_table = cpr_match_table, + }, +}; +module_platform_driver(cpr_driver); + +MODULE_DESCRIPTION("Core Power Reduction (CPR) driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/genpd/qcom/rpmhpd.c b/drivers/genpd/qcom/rpmhpd.c new file mode 100644 index 000000000000..63c35a32065b --- /dev/null +++ b/drivers/genpd/qcom/rpmhpd.c @@ -0,0 +1,870 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018, The Linux Foundation. 
All rights reserved.*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define domain_to_rpmhpd(domain) container_of(domain, struct rpmhpd, pd) + +#define RPMH_ARC_MAX_LEVELS 16 + +/** + * struct rpmhpd - top level RPMh power domain resource data structure + * @dev: rpmh power domain controller device + * @pd: generic_pm_domain corresponding to the power domain + * @parent: generic_pm_domain corresponding to the parent's power domain + * @peer: A peer power domain in case Active only Voting is + * supported + * @active_only: True if it represents an Active only peer + * @corner: current corner + * @active_corner: current active corner + * @enable_corner: lowest non-zero corner + * @level: An array of level (vlvl) to corner (hlvl) mappings + * derived from cmd-db + * @level_count: Number of levels supported by the power domain. max + * being 16 (0 - 15) + * @enabled: true if the power domain is enabled + * @res_name: Resource name used for cmd-db lookup + * @addr: Resource address as looped up using resource name from + * cmd-db + * @state_synced: Indicator that sync_state has been invoked for the rpmhpd resource + */ +struct rpmhpd { + struct device *dev; + struct generic_pm_domain pd; + struct generic_pm_domain *parent; + struct rpmhpd *peer; + const bool active_only; + unsigned int corner; + unsigned int active_corner; + unsigned int enable_corner; + u32 level[RPMH_ARC_MAX_LEVELS]; + size_t level_count; + bool enabled; + const char *res_name; + u32 addr; + bool state_synced; +}; + +struct rpmhpd_desc { + struct rpmhpd **rpmhpds; + size_t num_pds; +}; + +static DEFINE_MUTEX(rpmhpd_lock); + +/* RPMH powerdomains */ + +static struct rpmhpd cx_ao; +static struct rpmhpd mx; +static struct rpmhpd mx_ao; +static struct rpmhpd cx = { + .pd = { .name = "cx", }, + .peer = &cx_ao, + .res_name = "cx.lvl", +}; + +static struct rpmhpd cx_ao = { + .pd = { .name = "cx_ao", }, + .active_only = true, + .peer = &cx, + .res_name = "cx.lvl", +}; + +static struct rpmhpd cx_ao_w_mx_parent; +static struct rpmhpd cx_w_mx_parent = { + .pd = { .name = "cx", }, + .peer = &cx_ao_w_mx_parent, + .parent = &mx.pd, + .res_name = "cx.lvl", +}; + +static struct rpmhpd cx_ao_w_mx_parent = { + .pd = { .name = "cx_ao", }, + .active_only = true, + .peer = &cx_w_mx_parent, + .parent = &mx_ao.pd, + .res_name = "cx.lvl", +}; + +static struct rpmhpd ebi = { + .pd = { .name = "ebi", }, + .res_name = "ebi.lvl", +}; + +static struct rpmhpd gfx = { + .pd = { .name = "gfx", }, + .res_name = "gfx.lvl", +}; + +static struct rpmhpd lcx = { + .pd = { .name = "lcx", }, + .res_name = "lcx.lvl", +}; + +static struct rpmhpd lmx = { + .pd = { .name = "lmx", }, + .res_name = "lmx.lvl", +}; + +static struct rpmhpd mmcx_ao; +static struct rpmhpd mmcx = { + .pd = { .name = "mmcx", }, + .peer = &mmcx_ao, + .res_name = "mmcx.lvl", +}; + +static struct rpmhpd mmcx_ao = { + .pd = { .name = "mmcx_ao", }, + .active_only = true, + .peer = &mmcx, + .res_name = "mmcx.lvl", +}; + +static struct rpmhpd mmcx_ao_w_cx_parent; +static struct rpmhpd mmcx_w_cx_parent = { + .pd = { .name = "mmcx", }, + .peer = &mmcx_ao_w_cx_parent, + .parent = &cx.pd, + .res_name = "mmcx.lvl", +}; + +static struct rpmhpd mmcx_ao_w_cx_parent = { + .pd = { .name = "mmcx_ao", }, + .active_only = true, + .peer = &mmcx_w_cx_parent, + .parent = &cx_ao.pd, + .res_name = "mmcx.lvl", +}; + +static struct rpmhpd mss = { + .pd = { .name = "mss", }, + .res_name = "mss.lvl", +}; + +static struct 
rpmhpd mx_ao; +static struct rpmhpd mx = { + .pd = { .name = "mx", }, + .peer = &mx_ao, + .res_name = "mx.lvl", +}; + +static struct rpmhpd mx_ao = { + .pd = { .name = "mx_ao", }, + .active_only = true, + .peer = &mx, + .res_name = "mx.lvl", +}; + +static struct rpmhpd mxc_ao; +static struct rpmhpd mxc = { + .pd = { .name = "mxc", }, + .peer = &mxc_ao, + .res_name = "mxc.lvl", +}; + +static struct rpmhpd mxc_ao = { + .pd = { .name = "mxc_ao", }, + .active_only = true, + .peer = &mxc, + .res_name = "mxc.lvl", +}; + +static struct rpmhpd nsp = { + .pd = { .name = "nsp", }, + .res_name = "nsp.lvl", +}; + +static struct rpmhpd nsp0 = { + .pd = { .name = "nsp0", }, + .res_name = "nsp0.lvl", +}; + +static struct rpmhpd nsp1 = { + .pd = { .name = "nsp1", }, + .res_name = "nsp1.lvl", +}; + +static struct rpmhpd qphy = { + .pd = { .name = "qphy", }, + .res_name = "qphy.lvl", +}; + +/* SA8540P RPMH powerdomains */ +static struct rpmhpd *sa8540p_rpmhpds[] = { + [SC8280XP_CX] = &cx, + [SC8280XP_CX_AO] = &cx_ao, + [SC8280XP_EBI] = &ebi, + [SC8280XP_GFX] = &gfx, + [SC8280XP_LCX] = &lcx, + [SC8280XP_LMX] = &lmx, + [SC8280XP_MMCX] = &mmcx, + [SC8280XP_MMCX_AO] = &mmcx_ao, + [SC8280XP_MX] = &mx, + [SC8280XP_MX_AO] = &mx_ao, + [SC8280XP_NSP] = &nsp, +}; + +static const struct rpmhpd_desc sa8540p_desc = { + .rpmhpds = sa8540p_rpmhpds, + .num_pds = ARRAY_SIZE(sa8540p_rpmhpds), +}; + +/* SA8775P RPMH power domains */ +static struct rpmhpd *sa8775p_rpmhpds[] = { + [SA8775P_CX] = &cx, + [SA8775P_CX_AO] = &cx_ao, + [SA8775P_EBI] = &ebi, + [SA8775P_GFX] = &gfx, + [SA8775P_LCX] = &lcx, + [SA8775P_LMX] = &lmx, + [SA8775P_MMCX] = &mmcx, + [SA8775P_MMCX_AO] = &mmcx_ao, + [SA8775P_MXC] = &mxc, + [SA8775P_MXC_AO] = &mxc_ao, + [SA8775P_MX] = &mx, + [SA8775P_MX_AO] = &mx_ao, + [SA8775P_NSP0] = &nsp0, + [SA8775P_NSP1] = &nsp1, +}; + +static const struct rpmhpd_desc sa8775p_desc = { + .rpmhpds = sa8775p_rpmhpds, + .num_pds = ARRAY_SIZE(sa8775p_rpmhpds), +}; + +/* SDM670 RPMH powerdomains */ +static struct rpmhpd *sdm670_rpmhpds[] = { + [SDM670_CX] = &cx_w_mx_parent, + [SDM670_CX_AO] = &cx_ao_w_mx_parent, + [SDM670_GFX] = &gfx, + [SDM670_LCX] = &lcx, + [SDM670_LMX] = &lmx, + [SDM670_MSS] = &mss, + [SDM670_MX] = &mx, + [SDM670_MX_AO] = &mx_ao, +}; + +static const struct rpmhpd_desc sdm670_desc = { + .rpmhpds = sdm670_rpmhpds, + .num_pds = ARRAY_SIZE(sdm670_rpmhpds), +}; + +/* SDM845 RPMH powerdomains */ +static struct rpmhpd *sdm845_rpmhpds[] = { + [SDM845_CX] = &cx_w_mx_parent, + [SDM845_CX_AO] = &cx_ao_w_mx_parent, + [SDM845_EBI] = &ebi, + [SDM845_GFX] = &gfx, + [SDM845_LCX] = &lcx, + [SDM845_LMX] = &lmx, + [SDM845_MSS] = &mss, + [SDM845_MX] = &mx, + [SDM845_MX_AO] = &mx_ao, +}; + +static const struct rpmhpd_desc sdm845_desc = { + .rpmhpds = sdm845_rpmhpds, + .num_pds = ARRAY_SIZE(sdm845_rpmhpds), +}; + +/* SDX55 RPMH powerdomains */ +static struct rpmhpd *sdx55_rpmhpds[] = { + [SDX55_CX] = &cx_w_mx_parent, + [SDX55_MSS] = &mss, + [SDX55_MX] = &mx, +}; + +static const struct rpmhpd_desc sdx55_desc = { + .rpmhpds = sdx55_rpmhpds, + .num_pds = ARRAY_SIZE(sdx55_rpmhpds), +}; + +/* SDX65 RPMH powerdomains */ +static struct rpmhpd *sdx65_rpmhpds[] = { + [SDX65_CX] = &cx_w_mx_parent, + [SDX65_CX_AO] = &cx_ao_w_mx_parent, + [SDX65_MSS] = &mss, + [SDX65_MX] = &mx, + [SDX65_MX_AO] = &mx_ao, + [SDX65_MXC] = &mxc, +}; + +static const struct rpmhpd_desc sdx65_desc = { + .rpmhpds = sdx65_rpmhpds, + .num_pds = ARRAY_SIZE(sdx65_rpmhpds), +}; + +/* SM6350 RPMH powerdomains */ +static struct rpmhpd *sm6350_rpmhpds[] = { + [SM6350_CX] = 
&cx_w_mx_parent, + [SM6350_GFX] = &gfx, + [SM6350_LCX] = &lcx, + [SM6350_LMX] = &lmx, + [SM6350_MSS] = &mss, + [SM6350_MX] = &mx, +}; + +static const struct rpmhpd_desc sm6350_desc = { + .rpmhpds = sm6350_rpmhpds, + .num_pds = ARRAY_SIZE(sm6350_rpmhpds), +}; + +/* SM8150 RPMH powerdomains */ +static struct rpmhpd *sm8150_rpmhpds[] = { + [SM8150_CX] = &cx_w_mx_parent, + [SM8150_CX_AO] = &cx_ao_w_mx_parent, + [SM8150_EBI] = &ebi, + [SM8150_GFX] = &gfx, + [SM8150_LCX] = &lcx, + [SM8150_LMX] = &lmx, + [SM8150_MMCX] = &mmcx, + [SM8150_MMCX_AO] = &mmcx_ao, + [SM8150_MSS] = &mss, + [SM8150_MX] = &mx, + [SM8150_MX_AO] = &mx_ao, +}; + +static const struct rpmhpd_desc sm8150_desc = { + .rpmhpds = sm8150_rpmhpds, + .num_pds = ARRAY_SIZE(sm8150_rpmhpds), +}; + +static struct rpmhpd *sa8155p_rpmhpds[] = { + [SA8155P_CX] = &cx_w_mx_parent, + [SA8155P_CX_AO] = &cx_ao_w_mx_parent, + [SA8155P_EBI] = &ebi, + [SA8155P_GFX] = &gfx, + [SA8155P_MSS] = &mss, + [SA8155P_MX] = &mx, + [SA8155P_MX_AO] = &mx_ao, +}; + +static const struct rpmhpd_desc sa8155p_desc = { + .rpmhpds = sa8155p_rpmhpds, + .num_pds = ARRAY_SIZE(sa8155p_rpmhpds), +}; + +/* SM8250 RPMH powerdomains */ +static struct rpmhpd *sm8250_rpmhpds[] = { + [SM8250_CX] = &cx_w_mx_parent, + [SM8250_CX_AO] = &cx_ao_w_mx_parent, + [SM8250_EBI] = &ebi, + [SM8250_GFX] = &gfx, + [SM8250_LCX] = &lcx, + [SM8250_LMX] = &lmx, + [SM8250_MMCX] = &mmcx, + [SM8250_MMCX_AO] = &mmcx_ao, + [SM8250_MX] = &mx, + [SM8250_MX_AO] = &mx_ao, +}; + +static const struct rpmhpd_desc sm8250_desc = { + .rpmhpds = sm8250_rpmhpds, + .num_pds = ARRAY_SIZE(sm8250_rpmhpds), +}; + +/* SM8350 Power domains */ +static struct rpmhpd *sm8350_rpmhpds[] = { + [SM8350_CX] = &cx_w_mx_parent, + [SM8350_CX_AO] = &cx_ao_w_mx_parent, + [SM8350_EBI] = &ebi, + [SM8350_GFX] = &gfx, + [SM8350_LCX] = &lcx, + [SM8350_LMX] = &lmx, + [SM8350_MMCX] = &mmcx, + [SM8350_MMCX_AO] = &mmcx_ao, + [SM8350_MSS] = &mss, + [SM8350_MX] = &mx, + [SM8350_MX_AO] = &mx_ao, + [SM8350_MXC] = &mxc, + [SM8350_MXC_AO] = &mxc_ao, +}; + +static const struct rpmhpd_desc sm8350_desc = { + .rpmhpds = sm8350_rpmhpds, + .num_pds = ARRAY_SIZE(sm8350_rpmhpds), +}; + +/* SM8450 RPMH powerdomains */ +static struct rpmhpd *sm8450_rpmhpds[] = { + [SM8450_CX] = &cx, + [SM8450_CX_AO] = &cx_ao, + [SM8450_EBI] = &ebi, + [SM8450_GFX] = &gfx, + [SM8450_LCX] = &lcx, + [SM8450_LMX] = &lmx, + [SM8450_MMCX] = &mmcx_w_cx_parent, + [SM8450_MMCX_AO] = &mmcx_ao_w_cx_parent, + [SM8450_MSS] = &mss, + [SM8450_MX] = &mx, + [SM8450_MX_AO] = &mx_ao, + [SM8450_MXC] = &mxc, + [SM8450_MXC_AO] = &mxc_ao, +}; + +static const struct rpmhpd_desc sm8450_desc = { + .rpmhpds = sm8450_rpmhpds, + .num_pds = ARRAY_SIZE(sm8450_rpmhpds), +}; + +/* SM8550 RPMH powerdomains */ +static struct rpmhpd *sm8550_rpmhpds[] = { + [SM8550_CX] = &cx, + [SM8550_CX_AO] = &cx_ao, + [SM8550_EBI] = &ebi, + [SM8550_GFX] = &gfx, + [SM8550_LCX] = &lcx, + [SM8550_LMX] = &lmx, + [SM8550_MMCX] = &mmcx_w_cx_parent, + [SM8550_MMCX_AO] = &mmcx_ao_w_cx_parent, + [SM8550_MSS] = &mss, + [SM8550_MX] = &mx, + [SM8550_MX_AO] = &mx_ao, + [SM8550_MXC] = &mxc, + [SM8550_MXC_AO] = &mxc_ao, + [SM8550_NSP] = &nsp, +}; + +static const struct rpmhpd_desc sm8550_desc = { + .rpmhpds = sm8550_rpmhpds, + .num_pds = ARRAY_SIZE(sm8550_rpmhpds), +}; + +/* QDU1000/QRU1000 RPMH powerdomains */ +static struct rpmhpd *qdu1000_rpmhpds[] = { + [QDU1000_CX] = &cx, + [QDU1000_EBI] = &ebi, + [QDU1000_MSS] = &mss, + [QDU1000_MX] = &mx, +}; + +static const struct rpmhpd_desc qdu1000_desc = { + .rpmhpds = qdu1000_rpmhpds, + .num_pds = 
ARRAY_SIZE(qdu1000_rpmhpds), +}; + +/* SC7180 RPMH powerdomains */ +static struct rpmhpd *sc7180_rpmhpds[] = { + [SC7180_CX] = &cx_w_mx_parent, + [SC7180_CX_AO] = &cx_ao_w_mx_parent, + [SC7180_GFX] = &gfx, + [SC7180_LCX] = &lcx, + [SC7180_LMX] = &lmx, + [SC7180_MSS] = &mss, + [SC7180_MX] = &mx, + [SC7180_MX_AO] = &mx_ao, +}; + +static const struct rpmhpd_desc sc7180_desc = { + .rpmhpds = sc7180_rpmhpds, + .num_pds = ARRAY_SIZE(sc7180_rpmhpds), +}; + +/* SC7280 RPMH powerdomains */ +static struct rpmhpd *sc7280_rpmhpds[] = { + [SC7280_CX] = &cx, + [SC7280_CX_AO] = &cx_ao, + [SC7280_EBI] = &ebi, + [SC7280_GFX] = &gfx, + [SC7280_LCX] = &lcx, + [SC7280_LMX] = &lmx, + [SC7280_MSS] = &mss, + [SC7280_MX] = &mx, + [SC7280_MX_AO] = &mx_ao, +}; + +static const struct rpmhpd_desc sc7280_desc = { + .rpmhpds = sc7280_rpmhpds, + .num_pds = ARRAY_SIZE(sc7280_rpmhpds), +}; + +/* SC8180x RPMH powerdomains */ +static struct rpmhpd *sc8180x_rpmhpds[] = { + [SC8180X_CX] = &cx_w_mx_parent, + [SC8180X_CX_AO] = &cx_ao_w_mx_parent, + [SC8180X_EBI] = &ebi, + [SC8180X_GFX] = &gfx, + [SC8180X_LCX] = &lcx, + [SC8180X_LMX] = &lmx, + [SC8180X_MMCX] = &mmcx, + [SC8180X_MMCX_AO] = &mmcx_ao, + [SC8180X_MSS] = &mss, + [SC8180X_MX] = &mx, + [SC8180X_MX_AO] = &mx_ao, +}; + +static const struct rpmhpd_desc sc8180x_desc = { + .rpmhpds = sc8180x_rpmhpds, + .num_pds = ARRAY_SIZE(sc8180x_rpmhpds), +}; + +/* SC8280xp RPMH powerdomains */ +static struct rpmhpd *sc8280xp_rpmhpds[] = { + [SC8280XP_CX] = &cx, + [SC8280XP_CX_AO] = &cx_ao, + [SC8280XP_EBI] = &ebi, + [SC8280XP_GFX] = &gfx, + [SC8280XP_LCX] = &lcx, + [SC8280XP_LMX] = &lmx, + [SC8280XP_MMCX] = &mmcx, + [SC8280XP_MMCX_AO] = &mmcx_ao, + [SC8280XP_MX] = &mx, + [SC8280XP_MX_AO] = &mx_ao, + [SC8280XP_NSP] = &nsp, + [SC8280XP_QPHY] = &qphy, +}; + +static const struct rpmhpd_desc sc8280xp_desc = { + .rpmhpds = sc8280xp_rpmhpds, + .num_pds = ARRAY_SIZE(sc8280xp_rpmhpds), +}; + +static const struct of_device_id rpmhpd_match_table[] = { + { .compatible = "qcom,qdu1000-rpmhpd", .data = &qdu1000_desc }, + { .compatible = "qcom,sa8155p-rpmhpd", .data = &sa8155p_desc }, + { .compatible = "qcom,sa8540p-rpmhpd", .data = &sa8540p_desc }, + { .compatible = "qcom,sa8775p-rpmhpd", .data = &sa8775p_desc }, + { .compatible = "qcom,sc7180-rpmhpd", .data = &sc7180_desc }, + { .compatible = "qcom,sc7280-rpmhpd", .data = &sc7280_desc }, + { .compatible = "qcom,sc8180x-rpmhpd", .data = &sc8180x_desc }, + { .compatible = "qcom,sc8280xp-rpmhpd", .data = &sc8280xp_desc }, + { .compatible = "qcom,sdm670-rpmhpd", .data = &sdm670_desc }, + { .compatible = "qcom,sdm845-rpmhpd", .data = &sdm845_desc }, + { .compatible = "qcom,sdx55-rpmhpd", .data = &sdx55_desc}, + { .compatible = "qcom,sdx65-rpmhpd", .data = &sdx65_desc}, + { .compatible = "qcom,sm6350-rpmhpd", .data = &sm6350_desc }, + { .compatible = "qcom,sm8150-rpmhpd", .data = &sm8150_desc }, + { .compatible = "qcom,sm8250-rpmhpd", .data = &sm8250_desc }, + { .compatible = "qcom,sm8350-rpmhpd", .data = &sm8350_desc }, + { .compatible = "qcom,sm8450-rpmhpd", .data = &sm8450_desc }, + { .compatible = "qcom,sm8550-rpmhpd", .data = &sm8550_desc }, + { } +}; +MODULE_DEVICE_TABLE(of, rpmhpd_match_table); + +static int rpmhpd_send_corner(struct rpmhpd *pd, int state, + unsigned int corner, bool sync) +{ + struct tcs_cmd cmd = { + .addr = pd->addr, + .data = corner, + }; + + /* + * Wait for an ack only when we are increasing the + * perf state of the power domain + */ + if (sync) + return rpmh_write(pd->dev, state, &cmd, 1); + else + return 
rpmh_write_async(pd->dev, state, &cmd, 1); +} + +static void to_active_sleep(struct rpmhpd *pd, unsigned int corner, + unsigned int *active, unsigned int *sleep) +{ + *active = corner; + + if (pd->active_only) + *sleep = 0; + else + *sleep = *active; +} + +/* + * This function is used to aggregate the votes across the active only + * resources and its peers. The aggregated votes are sent to RPMh as + * ACTIVE_ONLY votes (which take effect immediately), as WAKE_ONLY votes + * (applied by RPMh on system wakeup) and as SLEEP votes (applied by RPMh + * on system sleep). + * We send ACTIVE_ONLY votes for resources without any peers. For others, + * which have an active only peer, all 3 votes are sent. + */ +static int rpmhpd_aggregate_corner(struct rpmhpd *pd, unsigned int corner) +{ + int ret; + struct rpmhpd *peer = pd->peer; + unsigned int active_corner, sleep_corner; + unsigned int this_active_corner = 0, this_sleep_corner = 0; + unsigned int peer_active_corner = 0, peer_sleep_corner = 0; + + if (pd->state_synced) { + to_active_sleep(pd, corner, &this_active_corner, &this_sleep_corner); + } else { + /* Clamp to highest corner if sync_state hasn't happened */ + this_active_corner = pd->level_count - 1; + this_sleep_corner = pd->level_count - 1; + } + + if (peer && peer->enabled) + to_active_sleep(peer, peer->corner, &peer_active_corner, + &peer_sleep_corner); + + active_corner = max(this_active_corner, peer_active_corner); + + ret = rpmhpd_send_corner(pd, RPMH_ACTIVE_ONLY_STATE, active_corner, + active_corner > pd->active_corner); + if (ret) + return ret; + + pd->active_corner = active_corner; + + if (peer) { + peer->active_corner = active_corner; + + ret = rpmhpd_send_corner(pd, RPMH_WAKE_ONLY_STATE, + active_corner, false); + if (ret) + return ret; + + sleep_corner = max(this_sleep_corner, peer_sleep_corner); + + return rpmhpd_send_corner(pd, RPMH_SLEEP_STATE, sleep_corner, + false); + } + + return ret; +} + +static int rpmhpd_power_on(struct generic_pm_domain *domain) +{ + struct rpmhpd *pd = domain_to_rpmhpd(domain); + unsigned int corner; + int ret; + + mutex_lock(&rpmhpd_lock); + + corner = max(pd->corner, pd->enable_corner); + ret = rpmhpd_aggregate_corner(pd, corner); + if (!ret) + pd->enabled = true; + + mutex_unlock(&rpmhpd_lock); + + return ret; +} + +static int rpmhpd_power_off(struct generic_pm_domain *domain) +{ + struct rpmhpd *pd = domain_to_rpmhpd(domain); + int ret; + + mutex_lock(&rpmhpd_lock); + + ret = rpmhpd_aggregate_corner(pd, 0); + if (!ret) + pd->enabled = false; + + mutex_unlock(&rpmhpd_lock); + + return ret; +} + +static int rpmhpd_set_performance_state(struct generic_pm_domain *domain, + unsigned int level) +{ + struct rpmhpd *pd = domain_to_rpmhpd(domain); + int ret = 0, i; + + mutex_lock(&rpmhpd_lock); + + for (i = 0; i < pd->level_count; i++) + if (level <= pd->level[i]) + break; + + /* + * If the level requested is more than that supported by the + * max corner, just set it to max anyway. 
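+ *
+ * Illustrative example (hypothetical values, not read from command DB):
+ * with level[] = { 16, 64, 128, 256 } and a requested level of 320, the
+ * loop above never breaks, so i reaches level_count and is clamped back
+ * to the highest available corner below.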
+ */ + if (i == pd->level_count) + i--; + + if (pd->enabled) { + /* Ensure that the domain isn't turn off */ + if (i < pd->enable_corner) + i = pd->enable_corner; + + ret = rpmhpd_aggregate_corner(pd, i); + if (ret) + goto out; + } + + pd->corner = i; +out: + mutex_unlock(&rpmhpd_lock); + + return ret; +} + +static unsigned int rpmhpd_get_performance_state(struct generic_pm_domain *genpd, + struct dev_pm_opp *opp) +{ + return dev_pm_opp_get_level(opp); +} + +static int rpmhpd_update_level_mapping(struct rpmhpd *rpmhpd) +{ + int i; + const u16 *buf; + + buf = cmd_db_read_aux_data(rpmhpd->res_name, &rpmhpd->level_count); + if (IS_ERR(buf)) + return PTR_ERR(buf); + + /* 2 bytes used for each command DB aux data entry */ + rpmhpd->level_count >>= 1; + + if (rpmhpd->level_count > RPMH_ARC_MAX_LEVELS) + return -EINVAL; + + for (i = 0; i < rpmhpd->level_count; i++) { + rpmhpd->level[i] = buf[i]; + + /* Remember the first corner with non-zero level */ + if (!rpmhpd->level[rpmhpd->enable_corner] && rpmhpd->level[i]) + rpmhpd->enable_corner = i; + + /* + * The AUX data may be zero padded. These 0 valued entries at + * the end of the map must be ignored. + */ + if (i > 0 && rpmhpd->level[i] == 0) { + rpmhpd->level_count = i; + break; + } + pr_debug("%s: ARC hlvl=%2d --> vlvl=%4u\n", rpmhpd->res_name, i, + rpmhpd->level[i]); + } + + return 0; +} + +static int rpmhpd_probe(struct platform_device *pdev) +{ + int i, ret; + size_t num_pds; + struct device *dev = &pdev->dev; + struct genpd_onecell_data *data; + struct rpmhpd **rpmhpds; + const struct rpmhpd_desc *desc; + + desc = of_device_get_match_data(dev); + if (!desc) + return -EINVAL; + + rpmhpds = desc->rpmhpds; + num_pds = desc->num_pds; + + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->domains = devm_kcalloc(dev, num_pds, sizeof(*data->domains), + GFP_KERNEL); + if (!data->domains) + return -ENOMEM; + + data->num_domains = num_pds; + + for (i = 0; i < num_pds; i++) { + if (!rpmhpds[i]) + continue; + + rpmhpds[i]->dev = dev; + rpmhpds[i]->addr = cmd_db_read_addr(rpmhpds[i]->res_name); + if (!rpmhpds[i]->addr) { + dev_err(dev, "Could not find RPMh address for resource %s\n", + rpmhpds[i]->res_name); + return -ENODEV; + } + + ret = cmd_db_read_slave_id(rpmhpds[i]->res_name); + if (ret != CMD_DB_HW_ARC) { + dev_err(dev, "RPMh slave ID mismatch\n"); + return -EINVAL; + } + + ret = rpmhpd_update_level_mapping(rpmhpds[i]); + if (ret) + return ret; + + rpmhpds[i]->pd.power_off = rpmhpd_power_off; + rpmhpds[i]->pd.power_on = rpmhpd_power_on; + rpmhpds[i]->pd.set_performance_state = rpmhpd_set_performance_state; + rpmhpds[i]->pd.opp_to_performance_state = rpmhpd_get_performance_state; + pm_genpd_init(&rpmhpds[i]->pd, NULL, true); + + data->domains[i] = &rpmhpds[i]->pd; + } + + /* Add subdomains */ + for (i = 0; i < num_pds; i++) { + if (!rpmhpds[i]) + continue; + if (rpmhpds[i]->parent) + pm_genpd_add_subdomain(rpmhpds[i]->parent, + &rpmhpds[i]->pd); + } + + return of_genpd_add_provider_onecell(pdev->dev.of_node, data); +} + +static void rpmhpd_sync_state(struct device *dev) +{ + const struct rpmhpd_desc *desc = of_device_get_match_data(dev); + struct rpmhpd **rpmhpds = desc->rpmhpds; + unsigned int corner; + struct rpmhpd *pd; + unsigned int i; + int ret; + + mutex_lock(&rpmhpd_lock); + for (i = 0; i < desc->num_pds; i++) { + pd = rpmhpds[i]; + if (!pd) + continue; + + pd->state_synced = true; + if (pd->enabled) + corner = max(pd->corner, pd->enable_corner); + else + corner = 0; + + ret = 
rpmhpd_aggregate_corner(pd, corner); + if (ret) + dev_err(dev, "failed to sync %s\n", pd->res_name); + } + mutex_unlock(&rpmhpd_lock); +} + +static struct platform_driver rpmhpd_driver = { + .driver = { + .name = "qcom-rpmhpd", + .of_match_table = rpmhpd_match_table, + .suppress_bind_attrs = true, + .sync_state = rpmhpd_sync_state, + }, + .probe = rpmhpd_probe, +}; + +static int __init rpmhpd_init(void) +{ + return platform_driver_register(&rpmhpd_driver); +} +core_initcall(rpmhpd_init); + +MODULE_DESCRIPTION("Qualcomm Technologies, Inc. RPMh Power Domain Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/genpd/qcom/rpmpd.c b/drivers/genpd/qcom/rpmpd.c new file mode 100644 index 000000000000..99b017fd76b7 --- /dev/null +++ b/drivers/genpd/qcom/rpmpd.c @@ -0,0 +1,992 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define domain_to_rpmpd(domain) container_of(domain, struct rpmpd, pd) + +/* Resource types: + * RPMPD_X is X encoded as a little-endian, lower-case, ASCII string */ +#define RPMPD_SMPA 0x61706d73 +#define RPMPD_LDOA 0x616f646c +#define RPMPD_SMPB 0x62706d73 +#define RPMPD_LDOB 0x626f646c +#define RPMPD_RWCX 0x78637772 +#define RPMPD_RWMX 0x786d7772 +#define RPMPD_RWLC 0x636c7772 +#define RPMPD_RWLM 0x6d6c7772 +#define RPMPD_RWSC 0x63737772 +#define RPMPD_RWSM 0x6d737772 +#define RPMPD_RWGX 0x78677772 + +/* Operation Keys */ +#define KEY_CORNER 0x6e726f63 /* corn */ +#define KEY_ENABLE 0x6e657773 /* swen */ +#define KEY_FLOOR_CORNER 0x636676 /* vfc */ +#define KEY_FLOOR_LEVEL 0x6c6676 /* vfl */ +#define KEY_LEVEL 0x6c766c76 /* vlvl */ + +#define MAX_CORNER_RPMPD_STATE 6 + +struct rpmpd_req { + __le32 key; + __le32 nbytes; + __le32 value; +}; + +struct rpmpd { + struct generic_pm_domain pd; + struct generic_pm_domain *parent; + struct rpmpd *peer; + const bool active_only; + unsigned int corner; + bool enabled; + const int res_type; + const int res_id; + struct qcom_smd_rpm *rpm; + unsigned int max_state; + __le32 key; +}; + +struct rpmpd_desc { + struct rpmpd **rpmpds; + size_t num_pds; + unsigned int max_state; +}; + +static DEFINE_MUTEX(rpmpd_lock); + +/* CX */ +static struct rpmpd cx_rwcx0_lvl_ao; +static struct rpmpd cx_rwcx0_lvl = { + .pd = { .name = "cx", }, + .peer = &cx_rwcx0_lvl_ao, + .res_type = RPMPD_RWCX, + .res_id = 0, + .key = KEY_LEVEL, +}; + +static struct rpmpd cx_rwcx0_lvl_ao = { + .pd = { .name = "cx_ao", }, + .peer = &cx_rwcx0_lvl, + .active_only = true, + .res_type = RPMPD_RWCX, + .res_id = 0, + .key = KEY_LEVEL, +}; + +static struct rpmpd cx_s1a_corner_ao; +static struct rpmpd cx_s1a_corner = { + .pd = { .name = "cx", }, + .peer = &cx_s1a_corner_ao, + .res_type = RPMPD_SMPA, + .res_id = 1, + .key = KEY_CORNER, +}; + +static struct rpmpd cx_s1a_corner_ao = { + .pd = { .name = "cx_ao", }, + .peer = &cx_s1a_corner, + .active_only = true, + .res_type = RPMPD_SMPA, + .res_id = 1, + .key = KEY_CORNER, +}; + +static struct rpmpd cx_s2a_corner_ao; +static struct rpmpd cx_s2a_corner = { + .pd = { .name = "cx", }, + .peer = &cx_s2a_corner_ao, + .res_type = RPMPD_SMPA, + .res_id = 2, + .key = KEY_CORNER, +}; + +static struct rpmpd cx_s2a_corner_ao = { + .pd = { .name = "cx_ao", }, + .peer = &cx_s2a_corner, + .active_only = true, + .res_type = RPMPD_SMPA, + .res_id = 2, + .key = KEY_CORNER, +}; + +static struct rpmpd cx_s2a_lvl_ao; +static struct rpmpd cx_s2a_lvl = { + .pd 
= { .name = "cx", }, + .peer = &cx_s2a_lvl_ao, + .res_type = RPMPD_SMPA, + .res_id = 2, + .key = KEY_LEVEL, +}; + +static struct rpmpd cx_s2a_lvl_ao = { + .pd = { .name = "cx_ao", }, + .peer = &cx_s2a_lvl, + .active_only = true, + .res_type = RPMPD_SMPA, + .res_id = 2, + .key = KEY_LEVEL, +}; + +static struct rpmpd cx_s3a_lvl_ao; +static struct rpmpd cx_s3a_lvl = { + .pd = { .name = "cx", }, + .peer = &cx_s3a_lvl_ao, + .res_type = RPMPD_SMPA, + .res_id = 3, + .key = KEY_LEVEL, +}; + +static struct rpmpd cx_s3a_lvl_ao = { + .pd = { .name = "cx_ao", }, + .peer = &cx_s3a_lvl, + .active_only = true, + .res_type = RPMPD_SMPA, + .res_id = 3, + .key = KEY_LEVEL, +}; + +static struct rpmpd cx_rwcx0_vfl = { + .pd = { .name = "cx_vfl", }, + .res_type = RPMPD_RWCX, + .res_id = 0, + .key = KEY_FLOOR_LEVEL, +}; + +static struct rpmpd cx_rwsc2_vfl = { + .pd = { .name = "cx_vfl", }, + .res_type = RPMPD_RWSC, + .res_id = 2, + .key = KEY_FLOOR_LEVEL, +}; + +static struct rpmpd cx_s1a_vfc = { + .pd = { .name = "cx_vfc", }, + .res_type = RPMPD_SMPA, + .res_id = 1, + .key = KEY_FLOOR_CORNER, +}; + +static struct rpmpd cx_s2a_vfc = { + .pd = { .name = "cx_vfc", }, + .res_type = RPMPD_SMPA, + .res_id = 2, + .key = KEY_FLOOR_CORNER, +}; + +static struct rpmpd cx_s2a_vfl = { + .pd = { .name = "cx_vfl", }, + .res_type = RPMPD_SMPA, + .res_id = 2, + .key = KEY_FLOOR_LEVEL, +}; + +static struct rpmpd cx_s3a_vfl = { + .pd = { .name = "cx_vfl", }, + .res_type = RPMPD_SMPA, + .res_id = 3, + .key = KEY_FLOOR_LEVEL, +}; + +/* G(F)X */ +static struct rpmpd gfx_s2b_corner = { + .pd = { .name = "gfx", }, + .res_type = RPMPD_SMPB, + .res_id = 2, + .key = KEY_CORNER, +}; + +static struct rpmpd gfx_s2b_vfc = { + .pd = { .name = "gfx_vfc", }, + .res_type = RPMPD_SMPB, + .res_id = 2, + .key = KEY_FLOOR_CORNER, +}; + +static struct rpmpd mx_rwmx0_lvl; +static struct rpmpd gx_rwgx0_lvl_ao; +static struct rpmpd gx_rwgx0_lvl = { + .pd = { .name = "gx", }, + .peer = &gx_rwgx0_lvl_ao, + .res_type = RPMPD_RWGX, + .parent = &mx_rwmx0_lvl.pd, + .res_id = 0, + .key = KEY_LEVEL, +}; + +static struct rpmpd mx_rwmx0_lvl_ao; +static struct rpmpd gx_rwgx0_lvl_ao = { + .pd = { .name = "gx_ao", }, + .peer = &gx_rwgx0_lvl, + .parent = &mx_rwmx0_lvl_ao.pd, + .active_only = true, + .res_type = RPMPD_RWGX, + .res_id = 0, + .key = KEY_LEVEL, +}; + +/* MX */ +static struct rpmpd mx_l3a_corner_ao; +static struct rpmpd mx_l3a_corner = { + .pd = { .name = "mx", }, + .peer = &mx_l3a_corner_ao, + .res_type = RPMPD_LDOA, + .res_id = 3, + .key = KEY_CORNER, +}; + +static struct rpmpd mx_l3a_corner_ao = { + .pd = { .name = "mx_ao", }, + .peer = &mx_l3a_corner, + .active_only = true, + .res_type = RPMPD_LDOA, + .res_id = 3, + .key = KEY_CORNER, +}; + +static struct rpmpd mx_l12a_lvl_ao; +static struct rpmpd mx_l12a_lvl = { + .pd = { .name = "mx", }, + .peer = &mx_l12a_lvl_ao, + .res_type = RPMPD_LDOA, + .res_id = 12, + .key = KEY_LEVEL, +}; + +static struct rpmpd mx_l12a_lvl_ao = { + .pd = { .name = "mx_ao", }, + .peer = &mx_l12a_lvl, + .active_only = true, + .res_type = RPMPD_LDOA, + .res_id = 12, + .key = KEY_LEVEL, +}; + +static struct rpmpd mx_s2a_corner_ao; +static struct rpmpd mx_s2a_corner = { + .pd = { .name = "mx", }, + .peer = &mx_s2a_corner_ao, + .res_type = RPMPD_SMPA, + .res_id = 2, + .key = KEY_CORNER, +}; + +static struct rpmpd mx_s2a_corner_ao = { + .pd = { .name = "mx_ao", }, + .peer = &mx_s2a_corner, + .active_only = true, + .res_type = RPMPD_SMPA, + .res_id = 2, + .key = KEY_CORNER, +}; + +static struct rpmpd mx_rwmx0_lvl_ao; +static struct 
rpmpd mx_rwmx0_lvl = { + .pd = { .name = "mx", }, + .peer = &mx_rwmx0_lvl_ao, + .res_type = RPMPD_RWMX, + .res_id = 0, + .key = KEY_LEVEL, +}; + +static struct rpmpd mx_rwmx0_lvl_ao = { + .pd = { .name = "mx_ao", }, + .peer = &mx_rwmx0_lvl, + .active_only = true, + .res_type = RPMPD_RWMX, + .res_id = 0, + .key = KEY_LEVEL, +}; + +static struct rpmpd mx_s6a_lvl_ao; +static struct rpmpd mx_s6a_lvl = { + .pd = { .name = "mx", }, + .peer = &mx_s6a_lvl_ao, + .res_type = RPMPD_SMPA, + .res_id = 6, + .key = KEY_LEVEL, +}; + +static struct rpmpd mx_s6a_lvl_ao = { + .pd = { .name = "mx_ao", }, + .peer = &mx_s6a_lvl, + .active_only = true, + .res_type = RPMPD_SMPA, + .res_id = 6, + .key = KEY_LEVEL, +}; + +static struct rpmpd mx_s7a_lvl_ao; +static struct rpmpd mx_s7a_lvl = { + .pd = { .name = "mx", }, + .peer = &mx_s7a_lvl_ao, + .res_type = RPMPD_SMPA, + .res_id = 7, + .key = KEY_LEVEL, +}; + +static struct rpmpd mx_s7a_lvl_ao = { + .pd = { .name = "mx_ao", }, + .peer = &mx_s7a_lvl, + .active_only = true, + .res_type = RPMPD_SMPA, + .res_id = 7, + .key = KEY_LEVEL, +}; + +static struct rpmpd mx_l12a_vfl = { + .pd = { .name = "mx_vfl", }, + .res_type = RPMPD_LDOA, + .res_id = 12, + .key = KEY_FLOOR_LEVEL, +}; + +static struct rpmpd mx_rwmx0_vfl = { + .pd = { .name = "mx_vfl", }, + .res_type = RPMPD_RWMX, + .res_id = 0, + .key = KEY_FLOOR_LEVEL, +}; + +static struct rpmpd mx_rwsm6_vfl = { + .pd = { .name = "mx_vfl", }, + .res_type = RPMPD_RWSM, + .res_id = 6, + .key = KEY_FLOOR_LEVEL, +}; + +/* MD */ +static struct rpmpd md_s1a_corner_ao; +static struct rpmpd md_s1a_corner = { + .pd = { .name = "md", }, + .peer = &md_s1a_corner_ao, + .res_type = RPMPD_SMPA, + .res_id = 1, + .key = KEY_CORNER, +}; + +static struct rpmpd md_s1a_corner_ao = { + .pd = { .name = "md_ao", }, + .peer = &md_s1a_corner, + .active_only = true, + .res_type = RPMPD_SMPA, + .res_id = 1, + .key = KEY_CORNER, +}; + +static struct rpmpd md_s1a_lvl_ao; +static struct rpmpd md_s1a_lvl = { + .pd = { .name = "md", }, + .peer = &md_s1a_lvl_ao, + .res_type = RPMPD_SMPA, + .res_id = 1, + .key = KEY_LEVEL, +}; + +static struct rpmpd md_s1a_lvl_ao = { + .pd = { .name = "md_ao", }, + .peer = &md_s1a_lvl, + .active_only = true, + .res_type = RPMPD_SMPA, + .res_id = 1, + .key = KEY_LEVEL, +}; + +static struct rpmpd md_s1a_vfc = { + .pd = { .name = "md_vfc", }, + .res_type = RPMPD_SMPA, + .res_id = 1, + .key = KEY_FLOOR_CORNER, +}; + +/* LPI_CX */ +static struct rpmpd lpi_cx_rwlc0_lvl = { + .pd = { .name = "lpi_cx", }, + .res_type = RPMPD_RWLC, + .res_id = 0, + .key = KEY_LEVEL, +}; + +static struct rpmpd lpi_cx_rwlc0_vfl = { + .pd = { .name = "lpi_cx_vfl", }, + .res_type = RPMPD_RWLC, + .res_id = 0, + .key = KEY_FLOOR_LEVEL, +}; + +/* LPI_MX */ +static struct rpmpd lpi_mx_rwlm0_lvl = { + .pd = { .name = "lpi_mx", }, + .res_type = RPMPD_RWLM, + .res_id = 0, + .key = KEY_LEVEL, +}; + +static struct rpmpd lpi_mx_rwlm0_vfl = { + .pd = { .name = "lpi_mx_vfl", }, + .res_type = RPMPD_RWLM, + .res_id = 0, + .key = KEY_FLOOR_LEVEL, +}; + +/* SSC_CX */ +static struct rpmpd ssc_cx_l26a_corner = { + .pd = { .name = "ssc_cx", }, + .res_type = RPMPD_LDOA, + .res_id = 26, + .key = KEY_CORNER, +}; + +static struct rpmpd ssc_cx_rwlc0_lvl = { + .pd = { .name = "ssc_cx", }, + .res_type = RPMPD_RWLC, + .res_id = 0, + .key = KEY_LEVEL, +}; + +static struct rpmpd ssc_cx_rwsc0_lvl = { + .pd = { .name = "ssc_cx", }, + .res_type = RPMPD_RWSC, + .res_id = 0, + .key = KEY_LEVEL, +}; + +static struct rpmpd ssc_cx_l26a_vfc = { + .pd = { .name = "ssc_cx_vfc", }, + .res_type 
= RPMPD_LDOA, + .res_id = 26, + .key = KEY_FLOOR_CORNER, +}; + +static struct rpmpd ssc_cx_rwlc0_vfl = { + .pd = { .name = "ssc_cx_vfl", }, + .res_type = RPMPD_RWLC, + .res_id = 0, + .key = KEY_FLOOR_LEVEL, +}; + +static struct rpmpd ssc_cx_rwsc0_vfl = { + .pd = { .name = "ssc_cx_vfl", }, + .res_type = RPMPD_RWSC, + .res_id = 0, + .key = KEY_FLOOR_LEVEL, +}; + +/* SSC_MX */ +static struct rpmpd ssc_mx_rwlm0_lvl = { + .pd = { .name = "ssc_mx", }, + .res_type = RPMPD_RWLM, + .res_id = 0, + .key = KEY_LEVEL, +}; + +static struct rpmpd ssc_mx_rwsm0_lvl = { + .pd = { .name = "ssc_mx", }, + .res_type = RPMPD_RWSM, + .res_id = 0, + .key = KEY_LEVEL, +}; + +static struct rpmpd ssc_mx_rwlm0_vfl = { + .pd = { .name = "ssc_mx_vfl", }, + .res_type = RPMPD_RWLM, + .res_id = 0, + .key = KEY_FLOOR_LEVEL, +}; + +static struct rpmpd ssc_mx_rwsm0_vfl = { + .pd = { .name = "ssc_mx_vfl", }, + .res_type = RPMPD_RWSM, + .res_id = 0, + .key = KEY_FLOOR_LEVEL, +}; + +static struct rpmpd *mdm9607_rpmpds[] = { + [MDM9607_VDDCX] = &cx_s3a_lvl, + [MDM9607_VDDCX_AO] = &cx_s3a_lvl_ao, + [MDM9607_VDDCX_VFL] = &cx_s3a_vfl, + [MDM9607_VDDMX] = &mx_l12a_lvl, + [MDM9607_VDDMX_AO] = &mx_l12a_lvl_ao, + [MDM9607_VDDMX_VFL] = &mx_l12a_vfl, +}; + +static const struct rpmpd_desc mdm9607_desc = { + .rpmpds = mdm9607_rpmpds, + .num_pds = ARRAY_SIZE(mdm9607_rpmpds), + .max_state = RPM_SMD_LEVEL_TURBO, +}; + +static struct rpmpd *msm8226_rpmpds[] = { + [MSM8226_VDDCX] = &cx_s1a_corner, + [MSM8226_VDDCX_AO] = &cx_s1a_corner_ao, + [MSM8226_VDDCX_VFC] = &cx_s1a_vfc, +}; + +static const struct rpmpd_desc msm8226_desc = { + .rpmpds = msm8226_rpmpds, + .num_pds = ARRAY_SIZE(msm8226_rpmpds), + .max_state = MAX_CORNER_RPMPD_STATE, +}; + +static struct rpmpd *msm8939_rpmpds[] = { + [MSM8939_VDDMDCX] = &md_s1a_corner, + [MSM8939_VDDMDCX_AO] = &md_s1a_corner_ao, + [MSM8939_VDDMDCX_VFC] = &md_s1a_vfc, + [MSM8939_VDDCX] = &cx_s2a_corner, + [MSM8939_VDDCX_AO] = &cx_s2a_corner_ao, + [MSM8939_VDDCX_VFC] = &cx_s2a_vfc, + [MSM8939_VDDMX] = &mx_l3a_corner, + [MSM8939_VDDMX_AO] = &mx_l3a_corner_ao, +}; + +static const struct rpmpd_desc msm8939_desc = { + .rpmpds = msm8939_rpmpds, + .num_pds = ARRAY_SIZE(msm8939_rpmpds), + .max_state = MAX_CORNER_RPMPD_STATE, +}; + +static struct rpmpd *msm8916_rpmpds[] = { + [MSM8916_VDDCX] = &cx_s1a_corner, + [MSM8916_VDDCX_AO] = &cx_s1a_corner_ao, + [MSM8916_VDDCX_VFC] = &cx_s1a_vfc, + [MSM8916_VDDMX] = &mx_l3a_corner, + [MSM8916_VDDMX_AO] = &mx_l3a_corner_ao, +}; + +static const struct rpmpd_desc msm8916_desc = { + .rpmpds = msm8916_rpmpds, + .num_pds = ARRAY_SIZE(msm8916_rpmpds), + .max_state = MAX_CORNER_RPMPD_STATE, +}; + +static struct rpmpd *msm8953_rpmpds[] = { + [MSM8953_VDDMD] = &md_s1a_lvl, + [MSM8953_VDDMD_AO] = &md_s1a_lvl_ao, + [MSM8953_VDDCX] = &cx_s2a_lvl, + [MSM8953_VDDCX_AO] = &cx_s2a_lvl_ao, + [MSM8953_VDDCX_VFL] = &cx_s2a_vfl, + [MSM8953_VDDMX] = &mx_s7a_lvl, + [MSM8953_VDDMX_AO] = &mx_s7a_lvl_ao, +}; + +static const struct rpmpd_desc msm8953_desc = { + .rpmpds = msm8953_rpmpds, + .num_pds = ARRAY_SIZE(msm8953_rpmpds), + .max_state = RPM_SMD_LEVEL_TURBO, +}; + +static struct rpmpd *msm8976_rpmpds[] = { + [MSM8976_VDDCX] = &cx_s2a_lvl, + [MSM8976_VDDCX_AO] = &cx_s2a_lvl_ao, + [MSM8976_VDDCX_VFL] = &cx_rwsc2_vfl, + [MSM8976_VDDMX] = &mx_s6a_lvl, + [MSM8976_VDDMX_AO] = &mx_s6a_lvl_ao, + [MSM8976_VDDMX_VFL] = &mx_rwsm6_vfl, +}; + +static const struct rpmpd_desc msm8976_desc = { + .rpmpds = msm8976_rpmpds, + .num_pds = ARRAY_SIZE(msm8976_rpmpds), + .max_state = RPM_SMD_LEVEL_TURBO_HIGH, +}; + +static 
struct rpmpd *msm8994_rpmpds[] = { + [MSM8994_VDDCX] = &cx_s1a_corner, + [MSM8994_VDDCX_AO] = &cx_s1a_corner_ao, + [MSM8994_VDDCX_VFC] = &cx_s1a_vfc, + [MSM8994_VDDMX] = &mx_s2a_corner, + [MSM8994_VDDMX_AO] = &mx_s2a_corner_ao, + + /* Attention! *Some* 8994 boards with pm8004 may use SMPC here! */ + [MSM8994_VDDGFX] = &gfx_s2b_corner, + [MSM8994_VDDGFX_VFC] = &gfx_s2b_vfc, +}; + +static const struct rpmpd_desc msm8994_desc = { + .rpmpds = msm8994_rpmpds, + .num_pds = ARRAY_SIZE(msm8994_rpmpds), + .max_state = MAX_CORNER_RPMPD_STATE, +}; + +static struct rpmpd *msm8996_rpmpds[] = { + [MSM8996_VDDCX] = &cx_s1a_corner, + [MSM8996_VDDCX_AO] = &cx_s1a_corner_ao, + [MSM8996_VDDCX_VFC] = &cx_s1a_vfc, + [MSM8996_VDDMX] = &mx_s2a_corner, + [MSM8996_VDDMX_AO] = &mx_s2a_corner_ao, + [MSM8996_VDDSSCX] = &ssc_cx_l26a_corner, + [MSM8996_VDDSSCX_VFC] = &ssc_cx_l26a_vfc, +}; + +static const struct rpmpd_desc msm8996_desc = { + .rpmpds = msm8996_rpmpds, + .num_pds = ARRAY_SIZE(msm8996_rpmpds), + .max_state = MAX_CORNER_RPMPD_STATE, +}; + +static struct rpmpd *msm8998_rpmpds[] = { + [MSM8998_VDDCX] = &cx_rwcx0_lvl, + [MSM8998_VDDCX_AO] = &cx_rwcx0_lvl_ao, + [MSM8998_VDDCX_VFL] = &cx_rwcx0_vfl, + [MSM8998_VDDMX] = &mx_rwmx0_lvl, + [MSM8998_VDDMX_AO] = &mx_rwmx0_lvl_ao, + [MSM8998_VDDMX_VFL] = &mx_rwmx0_vfl, + [MSM8998_SSCCX] = &ssc_cx_rwsc0_lvl, + [MSM8998_SSCCX_VFL] = &ssc_cx_rwsc0_vfl, + [MSM8998_SSCMX] = &ssc_mx_rwsm0_lvl, + [MSM8998_SSCMX_VFL] = &ssc_mx_rwsm0_vfl, +}; + +static const struct rpmpd_desc msm8998_desc = { + .rpmpds = msm8998_rpmpds, + .num_pds = ARRAY_SIZE(msm8998_rpmpds), + .max_state = RPM_SMD_LEVEL_BINNING, +}; + +static struct rpmpd *qcs404_rpmpds[] = { + [QCS404_VDDMX] = &mx_rwmx0_lvl, + [QCS404_VDDMX_AO] = &mx_rwmx0_lvl_ao, + [QCS404_VDDMX_VFL] = &mx_rwmx0_vfl, + [QCS404_LPICX] = &lpi_cx_rwlc0_lvl, + [QCS404_LPICX_VFL] = &lpi_cx_rwlc0_vfl, + [QCS404_LPIMX] = &lpi_mx_rwlm0_lvl, + [QCS404_LPIMX_VFL] = &lpi_mx_rwlm0_vfl, +}; + +static const struct rpmpd_desc qcs404_desc = { + .rpmpds = qcs404_rpmpds, + .num_pds = ARRAY_SIZE(qcs404_rpmpds), + .max_state = RPM_SMD_LEVEL_BINNING, +}; + +static struct rpmpd *sdm660_rpmpds[] = { + [SDM660_VDDCX] = &cx_rwcx0_lvl, + [SDM660_VDDCX_AO] = &cx_rwcx0_lvl_ao, + [SDM660_VDDCX_VFL] = &cx_rwcx0_vfl, + [SDM660_VDDMX] = &mx_rwmx0_lvl, + [SDM660_VDDMX_AO] = &mx_rwmx0_lvl_ao, + [SDM660_VDDMX_VFL] = &mx_rwmx0_vfl, + [SDM660_SSCCX] = &ssc_cx_rwlc0_lvl, + [SDM660_SSCCX_VFL] = &ssc_cx_rwlc0_vfl, + [SDM660_SSCMX] = &ssc_mx_rwlm0_lvl, + [SDM660_SSCMX_VFL] = &ssc_mx_rwlm0_vfl, +}; + +static const struct rpmpd_desc sdm660_desc = { + .rpmpds = sdm660_rpmpds, + .num_pds = ARRAY_SIZE(sdm660_rpmpds), + .max_state = RPM_SMD_LEVEL_TURBO, +}; + +static struct rpmpd *sm6115_rpmpds[] = { + [SM6115_VDDCX] = &cx_rwcx0_lvl, + [SM6115_VDDCX_AO] = &cx_rwcx0_lvl_ao, + [SM6115_VDDCX_VFL] = &cx_rwcx0_vfl, + [SM6115_VDDMX] = &mx_rwmx0_lvl, + [SM6115_VDDMX_AO] = &mx_rwmx0_lvl_ao, + [SM6115_VDDMX_VFL] = &mx_rwmx0_vfl, + [SM6115_VDD_LPI_CX] = &lpi_cx_rwlc0_lvl, + [SM6115_VDD_LPI_MX] = &lpi_mx_rwlm0_lvl, +}; + +static const struct rpmpd_desc sm6115_desc = { + .rpmpds = sm6115_rpmpds, + .num_pds = ARRAY_SIZE(sm6115_rpmpds), + .max_state = RPM_SMD_LEVEL_TURBO_NO_CPR, +}; + +static struct rpmpd *sm6125_rpmpds[] = { + [SM6125_VDDCX] = &cx_rwcx0_lvl, + [SM6125_VDDCX_AO] = &cx_rwcx0_lvl_ao, + [SM6125_VDDCX_VFL] = &cx_rwcx0_vfl, + [SM6125_VDDMX] = &mx_rwmx0_lvl, + [SM6125_VDDMX_AO] = &mx_rwmx0_lvl_ao, + [SM6125_VDDMX_VFL] = &mx_rwmx0_vfl, +}; + +static const struct rpmpd_desc sm6125_desc = { + 
.rpmpds = sm6125_rpmpds, + .num_pds = ARRAY_SIZE(sm6125_rpmpds), + .max_state = RPM_SMD_LEVEL_BINNING, +}; + +static struct rpmpd *sm6375_rpmpds[] = { + [SM6375_VDDCX] = &cx_rwcx0_lvl, + [SM6375_VDDCX_AO] = &cx_rwcx0_lvl_ao, + [SM6375_VDDCX_VFL] = &cx_rwcx0_vfl, + [SM6375_VDDMX] = &mx_rwmx0_lvl, + [SM6375_VDDMX_AO] = &mx_rwmx0_lvl_ao, + [SM6375_VDDMX_VFL] = &mx_rwmx0_vfl, + [SM6375_VDDGX] = &gx_rwgx0_lvl, + [SM6375_VDDGX_AO] = &gx_rwgx0_lvl_ao, + [SM6375_VDD_LPI_CX] = &lpi_cx_rwlc0_lvl, + [SM6375_VDD_LPI_MX] = &lpi_mx_rwlm0_lvl, +}; + +static const struct rpmpd_desc sm6375_desc = { + .rpmpds = sm6375_rpmpds, + .num_pds = ARRAY_SIZE(sm6375_rpmpds), + .max_state = RPM_SMD_LEVEL_TURBO_NO_CPR, +}; + +static struct rpmpd *qcm2290_rpmpds[] = { + [QCM2290_VDDCX] = &cx_rwcx0_lvl, + [QCM2290_VDDCX_AO] = &cx_rwcx0_lvl_ao, + [QCM2290_VDDCX_VFL] = &cx_rwcx0_vfl, + [QCM2290_VDDMX] = &mx_rwmx0_lvl, + [QCM2290_VDDMX_AO] = &mx_rwmx0_lvl_ao, + [QCM2290_VDDMX_VFL] = &mx_rwmx0_vfl, + [QCM2290_VDD_LPI_CX] = &lpi_cx_rwlc0_lvl, + [QCM2290_VDD_LPI_MX] = &lpi_mx_rwlm0_lvl, +}; + +static const struct rpmpd_desc qcm2290_desc = { + .rpmpds = qcm2290_rpmpds, + .num_pds = ARRAY_SIZE(qcm2290_rpmpds), + .max_state = RPM_SMD_LEVEL_TURBO_NO_CPR, +}; + +static const struct of_device_id rpmpd_match_table[] = { + { .compatible = "qcom,mdm9607-rpmpd", .data = &mdm9607_desc }, + { .compatible = "qcom,msm8226-rpmpd", .data = &msm8226_desc }, + { .compatible = "qcom,msm8909-rpmpd", .data = &msm8916_desc }, + { .compatible = "qcom,msm8916-rpmpd", .data = &msm8916_desc }, + { .compatible = "qcom,msm8939-rpmpd", .data = &msm8939_desc }, + { .compatible = "qcom,msm8953-rpmpd", .data = &msm8953_desc }, + { .compatible = "qcom,msm8976-rpmpd", .data = &msm8976_desc }, + { .compatible = "qcom,msm8994-rpmpd", .data = &msm8994_desc }, + { .compatible = "qcom,msm8996-rpmpd", .data = &msm8996_desc }, + { .compatible = "qcom,msm8998-rpmpd", .data = &msm8998_desc }, + { .compatible = "qcom,qcm2290-rpmpd", .data = &qcm2290_desc }, + { .compatible = "qcom,qcs404-rpmpd", .data = &qcs404_desc }, + { .compatible = "qcom,sdm660-rpmpd", .data = &sdm660_desc }, + { .compatible = "qcom,sm6115-rpmpd", .data = &sm6115_desc }, + { .compatible = "qcom,sm6125-rpmpd", .data = &sm6125_desc }, + { .compatible = "qcom,sm6375-rpmpd", .data = &sm6375_desc }, + { } +}; +MODULE_DEVICE_TABLE(of, rpmpd_match_table); + +static int rpmpd_send_enable(struct rpmpd *pd, bool enable) +{ + struct rpmpd_req req = { + .key = KEY_ENABLE, + .nbytes = cpu_to_le32(sizeof(u32)), + .value = cpu_to_le32(enable), + }; + + return qcom_rpm_smd_write(pd->rpm, QCOM_SMD_RPM_ACTIVE_STATE, + pd->res_type, pd->res_id, &req, sizeof(req)); +} + +static int rpmpd_send_corner(struct rpmpd *pd, int state, unsigned int corner) +{ + struct rpmpd_req req = { + .key = pd->key, + .nbytes = cpu_to_le32(sizeof(u32)), + .value = cpu_to_le32(corner), + }; + + return qcom_rpm_smd_write(pd->rpm, state, pd->res_type, pd->res_id, + &req, sizeof(req)); +}; + +static void to_active_sleep(struct rpmpd *pd, unsigned int corner, + unsigned int *active, unsigned int *sleep) +{ + *active = corner; + + if (pd->active_only) + *sleep = 0; + else + *sleep = *active; +} + +static int rpmpd_aggregate_corner(struct rpmpd *pd) +{ + int ret; + struct rpmpd *peer = pd->peer; + unsigned int active_corner, sleep_corner; + unsigned int this_active_corner = 0, this_sleep_corner = 0; + unsigned int peer_active_corner = 0, peer_sleep_corner = 0; + + to_active_sleep(pd, pd->corner, &this_active_corner, &this_sleep_corner); + + if 
(peer && peer->enabled) + to_active_sleep(peer, peer->corner, &peer_active_corner, + &peer_sleep_corner); + + active_corner = max(this_active_corner, peer_active_corner); + + ret = rpmpd_send_corner(pd, QCOM_SMD_RPM_ACTIVE_STATE, active_corner); + if (ret) + return ret; + + sleep_corner = max(this_sleep_corner, peer_sleep_corner); + + return rpmpd_send_corner(pd, QCOM_SMD_RPM_SLEEP_STATE, sleep_corner); +} + +static int rpmpd_power_on(struct generic_pm_domain *domain) +{ + int ret; + struct rpmpd *pd = domain_to_rpmpd(domain); + + mutex_lock(&rpmpd_lock); + + ret = rpmpd_send_enable(pd, true); + if (ret) + goto out; + + pd->enabled = true; + + if (pd->corner) + ret = rpmpd_aggregate_corner(pd); + +out: + mutex_unlock(&rpmpd_lock); + + return ret; +} + +static int rpmpd_power_off(struct generic_pm_domain *domain) +{ + int ret; + struct rpmpd *pd = domain_to_rpmpd(domain); + + mutex_lock(&rpmpd_lock); + + ret = rpmpd_send_enable(pd, false); + if (!ret) + pd->enabled = false; + + mutex_unlock(&rpmpd_lock); + + return ret; +} + +static int rpmpd_set_performance(struct generic_pm_domain *domain, + unsigned int state) +{ + int ret = 0; + struct rpmpd *pd = domain_to_rpmpd(domain); + + if (state > pd->max_state) + state = pd->max_state; + + mutex_lock(&rpmpd_lock); + + pd->corner = state; + + /* Always send updates for vfc and vfl */ + if (!pd->enabled && pd->key != cpu_to_le32(KEY_FLOOR_CORNER) && + pd->key != cpu_to_le32(KEY_FLOOR_LEVEL)) + goto out; + + ret = rpmpd_aggregate_corner(pd); + +out: + mutex_unlock(&rpmpd_lock); + + return ret; +} + +static unsigned int rpmpd_get_performance(struct generic_pm_domain *genpd, + struct dev_pm_opp *opp) +{ + return dev_pm_opp_get_level(opp); +} + +static int rpmpd_probe(struct platform_device *pdev) +{ + int i; + size_t num; + struct genpd_onecell_data *data; + struct qcom_smd_rpm *rpm; + struct rpmpd **rpmpds; + const struct rpmpd_desc *desc; + + rpm = dev_get_drvdata(pdev->dev.parent); + if (!rpm) { + dev_err(&pdev->dev, "Unable to retrieve handle to RPM\n"); + return -ENODEV; + } + + desc = of_device_get_match_data(&pdev->dev); + if (!desc) + return -EINVAL; + + rpmpds = desc->rpmpds; + num = desc->num_pds; + + data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->domains = devm_kcalloc(&pdev->dev, num, sizeof(*data->domains), + GFP_KERNEL); + if (!data->domains) + return -ENOMEM; + + data->num_domains = num; + + for (i = 0; i < num; i++) { + if (!rpmpds[i]) { + dev_warn(&pdev->dev, "rpmpds[] with empty entry at index=%d\n", + i); + continue; + } + + rpmpds[i]->rpm = rpm; + rpmpds[i]->max_state = desc->max_state; + rpmpds[i]->pd.power_off = rpmpd_power_off; + rpmpds[i]->pd.power_on = rpmpd_power_on; + rpmpds[i]->pd.set_performance_state = rpmpd_set_performance; + rpmpds[i]->pd.opp_to_performance_state = rpmpd_get_performance; + pm_genpd_init(&rpmpds[i]->pd, NULL, true); + + data->domains[i] = &rpmpds[i]->pd; + } + + /* Add subdomains */ + for (i = 0; i < num; i++) { + if (!rpmpds[i]) + continue; + + if (rpmpds[i]->parent) + pm_genpd_add_subdomain(rpmpds[i]->parent, &rpmpds[i]->pd); + } + + return of_genpd_add_provider_onecell(pdev->dev.of_node, data); +} + +static struct platform_driver rpmpd_driver = { + .driver = { + .name = "qcom-rpmpd", + .of_match_table = rpmpd_match_table, + .suppress_bind_attrs = true, + }, + .probe = rpmpd_probe, +}; + +static int __init rpmpd_init(void) +{ + return platform_driver_register(&rpmpd_driver); +} +core_initcall(rpmpd_init); + +MODULE_DESCRIPTION("Qualcomm 
Technologies, Inc. RPM Power Domain Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile index 99114c71092b..f548a7150bb2 100644 --- a/drivers/soc/qcom/Makefile +++ b/drivers/soc/qcom/Makefile @@ -3,7 +3,6 @@ CFLAGS_rpmh-rsc.o := -I$(src) obj-$(CONFIG_QCOM_AOSS_QMP) += qcom_aoss.o obj-$(CONFIG_QCOM_GENI_SE) += qcom-geni-se.o obj-$(CONFIG_QCOM_COMMAND_DB) += cmd-db.o -obj-$(CONFIG_QCOM_CPR) += cpr.o obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o obj-$(CONFIG_QCOM_MDT_LOADER) += mdt_loader.o obj-$(CONFIG_QCOM_OCMEM) += ocmem.o @@ -29,8 +28,6 @@ obj-$(CONFIG_QCOM_STATS) += qcom_stats.o obj-$(CONFIG_QCOM_WCNSS_CTRL) += wcnss_ctrl.o obj-$(CONFIG_QCOM_APR) += apr.o obj-$(CONFIG_QCOM_LLCC) += llcc-qcom.o -obj-$(CONFIG_QCOM_RPMHPD) += rpmhpd.o -obj-$(CONFIG_QCOM_RPMPD) += rpmpd.o obj-$(CONFIG_QCOM_KRYO_L2_ACCESSORS) += kryo-l2-accessors.o obj-$(CONFIG_QCOM_ICC_BWMON) += icc-bwmon.o qcom_ice-objs += ice.o diff --git a/drivers/soc/qcom/cpr.c b/drivers/soc/qcom/cpr.c deleted file mode 100644 index 144ea68e0920..000000000000 --- a/drivers/soc/qcom/cpr.c +++ /dev/null @@ -1,1757 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. - * Copyright (c) 2019, Linaro Limited - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* Register Offsets for RB-CPR and Bit Definitions */ - -/* RBCPR Version Register */ -#define REG_RBCPR_VERSION 0 -#define RBCPR_VER_2 0x02 -#define FLAGS_IGNORE_1ST_IRQ_STATUS BIT(0) - -/* RBCPR Gate Count and Target Registers */ -#define REG_RBCPR_GCNT_TARGET(n) (0x60 + 4 * (n)) - -#define RBCPR_GCNT_TARGET_TARGET_SHIFT 0 -#define RBCPR_GCNT_TARGET_TARGET_MASK GENMASK(11, 0) -#define RBCPR_GCNT_TARGET_GCNT_SHIFT 12 -#define RBCPR_GCNT_TARGET_GCNT_MASK GENMASK(9, 0) - -/* RBCPR Timer Control */ -#define REG_RBCPR_TIMER_INTERVAL 0x44 -#define REG_RBIF_TIMER_ADJUST 0x4c - -#define RBIF_TIMER_ADJ_CONS_UP_MASK GENMASK(3, 0) -#define RBIF_TIMER_ADJ_CONS_UP_SHIFT 0 -#define RBIF_TIMER_ADJ_CONS_DOWN_MASK GENMASK(3, 0) -#define RBIF_TIMER_ADJ_CONS_DOWN_SHIFT 4 -#define RBIF_TIMER_ADJ_CLAMP_INT_MASK GENMASK(7, 0) -#define RBIF_TIMER_ADJ_CLAMP_INT_SHIFT 8 - -/* RBCPR Config Register */ -#define REG_RBIF_LIMIT 0x48 -#define RBIF_LIMIT_CEILING_MASK GENMASK(5, 0) -#define RBIF_LIMIT_CEILING_SHIFT 6 -#define RBIF_LIMIT_FLOOR_BITS 6 -#define RBIF_LIMIT_FLOOR_MASK GENMASK(5, 0) - -#define RBIF_LIMIT_CEILING_DEFAULT RBIF_LIMIT_CEILING_MASK -#define RBIF_LIMIT_FLOOR_DEFAULT 0 - -#define REG_RBIF_SW_VLEVEL 0x94 -#define RBIF_SW_VLEVEL_DEFAULT 0x20 - -#define REG_RBCPR_STEP_QUOT 0x80 -#define RBCPR_STEP_QUOT_STEPQUOT_MASK GENMASK(7, 0) -#define RBCPR_STEP_QUOT_IDLE_CLK_MASK GENMASK(3, 0) -#define RBCPR_STEP_QUOT_IDLE_CLK_SHIFT 8 - -/* RBCPR Control Register */ -#define REG_RBCPR_CTL 0x90 - -#define RBCPR_CTL_LOOP_EN BIT(0) -#define RBCPR_CTL_TIMER_EN BIT(3) -#define RBCPR_CTL_SW_AUTO_CONT_ACK_EN BIT(5) -#define RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN BIT(6) -#define RBCPR_CTL_COUNT_MODE BIT(10) -#define RBCPR_CTL_UP_THRESHOLD_MASK GENMASK(3, 0) -#define RBCPR_CTL_UP_THRESHOLD_SHIFT 24 -#define RBCPR_CTL_DN_THRESHOLD_MASK GENMASK(3, 0) -#define RBCPR_CTL_DN_THRESHOLD_SHIFT 28 - -/* RBCPR Ack/Nack Response */ -#define REG_RBIF_CONT_ACK_CMD 0x98 -#define REG_RBIF_CONT_NACK_CMD 0x9c - -/* RBCPR Result status Register */ -#define 
REG_RBCPR_RESULT_0 0xa0 - -#define RBCPR_RESULT0_BUSY_SHIFT 19 -#define RBCPR_RESULT0_BUSY_MASK BIT(RBCPR_RESULT0_BUSY_SHIFT) -#define RBCPR_RESULT0_ERROR_LT0_SHIFT 18 -#define RBCPR_RESULT0_ERROR_SHIFT 6 -#define RBCPR_RESULT0_ERROR_MASK GENMASK(11, 0) -#define RBCPR_RESULT0_ERROR_STEPS_SHIFT 2 -#define RBCPR_RESULT0_ERROR_STEPS_MASK GENMASK(3, 0) -#define RBCPR_RESULT0_STEP_UP_SHIFT 1 - -/* RBCPR Interrupt Control Register */ -#define REG_RBIF_IRQ_EN(n) (0x100 + 4 * (n)) -#define REG_RBIF_IRQ_CLEAR 0x110 -#define REG_RBIF_IRQ_STATUS 0x114 - -#define CPR_INT_DONE BIT(0) -#define CPR_INT_MIN BIT(1) -#define CPR_INT_DOWN BIT(2) -#define CPR_INT_MID BIT(3) -#define CPR_INT_UP BIT(4) -#define CPR_INT_MAX BIT(5) -#define CPR_INT_CLAMP BIT(6) -#define CPR_INT_ALL (CPR_INT_DONE | CPR_INT_MIN | CPR_INT_DOWN | \ - CPR_INT_MID | CPR_INT_UP | CPR_INT_MAX | CPR_INT_CLAMP) -#define CPR_INT_DEFAULT (CPR_INT_UP | CPR_INT_DOWN) - -#define CPR_NUM_RING_OSC 8 - -/* CPR eFuse parameters */ -#define CPR_FUSE_TARGET_QUOT_BITS_MASK GENMASK(11, 0) - -#define CPR_FUSE_MIN_QUOT_DIFF 50 - -#define FUSE_REVISION_UNKNOWN (-1) - -enum voltage_change_dir { - NO_CHANGE, - DOWN, - UP, -}; - -struct cpr_fuse { - char *ring_osc; - char *init_voltage; - char *quotient; - char *quotient_offset; -}; - -struct fuse_corner_data { - int ref_uV; - int max_uV; - int min_uV; - int max_volt_scale; - int max_quot_scale; - /* fuse quot */ - int quot_offset; - int quot_scale; - int quot_adjust; - /* fuse quot_offset */ - int quot_offset_scale; - int quot_offset_adjust; -}; - -struct cpr_fuses { - int init_voltage_step; - int init_voltage_width; - struct fuse_corner_data *fuse_corner_data; -}; - -struct corner_data { - unsigned int fuse_corner; - unsigned long freq; -}; - -struct cpr_desc { - unsigned int num_fuse_corners; - int min_diff_quot; - int *step_quot; - - unsigned int timer_delay_us; - unsigned int timer_cons_up; - unsigned int timer_cons_down; - unsigned int up_threshold; - unsigned int down_threshold; - unsigned int idle_clocks; - unsigned int gcnt_us; - unsigned int vdd_apc_step_up_limit; - unsigned int vdd_apc_step_down_limit; - unsigned int clamp_timer_interval; - - struct cpr_fuses cpr_fuses; - bool reduce_to_fuse_uV; - bool reduce_to_corner_uV; -}; - -struct acc_desc { - unsigned int enable_reg; - u32 enable_mask; - - struct reg_sequence *config; - struct reg_sequence *settings; - int num_regs_per_fuse; -}; - -struct cpr_acc_desc { - const struct cpr_desc *cpr_desc; - const struct acc_desc *acc_desc; -}; - -struct fuse_corner { - int min_uV; - int max_uV; - int uV; - int quot; - int step_quot; - const struct reg_sequence *accs; - int num_accs; - unsigned long max_freq; - u8 ring_osc_idx; -}; - -struct corner { - int min_uV; - int max_uV; - int uV; - int last_uV; - int quot_adjust; - u32 save_ctl; - u32 save_irq; - unsigned long freq; - struct fuse_corner *fuse_corner; -}; - -struct cpr_drv { - unsigned int num_corners; - unsigned int ref_clk_khz; - - struct generic_pm_domain pd; - struct device *dev; - struct device *attached_cpu_dev; - struct mutex lock; - void __iomem *base; - struct corner *corner; - struct regulator *vdd_apc; - struct clk *cpu_clk; - struct regmap *tcsr; - bool loop_disabled; - u32 gcnt; - unsigned long flags; - - struct fuse_corner *fuse_corners; - struct corner *corners; - - const struct cpr_desc *desc; - const struct acc_desc *acc_desc; - const struct cpr_fuse *cpr_fuses; - - struct dentry *debugfs; -}; - -static bool cpr_is_allowed(struct cpr_drv *drv) -{ - return !drv->loop_disabled; -} - 
-static void cpr_write(struct cpr_drv *drv, u32 offset, u32 value) -{ - writel_relaxed(value, drv->base + offset); -} - -static u32 cpr_read(struct cpr_drv *drv, u32 offset) -{ - return readl_relaxed(drv->base + offset); -} - -static void -cpr_masked_write(struct cpr_drv *drv, u32 offset, u32 mask, u32 value) -{ - u32 val; - - val = readl_relaxed(drv->base + offset); - val &= ~mask; - val |= value & mask; - writel_relaxed(val, drv->base + offset); -} - -static void cpr_irq_clr(struct cpr_drv *drv) -{ - cpr_write(drv, REG_RBIF_IRQ_CLEAR, CPR_INT_ALL); -} - -static void cpr_irq_clr_nack(struct cpr_drv *drv) -{ - cpr_irq_clr(drv); - cpr_write(drv, REG_RBIF_CONT_NACK_CMD, 1); -} - -static void cpr_irq_clr_ack(struct cpr_drv *drv) -{ - cpr_irq_clr(drv); - cpr_write(drv, REG_RBIF_CONT_ACK_CMD, 1); -} - -static void cpr_irq_set(struct cpr_drv *drv, u32 int_bits) -{ - cpr_write(drv, REG_RBIF_IRQ_EN(0), int_bits); -} - -static void cpr_ctl_modify(struct cpr_drv *drv, u32 mask, u32 value) -{ - cpr_masked_write(drv, REG_RBCPR_CTL, mask, value); -} - -static void cpr_ctl_enable(struct cpr_drv *drv, struct corner *corner) -{ - u32 val, mask; - const struct cpr_desc *desc = drv->desc; - - /* Program Consecutive Up & Down */ - val = desc->timer_cons_down << RBIF_TIMER_ADJ_CONS_DOWN_SHIFT; - val |= desc->timer_cons_up << RBIF_TIMER_ADJ_CONS_UP_SHIFT; - mask = RBIF_TIMER_ADJ_CONS_UP_MASK | RBIF_TIMER_ADJ_CONS_DOWN_MASK; - cpr_masked_write(drv, REG_RBIF_TIMER_ADJUST, mask, val); - cpr_masked_write(drv, REG_RBCPR_CTL, - RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN | - RBCPR_CTL_SW_AUTO_CONT_ACK_EN, - corner->save_ctl); - cpr_irq_set(drv, corner->save_irq); - - if (cpr_is_allowed(drv) && corner->max_uV > corner->min_uV) - val = RBCPR_CTL_LOOP_EN; - else - val = 0; - cpr_ctl_modify(drv, RBCPR_CTL_LOOP_EN, val); -} - -static void cpr_ctl_disable(struct cpr_drv *drv) -{ - cpr_irq_set(drv, 0); - cpr_ctl_modify(drv, RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN | - RBCPR_CTL_SW_AUTO_CONT_ACK_EN, 0); - cpr_masked_write(drv, REG_RBIF_TIMER_ADJUST, - RBIF_TIMER_ADJ_CONS_UP_MASK | - RBIF_TIMER_ADJ_CONS_DOWN_MASK, 0); - cpr_irq_clr(drv); - cpr_write(drv, REG_RBIF_CONT_ACK_CMD, 1); - cpr_write(drv, REG_RBIF_CONT_NACK_CMD, 1); - cpr_ctl_modify(drv, RBCPR_CTL_LOOP_EN, 0); -} - -static bool cpr_ctl_is_enabled(struct cpr_drv *drv) -{ - u32 reg_val; - - reg_val = cpr_read(drv, REG_RBCPR_CTL); - return reg_val & RBCPR_CTL_LOOP_EN; -} - -static bool cpr_ctl_is_busy(struct cpr_drv *drv) -{ - u32 reg_val; - - reg_val = cpr_read(drv, REG_RBCPR_RESULT_0); - return reg_val & RBCPR_RESULT0_BUSY_MASK; -} - -static void cpr_corner_save(struct cpr_drv *drv, struct corner *corner) -{ - corner->save_ctl = cpr_read(drv, REG_RBCPR_CTL); - corner->save_irq = cpr_read(drv, REG_RBIF_IRQ_EN(0)); -} - -static void cpr_corner_restore(struct cpr_drv *drv, struct corner *corner) -{ - u32 gcnt, ctl, irq, ro_sel, step_quot; - struct fuse_corner *fuse = corner->fuse_corner; - const struct cpr_desc *desc = drv->desc; - int i; - - ro_sel = fuse->ring_osc_idx; - gcnt = drv->gcnt; - gcnt |= fuse->quot - corner->quot_adjust; - - /* Program the step quotient and idle clocks */ - step_quot = desc->idle_clocks << RBCPR_STEP_QUOT_IDLE_CLK_SHIFT; - step_quot |= fuse->step_quot & RBCPR_STEP_QUOT_STEPQUOT_MASK; - cpr_write(drv, REG_RBCPR_STEP_QUOT, step_quot); - - /* Clear the target quotient value and gate count of all ROs */ - for (i = 0; i < CPR_NUM_RING_OSC; i++) - cpr_write(drv, REG_RBCPR_GCNT_TARGET(i), 0); - - cpr_write(drv, REG_RBCPR_GCNT_TARGET(ro_sel), gcnt); - ctl = 
corner->save_ctl; - cpr_write(drv, REG_RBCPR_CTL, ctl); - irq = corner->save_irq; - cpr_irq_set(drv, irq); - dev_dbg(drv->dev, "gcnt = %#08x, ctl = %#08x, irq = %#08x\n", gcnt, - ctl, irq); -} - -static void cpr_set_acc(struct regmap *tcsr, struct fuse_corner *f, - struct fuse_corner *end) -{ - if (f == end) - return; - - if (f < end) { - for (f += 1; f <= end; f++) - regmap_multi_reg_write(tcsr, f->accs, f->num_accs); - } else { - for (f -= 1; f >= end; f--) - regmap_multi_reg_write(tcsr, f->accs, f->num_accs); - } -} - -static int cpr_pre_voltage(struct cpr_drv *drv, - struct fuse_corner *fuse_corner, - enum voltage_change_dir dir) -{ - struct fuse_corner *prev_fuse_corner = drv->corner->fuse_corner; - - if (drv->tcsr && dir == DOWN) - cpr_set_acc(drv->tcsr, prev_fuse_corner, fuse_corner); - - return 0; -} - -static int cpr_post_voltage(struct cpr_drv *drv, - struct fuse_corner *fuse_corner, - enum voltage_change_dir dir) -{ - struct fuse_corner *prev_fuse_corner = drv->corner->fuse_corner; - - if (drv->tcsr && dir == UP) - cpr_set_acc(drv->tcsr, prev_fuse_corner, fuse_corner); - - return 0; -} - -static int cpr_scale_voltage(struct cpr_drv *drv, struct corner *corner, - int new_uV, enum voltage_change_dir dir) -{ - int ret; - struct fuse_corner *fuse_corner = corner->fuse_corner; - - ret = cpr_pre_voltage(drv, fuse_corner, dir); - if (ret) - return ret; - - ret = regulator_set_voltage(drv->vdd_apc, new_uV, new_uV); - if (ret) { - dev_err_ratelimited(drv->dev, "failed to set apc voltage %d\n", - new_uV); - return ret; - } - - ret = cpr_post_voltage(drv, fuse_corner, dir); - if (ret) - return ret; - - return 0; -} - -static unsigned int cpr_get_cur_perf_state(struct cpr_drv *drv) -{ - return drv->corner ? drv->corner - drv->corners + 1 : 0; -} - -static int cpr_scale(struct cpr_drv *drv, enum voltage_change_dir dir) -{ - u32 val, error_steps, reg_mask; - int last_uV, new_uV, step_uV, ret; - struct corner *corner; - const struct cpr_desc *desc = drv->desc; - - if (dir != UP && dir != DOWN) - return 0; - - step_uV = regulator_get_linear_step(drv->vdd_apc); - if (!step_uV) - return -EINVAL; - - corner = drv->corner; - - val = cpr_read(drv, REG_RBCPR_RESULT_0); - - error_steps = val >> RBCPR_RESULT0_ERROR_STEPS_SHIFT; - error_steps &= RBCPR_RESULT0_ERROR_STEPS_MASK; - last_uV = corner->last_uV; - - if (dir == UP) { - if (desc->clamp_timer_interval && - error_steps < desc->up_threshold) { - /* - * Handle the case where another measurement started - * after the interrupt was triggered due to a core - * exiting from power collapse. 
- */ - error_steps = max(desc->up_threshold, - desc->vdd_apc_step_up_limit); - } - - if (last_uV >= corner->max_uV) { - cpr_irq_clr_nack(drv); - - /* Maximize the UP threshold */ - reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK; - reg_mask <<= RBCPR_CTL_UP_THRESHOLD_SHIFT; - val = reg_mask; - cpr_ctl_modify(drv, reg_mask, val); - - /* Disable UP interrupt */ - cpr_irq_set(drv, CPR_INT_DEFAULT & ~CPR_INT_UP); - - return 0; - } - - if (error_steps > desc->vdd_apc_step_up_limit) - error_steps = desc->vdd_apc_step_up_limit; - - /* Calculate new voltage */ - new_uV = last_uV + error_steps * step_uV; - new_uV = min(new_uV, corner->max_uV); - - dev_dbg(drv->dev, - "UP: -> new_uV: %d last_uV: %d perf state: %u\n", - new_uV, last_uV, cpr_get_cur_perf_state(drv)); - } else { - if (desc->clamp_timer_interval && - error_steps < desc->down_threshold) { - /* - * Handle the case where another measurement started - * after the interrupt was triggered due to a core - * exiting from power collapse. - */ - error_steps = max(desc->down_threshold, - desc->vdd_apc_step_down_limit); - } - - if (last_uV <= corner->min_uV) { - cpr_irq_clr_nack(drv); - - /* Enable auto nack down */ - reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN; - val = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN; - - cpr_ctl_modify(drv, reg_mask, val); - - /* Disable DOWN interrupt */ - cpr_irq_set(drv, CPR_INT_DEFAULT & ~CPR_INT_DOWN); - - return 0; - } - - if (error_steps > desc->vdd_apc_step_down_limit) - error_steps = desc->vdd_apc_step_down_limit; - - /* Calculate new voltage */ - new_uV = last_uV - error_steps * step_uV; - new_uV = max(new_uV, corner->min_uV); - - dev_dbg(drv->dev, - "DOWN: -> new_uV: %d last_uV: %d perf state: %u\n", - new_uV, last_uV, cpr_get_cur_perf_state(drv)); - } - - ret = cpr_scale_voltage(drv, corner, new_uV, dir); - if (ret) { - cpr_irq_clr_nack(drv); - return ret; - } - drv->corner->last_uV = new_uV; - - if (dir == UP) { - /* Disable auto nack down */ - reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN; - val = 0; - } else { - /* Restore default threshold for UP */ - reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK; - reg_mask <<= RBCPR_CTL_UP_THRESHOLD_SHIFT; - val = desc->up_threshold; - val <<= RBCPR_CTL_UP_THRESHOLD_SHIFT; - } - - cpr_ctl_modify(drv, reg_mask, val); - - /* Re-enable default interrupts */ - cpr_irq_set(drv, CPR_INT_DEFAULT); - - /* Ack */ - cpr_irq_clr_ack(drv); - - return 0; -} - -static irqreturn_t cpr_irq_handler(int irq, void *dev) -{ - struct cpr_drv *drv = dev; - const struct cpr_desc *desc = drv->desc; - irqreturn_t ret = IRQ_HANDLED; - u32 val; - - mutex_lock(&drv->lock); - - val = cpr_read(drv, REG_RBIF_IRQ_STATUS); - if (drv->flags & FLAGS_IGNORE_1ST_IRQ_STATUS) - val = cpr_read(drv, REG_RBIF_IRQ_STATUS); - - dev_dbg(drv->dev, "IRQ_STATUS = %#02x\n", val); - - if (!cpr_ctl_is_enabled(drv)) { - dev_dbg(drv->dev, "CPR is disabled\n"); - ret = IRQ_NONE; - } else if (cpr_ctl_is_busy(drv) && !desc->clamp_timer_interval) { - dev_dbg(drv->dev, "CPR measurement is not ready\n"); - } else if (!cpr_is_allowed(drv)) { - val = cpr_read(drv, REG_RBCPR_CTL); - dev_err_ratelimited(drv->dev, - "Interrupt broken? 
RBCPR_CTL = %#02x\n", - val); - ret = IRQ_NONE; - } else { - /* - * Following sequence of handling is as per each IRQ's - * priority - */ - if (val & CPR_INT_UP) { - cpr_scale(drv, UP); - } else if (val & CPR_INT_DOWN) { - cpr_scale(drv, DOWN); - } else if (val & CPR_INT_MIN) { - cpr_irq_clr_nack(drv); - } else if (val & CPR_INT_MAX) { - cpr_irq_clr_nack(drv); - } else if (val & CPR_INT_MID) { - /* RBCPR_CTL_SW_AUTO_CONT_ACK_EN is enabled */ - dev_dbg(drv->dev, "IRQ occurred for Mid Flag\n"); - } else { - dev_dbg(drv->dev, - "IRQ occurred for unknown flag (%#08x)\n", val); - } - - /* Save register values for the corner */ - cpr_corner_save(drv, drv->corner); - } - - mutex_unlock(&drv->lock); - - return ret; -} - -static int cpr_enable(struct cpr_drv *drv) -{ - int ret; - - ret = regulator_enable(drv->vdd_apc); - if (ret) - return ret; - - mutex_lock(&drv->lock); - - if (cpr_is_allowed(drv) && drv->corner) { - cpr_irq_clr(drv); - cpr_corner_restore(drv, drv->corner); - cpr_ctl_enable(drv, drv->corner); - } - - mutex_unlock(&drv->lock); - - return 0; -} - -static int cpr_disable(struct cpr_drv *drv) -{ - mutex_lock(&drv->lock); - - if (cpr_is_allowed(drv)) { - cpr_ctl_disable(drv); - cpr_irq_clr(drv); - } - - mutex_unlock(&drv->lock); - - return regulator_disable(drv->vdd_apc); -} - -static int cpr_config(struct cpr_drv *drv) -{ - int i; - u32 val, gcnt; - struct corner *corner; - const struct cpr_desc *desc = drv->desc; - - /* Disable interrupt and CPR */ - cpr_write(drv, REG_RBIF_IRQ_EN(0), 0); - cpr_write(drv, REG_RBCPR_CTL, 0); - - /* Program the default HW ceiling, floor and vlevel */ - val = (RBIF_LIMIT_CEILING_DEFAULT & RBIF_LIMIT_CEILING_MASK) - << RBIF_LIMIT_CEILING_SHIFT; - val |= RBIF_LIMIT_FLOOR_DEFAULT & RBIF_LIMIT_FLOOR_MASK; - cpr_write(drv, REG_RBIF_LIMIT, val); - cpr_write(drv, REG_RBIF_SW_VLEVEL, RBIF_SW_VLEVEL_DEFAULT); - - /* - * Clear the target quotient value and gate count of all - * ring oscillators - */ - for (i = 0; i < CPR_NUM_RING_OSC; i++) - cpr_write(drv, REG_RBCPR_GCNT_TARGET(i), 0); - - /* Init and save gcnt */ - gcnt = (drv->ref_clk_khz * desc->gcnt_us) / 1000; - gcnt = gcnt & RBCPR_GCNT_TARGET_GCNT_MASK; - gcnt <<= RBCPR_GCNT_TARGET_GCNT_SHIFT; - drv->gcnt = gcnt; - - /* Program the delay count for the timer */ - val = (drv->ref_clk_khz * desc->timer_delay_us) / 1000; - cpr_write(drv, REG_RBCPR_TIMER_INTERVAL, val); - dev_dbg(drv->dev, "Timer count: %#0x (for %d us)\n", val, - desc->timer_delay_us); - - /* Program Consecutive Up & Down */ - val = desc->timer_cons_down << RBIF_TIMER_ADJ_CONS_DOWN_SHIFT; - val |= desc->timer_cons_up << RBIF_TIMER_ADJ_CONS_UP_SHIFT; - val |= desc->clamp_timer_interval << RBIF_TIMER_ADJ_CLAMP_INT_SHIFT; - cpr_write(drv, REG_RBIF_TIMER_ADJUST, val); - - /* Program the control register */ - val = desc->up_threshold << RBCPR_CTL_UP_THRESHOLD_SHIFT; - val |= desc->down_threshold << RBCPR_CTL_DN_THRESHOLD_SHIFT; - val |= RBCPR_CTL_TIMER_EN | RBCPR_CTL_COUNT_MODE; - val |= RBCPR_CTL_SW_AUTO_CONT_ACK_EN; - cpr_write(drv, REG_RBCPR_CTL, val); - - for (i = 0; i < drv->num_corners; i++) { - corner = &drv->corners[i]; - corner->save_ctl = val; - corner->save_irq = CPR_INT_DEFAULT; - } - - cpr_irq_set(drv, CPR_INT_DEFAULT); - - val = cpr_read(drv, REG_RBCPR_VERSION); - if (val <= RBCPR_VER_2) - drv->flags |= FLAGS_IGNORE_1ST_IRQ_STATUS; - - return 0; -} - -static int cpr_set_performance_state(struct generic_pm_domain *domain, - unsigned int state) -{ - struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd); - struct corner *corner, 
*end; - enum voltage_change_dir dir; - int ret = 0, new_uV; - - mutex_lock(&drv->lock); - - dev_dbg(drv->dev, "%s: setting perf state: %u (prev state: %u)\n", - __func__, state, cpr_get_cur_perf_state(drv)); - - /* - * Determine new corner we're going to. - * Remove one since lowest performance state is 1. - */ - corner = drv->corners + state - 1; - end = &drv->corners[drv->num_corners - 1]; - if (corner > end || corner < drv->corners) { - ret = -EINVAL; - goto unlock; - } - - /* Determine direction */ - if (drv->corner > corner) - dir = DOWN; - else if (drv->corner < corner) - dir = UP; - else - dir = NO_CHANGE; - - if (cpr_is_allowed(drv)) - new_uV = corner->last_uV; - else - new_uV = corner->uV; - - if (cpr_is_allowed(drv)) - cpr_ctl_disable(drv); - - ret = cpr_scale_voltage(drv, corner, new_uV, dir); - if (ret) - goto unlock; - - if (cpr_is_allowed(drv)) { - cpr_irq_clr(drv); - if (drv->corner != corner) - cpr_corner_restore(drv, corner); - cpr_ctl_enable(drv, corner); - } - - drv->corner = corner; - -unlock: - mutex_unlock(&drv->lock); - - return ret; -} - -static int -cpr_populate_ring_osc_idx(struct cpr_drv *drv) -{ - struct fuse_corner *fuse = drv->fuse_corners; - struct fuse_corner *end = fuse + drv->desc->num_fuse_corners; - const struct cpr_fuse *fuses = drv->cpr_fuses; - u32 data; - int ret; - - for (; fuse < end; fuse++, fuses++) { - ret = nvmem_cell_read_variable_le_u32(drv->dev, fuses->ring_osc, &data); - if (ret) - return ret; - fuse->ring_osc_idx = data; - } - - return 0; -} - -static int cpr_read_fuse_uV(const struct cpr_desc *desc, - const struct fuse_corner_data *fdata, - const char *init_v_efuse, - int step_volt, - struct cpr_drv *drv) -{ - int step_size_uV, steps, uV; - u32 bits = 0; - int ret; - - ret = nvmem_cell_read_variable_le_u32(drv->dev, init_v_efuse, &bits); - if (ret) - return ret; - - steps = bits & ~BIT(desc->cpr_fuses.init_voltage_width - 1); - /* Not two's complement.. instead highest bit is sign bit */ - if (bits & BIT(desc->cpr_fuses.init_voltage_width - 1)) - steps = -steps; - - step_size_uV = desc->cpr_fuses.init_voltage_step; - - uV = fdata->ref_uV + steps * step_size_uV; - return DIV_ROUND_UP(uV, step_volt) * step_volt; -} - -static int cpr_fuse_corner_init(struct cpr_drv *drv) -{ - const struct cpr_desc *desc = drv->desc; - const struct cpr_fuse *fuses = drv->cpr_fuses; - const struct acc_desc *acc_desc = drv->acc_desc; - int i; - unsigned int step_volt; - struct fuse_corner_data *fdata; - struct fuse_corner *fuse, *end; - int uV; - const struct reg_sequence *accs; - int ret; - - accs = acc_desc->settings; - - step_volt = regulator_get_linear_step(drv->vdd_apc); - if (!step_volt) - return -EINVAL; - - /* Populate fuse_corner members */ - fuse = drv->fuse_corners; - end = &fuse[desc->num_fuse_corners - 1]; - fdata = desc->cpr_fuses.fuse_corner_data; - - for (i = 0; fuse <= end; fuse++, fuses++, i++, fdata++) { - /* - * Update SoC voltages: platforms might choose a different - * regulators than the one used to characterize the algorithms - * (ie, init_voltage_step). 
- */ - fdata->min_uV = roundup(fdata->min_uV, step_volt); - fdata->max_uV = roundup(fdata->max_uV, step_volt); - - /* Populate uV */ - uV = cpr_read_fuse_uV(desc, fdata, fuses->init_voltage, - step_volt, drv); - if (uV < 0) - return uV; - - fuse->min_uV = fdata->min_uV; - fuse->max_uV = fdata->max_uV; - fuse->uV = clamp(uV, fuse->min_uV, fuse->max_uV); - - if (fuse == end) { - /* - * Allow the highest fuse corner's PVS voltage to - * define the ceiling voltage for that corner in order - * to support SoC's in which variable ceiling values - * are required. - */ - end->max_uV = max(end->max_uV, end->uV); - } - - /* Populate target quotient by scaling */ - ret = nvmem_cell_read_variable_le_u32(drv->dev, fuses->quotient, &fuse->quot); - if (ret) - return ret; - - fuse->quot *= fdata->quot_scale; - fuse->quot += fdata->quot_offset; - fuse->quot += fdata->quot_adjust; - fuse->step_quot = desc->step_quot[fuse->ring_osc_idx]; - - /* Populate acc settings */ - fuse->accs = accs; - fuse->num_accs = acc_desc->num_regs_per_fuse; - accs += acc_desc->num_regs_per_fuse; - } - - /* - * Restrict all fuse corner PVS voltages based upon per corner - * ceiling and floor voltages. - */ - for (fuse = drv->fuse_corners, i = 0; fuse <= end; fuse++, i++) { - if (fuse->uV > fuse->max_uV) - fuse->uV = fuse->max_uV; - else if (fuse->uV < fuse->min_uV) - fuse->uV = fuse->min_uV; - - ret = regulator_is_supported_voltage(drv->vdd_apc, - fuse->min_uV, - fuse->min_uV); - if (!ret) { - dev_err(drv->dev, - "min uV: %d (fuse corner: %d) not supported by regulator\n", - fuse->min_uV, i); - return -EINVAL; - } - - ret = regulator_is_supported_voltage(drv->vdd_apc, - fuse->max_uV, - fuse->max_uV); - if (!ret) { - dev_err(drv->dev, - "max uV: %d (fuse corner: %d) not supported by regulator\n", - fuse->max_uV, i); - return -EINVAL; - } - - dev_dbg(drv->dev, - "fuse corner %d: [%d %d %d] RO%hhu quot %d squot %d\n", - i, fuse->min_uV, fuse->uV, fuse->max_uV, - fuse->ring_osc_idx, fuse->quot, fuse->step_quot); - } - - return 0; -} - -static int cpr_calculate_scaling(const char *quot_offset, - struct cpr_drv *drv, - const struct fuse_corner_data *fdata, - const struct corner *corner) -{ - u32 quot_diff = 0; - unsigned long freq_diff; - int scaling; - const struct fuse_corner *fuse, *prev_fuse; - int ret; - - fuse = corner->fuse_corner; - prev_fuse = fuse - 1; - - if (quot_offset) { - ret = nvmem_cell_read_variable_le_u32(drv->dev, quot_offset, "_diff); - if (ret) - return ret; - - quot_diff *= fdata->quot_offset_scale; - quot_diff += fdata->quot_offset_adjust; - } else { - quot_diff = fuse->quot - prev_fuse->quot; - } - - freq_diff = fuse->max_freq - prev_fuse->max_freq; - freq_diff /= 1000000; /* Convert to MHz */ - scaling = 1000 * quot_diff / freq_diff; - return min(scaling, fdata->max_quot_scale); -} - -static int cpr_interpolate(const struct corner *corner, int step_volt, - const struct fuse_corner_data *fdata) -{ - unsigned long f_high, f_low, f_diff; - int uV_high, uV_low, uV; - u64 temp, temp_limit; - const struct fuse_corner *fuse, *prev_fuse; - - fuse = corner->fuse_corner; - prev_fuse = fuse - 1; - - f_high = fuse->max_freq; - f_low = prev_fuse->max_freq; - uV_high = fuse->uV; - uV_low = prev_fuse->uV; - f_diff = fuse->max_freq - corner->freq; - - /* - * Don't interpolate in the wrong direction. This could happen - * if the adjusted fuse voltage overlaps with the previous fuse's - * adjusted voltage. 
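/*
 * Illustrative sketch, not taken from this patch: the sign-magnitude
 * decode performed by cpr_read_fuse_uV() earlier. The 6-bit fuse value,
 * step size and reference voltage below are hypothetical.
 */
#include <stdio.h>

int main(void)
{
        unsigned int bits = 0x23;               /* sign bit set, magnitude 3 */
        int width = 6;                          /* init_voltage_width */
        int step_uV = 8000, ref_uV = 1224000;
        int steps = bits & ~(1u << (width - 1));

        /* Not two's complement: the highest bit only carries the sign */
        if (bits & (1u << (width - 1)))
                steps = -steps;

        printf("steps=%d -> %d uV\n", steps, ref_uV + steps * step_uV);
        return 0;
}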
- */ - if (f_high <= f_low || uV_high <= uV_low || f_high <= corner->freq) - return corner->uV; - - temp = f_diff * (uV_high - uV_low); - temp = div64_ul(temp, f_high - f_low); - - /* - * max_volt_scale has units of uV/MHz while freq values - * have units of Hz. Divide by 1000000 to convert to. - */ - temp_limit = f_diff * fdata->max_volt_scale; - do_div(temp_limit, 1000000); - - uV = uV_high - min(temp, temp_limit); - return roundup(uV, step_volt); -} - -static unsigned int cpr_get_fuse_corner(struct dev_pm_opp *opp) -{ - struct device_node *np; - unsigned int fuse_corner = 0; - - np = dev_pm_opp_get_of_node(opp); - if (of_property_read_u32(np, "qcom,opp-fuse-level", &fuse_corner)) - pr_err("%s: missing 'qcom,opp-fuse-level' property\n", - __func__); - - of_node_put(np); - - return fuse_corner; -} - -static unsigned long cpr_get_opp_hz_for_req(struct dev_pm_opp *ref, - struct device *cpu_dev) -{ - u64 rate = 0; - struct device_node *ref_np; - struct device_node *desc_np; - struct device_node *child_np = NULL; - struct device_node *child_req_np = NULL; - - desc_np = dev_pm_opp_of_get_opp_desc_node(cpu_dev); - if (!desc_np) - return 0; - - ref_np = dev_pm_opp_get_of_node(ref); - if (!ref_np) - goto out_ref; - - do { - of_node_put(child_req_np); - child_np = of_get_next_available_child(desc_np, child_np); - child_req_np = of_parse_phandle(child_np, "required-opps", 0); - } while (child_np && child_req_np != ref_np); - - if (child_np && child_req_np == ref_np) - of_property_read_u64(child_np, "opp-hz", &rate); - - of_node_put(child_req_np); - of_node_put(child_np); - of_node_put(ref_np); -out_ref: - of_node_put(desc_np); - - return (unsigned long) rate; -} - -static int cpr_corner_init(struct cpr_drv *drv) -{ - const struct cpr_desc *desc = drv->desc; - const struct cpr_fuse *fuses = drv->cpr_fuses; - int i, level, scaling = 0; - unsigned int fnum, fc; - const char *quot_offset; - struct fuse_corner *fuse, *prev_fuse; - struct corner *corner, *end; - struct corner_data *cdata; - const struct fuse_corner_data *fdata; - bool apply_scaling; - unsigned long freq_diff, freq_diff_mhz; - unsigned long freq; - int step_volt = regulator_get_linear_step(drv->vdd_apc); - struct dev_pm_opp *opp; - - if (!step_volt) - return -EINVAL; - - corner = drv->corners; - end = &corner[drv->num_corners - 1]; - - cdata = devm_kcalloc(drv->dev, drv->num_corners, - sizeof(struct corner_data), - GFP_KERNEL); - if (!cdata) - return -ENOMEM; - - /* - * Store maximum frequency for each fuse corner based on the frequency - * plan - */ - for (level = 1; level <= drv->num_corners; level++) { - opp = dev_pm_opp_find_level_exact(&drv->pd.dev, level); - if (IS_ERR(opp)) - return -EINVAL; - fc = cpr_get_fuse_corner(opp); - if (!fc) { - dev_pm_opp_put(opp); - return -EINVAL; - } - fnum = fc - 1; - freq = cpr_get_opp_hz_for_req(opp, drv->attached_cpu_dev); - if (!freq) { - dev_pm_opp_put(opp); - return -EINVAL; - } - cdata[level - 1].fuse_corner = fnum; - cdata[level - 1].freq = freq; - - fuse = &drv->fuse_corners[fnum]; - dev_dbg(drv->dev, "freq: %lu level: %u fuse level: %u\n", - freq, dev_pm_opp_get_level(opp) - 1, fnum); - if (freq > fuse->max_freq) - fuse->max_freq = freq; - dev_pm_opp_put(opp); - } - - /* - * Get the quotient adjustment scaling factor, according to: - * - * scaling = min(1000 * (QUOT(corner_N) - QUOT(corner_N-1)) - * / (freq(corner_N) - freq(corner_N-1)), max_factor) - * - * QUOT(corner_N): quotient read from fuse for fuse corner N - * QUOT(corner_N-1): quotient read from fuse for fuse corner (N - 1) - * 
freq(corner_N): max frequency in MHz supported by fuse corner N - * freq(corner_N-1): max frequency in MHz supported by fuse corner - * (N - 1) - * - * Then walk through the corners mapped to each fuse corner - * and calculate the quotient adjustment for each one using the - * following formula: - * - * quot_adjust = (freq_max - freq_corner) * scaling / 1000 - * - * freq_max: max frequency in MHz supported by the fuse corner - * freq_corner: frequency in MHz corresponding to the corner - * scaling: calculated from above equation - * - * - * + + - * | v | - * q | f c o | f c - * u | c l | c - * o | f t | f - * t | c a | c - * | c f g | c f - * | e | - * +--------------- +---------------- - * 0 1 2 3 4 5 6 0 1 2 3 4 5 6 - * corner corner - * - * c = corner - * f = fuse corner - * - */ - for (apply_scaling = false, i = 0; corner <= end; corner++, i++) { - fnum = cdata[i].fuse_corner; - fdata = &desc->cpr_fuses.fuse_corner_data[fnum]; - quot_offset = fuses[fnum].quotient_offset; - fuse = &drv->fuse_corners[fnum]; - if (fnum) - prev_fuse = &drv->fuse_corners[fnum - 1]; - else - prev_fuse = NULL; - - corner->fuse_corner = fuse; - corner->freq = cdata[i].freq; - corner->uV = fuse->uV; - - if (prev_fuse && cdata[i - 1].freq == prev_fuse->max_freq) { - scaling = cpr_calculate_scaling(quot_offset, drv, - fdata, corner); - if (scaling < 0) - return scaling; - - apply_scaling = true; - } else if (corner->freq == fuse->max_freq) { - /* This is a fuse corner; don't scale anything */ - apply_scaling = false; - } - - if (apply_scaling) { - freq_diff = fuse->max_freq - corner->freq; - freq_diff_mhz = freq_diff / 1000000; - corner->quot_adjust = scaling * freq_diff_mhz / 1000; - - corner->uV = cpr_interpolate(corner, step_volt, fdata); - } - - corner->max_uV = fuse->max_uV; - corner->min_uV = fuse->min_uV; - corner->uV = clamp(corner->uV, corner->min_uV, corner->max_uV); - corner->last_uV = corner->uV; - - /* Reduce the ceiling voltage if needed */ - if (desc->reduce_to_corner_uV && corner->uV < corner->max_uV) - corner->max_uV = corner->uV; - else if (desc->reduce_to_fuse_uV && fuse->uV < corner->max_uV) - corner->max_uV = max(corner->min_uV, fuse->uV); - - dev_dbg(drv->dev, "corner %d: [%d %d %d] quot %d\n", i, - corner->min_uV, corner->uV, corner->max_uV, - fuse->quot - corner->quot_adjust); - } - - return 0; -} - -static const struct cpr_fuse *cpr_get_fuses(struct cpr_drv *drv) -{ - const struct cpr_desc *desc = drv->desc; - struct cpr_fuse *fuses; - int i; - - fuses = devm_kcalloc(drv->dev, desc->num_fuse_corners, - sizeof(struct cpr_fuse), - GFP_KERNEL); - if (!fuses) - return ERR_PTR(-ENOMEM); - - for (i = 0; i < desc->num_fuse_corners; i++) { - char tbuf[32]; - - snprintf(tbuf, 32, "cpr_ring_osc%d", i + 1); - fuses[i].ring_osc = devm_kstrdup(drv->dev, tbuf, GFP_KERNEL); - if (!fuses[i].ring_osc) - return ERR_PTR(-ENOMEM); - - snprintf(tbuf, 32, "cpr_init_voltage%d", i + 1); - fuses[i].init_voltage = devm_kstrdup(drv->dev, tbuf, - GFP_KERNEL); - if (!fuses[i].init_voltage) - return ERR_PTR(-ENOMEM); - - snprintf(tbuf, 32, "cpr_quotient%d", i + 1); - fuses[i].quotient = devm_kstrdup(drv->dev, tbuf, GFP_KERNEL); - if (!fuses[i].quotient) - return ERR_PTR(-ENOMEM); - - snprintf(tbuf, 32, "cpr_quotient_offset%d", i + 1); - fuses[i].quotient_offset = devm_kstrdup(drv->dev, tbuf, - GFP_KERNEL); - if (!fuses[i].quotient_offset) - return ERR_PTR(-ENOMEM); - } - - return fuses; -} - -static void cpr_set_loop_allowed(struct cpr_drv *drv) -{ - drv->loop_disabled = false; -} - -static int 
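/*
 * Illustrative sketch, not taken from this patch: plugging invented
 * numbers into the scaling and quot_adjust formulas described in the
 * cpr_corner_init() comment above. None of the figures come from real
 * fuse data.
 */
#include <stdio.h>

int main(void)
{
        int quot_n = 1200, quot_n_1 = 900;      /* fuse corners N and N-1 */
        long freq_n_mhz = 1400, freq_n_1_mhz = 1000;
        int max_quot_scale = 1400;
        long corner_mhz = 1200;                 /* virtual corner mapped to N */

        int scaling = 1000 * (quot_n - quot_n_1) / (freq_n_mhz - freq_n_1_mhz);
        if (scaling > max_quot_scale)
                scaling = max_quot_scale;

        int quot_adjust = (freq_n_mhz - corner_mhz) * scaling / 1000;

        /* prints: scaling=750 quot_adjust=150 target quot=1050 */
        printf("scaling=%d quot_adjust=%d target quot=%d\n",
               scaling, quot_adjust, quot_n - quot_adjust);
        return 0;
}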
cpr_init_parameters(struct cpr_drv *drv) -{ - const struct cpr_desc *desc = drv->desc; - struct clk *clk; - - clk = clk_get(drv->dev, "ref"); - if (IS_ERR(clk)) - return PTR_ERR(clk); - - drv->ref_clk_khz = clk_get_rate(clk) / 1000; - clk_put(clk); - - if (desc->timer_cons_up > RBIF_TIMER_ADJ_CONS_UP_MASK || - desc->timer_cons_down > RBIF_TIMER_ADJ_CONS_DOWN_MASK || - desc->up_threshold > RBCPR_CTL_UP_THRESHOLD_MASK || - desc->down_threshold > RBCPR_CTL_DN_THRESHOLD_MASK || - desc->idle_clocks > RBCPR_STEP_QUOT_IDLE_CLK_MASK || - desc->clamp_timer_interval > RBIF_TIMER_ADJ_CLAMP_INT_MASK) - return -EINVAL; - - dev_dbg(drv->dev, "up threshold = %u, down threshold = %u\n", - desc->up_threshold, desc->down_threshold); - - return 0; -} - -static int cpr_find_initial_corner(struct cpr_drv *drv) -{ - unsigned long rate; - const struct corner *end; - struct corner *iter; - unsigned int i = 0; - - if (!drv->cpu_clk) { - dev_err(drv->dev, "cannot get rate from NULL clk\n"); - return -EINVAL; - } - - end = &drv->corners[drv->num_corners - 1]; - rate = clk_get_rate(drv->cpu_clk); - - /* - * Some bootloaders set a CPU clock frequency that is not defined - * in the OPP table. When running at an unlisted frequency, - * cpufreq_online() will change to the OPP which has the lowest - * frequency, at or above the unlisted frequency. - * Since cpufreq_online() always "rounds up" in the case of an - * unlisted frequency, this function always "rounds down" in case - * of an unlisted frequency. That way, when cpufreq_online() - * triggers the first ever call to cpr_set_performance_state(), - * it will correctly determine the direction as UP. - */ - for (iter = drv->corners; iter <= end; iter++) { - if (iter->freq > rate) - break; - i++; - if (iter->freq == rate) { - drv->corner = iter; - break; - } - if (iter->freq < rate) - drv->corner = iter; - } - - if (!drv->corner) { - dev_err(drv->dev, "boot up corner not found\n"); - return -EINVAL; - } - - dev_dbg(drv->dev, "boot up perf state: %u\n", i); - - return 0; -} - -static const struct cpr_desc qcs404_cpr_desc = { - .num_fuse_corners = 3, - .min_diff_quot = CPR_FUSE_MIN_QUOT_DIFF, - .step_quot = (int []){ 25, 25, 25, }, - .timer_delay_us = 5000, - .timer_cons_up = 0, - .timer_cons_down = 2, - .up_threshold = 1, - .down_threshold = 3, - .idle_clocks = 15, - .gcnt_us = 1, - .vdd_apc_step_up_limit = 1, - .vdd_apc_step_down_limit = 1, - .cpr_fuses = { - .init_voltage_step = 8000, - .init_voltage_width = 6, - .fuse_corner_data = (struct fuse_corner_data[]){ - /* fuse corner 0 */ - { - .ref_uV = 1224000, - .max_uV = 1224000, - .min_uV = 1048000, - .max_volt_scale = 0, - .max_quot_scale = 0, - .quot_offset = 0, - .quot_scale = 1, - .quot_adjust = 0, - .quot_offset_scale = 5, - .quot_offset_adjust = 0, - }, - /* fuse corner 1 */ - { - .ref_uV = 1288000, - .max_uV = 1288000, - .min_uV = 1048000, - .max_volt_scale = 2000, - .max_quot_scale = 1400, - .quot_offset = 0, - .quot_scale = 1, - .quot_adjust = -20, - .quot_offset_scale = 5, - .quot_offset_adjust = 0, - }, - /* fuse corner 2 */ - { - .ref_uV = 1352000, - .max_uV = 1384000, - .min_uV = 1088000, - .max_volt_scale = 2000, - .max_quot_scale = 1400, - .quot_offset = 0, - .quot_scale = 1, - .quot_adjust = 0, - .quot_offset_scale = 5, - .quot_offset_adjust = 0, - }, - }, - }, -}; - -static const struct acc_desc qcs404_acc_desc = { - .settings = (struct reg_sequence[]){ - { 0xb120, 0x1041040 }, - { 0xb124, 0x41 }, - { 0xb120, 0x0 }, - { 0xb124, 0x0 }, - { 0xb120, 0x0 }, - { 0xb124, 0x0 }, - }, - .config = (struct 
reg_sequence[]){ - { 0xb138, 0xff }, - { 0xb130, 0x5555 }, - }, - .num_regs_per_fuse = 2, -}; - -static const struct cpr_acc_desc qcs404_cpr_acc_desc = { - .cpr_desc = &qcs404_cpr_desc, - .acc_desc = &qcs404_acc_desc, -}; - -static unsigned int cpr_get_performance_state(struct generic_pm_domain *genpd, - struct dev_pm_opp *opp) -{ - return dev_pm_opp_get_level(opp); -} - -static int cpr_power_off(struct generic_pm_domain *domain) -{ - struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd); - - return cpr_disable(drv); -} - -static int cpr_power_on(struct generic_pm_domain *domain) -{ - struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd); - - return cpr_enable(drv); -} - -static int cpr_pd_attach_dev(struct generic_pm_domain *domain, - struct device *dev) -{ - struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd); - const struct acc_desc *acc_desc = drv->acc_desc; - int ret = 0; - - mutex_lock(&drv->lock); - - dev_dbg(drv->dev, "attach callback for: %s\n", dev_name(dev)); - - /* - * This driver only supports scaling voltage for a CPU cluster - * where all CPUs in the cluster share a single regulator. - * Therefore, save the struct device pointer only for the first - * CPU device that gets attached. There is no need to do any - * additional initialization when further CPUs get attached. - */ - if (drv->attached_cpu_dev) - goto unlock; - - /* - * cpr_scale_voltage() requires the direction (if we are changing - * to a higher or lower OPP). The first time - * cpr_set_performance_state() is called, there is no previous - * performance state defined. Therefore, we call - * cpr_find_initial_corner() that gets the CPU clock frequency - * set by the bootloader, so that we can determine the direction - * the first time cpr_set_performance_state() is called. - */ - drv->cpu_clk = devm_clk_get(dev, NULL); - if (IS_ERR(drv->cpu_clk)) { - ret = PTR_ERR(drv->cpu_clk); - if (ret != -EPROBE_DEFER) - dev_err(drv->dev, "could not get cpu clk: %d\n", ret); - goto unlock; - } - drv->attached_cpu_dev = dev; - - dev_dbg(drv->dev, "using cpu clk from: %s\n", - dev_name(drv->attached_cpu_dev)); - - /* - * Everything related to (virtual) corners has to be initialized - * here, when attaching to the power domain, since we need to know - * the maximum frequency for each fuse corner, and this is only - * available after the cpufreq driver has attached to us. - * The reason for this is that we need to know the highest - * frequency associated with each fuse corner. 
- */ - ret = dev_pm_opp_get_opp_count(&drv->pd.dev); - if (ret < 0) { - dev_err(drv->dev, "could not get OPP count\n"); - goto unlock; - } - drv->num_corners = ret; - - if (drv->num_corners < 2) { - dev_err(drv->dev, "need at least 2 OPPs to use CPR\n"); - ret = -EINVAL; - goto unlock; - } - - drv->corners = devm_kcalloc(drv->dev, drv->num_corners, - sizeof(*drv->corners), - GFP_KERNEL); - if (!drv->corners) { - ret = -ENOMEM; - goto unlock; - } - - ret = cpr_corner_init(drv); - if (ret) - goto unlock; - - cpr_set_loop_allowed(drv); - - ret = cpr_init_parameters(drv); - if (ret) - goto unlock; - - /* Configure CPR HW but keep it disabled */ - ret = cpr_config(drv); - if (ret) - goto unlock; - - ret = cpr_find_initial_corner(drv); - if (ret) - goto unlock; - - if (acc_desc->config) - regmap_multi_reg_write(drv->tcsr, acc_desc->config, - acc_desc->num_regs_per_fuse); - - /* Enable ACC if required */ - if (acc_desc->enable_mask) - regmap_update_bits(drv->tcsr, acc_desc->enable_reg, - acc_desc->enable_mask, - acc_desc->enable_mask); - - dev_info(drv->dev, "driver initialized with %u OPPs\n", - drv->num_corners); - -unlock: - mutex_unlock(&drv->lock); - - return ret; -} - -static int cpr_debug_info_show(struct seq_file *s, void *unused) -{ - u32 gcnt, ro_sel, ctl, irq_status, reg, error_steps; - u32 step_dn, step_up, error, error_lt0, busy; - struct cpr_drv *drv = s->private; - struct fuse_corner *fuse_corner; - struct corner *corner; - - corner = drv->corner; - fuse_corner = corner->fuse_corner; - - seq_printf(s, "corner, current_volt = %d uV\n", - corner->last_uV); - - ro_sel = fuse_corner->ring_osc_idx; - gcnt = cpr_read(drv, REG_RBCPR_GCNT_TARGET(ro_sel)); - seq_printf(s, "rbcpr_gcnt_target (%u) = %#02X\n", ro_sel, gcnt); - - ctl = cpr_read(drv, REG_RBCPR_CTL); - seq_printf(s, "rbcpr_ctl = %#02X\n", ctl); - - irq_status = cpr_read(drv, REG_RBIF_IRQ_STATUS); - seq_printf(s, "rbcpr_irq_status = %#02X\n", irq_status); - - reg = cpr_read(drv, REG_RBCPR_RESULT_0); - seq_printf(s, "rbcpr_result_0 = %#02X\n", reg); - - step_dn = reg & 0x01; - step_up = (reg >> RBCPR_RESULT0_STEP_UP_SHIFT) & 0x01; - seq_printf(s, " [step_dn = %u", step_dn); - - seq_printf(s, ", step_up = %u", step_up); - - error_steps = (reg >> RBCPR_RESULT0_ERROR_STEPS_SHIFT) - & RBCPR_RESULT0_ERROR_STEPS_MASK; - seq_printf(s, ", error_steps = %u", error_steps); - - error = (reg >> RBCPR_RESULT0_ERROR_SHIFT) & RBCPR_RESULT0_ERROR_MASK; - seq_printf(s, ", error = %u", error); - - error_lt0 = (reg >> RBCPR_RESULT0_ERROR_LT0_SHIFT) & 0x01; - seq_printf(s, ", error_lt_0 = %u", error_lt0); - - busy = (reg >> RBCPR_RESULT0_BUSY_SHIFT) & 0x01; - seq_printf(s, ", busy = %u]\n", busy); - - return 0; -} -DEFINE_SHOW_ATTRIBUTE(cpr_debug_info); - -static void cpr_debugfs_init(struct cpr_drv *drv) -{ - drv->debugfs = debugfs_create_dir("qcom_cpr", NULL); - - debugfs_create_file("debug_info", 0444, drv->debugfs, - drv, &cpr_debug_info_fops); -} - -static int cpr_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct cpr_drv *drv; - int irq, ret; - const struct cpr_acc_desc *data; - struct device_node *np; - u32 cpr_rev = FUSE_REVISION_UNKNOWN; - - data = of_device_get_match_data(dev); - if (!data || !data->cpr_desc || !data->acc_desc) - return -EINVAL; - - drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL); - if (!drv) - return -ENOMEM; - drv->dev = dev; - drv->desc = data->cpr_desc; - drv->acc_desc = data->acc_desc; - - drv->fuse_corners = devm_kcalloc(dev, drv->desc->num_fuse_corners, - sizeof(*drv->fuse_corners), - 
GFP_KERNEL); - if (!drv->fuse_corners) - return -ENOMEM; - - np = of_parse_phandle(dev->of_node, "acc-syscon", 0); - if (!np) - return -ENODEV; - - drv->tcsr = syscon_node_to_regmap(np); - of_node_put(np); - if (IS_ERR(drv->tcsr)) - return PTR_ERR(drv->tcsr); - - drv->base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(drv->base)) - return PTR_ERR(drv->base); - - irq = platform_get_irq(pdev, 0); - if (irq < 0) - return -EINVAL; - - drv->vdd_apc = devm_regulator_get(dev, "vdd-apc"); - if (IS_ERR(drv->vdd_apc)) - return PTR_ERR(drv->vdd_apc); - - /* - * Initialize fuse corners, since it simply depends - * on data in efuses. - * Everything related to (virtual) corners has to be - * initialized after attaching to the power domain, - * since it depends on the CPU's OPP table. - */ - ret = nvmem_cell_read_variable_le_u32(dev, "cpr_fuse_revision", &cpr_rev); - if (ret) - return ret; - - drv->cpr_fuses = cpr_get_fuses(drv); - if (IS_ERR(drv->cpr_fuses)) - return PTR_ERR(drv->cpr_fuses); - - ret = cpr_populate_ring_osc_idx(drv); - if (ret) - return ret; - - ret = cpr_fuse_corner_init(drv); - if (ret) - return ret; - - mutex_init(&drv->lock); - - ret = devm_request_threaded_irq(dev, irq, NULL, - cpr_irq_handler, - IRQF_ONESHOT | IRQF_TRIGGER_RISING, - "cpr", drv); - if (ret) - return ret; - - drv->pd.name = devm_kstrdup_const(dev, dev->of_node->full_name, - GFP_KERNEL); - if (!drv->pd.name) - return -EINVAL; - - drv->pd.power_off = cpr_power_off; - drv->pd.power_on = cpr_power_on; - drv->pd.set_performance_state = cpr_set_performance_state; - drv->pd.opp_to_performance_state = cpr_get_performance_state; - drv->pd.attach_dev = cpr_pd_attach_dev; - - ret = pm_genpd_init(&drv->pd, NULL, true); - if (ret) - return ret; - - ret = of_genpd_add_provider_simple(dev->of_node, &drv->pd); - if (ret) - goto err_remove_genpd; - - platform_set_drvdata(pdev, drv); - cpr_debugfs_init(drv); - - return 0; - -err_remove_genpd: - pm_genpd_remove(&drv->pd); - return ret; -} - -static int cpr_remove(struct platform_device *pdev) -{ - struct cpr_drv *drv = platform_get_drvdata(pdev); - - if (cpr_is_allowed(drv)) { - cpr_ctl_disable(drv); - cpr_irq_set(drv, 0); - } - - of_genpd_del_provider(pdev->dev.of_node); - pm_genpd_remove(&drv->pd); - - debugfs_remove_recursive(drv->debugfs); - - return 0; -} - -static const struct of_device_id cpr_match_table[] = { - { .compatible = "qcom,qcs404-cpr", .data = &qcs404_cpr_acc_desc }, - { } -}; -MODULE_DEVICE_TABLE(of, cpr_match_table); - -static struct platform_driver cpr_driver = { - .probe = cpr_probe, - .remove = cpr_remove, - .driver = { - .name = "qcom-cpr", - .of_match_table = cpr_match_table, - }, -}; -module_platform_driver(cpr_driver); - -MODULE_DESCRIPTION("Core Power Reduction (CPR) driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c deleted file mode 100644 index 63c35a32065b..000000000000 --- a/drivers/soc/qcom/rpmhpd.c +++ /dev/null @@ -1,870 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright (c) 2018, The Linux Foundation. 
All rights reserved.*/ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define domain_to_rpmhpd(domain) container_of(domain, struct rpmhpd, pd) - -#define RPMH_ARC_MAX_LEVELS 16 - -/** - * struct rpmhpd - top level RPMh power domain resource data structure - * @dev: rpmh power domain controller device - * @pd: generic_pm_domain corresponding to the power domain - * @parent: generic_pm_domain corresponding to the parent's power domain - * @peer: A peer power domain in case Active only Voting is - * supported - * @active_only: True if it represents an Active only peer - * @corner: current corner - * @active_corner: current active corner - * @enable_corner: lowest non-zero corner - * @level: An array of level (vlvl) to corner (hlvl) mappings - * derived from cmd-db - * @level_count: Number of levels supported by the power domain. max - * being 16 (0 - 15) - * @enabled: true if the power domain is enabled - * @res_name: Resource name used for cmd-db lookup - * @addr: Resource address as looped up using resource name from - * cmd-db - * @state_synced: Indicator that sync_state has been invoked for the rpmhpd resource - */ -struct rpmhpd { - struct device *dev; - struct generic_pm_domain pd; - struct generic_pm_domain *parent; - struct rpmhpd *peer; - const bool active_only; - unsigned int corner; - unsigned int active_corner; - unsigned int enable_corner; - u32 level[RPMH_ARC_MAX_LEVELS]; - size_t level_count; - bool enabled; - const char *res_name; - u32 addr; - bool state_synced; -}; - -struct rpmhpd_desc { - struct rpmhpd **rpmhpds; - size_t num_pds; -}; - -static DEFINE_MUTEX(rpmhpd_lock); - -/* RPMH powerdomains */ - -static struct rpmhpd cx_ao; -static struct rpmhpd mx; -static struct rpmhpd mx_ao; -static struct rpmhpd cx = { - .pd = { .name = "cx", }, - .peer = &cx_ao, - .res_name = "cx.lvl", -}; - -static struct rpmhpd cx_ao = { - .pd = { .name = "cx_ao", }, - .active_only = true, - .peer = &cx, - .res_name = "cx.lvl", -}; - -static struct rpmhpd cx_ao_w_mx_parent; -static struct rpmhpd cx_w_mx_parent = { - .pd = { .name = "cx", }, - .peer = &cx_ao_w_mx_parent, - .parent = &mx.pd, - .res_name = "cx.lvl", -}; - -static struct rpmhpd cx_ao_w_mx_parent = { - .pd = { .name = "cx_ao", }, - .active_only = true, - .peer = &cx_w_mx_parent, - .parent = &mx_ao.pd, - .res_name = "cx.lvl", -}; - -static struct rpmhpd ebi = { - .pd = { .name = "ebi", }, - .res_name = "ebi.lvl", -}; - -static struct rpmhpd gfx = { - .pd = { .name = "gfx", }, - .res_name = "gfx.lvl", -}; - -static struct rpmhpd lcx = { - .pd = { .name = "lcx", }, - .res_name = "lcx.lvl", -}; - -static struct rpmhpd lmx = { - .pd = { .name = "lmx", }, - .res_name = "lmx.lvl", -}; - -static struct rpmhpd mmcx_ao; -static struct rpmhpd mmcx = { - .pd = { .name = "mmcx", }, - .peer = &mmcx_ao, - .res_name = "mmcx.lvl", -}; - -static struct rpmhpd mmcx_ao = { - .pd = { .name = "mmcx_ao", }, - .active_only = true, - .peer = &mmcx, - .res_name = "mmcx.lvl", -}; - -static struct rpmhpd mmcx_ao_w_cx_parent; -static struct rpmhpd mmcx_w_cx_parent = { - .pd = { .name = "mmcx", }, - .peer = &mmcx_ao_w_cx_parent, - .parent = &cx.pd, - .res_name = "mmcx.lvl", -}; - -static struct rpmhpd mmcx_ao_w_cx_parent = { - .pd = { .name = "mmcx_ao", }, - .active_only = true, - .peer = &mmcx_w_cx_parent, - .parent = &cx_ao.pd, - .res_name = "mmcx.lvl", -}; - -static struct rpmhpd mss = { - .pd = { .name = "mss", }, - .res_name = "mss.lvl", -}; - -static struct 
rpmhpd mx_ao; -static struct rpmhpd mx = { - .pd = { .name = "mx", }, - .peer = &mx_ao, - .res_name = "mx.lvl", -}; - -static struct rpmhpd mx_ao = { - .pd = { .name = "mx_ao", }, - .active_only = true, - .peer = &mx, - .res_name = "mx.lvl", -}; - -static struct rpmhpd mxc_ao; -static struct rpmhpd mxc = { - .pd = { .name = "mxc", }, - .peer = &mxc_ao, - .res_name = "mxc.lvl", -}; - -static struct rpmhpd mxc_ao = { - .pd = { .name = "mxc_ao", }, - .active_only = true, - .peer = &mxc, - .res_name = "mxc.lvl", -}; - -static struct rpmhpd nsp = { - .pd = { .name = "nsp", }, - .res_name = "nsp.lvl", -}; - -static struct rpmhpd nsp0 = { - .pd = { .name = "nsp0", }, - .res_name = "nsp0.lvl", -}; - -static struct rpmhpd nsp1 = { - .pd = { .name = "nsp1", }, - .res_name = "nsp1.lvl", -}; - -static struct rpmhpd qphy = { - .pd = { .name = "qphy", }, - .res_name = "qphy.lvl", -}; - -/* SA8540P RPMH powerdomains */ -static struct rpmhpd *sa8540p_rpmhpds[] = { - [SC8280XP_CX] = &cx, - [SC8280XP_CX_AO] = &cx_ao, - [SC8280XP_EBI] = &ebi, - [SC8280XP_GFX] = &gfx, - [SC8280XP_LCX] = &lcx, - [SC8280XP_LMX] = &lmx, - [SC8280XP_MMCX] = &mmcx, - [SC8280XP_MMCX_AO] = &mmcx_ao, - [SC8280XP_MX] = &mx, - [SC8280XP_MX_AO] = &mx_ao, - [SC8280XP_NSP] = &nsp, -}; - -static const struct rpmhpd_desc sa8540p_desc = { - .rpmhpds = sa8540p_rpmhpds, - .num_pds = ARRAY_SIZE(sa8540p_rpmhpds), -}; - -/* SA8775P RPMH power domains */ -static struct rpmhpd *sa8775p_rpmhpds[] = { - [SA8775P_CX] = &cx, - [SA8775P_CX_AO] = &cx_ao, - [SA8775P_EBI] = &ebi, - [SA8775P_GFX] = &gfx, - [SA8775P_LCX] = &lcx, - [SA8775P_LMX] = &lmx, - [SA8775P_MMCX] = &mmcx, - [SA8775P_MMCX_AO] = &mmcx_ao, - [SA8775P_MXC] = &mxc, - [SA8775P_MXC_AO] = &mxc_ao, - [SA8775P_MX] = &mx, - [SA8775P_MX_AO] = &mx_ao, - [SA8775P_NSP0] = &nsp0, - [SA8775P_NSP1] = &nsp1, -}; - -static const struct rpmhpd_desc sa8775p_desc = { - .rpmhpds = sa8775p_rpmhpds, - .num_pds = ARRAY_SIZE(sa8775p_rpmhpds), -}; - -/* SDM670 RPMH powerdomains */ -static struct rpmhpd *sdm670_rpmhpds[] = { - [SDM670_CX] = &cx_w_mx_parent, - [SDM670_CX_AO] = &cx_ao_w_mx_parent, - [SDM670_GFX] = &gfx, - [SDM670_LCX] = &lcx, - [SDM670_LMX] = &lmx, - [SDM670_MSS] = &mss, - [SDM670_MX] = &mx, - [SDM670_MX_AO] = &mx_ao, -}; - -static const struct rpmhpd_desc sdm670_desc = { - .rpmhpds = sdm670_rpmhpds, - .num_pds = ARRAY_SIZE(sdm670_rpmhpds), -}; - -/* SDM845 RPMH powerdomains */ -static struct rpmhpd *sdm845_rpmhpds[] = { - [SDM845_CX] = &cx_w_mx_parent, - [SDM845_CX_AO] = &cx_ao_w_mx_parent, - [SDM845_EBI] = &ebi, - [SDM845_GFX] = &gfx, - [SDM845_LCX] = &lcx, - [SDM845_LMX] = &lmx, - [SDM845_MSS] = &mss, - [SDM845_MX] = &mx, - [SDM845_MX_AO] = &mx_ao, -}; - -static const struct rpmhpd_desc sdm845_desc = { - .rpmhpds = sdm845_rpmhpds, - .num_pds = ARRAY_SIZE(sdm845_rpmhpds), -}; - -/* SDX55 RPMH powerdomains */ -static struct rpmhpd *sdx55_rpmhpds[] = { - [SDX55_CX] = &cx_w_mx_parent, - [SDX55_MSS] = &mss, - [SDX55_MX] = &mx, -}; - -static const struct rpmhpd_desc sdx55_desc = { - .rpmhpds = sdx55_rpmhpds, - .num_pds = ARRAY_SIZE(sdx55_rpmhpds), -}; - -/* SDX65 RPMH powerdomains */ -static struct rpmhpd *sdx65_rpmhpds[] = { - [SDX65_CX] = &cx_w_mx_parent, - [SDX65_CX_AO] = &cx_ao_w_mx_parent, - [SDX65_MSS] = &mss, - [SDX65_MX] = &mx, - [SDX65_MX_AO] = &mx_ao, - [SDX65_MXC] = &mxc, -}; - -static const struct rpmhpd_desc sdx65_desc = { - .rpmhpds = sdx65_rpmhpds, - .num_pds = ARRAY_SIZE(sdx65_rpmhpds), -}; - -/* SM6350 RPMH powerdomains */ -static struct rpmhpd *sm6350_rpmhpds[] = { - [SM6350_CX] = 
&cx_w_mx_parent, - [SM6350_GFX] = &gfx, - [SM6350_LCX] = &lcx, - [SM6350_LMX] = &lmx, - [SM6350_MSS] = &mss, - [SM6350_MX] = &mx, -}; - -static const struct rpmhpd_desc sm6350_desc = { - .rpmhpds = sm6350_rpmhpds, - .num_pds = ARRAY_SIZE(sm6350_rpmhpds), -}; - -/* SM8150 RPMH powerdomains */ -static struct rpmhpd *sm8150_rpmhpds[] = { - [SM8150_CX] = &cx_w_mx_parent, - [SM8150_CX_AO] = &cx_ao_w_mx_parent, - [SM8150_EBI] = &ebi, - [SM8150_GFX] = &gfx, - [SM8150_LCX] = &lcx, - [SM8150_LMX] = &lmx, - [SM8150_MMCX] = &mmcx, - [SM8150_MMCX_AO] = &mmcx_ao, - [SM8150_MSS] = &mss, - [SM8150_MX] = &mx, - [SM8150_MX_AO] = &mx_ao, -}; - -static const struct rpmhpd_desc sm8150_desc = { - .rpmhpds = sm8150_rpmhpds, - .num_pds = ARRAY_SIZE(sm8150_rpmhpds), -}; - -static struct rpmhpd *sa8155p_rpmhpds[] = { - [SA8155P_CX] = &cx_w_mx_parent, - [SA8155P_CX_AO] = &cx_ao_w_mx_parent, - [SA8155P_EBI] = &ebi, - [SA8155P_GFX] = &gfx, - [SA8155P_MSS] = &mss, - [SA8155P_MX] = &mx, - [SA8155P_MX_AO] = &mx_ao, -}; - -static const struct rpmhpd_desc sa8155p_desc = { - .rpmhpds = sa8155p_rpmhpds, - .num_pds = ARRAY_SIZE(sa8155p_rpmhpds), -}; - -/* SM8250 RPMH powerdomains */ -static struct rpmhpd *sm8250_rpmhpds[] = { - [SM8250_CX] = &cx_w_mx_parent, - [SM8250_CX_AO] = &cx_ao_w_mx_parent, - [SM8250_EBI] = &ebi, - [SM8250_GFX] = &gfx, - [SM8250_LCX] = &lcx, - [SM8250_LMX] = &lmx, - [SM8250_MMCX] = &mmcx, - [SM8250_MMCX_AO] = &mmcx_ao, - [SM8250_MX] = &mx, - [SM8250_MX_AO] = &mx_ao, -}; - -static const struct rpmhpd_desc sm8250_desc = { - .rpmhpds = sm8250_rpmhpds, - .num_pds = ARRAY_SIZE(sm8250_rpmhpds), -}; - -/* SM8350 Power domains */ -static struct rpmhpd *sm8350_rpmhpds[] = { - [SM8350_CX] = &cx_w_mx_parent, - [SM8350_CX_AO] = &cx_ao_w_mx_parent, - [SM8350_EBI] = &ebi, - [SM8350_GFX] = &gfx, - [SM8350_LCX] = &lcx, - [SM8350_LMX] = &lmx, - [SM8350_MMCX] = &mmcx, - [SM8350_MMCX_AO] = &mmcx_ao, - [SM8350_MSS] = &mss, - [SM8350_MX] = &mx, - [SM8350_MX_AO] = &mx_ao, - [SM8350_MXC] = &mxc, - [SM8350_MXC_AO] = &mxc_ao, -}; - -static const struct rpmhpd_desc sm8350_desc = { - .rpmhpds = sm8350_rpmhpds, - .num_pds = ARRAY_SIZE(sm8350_rpmhpds), -}; - -/* SM8450 RPMH powerdomains */ -static struct rpmhpd *sm8450_rpmhpds[] = { - [SM8450_CX] = &cx, - [SM8450_CX_AO] = &cx_ao, - [SM8450_EBI] = &ebi, - [SM8450_GFX] = &gfx, - [SM8450_LCX] = &lcx, - [SM8450_LMX] = &lmx, - [SM8450_MMCX] = &mmcx_w_cx_parent, - [SM8450_MMCX_AO] = &mmcx_ao_w_cx_parent, - [SM8450_MSS] = &mss, - [SM8450_MX] = &mx, - [SM8450_MX_AO] = &mx_ao, - [SM8450_MXC] = &mxc, - [SM8450_MXC_AO] = &mxc_ao, -}; - -static const struct rpmhpd_desc sm8450_desc = { - .rpmhpds = sm8450_rpmhpds, - .num_pds = ARRAY_SIZE(sm8450_rpmhpds), -}; - -/* SM8550 RPMH powerdomains */ -static struct rpmhpd *sm8550_rpmhpds[] = { - [SM8550_CX] = &cx, - [SM8550_CX_AO] = &cx_ao, - [SM8550_EBI] = &ebi, - [SM8550_GFX] = &gfx, - [SM8550_LCX] = &lcx, - [SM8550_LMX] = &lmx, - [SM8550_MMCX] = &mmcx_w_cx_parent, - [SM8550_MMCX_AO] = &mmcx_ao_w_cx_parent, - [SM8550_MSS] = &mss, - [SM8550_MX] = &mx, - [SM8550_MX_AO] = &mx_ao, - [SM8550_MXC] = &mxc, - [SM8550_MXC_AO] = &mxc_ao, - [SM8550_NSP] = &nsp, -}; - -static const struct rpmhpd_desc sm8550_desc = { - .rpmhpds = sm8550_rpmhpds, - .num_pds = ARRAY_SIZE(sm8550_rpmhpds), -}; - -/* QDU1000/QRU1000 RPMH powerdomains */ -static struct rpmhpd *qdu1000_rpmhpds[] = { - [QDU1000_CX] = &cx, - [QDU1000_EBI] = &ebi, - [QDU1000_MSS] = &mss, - [QDU1000_MX] = &mx, -}; - -static const struct rpmhpd_desc qdu1000_desc = { - .rpmhpds = qdu1000_rpmhpds, - .num_pds = 
ARRAY_SIZE(qdu1000_rpmhpds), -}; - -/* SC7180 RPMH powerdomains */ -static struct rpmhpd *sc7180_rpmhpds[] = { - [SC7180_CX] = &cx_w_mx_parent, - [SC7180_CX_AO] = &cx_ao_w_mx_parent, - [SC7180_GFX] = &gfx, - [SC7180_LCX] = &lcx, - [SC7180_LMX] = &lmx, - [SC7180_MSS] = &mss, - [SC7180_MX] = &mx, - [SC7180_MX_AO] = &mx_ao, -}; - -static const struct rpmhpd_desc sc7180_desc = { - .rpmhpds = sc7180_rpmhpds, - .num_pds = ARRAY_SIZE(sc7180_rpmhpds), -}; - -/* SC7280 RPMH powerdomains */ -static struct rpmhpd *sc7280_rpmhpds[] = { - [SC7280_CX] = &cx, - [SC7280_CX_AO] = &cx_ao, - [SC7280_EBI] = &ebi, - [SC7280_GFX] = &gfx, - [SC7280_LCX] = &lcx, - [SC7280_LMX] = &lmx, - [SC7280_MSS] = &mss, - [SC7280_MX] = &mx, - [SC7280_MX_AO] = &mx_ao, -}; - -static const struct rpmhpd_desc sc7280_desc = { - .rpmhpds = sc7280_rpmhpds, - .num_pds = ARRAY_SIZE(sc7280_rpmhpds), -}; - -/* SC8180x RPMH powerdomains */ -static struct rpmhpd *sc8180x_rpmhpds[] = { - [SC8180X_CX] = &cx_w_mx_parent, - [SC8180X_CX_AO] = &cx_ao_w_mx_parent, - [SC8180X_EBI] = &ebi, - [SC8180X_GFX] = &gfx, - [SC8180X_LCX] = &lcx, - [SC8180X_LMX] = &lmx, - [SC8180X_MMCX] = &mmcx, - [SC8180X_MMCX_AO] = &mmcx_ao, - [SC8180X_MSS] = &mss, - [SC8180X_MX] = &mx, - [SC8180X_MX_AO] = &mx_ao, -}; - -static const struct rpmhpd_desc sc8180x_desc = { - .rpmhpds = sc8180x_rpmhpds, - .num_pds = ARRAY_SIZE(sc8180x_rpmhpds), -}; - -/* SC8280xp RPMH powerdomains */ -static struct rpmhpd *sc8280xp_rpmhpds[] = { - [SC8280XP_CX] = &cx, - [SC8280XP_CX_AO] = &cx_ao, - [SC8280XP_EBI] = &ebi, - [SC8280XP_GFX] = &gfx, - [SC8280XP_LCX] = &lcx, - [SC8280XP_LMX] = &lmx, - [SC8280XP_MMCX] = &mmcx, - [SC8280XP_MMCX_AO] = &mmcx_ao, - [SC8280XP_MX] = &mx, - [SC8280XP_MX_AO] = &mx_ao, - [SC8280XP_NSP] = &nsp, - [SC8280XP_QPHY] = &qphy, -}; - -static const struct rpmhpd_desc sc8280xp_desc = { - .rpmhpds = sc8280xp_rpmhpds, - .num_pds = ARRAY_SIZE(sc8280xp_rpmhpds), -}; - -static const struct of_device_id rpmhpd_match_table[] = { - { .compatible = "qcom,qdu1000-rpmhpd", .data = &qdu1000_desc }, - { .compatible = "qcom,sa8155p-rpmhpd", .data = &sa8155p_desc }, - { .compatible = "qcom,sa8540p-rpmhpd", .data = &sa8540p_desc }, - { .compatible = "qcom,sa8775p-rpmhpd", .data = &sa8775p_desc }, - { .compatible = "qcom,sc7180-rpmhpd", .data = &sc7180_desc }, - { .compatible = "qcom,sc7280-rpmhpd", .data = &sc7280_desc }, - { .compatible = "qcom,sc8180x-rpmhpd", .data = &sc8180x_desc }, - { .compatible = "qcom,sc8280xp-rpmhpd", .data = &sc8280xp_desc }, - { .compatible = "qcom,sdm670-rpmhpd", .data = &sdm670_desc }, - { .compatible = "qcom,sdm845-rpmhpd", .data = &sdm845_desc }, - { .compatible = "qcom,sdx55-rpmhpd", .data = &sdx55_desc}, - { .compatible = "qcom,sdx65-rpmhpd", .data = &sdx65_desc}, - { .compatible = "qcom,sm6350-rpmhpd", .data = &sm6350_desc }, - { .compatible = "qcom,sm8150-rpmhpd", .data = &sm8150_desc }, - { .compatible = "qcom,sm8250-rpmhpd", .data = &sm8250_desc }, - { .compatible = "qcom,sm8350-rpmhpd", .data = &sm8350_desc }, - { .compatible = "qcom,sm8450-rpmhpd", .data = &sm8450_desc }, - { .compatible = "qcom,sm8550-rpmhpd", .data = &sm8550_desc }, - { } -}; -MODULE_DEVICE_TABLE(of, rpmhpd_match_table); - -static int rpmhpd_send_corner(struct rpmhpd *pd, int state, - unsigned int corner, bool sync) -{ - struct tcs_cmd cmd = { - .addr = pd->addr, - .data = corner, - }; - - /* - * Wait for an ack only when we are increasing the - * perf state of the power domain - */ - if (sync) - return rpmh_write(pd->dev, state, &cmd, 1); - else - return 
rpmh_write_async(pd->dev, state, &cmd, 1); -} - -static void to_active_sleep(struct rpmhpd *pd, unsigned int corner, - unsigned int *active, unsigned int *sleep) -{ - *active = corner; - - if (pd->active_only) - *sleep = 0; - else - *sleep = *active; -} - -/* - * This function is used to aggregate the votes across the active only - * resources and its peers. The aggregated votes are sent to RPMh as - * ACTIVE_ONLY votes (which take effect immediately), as WAKE_ONLY votes - * (applied by RPMh on system wakeup) and as SLEEP votes (applied by RPMh - * on system sleep). - * We send ACTIVE_ONLY votes for resources without any peers. For others, - * which have an active only peer, all 3 votes are sent. - */ -static int rpmhpd_aggregate_corner(struct rpmhpd *pd, unsigned int corner) -{ - int ret; - struct rpmhpd *peer = pd->peer; - unsigned int active_corner, sleep_corner; - unsigned int this_active_corner = 0, this_sleep_corner = 0; - unsigned int peer_active_corner = 0, peer_sleep_corner = 0; - - if (pd->state_synced) { - to_active_sleep(pd, corner, &this_active_corner, &this_sleep_corner); - } else { - /* Clamp to highest corner if sync_state hasn't happened */ - this_active_corner = pd->level_count - 1; - this_sleep_corner = pd->level_count - 1; - } - - if (peer && peer->enabled) - to_active_sleep(peer, peer->corner, &peer_active_corner, - &peer_sleep_corner); - - active_corner = max(this_active_corner, peer_active_corner); - - ret = rpmhpd_send_corner(pd, RPMH_ACTIVE_ONLY_STATE, active_corner, - active_corner > pd->active_corner); - if (ret) - return ret; - - pd->active_corner = active_corner; - - if (peer) { - peer->active_corner = active_corner; - - ret = rpmhpd_send_corner(pd, RPMH_WAKE_ONLY_STATE, - active_corner, false); - if (ret) - return ret; - - sleep_corner = max(this_sleep_corner, peer_sleep_corner); - - return rpmhpd_send_corner(pd, RPMH_SLEEP_STATE, sleep_corner, - false); - } - - return ret; -} - -static int rpmhpd_power_on(struct generic_pm_domain *domain) -{ - struct rpmhpd *pd = domain_to_rpmhpd(domain); - unsigned int corner; - int ret; - - mutex_lock(&rpmhpd_lock); - - corner = max(pd->corner, pd->enable_corner); - ret = rpmhpd_aggregate_corner(pd, corner); - if (!ret) - pd->enabled = true; - - mutex_unlock(&rpmhpd_lock); - - return ret; -} - -static int rpmhpd_power_off(struct generic_pm_domain *domain) -{ - struct rpmhpd *pd = domain_to_rpmhpd(domain); - int ret; - - mutex_lock(&rpmhpd_lock); - - ret = rpmhpd_aggregate_corner(pd, 0); - if (!ret) - pd->enabled = false; - - mutex_unlock(&rpmhpd_lock); - - return ret; -} - -static int rpmhpd_set_performance_state(struct generic_pm_domain *domain, - unsigned int level) -{ - struct rpmhpd *pd = domain_to_rpmhpd(domain); - int ret = 0, i; - - mutex_lock(&rpmhpd_lock); - - for (i = 0; i < pd->level_count; i++) - if (level <= pd->level[i]) - break; - - /* - * If the level requested is more than that supported by the - * max corner, just set it to max anyway. 
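/*
 * Illustrative sketch, not taken from this patch: the active/sleep vote
 * split and peer aggregation described in the comment above
 * rpmhpd_aggregate_corner(). The corner numbers are invented and the
 * helper below only mimics the driver's behaviour.
 */
#include <stdio.h>

struct example_pd {
        int corner;
        int active_only;
};

static void split_votes(const struct example_pd *pd, int corner,
                        int *active, int *sleep)
{
        *active = corner;
        /* An active-only domain must not keep its vote over system sleep */
        *sleep = pd->active_only ? 0 : corner;
}

int main(void)
{
        /* Hypothetical: "cx" voted corner 4, its "cx_ao" peer corner 6 */
        struct example_pd cx = { .corner = 4, .active_only = 0 };
        struct example_pd cx_ao = { .corner = 6, .active_only = 1 };
        int this_active, this_sleep, peer_active, peer_sleep;

        split_votes(&cx, cx.corner, &this_active, &this_sleep);
        split_votes(&cx_ao, cx_ao.corner, &peer_active, &peer_sleep);

        /* ACTIVE_ONLY/WAKE_ONLY get max(4, 6) = 6, SLEEP gets max(4, 0) = 4 */
        printf("active=%d sleep=%d\n",
               this_active > peer_active ? this_active : peer_active,
               this_sleep > peer_sleep ? this_sleep : peer_sleep);
        return 0;
}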
- */ - if (i == pd->level_count) - i--; - - if (pd->enabled) { - /* Ensure that the domain isn't turn off */ - if (i < pd->enable_corner) - i = pd->enable_corner; - - ret = rpmhpd_aggregate_corner(pd, i); - if (ret) - goto out; - } - - pd->corner = i; -out: - mutex_unlock(&rpmhpd_lock); - - return ret; -} - -static unsigned int rpmhpd_get_performance_state(struct generic_pm_domain *genpd, - struct dev_pm_opp *opp) -{ - return dev_pm_opp_get_level(opp); -} - -static int rpmhpd_update_level_mapping(struct rpmhpd *rpmhpd) -{ - int i; - const u16 *buf; - - buf = cmd_db_read_aux_data(rpmhpd->res_name, &rpmhpd->level_count); - if (IS_ERR(buf)) - return PTR_ERR(buf); - - /* 2 bytes used for each command DB aux data entry */ - rpmhpd->level_count >>= 1; - - if (rpmhpd->level_count > RPMH_ARC_MAX_LEVELS) - return -EINVAL; - - for (i = 0; i < rpmhpd->level_count; i++) { - rpmhpd->level[i] = buf[i]; - - /* Remember the first corner with non-zero level */ - if (!rpmhpd->level[rpmhpd->enable_corner] && rpmhpd->level[i]) - rpmhpd->enable_corner = i; - - /* - * The AUX data may be zero padded. These 0 valued entries at - * the end of the map must be ignored. - */ - if (i > 0 && rpmhpd->level[i] == 0) { - rpmhpd->level_count = i; - break; - } - pr_debug("%s: ARC hlvl=%2d --> vlvl=%4u\n", rpmhpd->res_name, i, - rpmhpd->level[i]); - } - - return 0; -} - -static int rpmhpd_probe(struct platform_device *pdev) -{ - int i, ret; - size_t num_pds; - struct device *dev = &pdev->dev; - struct genpd_onecell_data *data; - struct rpmhpd **rpmhpds; - const struct rpmhpd_desc *desc; - - desc = of_device_get_match_data(dev); - if (!desc) - return -EINVAL; - - rpmhpds = desc->rpmhpds; - num_pds = desc->num_pds; - - data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); - if (!data) - return -ENOMEM; - - data->domains = devm_kcalloc(dev, num_pds, sizeof(*data->domains), - GFP_KERNEL); - if (!data->domains) - return -ENOMEM; - - data->num_domains = num_pds; - - for (i = 0; i < num_pds; i++) { - if (!rpmhpds[i]) - continue; - - rpmhpds[i]->dev = dev; - rpmhpds[i]->addr = cmd_db_read_addr(rpmhpds[i]->res_name); - if (!rpmhpds[i]->addr) { - dev_err(dev, "Could not find RPMh address for resource %s\n", - rpmhpds[i]->res_name); - return -ENODEV; - } - - ret = cmd_db_read_slave_id(rpmhpds[i]->res_name); - if (ret != CMD_DB_HW_ARC) { - dev_err(dev, "RPMh slave ID mismatch\n"); - return -EINVAL; - } - - ret = rpmhpd_update_level_mapping(rpmhpds[i]); - if (ret) - return ret; - - rpmhpds[i]->pd.power_off = rpmhpd_power_off; - rpmhpds[i]->pd.power_on = rpmhpd_power_on; - rpmhpds[i]->pd.set_performance_state = rpmhpd_set_performance_state; - rpmhpds[i]->pd.opp_to_performance_state = rpmhpd_get_performance_state; - pm_genpd_init(&rpmhpds[i]->pd, NULL, true); - - data->domains[i] = &rpmhpds[i]->pd; - } - - /* Add subdomains */ - for (i = 0; i < num_pds; i++) { - if (!rpmhpds[i]) - continue; - if (rpmhpds[i]->parent) - pm_genpd_add_subdomain(rpmhpds[i]->parent, - &rpmhpds[i]->pd); - } - - return of_genpd_add_provider_onecell(pdev->dev.of_node, data); -} - -static void rpmhpd_sync_state(struct device *dev) -{ - const struct rpmhpd_desc *desc = of_device_get_match_data(dev); - struct rpmhpd **rpmhpds = desc->rpmhpds; - unsigned int corner; - struct rpmhpd *pd; - unsigned int i; - int ret; - - mutex_lock(&rpmhpd_lock); - for (i = 0; i < desc->num_pds; i++) { - pd = rpmhpds[i]; - if (!pd) - continue; - - pd->state_synced = true; - if (pd->enabled) - corner = max(pd->corner, pd->enable_corner); - else - corner = 0; - - ret = 
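/*
 * Illustrative sketch, not taken from this patch: how
 * rpmhpd_update_level_mapping() above turns cmd-db aux data (two bytes
 * per entry, zero padded at the end) into the hlvl -> vlvl map. The
 * buffer contents are invented.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

int main(void)
{
        const uint16_t buf[] = { 0, 48, 64, 128, 192, 256, 0, 0 };
        size_t count = sizeof(buf) / sizeof(buf[0]);    /* nbytes >> 1 */
        uint32_t level[16];
        size_t i;

        for (i = 0; i < count; i++) {
                level[i] = buf[i];
                /* A zero entry past index 0 is padding: stop there */
                if (i > 0 && !level[i]) {
                        count = i;
                        break;
                }
                printf("hlvl=%zu -> vlvl=%u\n", i, level[i]);
        }
        printf("level_count=%zu\n", count);
        return 0;
}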
rpmhpd_aggregate_corner(pd, corner); - if (ret) - dev_err(dev, "failed to sync %s\n", pd->res_name); - } - mutex_unlock(&rpmhpd_lock); -} - -static struct platform_driver rpmhpd_driver = { - .driver = { - .name = "qcom-rpmhpd", - .of_match_table = rpmhpd_match_table, - .suppress_bind_attrs = true, - .sync_state = rpmhpd_sync_state, - }, - .probe = rpmhpd_probe, -}; - -static int __init rpmhpd_init(void) -{ - return platform_driver_register(&rpmhpd_driver); -} -core_initcall(rpmhpd_init); - -MODULE_DESCRIPTION("Qualcomm Technologies, Inc. RPMh Power Domain Driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/qcom/rpmpd.c b/drivers/soc/qcom/rpmpd.c deleted file mode 100644 index 99b017fd76b7..000000000000 --- a/drivers/soc/qcom/rpmpd.c +++ /dev/null @@ -1,992 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#define domain_to_rpmpd(domain) container_of(domain, struct rpmpd, pd) - -/* Resource types: - * RPMPD_X is X encoded as a little-endian, lower-case, ASCII string */ -#define RPMPD_SMPA 0x61706d73 -#define RPMPD_LDOA 0x616f646c -#define RPMPD_SMPB 0x62706d73 -#define RPMPD_LDOB 0x626f646c -#define RPMPD_RWCX 0x78637772 -#define RPMPD_RWMX 0x786d7772 -#define RPMPD_RWLC 0x636c7772 -#define RPMPD_RWLM 0x6d6c7772 -#define RPMPD_RWSC 0x63737772 -#define RPMPD_RWSM 0x6d737772 -#define RPMPD_RWGX 0x78677772 - -/* Operation Keys */ -#define KEY_CORNER 0x6e726f63 /* corn */ -#define KEY_ENABLE 0x6e657773 /* swen */ -#define KEY_FLOOR_CORNER 0x636676 /* vfc */ -#define KEY_FLOOR_LEVEL 0x6c6676 /* vfl */ -#define KEY_LEVEL 0x6c766c76 /* vlvl */ - -#define MAX_CORNER_RPMPD_STATE 6 - -struct rpmpd_req { - __le32 key; - __le32 nbytes; - __le32 value; -}; - -struct rpmpd { - struct generic_pm_domain pd; - struct generic_pm_domain *parent; - struct rpmpd *peer; - const bool active_only; - unsigned int corner; - bool enabled; - const int res_type; - const int res_id; - struct qcom_smd_rpm *rpm; - unsigned int max_state; - __le32 key; -}; - -struct rpmpd_desc { - struct rpmpd **rpmpds; - size_t num_pds; - unsigned int max_state; -}; - -static DEFINE_MUTEX(rpmpd_lock); - -/* CX */ -static struct rpmpd cx_rwcx0_lvl_ao; -static struct rpmpd cx_rwcx0_lvl = { - .pd = { .name = "cx", }, - .peer = &cx_rwcx0_lvl_ao, - .res_type = RPMPD_RWCX, - .res_id = 0, - .key = KEY_LEVEL, -}; - -static struct rpmpd cx_rwcx0_lvl_ao = { - .pd = { .name = "cx_ao", }, - .peer = &cx_rwcx0_lvl, - .active_only = true, - .res_type = RPMPD_RWCX, - .res_id = 0, - .key = KEY_LEVEL, -}; - -static struct rpmpd cx_s1a_corner_ao; -static struct rpmpd cx_s1a_corner = { - .pd = { .name = "cx", }, - .peer = &cx_s1a_corner_ao, - .res_type = RPMPD_SMPA, - .res_id = 1, - .key = KEY_CORNER, -}; - -static struct rpmpd cx_s1a_corner_ao = { - .pd = { .name = "cx_ao", }, - .peer = &cx_s1a_corner, - .active_only = true, - .res_type = RPMPD_SMPA, - .res_id = 1, - .key = KEY_CORNER, -}; - -static struct rpmpd cx_s2a_corner_ao; -static struct rpmpd cx_s2a_corner = { - .pd = { .name = "cx", }, - .peer = &cx_s2a_corner_ao, - .res_type = RPMPD_SMPA, - .res_id = 2, - .key = KEY_CORNER, -}; - -static struct rpmpd cx_s2a_corner_ao = { - .pd = { .name = "cx_ao", }, - .peer = &cx_s2a_corner, - .active_only = true, - .res_type = RPMPD_SMPA, - .res_id = 2, - .key = KEY_CORNER, -}; - -static struct rpmpd cx_s2a_lvl_ao; -static struct rpmpd cx_s2a_lvl = { - .pd = 
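/*
 * Illustrative sketch, not taken from this patch: how the RPMPD_*
 * resource-type constants above encode a four-character, lower-case
 * ASCII name as a little-endian u32.
 */
#include <stdio.h>

static unsigned int res_type(const char *s)
{
        return (unsigned char)s[0] |
               ((unsigned int)(unsigned char)s[1] << 8) |
               ((unsigned int)(unsigned char)s[2] << 16) |
               ((unsigned int)(unsigned char)s[3] << 24);
}

int main(void)
{
        printf("smpa -> %#x\n", res_type("smpa"));      /* RPMPD_SMPA 0x61706d73 */
        printf("rwcx -> %#x\n", res_type("rwcx"));      /* RPMPD_RWCX 0x78637772 */
        return 0;
}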
{ .name = "cx", }, - .peer = &cx_s2a_lvl_ao, - .res_type = RPMPD_SMPA, - .res_id = 2, - .key = KEY_LEVEL, -}; - -static struct rpmpd cx_s2a_lvl_ao = { - .pd = { .name = "cx_ao", }, - .peer = &cx_s2a_lvl, - .active_only = true, - .res_type = RPMPD_SMPA, - .res_id = 2, - .key = KEY_LEVEL, -}; - -static struct rpmpd cx_s3a_lvl_ao; -static struct rpmpd cx_s3a_lvl = { - .pd = { .name = "cx", }, - .peer = &cx_s3a_lvl_ao, - .res_type = RPMPD_SMPA, - .res_id = 3, - .key = KEY_LEVEL, -}; - -static struct rpmpd cx_s3a_lvl_ao = { - .pd = { .name = "cx_ao", }, - .peer = &cx_s3a_lvl, - .active_only = true, - .res_type = RPMPD_SMPA, - .res_id = 3, - .key = KEY_LEVEL, -}; - -static struct rpmpd cx_rwcx0_vfl = { - .pd = { .name = "cx_vfl", }, - .res_type = RPMPD_RWCX, - .res_id = 0, - .key = KEY_FLOOR_LEVEL, -}; - -static struct rpmpd cx_rwsc2_vfl = { - .pd = { .name = "cx_vfl", }, - .res_type = RPMPD_RWSC, - .res_id = 2, - .key = KEY_FLOOR_LEVEL, -}; - -static struct rpmpd cx_s1a_vfc = { - .pd = { .name = "cx_vfc", }, - .res_type = RPMPD_SMPA, - .res_id = 1, - .key = KEY_FLOOR_CORNER, -}; - -static struct rpmpd cx_s2a_vfc = { - .pd = { .name = "cx_vfc", }, - .res_type = RPMPD_SMPA, - .res_id = 2, - .key = KEY_FLOOR_CORNER, -}; - -static struct rpmpd cx_s2a_vfl = { - .pd = { .name = "cx_vfl", }, - .res_type = RPMPD_SMPA, - .res_id = 2, - .key = KEY_FLOOR_LEVEL, -}; - -static struct rpmpd cx_s3a_vfl = { - .pd = { .name = "cx_vfl", }, - .res_type = RPMPD_SMPA, - .res_id = 3, - .key = KEY_FLOOR_LEVEL, -}; - -/* G(F)X */ -static struct rpmpd gfx_s2b_corner = { - .pd = { .name = "gfx", }, - .res_type = RPMPD_SMPB, - .res_id = 2, - .key = KEY_CORNER, -}; - -static struct rpmpd gfx_s2b_vfc = { - .pd = { .name = "gfx_vfc", }, - .res_type = RPMPD_SMPB, - .res_id = 2, - .key = KEY_FLOOR_CORNER, -}; - -static struct rpmpd mx_rwmx0_lvl; -static struct rpmpd gx_rwgx0_lvl_ao; -static struct rpmpd gx_rwgx0_lvl = { - .pd = { .name = "gx", }, - .peer = &gx_rwgx0_lvl_ao, - .res_type = RPMPD_RWGX, - .parent = &mx_rwmx0_lvl.pd, - .res_id = 0, - .key = KEY_LEVEL, -}; - -static struct rpmpd mx_rwmx0_lvl_ao; -static struct rpmpd gx_rwgx0_lvl_ao = { - .pd = { .name = "gx_ao", }, - .peer = &gx_rwgx0_lvl, - .parent = &mx_rwmx0_lvl_ao.pd, - .active_only = true, - .res_type = RPMPD_RWGX, - .res_id = 0, - .key = KEY_LEVEL, -}; - -/* MX */ -static struct rpmpd mx_l3a_corner_ao; -static struct rpmpd mx_l3a_corner = { - .pd = { .name = "mx", }, - .peer = &mx_l3a_corner_ao, - .res_type = RPMPD_LDOA, - .res_id = 3, - .key = KEY_CORNER, -}; - -static struct rpmpd mx_l3a_corner_ao = { - .pd = { .name = "mx_ao", }, - .peer = &mx_l3a_corner, - .active_only = true, - .res_type = RPMPD_LDOA, - .res_id = 3, - .key = KEY_CORNER, -}; - -static struct rpmpd mx_l12a_lvl_ao; -static struct rpmpd mx_l12a_lvl = { - .pd = { .name = "mx", }, - .peer = &mx_l12a_lvl_ao, - .res_type = RPMPD_LDOA, - .res_id = 12, - .key = KEY_LEVEL, -}; - -static struct rpmpd mx_l12a_lvl_ao = { - .pd = { .name = "mx_ao", }, - .peer = &mx_l12a_lvl, - .active_only = true, - .res_type = RPMPD_LDOA, - .res_id = 12, - .key = KEY_LEVEL, -}; - -static struct rpmpd mx_s2a_corner_ao; -static struct rpmpd mx_s2a_corner = { - .pd = { .name = "mx", }, - .peer = &mx_s2a_corner_ao, - .res_type = RPMPD_SMPA, - .res_id = 2, - .key = KEY_CORNER, -}; - -static struct rpmpd mx_s2a_corner_ao = { - .pd = { .name = "mx_ao", }, - .peer = &mx_s2a_corner, - .active_only = true, - .res_type = RPMPD_SMPA, - .res_id = 2, - .key = KEY_CORNER, -}; - -static struct rpmpd mx_rwmx0_lvl_ao; -static struct 
rpmpd mx_rwmx0_lvl = { - .pd = { .name = "mx", }, - .peer = &mx_rwmx0_lvl_ao, - .res_type = RPMPD_RWMX, - .res_id = 0, - .key = KEY_LEVEL, -}; - -static struct rpmpd mx_rwmx0_lvl_ao = { - .pd = { .name = "mx_ao", }, - .peer = &mx_rwmx0_lvl, - .active_only = true, - .res_type = RPMPD_RWMX, - .res_id = 0, - .key = KEY_LEVEL, -}; - -static struct rpmpd mx_s6a_lvl_ao; -static struct rpmpd mx_s6a_lvl = { - .pd = { .name = "mx", }, - .peer = &mx_s6a_lvl_ao, - .res_type = RPMPD_SMPA, - .res_id = 6, - .key = KEY_LEVEL, -}; - -static struct rpmpd mx_s6a_lvl_ao = { - .pd = { .name = "mx_ao", }, - .peer = &mx_s6a_lvl, - .active_only = true, - .res_type = RPMPD_SMPA, - .res_id = 6, - .key = KEY_LEVEL, -}; - -static struct rpmpd mx_s7a_lvl_ao; -static struct rpmpd mx_s7a_lvl = { - .pd = { .name = "mx", }, - .peer = &mx_s7a_lvl_ao, - .res_type = RPMPD_SMPA, - .res_id = 7, - .key = KEY_LEVEL, -}; - -static struct rpmpd mx_s7a_lvl_ao = { - .pd = { .name = "mx_ao", }, - .peer = &mx_s7a_lvl, - .active_only = true, - .res_type = RPMPD_SMPA, - .res_id = 7, - .key = KEY_LEVEL, -}; - -static struct rpmpd mx_l12a_vfl = { - .pd = { .name = "mx_vfl", }, - .res_type = RPMPD_LDOA, - .res_id = 12, - .key = KEY_FLOOR_LEVEL, -}; - -static struct rpmpd mx_rwmx0_vfl = { - .pd = { .name = "mx_vfl", }, - .res_type = RPMPD_RWMX, - .res_id = 0, - .key = KEY_FLOOR_LEVEL, -}; - -static struct rpmpd mx_rwsm6_vfl = { - .pd = { .name = "mx_vfl", }, - .res_type = RPMPD_RWSM, - .res_id = 6, - .key = KEY_FLOOR_LEVEL, -}; - -/* MD */ -static struct rpmpd md_s1a_corner_ao; -static struct rpmpd md_s1a_corner = { - .pd = { .name = "md", }, - .peer = &md_s1a_corner_ao, - .res_type = RPMPD_SMPA, - .res_id = 1, - .key = KEY_CORNER, -}; - -static struct rpmpd md_s1a_corner_ao = { - .pd = { .name = "md_ao", }, - .peer = &md_s1a_corner, - .active_only = true, - .res_type = RPMPD_SMPA, - .res_id = 1, - .key = KEY_CORNER, -}; - -static struct rpmpd md_s1a_lvl_ao; -static struct rpmpd md_s1a_lvl = { - .pd = { .name = "md", }, - .peer = &md_s1a_lvl_ao, - .res_type = RPMPD_SMPA, - .res_id = 1, - .key = KEY_LEVEL, -}; - -static struct rpmpd md_s1a_lvl_ao = { - .pd = { .name = "md_ao", }, - .peer = &md_s1a_lvl, - .active_only = true, - .res_type = RPMPD_SMPA, - .res_id = 1, - .key = KEY_LEVEL, -}; - -static struct rpmpd md_s1a_vfc = { - .pd = { .name = "md_vfc", }, - .res_type = RPMPD_SMPA, - .res_id = 1, - .key = KEY_FLOOR_CORNER, -}; - -/* LPI_CX */ -static struct rpmpd lpi_cx_rwlc0_lvl = { - .pd = { .name = "lpi_cx", }, - .res_type = RPMPD_RWLC, - .res_id = 0, - .key = KEY_LEVEL, -}; - -static struct rpmpd lpi_cx_rwlc0_vfl = { - .pd = { .name = "lpi_cx_vfl", }, - .res_type = RPMPD_RWLC, - .res_id = 0, - .key = KEY_FLOOR_LEVEL, -}; - -/* LPI_MX */ -static struct rpmpd lpi_mx_rwlm0_lvl = { - .pd = { .name = "lpi_mx", }, - .res_type = RPMPD_RWLM, - .res_id = 0, - .key = KEY_LEVEL, -}; - -static struct rpmpd lpi_mx_rwlm0_vfl = { - .pd = { .name = "lpi_mx_vfl", }, - .res_type = RPMPD_RWLM, - .res_id = 0, - .key = KEY_FLOOR_LEVEL, -}; - -/* SSC_CX */ -static struct rpmpd ssc_cx_l26a_corner = { - .pd = { .name = "ssc_cx", }, - .res_type = RPMPD_LDOA, - .res_id = 26, - .key = KEY_CORNER, -}; - -static struct rpmpd ssc_cx_rwlc0_lvl = { - .pd = { .name = "ssc_cx", }, - .res_type = RPMPD_RWLC, - .res_id = 0, - .key = KEY_LEVEL, -}; - -static struct rpmpd ssc_cx_rwsc0_lvl = { - .pd = { .name = "ssc_cx", }, - .res_type = RPMPD_RWSC, - .res_id = 0, - .key = KEY_LEVEL, -}; - -static struct rpmpd ssc_cx_l26a_vfc = { - .pd = { .name = "ssc_cx_vfc", }, - .res_type 
= RPMPD_LDOA, - .res_id = 26, - .key = KEY_FLOOR_CORNER, -}; - -static struct rpmpd ssc_cx_rwlc0_vfl = { - .pd = { .name = "ssc_cx_vfl", }, - .res_type = RPMPD_RWLC, - .res_id = 0, - .key = KEY_FLOOR_LEVEL, -}; - -static struct rpmpd ssc_cx_rwsc0_vfl = { - .pd = { .name = "ssc_cx_vfl", }, - .res_type = RPMPD_RWSC, - .res_id = 0, - .key = KEY_FLOOR_LEVEL, -}; - -/* SSC_MX */ -static struct rpmpd ssc_mx_rwlm0_lvl = { - .pd = { .name = "ssc_mx", }, - .res_type = RPMPD_RWLM, - .res_id = 0, - .key = KEY_LEVEL, -}; - -static struct rpmpd ssc_mx_rwsm0_lvl = { - .pd = { .name = "ssc_mx", }, - .res_type = RPMPD_RWSM, - .res_id = 0, - .key = KEY_LEVEL, -}; - -static struct rpmpd ssc_mx_rwlm0_vfl = { - .pd = { .name = "ssc_mx_vfl", }, - .res_type = RPMPD_RWLM, - .res_id = 0, - .key = KEY_FLOOR_LEVEL, -}; - -static struct rpmpd ssc_mx_rwsm0_vfl = { - .pd = { .name = "ssc_mx_vfl", }, - .res_type = RPMPD_RWSM, - .res_id = 0, - .key = KEY_FLOOR_LEVEL, -}; - -static struct rpmpd *mdm9607_rpmpds[] = { - [MDM9607_VDDCX] = &cx_s3a_lvl, - [MDM9607_VDDCX_AO] = &cx_s3a_lvl_ao, - [MDM9607_VDDCX_VFL] = &cx_s3a_vfl, - [MDM9607_VDDMX] = &mx_l12a_lvl, - [MDM9607_VDDMX_AO] = &mx_l12a_lvl_ao, - [MDM9607_VDDMX_VFL] = &mx_l12a_vfl, -}; - -static const struct rpmpd_desc mdm9607_desc = { - .rpmpds = mdm9607_rpmpds, - .num_pds = ARRAY_SIZE(mdm9607_rpmpds), - .max_state = RPM_SMD_LEVEL_TURBO, -}; - -static struct rpmpd *msm8226_rpmpds[] = { - [MSM8226_VDDCX] = &cx_s1a_corner, - [MSM8226_VDDCX_AO] = &cx_s1a_corner_ao, - [MSM8226_VDDCX_VFC] = &cx_s1a_vfc, -}; - -static const struct rpmpd_desc msm8226_desc = { - .rpmpds = msm8226_rpmpds, - .num_pds = ARRAY_SIZE(msm8226_rpmpds), - .max_state = MAX_CORNER_RPMPD_STATE, -}; - -static struct rpmpd *msm8939_rpmpds[] = { - [MSM8939_VDDMDCX] = &md_s1a_corner, - [MSM8939_VDDMDCX_AO] = &md_s1a_corner_ao, - [MSM8939_VDDMDCX_VFC] = &md_s1a_vfc, - [MSM8939_VDDCX] = &cx_s2a_corner, - [MSM8939_VDDCX_AO] = &cx_s2a_corner_ao, - [MSM8939_VDDCX_VFC] = &cx_s2a_vfc, - [MSM8939_VDDMX] = &mx_l3a_corner, - [MSM8939_VDDMX_AO] = &mx_l3a_corner_ao, -}; - -static const struct rpmpd_desc msm8939_desc = { - .rpmpds = msm8939_rpmpds, - .num_pds = ARRAY_SIZE(msm8939_rpmpds), - .max_state = MAX_CORNER_RPMPD_STATE, -}; - -static struct rpmpd *msm8916_rpmpds[] = { - [MSM8916_VDDCX] = &cx_s1a_corner, - [MSM8916_VDDCX_AO] = &cx_s1a_corner_ao, - [MSM8916_VDDCX_VFC] = &cx_s1a_vfc, - [MSM8916_VDDMX] = &mx_l3a_corner, - [MSM8916_VDDMX_AO] = &mx_l3a_corner_ao, -}; - -static const struct rpmpd_desc msm8916_desc = { - .rpmpds = msm8916_rpmpds, - .num_pds = ARRAY_SIZE(msm8916_rpmpds), - .max_state = MAX_CORNER_RPMPD_STATE, -}; - -static struct rpmpd *msm8953_rpmpds[] = { - [MSM8953_VDDMD] = &md_s1a_lvl, - [MSM8953_VDDMD_AO] = &md_s1a_lvl_ao, - [MSM8953_VDDCX] = &cx_s2a_lvl, - [MSM8953_VDDCX_AO] = &cx_s2a_lvl_ao, - [MSM8953_VDDCX_VFL] = &cx_s2a_vfl, - [MSM8953_VDDMX] = &mx_s7a_lvl, - [MSM8953_VDDMX_AO] = &mx_s7a_lvl_ao, -}; - -static const struct rpmpd_desc msm8953_desc = { - .rpmpds = msm8953_rpmpds, - .num_pds = ARRAY_SIZE(msm8953_rpmpds), - .max_state = RPM_SMD_LEVEL_TURBO, -}; - -static struct rpmpd *msm8976_rpmpds[] = { - [MSM8976_VDDCX] = &cx_s2a_lvl, - [MSM8976_VDDCX_AO] = &cx_s2a_lvl_ao, - [MSM8976_VDDCX_VFL] = &cx_rwsc2_vfl, - [MSM8976_VDDMX] = &mx_s6a_lvl, - [MSM8976_VDDMX_AO] = &mx_s6a_lvl_ao, - [MSM8976_VDDMX_VFL] = &mx_rwsm6_vfl, -}; - -static const struct rpmpd_desc msm8976_desc = { - .rpmpds = msm8976_rpmpds, - .num_pds = ARRAY_SIZE(msm8976_rpmpds), - .max_state = RPM_SMD_LEVEL_TURBO_HIGH, -}; - -static 
struct rpmpd *msm8994_rpmpds[] = { - [MSM8994_VDDCX] = &cx_s1a_corner, - [MSM8994_VDDCX_AO] = &cx_s1a_corner_ao, - [MSM8994_VDDCX_VFC] = &cx_s1a_vfc, - [MSM8994_VDDMX] = &mx_s2a_corner, - [MSM8994_VDDMX_AO] = &mx_s2a_corner_ao, - - /* Attention! *Some* 8994 boards with pm8004 may use SMPC here! */ - [MSM8994_VDDGFX] = &gfx_s2b_corner, - [MSM8994_VDDGFX_VFC] = &gfx_s2b_vfc, -}; - -static const struct rpmpd_desc msm8994_desc = { - .rpmpds = msm8994_rpmpds, - .num_pds = ARRAY_SIZE(msm8994_rpmpds), - .max_state = MAX_CORNER_RPMPD_STATE, -}; - -static struct rpmpd *msm8996_rpmpds[] = { - [MSM8996_VDDCX] = &cx_s1a_corner, - [MSM8996_VDDCX_AO] = &cx_s1a_corner_ao, - [MSM8996_VDDCX_VFC] = &cx_s1a_vfc, - [MSM8996_VDDMX] = &mx_s2a_corner, - [MSM8996_VDDMX_AO] = &mx_s2a_corner_ao, - [MSM8996_VDDSSCX] = &ssc_cx_l26a_corner, - [MSM8996_VDDSSCX_VFC] = &ssc_cx_l26a_vfc, -}; - -static const struct rpmpd_desc msm8996_desc = { - .rpmpds = msm8996_rpmpds, - .num_pds = ARRAY_SIZE(msm8996_rpmpds), - .max_state = MAX_CORNER_RPMPD_STATE, -}; - -static struct rpmpd *msm8998_rpmpds[] = { - [MSM8998_VDDCX] = &cx_rwcx0_lvl, - [MSM8998_VDDCX_AO] = &cx_rwcx0_lvl_ao, - [MSM8998_VDDCX_VFL] = &cx_rwcx0_vfl, - [MSM8998_VDDMX] = &mx_rwmx0_lvl, - [MSM8998_VDDMX_AO] = &mx_rwmx0_lvl_ao, - [MSM8998_VDDMX_VFL] = &mx_rwmx0_vfl, - [MSM8998_SSCCX] = &ssc_cx_rwsc0_lvl, - [MSM8998_SSCCX_VFL] = &ssc_cx_rwsc0_vfl, - [MSM8998_SSCMX] = &ssc_mx_rwsm0_lvl, - [MSM8998_SSCMX_VFL] = &ssc_mx_rwsm0_vfl, -}; - -static const struct rpmpd_desc msm8998_desc = { - .rpmpds = msm8998_rpmpds, - .num_pds = ARRAY_SIZE(msm8998_rpmpds), - .max_state = RPM_SMD_LEVEL_BINNING, -}; - -static struct rpmpd *qcs404_rpmpds[] = { - [QCS404_VDDMX] = &mx_rwmx0_lvl, - [QCS404_VDDMX_AO] = &mx_rwmx0_lvl_ao, - [QCS404_VDDMX_VFL] = &mx_rwmx0_vfl, - [QCS404_LPICX] = &lpi_cx_rwlc0_lvl, - [QCS404_LPICX_VFL] = &lpi_cx_rwlc0_vfl, - [QCS404_LPIMX] = &lpi_mx_rwlm0_lvl, - [QCS404_LPIMX_VFL] = &lpi_mx_rwlm0_vfl, -}; - -static const struct rpmpd_desc qcs404_desc = { - .rpmpds = qcs404_rpmpds, - .num_pds = ARRAY_SIZE(qcs404_rpmpds), - .max_state = RPM_SMD_LEVEL_BINNING, -}; - -static struct rpmpd *sdm660_rpmpds[] = { - [SDM660_VDDCX] = &cx_rwcx0_lvl, - [SDM660_VDDCX_AO] = &cx_rwcx0_lvl_ao, - [SDM660_VDDCX_VFL] = &cx_rwcx0_vfl, - [SDM660_VDDMX] = &mx_rwmx0_lvl, - [SDM660_VDDMX_AO] = &mx_rwmx0_lvl_ao, - [SDM660_VDDMX_VFL] = &mx_rwmx0_vfl, - [SDM660_SSCCX] = &ssc_cx_rwlc0_lvl, - [SDM660_SSCCX_VFL] = &ssc_cx_rwlc0_vfl, - [SDM660_SSCMX] = &ssc_mx_rwlm0_lvl, - [SDM660_SSCMX_VFL] = &ssc_mx_rwlm0_vfl, -}; - -static const struct rpmpd_desc sdm660_desc = { - .rpmpds = sdm660_rpmpds, - .num_pds = ARRAY_SIZE(sdm660_rpmpds), - .max_state = RPM_SMD_LEVEL_TURBO, -}; - -static struct rpmpd *sm6115_rpmpds[] = { - [SM6115_VDDCX] = &cx_rwcx0_lvl, - [SM6115_VDDCX_AO] = &cx_rwcx0_lvl_ao, - [SM6115_VDDCX_VFL] = &cx_rwcx0_vfl, - [SM6115_VDDMX] = &mx_rwmx0_lvl, - [SM6115_VDDMX_AO] = &mx_rwmx0_lvl_ao, - [SM6115_VDDMX_VFL] = &mx_rwmx0_vfl, - [SM6115_VDD_LPI_CX] = &lpi_cx_rwlc0_lvl, - [SM6115_VDD_LPI_MX] = &lpi_mx_rwlm0_lvl, -}; - -static const struct rpmpd_desc sm6115_desc = { - .rpmpds = sm6115_rpmpds, - .num_pds = ARRAY_SIZE(sm6115_rpmpds), - .max_state = RPM_SMD_LEVEL_TURBO_NO_CPR, -}; - -static struct rpmpd *sm6125_rpmpds[] = { - [SM6125_VDDCX] = &cx_rwcx0_lvl, - [SM6125_VDDCX_AO] = &cx_rwcx0_lvl_ao, - [SM6125_VDDCX_VFL] = &cx_rwcx0_vfl, - [SM6125_VDDMX] = &mx_rwmx0_lvl, - [SM6125_VDDMX_AO] = &mx_rwmx0_lvl_ao, - [SM6125_VDDMX_VFL] = &mx_rwmx0_vfl, -}; - -static const struct rpmpd_desc sm6125_desc = { - 
.rpmpds = sm6125_rpmpds, - .num_pds = ARRAY_SIZE(sm6125_rpmpds), - .max_state = RPM_SMD_LEVEL_BINNING, -}; - -static struct rpmpd *sm6375_rpmpds[] = { - [SM6375_VDDCX] = &cx_rwcx0_lvl, - [SM6375_VDDCX_AO] = &cx_rwcx0_lvl_ao, - [SM6375_VDDCX_VFL] = &cx_rwcx0_vfl, - [SM6375_VDDMX] = &mx_rwmx0_lvl, - [SM6375_VDDMX_AO] = &mx_rwmx0_lvl_ao, - [SM6375_VDDMX_VFL] = &mx_rwmx0_vfl, - [SM6375_VDDGX] = &gx_rwgx0_lvl, - [SM6375_VDDGX_AO] = &gx_rwgx0_lvl_ao, - [SM6375_VDD_LPI_CX] = &lpi_cx_rwlc0_lvl, - [SM6375_VDD_LPI_MX] = &lpi_mx_rwlm0_lvl, -}; - -static const struct rpmpd_desc sm6375_desc = { - .rpmpds = sm6375_rpmpds, - .num_pds = ARRAY_SIZE(sm6375_rpmpds), - .max_state = RPM_SMD_LEVEL_TURBO_NO_CPR, -}; - -static struct rpmpd *qcm2290_rpmpds[] = { - [QCM2290_VDDCX] = &cx_rwcx0_lvl, - [QCM2290_VDDCX_AO] = &cx_rwcx0_lvl_ao, - [QCM2290_VDDCX_VFL] = &cx_rwcx0_vfl, - [QCM2290_VDDMX] = &mx_rwmx0_lvl, - [QCM2290_VDDMX_AO] = &mx_rwmx0_lvl_ao, - [QCM2290_VDDMX_VFL] = &mx_rwmx0_vfl, - [QCM2290_VDD_LPI_CX] = &lpi_cx_rwlc0_lvl, - [QCM2290_VDD_LPI_MX] = &lpi_mx_rwlm0_lvl, -}; - -static const struct rpmpd_desc qcm2290_desc = { - .rpmpds = qcm2290_rpmpds, - .num_pds = ARRAY_SIZE(qcm2290_rpmpds), - .max_state = RPM_SMD_LEVEL_TURBO_NO_CPR, -}; - -static const struct of_device_id rpmpd_match_table[] = { - { .compatible = "qcom,mdm9607-rpmpd", .data = &mdm9607_desc }, - { .compatible = "qcom,msm8226-rpmpd", .data = &msm8226_desc }, - { .compatible = "qcom,msm8909-rpmpd", .data = &msm8916_desc }, - { .compatible = "qcom,msm8916-rpmpd", .data = &msm8916_desc }, - { .compatible = "qcom,msm8939-rpmpd", .data = &msm8939_desc }, - { .compatible = "qcom,msm8953-rpmpd", .data = &msm8953_desc }, - { .compatible = "qcom,msm8976-rpmpd", .data = &msm8976_desc }, - { .compatible = "qcom,msm8994-rpmpd", .data = &msm8994_desc }, - { .compatible = "qcom,msm8996-rpmpd", .data = &msm8996_desc }, - { .compatible = "qcom,msm8998-rpmpd", .data = &msm8998_desc }, - { .compatible = "qcom,qcm2290-rpmpd", .data = &qcm2290_desc }, - { .compatible = "qcom,qcs404-rpmpd", .data = &qcs404_desc }, - { .compatible = "qcom,sdm660-rpmpd", .data = &sdm660_desc }, - { .compatible = "qcom,sm6115-rpmpd", .data = &sm6115_desc }, - { .compatible = "qcom,sm6125-rpmpd", .data = &sm6125_desc }, - { .compatible = "qcom,sm6375-rpmpd", .data = &sm6375_desc }, - { } -}; -MODULE_DEVICE_TABLE(of, rpmpd_match_table); - -static int rpmpd_send_enable(struct rpmpd *pd, bool enable) -{ - struct rpmpd_req req = { - .key = KEY_ENABLE, - .nbytes = cpu_to_le32(sizeof(u32)), - .value = cpu_to_le32(enable), - }; - - return qcom_rpm_smd_write(pd->rpm, QCOM_SMD_RPM_ACTIVE_STATE, - pd->res_type, pd->res_id, &req, sizeof(req)); -} - -static int rpmpd_send_corner(struct rpmpd *pd, int state, unsigned int corner) -{ - struct rpmpd_req req = { - .key = pd->key, - .nbytes = cpu_to_le32(sizeof(u32)), - .value = cpu_to_le32(corner), - }; - - return qcom_rpm_smd_write(pd->rpm, state, pd->res_type, pd->res_id, - &req, sizeof(req)); -}; - -static void to_active_sleep(struct rpmpd *pd, unsigned int corner, - unsigned int *active, unsigned int *sleep) -{ - *active = corner; - - if (pd->active_only) - *sleep = 0; - else - *sleep = *active; -} - -static int rpmpd_aggregate_corner(struct rpmpd *pd) -{ - int ret; - struct rpmpd *peer = pd->peer; - unsigned int active_corner, sleep_corner; - unsigned int this_active_corner = 0, this_sleep_corner = 0; - unsigned int peer_active_corner = 0, peer_sleep_corner = 0; - - to_active_sleep(pd, pd->corner, &this_active_corner, &this_sleep_corner); - - if 
(peer && peer->enabled) - to_active_sleep(peer, peer->corner, &peer_active_corner, - &peer_sleep_corner); - - active_corner = max(this_active_corner, peer_active_corner); - - ret = rpmpd_send_corner(pd, QCOM_SMD_RPM_ACTIVE_STATE, active_corner); - if (ret) - return ret; - - sleep_corner = max(this_sleep_corner, peer_sleep_corner); - - return rpmpd_send_corner(pd, QCOM_SMD_RPM_SLEEP_STATE, sleep_corner); -} - -static int rpmpd_power_on(struct generic_pm_domain *domain) -{ - int ret; - struct rpmpd *pd = domain_to_rpmpd(domain); - - mutex_lock(&rpmpd_lock); - - ret = rpmpd_send_enable(pd, true); - if (ret) - goto out; - - pd->enabled = true; - - if (pd->corner) - ret = rpmpd_aggregate_corner(pd); - -out: - mutex_unlock(&rpmpd_lock); - - return ret; -} - -static int rpmpd_power_off(struct generic_pm_domain *domain) -{ - int ret; - struct rpmpd *pd = domain_to_rpmpd(domain); - - mutex_lock(&rpmpd_lock); - - ret = rpmpd_send_enable(pd, false); - if (!ret) - pd->enabled = false; - - mutex_unlock(&rpmpd_lock); - - return ret; -} - -static int rpmpd_set_performance(struct generic_pm_domain *domain, - unsigned int state) -{ - int ret = 0; - struct rpmpd *pd = domain_to_rpmpd(domain); - - if (state > pd->max_state) - state = pd->max_state; - - mutex_lock(&rpmpd_lock); - - pd->corner = state; - - /* Always send updates for vfc and vfl */ - if (!pd->enabled && pd->key != cpu_to_le32(KEY_FLOOR_CORNER) && - pd->key != cpu_to_le32(KEY_FLOOR_LEVEL)) - goto out; - - ret = rpmpd_aggregate_corner(pd); - -out: - mutex_unlock(&rpmpd_lock); - - return ret; -} - -static unsigned int rpmpd_get_performance(struct generic_pm_domain *genpd, - struct dev_pm_opp *opp) -{ - return dev_pm_opp_get_level(opp); -} - -static int rpmpd_probe(struct platform_device *pdev) -{ - int i; - size_t num; - struct genpd_onecell_data *data; - struct qcom_smd_rpm *rpm; - struct rpmpd **rpmpds; - const struct rpmpd_desc *desc; - - rpm = dev_get_drvdata(pdev->dev.parent); - if (!rpm) { - dev_err(&pdev->dev, "Unable to retrieve handle to RPM\n"); - return -ENODEV; - } - - desc = of_device_get_match_data(&pdev->dev); - if (!desc) - return -EINVAL; - - rpmpds = desc->rpmpds; - num = desc->num_pds; - - data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); - if (!data) - return -ENOMEM; - - data->domains = devm_kcalloc(&pdev->dev, num, sizeof(*data->domains), - GFP_KERNEL); - if (!data->domains) - return -ENOMEM; - - data->num_domains = num; - - for (i = 0; i < num; i++) { - if (!rpmpds[i]) { - dev_warn(&pdev->dev, "rpmpds[] with empty entry at index=%d\n", - i); - continue; - } - - rpmpds[i]->rpm = rpm; - rpmpds[i]->max_state = desc->max_state; - rpmpds[i]->pd.power_off = rpmpd_power_off; - rpmpds[i]->pd.power_on = rpmpd_power_on; - rpmpds[i]->pd.set_performance_state = rpmpd_set_performance; - rpmpds[i]->pd.opp_to_performance_state = rpmpd_get_performance; - pm_genpd_init(&rpmpds[i]->pd, NULL, true); - - data->domains[i] = &rpmpds[i]->pd; - } - - /* Add subdomains */ - for (i = 0; i < num; i++) { - if (!rpmpds[i]) - continue; - - if (rpmpds[i]->parent) - pm_genpd_add_subdomain(rpmpds[i]->parent, &rpmpds[i]->pd); - } - - return of_genpd_add_provider_onecell(pdev->dev.of_node, data); -} - -static struct platform_driver rpmpd_driver = { - .driver = { - .name = "qcom-rpmpd", - .of_match_table = rpmpd_match_table, - .suppress_bind_attrs = true, - }, - .probe = rpmpd_probe, -}; - -static int __init rpmpd_init(void) -{ - return platform_driver_register(&rpmpd_driver); -} -core_initcall(rpmpd_init); - -MODULE_DESCRIPTION("Qualcomm 
Technologies, Inc. RPM Power Domain Driver"); -MODULE_LICENSE("GPL v2"); -- cgit v1.2.3-70-g09d2