cpc_desc
struct cpc_desc *cpc_desc;
cpc_desc = per_cpu(cpc_desc_ptr, cpu);
if (!cpc_desc) {
raw_spin_lock_irqsave(&cpc_desc->rmw_lock, flags);
raw_spin_unlock_irqrestore(&cpc_desc->rmw_lock, flags);
raw_spin_unlock_irqrestore(&cpc_desc->rmw_lock, flags);
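The rmw_lock lines above come from the register write path: a read-modify-write of a shared register image, with one unlock on the error path and one on the success path. A minimal sketch of that pattern, assuming hypothetical cpc_read_reg()/cpc_write_reg() accessors (not the driver's actual helper names):

#include <linux/spinlock.h>
#include <acpi/cppc_acpi.h>

/*
 * Illustrative sketch only: serialize a read-modify-write of a shared
 * register image behind cpc_desc->rmw_lock.  cpc_read_reg() and
 * cpc_write_reg() are hypothetical stand-ins for the driver's real
 * accessors; the two unlock sites mirror the error and success paths
 * visible in the listing above.
 */
static int example_rmw_field(struct cpc_desc *cpc_desc,
			     struct cpc_register_resource *reg,
			     u64 mask, u64 val)
{
	unsigned long flags;
	u64 prev;
	int ret;

	raw_spin_lock_irqsave(&cpc_desc->rmw_lock, flags);
	ret = cpc_read_reg(reg, &prev);		/* hypothetical helper */
	if (ret) {
		raw_spin_unlock_irqrestore(&cpc_desc->rmw_lock, flags);
		return ret;
	}
	val = (prev & ~mask) | (val & mask);	/* merge the new field */
	ret = cpc_write_reg(reg, val);		/* hypothetical helper */
	raw_spin_unlock_irqrestore(&cpc_desc->rmw_lock, flags);
	return ret;
}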
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
if (!cpc_desc) {
reg = &cpc_desc->cpc_regs[reg_idx];
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
if (!cpc_desc) {
reg = &cpc_desc->cpc_regs[reg_idx];
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
if (!cpc_desc) {
highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF];
nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
low_freq_reg = &cpc_desc->cpc_regs[LOWEST_FREQ];
nom_freq_reg = &cpc_desc->cpc_regs[NOMINAL_FREQ];
guaranteed_reg = &cpc_desc->cpc_regs[GUARANTEED_PERF];
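The register pointers above (HIGHEST_PERF through GUARANTEED_PERF) are what cppc_get_perf_caps() reads to fill struct cppc_perf_caps. A minimal caller sketch, assuming the declarations in include/acpi/cppc_acpi.h; the caller function name is made up for illustration:

#include <linux/printk.h>
#include <acpi/cppc_acpi.h>

/* Hypothetical caller: query one CPU's CPPC performance capabilities. */
static int example_query_caps(int cpunum)
{
	struct cppc_perf_caps caps;
	int ret;

	ret = cppc_get_perf_caps(cpunum, &caps);
	if (ret)
		return ret;

	pr_info("CPU%d: lowest=%u nonlinear=%u nominal=%u highest=%u guaranteed=%u\n",
		cpunum, caps.lowest_perf, caps.lowest_nonlinear_perf,
		caps.nominal_perf, caps.highest_perf, caps.guaranteed_perf);
	return 0;
}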
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
return CPC_IN_PCC(&cpc_desc->cpc_regs[DELIVERED_CTR]) ||
CPC_IN_PCC(&cpc_desc->cpc_regs[REFERENCE_CTR]) ||
CPC_IN_PCC(&cpc_desc->cpc_regs[CTR_WRAP_TIME]) ||
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
if (!cpc_desc) {
delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];
ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
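DELIVERED_CTR, REFERENCE_CTR, REFERENCE_PERF and CTR_WRAP_TIME are the feedback counters returned by cppc_get_perf_ctrs(); note the fallback to NOMINAL_PERF when the reference-performance register is not supported. A hedged usage sketch (the two-sample delta is illustrative, not the driver's own logic):

#include <linux/delay.h>
#include <linux/printk.h>
#include <acpi/cppc_acpi.h>

/* Hypothetical caller: sample the feedback counters twice and report the
 * delivered/reference deltas, roughly how cpufreq drivers estimate the
 * average performance over a window. */
static int example_sample_ctrs(int cpu)
{
	struct cppc_perf_fb_ctrs t0, t1;
	int ret;

	ret = cppc_get_perf_ctrs(cpu, &t0);
	if (ret)
		return ret;
	udelay(100);			/* arbitrary sampling window */
	ret = cppc_get_perf_ctrs(cpu, &t1);
	if (ret)
		return ret;

	pr_info("CPU%d: delivered delta=%llu reference delta=%llu ref_perf=%llu\n",
		cpu, t1.delivered - t0.delivered,
		t1.reference - t0.reference, t1.reference_perf);
	return 0;
}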
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
if (!cpc_desc) {
auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];
epp_set_reg = &cpc_desc->cpc_regs[ENERGY_PERF];
#define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)
struct cpc_desc *cpc_ptr = to_cpc_desc(kobj); \
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
if (!cpc_desc) {
desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
min_perf_reg = &cpc_desc->cpc_regs[MIN_PERF];
max_perf_reg = &cpc_desc->cpc_regs[MAX_PERF];
cpc_desc->write_cmd_id = pcc_ss_data->pcc_write_cnt;
cpc_desc->write_cmd_status = 0;
cpc_desc->write_cmd_id != pcc_ss_data->pcc_write_cnt);
ret = cpc_desc->write_cmd_status;
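The DESIRED_PERF/MIN_PERF/MAX_PERF pointers and the write_cmd_id/write_cmd_status bookkeeping above belong to cppc_set_perf(); the bookkeeping only matters when the registers live in PCC space and the caller must wait for the platform to consume the write. A minimal caller sketch, assuming the cppc_acpi.h declarations (the wrapper name is made up):

#include <acpi/cppc_acpi.h>

/* Hypothetical wrapper: request a new desired performance level for a CPU.
 * Only desired_perf is populated here; min_perf/max_perf stay zero. */
static int example_set_desired_perf(int cpu, u32 desired)
{
	struct cppc_perf_ctrls ctrls = {
		.desired_perf = desired,
	};

	return cppc_set_perf(cpu, &ctrls);
}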
struct cpc_desc *cpc_desc;
cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
if (!cpc_desc)
desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);
static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
struct cpc_desc *cpc_ptr;
struct cpc_desc *cpc_ptr;
struct cpc_desc *cpc_ptr, *match_cpc_ptr;
struct cpc_desc *cpc_ptr;
cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
struct cpc_desc *cpc_ptr;
static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
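The DEFINE_PER_CPU declaration above is the backing store for every per_cpu(cpc_desc_ptr, ...) lookup in this listing. The recurring access pattern, condensed into one illustrative fragment:

/* Condensed illustration of the lookup-and-check pattern repeated in the
 * lines above: fetch the per-CPU descriptor, bail out if the CPU exposed
 * no _CPC package, then index into its register array. */
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
struct cpc_register_resource *reg;

if (!cpc_desc) {
	pr_debug("No CPC descriptor for CPU:%d\n", cpu);
	return -ENODEV;
}
reg = &cpc_desc->cpc_regs[DESIRED_PERF];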