
Commit a28b2bf

ionela-voinescu authored and rafaeljw committed
cppc_cpufreq: replace per-cpu data array with a list
The cppc_cpudata per-cpu storage was inefficient (1), in addition to causing functional issues (2) when CPUs are hotplugged out, due to per-cpu data being improperly initialised.

(1) The amount of information needed for CPPC performance control in its cpufreq driver depends on the domain (PSD) coordination type:

    ANY:  One set of CPPC control and capability data (e.g. desired performance, highest/lowest performance, etc) applies to all CPUs in the domain.

    ALL:  Same as ANY. To be noted that this type is not currently supported. When supported, information about which CPUs belong to a domain is needed in order for frequency change requests to be sent to each of them.

    HW:   It's necessary to store CPPC control and capability information for all the CPUs. HW will then coordinate the performance state based on their limitations and requests.

    NONE: Same as HW. No HW coordination is expected.

    Despite this, the previous initialisation code would indiscriminately allocate memory for all CPUs (all_cpu_data) and unnecessarily duplicate the performance capabilities and the domain sharing mask and type for each possible CPU.

(2) With the per-cpu structure, under ANY coordination, the cppc_cpudata CPU information is not initialised (it remains 0) for all CPUs in a policy other than policy->cpu. When policy->cpu is hotplugged out, the driver will incorrectly use the uninitialised (0) values of the other CPUs when making frequency changes. Additionally, the values previously stored in perf_ctrls.desired_perf are lost when policy->cpu changes.

Therefore, replace the array of per-cpu data with a list. The memory for each structure is allocated at policy init, where a single structure can be allocated per policy, not per CPU (a sketch of this per-policy allocation follows below). In order to accommodate the struct list_head node in the cppc_cpudata structure, the now unused cpu and cur_policy variables are removed.

For example, on an arm64 Juno platform with 6 CPUs, (0, 1, 2, 3) in PSD1 and (4, 5) in PSD2, with ANY coordination, the memory allocation comparison shows:

Before patch - ANY coordination:

    total    slack      req  alloc/free  caller
        0        0        0       0/1    _kernel_size_le_hi32+0x0xffff800008ff7810
        0        0        0       0/6    _kernel_size_le_hi32+0x0xffff800008ff7808
      128       80       48       1/0    _kernel_size_le_hi32+0x0xffff800008ffc070
      768        0      768       6/0    _kernel_size_le_hi32+0x0xffff800008ffc0e4

After patch - ANY coordination:

    total    slack      req  alloc/free  caller
      256        0      256       2/0    _kernel_size_le_hi32+0x0xffff800008fed410
        0        0        0       0/2    _kernel_size_le_hi32+0x0xffff800008fed274

Additional notes:
- A pointer to the policy's cppc_cpudata is stored in policy->driver_data.
- Driver registration is skipped if _CPC entries are not present.

Signed-off-by: Ionela Voinescu <[email protected]>
Tested-by: Mian Yousaf Kaukab <[email protected]>
Signed-off-by: Rafael J. Wysocki <[email protected]>
1 parent cfdc589 commit a28b2bf
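The following is a minimal sketch, not the literal cppc_cpufreq.c hunk from this commit, of how one cppc_cpudata structure per policy can be allocated at policy init, linked into a driver-wide list, and published via policy->driver_data, as the commit message describes. The list head name (cpu_data_list) and the helper name (cppc_cpufreq_alloc_cpu_data) are illustrative assumptions, not names taken from the patch.

#include <linux/cpufreq.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <acpi/cppc_acpi.h>

/* One cppc_cpudata entry per policy, replacing the old per-cpu array. */
static LIST_HEAD(cpu_data_list);

static struct cppc_cpudata *cppc_cpufreq_alloc_cpu_data(unsigned int cpu)
{
	struct cppc_cpudata *cpu_data;

	cpu_data = kzalloc(sizeof(*cpu_data), GFP_KERNEL);
	if (!cpu_data)
		return NULL;

	if (!zalloc_cpumask_var(&cpu_data->shared_cpu_map, GFP_KERNEL))
		goto free_cpu;

	/* Fill in the PSD sharing mask and coordination type for this CPU. */
	if (acpi_get_psd_map(cpu, cpu_data))
		goto free_mask;

	/* Read highest/lowest/nominal performance once per policy. */
	if (cppc_get_perf_caps(cpu, &cpu_data->perf_caps))
		goto free_mask;

	list_add(&cpu_data->node, &cpu_data_list);
	return cpu_data;

free_mask:
	free_cpumask_var(cpu_data->shared_cpu_map);
free_cpu:
	kfree(cpu_data);
	return NULL;
}

static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	struct cppc_cpudata *cpu_data = cppc_cpufreq_alloc_cpu_data(policy->cpu);

	if (!cpu_data)
		return -ENOMEM;

	/* Single structure per policy, reachable from every driver callback. */
	policy->driver_data = cpu_data;
	cpumask_copy(policy->cpus, cpu_data->shared_cpu_map);

	/* ... derive policy->min/max from cpu_data->perf_caps, etc. ... */
	return 0;
}

Because the structure lives with the policy rather than with policy->cpu, the PSD mask, shared_type and perf_ctrls survive hotplug of any individual CPU in the policy.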

File tree

3 files changed: +154 −167 lines changed

drivers/acpi/cppc_acpi.c

Lines changed: 60 additions & 81 deletions
@@ -413,109 +413,88 @@ static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
 	return result;
 }
 
+bool acpi_cpc_valid(void)
+{
+	struct cpc_desc *cpc_ptr;
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
+		if (!cpc_ptr)
+			return false;
+	}
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(acpi_cpc_valid);
+
 /**
- * acpi_get_psd_map - Map the CPUs in a common freq domain.
- * @all_cpu_data: Ptrs to CPU specific CPPC data including PSD info.
+ * acpi_get_psd_map - Map the CPUs in the freq domain of a given cpu
+ * @cpu: Find all CPUs that share a domain with cpu.
+ * @cpu_data: Pointer to CPU specific CPPC data including PSD info.
  *
  * Return: 0 for success or negative value for err.
  */
-int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data)
+int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data)
 {
-	int count_target;
-	int retval = 0;
-	unsigned int i, j;
-	cpumask_var_t covered_cpus;
-	struct cppc_cpudata *pr, *match_pr;
-	struct acpi_psd_package *pdomain;
-	struct acpi_psd_package *match_pdomain;
 	struct cpc_desc *cpc_ptr, *match_cpc_ptr;
-
-	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
-		return -ENOMEM;
+	struct acpi_psd_package *match_pdomain;
+	struct acpi_psd_package *pdomain;
+	int count_target, i;
 
 	/*
 	 * Now that we have _PSD data from all CPUs, let's setup P-state
 	 * domain info.
 	 */
-	for_each_possible_cpu(i) {
-		if (cpumask_test_cpu(i, covered_cpus))
-			continue;
-
-		pr = all_cpu_data[i];
-		cpc_ptr = per_cpu(cpc_desc_ptr, i);
-		if (!cpc_ptr) {
-			retval = -EFAULT;
-			goto err_ret;
-		}
+	cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
+	if (!cpc_ptr)
+		return -EFAULT;
 
-		pdomain = &(cpc_ptr->domain_info);
-		cpumask_set_cpu(i, pr->shared_cpu_map);
-		cpumask_set_cpu(i, covered_cpus);
-		if (pdomain->num_processors <= 1)
-			continue;
+	pdomain = &(cpc_ptr->domain_info);
+	cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
+	if (pdomain->num_processors <= 1)
+		return 0;
 
-		/* Validate the Domain info */
-		count_target = pdomain->num_processors;
-		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
-			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
-		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
-			pr->shared_type = CPUFREQ_SHARED_TYPE_HW;
-		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
-			pr->shared_type = CPUFREQ_SHARED_TYPE_ANY;
-
-		for_each_possible_cpu(j) {
-			if (i == j)
-				continue;
-
-			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
-			if (!match_cpc_ptr) {
-				retval = -EFAULT;
-				goto err_ret;
-			}
+	/* Validate the Domain info */
+	count_target = pdomain->num_processors;
+	if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
+		cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ALL;
+	else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
+		cpu_data->shared_type = CPUFREQ_SHARED_TYPE_HW;
+	else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
+		cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ANY;
 
-			match_pdomain = &(match_cpc_ptr->domain_info);
-			if (match_pdomain->domain != pdomain->domain)
-				continue;
+	for_each_possible_cpu(i) {
+		if (i == cpu)
+			continue;
 
-			/* Here i and j are in the same domain */
-			if (match_pdomain->num_processors != count_target) {
-				retval = -EFAULT;
-				goto err_ret;
-			}
+		match_cpc_ptr = per_cpu(cpc_desc_ptr, i);
+		if (!match_cpc_ptr)
+			goto err_fault;
 
-			if (pdomain->coord_type != match_pdomain->coord_type) {
-				retval = -EFAULT;
-				goto err_ret;
-			}
+		match_pdomain = &(match_cpc_ptr->domain_info);
+		if (match_pdomain->domain != pdomain->domain)
+			continue;
 
-			cpumask_set_cpu(j, covered_cpus);
-			cpumask_set_cpu(j, pr->shared_cpu_map);
-		}
+		/* Here i and cpu are in the same domain */
+		if (match_pdomain->num_processors != count_target)
+			goto err_fault;
 
-		for_each_cpu(j, pr->shared_cpu_map) {
-			if (i == j)
-				continue;
+		if (pdomain->coord_type != match_pdomain->coord_type)
+			goto err_fault;
 
-			match_pr = all_cpu_data[j];
-			match_pr->shared_type = pr->shared_type;
-			cpumask_copy(match_pr->shared_cpu_map,
-				     pr->shared_cpu_map);
-		}
+		cpumask_set_cpu(i, cpu_data->shared_cpu_map);
 	}
-	goto out;
 
-err_ret:
-	for_each_possible_cpu(i) {
-		pr = all_cpu_data[i];
+	return 0;
 
-		/* Assume no coordination on any error parsing domain info */
-		cpumask_clear(pr->shared_cpu_map);
-		cpumask_set_cpu(i, pr->shared_cpu_map);
-		pr->shared_type = CPUFREQ_SHARED_TYPE_NONE;
-	}
-out:
-	free_cpumask_var(covered_cpus);
-	return retval;
+err_fault:
+	/* Assume no coordination on any error parsing domain info */
+	cpumask_clear(cpu_data->shared_cpu_map);
+	cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
+	cpu_data->shared_type = CPUFREQ_SHARED_TYPE_NONE;
+
+	return -EFAULT;
 }
 EXPORT_SYMBOL_GPL(acpi_get_psd_map);
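As the commit notes, driver registration is skipped when _CPC entries are missing, which is what the new acpi_cpc_valid() helper checks. A brief sketch of the intended call pattern follows; only acpi_cpc_valid() and acpi_get_psd_map() are taken from the diff above, the init function name and message are illustrative.

#include <linux/errno.h>
#include <linux/printk.h>
#include <acpi/cppc_acpi.h>

static int __init cppc_cpufreq_init_sketch(void)
{
	/* Bail out of registration if any possible CPU lacks a _CPC entry. */
	if (!acpi_cpc_valid()) {
		pr_warn("Errors parsing _CPC, skipping driver registration\n");
		return -ENODEV;
	}

	/*
	 * Otherwise register the cpufreq driver; at policy init,
	 * acpi_get_psd_map(cpu, cpu_data) fills cpu_data->shared_cpu_map
	 * and shared_type for the policy, or falls back to
	 * CPUFREQ_SHARED_TYPE_NONE (with only @cpu in the mask) and
	 * returns -EFAULT on inconsistent domain info.
	 */
	return 0;
}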
