/*
 * processor_perflib.c - ACPI Processor P-States Library ($Revision: 71 $)
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *                      - Added processor hotplug support
 *
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>

#ifdef CONFIG_X86_ACPI_CPUFREQ_PROC_INTF
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>

#include <asm/uaccess.h>
#endif

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>

#define ACPI_PROCESSOR_COMPONENT        0x01000000
#define ACPI_PROCESSOR_CLASS            "processor"
#define ACPI_PROCESSOR_DRIVER_NAME      "ACPI Processor Driver"
#define ACPI_PROCESSOR_FILE_PERFORMANCE "performance"
#define _COMPONENT              ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("acpi_processor")

static DEFINE_MUTEX(performance_mutex);

/*
 * _PPC support is implemented as a CPUfreq policy notifier:
 * whenever a CPUfreq driver that is also registered with the
 * ACPI core is asked to change the speed policy, the maximum
 * frequency is clipped so that it stays within the platform limit.
 *
 * Also, when a new platform limit value is detected, the CPUfreq
 * policy is adjusted accordingly.
 */

#define PPC_REGISTERED   1
#define PPC_IN_USE       2

static int acpi_processor_ppc_status = 0;

static int acpi_processor_ppc_notifier(struct notifier_block *nb,
                                       unsigned long event, void *data)
{
        struct cpufreq_policy *policy = data;
        struct acpi_processor *pr;
        unsigned int ppc = 0;

        mutex_lock(&performance_mutex);

        if (event != CPUFREQ_INCOMPATIBLE)
                goto out;

        pr = processors[policy->cpu];
        if (!pr || !pr->performance)
                goto out;

        ppc = (unsigned int)pr->performance_platform_limit;
        if (!ppc)
                goto out;

        /* ppc is an index into states[], so it must stay below state_count */
        if (ppc >= pr->performance->state_count)
                goto out;

        cpufreq_verify_within_limits(policy, 0,
                                     pr->performance->states[ppc].
                                     core_frequency * 1000);

      out:
        mutex_unlock(&performance_mutex);

        return 0;
}

static struct notifier_block acpi_ppc_notifier_block = {
        .notifier_call = acpi_processor_ppc_notifier,
};

static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
{
        acpi_status status = 0;
        unsigned long ppc = 0;

        ACPI_FUNCTION_TRACE("acpi_processor_get_platform_limit");

        if (!pr)
                return_VALUE(-EINVAL);

        /*
         * _PPC indicates the maximum state currently supported by the platform
         * (e.g. 0 = states 0..n; 1 = states 1..n; etc.)
         */
        status = acpi_evaluate_integer(pr->handle, "_PPC", NULL, &ppc);

        if (status != AE_NOT_FOUND)
                acpi_processor_ppc_status |= PPC_IN_USE;

        if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error evaluating _PPC\n"));
                return_VALUE(-ENODEV);
        }

        pr->performance_platform_limit = (int)ppc;

        return_VALUE(0);
}

int acpi_processor_ppc_has_changed(struct acpi_processor *pr)
{
        int ret = acpi_processor_get_platform_limit(pr);
        if (ret < 0)
                return (ret);
        else
                return cpufreq_update_policy(pr->id);
}
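
/*
 * Illustrative sketch (assumption, not code from this file): the ACPI
 * processor driver is expected to call acpi_processor_ppc_has_changed()
 * from its ACPI notify handler when the platform signals a performance
 * capability change (Notify 0x80 on the processor object).  A hypothetical
 * call site could look like this:
 *
 *        static void example_processor_notify(acpi_handle handle, u32 event,
 *                                             void *data)
 *        {
 *                struct acpi_processor *pr = data;
 *
 *                if (pr && event == 0x80)
 *                        acpi_processor_ppc_has_changed(pr);
 *        }
 *
 * The real dispatch lives in the processor driver itself; the handler name
 * above is made up for illustration.
 */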

void acpi_processor_ppc_init(void)
{
        if (!cpufreq_register_notifier
            (&acpi_ppc_notifier_block, CPUFREQ_POLICY_NOTIFIER))
                acpi_processor_ppc_status |= PPC_REGISTERED;
        else
                printk(KERN_DEBUG
                       "Warning: Processor Platform Limit not supported.\n");
}

void acpi_processor_ppc_exit(void)
{
        if (acpi_processor_ppc_status & PPC_REGISTERED)
                cpufreq_unregister_notifier(&acpi_ppc_notifier_block,
                                            CPUFREQ_POLICY_NOTIFIER);

        acpi_processor_ppc_status &= ~PPC_REGISTERED;
}

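/*
 * _PCT (Performance Control) returns a package of two Buffer objects, each
 * wrapping a Generic Register resource descriptor: element 0 describes the
 * performance control register (PERF_CTRL) and element 1 the performance
 * status register (PERF_STATUS).  Both are copied verbatim into
 * pr->performance as struct acpi_pct_register below.
 */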
static int acpi_processor_get_performance_control(struct acpi_processor *pr)
{
        int result = 0;
        acpi_status status = 0;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *pct = NULL;
        union acpi_object obj = { 0 };

        ACPI_FUNCTION_TRACE("acpi_processor_get_performance_control");

        status = acpi_evaluate_object(pr->handle, "_PCT", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error evaluating _PCT\n"));
                return_VALUE(-ENODEV);
        }

        pct = (union acpi_object *)buffer.pointer;
        if (!pct || (pct->type != ACPI_TYPE_PACKAGE)
            || (pct->package.count != 2)) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PCT data\n"));
                result = -EFAULT;
                goto end;
        }

        /*
         * control_register
         */

        obj = pct->package.elements[0];

        if ((obj.type != ACPI_TYPE_BUFFER)
            || (obj.buffer.length < sizeof(struct acpi_pct_register))
            || (obj.buffer.pointer == NULL)) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
                                  "Invalid _PCT data (control_register)\n"));
                result = -EFAULT;
                goto end;
        }
        memcpy(&pr->performance->control_register, obj.buffer.pointer,
               sizeof(struct acpi_pct_register));

        /*
         * status_register
         */

        obj = pct->package.elements[1];

        if ((obj.type != ACPI_TYPE_BUFFER)
            || (obj.buffer.length < sizeof(struct acpi_pct_register))
            || (obj.buffer.pointer == NULL)) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
                                  "Invalid _PCT data (status_register)\n"));
                result = -EFAULT;
                goto end;
        }

        memcpy(&pr->performance->status_register, obj.buffer.pointer,
               sizeof(struct acpi_pct_register));

      end:
        acpi_os_free(buffer.pointer);

        return_VALUE(result);
}

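/*
 * _PSS (Performance Supported States) returns a package of P-state entries,
 * each of which is itself a package of six integers: CoreFrequency (MHz),
 * Power (mW), TransitionLatency (us), BusMasterLatency (us), Control and
 * Status values.  That layout is what the "NNNNNN" format string below
 * expects, and it maps one-to-one onto struct acpi_processor_px.
 */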
static int acpi_processor_get_performance_states(struct acpi_processor *pr)
{
        int result = 0;
        acpi_status status = AE_OK;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        struct acpi_buffer format = { sizeof("NNNNNN"), "NNNNNN" };
        struct acpi_buffer state = { 0, NULL };
        union acpi_object *pss = NULL;
        int i;

        ACPI_FUNCTION_TRACE("acpi_processor_get_performance_states");

        status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error evaluating _PSS\n"));
                return_VALUE(-ENODEV);
        }

        pss = (union acpi_object *)buffer.pointer;
        if (!pss || (pss->type != ACPI_TYPE_PACKAGE)) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSS data\n"));
                result = -EFAULT;
                goto end;
        }

        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d performance states\n",
                          pss->package.count));

        pr->performance->state_count = pss->package.count;
        pr->performance->states =
            kmalloc(sizeof(struct acpi_processor_px) * pss->package.count,
                    GFP_KERNEL);
        if (!pr->performance->states) {
                result = -ENOMEM;
                goto end;
        }

        for (i = 0; i < pr->performance->state_count; i++) {

                struct acpi_processor_px *px = &(pr->performance->states[i]);

                state.length = sizeof(struct acpi_processor_px);
                state.pointer = px;

                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));

                status = acpi_extract_package(&(pss->package.elements[i]),
                                              &format, &state);
                if (ACPI_FAILURE(status)) {
                        ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
                                          "Invalid _PSS data\n"));
                        result = -EFAULT;
                        kfree(pr->performance->states);
                        goto end;
                }

                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "State [%d]: core_frequency[%d] power[%d] transition_latency[%d] bus_master_latency[%d] control[0x%x] status[0x%x]\n",
                                  i,
                                  (u32) px->core_frequency,
                                  (u32) px->power,
                                  (u32) px->transition_latency,
                                  (u32) px->bus_master_latency,
                                  (u32) px->control, (u32) px->status));

                if (!px->core_frequency) {
                        ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
                                          "Invalid _PSS data: freq is zero\n"));
                        result = -EFAULT;
                        kfree(pr->performance->states);
                        goto end;
                }
        }

      end:
        acpi_os_free(buffer.pointer);

        return_VALUE(result);
}

static int acpi_processor_get_performance_info(struct acpi_processor *pr)
{
        int result = 0;
        acpi_status status = AE_OK;
        acpi_handle handle = NULL;

        ACPI_FUNCTION_TRACE("acpi_processor_get_performance_info");

        if (!pr || !pr->performance || !pr->handle)
                return_VALUE(-EINVAL);

        status = acpi_get_handle(pr->handle, "_PCT", &handle);
        if (ACPI_FAILURE(status)) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "ACPI-based processor performance control unavailable\n"));
                return_VALUE(-ENODEV);
        }

        result = acpi_processor_get_performance_control(pr);
        if (result)
                return_VALUE(result);

        result = acpi_processor_get_performance_states(pr);
        if (result)
                return_VALUE(result);

        result = acpi_processor_get_platform_limit(pr);
        if (result)
                return_VALUE(result);

        return_VALUE(0);
}

int acpi_processor_notify_smm(struct module *calling_module)
{
        acpi_status status;
        static int is_done = 0;

        ACPI_FUNCTION_TRACE("acpi_processor_notify_smm");

        if (!(acpi_processor_ppc_status & PPC_REGISTERED))
                return_VALUE(-EBUSY);

        if (!try_module_get(calling_module))
                return_VALUE(-EINVAL);

        /* is_done is set to negative if an error occurred,
         * and to positive if _no_ error occurred, but SMM
         * was already notified. This avoids double notification
         * which might lead to unexpected results...
         */
        if (is_done > 0) {
                module_put(calling_module);
                return_VALUE(0);
        } else if (is_done < 0) {
                module_put(calling_module);
                return_VALUE(is_done);
        }

        is_done = -EIO;

        /* Can't write pstate_cnt to smi_cmd if either value is zero */
        if ((!acpi_fadt.smi_cmd) || (!acpi_fadt.pstate_cnt)) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No SMI port or pstate_cnt\n"));
                module_put(calling_module);
                return_VALUE(0);
        }

        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                          "Writing pstate_cnt [0x%x] to smi_cmd [0x%x]\n",
                          acpi_fadt.pstate_cnt, acpi_fadt.smi_cmd));

        /* FADT v1 doesn't support pstate_cnt, but many BIOS vendors use
         * it anyway, so we need to support it... */
        if (acpi_fadt_is_v1) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "Using v1.0 FADT reserved value for pstate_cnt\n"));
        }

        status = acpi_os_write_port(acpi_fadt.smi_cmd,
                                    (u32) acpi_fadt.pstate_cnt, 8);
        if (ACPI_FAILURE(status)) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
                                  "Failed to write pstate_cnt [0x%x] to "
                                  "smi_cmd [0x%x]\n", acpi_fadt.pstate_cnt,
                                  acpi_fadt.smi_cmd));
                module_put(calling_module);
                return_VALUE(status);
        }

        /* Success. If there's no _PPC, we need to fear nothing, so
         * we can allow the cpufreq driver to be rmmod'ed. */
        is_done = 1;

        if (!(acpi_processor_ppc_status & PPC_IN_USE))
                module_put(calling_module);

        return_VALUE(0);
}

EXPORT_SYMBOL(acpi_processor_notify_smm);
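
/*
 * Usage sketch (assumption, not taken from this file): a cpufreq driver that
 * drives ACPI P-states would typically hand P-state control over to the OS
 * once during its own initialisation, e.g.:
 *
 *        result = acpi_processor_notify_smm(THIS_MODULE);
 *        if (result)
 *                return result;
 *
 * Passing THIS_MODULE lets this library keep the calling driver pinned while
 * _PPC is in use (see the PPC_IN_USE handling above).
 */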

#ifdef CONFIG_X86_ACPI_CPUFREQ_PROC_INTF
/* /proc/acpi/processor/../performance interface (DEPRECATED) */

static int acpi_processor_perf_open_fs(struct inode *inode, struct file *file);
static struct file_operations acpi_processor_perf_fops = {
        .open = acpi_processor_perf_open_fs,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int acpi_processor_perf_seq_show(struct seq_file *seq, void *offset)
{
        struct acpi_processor *pr = (struct acpi_processor *)seq->private;
        int i;

        ACPI_FUNCTION_TRACE("acpi_processor_perf_seq_show");

        if (!pr)
                goto end;

        if (!pr->performance) {
                seq_puts(seq, "<not supported>\n");
                goto end;
        }

        seq_printf(seq, "state count:             %d\n"
                   "active state:            P%d\n",
                   pr->performance->state_count, pr->performance->state);

        seq_puts(seq, "states:\n");
        for (i = 0; i < pr->performance->state_count; i++)
                seq_printf(seq,
                           "   %cP%d:                  %d MHz, %d mW, %d uS\n",
                           (i == pr->performance->state ? '*' : ' '), i,
                           (u32) pr->performance->states[i].core_frequency,
                           (u32) pr->performance->states[i].power,
                           (u32) pr->performance->states[i].transition_latency);

      end:
        return_VALUE(0);
}

static int acpi_processor_perf_open_fs(struct inode *inode, struct file *file)
{
        return single_open(file, acpi_processor_perf_seq_show,
                           PDE(inode)->data);
}

static ssize_t
acpi_processor_write_performance(struct file *file,
                                 const char __user * buffer,
                                 size_t count, loff_t * data)
{
        int result = 0;
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct acpi_processor *pr = (struct acpi_processor *)m->private;
        struct acpi_processor_performance *perf;
        char state_string[12] = { '\0' };
        unsigned int new_state = 0;
        struct cpufreq_policy policy;

        ACPI_FUNCTION_TRACE("acpi_processor_write_performance");

        if (!pr || (count > sizeof(state_string) - 1))
                return_VALUE(-EINVAL);

        perf = pr->performance;
        if (!perf)
                return_VALUE(-EINVAL);

        if (copy_from_user(state_string, buffer, count))
                return_VALUE(-EFAULT);

        state_string[count] = '\0';
        new_state = simple_strtoul(state_string, NULL, 0);

        if (new_state >= perf->state_count)
                return_VALUE(-EINVAL);

        cpufreq_get_policy(&policy, pr->id);

        policy.cpu = pr->id;
        policy.min = perf->states[new_state].core_frequency * 1000;
        policy.max = perf->states[new_state].core_frequency * 1000;

        result = cpufreq_set_policy(&policy);
        if (result)
                return_VALUE(result);

        return_VALUE(count);
}

static void acpi_cpufreq_add_file(struct acpi_processor *pr)
{
        struct proc_dir_entry *entry = NULL;
        struct acpi_device *device = NULL;

        ACPI_FUNCTION_TRACE("acpi_cpufreq_add_file");

        if (acpi_bus_get_device(pr->handle, &device))
                return_VOID;

        /* add file 'performance' [R/W] */
        entry = create_proc_entry(ACPI_PROCESSOR_FILE_PERFORMANCE,
                                  S_IFREG | S_IRUGO | S_IWUSR,
                                  acpi_device_dir(device));
        if (!entry)
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
                                  "Unable to create '%s' fs entry\n",
                                  ACPI_PROCESSOR_FILE_PERFORMANCE));
        else {
                acpi_processor_perf_fops.write = acpi_processor_write_performance;
                entry->proc_fops = &acpi_processor_perf_fops;
                entry->data = acpi_driver_data(device);
                entry->owner = THIS_MODULE;
        }
        return_VOID;
}

static void acpi_cpufreq_remove_file(struct acpi_processor *pr)
{
        struct acpi_device *device = NULL;

        ACPI_FUNCTION_TRACE("acpi_cpufreq_remove_file");

        if (acpi_bus_get_device(pr->handle, &device))
                return_VOID;

        /* remove file 'performance' */
        remove_proc_entry(ACPI_PROCESSOR_FILE_PERFORMANCE,
                          acpi_device_dir(device));

        return_VOID;
}

#else
static void acpi_cpufreq_add_file(struct acpi_processor *pr)
{
        return;
}
static void acpi_cpufreq_remove_file(struct acpi_processor *pr)
{
        return;
}
#endif                          /* CONFIG_X86_ACPI_CPUFREQ_PROC_INTF */

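/*
 * _PSD (P-State Dependency) returns a package whose single element is itself
 * a package of five integers: NumEntries (5), Revision (0), Domain, CoordType
 * and NumProcessors; hence the "NNNNN" format string and struct
 * acpi_psd_package below.  Only one dependency entry per processor is
 * accepted here.
 */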
static int acpi_processor_get_psd(struct acpi_processor *pr)
{
        int result = 0;
        acpi_status status = AE_OK;
        struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
        struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
        struct acpi_buffer state = {0, NULL};
        union acpi_object *psd = NULL;
        struct acpi_psd_package *pdomain;

        status = acpi_evaluate_object(pr->handle, "_PSD", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                return -ENODEV;
        }

        psd = (union acpi_object *) buffer.pointer;
        if (!psd || (psd->type != ACPI_TYPE_PACKAGE)) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSD data\n"));
                result = -EFAULT;
                goto end;
        }

        if (psd->package.count != 1) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSD data\n"));
                result = -EFAULT;
                goto end;
        }

        pdomain = &(pr->performance->domain_info);

        state.length = sizeof(struct acpi_psd_package);
        state.pointer = pdomain;

        status = acpi_extract_package(&(psd->package.elements[0]),
                &format, &state);
        if (ACPI_FAILURE(status)) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSD data\n"));
                result = -EFAULT;
                goto end;
        }

        if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _PSD:num_entries\n"));
                result = -EFAULT;
                goto end;
        }

        if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _PSD:revision\n"));
                result = -EFAULT;
                goto end;
        }

end:
        acpi_os_free(buffer.pointer);
        return result;
}

int acpi_processor_preregister_performance(
                struct acpi_processor_performance **performance)
{
        int count, count_target;
        int retval = 0;
        unsigned int i, j;
        cpumask_t covered_cpus;
        struct acpi_processor *pr;
        struct acpi_psd_package *pdomain;
        struct acpi_processor *match_pr;
        struct acpi_psd_package *match_pdomain;

        mutex_lock(&performance_mutex);

        retval = 0;

        /* Call _PSD for all CPUs */
        for_each_possible_cpu(i) {
                pr = processors[i];
                if (!pr) {
                        /* Look only at processors in ACPI namespace */
                        continue;
                }

                if (pr->performance) {
                        retval = -EBUSY;
                        continue;
                }

                if (!performance || !performance[i]) {
                        retval = -EINVAL;
                        continue;
                }

                pr->performance = performance[i];
                cpu_set(i, pr->performance->shared_cpu_map);
                if (acpi_processor_get_psd(pr)) {
                        retval = -EINVAL;
                        continue;
                }
        }
        if (retval)
                goto err_ret;

        /*
         * Now that we have _PSD data from all CPUs, let's set up the
         * P-state domain info.
         */
        for_each_possible_cpu(i) {
                pr = processors[i];
                if (!pr)
                        continue;

                /* Basic validity check for domain info */
                pdomain = &(pr->performance->domain_info);
                if ((pdomain->revision != ACPI_PSD_REV0_REVISION) ||
                    (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES)) {
                        retval = -EINVAL;
                        goto err_ret;
                }
                if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
                    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
                    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
                        retval = -EINVAL;
                        goto err_ret;
                }
        }

        cpus_clear(covered_cpus);
        for_each_possible_cpu(i) {
                pr = processors[i];
                if (!pr)
                        continue;

                if (cpu_isset(i, covered_cpus))
                        continue;

                pdomain = &(pr->performance->domain_info);
                cpu_set(i, pr->performance->shared_cpu_map);
                cpu_set(i, covered_cpus);
                if (pdomain->num_processors <= 1)
                        continue;

                /* Validate the Domain info */
                count_target = pdomain->num_processors;
                count = 1;
                if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL ||
                    pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL) {
                        pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
                } else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY) {
                        pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ANY;
                }

                for_each_possible_cpu(j) {
                        if (i == j)
                                continue;

                        match_pr = processors[j];
                        if (!match_pr)
                                continue;

                        match_pdomain = &(match_pr->performance->domain_info);
                        if (match_pdomain->domain != pdomain->domain)
                                continue;

                        /* Here i and j are in the same domain */

                        if (match_pdomain->num_processors != count_target) {
                                retval = -EINVAL;
                                goto err_ret;
                        }

                        if (pdomain->coord_type != match_pdomain->coord_type) {
                                retval = -EINVAL;
                                goto err_ret;
                        }

                        cpu_set(j, covered_cpus);
                        cpu_set(j, pr->performance->shared_cpu_map);
                        count++;
                }

                for_each_possible_cpu(j) {
                        if (i == j)
                                continue;

                        match_pr = processors[j];
                        if (!match_pr)
                                continue;

                        match_pdomain = &(match_pr->performance->domain_info);
                        if (match_pdomain->domain != pdomain->domain)
                                continue;

                        match_pr->performance->shared_type =
                                        pr->performance->shared_type;
                        match_pr->performance->shared_cpu_map =
                                pr->performance->shared_cpu_map;
                }
        }

err_ret:
        if (retval) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error while parsing _PSD domain information. Assuming no coordination\n"));
        }

        for_each_possible_cpu(i) {
                pr = processors[i];
                if (!pr || !pr->performance)
                        continue;

                /* Assume no coordination on any error parsing domain info */
                if (retval) {
                        cpus_clear(pr->performance->shared_cpu_map);
                        cpu_set(i, pr->performance->shared_cpu_map);
                        pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
                }
                pr->performance = NULL; /* Will be set for real in register */
        }

        mutex_unlock(&performance_mutex);
        return retval;
}
EXPORT_SYMBOL(acpi_processor_preregister_performance);
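
/*
 * Usage sketch (illustrative only; all names below are hypothetical): a
 * cpufreq driver allocates one acpi_processor_performance object per
 * possible CPU, builds a CPU-indexed pointer array and preregisters it once,
 * so that shared_cpu_map/shared_type reflect the _PSD coordination data
 * before the individual CPUs are registered:
 *
 *        static struct acpi_processor_performance *example_perf[NR_CPUS];
 *
 *        static int __init example_driver_init(void)
 *        {
 *                unsigned int i;
 *
 *                for_each_possible_cpu(i) {
 *                        example_perf[i] = kzalloc(sizeof(*example_perf[i]),
 *                                                  GFP_KERNEL);
 *                        if (!example_perf[i])
 *                                return -ENOMEM;
 *                }
 *                return acpi_processor_preregister_performance(example_perf);
 *        }
 */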


int
acpi_processor_register_performance(struct acpi_processor_performance
                                    *performance, unsigned int cpu)
{
        struct acpi_processor *pr;

        ACPI_FUNCTION_TRACE("acpi_processor_register_performance");

        if (!(acpi_processor_ppc_status & PPC_REGISTERED))
                return_VALUE(-EINVAL);

        mutex_lock(&performance_mutex);

        pr = processors[cpu];
        if (!pr) {
                mutex_unlock(&performance_mutex);
                return_VALUE(-ENODEV);
        }

        if (pr->performance) {
                mutex_unlock(&performance_mutex);
                return_VALUE(-EBUSY);
        }

        WARN_ON(!performance);

        pr->performance = performance;

        if (acpi_processor_get_performance_info(pr)) {
                pr->performance = NULL;
                mutex_unlock(&performance_mutex);
                return_VALUE(-EIO);
        }

        acpi_cpufreq_add_file(pr);

        mutex_unlock(&performance_mutex);
        return_VALUE(0);
}

EXPORT_SYMBOL(acpi_processor_register_performance);
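
/*
 * Usage sketch (illustrative only): per-CPU registration usually happens in
 * the cpufreq driver's ->init() callback; the state table filled in here is
 * then converted into a frequency table (core_frequency is in MHz, cpufreq
 * expects kHz), and the matching unregister call below belongs in ->exit():
 *
 *        static int example_cpu_init(struct cpufreq_policy *policy)
 *        {
 *                struct acpi_processor_performance *perf;
 *                unsigned int i;
 *                int result;
 *
 *                perf = example_perf[policy->cpu];
 *                result = acpi_processor_register_performance(perf, policy->cpu);
 *                if (result)
 *                        return result;
 *
 *                for (i = 0; i < perf->state_count; i++)
 *                        example_freq_table[i].frequency =
 *                                perf->states[i].core_frequency * 1000;
 *
 *                return 0;
 *        }
 *
 * example_perf and example_freq_table are hypothetical driver-private data,
 * not part of this library.
 */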

void
acpi_processor_unregister_performance(struct acpi_processor_performance
                                      *performance, unsigned int cpu)
{
        struct acpi_processor *pr;

        ACPI_FUNCTION_TRACE("acpi_processor_unregister_performance");

        mutex_lock(&performance_mutex);

        pr = processors[cpu];
        if (!pr) {
                mutex_unlock(&performance_mutex);
                return_VOID;
        }

        if (pr->performance)
                kfree(pr->performance->states);
        pr->performance = NULL;

        acpi_cpufreq_remove_file(pr);

        mutex_unlock(&performance_mutex);

        return_VOID;
}

EXPORT_SYMBOL(acpi_processor_unregister_performance);