/*
 * arch/i386/kernel/acpi/cstate.c
 *
 * Copyright (C) 2005 Intel Corporation
 * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 * 	- Added _PDC for SMP C-states on Intel CPUs
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>

#include <acpi/processor.h>
#include <asm/acpi.h>
  14. static void acpi_processor_power_init_intel_pdc(struct acpi_processor_power
  15. *pow)
  16. {
  17. struct acpi_object_list *obj_list;
  18. union acpi_object *obj;
  19. u32 *buf;
  20. /* allocate and initialize pdc. It will be used later. */
  21. obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
  22. if (!obj_list) {
  23. printk(KERN_ERR "Memory allocation error\n");
  24. return;
  25. }
  26. obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
  27. if (!obj) {
  28. printk(KERN_ERR "Memory allocation error\n");
  29. kfree(obj_list);
  30. return;
  31. }
  32. buf = kmalloc(12, GFP_KERNEL);
  33. if (!buf) {
  34. printk(KERN_ERR "Memory allocation error\n");
  35. kfree(obj);
  36. kfree(obj_list);
  37. return;
  38. }
  39. buf[0] = ACPI_PDC_REVISION_ID;
  40. buf[1] = 1;
  41. buf[2] = ACPI_PDC_C_CAPABILITY_SMP;
  42. obj->type = ACPI_TYPE_BUFFER;
  43. obj->buffer.length = 12;
  44. obj->buffer.pointer = (u8 *) buf;
  45. obj_list->count = 1;
  46. obj_list->pointer = obj;
  47. pow->pdc = obj_list;
  48. return;
  49. }
/* Initialize _PDC data based on the CPU vendor */
  51. void acpi_processor_power_init_pdc(struct acpi_processor_power *pow,
  52. unsigned int cpu)
  53. {
  54. struct cpuinfo_x86 *c = cpu_data + cpu;
  55. pow->pdc = NULL;
  56. if (c->x86_vendor == X86_VENDOR_INTEL)
  57. acpi_processor_power_init_intel_pdc(pow);
  58. return;
  59. }
  60. EXPORT_SYMBOL(acpi_processor_power_init_pdc);
/*
 * Initialize bm_flags based on the CPU cache properties
 * On SMP it depends on cache configuration
 * - When cache is not shared among all CPUs, we flush cache
 *   before entering C3.
 * - When cache is shared among all CPUs, we use bm_check
 *   mechanism as in UP case
 *
 * This routine is called only after all the CPUs are online
 */
  71. void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags,
  72. unsigned int cpu)
  73. {
  74. struct cpuinfo_x86 *c = cpu_data + cpu;
  75. flags->bm_check = 0;
  76. if (num_online_cpus() == 1)
  77. flags->bm_check = 1;
  78. else if (c->x86_vendor == X86_VENDOR_INTEL) {
  79. /*
  80. * Today all CPUs that support C3 share cache.
  81. * TBD: This needs to look at cache shared map, once
  82. * multi-core detection patch makes to the base.
  83. */
  84. flags->bm_check = 1;
  85. }
  86. }
  87. EXPORT_SYMBOL(acpi_processor_power_init_bm_check);