osl.c 43 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849
  1. /*
  2. * acpi_osl.c - OS-dependent functions ($Revision: 83 $)
  3. *
  4. * Copyright (C) 2000 Andrew Henroid
  5. * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
  6. * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
  7. * Copyright (c) 2008 Intel Corporation
  8. * Author: Matthew Wilcox <willy@linux.intel.com>
  9. *
  10. * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  11. *
  12. * This program is free software; you can redistribute it and/or modify
  13. * it under the terms of the GNU General Public License as published by
  14. * the Free Software Foundation; either version 2 of the License, or
  15. * (at your option) any later version.
  16. *
  17. * This program is distributed in the hope that it will be useful,
  18. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  19. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  20. * GNU General Public License for more details.
  21. *
  22. * You should have received a copy of the GNU General Public License
  23. * along with this program; if not, write to the Free Software
  24. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  25. *
  26. * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  27. *
  28. */
  29. #include <linux/module.h>
  30. #include <linux/kernel.h>
  31. #include <linux/slab.h>
  32. #include <linux/mm.h>
  33. #include <linux/highmem.h>
  34. #include <linux/pci.h>
  35. #include <linux/interrupt.h>
  36. #include <linux/kmod.h>
  37. #include <linux/delay.h>
  38. #include <linux/workqueue.h>
  39. #include <linux/nmi.h>
  40. #include <linux/acpi.h>
  41. #include <linux/acpi_io.h>
  42. #include <linux/efi.h>
  43. #include <linux/ioport.h>
  44. #include <linux/list.h>
  45. #include <linux/jiffies.h>
  46. #include <linux/semaphore.h>
  47. #include <asm/io.h>
  48. #include <asm/uaccess.h>
  49. #include <acpi/acpi.h>
  50. #include <acpi/acpi_bus.h>
  51. #include <acpi/processor.h>
  52. #include "internal.h"
  53. #define _COMPONENT ACPI_OS_SERVICES
  54. ACPI_MODULE_NAME("osl");
  55. #define PREFIX "ACPI: "
/*
 * Deferred procedure call descriptor: wraps an ACPICA callback plus its
 * argument so it can be executed from a workqueue.
 */
struct acpi_os_dpc {
	acpi_osd_exec_callback function;	/* callback to invoke */
	void *context;				/* opaque argument passed to it */
	struct work_struct work;		/* workqueue item that runs it */
	int wait;	/* flag consumed by the queueing code elsewhere in this
			 * file -- presumably "flush synchronously"; confirm
			 * against the execute path */
};
  62. #ifdef CONFIG_ACPI_CUSTOM_DSDT
  63. #include CONFIG_ACPI_CUSTOM_DSDT_FILE
  64. #endif
  65. #ifdef ENABLE_DEBUGGER
  66. #include <linux/kdb.h>
  67. /* stuff for debugger support */
  68. int acpi_in_debugger;
  69. EXPORT_SYMBOL(acpi_in_debugger);
  70. extern char line_buf[80];
  71. #endif /*ENABLE_DEBUGGER */
/* Optional hooks installed by platform code for S-state preparation. */
static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl,
				      u32 pm1b_ctrl);
static int (*__acpi_os_prepare_extended_sleep)(u8 sleep_state, u32 val_a,
					       u32 val_b);

/* SCI handler registered by ACPICA, and its opaque context. */
static acpi_osd_handler acpi_irq_handler;
static void *acpi_irq_context;

/* Workqueues for general deferred work, notifications, and hotplug. */
static struct workqueue_struct *kacpid_wq;
static struct workqueue_struct *kacpi_notify_wq;
static struct workqueue_struct *kacpi_hotplug_wq;

/*
 * This list of permanent mappings is for memory that may be accessed from
 * interrupt context, where we can't do the ioremap().
 */
struct acpi_ioremap {
	struct list_head list;		/* link in acpi_ioremaps (RCU-read) */
	void __iomem *virt;		/* kernel virtual base of the mapping */
	acpi_physical_address phys;	/* page-aligned physical base */
	acpi_size size;			/* page-aligned length */
	unsigned long refcount;		/* users; entry unlinked when it hits 0 */
};

static LIST_HEAD(acpi_ioremaps);
static DEFINE_MUTEX(acpi_ioremap_lock);	/* serializes writers of the list */
  94. static void __init acpi_osi_setup_late(void);
  95. /*
  96. * The story of _OSI(Linux)
  97. *
  98. * From pre-history through Linux-2.6.22,
  99. * Linux responded TRUE upon a BIOS OSI(Linux) query.
  100. *
  101. * Unfortunately, reference BIOS writers got wind of this
  102. * and put OSI(Linux) in their example code, quickly exposing
  103. * this string as ill-conceived and opening the door to
  104. * an un-bounded number of BIOS incompatibilities.
  105. *
  106. * For example, OSI(Linux) was used on resume to re-POST a
  107. * video card on one system, because Linux at that time
  108. * could not do a speedy restore in its native driver.
  109. * But then upon gaining quick native restore capability,
  110. * Linux has no way to tell the BIOS to skip the time-consuming
  111. * POST -- putting Linux at a permanent performance disadvantage.
  112. * On another system, the BIOS writer used OSI(Linux)
  113. * to infer native OS support for IPMI! On other systems,
  114. * OSI(Linux) simply got in the way of Linux claiming to
  115. * be compatible with other operating systems, exposing
  116. * BIOS issues such as skipped device initialization.
  117. *
 * So "Linux" turned out to be a really poor choice of
  119. * OSI string, and from Linux-2.6.23 onward we respond FALSE.
  120. *
  121. * BIOS writers should NOT query _OSI(Linux) on future systems.
  122. * Linux will complain on the console when it sees it, and return FALSE.
  123. * To get Linux to return TRUE for your system will require
  124. * a kernel source update to add a DMI entry,
  125. * or boot with "acpi_osi=Linux"
  126. */
/* How the answer to _OSI("Linux") was decided (see the story above). */
static struct osi_linux {
	unsigned int enable:1;	/* 1: answer TRUE to _OSI("Linux") */
	unsigned int dmi:1;	/* setting came from a DMI quirk */
	unsigned int cmdline:1;	/* setting came from the kernel command line */
	unsigned int default_disabling:1;	/* set elsewhere; presumably
						 * "disable vendor strings by
						 * default" -- confirm */
} osi_linux = {0, 0, 0, 0};
/*
 * ACPICA callback invoked on every _OSI evaluation.  Logs (once) when the
 * firmware probes the ill-advised "Linux" string and how that probe was
 * answered; always returns @supported unchanged, so the actual answer is
 * whatever ACPICA already computed.
 */
static u32 acpi_osi_handler(acpi_string interface, u32 supported)
{
	if (!strcmp("Linux", interface)) {
		printk_once(KERN_NOTICE FW_BUG PREFIX
			"BIOS _OSI(Linux) query %s%s\n",
			osi_linux.enable ? "honored" : "ignored",
			osi_linux.cmdline ? " via cmdline" :
			osi_linux.dmi ? " via DMI" : "");
	}

	return supported;
}
  144. static void __init acpi_request_region (struct acpi_generic_address *gas,
  145. unsigned int length, char *desc)
  146. {
  147. u64 addr;
  148. /* Handle possible alignment issues */
  149. memcpy(&addr, &gas->address, sizeof(addr));
  150. if (!addr || !length)
  151. return;
  152. /* Resources are never freed */
  153. if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
  154. request_region(addr, length, desc);
  155. else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
  156. request_mem_region(addr, length, desc);
  157. }
/*
 * Reserve the fixed-feature register blocks described by the FADT
 * (PM1/PM2 event and control blocks, PM timer, GPE blocks) so that they
 * show up in the resource tree and other drivers keep off them.
 * Runs once at device_initcall time; the reservations are never released.
 */
static int __init acpi_reserve_resources(void)
{
	acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1a_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1b_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1a_CNT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1b_CNT_BLK");

	/* PM timer is only requested when the FADT advertises the 4-byte form */
	if (acpi_gbl_FADT.pm_timer_length == 4)
		acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");

	acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
		"ACPI PM2_CNT_BLK");

	/* Length of GPE blocks must be a non-negative multiple of 2 */

	if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
			       acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");

	if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
			       acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");

	return 0;
}
device_initcall(acpi_reserve_resources);
  182. void acpi_os_printf(const char *fmt, ...)
  183. {
  184. va_list args;
  185. va_start(args, fmt);
  186. acpi_os_vprintf(fmt, args);
  187. va_end(args);
  188. }
  189. void acpi_os_vprintf(const char *fmt, va_list args)
  190. {
  191. static char buffer[512];
  192. vsprintf(buffer, fmt, args);
  193. #ifdef ENABLE_DEBUGGER
  194. if (acpi_in_debugger) {
  195. kdb_printf("%s", buffer);
  196. } else {
  197. printk(KERN_CONT "%s", buffer);
  198. }
  199. #else
  200. printk(KERN_CONT "%s", buffer);
  201. #endif
  202. }
#ifdef CONFIG_KEXEC
/*
 * "acpi_rsdp=" early parameter: physical address of the RSDP, passed in
 * (typically by kexec) so this kernel need not search for it.
 */
static unsigned long acpi_rsdp;
static int __init setup_acpi_rsdp(char *arg)
{
	acpi_rsdp = simple_strtoul(arg, NULL, 16);	/* hex address */
	return 0;
}
early_param("acpi_rsdp", setup_acpi_rsdp);
#endif
  212. acpi_physical_address __init acpi_os_get_root_pointer(void)
  213. {
  214. #ifdef CONFIG_KEXEC
  215. if (acpi_rsdp)
  216. return acpi_rsdp;
  217. #endif
  218. if (efi_enabled(EFI_CONFIG_TABLES)) {
  219. if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
  220. return efi.acpi20;
  221. else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
  222. return efi.acpi;
  223. else {
  224. printk(KERN_ERR PREFIX
  225. "System description tables not found\n");
  226. return 0;
  227. }
  228. } else {
  229. acpi_physical_address pa = 0;
  230. acpi_find_root_pointer(&pa);
  231. return pa;
  232. }
  233. }
  234. /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
  235. static struct acpi_ioremap *
  236. acpi_map_lookup(acpi_physical_address phys, acpi_size size)
  237. {
  238. struct acpi_ioremap *map;
  239. list_for_each_entry_rcu(map, &acpi_ioremaps, list)
  240. if (map->phys <= phys &&
  241. phys + size <= map->phys + map->size)
  242. return map;
  243. return NULL;
  244. }
  245. /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
  246. static void __iomem *
  247. acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size)
  248. {
  249. struct acpi_ioremap *map;
  250. map = acpi_map_lookup(phys, size);
  251. if (map)
  252. return map->virt + (phys - map->phys);
  253. return NULL;
  254. }
/*
 * Look up an existing permanent mapping that covers @phys/@size and take
 * a reference on it.  Returns the virtual address corresponding to @phys,
 * or NULL when no covering mapping exists; the caller then owns one
 * reference on the mapping.
 */
void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size)
{
	struct acpi_ioremap *map;
	void __iomem *virt = NULL;

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup(phys, size);
	if (map) {
		virt = map->virt + (phys - map->phys);
		map->refcount++;	/* pin the mapping for the caller */
	}
	mutex_unlock(&acpi_ioremap_lock);
	return virt;
}
EXPORT_SYMBOL_GPL(acpi_os_get_iomem);
  269. /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
  270. static struct acpi_ioremap *
  271. acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
  272. {
  273. struct acpi_ioremap *map;
  274. list_for_each_entry_rcu(map, &acpi_ioremaps, list)
  275. if (map->virt <= virt &&
  276. virt + size <= map->virt + map->size)
  277. return map;
  278. return NULL;
  279. }
#ifndef CONFIG_IA64
/* RAM pages already have a kernel mapping: use kmap() rather than ioremap(). */
#define should_use_kmap(pfn)   page_is_ram(pfn)
#else
/* ioremap will take care of cache attributes */
#define should_use_kmap(pfn)   0
#endif

/*
 * Map @pg_sz bytes at page-aligned physical address @pg_off.  RAM is
 * mapped with kmap() (limited to one page); everything else goes through
 * acpi_os_ioremap().  Returns NULL on failure.
 */
static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
{
	unsigned long pfn;

	pfn = pg_off >> PAGE_SHIFT;
	if (should_use_kmap(pfn)) {
		if (pg_sz > PAGE_SIZE)	/* kmap() can only cover a single page */
			return NULL;
		return (void __iomem __force *)kmap(pfn_to_page(pfn));
	} else
		return acpi_os_ioremap(pg_off, pg_sz);
}
  297. static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
  298. {
  299. unsigned long pfn;
  300. pfn = pg_off >> PAGE_SHIFT;
  301. if (should_use_kmap(pfn))
  302. kunmap(pfn_to_page(pfn));
  303. else
  304. iounmap(vaddr);
  305. }
/*
 * Map physical memory for ACPICA.  Before the permanent mmap is available
 * this defers to the early fixmap (__acpi_map_table()); afterwards the
 * mappings are page-aligned, reference counted and kept on the
 * RCU-protected acpi_ioremaps list so they can also be looked up from
 * interrupt context.  Returns the virtual address corresponding to @phys,
 * or NULL on failure.
 */
void __iomem *__init_refok
acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
	struct acpi_ioremap *map;
	void __iomem *virt;
	acpi_physical_address pg_off;
	acpi_size pg_sz;

	if (phys > ULONG_MAX) {
		printk(KERN_ERR PREFIX "Cannot map memory that high\n");
		return NULL;
	}

	if (!acpi_gbl_permanent_mmap)
		return __acpi_map_table((unsigned long)phys, size);

	mutex_lock(&acpi_ioremap_lock);
	/* Check if there's a suitable mapping already. */
	map = acpi_map_lookup(phys, size);
	if (map) {
		map->refcount++;
		goto out;
	}

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return NULL;
	}

	/* Round to whole pages; the in-page offset is re-added on return. */
	pg_off = round_down(phys, PAGE_SIZE);
	pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
	virt = acpi_map(pg_off, pg_sz);
	if (!virt) {
		mutex_unlock(&acpi_ioremap_lock);
		kfree(map);
		return NULL;
	}

	INIT_LIST_HEAD(&map->list);
	map->virt = virt;
	map->phys = pg_off;
	map->size = pg_sz;
	map->refcount = 1;

	/* Publish for RCU readers (interrupt-context lookups). */
	list_add_tail_rcu(&map->list, &acpi_ioremaps);

 out:
	mutex_unlock(&acpi_ioremap_lock);
	return map->virt + (phys - map->phys);
}
EXPORT_SYMBOL_GPL(acpi_os_map_memory);
/*
 * Drop one reference; on the last one unlink the entry from the list.
 * Caller must hold acpi_ioremap_lock.  The entry itself is freed later by
 * acpi_os_map_cleanup(), after an RCU grace period.
 */
static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
{
	if (!--map->refcount)
		list_del_rcu(&map->list);
}

/*
 * Free an already-unlinked mapping once all RCU readers are done with it.
 * Must be called without acpi_ioremap_lock held: synchronize_rcu() sleeps.
 * No-op while the mapping still has users.
 */
static void acpi_os_map_cleanup(struct acpi_ioremap *map)
{
	if (!map->refcount) {
		synchronize_rcu();
		acpi_unmap(map->phys, map->virt);
		kfree(map);
	}
}
/*
 * Release a mapping obtained from acpi_os_map_memory(): drop one reference
 * on the entry covering @virt/@size and destroy it when this was the last
 * user.  Early-boot (pre-permanent-mmap) unmaps go straight to
 * __acpi_unmap_table().  Warns on an address no mapping covers.
 */
void __ref acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
{
	struct acpi_ioremap *map;

	if (!acpi_gbl_permanent_mmap) {
		__acpi_unmap_table(virt, size);
		return;
	}

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup_virt(virt, size);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		WARN(true, PREFIX "%s: bad address %p\n", __func__, virt);
		return;
	}
	acpi_os_drop_map_ref(map);
	mutex_unlock(&acpi_ioremap_lock);

	/* May sleep in synchronize_rcu(); must run without the lock held. */
	acpi_os_map_cleanup(map);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
  382. void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
  383. {
  384. if (!acpi_gbl_permanent_mmap)
  385. __acpi_unmap_table(virt, size);
  386. }
  387. int acpi_os_map_generic_address(struct acpi_generic_address *gas)
  388. {
  389. u64 addr;
  390. void __iomem *virt;
  391. if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
  392. return 0;
  393. /* Handle possible alignment issues */
  394. memcpy(&addr, &gas->address, sizeof(addr));
  395. if (!addr || !gas->bit_width)
  396. return -EINVAL;
  397. virt = acpi_os_map_memory(addr, gas->bit_width / 8);
  398. if (!virt)
  399. return -EIO;
  400. return 0;
  401. }
  402. EXPORT_SYMBOL(acpi_os_map_generic_address);
/*
 * Release the mapping created by acpi_os_map_generic_address() for a
 * memory-mapped GAS register block.  Silently returns for I/O-space
 * blocks, zero address/width, or when no covering mapping is found.
 */
void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
{
	u64 addr;
	struct acpi_ioremap *map;

	if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !gas->bit_width)
		return;

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup(addr, gas->bit_width / 8);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return;
	}
	acpi_os_drop_map_ref(map);
	mutex_unlock(&acpi_ioremap_lock);

	/* May sleep in synchronize_rcu(); must run without the lock held. */
	acpi_os_map_cleanup(map);
}
EXPORT_SYMBOL(acpi_os_unmap_generic_address);
  424. #ifdef ACPI_FUTURE_USAGE
  425. acpi_status
  426. acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
  427. {
  428. if (!phys || !virt)
  429. return AE_BAD_PARAMETER;
  430. *phys = virt_to_phys(virt);
  431. return AE_OK;
  432. }
  433. #endif
  434. #define ACPI_MAX_OVERRIDE_LEN 100
  435. static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];
  436. acpi_status
  437. acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
  438. acpi_string * new_val)
  439. {
  440. if (!init_val || !new_val)
  441. return AE_BAD_PARAMETER;
  442. *new_val = NULL;
  443. if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
  444. printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
  445. acpi_os_name);
  446. *new_val = acpi_os_name;
  447. }
  448. return AE_OK;
  449. }
  450. #ifdef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
  451. #include <linux/earlycpio.h>
  452. #include <linux/memblock.h>
  453. static u64 acpi_tables_addr;
  454. static int all_tables_size;
  455. /* Copied from acpica/tbutils.c:acpi_tb_checksum() */
  456. u8 __init acpi_table_checksum(u8 *buffer, u32 length)
  457. {
  458. u8 sum = 0;
  459. u8 *end = buffer + length;
  460. while (buffer < end)
  461. sum = (u8) (sum + *(buffer++));
  462. return sum;
  463. }
/* Signatures accepted for initrd override, NULL-terminated. */
/* All but ACPI_SIG_RSDP and ACPI_SIG_FACS: */
static const char * const table_sigs[] = {
	ACPI_SIG_BERT, ACPI_SIG_CPEP, ACPI_SIG_ECDT, ACPI_SIG_EINJ,
	ACPI_SIG_ERST, ACPI_SIG_HEST, ACPI_SIG_MADT, ACPI_SIG_MSCT,
	ACPI_SIG_SBST, ACPI_SIG_SLIT, ACPI_SIG_SRAT, ACPI_SIG_ASF,
	ACPI_SIG_BOOT, ACPI_SIG_DBGP, ACPI_SIG_DMAR, ACPI_SIG_HPET,
	ACPI_SIG_IBFT, ACPI_SIG_IVRS, ACPI_SIG_MCFG, ACPI_SIG_MCHI,
	ACPI_SIG_SLIC, ACPI_SIG_SPCR, ACPI_SIG_SPMI, ACPI_SIG_TCPA,
	ACPI_SIG_UEFI, ACPI_SIG_WAET, ACPI_SIG_WDAT, ACPI_SIG_WDDT,
	ACPI_SIG_WDRT, ACPI_SIG_DSDT, ACPI_SIG_FADT, ACPI_SIG_PSDT,
	ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT, NULL };

#define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)

/* Must not increase 10 or needs code modification below */
#define ACPI_OVERRIDE_TABLES 10
/*
 * Scan an uncompressed initrd cpio image for ACPI tables under
 * kernel/firmware/acpi/, validate them (known signature, length, checksum)
 * and stage the good ones in a reserved area of low memory so that
 * acpi_os_physical_table_override() can substitute them later.
 * Runs very early: uses memblock and early_ioremap, not the page allocator.
 */
void __init acpi_initrd_override(void *data, size_t size)
{
	int sig, no, table_nr = 0, total_offset = 0;
	long offset = 0;
	struct acpi_table_header *table;
	char cpio_path[32] = "kernel/firmware/acpi/";
	struct cpio_data file;
	struct cpio_data early_initrd_files[ACPI_OVERRIDE_TABLES];
	char *p;

	if (data == NULL || size == 0)
		return;

	for (no = 0; no < ACPI_OVERRIDE_TABLES; no++) {
		file = find_cpio_data(cpio_path, data, size, &offset);
		if (!file.data)
			break;

		/* Continue the scan just past the member we found. */
		data += offset;
		size -= offset;

		if (file.size < sizeof(struct acpi_table_header)) {
			pr_err("ACPI OVERRIDE: Table smaller than ACPI header [%s%s]\n",
				cpio_path, file.name);
			continue;
		}

		table = file.data;

		/* Accept only signatures listed in table_sigs[]. */
		for (sig = 0; table_sigs[sig]; sig++)
			if (!memcmp(table->signature, table_sigs[sig], 4))
				break;

		if (!table_sigs[sig]) {
			pr_err("ACPI OVERRIDE: Unknown signature [%s%s]\n",
				cpio_path, file.name);
			continue;
		}
		if (file.size != table->length) {
			pr_err("ACPI OVERRIDE: File length does not match table length [%s%s]\n",
				cpio_path, file.name);
			continue;
		}
		if (acpi_table_checksum(file.data, table->length)) {
			pr_err("ACPI OVERRIDE: Bad table checksum [%s%s]\n",
				cpio_path, file.name);
			continue;
		}

		pr_info("%4.4s ACPI table found in initrd [%s%s][0x%x]\n",
			table->signature, cpio_path, file.name, table->length);

		all_tables_size += table->length;
		early_initrd_files[table_nr].data = file.data;
		early_initrd_files[table_nr].size = file.size;
		table_nr++;
	}
	if (table_nr == 0)
		return;

	/* Find room for all accepted tables below already-mapped low memory. */
	acpi_tables_addr =
		memblock_find_in_range(0, max_low_pfn_mapped << PAGE_SHIFT,
				       all_tables_size, PAGE_SIZE);
	if (!acpi_tables_addr) {
		WARN_ON(1);
		return;
	}
	/*
	 * Only calling e820_add_reserve does not work and the
	 * tables are invalid (memory got used) later.
	 * memblock_reserve works as expected and the tables won't get modified.
	 * But it's not enough on X86 because ioremap will
	 * complain later (used by acpi_os_map_memory) that the pages
	 * that should get mapped are not marked "reserved".
	 * Both memblock_reserve and e820_add_region (via arch_reserve_mem_area)
	 * works fine.
	 */
	memblock_reserve(acpi_tables_addr, all_tables_size);
	arch_reserve_mem_area(acpi_tables_addr, all_tables_size);

	/* Copy the staged tables, back to back, into the reserved area. */
	p = early_ioremap(acpi_tables_addr, all_tables_size);

	for (no = 0; no < table_nr; no++) {
		memcpy(p + total_offset, early_initrd_files[no].data,
		       early_initrd_files[no].size);
		total_offset += early_initrd_files[no].size;
	}
	early_iounmap(p, all_tables_size);
}
  555. #endif /* CONFIG_ACPI_INITRD_TABLE_OVERRIDE */
  556. static void acpi_table_taint(struct acpi_table_header *table)
  557. {
  558. pr_warn(PREFIX
  559. "Override [%4.4s-%8.8s], this is unsafe: tainting kernel\n",
  560. table->signature, table->oem_table_id);
  561. add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
  562. }
  563. acpi_status
  564. acpi_os_table_override(struct acpi_table_header * existing_table,
  565. struct acpi_table_header ** new_table)
  566. {
  567. if (!existing_table || !new_table)
  568. return AE_BAD_PARAMETER;
  569. *new_table = NULL;
  570. #ifdef CONFIG_ACPI_CUSTOM_DSDT
  571. if (strncmp(existing_table->signature, "DSDT", 4) == 0)
  572. *new_table = (struct acpi_table_header *)AmlCode;
  573. #endif
  574. if (*new_table != NULL)
  575. acpi_table_taint(existing_table);
  576. return AE_OK;
  577. }
  578. acpi_status
  579. acpi_os_physical_table_override(struct acpi_table_header *existing_table,
  580. acpi_physical_address *address,
  581. u32 *table_length)
  582. {
  583. #ifndef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
  584. *table_length = 0;
  585. *address = 0;
  586. return AE_OK;
  587. #else
  588. int table_offset = 0;
  589. struct acpi_table_header *table;
  590. *table_length = 0;
  591. *address = 0;
  592. if (!acpi_tables_addr)
  593. return AE_OK;
  594. do {
  595. if (table_offset + ACPI_HEADER_SIZE > all_tables_size) {
  596. WARN_ON(1);
  597. return AE_OK;
  598. }
  599. table = acpi_os_map_memory(acpi_tables_addr + table_offset,
  600. ACPI_HEADER_SIZE);
  601. if (table_offset + table->length > all_tables_size) {
  602. acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
  603. WARN_ON(1);
  604. return AE_OK;
  605. }
  606. table_offset += table->length;
  607. if (memcmp(existing_table->signature, table->signature, 4)) {
  608. acpi_os_unmap_memory(table,
  609. ACPI_HEADER_SIZE);
  610. continue;
  611. }
  612. /* Only override tables with matching oem id */
  613. if (memcmp(table->oem_table_id, existing_table->oem_table_id,
  614. ACPI_OEM_TABLE_ID_SIZE)) {
  615. acpi_os_unmap_memory(table,
  616. ACPI_HEADER_SIZE);
  617. continue;
  618. }
  619. table_offset -= table->length;
  620. *table_length = table->length;
  621. acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
  622. *address = acpi_tables_addr + table_offset;
  623. break;
  624. } while (table_offset + ACPI_HEADER_SIZE < all_tables_size);
  625. if (*address != 0)
  626. acpi_table_taint(existing_table);
  627. return AE_OK;
  628. #endif
  629. }
  630. static irqreturn_t acpi_irq(int irq, void *dev_id)
  631. {
  632. u32 handled;
  633. handled = (*acpi_irq_handler) (acpi_irq_context);
  634. if (handled) {
  635. acpi_irq_handled++;
  636. return IRQ_HANDLED;
  637. } else {
  638. acpi_irq_not_handled++;
  639. return IRQ_NONE;
  640. }
  641. }
/*
 * Hook ACPICA's SCI handler into the kernel IRQ machinery.  Only the SCI
 * from our copy of the FADT is supported, and only one handler may be
 * installed at a time.
 *
 * NOTE(review): when acpi_gsi_to_irq() fails this returns AE_OK with no
 * handler installed -- apparently deliberate (boot continues without the
 * SCI), but worth confirming against ACPICA's expectations.
 */
acpi_status
acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
				  void *context)
{
	unsigned int irq;

	acpi_irq_stats_init();

	/*
	 * ACPI interrupts different from the SCI in our copy of the FADT are
	 * not supported.
	 */
	if (gsi != acpi_gbl_FADT.sci_interrupt)
		return AE_BAD_PARAMETER;

	if (acpi_irq_handler)
		return AE_ALREADY_ACQUIRED;

	if (acpi_gsi_to_irq(gsi, &irq) < 0) {
		printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
		       gsi);
		return AE_OK;
	}

	/* Publish handler/context before the IRQ can start firing. */
	acpi_irq_handler = handler;
	acpi_irq_context = context;
	if (request_irq(irq, acpi_irq, IRQF_SHARED | IRQF_NO_SUSPEND, "acpi", acpi_irq)) {
		printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
		acpi_irq_handler = NULL;
		return AE_NOT_ACQUIRED;
	}

	return AE_OK;
}
/*
 * Tear down the SCI handler installed above.
 *
 * NOTE(review): @irq here is the ACPI GSI, but request_irq() used the
 * value returned by acpi_gsi_to_irq().  free_irq(irq, ...) is only
 * correct when the two numbers coincide (typical for the x86 SCI);
 * confirm on platforms with a non-identity GSI->IRQ mapping.
 */
acpi_status acpi_os_remove_interrupt_handler(u32 irq, acpi_osd_handler handler)
{
	if (irq != acpi_gbl_FADT.sci_interrupt)
		return AE_BAD_PARAMETER;

	free_irq(irq, acpi_irq);
	acpi_irq_handler = NULL;

	return AE_OK;
}
  678. /*
  679. * Running in interpreter thread context, safe to sleep
  680. */
  681. void acpi_os_sleep(u64 ms)
  682. {
  683. schedule_timeout_interruptible(msecs_to_jiffies(ms));
  684. }
  685. void acpi_os_stall(u32 us)
  686. {
  687. while (us) {
  688. u32 delay = 1000;
  689. if (delay > us)
  690. delay = us;
  691. udelay(delay);
  692. touch_nmi_watchdog();
  693. us -= delay;
  694. }
  695. }
/*
 * Support ACPI 3.0 AML Timer operand
 * Returns 64-bit free-running, monotonically increasing timer
 * with 100ns granularity
 */
u64 acpi_os_get_timer(void)
{
	u64 time_ns = ktime_to_ns(ktime_get());

	/* do_div() divides in place: time_ns now holds ns / 100. */
	do_div(time_ns, 100);
	return time_ns;
}
/*
 * Read up to 32 bits from an I/O port.  A NULL @value is tolerated (the
 * data is read and discarded); a width above 32 is a kernel bug.
 *
 * NOTE(review): the narrow casts store into the first bytes of *value,
 * which places the data in the low-order bits only on little-endian
 * machines -- fine for x86, confirm if ever built big-endian.
 */
acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
{
	u32 dummy;

	if (!value)
		value = &dummy;	/* caller does not want the data */

	*value = 0;	/* clear high bytes before a narrow store */
	if (width <= 8) {
		*(u8 *) value = inb(port);
	} else if (width <= 16) {
		*(u16 *) value = inw(port);
	} else if (width <= 32) {
		*(u32 *) value = inl(port);
	} else {
		BUG();
	}

	return AE_OK;
}
EXPORT_SYMBOL(acpi_os_read_port);
/*
 * Write up to 32 bits to an I/O port.  Widths other than <=8/<=16/<=32
 * are a programming error (BUG).
 */
acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
{
	if (width <= 8) {
		outb(value, port);
	} else if (width <= 16) {
		outw(value, port);
	} else if (width <= 32) {
		outl(value, port);
	} else {
		BUG();
	}

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_write_port);
#ifdef readq
/* 64-bit MMIO read; atomic when the architecture provides readq(). */
static inline u64 read64(const volatile void __iomem *addr)
{
	return readq(addr);
}
#else
/*
 * Fallback for architectures without readq(): two 32-bit reads, low
 * word first.  NOTE(review): this is not atomic — a concurrent writer
 * may be observed torn between the two halves.
 */
static inline u64 read64(const volatile void __iomem *addr)
{
	u64 l, h;

	l = readl(addr);
	h = readl(addr+4);
	return l | (h << 32);
}
#endif
/*
 * Read 'width' bits (8/16/32/64) from physical address 'phys_addr'.
 * A mapping cached by the ACPI mapping tracker is reused when available
 * (protected by RCU); otherwise a transient ioremap is created for this
 * one access and torn down afterwards.
 */
acpi_status
acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;
	u64 dummy;

	/* RCU guards the cached-mapping list for lookup + access. */
	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	/* A NULL 'value' still performs the read; result is discarded. */
	if (!value)
		value = &dummy;

	switch (width) {
	case 8:
		*(u8 *) value = readb(virt_addr);
		break;
	case 16:
		*(u16 *) value = readw(virt_addr);
		break;
	case 32:
		*(u32 *) value = readl(virt_addr);
		break;
	case 64:
		*(u64 *) value = read64(virt_addr);
		break;
	default:
		BUG();
	}

	/* Transient mapping is unmapped; cached mapping just drops RCU. */
	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();

	return AE_OK;
}
#ifdef writeq
/* 64-bit MMIO write; atomic when the architecture provides writeq(). */
static inline void write64(u64 val, volatile void __iomem *addr)
{
	writeq(val, addr);
}
#else
/*
 * Fallback for architectures without writeq(): two 32-bit writes, low
 * word first.  NOTE(review): not atomic — a concurrent reader may see
 * the store half-complete.
 */
static inline void write64(u64 val, volatile void __iomem *addr)
{
	writel(val, addr);
	writel(val>>32, addr+4);
}
#endif
/*
 * Write 'width' bits (8/16/32/64) of 'value' to physical address
 * 'phys_addr'.  Mirrors acpi_os_read_memory(): reuses a cached mapping
 * under RCU when possible, else creates a transient ioremap.
 */
acpi_status
acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;

	/* RCU guards the cached-mapping list for lookup + access. */
	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	switch (width) {
	case 8:
		writeb(value, virt_addr);
		break;
	case 16:
		writew(value, virt_addr);
		break;
	case 32:
		writel(value, virt_addr);
		break;
	case 64:
		write64(value, virt_addr);
		break;
	default:
		BUG();
	}

	/* Transient mapping is unmapped; cached mapping just drops RCU. */
	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();

	return AE_OK;
}
  842. acpi_status
  843. acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
  844. u64 *value, u32 width)
  845. {
  846. int result, size;
  847. u32 value32;
  848. if (!value)
  849. return AE_BAD_PARAMETER;
  850. switch (width) {
  851. case 8:
  852. size = 1;
  853. break;
  854. case 16:
  855. size = 2;
  856. break;
  857. case 32:
  858. size = 4;
  859. break;
  860. default:
  861. return AE_ERROR;
  862. }
  863. result = raw_pci_read(pci_id->segment, pci_id->bus,
  864. PCI_DEVFN(pci_id->device, pci_id->function),
  865. reg, size, &value32);
  866. *value = value32;
  867. return (result ? AE_ERROR : AE_OK);
  868. }
/*
 * Write 8/16/32 bits of 'value' to PCI config space for the device
 * identified by 'pci_id'.  Unsupported widths and failed bus accesses
 * return AE_ERROR.
 */
acpi_status
acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
				u64 value, u32 width)
{
	int result, size;

	switch (width) {
	case 8:
		size = 1;
		break;
	case 16:
		size = 2;
		break;
	case 32:
		size = 4;
		break;
	default:
		return AE_ERROR;
	}

	result = raw_pci_write(pci_id->segment, pci_id->bus,
			       PCI_DEVFN(pci_id->device, pci_id->function),
			       reg, size, value);

	return (result ? AE_ERROR : AE_OK);
}
/* Workqueue callback: run one deferred ACPI execution request. */
static void acpi_os_execute_deferred(struct work_struct *work)
{
	struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);

	/* Hotplug requests (dpc->wait set) first drain pending events. */
	if (dpc->wait)
		acpi_os_wait_events_complete();

	dpc->function(dpc->context);
	kfree(dpc);	/* allocated by __acpi_os_execute() */
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_os_execute
 *
 * PARAMETERS:  Type               - Type of the callback
 *              Function           - Function to be executed
 *              Context            - Function parameters
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Depending on type, either queues function for deferred execution or
 *              immediately executes function on a separate thread.
 *
 ******************************************************************************/

static acpi_status __acpi_os_execute(acpi_execute_type type,
	acpi_osd_exec_callback function, void *context, int hp)
{
	acpi_status status = AE_OK;
	struct acpi_os_dpc *dpc;
	struct workqueue_struct *queue;
	int ret;

	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
			  "Scheduling function [%p(%p)] for deferred execution.\n",
			  function, context));

	/*
	 * Allocate/initialize DPC structure.  Note that this memory will be
	 * freed by the callee.  The kernel handles the work_struct list  in a
	 * way that allows us to also free its memory inside the callee.
	 * Because we may want to schedule several tasks with different
	 * parameters we can't use the approach some kernel code uses of
	 * having a static work_struct.
	 */
	/* GFP_ATOMIC: this path can be entered from atomic context. */
	dpc = kzalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
	if (!dpc)
		return AE_NO_MEMORY;

	dpc->function = function;
	dpc->context = context;

	/*
	 * We can't run hotplug code in keventd_wq/kacpid_wq/kacpid_notify_wq
	 * because the hotplug code may call driver .remove() functions,
	 * which invoke flush_scheduled_work/acpi_os_wait_events_complete
	 * to flush these workqueues.
	 *
	 * To prevent lockdep from complaining unnecessarily, make sure that
	 * there is a different static lockdep key for each workqueue by using
	 * INIT_WORK() for each of them separately.
	 */
	if (hp) {
		queue = kacpi_hotplug_wq;
		dpc->wait = 1;	/* makes the callback drain events first */
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
	} else if (type == OSL_NOTIFY_HANDLER) {
		queue = kacpi_notify_wq;
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
	} else {
		queue = kacpid_wq;
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
	}

	/*
	 * On some machines, a software-initiated SMI causes corruption unless
	 * the SMI runs on CPU 0.  An SMI can be initiated by any AML, but
	 * typically it's done in GPE-related methods that are run via
	 * workqueues, so we can avoid the known corruption cases by always
	 * queueing on CPU 0.
	 */
	ret = queue_work_on(0, queue, &dpc->work);

	if (!ret) {
		/* Work item was already queued; drop our copy. */
		printk(KERN_ERR PREFIX
			  "Call to queue_work() failed.\n");
		status = AE_ERROR;
		kfree(dpc);
	}
	return status;
}
/* Public wrapper: queue 'function' on the regular (non-hotplug) queues. */
acpi_status acpi_os_execute(acpi_execute_type type,
			    acpi_osd_exec_callback function, void *context)
{
	return __acpi_os_execute(type, function, context, 0);
}

EXPORT_SYMBOL(acpi_os_execute);
/*
 * Queue 'function' on the dedicated hotplug workqueue.  The type
 * argument is irrelevant here (passed as 0); hp=1 selects
 * kacpi_hotplug_wq and makes the callback wait for pending events.
 */
acpi_status acpi_os_hotplug_execute(acpi_osd_exec_callback function,
	void *context)
{
	return __acpi_os_execute(0, function, context, 1);
}

EXPORT_SYMBOL(acpi_os_hotplug_execute);
/*
 * Wait for all queued GPE and Notify work to finish.  Deliberately does
 * NOT flush kacpi_hotplug_wq: hotplug work itself calls this function.
 */
void acpi_os_wait_events_complete(void)
{
	flush_workqueue(kacpid_wq);
	flush_workqueue(kacpi_notify_wq);
}

EXPORT_SYMBOL(acpi_os_wait_events_complete);
  992. acpi_status
  993. acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
  994. {
  995. struct semaphore *sem = NULL;
  996. sem = acpi_os_allocate(sizeof(struct semaphore));
  997. if (!sem)
  998. return AE_NO_MEMORY;
  999. memset(sem, 0, sizeof(struct semaphore));
  1000. sema_init(sem, initial_units);
  1001. *handle = (acpi_handle *) sem;
  1002. ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
  1003. *handle, initial_units));
  1004. return AE_OK;
  1005. }
  1006. /*
  1007. * TODO: A better way to delete semaphores? Linux doesn't have a
  1008. * 'delete_semaphore()' function -- may result in an invalid
  1009. * pointer dereference for non-synchronized consumers. Should
  1010. * we at least check for blocked threads and signal/cancel them?
  1011. */
  1012. acpi_status acpi_os_delete_semaphore(acpi_handle handle)
  1013. {
  1014. struct semaphore *sem = (struct semaphore *)handle;
  1015. if (!sem)
  1016. return AE_BAD_PARAMETER;
  1017. ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));
  1018. BUG_ON(!list_empty(&sem->wait_list));
  1019. kfree(sem);
  1020. sem = NULL;
  1021. return AE_OK;
  1022. }
/*
 * TODO: Support for units > 1?
 */
/*
 * Acquire the semaphore, waiting at most 'timeout' ms
 * (ACPI_WAIT_FOREVER means wait indefinitely).  Only single-unit
 * acquisition is supported; returns AE_TIME on timeout.
 */
acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
{
	acpi_status status = AE_OK;
	struct semaphore *sem = (struct semaphore *)handle;
	long jiffies;
	int ret = 0;

	if (!sem || (units < 1))
		return AE_BAD_PARAMETER;

	if (units > 1)
		return AE_SUPPORT;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
			  handle, units, timeout));

	if (timeout == ACPI_WAIT_FOREVER)
		jiffies = MAX_SCHEDULE_TIMEOUT;
	else
		jiffies = msecs_to_jiffies(timeout);

	/* down_timeout() sleeps uninterruptibly up to 'jiffies'. */
	ret = down_timeout(sem, jiffies);
	if (ret)
		status = AE_TIME;

	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Failed to acquire semaphore[%p|%d|%d], %s",
				  handle, units, timeout,
				  acpi_format_exception(status)));
	} else {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Acquired semaphore[%p|%d|%d]", handle,
				  units, timeout));
	}

	return status;
}
/*
 * TODO: Support for units > 1?
 */
/* Release one unit of the semaphore; multi-unit signal is unsupported. */
acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
{
	struct semaphore *sem = (struct semaphore *)handle;

	if (!sem || (units < 1))
		return AE_BAD_PARAMETER;

	if (units > 1)
		return AE_SUPPORT;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
			  units));

	up(sem);

	return AE_OK;
}
#ifdef ACPI_FUTURE_USAGE
/*
 * Read a line of debugger input into 'buffer' (only when running under
 * the kernel debugger).  Always returns 0.
 */
u32 acpi_os_get_line(char *buffer)
{

#ifdef ENABLE_DEBUGGER
	if (acpi_in_debugger) {
		u32 chars;

		/*
		 * NOTE(review): the size passed is sizeof(line_buf), not the
		 * size of 'buffer' — safe only if callers always pass
		 * line_buf (or something at least as large); verify callers.
		 */
		kdb_read(buffer, sizeof(line_buf));

		/* remove the CR kdb includes */
		/*
		 * NOTE(review): if kdb_read() ever yields an empty string,
		 * strlen()-1 underflows and buffer[-1] is written — assumes
		 * at least the CR is always present.
		 */
		chars = strlen(buffer) - 1;
		buffer[chars] = '\0';
	}
#endif

	return 0;
}
#endif				/*  ACPI_FUTURE_USAGE  */
/*
 * Handle an AML-raised signal.  Fatal opcodes are logged (the 'info'
 * payload is currently ignored); breakpoints are deliberate NOPs.
 */
acpi_status acpi_os_signal(u32 function, void *info)
{
	switch (function) {
	case ACPI_SIGNAL_FATAL:
		printk(KERN_ERR PREFIX "Fatal opcode executed\n");
		break;
	case ACPI_SIGNAL_BREAKPOINT:
		/*
		 * AML Breakpoint
		 * ACPI spec. says to treat it as a NOP unless
		 * you are debugging.  So if/when we integrate
		 * AML debugger into the kernel debugger its
		 * hook will go here.  But until then it is
		 * not useful to print anything on breakpoints.
		 */
		break;
	default:
		break;
	}

	return AE_OK;
}
/*
 * "acpi_os_name=" cmdline handler: override the OS name reported to the
 * BIOS.  Copies at most ACPI_MAX_OVERRIDE_LEN-1 chars into acpi_os_name,
 * keeping only alphanumerics, spaces and colons; quote characters are
 * skipped and any other character terminates the copy.
 */
static int __init acpi_os_name_setup(char *str)
{
	char *p = acpi_os_name;
	int count = ACPI_MAX_OVERRIDE_LEN - 1;

	if (!str || !*str)
		return 0;

	for (; count-- && str && *str; str++) {
		if (isalnum(*str) || *str == ' ' || *str == ':')
			*p++ = *str;
		else if (*str == '\'' || *str == '"')
			continue;
		else
			break;
	}

	*p = 0;		/* always NUL-terminate the accepted prefix */

	return 1;
}

__setup("acpi_os_name=", acpi_os_name_setup);
#define OSI_STRING_LENGTH_MAX 64	/* arbitrary */
#define OSI_STRING_ENTRIES_MAX 16	/* arbitrary */

/* One _OSI string override requested via acpi_osi= cmdline or DMI. */
struct osi_setup_entry {
	char string[OSI_STRING_LENGTH_MAX];
	bool enable;		/* install (true) or remove (false) */
};

/* Defaults: interfaces Linux advertises to the BIOS out of the box. */
static struct osi_setup_entry
		osi_setup_entries[OSI_STRING_ENTRIES_MAX] __initdata = {
	{"Module Device", true},
	{"Processor Device", true},
	{"3.0 _SCP Extensions", true},
	{"Processor Aggregator Device", true},
};
  1139. void __init acpi_osi_setup(char *str)
  1140. {
  1141. struct osi_setup_entry *osi;
  1142. bool enable = true;
  1143. int i;
  1144. if (!acpi_gbl_create_osi_method)
  1145. return;
  1146. if (str == NULL || *str == '\0') {
  1147. printk(KERN_INFO PREFIX "_OSI method disabled\n");
  1148. acpi_gbl_create_osi_method = FALSE;
  1149. return;
  1150. }
  1151. if (*str == '!') {
  1152. str++;
  1153. if (*str == '\0') {
  1154. osi_linux.default_disabling = 1;
  1155. return;
  1156. } else if (*str == '*') {
  1157. acpi_update_interfaces(ACPI_DISABLE_ALL_STRINGS);
  1158. for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
  1159. osi = &osi_setup_entries[i];
  1160. osi->enable = false;
  1161. }
  1162. return;
  1163. }
  1164. enable = false;
  1165. }
  1166. for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
  1167. osi = &osi_setup_entries[i];
  1168. if (!strcmp(osi->string, str)) {
  1169. osi->enable = enable;
  1170. break;
  1171. } else if (osi->string[0] == '\0') {
  1172. osi->enable = enable;
  1173. strncpy(osi->string, str, OSI_STRING_LENGTH_MAX);
  1174. break;
  1175. }
  1176. }
  1177. }
  1178. static void __init set_osi_linux(unsigned int enable)
  1179. {
  1180. if (osi_linux.enable != enable)
  1181. osi_linux.enable = enable;
  1182. if (osi_linux.enable)
  1183. acpi_osi_setup("Linux");
  1184. else
  1185. acpi_osi_setup("!Linux");
  1186. return;
  1187. }
/* Handle "acpi_osi=Linux"/"acpi_osi=!Linux": cmdline overrides DMI. */
static void __init acpi_cmdline_osi_linux(unsigned int enable)
{
	osi_linux.cmdline = 1;	/* cmdline set the default and override DMI */
	osi_linux.dmi = 0;
	set_osi_linux(enable);

	return;
}
/*
 * DMI quirk hook: a matched system asks for OSI(Linux) on (1), off (0),
 * or just logs the match (-1, no change).
 */
void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d)
{
	printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);

	if (enable == -1)
		return;

	osi_linux.dmi = 1;	/* DMI knows that this box asks OSI(Linux) */
	set_osi_linux(enable);

	return;
}
/*
 * Modify the list of "OS Interfaces" reported to BIOS via _OSI
 *
 * empty string disables _OSI
 * string starting with '!' disables that string
 * otherwise string is added to list, augmenting built-in strings
 */
/*
 * Apply the overrides collected by acpi_osi_setup() now that ACPICA is
 * up: optionally drop all vendor strings, then install/remove each
 * recorded entry.
 */
static void __init acpi_osi_setup_late(void)
{
	struct osi_setup_entry *osi;
	char *str;
	int i;
	acpi_status status;

	if (osi_linux.default_disabling) {
		status = acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS);

		if (ACPI_SUCCESS(status))
			printk(KERN_INFO PREFIX "Disabled all _OSI OS vendors\n");
	}

	for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
		osi = &osi_setup_entries[i];
		str = osi->string;

		/* Entries are filled front to back; first empty ends the list. */
		if (*str == '\0')
			break;
		if (osi->enable) {
			status = acpi_install_interface(str);

			if (ACPI_SUCCESS(status))
				printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str);
		} else {
			status = acpi_remove_interface(str);

			if (ACPI_SUCCESS(status))
				printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str);
		}
	}
}
  1238. static int __init osi_setup(char *str)
  1239. {
  1240. if (str && !strcmp("Linux", str))
  1241. acpi_cmdline_osi_linux(1);
  1242. else if (str && !strcmp("!Linux", str))
  1243. acpi_cmdline_osi_linux(0);
  1244. else
  1245. acpi_osi_setup(str);
  1246. return 1;
  1247. }
  1248. __setup("acpi_osi=", osi_setup);
/* enable serialization to combat AE_ALREADY_EXISTS errors */
/* "acpi_serialize" cmdline flag; 'str' is unused but required by __setup. */
static int __init acpi_serialize_setup(char *str)
{
	printk(KERN_INFO PREFIX "serialize enabled\n");

	acpi_gbl_all_methods_serialized = TRUE;

	return 1;
}

__setup("acpi_serialize", acpi_serialize_setup);
/* Check of resource interference between native drivers and ACPI
 * OperationRegions (SystemIO and System Memory only).
 * IO ports and memory declared in ACPI might be used by the ACPI subsystem
 * in arbitrary AML code and can interfere with legacy drivers.
 * acpi_enforce_resources= can be set to:
 *
 *   - strict (default) (2)
 *     -> further driver trying to access the resources will not load
 *   - lax              (1)
 *     -> further driver trying to access the resources will load, but you
 *     get a system message that something might go wrong...
 *
 *   - no               (0)
 *     -> ACPI Operation Region resources will not be registered
 *
 */
#define ENFORCE_RESOURCES_STRICT 2
#define ENFORCE_RESOURCES_LAX    1
#define ENFORCE_RESOURCES_NO     0

static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;

/* "acpi_enforce_resources=" cmdline handler; unknown values are ignored. */
static int __init acpi_enforce_resources_setup(char *str)
{
	if (str == NULL || *str == '\0')
		return 0;

	if (!strcmp("strict", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
	else if (!strcmp("lax", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
	else if (!strcmp("no", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_NO;

	return 1;
}

__setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
  1290. /* Check for resource conflicts between ACPI OperationRegions and native
  1291. * drivers */
  1292. int acpi_check_resource_conflict(const struct resource *res)
  1293. {
  1294. acpi_adr_space_type space_id;
  1295. acpi_size length;
  1296. u8 warn = 0;
  1297. int clash = 0;
  1298. if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
  1299. return 0;
  1300. if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
  1301. return 0;
  1302. if (res->flags & IORESOURCE_IO)
  1303. space_id = ACPI_ADR_SPACE_SYSTEM_IO;
  1304. else
  1305. space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
  1306. length = resource_size(res);
  1307. if (acpi_enforce_resources != ENFORCE_RESOURCES_NO)
  1308. warn = 1;
  1309. clash = acpi_check_address_range(space_id, res->start, length, warn);
  1310. if (clash) {
  1311. if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
  1312. if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
  1313. printk(KERN_NOTICE "ACPI: This conflict may"
  1314. " cause random problems and system"
  1315. " instability\n");
  1316. printk(KERN_INFO "ACPI: If an ACPI driver is available"
  1317. " for this device, you should use it instead of"
  1318. " the native driver\n");
  1319. }
  1320. if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
  1321. return -EBUSY;
  1322. }
  1323. return 0;
  1324. }
  1325. EXPORT_SYMBOL(acpi_check_resource_conflict);
/*
 * Convenience wrapper: check an I/O port range [start, start+n) for
 * conflicts with ACPI OperationRegions.
 */
int acpi_check_region(resource_size_t start, resource_size_t n,
		      const char *name)
{
	struct resource res = {
		.start = start,
		.end = start + n - 1,
		.name = name,
		.flags = IORESOURCE_IO,
	};

	return acpi_check_resource_conflict(&res);
}
EXPORT_SYMBOL(acpi_check_region);
/*
 * Let drivers know whether the resource checks are effective
 */
/* True only in "strict" mode, where conflicting drivers fail to load. */
int acpi_resources_are_enforced(void)
{
	return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT;
}
EXPORT_SYMBOL(acpi_resources_are_enforced);
/*
 * Deallocate the memory for a spinlock.
 */
void acpi_os_delete_lock(acpi_spinlock handle)
{
	ACPI_FREE(handle);	/* pairs with the ACPICA-side allocation */
}
/*
 * Acquire a spinlock.
 *
 * handle is a pointer to the spinlock_t.
 */

acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
{
	acpi_cpu_flags flags;

	/* Saved IRQ flags are handed back for acpi_os_release_lock(). */
	spin_lock_irqsave(lockp, flags);
	return flags;
}
/*
 * Release a spinlock. See above.
 */

void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
{
	/* 'flags' must be the value returned by acpi_os_acquire_lock(). */
	spin_unlock_irqrestore(lockp, flags);
}
#ifndef ACPI_USE_LOCAL_CACHE
/*******************************************************************************
 *
 * FUNCTION:    acpi_os_create_cache
 *
 * PARAMETERS:  name      - Ascii name for the cache
 *              size      - Size of each cached object
 *              depth     - Maximum depth of the cache (in objects) <ignored>
 *              cache     - Where the new cache object is returned
 *
 * RETURN:      status
 *
 * DESCRIPTION: Create a cache object
 *
 ******************************************************************************/

acpi_status
acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
{
	/* 'depth' is ignored: the slab allocator manages its own sizing. */
	*cache = kmem_cache_create(name, size, 0, 0, NULL);
	if (*cache == NULL)
		return AE_ERROR;
	else
		return AE_OK;
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_os_purge_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache.
 *
 ******************************************************************************/

acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
{
	/* kmem_cache_shrink() releases the slab's unused cached pages. */
	kmem_cache_shrink(cache);
	return (AE_OK);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_os_delete_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache and delete the
 *              cache object.
 *
 ******************************************************************************/

acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
{
	kmem_cache_destroy(cache);
	return (AE_OK);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_os_release_object
 *
 * PARAMETERS:  Cache       - Handle to cache object
 *              Object      - The object to be released
 *
 * RETURN:      None
 *
 * DESCRIPTION: Release an object to the specified cache.  If cache is
 *              full, the object is deleted.
 *
 ******************************************************************************/

acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
{
	kmem_cache_free(cache, object);
	return (AE_OK);
}
#endif
/* "acpi_no_auto_ssdt" cmdline flag: prevent dynamic SSDT table loading. */
static int __init acpi_no_auto_ssdt_setup(char *s)
{
	printk(KERN_NOTICE PREFIX "SSDT auto-load disabled\n");

	acpi_gbl_disable_ssdt_table_load = TRUE;

	return 1;
}

__setup("acpi_no_auto_ssdt", acpi_no_auto_ssdt_setup);
/*
 * Early OSL init: pre-map the FADT fixed-hardware register blocks so
 * later register accesses hit the cached mappings.
 */
acpi_status __init acpi_os_initialize(void)
{
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);

	return AE_OK;
}
/*
 * Second-stage OSL init: create the ordered (max_active=1) ACPI
 * workqueues, register the _OSI handler and apply _OSI overrides.
 */
acpi_status __init acpi_os_initialize1(void)
{
	kacpid_wq = alloc_workqueue("kacpid", 0, 1);
	kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
	kacpi_hotplug_wq = alloc_workqueue("kacpi_hotplug", 0, 1);

	/* ACPI cannot function without its workqueues. */
	BUG_ON(!kacpid_wq);
	BUG_ON(!kacpi_notify_wq);
	BUG_ON(!kacpi_hotplug_wq);

	acpi_install_interface_handler(acpi_osi_handler);
	acpi_osi_setup_late();

	return AE_OK;
}
/*
 * Tear down the OSL: remove the SCI handler if installed, unmap the
 * FADT register blocks (reverse of acpi_os_initialize()) and destroy
 * the workqueues.
 */
acpi_status acpi_os_terminate(void)
{
	if (acpi_irq_handler) {
		acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt,
						 acpi_irq_handler);
	}

	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block);

	destroy_workqueue(kacpid_wq);
	destroy_workqueue(kacpi_notify_wq);
	destroy_workqueue(kacpi_hotplug_wq);

	return AE_OK;
}
/*
 * Invoke the platform sleep-preparation hook (if one is registered).
 * A negative hook result maps to AE_ERROR, positive to AE_CTRL_SKIP
 * (caller skips the default PM1 control writes), zero to AE_OK.
 */
acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control,
				  u32 pm1b_control)
{
	int rc = 0;
	if (__acpi_os_prepare_sleep)
		rc = __acpi_os_prepare_sleep(sleep_state,
					     pm1a_control, pm1b_control);
	if (rc < 0)
		return AE_ERROR;
	else if (rc > 0)
		return AE_CTRL_SKIP;

	return AE_OK;
}
/* Register (or clear, with NULL) the sleep-preparation hook. */
void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
			       u32 pm1a_ctrl, u32 pm1b_ctrl))
{
	__acpi_os_prepare_sleep = func;
}
/*
 * Extended (SLEEP_CONTROL/SLEEP_STATUS register) variant of
 * acpi_os_prepare_sleep(); same result mapping.
 */
acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
				  u32 val_b)
{
	int rc = 0;
	if (__acpi_os_prepare_extended_sleep)
		rc = __acpi_os_prepare_extended_sleep(sleep_state,
					     val_a, val_b);
	if (rc < 0)
		return AE_ERROR;
	else if (rc > 0)
		return AE_CTRL_SKIP;

	return AE_OK;
}
/* Register (or clear, with NULL) the extended sleep-preparation hook. */
void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
				        u32 val_a, u32 val_b))
{
	__acpi_os_prepare_extended_sleep = func;
}
/*
 * Allocate a hotplug work item and queue 'func' on the hotplug
 * workqueue.  Failures (allocation or already-queued work) are silent:
 * the request is simply dropped.  The work callback owns and must free
 * the acpi_hp_work structure.
 */
void alloc_acpi_hp_work(acpi_handle handle, u32 type, void *context,
			void (*func)(struct work_struct *work))
{
	struct acpi_hp_work *hp_work;
	int ret;

	hp_work = kmalloc(sizeof(*hp_work), GFP_KERNEL);
	if (!hp_work)
		return;

	hp_work->handle = handle;
	hp_work->type = type;
	hp_work->context = context;

	INIT_WORK(&hp_work->work, func);
	ret = queue_work(kacpi_hotplug_wq, &hp_work->work);
	if (!ret)
		kfree(hp_work);	/* work was already pending; drop our copy */
}
EXPORT_SYMBOL_GPL(alloc_acpi_hp_work);