osl.c

  1. /*
  2. * acpi_osl.c - OS-dependent functions ($Revision: 83 $)
  3. *
  4. * Copyright (C) 2000 Andrew Henroid
  5. * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
  6. * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
  7. * Copyright (c) 2008 Intel Corporation
  8. * Author: Matthew Wilcox <willy@linux.intel.com>
  9. *
  10. * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  11. *
  12. * This program is free software; you can redistribute it and/or modify
  13. * it under the terms of the GNU General Public License as published by
  14. * the Free Software Foundation; either version 2 of the License, or
  15. * (at your option) any later version.
  16. *
  17. * This program is distributed in the hope that it will be useful,
  18. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  19. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  20. * GNU General Public License for more details.
  21. *
  22. * You should have received a copy of the GNU General Public License
  23. * along with this program; if not, write to the Free Software
  24. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  25. *
  26. * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  27. *
  28. */
  29. #include <linux/module.h>
  30. #include <linux/kernel.h>
  31. #include <linux/slab.h>
  32. #include <linux/mm.h>
  33. #include <linux/pci.h>
  34. #include <linux/interrupt.h>
  35. #include <linux/kmod.h>
  36. #include <linux/delay.h>
  37. #include <linux/workqueue.h>
  38. #include <linux/nmi.h>
  39. #include <linux/acpi.h>
  40. #include <linux/efi.h>
  41. #include <linux/ioport.h>
  42. #include <linux/list.h>
  43. #include <linux/jiffies.h>
  44. #include <linux/semaphore.h>
  45. #include <asm/io.h>
  46. #include <asm/uaccess.h>
  47. #include <acpi/acpi.h>
  48. #include <acpi/acpi_bus.h>
  49. #include <acpi/processor.h>
  50. #define _COMPONENT ACPI_OS_SERVICES
  51. ACPI_MODULE_NAME("osl");
  52. #define PREFIX "ACPI: "
  53. struct acpi_os_dpc {
  54. acpi_osd_exec_callback function;
  55. void *context;
  56. struct work_struct work;
  57. int wait;
  58. };
  59. #ifdef CONFIG_ACPI_CUSTOM_DSDT
  60. #include CONFIG_ACPI_CUSTOM_DSDT_FILE
  61. #endif
  62. #ifdef ENABLE_DEBUGGER
  63. #include <linux/kdb.h>
  64. /* stuff for debugger support */
  65. int acpi_in_debugger;
  66. EXPORT_SYMBOL(acpi_in_debugger);
  67. extern char line_buf[80];
  68. #endif /*ENABLE_DEBUGGER */
  69. static unsigned int acpi_irq_irq;
  70. static acpi_osd_handler acpi_irq_handler;
  71. static void *acpi_irq_context;
  72. static struct workqueue_struct *kacpid_wq;
  73. static struct workqueue_struct *kacpi_notify_wq;
  74. static struct workqueue_struct *kacpi_hotplug_wq;
  75. struct acpi_res_list {
  76. resource_size_t start;
  77. resource_size_t end;
  78. acpi_adr_space_type resource_type; /* IO port, System memory, ...*/
  79. char name[5]; /* can only hold 4 chars; use this instead of
  80. res->name, so there is no need to kmalloc */
  81. struct list_head resource_list;
  82. int count;
  83. };
  84. static LIST_HEAD(resource_list_head);
  85. static DEFINE_SPINLOCK(acpi_res_lock);
  86. #define OSI_STRING_LENGTH_MAX 64 /* arbitrary */
  87. static char osi_additional_string[OSI_STRING_LENGTH_MAX];
  88. /*
  89. * The story of _OSI(Linux)
  90. *
  91. * From pre-history through Linux-2.6.22,
  92. * Linux responded TRUE upon a BIOS OSI(Linux) query.
  93. *
  94. * Unfortunately, reference BIOS writers got wind of this
  95. * and put OSI(Linux) in their example code, quickly exposing
  96. * this string as ill-conceived and opening the door to
  97. * an un-bounded number of BIOS incompatibilities.
  98. *
  99. * For example, OSI(Linux) was used on resume to re-POST a
  100. * video card on one system, because Linux at that time
  101. * could not do a speedy restore in its native driver.
  102. * But then upon gaining quick native restore capability,
  103. * Linux has no way to tell the BIOS to skip the time-consuming
  104. * POST -- putting Linux at a permanent performance disadvantage.
  105. * On another system, the BIOS writer used OSI(Linux)
  106. * to infer native OS support for IPMI! On other systems,
  107. * OSI(Linux) simply got in the way of Linux claiming to
  108. * be compatible with other operating systems, exposing
  109. * BIOS issues such as skipped device initialization.
  110. *
  111. * So "Linux" turned out to be a really poor choice of
  112. * OSI string, and from Linux-2.6.23 onward we respond FALSE.
  113. *
  114. * BIOS writers should NOT query _OSI(Linux) on future systems.
  115. * Linux will complain on the console when it sees it, and return FALSE.
  116. * To get Linux to return TRUE for your system will require
  117. * a kernel source update to add a DMI entry,
  118. * or boot with "acpi_osi=Linux"
  119. */
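/*
 * For reference, such a DMI quirk normally lives in the ACPI DMI blacklist
 * table (e.g. drivers/acpi/blacklist.c), not in this file. A minimal sketch,
 * with hypothetical vendor/product strings, might look roughly like:
 *
 *	static int __init dmi_enable_osi_linux(const struct dmi_system_id *d)
 *	{
 *		acpi_dmi_osi_linux(1, d);
 *		return 0;
 *	}
 *
 *	static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
 *		{
 *		 .callback = dmi_enable_osi_linux,
 *		 .ident = "Example Laptop",
 *		 .matches = {
 *			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
 *			DMI_MATCH(DMI_PRODUCT_NAME, "Example Model"),
 *		 },
 *		},
 *		{}
 *	};
 */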
  120. static struct osi_linux {
  121. unsigned int enable:1;
  122. unsigned int dmi:1;
  123. unsigned int cmdline:1;
  124. unsigned int known:1;
  125. } osi_linux = { 0, 0, 0, 0};
  126. static void __init acpi_request_region (struct acpi_generic_address *addr,
  127. unsigned int length, char *desc)
  128. {
  129. struct resource *res;
  130. if (!addr->address || !length)
  131. return;
  132. if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
  133. res = request_region(addr->address, length, desc);
  134. else if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
  135. res = request_mem_region(addr->address, length, desc);
  136. }
  137. static int __init acpi_reserve_resources(void)
  138. {
  139. acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
  140. "ACPI PM1a_EVT_BLK");
  141. acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
  142. "ACPI PM1b_EVT_BLK");
  143. acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
  144. "ACPI PM1a_CNT_BLK");
  145. acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
  146. "ACPI PM1b_CNT_BLK");
  147. if (acpi_gbl_FADT.pm_timer_length == 4)
  148. acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");
  149. acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
  150. "ACPI PM2_CNT_BLK");
  151. /* Length of GPE blocks must be a non-negative multiple of 2 */
  152. if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
  153. acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
  154. acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");
  155. if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
  156. acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
  157. acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");
  158. return 0;
  159. }
  160. device_initcall(acpi_reserve_resources);
  161. acpi_status __init acpi_os_initialize(void)
  162. {
  163. return AE_OK;
  164. }
  165. static void bind_to_cpu0(struct work_struct *work)
  166. {
  167. set_cpus_allowed_ptr(current, cpumask_of(0));
  168. kfree(work);
  169. }
  170. static void bind_workqueue(struct workqueue_struct *wq)
  171. {
  172. struct work_struct *work;
  173. work = kzalloc(sizeof(struct work_struct), GFP_KERNEL);
  174. INIT_WORK(work, bind_to_cpu0);
  175. queue_work(wq, work);
  176. }
  177. acpi_status acpi_os_initialize1(void)
  178. {
  179. /*
  180. * On some machines, a software-initiated SMI causes corruption unless
  181. * the SMI runs on CPU 0. An SMI can be initiated by any AML, but
  182. * typically it's done in GPE-related methods that are run via
  183. * workqueues, so we can avoid the known corruption cases by binding
  184. * the workqueues to CPU 0.
  185. */
  186. kacpid_wq = create_singlethread_workqueue("kacpid");
  187. bind_workqueue(kacpid_wq);
  188. kacpi_notify_wq = create_singlethread_workqueue("kacpi_notify");
  189. bind_workqueue(kacpi_notify_wq);
  190. kacpi_hotplug_wq = create_singlethread_workqueue("kacpi_hotplug");
  191. bind_workqueue(kacpi_hotplug_wq);
  192. BUG_ON(!kacpid_wq);
  193. BUG_ON(!kacpi_notify_wq);
  194. BUG_ON(!kacpi_hotplug_wq);
  195. return AE_OK;
  196. }
  197. acpi_status acpi_os_terminate(void)
  198. {
  199. if (acpi_irq_handler) {
  200. acpi_os_remove_interrupt_handler(acpi_irq_irq,
  201. acpi_irq_handler);
  202. }
  203. destroy_workqueue(kacpid_wq);
  204. destroy_workqueue(kacpi_notify_wq);
  205. destroy_workqueue(kacpi_hotplug_wq);
  206. return AE_OK;
  207. }
  208. void acpi_os_printf(const char *fmt, ...)
  209. {
  210. va_list args;
  211. va_start(args, fmt);
  212. acpi_os_vprintf(fmt, args);
  213. va_end(args);
  214. }
  215. void acpi_os_vprintf(const char *fmt, va_list args)
  216. {
  217. static char buffer[512];
  218. vsprintf(buffer, fmt, args);
  219. #ifdef ENABLE_DEBUGGER
  220. if (acpi_in_debugger) {
  221. kdb_printf("%s", buffer);
  222. } else {
  223. printk(KERN_CONT "%s", buffer);
  224. }
  225. #else
  226. printk(KERN_CONT "%s", buffer);
  227. #endif
  228. }
  229. acpi_physical_address __init acpi_os_get_root_pointer(void)
  230. {
  231. if (efi_enabled) {
  232. if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
  233. return efi.acpi20;
  234. else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
  235. return efi.acpi;
  236. else {
  237. printk(KERN_ERR PREFIX
  238. "System description tables not found\n");
  239. return 0;
  240. }
  241. } else {
  242. acpi_physical_address pa = 0;
  243. acpi_find_root_pointer(&pa);
  244. return pa;
  245. }
  246. }
  247. void __iomem *__init_refok
  248. acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
  249. {
  250. if (phys > ULONG_MAX) {
  251. printk(KERN_ERR PREFIX "Cannot map memory that high\n");
  252. return NULL;
  253. }
  254. if (acpi_gbl_permanent_mmap)
  255. /*
  256. * ioremap checks to ensure this is in reserved space
  257. */
  258. return ioremap((unsigned long)phys, size);
  259. else
  260. return __acpi_map_table((unsigned long)phys, size);
  261. }
  262. EXPORT_SYMBOL_GPL(acpi_os_map_memory);
  263. void __ref acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
  264. {
  265. if (acpi_gbl_permanent_mmap)
  266. iounmap(virt);
  267. else
  268. __acpi_unmap_table(virt, size);
  269. }
  270. EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
  271. void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
  272. {
  273. if (!acpi_gbl_permanent_mmap)
  274. __acpi_unmap_table(virt, size);
  275. }
  276. #ifdef ACPI_FUTURE_USAGE
  277. acpi_status
  278. acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
  279. {
  280. if (!phys || !virt)
  281. return AE_BAD_PARAMETER;
  282. *phys = virt_to_phys(virt);
  283. return AE_OK;
  284. }
  285. #endif
  286. #define ACPI_MAX_OVERRIDE_LEN 100
  287. static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];
  288. acpi_status
  289. acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
  290. acpi_string * new_val)
  291. {
  292. if (!init_val || !new_val)
  293. return AE_BAD_PARAMETER;
  294. *new_val = NULL;
  295. if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
  296. printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
  297. acpi_os_name);
  298. *new_val = acpi_os_name;
  299. }
  300. return AE_OK;
  301. }
  302. acpi_status
  303. acpi_os_table_override(struct acpi_table_header * existing_table,
  304. struct acpi_table_header ** new_table)
  305. {
  306. if (!existing_table || !new_table)
  307. return AE_BAD_PARAMETER;
  308. *new_table = NULL;
  309. #ifdef CONFIG_ACPI_CUSTOM_DSDT
  310. if (strncmp(existing_table->signature, "DSDT", 4) == 0)
  311. *new_table = (struct acpi_table_header *)AmlCode;
  312. #endif
  313. if (*new_table != NULL) {
  314. printk(KERN_WARNING PREFIX "Override [%4.4s-%8.8s], "
  315. "this is unsafe: tainting kernel\n",
  316. existing_table->signature,
  317. existing_table->oem_table_id);
  318. add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
  319. }
  320. return AE_OK;
  321. }
  322. static irqreturn_t acpi_irq(int irq, void *dev_id)
  323. {
  324. u32 handled;
  325. handled = (*acpi_irq_handler) (acpi_irq_context);
  326. if (handled) {
  327. acpi_irq_handled++;
  328. return IRQ_HANDLED;
  329. } else {
  330. acpi_irq_not_handled++;
  331. return IRQ_NONE;
  332. }
  333. }
  334. acpi_status
  335. acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
  336. void *context)
  337. {
  338. unsigned int irq;
  339. acpi_irq_stats_init();
  340. /*
  341. * Ignore the GSI from the core, and use the value in our copy of the
  342. * FADT. It may not be the same if an interrupt source override exists
  343. * for the SCI.
  344. */
  345. gsi = acpi_gbl_FADT.sci_interrupt;
  346. if (acpi_gsi_to_irq(gsi, &irq) < 0) {
  347. printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
  348. gsi);
  349. return AE_OK;
  350. }
  351. acpi_irq_handler = handler;
  352. acpi_irq_context = context;
  353. if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
  354. printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
  355. return AE_NOT_ACQUIRED;
  356. }
  357. acpi_irq_irq = irq;
  358. return AE_OK;
  359. }
  360. acpi_status acpi_os_remove_interrupt_handler(u32 irq, acpi_osd_handler handler)
  361. {
  362. if (irq) {
  363. free_irq(irq, acpi_irq);
  364. acpi_irq_handler = NULL;
  365. acpi_irq_irq = 0;
  366. }
  367. return AE_OK;
  368. }
  369. /*
  370. * Running in interpreter thread context, safe to sleep
  371. */
  372. void acpi_os_sleep(u64 ms)
  373. {
  374. schedule_timeout_interruptible(msecs_to_jiffies(ms));
  375. }
  376. void acpi_os_stall(u32 us)
  377. {
  378. while (us) {
  379. u32 delay = 1000;
  380. if (delay > us)
  381. delay = us;
  382. udelay(delay);
  383. touch_nmi_watchdog();
  384. us -= delay;
  385. }
  386. }
  387. /*
  388. * Support ACPI 3.0 AML Timer operand
  389. * Returns 64-bit free-running, monotonically increasing timer
  390. * with 100ns granularity
  391. */
  392. u64 acpi_os_get_timer(void)
  393. {
  394. static u64 t;
  395. #ifdef CONFIG_HPET
  396. /* TBD: use HPET if available */
  397. #endif
  398. #ifdef CONFIG_X86_PM_TIMER
  399. /* TBD: default to PM timer if HPET was not available */
  400. #endif
  401. if (!t)
  402. printk(KERN_ERR PREFIX "acpi_os_get_timer() TBD\n");
  403. return ++t;
  404. }
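/*
 * A possible (untested) sketch of a real implementation, derived from the
 * kernel's monotonic clock rather than the dummy counter above:
 *
 *	return div_u64(ktime_to_ns(ktime_get()), 100);	// ns -> 100ns ticks
 *
 * using ktime_get() from <linux/ktime.h> and div_u64() from <linux/math64.h>.
 */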
  405. acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
  406. {
  407. u32 dummy;
  408. if (!value)
  409. value = &dummy;
  410. *value = 0;
  411. if (width <= 8) {
  412. *(u8 *) value = inb(port);
  413. } else if (width <= 16) {
  414. *(u16 *) value = inw(port);
  415. } else if (width <= 32) {
  416. *(u32 *) value = inl(port);
  417. } else {
  418. BUG();
  419. }
  420. return AE_OK;
  421. }
  422. EXPORT_SYMBOL(acpi_os_read_port);
  423. acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
  424. {
  425. if (width <= 8) {
  426. outb(value, port);
  427. } else if (width <= 16) {
  428. outw(value, port);
  429. } else if (width <= 32) {
  430. outl(value, port);
  431. } else {
  432. BUG();
  433. }
  434. return AE_OK;
  435. }
  436. EXPORT_SYMBOL(acpi_os_write_port);
  437. acpi_status
  438. acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
  439. {
  440. u32 dummy;
  441. void __iomem *virt_addr;
  442. virt_addr = ioremap(phys_addr, width);
  443. if (!value)
  444. value = &dummy;
  445. switch (width) {
  446. case 8:
  447. *(u8 *) value = readb(virt_addr);
  448. break;
  449. case 16:
  450. *(u16 *) value = readw(virt_addr);
  451. break;
  452. case 32:
  453. *(u32 *) value = readl(virt_addr);
  454. break;
  455. default:
  456. BUG();
  457. }
  458. iounmap(virt_addr);
  459. return AE_OK;
  460. }
  461. acpi_status
  462. acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
  463. {
  464. void __iomem *virt_addr;
  465. virt_addr = ioremap(phys_addr, width);
  466. switch (width) {
  467. case 8:
  468. writeb(value, virt_addr);
  469. break;
  470. case 16:
  471. writew(value, virt_addr);
  472. break;
  473. case 32:
  474. writel(value, virt_addr);
  475. break;
  476. default:
  477. BUG();
  478. }
  479. iounmap(virt_addr);
  480. return AE_OK;
  481. }
  482. acpi_status
  483. acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
  484. u32 *value, u32 width)
  485. {
  486. int result, size;
  487. if (!value)
  488. return AE_BAD_PARAMETER;
  489. switch (width) {
  490. case 8:
  491. size = 1;
  492. break;
  493. case 16:
  494. size = 2;
  495. break;
  496. case 32:
  497. size = 4;
  498. break;
  499. default:
  500. return AE_ERROR;
  501. }
  502. result = raw_pci_read(pci_id->segment, pci_id->bus,
  503. PCI_DEVFN(pci_id->device, pci_id->function),
  504. reg, size, value);
  505. return (result ? AE_ERROR : AE_OK);
  506. }
  507. acpi_status
  508. acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
  509. u64 value, u32 width)
  510. {
  511. int result, size;
  512. switch (width) {
  513. case 8:
  514. size = 1;
  515. break;
  516. case 16:
  517. size = 2;
  518. break;
  519. case 32:
  520. size = 4;
  521. break;
  522. default:
  523. return AE_ERROR;
  524. }
  525. result = raw_pci_write(pci_id->segment, pci_id->bus,
  526. PCI_DEVFN(pci_id->device, pci_id->function),
  527. reg, size, value);
  528. return (result ? AE_ERROR : AE_OK);
  529. }
  530. /* TODO: Change code to take advantage of driver model more */
  531. static void acpi_os_derive_pci_id_2(acpi_handle rhandle, /* upper bound */
  532. acpi_handle chandle, /* current node */
  533. struct acpi_pci_id **id,
  534. int *is_bridge, u8 * bus_number)
  535. {
  536. acpi_handle handle;
  537. struct acpi_pci_id *pci_id = *id;
  538. acpi_status status;
  539. unsigned long long temp;
  540. acpi_object_type type;
  541. acpi_get_parent(chandle, &handle);
  542. if (handle != rhandle) {
  543. acpi_os_derive_pci_id_2(rhandle, handle, &pci_id, is_bridge,
  544. bus_number);
  545. status = acpi_get_type(handle, &type);
  546. if ((ACPI_FAILURE(status)) || (type != ACPI_TYPE_DEVICE))
  547. return;
  548. status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL,
  549. &temp);
  550. if (ACPI_SUCCESS(status)) {
  551. u32 val;
  552. pci_id->device = ACPI_HIWORD(ACPI_LODWORD(temp));
  553. pci_id->function = ACPI_LOWORD(ACPI_LODWORD(temp));
  554. if (*is_bridge)
  555. pci_id->bus = *bus_number;
  556. /* any nicer way to get bus number of bridge ? */
  557. status =
  558. acpi_os_read_pci_configuration(pci_id, 0x0e, &val,
  559. 8);
  560. if (ACPI_SUCCESS(status)
  561. && ((val & 0x7f) == 1 || (val & 0x7f) == 2)) {
  562. status =
  563. acpi_os_read_pci_configuration(pci_id, 0x18,
  564. &val, 8);
  565. if (!ACPI_SUCCESS(status)) {
  566. /* Certainly broken... FIX ME */
  567. return;
  568. }
  569. *is_bridge = 1;
  570. pci_id->bus = val;
  571. status =
  572. acpi_os_read_pci_configuration(pci_id, 0x19,
  573. &val, 8);
  574. if (ACPI_SUCCESS(status)) {
  575. *bus_number = val;
  576. }
  577. } else
  578. *is_bridge = 0;
  579. }
  580. }
  581. }
  582. void acpi_os_derive_pci_id(acpi_handle rhandle, /* upper bound */
  583. acpi_handle chandle, /* current node */
  584. struct acpi_pci_id **id)
  585. {
  586. int is_bridge = 1;
  587. u8 bus_number = (*id)->bus;
  588. acpi_os_derive_pci_id_2(rhandle, chandle, id, &is_bridge, &bus_number);
  589. }
  590. static void acpi_os_execute_deferred(struct work_struct *work)
  591. {
  592. struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
  593. if (dpc->wait)
  594. acpi_os_wait_events_complete(NULL);
  595. dpc->function(dpc->context);
  596. kfree(dpc);
  597. }
  598. /*******************************************************************************
  599. *
  600. * FUNCTION: acpi_os_execute
  601. *
  602. * PARAMETERS: Type - Type of the callback
  603. * Function - Function to be executed
  604. * Context - Function parameters
  605. *
  606. * RETURN: Status
  607. *
  608. * DESCRIPTION: Depending on type, either queues function for deferred execution or
  609. * immediately executes function on a separate thread.
  610. *
  611. ******************************************************************************/
  612. static acpi_status __acpi_os_execute(acpi_execute_type type,
  613. acpi_osd_exec_callback function, void *context, int hp)
  614. {
  615. acpi_status status = AE_OK;
  616. struct acpi_os_dpc *dpc;
  617. struct workqueue_struct *queue;
  618. int ret;
  619. ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
  620. "Scheduling function [%p(%p)] for deferred execution.\n",
  621. function, context));
  622. /*
  623. * Allocate/initialize DPC structure. Note that this memory will be
  624. * freed by the callee. The kernel handles the work_struct list in a
  625. * way that allows us to also free its memory inside the callee.
  626. * Because we may want to schedule several tasks with different
  627. * parameters we can't use the approach some kernel code uses of
  628. * having a static work_struct.
  629. */
  630. dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
  631. if (!dpc)
  632. return AE_NO_MEMORY;
  633. dpc->function = function;
  634. dpc->context = context;
  635. /*
  636. * We can't run hotplug code in keventd_wq/kacpid_wq/kacpid_notify_wq
  637. * because the hotplug code may call driver .remove() functions,
  638. * which invoke flush_scheduled_work/acpi_os_wait_events_complete
  639. * to flush these workqueues.
  640. */
  641. queue = hp ? kacpi_hotplug_wq :
  642. (type == OSL_NOTIFY_HANDLER ? kacpi_notify_wq : kacpid_wq);
  643. dpc->wait = hp ? 1 : 0;
  644. INIT_WORK(&dpc->work, acpi_os_execute_deferred);
  650. ret = queue_work(queue, &dpc->work);
  651. if (!ret) {
  652. printk(KERN_ERR PREFIX
  653. "Call to queue_work() failed.\n");
  654. status = AE_ERROR;
  655. kfree(dpc);
  656. }
  657. return status;
  658. }
  659. acpi_status acpi_os_execute(acpi_execute_type type,
  660. acpi_osd_exec_callback function, void *context)
  661. {
  662. return __acpi_os_execute(type, function, context, 0);
  663. }
  664. EXPORT_SYMBOL(acpi_os_execute);
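/*
 * Usage sketch (hypothetical caller): queue a callback for deferred
 * execution in the ACPI workqueues.
 *
 *	static void my_notify_handler(void *context)
 *	{
 *		// runs later from kacpi_notify_wq
 *	}
 *
 *	status = acpi_os_execute(OSL_NOTIFY_HANDLER, my_notify_handler, device);
 *
 * The struct acpi_os_dpc allocated above is freed by
 * acpi_os_execute_deferred() once the callback has run.
 */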
  665. acpi_status acpi_os_hotplug_execute(acpi_osd_exec_callback function,
  666. void *context)
  667. {
  668. return __acpi_os_execute(0, function, context, 1);
  669. }
  670. void acpi_os_wait_events_complete(void *context)
  671. {
  672. flush_workqueue(kacpid_wq);
  673. flush_workqueue(kacpi_notify_wq);
  674. }
  675. EXPORT_SYMBOL(acpi_os_wait_events_complete);
  676. /*
  677. * Allocate the memory for a spinlock and initialize it.
  678. */
  679. acpi_status acpi_os_create_lock(acpi_spinlock * handle)
  680. {
  681. spin_lock_init(*handle);
  682. return AE_OK;
  683. }
  684. /*
  685. * Deallocate the memory for a spinlock.
  686. */
  687. void acpi_os_delete_lock(acpi_spinlock handle)
  688. {
  689. return;
  690. }
  691. acpi_status
  692. acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
  693. {
  694. struct semaphore *sem = NULL;
  695. sem = acpi_os_allocate(sizeof(struct semaphore));
  696. if (!sem)
  697. return AE_NO_MEMORY;
  698. memset(sem, 0, sizeof(struct semaphore));
  699. sema_init(sem, initial_units);
  700. *handle = (acpi_handle *) sem;
  701. ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
  702. *handle, initial_units));
  703. return AE_OK;
  704. }
  705. /*
  706. * TODO: A better way to delete semaphores? Linux doesn't have a
  707. * 'delete_semaphore()' function -- may result in an invalid
  708. * pointer dereference for non-synchronized consumers. Should
  709. * we at least check for blocked threads and signal/cancel them?
  710. */
  711. acpi_status acpi_os_delete_semaphore(acpi_handle handle)
  712. {
  713. struct semaphore *sem = (struct semaphore *)handle;
  714. if (!sem)
  715. return AE_BAD_PARAMETER;
  716. ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));
  717. BUG_ON(!list_empty(&sem->wait_list));
  718. kfree(sem);
  719. sem = NULL;
  720. return AE_OK;
  721. }
  722. /*
  723. * TODO: Support for units > 1?
  724. */
  725. acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
  726. {
  727. acpi_status status = AE_OK;
  728. struct semaphore *sem = (struct semaphore *)handle;
  729. long jiffies;
  730. int ret = 0;
  731. if (!sem || (units < 1))
  732. return AE_BAD_PARAMETER;
  733. if (units > 1)
  734. return AE_SUPPORT;
  735. ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
  736. handle, units, timeout));
  737. if (timeout == ACPI_WAIT_FOREVER)
  738. jiffies = MAX_SCHEDULE_TIMEOUT;
  739. else
  740. jiffies = msecs_to_jiffies(timeout);
  741. ret = down_timeout(sem, jiffies);
  742. if (ret)
  743. status = AE_TIME;
  744. if (ACPI_FAILURE(status)) {
  745. ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
  746. "Failed to acquire semaphore[%p|%d|%d], %s",
  747. handle, units, timeout,
  748. acpi_format_exception(status)));
  749. } else {
  750. ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
  751. "Acquired semaphore[%p|%d|%d]", handle,
  752. units, timeout));
  753. }
  754. return status;
  755. }
  756. /*
  757. * TODO: Support for units > 1?
  758. */
  759. acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
  760. {
  761. struct semaphore *sem = (struct semaphore *)handle;
  762. if (!sem || (units < 1))
  763. return AE_BAD_PARAMETER;
  764. if (units > 1)
  765. return AE_SUPPORT;
  766. ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
  767. units));
  768. up(sem);
  769. return AE_OK;
  770. }
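/*
 * Sketch of how the ACPICA core is expected to use these semaphore
 * services (illustrative only):
 *
 *	acpi_handle sem;
 *
 *	acpi_os_create_semaphore(1, 1, &sem);
 *	if (acpi_os_wait_semaphore(sem, 1, 100) == AE_OK) {	// 100 ms timeout
 *		// ... protected section ...
 *		acpi_os_signal_semaphore(sem, 1);
 *	}
 *	acpi_os_delete_semaphore(sem);
 */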
  771. #ifdef ACPI_FUTURE_USAGE
  772. u32 acpi_os_get_line(char *buffer)
  773. {
  774. #ifdef ENABLE_DEBUGGER
  775. if (acpi_in_debugger) {
  776. u32 chars;
  777. kdb_read(buffer, sizeof(line_buf));
  778. /* remove the CR kdb includes */
  779. chars = strlen(buffer) - 1;
  780. buffer[chars] = '\0';
  781. }
  782. #endif
  783. return 0;
  784. }
  785. #endif /* ACPI_FUTURE_USAGE */
  786. acpi_status acpi_os_signal(u32 function, void *info)
  787. {
  788. switch (function) {
  789. case ACPI_SIGNAL_FATAL:
  790. printk(KERN_ERR PREFIX "Fatal opcode executed\n");
  791. break;
  792. case ACPI_SIGNAL_BREAKPOINT:
  793. /*
  794. * AML Breakpoint
  795. * ACPI spec. says to treat it as a NOP unless
  796. * you are debugging. So if/when we integrate
  797. * AML debugger into the kernel debugger its
  798. * hook will go here. But until then it is
  799. * not useful to print anything on breakpoints.
  800. */
  801. break;
  802. default:
  803. break;
  804. }
  805. return AE_OK;
  806. }
  807. static int __init acpi_os_name_setup(char *str)
  808. {
  809. char *p = acpi_os_name;
  810. int count = ACPI_MAX_OVERRIDE_LEN - 1;
  811. if (!str || !*str)
  812. return 0;
  813. for (; count-- && str && *str; str++) {
  814. if (isalnum(*str) || *str == ' ' || *str == ':')
  815. *p++ = *str;
  816. else if (*str == '\'' || *str == '"')
  817. continue;
  818. else
  819. break;
  820. }
  821. *p = 0;
  822. return 1;
  823. }
  824. __setup("acpi_os_name=", acpi_os_name_setup);
  825. static void __init set_osi_linux(unsigned int enable)
  826. {
  827. if (osi_linux.enable != enable) {
  828. osi_linux.enable = enable;
  829. printk(KERN_NOTICE PREFIX "%sed _OSI(Linux)\n",
  830. enable ? "Add": "Delet");
  831. }
  832. return;
  833. }
  834. static void __init acpi_cmdline_osi_linux(unsigned int enable)
  835. {
  836. osi_linux.cmdline = 1; /* cmdline set the default */
  837. set_osi_linux(enable);
  838. return;
  839. }
  840. void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d)
  841. {
  842. osi_linux.dmi = 1; /* DMI knows that this box asks OSI(Linux) */
  843. printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
  844. if (enable == -1)
  845. return;
  846. osi_linux.known = 1; /* DMI knows which OSI(Linux) default needed */
  847. set_osi_linux(enable);
  848. return;
  849. }
  850. /*
  851. * Modify the list of "OS Interfaces" reported to BIOS via _OSI
  852. *
  853. * empty string disables _OSI
  854. * string starting with '!' disables that string
  855. * otherwise string is added to list, augmenting built-in strings
  856. */
  857. int __init acpi_osi_setup(char *str)
  858. {
  859. if (str == NULL || *str == '\0') {
  860. printk(KERN_INFO PREFIX "_OSI method disabled\n");
  861. acpi_gbl_create_osi_method = FALSE;
  862. } else if (!strcmp("!Linux", str)) {
  863. acpi_cmdline_osi_linux(0); /* !enable */
  864. } else if (*str == '!') {
  865. if (acpi_osi_invalidate(++str) == AE_OK)
  866. printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str);
  867. } else if (!strcmp("Linux", str)) {
  868. acpi_cmdline_osi_linux(1); /* enable */
  869. } else if (*osi_additional_string == '\0') {
  870. strncpy(osi_additional_string, str, OSI_STRING_LENGTH_MAX);
  871. printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str);
  872. }
  873. return 1;
  874. }
  875. __setup("acpi_osi=", acpi_osi_setup);
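/*
 * Illustrative boot parameter usage:
 *
 *	acpi_osi=			disable the _OSI method entirely
 *	acpi_osi=Linux			answer TRUE to _OSI(Linux)
 *	acpi_osi=!Linux			answer FALSE to _OSI(Linux)
 *	acpi_osi="!Windows 2006"	delete a built-in _OSI string
 *	acpi_osi=MyString		add one extra string (MyString is hypothetical)
 */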
  876. /* enable serialization to combat AE_ALREADY_EXISTS errors */
  877. static int __init acpi_serialize_setup(char *str)
  878. {
  879. printk(KERN_INFO PREFIX "serialize enabled\n");
  880. acpi_gbl_all_methods_serialized = TRUE;
  881. return 1;
  882. }
  883. __setup("acpi_serialize", acpi_serialize_setup);
  884. /*
  885. * Wake and Run-Time GPES are expected to be separate.
  886. * We disable wake-GPEs at run-time to prevent spurious
  887. * interrupts.
  888. *
  889. * However, if a system exists that shares Wake and
  890. * Run-time events on the same GPE this flag is available
  891. * to tell Linux to keep the wake-time GPEs enabled at run-time.
  892. */
  893. static int __init acpi_wake_gpes_always_on_setup(char *str)
  894. {
  895. printk(KERN_INFO PREFIX "wake GPEs not disabled\n");
  896. acpi_gbl_leave_wake_gpes_disabled = FALSE;
  897. return 1;
  898. }
  899. __setup("acpi_wake_gpes_always_on", acpi_wake_gpes_always_on_setup);
  900. /* Check for resource interference between native drivers and ACPI
  901. * OperationRegions (SystemIO and System Memory only).
  902. * IO ports and memory declared in ACPI might be used by the ACPI subsystem
  903. * in arbitrary AML code and can interfere with legacy drivers.
  904. * acpi_enforce_resources= can be set to:
  905. *
  906. * - strict (default) (2)
  907. * -> a driver trying to access these resources will not load
  908. * - lax (1)
  909. * -> a driver trying to access these resources will load, but you
  910. * get a kernel warning that something might go wrong...
  911. *
  912. * - no (0)
  913. * -> ACPI Operation Region resources will not be registered
  914. *
  915. */
  916. #define ENFORCE_RESOURCES_STRICT 2
  917. #define ENFORCE_RESOURCES_LAX 1
  918. #define ENFORCE_RESOURCES_NO 0
  919. static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
  920. static int __init acpi_enforce_resources_setup(char *str)
  921. {
  922. if (str == NULL || *str == '\0')
  923. return 0;
  924. if (!strcmp("strict", str))
  925. acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
  926. else if (!strcmp("lax", str))
  927. acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
  928. else if (!strcmp("no", str))
  929. acpi_enforce_resources = ENFORCE_RESOURCES_NO;
  930. return 1;
  931. }
  932. __setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
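/*
 * Illustrative boot parameter usage:
 *
 *	acpi_enforce_resources=strict	conflicting native drivers will not load (default)
 *	acpi_enforce_resources=lax	conflicting native drivers load, with a warning
 *	acpi_enforce_resources=no	ACPI OperationRegion resources are not registered
 */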
  933. /* Check for resource conflicts between ACPI OperationRegions and native
  934. * drivers */
  935. int acpi_check_resource_conflict(const struct resource *res)
  936. {
  937. struct acpi_res_list *res_list_elem;
  938. int ioport;
  939. int clash = 0;
  940. if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
  941. return 0;
  942. if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
  943. return 0;
  944. ioport = res->flags & IORESOURCE_IO;
  945. spin_lock(&acpi_res_lock);
  946. list_for_each_entry(res_list_elem, &resource_list_head,
  947. resource_list) {
  948. if (ioport && (res_list_elem->resource_type
  949. != ACPI_ADR_SPACE_SYSTEM_IO))
  950. continue;
  951. if (!ioport && (res_list_elem->resource_type
  952. != ACPI_ADR_SPACE_SYSTEM_MEMORY))
  953. continue;
  954. if (res->end < res_list_elem->start
  955. || res_list_elem->end < res->start)
  956. continue;
  957. clash = 1;
  958. break;
  959. }
  960. spin_unlock(&acpi_res_lock);
  961. if (clash) {
  962. if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
  963. printk(KERN_WARNING "ACPI: resource %s %pR"
  964. " conflicts with ACPI region %s %pR\n",
  965. res->name, res, res_list_elem->name,
  966. res_list_elem);
  967. if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
  968. printk(KERN_NOTICE "ACPI: This conflict may"
  969. " cause random problems and system"
  970. " instability\n");
  971. printk(KERN_INFO "ACPI: If an ACPI driver is available"
  972. " for this device, you should use it instead of"
  973. " the native driver\n");
  974. }
  975. if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
  976. return -EBUSY;
  977. }
  978. return 0;
  979. }
  980. EXPORT_SYMBOL(acpi_check_resource_conflict);
  981. int acpi_check_region(resource_size_t start, resource_size_t n,
  982. const char *name)
  983. {
  984. struct resource res = {
  985. .start = start,
  986. .end = start + n - 1,
  987. .name = name,
  988. .flags = IORESOURCE_IO,
  989. };
  990. return acpi_check_resource_conflict(&res);
  991. }
  992. EXPORT_SYMBOL(acpi_check_region);
  993. int acpi_check_mem_region(resource_size_t start, resource_size_t n,
  994. const char *name)
  995. {
  996. struct resource res = {
  997. .start = start,
  998. .end = start + n - 1,
  999. .name = name,
  1000. .flags = IORESOURCE_MEM,
  1001. };
  1002. return acpi_check_resource_conflict(&res);
  1003. }
  1004. EXPORT_SYMBOL(acpi_check_mem_region);
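/*
 * Usage sketch for a native driver (hypothetical probe code): check for a
 * conflict with an ACPI OperationRegion before claiming a legacy I/O range.
 *
 *	if (acpi_check_region(io_base, io_len, "mydriver"))
 *		return -EBUSY;		// strict mode and the region is used by AML
 *	if (!request_region(io_base, io_len, "mydriver"))
 *		return -EBUSY;
 */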
  1005. /*
  1006. * Let drivers know whether the resource checks are effective
  1007. */
  1008. int acpi_resources_are_enforced(void)
  1009. {
  1010. return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT;
  1011. }
  1012. EXPORT_SYMBOL(acpi_resources_are_enforced);
  1013. /*
  1014. * Acquire a spinlock.
  1015. *
  1016. * handle is a pointer to the spinlock_t.
  1017. */
  1018. acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
  1019. {
  1020. acpi_cpu_flags flags;
  1021. spin_lock_irqsave(lockp, flags);
  1022. return flags;
  1023. }
  1024. /*
  1025. * Release a spinlock. See above.
  1026. */
  1027. void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
  1028. {
  1029. spin_unlock_irqrestore(lockp, flags);
  1030. }
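/*
 * Typical pairing in the ACPICA core (sketch):
 *
 *	acpi_cpu_flags flags = acpi_os_acquire_lock(lock);
 *	// ... critical section ...
 *	acpi_os_release_lock(lock, flags);
 */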
  1031. #ifndef ACPI_USE_LOCAL_CACHE
  1032. /*******************************************************************************
  1033. *
  1034. * FUNCTION: acpi_os_create_cache
  1035. *
  1036. * PARAMETERS: name - Ascii name for the cache
  1037. * size - Size of each cached object
  1038. * depth - Maximum depth of the cache (in objects) <ignored>
  1039. * cache - Where the new cache object is returned
  1040. *
  1041. * RETURN: status
  1042. *
  1043. * DESCRIPTION: Create a cache object
  1044. *
  1045. ******************************************************************************/
  1046. acpi_status
  1047. acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
  1048. {
  1049. *cache = kmem_cache_create(name, size, 0, 0, NULL);
  1050. if (*cache == NULL)
  1051. return AE_ERROR;
  1052. else
  1053. return AE_OK;
  1054. }
  1055. /*******************************************************************************
  1056. *
  1057. * FUNCTION: acpi_os_purge_cache
  1058. *
  1059. * PARAMETERS: Cache - Handle to cache object
  1060. *
  1061. * RETURN: Status
  1062. *
  1063. * DESCRIPTION: Free all objects within the requested cache.
  1064. *
  1065. ******************************************************************************/
  1066. acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
  1067. {
  1068. kmem_cache_shrink(cache);
  1069. return (AE_OK);
  1070. }
  1071. /*******************************************************************************
  1072. *
  1073. * FUNCTION: acpi_os_delete_cache
  1074. *
  1075. * PARAMETERS: Cache - Handle to cache object
  1076. *
  1077. * RETURN: Status
  1078. *
  1079. * DESCRIPTION: Free all objects within the requested cache and delete the
  1080. * cache object.
  1081. *
  1082. ******************************************************************************/
  1083. acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
  1084. {
  1085. kmem_cache_destroy(cache);
  1086. return (AE_OK);
  1087. }
  1088. /*******************************************************************************
  1089. *
  1090. * FUNCTION: acpi_os_release_object
  1091. *
  1092. * PARAMETERS: Cache - Handle to cache object
  1093. * Object - The object to be released
  1094. *
  1095. * RETURN: None
  1096. *
  1097. * DESCRIPTION: Release an object to the specified cache. If cache is full,
  1098. * the object is deleted.
  1099. *
  1100. ******************************************************************************/
  1101. acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
  1102. {
  1103. kmem_cache_free(cache, object);
  1104. return (AE_OK);
  1105. }
  1106. /******************************************************************************
  1107. *
  1108. * FUNCTION: acpi_os_validate_interface
  1109. *
  1110. * PARAMETERS: interface - Requested interface to be validated
  1111. *
  1112. * RETURN: AE_OK if interface is supported, AE_SUPPORT otherwise
  1113. *
  1114. * DESCRIPTION: Match an interface string to the interfaces supported by the
  1115. * host. Strings originate from an AML call to the _OSI method.
  1116. *
  1117. *****************************************************************************/
  1118. acpi_status
  1119. acpi_os_validate_interface (char *interface)
  1120. {
  1121. if (!strncmp(osi_additional_string, interface, OSI_STRING_LENGTH_MAX))
  1122. return AE_OK;
  1123. if (!strcmp("Linux", interface)) {
  1124. printk(KERN_NOTICE PREFIX
  1125. "BIOS _OSI(Linux) query %s%s\n",
  1126. osi_linux.enable ? "honored" : "ignored",
  1127. osi_linux.cmdline ? " via cmdline" :
  1128. osi_linux.dmi ? " via DMI" : "");
  1129. if (osi_linux.enable)
  1130. return AE_OK;
  1131. }
  1132. return AE_SUPPORT;
  1133. }
  1134. static inline int acpi_res_list_add(struct acpi_res_list *res)
  1135. {
  1136. struct acpi_res_list *res_list_elem;
  1137. list_for_each_entry(res_list_elem, &resource_list_head,
  1138. resource_list) {
  1139. if (res->resource_type == res_list_elem->resource_type &&
  1140. res->start == res_list_elem->start &&
  1141. res->end == res_list_elem->end) {
  1142. /*
  1143. * The Region(addr,len) already exist in the list,
  1144. * just increase the count
  1145. */
  1146. res_list_elem->count++;
  1147. return 0;
  1148. }
  1149. }
  1150. res->count = 1;
  1151. list_add(&res->resource_list, &resource_list_head);
  1152. return 1;
  1153. }
  1154. static inline void acpi_res_list_del(struct acpi_res_list *res)
  1155. {
  1156. struct acpi_res_list *res_list_elem;
  1157. list_for_each_entry(res_list_elem, &resource_list_head,
  1158. resource_list) {
  1159. if (res->resource_type == res_list_elem->resource_type &&
  1160. res->start == res_list_elem->start &&
  1161. res->end == res_list_elem->end) {
  1162. /*
  1163. * If the res count is decreased to 0,
  1164. * remove and free it
  1165. */
  1166. if (--res_list_elem->count == 0) {
  1167. list_del(&res_list_elem->resource_list);
  1168. kfree(res_list_elem);
  1169. }
  1170. return;
  1171. }
  1172. }
  1173. }
  1174. acpi_status
  1175. acpi_os_invalidate_address(
  1176. u8 space_id,
  1177. acpi_physical_address address,
  1178. acpi_size length)
  1179. {
  1180. struct acpi_res_list res;
  1181. switch (space_id) {
  1182. case ACPI_ADR_SPACE_SYSTEM_IO:
  1183. case ACPI_ADR_SPACE_SYSTEM_MEMORY:
  1184. /* Only interference checks against SystemIO and SystemMemory
  1185. are needed */
  1186. res.start = address;
  1187. res.end = address + length - 1;
  1188. res.resource_type = space_id;
  1189. spin_lock(&acpi_res_lock);
  1190. acpi_res_list_del(&res);
  1191. spin_unlock(&acpi_res_lock);
  1192. break;
  1193. case ACPI_ADR_SPACE_PCI_CONFIG:
  1194. case ACPI_ADR_SPACE_EC:
  1195. case ACPI_ADR_SPACE_SMBUS:
  1196. case ACPI_ADR_SPACE_CMOS:
  1197. case ACPI_ADR_SPACE_PCI_BAR_TARGET:
  1198. case ACPI_ADR_SPACE_DATA_TABLE:
  1199. case ACPI_ADR_SPACE_FIXED_HARDWARE:
  1200. break;
  1201. }
  1202. return AE_OK;
  1203. }
  1204. /******************************************************************************
  1205. *
  1206. * FUNCTION: acpi_os_validate_address
  1207. *
  1208. * PARAMETERS: space_id - ACPI space ID
  1209. * address - Physical address
  1210. * length - Address length
  1211. *
  1212. * RETURN: AE_OK if address/length is valid for the space_id. Otherwise,
  1213. * should return AE_AML_ILLEGAL_ADDRESS.
  1214. *
  1215. * DESCRIPTION: Validate a system address via the host OS. Used to validate
  1216. * the addresses accessed by AML operation regions.
  1217. *
  1218. *****************************************************************************/
  1219. acpi_status
  1220. acpi_os_validate_address (
  1221. u8 space_id,
  1222. acpi_physical_address address,
  1223. acpi_size length,
  1224. char *name)
  1225. {
  1226. struct acpi_res_list *res;
  1227. int added;
  1228. if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
  1229. return AE_OK;
  1230. switch (space_id) {
  1231. case ACPI_ADR_SPACE_SYSTEM_IO:
  1232. case ACPI_ADR_SPACE_SYSTEM_MEMORY:
  1233. /* Only interference checks against SystemIO and SystemMemory
  1234. are needed */
  1235. res = kzalloc(sizeof(struct acpi_res_list), GFP_KERNEL);
  1236. if (!res)
  1237. return AE_OK;
  1239. /* ACPI names are fixed at 4 bytes; use strlcpy to guarantee termination */
  1239. strlcpy(res->name, name, 5);
  1240. res->start = address;
  1241. res->end = address + length - 1;
  1242. res->resource_type = space_id;
  1243. spin_lock(&acpi_res_lock);
  1244. added = acpi_res_list_add(res);
  1245. spin_unlock(&acpi_res_lock);
  1246. pr_debug("%s %s resource: start: 0x%llx, end: 0x%llx, "
  1247. "name: %s\n", added ? "Added" : "Already exist",
  1248. (space_id == ACPI_ADR_SPACE_SYSTEM_IO)
  1249. ? "SystemIO" : "System Memory",
  1250. (unsigned long long)res->start,
  1251. (unsigned long long)res->end,
  1252. res->name);
  1253. if (!added)
  1254. kfree(res);
  1255. break;
  1256. case ACPI_ADR_SPACE_PCI_CONFIG:
  1257. case ACPI_ADR_SPACE_EC:
  1258. case ACPI_ADR_SPACE_SMBUS:
  1259. case ACPI_ADR_SPACE_CMOS:
  1260. case ACPI_ADR_SPACE_PCI_BAR_TARGET:
  1261. case ACPI_ADR_SPACE_DATA_TABLE:
  1262. case ACPI_ADR_SPACE_FIXED_HARDWARE:
  1263. break;
  1264. }
  1265. return AE_OK;
  1266. }
  1267. #endif