evgpeblk.c

/******************************************************************************
 *
 * Module Name: evgpeblk - GPE block creation and initialization.
 *
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2010, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"
#include "acnamesp.h"

#define _COMPONENT          ACPI_EVENTS
ACPI_MODULE_NAME("evgpeblk")
/* Local prototypes */

static acpi_status
acpi_ev_save_method_info(acpi_handle obj_handle,
                         u32 level, void *obj_desc, void **return_value);

static acpi_status
acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
                          u32 level, void *info, void **return_value);

static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number);

static acpi_status
acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt);

static acpi_status
acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
                          u32 interrupt_number);

static acpi_status
acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block);
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_valid_gpe_event
 *
 * PARAMETERS:  gpe_event_info  - Info for this GPE
 *
 * RETURN:      TRUE if the gpe_event is valid
 *
 * DESCRIPTION: Validate a GPE event. DO NOT CALL FROM INTERRUPT LEVEL.
 *              Should be called only when the GPE lists are semaphore locked
 *              and not subject to change.
 *
 ******************************************************************************/

u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info)
{
    struct acpi_gpe_xrupt_info *gpe_xrupt_block;
    struct acpi_gpe_block_info *gpe_block;

    ACPI_FUNCTION_ENTRY();

    /* No need for spin lock since we are not changing any list elements */

    /* Walk the GPE interrupt levels */

    gpe_xrupt_block = acpi_gbl_gpe_xrupt_list_head;
    while (gpe_xrupt_block) {
        gpe_block = gpe_xrupt_block->gpe_block_list_head;

        /* Walk the GPE blocks on this interrupt level */

        while (gpe_block) {
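            /*
             * The event is valid if its pointer falls within this block's
             * event_info array, which holds (register_count * 8) entries,
             * one per GPE in the block.
             */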
            if ((&gpe_block->event_info[0] <= gpe_event_info) &&
                (&gpe_block->event_info[((acpi_size) gpe_block->register_count) * 8] >
                 gpe_event_info)) {
                return (TRUE);
            }

            gpe_block = gpe_block->next;
        }

        gpe_xrupt_block = gpe_xrupt_block->next;
    }

    return (FALSE);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_walk_gpe_list
 *
 * PARAMETERS:  gpe_walk_callback   - Routine called for each GPE block
 *              context             - Value passed to callback
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Walk the GPE lists.
 *
 ******************************************************************************/

acpi_status
acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context)
{
    struct acpi_gpe_block_info *gpe_block;
    struct acpi_gpe_xrupt_info *gpe_xrupt_info;
    acpi_status status = AE_OK;
    acpi_cpu_flags flags;

    ACPI_FUNCTION_TRACE(ev_walk_gpe_list);

    flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

    /* Walk the interrupt level descriptor list */

    gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
    while (gpe_xrupt_info) {

        /* Walk all Gpe Blocks attached to this interrupt level */

        gpe_block = gpe_xrupt_info->gpe_block_list_head;
        while (gpe_block) {

            /* One callback per GPE block */

            status = gpe_walk_callback(gpe_xrupt_info, gpe_block, context);
            if (ACPI_FAILURE(status)) {
                if (status == AE_CTRL_END) {    /* Callback abort */
                    status = AE_OK;
                }
                goto unlock_and_exit;
            }

            gpe_block = gpe_block->next;
        }

        gpe_xrupt_info = gpe_xrupt_info->next;
    }

unlock_and_exit:
    acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
    return_ACPI_STATUS(status);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_delete_gpe_handlers
 *
 * PARAMETERS:  gpe_xrupt_info      - GPE Interrupt info
 *              gpe_block           - Gpe Block info
 *              context             - Not used
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Delete all Handler objects found in the GPE data structs.
 *              Used only prior to termination.
 *
 ******************************************************************************/

acpi_status
acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
                            struct acpi_gpe_block_info *gpe_block,
                            void *context)
{
    struct acpi_gpe_event_info *gpe_event_info;
    u32 i;
    u32 j;

    ACPI_FUNCTION_TRACE(ev_delete_gpe_handlers);

    /* Examine each GPE Register within the block */

    for (i = 0; i < gpe_block->register_count; i++) {

        /* Now look at the individual GPEs in this byte register */

        for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
            gpe_event_info =
                &gpe_block->event_info[((acpi_size) i * ACPI_GPE_REGISTER_WIDTH) + j];

            if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
                ACPI_GPE_DISPATCH_HANDLER) {
                ACPI_FREE(gpe_event_info->dispatch.handler);
                gpe_event_info->dispatch.handler = NULL;
                gpe_event_info->flags &= ~ACPI_GPE_DISPATCH_MASK;
            }
        }
    }

    return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_save_method_info
 *
 * PARAMETERS:  Callback from walk_namespace
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
 *              control method under the _GPE portion of the namespace.
 *              Extract the name and GPE type from the object, saving this
 *              information for quick lookup during GPE dispatch.
 *
 *              The name of each GPE control method is of the form:
 *              "_Lxx" or "_Exx"
 *              Where:
 *                  L  - means that the GPE is level triggered
 *                  E  - means that the GPE is edge triggered
 *                  xx - is the GPE number [in HEX]
 *
 ******************************************************************************/

static acpi_status
acpi_ev_save_method_info(acpi_handle obj_handle,
                         u32 level, void *obj_desc, void **return_value)
{
    struct acpi_gpe_block_info *gpe_block = (void *)obj_desc;
    struct acpi_gpe_event_info *gpe_event_info;
    u32 gpe_number;
    char name[ACPI_NAME_SIZE + 1];
    u8 type;

    ACPI_FUNCTION_TRACE(ev_save_method_info);

    /*
     * _Lxx and _Exx GPE method support
     *
     * 1) Extract the name from the object and convert to a string
     */
    ACPI_MOVE_32_TO_32(name,
                       &((struct acpi_namespace_node *)obj_handle)->name.integer);
    name[ACPI_NAME_SIZE] = 0;

    /*
     * 2) Edge/Level determination is based on the 2nd character
     *    of the method name
     *
     * NOTE: Default GPE type is RUNTIME. May be changed later to WAKE
     * if a _PRW object is found that points to this GPE.
     */
    switch (name[1]) {
    case 'L':
        type = ACPI_GPE_LEVEL_TRIGGERED;
        break;

    case 'E':
        type = ACPI_GPE_EDGE_TRIGGERED;
        break;

    default:
        /* Unknown method type, just ignore it! */

        ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
                          "Ignoring unknown GPE method type: %s "
                          "(name not of form _Lxx or _Exx)", name));
        return_ACPI_STATUS(AE_OK);
    }
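    /*
     * Example (hypothetical): a method named "_L1A" is classified above as
     * level triggered, and the conversion below yields gpe_number = 0x1A.
     */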
    /* Convert the last two characters of the name to the GPE Number */

    gpe_number = ACPI_STRTOUL(&name[2], NULL, 16);
    if (gpe_number == ACPI_UINT32_MAX) {

        /* Conversion failed; invalid method, just ignore it */

        ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
                          "Could not extract GPE number from name: %s "
                          "(name is not of form _Lxx or _Exx)", name));
        return_ACPI_STATUS(AE_OK);
    }

    /* Ensure that we have a valid GPE number for this GPE block */

    if ((gpe_number < gpe_block->block_base_number) ||
        (gpe_number >= (gpe_block->block_base_number +
                        (gpe_block->register_count * 8)))) {
        /*
         * Not valid for this GPE block, just ignore it. However, it may be
         * valid for a different GPE block, since GPE0 and GPE1 methods both
         * appear under \_GPE.
         */
        return_ACPI_STATUS(AE_OK);
    }

    /*
     * Now we can add this information to the gpe_event_info block for use
     * during dispatch of this GPE.
     */
    gpe_event_info =
        &gpe_block->event_info[gpe_number - gpe_block->block_base_number];

    gpe_event_info->flags = (u8) (type | ACPI_GPE_DISPATCH_METHOD);

    gpe_event_info->dispatch.method_node =
        (struct acpi_namespace_node *)obj_handle;

    ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
                      "Registered GPE method %s as GPE number 0x%.2X\n",
                      name, gpe_number));
    return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_match_prw_and_gpe
 *
 * PARAMETERS:  Callback from walk_namespace
 *
 * RETURN:      Status. NOTE: We ignore errors so that the _PRW walk is
 *              not aborted on a single _PRW failure.
 *
 * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
 *              Device. Run the _PRW method. If present, extract the GPE
 *              number and mark the GPE as a WAKE GPE.
 *
 ******************************************************************************/

static acpi_status
acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
                          u32 level, void *info, void **return_value)
{
    struct acpi_gpe_walk_info *gpe_info = (void *)info;
    struct acpi_namespace_node *gpe_device;
    struct acpi_gpe_block_info *gpe_block;
    struct acpi_namespace_node *target_gpe_device;
    struct acpi_gpe_event_info *gpe_event_info;
    union acpi_operand_object *pkg_desc;
    union acpi_operand_object *obj_desc;
    u32 gpe_number;
    acpi_status status;

    ACPI_FUNCTION_TRACE(ev_match_prw_and_gpe);

    /* Check for a _PRW method under this device */

    status = acpi_ut_evaluate_object(obj_handle, METHOD_NAME__PRW,
                                     ACPI_BTYPE_PACKAGE, &pkg_desc);
    if (ACPI_FAILURE(status)) {

        /* Ignore all errors from _PRW, we don't want to abort the subsystem */

        return_ACPI_STATUS(AE_OK);
    }

    /* The returned _PRW package must have at least two elements */

    if (pkg_desc->package.count < 2) {
        goto cleanup;
    }

    /* Extract pointers from the input context */

    gpe_device = gpe_info->gpe_device;
    gpe_block = gpe_info->gpe_block;

    /*
     * The _PRW object must return a package, we are only interested in the
     * first element
     */
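    /*
     * For reference, the first _PRW element takes one of the two forms
     * handled below (hypothetical ASL, device names are examples only):
     *
     *   Name (_PRW, Package () {0x0B, 0x03})
     *       - Integer: GPE 0x0B within the FADT-defined GPE blocks
     *
     *   Name (_PRW, Package () {Package () {\_SB.GPE2, 0x01}, 0x03})
     *       - Reference to a GPE block device plus a GPE index within it
     */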
    obj_desc = pkg_desc->package.elements[0];
    if (obj_desc->common.type == ACPI_TYPE_INTEGER) {

        /* Use FADT-defined GPE device (from definition of _PRW) */

        target_gpe_device = acpi_gbl_fadt_gpe_device;

        /* Integer is the GPE number in the FADT described GPE blocks */

        gpe_number = (u32) obj_desc->integer.value;
    } else if (obj_desc->common.type == ACPI_TYPE_PACKAGE) {

        /* Package contains a GPE reference and GPE number within a GPE block */

        if ((obj_desc->package.count < 2) ||
            ((obj_desc->package.elements[0])->common.type !=
             ACPI_TYPE_LOCAL_REFERENCE) ||
            ((obj_desc->package.elements[1])->common.type !=
             ACPI_TYPE_INTEGER)) {
            goto cleanup;
        }

        /* Get GPE block reference and decode */

        target_gpe_device = obj_desc->package.elements[0]->reference.node;
        gpe_number = (u32) obj_desc->package.elements[1]->integer.value;
    } else {
        /* Unknown type, just ignore it */

        goto cleanup;
    }

    /*
     * Is this GPE within this block?
     *
     * TRUE if and only if these conditions are true:
     *     1) The GPE devices match.
     *     2) The GPE index(number) is within the range of the Gpe Block
     *        associated with the GPE device.
     */
    if ((gpe_device == target_gpe_device) &&
        (gpe_number >= gpe_block->block_base_number) &&
        (gpe_number < gpe_block->block_base_number +
         (gpe_block->register_count * 8))) {
        gpe_event_info =
            &gpe_block->event_info[gpe_number - gpe_block->block_base_number];

        gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
    }

cleanup:
    acpi_ut_remove_reference(pkg_desc);
    return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_get_gpe_xrupt_block
 *
 * PARAMETERS:  interrupt_number    - Interrupt for a GPE block
 *
 * RETURN:      A GPE interrupt block
 *
 * DESCRIPTION: Get or Create a GPE interrupt block. There is one interrupt
 *              block per unique interrupt level used for GPEs. Should be
 *              called only when the GPE lists are semaphore locked and not
 *              subject to change.
 *
 ******************************************************************************/

static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number)
{
    struct acpi_gpe_xrupt_info *next_gpe_xrupt;
    struct acpi_gpe_xrupt_info *gpe_xrupt;
    acpi_status status;
    acpi_cpu_flags flags;

    ACPI_FUNCTION_TRACE(ev_get_gpe_xrupt_block);

    /* No need for lock since we are not changing any list elements here */

    next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
    while (next_gpe_xrupt) {
        if (next_gpe_xrupt->interrupt_number == interrupt_number) {
            return_PTR(next_gpe_xrupt);
        }

        next_gpe_xrupt = next_gpe_xrupt->next;
    }

    /* Not found, must allocate a new xrupt descriptor */

    gpe_xrupt = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_xrupt_info));
    if (!gpe_xrupt) {
        return_PTR(NULL);
    }

    gpe_xrupt->interrupt_number = interrupt_number;

    /* Install new interrupt descriptor with spin lock */

    flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
    if (acpi_gbl_gpe_xrupt_list_head) {
        next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
        while (next_gpe_xrupt->next) {
            next_gpe_xrupt = next_gpe_xrupt->next;
        }

        next_gpe_xrupt->next = gpe_xrupt;
        gpe_xrupt->previous = next_gpe_xrupt;
    } else {
        acpi_gbl_gpe_xrupt_list_head = gpe_xrupt;
    }
    acpi_os_release_lock(acpi_gbl_gpe_lock, flags);

    /* Install new interrupt handler if not SCI_INT */

    if (interrupt_number != acpi_gbl_FADT.sci_interrupt) {
        status = acpi_os_install_interrupt_handler(interrupt_number,
                                                   acpi_ev_gpe_xrupt_handler,
                                                   gpe_xrupt);
        if (ACPI_FAILURE(status)) {
            ACPI_ERROR((AE_INFO,
                        "Could not install GPE interrupt handler at level 0x%X",
                        interrupt_number));
            return_PTR(NULL);
        }
    }

    return_PTR(gpe_xrupt);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_delete_gpe_xrupt
 *
 * PARAMETERS:  gpe_xrupt       - A GPE interrupt info block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Remove and free a gpe_xrupt block. Remove an associated
 *              interrupt handler if not the SCI interrupt.
 *
 ******************************************************************************/

static acpi_status
acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
{
    acpi_status status;
    acpi_cpu_flags flags;

    ACPI_FUNCTION_TRACE(ev_delete_gpe_xrupt);

    /* We never want to remove the SCI interrupt handler */

    if (gpe_xrupt->interrupt_number == acpi_gbl_FADT.sci_interrupt) {
        gpe_xrupt->gpe_block_list_head = NULL;
        return_ACPI_STATUS(AE_OK);
    }

    /* Disable this interrupt */

    status = acpi_os_remove_interrupt_handler(gpe_xrupt->interrupt_number,
                                              acpi_ev_gpe_xrupt_handler);
    if (ACPI_FAILURE(status)) {
        return_ACPI_STATUS(status);
    }

    /* Unlink the interrupt block with lock */

    flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
    if (gpe_xrupt->previous) {
        gpe_xrupt->previous->next = gpe_xrupt->next;
    } else {
        /* No previous, update list head */

        acpi_gbl_gpe_xrupt_list_head = gpe_xrupt->next;
    }

    if (gpe_xrupt->next) {
        gpe_xrupt->next->previous = gpe_xrupt->previous;
    }
    acpi_os_release_lock(acpi_gbl_gpe_lock, flags);

    /* Free the block */

    ACPI_FREE(gpe_xrupt);
    return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_install_gpe_block
 *
 * PARAMETERS:  gpe_block           - New GPE block
 *              interrupt_number    - Xrupt to be associated with this
 *                                    GPE block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Install new GPE block with mutex support
 *
 ******************************************************************************/

static acpi_status
acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
                          u32 interrupt_number)
{
    struct acpi_gpe_block_info *next_gpe_block;
    struct acpi_gpe_xrupt_info *gpe_xrupt_block;
    acpi_status status;
    acpi_cpu_flags flags;

    ACPI_FUNCTION_TRACE(ev_install_gpe_block);

    status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
    if (ACPI_FAILURE(status)) {
        return_ACPI_STATUS(status);
    }

    gpe_xrupt_block = acpi_ev_get_gpe_xrupt_block(interrupt_number);
    if (!gpe_xrupt_block) {
        status = AE_NO_MEMORY;
        goto unlock_and_exit;
    }

    /* Install the new block at the end of the list with lock */

    flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
    if (gpe_xrupt_block->gpe_block_list_head) {
        next_gpe_block = gpe_xrupt_block->gpe_block_list_head;
        while (next_gpe_block->next) {
            next_gpe_block = next_gpe_block->next;
        }

        next_gpe_block->next = gpe_block;
        gpe_block->previous = next_gpe_block;
    } else {
        gpe_xrupt_block->gpe_block_list_head = gpe_block;
    }

    gpe_block->xrupt_block = gpe_xrupt_block;
    acpi_os_release_lock(acpi_gbl_gpe_lock, flags);

unlock_and_exit:
    status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
    return_ACPI_STATUS(status);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_delete_gpe_block
 *
 * PARAMETERS:  gpe_block           - Existing GPE block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Remove a GPE block
 *
 ******************************************************************************/

acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block)
{
    acpi_status status;
    acpi_cpu_flags flags;

    ACPI_FUNCTION_TRACE(ev_delete_gpe_block);

    status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
    if (ACPI_FAILURE(status)) {
        return_ACPI_STATUS(status);
    }

    /* Disable all GPEs in this block */

    status = acpi_hw_disable_gpe_block(gpe_block->xrupt_block, gpe_block, NULL);

    if (!gpe_block->previous && !gpe_block->next) {

        /* This is the last gpe_block on this interrupt */

        status = acpi_ev_delete_gpe_xrupt(gpe_block->xrupt_block);
        if (ACPI_FAILURE(status)) {
            goto unlock_and_exit;
        }
    } else {
        /* Remove the block on this interrupt with lock */

        flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
        if (gpe_block->previous) {
            gpe_block->previous->next = gpe_block->next;
        } else {
            gpe_block->xrupt_block->gpe_block_list_head = gpe_block->next;
        }

        if (gpe_block->next) {
            gpe_block->next->previous = gpe_block->previous;
        }
        acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
    }

    acpi_current_gpe_count -= gpe_block->register_count * ACPI_GPE_REGISTER_WIDTH;

    /* Free the gpe_block */

    ACPI_FREE(gpe_block->register_info);
    ACPI_FREE(gpe_block->event_info);
    ACPI_FREE(gpe_block);

unlock_and_exit:
    status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
    return_ACPI_STATUS(status);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_create_gpe_info_blocks
 *
 * PARAMETERS:  gpe_block   - New GPE block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Create the register_info and event_info blocks for this GPE block
 *
 ******************************************************************************/

static acpi_status
acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
{
    struct acpi_gpe_register_info *gpe_register_info = NULL;
    struct acpi_gpe_event_info *gpe_event_info = NULL;
    struct acpi_gpe_event_info *this_event;
    struct acpi_gpe_register_info *this_register;
    u32 i;
    u32 j;
    acpi_status status;

    ACPI_FUNCTION_TRACE(ev_create_gpe_info_blocks);

    /* Allocate the GPE register information block */

    gpe_register_info = ACPI_ALLOCATE_ZEROED((acpi_size) gpe_block->register_count *
                                             sizeof(struct acpi_gpe_register_info));
    if (!gpe_register_info) {
        ACPI_ERROR((AE_INFO,
                    "Could not allocate the GpeRegisterInfo table"));
        return_ACPI_STATUS(AE_NO_MEMORY);
    }

    /*
     * Allocate the GPE event_info block. There are eight distinct GPEs
     * per register. Initialization to zeros is sufficient.
     */
    gpe_event_info = ACPI_ALLOCATE_ZEROED(((acpi_size) gpe_block->register_count *
                                           ACPI_GPE_REGISTER_WIDTH) *
                                          sizeof(struct acpi_gpe_event_info));
    if (!gpe_event_info) {
        ACPI_ERROR((AE_INFO, "Could not allocate the GpeEventInfo table"));
        status = AE_NO_MEMORY;
        goto error_exit;
    }

    /* Save the new Info arrays in the GPE block */

    gpe_block->register_info = gpe_register_info;
    gpe_block->event_info = gpe_event_info;

    /*
     * Initialize the GPE Register and Event structures. A goal of these
     * tables is to hide the fact that there are two separate GPE register
     * sets in a given GPE hardware block, the status registers occupy the
     * first half, and the enable registers occupy the second half.
     */
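    /*
     * Layout example (hypothetical values): for a block at address 0x80 with
     * register_count = 2, the status bytes live at 0x80-0x81 and the enable
     * bytes at 0x82-0x83, as computed by the address math below.
     */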
    this_register = gpe_register_info;
    this_event = gpe_event_info;

    for (i = 0; i < gpe_block->register_count; i++) {

        /* Init the register_info for this GPE register (8 GPEs) */

        this_register->base_gpe_number =
            (u8) (gpe_block->block_base_number + (i * ACPI_GPE_REGISTER_WIDTH));

        this_register->status_address.address =
            gpe_block->block_address.address + i;

        this_register->enable_address.address =
            gpe_block->block_address.address + i + gpe_block->register_count;

        this_register->status_address.space_id = gpe_block->block_address.space_id;
        this_register->enable_address.space_id = gpe_block->block_address.space_id;
        this_register->status_address.bit_width = ACPI_GPE_REGISTER_WIDTH;
        this_register->enable_address.bit_width = ACPI_GPE_REGISTER_WIDTH;
        this_register->status_address.bit_offset = 0;
        this_register->enable_address.bit_offset = 0;

        /* Init the event_info for each GPE within this register */

        for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
            this_event->gpe_number =
                (u8) (this_register->base_gpe_number + j);

            this_event->register_info = this_register;
            this_event++;
        }

        /* Disable all GPEs within this register */

        status = acpi_hw_write(0x00, &this_register->enable_address);
        if (ACPI_FAILURE(status)) {
            goto error_exit;
        }

        /* Clear any pending GPE events within this register */

        status = acpi_hw_write(0xFF, &this_register->status_address);
        if (ACPI_FAILURE(status)) {
            goto error_exit;
        }

        this_register++;
    }

    return_ACPI_STATUS(AE_OK);

error_exit:
    if (gpe_register_info) {
        ACPI_FREE(gpe_register_info);
    }
    if (gpe_event_info) {
        ACPI_FREE(gpe_event_info);
    }

    return_ACPI_STATUS(status);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_create_gpe_block
 *
 * PARAMETERS:  gpe_device              - Handle to the parent GPE block
 *              gpe_block_address       - Address and space_id
 *              register_count          - Number of GPE register pairs in the block
 *              gpe_block_base_number   - Starting GPE number for the block
 *              interrupt_number        - H/W interrupt for the block
 *              return_gpe_block        - Where the new block descriptor is returned
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Create and Install a block of GPE registers. All GPEs within
 *              the block are disabled at exit.
 *              Note: Assumes namespace is locked.
 *
 ******************************************************************************/

acpi_status
acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
                         struct acpi_generic_address *gpe_block_address,
                         u32 register_count,
                         u8 gpe_block_base_number,
                         u32 interrupt_number,
                         struct acpi_gpe_block_info **return_gpe_block)
{
    acpi_status status;
    struct acpi_gpe_block_info *gpe_block;

    ACPI_FUNCTION_TRACE(ev_create_gpe_block);

    if (!register_count) {
        return_ACPI_STATUS(AE_OK);
    }

    /* Allocate a new GPE block */

    gpe_block = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_block_info));
    if (!gpe_block) {
        return_ACPI_STATUS(AE_NO_MEMORY);
    }

    /* Initialize the new GPE block */

    gpe_block->node = gpe_device;
    gpe_block->register_count = register_count;
    gpe_block->block_base_number = gpe_block_base_number;

    ACPI_MEMCPY(&gpe_block->block_address, gpe_block_address,
                sizeof(struct acpi_generic_address));

    /*
     * Create the register_info and event_info sub-structures
     * Note: disables and clears all GPEs in the block
     */
    status = acpi_ev_create_gpe_info_blocks(gpe_block);
    if (ACPI_FAILURE(status)) {
        ACPI_FREE(gpe_block);
        return_ACPI_STATUS(status);
    }

    /* Install the new block in the global lists */

    status = acpi_ev_install_gpe_block(gpe_block, interrupt_number);
    if (ACPI_FAILURE(status)) {
        ACPI_FREE(gpe_block);
        return_ACPI_STATUS(status);
    }

    /* Find all GPE methods (_Lxx, _Exx) for this block */

    status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD, gpe_device,
                                    ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK,
                                    acpi_ev_save_method_info, NULL,
                                    gpe_block, NULL);

    /* Return the new block */

    if (return_gpe_block) {
        (*return_gpe_block) = gpe_block;
    }

    ACPI_DEBUG_PRINT((ACPI_DB_INIT,
                      "GPE %02X to %02X [%4.4s] %u regs on int 0x%X\n",
                      (u32) gpe_block->block_base_number,
                      (u32) (gpe_block->block_base_number +
                             ((gpe_block->register_count *
                               ACPI_GPE_REGISTER_WIDTH) - 1)),
                      gpe_device->name.ascii, gpe_block->register_count,
                      interrupt_number));

    /* Update global count of currently available GPEs */

    acpi_current_gpe_count += register_count * ACPI_GPE_REGISTER_WIDTH;
    return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_initialize_gpe_block
 *
 * PARAMETERS:  gpe_device      - Handle to the parent GPE block
 *              gpe_block       - Gpe Block info
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Initialize and enable a GPE block. First find and run any
 *              _PRW methods associated with the block, then enable the
 *              appropriate GPEs.
 *              Note: Assumes namespace is locked.
 *
 ******************************************************************************/

acpi_status
acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
                             struct acpi_gpe_block_info *gpe_block)
{
    struct acpi_gpe_event_info *gpe_event_info;
    struct acpi_gpe_walk_info gpe_info;
    u32 wake_gpe_count;
    u32 gpe_enabled_count;
    u32 i;
    u32 j;

    ACPI_FUNCTION_TRACE(ev_initialize_gpe_block);

    /* Ignore a null GPE block (e.g., if no GPE block 1 exists) */

    if (!gpe_block) {
        return_ACPI_STATUS(AE_OK);
    }

    /*
     * Runtime option: Should wake GPEs be enabled at runtime? The default
     * is no, they should only be enabled just as the machine goes to sleep.
     */
    if (acpi_gbl_leave_wake_gpes_disabled) {
        /*
         * Differentiate runtime vs wake GPEs, via the _PRW control methods.
         * Each GPE that has one or more _PRWs that reference it is by
         * definition a wake GPE and will not be enabled while the machine
         * is running.
         */
        gpe_info.gpe_block = gpe_block;
        gpe_info.gpe_device = gpe_device;

        acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
                               ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK,
                               acpi_ev_match_prw_and_gpe, NULL,
                               &gpe_info, NULL);
    }

    /*
     * Enable all GPEs that have a corresponding method and aren't
     * capable of generating wakeups. Any other GPEs within this block
     * must be enabled via the acpi_enable_gpe() interface.
     */
    wake_gpe_count = 0;
    gpe_enabled_count = 0;
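    /*
     * Note: a NULL gpe_device is taken by acpi_enable_gpe() to mean the
     * permanent FADT-defined GPE blocks, hence the translation below.
     */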
    if (gpe_device == acpi_gbl_fadt_gpe_device)
        gpe_device = NULL;

    for (i = 0; i < gpe_block->register_count; i++) {
        for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
            acpi_status status;
            acpi_size gpe_index;
            int gpe_number;

            /* Get the info block for this particular GPE */

            gpe_index = (acpi_size) i * ACPI_GPE_REGISTER_WIDTH + j;
            gpe_event_info = &gpe_block->event_info[gpe_index];

            if (gpe_event_info->flags & ACPI_GPE_CAN_WAKE) {
                wake_gpe_count++;
                if (acpi_gbl_leave_wake_gpes_disabled)
                    continue;
            }

            if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD))
                continue;

            gpe_number = gpe_index + gpe_block->block_base_number;
            status = acpi_enable_gpe(gpe_device, gpe_number,
                                     ACPI_GPE_TYPE_RUNTIME);
            if (ACPI_FAILURE(status))
                ACPI_ERROR((AE_INFO,
                            "Failed to enable GPE %02X\n", gpe_number));
            else
                gpe_enabled_count++;
        }
    }

    ACPI_DEBUG_PRINT((ACPI_DB_INIT,
                      "Found %u Wake, Enabled %u Runtime GPEs in this block\n",
                      wake_gpe_count, gpe_enabled_count));

    return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_gpe_initialize
 *
 * PARAMETERS:  None
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Initialize the GPE data structures
 *
 ******************************************************************************/

acpi_status acpi_ev_gpe_initialize(void)
{
    u32 register_count0 = 0;
    u32 register_count1 = 0;
    u32 gpe_number_max = 0;
    acpi_status status;

    ACPI_FUNCTION_TRACE(ev_gpe_initialize);

    status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
    if (ACPI_FAILURE(status)) {
        return_ACPI_STATUS(status);
    }

    /*
     * Initialize the GPE Block(s) defined in the FADT
     *
     * Why the GPE register block lengths are divided by 2: From the ACPI
     * Spec, section "General-Purpose Event Registers", we have:
     *
     * "Each register block contains two registers of equal length
     * GPEx_STS and GPEx_EN (where x is 0 or 1). The length of the
     * GPE0_STS and GPE0_EN registers is equal to half the GPE0_LEN
     * The length of the GPE1_STS and GPE1_EN registers is equal to
     * half the GPE1_LEN. If a generic register block is not supported
     * then its respective block pointer and block length values in the
     * FADT table contain zeros. The GPE0_LEN and GPE1_LEN do not need
     * to be the same size."
     */
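    /*
     * Worked example (hypothetical FADT values): a GPE0 block length of 8
     * bytes gives register_count0 = 8 / 2 = 4 status/enable register pairs,
     * i.e. 4 * ACPI_GPE_REGISTER_WIDTH = 32 GPEs numbered 0x00-0x1F, so
     * gpe_number_max = 31 below.
     */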
    /*
     * Determine the maximum GPE number for this machine.
     *
     * Note: both GPE0 and GPE1 are optional, and either can exist without
     * the other.
     *
     * If EITHER the register length OR the block address are zero, then that
     * particular block is not supported.
     */
    if (acpi_gbl_FADT.gpe0_block_length &&
        acpi_gbl_FADT.xgpe0_block.address) {

        /* GPE block 0 exists (has both length and address > 0) */

        register_count0 = (u16) (acpi_gbl_FADT.gpe0_block_length / 2);

        gpe_number_max = (register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1;

        /* Install GPE Block 0 */

        status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
                                          &acpi_gbl_FADT.xgpe0_block,
                                          register_count0, 0,
                                          acpi_gbl_FADT.sci_interrupt,
                                          &acpi_gbl_gpe_fadt_blocks[0]);
        if (ACPI_FAILURE(status)) {
            ACPI_EXCEPTION((AE_INFO, status,
                            "Could not create GPE Block 0"));
        }
    }

    if (acpi_gbl_FADT.gpe1_block_length &&
        acpi_gbl_FADT.xgpe1_block.address) {

        /* GPE block 1 exists (has both length and address > 0) */

        register_count1 = (u16) (acpi_gbl_FADT.gpe1_block_length / 2);

        /* Check for GPE0/GPE1 overlap (if both banks exist) */

        if ((register_count0) &&
            (gpe_number_max >= acpi_gbl_FADT.gpe1_base)) {
            ACPI_ERROR((AE_INFO,
                        "GPE0 block (GPE 0 to %d) overlaps the GPE1 block "
                        "(GPE %d to %d) - Ignoring GPE1",
                        gpe_number_max, acpi_gbl_FADT.gpe1_base,
                        acpi_gbl_FADT.gpe1_base +
                        ((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1)));

            /* Ignore GPE1 block by setting the register count to zero */

            register_count1 = 0;
        } else {
            /* Install GPE Block 1 */

            status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
                                              &acpi_gbl_FADT.xgpe1_block,
                                              register_count1,
                                              acpi_gbl_FADT.gpe1_base,
                                              acpi_gbl_FADT.sci_interrupt,
                                              &acpi_gbl_gpe_fadt_blocks[1]);
            if (ACPI_FAILURE(status)) {
                ACPI_EXCEPTION((AE_INFO, status,
                                "Could not create GPE Block 1"));
            }

            /*
             * GPE0 and GPE1 do not have to be contiguous in the GPE number
             * space. However, GPE0 always starts at GPE number zero.
             */
            gpe_number_max = acpi_gbl_FADT.gpe1_base +
                ((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1);
        }
    }

    /* Exit if there are no GPE registers */

    if ((register_count0 + register_count1) == 0) {

        /* GPEs are not required by ACPI, this is OK */

        ACPI_DEBUG_PRINT((ACPI_DB_INIT,
                          "There are no GPE blocks defined in the FADT\n"));
        status = AE_OK;
        goto cleanup;
    }

    /* Check for Max GPE number out-of-range */

    if (gpe_number_max > ACPI_GPE_MAX) {
        ACPI_ERROR((AE_INFO,
                    "Maximum GPE number from FADT is too large: 0x%X",
                    gpe_number_max));
        status = AE_BAD_VALUE;
        goto cleanup;
    }

cleanup:
    (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
    return_ACPI_STATUS(AE_OK);
}