/******************************************************************************
 *
 * Module Name: evgpeblk - GPE block creation and initialization.
 *
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2006, R. Byron Moore
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include <acpi/acevents.h>
#include <acpi/acnamesp.h>

#define _COMPONENT          ACPI_EVENTS
ACPI_MODULE_NAME("evgpeblk")

/* Local prototypes */
static acpi_status
acpi_ev_save_method_info(acpi_handle obj_handle,
                         u32 level, void *obj_desc, void **return_value);

static acpi_status
acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
                          u32 level, void *info, void **return_value);

static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number);

static acpi_status
acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt);

static acpi_status
acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
                          u32 interrupt_number);

static acpi_status
acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block);
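
/*
 * Data-structure overview for this file:
 *
 * acpi_gbl_gpe_xrupt_list_head is a list of acpi_gpe_xrupt_info structs,
 * one per interrupt level that services GPEs (typically only SCI_INT).
 * Each interrupt entry owns a list of acpi_gpe_block_info structs (the
 * FADT-defined GPE0/GPE1 blocks plus any GPE block devices). Each block
 * in turn owns a register_info array (one entry per status/enable register
 * pair) and an event_info array (one entry per individual GPE).
 */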

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_valid_gpe_event
 *
 * PARAMETERS:  gpe_event_info          - Info for this GPE
 *
 * RETURN:      TRUE if the gpe_event is valid
 *
 * DESCRIPTION: Validate a GPE event. DO NOT CALL FROM INTERRUPT LEVEL.
 *              Should be called only when the GPE lists are semaphore locked
 *              and not subject to change.
 *
 ******************************************************************************/

u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info)
{
        struct acpi_gpe_xrupt_info *gpe_xrupt_block;
        struct acpi_gpe_block_info *gpe_block;

        ACPI_FUNCTION_ENTRY();

        /* No need for spin lock since we are not changing any list elements */

        /* Walk the GPE interrupt levels */

        gpe_xrupt_block = acpi_gbl_gpe_xrupt_list_head;
        while (gpe_xrupt_block) {
                gpe_block = gpe_xrupt_block->gpe_block_list_head;

                /* Walk the GPE blocks on this interrupt level */

                while (gpe_block) {
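                        /*
                         * The GPE is valid if its event_info pointer lies
                         * within this block's event_info array, which holds
                         * (register_count * 8) entries.
                         */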
                        if ((&gpe_block->event_info[0] <= gpe_event_info) &&
                            (&gpe_block->event_info[((acpi_size)
                                                     gpe_block->register_count) * 8]
                             > gpe_event_info)) {
                                return (TRUE);
                        }

                        gpe_block = gpe_block->next;
                }

                gpe_xrupt_block = gpe_xrupt_block->next;
        }

        return (FALSE);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_walk_gpe_list
 *
 * PARAMETERS:  gpe_walk_callback       - Routine called for each GPE block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Walk the GPE lists.
 *
 ******************************************************************************/

acpi_status acpi_ev_walk_gpe_list(ACPI_GPE_CALLBACK gpe_walk_callback)
{
        struct acpi_gpe_block_info *gpe_block;
        struct acpi_gpe_xrupt_info *gpe_xrupt_info;
        acpi_status status = AE_OK;
        acpi_native_uint flags;

        ACPI_FUNCTION_TRACE("ev_walk_gpe_list");

        flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

        /* Walk the interrupt level descriptor list */

        gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
        while (gpe_xrupt_info) {
                /* Walk all Gpe Blocks attached to this interrupt level */

                gpe_block = gpe_xrupt_info->gpe_block_list_head;
                while (gpe_block) {
                        /* One callback per GPE block */

                        status = gpe_walk_callback(gpe_xrupt_info, gpe_block);
                        if (ACPI_FAILURE(status)) {
                                goto unlock_and_exit;
                        }

                        gpe_block = gpe_block->next;
                }

                gpe_xrupt_info = gpe_xrupt_info->next;
        }

unlock_and_exit:
        acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
        return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_delete_gpe_handlers
 *
 * PARAMETERS:  gpe_xrupt_info          - GPE Interrupt info
 *              gpe_block               - Gpe Block info
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Delete all Handler objects found in the GPE data structs.
 *              Used only prior to termination.
 *
 ******************************************************************************/

acpi_status
acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
                            struct acpi_gpe_block_info *gpe_block)
{
        struct acpi_gpe_event_info *gpe_event_info;
        acpi_native_uint i;
        acpi_native_uint j;

        ACPI_FUNCTION_TRACE("ev_delete_gpe_handlers");

        /* Examine each GPE Register within the block */

        for (i = 0; i < gpe_block->register_count; i++) {
                /* Now look at the individual GPEs in this byte register */

                for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
                        gpe_event_info =
                            &gpe_block->event_info[(i * ACPI_GPE_REGISTER_WIDTH) + j];

                        if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
                            ACPI_GPE_DISPATCH_HANDLER) {
                                ACPI_MEM_FREE(gpe_event_info->dispatch.handler);
                                gpe_event_info->dispatch.handler = NULL;
                                gpe_event_info->flags &= ~ACPI_GPE_DISPATCH_MASK;
                        }
                }
        }

        return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_save_method_info
 *
 * PARAMETERS:  Callback from walk_namespace
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
 *              control method under the _GPE portion of the namespace.
 *              Extract the name and GPE type from the object, saving this
 *              information for quick lookup during GPE dispatch.
 *
 *              The name of each GPE control method is of the form:
 *              "_Lxx" or "_Exx"
 *              Where:
 *                  L   - means that the GPE is level triggered
 *                  E   - means that the GPE is edge triggered
 *                  xx  - is the GPE number [in HEX]
 *
 ******************************************************************************/

static acpi_status
acpi_ev_save_method_info(acpi_handle obj_handle,
                         u32 level, void *obj_desc, void **return_value)
{
        struct acpi_gpe_block_info *gpe_block = (void *)obj_desc;
        struct acpi_gpe_event_info *gpe_event_info;
        u32 gpe_number;
        char name[ACPI_NAME_SIZE + 1];
        u8 type;
        acpi_status status;

        ACPI_FUNCTION_TRACE("ev_save_method_info");

        /*
         * _Lxx and _Exx GPE method support
         *
         * 1) Extract the name from the object and convert to a string
         */
        ACPI_MOVE_32_TO_32(name,
                           &((struct acpi_namespace_node *)obj_handle)->name.integer);
        name[ACPI_NAME_SIZE] = 0;
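
        /*
         * Example: a method named "_L1A" decodes to a level-triggered GPE
         * with gpe_number = 0x1A; "_E02" decodes to an edge-triggered GPE
         * with gpe_number = 0x02.
         */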

        /*
         * 2) Edge/Level determination is based on the 2nd character
         *    of the method name
         *
         * NOTE: Default GPE type is RUNTIME. May be changed later to WAKE
         * if a _PRW object is found that points to this GPE.
         */
        switch (name[1]) {
        case 'L':
                type = ACPI_GPE_LEVEL_TRIGGERED;
                break;

        case 'E':
                type = ACPI_GPE_EDGE_TRIGGERED;
                break;

        default:
                /* Unknown method type, just ignore it! */

                ACPI_REPORT_ERROR(("Unknown GPE method type: %s (name not of form _Lxx or _Exx)\n", name));
                return_ACPI_STATUS(AE_OK);
        }

        /* Convert the last two characters of the name to the GPE Number */

        gpe_number = ACPI_STRTOUL(&name[2], NULL, 16);
        if (gpe_number == ACPI_UINT32_MAX) {
                /* Conversion failed; invalid method, just ignore it */

                ACPI_REPORT_ERROR(("Could not extract GPE number from name: %s (name is not of form _Lxx or _Exx)\n", name));
                return_ACPI_STATUS(AE_OK);
        }

        /* Ensure that we have a valid GPE number for this GPE block */

        if ((gpe_number < gpe_block->block_base_number) ||
            (gpe_number >= (gpe_block->block_base_number +
                            (gpe_block->register_count * 8)))) {
                /*
                 * Not valid for this GPE block, just ignore it.
                 * However, it may be valid for a different GPE block, since GPE0
                 * and GPE1 methods both appear under \_GPE.
                 */
                return_ACPI_STATUS(AE_OK);
        }

        /*
         * Now we can add this information to the gpe_event_info block
         * for use during dispatch of this GPE. Default type is RUNTIME, although
         * this may change when the _PRW methods are executed later.
         */
        gpe_event_info =
            &gpe_block->event_info[gpe_number - gpe_block->block_base_number];

        gpe_event_info->flags = (u8)
            (type | ACPI_GPE_DISPATCH_METHOD | ACPI_GPE_TYPE_RUNTIME);

        gpe_event_info->dispatch.method_node =
            (struct acpi_namespace_node *)obj_handle;

        /* Update enable mask, but don't enable the HW GPE as of yet */

        status = acpi_ev_enable_gpe(gpe_event_info, FALSE);

        ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
                          "Registered GPE method %s as GPE number 0x%.2X\n",
                          name, gpe_number));
        return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_match_prw_and_gpe
 *
 * PARAMETERS:  Callback from walk_namespace
 *
 * RETURN:      Status. NOTE: We ignore errors so that the _PRW walk is
 *              not aborted on a single _PRW failure.
 *
 * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
 *              Device. Run the _PRW method. If present, extract the GPE
 *              number and mark the GPE as a WAKE GPE.
 *
 ******************************************************************************/

static acpi_status
acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
                          u32 level, void *info, void **return_value)
{
        struct acpi_gpe_walk_info *gpe_info = (void *)info;
        struct acpi_namespace_node *gpe_device;
        struct acpi_gpe_block_info *gpe_block;
        struct acpi_namespace_node *target_gpe_device;
        struct acpi_gpe_event_info *gpe_event_info;
        union acpi_operand_object *pkg_desc;
        union acpi_operand_object *obj_desc;
        u32 gpe_number;
        acpi_status status;

        ACPI_FUNCTION_TRACE("ev_match_prw_and_gpe");

        /* Check for a _PRW method under this device */

        status = acpi_ut_evaluate_object(obj_handle, METHOD_NAME__PRW,
                                         ACPI_BTYPE_PACKAGE, &pkg_desc);
        if (ACPI_FAILURE(status)) {
                /* Ignore all errors from _PRW, we don't want to abort the subsystem */

                return_ACPI_STATUS(AE_OK);
        }

        /* The returned _PRW package must have at least two elements */

        if (pkg_desc->package.count < 2) {
                goto cleanup;
        }
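
        /*
         * Per the ACPI specification, _PRW returns a package whose first
         * element identifies the wake GPE (either a plain GPE number, or a
         * sub-package of {GPE block device reference, GPE number}) and whose
         * second element is the deepest sleep state from which the device
         * can wake the system. Only the first element is examined here.
         */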

        /* Extract pointers from the input context */

        gpe_device = gpe_info->gpe_device;
        gpe_block = gpe_info->gpe_block;

        /*
         * The _PRW object must return a package, we are only interested
         * in the first element
         */
        obj_desc = pkg_desc->package.elements[0];

        if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_INTEGER) {
                /* Use FADT-defined GPE device (from definition of _PRW) */

                target_gpe_device = acpi_gbl_fadt_gpe_device;

                /* Integer is the GPE number in the FADT described GPE blocks */

                gpe_number = (u32) obj_desc->integer.value;
        } else if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_PACKAGE) {
                /* Package contains a GPE reference and GPE number within a GPE block */

                if ((obj_desc->package.count < 2) ||
                    (ACPI_GET_OBJECT_TYPE(obj_desc->package.elements[0]) !=
                     ACPI_TYPE_LOCAL_REFERENCE) ||
                    (ACPI_GET_OBJECT_TYPE(obj_desc->package.elements[1]) !=
                     ACPI_TYPE_INTEGER)) {
                        goto cleanup;
                }

                /* Get GPE block reference and decode */

                target_gpe_device = obj_desc->package.elements[0]->reference.node;
                gpe_number = (u32) obj_desc->package.elements[1]->integer.value;
        } else {
                /* Unknown type, just ignore it */

                goto cleanup;
        }

        /*
         * Is this GPE within this block?
         *
         * TRUE iff these conditions are true:
         *     1) The GPE devices match.
         *     2) The GPE index (number) is within the range of the Gpe Block
         *        associated with the GPE device.
         */
        if ((gpe_device == target_gpe_device) &&
            (gpe_number >= gpe_block->block_base_number) &&
            (gpe_number <
             gpe_block->block_base_number + (gpe_block->register_count * 8))) {
                gpe_event_info =
                    &gpe_block->event_info[gpe_number - gpe_block->block_base_number];

                /* Mark GPE for WAKE-ONLY but WAKE_DISABLED */

                gpe_event_info->flags &=
                    ~(ACPI_GPE_WAKE_ENABLED | ACPI_GPE_RUN_ENABLED);

                status = acpi_ev_set_gpe_type(gpe_event_info, ACPI_GPE_TYPE_WAKE);
                if (ACPI_FAILURE(status)) {
                        goto cleanup;
                }

                status = acpi_ev_update_gpe_enable_masks(gpe_event_info,
                                                         ACPI_GPE_DISABLE);
        }

cleanup:
        acpi_ut_remove_reference(pkg_desc);
        return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_get_gpe_xrupt_block
 *
 * PARAMETERS:  interrupt_number        - Interrupt for a GPE block
 *
 * RETURN:      A GPE interrupt block
 *
 * DESCRIPTION: Get or Create a GPE interrupt block. There is one interrupt
 *              block per unique interrupt level used for GPEs.
 *              Should be called only when the GPE lists are semaphore locked
 *              and not subject to change.
 *
 ******************************************************************************/

static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number)
{
        struct acpi_gpe_xrupt_info *next_gpe_xrupt;
        struct acpi_gpe_xrupt_info *gpe_xrupt;
        acpi_status status;
        acpi_native_uint flags;

        ACPI_FUNCTION_TRACE("ev_get_gpe_xrupt_block");

        /* No need for lock since we are not changing any list elements here */

        next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
        while (next_gpe_xrupt) {
                if (next_gpe_xrupt->interrupt_number == interrupt_number) {
                        return_PTR(next_gpe_xrupt);
                }

                next_gpe_xrupt = next_gpe_xrupt->next;
        }

        /* Not found, must allocate a new xrupt descriptor */

        gpe_xrupt = ACPI_MEM_CALLOCATE(sizeof(struct acpi_gpe_xrupt_info));
        if (!gpe_xrupt) {
                return_PTR(NULL);
        }

        gpe_xrupt->interrupt_number = interrupt_number;

        /* Install new interrupt descriptor with spin lock */

        flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
        if (acpi_gbl_gpe_xrupt_list_head) {
                next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
                while (next_gpe_xrupt->next) {
                        next_gpe_xrupt = next_gpe_xrupt->next;
                }

                next_gpe_xrupt->next = gpe_xrupt;
                gpe_xrupt->previous = next_gpe_xrupt;
        } else {
                acpi_gbl_gpe_xrupt_list_head = gpe_xrupt;
        }
        acpi_os_release_lock(acpi_gbl_gpe_lock, flags);

        /* Install new interrupt handler if not SCI_INT */

        if (interrupt_number != acpi_gbl_FADT->sci_int) {
                status = acpi_os_install_interrupt_handler(interrupt_number,
                                                           acpi_ev_gpe_xrupt_handler,
                                                           gpe_xrupt);
                if (ACPI_FAILURE(status)) {
                        ACPI_REPORT_ERROR(("Could not install GPE interrupt handler at level 0x%X\n", interrupt_number));
                        return_PTR(NULL);
                }
        }

        return_PTR(gpe_xrupt);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_delete_gpe_xrupt
 *
 * PARAMETERS:  gpe_xrupt               - A GPE interrupt info block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Remove and free a gpe_xrupt block. Remove an associated
 *              interrupt handler if not the SCI interrupt.
 *
 ******************************************************************************/

static acpi_status
acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
{
        acpi_status status;
        acpi_native_uint flags;

        ACPI_FUNCTION_TRACE("ev_delete_gpe_xrupt");

        /* We never want to remove the SCI interrupt handler */

        if (gpe_xrupt->interrupt_number == acpi_gbl_FADT->sci_int) {
                gpe_xrupt->gpe_block_list_head = NULL;
                return_ACPI_STATUS(AE_OK);
        }

        /* Disable this interrupt */

        status = acpi_os_remove_interrupt_handler(gpe_xrupt->interrupt_number,
                                                  acpi_ev_gpe_xrupt_handler);
        if (ACPI_FAILURE(status)) {
                return_ACPI_STATUS(status);
        }

        /* Unlink the interrupt block with lock */

        flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
        if (gpe_xrupt->previous) {
                gpe_xrupt->previous->next = gpe_xrupt->next;
        }

        if (gpe_xrupt->next) {
                gpe_xrupt->next->previous = gpe_xrupt->previous;
        }
        acpi_os_release_lock(acpi_gbl_gpe_lock, flags);

        /* Free the block */

        ACPI_MEM_FREE(gpe_xrupt);
        return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_install_gpe_block
 *
 * PARAMETERS:  gpe_block               - New GPE block
 *              interrupt_number        - Xrupt to be associated with this GPE block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Install new GPE block with mutex support
 *
 ******************************************************************************/

static acpi_status
acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
                          u32 interrupt_number)
{
        struct acpi_gpe_block_info *next_gpe_block;
        struct acpi_gpe_xrupt_info *gpe_xrupt_block;
        acpi_status status;
        acpi_native_uint flags;

        ACPI_FUNCTION_TRACE("ev_install_gpe_block");

        status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
        if (ACPI_FAILURE(status)) {
                return_ACPI_STATUS(status);
        }

        gpe_xrupt_block = acpi_ev_get_gpe_xrupt_block(interrupt_number);
        if (!gpe_xrupt_block) {
                status = AE_NO_MEMORY;
                goto unlock_and_exit;
        }

        /* Install the new block at the end of the list with lock */

        flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
        if (gpe_xrupt_block->gpe_block_list_head) {
                next_gpe_block = gpe_xrupt_block->gpe_block_list_head;
                while (next_gpe_block->next) {
                        next_gpe_block = next_gpe_block->next;
                }

                next_gpe_block->next = gpe_block;
                gpe_block->previous = next_gpe_block;
        } else {
                gpe_xrupt_block->gpe_block_list_head = gpe_block;
        }

        gpe_block->xrupt_block = gpe_xrupt_block;
        acpi_os_release_lock(acpi_gbl_gpe_lock, flags);

unlock_and_exit:
        status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
        return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_delete_gpe_block
 *
 * PARAMETERS:  gpe_block               - Existing GPE block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Remove a GPE block
 *
 ******************************************************************************/

acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block)
{
        acpi_status status;
        acpi_native_uint flags;

        ACPI_FUNCTION_TRACE("ev_delete_gpe_block");

        status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
        if (ACPI_FAILURE(status)) {
                return_ACPI_STATUS(status);
        }

        /* Disable all GPEs in this block */

        status = acpi_hw_disable_gpe_block(gpe_block->xrupt_block, gpe_block);

        if (!gpe_block->previous && !gpe_block->next) {
                /* This is the last gpe_block on this interrupt */

                status = acpi_ev_delete_gpe_xrupt(gpe_block->xrupt_block);
                if (ACPI_FAILURE(status)) {
                        goto unlock_and_exit;
                }
        } else {
                /* Remove the block on this interrupt with lock */

                flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
                if (gpe_block->previous) {
                        gpe_block->previous->next = gpe_block->next;
                } else {
                        gpe_block->xrupt_block->gpe_block_list_head =
                            gpe_block->next;
                }

                if (gpe_block->next) {
                        gpe_block->next->previous = gpe_block->previous;
                }
                acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
        }

        /* Free the gpe_block */

        ACPI_MEM_FREE(gpe_block->register_info);
        ACPI_MEM_FREE(gpe_block->event_info);
        ACPI_MEM_FREE(gpe_block);

unlock_and_exit:
        status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
        return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_create_gpe_info_blocks
 *
 * PARAMETERS:  gpe_block               - New GPE block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Create the register_info and event_info blocks for this GPE block
 *
 ******************************************************************************/

static acpi_status
acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
{
        struct acpi_gpe_register_info *gpe_register_info = NULL;
        struct acpi_gpe_event_info *gpe_event_info = NULL;
        struct acpi_gpe_event_info *this_event;
        struct acpi_gpe_register_info *this_register;
        acpi_native_uint i;
        acpi_native_uint j;
        acpi_status status;

        ACPI_FUNCTION_TRACE("ev_create_gpe_info_blocks");

        /* Allocate the GPE register information block */

        gpe_register_info = ACPI_MEM_CALLOCATE((acpi_size) gpe_block->register_count *
                                               sizeof(struct acpi_gpe_register_info));
        if (!gpe_register_info) {
                ACPI_REPORT_ERROR(("Could not allocate the gpe_register_info table\n"));
                return_ACPI_STATUS(AE_NO_MEMORY);
        }

        /*
         * Allocate the GPE event_info block. There are eight distinct GPEs
         * per register. Initialization to zeros is sufficient.
         */
        gpe_event_info = ACPI_MEM_CALLOCATE(((acpi_size) gpe_block->register_count *
                                             ACPI_GPE_REGISTER_WIDTH) *
                                            sizeof(struct acpi_gpe_event_info));
        if (!gpe_event_info) {
                ACPI_REPORT_ERROR(("Could not allocate the gpe_event_info table\n"));
                status = AE_NO_MEMORY;
                goto error_exit;
        }

        /* Save the new Info arrays in the GPE block */

        gpe_block->register_info = gpe_register_info;
        gpe_block->event_info = gpe_event_info;

        /*
         * Initialize the GPE Register and Event structures. A goal of these
         * tables is to hide the fact that there are two separate GPE register
         * sets in a given GPE hardware block: the status registers occupy the
         * first half, and the enable registers occupy the second half.
         */
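        /*
         * Address layout implemented below: for a block whose base address is B
         * and whose register_count is N, status register i lives at (B + i) and
         * the matching enable register lives at (B + N + i), with each register
         * covering 8 GPEs.
         */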
        this_register = gpe_register_info;
        this_event = gpe_event_info;

        for (i = 0; i < gpe_block->register_count; i++) {
                /* Init the register_info for this GPE register (8 GPEs) */

                this_register->base_gpe_number =
                    (u8) (gpe_block->block_base_number +
                          (i * ACPI_GPE_REGISTER_WIDTH));

                ACPI_STORE_ADDRESS(this_register->status_address.address,
                                   (gpe_block->block_address.address + i));

                ACPI_STORE_ADDRESS(this_register->enable_address.address,
                                   (gpe_block->block_address.address
                                    + i + gpe_block->register_count));

                this_register->status_address.address_space_id =
                    gpe_block->block_address.address_space_id;
                this_register->enable_address.address_space_id =
                    gpe_block->block_address.address_space_id;
                this_register->status_address.register_bit_width =
                    ACPI_GPE_REGISTER_WIDTH;
                this_register->enable_address.register_bit_width =
                    ACPI_GPE_REGISTER_WIDTH;
                this_register->status_address.register_bit_offset =
                    ACPI_GPE_REGISTER_WIDTH;
                this_register->enable_address.register_bit_offset =
                    ACPI_GPE_REGISTER_WIDTH;

                /* Init the event_info for each GPE within this register */

                for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
                        this_event->register_bit = acpi_gbl_decode_to8bit[j];
                        this_event->register_info = this_register;
                        this_event++;
                }

                /* Disable all GPEs within this register */

                status = acpi_hw_low_level_write(ACPI_GPE_REGISTER_WIDTH, 0x00,
                                                 &this_register->enable_address);
                if (ACPI_FAILURE(status)) {
                        goto error_exit;
                }

                /* Clear any pending GPE events within this register */

                status = acpi_hw_low_level_write(ACPI_GPE_REGISTER_WIDTH, 0xFF,
                                                 &this_register->status_address);
                if (ACPI_FAILURE(status)) {
                        goto error_exit;
                }

                this_register++;
        }

        return_ACPI_STATUS(AE_OK);

error_exit:
        if (gpe_register_info) {
                ACPI_MEM_FREE(gpe_register_info);
        }
        if (gpe_event_info) {
                ACPI_MEM_FREE(gpe_event_info);
        }

        return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_create_gpe_block
 *
 * PARAMETERS:  gpe_device              - Handle to the parent GPE block
 *              gpe_block_address       - Address and Space ID
 *              register_count          - Number of GPE register pairs in the block
 *              gpe_block_base_number   - Starting GPE number for the block
 *              interrupt_number        - H/W interrupt for the block
 *              return_gpe_block        - Where the new block descriptor is returned
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Create and Install a block of GPE registers. All GPEs within
 *              the block are disabled at exit.
 *              Note: Assumes namespace is locked.
 *
 ******************************************************************************/

acpi_status
acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
                         struct acpi_generic_address *gpe_block_address,
                         u32 register_count,
                         u8 gpe_block_base_number,
                         u32 interrupt_number,
                         struct acpi_gpe_block_info **return_gpe_block)
{
        acpi_status status;
        struct acpi_gpe_block_info *gpe_block;

        ACPI_FUNCTION_TRACE("ev_create_gpe_block");

        if (!register_count) {
                return_ACPI_STATUS(AE_OK);
        }

        /* Allocate a new GPE block */

        gpe_block = ACPI_MEM_CALLOCATE(sizeof(struct acpi_gpe_block_info));
        if (!gpe_block) {
                return_ACPI_STATUS(AE_NO_MEMORY);
        }

        /* Initialize the new GPE block */

        gpe_block->node = gpe_device;
        gpe_block->register_count = register_count;
        gpe_block->block_base_number = gpe_block_base_number;

        ACPI_MEMCPY(&gpe_block->block_address, gpe_block_address,
                    sizeof(struct acpi_generic_address));

        /*
         * Create the register_info and event_info sub-structures
         * Note: disables and clears all GPEs in the block
         */
        status = acpi_ev_create_gpe_info_blocks(gpe_block);
        if (ACPI_FAILURE(status)) {
                ACPI_MEM_FREE(gpe_block);
                return_ACPI_STATUS(status);
        }

        /* Install the new block in the global lists */

        status = acpi_ev_install_gpe_block(gpe_block, interrupt_number);
        if (ACPI_FAILURE(status)) {
                ACPI_MEM_FREE(gpe_block);
                return_ACPI_STATUS(status);
        }

        /* Find all GPE methods (_Lxx, _Exx) for this block */

        status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD, gpe_device,
                                        ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK,
                                        acpi_ev_save_method_info, gpe_block,
                                        NULL);

        /* Return the new block */

        if (return_gpe_block) {
                (*return_gpe_block) = gpe_block;
        }

        ACPI_DEBUG_PRINT((ACPI_DB_INIT,
                          "GPE %02X to %02X [%4.4s] %u regs on int 0x%X\n",
                          (u32) gpe_block->block_base_number,
                          (u32) (gpe_block->block_base_number +
                                 ((gpe_block->register_count *
                                   ACPI_GPE_REGISTER_WIDTH) - 1)),
                          gpe_device->name.ascii, gpe_block->register_count,
                          interrupt_number));

        return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_initialize_gpe_block
 *
 * PARAMETERS:  gpe_device              - Handle to the parent GPE block
 *              gpe_block               - Gpe Block info
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Initialize and enable a GPE block. First find and run any
 *              _PRW methods associated with the block, then enable the
 *              appropriate GPEs.
 *              Note: Assumes namespace is locked.
 *
 ******************************************************************************/

acpi_status
acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
                             struct acpi_gpe_block_info *gpe_block)
{
        acpi_status status;
        struct acpi_gpe_event_info *gpe_event_info;
        struct acpi_gpe_walk_info gpe_info;
        u32 wake_gpe_count;
        u32 gpe_enabled_count;
        acpi_native_uint i;
        acpi_native_uint j;

        ACPI_FUNCTION_TRACE("ev_initialize_gpe_block");

        /* Ignore a null GPE block (e.g., if no GPE block 1 exists) */

        if (!gpe_block) {
                return_ACPI_STATUS(AE_OK);
        }

        /*
         * Runtime option: Should wake GPEs be enabled at runtime? The default
         * is no, they should only be enabled just as the machine goes to sleep.
         */
        if (acpi_gbl_leave_wake_gpes_disabled) {
                /*
                 * Differentiate runtime vs wake GPEs, via the _PRW control methods.
                 * Each GPE that has one or more _PRWs that reference it is by
                 * definition a wake GPE and will not be enabled while the machine
                 * is running.
                 */
                gpe_info.gpe_block = gpe_block;
                gpe_info.gpe_device = gpe_device;

                status =
                    acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
                                           ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK,
                                           acpi_ev_match_prw_and_gpe, &gpe_info,
                                           NULL);
        }

        /*
         * Enable all GPEs in this block that have these attributes:
         * 1) are "runtime" or "run/wake" GPEs, and
         * 2) have a corresponding _Lxx or _Exx method
         *
         * Any other GPEs within this block must be enabled via the
         * acpi_enable_gpe() external interface.
         */
        wake_gpe_count = 0;
        gpe_enabled_count = 0;

        for (i = 0; i < gpe_block->register_count; i++) {
                for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
                        /* Get the info block for this particular GPE */

                        gpe_event_info =
                            &gpe_block->event_info[(i * ACPI_GPE_REGISTER_WIDTH) + j];

                        if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
                             ACPI_GPE_DISPATCH_METHOD) &&
                            (gpe_event_info->flags & ACPI_GPE_TYPE_RUNTIME)) {
                                gpe_enabled_count++;
                        }

                        if (gpe_event_info->flags & ACPI_GPE_TYPE_WAKE) {
                                wake_gpe_count++;
                        }
                }
        }

        ACPI_DEBUG_PRINT((ACPI_DB_INIT,
                          "Found %u Wake, Enabled %u Runtime GPEs in this block\n",
                          wake_gpe_count, gpe_enabled_count));

        /* Enable all valid runtime GPEs found above */

        status = acpi_hw_enable_runtime_gpe_block(NULL, gpe_block);
        if (ACPI_FAILURE(status)) {
                ACPI_REPORT_ERROR(("Could not enable GPEs in gpe_block %p\n",
                                   gpe_block));
        }

        return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_gpe_initialize
 *
 * PARAMETERS:  None
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Initialize the GPE data structures
 *
 ******************************************************************************/

acpi_status acpi_ev_gpe_initialize(void)
{
        u32 register_count0 = 0;
        u32 register_count1 = 0;
        u32 gpe_number_max = 0;
        acpi_status status;

        ACPI_FUNCTION_TRACE("ev_gpe_initialize");

        status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
        if (ACPI_FAILURE(status)) {
                return_ACPI_STATUS(status);
        }

        /*
         * Initialize the GPE Block(s) defined in the FADT
         *
         * Why the GPE register block lengths are divided by 2: From the ACPI Spec,
         * section "General-Purpose Event Registers", we have:
         *
         * "Each register block contains two registers of equal length
         * GPEx_STS and GPEx_EN (where x is 0 or 1). The length of the
         * GPE0_STS and GPE0_EN registers is equal to half the GPE0_LEN.
         * The length of the GPE1_STS and GPE1_EN registers is equal to
         * half the GPE1_LEN. If a generic register block is not supported
         * then its respective block pointer and block length values in the
         * FADT table contain zeros. The GPE0_LEN and GPE1_LEN do not need
         * to be the same size."
         */
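        /*
         * Example: a FADT with gpe0_blk_len = 8 describes 4 bytes of GPE0_STS
         * plus 4 bytes of GPE0_EN, so register_count0 = 4 below and the block
         * covers GPEs 0x00 through 0x1F (gpe_number_max = 31).
         */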

        /*
         * Determine the maximum GPE number for this machine.
         *
         * Note: both GPE0 and GPE1 are optional, and either can exist without
         * the other.
         *
         * If EITHER the register length OR the block address are zero, then that
         * particular block is not supported.
         */
        if (acpi_gbl_FADT->gpe0_blk_len && acpi_gbl_FADT->xgpe0_blk.address) {
                /* GPE block 0 exists (has both length and address > 0) */

                register_count0 = (u16) (acpi_gbl_FADT->gpe0_blk_len / 2);

                gpe_number_max = (register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1;

                /* Install GPE Block 0 */

                status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
                                                  &acpi_gbl_FADT->xgpe0_blk,
                                                  register_count0, 0,
                                                  acpi_gbl_FADT->sci_int,
                                                  &acpi_gbl_gpe_fadt_blocks[0]);
                if (ACPI_FAILURE(status)) {
                        ACPI_REPORT_ERROR(("Could not create GPE Block 0, %s\n",
                                           acpi_format_exception(status)));
                }
        }

        if (acpi_gbl_FADT->gpe1_blk_len && acpi_gbl_FADT->xgpe1_blk.address) {
                /* GPE block 1 exists (has both length and address > 0) */

                register_count1 = (u16) (acpi_gbl_FADT->gpe1_blk_len / 2);

                /* Check for GPE0/GPE1 overlap (if both banks exist) */

                if ((register_count0) &&
                    (gpe_number_max >= acpi_gbl_FADT->gpe1_base)) {
                        ACPI_REPORT_ERROR(("GPE0 block (GPE 0 to %d) overlaps the GPE1 block (GPE %d to %d) - Ignoring GPE1\n", gpe_number_max, acpi_gbl_FADT->gpe1_base, acpi_gbl_FADT->gpe1_base + ((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1)));

                        /* Ignore GPE1 block by setting the register count to zero */

                        register_count1 = 0;
                } else {
                        /* Install GPE Block 1 */

                        status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
                                                          &acpi_gbl_FADT->xgpe1_blk,
                                                          register_count1,
                                                          acpi_gbl_FADT->gpe1_base,
                                                          acpi_gbl_FADT->sci_int,
                                                          &acpi_gbl_gpe_fadt_blocks[1]);
                        if (ACPI_FAILURE(status)) {
                                ACPI_REPORT_ERROR(("Could not create GPE Block 1, %s\n", acpi_format_exception(status)));
                        }

                        /*
                         * GPE0 and GPE1 do not have to be contiguous in the GPE number
                         * space. However, GPE0 always starts at GPE number zero.
                         */
                        gpe_number_max = acpi_gbl_FADT->gpe1_base +
                            ((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1);
                }
        }

        /* Exit if there are no GPE registers */

        if ((register_count0 + register_count1) == 0) {
                /* GPEs are not required by ACPI, this is OK */

                ACPI_DEBUG_PRINT((ACPI_DB_INIT,
                                  "There are no GPE blocks defined in the FADT\n"));
                status = AE_OK;
                goto cleanup;
        }

        /* Check for Max GPE number out-of-range */

        if (gpe_number_max > ACPI_GPE_MAX) {
                ACPI_REPORT_ERROR(("Maximum GPE number from FADT is too large: 0x%X\n", gpe_number_max));
                status = AE_BAD_VALUE;
                goto cleanup;
        }

cleanup:
        (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
        return_ACPI_STATUS(AE_OK);
}