evgpeblk.c 41 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449
  1. /******************************************************************************
  2. *
  3. * Module Name: evgpeblk - GPE block creation and initialization.
  4. *
  5. *****************************************************************************/
  6. /*
  7. * Copyright (C) 2000 - 2010, Intel Corp.
  8. * All rights reserved.
  9. *
  10. * Redistribution and use in source and binary forms, with or without
  11. * modification, are permitted provided that the following conditions
  12. * are met:
  13. * 1. Redistributions of source code must retain the above copyright
  14. * notice, this list of conditions, and the following disclaimer,
  15. * without modification.
  16. * 2. Redistributions in binary form must reproduce at minimum a disclaimer
  17. * substantially similar to the "NO WARRANTY" disclaimer below
  18. * ("Disclaimer") and any redistribution must be conditioned upon
  19. * including a substantially similar Disclaimer requirement for further
  20. * binary redistribution.
  21. * 3. Neither the names of the above-listed copyright holders nor the names
  22. * of any contributors may be used to endorse or promote products derived
  23. * from this software without specific prior written permission.
  24. *
  25. * Alternatively, this software may be distributed under the terms of the
  26. * GNU General Public License ("GPL") version 2 as published by the Free
  27. * Software Foundation.
  28. *
  29. * NO WARRANTY
  30. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  31. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  32. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
  33. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  34. * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  35. * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  36. * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  37. * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
  38. * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
  39. * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  40. * POSSIBILITY OF SUCH DAMAGES.
  41. */
  42. #include <acpi/acpi.h>
  43. #include "accommon.h"
  44. #include "acevents.h"
  45. #include "acnamesp.h"
  46. #include "acinterp.h"
  47. #define _COMPONENT ACPI_EVENTS
  48. ACPI_MODULE_NAME("evgpeblk")
  49. /* Local prototypes */
  50. static acpi_status
  51. acpi_ev_match_gpe_method(acpi_handle obj_handle,
  52. u32 level, void *obj_desc, void **return_value);
  53. static acpi_status
  54. acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
  55. u32 level, void *info, void **return_value);
  56. static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
  57. interrupt_number);
  58. static acpi_status
  59. acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt);
  60. static acpi_status
  61. acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
  62. u32 interrupt_number);
  63. static acpi_status
  64. acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block);
  65. /*******************************************************************************
  66. *
  67. * FUNCTION: acpi_ev_valid_gpe_event
  68. *
  69. * PARAMETERS: gpe_event_info - Info for this GPE
  70. *
  71. * RETURN: TRUE if the gpe_event is valid
  72. *
  73. * DESCRIPTION: Validate a GPE event. DO NOT CALL FROM INTERRUPT LEVEL.
  74. * Should be called only when the GPE lists are semaphore locked
  75. * and not subject to change.
  76. *
  77. ******************************************************************************/
  78. u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info)
  79. {
  80. struct acpi_gpe_xrupt_info *gpe_xrupt_block;
  81. struct acpi_gpe_block_info *gpe_block;
  82. ACPI_FUNCTION_ENTRY();
  83. /* No need for spin lock since we are not changing any list elements */
  84. /* Walk the GPE interrupt levels */
  85. gpe_xrupt_block = acpi_gbl_gpe_xrupt_list_head;
  86. while (gpe_xrupt_block) {
  87. gpe_block = gpe_xrupt_block->gpe_block_list_head;
  88. /* Walk the GPE blocks on this interrupt level */
  89. while (gpe_block) {
  90. if ((&gpe_block->event_info[0] <= gpe_event_info) &&
  91. (&gpe_block->event_info[gpe_block->gpe_count] >
  92. gpe_event_info)) {
  93. return (TRUE);
  94. }
  95. gpe_block = gpe_block->next;
  96. }
  97. gpe_xrupt_block = gpe_xrupt_block->next;
  98. }
  99. return (FALSE);
  100. }
  101. /*******************************************************************************
  102. *
  103. * FUNCTION: acpi_ev_walk_gpe_list
  104. *
  105. * PARAMETERS: gpe_walk_callback - Routine called for each GPE block
  106. * Context - Value passed to callback
  107. *
  108. * RETURN: Status
  109. *
  110. * DESCRIPTION: Walk the GPE lists.
  111. *
  112. ******************************************************************************/
  113. acpi_status
  114. acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context)
  115. {
  116. struct acpi_gpe_block_info *gpe_block;
  117. struct acpi_gpe_xrupt_info *gpe_xrupt_info;
  118. acpi_status status = AE_OK;
  119. acpi_cpu_flags flags;
  120. ACPI_FUNCTION_TRACE(ev_walk_gpe_list);
  121. flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
  122. /* Walk the interrupt level descriptor list */
  123. gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
  124. while (gpe_xrupt_info) {
  125. /* Walk all Gpe Blocks attached to this interrupt level */
  126. gpe_block = gpe_xrupt_info->gpe_block_list_head;
  127. while (gpe_block) {
  128. /* One callback per GPE block */
  129. status =
  130. gpe_walk_callback(gpe_xrupt_info, gpe_block,
  131. context);
  132. if (ACPI_FAILURE(status)) {
  133. if (status == AE_CTRL_END) { /* Callback abort */
  134. status = AE_OK;
  135. }
  136. goto unlock_and_exit;
  137. }
  138. gpe_block = gpe_block->next;
  139. }
  140. gpe_xrupt_info = gpe_xrupt_info->next;
  141. }
  142. unlock_and_exit:
  143. acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
  144. return_ACPI_STATUS(status);
  145. }
  146. /*******************************************************************************
  147. *
  148. * FUNCTION: acpi_ev_delete_gpe_handlers
  149. *
  150. * PARAMETERS: gpe_xrupt_info - GPE Interrupt info
  151. * gpe_block - Gpe Block info
  152. *
  153. * RETURN: Status
  154. *
  155. * DESCRIPTION: Delete all Handler objects found in the GPE data structs.
  156. * Used only prior to termination.
  157. *
  158. ******************************************************************************/
  159. acpi_status
  160. acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
  161. struct acpi_gpe_block_info *gpe_block,
  162. void *context)
  163. {
  164. struct acpi_gpe_event_info *gpe_event_info;
  165. u32 i;
  166. u32 j;
  167. ACPI_FUNCTION_TRACE(ev_delete_gpe_handlers);
  168. /* Examine each GPE Register within the block */
  169. for (i = 0; i < gpe_block->register_count; i++) {
  170. /* Now look at the individual GPEs in this byte register */
  171. for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
  172. gpe_event_info = &gpe_block->event_info[((acpi_size) i *
  173. ACPI_GPE_REGISTER_WIDTH)
  174. + j];
  175. if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
  176. ACPI_GPE_DISPATCH_HANDLER) {
  177. ACPI_FREE(gpe_event_info->dispatch.handler);
  178. gpe_event_info->dispatch.handler = NULL;
  179. gpe_event_info->flags &=
  180. ~ACPI_GPE_DISPATCH_MASK;
  181. }
  182. }
  183. }
  184. return_ACPI_STATUS(AE_OK);
  185. }
  186. /*******************************************************************************
  187. *
  188. * FUNCTION: acpi_ev_match_gpe_method
  189. *
  190. * PARAMETERS: Callback from walk_namespace
  191. *
  192. * RETURN: Status
  193. *
  194. * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
  195. * control method under the _GPE portion of the namespace.
  196. * Extract the name and GPE type from the object, saving this
  197. * information for quick lookup during GPE dispatch. Allows a
  198. * per-owner_id evaluation if execute_by_owner_id is TRUE in the
  199. * walk_info parameter block.
  200. *
  201. * The name of each GPE control method is of the form:
  202. * "_Lxx" or "_Exx", where:
  203. * L - means that the GPE is level triggered
  204. * E - means that the GPE is edge triggered
  205. * xx - is the GPE number [in HEX]
  206. *
  207. * If walk_info->execute_by_owner_id is TRUE, we only execute examine GPE methods
  208. * with that owner.
  209. * If walk_info->enable_this_gpe is TRUE, the GPE that is referred to by a GPE
  210. * method is immediately enabled (Used for Load/load_table operators)
  211. *
  212. ******************************************************************************/
static acpi_status
acpi_ev_match_gpe_method(acpi_handle obj_handle,
			 u32 level, void *context, void **return_value)
{
	struct acpi_namespace_node *method_node =
	    ACPI_CAST_PTR(struct acpi_namespace_node, obj_handle);
	struct acpi_gpe_walk_info *walk_info =
	    ACPI_CAST_PTR(struct acpi_gpe_walk_info, context);
	struct acpi_gpe_event_info *gpe_event_info;
	struct acpi_namespace_node *gpe_device;
	acpi_status status;
	u32 gpe_number;
	char name[ACPI_NAME_SIZE + 1];	/* 4-char ACPI name + NUL */
	u8 type;

	ACPI_FUNCTION_TRACE(ev_match_gpe_method);

	/* Check if requested owner_id matches this owner_id */

	if ((walk_info->execute_by_owner_id) &&
	    (method_node->owner_id != walk_info->owner_id)) {
		return_ACPI_STATUS(AE_OK);
	}

	/*
	 * Match and decode the _Lxx and _Exx GPE method names
	 *
	 * 1) Extract the method name and null terminate it
	 */
	ACPI_MOVE_32_TO_32(name, &method_node->name.integer);
	name[ACPI_NAME_SIZE] = 0;

	/* 2) Name must begin with an underscore */

	if (name[0] != '_') {
		return_ACPI_STATUS(AE_OK);	/* Ignore this method */
	}

	/*
	 * 3) Edge/Level determination is based on the 2nd character
	 * of the method name
	 *
	 * NOTE: Default GPE type is RUNTIME only. Later, if a _PRW object is
	 * found that points to this GPE, the ACPI_GPE_CAN_WAKE flag is set.
	 */
	switch (name[1]) {
	case 'L':
		type = ACPI_GPE_LEVEL_TRIGGERED;
		break;

	case 'E':
		type = ACPI_GPE_EDGE_TRIGGERED;
		break;

	default:
		/* Unknown method type, just ignore it */

		ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
				  "Ignoring unknown GPE method type: %s "
				  "(name not of form _Lxx or _Exx)", name));
		return_ACPI_STATUS(AE_OK);
	}

	/* 4) The last two characters of the name are the hex GPE Number */

	gpe_number = ACPI_STRTOUL(&name[2], NULL, 16);
	if (gpe_number == ACPI_UINT32_MAX) {

		/* Conversion failed; invalid method, just ignore it */

		ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
				  "Could not extract GPE number from name: %s "
				  "(name is not of form _Lxx or _Exx)", name));
		return_ACPI_STATUS(AE_OK);
	}

	/* Ensure that we have a valid GPE number for this GPE block */

	gpe_event_info =
	    acpi_ev_low_get_gpe_info(gpe_number, walk_info->gpe_block);
	if (!gpe_event_info) {
		/*
		 * This gpe_number is not valid for this GPE block, just ignore it.
		 * However, it may be valid for a different GPE block, since GPE0
		 * and GPE1 methods both appear under \_GPE.
		 */
		return_ACPI_STATUS(AE_OK);
	}

	if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
	    ACPI_GPE_DISPATCH_HANDLER) {

		/* If there is already a handler, ignore this GPE method */

		return_ACPI_STATUS(AE_OK);
	}

	if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
	    ACPI_GPE_DISPATCH_METHOD) {
		/*
		 * If there is already a method, ignore this method. But check
		 * for a type mismatch (if both the _Lxx AND _Exx exist)
		 */
		if (type != (gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK)) {
			ACPI_ERROR((AE_INFO,
				    "For GPE 0x%.2X, found both _L%2.2X and _E%2.2X methods",
				    gpe_number, gpe_number, gpe_number));
		}
		return_ACPI_STATUS(AE_OK);
	}

	/*
	 * Add the GPE information from above to the gpe_event_info block for
	 * use during dispatch of this GPE.
	 */
	gpe_event_info->flags |= (u8)(type | ACPI_GPE_DISPATCH_METHOD);
	gpe_event_info->dispatch.method_node = method_node;

	/*
	 * Enable this GPE if requested. This only happens when during the
	 * execution of a Load or load_table operator. We have found a new
	 * GPE method and want to immediately enable the GPE if it is a
	 * runtime GPE.
	 */
	if (walk_info->enable_this_gpe) {

		/* Ignore GPEs that can wake the system */

		if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE) ||
		    !acpi_gbl_leave_wake_gpes_disabled) {
			walk_info->count++;
			gpe_device = walk_info->gpe_device;

			/* NULL device means "use the FADT-defined GPE block" */

			if (gpe_device == acpi_gbl_fadt_gpe_device) {
				gpe_device = NULL;
			}

			status = acpi_enable_gpe(gpe_device, gpe_number,
						 ACPI_GPE_TYPE_RUNTIME);
			if (ACPI_FAILURE(status)) {
				/* Enable failure is logged but does not abort the walk */
				ACPI_EXCEPTION((AE_INFO, status,
						"Could not enable GPE 0x%02X",
						gpe_number));
			}
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
			  "Registered GPE method %s as GPE number 0x%.2X\n",
			  name, gpe_number));
	return_ACPI_STATUS(AE_OK);
}
  338. /*******************************************************************************
  339. *
  340. * FUNCTION: acpi_ev_match_prw_and_gpe
  341. *
  342. * PARAMETERS: Callback from walk_namespace
  343. *
  344. * RETURN: Status. NOTE: We ignore errors so that the _PRW walk is
  345. * not aborted on a single _PRW failure.
  346. *
  347. * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
  348. * Device. Run the _PRW method. If present, extract the GPE
  349. * number and mark the GPE as a CAN_WAKE GPE. Allows a
  350. * per-owner_id execution if execute_by_owner_id is TRUE in the
  351. * walk_info parameter block.
  352. *
  353. * If walk_info->execute_by_owner_id is TRUE, we only execute _PRWs with that
  354. * owner.
  355. * If walk_info->gpe_device is NULL, we execute every _PRW found. Otherwise,
  356. * we only execute _PRWs that refer to the input gpe_device.
  357. *
  358. ******************************************************************************/
static acpi_status
acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
			  u32 level, void *context, void **return_value)
{
	struct acpi_gpe_walk_info *walk_info =
	    ACPI_CAST_PTR(struct acpi_gpe_walk_info, context);
	struct acpi_namespace_node *gpe_device;
	struct acpi_gpe_block_info *gpe_block;
	struct acpi_namespace_node *target_gpe_device;
	struct acpi_namespace_node *prw_node;
	struct acpi_gpe_event_info *gpe_event_info;
	union acpi_operand_object *pkg_desc;
	union acpi_operand_object *obj_desc;
	u32 gpe_number;
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_match_prw_and_gpe);

	/* Check for a _PRW method under this device */

	status = acpi_ns_get_node(obj_handle, METHOD_NAME__PRW,
				  ACPI_NS_NO_UPSEARCH, &prw_node);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(AE_OK);	/* No _PRW; not a wake device */
	}

	/* Check if requested owner_id matches this owner_id */

	if ((walk_info->execute_by_owner_id) &&
	    (prw_node->owner_id != walk_info->owner_id)) {
		return_ACPI_STATUS(AE_OK);
	}

	/* Execute the _PRW (errors are ignored so the walk continues) */

	status = acpi_ut_evaluate_object(prw_node, NULL,
					 ACPI_BTYPE_PACKAGE, &pkg_desc);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(AE_OK);
	}

	/* The returned _PRW package must have at least two elements */

	if (pkg_desc->package.count < 2) {
		goto cleanup;
	}

	/* Extract pointers from the input context */

	gpe_device = walk_info->gpe_device;
	gpe_block = walk_info->gpe_block;

	/*
	 * The _PRW object must return a package, we are only interested
	 * in the first element
	 */
	obj_desc = pkg_desc->package.elements[0];

	if (obj_desc->common.type == ACPI_TYPE_INTEGER) {

		/* Use FADT-defined GPE device (from definition of _PRW) */

		target_gpe_device = NULL;
		if (gpe_device) {
			target_gpe_device = acpi_gbl_fadt_gpe_device;
		}

		/* Integer is the GPE number in the FADT described GPE blocks */

		gpe_number = (u32) obj_desc->integer.value;
	} else if (obj_desc->common.type == ACPI_TYPE_PACKAGE) {

		/* Package contains a GPE reference and GPE number within a GPE block */

		if ((obj_desc->package.count < 2) ||
		    ((obj_desc->package.elements[0])->common.type !=
		     ACPI_TYPE_LOCAL_REFERENCE) ||
		    ((obj_desc->package.elements[1])->common.type !=
		     ACPI_TYPE_INTEGER)) {
			goto cleanup;	/* Malformed package; ignore this _PRW */
		}

		/* Get GPE block reference and decode */

		target_gpe_device =
		    obj_desc->package.elements[0]->reference.node;
		gpe_number = (u32) obj_desc->package.elements[1]->integer.value;
	} else {
		/* Unknown type, just ignore it */

		goto cleanup;
	}

	/* Get the gpe_event_info for this GPE */

	if (gpe_device) {
		/*
		 * Is this GPE within this block?
		 *
		 * TRUE if and only if these conditions are true:
		 * 1) The GPE devices match.
		 * 2) The GPE index(number) is within the range of the Gpe Block
		 * associated with the GPE device.
		 */
		if (gpe_device != target_gpe_device) {
			goto cleanup;
		}

		gpe_event_info =
		    acpi_ev_low_get_gpe_info(gpe_number, gpe_block);
	} else {
		/* gpe_device is NULL, just match the target_device and gpe_number */

		gpe_event_info =
		    acpi_ev_get_gpe_event_info(target_gpe_device, gpe_number);
	}

	if (gpe_event_info) {
		if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {

			/* This GPE can wake the system */

			gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
			walk_info->count++;
		}
	}

cleanup:
	/* Release the _PRW package returned by acpi_ut_evaluate_object */

	acpi_ut_remove_reference(pkg_desc);
	return_ACPI_STATUS(AE_OK);
}
  460. /*******************************************************************************
  461. *
  462. * FUNCTION: acpi_ev_get_gpe_xrupt_block
  463. *
  464. * PARAMETERS: interrupt_number - Interrupt for a GPE block
  465. *
  466. * RETURN: A GPE interrupt block
  467. *
  468. * DESCRIPTION: Get or Create a GPE interrupt block. There is one interrupt
  469. * block per unique interrupt level used for GPEs. Should be
  470. * called only when the GPE lists are semaphore locked and not
  471. * subject to change.
  472. *
  473. ******************************************************************************/
  474. static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
  475. interrupt_number)
  476. {
  477. struct acpi_gpe_xrupt_info *next_gpe_xrupt;
  478. struct acpi_gpe_xrupt_info *gpe_xrupt;
  479. acpi_status status;
  480. acpi_cpu_flags flags;
  481. ACPI_FUNCTION_TRACE(ev_get_gpe_xrupt_block);
  482. /* No need for lock since we are not changing any list elements here */
  483. next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
  484. while (next_gpe_xrupt) {
  485. if (next_gpe_xrupt->interrupt_number == interrupt_number) {
  486. return_PTR(next_gpe_xrupt);
  487. }
  488. next_gpe_xrupt = next_gpe_xrupt->next;
  489. }
  490. /* Not found, must allocate a new xrupt descriptor */
  491. gpe_xrupt = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_xrupt_info));
  492. if (!gpe_xrupt) {
  493. return_PTR(NULL);
  494. }
  495. gpe_xrupt->interrupt_number = interrupt_number;
  496. /* Install new interrupt descriptor with spin lock */
  497. flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
  498. if (acpi_gbl_gpe_xrupt_list_head) {
  499. next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
  500. while (next_gpe_xrupt->next) {
  501. next_gpe_xrupt = next_gpe_xrupt->next;
  502. }
  503. next_gpe_xrupt->next = gpe_xrupt;
  504. gpe_xrupt->previous = next_gpe_xrupt;
  505. } else {
  506. acpi_gbl_gpe_xrupt_list_head = gpe_xrupt;
  507. }
  508. acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
  509. /* Install new interrupt handler if not SCI_INT */
  510. if (interrupt_number != acpi_gbl_FADT.sci_interrupt) {
  511. status = acpi_os_install_interrupt_handler(interrupt_number,
  512. acpi_ev_gpe_xrupt_handler,
  513. gpe_xrupt);
  514. if (ACPI_FAILURE(status)) {
  515. ACPI_ERROR((AE_INFO,
  516. "Could not install GPE interrupt handler at level 0x%X",
  517. interrupt_number));
  518. return_PTR(NULL);
  519. }
  520. }
  521. return_PTR(gpe_xrupt);
  522. }
  523. /*******************************************************************************
  524. *
  525. * FUNCTION: acpi_ev_delete_gpe_xrupt
  526. *
  527. * PARAMETERS: gpe_xrupt - A GPE interrupt info block
  528. *
  529. * RETURN: Status
  530. *
  531. * DESCRIPTION: Remove and free a gpe_xrupt block. Remove an associated
  532. * interrupt handler if not the SCI interrupt.
  533. *
  534. ******************************************************************************/
  535. static acpi_status
  536. acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
  537. {
  538. acpi_status status;
  539. acpi_cpu_flags flags;
  540. ACPI_FUNCTION_TRACE(ev_delete_gpe_xrupt);
  541. /* We never want to remove the SCI interrupt handler */
  542. if (gpe_xrupt->interrupt_number == acpi_gbl_FADT.sci_interrupt) {
  543. gpe_xrupt->gpe_block_list_head = NULL;
  544. return_ACPI_STATUS(AE_OK);
  545. }
  546. /* Disable this interrupt */
  547. status =
  548. acpi_os_remove_interrupt_handler(gpe_xrupt->interrupt_number,
  549. acpi_ev_gpe_xrupt_handler);
  550. if (ACPI_FAILURE(status)) {
  551. return_ACPI_STATUS(status);
  552. }
  553. /* Unlink the interrupt block with lock */
  554. flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
  555. if (gpe_xrupt->previous) {
  556. gpe_xrupt->previous->next = gpe_xrupt->next;
  557. } else {
  558. /* No previous, update list head */
  559. acpi_gbl_gpe_xrupt_list_head = gpe_xrupt->next;
  560. }
  561. if (gpe_xrupt->next) {
  562. gpe_xrupt->next->previous = gpe_xrupt->previous;
  563. }
  564. acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
  565. /* Free the block */
  566. ACPI_FREE(gpe_xrupt);
  567. return_ACPI_STATUS(AE_OK);
  568. }
  569. /*******************************************************************************
  570. *
  571. * FUNCTION: acpi_ev_install_gpe_block
  572. *
  573. * PARAMETERS: gpe_block - New GPE block
  574. * interrupt_number - Xrupt to be associated with this
  575. * GPE block
  576. *
  577. * RETURN: Status
  578. *
  579. * DESCRIPTION: Install new GPE block with mutex support
  580. *
  581. ******************************************************************************/
  582. static acpi_status
  583. acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
  584. u32 interrupt_number)
  585. {
  586. struct acpi_gpe_block_info *next_gpe_block;
  587. struct acpi_gpe_xrupt_info *gpe_xrupt_block;
  588. acpi_status status;
  589. acpi_cpu_flags flags;
  590. ACPI_FUNCTION_TRACE(ev_install_gpe_block);
  591. status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
  592. if (ACPI_FAILURE(status)) {
  593. return_ACPI_STATUS(status);
  594. }
  595. gpe_xrupt_block = acpi_ev_get_gpe_xrupt_block(interrupt_number);
  596. if (!gpe_xrupt_block) {
  597. status = AE_NO_MEMORY;
  598. goto unlock_and_exit;
  599. }
  600. /* Install the new block at the end of the list with lock */
  601. flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
  602. if (gpe_xrupt_block->gpe_block_list_head) {
  603. next_gpe_block = gpe_xrupt_block->gpe_block_list_head;
  604. while (next_gpe_block->next) {
  605. next_gpe_block = next_gpe_block->next;
  606. }
  607. next_gpe_block->next = gpe_block;
  608. gpe_block->previous = next_gpe_block;
  609. } else {
  610. gpe_xrupt_block->gpe_block_list_head = gpe_block;
  611. }
  612. gpe_block->xrupt_block = gpe_xrupt_block;
  613. acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
  614. unlock_and_exit:
  615. status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
  616. return_ACPI_STATUS(status);
  617. }
  618. /*******************************************************************************
  619. *
  620. * FUNCTION: acpi_ev_delete_gpe_block
  621. *
  622. * PARAMETERS: gpe_block - Existing GPE block
  623. *
  624. * RETURN: Status
  625. *
  626. * DESCRIPTION: Remove a GPE block
  627. *
  628. ******************************************************************************/
  629. acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block)
  630. {
  631. acpi_status status;
  632. acpi_cpu_flags flags;
  633. ACPI_FUNCTION_TRACE(ev_install_gpe_block);
  634. status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
  635. if (ACPI_FAILURE(status)) {
  636. return_ACPI_STATUS(status);
  637. }
  638. /* Disable all GPEs in this block */
  639. status =
  640. acpi_hw_disable_gpe_block(gpe_block->xrupt_block, gpe_block, NULL);
  641. if (!gpe_block->previous && !gpe_block->next) {
  642. /* This is the last gpe_block on this interrupt */
  643. status = acpi_ev_delete_gpe_xrupt(gpe_block->xrupt_block);
  644. if (ACPI_FAILURE(status)) {
  645. goto unlock_and_exit;
  646. }
  647. } else {
  648. /* Remove the block on this interrupt with lock */
  649. flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
  650. if (gpe_block->previous) {
  651. gpe_block->previous->next = gpe_block->next;
  652. } else {
  653. gpe_block->xrupt_block->gpe_block_list_head =
  654. gpe_block->next;
  655. }
  656. if (gpe_block->next) {
  657. gpe_block->next->previous = gpe_block->previous;
  658. }
  659. acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
  660. }
  661. acpi_current_gpe_count -= gpe_block->gpe_count;
  662. /* Free the gpe_block */
  663. ACPI_FREE(gpe_block->register_info);
  664. ACPI_FREE(gpe_block->event_info);
  665. ACPI_FREE(gpe_block);
  666. unlock_and_exit:
  667. status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
  668. return_ACPI_STATUS(status);
  669. }
  670. /*******************************************************************************
  671. *
  672. * FUNCTION: acpi_ev_create_gpe_info_blocks
  673. *
  674. * PARAMETERS: gpe_block - New GPE block
  675. *
  676. * RETURN: Status
  677. *
  678. * DESCRIPTION: Create the register_info and event_info blocks for this GPE block
  679. *
  680. ******************************************************************************/
  681. static acpi_status
  682. acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
  683. {
  684. struct acpi_gpe_register_info *gpe_register_info = NULL;
  685. struct acpi_gpe_event_info *gpe_event_info = NULL;
  686. struct acpi_gpe_event_info *this_event;
  687. struct acpi_gpe_register_info *this_register;
  688. u32 i;
  689. u32 j;
  690. acpi_status status;
  691. ACPI_FUNCTION_TRACE(ev_create_gpe_info_blocks);
  692. /* Allocate the GPE register information block */
  693. gpe_register_info = ACPI_ALLOCATE_ZEROED((acpi_size) gpe_block->
  694. register_count *
  695. sizeof(struct
  696. acpi_gpe_register_info));
  697. if (!gpe_register_info) {
  698. ACPI_ERROR((AE_INFO,
  699. "Could not allocate the GpeRegisterInfo table"));
  700. return_ACPI_STATUS(AE_NO_MEMORY);
  701. }
  702. /*
  703. * Allocate the GPE event_info block. There are eight distinct GPEs
  704. * per register. Initialization to zeros is sufficient.
  705. */
  706. gpe_event_info = ACPI_ALLOCATE_ZEROED((acpi_size) gpe_block->gpe_count *
  707. sizeof(struct
  708. acpi_gpe_event_info));
  709. if (!gpe_event_info) {
  710. ACPI_ERROR((AE_INFO,
  711. "Could not allocate the GpeEventInfo table"));
  712. status = AE_NO_MEMORY;
  713. goto error_exit;
  714. }
  715. /* Save the new Info arrays in the GPE block */
  716. gpe_block->register_info = gpe_register_info;
  717. gpe_block->event_info = gpe_event_info;
  718. /*
  719. * Initialize the GPE Register and Event structures. A goal of these
  720. * tables is to hide the fact that there are two separate GPE register
  721. * sets in a given GPE hardware block, the status registers occupy the
  722. * first half, and the enable registers occupy the second half.
  723. */
  724. this_register = gpe_register_info;
  725. this_event = gpe_event_info;
  726. for (i = 0; i < gpe_block->register_count; i++) {
  727. /* Init the register_info for this GPE register (8 GPEs) */
  728. this_register->base_gpe_number =
  729. (u8) (gpe_block->block_base_number +
  730. (i * ACPI_GPE_REGISTER_WIDTH));
  731. this_register->status_address.address =
  732. gpe_block->block_address.address + i;
  733. this_register->enable_address.address =
  734. gpe_block->block_address.address + i +
  735. gpe_block->register_count;
  736. this_register->status_address.space_id =
  737. gpe_block->block_address.space_id;
  738. this_register->enable_address.space_id =
  739. gpe_block->block_address.space_id;
  740. this_register->status_address.bit_width =
  741. ACPI_GPE_REGISTER_WIDTH;
  742. this_register->enable_address.bit_width =
  743. ACPI_GPE_REGISTER_WIDTH;
  744. this_register->status_address.bit_offset = 0;
  745. this_register->enable_address.bit_offset = 0;
  746. /* Init the event_info for each GPE within this register */
  747. for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
  748. this_event->gpe_number =
  749. (u8) (this_register->base_gpe_number + j);
  750. this_event->register_info = this_register;
  751. this_event++;
  752. }
  753. /* Disable all GPEs within this register */
  754. status = acpi_hw_write(0x00, &this_register->enable_address);
  755. if (ACPI_FAILURE(status)) {
  756. goto error_exit;
  757. }
  758. /* Clear any pending GPE events within this register */
  759. status = acpi_hw_write(0xFF, &this_register->status_address);
  760. if (ACPI_FAILURE(status)) {
  761. goto error_exit;
  762. }
  763. this_register++;
  764. }
  765. return_ACPI_STATUS(AE_OK);
  766. error_exit:
  767. if (gpe_register_info) {
  768. ACPI_FREE(gpe_register_info);
  769. }
  770. if (gpe_event_info) {
  771. ACPI_FREE(gpe_event_info);
  772. }
  773. return_ACPI_STATUS(status);
  774. }
  775. /*******************************************************************************
  776. *
  777. * FUNCTION: acpi_ev_create_gpe_block
  778. *
  779. * PARAMETERS: gpe_device - Handle to the parent GPE block
780. * gpe_block_address - Address and space_ID
  781. * register_count - Number of GPE register pairs in the block
  782. * gpe_block_base_number - Starting GPE number for the block
  783. * interrupt_number - H/W interrupt for the block
  784. * return_gpe_block - Where the new block descriptor is returned
  785. *
  786. * RETURN: Status
  787. *
  788. * DESCRIPTION: Create and Install a block of GPE registers. All GPEs within
  789. * the block are disabled at exit.
  790. * Note: Assumes namespace is locked.
  791. *
  792. ******************************************************************************/
  793. acpi_status
  794. acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
  795. struct acpi_generic_address *gpe_block_address,
  796. u32 register_count,
  797. u8 gpe_block_base_number,
  798. u32 interrupt_number,
  799. struct acpi_gpe_block_info **return_gpe_block)
  800. {
  801. acpi_status status;
  802. struct acpi_gpe_block_info *gpe_block;
  803. struct acpi_gpe_walk_info walk_info;
  804. ACPI_FUNCTION_TRACE(ev_create_gpe_block);
  805. if (!register_count) {
  806. return_ACPI_STATUS(AE_OK);
  807. }
  808. /* Allocate a new GPE block */
  809. gpe_block = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_block_info));
  810. if (!gpe_block) {
  811. return_ACPI_STATUS(AE_NO_MEMORY);
  812. }
  813. /* Initialize the new GPE block */
  814. gpe_block->node = gpe_device;
  815. gpe_block->gpe_count = (u16)(register_count * ACPI_GPE_REGISTER_WIDTH);
  816. gpe_block->register_count = register_count;
  817. gpe_block->block_base_number = gpe_block_base_number;
  818. ACPI_MEMCPY(&gpe_block->block_address, gpe_block_address,
  819. sizeof(struct acpi_generic_address));
  820. /*
  821. * Create the register_info and event_info sub-structures
  822. * Note: disables and clears all GPEs in the block
  823. */
  824. status = acpi_ev_create_gpe_info_blocks(gpe_block);
  825. if (ACPI_FAILURE(status)) {
  826. ACPI_FREE(gpe_block);
  827. return_ACPI_STATUS(status);
  828. }
  829. /* Install the new block in the global lists */
  830. status = acpi_ev_install_gpe_block(gpe_block, interrupt_number);
  831. if (ACPI_FAILURE(status)) {
  832. ACPI_FREE(gpe_block);
  833. return_ACPI_STATUS(status);
  834. }
  835. /* Find all GPE methods (_Lxx or_Exx) for this block */
  836. walk_info.gpe_block = gpe_block;
  837. walk_info.gpe_device = gpe_device;
  838. walk_info.enable_this_gpe = FALSE;
  839. walk_info.execute_by_owner_id = FALSE;
  840. status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD, gpe_device,
  841. ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK,
  842. acpi_ev_match_gpe_method, NULL,
  843. &walk_info, NULL);
  844. /* Return the new block */
  845. if (return_gpe_block) {
  846. (*return_gpe_block) = gpe_block;
  847. }
  848. ACPI_DEBUG_PRINT((ACPI_DB_INIT,
  849. "GPE %02X to %02X [%4.4s] %u regs on int 0x%X\n",
  850. (u32) gpe_block->block_base_number,
  851. (u32) (gpe_block->block_base_number +
  852. (gpe_block->gpe_count - 1)),
  853. gpe_device->name.ascii, gpe_block->register_count,
  854. interrupt_number));
  855. /* Update global count of currently available GPEs */
  856. acpi_current_gpe_count += gpe_block->gpe_count;
  857. return_ACPI_STATUS(AE_OK);
  858. }
  859. /*******************************************************************************
  860. *
  861. * FUNCTION: acpi_ev_update_gpes
  862. *
  863. * PARAMETERS: table_owner_id - ID of the newly-loaded ACPI table
  864. *
  865. * RETURN: None
  866. *
  867. * DESCRIPTION: Check for new GPE methods (_Lxx/_Exx) made available as a
  868. * result of a Load() or load_table() operation. If new GPE
  869. * methods have been installed, register the new methods and
870. * enable any runtime GPEs that are associated with them. Also,
  871. * run any newly loaded _PRW methods in order to discover any
  872. * new CAN_WAKE GPEs.
  873. *
  874. ******************************************************************************/
void acpi_ev_update_gpes(acpi_owner_id table_owner_id)
{
	struct acpi_gpe_xrupt_info *gpe_xrupt_info;
	struct acpi_gpe_block_info *gpe_block;
	struct acpi_gpe_walk_info walk_info;
	acpi_status status = AE_OK;
	u32 new_wake_gpe_count = 0;

	/* We will examine only _PRW/_Lxx/_Exx methods owned by this table */

	walk_info.owner_id = table_owner_id;
	walk_info.execute_by_owner_id = TRUE;
	walk_info.count = 0;

	if (acpi_gbl_leave_wake_gpes_disabled) {
		/*
		 * 1) Run any newly-loaded _PRW methods to find any GPEs that
		 * can now be marked as CAN_WAKE GPEs. Note: We must run the
		 * _PRW methods before we process the _Lxx/_Exx methods because
		 * we will enable all runtime GPEs associated with the new
		 * _Lxx/_Exx methods at the time we process those methods.
		 *
		 * Unlock interpreter so that we can run the _PRW methods.
		 */
		walk_info.gpe_block = NULL;
		walk_info.gpe_device = NULL;

		/*
		 * Interpreter is exited for the duration of the _PRW walk and
		 * re-entered immediately after; the exit/enter pairing must
		 * bracket exactly this walk.
		 */
		acpi_ex_exit_interpreter();
		status =
		    acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
					   ACPI_UINT32_MAX,
					   ACPI_NS_WALK_NO_UNLOCK,
					   acpi_ev_match_prw_and_gpe, NULL,
					   &walk_info, NULL);
		if (ACPI_FAILURE(status)) {
			/* Log and continue; a failed _PRW walk is not fatal */
			ACPI_EXCEPTION((AE_INFO, status,
					"While executing _PRW methods"));
		}
		acpi_ex_enter_interpreter();
		new_wake_gpe_count = walk_info.count;
	}

	/*
	 * 2) Find any _Lxx/_Exx GPE methods that have just been loaded.
	 *
	 * Any GPEs that correspond to new _Lxx/_Exx methods and are not
	 * marked as CAN_WAKE are immediately enabled.
	 *
	 * Examine the namespace underneath each gpe_device within the
	 * gpe_block lists.
	 */
	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		return;
	}

	/* Reset count; the method walk enables matching GPEs as it goes */

	walk_info.count = 0;
	walk_info.enable_this_gpe = TRUE;

	/* Walk the interrupt level descriptor list */

	gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
	while (gpe_xrupt_info) {

		/* Walk all Gpe Blocks attached to this interrupt level */

		gpe_block = gpe_xrupt_info->gpe_block_list_head;
		while (gpe_block) {
			walk_info.gpe_block = gpe_block;
			walk_info.gpe_device = gpe_block->node;

			status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD,
							walk_info.gpe_device,
							ACPI_UINT32_MAX,
							ACPI_NS_WALK_NO_UNLOCK,
							acpi_ev_match_gpe_method,
							NULL, &walk_info, NULL);
			if (ACPI_FAILURE(status)) {
				/* Log and continue with the next block */
				ACPI_EXCEPTION((AE_INFO, status,
						"While decoding _Lxx/_Exx methods"));
			}
			gpe_block = gpe_block->next;
		}
		gpe_xrupt_info = gpe_xrupt_info->next;
	}

	/* Report only if something was actually found/enabled */

	if (walk_info.count || new_wake_gpe_count) {
		ACPI_INFO((AE_INFO,
			   "Enabled %u new runtime GPEs, added %u new wakeup GPEs",
			   walk_info.count, new_wake_gpe_count));
	}

	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
	return;
}
  957. /*******************************************************************************
  958. *
  959. * FUNCTION: acpi_ev_initialize_gpe_block
  960. *
  961. * PARAMETERS: gpe_device - Handle to the parent GPE block
  962. * gpe_block - Gpe Block info
  963. *
  964. * RETURN: Status
  965. *
  966. * DESCRIPTION: Initialize and enable a GPE block. First find and run any
967. * _PRW methods associated with the block, then enable the
  968. * appropriate GPEs.
  969. * Note: Assumes namespace is locked.
  970. *
  971. ******************************************************************************/
  972. acpi_status
  973. acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
  974. struct acpi_gpe_block_info *gpe_block)
  975. {
  976. acpi_status status;
  977. struct acpi_gpe_event_info *gpe_event_info;
  978. struct acpi_gpe_walk_info walk_info;
  979. u32 wake_gpe_count;
  980. u32 gpe_enabled_count;
  981. u32 gpe_index;
  982. u32 gpe_number;
  983. u32 i;
  984. u32 j;
  985. ACPI_FUNCTION_TRACE(ev_initialize_gpe_block);
  986. /* Ignore a null GPE block (e.g., if no GPE block 1 exists) */
  987. if (!gpe_block) {
  988. return_ACPI_STATUS(AE_OK);
  989. }
  990. /*
  991. * Runtime option: Should wake GPEs be enabled at runtime? The default
  992. * is no, they should only be enabled just as the machine goes to sleep.
  993. */
  994. if (acpi_gbl_leave_wake_gpes_disabled) {
  995. /*
  996. * Differentiate runtime vs wake GPEs, via the _PRW control methods.
  997. * Each GPE that has one or more _PRWs that reference it is by
  998. * definition a wake GPE and will not be enabled while the machine
  999. * is running.
  1000. */
  1001. walk_info.gpe_block = gpe_block;
  1002. walk_info.gpe_device = gpe_device;
  1003. walk_info.execute_by_owner_id = FALSE;
  1004. status =
  1005. acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
  1006. ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK,
  1007. acpi_ev_match_prw_and_gpe, NULL,
  1008. &walk_info, NULL);
  1009. if (ACPI_FAILURE(status)) {
  1010. ACPI_EXCEPTION((AE_INFO, status,
  1011. "While executing _PRW methods"));
  1012. }
  1013. }
  1014. /*
  1015. * Enable all GPEs that have a corresponding method and are not
  1016. * capable of generating wakeups. Any other GPEs within this block
  1017. * must be enabled via the acpi_enable_gpe interface.
  1018. */
  1019. wake_gpe_count = 0;
  1020. gpe_enabled_count = 0;
  1021. if (gpe_device == acpi_gbl_fadt_gpe_device) {
  1022. gpe_device = NULL;
  1023. }
  1024. for (i = 0; i < gpe_block->register_count; i++) {
  1025. for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
  1026. /* Get the info block for this particular GPE */
  1027. gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j;
  1028. gpe_event_info = &gpe_block->event_info[gpe_index];
  1029. if (gpe_event_info->flags & ACPI_GPE_CAN_WAKE) {
  1030. wake_gpe_count++;
  1031. if (acpi_gbl_leave_wake_gpes_disabled) {
  1032. continue;
  1033. }
  1034. }
  1035. /* Ignore GPEs that have no corresponding _Lxx/_Exx method */
  1036. if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD)) {
  1037. continue;
  1038. }
  1039. /* Enable this GPE */
  1040. gpe_number = gpe_index + gpe_block->block_base_number;
  1041. status = acpi_enable_gpe(gpe_device, gpe_number,
  1042. ACPI_GPE_TYPE_RUNTIME);
  1043. if (ACPI_FAILURE(status)) {
  1044. ACPI_EXCEPTION((AE_INFO, status,
  1045. "Could not enable GPE 0x%02X",
  1046. gpe_number));
  1047. continue;
  1048. }
  1049. gpe_enabled_count++;
  1050. }
  1051. }
  1052. if (gpe_enabled_count || wake_gpe_count) {
  1053. ACPI_DEBUG_PRINT((ACPI_DB_INIT,
  1054. "Enabled %u Runtime GPEs, added %u Wake GPEs in this block\n",
  1055. gpe_enabled_count, wake_gpe_count));
  1056. }
  1057. return_ACPI_STATUS(AE_OK);
  1058. }
  1059. /*******************************************************************************
  1060. *
  1061. * FUNCTION: acpi_ev_gpe_initialize
  1062. *
  1063. * PARAMETERS: None
  1064. *
  1065. * RETURN: Status
  1066. *
  1067. * DESCRIPTION: Initialize the GPE data structures
  1068. *
  1069. ******************************************************************************/
acpi_status acpi_ev_gpe_initialize(void)
{
	u32 register_count0 = 0;
	u32 register_count1 = 0;
	u32 gpe_number_max = 0;
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_gpe_initialize);

	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/*
	 * Initialize the GPE Block(s) defined in the FADT
	 *
	 * Why the GPE register block lengths are divided by 2: From the ACPI
	 * Spec, section "General-Purpose Event Registers", we have:
	 *
	 * "Each register block contains two registers of equal length
	 * GPEx_STS and GPEx_EN (where x is 0 or 1). The length of the
	 * GPE0_STS and GPE0_EN registers is equal to half the GPE0_LEN
	 * The length of the GPE1_STS and GPE1_EN registers is equal to
	 * half the GPE1_LEN. If a generic register block is not supported
	 * then its respective block pointer and block length values in the
	 * FADT table contain zeros. The GPE0_LEN and GPE1_LEN do not need
	 * to be the same size."
	 */

	/*
	 * Determine the maximum GPE number for this machine.
	 *
	 * Note: both GPE0 and GPE1 are optional, and either can exist without
	 * the other.
	 *
	 * If EITHER the register length OR the block address are zero, then that
	 * particular block is not supported.
	 */
	if (acpi_gbl_FADT.gpe0_block_length &&
	    acpi_gbl_FADT.xgpe0_block.address) {

		/* GPE block 0 exists (has both length and address > 0) */

		register_count0 = (u16) (acpi_gbl_FADT.gpe0_block_length / 2);

		/* GPE0 always starts at GPE number zero */

		gpe_number_max =
		    (register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1;

		/* Install GPE Block 0 */

		status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
						  &acpi_gbl_FADT.xgpe0_block,
						  register_count0, 0,
						  acpi_gbl_FADT.sci_interrupt,
						  &acpi_gbl_gpe_fadt_blocks[0]);
		if (ACPI_FAILURE(status)) {
			/* Non-fatal: continue and try GPE block 1 */
			ACPI_EXCEPTION((AE_INFO, status,
					"Could not create GPE Block 0"));
		}
	}
	if (acpi_gbl_FADT.gpe1_block_length &&
	    acpi_gbl_FADT.xgpe1_block.address) {

		/* GPE block 1 exists (has both length and address > 0) */

		register_count1 = (u16) (acpi_gbl_FADT.gpe1_block_length / 2);

		/* Check for GPE0/GPE1 overlap (if both banks exist) */

		if ((register_count0) &&
		    (gpe_number_max >= acpi_gbl_FADT.gpe1_base)) {
			ACPI_ERROR((AE_INFO,
				    "GPE0 block (GPE 0 to %u) overlaps the GPE1 block "
				    "(GPE %u to %u) - Ignoring GPE1",
				    gpe_number_max, acpi_gbl_FADT.gpe1_base,
				    acpi_gbl_FADT.gpe1_base +
				    ((register_count1 *
				      ACPI_GPE_REGISTER_WIDTH) - 1)));

			/* Ignore GPE1 block by setting the register count to zero */

			register_count1 = 0;
		} else {
			/* Install GPE Block 1 */

			status =
			    acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
						     &acpi_gbl_FADT.xgpe1_block,
						     register_count1,
						     acpi_gbl_FADT.gpe1_base,
						     acpi_gbl_FADT.
						     sci_interrupt,
						     &acpi_gbl_gpe_fadt_blocks
						     [1]);
			if (ACPI_FAILURE(status)) {
				ACPI_EXCEPTION((AE_INFO, status,
						"Could not create GPE Block 1"));
			}

			/*
			 * GPE0 and GPE1 do not have to be contiguous in the GPE number
			 * space. However, GPE0 always starts at GPE number zero.
			 */
			gpe_number_max = acpi_gbl_FADT.gpe1_base +
			    ((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1);
		}
	}

	/* Exit if there are no GPE registers */

	if ((register_count0 + register_count1) == 0) {

		/* GPEs are not required by ACPI, this is OK */

		ACPI_DEBUG_PRINT((ACPI_DB_INIT,
				  "There are no GPE blocks defined in the FADT\n"));
		status = AE_OK;
		goto cleanup;
	}

	/* Check for Max GPE number out-of-range */

	if (gpe_number_max > ACPI_GPE_MAX) {
		ACPI_ERROR((AE_INFO,
			    "Maximum GPE number from FADT is too large: 0x%X",
			    gpe_number_max));
		status = AE_BAD_VALUE;
		goto cleanup;
	}

cleanup:
	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);

	/*
	 * NOTE(review): AE_OK is returned unconditionally, so the
	 * status = AE_OK / AE_BAD_VALUE stores before the gotos above are
	 * dead. This is presumably intentional (GPEs are optional and init
	 * should not abort the whole subsystem), but the dead stores suggest
	 * the author may have meant return_ACPI_STATUS(status) — confirm
	 * against callers before changing.
	 */
	return_ACPI_STATUS(AE_OK);
}