via-pmu.c
  1. /*
  2. * Device driver for the via-pmu on Apple Powermacs.
  3. *
  4. * The VIA (versatile interface adapter) interfaces to the PMU,
  5. * a 6805 microprocessor core whose primary function is to control
  6. * battery charging and system power on the PowerBook 3400 and 2400.
  7. * The PMU also controls the ADB (Apple Desktop Bus) which connects
  8. * to the keyboard and mouse, as well as the non-volatile RAM
  9. * and the RTC (real time clock) chip.
  10. *
  11. * Copyright (C) 1998 Paul Mackerras and Fabio Riccardi.
  12. * Copyright (C) 2001-2002 Benjamin Herrenschmidt
  13. *
  14. * THIS DRIVER IS BECOMING A TOTAL MESS !
  15. * - Cleanup atomically disabling reply to PMU events after
  16. * a sleep or a freq. switch
  17. * - Move sleep code out of here to pmac_pm, merge into new
  18. * common PM infrastructure
  19. * - Save/Restore PCI space properly
  20. *
  21. */
  22. #include <stdarg.h>
  23. #include <linux/types.h>
  24. #include <linux/errno.h>
  25. #include <linux/kernel.h>
  26. #include <linux/delay.h>
  27. #include <linux/sched.h>
  28. #include <linux/miscdevice.h>
  29. #include <linux/blkdev.h>
  30. #include <linux/pci.h>
  31. #include <linux/slab.h>
  32. #include <linux/poll.h>
  33. #include <linux/adb.h>
  34. #include <linux/pmu.h>
  35. #include <linux/cuda.h>
  36. #include <linux/module.h>
  37. #include <linux/spinlock.h>
  38. #include <linux/pm.h>
  39. #include <linux/proc_fs.h>
  40. #include <linux/init.h>
  41. #include <linux/interrupt.h>
  42. #include <linux/device.h>
  43. #include <linux/sysdev.h>
  44. #include <linux/freezer.h>
  45. #include <linux/syscalls.h>
  46. #include <linux/suspend.h>
  47. #include <linux/cpu.h>
  48. #include <asm/prom.h>
  49. #include <asm/machdep.h>
  50. #include <asm/io.h>
  51. #include <asm/pgtable.h>
  52. #include <asm/system.h>
  53. #include <asm/sections.h>
  54. #include <asm/irq.h>
  55. #include <asm/pmac_feature.h>
  56. #include <asm/pmac_pfunc.h>
  57. #include <asm/pmac_low_i2c.h>
  58. #include <asm/uaccess.h>
  59. #include <asm/mmu_context.h>
  60. #include <asm/cputable.h>
  61. #include <asm/time.h>
  62. #include <asm/backlight.h>
  63. #include "via-pmu-event.h"
  64. /* Some compile options */
  65. #define DEBUG_SLEEP
  66. /* Misc minor number allocated for /dev/pmu */
  67. #define PMU_MINOR 154
  68. /* How many iterations between battery polls */
  69. #define BATTERY_POLLING_COUNT 2
  70. static volatile unsigned char __iomem *via;
  71. /* VIA registers - spaced 0x200 bytes apart */
  72. #define RS 0x200 /* skip between registers */
  73. #define B 0 /* B-side data */
  74. #define A RS /* A-side data */
  75. #define DIRB (2*RS) /* B-side direction (1=output) */
  76. #define DIRA (3*RS) /* A-side direction (1=output) */
  77. #define T1CL (4*RS) /* Timer 1 ctr/latch (low 8 bits) */
  78. #define T1CH (5*RS) /* Timer 1 counter (high 8 bits) */
  79. #define T1LL (6*RS) /* Timer 1 latch (low 8 bits) */
  80. #define T1LH (7*RS) /* Timer 1 latch (high 8 bits) */
  81. #define T2CL (8*RS) /* Timer 2 ctr/latch (low 8 bits) */
  82. #define T2CH (9*RS) /* Timer 2 counter (high 8 bits) */
  83. #define SR (10*RS) /* Shift register */
  84. #define ACR (11*RS) /* Auxiliary control register */
  85. #define PCR (12*RS) /* Peripheral control register */
  86. #define IFR (13*RS) /* Interrupt flag register */
  87. #define IER (14*RS) /* Interrupt enable register */
  88. #define ANH (15*RS) /* A-side data, no handshake */
  89. /* Bits in B data register: both active low */
  90. #define TACK 0x08 /* Transfer acknowledge (input) */
  91. #define TREQ 0x10 /* Transfer request (output) */
  92. /* Bits in ACR */
  93. #define SR_CTRL 0x1c /* Shift register control bits */
  94. #define SR_EXT 0x0c /* Shift on external clock */
  95. #define SR_OUT 0x10 /* Shift out if 1 */
  96. /* Bits in IFR and IER */
  97. #define IER_SET 0x80 /* set bits in IER */
  98. #define IER_CLR 0 /* clear bits in IER */
  99. #define SR_INT 0x04 /* Shift register full/empty */
  100. #define CB2_INT 0x08
  101. #define CB1_INT 0x10 /* transition on CB1 input */
  102. static volatile enum pmu_state {
  103. idle,
  104. sending,
  105. intack,
  106. reading,
  107. reading_intr,
  108. locked,
  109. } pmu_state;
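/*
 * Rough lifecycle of pmu_state (summarized from the code below): idle ->
 * sending (command, optional length byte, then data bytes shifted out),
 * then either back to idle (no reply expected) or -> reading (reply bytes
 * shifted in). An interrupt acknowledge goes idle -> intack -> reading_intr.
 * After the reply to a PMU_SLEEP or PMU_CPU_SPEED request, the state is
 * left as "locked" until pmu_unlock() is called.
 */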
  110. static volatile enum int_data_state {
  111. int_data_empty,
  112. int_data_fill,
  113. int_data_ready,
  114. int_data_flush
  115. } int_data_state[2] = { int_data_empty, int_data_empty };
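/*
 * Two interrupt data buffers are used: while one buffer is being filled
 * from the PMU (int_data_fill), the other may still be queued for
 * processing (int_data_ready / int_data_flush) by pmu_handle_data().
 */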
  116. static struct adb_request *current_req;
  117. static struct adb_request *last_req;
  118. static struct adb_request *req_awaiting_reply;
  119. static unsigned char interrupt_data[2][32];
  120. static int interrupt_data_len[2];
  121. static int int_data_last;
  122. static unsigned char *reply_ptr;
  123. static int data_index;
  124. static int data_len;
  125. static volatile int adb_int_pending;
  126. static volatile int disable_poll;
  127. static struct device_node *vias;
  128. static int pmu_kind = PMU_UNKNOWN;
  129. static int pmu_fully_inited;
  130. static int pmu_has_adb;
  131. static struct device_node *gpio_node;
  132. static unsigned char __iomem *gpio_reg;
  133. static int gpio_irq = NO_IRQ;
  134. static int gpio_irq_enabled = -1;
  135. static volatile int pmu_suspended;
  136. static spinlock_t pmu_lock;
  137. static u8 pmu_intr_mask;
  138. static int pmu_version;
  139. static int drop_interrupts;
  140. #if defined(CONFIG_PM_SLEEP) && defined(CONFIG_PPC32)
  141. static int option_lid_wakeup = 1;
  142. #endif /* CONFIG_PM_SLEEP && CONFIG_PPC32 */
  143. #if (defined(CONFIG_PM_SLEEP)&&defined(CONFIG_PPC32))||defined(CONFIG_PMAC_BACKLIGHT_LEGACY)
  144. static int sleep_in_progress;
  145. #endif
  146. static unsigned long async_req_locks;
  147. static unsigned int pmu_irq_stats[11];
  148. static struct proc_dir_entry *proc_pmu_root;
  149. static struct proc_dir_entry *proc_pmu_info;
  150. static struct proc_dir_entry *proc_pmu_irqstats;
  151. static struct proc_dir_entry *proc_pmu_options;
  152. static int option_server_mode;
  153. int pmu_battery_count;
  154. int pmu_cur_battery;
  155. unsigned int pmu_power_flags = PMU_PWR_AC_PRESENT;
  156. struct pmu_battery_info pmu_batteries[PMU_MAX_BATTERIES];
  157. static int query_batt_timer = BATTERY_POLLING_COUNT;
  158. static struct adb_request batt_req;
  159. static struct proc_dir_entry *proc_pmu_batt[PMU_MAX_BATTERIES];
  160. int __fake_sleep;
  161. int asleep;
  162. #ifdef CONFIG_ADB
  163. static int adb_dev_map;
  164. static int pmu_adb_flags;
  165. static int pmu_probe(void);
  166. static int pmu_init(void);
  167. static int pmu_send_request(struct adb_request *req, int sync);
  168. static int pmu_adb_autopoll(int devs);
  169. static int pmu_adb_reset_bus(void);
  170. #endif /* CONFIG_ADB */
  171. static int init_pmu(void);
  172. static void pmu_start(void);
  173. static irqreturn_t via_pmu_interrupt(int irq, void *arg);
  174. static irqreturn_t gpio1_interrupt(int irq, void *arg);
  175. static int proc_get_info(char *page, char **start, off_t off,
  176. int count, int *eof, void *data);
  177. static int proc_get_irqstats(char *page, char **start, off_t off,
  178. int count, int *eof, void *data);
  179. static void pmu_pass_intr(unsigned char *data, int len);
  180. static int proc_get_batt(char *page, char **start, off_t off,
  181. int count, int *eof, void *data);
  182. static int proc_read_options(char *page, char **start, off_t off,
  183. int count, int *eof, void *data);
  184. static int proc_write_options(struct file *file, const char __user *buffer,
  185. unsigned long count, void *data);
  186. #ifdef CONFIG_ADB
  187. struct adb_driver via_pmu_driver = {
  188. "PMU",
  189. pmu_probe,
  190. pmu_init,
  191. pmu_send_request,
  192. pmu_adb_autopoll,
  193. pmu_poll_adb,
  194. pmu_adb_reset_bus
  195. };
  196. #endif /* CONFIG_ADB */
  197. extern void low_sleep_handler(void);
  198. extern void enable_kernel_altivec(void);
  199. extern void enable_kernel_fp(void);
  200. #ifdef DEBUG_SLEEP
  201. int pmu_polled_request(struct adb_request *req);
  202. int pmu_wink(struct adb_request *req);
  203. #endif
  204. /*
  205. * This table indicates for each PMU opcode:
  206. * - the number of data bytes to be sent with the command, or -1
  207. * if a length byte should be sent,
  208. * - the number of response bytes which the PMU will return, or
  209. * -1 if it will send a length byte.
  210. */
  211. static const s8 pmu_data_len[256][2] = {
  212. /* 0 1 2 3 4 5 6 7 */
  213. /*00*/ {-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
  214. /*08*/ {-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},
  215. /*10*/ { 1, 0},{ 1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
  216. /*18*/ { 0, 1},{ 0, 1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{ 0, 0},
  217. /*20*/ {-1, 0},{ 0, 0},{ 2, 0},{ 1, 0},{ 1, 0},{-1, 0},{-1, 0},{-1, 0},
  218. /*28*/ { 0,-1},{ 0,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{ 0,-1},
  219. /*30*/ { 4, 0},{20, 0},{-1, 0},{ 3, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
  220. /*38*/ { 0, 4},{ 0,20},{ 2,-1},{ 2, 1},{ 3,-1},{-1,-1},{-1,-1},{ 4, 0},
  221. /*40*/ { 1, 0},{ 1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
  222. /*48*/ { 0, 1},{ 0, 1},{-1,-1},{ 1, 0},{ 1, 0},{-1,-1},{-1,-1},{-1,-1},
  223. /*50*/ { 1, 0},{ 0, 0},{ 2, 0},{ 2, 0},{-1, 0},{ 1, 0},{ 3, 0},{ 1, 0},
  224. /*58*/ { 0, 1},{ 1, 0},{ 0, 2},{ 0, 2},{ 0,-1},{-1,-1},{-1,-1},{-1,-1},
  225. /*60*/ { 2, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
  226. /*68*/ { 0, 3},{ 0, 3},{ 0, 2},{ 0, 8},{ 0,-1},{ 0,-1},{-1,-1},{-1,-1},
  227. /*70*/ { 1, 0},{ 1, 0},{ 1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
  228. /*78*/ { 0,-1},{ 0,-1},{-1,-1},{-1,-1},{-1,-1},{ 5, 1},{ 4, 1},{ 4, 1},
  229. /*80*/ { 4, 0},{-1, 0},{ 0, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
  230. /*88*/ { 0, 5},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},
  231. /*90*/ { 1, 0},{ 2, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
  232. /*98*/ { 0, 1},{ 0, 1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},
  233. /*a0*/ { 2, 0},{ 2, 0},{ 2, 0},{ 4, 0},{-1, 0},{ 0, 0},{-1, 0},{-1, 0},
  234. /*a8*/ { 1, 1},{ 1, 0},{ 3, 0},{ 2, 0},{-1,-1},{-1,-1},{-1,-1},{-1,-1},
  235. /*b0*/ {-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
  236. /*b8*/ {-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},
  237. /*c0*/ {-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
  238. /*c8*/ {-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},
  239. /*d0*/ { 0, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
  240. /*d8*/ { 1, 1},{ 1, 1},{-1,-1},{-1,-1},{ 0, 1},{ 0,-1},{-1,-1},{-1,-1},
  241. /*e0*/ {-1, 0},{ 4, 0},{ 0, 1},{-1, 0},{-1, 0},{ 4, 0},{-1, 0},{-1, 0},
  242. /*e8*/ { 3,-1},{-1,-1},{ 0, 1},{-1,-1},{ 0,-1},{-1,-1},{-1,-1},{ 0, 0},
  243. /*f0*/ {-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
  244. /*f8*/ {-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},
  245. };
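/*
 * Example of reading this table (illustrative): an entry of { 2, -1 }
 * means the command byte is followed by exactly 2 data bytes, and the
 * PMU's reply starts with a length byte followed by that many bytes.
 * An entry of { -1, 0 } means we must send an explicit length byte
 * ourselves and no reply is expected.
 */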
  246. static char *pbook_type[] = {
  247. "Unknown PowerBook",
  248. "PowerBook 2400/3400/3500(G3)",
  249. "PowerBook G3 Series",
  250. "1999 PowerBook G3",
  251. "Core99"
  252. };
  253. int __init find_via_pmu(void)
  254. {
  255. u64 taddr;
  256. const u32 *reg;
  257. if (via != 0)
  258. return 1;
  259. vias = of_find_node_by_name(NULL, "via-pmu");
  260. if (vias == NULL)
  261. return 0;
  262. reg = of_get_property(vias, "reg", NULL);
  263. if (reg == NULL) {
  264. printk(KERN_ERR "via-pmu: No \"reg\" property !\n");
  265. goto fail;
  266. }
  267. taddr = of_translate_address(vias, reg);
  268. if (taddr == OF_BAD_ADDR) {
  269. printk(KERN_ERR "via-pmu: Can't translate address !\n");
  270. goto fail;
  271. }
  272. spin_lock_init(&pmu_lock);
  273. pmu_has_adb = 1;
  274. pmu_intr_mask = PMU_INT_PCEJECT |
  275. PMU_INT_SNDBRT |
  276. PMU_INT_ADB |
  277. PMU_INT_TICK;
  278. if (vias->parent->name && ((strcmp(vias->parent->name, "ohare") == 0)
  279. || of_device_is_compatible(vias->parent, "ohare")))
  280. pmu_kind = PMU_OHARE_BASED;
  281. else if (of_device_is_compatible(vias->parent, "paddington"))
  282. pmu_kind = PMU_PADDINGTON_BASED;
  283. else if (of_device_is_compatible(vias->parent, "heathrow"))
  284. pmu_kind = PMU_HEATHROW_BASED;
  285. else if (of_device_is_compatible(vias->parent, "Keylargo")
  286. || of_device_is_compatible(vias->parent, "K2-Keylargo")) {
  287. struct device_node *gpiop;
  288. struct device_node *adbp;
  289. u64 gaddr = OF_BAD_ADDR;
  290. pmu_kind = PMU_KEYLARGO_BASED;
  291. adbp = of_find_node_by_type(NULL, "adb");
  292. pmu_has_adb = (adbp != NULL);
  293. of_node_put(adbp);
  294. pmu_intr_mask = PMU_INT_PCEJECT |
  295. PMU_INT_SNDBRT |
  296. PMU_INT_ADB |
  297. PMU_INT_TICK |
  298. PMU_INT_ENVIRONMENT;
  299. gpiop = of_find_node_by_name(NULL, "gpio");
  300. if (gpiop) {
  301. reg = of_get_property(gpiop, "reg", NULL);
  302. if (reg)
  303. gaddr = of_translate_address(gpiop, reg);
  304. if (gaddr != OF_BAD_ADDR)
  305. gpio_reg = ioremap(gaddr, 0x10);
  306. }
  307. if (gpio_reg == NULL) {
  308. printk(KERN_ERR "via-pmu: Can't find GPIO reg !\n");
  309. goto fail_gpio;
  310. }
  311. } else
  312. pmu_kind = PMU_UNKNOWN;
  313. via = ioremap(taddr, 0x2000);
  314. if (via == NULL) {
  315. printk(KERN_ERR "via-pmu: Can't map address !\n");
  316. goto fail;
  317. }
  318. out_8(&via[IER], IER_CLR | 0x7f); /* disable all intrs */
  319. out_8(&via[IFR], 0x7f); /* clear IFR */
  320. pmu_state = idle;
  321. if (!init_pmu()) {
  322. via = NULL;
  323. return 0;
  324. }
  325. printk(KERN_INFO "PMU driver v%d initialized for %s, firmware: %02x\n",
  326. PMU_DRIVER_VERSION, pbook_type[pmu_kind], pmu_version);
  327. sys_ctrler = SYS_CTRLER_PMU;
  328. return 1;
  329. fail:
  330. of_node_put(vias);
  331. iounmap(gpio_reg);
  332. gpio_reg = NULL;
  333. fail_gpio:
  334. vias = NULL;
  335. return 0;
  336. }
  337. #ifdef CONFIG_ADB
  338. static int pmu_probe(void)
  339. {
  340. return vias == NULL? -ENODEV: 0;
  341. }
  342. static int __init pmu_init(void)
  343. {
  344. if (vias == NULL)
  345. return -ENODEV;
  346. return 0;
  347. }
  348. #endif /* CONFIG_ADB */
  349. /*
  350. * We can't wait until pmu_init gets called; that happens too late.
  351. * It happens after IDE and SCSI initialization, which can take a few
  352. * seconds, and by that time the PMU could have given up on us and
  353. * turned us off.
  354. * Thus this is called with arch_initcall rather than device_initcall.
  355. */
  356. static int __init via_pmu_start(void)
  357. {
  358. unsigned int irq;
  359. if (vias == NULL)
  360. return -ENODEV;
  361. batt_req.complete = 1;
  362. irq = irq_of_parse_and_map(vias, 0);
  363. if (irq == NO_IRQ) {
  364. printk(KERN_ERR "via-pmu: can't map interrupt\n");
  365. return -ENODEV;
  366. }
  367. if (request_irq(irq, via_pmu_interrupt, 0, "VIA-PMU", (void *)0)) {
  368. printk(KERN_ERR "via-pmu: can't request irq %d\n", irq);
  369. return -ENODEV;
  370. }
  371. if (pmu_kind == PMU_KEYLARGO_BASED) {
  372. gpio_node = of_find_node_by_name(NULL, "extint-gpio1");
  373. if (gpio_node == NULL)
  374. gpio_node = of_find_node_by_name(NULL,
  375. "pmu-interrupt");
  376. if (gpio_node)
  377. gpio_irq = irq_of_parse_and_map(gpio_node, 0);
  378. if (gpio_irq != NO_IRQ) {
  379. if (request_irq(gpio_irq, gpio1_interrupt, 0,
  380. "GPIO1 ADB", (void *)0))
  381. printk(KERN_ERR "pmu: can't get irq %d"
  382. " (GPIO1)\n", gpio_irq);
  383. else
  384. gpio_irq_enabled = 1;
  385. }
  386. }
  387. /* Enable interrupts */
  388. out_8(&via[IER], IER_SET | SR_INT | CB1_INT);
  389. pmu_fully_inited = 1;
  390. /* Make sure the PMU settles down before continuing. This is _very_ important
  391. * since the IDE probe may shut interrupts down for quite a bit of time. If
  392. * a PMU communication is pending while this happens, the PMU may time out.
  393. * Note that on Core99 machines, the PMU keeps sending us environment
  394. * messages, so we should find a way to either fix IDE or make it call
  395. * pmu_suspend() before masking interrupts. This can also happen while
  396. * scrolling with some fbdevs.
  397. */
  398. do {
  399. pmu_poll();
  400. } while (pmu_state != idle);
  401. return 0;
  402. }
  403. arch_initcall(via_pmu_start);
  404. /*
  405. * This has to be done after pci_init, which is a subsys_initcall.
  406. */
  407. static int __init via_pmu_dev_init(void)
  408. {
  409. if (vias == NULL)
  410. return -ENODEV;
  411. #ifdef CONFIG_PMAC_BACKLIGHT
  412. /* Initialize backlight */
  413. pmu_backlight_init();
  414. #endif
  415. #ifdef CONFIG_PPC32
  416. if (machine_is_compatible("AAPL,3400/2400") ||
  417. machine_is_compatible("AAPL,3500")) {
  418. int mb = pmac_call_feature(PMAC_FTR_GET_MB_INFO,
  419. NULL, PMAC_MB_INFO_MODEL, 0);
  420. pmu_battery_count = 1;
  421. if (mb == PMAC_TYPE_COMET)
  422. pmu_batteries[0].flags |= PMU_BATT_TYPE_COMET;
  423. else
  424. pmu_batteries[0].flags |= PMU_BATT_TYPE_HOOPER;
  425. } else if (machine_is_compatible("AAPL,PowerBook1998") ||
  426. machine_is_compatible("PowerBook1,1")) {
  427. pmu_battery_count = 2;
  428. pmu_batteries[0].flags |= PMU_BATT_TYPE_SMART;
  429. pmu_batteries[1].flags |= PMU_BATT_TYPE_SMART;
  430. } else {
  431. struct device_node* prim =
  432. of_find_node_by_name(NULL, "power-mgt");
  433. const u32 *prim_info = NULL;
  434. if (prim)
  435. prim_info = of_get_property(prim, "prim-info", NULL);
  436. if (prim_info) {
  437. /* Other data here is still unknown */
  438. pmu_battery_count = (prim_info[6] >> 16) & 0xff;
  439. pmu_batteries[0].flags |= PMU_BATT_TYPE_SMART;
  440. if (pmu_battery_count > 1)
  441. pmu_batteries[1].flags |= PMU_BATT_TYPE_SMART;
  442. }
  443. of_node_put(prim);
  444. }
  445. #endif /* CONFIG_PPC32 */
  446. /* Create /proc/pmu */
  447. proc_pmu_root = proc_mkdir("pmu", NULL);
  448. if (proc_pmu_root) {
  449. long i;
  450. for (i=0; i<pmu_battery_count; i++) {
  451. char title[16];
  452. sprintf(title, "battery_%ld", i);
  453. proc_pmu_batt[i] = create_proc_read_entry(title, 0, proc_pmu_root,
  454. proc_get_batt, (void *)i);
  455. }
  456. proc_pmu_info = create_proc_read_entry("info", 0, proc_pmu_root,
  457. proc_get_info, NULL);
  458. proc_pmu_irqstats = create_proc_read_entry("interrupts", 0, proc_pmu_root,
  459. proc_get_irqstats, NULL);
  460. proc_pmu_options = create_proc_entry("options", 0600, proc_pmu_root);
  461. if (proc_pmu_options) {
  462. proc_pmu_options->read_proc = proc_read_options;
  463. proc_pmu_options->write_proc = proc_write_options;
  464. }
  465. }
  466. return 0;
  467. }
  468. device_initcall(via_pmu_dev_init);
  469. static int
  470. init_pmu(void)
  471. {
  472. int timeout;
  473. struct adb_request req;
  474. out_8(&via[B], via[B] | TREQ); /* negate TREQ */
  475. out_8(&via[DIRB], (via[DIRB] | TREQ) & ~TACK); /* TACK in, TREQ out */
  476. pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, pmu_intr_mask);
  477. timeout = 100000;
  478. while (!req.complete) {
  479. if (--timeout < 0) {
  480. printk(KERN_ERR "init_pmu: no response from PMU\n");
  481. return 0;
  482. }
  483. udelay(10);
  484. pmu_poll();
  485. }
  486. /* ack all pending interrupts */
  487. timeout = 100000;
  488. interrupt_data[0][0] = 1;
  489. while (interrupt_data[0][0] || pmu_state != idle) {
  490. if (--timeout < 0) {
  491. printk(KERN_ERR "init_pmu: timed out acking intrs\n");
  492. return 0;
  493. }
  494. if (pmu_state == idle)
  495. adb_int_pending = 1;
  496. via_pmu_interrupt(0, NULL);
  497. udelay(10);
  498. }
  499. /* Tell PMU we are ready. */
  500. if (pmu_kind == PMU_KEYLARGO_BASED) {
  501. pmu_request(&req, NULL, 2, PMU_SYSTEM_READY, 2);
  502. while (!req.complete)
  503. pmu_poll();
  504. }
  505. /* Read PMU version */
  506. pmu_request(&req, NULL, 1, PMU_GET_VERSION);
  507. pmu_wait_complete(&req);
  508. if (req.reply_len > 0)
  509. pmu_version = req.reply[0];
  510. /* Read server mode setting */
  511. if (pmu_kind == PMU_KEYLARGO_BASED) {
  512. pmu_request(&req, NULL, 2, PMU_POWER_EVENTS,
  513. PMU_PWR_GET_POWERUP_EVENTS);
  514. pmu_wait_complete(&req);
  515. if (req.reply_len == 2) {
  516. if (req.reply[1] & PMU_PWR_WAKEUP_AC_INSERT)
  517. option_server_mode = 1;
  518. printk(KERN_INFO "via-pmu: Server Mode is %s\n",
  519. option_server_mode ? "enabled" : "disabled");
  520. }
  521. }
  522. return 1;
  523. }
  524. int
  525. pmu_get_model(void)
  526. {
  527. return pmu_kind;
  528. }
  529. static void pmu_set_server_mode(int server_mode)
  530. {
  531. struct adb_request req;
  532. if (pmu_kind != PMU_KEYLARGO_BASED)
  533. return;
  534. option_server_mode = server_mode;
  535. pmu_request(&req, NULL, 2, PMU_POWER_EVENTS, PMU_PWR_GET_POWERUP_EVENTS);
  536. pmu_wait_complete(&req);
  537. if (req.reply_len < 2)
  538. return;
  539. if (server_mode)
  540. pmu_request(&req, NULL, 4, PMU_POWER_EVENTS,
  541. PMU_PWR_SET_POWERUP_EVENTS,
  542. req.reply[0], PMU_PWR_WAKEUP_AC_INSERT);
  543. else
  544. pmu_request(&req, NULL, 4, PMU_POWER_EVENTS,
  545. PMU_PWR_CLR_POWERUP_EVENTS,
  546. req.reply[0], PMU_PWR_WAKEUP_AC_INSERT);
  547. pmu_wait_complete(&req);
  548. }
  549. /* This new version of the code for the 2400/3400/3500 PowerBooks
  550. * is inspired by the implementation in gkrellm-pmu
  551. */
  552. static void
  553. done_battery_state_ohare(struct adb_request* req)
  554. {
  555. /* format:
  556. * [0] : flags
  557. * 0x01 : AC indicator
  558. * 0x02 : charging
  559. * 0x04 : battery exist
  560. * 0x08 :
  561. * 0x10 :
  562. * 0x20 : fully charged
  563. * 0x40 : pcharge reset
  564. * 0x80 : battery exist
  565. *
  566. * [1][2] : battery voltage
  567. * [3] : CPU temperature
  568. * [4] : battery temperature
  569. * [5] : current
  570. * [6][7] : pcharge
  571. * --tkoba
  572. */
  573. unsigned int bat_flags = PMU_BATT_TYPE_HOOPER;
  574. long pcharge, charge, vb, vmax, lmax;
  575. long vmax_charging, vmax_charged;
  576. long amperage, voltage, time, max;
  577. int mb = pmac_call_feature(PMAC_FTR_GET_MB_INFO,
  578. NULL, PMAC_MB_INFO_MODEL, 0);
  579. if (req->reply[0] & 0x01)
  580. pmu_power_flags |= PMU_PWR_AC_PRESENT;
  581. else
  582. pmu_power_flags &= ~PMU_PWR_AC_PRESENT;
  583. if (mb == PMAC_TYPE_COMET) {
  584. vmax_charged = 189;
  585. vmax_charging = 213;
  586. lmax = 6500;
  587. } else {
  588. vmax_charged = 330;
  589. vmax_charging = 330;
  590. lmax = 6500;
  591. }
  592. vmax = vmax_charged;
  593. /* If battery installed */
  594. if (req->reply[0] & 0x04) {
  595. bat_flags |= PMU_BATT_PRESENT;
  596. if (req->reply[0] & 0x02)
  597. bat_flags |= PMU_BATT_CHARGING;
  598. vb = (req->reply[1] << 8) | req->reply[2];
  599. voltage = (vb * 265 + 72665) / 10;
  600. amperage = req->reply[5];
  601. if ((req->reply[0] & 0x01) == 0) {
  602. if (amperage > 200)
  603. vb += ((amperage - 200) * 15)/100;
  604. } else if (req->reply[0] & 0x02) {
  605. vb = (vb * 97) / 100;
  606. vmax = vmax_charging;
  607. }
  608. charge = (100 * vb) / vmax;
  609. if (req->reply[0] & 0x40) {
  610. pcharge = (req->reply[6] << 8) + req->reply[7];
  611. if (pcharge > lmax)
  612. pcharge = lmax;
  613. pcharge *= 100;
  614. pcharge = 100 - pcharge / lmax;
  615. if (pcharge < charge)
  616. charge = pcharge;
  617. }
  618. if (amperage > 0)
  619. time = (charge * 16440) / amperage;
  620. else
  621. time = 0;
  622. max = 100;
  623. amperage = -amperage;
  624. } else
  625. charge = max = amperage = voltage = time = 0;
  626. pmu_batteries[pmu_cur_battery].flags = bat_flags;
  627. pmu_batteries[pmu_cur_battery].charge = charge;
  628. pmu_batteries[pmu_cur_battery].max_charge = max;
  629. pmu_batteries[pmu_cur_battery].amperage = amperage;
  630. pmu_batteries[pmu_cur_battery].voltage = voltage;
  631. pmu_batteries[pmu_cur_battery].time_remaining = time;
  632. clear_bit(0, &async_req_locks);
  633. }
  634. static void
  635. done_battery_state_smart(struct adb_request* req)
  636. {
  637. /* format:
  638. * [0] : format of this structure (known: 3,4,5)
  639. * [1] : flags
  640. *
  641. * format 3 & 4:
  642. *
  643. * [2] : charge
  644. * [3] : max charge
  645. * [4] : current
  646. * [5] : voltage
  647. *
  648. * format 5:
  649. *
  650. * [2][3] : charge
  651. * [4][5] : max charge
  652. * [6][7] : current
  653. * [8][9] : voltage
  654. */
  655. unsigned int bat_flags = PMU_BATT_TYPE_SMART;
  656. int amperage;
  657. unsigned int capa, max, voltage;
  658. if (req->reply[1] & 0x01)
  659. pmu_power_flags |= PMU_PWR_AC_PRESENT;
  660. else
  661. pmu_power_flags &= ~PMU_PWR_AC_PRESENT;
  662. capa = max = amperage = voltage = 0;
  663. if (req->reply[1] & 0x04) {
  664. bat_flags |= PMU_BATT_PRESENT;
  665. switch(req->reply[0]) {
  666. case 3:
  667. case 4: capa = req->reply[2];
  668. max = req->reply[3];
  669. amperage = *((signed char *)&req->reply[4]);
  670. voltage = req->reply[5];
  671. break;
  672. case 5: capa = (req->reply[2] << 8) | req->reply[3];
  673. max = (req->reply[4] << 8) | req->reply[5];
  674. amperage = *((signed short *)&req->reply[6]);
  675. voltage = (req->reply[8] << 8) | req->reply[9];
  676. break;
  677. default:
  678. printk(KERN_WARNING "pmu.c : unrecognized battery info, len: %d, %02x %02x %02x %02x\n",
  679. req->reply_len, req->reply[0], req->reply[1], req->reply[2], req->reply[3]);
  680. break;
  681. }
  682. }
  683. if ((req->reply[1] & 0x01) && (amperage > 0))
  684. bat_flags |= PMU_BATT_CHARGING;
  685. pmu_batteries[pmu_cur_battery].flags = bat_flags;
  686. pmu_batteries[pmu_cur_battery].charge = capa;
  687. pmu_batteries[pmu_cur_battery].max_charge = max;
  688. pmu_batteries[pmu_cur_battery].amperage = amperage;
  689. pmu_batteries[pmu_cur_battery].voltage = voltage;
  690. if (amperage) {
  691. if ((req->reply[1] & 0x01) && (amperage > 0))
  692. pmu_batteries[pmu_cur_battery].time_remaining
  693. = ((max-capa) * 3600) / amperage;
  694. else
  695. pmu_batteries[pmu_cur_battery].time_remaining
  696. = (capa * 3600) / (-amperage);
  697. } else
  698. pmu_batteries[pmu_cur_battery].time_remaining = 0;
  699. pmu_cur_battery = (pmu_cur_battery + 1) % pmu_battery_count;
  700. clear_bit(0, &async_req_locks);
  701. }
  702. static void
  703. query_battery_state(void)
  704. {
  705. if (test_and_set_bit(0, &async_req_locks))
  706. return;
  707. if (pmu_kind == PMU_OHARE_BASED)
  708. pmu_request(&batt_req, done_battery_state_ohare,
  709. 1, PMU_BATTERY_STATE);
  710. else
  711. pmu_request(&batt_req, done_battery_state_smart,
  712. 2, PMU_SMART_BATTERY_STATE, pmu_cur_battery+1);
  713. }
  714. static int
  715. proc_get_info(char *page, char **start, off_t off,
  716. int count, int *eof, void *data)
  717. {
  718. char* p = page;
  719. p += sprintf(p, "PMU driver version : %d\n", PMU_DRIVER_VERSION);
  720. p += sprintf(p, "PMU firmware version : %02x\n", pmu_version);
  721. p += sprintf(p, "AC Power : %d\n",
  722. ((pmu_power_flags & PMU_PWR_AC_PRESENT) != 0) || pmu_battery_count == 0);
  723. p += sprintf(p, "Battery count : %d\n", pmu_battery_count);
  724. return p - page;
  725. }
  726. static int
  727. proc_get_irqstats(char *page, char **start, off_t off,
  728. int count, int *eof, void *data)
  729. {
  730. int i;
  731. char* p = page;
  732. static const char *irq_names[] = {
  733. "Total CB1 triggered events",
  734. "Total GPIO1 triggered events",
  735. "PC-Card eject button",
  736. "Sound/Brightness button",
  737. "ADB message",
  738. "Battery state change",
  739. "Environment interrupt",
  740. "Tick timer",
  741. "Ghost interrupt (zero len)",
  742. "Empty interrupt (empty mask)",
  743. "Max irqs in a row"
  744. };
  745. for (i=0; i<11; i++) {
  746. p += sprintf(p, " %2u: %10u (%s)\n",
  747. i, pmu_irq_stats[i], irq_names[i]);
  748. }
  749. return p - page;
  750. }
  751. static int
  752. proc_get_batt(char *page, char **start, off_t off,
  753. int count, int *eof, void *data)
  754. {
  755. long batnum = (long)data;
  756. char *p = page;
  757. p += sprintf(p, "\n");
  758. p += sprintf(p, "flags : %08x\n",
  759. pmu_batteries[batnum].flags);
  760. p += sprintf(p, "charge : %d\n",
  761. pmu_batteries[batnum].charge);
  762. p += sprintf(p, "max_charge : %d\n",
  763. pmu_batteries[batnum].max_charge);
  764. p += sprintf(p, "current : %d\n",
  765. pmu_batteries[batnum].amperage);
  766. p += sprintf(p, "voltage : %d\n",
  767. pmu_batteries[batnum].voltage);
  768. p += sprintf(p, "time rem. : %d\n",
  769. pmu_batteries[batnum].time_remaining);
  770. return p - page;
  771. }
  772. static int
  773. proc_read_options(char *page, char **start, off_t off,
  774. int count, int *eof, void *data)
  775. {
  776. char *p = page;
  777. #if defined(CONFIG_PM_SLEEP) && defined(CONFIG_PPC32)
  778. if (pmu_kind == PMU_KEYLARGO_BASED &&
  779. pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,0,-1) >= 0)
  780. p += sprintf(p, "lid_wakeup=%d\n", option_lid_wakeup);
  781. #endif
  782. if (pmu_kind == PMU_KEYLARGO_BASED)
  783. p += sprintf(p, "server_mode=%d\n", option_server_mode);
  784. return p - page;
  785. }
  786. static int
  787. proc_write_options(struct file *file, const char __user *buffer,
  788. unsigned long count, void *data)
  789. {
  790. char tmp[33];
  791. char *label, *val;
  792. unsigned long fcount = count;
  793. if (!count)
  794. return -EINVAL;
  795. if (count > 32)
  796. count = 32;
  797. if (copy_from_user(tmp, buffer, count))
  798. return -EFAULT;
  799. tmp[count] = 0;
  800. label = tmp;
  801. while(*label == ' ')
  802. label++;
  803. val = label;
  804. while(*val && (*val != '=')) {
  805. if (*val == ' ')
  806. *val = 0;
  807. val++;
  808. }
  809. if ((*val) == 0)
  810. return -EINVAL;
  811. *(val++) = 0;
  812. while(*val == ' ')
  813. val++;
  814. #if defined(CONFIG_PM_SLEEP) && defined(CONFIG_PPC32)
  815. if (pmu_kind == PMU_KEYLARGO_BASED &&
  816. pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,0,-1) >= 0)
  817. if (!strcmp(label, "lid_wakeup"))
  818. option_lid_wakeup = ((*val) == '1');
  819. #endif
  820. if (pmu_kind == PMU_KEYLARGO_BASED && !strcmp(label, "server_mode")) {
  821. int new_value;
  822. new_value = ((*val) == '1');
  823. if (new_value != option_server_mode)
  824. pmu_set_server_mode(new_value);
  825. }
  826. return fcount;
  827. }
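/*
 * Usage sketch (assuming procfs is mounted at /proc): options are written
 * as "label=value" strings, e.g.
 *
 *   echo server_mode=1 > /proc/pmu/options
 *   echo lid_wakeup=0  > /proc/pmu/options
 */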
  828. #ifdef CONFIG_ADB
  829. /* Send an ADB command */
  830. static int
  831. pmu_send_request(struct adb_request *req, int sync)
  832. {
  833. int i, ret;
  834. if ((vias == NULL) || (!pmu_fully_inited)) {
  835. req->complete = 1;
  836. return -ENXIO;
  837. }
  838. ret = -EINVAL;
  839. switch (req->data[0]) {
  840. case PMU_PACKET:
  841. for (i = 0; i < req->nbytes - 1; ++i)
  842. req->data[i] = req->data[i+1];
  843. --req->nbytes;
  844. if (pmu_data_len[req->data[0]][1] != 0) {
  845. req->reply[0] = ADB_RET_OK;
  846. req->reply_len = 1;
  847. } else
  848. req->reply_len = 0;
  849. ret = pmu_queue_request(req);
  850. break;
  851. case CUDA_PACKET:
  852. switch (req->data[1]) {
  853. case CUDA_GET_TIME:
  854. if (req->nbytes != 2)
  855. break;
  856. req->data[0] = PMU_READ_RTC;
  857. req->nbytes = 1;
  858. req->reply_len = 3;
  859. req->reply[0] = CUDA_PACKET;
  860. req->reply[1] = 0;
  861. req->reply[2] = CUDA_GET_TIME;
  862. ret = pmu_queue_request(req);
  863. break;
  864. case CUDA_SET_TIME:
  865. if (req->nbytes != 6)
  866. break;
  867. req->data[0] = PMU_SET_RTC;
  868. req->nbytes = 5;
  869. for (i = 1; i <= 4; ++i)
  870. req->data[i] = req->data[i+1];
  871. req->reply_len = 3;
  872. req->reply[0] = CUDA_PACKET;
  873. req->reply[1] = 0;
  874. req->reply[2] = CUDA_SET_TIME;
  875. ret = pmu_queue_request(req);
  876. break;
  877. }
  878. break;
  879. case ADB_PACKET:
  880. if (!pmu_has_adb)
  881. return -ENXIO;
  882. for (i = req->nbytes - 1; i > 1; --i)
  883. req->data[i+2] = req->data[i];
  884. req->data[3] = req->nbytes - 2;
  885. req->data[2] = pmu_adb_flags;
  886. /*req->data[1] = req->data[1];*/
  887. req->data[0] = PMU_ADB_CMD;
  888. req->nbytes += 2;
  889. req->reply_expected = 1;
  890. req->reply_len = 0;
  891. ret = pmu_queue_request(req);
  892. break;
  893. }
  894. if (ret) {
  895. req->complete = 1;
  896. return ret;
  897. }
  898. if (sync)
  899. while (!req->complete)
  900. pmu_poll();
  901. return 0;
  902. }
  903. /* Enable/disable autopolling */
  904. static int
  905. pmu_adb_autopoll(int devs)
  906. {
  907. struct adb_request req;
  908. if ((vias == NULL) || (!pmu_fully_inited) || !pmu_has_adb)
  909. return -ENXIO;
  910. if (devs) {
  911. adb_dev_map = devs;
  912. pmu_request(&req, NULL, 5, PMU_ADB_CMD, 0, 0x86,
  913. adb_dev_map >> 8, adb_dev_map);
  914. pmu_adb_flags = 2;
  915. } else {
  916. pmu_request(&req, NULL, 1, PMU_ADB_POLL_OFF);
  917. pmu_adb_flags = 0;
  918. }
  919. while (!req.complete)
  920. pmu_poll();
  921. return 0;
  922. }
  923. /* Reset the ADB bus */
  924. static int
  925. pmu_adb_reset_bus(void)
  926. {
  927. struct adb_request req;
  928. int save_autopoll = adb_dev_map;
  929. if ((vias == NULL) || (!pmu_fully_inited) || !pmu_has_adb)
  930. return -ENXIO;
  931. /* anyone got a better idea?? */
  932. pmu_adb_autopoll(0);
  933. req.nbytes = 5;
  934. req.done = NULL;
  935. req.data[0] = PMU_ADB_CMD;
  936. req.data[1] = 0;
  937. req.data[2] = ADB_BUSRESET;
  938. req.data[3] = 0;
  939. req.data[4] = 0;
  940. req.reply_len = 0;
  941. req.reply_expected = 1;
  942. if (pmu_queue_request(&req) != 0) {
  943. printk(KERN_ERR "pmu_adb_reset_bus: pmu_queue_request failed\n");
  944. return -EIO;
  945. }
  946. pmu_wait_complete(&req);
  947. if (save_autopoll != 0)
  948. pmu_adb_autopoll(save_autopoll);
  949. return 0;
  950. }
  951. #endif /* CONFIG_ADB */
  952. /* Construct and send a pmu request */
  953. int
  954. pmu_request(struct adb_request *req, void (*done)(struct adb_request *),
  955. int nbytes, ...)
  956. {
  957. va_list list;
  958. int i;
  959. if (vias == NULL)
  960. return -ENXIO;
  961. if (nbytes < 0 || nbytes > 32) {
  962. printk(KERN_ERR "pmu_request: bad nbytes (%d)\n", nbytes);
  963. req->complete = 1;
  964. return -EINVAL;
  965. }
  966. req->nbytes = nbytes;
  967. req->done = done;
  968. va_start(list, nbytes);
  969. for (i = 0; i < nbytes; ++i)
  970. req->data[i] = va_arg(list, int);
  971. va_end(list);
  972. req->reply_len = 0;
  973. req->reply_expected = 0;
  974. return pmu_queue_request(req);
  975. }
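/*
 * Typical synchronous usage, as seen elsewhere in this file:
 *
 *   struct adb_request req;
 *   pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, pmu_intr_mask);
 *   pmu_wait_complete(&req);
 *
 * Passing a non-NULL completion callback instead makes the call asynchronous.
 */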
  976. int
  977. pmu_queue_request(struct adb_request *req)
  978. {
  979. unsigned long flags;
  980. int nsend;
  981. if (via == NULL) {
  982. req->complete = 1;
  983. return -ENXIO;
  984. }
  985. if (req->nbytes <= 0) {
  986. req->complete = 1;
  987. return 0;
  988. }
  989. nsend = pmu_data_len[req->data[0]][0];
  990. if (nsend >= 0 && req->nbytes != nsend + 1) {
  991. req->complete = 1;
  992. return -EINVAL;
  993. }
  994. req->next = NULL;
  995. req->sent = 0;
  996. req->complete = 0;
  997. spin_lock_irqsave(&pmu_lock, flags);
  998. if (current_req != 0) {
  999. last_req->next = req;
  1000. last_req = req;
  1001. } else {
  1002. current_req = req;
  1003. last_req = req;
  1004. if (pmu_state == idle)
  1005. pmu_start();
  1006. }
  1007. spin_unlock_irqrestore(&pmu_lock, flags);
  1008. return 0;
  1009. }
  1010. static inline void
  1011. wait_for_ack(void)
  1012. {
  1013. /* Slightly increased the delay; I had one occurrence of the message
  1014. * being reported
  1015. */
  1016. int timeout = 4000;
  1017. while ((in_8(&via[B]) & TACK) == 0) {
  1018. if (--timeout < 0) {
  1019. printk(KERN_ERR "PMU not responding (!ack)\n");
  1020. return;
  1021. }
  1022. udelay(10);
  1023. }
  1024. }
  1025. /* Newer PMUs seem to be very sensitive to these timings, so we make sure
  1026. * the PCI write is flushed immediately */
  1027. static inline void
  1028. send_byte(int x)
  1029. {
  1030. volatile unsigned char __iomem *v = via;
  1031. out_8(&v[ACR], in_8(&v[ACR]) | SR_OUT | SR_EXT);
  1032. out_8(&v[SR], x);
  1033. out_8(&v[B], in_8(&v[B]) & ~TREQ); /* assert TREQ */
  1034. (void)in_8(&v[B]);
  1035. }
  1036. static inline void
  1037. recv_byte(void)
  1038. {
  1039. volatile unsigned char __iomem *v = via;
  1040. out_8(&v[ACR], (in_8(&v[ACR]) & ~SR_OUT) | SR_EXT);
  1041. in_8(&v[SR]); /* resets SR */
  1042. out_8(&v[B], in_8(&v[B]) & ~TREQ);
  1043. (void)in_8(&v[B]);
  1044. }
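/*
 * Byte-level handshake sketch, as implemented by send_byte(), recv_byte()
 * and wait_for_ack(): bytes are clocked through the VIA shift register in
 * external-clock mode (SR_EXT), with SR_OUT selecting the direction. The
 * host drives TREQ low to start a transfer and the PMU answers on TACK
 * (both active low); the dummy read of port B forces the posted write out
 * to the chip immediately.
 */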
  1045. static inline void
  1046. pmu_done(struct adb_request *req)
  1047. {
  1048. void (*done)(struct adb_request *) = req->done;
  1049. mb();
  1050. req->complete = 1;
  1051. /* Here we assume that if the request has a done callback, the
  1052. * struct adb_request will remain valid until req->complete is set to 1
  1053. */
  1054. if (done)
  1055. (*done)(req);
  1056. }
  1057. static void
  1058. pmu_start(void)
  1059. {
  1060. struct adb_request *req;
  1061. /* assert pmu_state == idle */
  1062. /* get the packet to send */
  1063. req = current_req;
  1064. if (req == 0 || pmu_state != idle
  1065. || (/*req->reply_expected && */req_awaiting_reply))
  1066. return;
  1067. pmu_state = sending;
  1068. data_index = 1;
  1069. data_len = pmu_data_len[req->data[0]][0];
  1070. /* It seems safer to make sure ACK is high before writing. This helped
  1071. * fix a problem with ADB on some iBooks.
  1072. */
  1073. wait_for_ack();
  1074. /* set the shift register to shift out and send a byte */
  1075. send_byte(req->data[0]);
  1076. }
  1077. void
  1078. pmu_poll(void)
  1079. {
  1080. if (!via)
  1081. return;
  1082. if (disable_poll)
  1083. return;
  1084. via_pmu_interrupt(0, NULL);
  1085. }
  1086. void
  1087. pmu_poll_adb(void)
  1088. {
  1089. if (!via)
  1090. return;
  1091. if (disable_poll)
  1092. return;
  1093. /* Kick off an ADB read even when the PMU is suspended */
  1094. adb_int_pending = 1;
  1095. do {
  1096. via_pmu_interrupt(0, NULL);
  1097. } while (pmu_suspended && (adb_int_pending || pmu_state != idle
  1098. || req_awaiting_reply));
  1099. }
  1100. void
  1101. pmu_wait_complete(struct adb_request *req)
  1102. {
  1103. if (!via)
  1104. return;
  1105. while((pmu_state != idle && pmu_state != locked) || !req->complete)
  1106. via_pmu_interrupt(0, NULL);
  1107. }
  1108. /* This function loops until the PMU is idle and prevents it from
  1109. * answering ADB interrupts. pmu_request can still be called.
  1110. * This is done to avoid spurious shutdowns when we know we'll have
  1111. * interrupts switched off for a long time.
  1112. */
  1113. void
  1114. pmu_suspend(void)
  1115. {
  1116. unsigned long flags;
  1117. if (!via)
  1118. return;
  1119. spin_lock_irqsave(&pmu_lock, flags);
  1120. pmu_suspended++;
  1121. if (pmu_suspended > 1) {
  1122. spin_unlock_irqrestore(&pmu_lock, flags);
  1123. return;
  1124. }
  1125. do {
  1126. spin_unlock_irqrestore(&pmu_lock, flags);
  1127. if (req_awaiting_reply)
  1128. adb_int_pending = 1;
  1129. via_pmu_interrupt(0, NULL);
  1130. spin_lock_irqsave(&pmu_lock, flags);
  1131. if (!adb_int_pending && pmu_state == idle && !req_awaiting_reply) {
  1132. if (gpio_irq >= 0)
  1133. disable_irq_nosync(gpio_irq);
  1134. out_8(&via[IER], CB1_INT | IER_CLR);
  1135. spin_unlock_irqrestore(&pmu_lock, flags);
  1136. break;
  1137. }
  1138. } while (1);
  1139. }
  1140. void
  1141. pmu_resume(void)
  1142. {
  1143. unsigned long flags;
  1144. if (!via || (pmu_suspended < 1))
  1145. return;
  1146. spin_lock_irqsave(&pmu_lock, flags);
  1147. pmu_suspended--;
  1148. if (pmu_suspended > 0) {
  1149. spin_unlock_irqrestore(&pmu_lock, flags);
  1150. return;
  1151. }
  1152. adb_int_pending = 1;
  1153. if (gpio_irq >= 0)
  1154. enable_irq(gpio_irq);
  1155. out_8(&via[IER], CB1_INT | IER_SET);
  1156. spin_unlock_irqrestore(&pmu_lock, flags);
  1157. pmu_poll();
  1158. }
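/*
 * pmu_suspend()/pmu_resume() nest: the pmu_suspended counter tracks how
 * many callers have suspended PMU event processing, and only the outermost
 * pmu_resume() re-enables the CB1 interrupt and kicks a poll.
 */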
  1159. /* Interrupt data could be the result data from an ADB cmd */
  1160. static void
  1161. pmu_handle_data(unsigned char *data, int len)
  1162. {
  1163. unsigned char ints, pirq;
  1164. int i = 0;
  1165. asleep = 0;
  1166. if (drop_interrupts || len < 1) {
  1167. adb_int_pending = 0;
  1168. pmu_irq_stats[8]++;
  1169. return;
  1170. }
  1171. /* Get PMU interrupt mask */
  1172. ints = data[0];
  1173. /* Record zero interrupts for stats */
  1174. if (ints == 0)
  1175. pmu_irq_stats[9]++;
  1176. /* Hack to deal with ADB autopoll flag */
  1177. if (ints & PMU_INT_ADB)
  1178. ints &= ~(PMU_INT_ADB_AUTO | PMU_INT_AUTO_SRQ_POLL);
  1179. next:
  1180. if (ints == 0) {
  1181. if (i > pmu_irq_stats[10])
  1182. pmu_irq_stats[10] = i;
  1183. return;
  1184. }
  1185. for (pirq = 0; pirq < 8; pirq++)
  1186. if (ints & (1 << pirq))
  1187. break;
  1188. pmu_irq_stats[pirq]++;
  1189. i++;
  1190. ints &= ~(1 << pirq);
  1191. /* Note: for some reason, we get an interrupt with len=1,
  1192. * data[0]==0 after each normal ADB interrupt, at least
  1193. * on the Pismo. Still investigating... --BenH
  1194. */
  1195. if ((1 << pirq) & PMU_INT_ADB) {
  1196. if ((data[0] & PMU_INT_ADB_AUTO) == 0) {
  1197. struct adb_request *req = req_awaiting_reply;
  1198. if (req == 0) {
  1199. printk(KERN_ERR "PMU: extra ADB reply\n");
  1200. return;
  1201. }
  1202. req_awaiting_reply = NULL;
  1203. if (len <= 2)
  1204. req->reply_len = 0;
  1205. else {
  1206. memcpy(req->reply, data + 1, len - 1);
  1207. req->reply_len = len - 1;
  1208. }
  1209. pmu_done(req);
  1210. } else {
  1211. if (len == 4 && data[1] == 0x2c) {
  1212. extern int xmon_wants_key, xmon_adb_keycode;
  1213. if (xmon_wants_key) {
  1214. xmon_adb_keycode = data[2];
  1215. return;
  1216. }
  1217. }
  1218. #ifdef CONFIG_ADB
  1219. /*
  1220. * XXX On the [23]400 the PMU gives us an up
  1221. * event for keycodes 0x74 or 0x75 when the PC
  1222. * card eject buttons are released, so we
  1223. * ignore those events.
  1224. */
  1225. if (!(pmu_kind == PMU_OHARE_BASED && len == 4
  1226. && data[1] == 0x2c && data[3] == 0xff
  1227. && (data[2] & ~1) == 0xf4))
  1228. adb_input(data+1, len-1, 1);
  1229. #endif /* CONFIG_ADB */
  1230. }
  1231. }
  1232. /* Sound/brightness button pressed */
  1233. else if ((1 << pirq) & PMU_INT_SNDBRT) {
  1234. #ifdef CONFIG_PMAC_BACKLIGHT
  1235. if (len == 3)
  1236. pmac_backlight_set_legacy_brightness_pmu(data[1] >> 4);
  1237. #endif
  1238. }
  1239. /* Tick interrupt */
  1240. else if ((1 << pirq) & PMU_INT_TICK) {
  1241. /* Environment or tick interrupt, query batteries */
  1242. if (pmu_battery_count) {
  1243. if ((--query_batt_timer) == 0) {
  1244. query_battery_state();
  1245. query_batt_timer = BATTERY_POLLING_COUNT;
  1246. }
  1247. }
  1248. }
  1249. else if ((1 << pirq) & PMU_INT_ENVIRONMENT) {
  1250. if (pmu_battery_count)
  1251. query_battery_state();
  1252. pmu_pass_intr(data, len);
  1253. /* len == 6 is probably a bad check. But how do I
  1254. * know what PMU versions send what events here? */
  1255. if (len == 6) {
  1256. via_pmu_event(PMU_EVT_POWER, !!(data[1]&8));
  1257. via_pmu_event(PMU_EVT_LID, data[1]&1);
  1258. }
  1259. } else {
  1260. pmu_pass_intr(data, len);
  1261. }
  1262. goto next;
  1263. }
  1264. static struct adb_request*
  1265. pmu_sr_intr(void)
  1266. {
  1267. struct adb_request *req;
  1268. int bite = 0;
  1269. if (via[B] & TREQ) {
  1270. printk(KERN_ERR "PMU: spurious SR intr (%x)\n", via[B]);
  1271. out_8(&via[IFR], SR_INT);
  1272. return NULL;
  1273. }
  1274. /* The ack may not yet be low when we get the interrupt */
  1275. while ((in_8(&via[B]) & TACK) != 0)
  1276. ;
  1277. /* if reading grab the byte, and reset the interrupt */
  1278. if (pmu_state == reading || pmu_state == reading_intr)
  1279. bite = in_8(&via[SR]);
  1280. /* reset TREQ and wait for TACK to go high */
  1281. out_8(&via[B], in_8(&via[B]) | TREQ);
  1282. wait_for_ack();
  1283. switch (pmu_state) {
  1284. case sending:
  1285. req = current_req;
  1286. if (data_len < 0) {
  1287. data_len = req->nbytes - 1;
  1288. send_byte(data_len);
  1289. break;
  1290. }
  1291. if (data_index <= data_len) {
  1292. send_byte(req->data[data_index++]);
  1293. break;
  1294. }
  1295. req->sent = 1;
  1296. data_len = pmu_data_len[req->data[0]][1];
  1297. if (data_len == 0) {
  1298. pmu_state = idle;
  1299. current_req = req->next;
  1300. if (req->reply_expected)
  1301. req_awaiting_reply = req;
  1302. else
  1303. return req;
  1304. } else {
  1305. pmu_state = reading;
  1306. data_index = 0;
  1307. reply_ptr = req->reply + req->reply_len;
  1308. recv_byte();
  1309. }
  1310. break;
  1311. case intack:
  1312. data_index = 0;
  1313. data_len = -1;
  1314. pmu_state = reading_intr;
  1315. reply_ptr = interrupt_data[int_data_last];
  1316. recv_byte();
  1317. if (gpio_irq >= 0 && !gpio_irq_enabled) {
  1318. enable_irq(gpio_irq);
  1319. gpio_irq_enabled = 1;
  1320. }
  1321. break;
  1322. case reading:
  1323. case reading_intr:
  1324. if (data_len == -1) {
  1325. data_len = bite;
  1326. if (bite > 32)
  1327. printk(KERN_ERR "PMU: bad reply len %d\n", bite);
  1328. } else if (data_index < 32) {
  1329. reply_ptr[data_index++] = bite;
  1330. }
  1331. if (data_index < data_len) {
  1332. recv_byte();
  1333. break;
  1334. }
  1335. if (pmu_state == reading_intr) {
  1336. pmu_state = idle;
  1337. int_data_state[int_data_last] = int_data_ready;
  1338. interrupt_data_len[int_data_last] = data_len;
  1339. } else {
  1340. req = current_req;
  1341. /*
  1342. * For PMU sleep and freq change requests, we lock the
  1343. * PMU until it's explicitly unlocked. This avoids any
  1344. * spurious event polling from getting in
  1345. */
  1346. current_req = req->next;
  1347. req->reply_len += data_index;
  1348. if (req->data[0] == PMU_SLEEP || req->data[0] == PMU_CPU_SPEED)
  1349. pmu_state = locked;
  1350. else
  1351. pmu_state = idle;
  1352. return req;
  1353. }
  1354. break;
  1355. default:
  1356. printk(KERN_ERR "via_pmu_interrupt: unknown state %d?\n",
  1357. pmu_state);
  1358. }
  1359. return NULL;
  1360. }
static irqreturn_t
via_pmu_interrupt(int irq, void *arg)
{
	unsigned long flags;
	int intr;
	int nloop = 0;
	int int_data = -1;
	struct adb_request *req = NULL;
	int handled = 0;

	/* This is a bit brutal, we can probably do better */
	spin_lock_irqsave(&pmu_lock, flags);
	++disable_poll;

	for (;;) {
		intr = in_8(&via[IFR]) & (SR_INT | CB1_INT);
		if (intr == 0)
			break;
		handled = 1;
		if (++nloop > 1000) {
			printk(KERN_DEBUG "PMU: stuck in intr loop, "
			       "intr=%x, ier=%x pmu_state=%d\n",
			       intr, in_8(&via[IER]), pmu_state);
			break;
		}
		out_8(&via[IFR], intr);
		if (intr & CB1_INT) {
			adb_int_pending = 1;
			pmu_irq_stats[0]++;
		}
		if (intr & SR_INT) {
			req = pmu_sr_intr();
			if (req)
				break;
		}
	}

recheck:
	if (pmu_state == idle) {
		if (adb_int_pending) {
			if (int_data_state[0] == int_data_empty)
				int_data_last = 0;
			else if (int_data_state[1] == int_data_empty)
				int_data_last = 1;
			else
				goto no_free_slot;
			pmu_state = intack;
			int_data_state[int_data_last] = int_data_fill;
			/* Sounds safer to make sure ACK is high before writing.
			 * This helped kill a problem with ADB and some iBooks
			 */
			wait_for_ack();
			send_byte(PMU_INT_ACK);
			adb_int_pending = 0;
		} else if (current_req)
			pmu_start();
	}
no_free_slot:
	/* Mark the oldest buffer for flushing */
	if (int_data_state[!int_data_last] == int_data_ready) {
		int_data_state[!int_data_last] = int_data_flush;
		int_data = !int_data_last;
	} else if (int_data_state[int_data_last] == int_data_ready) {
		int_data_state[int_data_last] = int_data_flush;
		int_data = int_data_last;
	}
	--disable_poll;
	spin_unlock_irqrestore(&pmu_lock, flags);

	/* Deal with completed PMU requests outside of the lock */
	if (req) {
		pmu_done(req);
		req = NULL;
	}

	/* Deal with interrupt data outside of the lock */
	if (int_data >= 0) {
		pmu_handle_data(interrupt_data[int_data], interrupt_data_len[int_data]);
		spin_lock_irqsave(&pmu_lock, flags);
		++disable_poll;
		int_data_state[int_data] = int_data_empty;
		int_data = -1;
		goto recheck;
	}

	return IRQ_RETVAL(handled);
}
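/*
 * Unlock the PMU after a request that left it in the "locked" state
 * (sleep or CPU speed change) and flag a pending event poll.
 */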
void
pmu_unlock(void)
{
	unsigned long flags;

	spin_lock_irqsave(&pmu_lock, flags);
	if (pmu_state == locked)
		pmu_state = idle;
	adb_int_pending = 1;
	spin_unlock_irqrestore(&pmu_lock, flags);
}
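/*
 * GPIO1 interrupt, used on machines where the PMU event line is wired to
 * a GPIO rather than to the VIA CB1 pin: mask the GPIO irq until the
 * event has been acknowledged, then run the main interrupt handler.
 */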
static irqreturn_t
gpio1_interrupt(int irq, void *arg)
{
	unsigned long flags;

	if ((in_8(gpio_reg + 0x9) & 0x02) == 0) {
		spin_lock_irqsave(&pmu_lock, flags);
		if (gpio_irq_enabled > 0) {
			disable_irq_nosync(gpio_irq);
			gpio_irq_enabled = 0;
		}
		pmu_irq_stats[1]++;
		adb_int_pending = 1;
		spin_unlock_irqrestore(&pmu_lock, flags);
		via_pmu_interrupt(0, NULL);
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}
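/* Turn the IR port LED power on or off (not available on KeyLargo-based PMUs). */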
void
pmu_enable_irled(int on)
{
	struct adb_request req;

	if (vias == NULL)
		return;
	if (pmu_kind == PMU_KEYLARGO_BASED)
		return;

	pmu_request(&req, NULL, 2, PMU_POWER_CTRL, PMU_POW_IRLED |
		    (on ? PMU_POW_ON : PMU_POW_OFF));
	pmu_wait_complete(&req);
}
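/*
 * Reboot and power-off entry points.  Interrupts are disabled and, on
 * pre-KeyLargo PMUs, the event mask is first reduced to ADB and tick
 * events; the final PMU_RESET / PMU_SHUTDOWN command does not return,
 * so we spin until the PMU acts.
 */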
void
pmu_restart(void)
{
	struct adb_request req;

	if (via == NULL)
		return;

	local_irq_disable();

	drop_interrupts = 1;

	if (pmu_kind != PMU_KEYLARGO_BASED) {
		pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, PMU_INT_ADB |
						PMU_INT_TICK);
		while (!req.complete)
			pmu_poll();
	}

	pmu_request(&req, NULL, 1, PMU_RESET);
	pmu_wait_complete(&req);
	for (;;)
		;
}

void
pmu_shutdown(void)
{
	struct adb_request req;

	if (via == NULL)
		return;

	local_irq_disable();

	drop_interrupts = 1;

	if (pmu_kind != PMU_KEYLARGO_BASED) {
		pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, PMU_INT_ADB |
						PMU_INT_TICK);
		pmu_wait_complete(&req);
	} else {
		/* Disable server mode on shutdown or we'll just
		 * wake up again
		 */
		pmu_set_server_mode(0);
	}

	pmu_request(&req, NULL, 5, PMU_SHUTDOWN,
		    'M', 'A', 'T', 'T');
	pmu_wait_complete(&req);
	for (;;)
		;
}

int
pmu_present(void)
{
	return via != 0;
}
#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_PPC32)

/*
 * This struct is used to store config register values for
 * PCI devices which may get powered off when we sleep.
 */
static struct pci_save {
	u16	command;
	u16	cache_lat;
	u16	intr;
	u32	rom_address;
} *pbook_pci_saves;
static int pbook_npci_saves;

static void
pbook_alloc_pci_save(void)
{
	int npci;
	struct pci_dev *pd = NULL;

	npci = 0;
	while ((pd = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pd)) != NULL) {
		++npci;
	}
	if (npci == 0)
		return;
	pbook_pci_saves = (struct pci_save *)
		kmalloc(npci * sizeof(struct pci_save), GFP_KERNEL);
	pbook_npci_saves = npci;
}
static void
pbook_free_pci_save(void)
{
	if (pbook_pci_saves == NULL)
		return;
	kfree(pbook_pci_saves);
	pbook_pci_saves = NULL;
	pbook_npci_saves = 0;
}

static void
pbook_pci_save(void)
{
	struct pci_save *ps = pbook_pci_saves;
	struct pci_dev *pd = NULL;
	int npci = pbook_npci_saves;

	if (ps == NULL)
		return;

	while ((pd = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pd)) != NULL) {
		if (npci-- == 0) {
			pci_dev_put(pd);
			return;
		}
		pci_read_config_word(pd, PCI_COMMAND, &ps->command);
		pci_read_config_word(pd, PCI_CACHE_LINE_SIZE, &ps->cache_lat);
		pci_read_config_word(pd, PCI_INTERRUPT_LINE, &ps->intr);
		pci_read_config_dword(pd, PCI_ROM_ADDRESS, &ps->rom_address);
		++ps;
	}
}
/* For this to work, we must take care of a few things: If gmac was enabled
 * during boot, it will be in the pci dev list. If it's disabled at this point
 * (and it will probably be), then you can't access its config space.
 */
static void
pbook_pci_restore(void)
{
	u16 cmd;
	struct pci_save *ps = pbook_pci_saves - 1;
	struct pci_dev *pd = NULL;
	int npci = pbook_npci_saves;
	int j;

	while ((pd = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pd)) != NULL) {
		if (npci-- == 0)
			return;
		ps++;
		if (ps->command == 0)
			continue;
		pci_read_config_word(pd, PCI_COMMAND, &cmd);
		if ((ps->command & ~cmd) == 0)
			continue;
		switch (pd->hdr_type) {
		case PCI_HEADER_TYPE_NORMAL:
			for (j = 0; j < 6; ++j)
				pci_write_config_dword(pd,
					PCI_BASE_ADDRESS_0 + j*4,
					pd->resource[j].start);
			pci_write_config_dword(pd, PCI_ROM_ADDRESS,
					       ps->rom_address);
			pci_write_config_word(pd, PCI_CACHE_LINE_SIZE,
					      ps->cache_lat);
			pci_write_config_word(pd, PCI_INTERRUPT_LINE,
					      ps->intr);
			pci_write_config_word(pd, PCI_COMMAND, ps->command);
			break;
		}
	}
}
#ifdef DEBUG_SLEEP
/* N.B. This doesn't work on the 3400 */
void
pmu_blink(int n)
{
	struct adb_request req;

	memset(&req, 0, sizeof(req));

	for (; n > 0; --n) {
		req.nbytes = 4;
		req.done = NULL;
		req.data[0] = 0xee;
		req.data[1] = 4;
		req.data[2] = 0;
		req.data[3] = 1;
		req.reply[0] = ADB_RET_OK;
		req.reply_len = 1;
		req.reply_expected = 0;
		pmu_polled_request(&req);
		mdelay(50);
		req.nbytes = 4;
		req.done = NULL;
		req.data[0] = 0xee;
		req.data[1] = 4;
		req.data[2] = 0;
		req.data[3] = 0;
		req.reply[0] = ADB_RET_OK;
		req.reply_len = 1;
		req.reply_expected = 0;
		pmu_polled_request(&req);
		mdelay(50);
	}
	mdelay(50);
}
#endif
/*
 * Put the powerbook to sleep.
 */

static u32 save_via[8];

static void
save_via_state(void)
{
	save_via[0] = in_8(&via[ANH]);
	save_via[1] = in_8(&via[DIRA]);
	save_via[2] = in_8(&via[B]);
	save_via[3] = in_8(&via[DIRB]);
	save_via[4] = in_8(&via[PCR]);
	save_via[5] = in_8(&via[ACR]);
	save_via[6] = in_8(&via[T1CL]);
	save_via[7] = in_8(&via[T1CH]);
}

static void
restore_via_state(void)
{
	out_8(&via[ANH], save_via[0]);
	out_8(&via[DIRA], save_via[1]);
	out_8(&via[B], save_via[2]);
	out_8(&via[DIRB], save_via[3]);
	out_8(&via[PCR], save_via[4]);
	out_8(&via[ACR], save_via[5]);
	out_8(&via[T1CL], save_via[6]);
	out_8(&via[T1CH], save_via[7]);
	out_8(&via[IER], IER_CLR | 0x7f);	/* disable all intrs */
	out_8(&via[IFR], 0x7f);			/* clear IFR */
	out_8(&via[IER], IER_SET | SR_INT | CB1_INT);
}
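/*
 * Quiesce the machine before sleep: sync filesystems, suspend drivers and
 * platform functions, stop the decrementer, disable local interrupts and
 * power down system devices.  Returns 0 on success, or -EBUSY if a driver
 * refused to suspend or power down.
 */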
extern void pmu_backlight_set_sleep(int sleep);

static int
pmac_suspend_devices(void)
{
	int ret;

	pm_prepare_console();

	/* Sync the disks. */
	/* XXX It would be nice to have some way to ensure that
	 * nobody is dirtying any new buffers while we wait. That
	 * could be achieved using the refrigerator for processes
	 * that swsusp uses
	 */
	sys_sync();

	/* Send suspend call to devices, hold the device core's dpm_sem */
	ret = device_suspend(PMSG_SUSPEND);
	if (ret) {
		printk(KERN_ERR "Driver sleep failed\n");
		return -EBUSY;
	}

#ifdef CONFIG_PMAC_BACKLIGHT
	/* Tell backlight code not to muck around with the chip anymore */
	pmu_backlight_set_sleep(1);
#endif

	/* Call platform functions marked "on sleep" */
	pmac_pfunc_i2c_suspend();
	pmac_pfunc_base_suspend();

	/* Stop preemption */
	preempt_disable();

	/* Make sure the decrementer won't interrupt us */
	asm volatile("mtdec %0" : : "r" (0x7fffffff));
	/* Make sure any pending DEC interrupt occurring while we did
	 * the above didn't re-enable the DEC */
	mb();
	asm volatile("mtdec %0" : : "r" (0x7fffffff));

	/* We can now disable MSR_EE. This code of course works properly only
	 * on UP machines... For SMP, if we ever implement sleep, we'll have to
	 * stop the "other" CPUs way before we do all that stuff.
	 */
	local_irq_disable();

	/* Broadcast power down irq
	 * This isn't that useful in most cases (only directly wired devices
	 * can use this), but still... This will take care of sysdevs as well,
	 * so we exit from here with local irqs disabled and PIC off.
	 */
	ret = device_power_down(PMSG_SUSPEND);
	if (ret) {
		wakeup_decrementer();
		local_irq_enable();
		preempt_enable();
		device_resume();
		printk(KERN_ERR "Driver powerdown failed\n");
		return -EBUSY;
	}

	/* Wait for completion of async requests */
	while (!batt_req.complete)
		pmu_poll();

	/* Give up the lazy FPU & vec so we don't have to back them
	 * up from the low level code
	 */
	enable_kernel_fp();

#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		enable_kernel_altivec();
#endif /* CONFIG_ALTIVEC */

	return 0;
}
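/*
 * Undo pmac_suspend_devices() after the machine comes back from sleep:
 * power system devices back up, restart the decrementer, re-enable
 * interrupts and preemption, run the "on wake" platform functions and
 * resume drivers.
 */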
static int
pmac_wakeup_devices(void)
{
	mdelay(100);

#ifdef CONFIG_PMAC_BACKLIGHT
	/* Tell backlight code it can use the chip again */
	pmu_backlight_set_sleep(0);
#endif

	/* Power back up system devices (including the PIC) */
	device_power_up();

	/* Force a poll of ADB interrupts */
	adb_int_pending = 1;
	via_pmu_interrupt(0, NULL);

	/* Restart jiffies & scheduling */
	wakeup_decrementer();

	/* Re-enable local CPU interrupts */
	local_irq_enable();
	mdelay(10);
	preempt_enable();

	/* Call platform functions marked "on wake" */
	pmac_pfunc_base_resume();
	pmac_pfunc_i2c_resume();

	/* Resume devices */
	device_resume();

	pm_restore_console();

	return 0;
}
#define GRACKLE_PM	(1<<7)
#define GRACKLE_DOZE	(1<<5)
#define GRACKLE_NAP	(1<<4)
#define GRACKLE_SLEEP	(1<<3)
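/*
 * Sleep path used for Heathrow/Paddington machines (Grackle host bridge):
 * power down drives and peripherals through the PMU, save VIA and L2
 * state, set the Grackle power-management bits, then call the low-level
 * sleep handler.  Everything is restored in reverse order on wake.
 */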
static int powerbook_sleep_grackle(void)
{
	unsigned long save_l2cr;
	unsigned short pmcr1;
	struct adb_request req;
	int ret;
	struct pci_dev *grackle;

	grackle = pci_get_bus_and_slot(0, 0);
	if (!grackle)
		return -ENODEV;

	ret = pmac_suspend_devices();
	if (ret) {
		printk(KERN_ERR "Sleep rejected by devices\n");
		return ret;
	}

	/* Turn off various things. Darwin does some retry tests here... */
	pmu_request(&req, NULL, 2, PMU_POWER_CTRL0, PMU_POW0_OFF|PMU_POW0_HARD_DRIVE);
	pmu_wait_complete(&req);
	pmu_request(&req, NULL, 2, PMU_POWER_CTRL,
		PMU_POW_OFF|PMU_POW_BACKLIGHT|PMU_POW_IRLED|PMU_POW_MEDIABAY);
	pmu_wait_complete(&req);

	/* For 750, save backside cache setting and disable it */
	save_l2cr = _get_L2CR();	/* (returns -1 if not available) */

	if (!__fake_sleep) {
		/* Ask the PMU to put us to sleep */
		pmu_request(&req, NULL, 5, PMU_SLEEP, 'M', 'A', 'T', 'T');
		pmu_wait_complete(&req);
	}

	/* The VIA state will not survive sleep, so save it here and
	 * restore it on wake */
	save_via_state();

	/* We shut down some HW */
	pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, 1);

	pci_read_config_word(grackle, 0x70, &pmcr1);
	/* Apparently, MacOS uses NAP mode for Grackle ??? */
	pmcr1 &= ~(GRACKLE_DOZE|GRACKLE_SLEEP);
	pmcr1 |= GRACKLE_PM|GRACKLE_NAP;
	pci_write_config_word(grackle, 0x70, pmcr1);

	/* Call low-level ASM sleep handler */
	if (__fake_sleep)
		mdelay(5000);
	else
		low_sleep_handler();

	/* We're awake again, stop grackle PM */
	pci_read_config_word(grackle, 0x70, &pmcr1);
	pmcr1 &= ~(GRACKLE_PM|GRACKLE_DOZE|GRACKLE_SLEEP|GRACKLE_NAP);
	pci_write_config_word(grackle, 0x70, pmcr1);

	pci_dev_put(grackle);

	/* Make sure the PMU is idle */
	pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, 0);
	restore_via_state();

	/* Restore L2 cache */
	if (save_l2cr != 0xffffffff && (save_l2cr & L2CR_L2E) != 0)
		_set_L2CR(save_l2cr);

	/* Restore userland MMU context */
	set_context(current->active_mm->context.id, current->active_mm->pgd);

	/* Power things up */
	pmu_unlock();
	pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, pmu_intr_mask);
	pmu_wait_complete(&req);
	pmu_request(&req, NULL, 2, PMU_POWER_CTRL0,
			PMU_POW0_ON|PMU_POW0_HARD_DRIVE);
	pmu_wait_complete(&req);
	pmu_request(&req, NULL, 2, PMU_POWER_CTRL,
			PMU_POW_ON|PMU_POW_BACKLIGHT|PMU_POW_CHARGER|PMU_POW_IRLED|PMU_POW_MEDIABAY);
	pmu_wait_complete(&req);

	pmac_wakeup_devices();

	return 0;
}
static int
powerbook_sleep_Core99(void)
{
	unsigned long save_l2cr;
	unsigned long save_l3cr;
	struct adb_request req;
	int ret;

	if (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) < 0) {
		printk(KERN_ERR "Sleep mode not supported on this machine\n");
		return -ENOSYS;
	}

	if (num_online_cpus() > 1 || cpu_is_offline(0))
		return -EAGAIN;

	ret = pmac_suspend_devices();
	if (ret) {
		printk(KERN_ERR "Sleep rejected by devices\n");
		return ret;
	}

	/* Stop environment and ADB interrupts */
	pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, 0);
	pmu_wait_complete(&req);

	/* Tell PMU what events will wake us up */
	pmu_request(&req, NULL, 4, PMU_POWER_EVENTS, PMU_PWR_CLR_WAKEUP_EVENTS,
		0xff, 0xff);
	pmu_wait_complete(&req);
	pmu_request(&req, NULL, 4, PMU_POWER_EVENTS, PMU_PWR_SET_WAKEUP_EVENTS,
		0, PMU_PWR_WAKEUP_KEY |
		(option_lid_wakeup ? PMU_PWR_WAKEUP_LID_OPEN : 0));
	pmu_wait_complete(&req);

	/* Save the state of the L2 and L3 caches */
	save_l3cr = _get_L3CR();	/* (returns -1 if not available) */
	save_l2cr = _get_L2CR();	/* (returns -1 if not available) */

	if (!__fake_sleep) {
		/* Ask the PMU to put us to sleep */
		pmu_request(&req, NULL, 5, PMU_SLEEP, 'M', 'A', 'T', 'T');
		pmu_wait_complete(&req);
	}

	/* The VIA state will not survive sleep, so save it here */
	save_via_state();

	/* Shut down various ASICs. There's a chance that we can no longer
	 * talk to the PMU after this, so I moved it to _after_ sending the
	 * sleep command to it. Still needs to be checked.
	 */
	pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, 1);

	/* Call low-level ASM sleep handler */
	if (__fake_sleep)
		mdelay(5000);
	else
		low_sleep_handler();

	/* Restore Apple core ASICs state */
	pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, 0);

	/* Restore VIA */
	restore_via_state();

	/* tweak LPJ before cpufreq is there */
	loops_per_jiffy *= 2;

	/* Restore video */
	pmac_call_early_video_resume();

	/* Restore L2 cache */
	if (save_l2cr != 0xffffffff && (save_l2cr & L2CR_L2E) != 0)
		_set_L2CR(save_l2cr);
	/* Restore L3 cache */
	if (save_l3cr != 0xffffffff && (save_l3cr & L3CR_L3E) != 0)
		_set_L3CR(save_l3cr);

	/* Restore userland MMU context */
	set_context(current->active_mm->context.id, current->active_mm->pgd);

	/* Tell PMU we are ready */
	pmu_unlock();
	pmu_request(&req, NULL, 2, PMU_SYSTEM_READY, 2);
	pmu_wait_complete(&req);
	pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, pmu_intr_mask);
	pmu_wait_complete(&req);

	/* Restore LPJ, cpufreq will adjust the cpu frequency */
	loops_per_jiffy /= 2;

	pmac_wakeup_devices();

	return 0;
}
#define PB3400_MEM_CTRL		0xf8000000
#define PB3400_MEM_CTRL_SLEEP	0x70
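/*
 * Sleep path for OHare-based PowerBooks (e.g. the 3400): program the
 * memory controller to keep RAM refreshed, ask the PMU to sleep, then put
 * the CPU itself into sleep mode via HID0 and MSR_POW.
 */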
static int
powerbook_sleep_3400(void)
{
	int ret, i, x;
	unsigned int hid0;
	unsigned long p;
	struct adb_request sleep_req;
	void __iomem *mem_ctrl;
	unsigned int __iomem *mem_ctrl_sleep;

	/* first map in the memory controller registers */
	mem_ctrl = ioremap(PB3400_MEM_CTRL, 0x100);
	if (mem_ctrl == NULL) {
		printk("powerbook_sleep_3400: ioremap failed\n");
		return -ENOMEM;
	}
	mem_ctrl_sleep = mem_ctrl + PB3400_MEM_CTRL_SLEEP;

	/* Allocate room for PCI save */
	pbook_alloc_pci_save();

	ret = pmac_suspend_devices();
	if (ret) {
		pbook_free_pci_save();
		printk(KERN_ERR "Sleep rejected by devices\n");
		return ret;
	}

	/* Save the state of PCI config space for some slots */
	pbook_pci_save();

	/* Set the memory controller to keep the memory refreshed
	   while we're asleep */
	for (i = 0x403f; i >= 0x4000; --i) {
		out_be32(mem_ctrl_sleep, i);
		do {
			x = (in_be32(mem_ctrl_sleep) >> 16) & 0x3ff;
		} while (x == 0);
		if (x >= 0x100)
			break;
	}

	/* Ask the PMU to put us to sleep */
	pmu_request(&sleep_req, NULL, 5, PMU_SLEEP, 'M', 'A', 'T', 'T');
	while (!sleep_req.complete)
		mb();

	pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, 1);

	/* displacement-flush the L2 cache - necessary? */
	for (p = KERNELBASE; p < KERNELBASE + 0x100000; p += 0x1000)
		i = *(volatile int *)p;
	asleep = 1;

	/* Put the CPU into sleep mode */
	hid0 = mfspr(SPRN_HID0);
	hid0 = (hid0 & ~(HID0_NAP | HID0_DOZE)) | HID0_SLEEP;
	mtspr(SPRN_HID0, hid0);
	mtmsr(mfmsr() | MSR_POW | MSR_EE);
	udelay(10);

	/* OK, we're awake again, start restoring things */
	out_be32(mem_ctrl_sleep, 0x3f);
	pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, 0);
	pbook_pci_restore();
	pmu_unlock();

	/* wait for the PMU interrupt sequence to complete */
	while (asleep)
		mb();

	pmac_wakeup_devices();
	pbook_free_pci_save();
	iounmap(mem_ctrl);

	return 0;
}

#endif /* CONFIG_PM_SLEEP && CONFIG_PPC32 */
/*
 * Support for /dev/pmu device
 */
#define RB_SIZE		0x10
struct pmu_private {
	struct list_head list;
	int	rb_get;
	int	rb_put;
	struct rb_entry {
		unsigned short len;
		unsigned char data[16];
	} rb_buf[RB_SIZE];
	wait_queue_head_t wait;
	spinlock_t lock;
#if defined(CONFIG_INPUT_ADBHID) && defined(CONFIG_PMAC_BACKLIGHT)
	int	backlight_locker;
#endif
};

static LIST_HEAD(all_pmu_pvt);
static DEFINE_SPINLOCK(all_pvt_lock);
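/*
 * Broadcast a PMU event packet to every open /dev/pmu client: copy it
 * into each client's ring buffer (dropping it if the ring is full) and
 * wake up any readers.
 */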
static void
pmu_pass_intr(unsigned char *data, int len)
{
	struct pmu_private *pp;
	struct list_head *list;
	int i;
	unsigned long flags;

	if (len > sizeof(pp->rb_buf[0].data))
		len = sizeof(pp->rb_buf[0].data);
	spin_lock_irqsave(&all_pvt_lock, flags);
	for (list = &all_pmu_pvt; (list = list->next) != &all_pmu_pvt; ) {
		pp = list_entry(list, struct pmu_private, list);
		spin_lock(&pp->lock);
		i = pp->rb_put + 1;
		if (i >= RB_SIZE)
			i = 0;
		if (i != pp->rb_get) {
			struct rb_entry *rp = &pp->rb_buf[pp->rb_put];
			rp->len = len;
			memcpy(rp->data, data, len);
			pp->rb_put = i;
			wake_up_interruptible(&pp->wait);
		}
		spin_unlock(&pp->lock);
	}
	spin_unlock_irqrestore(&all_pvt_lock, flags);
}
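/* Open /dev/pmu: allocate a per-client ring buffer and link it into the
 * list of listeners fed by pmu_pass_intr().
 */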
static int
pmu_open(struct inode *inode, struct file *file)
{
	struct pmu_private *pp;
	unsigned long flags;

	pp = kmalloc(sizeof(struct pmu_private), GFP_KERNEL);
	if (pp == 0)
		return -ENOMEM;
	pp->rb_get = pp->rb_put = 0;
	spin_lock_init(&pp->lock);
	init_waitqueue_head(&pp->wait);
	spin_lock_irqsave(&all_pvt_lock, flags);
#if defined(CONFIG_INPUT_ADBHID) && defined(CONFIG_PMAC_BACKLIGHT)
	pp->backlight_locker = 0;
#endif
	list_add(&pp->list, &all_pmu_pvt);
	spin_unlock_irqrestore(&all_pvt_lock, flags);
	file->private_data = pp;
	return 0;
}
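/*
 * Read one PMU event packet from the client's ring buffer, blocking
 * (unless O_NONBLOCK) until one is available.  Each read() returns at
 * most one packet; a short buffer truncates the packet.
 *
 * Illustrative sketch only (not part of the driver): a userspace client
 * would typically do something like
 *
 *	int fd = open("/dev/pmu", O_RDONLY);
 *	unsigned char buf[16];
 *	int n = read(fd, buf, sizeof(buf));	// one event packet, if any
 */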
static ssize_t
pmu_read(struct file *file, char __user *buf,
	 size_t count, loff_t *ppos)
{
	struct pmu_private *pp = file->private_data;
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int ret = 0;

	if (count < 1 || pp == 0)
		return -EINVAL;
	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	spin_lock_irqsave(&pp->lock, flags);
	add_wait_queue(&pp->wait, &wait);
	current->state = TASK_INTERRUPTIBLE;

	for (;;) {
		ret = -EAGAIN;
		if (pp->rb_get != pp->rb_put) {
			int i = pp->rb_get;
			struct rb_entry *rp = &pp->rb_buf[i];
			ret = rp->len;
			spin_unlock_irqrestore(&pp->lock, flags);
			if (ret > count)
				ret = count;
			if (ret > 0 && copy_to_user(buf, rp->data, ret))
				ret = -EFAULT;
			if (++i >= RB_SIZE)
				i = 0;
			spin_lock_irqsave(&pp->lock, flags);
			pp->rb_get = i;
		}
		if (ret >= 0)
			break;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -ERESTARTSYS;
		if (signal_pending(current))
			break;
		spin_unlock_irqrestore(&pp->lock, flags);
		schedule();
		spin_lock_irqsave(&pp->lock, flags);
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(&pp->wait, &wait);
	spin_unlock_irqrestore(&pp->lock, flags);

	return ret;
}
static ssize_t
pmu_write(struct file *file, const char __user *buf,
	  size_t count, loff_t *ppos)
{
	return 0;
}

static unsigned int
pmu_fpoll(struct file *filp, poll_table *wait)
{
	struct pmu_private *pp = filp->private_data;
	unsigned int mask = 0;
	unsigned long flags;

	if (pp == 0)
		return 0;
	poll_wait(filp, &pp->wait, wait);
	spin_lock_irqsave(&pp->lock, flags);
	if (pp->rb_get != pp->rb_put)
		mask |= POLLIN;
	spin_unlock_irqrestore(&pp->lock, flags);
	return mask;
}
static int
pmu_release(struct inode *inode, struct file *file)
{
	struct pmu_private *pp = file->private_data;
	unsigned long flags;

	if (pp != 0) {
		file->private_data = NULL;

		spin_lock_irqsave(&all_pvt_lock, flags);
		list_del(&pp->list);
		spin_unlock_irqrestore(&all_pvt_lock, flags);

#if defined(CONFIG_INPUT_ADBHID) && defined(CONFIG_PMAC_BACKLIGHT)
		if (pp->backlight_locker)
			pmac_backlight_enable();
#endif

		kfree(pp);
	}
	return 0;
}
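/*
 * ioctl interface for /dev/pmu: trigger sleep, query sleep capability,
 * legacy backlight control, and report the PMU model / ADB presence.
 */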
static int
pmu_ioctl(struct inode *inode, struct file *filp,
	  u_int cmd, u_long arg)
{
	__u32 __user *argp = (__u32 __user *)arg;
	int error = -EINVAL;

	switch (cmd) {
#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_PPC32)
	case PMU_IOC_SLEEP:
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;
		if (sleep_in_progress)
			return -EBUSY;
		sleep_in_progress = 1;
		switch (pmu_kind) {
		case PMU_OHARE_BASED:
			error = powerbook_sleep_3400();
			break;
		case PMU_HEATHROW_BASED:
		case PMU_PADDINGTON_BASED:
			error = powerbook_sleep_grackle();
			break;
		case PMU_KEYLARGO_BASED:
			error = powerbook_sleep_Core99();
			break;
		default:
			error = -ENOSYS;
		}
		sleep_in_progress = 0;
		break;
	case PMU_IOC_CAN_SLEEP:
		if (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) < 0)
			return put_user(0, argp);
		else
			return put_user(1, argp);
#endif /* CONFIG_PM_SLEEP && CONFIG_PPC32 */

#ifdef CONFIG_PMAC_BACKLIGHT_LEGACY
	/* Compatibility ioctls for backlight */
	case PMU_IOC_GET_BACKLIGHT:
	{
		int brightness;

		if (sleep_in_progress)
			return -EBUSY;
		brightness = pmac_backlight_get_legacy_brightness();
		if (brightness < 0)
			return brightness;
		else
			return put_user(brightness, argp);
	}
	case PMU_IOC_SET_BACKLIGHT:
	{
		int brightness;

		if (sleep_in_progress)
			return -EBUSY;
		error = get_user(brightness, argp);
		if (error)
			return error;
		return pmac_backlight_set_legacy_brightness(brightness);
	}
#ifdef CONFIG_INPUT_ADBHID
	case PMU_IOC_GRAB_BACKLIGHT: {
		struct pmu_private *pp = filp->private_data;

		if (pp->backlight_locker)
			return 0;
		pp->backlight_locker = 1;
		pmac_backlight_disable();
		return 0;
	}
#endif /* CONFIG_INPUT_ADBHID */
#endif /* CONFIG_PMAC_BACKLIGHT_LEGACY */

	case PMU_IOC_GET_MODEL:
		return put_user(pmu_kind, argp);
	case PMU_IOC_HAS_ADB:
		return put_user(pmu_has_adb, argp);
	}
	return error;
}
static const struct file_operations pmu_device_fops = {
	.read		= pmu_read,
	.write		= pmu_write,
	.poll		= pmu_fpoll,
	.ioctl		= pmu_ioctl,
	.open		= pmu_open,
	.release	= pmu_release,
};

static struct miscdevice pmu_device = {
	PMU_MINOR, "pmu", &pmu_device_fops
};

static int pmu_device_init(void)
{
	if (!via)
		return 0;
	if (misc_register(&pmu_device) < 0)
		printk(KERN_ERR "via-pmu: cannot register misc device.\n");
	return 0;
}
device_initcall(pmu_device_init);
#ifdef DEBUG_SLEEP
static inline void
polled_handshake(volatile unsigned char __iomem *via)
{
	via[B] &= ~TREQ; eieio();
	while ((via[B] & TACK) != 0)
		;
	via[B] |= TREQ; eieio();
	while ((via[B] & TACK) == 0)
		;
}

static inline void
polled_send_byte(volatile unsigned char __iomem *via, int x)
{
	via[ACR] |= SR_OUT | SR_EXT; eieio();
	via[SR] = x; eieio();
	polled_handshake(via);
}

static inline int
polled_recv_byte(volatile unsigned char __iomem *via)
{
	int x;

	via[ACR] = (via[ACR] & ~SR_OUT) | SR_EXT; eieio();
	x = via[SR]; eieio();
	polled_handshake(via);
	x = via[SR]; eieio();
	return x;
}

int
pmu_polled_request(struct adb_request *req)
{
	unsigned long flags;
	int i, l, c;
	volatile unsigned char __iomem *v = via;

	req->complete = 1;
	c = req->data[0];
	l = pmu_data_len[c][0];
	if (l >= 0 && req->nbytes != l + 1)
		return -EINVAL;

	local_irq_save(flags);
	while (pmu_state != idle)
		pmu_poll();

	while ((via[B] & TACK) == 0)
		;
	polled_send_byte(v, c);
	if (l < 0) {
		l = req->nbytes - 1;
		polled_send_byte(v, l);
	}
	for (i = 1; i <= l; ++i)
		polled_send_byte(v, req->data[i]);

	l = pmu_data_len[c][1];
	if (l < 0)
		l = polled_recv_byte(v);
	for (i = 0; i < l; ++i)
		req->reply[i + req->reply_len] = polled_recv_byte(v);

	if (req->done)
		(*req->done)(req);

	local_irq_restore(flags);
	return 0;
}
#endif /* DEBUG_SLEEP */
/* FIXME: This is a temporary set of callbacks to enable us
 * to do suspend-to-disk.
 */

#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_PPC32)
int pmu_sys_suspended;

static int pmu_sys_suspend(struct sys_device *sysdev, pm_message_t state)
{
	if (state.event != PM_EVENT_SUSPEND || pmu_sys_suspended)
		return 0;

	/* Suspend PMU event interrupts */
	pmu_suspend();
	pmu_sys_suspended = 1;
	return 0;
}

static int pmu_sys_resume(struct sys_device *sysdev)
{
	struct adb_request req;

	if (!pmu_sys_suspended)
		return 0;

	/* Tell PMU we are ready */
	pmu_request(&req, NULL, 2, PMU_SYSTEM_READY, 2);
	pmu_wait_complete(&req);

	/* Resume PMU event interrupts */
	pmu_resume();
	pmu_sys_suspended = 0;

	return 0;
}
#endif /* CONFIG_PM_SLEEP && CONFIG_PPC32 */
static struct sysdev_class pmu_sysclass = {
	set_kset_name("pmu"),
};

static struct sys_device device_pmu = {
	.cls		= &pmu_sysclass,
};

static struct sysdev_driver driver_pmu = {
#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_PPC32)
	.suspend	= &pmu_sys_suspend,
	.resume		= &pmu_sys_resume,
#endif /* CONFIG_PM_SLEEP && CONFIG_PPC32 */
};

static int __init init_pmu_sysfs(void)
{
	int rc;

	rc = sysdev_class_register(&pmu_sysclass);
	if (rc) {
		printk(KERN_ERR "Failed registering PMU sys class\n");
		return -ENODEV;
	}
	rc = sysdev_register(&device_pmu);
	if (rc) {
		printk(KERN_ERR "Failed registering PMU sys device\n");
		return -ENODEV;
	}
	rc = sysdev_driver_register(&pmu_sysclass, &driver_pmu);
	if (rc) {
		printk(KERN_ERR "Failed registering PMU sys driver\n");
		return -ENODEV;
	}
	return 0;
}

subsys_initcall(init_pmu_sysfs);
EXPORT_SYMBOL(pmu_request);
EXPORT_SYMBOL(pmu_queue_request);
EXPORT_SYMBOL(pmu_poll);
EXPORT_SYMBOL(pmu_poll_adb);
EXPORT_SYMBOL(pmu_wait_complete);
EXPORT_SYMBOL(pmu_suspend);
EXPORT_SYMBOL(pmu_resume);
EXPORT_SYMBOL(pmu_unlock);
#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_PPC32)
EXPORT_SYMBOL(pmu_enable_irled);
EXPORT_SYMBOL(pmu_battery_count);
EXPORT_SYMBOL(pmu_batteries);
EXPORT_SYMBOL(pmu_power_flags);
#endif /* CONFIG_PM_SLEEP && CONFIG_PPC32 */