
/*
 * Functions to handle I2O controllers and I2O message handling
 *
 * Copyright (C) 1999-2002 Red Hat Software
 *
 * Written by Alan Cox, Building Number Three Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * A lot of the I2O message side code from this is taken from the
 * Red Creek RCPCI45 adapter driver by Red Creek Communications
 *
 * Fixes/additions:
 *     Philipp Rumpf
 *     Juha Sievänen <Juha.Sievanen@cs.Helsinki.FI>
 *     Auvo Häkkinen <Auvo.Hakkinen@cs.Helsinki.FI>
 *     Deepak Saxena <deepak@plexity.net>
 *     Boji T Kannanthanam <boji.t.kannanthanam@intel.com>
 *     Alan Cox <alan@redhat.com>:
 *         Ported to Linux 2.5.
 *     Markus Lidel <Markus.Lidel@shadowconnect.com>:
 *         Minor fixes for 2.6.
 */

#include <linux/module.h>
#include <linux/i2o.h>
#include <linux/delay.h>

#define OSM_VERSION	"$Rev$"
#define OSM_DESCRIPTION	"I2O subsystem"

/* global I2O controller list */
LIST_HEAD(i2o_controllers);

/*
 * global I2O System Table. Contains information about all the IOPs in the
 * system. Used to inform IOPs about each other's existence.
 */
static struct i2o_dma i2o_systab;

static int i2o_hrt_get(struct i2o_controller *c);

/* Module internal functions from other sources */
extern struct i2o_driver i2o_exec_driver;
extern int i2o_exec_lct_get(struct i2o_controller *);
extern void i2o_device_remove(struct i2o_device *);

extern int __init i2o_driver_init(void);
extern void __exit i2o_driver_exit(void);
extern int __init i2o_exec_init(void);
extern void __exit i2o_exec_exit(void);
extern int __init i2o_pci_init(void);
extern void __exit i2o_pci_exit(void);
extern int i2o_device_init(void);
extern void i2o_device_exit(void);

/**
 * i2o_msg_nop - Return a message which is not used
 * @c: I2O controller from which the message was created
 * @m: message which should be returned
 *
 * If you fetch a message via i2o_msg_get and can't use it, you must
 * return the message with this function. Otherwise the message frame
 * is lost.
 */
void i2o_msg_nop(struct i2o_controller *c, u32 m)
{
    struct i2o_message __iomem *msg = i2o_msg_in_to_virt(c, m);

    writel(THREE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
    writel(I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | ADAPTER_TID,
           &msg->u.head[1]);
    writel(0, &msg->u.head[2]);
    writel(0, &msg->u.head[3]);
    i2o_msg_post(c, m);
};

/**
 * i2o_msg_get_wait - obtain an I2O message from the IOP
 * @c: I2O controller
 * @msg: pointer to an I2O message pointer
 * @wait: how long to wait until timeout
 *
 * This function waits up to wait seconds for a message slot to be
 * available.
 *
 * On success the message is returned and the pointer to the message is
 * set in msg. The returned message is the physical page frame offset
 * address from the read port (see the I2O spec). If no message is
 * available this function returns I2O_QUEUE_EMPTY and msg is left
 * untouched.
 */
u32 i2o_msg_get_wait(struct i2o_controller *c, struct i2o_message __iomem **msg,
                     int wait)
{
    unsigned long timeout = jiffies + wait * HZ;
    u32 m;

    while ((m = i2o_msg_get(c, msg)) == I2O_QUEUE_EMPTY) {
        if (time_after(jiffies, timeout)) {
            pr_debug("%s: Timeout waiting for message frame.\n",
                     c->name);
            return I2O_QUEUE_EMPTY;
        }
        set_current_state(TASK_UNINTERRUPTIBLE);
        schedule_timeout(1);
    }

    return m;
};
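/*
 * Illustrative usage (hypothetical OSM code, not part of this file): a
 * request is built in a frame obtained from the inbound queue and either
 * posted, or returned with i2o_msg_nop() if it turns out not to be needed:
 *
 *     struct i2o_message __iomem *msg;
 *     u32 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
 *     if (m == I2O_QUEUE_EMPTY)
 *         return -ETIMEDOUT;
 *     ... fill the frame with writel() ...
 *     i2o_msg_post(c, m);    (or i2o_msg_nop(c, m) to give the frame back)
 */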
#if BITS_PER_LONG == 64
/**
 * i2o_cntxt_list_add - Append a pointer to the context list and return an id
 * @c: controller to which the context list belongs
 * @ptr: pointer to add to the context list
 *
 * Because the context field in I2O is only 32 bits wide, on 64-bit
 * platforms a pointer is too large to fit in the context field. The
 * i2o_cntxt_list functions therefore map pointers to context ids.
 *
 * Returns context id > 0 on success or 0 on failure.
 */
u32 i2o_cntxt_list_add(struct i2o_controller *c, void *ptr)
{
    struct i2o_context_list_element *entry;
    unsigned long flags;

    if (!ptr)
        printk(KERN_ERR "%s: couldn't add NULL pointer to context list!"
               "\n", c->name);

    entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
    if (!entry) {
        printk(KERN_ERR "%s: Could not allocate memory for context "
               "list element\n", c->name);
        return 0;
    }

    entry->ptr = ptr;
    entry->timestamp = jiffies;
    INIT_LIST_HEAD(&entry->list);

    spin_lock_irqsave(&c->context_list_lock, flags);

    /* skip context id 0 on counter wrap-around; 0 means "no context" */
    if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
        atomic_inc(&c->context_list_counter);

    entry->context = atomic_read(&c->context_list_counter);

    list_add(&entry->list, &c->context_list);

    spin_unlock_irqrestore(&c->context_list_lock, flags);

    pr_debug("%s: Add context to list %p -> %d\n", c->name, ptr,
             entry->context);

    return entry->context;
};
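/*
 * Illustrative usage (hypothetical OSM code, not part of this file): on
 * 64-bit systems an OSM stores a context id in the 32-bit transaction
 * context instead of a raw pointer, roughly like
 *
 *     u32 ctx = i2o_cntxt_list_add(c, my_request);
 *     writel(ctx, &msg->u.s.tcntxt);
 *     ... later, when the reply arrives ...
 *     struct my_request *req = i2o_cntxt_list_get(c, ctx);
 *
 * where my_request/req are placeholder names.
 */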
/**
 * i2o_cntxt_list_remove - Remove a pointer from the context list
 * @c: controller to which the context list belongs
 * @ptr: pointer which should be removed from the context list
 *
 * Removes a previously added pointer from the context list and returns
 * the matching context id.
 *
 * Returns context id on success or 0 on failure.
 */
u32 i2o_cntxt_list_remove(struct i2o_controller *c, void *ptr)
{
    struct i2o_context_list_element *entry;
    u32 context = 0;
    unsigned long flags;

    spin_lock_irqsave(&c->context_list_lock, flags);
    list_for_each_entry(entry, &c->context_list, list)
        if (entry->ptr == ptr) {
            list_del(&entry->list);
            context = entry->context;
            kfree(entry);
            break;
        }
    spin_unlock_irqrestore(&c->context_list_lock, flags);

    if (!context)
        printk(KERN_WARNING "%s: Could not remove nonexistent ptr "
               "%p\n", c->name, ptr);

    pr_debug("%s: remove ptr from context list %d -> %p\n", c->name,
             context, ptr);

    return context;
};

/**
 * i2o_cntxt_list_get - Get a pointer from the context list and remove it
 * @c: controller to which the context list belongs
 * @context: context id to which the pointer belongs
 *
 * Returns pointer to the matching context id on success or NULL on
 * failure.
 */
void *i2o_cntxt_list_get(struct i2o_controller *c, u32 context)
{
    struct i2o_context_list_element *entry;
    unsigned long flags;
    void *ptr = NULL;

    spin_lock_irqsave(&c->context_list_lock, flags);
    list_for_each_entry(entry, &c->context_list, list)
        if (entry->context == context) {
            list_del(&entry->list);
            ptr = entry->ptr;
            kfree(entry);
            break;
        }
    spin_unlock_irqrestore(&c->context_list_lock, flags);

    if (!ptr)
        printk(KERN_WARNING "%s: context id %d not found\n", c->name,
               context);

    pr_debug("%s: get ptr from context list %d -> %p\n", c->name, context,
             ptr);

    return ptr;
};

/**
 * i2o_cntxt_list_get_ptr - Get a context id from the context list
 * @c: controller to which the context list belongs
 * @ptr: pointer for which the context id should be fetched
 *
 * Returns the context id which matches the pointer on success or 0 on
 * failure.
 */
u32 i2o_cntxt_list_get_ptr(struct i2o_controller *c, void *ptr)
{
    struct i2o_context_list_element *entry;
    u32 context = 0;
    unsigned long flags;

    spin_lock_irqsave(&c->context_list_lock, flags);
    list_for_each_entry(entry, &c->context_list, list)
        if (entry->ptr == ptr) {
            context = entry->context;
            break;
        }
    spin_unlock_irqrestore(&c->context_list_lock, flags);

    if (!context)
        printk(KERN_WARNING "%s: Could not find nonexistent ptr "
               "%p\n", c->name, ptr);

    pr_debug("%s: get context id from context list %p -> %d\n", c->name,
             ptr, context);

    return context;
};
#endif
/**
 * i2o_find_iop - Find an I2O controller by id
 * @unit: unit number of the I2O controller to search for
 *
 * Lookup the I2O controller on the controller list.
 *
 * Returns pointer to the I2O controller on success or NULL if not found.
 */
struct i2o_controller *i2o_find_iop(int unit)
{
    struct i2o_controller *c;

    list_for_each_entry(c, &i2o_controllers, list) {
        if (c->unit == unit)
            return c;
    }

    return NULL;
};

/**
 * i2o_iop_find_device - Find an I2O device on an I2O controller
 * @c: I2O controller on which the I2O device hangs
 * @tid: TID of the I2O device to search for
 *
 * Searches the devices of the I2O controller for a device with TID tid and
 * returns it.
 *
 * Returns a pointer to the I2O device if found, otherwise NULL.
 */
struct i2o_device *i2o_iop_find_device(struct i2o_controller *c, u16 tid)
{
    struct i2o_device *dev;

    list_for_each_entry(dev, &c->devices, list)
        if (dev->lct_data.tid == tid)
            return dev;

    return NULL;
};
/**
 * i2o_iop_quiesce - quiesce controller
 * @c: controller
 *
 * Quiesce an IOP. Causes the IOP to make external operation quiescent
 * (I2O 'READY' state). Internal operation of the IOP continues normally.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_iop_quiesce(struct i2o_controller *c)
{
    struct i2o_message __iomem *msg;
    u32 m;
    i2o_status_block *sb = c->status_block.virt;
    int rc;

    i2o_status_get(c);

    /* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
    if ((sb->iop_state != ADAPTER_STATE_READY) &&
        (sb->iop_state != ADAPTER_STATE_OPERATIONAL))
        return 0;

    m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
    if (m == I2O_QUEUE_EMPTY)
        return -ETIMEDOUT;

    writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
    writel(I2O_CMD_SYS_QUIESCE << 24 | HOST_TID << 12 | ADAPTER_TID,
           &msg->u.head[1]);

    /* Long timeout needed for quiesce if lots of devices */
    if ((rc = i2o_msg_post_wait(c, m, 240)))
        printk(KERN_INFO "%s: Unable to quiesce (status=%#x).\n",
               c->name, -rc);
    else
        pr_debug("%s: Quiesced.\n", c->name);

    i2o_status_get(c);    // Entered READY state

    return rc;
};

/**
 * i2o_iop_enable - move controller from READY to OPERATIONAL
 * @c: I2O controller
 *
 * Enable the IOP. This allows the IOP to resume external operations and
 * reverses the effect of a quiesce. Returns zero or an error code if
 * an error occurs.
 */
static int i2o_iop_enable(struct i2o_controller *c)
{
    struct i2o_message __iomem *msg;
    u32 m;
    i2o_status_block *sb = c->status_block.virt;
    int rc;

    i2o_status_get(c);

    /* Enable only allowed in READY state */
    if (sb->iop_state != ADAPTER_STATE_READY)
        return -EINVAL;

    m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
    if (m == I2O_QUEUE_EMPTY)
        return -ETIMEDOUT;

    writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
    writel(I2O_CMD_SYS_ENABLE << 24 | HOST_TID << 12 | ADAPTER_TID,
           &msg->u.head[1]);

    /* How long of a timeout do we need? */
    if ((rc = i2o_msg_post_wait(c, m, 240)))
        printk(KERN_ERR "%s: Could not enable (status=%#x).\n",
               c->name, -rc);
    else
        pr_debug("%s: Enabled.\n", c->name);

    i2o_status_get(c);    // entered OPERATIONAL state

    return rc;
};

/**
 * i2o_iop_quiesce_all - Quiesce all I2O controllers on the system
 *
 * Quiesce all I2O controllers which are connected to the system.
 */
static inline void i2o_iop_quiesce_all(void)
{
    struct i2o_controller *c, *tmp;

    list_for_each_entry_safe(c, tmp, &i2o_controllers, list) {
        if (!c->no_quiesce)
            i2o_iop_quiesce(c);
    }
};

/**
 * i2o_iop_enable_all - Enable all controllers on the system
 *
 * Enables all I2O controllers which are connected to the system.
 */
static inline void i2o_iop_enable_all(void)
{
    struct i2o_controller *c, *tmp;

    list_for_each_entry_safe(c, tmp, &i2o_controllers, list)
        i2o_iop_enable(c);
};
/**
 * i2o_iop_clear - Bring I2O controller into HOLD state
 * @c: controller
 *
 * Clear an IOP to HOLD state, i.e. terminate external operations, clear all
 * input queues and prepare for a system restart. The IOP's internal
 * operation continues normally and the outbound queue is alive. The IOP is
 * not expected to rebuild its LCT.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_iop_clear(struct i2o_controller *c)
{
    struct i2o_message __iomem *msg;
    u32 m;
    int rc;

    m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
    if (m == I2O_QUEUE_EMPTY)
        return -ETIMEDOUT;

    /* Quiesce all IOPs first */
    i2o_iop_quiesce_all();

    writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
    writel(I2O_CMD_ADAPTER_CLEAR << 24 | HOST_TID << 12 | ADAPTER_TID,
           &msg->u.head[1]);

    if ((rc = i2o_msg_post_wait(c, m, 30)))
        printk(KERN_INFO "%s: Unable to clear (status=%#x).\n",
               c->name, -rc);
    else
        pr_debug("%s: Cleared.\n", c->name);

    /* Enable all IOPs */
    i2o_iop_enable_all();

    return rc;
}

/**
 * i2o_iop_init_outbound_queue - setup the outbound message queue
 * @c: I2O controller
 *
 * Clear and (re)initialize the IOP's outbound queue and post the message
 * frames to the IOP.
 *
 * Returns 0 on success or a negative errno code on failure.
 */
static int i2o_iop_init_outbound_queue(struct i2o_controller *c)
{
    u8 *status = c->status.virt;
    u32 m;
    struct i2o_message __iomem *msg;
    ulong timeout;
    int i;

    osm_debug("%s: Initializing Outbound Queue...\n", c->name);

    memset(status, 0, 4);

    m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
    if (m == I2O_QUEUE_EMPTY)
        return -ETIMEDOUT;

    writel(EIGHT_WORD_MSG_SIZE | TRL_OFFSET_6, &msg->u.head[0]);
    writel(I2O_CMD_OUTBOUND_INIT << 24 | HOST_TID << 12 | ADAPTER_TID,
           &msg->u.head[1]);
    writel(i2o_exec_driver.context, &msg->u.s.icntxt);
    writel(0x0106, &msg->u.s.tcntxt);    /* FIXME: why 0x0106, maybe in Spec? */
    writel(PAGE_SIZE, &msg->body[0]);
    /* Outbound msg frame size in words and Initcode */
    writel(MSG_FRAME_SIZE << 16 | 0x80, &msg->body[1]);
    /* SG element describing the 4-byte status field the IOP writes below */
    writel(0xd0000004, &msg->body[2]);
    writel(i2o_dma_low(c->status.phys), &msg->body[3]);
    writel(i2o_dma_high(c->status.phys), &msg->body[4]);

    i2o_msg_post(c, m);

    timeout = jiffies + I2O_TIMEOUT_INIT_OUTBOUND_QUEUE * HZ;
    while (*status <= I2O_CMD_IN_PROGRESS) {
        if (time_after(jiffies, timeout)) {
            osm_warn("%s: Timeout Initializing\n", c->name);
            return -ETIMEDOUT;
        }
        set_current_state(TASK_UNINTERRUPTIBLE);
        schedule_timeout(1);
        rmb();
    }

    m = c->out_queue.phys;

    /* Post frames */
    for (i = 0; i < NMBR_MSG_FRAMES; i++) {
        i2o_flush_reply(c, m);
        udelay(1);    /* Promise */
        m += MSG_FRAME_SIZE * 4;
    }

    return 0;
}
/**
 * i2o_iop_reset - reset an I2O controller
 * @c: controller to reset
 *
 * Reset the IOP into INIT state and wait until the IOP gets into RESET
 * state. Terminate all external operations, clear the IOP's inbound and
 * outbound queues, terminate all DDMs, and reload the IOP's operating
 * environment and all local DDMs. The IOP rebuilds its LCT.
 */
static int i2o_iop_reset(struct i2o_controller *c)
{
    u8 *status = c->status.virt;
    struct i2o_message __iomem *msg;
    u32 m;
    unsigned long timeout;
    i2o_status_block *sb = c->status_block.virt;
    int rc = 0;

    pr_debug("%s: Resetting controller\n", c->name);

    m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
    if (m == I2O_QUEUE_EMPTY)
        return -ETIMEDOUT;

    memset(status, 0, 8);

    /* Quiesce all IOPs first */
    i2o_iop_quiesce_all();

    writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
    writel(I2O_CMD_ADAPTER_RESET << 24 | HOST_TID << 12 | ADAPTER_TID,
           &msg->u.head[1]);
    writel(i2o_exec_driver.context, &msg->u.s.icntxt);
    writel(0, &msg->u.s.tcntxt);    //FIXME: use reasonable transaction context
    writel(0, &msg->body[0]);
    writel(0, &msg->body[1]);
    writel(i2o_dma_low(c->status.phys), &msg->body[2]);
    writel(i2o_dma_high(c->status.phys), &msg->body[3]);

    i2o_msg_post(c, m);

    /* Wait for a reply */
    timeout = jiffies + I2O_TIMEOUT_RESET * HZ;
    while (!*status) {
        if (time_after(jiffies, timeout))
            break;

        set_current_state(TASK_UNINTERRUPTIBLE);
        schedule_timeout(1);
        rmb();
    }

    switch (*status) {
    case I2O_CMD_REJECTED:
        osm_warn("%s: IOP reset rejected\n", c->name);
        rc = -EPERM;
        break;

    case I2O_CMD_IN_PROGRESS:
        /*
         * Once the reset is sent, the IOP goes into the INIT state
         * which is indeterminate. We need to wait until the IOP has
         * rebooted before we can let the system talk to it. We read
         * the inbound Free_List until a message is available. If we
         * can't read one in the given amount of time, we assume the
         * IOP could not reboot properly.
         */
        pr_debug("%s: Reset in progress, waiting for reboot...\n",
                 c->name);

        m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_RESET);
        while (m == I2O_QUEUE_EMPTY) {
            if (time_after(jiffies, timeout)) {
                printk(KERN_ERR "%s: IOP reset timeout.\n",
                       c->name);
                rc = -ETIMEDOUT;
                goto exit;
            }
            set_current_state(TASK_UNINTERRUPTIBLE);
            schedule_timeout(1);

            m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_RESET);
        }
        i2o_msg_nop(c, m);

        /* from here all quiesce commands are safe */
        c->no_quiesce = 0;

        /* verify if controller is in state RESET */
        i2o_status_get(c);

        if (!c->promise && (sb->iop_state != ADAPTER_STATE_RESET))
            osm_warn("%s: reset completed, but adapter not in RESET"
                     " state.\n", c->name);
        else
            osm_debug("%s: reset completed.\n", c->name);

        break;

    default:
        osm_err("%s: IOP reset timeout.\n", c->name);
        rc = -ETIMEDOUT;
        break;
    }

exit:
    /* Enable all IOPs */
    i2o_iop_enable_all();

    return rc;
};
/**
 * i2o_iop_activate - Bring controller up to HOLD
 * @c: controller
 *
 * This function brings an I2O controller into HOLD state. The adapter
 * is reset if necessary and then the queues and resource table are read.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_iop_activate(struct i2o_controller *c)
{
    i2o_status_block *sb = c->status_block.virt;
    int rc;
    int state;

    /* In INIT state, Wait Inbound Q to initialize (in i2o_status_get) */
    /* In READY state, Get status */

    rc = i2o_status_get(c);
    if (rc) {
        printk(KERN_INFO "%s: Unable to obtain status, "
               "attempting a reset.\n", c->name);
        rc = i2o_iop_reset(c);
        if (rc)
            return rc;
    }

    if (sb->i2o_version > I2OVER15) {
        printk(KERN_ERR "%s: Not running version 1.5 of the I2O "
               "Specification.\n", c->name);
        return -ENODEV;
    }

    switch (sb->iop_state) {
    case ADAPTER_STATE_FAULTED:
        printk(KERN_CRIT "%s: hardware fault\n", c->name);
        return -EFAULT;

    case ADAPTER_STATE_READY:
    case ADAPTER_STATE_OPERATIONAL:
    case ADAPTER_STATE_HOLD:
    case ADAPTER_STATE_FAILED:
        pr_debug("%s: already running, trying to reset...\n", c->name);
        rc = i2o_iop_reset(c);
        if (rc)
            return rc;
    }

    /* preserve state */
    state = sb->iop_state;

    rc = i2o_iop_init_outbound_queue(c);
    if (rc)
        return rc;

    /* if adapter was not in RESET state clear now */
    if (state != ADAPTER_STATE_RESET)
        i2o_iop_clear(c);

    i2o_status_get(c);

    if (sb->iop_state != ADAPTER_STATE_HOLD) {
        osm_err("%s: failed to bring IOP into HOLD state\n", c->name);
        return -EIO;
    }

    return i2o_hrt_get(c);
};
/**
 * i2o_iop_systab_set - Set the I2O System Table of the specified IOP
 * @c: I2O controller to which the system table should be sent
 *
 * i2o_systab_build() must be called before the system table can be set.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_iop_systab_set(struct i2o_controller *c)
{
    struct i2o_message __iomem *msg;
    u32 m;
    i2o_status_block *sb = c->status_block.virt;
    struct device *dev = &c->pdev->dev;
    struct resource *root;
    int rc;

    if (sb->current_mem_size < sb->desired_mem_size) {
        struct resource *res = &c->mem_resource;
        res->name = c->pdev->bus->name;
        res->flags = IORESOURCE_MEM;
        res->start = 0;
        res->end = 0;
        printk(KERN_INFO "%s: requires private memory resources.\n",
               c->name);
        root = pci_find_parent_resource(c->pdev, res);
        if (root == NULL)
            printk(KERN_WARNING "%s: Can't find parent resource!\n",
                   c->name);
        if (root && allocate_resource(root, res, sb->desired_mem_size,
                                      sb->desired_mem_size,
                                      sb->desired_mem_size,
                                      1 << 20,    /* Unspecified, so use 1Mb and play safe */
                                      NULL, NULL) >= 0) {
            c->mem_alloc = 1;
            sb->current_mem_size = 1 + res->end - res->start;
            sb->current_mem_base = res->start;
            printk(KERN_INFO "%s: allocated %ld bytes of PCI memory"
                   " at 0x%08lX.\n", c->name,
                   1 + res->end - res->start, res->start);
        }
    }

    if (sb->current_io_size < sb->desired_io_size) {
        struct resource *res = &c->io_resource;
        res->name = c->pdev->bus->name;
        res->flags = IORESOURCE_IO;
        res->start = 0;
        res->end = 0;
        printk(KERN_INFO "%s: requires private I/O resources.\n",
               c->name);
        root = pci_find_parent_resource(c->pdev, res);
        if (root == NULL)
            printk(KERN_WARNING "%s: Can't find parent resource!\n",
                   c->name);
        if (root && allocate_resource(root, res, sb->desired_io_size,
                                      sb->desired_io_size,
                                      sb->desired_io_size,
                                      1 << 20,    /* Unspecified, so use 1Mb and play safe */
                                      NULL, NULL) >= 0) {
            c->io_alloc = 1;
            sb->current_io_size = 1 + res->end - res->start;
            sb->current_io_base = res->start;
            printk(KERN_INFO "%s: allocated %ld bytes of PCI I/O at"
                   " 0x%08lX.\n", c->name,
                   1 + res->end - res->start, res->start);
        }
    }

    m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
    if (m == I2O_QUEUE_EMPTY)
        return -ETIMEDOUT;

    i2o_systab.phys = dma_map_single(dev, i2o_systab.virt, i2o_systab.len,
                                     PCI_DMA_TODEVICE);
    if (!i2o_systab.phys) {
        i2o_msg_nop(c, m);
        return -ENOMEM;
    }

    writel(I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6, &msg->u.head[0]);
    writel(I2O_CMD_SYS_TAB_SET << 24 | HOST_TID << 12 | ADAPTER_TID,
           &msg->u.head[1]);

    /*
     * Provide three SGL-elements:
     * System table (SysTab), Private memory space declaration and
     * Private i/o space declaration
     *
     * FIXME: is this still true?
     * Nasty one here. We can't use dma_alloc_coherent to send the
     * same table to everyone. We have to go remap it for them all
     */
    writel(c->unit + 2, &msg->body[0]);
    writel(0, &msg->body[1]);
    writel(0x54000000 | i2o_systab.len, &msg->body[2]);
    writel(i2o_systab.phys, &msg->body[3]);
    writel(0x54000000 | sb->current_mem_size, &msg->body[4]);
    writel(sb->current_mem_base, &msg->body[5]);
    writel(0xd4000000 | sb->current_io_size, &msg->body[6]);
    writel(sb->current_io_base, &msg->body[7]);

    rc = i2o_msg_post_wait(c, m, 120);

    dma_unmap_single(dev, i2o_systab.phys, i2o_systab.len,
                     PCI_DMA_TODEVICE);

    if (rc < 0)
        printk(KERN_ERR "%s: Unable to set SysTab (status=%#x).\n",
               c->name, -rc);
    else
        pr_debug("%s: SysTab set.\n", c->name);

    i2o_status_get(c);    // Entered READY state

    return rc;
}
/**
 * i2o_iop_online - Bring a controller online into OPERATIONAL state.
 * @c: I2O controller
 *
 * Send the system table and enable the I2O controller.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_iop_online(struct i2o_controller *c)
{
    int rc;

    rc = i2o_iop_systab_set(c);
    if (rc)
        return rc;

    /* In READY state */
    pr_debug("%s: Attempting to enable...\n", c->name);
    rc = i2o_iop_enable(c);
    if (rc)
        return rc;

    return 0;
};

/**
 * i2o_iop_remove - Remove the I2O controller from the I2O core
 * @c: I2O controller
 *
 * Remove the I2O controller from the I2O core. If devices are attached to
 * the controller, remove these also and finally reset the controller.
 */
void i2o_iop_remove(struct i2o_controller *c)
{
    struct i2o_device *dev, *tmp;

    pr_debug("%s: deleting controller\n", c->name);

    i2o_driver_notify_controller_remove_all(c);

    list_del(&c->list);

    list_for_each_entry_safe(dev, tmp, &c->devices, list)
        i2o_device_remove(dev);

    device_del(&c->device);

    /* Ask the IOP to switch to RESET state */
    i2o_iop_reset(c);

    put_device(&c->device);
}
/**
 * i2o_systab_build - Build system table
 *
 * The system table contains information about all the IOPs in the system
 * (duh) and is used by the Executives on the IOPs to establish peer2peer
 * connections. We're not supporting peer2peer at the moment, but this
 * will be needed down the road for things like lan2lan forwarding.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_systab_build(void)
{
    struct i2o_controller *c, *tmp;
    int num_controllers = 0;
    u32 change_ind = 0;
    int count = 0;
    struct i2o_sys_tbl *systab = i2o_systab.virt;

    list_for_each_entry_safe(c, tmp, &i2o_controllers, list)
        num_controllers++;

    if (systab) {
        change_ind = systab->change_ind;
        kfree(i2o_systab.virt);
    }

    /* Header + IOPs */
    i2o_systab.len = sizeof(struct i2o_sys_tbl) + num_controllers *
        sizeof(struct i2o_sys_tbl_entry);

    systab = i2o_systab.virt = kmalloc(i2o_systab.len, GFP_KERNEL);
    if (!systab) {
        printk(KERN_ERR "i2o: unable to allocate memory for System "
               "Table\n");
        return -ENOMEM;
    }
    memset(systab, 0, i2o_systab.len);

    systab->version = I2OVERSION;
    systab->change_ind = change_ind + 1;

    list_for_each_entry_safe(c, tmp, &i2o_controllers, list) {
        i2o_status_block *sb;

        if (count >= num_controllers) {
            printk(KERN_ERR "i2o: controller added while building "
                   "system table\n");
            break;
        }

        sb = c->status_block.virt;

        /*
         * Get updated IOP state so we have the latest information
         *
         * We should delete the controller at this point if it
         * doesn't respond since if it's not on the system table
         * it is technically not part of the I2O subsystem...
         */
        if (unlikely(i2o_status_get(c))) {
            printk(KERN_ERR "%s: Deleting b/c could not get status"
                   " while attempting to build system table\n",
                   c->name);
            i2o_iop_remove(c);
            continue;    // try the next one
        }

        systab->iops[count].org_id = sb->org_id;
        systab->iops[count].iop_id = c->unit + 2;
        systab->iops[count].seg_num = 0;
        systab->iops[count].i2o_version = sb->i2o_version;
        systab->iops[count].iop_state = sb->iop_state;
        systab->iops[count].msg_type = sb->msg_type;
        systab->iops[count].frame_size = sb->inbound_frame_size;
        systab->iops[count].last_changed = change_ind;
        systab->iops[count].iop_capabilities = sb->iop_capabilities;
        systab->iops[count].inbound_low =
            i2o_dma_low(c->base.phys + I2O_IN_PORT);
        systab->iops[count].inbound_high =
            i2o_dma_high(c->base.phys + I2O_IN_PORT);

        count++;
    }

    systab->num_entries = count;

    return 0;
};
/**
 * i2o_parse_hrt - Parse the hardware resource table.
 * @c: I2O controller
 *
 * We don't do anything with it except dumping it (in debug mode).
 *
 * Returns 0.
 */
static int i2o_parse_hrt(struct i2o_controller *c)
{
    i2o_dump_hrt(c);
    return 0;
};

/**
 * i2o_status_get - Get the status block from the I2O controller
 * @c: I2O controller
 *
 * Issue a status query on the controller. This updates the attached
 * status block. The status block could then be accessed through
 * c->status_block.
 *
 * Returns 0 on success or negative error code on failure.
 */
int i2o_status_get(struct i2o_controller *c)
{
    struct i2o_message __iomem *msg;
    u32 m;
    u8 *status_block;
    unsigned long timeout;

    status_block = (u8 *) c->status_block.virt;
    memset(status_block, 0, sizeof(i2o_status_block));

    m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
    if (m == I2O_QUEUE_EMPTY)
        return -ETIMEDOUT;

    writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
    writel(I2O_CMD_STATUS_GET << 24 | HOST_TID << 12 | ADAPTER_TID,
           &msg->u.head[1]);
    writel(i2o_exec_driver.context, &msg->u.s.icntxt);
    writel(0, &msg->u.s.tcntxt);    // FIXME: use reasonable transaction context
    writel(0, &msg->body[0]);
    writel(0, &msg->body[1]);
    writel(i2o_dma_low(c->status_block.phys), &msg->body[2]);
    writel(i2o_dma_high(c->status_block.phys), &msg->body[3]);
    writel(sizeof(i2o_status_block), &msg->body[4]);    /* always 88 bytes */

    i2o_msg_post(c, m);

    /* Wait for a reply: the IOP writes 0xFF into the last byte of the
       88-byte status block when the transfer is complete */
    timeout = jiffies + I2O_TIMEOUT_STATUS_GET * HZ;
    while (status_block[87] != 0xFF) {
        if (time_after(jiffies, timeout)) {
            printk(KERN_ERR "%s: Get status timeout.\n", c->name);
            return -ETIMEDOUT;
        }

        set_current_state(TASK_UNINTERRUPTIBLE);
        schedule_timeout(1);
        rmb();
    }

#ifdef DEBUG
    i2o_debug_state(c);
#endif

    return 0;
}
/**
 * i2o_hrt_get - Get the Hardware Resource Table from the I2O controller
 * @c: I2O controller from which the HRT should be fetched
 *
 * The HRT contains information about possible hidden devices but is
 * mostly useless to us.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_hrt_get(struct i2o_controller *c)
{
    int rc;
    int i;
    i2o_hrt *hrt = c->hrt.virt;
    u32 size = sizeof(i2o_hrt);
    struct device *dev = &c->pdev->dev;

    for (i = 0; i < I2O_HRT_GET_TRIES; i++) {
        struct i2o_message __iomem *msg;
        u32 m;

        m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
        if (m == I2O_QUEUE_EMPTY)
            return -ETIMEDOUT;

        writel(SIX_WORD_MSG_SIZE | SGL_OFFSET_4, &msg->u.head[0]);
        writel(I2O_CMD_HRT_GET << 24 | HOST_TID << 12 | ADAPTER_TID,
               &msg->u.head[1]);
        writel(0xd0000000 | c->hrt.len, &msg->body[0]);
        writel(c->hrt.phys, &msg->body[1]);

        rc = i2o_msg_post_wait_mem(c, m, 20, &c->hrt);
        if (rc < 0) {
            printk(KERN_ERR "%s: Unable to get HRT (status=%#x)\n",
                   c->name, -rc);
            return rc;
        }

        size = hrt->num_entries * hrt->entry_len << 2;
        if (size > c->hrt.len) {
            /* HRT buffer too small; grow it and retry */
            if (i2o_dma_realloc(dev, &c->hrt, size, GFP_KERNEL))
                return -ENOMEM;
            else
                hrt = c->hrt.virt;
        } else
            return i2o_parse_hrt(c);
    }

    printk(KERN_ERR "%s: Unable to get HRT after %d tries, giving up\n",
           c->name, I2O_HRT_GET_TRIES);

    return -EBUSY;
}
/**
 * i2o_iop_free - Free the i2o_controller struct
 * @c: I2O controller to free
 */
void i2o_iop_free(struct i2o_controller *c)
{
    kfree(c);
};

/**
 * i2o_iop_release - release the memory for an I2O controller
 * @dev: I2O controller which should be released
 *
 * Release the allocated memory. This function is called automatically when
 * the refcount of the device reaches 0.
 */
static void i2o_iop_release(struct device *dev)
{
    struct i2o_controller *c = to_i2o_controller(dev);

    i2o_iop_free(c);
};

/**
 * i2o_iop_alloc - Allocate and initialize an i2o_controller struct
 *
 * Allocate the necessary memory for an i2o_controller struct and
 * initialize the lists.
 *
 * Returns a pointer to the I2O controller or a negative error code on
 * failure.
 */
struct i2o_controller *i2o_iop_alloc(void)
{
    static int unit = 0;    /* 0 and 1 are NULL IOP and Local Host */
    struct i2o_controller *c;

    c = kmalloc(sizeof(*c), GFP_KERNEL);
    if (!c) {
        printk(KERN_ERR "i2o: Insufficient memory to allocate an I2O "
               "controller.\n");
        return ERR_PTR(-ENOMEM);
    }
    memset(c, 0, sizeof(*c));

    INIT_LIST_HEAD(&c->devices);
    spin_lock_init(&c->lock);
    init_MUTEX(&c->lct_lock);
    c->unit = unit++;
    sprintf(c->name, "iop%d", c->unit);

    device_initialize(&c->device);
    c->device.release = &i2o_iop_release;
    snprintf(c->device.bus_id, BUS_ID_SIZE, "iop%d", c->unit);

#if BITS_PER_LONG == 64
    spin_lock_init(&c->context_list_lock);
    atomic_set(&c->context_list_counter, 0);
    INIT_LIST_HEAD(&c->context_list);
#endif

    return c;
};
/**
 * i2o_iop_add - Initialize the I2O controller and add it to the I2O core
 * @c: controller
 *
 * Initialize the I2O controller and, if no error occurs, add it to the
 * I2O core.
 *
 * Returns 0 on success or negative error code on failure.
 */
int i2o_iop_add(struct i2o_controller *c)
{
    int rc;

    if ((rc = device_add(&c->device))) {
        printk(KERN_ERR "%s: could not register controller\n", c->name);
        goto iop_reset;
    }

    printk(KERN_INFO "%s: Activating I2O controller...\n", c->name);
    printk(KERN_INFO "%s: This may take a few minutes if there are many "
           "devices\n", c->name);

    if ((rc = i2o_iop_activate(c))) {
        printk(KERN_ERR "%s: could not activate controller\n",
               c->name);
        goto iop_reset;
    }

    pr_debug("%s: building sys table...\n", c->name);

    if ((rc = i2o_systab_build()))
        goto iop_reset;

    pr_debug("%s: online controller...\n", c->name);

    if ((rc = i2o_iop_online(c)))
        goto iop_reset;

    pr_debug("%s: getting LCT...\n", c->name);

    if ((rc = i2o_exec_lct_get(c)))
        goto iop_reset;

    list_add(&c->list, &i2o_controllers);

    i2o_driver_notify_controller_add_all(c);

    printk(KERN_INFO "%s: Controller added\n", c->name);

    return 0;

iop_reset:
    i2o_iop_reset(c);

    return rc;
};
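/*
 * Illustrative call sequence (a sketch of how the bus-specific glue is
 * expected to use these helpers, not code from this file): a probe routine
 * would typically allocate and register a controller with
 *
 *     c = i2o_iop_alloc();
 *     if (IS_ERR(c))
 *         return PTR_ERR(c);
 *     ... fill in c->pdev, map c->base, c->status, etc. ...
 *     rc = i2o_iop_add(c);
 *
 * and tear it down again with i2o_iop_remove(c).
 */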
/**
 * i2o_event_register - Turn on/off event notification for an I2O device
 * @dev: I2O device which should receive the event registration request
 * @drv: driver which wants to get notified
 * @tcntxt: transaction context to use with this notifier
 * @evt_mask: mask of events
 *
 * Creates and posts an event registration message to the task. No reply
 * is waited for, or expected. If you do not want further notifications,
 * call i2o_event_register again with an evt_mask of 0.
 *
 * Returns 0 on success or -ETIMEDOUT if no message could be fetched for
 * sending the request.
 */
int i2o_event_register(struct i2o_device *dev, struct i2o_driver *drv,
                       int tcntxt, u32 evt_mask)
{
    struct i2o_controller *c = dev->iop;
    struct i2o_message __iomem *msg;
    u32 m;

    m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
    if (m == I2O_QUEUE_EMPTY)
        return -ETIMEDOUT;

    writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
    writel(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 |
           dev->lct_data.tid, &msg->u.head[1]);
    writel(drv->context, &msg->u.s.icntxt);
    writel(tcntxt, &msg->u.s.tcntxt);
    writel(evt_mask, &msg->body[0]);

    i2o_msg_post(c, m);

    return 0;
};
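/*
 * Illustrative usage (hypothetical OSM code, not part of this file): an OSM
 * that wants to be told about all events on a device could call
 *
 *     i2o_event_register(i2o_dev, &my_driver, my_tcntxt, 0xffffffff);
 *
 * and later disable notification again with
 *
 *     i2o_event_register(i2o_dev, &my_driver, my_tcntxt, 0);
 *
 * where my_driver and my_tcntxt are placeholder names.
 */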
/**
 * i2o_iop_init - I2O main initialization function
 *
 * Initialize the I2O driver (OSM) core functions, register the Executive
 * OSM, initialize the I2O PCI part and finally initialize the I2O device
 * handling.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int __init i2o_iop_init(void)
{
    int rc = 0;

    printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n");

    rc = i2o_device_init();
    if (rc)
        goto exit;

    rc = i2o_driver_init();
    if (rc)
        goto device_exit;

    rc = i2o_exec_init();
    if (rc)
        goto driver_exit;

    rc = i2o_pci_init();
    if (rc < 0)
        goto exec_exit;

    return 0;

exec_exit:
    i2o_exec_exit();

driver_exit:
    i2o_driver_exit();

device_exit:
    i2o_device_exit();

exit:
    return rc;
}

/**
 * i2o_iop_exit - I2O main exit function
 *
 * Removes I2O controllers from the PCI subsystem and shuts down the OSMs.
 */
static void __exit i2o_iop_exit(void)
{
    i2o_pci_exit();
    i2o_exec_exit();
    i2o_driver_exit();
    i2o_device_exit();
};

module_init(i2o_iop_init);
module_exit(i2o_iop_exit);

MODULE_AUTHOR("Red Hat Software");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(OSM_DESCRIPTION);
MODULE_VERSION(OSM_VERSION);

#if BITS_PER_LONG == 64
EXPORT_SYMBOL(i2o_cntxt_list_add);
EXPORT_SYMBOL(i2o_cntxt_list_get);
EXPORT_SYMBOL(i2o_cntxt_list_remove);
EXPORT_SYMBOL(i2o_cntxt_list_get_ptr);
#endif
EXPORT_SYMBOL(i2o_msg_get_wait);
EXPORT_SYMBOL(i2o_msg_nop);
EXPORT_SYMBOL(i2o_find_iop);
EXPORT_SYMBOL(i2o_iop_find_device);
EXPORT_SYMBOL(i2o_event_register);
EXPORT_SYMBOL(i2o_status_get);
EXPORT_SYMBOL(i2o_controllers);