i2o.h

/*
 * I2O kernel space accessible structures/APIs
 *
 * (c) Copyright 1999, 2000 Red Hat Software
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 *************************************************************************
 *
 * This header file defines the I2O APIs/structures for use by
 * the I2O kernel modules.
 *
 */

#ifndef _I2O_H
#define _I2O_H

#ifdef __KERNEL__		/* This file is to be included by the kernel only */

#include <linux/i2o-dev.h>

/* How many different OSMs are we allowing */
#define I2O_MAX_DRIVERS 8

#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/workqueue.h>	/* work_struct */
#include <linux/mempool.h>

#include <asm/io.h>
#include <asm/semaphore.h>	/* Needed for MUTEX init macros */
/* message queue empty */
#define I2O_QUEUE_EMPTY 0xffffffff

/*
 * Cache strategies
 */

/* The NULL strategy leaves everything up to the controller. This tends to be a
 * pessimal but functional choice.
 */
#define CACHE_NULL 0
/* Prefetch data when reading. We continually attempt to load the next 32 sectors
 * into the controller cache.
 */
#define CACHE_PREFETCH 1
/* Prefetch data when reading. We sometimes attempt to load the next 32 sectors
 * into the controller cache. When an I/O is <= 8K we assume it is probably
 * not sequential and don't prefetch (default).
 */
#define CACHE_SMARTFETCH 2
/* Data is written to the cache and then out on to the disk. The I/O must be
 * physically on the medium before the write is acknowledged (default without
 * NVRAM).
 */
#define CACHE_WRITETHROUGH 17
/* Data is written to the cache and then out on to the disk. The controller
 * is permitted to write back the cache any way it wants (default if battery
 * backed NVRAM is present). It can be useful to set this for swap regardless of
 * battery state.
 */
#define CACHE_WRITEBACK 18
/* Optimise for underpowered controllers, especially on RAID1 and RAID0. We
 * write large I/Os directly to disk, bypassing the cache to avoid the extra
 * memory copy hits. Small writes are writeback cached.
 */
#define CACHE_SMARTBACK 19
/* Optimise for underpowered controllers, especially on RAID1 and RAID0. We
 * write large I/Os directly to disk, bypassing the cache to avoid the extra
 * memory copy hits. Small writes are writethrough cached. Suitable for devices
 * lacking battery backup.
 */
#define CACHE_SMARTTHROUGH 20

/*
 * Ioctl structures
 */
#define BLKI2OGRSTRAT _IOR('2', 1, int)
#define BLKI2OGWSTRAT _IOR('2', 2, int)
#define BLKI2OSRSTRAT _IOW('2', 3, int)
#define BLKI2OSWSTRAT _IOW('2', 4, int)
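/*
 * Illustrative sketch (not part of the original header): how a userspace
 * tool might use the BLKI2O* ioctls above on an I2O block device to query
 * and change the read cache strategy. The device node path is an assumption
 * (i2o_block typically creates nodes such as /dev/i2o/hda), and since this
 * header is kernel-only the ioctl numbers would have to be mirrored in
 * userspace.
 */
#if 0
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

int main(void)
{
        int strategy;
        int fd = open("/dev/i2o/hda", O_RDONLY);

        if (fd < 0)
                return 1;

        if (ioctl(fd, BLKI2OGRSTRAT, &strategy) == 0)
                printf("read cache strategy: %d\n", strategy);

        strategy = CACHE_SMARTFETCH;	/* request smart prefetch */
        ioctl(fd, BLKI2OSRSTRAT, &strategy);

        close(fd);
        return 0;
}
#endif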
/*
 * I2O Function codes
 */

/*
 * Executive Class
 */
#define I2O_CMD_ADAPTER_ASSIGN 0xB3
#define I2O_CMD_ADAPTER_READ 0xB2
#define I2O_CMD_ADAPTER_RELEASE 0xB5
#define I2O_CMD_BIOS_INFO_SET 0xA5
#define I2O_CMD_BOOT_DEVICE_SET 0xA7
#define I2O_CMD_CONFIG_VALIDATE 0xBB
#define I2O_CMD_CONN_SETUP 0xCA
#define I2O_CMD_DDM_DESTROY 0xB1
#define I2O_CMD_DDM_ENABLE 0xD5
#define I2O_CMD_DDM_QUIESCE 0xC7
#define I2O_CMD_DDM_RESET 0xD9
#define I2O_CMD_DDM_SUSPEND 0xAF
#define I2O_CMD_DEVICE_ASSIGN 0xB7
#define I2O_CMD_DEVICE_RELEASE 0xB9
#define I2O_CMD_HRT_GET 0xA8
#define I2O_CMD_ADAPTER_CLEAR 0xBE
#define I2O_CMD_ADAPTER_CONNECT 0xC9
#define I2O_CMD_ADAPTER_RESET 0xBD
#define I2O_CMD_LCT_NOTIFY 0xA2
#define I2O_CMD_OUTBOUND_INIT 0xA1
#define I2O_CMD_PATH_ENABLE 0xD3
#define I2O_CMD_PATH_QUIESCE 0xC5
#define I2O_CMD_PATH_RESET 0xD7
#define I2O_CMD_STATIC_MF_CREATE 0xDD
#define I2O_CMD_STATIC_MF_RELEASE 0xDF
#define I2O_CMD_STATUS_GET 0xA0
#define I2O_CMD_SW_DOWNLOAD 0xA9
#define I2O_CMD_SW_UPLOAD 0xAB
#define I2O_CMD_SW_REMOVE 0xAD
#define I2O_CMD_SYS_ENABLE 0xD1
#define I2O_CMD_SYS_MODIFY 0xC1
#define I2O_CMD_SYS_QUIESCE 0xC3
#define I2O_CMD_SYS_TAB_SET 0xA3

/*
 * Utility Class
 */
#define I2O_CMD_UTIL_NOP 0x00
#define I2O_CMD_UTIL_ABORT 0x01
#define I2O_CMD_UTIL_CLAIM 0x09
#define I2O_CMD_UTIL_RELEASE 0x0B
#define I2O_CMD_UTIL_PARAMS_GET 0x06
#define I2O_CMD_UTIL_PARAMS_SET 0x05
#define I2O_CMD_UTIL_EVT_REGISTER 0x13
#define I2O_CMD_UTIL_EVT_ACK 0x14
#define I2O_CMD_UTIL_CONFIG_DIALOG 0x10
#define I2O_CMD_UTIL_DEVICE_RESERVE 0x0D
#define I2O_CMD_UTIL_DEVICE_RELEASE 0x0F
#define I2O_CMD_UTIL_LOCK 0x17
#define I2O_CMD_UTIL_LOCK_RELEASE 0x19
#define I2O_CMD_UTIL_REPLY_FAULT_NOTIFY 0x15

/*
 * SCSI Host Bus Adapter Class
 */
#define I2O_CMD_SCSI_EXEC 0x81
#define I2O_CMD_SCSI_ABORT 0x83
#define I2O_CMD_SCSI_BUSRESET 0x27

/*
 * Bus Adapter Class
 */
#define I2O_CMD_BUS_ADAPTER_RESET 0x85
#define I2O_CMD_BUS_RESET 0x87
#define I2O_CMD_BUS_SCAN 0x89
#define I2O_CMD_BUS_QUIESCE 0x8b

/*
 * Random Block Storage Class
 */
#define I2O_CMD_BLOCK_READ 0x30
#define I2O_CMD_BLOCK_WRITE 0x31
#define I2O_CMD_BLOCK_CFLUSH 0x37
#define I2O_CMD_BLOCK_MLOCK 0x49
#define I2O_CMD_BLOCK_MUNLOCK 0x4B
#define I2O_CMD_BLOCK_MMOUNT 0x41
#define I2O_CMD_BLOCK_MEJECT 0x43
#define I2O_CMD_BLOCK_POWER 0x70

#define I2O_CMD_PRIVATE 0xFF

/* Command status values */
#define I2O_CMD_IN_PROGRESS 0x01
#define I2O_CMD_REJECTED 0x02
#define I2O_CMD_FAILED 0x03
#define I2O_CMD_COMPLETED 0x04

/* I2O API function return values */
#define I2O_RTN_NO_ERROR 0
#define I2O_RTN_NOT_INIT 1
#define I2O_RTN_FREE_Q_EMPTY 2
#define I2O_RTN_TCB_ERROR 3
#define I2O_RTN_TRANSACTION_ERROR 4
#define I2O_RTN_ADAPTER_ALREADY_INIT 5
#define I2O_RTN_MALLOC_ERROR 6
#define I2O_RTN_ADPTR_NOT_REGISTERED 7
#define I2O_RTN_MSG_REPLY_TIMEOUT 8
#define I2O_RTN_NO_STATUS 9
#define I2O_RTN_NO_FIRM_VER 10
#define I2O_RTN_NO_LINK_SPEED 11

/* Reply message status defines for all messages */
#define I2O_REPLY_STATUS_SUCCESS 0x00
#define I2O_REPLY_STATUS_ABORT_DIRTY 0x01
#define I2O_REPLY_STATUS_ABORT_NO_DATA_TRANSFER 0x02
#define I2O_REPLY_STATUS_ABORT_PARTIAL_TRANSFER 0x03
#define I2O_REPLY_STATUS_ERROR_DIRTY 0x04
#define I2O_REPLY_STATUS_ERROR_NO_DATA_TRANSFER 0x05
#define I2O_REPLY_STATUS_ERROR_PARTIAL_TRANSFER 0x06
#define I2O_REPLY_STATUS_PROCESS_ABORT_DIRTY 0x08
#define I2O_REPLY_STATUS_PROCESS_ABORT_NO_DATA_TRANSFER 0x09
#define I2O_REPLY_STATUS_PROCESS_ABORT_PARTIAL_TRANSFER 0x0A
#define I2O_REPLY_STATUS_TRANSACTION_ERROR 0x0B
#define I2O_REPLY_STATUS_PROGRESS_REPORT 0x80

/* Status codes and Error Information for Parameter functions */
#define I2O_PARAMS_STATUS_SUCCESS 0x00
#define I2O_PARAMS_STATUS_BAD_KEY_ABORT 0x01
#define I2O_PARAMS_STATUS_BAD_KEY_CONTINUE 0x02
#define I2O_PARAMS_STATUS_BUFFER_FULL 0x03
#define I2O_PARAMS_STATUS_BUFFER_TOO_SMALL 0x04
#define I2O_PARAMS_STATUS_FIELD_UNREADABLE 0x05
#define I2O_PARAMS_STATUS_FIELD_UNWRITEABLE 0x06
#define I2O_PARAMS_STATUS_INSUFFICIENT_FIELDS 0x07
#define I2O_PARAMS_STATUS_INVALID_GROUP_ID 0x08
#define I2O_PARAMS_STATUS_INVALID_OPERATION 0x09
#define I2O_PARAMS_STATUS_NO_KEY_FIELD 0x0A
#define I2O_PARAMS_STATUS_NO_SUCH_FIELD 0x0B
#define I2O_PARAMS_STATUS_NON_DYNAMIC_GROUP 0x0C
#define I2O_PARAMS_STATUS_OPERATION_ERROR 0x0D
#define I2O_PARAMS_STATUS_SCALAR_ERROR 0x0E
#define I2O_PARAMS_STATUS_TABLE_ERROR 0x0F
#define I2O_PARAMS_STATUS_WRONG_GROUP_TYPE 0x10

/* DetailedStatusCode defines for Executive, DDM, Util and Transaction error
 * messages: Table 3-2 Detailed Status Codes. */
#define I2O_DSC_SUCCESS 0x0000
#define I2O_DSC_BAD_KEY 0x0002
#define I2O_DSC_TCL_ERROR 0x0003
#define I2O_DSC_REPLY_BUFFER_FULL 0x0004
#define I2O_DSC_NO_SUCH_PAGE 0x0005
#define I2O_DSC_INSUFFICIENT_RESOURCE_SOFT 0x0006
#define I2O_DSC_INSUFFICIENT_RESOURCE_HARD 0x0007
#define I2O_DSC_CHAIN_BUFFER_TOO_LARGE 0x0009
#define I2O_DSC_UNSUPPORTED_FUNCTION 0x000A
#define I2O_DSC_DEVICE_LOCKED 0x000B
#define I2O_DSC_DEVICE_RESET 0x000C
#define I2O_DSC_INAPPROPRIATE_FUNCTION 0x000D
#define I2O_DSC_INVALID_INITIATOR_ADDRESS 0x000E
#define I2O_DSC_INVALID_MESSAGE_FLAGS 0x000F
#define I2O_DSC_INVALID_OFFSET 0x0010
#define I2O_DSC_INVALID_PARAMETER 0x0011
#define I2O_DSC_INVALID_REQUEST 0x0012
#define I2O_DSC_INVALID_TARGET_ADDRESS 0x0013
#define I2O_DSC_MESSAGE_TOO_LARGE 0x0014
#define I2O_DSC_MESSAGE_TOO_SMALL 0x0015
#define I2O_DSC_MISSING_PARAMETER 0x0016
#define I2O_DSC_TIMEOUT 0x0017
#define I2O_DSC_UNKNOWN_ERROR 0x0018
#define I2O_DSC_UNKNOWN_FUNCTION 0x0019
#define I2O_DSC_UNSUPPORTED_VERSION 0x001A
#define I2O_DSC_DEVICE_BUSY 0x001B
#define I2O_DSC_DEVICE_NOT_AVAILABLE 0x001C

/* DetailedStatusCode defines for Block Storage Operation: Table 6-7
 * Detailed Status Codes. */
#define I2O_BSA_DSC_SUCCESS 0x0000
#define I2O_BSA_DSC_MEDIA_ERROR 0x0001
#define I2O_BSA_DSC_ACCESS_ERROR 0x0002
#define I2O_BSA_DSC_DEVICE_FAILURE 0x0003
#define I2O_BSA_DSC_DEVICE_NOT_READY 0x0004
#define I2O_BSA_DSC_MEDIA_NOT_PRESENT 0x0005
#define I2O_BSA_DSC_MEDIA_LOCKED 0x0006
#define I2O_BSA_DSC_MEDIA_FAILURE 0x0007
#define I2O_BSA_DSC_PROTOCOL_FAILURE 0x0008
#define I2O_BSA_DSC_BUS_FAILURE 0x0009
#define I2O_BSA_DSC_ACCESS_VIOLATION 0x000A
#define I2O_BSA_DSC_WRITE_PROTECTED 0x000B
#define I2O_BSA_DSC_DEVICE_RESET 0x000C
#define I2O_BSA_DSC_VOLUME_CHANGED 0x000D
#define I2O_BSA_DSC_TIMEOUT 0x000E

/* FailureStatusCodes, Table 3-3 Message Failure Codes */
#define I2O_FSC_TRANSPORT_SERVICE_SUSPENDED 0x81
#define I2O_FSC_TRANSPORT_SERVICE_TERMINATED 0x82
#define I2O_FSC_TRANSPORT_CONGESTION 0x83
#define I2O_FSC_TRANSPORT_FAILURE 0x84
#define I2O_FSC_TRANSPORT_STATE_ERROR 0x85
#define I2O_FSC_TRANSPORT_TIME_OUT 0x86
#define I2O_FSC_TRANSPORT_ROUTING_FAILURE 0x87
#define I2O_FSC_TRANSPORT_INVALID_VERSION 0x88
#define I2O_FSC_TRANSPORT_INVALID_OFFSET 0x89
#define I2O_FSC_TRANSPORT_INVALID_MSG_FLAGS 0x8A
#define I2O_FSC_TRANSPORT_FRAME_TOO_SMALL 0x8B
#define I2O_FSC_TRANSPORT_FRAME_TOO_LARGE 0x8C
#define I2O_FSC_TRANSPORT_INVALID_TARGET_ID 0x8D
#define I2O_FSC_TRANSPORT_INVALID_INITIATOR_ID 0x8E
#define I2O_FSC_TRANSPORT_INVALID_INITIATOR_CONTEXT 0x8F
#define I2O_FSC_TRANSPORT_UNKNOWN_FAILURE 0xFF

/* Device Claim Types */
#define I2O_CLAIM_PRIMARY 0x01000000
#define I2O_CLAIM_MANAGEMENT 0x02000000
#define I2O_CLAIM_AUTHORIZED 0x03000000
#define I2O_CLAIM_SECONDARY 0x04000000

/* Message header defines for VersionOffset */
#define I2OVER15 0x0001
#define I2OVER20 0x0002

/* Default is 1.5 */
#define I2OVERSION I2OVER15

#define SGL_OFFSET_0 I2OVERSION
#define SGL_OFFSET_4 (0x0040 | I2OVERSION)
#define SGL_OFFSET_5 (0x0050 | I2OVERSION)
#define SGL_OFFSET_6 (0x0060 | I2OVERSION)
#define SGL_OFFSET_7 (0x0070 | I2OVERSION)
#define SGL_OFFSET_8 (0x0080 | I2OVERSION)
#define SGL_OFFSET_9 (0x0090 | I2OVERSION)
#define SGL_OFFSET_10 (0x00A0 | I2OVERSION)
#define SGL_OFFSET_11 (0x00B0 | I2OVERSION)
#define SGL_OFFSET_12 (0x00C0 | I2OVERSION)
#define SGL_OFFSET(x) (((x)<<4) | I2OVERSION)

/* Transaction Reply Lists (TRL) Control Word structure */
#define TRL_SINGLE_FIXED_LENGTH 0x00
#define TRL_SINGLE_VARIABLE_LENGTH 0x40
#define TRL_MULTIPLE_FIXED_LENGTH 0x80

/* msg header defines for MsgFlags */
#define MSG_STATIC 0x0100
#define MSG_64BIT_CNTXT 0x0200
#define MSG_MULTI_TRANS 0x1000
#define MSG_FAIL 0x2000
#define MSG_FINAL 0x4000
#define MSG_REPLY 0x8000

/* minimum size msg */
#define THREE_WORD_MSG_SIZE 0x00030000
#define FOUR_WORD_MSG_SIZE 0x00040000
#define FIVE_WORD_MSG_SIZE 0x00050000
#define SIX_WORD_MSG_SIZE 0x00060000
#define SEVEN_WORD_MSG_SIZE 0x00070000
#define EIGHT_WORD_MSG_SIZE 0x00080000
#define NINE_WORD_MSG_SIZE 0x00090000
#define TEN_WORD_MSG_SIZE 0x000A0000
#define ELEVEN_WORD_MSG_SIZE 0x000B0000
#define I2O_MESSAGE_SIZE(x) ((x)<<16)

/* special TID assignments */
#define ADAPTER_TID 0
#define HOST_TID 1

/* outbound queue defines */
#define I2O_MAX_OUTBOUND_MSG_FRAMES 128
#define I2O_OUTBOUND_MSG_FRAME_SIZE 128	/* in 32-bit words */

/* inbound queue definitions */
#define I2O_MSG_INPOOL_MIN 32
#define I2O_INBOUND_MSG_FRAME_SIZE 128	/* in 32-bit words */

#define I2O_POST_WAIT_OK 0
#define I2O_POST_WAIT_TIMEOUT -ETIMEDOUT

#define I2O_CONTEXT_LIST_MIN_LENGTH 15
#define I2O_CONTEXT_LIST_USED 0x01
#define I2O_CONTEXT_LIST_DELETED 0x02

/* timeouts */
#define I2O_TIMEOUT_INIT_OUTBOUND_QUEUE 15
#define I2O_TIMEOUT_MESSAGE_GET 5
#define I2O_TIMEOUT_RESET 30
#define I2O_TIMEOUT_STATUS_GET 5
#define I2O_TIMEOUT_LCT_GET 360
#define I2O_TIMEOUT_SCSI_SCB_ABORT 240

/* retries */
#define I2O_HRT_GET_TRIES 3
#define I2O_LCT_GET_TRIES 3

/* defines for max_sectors and max_phys_segments */
#define I2O_MAX_SECTORS 1024
#define I2O_MAX_SECTORS_LIMITED 256
#define I2O_MAX_PHYS_SEGMENTS MAX_PHYS_SEGMENTS
/*
 * Message structures
 */
struct i2o_message {
        union {
                struct {
                        u8 version_offset;
                        u8 flags;
                        u16 size;
                        u32 target_tid:12;
                        u32 init_tid:12;
                        u32 function:8;
                        u32 icntxt;	/* initiator context */
                        u32 tcntxt;	/* transaction context */
                } s;
                u32 head[4];
        } u;
        /* List follows */
        u32 body[0];
};

/* MFA and I2O message used by mempool */
struct i2o_msg_mfa {
        u32 mfa;		/* MFA returned by the controller */
        struct i2o_message msg;	/* I2O message */
};
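/*
 * Illustrative sketch (not part of the original header): how the message
 * size, SGL offset, function code and TID macros combine to fill the four
 * header words of a struct i2o_message. This mirrors the convention used by
 * i2o_msg_nop_mfa() further down; the function code chosen here and the
 * context values are placeholders.
 */
#if 0
static void example_fill_header(struct i2o_message *msg, u32 icntxt,
                                u32 tcntxt)
{
        /* word 0: message size in 32-bit words plus the SGL offset */
        msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
        /* word 1: function code, initiator TID and target TID */
        msg->u.head[1] = cpu_to_le32(I2O_CMD_STATUS_GET << 24 |
                                     HOST_TID << 12 | ADAPTER_TID);
        msg->u.head[2] = cpu_to_le32(icntxt);	/* initiator context */
        msg->u.head[3] = cpu_to_le32(tcntxt);	/* transaction context */
}
#endif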
/*
 * Each I2O device entity has one of these. There is one per device.
 */
struct i2o_device {
        i2o_lct_entry lct_data;		/* Device LCT information */

        struct i2o_controller *iop;	/* Controlling IOP */
        struct list_head list;		/* node in IOP devices list */

        struct device device;

        struct semaphore lock;		/* device lock */
};

/*
 * Event structure provided to the event handling function
 */
struct i2o_event {
        struct work_struct work;
        struct i2o_device *i2o_dev;	/* I2O device pointer from which the
                                           event reply was initiated */
        u16 size;			/* Size of data in 32-bit words */
        u32 tcntxt;			/* Transaction context used at
                                           registration */
        u32 event_indicator;		/* Event indicator from reply */
        u32 data[0];			/* Event data from reply */
};

/*
 * I2O classes which could be handled by the OSM
 */
struct i2o_class_id {
        u16 class_id:12;
};

/*
 * I2O driver structure for OSMs
 */
struct i2o_driver {
        char *name;			/* OSM name */
        int context;			/* Low 8 bits of the transaction info */
        struct i2o_class_id *classes;	/* I2O classes that this OSM handles */

        /* Message reply handler */
        int (*reply) (struct i2o_controller *, u32, struct i2o_message *);

        /* Event handler */
        void (*event) (struct i2o_event *);

        struct workqueue_struct *event_queue;	/* Event queue */

        struct device_driver driver;

        /* notification of changes */
        void (*notify_controller_add) (struct i2o_controller *);
        void (*notify_controller_remove) (struct i2o_controller *);
        void (*notify_device_add) (struct i2o_device *);
        void (*notify_device_remove) (struct i2o_device *);

        struct semaphore lock;
};
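/*
 * Illustrative sketch (not part of the original header): the minimal shape
 * of an OSM built around struct i2o_driver, registered with
 * i2o_driver_register() (declared further down in this header). Class codes
 * such as I2O_CLASS_RANDOM_BLOCK_STORAGE and the I2O_CLASS_END terminator
 * are assumed to come from linux/i2o-dev.h; the OSM name and handlers are
 * placeholders.
 */
#if 0
static struct i2o_class_id example_class_ids[] = {
        {I2O_CLASS_RANDOM_BLOCK_STORAGE},	/* classes handled by this OSM */
        {I2O_CLASS_END}
};

static int example_reply(struct i2o_controller *c, u32 m,
                         struct i2o_message *msg)
{
        /* decode msg->u.head[] / msg->body[] and complete the request */
        return 0;
}

static struct i2o_driver example_driver = {
        .name = "example-osm",
        .reply = example_reply,
        .classes = example_class_ids,
};

static int __init example_init(void)
{
        return i2o_driver_register(&example_driver);
}

static void __exit example_exit(void)
{
        i2o_driver_unregister(&example_driver);
}
#endif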
/*
 * Contains DMA mapped address information
 */
struct i2o_dma {
        void *virt;
        dma_addr_t phys;
        size_t len;
};

/*
 * Contains slab cache and mempool information
 */
struct i2o_pool {
        char *name;
        kmem_cache_t *slab;
        mempool_t *mempool;
};

/*
 * Contains IO mapped address information
 */
struct i2o_io {
        void __iomem *virt;
        unsigned long phys;
        unsigned long len;
};

/*
 * Context queue entry, used for 32-bit context on 64-bit systems
 */
struct i2o_context_list_element {
        struct list_head list;
        u32 context;
        void *ptr;
        unsigned long timestamp;
};

/*
 * Each I2O controller has one of these objects
 */
struct i2o_controller {
        char name[16];
        int unit;
        int type;

        struct pci_dev *pdev;		/* PCI device */

        unsigned int promise:1;		/* Promise controller */
        unsigned int adaptec:1;		/* DPT / Adaptec controller */
        unsigned int raptor:1;		/* split bar */
        unsigned int no_quiesce:1;	/* don't quiesce before reset */
        unsigned int short_req:1;	/* use small block sizes */
        unsigned int limit_sectors:1;	/* limit number of sectors per request */
        unsigned int pae_support:1;	/* controller has 64-bit SGL support */

        struct list_head devices;	/* list of I2O devices */
        struct list_head list;		/* Controller list */

        void __iomem *in_port;		/* Inbound port address */
        void __iomem *out_port;		/* Outbound port address */
        void __iomem *irq_status;	/* Interrupt status register address */
        void __iomem *irq_mask;		/* Interrupt mask register address */

        struct i2o_dma status;		/* IOP status block */

        struct i2o_dma hrt;		/* HW Resource Table */
        i2o_lct *lct;			/* Logical Config Table */
        struct i2o_dma dlct;		/* Temp LCT */
        struct semaphore lct_lock;	/* Lock for LCT updates */
        struct i2o_dma status_block;	/* IOP status block */

        struct i2o_io base;		/* controller messaging unit */
        struct i2o_io in_queue;		/* inbound message queue Host->IOP */
        struct i2o_dma out_queue;	/* outbound message queue IOP->Host */

        struct i2o_pool in_msg;		/* mempool for inbound messages */

        unsigned int battery:1;		/* Has a battery backup */
        unsigned int io_alloc:1;	/* An I/O resource was allocated */
        unsigned int mem_alloc:1;	/* A memory resource was allocated */

        struct resource io_resource;	/* I/O resource allocated to the IOP */
        struct resource mem_resource;	/* Mem resource allocated to the IOP */

        struct device device;
        struct class_device *classdev;	/* I2O controller class device */
        struct i2o_device *exec;	/* Executive */
#if BITS_PER_LONG == 64
        spinlock_t context_list_lock;	/* lock for context_list */
        atomic_t context_list_counter;	/* needed for unique contexts */
        struct list_head context_list;	/* list of context id's and pointers */
#endif
        spinlock_t lock;		/* lock for controller configuration */

        void *driver_data[I2O_MAX_DRIVERS];	/* storage for drivers */
};
/*
 * I2O System table entry
 *
 * The system table contains information about all the IOPs in the
 * system. It is sent to all IOPs so that they can create peer-to-peer
 * connections between them.
 */
struct i2o_sys_tbl_entry {
        u16 org_id;
        u16 reserved1;
        u32 iop_id:12;
        u32 reserved2:20;
        u16 seg_num:12;
        u16 i2o_version:4;
        u8 iop_state;
        u8 msg_type;
        u16 frame_size;
        u16 reserved3;
        u32 last_changed;
        u32 iop_capabilities;
        u32 inbound_low;
        u32 inbound_high;
};

struct i2o_sys_tbl {
        u8 num_entries;
        u8 version;
        u16 reserved1;
        u32 change_ind;
        u32 reserved2;
        u32 reserved3;
        struct i2o_sys_tbl_entry iops[0];
};
extern struct list_head i2o_controllers;

/* Message functions */
static inline struct i2o_message *i2o_msg_get(struct i2o_controller *);
extern struct i2o_message *i2o_msg_get_wait(struct i2o_controller *, int);
static inline void i2o_msg_post(struct i2o_controller *, struct i2o_message *);
static inline int i2o_msg_post_wait(struct i2o_controller *,
                                    struct i2o_message *, unsigned long);
extern int i2o_msg_post_wait_mem(struct i2o_controller *, struct i2o_message *,
                                 unsigned long, struct i2o_dma *);
static inline void i2o_flush_reply(struct i2o_controller *, u32);

/* IOP functions */
extern int i2o_status_get(struct i2o_controller *);

extern int i2o_event_register(struct i2o_device *, struct i2o_driver *, int,
                              u32);
extern struct i2o_device *i2o_iop_find_device(struct i2o_controller *, u16);
extern struct i2o_controller *i2o_find_iop(int);

/* Functions needed for handling 64-bit pointers in 32-bit context */
#if BITS_PER_LONG == 64
extern u32 i2o_cntxt_list_add(struct i2o_controller *, void *);
extern void *i2o_cntxt_list_get(struct i2o_controller *, u32);
extern u32 i2o_cntxt_list_remove(struct i2o_controller *, void *);
extern u32 i2o_cntxt_list_get_ptr(struct i2o_controller *, void *);

static inline u32 i2o_ptr_low(void *ptr)
{
        return (u32) (u64) ptr;
};

static inline u32 i2o_ptr_high(void *ptr)
{
        return (u32) ((u64) ptr >> 32);
};

static inline u32 i2o_dma_low(dma_addr_t dma_addr)
{
        return (u32) (u64) dma_addr;
};

static inline u32 i2o_dma_high(dma_addr_t dma_addr)
{
        return (u32) ((u64) dma_addr >> 32);
};
#else
static inline u32 i2o_cntxt_list_add(struct i2o_controller *c, void *ptr)
{
        return (u32) ptr;
};

static inline void *i2o_cntxt_list_get(struct i2o_controller *c, u32 context)
{
        return (void *)context;
};

static inline u32 i2o_cntxt_list_remove(struct i2o_controller *c, void *ptr)
{
        return (u32) ptr;
};

static inline u32 i2o_cntxt_list_get_ptr(struct i2o_controller *c, void *ptr)
{
        return (u32) ptr;
};

static inline u32 i2o_ptr_low(void *ptr)
{
        return (u32) ptr;
};

static inline u32 i2o_ptr_high(void *ptr)
{
        return 0;
};

static inline u32 i2o_dma_low(dma_addr_t dma_addr)
{
        return (u32) dma_addr;
};

static inline u32 i2o_dma_high(dma_addr_t dma_addr)
{
        return 0;
};
#endif
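/*
 * Illustrative sketch (not part of the original header): how an OSM would
 * carry a driver pointer through the 32-bit transaction context on both
 * 32-bit and 64-bit kernels using the helpers above. The request structure
 * and function names are placeholders.
 */
#if 0
struct example_request {
        struct i2o_controller *c;
        int status;
};

static void example_send(struct i2o_controller *c, struct i2o_message *msg,
                         struct example_request *req)
{
        /* store a cookie that fits into the 32-bit tcntxt field */
        msg->u.head[3] = cpu_to_le32(i2o_cntxt_list_add(c, req));
        i2o_msg_post(c, msg);
}

static int example_reply(struct i2o_controller *c, u32 m,
                         struct i2o_message *msg)
{
        struct example_request *req;

        /* resolve the cookie back into the original pointer */
        req = i2o_cntxt_list_get(c, le32_to_cpu(msg->u.head[3]));
        if (req)
                req->status = 0;
        return 0;
}
#endif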
/**
 * i2o_sg_tablesize - Calculate the maximum number of elements in a SGL
 * @c: I2O controller for which the calculation should be done
 * @body_size: maximum body size used for message in 32-bit words.
 *
 * Return the maximum number of SG elements in a SG list.
 */
static inline u16 i2o_sg_tablesize(struct i2o_controller *c, u16 body_size)
{
        i2o_status_block *sb = c->status_block.virt;
        u16 sg_count =
            (sb->inbound_frame_size - sizeof(struct i2o_message) / 4) -
            body_size;

        if (c->pae_support) {
                /*
                 * for 64-bit a SG attribute element must be added and each
                 * SG element needs 12 bytes instead of 8.
                 */
                sg_count -= 2;
                sg_count /= 3;
        } else
                sg_count /= 2;

        if (c->short_req && (sg_count > 8))
                sg_count = 8;

        return sg_count;
};
/**
 * i2o_dma_map_single - Map pointer to controller and fill in I2O message.
 * @c: I2O controller
 * @ptr: pointer to the data which should be mapped
 * @size: size of data in bytes
 * @direction: DMA_TO_DEVICE / DMA_FROM_DEVICE
 * @sg_ptr: pointer to the SG list inside the I2O message
 *
 * This function does all necessary DMA handling and also writes the I2O
 * SGL elements into the I2O message. For details on DMA handling see also
 * dma_map_single(). The pointer sg_ptr will only be set to the end of the
 * SG list if the allocation was successful.
 *
 * Returns DMA address which must be checked for failures using
 * dma_mapping_error().
 */
static inline dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr,
                                            size_t size,
                                            enum dma_data_direction direction,
                                            u32 **sg_ptr)
{
        u32 sg_flags;
        u32 *mptr = *sg_ptr;
        dma_addr_t dma_addr;

        switch (direction) {
        case DMA_TO_DEVICE:
                sg_flags = 0xd4000000;
                break;
        case DMA_FROM_DEVICE:
                sg_flags = 0xd0000000;
                break;
        default:
                return 0;
        }

        dma_addr = dma_map_single(&c->pdev->dev, ptr, size, direction);
        if (!dma_mapping_error(dma_addr)) {
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
                if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
                        *mptr++ = cpu_to_le32(0x7C020002);
                        *mptr++ = cpu_to_le32(PAGE_SIZE);
                }
#endif

                *mptr++ = cpu_to_le32(sg_flags | size);
                *mptr++ = cpu_to_le32(i2o_dma_low(dma_addr));
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
                if ((sizeof(dma_addr_t) > 4) && c->pae_support)
                        *mptr++ = cpu_to_le32(i2o_dma_high(dma_addr));
#endif
                *sg_ptr = mptr;
        }
        return dma_addr;
};
/**
 * i2o_dma_map_sg - Map a SG List to controller and fill in I2O message.
 * @c: I2O controller
 * @sg: SG list to be mapped
 * @sg_count: number of elements in the SG list
 * @direction: DMA_TO_DEVICE / DMA_FROM_DEVICE
 * @sg_ptr: pointer to the SG list inside the I2O message
 *
 * This function does all necessary DMA handling and also writes the I2O
 * SGL elements into the I2O message. For details on DMA handling see also
 * dma_map_sg(). The pointer sg_ptr will only be set to the end of the SG
 * list if the allocation was successful.
 *
 * Returns 0 on failure or 1 on success.
 */
static inline int i2o_dma_map_sg(struct i2o_controller *c,
                                 struct scatterlist *sg, int sg_count,
                                 enum dma_data_direction direction,
                                 u32 **sg_ptr)
{
        u32 sg_flags;
        u32 *mptr = *sg_ptr;

        switch (direction) {
        case DMA_TO_DEVICE:
                sg_flags = 0x14000000;
                break;
        case DMA_FROM_DEVICE:
                sg_flags = 0x10000000;
                break;
        default:
                return 0;
        }

        sg_count = dma_map_sg(&c->pdev->dev, sg, sg_count, direction);
        if (!sg_count)
                return 0;

#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
        if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
                *mptr++ = cpu_to_le32(0x7C020002);
                *mptr++ = cpu_to_le32(PAGE_SIZE);
        }
#endif

        while (sg_count-- > 0) {
                if (!sg_count)
                        sg_flags |= 0xC0000000;
                *mptr++ = cpu_to_le32(sg_flags | sg_dma_len(sg));
                *mptr++ = cpu_to_le32(i2o_dma_low(sg_dma_address(sg)));
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
                if ((sizeof(dma_addr_t) > 4) && c->pae_support)
                        *mptr++ = cpu_to_le32(i2o_dma_high(sg_dma_address(sg)));
#endif
                sg++;
        }
        *sg_ptr = mptr;

        return 1;
};
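/*
 * Illustrative sketch (not part of the original header): mapping a single
 * data buffer into the SGL of a message whose SGL starts at word 5 (four
 * header words plus one body word). The buffer, length and the patching of
 * header word 0 with the final size follow the usual OSM pattern; the
 * function name is a placeholder.
 */
#if 0
static int example_map_buffer(struct i2o_controller *c,
                              struct i2o_message *msg, void *buf, size_t len)
{
        u32 *mptr = &msg->body[1];	/* SGL begins after one body word */
        dma_addr_t addr;

        addr = i2o_dma_map_single(c, buf, len, DMA_TO_DEVICE, &mptr);
        if (dma_mapping_error(addr))
                return -ENOMEM;

        /* patch the final message size and SGL offset into header word 0 */
        msg->u.head[0] =
            cpu_to_le32(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) |
                        SGL_OFFSET(5));
        return 0;
}
#endif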
/**
 * i2o_dma_alloc - Allocate DMA memory
 * @dev: struct device pointer to the PCI device of the I2O controller
 * @addr: i2o_dma struct which should get the DMA buffer
 * @len: length of the new DMA memory
 * @gfp_mask: GFP mask
 *
 * Allocate a coherent DMA memory area and write the pointers into addr.
 *
 * Returns 0 on success or -ENOMEM on failure.
 */
static inline int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr,
                                size_t len, gfp_t gfp_mask)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        int dma_64 = 0;

        if ((sizeof(dma_addr_t) > 4) && (pdev->dma_mask == DMA_64BIT_MASK)) {
                dma_64 = 1;
                if (pci_set_dma_mask(pdev, DMA_32BIT_MASK))
                        return -ENOMEM;
        }

        addr->virt = dma_alloc_coherent(dev, len, &addr->phys, gfp_mask);

        if ((sizeof(dma_addr_t) > 4) && dma_64)
                if (pci_set_dma_mask(pdev, DMA_64BIT_MASK))
                        printk(KERN_WARNING "i2o: unable to set 64-bit DMA\n");

        if (!addr->virt)
                return -ENOMEM;

        memset(addr->virt, 0, len);
        addr->len = len;

        return 0;
};
/**
 * i2o_dma_free - Free DMA memory
 * @dev: struct device pointer to the PCI device of the I2O controller
 * @addr: i2o_dma struct which contains the DMA buffer
 *
 * Free a coherent DMA memory area and set the virtual address of addr to NULL.
 */
static inline void i2o_dma_free(struct device *dev, struct i2o_dma *addr)
{
        if (addr->virt) {
                if (addr->phys)
                        dma_free_coherent(dev, addr->len, addr->virt,
                                          addr->phys);
                else
                        kfree(addr->virt);
                addr->virt = NULL;
        }
};

/**
 * i2o_dma_realloc - Realloc DMA memory
 * @dev: struct device pointer to the PCI device of the I2O controller
 * @addr: pointer to a i2o_dma struct DMA buffer
 * @len: new length of memory
 * @gfp_mask: GFP mask
 *
 * If there was something allocated in the addr, free it first. If len > 0
 * then try to allocate it and write the addresses back to the addr
 * structure. If len == 0 set the virtual address to NULL.
 *
 * Returns 0 on success or a negative error code on failure.
 */
static inline int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr,
                                  size_t len, gfp_t gfp_mask)
{
        i2o_dma_free(dev, addr);

        if (len)
                return i2o_dma_alloc(dev, addr, len, gfp_mask);

        return 0;
};
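/*
 * Illustrative sketch (not part of the original header): the typical life
 * cycle of a struct i2o_dma buffer using the helpers above. The buffer
 * length and the use made of buf.phys are placeholders.
 */
#if 0
static int example_use_dma_buffer(struct i2o_controller *c)
{
        struct i2o_dma buf;

        if (i2o_dma_alloc(&c->pdev->dev, &buf, PAGE_SIZE, GFP_KERNEL))
                return -ENOMEM;

        /* buf.virt is the CPU view, buf.phys is what goes into an SGL */

        i2o_dma_free(&c->pdev->dev, &buf);
        return 0;
}
#endif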
/*
 * i2o_pool_alloc - Allocate a slab cache and mempool
 * @pool: pointer to struct i2o_pool to write data into.
 * @name: name which is used to identify cache
 * @size: size of each object
 * @min_nr: minimum number of objects
 *
 * First allocates a slab cache with name and size. Then allocates a
 * mempool which uses the slab cache for allocation and freeing.
 *
 * Returns 0 on success or negative error code on failure.
 */
static inline int i2o_pool_alloc(struct i2o_pool *pool, const char *name,
                                 size_t size, int min_nr)
{
        pool->name = kmalloc(strlen(name) + 1, GFP_KERNEL);
        if (!pool->name)
                goto exit;
        strcpy(pool->name, name);

        pool->slab =
            kmem_cache_create(pool->name, size, 0, SLAB_HWCACHE_ALIGN, NULL,
                              NULL);
        if (!pool->slab)
                goto free_name;

        pool->mempool =
            mempool_create(min_nr, mempool_alloc_slab, mempool_free_slab,
                           pool->slab);
        if (!pool->mempool)
                goto free_slab;

        return 0;

free_slab:
        kmem_cache_destroy(pool->slab);

free_name:
        kfree(pool->name);

exit:
        return -ENOMEM;
};

/*
 * i2o_pool_free - Free slab cache and mempool again
 * @pool: pointer to struct i2o_pool which should be freed
 *
 * Note that you have to return all objects to the mempool again before
 * calling i2o_pool_free().
 */
static inline void i2o_pool_free(struct i2o_pool *pool)
{
        mempool_destroy(pool->mempool);
        kmem_cache_destroy(pool->slab);
        kfree(pool->name);
};
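/*
 * Illustrative sketch (not part of the original header): creating a message
 * pool shaped like the controller's in_msg pool and drawing one object from
 * it. The pool name is a placeholder, and the object size here is an
 * assumption sized for an MFA cookie plus a full inbound frame, which is
 * what i2o_msg_get() expects.
 */
#if 0
static int example_setup_pool(struct i2o_controller *c)
{
        struct i2o_msg_mfa *mmsg;
        int rc;

        rc = i2o_pool_alloc(&c->in_msg, "example-msg-pool",
                            I2O_INBOUND_MSG_FRAME_SIZE * 4 + sizeof(u32),
                            I2O_MSG_INPOOL_MIN);
        if (rc)
                return rc;

        mmsg = mempool_alloc(c->in_msg.mempool, GFP_KERNEL);
        if (mmsg)
                mempool_free(mmsg, c->in_msg.mempool);

        i2o_pool_free(&c->in_msg);
        return 0;
}
#endif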
/* I2O driver (OSM) functions */
extern int i2o_driver_register(struct i2o_driver *);
extern void i2o_driver_unregister(struct i2o_driver *);

/**
 * i2o_driver_notify_controller_add - Send notification of added controller
 *				      to a single I2O driver
 *
 * Send notification of added controller to a single registered driver.
 */
static inline void i2o_driver_notify_controller_add(struct i2o_driver *drv,
                                                    struct i2o_controller *c)
{
        if (drv->notify_controller_add)
                drv->notify_controller_add(c);
};

/**
 * i2o_driver_notify_controller_remove - Send notification of removed
 *					 controller to a single I2O driver
 *
 * Send notification of removed controller to a single registered driver.
 */
static inline void i2o_driver_notify_controller_remove(struct i2o_driver *drv,
                                                       struct i2o_controller *c)
{
        if (drv->notify_controller_remove)
                drv->notify_controller_remove(c);
};

/**
 * i2o_driver_notify_device_add - Send notification of added device to a
 *				  single I2O driver
 *
 * Send notification of added device to a single registered driver.
 */
static inline void i2o_driver_notify_device_add(struct i2o_driver *drv,
                                                struct i2o_device *i2o_dev)
{
        if (drv->notify_device_add)
                drv->notify_device_add(i2o_dev);
};

/**
 * i2o_driver_notify_device_remove - Send notification of removed device
 *				     to a single I2O driver
 *
 * Send notification of removed device to a single registered driver.
 */
static inline void i2o_driver_notify_device_remove(struct i2o_driver *drv,
                                                   struct i2o_device *i2o_dev)
{
        if (drv->notify_device_remove)
                drv->notify_device_remove(i2o_dev);
};

extern void i2o_driver_notify_controller_add_all(struct i2o_controller *);
extern void i2o_driver_notify_controller_remove_all(struct i2o_controller *);
extern void i2o_driver_notify_device_add_all(struct i2o_device *);
extern void i2o_driver_notify_device_remove_all(struct i2o_device *);
/* I2O device functions */
extern int i2o_device_claim(struct i2o_device *);
extern int i2o_device_claim_release(struct i2o_device *);

/* Exec OSM functions */
extern int i2o_exec_lct_get(struct i2o_controller *);

/* device / driver / kobject conversion functions */
#define to_i2o_driver(drv) container_of(drv, struct i2o_driver, driver)
#define to_i2o_device(dev) container_of(dev, struct i2o_device, device)
#define to_i2o_controller(dev) container_of(dev, struct i2o_controller, device)
#define kobj_to_i2o_device(kobj) to_i2o_device(container_of(kobj, struct device, kobj))

/**
 * i2o_msg_out_to_virt - Turn an I2O message into a virtual address
 * @c: controller
 * @m: message engine value
 *
 * Turn a receive message from an I2O controller bus address into
 * a Linux virtual address. The shared page frame is a linear block
 * so we simply have to shift the offset. This function does not
 * work for sender side messages as they are ioremap objects
 * provided by the I2O controller.
 */
static inline struct i2o_message *i2o_msg_out_to_virt(struct i2o_controller *c,
                                                      u32 m)
{
        BUG_ON(m < c->out_queue.phys
               || m >= c->out_queue.phys + c->out_queue.len);

        return c->out_queue.virt + (m - c->out_queue.phys);
};

/**
 * i2o_msg_in_to_virt - Turn an I2O message into a virtual address
 * @c: controller
 * @m: message engine value
 *
 * Turn a send message from an I2O controller bus address into
 * a Linux virtual address. The shared page frame is a linear block
 * so we simply have to shift the offset. This function does not
 * work for receive side messages as they are kmalloc objects
 * in a different pool.
 */
static inline struct i2o_message __iomem *i2o_msg_in_to_virt(struct
                                                             i2o_controller *c,
                                                             u32 m)
{
        return c->in_queue.virt + m;
};
/**
 * i2o_msg_get - obtain an I2O message from the IOP
 * @c: I2O controller
 *
 * This function tries to get a message frame. If no message frame is
 * available it does not wait until one becomes available (see also
 * i2o_msg_get_wait()). The returned pointer to the message frame is not in
 * I/O memory, it is allocated from a mempool. But because an MFA is
 * allocated from the controller too, it is guaranteed that i2o_msg_post()
 * will never fail.
 *
 * On success a pointer to the message frame is returned. If the message
 * queue is empty -EBUSY is returned and if no memory is available -ENOMEM
 * is returned.
 */
static inline struct i2o_message *i2o_msg_get(struct i2o_controller *c)
{
        struct i2o_msg_mfa *mmsg = mempool_alloc(c->in_msg.mempool, GFP_ATOMIC);

        if (!mmsg)
                return ERR_PTR(-ENOMEM);

        mmsg->mfa = readl(c->in_port);
        if (mmsg->mfa == I2O_QUEUE_EMPTY) {
                mempool_free(mmsg, c->in_msg.mempool);
                return ERR_PTR(-EBUSY);
        }

        return &mmsg->msg;
};

/**
 * i2o_msg_post - Post I2O message to I2O controller
 * @c: I2O controller to which the message should be sent
 * @msg: message returned by i2o_msg_get()
 *
 * Post the message to the I2O controller and return immediately.
 */
static inline void i2o_msg_post(struct i2o_controller *c,
                                struct i2o_message *msg)
{
        struct i2o_msg_mfa *mmsg;

        mmsg = container_of(msg, struct i2o_msg_mfa, msg);
        memcpy_toio(i2o_msg_in_to_virt(c, mmsg->mfa), msg,
                    (le32_to_cpu(msg->u.head[0]) >> 16) << 2);
        writel(mmsg->mfa, c->in_port);
        mempool_free(mmsg, c->in_msg.mempool);
};

/**
 * i2o_msg_post_wait - Post a message and wait for a reply
 * @c: controller
 * @msg: message to post
 * @timeout: time in seconds to wait
 *
 * This API allows an OSM to post a message and then be told whether or
 * not the system received a successful reply. If the message times out
 * then the value '-ETIMEDOUT' is returned.
 *
 * Returns 0 on success or negative error code on failure.
 */
static inline int i2o_msg_post_wait(struct i2o_controller *c,
                                    struct i2o_message *msg,
                                    unsigned long timeout)
{
        return i2o_msg_post_wait_mem(c, msg, timeout, NULL);
};
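/*
 * Illustrative sketch (not part of the original header): the usual OSM
 * request pattern built from the helpers above - fetch a frame, fill in the
 * header, then post it and wait for the reply. The function code, message
 * size and timeout shown are placeholders.
 */
#if 0
static int example_status_request(struct i2o_controller *c)
{
        struct i2o_message *msg;

        msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
        if (IS_ERR(msg))
                return PTR_ERR(msg);

        msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
        msg->u.head[1] = cpu_to_le32(I2O_CMD_STATUS_GET << 24 |
                                     HOST_TID << 12 | ADAPTER_TID);

        /* blocks up to 60 seconds for the reply, returns 0 on success */
        return i2o_msg_post_wait(c, msg, 60);
}
#endif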
/**
 * i2o_msg_nop_mfa - Returns a fetched MFA back to the controller
 * @c: I2O controller from which the MFA was fetched
 * @mfa: MFA which should be returned
 *
 * This function must be used for preserved messages, because i2o_msg_nop()
 * also returns the allocated memory back to the msg_pool mempool.
 */
static inline void i2o_msg_nop_mfa(struct i2o_controller *c, u32 mfa)
{
        struct i2o_message __iomem *msg;
        u32 nop[3] = {
                THREE_WORD_MSG_SIZE | SGL_OFFSET_0,
                I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | ADAPTER_TID,
                0x00000000
        };

        msg = i2o_msg_in_to_virt(c, mfa);
        memcpy_toio(msg, nop, sizeof(nop));
        writel(mfa, c->in_port);
};

/**
 * i2o_msg_nop - Returns a message which is not used
 * @c: I2O controller from which the message was created
 * @msg: message which should be returned
 *
 * If you fetch a message via i2o_msg_get(), and can't use it, you must
 * return the message with this function. Otherwise the MFA is lost as well
 * as the allocated memory from the mempool.
 */
static inline void i2o_msg_nop(struct i2o_controller *c,
                               struct i2o_message *msg)
{
        struct i2o_msg_mfa *mmsg;

        mmsg = container_of(msg, struct i2o_msg_mfa, msg);
        i2o_msg_nop_mfa(c, mmsg->mfa);
        mempool_free(mmsg, c->in_msg.mempool);
};

/**
 * i2o_flush_reply - Flush reply from I2O controller
 * @c: I2O controller
 * @m: the message identifier
 *
 * The I2O controller must be informed that the reply message is not needed
 * anymore. If you forget to flush the reply, the message frame can't be
 * used by the controller anymore and is therefore lost.
 */
static inline void i2o_flush_reply(struct i2o_controller *c, u32 m)
{
        writel(m, c->out_port);
};
/*
 * Endian handling wrapped into the macro - keeps the core code
 * cleaner.
 */
#define i2o_raw_writel(val, mem) __raw_writel(cpu_to_le32(val), mem)

extern int i2o_parm_field_get(struct i2o_device *, int, int, void *, int);
extern int i2o_parm_table_get(struct i2o_device *, int, int, int, void *, int,
                              void *, int);

/* debugging and troubleshooting/diagnostic helpers. */
#define osm_printk(level, format, arg...) \
        printk(level "%s: " format, OSM_NAME , ## arg)

#ifdef DEBUG
#define osm_debug(format, arg...) \
        osm_printk(KERN_DEBUG, format , ## arg)
#else
#define osm_debug(format, arg...) \
        do { } while (0)
#endif

#define osm_err(format, arg...) \
        osm_printk(KERN_ERR, format , ## arg)
#define osm_info(format, arg...) \
        osm_printk(KERN_INFO, format , ## arg)
#define osm_warn(format, arg...) \
        osm_printk(KERN_WARNING, format , ## arg)
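/*
 * Illustrative sketch (not part of the original header): an OSM defines
 * OSM_NAME before using the osm_* logging macros above, which prefix every
 * message with that name. The name and messages are placeholders.
 */
#if 0
#define OSM_NAME "example-osm"

static void example_log(struct i2o_controller *c)
{
        osm_info("found controller %s (unit %d)\n", c->name, c->unit);
        osm_debug("only printed when DEBUG is defined\n");
}
#endif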
/* debugging functions */
extern void i2o_report_status(const char *, const char *, struct i2o_message *);
extern void i2o_dump_message(struct i2o_message *);
extern void i2o_dump_hrt(struct i2o_controller *c);
extern void i2o_debug_state(struct i2o_controller *c);

#endif				/* __KERNEL__ */
#endif				/* _I2O_H */