/*
 * Filename: cregs.c
 *
 *
 * Authors: Joshua Morris <josh.h.morris@us.ibm.com>
 *	Philip Kelleher <pjk1939@linux.vnet.ibm.com>
 *
 * (C) Copyright 2013 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/completion.h>
#include <linux/slab.h>

#include "rsxx_priv.h"

#define CREG_TIMEOUT_MSEC	10000

typedef void (*creg_cmd_cb)(struct rsxx_cardinfo *card,
			    struct creg_cmd *cmd,
			    int st);
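
/*
 * A creg_cmd describes one queued control-register operation: the opcode
 * and register address, the transfer size in bytes (cnt8), an optional
 * data buffer, and a completion callback that receives the final status.
 */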
struct creg_cmd {
	struct list_head list;
	creg_cmd_cb cb;
	void *cb_private;
	unsigned int op;
	unsigned int addr;
	int cnt8;
	void *buf;
	unsigned int stream;
	unsigned int status;
};

static struct kmem_cache *creg_cmd_pool;

/*------------ Private Functions --------------*/

#if defined(__LITTLE_ENDIAN)
#define LITTLE_ENDIAN 1
#elif defined(__BIG_ENDIAN)
#define LITTLE_ENDIAN 0
#else
#error Unknown endianness!!! Aborting...
#endif
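
/*
 * Copy a command's payload to/from the CREG_DATA register window one
 * 32-bit word at a time. Byte-stream transfers are byte swapped on
 * little-endian hosts to match the firmware's byte ordering.
 */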
static void copy_to_creg_data(struct rsxx_cardinfo *card,
			      int cnt8,
			      void *buf,
			      unsigned int stream)
{
	int i = 0;
	u32 *data = buf;

	for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
		/*
		 * Firmware implementation makes it necessary to byte swap on
		 * little endian processors.
		 */
		if (LITTLE_ENDIAN && stream)
			iowrite32be(data[i], card->regmap + CREG_DATA(i));
		else
			iowrite32(data[i], card->regmap + CREG_DATA(i));
	}
}

static void copy_from_creg_data(struct rsxx_cardinfo *card,
				int cnt8,
				void *buf,
				unsigned int stream)
{
	int i = 0;
	u32 *data = buf;

	for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
		/*
		 * Firmware implementation makes it necessary to byte swap on
		 * little endian processors.
		 */
		if (LITTLE_ENDIAN && stream)
			data[i] = ioread32be(card->regmap + CREG_DATA(i));
		else
			data[i] = ioread32(card->regmap + CREG_DATA(i));
	}
}

static struct creg_cmd *pop_active_cmd(struct rsxx_cardinfo *card)
{
	struct creg_cmd *cmd;

	/*
	 * Spin lock is needed because this can be called in atomic/interrupt
	 * context.
	 */
	spin_lock_bh(&card->creg_ctrl.pop_lock);
	cmd = card->creg_ctrl.active_cmd;
	card->creg_ctrl.active_cmd = NULL;
	spin_unlock_bh(&card->creg_ctrl.pop_lock);

	return cmd;
}
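
/*
 * Program the hardware for a single command: write the register address
 * and byte count, copy out any write data, then write the opcode to
 * CREG_CMD to start the command.
 */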
static void creg_issue_cmd(struct rsxx_cardinfo *card, struct creg_cmd *cmd)
{
	iowrite32(cmd->addr, card->regmap + CREG_ADD);
	iowrite32(cmd->cnt8, card->regmap + CREG_CNT);

	if (cmd->op == CREG_OP_WRITE) {
		if (cmd->buf)
			copy_to_creg_data(card, cmd->cnt8,
					  cmd->buf, cmd->stream);
	}

	/* Data copy must complete before initiating the command. */
	wmb();

	/* Setting the valid bit will kick off the command. */
	iowrite32(cmd->op, card->regmap + CREG_CMD);
}

static void creg_kick_queue(struct rsxx_cardinfo *card)
{
	if (card->creg_ctrl.active || list_empty(&card->creg_ctrl.queue))
		return;

	card->creg_ctrl.active = 1;
	card->creg_ctrl.active_cmd = list_first_entry(&card->creg_ctrl.queue,
						      struct creg_cmd, list);
	list_del(&card->creg_ctrl.active_cmd->list);
	card->creg_ctrl.q_depth--;

	/*
	 * We have to set the timer before we push the new command. Otherwise,
	 * we could create a race condition that would occur if the timer
	 * was not canceled, and expired after the new command was pushed,
	 * but before the command was issued to hardware.
	 */
	mod_timer(&card->creg_ctrl.cmd_timer,
		  jiffies + msecs_to_jiffies(CREG_TIMEOUT_MSEC));

	creg_issue_cmd(card, card->creg_ctrl.active_cmd);
}
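
/*
 * Allocate a creg_cmd, append it to the queue under creg_ctrl.lock, and
 * kick the queue so the command is issued immediately if the interface is
 * idle. The callback runs later from the completion or timeout path.
 */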
static int creg_queue_cmd(struct rsxx_cardinfo *card,
			  unsigned int op,
			  unsigned int addr,
			  unsigned int cnt8,
			  void *buf,
			  int stream,
			  creg_cmd_cb callback,
			  void *cb_private)
{
	struct creg_cmd *cmd;

	/* Don't queue stuff up if we're halted. */
	if (unlikely(card->halt))
		return -EINVAL;

	if (card->creg_ctrl.reset)
		return -EAGAIN;

	if (cnt8 > MAX_CREG_DATA8)
		return -EINVAL;

	cmd = kmem_cache_alloc(creg_cmd_pool, GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	INIT_LIST_HEAD(&cmd->list);

	cmd->op = op;
	cmd->addr = addr;
	cmd->cnt8 = cnt8;
	cmd->buf = buf;
	cmd->stream = stream;
	cmd->cb = callback;
	cmd->cb_private = cb_private;
	cmd->status = 0;

	mutex_lock(&card->creg_ctrl.lock);
	list_add_tail(&cmd->list, &card->creg_ctrl.queue);
	card->creg_ctrl.q_depth++;
	creg_kick_queue(card);
	mutex_unlock(&card->creg_ctrl.lock);

	return 0;
}
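
/*
 * Timer callback: the hardware did not complete the active command within
 * CREG_TIMEOUT_MSEC. Complete it with -ETIMEDOUT and move on to the next
 * queued command.
 */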
static void creg_cmd_timed_out(unsigned long data)
{
	struct rsxx_cardinfo *card = (struct rsxx_cardinfo *) data;
	struct creg_cmd *cmd;

	cmd = pop_active_cmd(card);
	if (cmd == NULL) {
		card->creg_ctrl.creg_stats.creg_timeout++;
		dev_warn(CARD_TO_DEV(card),
			 "No active command associated with timeout!\n");
		return;
	}

	if (cmd->cb)
		cmd->cb(card, cmd, -ETIMEDOUT);

	kmem_cache_free(creg_cmd_pool, cmd);

	spin_lock(&card->creg_ctrl.pop_lock);
	card->creg_ctrl.active = 0;
	creg_kick_queue(card);
	spin_unlock(&card->creg_ctrl.pop_lock);
}
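
/*
 * Completion work item, scheduled when the hardware signals that the
 * active command has finished. Reads CREG_STAT, validates the result,
 * copies back read data, invokes the command's callback, and kicks the
 * next queued command.
 */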
static void creg_cmd_done(struct work_struct *work)
{
	struct rsxx_cardinfo *card;
	struct creg_cmd *cmd;
	int st = 0;

	card = container_of(work, struct rsxx_cardinfo,
			    creg_ctrl.done_work);

	/*
	 * The timer could not be cancelled for some reason,
	 * race to pop the active command.
	 */
	if (del_timer_sync(&card->creg_ctrl.cmd_timer) == 0)
		card->creg_ctrl.creg_stats.failed_cancel_timer++;

	cmd = pop_active_cmd(card);
	if (cmd == NULL) {
		dev_err(CARD_TO_DEV(card),
			"Spurious creg interrupt!\n");
		return;
	}

	card->creg_ctrl.creg_stats.stat = ioread32(card->regmap + CREG_STAT);
	cmd->status = card->creg_ctrl.creg_stats.stat;
	if ((cmd->status & CREG_STAT_STATUS_MASK) == 0) {
		dev_err(CARD_TO_DEV(card),
			"Invalid status on creg command\n");
		/*
		 * At this point we're probably reading garbage from HW. Don't
		 * do anything else that could mess up the system and let
		 * the sync function return an error.
		 */
		st = -EIO;
		goto creg_done;
	} else if (cmd->status & CREG_STAT_ERROR) {
		st = -EIO;
	}

	if (cmd->op == CREG_OP_READ) {
		unsigned int cnt8 = ioread32(card->regmap + CREG_CNT);

		/* Paranoid Sanity Checks */
		if (!cmd->buf) {
			dev_err(CARD_TO_DEV(card),
				"Buffer not given for read.\n");
			st = -EIO;
			goto creg_done;
		}
		if (cnt8 != cmd->cnt8) {
			dev_err(CARD_TO_DEV(card),
				"count mismatch\n");
			st = -EIO;
			goto creg_done;
		}

		copy_from_creg_data(card, cnt8, cmd->buf, cmd->stream);
	}

creg_done:
	if (cmd->cb)
		cmd->cb(card, cmd, st);

	kmem_cache_free(creg_cmd_pool, cmd);

	mutex_lock(&card->creg_ctrl.lock);
	card->creg_ctrl.active = 0;
	creg_kick_queue(card);
	mutex_unlock(&card->creg_ctrl.lock);
}
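
/*
 * Recovery path used when a synchronous access gives up waiting: mask the
 * creg interrupts, cancel every queued and active command with -ECANCELED,
 * then re-enable the interrupts. reset_lock ensures only one reset runs at
 * a time.
 */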
static void creg_reset(struct rsxx_cardinfo *card)
{
	struct creg_cmd *cmd = NULL;
	struct creg_cmd *tmp;
	unsigned long flags;

	if (!mutex_trylock(&card->creg_ctrl.reset_lock))
		return;

	card->creg_ctrl.reset = 1;
	spin_lock_irqsave(&card->irq_lock, flags);
	rsxx_disable_ier_and_isr(card, CR_INTR_CREG | CR_INTR_EVENT);
	spin_unlock_irqrestore(&card->irq_lock, flags);

	dev_warn(CARD_TO_DEV(card),
		 "Resetting creg interface for recovery\n");

	/* Cancel outstanding commands */
	mutex_lock(&card->creg_ctrl.lock);
	list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
		list_del(&cmd->list);
		card->creg_ctrl.q_depth--;
		if (cmd->cb)
			cmd->cb(card, cmd, -ECANCELED);
		kmem_cache_free(creg_cmd_pool, cmd);
	}

	cmd = card->creg_ctrl.active_cmd;
	card->creg_ctrl.active_cmd = NULL;
	if (cmd) {
		if (timer_pending(&card->creg_ctrl.cmd_timer))
			del_timer_sync(&card->creg_ctrl.cmd_timer);

		if (cmd->cb)
			cmd->cb(card, cmd, -ECANCELED);
		kmem_cache_free(creg_cmd_pool, cmd);

		card->creg_ctrl.active = 0;
	}
	mutex_unlock(&card->creg_ctrl.lock);

	card->creg_ctrl.reset = 0;
	spin_lock_irqsave(&card->irq_lock, flags);
	rsxx_enable_ier_and_isr(card, CR_INTR_CREG | CR_INTR_EVENT);
	spin_unlock_irqrestore(&card->irq_lock, flags);

	mutex_unlock(&card->creg_ctrl.reset_lock);
}

/* Used for synchronous accesses */
struct creg_completion {
	struct completion *cmd_done;
	int st;
	u32 creg_status;
};

static void creg_cmd_done_cb(struct rsxx_cardinfo *card,
			     struct creg_cmd *cmd,
			     int st)
{
	struct creg_completion *cmd_completion;

	cmd_completion = (struct creg_completion *)cmd->cb_private;
	BUG_ON(!cmd_completion);

	cmd_completion->st = st;
	cmd_completion->creg_status = cmd->status;
	complete(cmd_completion->cmd_done);
}
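
/*
 * Synchronous single-transfer helper: queue the command with
 * creg_cmd_done_cb as the callback and sleep on an on-stack completion.
 * The wait timeout scales with the current queue depth plus a generous
 * margin, so a busy queue is not mistaken for a hung interface.
 */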
static int __issue_creg_rw(struct rsxx_cardinfo *card,
			   unsigned int op,
			   unsigned int addr,
			   unsigned int cnt8,
			   void *buf,
			   int stream,
			   unsigned int *hw_stat)
{
	DECLARE_COMPLETION_ONSTACK(cmd_done);
	struct creg_completion completion;
	unsigned long timeout;
	int st;

	INIT_COMPLETION(cmd_done);
	completion.cmd_done = &cmd_done;
	completion.st = 0;
	completion.creg_status = 0;

	st = creg_queue_cmd(card, op, addr, cnt8, buf, stream, creg_cmd_done_cb,
			    &completion);
	if (st)
		return st;

	timeout = msecs_to_jiffies((CREG_TIMEOUT_MSEC *
				    card->creg_ctrl.q_depth) + 20000);

	/*
	 * The creg interface is guaranteed to complete. It has a timeout
	 * mechanism that will kick in if hardware does not respond.
	 */
	st = wait_for_completion_timeout(completion.cmd_done, timeout);
	if (st == 0) {
		/*
		 * This is really bad, because the kernel timer did not
		 * expire and notify us of a timeout!
		 */
		dev_crit(CARD_TO_DEV(card),
			 "cregs timer failed\n");
		creg_reset(card);
		return -EIO;
	}

	*hw_stat = completion.creg_status;

	if (completion.st) {
		dev_warn(CARD_TO_DEV(card),
			 "creg command failed(%d x%08x)\n",
			 completion.st, addr);
		return completion.st;
	}

	return 0;
}
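
/*
 * Split an arbitrarily sized access into MAX_CREG_DATA8-sized transfers
 * and issue each one synchronously, advancing the buffer and register
 * address as it goes.
 */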
static int issue_creg_rw(struct rsxx_cardinfo *card,
			 u32 addr,
			 unsigned int size8,
			 void *data,
			 int stream,
			 int read)
{
	unsigned int hw_stat;
	unsigned int xfer;
	unsigned int op;
	int st;

	op = read ? CREG_OP_READ : CREG_OP_WRITE;

	do {
		xfer = min_t(unsigned int, size8, MAX_CREG_DATA8);

		st = __issue_creg_rw(card, op, addr, xfer,
				     data, stream, &hw_stat);
		if (st)
			return st;

		data = (void *)((char *)data + xfer);
		addr += xfer;
		size8 -= xfer;
	} while (size8);

	return 0;
}

/* ---------------------------- Public API ---------------------------------- */
int rsxx_creg_write(struct rsxx_cardinfo *card,
		    u32 addr,
		    unsigned int size8,
		    void *data,
		    int byte_stream)
{
	return issue_creg_rw(card, addr, size8, data, byte_stream, 0);
}

int rsxx_creg_read(struct rsxx_cardinfo *card,
		   u32 addr,
		   unsigned int size8,
		   void *data,
		   int byte_stream)
{
	return issue_creg_rw(card, addr, size8, data, byte_stream, 1);
}

int rsxx_get_card_state(struct rsxx_cardinfo *card, unsigned int *state)
{
	return rsxx_creg_read(card, CREG_ADD_CARD_STATE,
			      sizeof(*state), state, 0);
}

int rsxx_get_card_size8(struct rsxx_cardinfo *card, u64 *size8)
{
	unsigned int size;
	int st;

	st = rsxx_creg_read(card, CREG_ADD_CARD_SIZE,
			    sizeof(size), &size, 0);
	if (st)
		return st;

	*size8 = (u64)size * RSXX_HW_BLK_SIZE;
	return 0;
}

int rsxx_get_num_targets(struct rsxx_cardinfo *card,
			 unsigned int *n_targets)
{
	return rsxx_creg_read(card, CREG_ADD_NUM_TARGETS,
			      sizeof(*n_targets), n_targets, 0);
}

int rsxx_get_card_capabilities(struct rsxx_cardinfo *card,
			       u32 *capabilities)
{
	return rsxx_creg_read(card, CREG_ADD_CAPABILITIES,
			      sizeof(*capabilities), capabilities, 0);
}

int rsxx_issue_card_cmd(struct rsxx_cardinfo *card, u32 cmd)
{
	return rsxx_creg_write(card, CREG_ADD_CARD_CMD,
			       sizeof(cmd), &cmd, 0);
}

/*----------------- HW Log Functions -------------------*/
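/*
 * Print one hardware log message at the severity encoded in its "<#>"
 * prefix. Messages arriving without a prefix reuse the level of the
 * previous message.
 */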
static void hw_log_msg(struct rsxx_cardinfo *card, const char *str, int len)
{
	static char level;

	/*
	 * New messages start with "<#>", where # is the log level. Messages
	 * that extend past the log buffer will use the previous level
	 */
	if ((len > 3) && (str[0] == '<') && (str[2] == '>')) {
		level = str[1];
		str += 3; /* Skip past the log level. */
		len -= 3;
	}

	switch (level) {
	case '0':
		dev_emerg(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '1':
		dev_alert(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '2':
		dev_crit(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '3':
		dev_err(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '4':
		dev_warn(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '5':
		dev_notice(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '6':
		dev_info(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '7':
		dev_dbg(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	default:
		dev_info(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	}
}

/*
 * The substrncpy() function copies the string pointed to by src (up to
 * count bytes), including the terminating '\0' character, to dest.
 * Returns the number of bytes copied to dest.
 */
static int substrncpy(char *dest, const char *src, int count)
{
	int max_cnt = count;

	while (count) {
		count--;
		*dest = *src;
		if (*dest == '\0')
			break;
		src++;
		dest++;
	}
	return max_cnt - count;
}

static void read_hw_log_done(struct rsxx_cardinfo *card,
			     struct creg_cmd *cmd,
			     int st)
{
	char *buf;
	char *log_str;
	int cnt;
	int len;
	int off;

	buf = cmd->buf;
	off = 0;

	/* Failed getting the log message */
	if (st)
		return;

	while (off < cmd->cnt8) {
		log_str = &card->log.buf[card->log.buf_len];
		cnt = min(cmd->cnt8 - off, LOG_BUF_SIZE8 - card->log.buf_len);
		len = substrncpy(log_str, &buf[off], cnt);

		off += len;
		card->log.buf_len += len;

		/*
		 * Flush the log if we've hit the end of a message or if we've
		 * run out of buffer space.
		 */
		if ((log_str[len - 1] == '\0') ||
		    (card->log.buf_len == LOG_BUF_SIZE8)) {
			if (card->log.buf_len != 1) /* Don't log blank lines. */
				hw_log_msg(card, card->log.buf,
					   card->log.buf_len);
			card->log.buf_len = 0;
		}
	}

	if (cmd->status & CREG_STAT_LOG_PENDING)
		rsxx_read_hw_log(card);
}
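
/*
 * Queue an asynchronous, byte-stream read of the hardware log register.
 * read_hw_log_done() re-queues the read while CREG_STAT_LOG_PENDING is
 * still set, so the entire log is drained.
 */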
int rsxx_read_hw_log(struct rsxx_cardinfo *card)
{
	int st;

	st = creg_queue_cmd(card, CREG_OP_READ, CREG_ADD_LOG,
			    sizeof(card->log.tmp), card->log.tmp,
			    1, read_hw_log_done, NULL);
	if (st)
		dev_err(CARD_TO_DEV(card),
			"Failed getting log text\n");

	return st;
}

/*-------------- IOCTL REG Access ------------------*/
static int issue_reg_cmd(struct rsxx_cardinfo *card,
			 struct rsxx_reg_access *cmd,
			 int read)
{
	unsigned int op = read ? CREG_OP_READ : CREG_OP_WRITE;

	return __issue_creg_rw(card, op, cmd->addr, cmd->cnt, cmd->data,
			       cmd->stream, &cmd->stat);
}
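
/*
 * IOCTL backend: copy the register-access descriptor in from user space,
 * perform the access synchronously, then copy the hardware status (and
 * the data for reads) back out to the caller.
 */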
int rsxx_reg_access(struct rsxx_cardinfo *card,
		    struct rsxx_reg_access __user *ucmd,
		    int read)
{
	struct rsxx_reg_access cmd;
	int st;

	st = copy_from_user(&cmd, ucmd, sizeof(cmd));
	if (st)
		return -EFAULT;

	st = issue_reg_cmd(card, &cmd, read);
	if (st)
		return st;

	st = put_user(cmd.stat, &ucmd->stat);
	if (st)
		return -EFAULT;

	if (read) {
		st = copy_to_user(ucmd->data, cmd.data, cmd.cnt);
		if (st)
			return -EFAULT;
	}

	return 0;
}

/*------------ Initialization & Setup --------------*/
int rsxx_creg_setup(struct rsxx_cardinfo *card)
{
	card->creg_ctrl.active_cmd = NULL;

	INIT_WORK(&card->creg_ctrl.done_work, creg_cmd_done);
	mutex_init(&card->creg_ctrl.reset_lock);
	INIT_LIST_HEAD(&card->creg_ctrl.queue);
	mutex_init(&card->creg_ctrl.lock);
	spin_lock_init(&card->creg_ctrl.pop_lock);
	setup_timer(&card->creg_ctrl.cmd_timer, creg_cmd_timed_out,
		    (unsigned long) card);

	return 0;
}

void rsxx_creg_destroy(struct rsxx_cardinfo *card)
{
	struct creg_cmd *cmd;
	struct creg_cmd *tmp;
	int cnt = 0;

	/* Cancel outstanding commands */
	mutex_lock(&card->creg_ctrl.lock);
	list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
		list_del(&cmd->list);
		if (cmd->cb)
			cmd->cb(card, cmd, -ECANCELED);
		kmem_cache_free(creg_cmd_pool, cmd);
		cnt++;
	}

	if (cnt)
		dev_info(CARD_TO_DEV(card),
			 "Canceled %d queued creg commands\n", cnt);

	cmd = card->creg_ctrl.active_cmd;
	card->creg_ctrl.active_cmd = NULL;
	if (cmd) {
		if (timer_pending(&card->creg_ctrl.cmd_timer))
			del_timer_sync(&card->creg_ctrl.cmd_timer);

		if (cmd->cb)
			cmd->cb(card, cmd, -ECANCELED);
		dev_info(CARD_TO_DEV(card),
			 "Canceled active creg command\n");
		kmem_cache_free(creg_cmd_pool, cmd);
	}
	mutex_unlock(&card->creg_ctrl.lock);

	cancel_work_sync(&card->creg_ctrl.done_work);
}

int rsxx_creg_init(void)
{
	creg_cmd_pool = KMEM_CACHE(creg_cmd, SLAB_HWCACHE_ALIGN);
	if (!creg_cmd_pool)
		return -ENOMEM;

	return 0;
}

void rsxx_creg_cleanup(void)
{
	kmem_cache_destroy(creg_cmd_pool);
}