/* drivers/block/rsxx/cregs.c */
/*
 * Filename: cregs.c
 *
 * Authors: Joshua Morris <josh.h.morris@us.ibm.com>
 *	Philip Kelleher <pjk1939@linux.vnet.ibm.com>
 *
 * (C) Copyright 2013 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/completion.h>
#include <linux/slab.h>

#include "rsxx_priv.h"

/* Time (in ms) the hardware has to complete a single creg command. */
#define CREG_TIMEOUT_MSEC	10000

/*
 * Completion callback invoked when a creg command finishes: successfully,
 * with an error status, or after cancellation/timeout.
 */
typedef void (*creg_cmd_cb)(struct rsxx_cardinfo *card,
				struct creg_cmd *cmd,
				int st);

/* One queued card-register (creg) access. */
struct creg_cmd {
	struct list_head list;		/* link in creg_ctrl.queue */
	creg_cmd_cb cb;			/* completion callback (may be NULL) */
	void *cb_private;		/* opaque data handed to cb */
	unsigned int op;		/* CREG_OP_READ or CREG_OP_WRITE */
	unsigned int addr;		/* creg address to access */
	int cnt8;			/* transfer size in bytes */
	void *buf;			/* data buffer (may be NULL for writes) */
	unsigned int stream;		/* non-zero: byte-stream access; forces
					 * big-endian register I/O on LE hosts
					 * (see copy_to/from_creg_data) */
	unsigned int status;		/* raw CREG_STAT value on completion */
};

/* Slab cache backing all struct creg_cmd allocations. */
static struct kmem_cache *creg_cmd_pool;

/*------------ Private Functions --------------*/

#if defined(__LITTLE_ENDIAN)
#define LITTLE_ENDIAN 1
#elif defined(__BIG_ENDIAN)
#define LITTLE_ENDIAN 0
#else
#error Unknown endianess!!! Aborting...
#endif
  51. static void copy_to_creg_data(struct rsxx_cardinfo *card,
  52. int cnt8,
  53. void *buf,
  54. unsigned int stream)
  55. {
  56. int i = 0;
  57. u32 *data = buf;
  58. for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
  59. /*
  60. * Firmware implementation makes it necessary to byte swap on
  61. * little endian processors.
  62. */
  63. if (LITTLE_ENDIAN && stream)
  64. iowrite32be(data[i], card->regmap + CREG_DATA(i));
  65. else
  66. iowrite32(data[i], card->regmap + CREG_DATA(i));
  67. }
  68. }
  69. static void copy_from_creg_data(struct rsxx_cardinfo *card,
  70. int cnt8,
  71. void *buf,
  72. unsigned int stream)
  73. {
  74. int i = 0;
  75. u32 *data = buf;
  76. for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
  77. /*
  78. * Firmware implementation makes it necessary to byte swap on
  79. * little endian processors.
  80. */
  81. if (LITTLE_ENDIAN && stream)
  82. data[i] = ioread32be(card->regmap + CREG_DATA(i));
  83. else
  84. data[i] = ioread32(card->regmap + CREG_DATA(i));
  85. }
  86. }
  87. static struct creg_cmd *pop_active_cmd(struct rsxx_cardinfo *card)
  88. {
  89. struct creg_cmd *cmd;
  90. /*
  91. * Spin lock is needed because this can be called in atomic/interrupt
  92. * context.
  93. */
  94. spin_lock_bh(&card->creg_ctrl.lock);
  95. cmd = card->creg_ctrl.active_cmd;
  96. card->creg_ctrl.active_cmd = NULL;
  97. spin_unlock_bh(&card->creg_ctrl.lock);
  98. return cmd;
  99. }
/*
 * Program the hardware with the given creg command: address, count and
 * (for writes) the data, then kick it off by writing the op register last.
 */
static void creg_issue_cmd(struct rsxx_cardinfo *card, struct creg_cmd *cmd)
{
	iowrite32(cmd->addr, card->regmap + CREG_ADD);
	iowrite32(cmd->cnt8, card->regmap + CREG_CNT);

	if (cmd->op == CREG_OP_WRITE) {
		if (cmd->buf)
			copy_to_creg_data(card, cmd->cnt8,
					cmd->buf, cmd->stream);
	}

	/*
	 * Data copy must complete before initiating the command. This is
	 * needed for weakly ordered processors (i.e. PowerPC), so that all
	 * neccessary registers are written before we kick the hardware.
	 */
	wmb();

	/* Setting the valid bit will kick off the command. */
	iowrite32(cmd->op, card->regmap + CREG_CMD);
}
/*
 * Start the next queued creg command if the interface is currently idle.
 * Caller must hold creg_ctrl.lock.
 */
static void creg_kick_queue(struct rsxx_cardinfo *card)
{
	if (card->creg_ctrl.active || list_empty(&card->creg_ctrl.queue))
		return;

	card->creg_ctrl.active = 1;
	card->creg_ctrl.active_cmd = list_first_entry(&card->creg_ctrl.queue,
						struct creg_cmd, list);
	list_del(&card->creg_ctrl.active_cmd->list);
	card->creg_ctrl.q_depth--;

	/*
	 * We have to set the timer before we push the new command. Otherwise,
	 * we could create a race condition that would occur if the timer
	 * was not canceled, and expired after the new command was pushed,
	 * but before the command was issued to hardware.
	 */
	mod_timer(&card->creg_ctrl.cmd_timer,
			jiffies + msecs_to_jiffies(CREG_TIMEOUT_MSEC));

	creg_issue_cmd(card, card->creg_ctrl.active_cmd);
}
  137. static int creg_queue_cmd(struct rsxx_cardinfo *card,
  138. unsigned int op,
  139. unsigned int addr,
  140. unsigned int cnt8,
  141. void *buf,
  142. int stream,
  143. creg_cmd_cb callback,
  144. void *cb_private)
  145. {
  146. struct creg_cmd *cmd;
  147. /* Don't queue stuff up if we're halted. */
  148. if (unlikely(card->halt))
  149. return -EINVAL;
  150. if (card->creg_ctrl.reset)
  151. return -EAGAIN;
  152. if (cnt8 > MAX_CREG_DATA8)
  153. return -EINVAL;
  154. cmd = kmem_cache_alloc(creg_cmd_pool, GFP_KERNEL);
  155. if (!cmd)
  156. return -ENOMEM;
  157. INIT_LIST_HEAD(&cmd->list);
  158. cmd->op = op;
  159. cmd->addr = addr;
  160. cmd->cnt8 = cnt8;
  161. cmd->buf = buf;
  162. cmd->stream = stream;
  163. cmd->cb = callback;
  164. cmd->cb_private = cb_private;
  165. cmd->status = 0;
  166. spin_lock(&card->creg_ctrl.lock);
  167. list_add_tail(&cmd->list, &card->creg_ctrl.queue);
  168. card->creg_ctrl.q_depth++;
  169. creg_kick_queue(card);
  170. spin_unlock(&card->creg_ctrl.lock);
  171. return 0;
  172. }
/*
 * Timer callback (softirq context): the hardware failed to complete the
 * active creg command within CREG_TIMEOUT_MSEC.  Fail the command with
 * -ETIMEDOUT and start the next queued one.
 */
static void creg_cmd_timed_out(unsigned long data)
{
	struct rsxx_cardinfo *card = (struct rsxx_cardinfo *) data;
	struct creg_cmd *cmd;

	/* Race with the completion path for the active command. */
	cmd = pop_active_cmd(card);
	if (cmd == NULL) {
		card->creg_ctrl.creg_stats.creg_timeout++;
		dev_warn(CARD_TO_DEV(card),
			"No active command associated with timeout!\n");
		return;
	}

	if (cmd->cb)
		cmd->cb(card, cmd, -ETIMEDOUT);

	kmem_cache_free(creg_cmd_pool, cmd);

	/* Plain spin_lock suffices: we already run in softirq context. */
	spin_lock(&card->creg_ctrl.lock);
	card->creg_ctrl.active = 0;
	creg_kick_queue(card);
	spin_unlock(&card->creg_ctrl.lock);
}
  192. static void creg_cmd_done(struct work_struct *work)
  193. {
  194. struct rsxx_cardinfo *card;
  195. struct creg_cmd *cmd;
  196. int st = 0;
  197. card = container_of(work, struct rsxx_cardinfo,
  198. creg_ctrl.done_work);
  199. /*
  200. * The timer could not be cancelled for some reason,
  201. * race to pop the active command.
  202. */
  203. if (del_timer_sync(&card->creg_ctrl.cmd_timer) == 0)
  204. card->creg_ctrl.creg_stats.failed_cancel_timer++;
  205. cmd = pop_active_cmd(card);
  206. if (cmd == NULL) {
  207. dev_err(CARD_TO_DEV(card),
  208. "Spurious creg interrupt!\n");
  209. return;
  210. }
  211. card->creg_ctrl.creg_stats.stat = ioread32(card->regmap + CREG_STAT);
  212. cmd->status = card->creg_ctrl.creg_stats.stat;
  213. if ((cmd->status & CREG_STAT_STATUS_MASK) == 0) {
  214. dev_err(CARD_TO_DEV(card),
  215. "Invalid status on creg command\n");
  216. /*
  217. * At this point we're probably reading garbage from HW. Don't
  218. * do anything else that could mess up the system and let
  219. * the sync function return an error.
  220. */
  221. st = -EIO;
  222. goto creg_done;
  223. } else if (cmd->status & CREG_STAT_ERROR) {
  224. st = -EIO;
  225. }
  226. if ((cmd->op == CREG_OP_READ)) {
  227. unsigned int cnt8 = ioread32(card->regmap + CREG_CNT);
  228. /* Paranoid Sanity Checks */
  229. if (!cmd->buf) {
  230. dev_err(CARD_TO_DEV(card),
  231. "Buffer not given for read.\n");
  232. st = -EIO;
  233. goto creg_done;
  234. }
  235. if (cnt8 != cmd->cnt8) {
  236. dev_err(CARD_TO_DEV(card),
  237. "count mismatch\n");
  238. st = -EIO;
  239. goto creg_done;
  240. }
  241. copy_from_creg_data(card, cnt8, cmd->buf, cmd->stream);
  242. }
  243. creg_done:
  244. if (cmd->cb)
  245. cmd->cb(card, cmd, st);
  246. kmem_cache_free(creg_cmd_pool, cmd);
  247. spin_lock(&card->creg_ctrl.lock);
  248. card->creg_ctrl.active = 0;
  249. creg_kick_queue(card);
  250. spin_unlock(&card->creg_ctrl.lock);
  251. }
  252. static void creg_reset(struct rsxx_cardinfo *card)
  253. {
  254. struct creg_cmd *cmd = NULL;
  255. struct creg_cmd *tmp;
  256. unsigned long flags;
  257. /*
  258. * mutex_trylock is used here because if reset_lock is taken then a
  259. * reset is already happening. So, we can just go ahead and return.
  260. */
  261. if (!mutex_trylock(&card->creg_ctrl.reset_lock))
  262. return;
  263. card->creg_ctrl.reset = 1;
  264. spin_lock_irqsave(&card->irq_lock, flags);
  265. rsxx_disable_ier_and_isr(card, CR_INTR_CREG | CR_INTR_EVENT);
  266. spin_unlock_irqrestore(&card->irq_lock, flags);
  267. dev_warn(CARD_TO_DEV(card),
  268. "Resetting creg interface for recovery\n");
  269. /* Cancel outstanding commands */
  270. spin_lock(&card->creg_ctrl.lock);
  271. list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
  272. list_del(&cmd->list);
  273. card->creg_ctrl.q_depth--;
  274. if (cmd->cb)
  275. cmd->cb(card, cmd, -ECANCELED);
  276. kmem_cache_free(creg_cmd_pool, cmd);
  277. }
  278. cmd = card->creg_ctrl.active_cmd;
  279. card->creg_ctrl.active_cmd = NULL;
  280. if (cmd) {
  281. if (timer_pending(&card->creg_ctrl.cmd_timer))
  282. del_timer_sync(&card->creg_ctrl.cmd_timer);
  283. if (cmd->cb)
  284. cmd->cb(card, cmd, -ECANCELED);
  285. kmem_cache_free(creg_cmd_pool, cmd);
  286. card->creg_ctrl.active = 0;
  287. }
  288. spin_unlock(&card->creg_ctrl.lock);
  289. card->creg_ctrl.reset = 0;
  290. spin_lock_irqsave(&card->irq_lock, flags);
  291. rsxx_enable_ier_and_isr(card, CR_INTR_CREG | CR_INTR_EVENT);
  292. spin_unlock_irqrestore(&card->irq_lock, flags);
  293. mutex_unlock(&card->creg_ctrl.reset_lock);
  294. }
/* Used for synchronous accesses */
struct creg_completion {
	struct completion *cmd_done;	/* signalled when the command finishes */
	int st;				/* completion status code for the waiter */
	u32 creg_status;		/* raw hardware status (cmd->status) */
};

/*
 * creg_cmd_cb used by __issue_creg_rw(): record the command's result in
 * the on-stack creg_completion and wake the waiting thread.
 */
static void creg_cmd_done_cb(struct rsxx_cardinfo *card,
				struct creg_cmd *cmd,
				int st)
{
	struct creg_completion *cmd_completion;

	cmd_completion = cmd->cb_private;
	BUG_ON(!cmd_completion);

	cmd_completion->st = st;
	cmd_completion->creg_status = cmd->status;
	complete(cmd_completion->cmd_done);
}
/*
 * Issue one creg command and block until it completes (or until the
 * watchdog below expires).  On completion the raw hardware status is
 * returned through *hw_stat.  Returns 0 on success, a queueing errno
 * from creg_queue_cmd(), the command's completion status, or -EIO if
 * the wait itself timed out (in which case the interface is reset).
 */
static int __issue_creg_rw(struct rsxx_cardinfo *card,
				unsigned int op,
				unsigned int addr,
				unsigned int cnt8,
				void *buf,
				int stream,
				unsigned int *hw_stat)
{
	DECLARE_COMPLETION_ONSTACK(cmd_done);
	struct creg_completion completion;
	unsigned long timeout;
	int st;

	completion.cmd_done = &cmd_done;
	completion.st = 0;
	completion.creg_status = 0;

	st = creg_queue_cmd(card, op, addr, cnt8, buf, stream, creg_cmd_done_cb,
				&completion);
	if (st)
		return st;

	/*
	 * This timeout is neccessary for unresponsive hardware. The additional
	 * 20 seconds to used to guarantee that each cregs requests has time to
	 * complete.
	 *
	 * NOTE(review): q_depth is read here without creg_ctrl.lock, so the
	 * computed timeout is only approximate -- confirm this slack is
	 * acceptable.
	 */
	timeout = msecs_to_jiffies((CREG_TIMEOUT_MSEC *
				card->creg_ctrl.q_depth) + 20000);

	/*
	 * The creg interface is guaranteed to complete. It has a timeout
	 * mechanism that will kick in if hardware does not respond.
	 */
	st = wait_for_completion_timeout(completion.cmd_done, timeout);
	if (st == 0) {
		/*
		 * This is really bad, because the kernel timer did not
		 * expire and notify us of a timeout!
		 */
		dev_crit(CARD_TO_DEV(card),
			"cregs timer failed\n");
		creg_reset(card);
		return -EIO;
	}

	*hw_stat = completion.creg_status;

	if (completion.st) {
		dev_warn(CARD_TO_DEV(card),
			"creg command failed(%d x%08x)\n",
			completion.st, addr);
		return completion.st;
	}

	return 0;
}
  362. static int issue_creg_rw(struct rsxx_cardinfo *card,
  363. u32 addr,
  364. unsigned int size8,
  365. void *data,
  366. int stream,
  367. int read)
  368. {
  369. unsigned int hw_stat;
  370. unsigned int xfer;
  371. unsigned int op;
  372. int st;
  373. op = read ? CREG_OP_READ : CREG_OP_WRITE;
  374. do {
  375. xfer = min_t(unsigned int, size8, MAX_CREG_DATA8);
  376. st = __issue_creg_rw(card, op, addr, xfer,
  377. data, stream, &hw_stat);
  378. if (st)
  379. return st;
  380. data = (char *)data + xfer;
  381. addr += xfer;
  382. size8 -= xfer;
  383. } while (size8);
  384. return 0;
  385. }
/* ---------------------------- Public API ---------------------------------- */

/*
 * Synchronously write size8 bytes from data to card register space
 * starting at addr.  Returns 0 or a negative errno.
 */
int rsxx_creg_write(struct rsxx_cardinfo *card,
			u32 addr,
			unsigned int size8,
			void *data,
			int byte_stream)
{
	return issue_creg_rw(card, addr, size8, data, byte_stream, 0);
}

/*
 * Synchronously read size8 bytes of card register space starting at addr
 * into data.  Returns 0 or a negative errno.
 */
int rsxx_creg_read(struct rsxx_cardinfo *card,
			u32 addr,
			unsigned int size8,
			void *data,
			int byte_stream)
{
	return issue_creg_rw(card, addr, size8, data, byte_stream, 1);
}
/* Read the card's current state word. */
int rsxx_get_card_state(struct rsxx_cardinfo *card, unsigned int *state)
{
	return rsxx_creg_read(card, CREG_ADD_CARD_STATE,
				sizeof(*state), state, 0);
}

/* Read the card size (in hardware blocks) and convert it to bytes. */
int rsxx_get_card_size8(struct rsxx_cardinfo *card, u64 *size8)
{
	unsigned int size;
	int st;

	st = rsxx_creg_read(card, CREG_ADD_CARD_SIZE,
				sizeof(size), &size, 0);
	if (st)
		return st;

	/* Widen to u64 before multiplying so large cards don't overflow. */
	*size8 = (u64)size * RSXX_HW_BLK_SIZE;
	return 0;
}

/* Read the number of DMA targets the card exposes. */
int rsxx_get_num_targets(struct rsxx_cardinfo *card,
			unsigned int *n_targets)
{
	return rsxx_creg_read(card, CREG_ADD_NUM_TARGETS,
				sizeof(*n_targets), n_targets, 0);
}

/* Read the card's capability bitmask. */
int rsxx_get_card_capabilities(struct rsxx_cardinfo *card,
				u32 *capabilities)
{
	return rsxx_creg_read(card, CREG_ADD_CAPABILITIES,
				sizeof(*capabilities), capabilities, 0);
}

/* Write a command word to the card's command register. */
int rsxx_issue_card_cmd(struct rsxx_cardinfo *card, u32 cmd)
{
	return rsxx_creg_write(card, CREG_ADD_CARD_CMD,
				sizeof(cmd), &cmd, 0);
}
/*----------------- HW Log Functions -------------------*/

/*
 * Emit one hardware log message at the kernel log level encoded in its
 * "<#>" prefix.  The level is kept in a static so continuation chunks
 * that lack a prefix reuse the previous message's level.
 */
static void hw_log_msg(struct rsxx_cardinfo *card, const char *str, int len)
{
	static char level;

	/*
	 * New messages start with "<#>", where # is the log level. Messages
	 * that extend past the log buffer will use the previous level
	 */
	if ((len > 3) && (str[0] == '<') && (str[2] == '>')) {
		level = str[1];
		str += 3; /* Skip past the log level. */
		len -= 3;
	}

	switch (level) {
	case '0':
		dev_emerg(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '1':
		dev_alert(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '2':
		dev_crit(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '3':
		dev_err(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '4':
		dev_warn(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '5':
		dev_notice(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '6':
		dev_info(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '7':
		dev_dbg(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	default:
		/* Unknown/unset level: fall back to info. */
		dev_info(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	}
}
  479. /*
  480. * The substrncpy function copies the src string (which includes the
  481. * terminating '\0' character), up to the count into the dest pointer.
  482. * Returns the number of bytes copied to dest.
  483. */
  484. static int substrncpy(char *dest, const char *src, int count)
  485. {
  486. int max_cnt = count;
  487. while (count) {
  488. count--;
  489. *dest = *src;
  490. if (*dest == '\0')
  491. break;
  492. src++;
  493. dest++;
  494. }
  495. return max_cnt - count;
  496. }
/*
 * creg_cmd_cb for hardware log reads: append the chunk in cmd->buf to the
 * driver's accumulation buffer (card->log.buf), flushing whenever a full
 * message ('\0' seen) or a full buffer is reached, then queue another read
 * if the hardware reports more log data pending.
 */
static void read_hw_log_done(struct rsxx_cardinfo *card,
				struct creg_cmd *cmd,
				int st)
{
	char *buf;
	char *log_str;
	int cnt;
	int len;
	int off;

	buf = cmd->buf;
	off = 0;

	/* Failed getting the log message */
	if (st)
		return;

	while (off < cmd->cnt8) {
		log_str = &card->log.buf[card->log.buf_len];
		/* Copy no more than the remaining chunk or remaining space. */
		cnt = min(cmd->cnt8 - off, LOG_BUF_SIZE8 - card->log.buf_len);
		len = substrncpy(log_str, &buf[off], cnt);

		off += len;
		card->log.buf_len += len;

		/*
		 * Flush the log if we've hit the end of a message or if we've
		 * run out of buffer space.
		 */
		if ((log_str[len - 1] == '\0') ||
			(card->log.buf_len == LOG_BUF_SIZE8)) {
			if (card->log.buf_len != 1) /* Don't log blank lines. */
				hw_log_msg(card, card->log.buf,
					card->log.buf_len);

			card->log.buf_len = 0;
		}
	}

	/* Hardware still has log data queued: keep draining. */
	if (cmd->status & CREG_STAT_LOG_PENDING)
		rsxx_read_hw_log(card);
}
/*
 * Queue an asynchronous read of the next chunk of the hardware log into
 * card->log.tmp; read_hw_log_done() consumes it on completion.
 * Returns 0 or the queueing errno.
 */
int rsxx_read_hw_log(struct rsxx_cardinfo *card)
{
	int st;

	st = creg_queue_cmd(card, CREG_OP_READ, CREG_ADD_LOG,
				sizeof(card->log.tmp), card->log.tmp,
				1, read_hw_log_done, NULL);
	if (st)
		dev_err(CARD_TO_DEV(card),
			"Failed getting log text\n");

	return st;
}
/*-------------- IOCTL REG Access ------------------*/

/*
 * Issue a single synchronous register access described by @cmd; the raw
 * hardware status is written back into cmd->stat.
 */
static int issue_reg_cmd(struct rsxx_cardinfo *card,
				struct rsxx_reg_access *cmd,
				int read)
{
	unsigned int op = read ? CREG_OP_READ : CREG_OP_WRITE;

	return __issue_creg_rw(card, op, cmd->addr, cmd->cnt, cmd->data,
				cmd->stream, &cmd->stat);
}

/*
 * ioctl backend for user-space register access: copy in the request,
 * validate the count, perform the access, and copy the status (and, for
 * reads, the data) back to user space.
 */
int rsxx_reg_access(struct rsxx_cardinfo *card,
			struct rsxx_reg_access __user *ucmd,
			int read)
{
	struct rsxx_reg_access cmd;
	int st;

	st = copy_from_user(&cmd, ucmd, sizeof(cmd));
	if (st)
		return -EFAULT;

	/* Reject user counts larger than the embedded data buffer. */
	if (cmd.cnt > RSXX_MAX_REG_CNT)
		return -EFAULT;

	st = issue_reg_cmd(card, &cmd, read);
	if (st)
		return st;

	st = put_user(cmd.stat, &ucmd->stat);
	if (st)
		return -EFAULT;

	if (read) {
		st = copy_to_user(ucmd->data, cmd.data, cmd.cnt);
		if (st)
			return -EFAULT;
	}

	return 0;
}
/*------------ Initialization & Setup --------------*/

/*
 * Per-card initialisation of the creg control state: completion work item,
 * reset mutex, command queue, lock, and the command timeout timer.
 * Always returns 0.
 */
int rsxx_creg_setup(struct rsxx_cardinfo *card)
{
	card->creg_ctrl.active_cmd = NULL;

	INIT_WORK(&card->creg_ctrl.done_work, creg_cmd_done);
	mutex_init(&card->creg_ctrl.reset_lock);
	INIT_LIST_HEAD(&card->creg_ctrl.queue);
	spin_lock_init(&card->creg_ctrl.lock);
	setup_timer(&card->creg_ctrl.cmd_timer, creg_cmd_timed_out,
			(unsigned long) card);

	return 0;
}
  588. void rsxx_creg_destroy(struct rsxx_cardinfo *card)
  589. {
  590. struct creg_cmd *cmd;
  591. struct creg_cmd *tmp;
  592. int cnt = 0;
  593. /* Cancel outstanding commands */
  594. spin_lock(&card->creg_ctrl.lock);
  595. list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
  596. list_del(&cmd->list);
  597. if (cmd->cb)
  598. cmd->cb(card, cmd, -ECANCELED);
  599. kmem_cache_free(creg_cmd_pool, cmd);
  600. cnt++;
  601. }
  602. if (cnt)
  603. dev_info(CARD_TO_DEV(card),
  604. "Canceled %d queue creg commands\n", cnt);
  605. cmd = card->creg_ctrl.active_cmd;
  606. card->creg_ctrl.active_cmd = NULL;
  607. if (cmd) {
  608. if (timer_pending(&card->creg_ctrl.cmd_timer))
  609. del_timer_sync(&card->creg_ctrl.cmd_timer);
  610. if (cmd->cb)
  611. cmd->cb(card, cmd, -ECANCELED);
  612. dev_info(CARD_TO_DEV(card),
  613. "Canceled active creg command\n");
  614. kmem_cache_free(creg_cmd_pool, cmd);
  615. }
  616. spin_unlock(&card->creg_ctrl.lock);
  617. cancel_work_sync(&card->creg_ctrl.done_work);
  618. }
  619. int rsxx_creg_init(void)
  620. {
  621. creg_cmd_pool = KMEM_CACHE(creg_cmd, SLAB_HWCACHE_ALIGN);
  622. if (!creg_cmd_pool)
  623. return -ENOMEM;
  624. return 0;
  625. }
/* Destroy the global creg command slab cache (module unload). */
void rsxx_creg_cleanup(void)
{
	kmem_cache_destroy(creg_cmd_pool);
}