interrupt.c 17 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699
  1. /*
  2. *
  3. * Intel Management Engine Interface (Intel MEI) Linux driver
  4. * Copyright (c) 2003-2012, Intel Corporation.
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms and conditions of the GNU General Public License,
  8. * version 2, as published by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope it will be useful, but WITHOUT
  11. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  13. * more details.
  14. *
  15. */
  16. #include <linux/export.h>
  17. #include <linux/pci.h>
  18. #include <linux/kthread.h>
  19. #include <linux/interrupt.h>
  20. #include <linux/fs.h>
  21. #include <linux/jiffies.h>
  22. #include <linux/mei.h>
  23. #include "mei_dev.h"
  24. #include "hbm.h"
  25. #include "hw-me.h"
  26. #include "client.h"
  27. /**
  28. * mei_irq_compl_handler - dispatch complete handelers
  29. * for the completed callbacks
  30. *
  31. * @dev - mei device
  32. * @compl_list - list of completed cbs
  33. */
  34. void mei_irq_compl_handler(struct mei_device *dev, struct mei_cl_cb *compl_list)
  35. {
  36. struct mei_cl_cb *cb, *next;
  37. struct mei_cl *cl;
  38. list_for_each_entry_safe(cb, next, &compl_list->list, list) {
  39. cl = cb->cl;
  40. list_del(&cb->list);
  41. if (!cl)
  42. continue;
  43. dev_dbg(&dev->pdev->dev, "completing call back.\n");
  44. if (cl == &dev->iamthif_cl)
  45. mei_amthif_complete(dev, cb);
  46. else
  47. mei_cl_complete(cl, cb);
  48. }
  49. }
  50. EXPORT_SYMBOL_GPL(mei_irq_compl_handler);
  51. /**
  52. * mei_cl_hbm_equal - check if hbm is addressed to the client
  53. *
  54. * @cl: host client
  55. * @mei_hdr: header of mei client message
  56. *
  57. * returns true if matches, false otherwise
  58. */
  59. static inline int mei_cl_hbm_equal(struct mei_cl *cl,
  60. struct mei_msg_hdr *mei_hdr)
  61. {
  62. return cl->host_client_id == mei_hdr->host_addr &&
  63. cl->me_client_id == mei_hdr->me_addr;
  64. }
  65. /**
  66. * mei_cl_is_reading - checks if the client
  67. is the one to read this message
  68. *
  69. * @cl: mei client
  70. * @mei_hdr: header of mei message
  71. *
  72. * returns true on match and false otherwise
  73. */
  74. static bool mei_cl_is_reading(struct mei_cl *cl, struct mei_msg_hdr *mei_hdr)
  75. {
  76. return mei_cl_hbm_equal(cl, mei_hdr) &&
  77. cl->state == MEI_FILE_CONNECTED &&
  78. cl->reading_state != MEI_READ_COMPLETE;
  79. }
/**
 * mei_irq_read_client_message - process client message
 *
 * Locates the pending read callback that matches @mei_hdr, copies the
 * incoming slots into its response buffer (growing it if needed), and
 * moves the callback to @complete_list once the message is complete.
 * If no matching reader is found, the message is drained into the
 * device scratch buffer and discarded.
 *
 * @dev: the device structure
 * @mei_hdr: header of mei client message
 * @complete_list: An instance of our list structure
 *
 * returns 0 on success, <0 on failure.
 */
static int mei_cl_irq_read_msg(struct mei_device *dev,
			       struct mei_msg_hdr *mei_hdr,
			       struct mei_cl_cb *complete_list)
{
	struct mei_cl *cl;
	struct mei_cl_cb *cb, *next;
	/* stays NULL when no reader matched; used as the "discard" flag below */
	unsigned char *buffer = NULL;

	list_for_each_entry_safe(cb, next, &dev->read_list.list, list) {
		cl = cb->cl;
		if (!cl || !mei_cl_is_reading(cl, mei_hdr))
			continue;

		cl->reading_state = MEI_READING;

		if (cb->response_buffer.size == 0 ||
		    cb->response_buffer.data == NULL) {
			dev_err(&dev->pdev->dev, "response buffer is not allocated.\n");
			/* NOTE(review): cb is unlinked but neither freed nor
			 * moved to the completion list, so the waiting reader
			 * is never woken and the message stays in hardware --
			 * confirm this error path is intentional. */
			list_del(&cb->list);
			return -ENOMEM;
		}

		if (cb->response_buffer.size < mei_hdr->length + cb->buf_idx) {
			dev_dbg(&dev->pdev->dev, "message overflow. size %d len %d idx %ld\n",
				cb->response_buffer.size,
				mei_hdr->length, cb->buf_idx);
			/* grow the buffer in place; krealloc preserves the
			 * already-received prefix up to buf_idx */
			buffer = krealloc(cb->response_buffer.data,
					  mei_hdr->length + cb->buf_idx,
					  GFP_KERNEL);

			if (!buffer) {
				dev_err(&dev->pdev->dev, "allocation failed.\n");
				/* NOTE(review): same silent-unlink concern as
				 * above -- the reader is not completed */
				list_del(&cb->list);
				return -ENOMEM;
			}
			cb->response_buffer.data = buffer;
			cb->response_buffer.size =
				mei_hdr->length + cb->buf_idx;
		}

		buffer = cb->response_buffer.data + cb->buf_idx;
		mei_read_slots(dev, buffer, mei_hdr->length);

		cb->buf_idx += mei_hdr->length;
		if (mei_hdr->msg_complete) {
			cl->status = 0;
			/* del + add_tail rather than list_move so the debug
			 * print happens while the cb is off both lists */
			list_del(&cb->list);
			dev_dbg(&dev->pdev->dev, "completed read H cl = %d, ME cl = %d, length = %lu\n",
				cl->host_client_id,
				cl->me_client_id,
				cb->buf_idx);
			list_add_tail(&cb->list, &complete_list->list);
		}

		/* at most one reader consumes a given message */
		break;
	}

	dev_dbg(&dev->pdev->dev, "message read\n");
	if (!buffer) {
		/* no matching reader: drain the slots to keep the hardware
		 * read pointer in sync, then drop the data */
		mei_read_slots(dev, dev->rd_msg_buf, mei_hdr->length);
		dev_dbg(&dev->pdev->dev, "discarding message " MEI_HDR_FMT "\n",
				MEI_HDR_PRM(mei_hdr));
	}

	return 0;
}
  145. /**
  146. * _mei_irq_thread_close - processes close related operation.
  147. *
  148. * @dev: the device structure.
  149. * @slots: free slots.
  150. * @cb_pos: callback block.
  151. * @cl: private data of the file object.
  152. * @cmpl_list: complete list.
  153. *
  154. * returns 0, OK; otherwise, error.
  155. */
  156. static int _mei_irq_thread_close(struct mei_device *dev, s32 *slots,
  157. struct mei_cl_cb *cb_pos,
  158. struct mei_cl *cl,
  159. struct mei_cl_cb *cmpl_list)
  160. {
  161. u32 msg_slots =
  162. mei_data2slots(sizeof(struct hbm_client_connect_request));
  163. if (*slots < msg_slots)
  164. return -EMSGSIZE;
  165. *slots -= msg_slots;
  166. if (mei_hbm_cl_disconnect_req(dev, cl)) {
  167. cl->status = 0;
  168. cb_pos->buf_idx = 0;
  169. list_move_tail(&cb_pos->list, &cmpl_list->list);
  170. return -EIO;
  171. }
  172. cl->state = MEI_FILE_DISCONNECTING;
  173. cl->status = 0;
  174. cb_pos->buf_idx = 0;
  175. list_move_tail(&cb_pos->list, &dev->ctrl_rd_list.list);
  176. cl->timer_count = MEI_CONNECT_TIMEOUT;
  177. return 0;
  178. }
  179. /**
  180. * _mei_irq_thread_read - processes read related operation.
  181. *
  182. * @dev: the device structure.
  183. * @slots: free slots.
  184. * @cb_pos: callback block.
  185. * @cl: private data of the file object.
  186. * @cmpl_list: complete list.
  187. *
  188. * returns 0, OK; otherwise, error.
  189. */
  190. static int _mei_irq_thread_read(struct mei_device *dev, s32 *slots,
  191. struct mei_cl_cb *cb_pos,
  192. struct mei_cl *cl,
  193. struct mei_cl_cb *cmpl_list)
  194. {
  195. u32 msg_slots = mei_data2slots(sizeof(struct hbm_flow_control));
  196. if (*slots < msg_slots) {
  197. /* return the cancel routine */
  198. list_del(&cb_pos->list);
  199. return -EMSGSIZE;
  200. }
  201. *slots -= msg_slots;
  202. if (mei_hbm_cl_flow_control_req(dev, cl)) {
  203. cl->status = -ENODEV;
  204. cb_pos->buf_idx = 0;
  205. list_move_tail(&cb_pos->list, &cmpl_list->list);
  206. return -ENODEV;
  207. }
  208. list_move_tail(&cb_pos->list, &dev->read_list.list);
  209. return 0;
  210. }
  211. /**
  212. * _mei_irq_thread_ioctl - processes ioctl related operation.
  213. *
  214. * @dev: the device structure.
  215. * @slots: free slots.
  216. * @cb_pos: callback block.
  217. * @cl: private data of the file object.
  218. * @cmpl_list: complete list.
  219. *
  220. * returns 0, OK; otherwise, error.
  221. */
  222. static int _mei_irq_thread_ioctl(struct mei_device *dev, s32 *slots,
  223. struct mei_cl_cb *cb_pos,
  224. struct mei_cl *cl,
  225. struct mei_cl_cb *cmpl_list)
  226. {
  227. u32 msg_slots =
  228. mei_data2slots(sizeof(struct hbm_client_connect_request));
  229. if (*slots < msg_slots) {
  230. /* return the cancel routine */
  231. list_del(&cb_pos->list);
  232. return -EMSGSIZE;
  233. }
  234. *slots -= msg_slots;
  235. cl->state = MEI_FILE_CONNECTING;
  236. if (mei_hbm_cl_connect_req(dev, cl)) {
  237. cl->status = -ENODEV;
  238. cb_pos->buf_idx = 0;
  239. list_del(&cb_pos->list);
  240. return -ENODEV;
  241. } else {
  242. list_move_tail(&cb_pos->list, &dev->ctrl_rd_list.list);
  243. cl->timer_count = MEI_CONNECT_TIMEOUT;
  244. }
  245. return 0;
  246. }
/**
 * mei_irq_thread_write_complete - write messages to device.
 *
 * Writes as much of @cb's request buffer as the host buffer allows.
 * If everything fits, the message is sent complete; otherwise, when the
 * whole host buffer is empty, a partial fragment is sent and the rest
 * is left for the next pass. When neither holds, nothing is written.
 *
 * @dev: the device structure.
 * @slots: free slots.
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * returns 0, OK; otherwise, error.
 */
static int mei_irq_thread_write_complete(struct mei_device *dev, s32 *slots,
			struct mei_cl_cb *cb, struct mei_cl_cb *cmpl_list)
{
	struct mei_msg_hdr mei_hdr;
	struct mei_cl *cl = cb->cl;
	/* bytes still to be written from this request */
	size_t len = cb->request_buffer.size - cb->buf_idx;
	u32 msg_slots = mei_data2slots(len);

	mei_hdr.host_addr = cl->host_client_id;
	mei_hdr.me_addr = cl->me_client_id;
	mei_hdr.reserved = 0;

	if (*slots >= msg_slots) {
		/* the remainder fits: send it as a complete message */
		mei_hdr.length = len;
		mei_hdr.msg_complete = 1;
	/* Split the message only if we can write the whole host buffer */
	} else if (*slots == dev->hbuf_depth) {
		msg_slots = *slots;
		/* payload = full buffer minus the header we prepend */
		len = (*slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
		mei_hdr.length = len;
		mei_hdr.msg_complete = 0;
	} else {
		/* wait for next time the host buffer is empty */
		return 0;
	}

	dev_dbg(&dev->pdev->dev, "buf: size = %d idx = %lu\n",
			cb->request_buffer.size, cb->buf_idx);
	dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(&mei_hdr));

	/* slots are consumed before the write; not restored on failure */
	*slots -=  msg_slots;
	if (mei_write_message(dev, &mei_hdr,
			cb->request_buffer.data + cb->buf_idx)) {
		cl->status = -ENODEV;
		list_move_tail(&cb->list, &cmpl_list->list);
		return -ENODEV;
	}

	cl->status = 0;
	cb->buf_idx += mei_hdr.length;
	if (mei_hdr.msg_complete) {
		/* one flow-control credit is spent per complete message */
		if (mei_cl_flow_ctrl_reduce(cl))
			return -ENODEV;
		list_move_tail(&cb->list, &dev->write_waiting_list.list);
	}

	return 0;
}
/**
 * mei_irq_read_handler - bottom half read routine after ISR to
 * handle the read processing.
 *
 * Reads one message header (if not already latched in dev->rd_msg_hdr),
 * validates it, and routes the payload: HBM bus messages, the amthif
 * client, or a regular client via mei_cl_irq_read_msg().
 *
 * @dev: the device structure
 * @cmpl_list: An instance of our list structure
 * @slots: slots to read.
 *
 * returns 0 on success, <0 on failure.
 */
int mei_irq_read_handler(struct mei_device *dev,
		struct mei_cl_cb *cmpl_list, s32 *slots)
{
	struct mei_msg_hdr *mei_hdr;
	struct mei_cl *cl_pos = NULL;
	struct mei_cl *cl_next = NULL;
	int ret = 0;

	/* latch a fresh header from hardware; it persists across calls so a
	 * message can be finished on a later invocation */
	if (!dev->rd_msg_hdr) {
		dev->rd_msg_hdr = mei_read_hdr(dev);
		dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots);
		(*slots)--;
		dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots);
	}
	mei_hdr = (struct mei_msg_hdr *) &dev->rd_msg_hdr;
	dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));

	/* a set reserved field or an all-zero header means corruption */
	if (mei_hdr->reserved || !dev->rd_msg_hdr) {
		dev_dbg(&dev->pdev->dev, "corrupted message header.\n");
		ret = -EBADMSG;
		goto end;
	}

	/* non-bus message: verify a known client matches the addresses */
	if (mei_hdr->host_addr || mei_hdr->me_addr) {
		list_for_each_entry_safe(cl_pos, cl_next,
					&dev->file_list, link) {
			dev_dbg(&dev->pdev->dev,
					"list_for_each_entry_safe read host"
					" client = %d, ME client = %d\n",
					cl_pos->host_client_id,
					cl_pos->me_client_id);
			if (mei_cl_hbm_equal(cl_pos, mei_hdr))
				break;
		}

		/* loop ran off the end: no client owns this address pair */
		if (&cl_pos->link == &dev->file_list) {
			dev_dbg(&dev->pdev->dev, "corrupted message header\n");
			ret = -EBADMSG;
			goto end;
		}
	}
	/* the full payload must already be in the circular buffer */
	if (((*slots) * sizeof(u32)) < mei_hdr->length) {
		dev_err(&dev->pdev->dev,
				"we can't read the message slots =%08x.\n",
				*slots);
		/* we can't read the message */
		ret = -ERANGE;
		goto end;
	}

	/* decide where to read the message too */
	if (!mei_hdr->host_addr) {
		/* host_addr 0 addresses the HBM (bus management) channel */
		dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_bus_message.\n");
		mei_hbm_dispatch(dev, mei_hdr);
		dev_dbg(&dev->pdev->dev, "end mei_irq_thread_read_bus_message.\n");
	} else if (mei_hdr->host_addr == dev->iamthif_cl.host_client_id &&
		   (MEI_FILE_CONNECTED == dev->iamthif_cl.state) &&
		   (dev->iamthif_state == MEI_IAMTHIF_READING)) {
		dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_iamthif_message.\n");
		dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));

		ret = mei_amthif_irq_read_msg(dev, mei_hdr, cmpl_list);
		if (ret)
			goto end;
	} else {
		dev_dbg(&dev->pdev->dev, "call mei_cl_irq_read_msg.\n");
		dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));
		ret = mei_cl_irq_read_msg(dev, mei_hdr, cmpl_list);
		if (ret)
			goto end;
	}

	/* reset the number of slots and header */
	*slots = mei_count_full_read_slots(dev);
	dev->rd_msg_hdr = 0;

	/* mei_count_full_read_slots() returns -EOVERFLOW as an in-band
	 * sentinel when the hardware read pointer has wrapped badly */
	if (*slots == -EOVERFLOW) {
		/* overflow - reset */
		dev_err(&dev->pdev->dev, "resetting due to slots overflow.\n");
		/* set the event since message has been read */
		ret = -ERANGE;
		goto end;
	}
end:
	return ret;
}
EXPORT_SYMBOL_GPL(mei_irq_read_handler);
/**
 * mei_irq_write_handler - dispatch write requests
 * after irq received
 *
 * Runs through the pending write work in fixed order: completes cbs that
 * finished writing, flushes the single pending extended message, services
 * the watchdog client, sends control messages (close/read/connect), and
 * finally pushes queued data writes within the available slot budget.
 *
 * @dev: the device structure
 * @cmpl_list: An instance of our list structure
 *
 * returns 0 on success, <0 on failure.
 */
int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
{

	struct mei_cl *cl;
	struct mei_cl_cb *pos = NULL, *next = NULL;
	struct mei_cl_cb *list;
	s32 slots;
	int ret;

	/* nothing to do until the host buffer drains */
	if (!mei_hbuf_is_ready(dev)) {
		dev_dbg(&dev->pdev->dev, "host buffer is not empty.\n");
		return 0;
	}
	slots = mei_hbuf_empty_slots(dev);
	if (slots <= 0)
		return -EMSGSIZE;

	/* complete all waiting for write CB */
	dev_dbg(&dev->pdev->dev, "complete all waiting for write cb.\n");

	list = &dev->write_waiting_list;
	list_for_each_entry_safe(pos, next, &list->list, list) {
		cl = pos->cl;
		if (cl == NULL)
			continue;

		cl->status = 0;
		list_del(&pos->list);
		/* regular clients: mark the write done and hand the cb to
		 * the completion list; amthif is handled separately below */
		if (MEI_WRITING == cl->writing_state &&
		    pos->fop_type == MEI_FOP_WRITE &&
		    cl != &dev->iamthif_cl) {
			dev_dbg(&dev->pdev->dev, "MEI WRITE COMPLETE\n");
			cl->writing_state = MEI_WRITE_COMPLETE;
			list_add_tail(&pos->list, &cmpl_list->list);
		}
		if (cl == &dev->iamthif_cl) {
			dev_dbg(&dev->pdev->dev, "check iamthif flow control.\n");
			if (dev->iamthif_flow_control_pending) {
				ret = mei_amthif_irq_read(dev, &slots);
				if (ret)
					return ret;
			}
		}
	}

	/* the watchdog stop request has been flushed; wake the waiter */
	if (dev->wd_state == MEI_WD_STOPPING) {
		dev->wd_state = MEI_WD_IDLE;
		wake_up_interruptible(&dev->wait_stop_wd);
	}

	/* single-slot "extended" message (e.g. stop/version reply) queued
	 * by the HBM layer; length 0 marks the slot as free again */
	if (dev->wr_ext_msg.hdr.length) {
		mei_write_message(dev, &dev->wr_ext_msg.hdr,
				dev->wr_ext_msg.data);
		slots -= mei_data2slots(dev->wr_ext_msg.hdr.length);
		dev->wr_ext_msg.hdr.length = 0;
	}
	if (dev->dev_state == MEI_DEV_ENABLED) {
		if (dev->wd_pending &&
		    mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) {
			if (mei_wd_send(dev))
				dev_dbg(&dev->pdev->dev, "wd send failed.\n");
			else if (mei_cl_flow_ctrl_reduce(&dev->wd_cl))
				return -ENODEV;

			dev->wd_pending = false;

			/* charge the slot budget for whichever wd message
			 * was sent (start while running, stop otherwise) */
			if (dev->wd_state == MEI_WD_RUNNING)
				slots -= mei_data2slots(MEI_WD_START_MSG_SIZE);
			else
				slots -= mei_data2slots(MEI_WD_STOP_MSG_SIZE);
		}
	}

	/* complete control write list CB */
	dev_dbg(&dev->pdev->dev, "complete control write list cb.\n");
	list_for_each_entry_safe(pos, next, &dev->ctrl_wr_list.list, list) {
		cl = pos->cl;
		if (!cl) {
			/* orphaned cb: drop it and abort this pass */
			list_del(&pos->list);
			return -ENODEV;
		}
		switch (pos->fop_type) {
		case MEI_FOP_CLOSE:
			/* send disconnect message */
			ret = _mei_irq_thread_close(dev, &slots, pos,
						cl, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_READ:
			/* send flow control message */
			ret = _mei_irq_thread_read(dev, &slots, pos,
						cl, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_IOCTL:
			/* connect message */
			if (mei_cl_is_other_connecting(cl))
				continue;
			ret = _mei_irq_thread_ioctl(dev, &slots, pos,
						cl, cmpl_list);
			if (ret)
				return ret;

			break;

		default:
			/* a cb on ctrl_wr_list can only carry the three
			 * fop types above; anything else is a driver bug */
			BUG();
		}

	}
	/* complete  write list CB */
	dev_dbg(&dev->pdev->dev, "complete write list cb.\n");
	list_for_each_entry_safe(pos, next, &dev->write_list.list, list) {
		cl = pos->cl;
		if (cl == NULL)
			continue;
		/* skip clients that have no flow-control credit yet */
		if (mei_cl_flow_ctrl_creds(cl) <= 0) {
			dev_dbg(&dev->pdev->dev,
				"No flow control credentials for client %d, not sending.\n",
				cl->host_client_id);
			continue;
		}

		if (cl == &dev->iamthif_cl)
			ret = mei_amthif_irq_write_complete(dev, &slots,
						pos, cmpl_list);
		else
			ret = mei_irq_thread_write_complete(dev, &slots, pos,
						cmpl_list);
		if (ret)
			return ret;

	}
	return 0;
}
EXPORT_SYMBOL_GPL(mei_irq_write_handler);
/**
 * mei_timer - timer function.
 *
 * Periodic watchdog for the driver's own protocol timeouts: client
 * initialization, connect/disconnect, and amthif stalls. Resets the
 * device when a timeout fires, then re-arms itself every 2 seconds.
 *
 * @work: pointer to the work_struct structure
 *
 * NOTE: This function is called by timer interrupt work
 */
void mei_timer(struct work_struct *work)
{
	unsigned long timeout;
	struct mei_cl *cl_pos = NULL;
	struct mei_cl *cl_next = NULL;
	struct mei_cl_cb  *cb_pos = NULL;
	struct mei_cl_cb  *cb_next = NULL;

	struct mei_device *dev = container_of(work,
					struct mei_device, timer_work.work);


	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		/* still initializing: only the init-clients countdown runs */
		if (dev->dev_state == MEI_DEV_INIT_CLIENTS) {
			if (dev->init_clients_timer) {
				if (--dev->init_clients_timer == 0) {
					dev_err(&dev->pdev->dev, "reset: init clients timeout hbm_state = %d.\n",
						dev->hbm_state);
					mei_reset(dev, 1);
				}
			}
		}
		goto out;
	}
	/*** connect/disconnect timeouts ***/
	list_for_each_entry_safe(cl_pos, cl_next, &dev->file_list, link) {
		/* timer_count is armed when a connect/disconnect request is
		 * sent; reaching zero means the response never came */
		if (cl_pos->timer_count) {
			if (--cl_pos->timer_count == 0) {
				dev_err(&dev->pdev->dev, "reset: connect/disconnect timeout.\n");
				mei_reset(dev, 1);
				goto out;
			}
		}
	}

	if (dev->iamthif_stall_timer) {
		if (--dev->iamthif_stall_timer == 0) {
			dev_err(&dev->pdev->dev, "reset: amthif  hanged.\n");
			mei_reset(dev, 1);
			/* drop all amthif state and restart its command queue
			 * after the reset */
			dev->iamthif_msg_buf_size = 0;
			dev->iamthif_msg_buf_index = 0;
			dev->iamthif_canceled = false;
			dev->iamthif_ioctl = true;
			dev->iamthif_state = MEI_IAMTHIF_IDLE;
			dev->iamthif_timer = 0;

			mei_io_cb_free(dev->iamthif_current_cb);
			dev->iamthif_current_cb = NULL;

			dev->iamthif_file_object = NULL;
			mei_amthif_run_next_cmd(dev);
		}
	}

	if (dev->iamthif_timer) {

		timeout = dev->iamthif_timer +
			mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);

		dev_dbg(&dev->pdev->dev, "dev->iamthif_timer = %ld\n",
				dev->iamthif_timer);
		dev_dbg(&dev->pdev->dev, "timeout = %ld\n", timeout);
		dev_dbg(&dev->pdev->dev, "jiffies = %ld\n", jiffies);
		if (time_after(jiffies, timeout)) {
			/*
			 * User didn't read the AMTHI data on time (15sec)
			 * freeing AMTHI for other requests
			 */

			dev_dbg(&dev->pdev->dev, "freeing AMTHI for other requests\n");
			/* unlink only the cbs that belong to the amthif
			 * client; other readers keep their completed data */
			list_for_each_entry_safe(cb_pos, cb_next,
				&dev->amthif_rd_complete_list.list, list) {

				cl_pos = cb_pos->file_object->private_data;

				/* Finding the AMTHI entry. */
				if (cl_pos == &dev->iamthif_cl)
					list_del(&cb_pos->list);
			}
			mei_io_cb_free(dev->iamthif_current_cb);
			dev->iamthif_current_cb = NULL;

			dev->iamthif_file_object->private_data = NULL;
			dev->iamthif_file_object = NULL;
			dev->iamthif_timer = 0;
			mei_amthif_run_next_cmd(dev);

		}
	}
out:
	/* re-arm: run again in 2 seconds */
	schedule_delayed_work(&dev->timer_work, 2 * HZ);
	mutex_unlock(&dev->device_lock);
}