main.c

/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/aio.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/uuid.h>
#include <linux/compat.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>

#include "mei_dev.h"
#include <linux/mei.h>
#include "interface.h"
/* AMT device is a singleton on the platform */
static struct pci_dev *mei_pdev;

/* mei_pci_tbl - PCI Device ID Table */
static DEFINE_PCI_DEVICE_TABLE(mei_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82946GZ)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G35)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82Q965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GM965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GME965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q35)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82G33)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q33)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82X38)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_3200)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_6)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_7)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_8)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_9)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_10)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_3)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_4)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_3)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_4)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_CPT_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PBG_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_LP)},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, mei_pci_tbl);
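
/* mei_mutex serializes mei_probe() against the mei_pdev singleton check */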
static DEFINE_MUTEX(mei_mutex);

/**
 * mei_clear_list - removes all callbacks associated with file
 *		from mei_cb_list
 *
 * @dev: device structure.
 * @file: file structure
 * @mei_cb_list: callbacks list
 *
 * mei_clear_list is called to clear resources associated with file
 * when application calls close function or Ctrl-C was pressed
 *
 * returns true if callback removed from the list, false otherwise
 */
static bool mei_clear_list(struct mei_device *dev,
		struct file *file, struct list_head *mei_cb_list)
{
	struct mei_cl_cb *cb_pos = NULL;
	struct mei_cl_cb *cb_next = NULL;
	struct file *file_temp;
	bool removed = false;

	/* walk all list members */
	list_for_each_entry_safe(cb_pos, cb_next, mei_cb_list, list) {
		file_temp = (struct file *)cb_pos->file_object;
		/* check if list member associated with a file */
		if (file_temp == file) {
			/* remove member from the list */
			list_del(&cb_pos->list);
			/* check if cb equal to current iamthif cb */
			if (dev->iamthif_current_cb == cb_pos) {
				dev->iamthif_current_cb = NULL;
				/* send flow control to iamthif client */
				mei_send_flow_control(dev, &dev->iamthif_cl);
			}
			/* free all allocated buffers */
			mei_io_cb_free(cb_pos);
			cb_pos = NULL;
			removed = true;
		}
	}
	return removed;
}
/**
 * mei_clear_lists - removes all callbacks associated with file
 *
 * @dev: device structure
 * @file: file structure
 *
 * mei_clear_lists is called to clear resources associated with file
 * when application calls close function or Ctrl-C was pressed
 *
 * returns true if callback removed from the list, false otherwise
 */
static bool mei_clear_lists(struct mei_device *dev, struct file *file)
{
	bool removed = false;

	/* remove callbacks associated with a file */
	mei_clear_list(dev, file, &dev->amthi_cmd_list.list);
	if (mei_clear_list(dev, file,
			   &dev->amthi_read_complete_list.list))
		removed = true;

	mei_clear_list(dev, file, &dev->ctrl_rd_list.list);
	if (mei_clear_list(dev, file, &dev->ctrl_wr_list.list))
		removed = true;

	if (mei_clear_list(dev, file, &dev->write_waiting_list.list))
		removed = true;

	if (mei_clear_list(dev, file, &dev->write_list.list))
		removed = true;

	/* check if iamthif_current_cb not NULL */
	if (dev->iamthif_current_cb && !removed) {
		/* check file and iamthif current cb association */
		if (dev->iamthif_current_cb->file_object == file) {
			/* remove cb */
			mei_io_cb_free(dev->iamthif_current_cb);
			dev->iamthif_current_cb = NULL;
			removed = true;
		}
	}
	return removed;
}
/**
 * find_read_list_entry - find read list entry
 *
 * @dev: device structure
 * @cl: host client structure
 *
 * returns cb on success, NULL on error
 */
static struct mei_cl_cb *find_read_list_entry(
		struct mei_device *dev,
		struct mei_cl *cl)
{
	struct mei_cl_cb *pos = NULL;
	struct mei_cl_cb *next = NULL;

	dev_dbg(&dev->pdev->dev, "remove read_list CB\n");
	list_for_each_entry_safe(pos, next, &dev->read_list.list, list) {
		struct mei_cl *cl_temp;
		cl_temp = (struct mei_cl *)pos->file_private;

		if (mei_cl_cmp_id(cl, cl_temp))
			return pos;
	}
	return NULL;
}
/**
 * mei_open - the open function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * returns 0 on success, <0 on error
 */
static int mei_open(struct inode *inode, struct file *file)
{
	struct mei_cl *cl;
	struct mei_device *dev;
	unsigned long cl_id;
	int err;

	err = -ENODEV;
	if (!mei_pdev)
		goto out;

	dev = pci_get_drvdata(mei_pdev);
	if (!dev)
		goto out;

	mutex_lock(&dev->device_lock);
	err = -ENOMEM;
	cl = mei_cl_allocate(dev);
	if (!cl)
		goto out_unlock;

	err = -ENODEV;
	if (dev->dev_state != MEI_DEV_ENABLED) {
		dev_dbg(&dev->pdev->dev, "dev_state != MEI_ENABLED dev_state = %s\n",
			mei_dev_state_str(dev->dev_state));
		goto out_unlock;
	}
	err = -EMFILE;
	if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
		dev_err(&dev->pdev->dev, "open_handle_count exceeded %d",
			MEI_MAX_OPEN_HANDLE_COUNT);
		goto out_unlock;
	}

	cl_id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
	if (cl_id >= MEI_CLIENTS_MAX) {
		dev_err(&dev->pdev->dev, "client_id exceeded %d",
			MEI_CLIENTS_MAX);
		goto out_unlock;
	}
	cl->host_client_id = cl_id;

	dev_dbg(&dev->pdev->dev, "client_id = %d\n", cl->host_client_id);

	dev->open_handle_count++;

	list_add_tail(&cl->link, &dev->file_list);

	set_bit(cl->host_client_id, dev->host_clients_map);
	cl->state = MEI_FILE_INITIALIZING;
	cl->sm_state = 0;

	file->private_data = cl;
	mutex_unlock(&dev->device_lock);

	return nonseekable_open(inode, file);

out_unlock:
	mutex_unlock(&dev->device_lock);
	kfree(cl);
out:
	return err;
}
/**
 * mei_release - the release function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * returns 0 on success, <0 on error
 */
static int mei_release(struct inode *inode, struct file *file)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *cb;
	struct mei_device *dev;
	int rets = 0;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);
	if (cl != &dev->iamthif_cl) {
		if (cl->state == MEI_FILE_CONNECTED) {
			cl->state = MEI_FILE_DISCONNECTING;
			dev_dbg(&dev->pdev->dev,
				"disconnecting client host client = %d, "
				"ME client = %d\n",
				cl->host_client_id,
				cl->me_client_id);
			rets = mei_disconnect_host_client(dev, cl);
		}
		mei_cl_flush_queues(cl);
		dev_dbg(&dev->pdev->dev, "remove client host client = %d, ME client = %d\n",
			cl->host_client_id,
			cl->me_client_id);

		if (dev->open_handle_count > 0) {
			clear_bit(cl->host_client_id, dev->host_clients_map);
			dev->open_handle_count--;
		}
		mei_remove_client_from_file_list(dev, cl->host_client_id);

		/* free read cb */
		cb = NULL;
		if (cl->read_cb) {
			cb = find_read_list_entry(dev, cl);
			/* Remove entry from read list */
			if (cb)
				list_del(&cb->list);
			cb = cl->read_cb;
			cl->read_cb = NULL;
		}

		file->private_data = NULL;

		if (cb) {
			mei_io_cb_free(cb);
			cb = NULL;
		}

		kfree(cl);
	} else {
		if (dev->open_handle_count > 0)
			dev->open_handle_count--;

		if (dev->iamthif_file_object == file &&
		    dev->iamthif_state != MEI_IAMTHIF_IDLE) {
			dev_dbg(&dev->pdev->dev, "amthi canceled iamthif state %d\n",
				dev->iamthif_state);
			dev->iamthif_canceled = true;
			if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE) {
				dev_dbg(&dev->pdev->dev, "run next amthi iamthif cb\n");
				mei_run_next_iamthif_cmd(dev);
			}
		}

		if (mei_clear_lists(dev, file))
			dev->iamthif_state = MEI_IAMTHIF_IDLE;
	}
	mutex_unlock(&dev->device_lock);
	return rets;
}
/**
 * mei_read - the read function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * returns >=0 data length on success, <0 on error
 */
static ssize_t mei_read(struct file *file, char __user *ubuf,
			size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *cb_pos = NULL;
	struct mei_cl_cb *cb = NULL;
	struct mei_device *dev;
	int i;
	int rets;
	int err;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	if ((cl->sm_state & MEI_WD_STATE_INDEPENDENCE_MSG_SENT) == 0) {
		/* Do not allow to read watchdog client */
		i = mei_me_cl_by_uuid(dev, &mei_wd_guid);
		if (i >= 0) {
			struct mei_me_client *me_client = &dev->me_clients[i];
			if (cl->me_client_id == me_client->client_id) {
				rets = -EBADF;
				goto out;
			}
		}
	} else {
		cl->sm_state &= ~MEI_WD_STATE_INDEPENDENCE_MSG_SENT;
	}

	if (cl == &dev->iamthif_cl) {
		rets = amthi_read(dev, file, ubuf, length, offset);
		goto out;
	}

	/* serve the read from an already completed callback if one is
	 * buffered, otherwise start a new read below */
	if (cl->read_cb && cl->read_cb->buf_idx > *offset) {
		cb = cl->read_cb;
		goto copy_buffer;
	} else if (cl->read_cb && cl->read_cb->buf_idx > 0 &&
		   cl->read_cb->buf_idx <= *offset) {
		cb = cl->read_cb;
		rets = 0;
		goto free;
	} else if ((!cl->read_cb || !cl->read_cb->buf_idx) && *offset > 0) {
		/* Offset needs to be cleaned for contiguous reads */
		*offset = 0;
		rets = 0;
		goto out;
	}

	err = mei_start_read(dev, cl);
	if (err && err != -EBUSY) {
		dev_dbg(&dev->pdev->dev,
			"mei start read failure with status = %d\n", err);
		rets = err;
		goto out;
	}

	if (MEI_READ_COMPLETE != cl->reading_state &&
	    !waitqueue_active(&cl->rx_wait)) {
		if (file->f_flags & O_NONBLOCK) {
			rets = -EAGAIN;
			goto out;
		}
		mutex_unlock(&dev->device_lock);

		if (wait_event_interruptible(cl->rx_wait,
				(MEI_READ_COMPLETE == cl->reading_state ||
				 MEI_FILE_INITIALIZING == cl->state ||
				 MEI_FILE_DISCONNECTED == cl->state ||
				 MEI_FILE_DISCONNECTING == cl->state))) {
			if (signal_pending(current))
				return -EINTR;
			return -ERESTARTSYS;
		}

		mutex_lock(&dev->device_lock);
		if (MEI_FILE_INITIALIZING == cl->state ||
		    MEI_FILE_DISCONNECTED == cl->state ||
		    MEI_FILE_DISCONNECTING == cl->state) {
			rets = -EBUSY;
			goto out;
		}
	}

	cb = cl->read_cb;

	if (!cb) {
		rets = -ENODEV;
		goto out;
	}
	if (cl->reading_state != MEI_READ_COMPLETE) {
		rets = 0;
		goto out;
	}
	/* now copy the data to user space */
copy_buffer:
	dev_dbg(&dev->pdev->dev, "cb->response_buffer size - %d\n",
		cb->response_buffer.size);
	dev_dbg(&dev->pdev->dev, "cb->buf_idx - %lu\n", cb->buf_idx);
	if (length == 0 || ubuf == NULL || *offset > cb->buf_idx) {
		rets = -EMSGSIZE;
		goto free;
	}

	/* length is being truncated to PAGE_SIZE,
	 * however buf_idx may point beyond that */
	length = min_t(size_t, length, cb->buf_idx - *offset);

	if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) {
		rets = -EFAULT;
		goto free;
	}

	rets = length;
	*offset += length;
	if ((unsigned long)*offset < cb->buf_idx)
		goto out;

free:
	cb_pos = find_read_list_entry(dev, cl);
	/* Remove entry from read list */
	if (cb_pos)
		list_del(&cb_pos->list);
	mei_io_cb_free(cb);
	cl->reading_state = MEI_IDLE;
	cl->read_cb = NULL;
	cl->read_pending = 0;
out:
	dev_dbg(&dev->pdev->dev, "end mei read rets= %d\n", rets);
	mutex_unlock(&dev->device_lock);
	return rets;
}
/**
 * mei_write - the write function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * returns >=0 data length on success, <0 on error
 */
static ssize_t mei_write(struct file *file, const char __user *ubuf,
			 size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *write_cb = NULL;
	struct mei_msg_hdr mei_hdr;
	struct mei_device *dev;
	unsigned long timeout = 0;
	int rets;
	int i;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto unlock_dev;
	}

	i = mei_me_cl_by_id(dev, cl->me_client_id);
	if (i < 0) {
		rets = -ENODEV;
		goto unlock_dev;
	}
	if (length > dev->me_clients[i].props.max_msg_length || length <= 0) {
		rets = -EMSGSIZE;
		goto unlock_dev;
	}

	if (cl->state != MEI_FILE_CONNECTED) {
		rets = -ENODEV;
		dev_err(&dev->pdev->dev, "host client = %d, is not connected to ME client = %d",
			cl->host_client_id, cl->me_client_id);
		goto unlock_dev;
	}
	if (cl == &dev->iamthif_cl) {
		write_cb = find_amthi_read_list_entry(dev, file);

		if (write_cb) {
			timeout = write_cb->read_time +
				msecs_to_jiffies(IAMTHIF_READ_TIMER);

			if (time_after(jiffies, timeout) ||
			    cl->reading_state == MEI_READ_COMPLETE) {
				*offset = 0;
				list_del(&write_cb->list);
				mei_io_cb_free(write_cb);
				write_cb = NULL;
			}
		}
	}

	/* free entry used in read */
	if (cl->reading_state == MEI_READ_COMPLETE) {
		*offset = 0;
		write_cb = find_read_list_entry(dev, cl);
		if (write_cb) {
			list_del(&write_cb->list);
			mei_io_cb_free(write_cb);
			write_cb = NULL;
			cl->reading_state = MEI_IDLE;
			cl->read_cb = NULL;
			cl->read_pending = 0;
		}
	} else if (cl->reading_state == MEI_IDLE && !cl->read_pending)
		*offset = 0;

	write_cb = mei_io_cb_init(cl, file);
	if (!write_cb) {
		dev_err(&dev->pdev->dev, "write cb allocation failed\n");
		rets = -ENOMEM;
		goto unlock_dev;
	}
	rets = mei_io_cb_alloc_req_buf(write_cb, length);
	if (rets)
		goto unlock_dev;

	dev_dbg(&dev->pdev->dev, "cb request size = %zd\n", length);

	rets = copy_from_user(write_cb->request_buffer.data, ubuf, length);
	if (rets)
		goto unlock_dev;

	cl->sm_state = 0;
	if (length == 4 &&
	    ((memcmp(mei_wd_state_independence_msg[0],
		     write_cb->request_buffer.data, 4) == 0) ||
	     (memcmp(mei_wd_state_independence_msg[1],
		     write_cb->request_buffer.data, 4) == 0) ||
	     (memcmp(mei_wd_state_independence_msg[2],
		     write_cb->request_buffer.data, 4) == 0)))
		cl->sm_state |= MEI_WD_STATE_INDEPENDENCE_MSG_SENT;

	if (cl == &dev->iamthif_cl) {
		rets = mei_io_cb_alloc_resp_buf(write_cb, dev->iamthif_mtu);
		if (rets)
			goto unlock_dev;

		write_cb->major_file_operations = MEI_IOCTL;

		if (!list_empty(&dev->amthi_cmd_list.list) ||
		    dev->iamthif_state != MEI_IAMTHIF_IDLE) {
			dev_dbg(&dev->pdev->dev, "amthi_state = %d\n",
				(int) dev->iamthif_state);
			dev_dbg(&dev->pdev->dev, "add amthi cb to amthi cmd waiting list\n");
			list_add_tail(&write_cb->list, &dev->amthi_cmd_list.list);
		} else {
			dev_dbg(&dev->pdev->dev, "call amthi write\n");
			rets = amthi_write(dev, write_cb);

			if (rets) {
				dev_err(&dev->pdev->dev, "amthi write failed with status = %d\n",
					rets);
				goto unlock_dev;
			}
		}
		mutex_unlock(&dev->device_lock);
		return length;
	}

	write_cb->major_file_operations = MEI_WRITE;

	dev_dbg(&dev->pdev->dev, "host client = %d, ME client = %d\n",
		cl->host_client_id, cl->me_client_id);
	rets = mei_flow_ctrl_creds(dev, cl);
	if (rets < 0)
		goto unlock_dev;

	if (rets && dev->mei_host_buffer_is_empty) {
		rets = 0;
		dev->mei_host_buffer_is_empty = false;
		if (length > mei_hbuf_max_data(dev)) {
			mei_hdr.length = mei_hbuf_max_data(dev);
			mei_hdr.msg_complete = 0;
		} else {
			mei_hdr.length = length;
			mei_hdr.msg_complete = 1;
		}
		mei_hdr.host_addr = cl->host_client_id;
		mei_hdr.me_addr = cl->me_client_id;
		mei_hdr.reserved = 0;
		dev_dbg(&dev->pdev->dev, "call mei_write_message header=%08x.\n",
			*((u32 *) &mei_hdr));
		if (mei_write_message(dev, &mei_hdr,
				      (unsigned char *) (write_cb->request_buffer.data),
				      mei_hdr.length)) {
			rets = -ENODEV;
			goto unlock_dev;
		}
		cl->writing_state = MEI_WRITING;
		write_cb->buf_idx = mei_hdr.length;
		if (mei_hdr.msg_complete) {
			if (mei_flow_ctrl_reduce(dev, cl)) {
				rets = -ENODEV;
				goto unlock_dev;
			}
			list_add_tail(&write_cb->list, &dev->write_waiting_list.list);
		} else {
			list_add_tail(&write_cb->list, &dev->write_list.list);
		}
	} else {
		write_cb->buf_idx = 0;
		cl->writing_state = MEI_WRITING;
		list_add_tail(&write_cb->list, &dev->write_list.list);
	}
	mutex_unlock(&dev->device_lock);
	return length;

unlock_dev:
	mutex_unlock(&dev->device_lock);
	mei_io_cb_free(write_cb);
	return rets;
}
/**
 * mei_ioctl - the IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure
 *
 * returns 0 on success, <0 on error
 */
static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
{
	struct mei_device *dev;
	struct mei_cl *cl = file->private_data;
	struct mei_connect_client_data *connect_data = NULL;
	int rets;

	if (cmd != IOCTL_MEI_CONNECT_CLIENT)
		return -EINVAL;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	dev_dbg(&dev->pdev->dev, "IOCTL cmd = 0x%x", cmd);

	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	dev_dbg(&dev->pdev->dev, ": IOCTL_MEI_CONNECT_CLIENT.\n");

	connect_data = kzalloc(sizeof(struct mei_connect_client_data),
			       GFP_KERNEL);
	if (!connect_data) {
		rets = -ENOMEM;
		goto out;
	}
	dev_dbg(&dev->pdev->dev, "copy connect data from user\n");
	if (copy_from_user(connect_data, (char __user *)data,
			   sizeof(struct mei_connect_client_data))) {
		dev_dbg(&dev->pdev->dev, "failed to copy data from userland\n");
		rets = -EFAULT;
		goto out;
	}
	rets = mei_ioctl_connect_client(file, connect_data);

	/* if all is ok, copying the data back to user. */
	if (rets)
		goto out;

	dev_dbg(&dev->pdev->dev, "copy connect data to user\n");
	if (copy_to_user((char __user *)data, connect_data,
			 sizeof(struct mei_connect_client_data))) {
		dev_dbg(&dev->pdev->dev, "failed to copy data to userland\n");
		rets = -EFAULT;
		goto out;
	}

out:
	kfree(connect_data);
	mutex_unlock(&dev->device_lock);
	return rets;
}
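
/*
 * For reference, a minimal userspace sketch of the flow this ioctl enables
 * (illustrative only, not part of the driver; assumes <linux/mei.h> and a
 * hypothetical firmware client UUID my_client_uuid):
 *
 *	int fd = open("/dev/mei", O_RDWR);
 *	struct mei_connect_client_data data = { };
 *
 *	memcpy(&data.in_client_uuid, &my_client_uuid,
 *	       sizeof(data.in_client_uuid));
 *	if (ioctl(fd, IOCTL_MEI_CONNECT_CLIENT, &data) == 0) {
 *		// data.out_client_properties.max_msg_length bounds writes
 *		write(fd, req, req_len);
 *		read(fd, resp, sizeof(resp));
 *	}
 *	close(fd);
 */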
/**
 * mei_compat_ioctl - the compat IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure
 *
 * returns 0 on success, <0 on error
 */
#ifdef CONFIG_COMPAT
static long mei_compat_ioctl(struct file *file,
			     unsigned int cmd, unsigned long data)
{
	return mei_ioctl(file, cmd, (unsigned long)compat_ptr(data));
}
#endif
/**
 * mei_poll - the poll function
 *
 * @file: pointer to file structure
 * @wait: pointer to poll_table structure
 *
 * returns poll mask
 */
static unsigned int mei_poll(struct file *file, poll_table *wait)
{
	struct mei_cl *cl = file->private_data;
	struct mei_device *dev;
	unsigned int mask = 0;

	if (WARN_ON(!cl || !cl->dev))
		return mask;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	if (dev->dev_state != MEI_DEV_ENABLED)
		goto out;

	if (cl == &dev->iamthif_cl) {
		mutex_unlock(&dev->device_lock);
		poll_wait(file, &dev->iamthif_cl.wait, wait);
		mutex_lock(&dev->device_lock);
		if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE &&
		    dev->iamthif_file_object == file) {
			mask |= (POLLIN | POLLRDNORM);
			dev_dbg(&dev->pdev->dev, "run next amthi cb\n");
			mei_run_next_iamthif_cmd(dev);
		}
		goto out;
	}

	mutex_unlock(&dev->device_lock);
	poll_wait(file, &cl->tx_wait, wait);
	mutex_lock(&dev->device_lock);
	if (MEI_WRITE_COMPLETE == cl->writing_state)
		mask |= (POLLIN | POLLRDNORM);

out:
	mutex_unlock(&dev->device_lock);
	return mask;
}
/*
 * file operations structure will be used for mei char device.
 */
static const struct file_operations mei_fops = {
	.owner = THIS_MODULE,
	.read = mei_read,
	.unlocked_ioctl = mei_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = mei_compat_ioctl,
#endif
	.open = mei_open,
	.release = mei_release,
	.write = mei_write,
	.poll = mei_poll,
	.llseek = no_llseek
};

/*
 * Misc Device Struct
 */
static struct miscdevice mei_misc_device = {
	.name = "mei",
	.fops = &mei_fops,
	.minor = MISC_DYNAMIC_MINOR,
};
/**
 * mei_quirk_probe - probe for devices that don't have a valid ME interface
 *
 * @pdev: PCI device structure
 * @ent: entry into pci_device_table
 *
 * returns true if ME Interface is valid, false otherwise
 */
static bool __devinit mei_quirk_probe(struct pci_dev *pdev,
				      const struct pci_device_id *ent)
{
	u32 reg;

	if (ent->device == MEI_DEV_ID_PBG_1) {
		pci_read_config_dword(pdev, 0x48, &reg);
		/* make sure that bit 9 is up and bit 10 is down */
		if ((reg & 0x600) == 0x200) {
			dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
			return false;
		}
	}
	return true;
}
/**
 * mei_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_pci_tbl
 *
 * returns 0 on success, <0 on failure.
 */
static int __devinit mei_probe(struct pci_dev *pdev,
			       const struct pci_device_id *ent)
{
	struct mei_device *dev;
	int err;

	mutex_lock(&mei_mutex);

	if (!mei_quirk_probe(pdev, ent)) {
		err = -ENODEV;
		goto end;
	}

	if (mei_pdev) {
		err = -EEXIST;
		goto end;
	}
	/* enable pci dev */
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering */
	pci_set_master(pdev);
	/* pci request regions for mei driver */
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto disable_device;
	}
	/* allocates and initializes the mei dev structure */
	dev = mei_device_init(pdev);
	if (!dev) {
		err = -ENOMEM;
		goto release_regions;
	}
	/* mapping IO device memory */
	dev->mem_addr = pci_iomap(pdev, 0, 0);
	if (!dev->mem_addr) {
		dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
		err = -ENOMEM;
		goto free_device;
	}
	pci_enable_msi(pdev);

	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_interrupt_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_interrupt_quick_handler,
			mei_interrupt_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
			pdev->irq);
		goto disable_msi;
	}
	INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
	if (mei_hw_init(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	err = misc_register(&mei_misc_device);
	if (err)
		goto release_irq;

	mei_pdev = pdev;
	pci_set_drvdata(pdev, dev);

	schedule_delayed_work(&dev->timer_work, HZ);

	mutex_unlock(&mei_mutex);

	pr_debug("initialization successful.\n");

	return 0;

release_irq:
	/* disable interrupts */
	dev->host_hw_state = mei_hcsr_read(dev);
	mei_disable_interrupts(dev);
	flush_scheduled_work();
	free_irq(pdev->irq, dev);
disable_msi:
	pci_disable_msi(pdev);
	pci_iounmap(pdev, dev->mem_addr);
free_device:
	kfree(dev);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
end:
	mutex_unlock(&mei_mutex);
	dev_err(&pdev->dev, "initialization failed.\n");
	return err;
}
/**
 * mei_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void __devexit mei_remove(struct pci_dev *pdev)
{
	struct mei_device *dev;

	if (mei_pdev != pdev)
		return;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return;

	mutex_lock(&dev->device_lock);

	cancel_delayed_work(&dev->timer_work);

	mei_wd_stop(dev);

	mei_pdev = NULL;

	if (dev->iamthif_cl.state == MEI_FILE_CONNECTED) {
		dev->iamthif_cl.state = MEI_FILE_DISCONNECTING;
		mei_disconnect_host_client(dev, &dev->iamthif_cl);
	}
	if (dev->wd_cl.state == MEI_FILE_CONNECTED) {
		dev->wd_cl.state = MEI_FILE_DISCONNECTING;
		mei_disconnect_host_client(dev, &dev->wd_cl);
	}

	/* Unregistering watchdog device */
	mei_watchdog_unregister(dev);

	/* remove entry if already in list */
	dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n");
	mei_remove_client_from_file_list(dev, dev->wd_cl.host_client_id);
	mei_remove_client_from_file_list(dev, dev->iamthif_cl.host_client_id);

	dev->iamthif_current_cb = NULL;
	dev->me_clients_num = 0;

	mutex_unlock(&dev->device_lock);

	flush_scheduled_work();

	/* disable interrupts */
	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);
	pci_set_drvdata(pdev, NULL);

	if (dev->mem_addr)
		pci_iounmap(pdev, dev->mem_addr);

	kfree(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	misc_deregister(&mei_misc_device);
}
#ifdef CONFIG_PM
static int mei_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev = pci_get_drvdata(pdev);
	int err;

	if (!dev)
		return -ENODEV;
	mutex_lock(&dev->device_lock);

	cancel_delayed_work(&dev->timer_work);

	/* Stop watchdog if exists */
	err = mei_wd_stop(dev);
	/* Set new mei state */
	if (dev->dev_state == MEI_DEV_ENABLED ||
	    dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET) {
		dev->dev_state = MEI_DEV_POWER_DOWN;
		mei_reset(dev, 0);
	}
	mutex_unlock(&dev->device_lock);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	return err;
}

static int mei_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	int err;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	pci_enable_msi(pdev);

	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_interrupt_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_interrupt_quick_handler,
			mei_interrupt_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
			pdev->irq);
		return err;
	}

	mutex_lock(&dev->device_lock);
	dev->dev_state = MEI_DEV_POWER_UP;
	mei_reset(dev, 1);
	mutex_unlock(&dev->device_lock);

	/* Start timer if stopped in suspend */
	schedule_delayed_work(&dev->timer_work, HZ);

	return err;
}

static SIMPLE_DEV_PM_OPS(mei_pm_ops, mei_pci_suspend, mei_pci_resume);
#define MEI_PM_OPS	(&mei_pm_ops)
#else
#define MEI_PM_OPS	NULL
#endif /* CONFIG_PM */
/*
 * PCI driver structure
 */
static struct pci_driver mei_driver = {
	.name = KBUILD_MODNAME,
	.id_table = mei_pci_tbl,
	.probe = mei_probe,
	.remove = __devexit_p(mei_remove),
	.shutdown = __devexit_p(mei_remove),
	.driver.pm = MEI_PM_OPS,
};

module_pci_driver(mei_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");