main.c

/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/aio.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/uuid.h>
#include <linux/compat.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>

#include "mei_dev.h"
#include <linux/mei.h>
#include "interface.h"
static const char mei_driver_name[] = "mei";

/* The device pointer */
/* Currently this driver works as long as there is only a single AMT device. */
struct pci_dev *mei_device;

/* mei_pci_tbl - PCI Device ID Table */
static DEFINE_PCI_DEVICE_TABLE(mei_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82946GZ)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G35)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82Q965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GM965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GME965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q35)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82G33)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q33)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82X38)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_3200)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_6)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_7)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_8)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_9)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_10)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_3)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_4)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_3)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_4)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_CPT_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PBG_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, mei_pci_tbl);

static DEFINE_MUTEX(mei_mutex);
/**
 * mei_clear_list - removes all callbacks associated with file
 *		from mei_cb_list
 *
 * @dev: device structure.
 * @file: file structure
 * @mei_cb_list: callbacks list
 *
 * mei_clear_list is called to clear resources associated with file
 * when the application closes the file or is interrupted (Ctrl-C)
 *
 * returns true if a callback was removed from the list, false otherwise
 */
static bool mei_clear_list(struct mei_device *dev,
		struct file *file, struct list_head *mei_cb_list)
{
	struct mei_cl_cb *cb_pos = NULL;
	struct mei_cl_cb *cb_next = NULL;
	struct file *file_temp;
	bool removed = false;

	/* walk all list members */
	list_for_each_entry_safe(cb_pos, cb_next, mei_cb_list, cb_list) {
		file_temp = (struct file *)cb_pos->file_object;
		/* check if this list member is associated with the file */
		if (file_temp == file) {
			/* remove member from the list */
			list_del(&cb_pos->cb_list);
			/* check if cb equals the current iamthif cb */
			if (dev->iamthif_current_cb == cb_pos) {
				dev->iamthif_current_cb = NULL;
				/* send flow control to iamthif client */
				mei_send_flow_control(dev, &dev->iamthif_cl);
			}
			/* free all allocated buffers */
			mei_free_cb_private(cb_pos);
			cb_pos = NULL;
			removed = true;
		}
	}
	return removed;
}
/**
 * mei_clear_lists - removes all callbacks associated with file
 *
 * @dev: device structure
 * @file: file structure
 *
 * mei_clear_lists is called to clear resources associated with file
 * when the application closes the file or is interrupted (Ctrl-C)
 *
 * returns true if a callback was removed from any list, false otherwise
 */
static bool mei_clear_lists(struct mei_device *dev, struct file *file)
{
	bool removed = false;

	/* remove callbacks associated with a file */
	mei_clear_list(dev, file, &dev->amthi_cmd_list.mei_cb.cb_list);
	if (mei_clear_list(dev, file,
			   &dev->amthi_read_complete_list.mei_cb.cb_list))
		removed = true;

	mei_clear_list(dev, file, &dev->ctrl_rd_list.mei_cb.cb_list);

	if (mei_clear_list(dev, file, &dev->ctrl_wr_list.mei_cb.cb_list))
		removed = true;

	if (mei_clear_list(dev, file, &dev->write_waiting_list.mei_cb.cb_list))
		removed = true;

	if (mei_clear_list(dev, file, &dev->write_list.mei_cb.cb_list))
		removed = true;

	/* check if iamthif_current_cb not NULL */
	if (dev->iamthif_current_cb && !removed) {
		/* check file and iamthif current cb association */
		if (dev->iamthif_current_cb->file_object == file) {
			/* remove cb */
			mei_free_cb_private(dev->iamthif_current_cb);
			dev->iamthif_current_cb = NULL;
			removed = true;
		}
	}
	return removed;
}
/**
 * find_read_list_entry - find read list entry
 *
 * @dev: device structure
 * @cl: host client structure
 *
 * returns cb on success, NULL on error
 */
static struct mei_cl_cb *find_read_list_entry(
		struct mei_device *dev,
		struct mei_cl *cl)
{
	struct mei_cl_cb *pos = NULL;
	struct mei_cl_cb *next = NULL;

	dev_dbg(&dev->pdev->dev, "remove read_list CB\n");
	list_for_each_entry_safe(pos, next,
			&dev->read_list.mei_cb.cb_list, cb_list) {
		struct mei_cl *cl_temp;
		cl_temp = (struct mei_cl *)pos->file_private;

		if (mei_cl_cmp_id(cl, cl_temp))
			return pos;
	}
	return NULL;
}
/**
 * mei_open - the open function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * returns 0 on success, <0 on error
 */
static int mei_open(struct inode *inode, struct file *file)
{
	struct mei_cl *cl;
	struct mei_device *dev;
	unsigned long cl_id;
	int err;

	err = -ENODEV;
	if (!mei_device)
		goto out;

	dev = pci_get_drvdata(mei_device);
	if (!dev)
		goto out;

	mutex_lock(&dev->device_lock);
	err = -ENOMEM;
	cl = mei_cl_allocate(dev);
	if (!cl)
		goto out_unlock;

	err = -ENODEV;
	if (dev->mei_state != MEI_ENABLED) {
		dev_dbg(&dev->pdev->dev, "mei_state != MEI_ENABLED mei_state= %d\n",
			dev->mei_state);
		goto out_unlock;
	}
	err = -EMFILE;
	if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT)
		goto out_unlock;

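	/* pick the first unused host client id from the id bitmap */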
	cl_id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
	if (cl_id >= MEI_CLIENTS_MAX)
		goto out_unlock;

	cl->host_client_id = cl_id;

	dev_dbg(&dev->pdev->dev, "client_id = %d\n", cl->host_client_id);

	dev->open_handle_count++;

	list_add_tail(&cl->link, &dev->file_list);

	set_bit(cl->host_client_id, dev->host_clients_map);
	cl->state = MEI_FILE_INITIALIZING;
	cl->sm_state = 0;

	file->private_data = cl;
	mutex_unlock(&dev->device_lock);

	return nonseekable_open(inode, file);

out_unlock:
	mutex_unlock(&dev->device_lock);
	kfree(cl);
out:
	return err;
}
/**
 * mei_release - the release function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * returns 0 on success, <0 on error
 */
static int mei_release(struct inode *inode, struct file *file)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *cb;
	struct mei_device *dev;
	int rets = 0;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);
	if (cl != &dev->iamthif_cl) {
		if (cl->state == MEI_FILE_CONNECTED) {
			cl->state = MEI_FILE_DISCONNECTING;
			dev_dbg(&dev->pdev->dev,
				"disconnecting client host client = %d, "
				"ME client = %d\n",
				cl->host_client_id,
				cl->me_client_id);
			rets = mei_disconnect_host_client(dev, cl);
		}
		mei_cl_flush_queues(cl);
		dev_dbg(&dev->pdev->dev, "remove client host client = %d, ME client = %d\n",
			cl->host_client_id,
			cl->me_client_id);

		if (dev->open_handle_count > 0) {
			clear_bit(cl->host_client_id, dev->host_clients_map);
			dev->open_handle_count--;
		}
		mei_remove_client_from_file_list(dev, cl->host_client_id);

		/* free read cb */
		cb = NULL;
		if (cl->read_cb) {
			cb = find_read_list_entry(dev, cl);
			/* Remove entry from read list */
			if (cb)
				list_del(&cb->cb_list);
			cb = cl->read_cb;
			cl->read_cb = NULL;
		}

		file->private_data = NULL;

		if (cb) {
			mei_free_cb_private(cb);
			cb = NULL;
		}

		kfree(cl);
	} else {
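		/*
		 * cl is the shared iamthif (AMT host interface) client:
		 * only drop the handle count and cancel any request this
		 * file still has queued.
		 */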
		if (dev->open_handle_count > 0)
			dev->open_handle_count--;

		if (dev->iamthif_file_object == file &&
		    dev->iamthif_state != MEI_IAMTHIF_IDLE) {
			dev_dbg(&dev->pdev->dev, "amthi canceled iamthif state %d\n",
				dev->iamthif_state);
			dev->iamthif_canceled = true;
			if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE) {
				dev_dbg(&dev->pdev->dev, "run next amthi iamthif cb\n");
				mei_run_next_iamthif_cmd(dev);
			}
		}

		if (mei_clear_lists(dev, file))
			dev->iamthif_state = MEI_IAMTHIF_IDLE;

	}
	mutex_unlock(&dev->device_lock);
	return rets;
}
/**
 * mei_read - the read function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * returns >=0 data length on success, <0 on error
 */
static ssize_t mei_read(struct file *file, char __user *ubuf,
			size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *cb_pos = NULL;
	struct mei_cl_cb *cb = NULL;
	struct mei_device *dev;
	int i;
	int rets;
	int err;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);
	if (dev->mei_state != MEI_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	if ((cl->sm_state & MEI_WD_STATE_INDEPENDENCE_MSG_SENT) == 0) {
		/* Do not allow to read watchdog client */
		i = mei_find_me_client_index(dev, mei_wd_guid);
		if (i >= 0) {
			struct mei_me_client *me_client = &dev->me_clients[i];

			if (cl->me_client_id == me_client->client_id) {
				rets = -EBADF;
				goto out;
			}
		}
	} else {
		cl->sm_state &= ~MEI_WD_STATE_INDEPENDENCE_MSG_SENT;
	}

	if (cl == &dev->iamthif_cl) {
		rets = amthi_read(dev, file, ubuf, length, offset);
		goto out;
	}

	if (cl->read_cb && cl->read_cb->information > *offset) {
		cb = cl->read_cb;
		goto copy_buffer;
	} else if (cl->read_cb && cl->read_cb->information > 0 &&
		   cl->read_cb->information <= *offset) {
		cb = cl->read_cb;
		rets = 0;
		goto free;
	} else if ((!cl->read_cb || !cl->read_cb->information) &&
		   *offset > 0) {
		/* Offset needs to be cleaned for contiguous reads */
		*offset = 0;
		rets = 0;
		goto out;
	}

	err = mei_start_read(dev, cl);
	if (err && err != -EBUSY) {
		dev_dbg(&dev->pdev->dev,
			"mei start read failure with status = %d\n", err);
		rets = err;
		goto out;
	}

	if (MEI_READ_COMPLETE != cl->reading_state &&
	    !waitqueue_active(&cl->rx_wait)) {
		if (file->f_flags & O_NONBLOCK) {
			rets = -EAGAIN;
			goto out;
		}
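
		/*
		 * Drop the device lock while sleeping so the interrupt
		 * thread can complete the read and wake us up.
		 */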
		mutex_unlock(&dev->device_lock);

		if (wait_event_interruptible(cl->rx_wait,
			(MEI_READ_COMPLETE == cl->reading_state ||
			 MEI_FILE_INITIALIZING == cl->state ||
			 MEI_FILE_DISCONNECTED == cl->state ||
			 MEI_FILE_DISCONNECTING == cl->state))) {
			if (signal_pending(current))
				return -EINTR;
			return -ERESTARTSYS;
		}

		mutex_lock(&dev->device_lock);
		if (MEI_FILE_INITIALIZING == cl->state ||
		    MEI_FILE_DISCONNECTED == cl->state ||
		    MEI_FILE_DISCONNECTING == cl->state) {
			rets = -EBUSY;
			goto out;
		}
	}

	cb = cl->read_cb;

	if (!cb) {
		rets = -ENODEV;
		goto out;
	}
	if (cl->reading_state != MEI_READ_COMPLETE) {
		rets = 0;
		goto out;
	}
	/* now copy the data to user space */
copy_buffer:
	dev_dbg(&dev->pdev->dev, "cb->response_buffer size - %d\n",
		cb->response_buffer.size);
	dev_dbg(&dev->pdev->dev, "cb->information - %lu\n",
		cb->information);
	if (length == 0 || ubuf == NULL || *offset > cb->information) {
		rets = -EMSGSIZE;
		goto free;
	}

	/* length is being truncated to PAGE_SIZE, however, */
	/* information size may be longer */
	length = min_t(size_t, length, (cb->information - *offset));

	if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) {
		rets = -EFAULT;
		goto free;
	}

	rets = length;
	*offset += length;
	if ((unsigned long)*offset < cb->information)
		goto out;

free:
	cb_pos = find_read_list_entry(dev, cl);
	/* Remove entry from read list */
	if (cb_pos)
		list_del(&cb_pos->cb_list);
	mei_free_cb_private(cb);
	cl->reading_state = MEI_IDLE;
	cl->read_cb = NULL;
	cl->read_pending = 0;
out:
	dev_dbg(&dev->pdev->dev, "end mei read rets= %d\n", rets);
	mutex_unlock(&dev->device_lock);
	return rets;
}
/**
 * mei_write - the write function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * returns >=0 data length on success, <0 on error
 */
static ssize_t mei_write(struct file *file, const char __user *ubuf,
			 size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *write_cb = NULL;
	struct mei_msg_hdr mei_hdr;
	struct mei_device *dev;
	unsigned long timeout = 0;
	int rets;
	int i;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	if (dev->mei_state != MEI_ENABLED) {
		mutex_unlock(&dev->device_lock);
		return -ENODEV;
	}

	if (cl == &dev->iamthif_cl) {
		write_cb = find_amthi_read_list_entry(dev, file);
		if (write_cb) {
			timeout = write_cb->read_time +
				msecs_to_jiffies(IAMTHIF_READ_TIMER);

			if (time_after(jiffies, timeout) ||
			    cl->reading_state == MEI_READ_COMPLETE) {
				*offset = 0;
				list_del(&write_cb->cb_list);
				mei_free_cb_private(write_cb);
				write_cb = NULL;
			}
		}
	}

	/* free entry used in read */
	if (cl->reading_state == MEI_READ_COMPLETE) {
		*offset = 0;
		write_cb = find_read_list_entry(dev, cl);
		if (write_cb) {
			list_del(&write_cb->cb_list);
			mei_free_cb_private(write_cb);
			write_cb = NULL;
			cl->reading_state = MEI_IDLE;
			cl->read_cb = NULL;
			cl->read_pending = 0;
		}
	} else if (cl->reading_state == MEI_IDLE && !cl->read_pending)
		*offset = 0;

	write_cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
	if (!write_cb) {
		mutex_unlock(&dev->device_lock);
		return -ENOMEM;
	}

	write_cb->file_object = file;
	write_cb->file_private = cl;
	write_cb->request_buffer.data = kmalloc(length, GFP_KERNEL);
	rets = -ENOMEM;
	if (!write_cb->request_buffer.data)
		goto unlock_dev;

	dev_dbg(&dev->pdev->dev, "length =%d\n", (int) length);

	rets = -EFAULT;
	if (copy_from_user(write_cb->request_buffer.data, ubuf, length))
		goto unlock_dev;

	cl->sm_state = 0;
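
	/*
	 * Writing one of the known watchdog "state independence" messages
	 * sets a flag that permits the following read from the watchdog
	 * client (see the corresponding check in mei_read()).
	 */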
	if (length == 4 &&
	    ((memcmp(mei_wd_state_independence_msg[0],
		     write_cb->request_buffer.data, 4) == 0) ||
	     (memcmp(mei_wd_state_independence_msg[1],
		     write_cb->request_buffer.data, 4) == 0) ||
	     (memcmp(mei_wd_state_independence_msg[2],
		     write_cb->request_buffer.data, 4) == 0)))
		cl->sm_state |= MEI_WD_STATE_INDEPENDENCE_MSG_SENT;

	INIT_LIST_HEAD(&write_cb->cb_list);
	if (cl == &dev->iamthif_cl) {
		write_cb->response_buffer.data =
			kmalloc(dev->iamthif_mtu, GFP_KERNEL);
		if (!write_cb->response_buffer.data) {
			rets = -ENOMEM;
			goto unlock_dev;
		}
		if (dev->mei_state != MEI_ENABLED) {
			rets = -ENODEV;
			goto unlock_dev;
		}
		for (i = 0; i < dev->me_clients_num; i++) {
			if (dev->me_clients[i].client_id ==
			    dev->iamthif_cl.me_client_id)
				break;
		}

		if (WARN_ON(dev->me_clients[i].client_id != cl->me_client_id)) {
			rets = -ENODEV;
			goto unlock_dev;
		}
		if (i == dev->me_clients_num ||
		    (dev->me_clients[i].client_id !=
		     dev->iamthif_cl.me_client_id)) {
			rets = -ENODEV;
			goto unlock_dev;
		} else if (length > dev->me_clients[i].props.max_msg_length ||
			   length <= 0) {
			rets = -EMSGSIZE;
			goto unlock_dev;
		}

		write_cb->response_buffer.size = dev->iamthif_mtu;
		write_cb->major_file_operations = MEI_IOCTL;
		write_cb->information = 0;
		write_cb->request_buffer.size = length;
		if (dev->iamthif_cl.state != MEI_FILE_CONNECTED) {
			rets = -ENODEV;
			goto unlock_dev;
		}

		if (!list_empty(&dev->amthi_cmd_list.mei_cb.cb_list) ||
		    dev->iamthif_state != MEI_IAMTHIF_IDLE) {
			dev_dbg(&dev->pdev->dev, "amthi_state = %d\n",
				(int) dev->iamthif_state);
			dev_dbg(&dev->pdev->dev, "add amthi cb to amthi cmd waiting list\n");
			list_add_tail(&write_cb->cb_list,
				      &dev->amthi_cmd_list.mei_cb.cb_list);
			rets = length;
		} else {
			dev_dbg(&dev->pdev->dev, "call amthi write\n");
			rets = amthi_write(dev, write_cb);

			if (rets) {
				dev_dbg(&dev->pdev->dev, "amthi write failed with status = %d\n",
					rets);
				goto unlock_dev;
			}
			rets = length;
		}
		mutex_unlock(&dev->device_lock);
		return rets;
	}

	write_cb->major_file_operations = MEI_WRITE;
	/* make sure information is zero before we start */
	write_cb->information = 0;
	write_cb->request_buffer.size = length;

	dev_dbg(&dev->pdev->dev, "host client = %d, ME client = %d\n",
		cl->host_client_id, cl->me_client_id);
	if (cl->state != MEI_FILE_CONNECTED) {
		rets = -ENODEV;
		dev_dbg(&dev->pdev->dev, "host client = %d, is not connected to ME client = %d",
			cl->host_client_id,
			cl->me_client_id);
		goto unlock_dev;
	}
	for (i = 0; i < dev->me_clients_num; i++) {
		if (dev->me_clients[i].client_id ==
		    cl->me_client_id)
			break;
	}
	if (WARN_ON(dev->me_clients[i].client_id != cl->me_client_id)) {
		rets = -ENODEV;
		goto unlock_dev;
	}
	if (i == dev->me_clients_num) {
		rets = -ENODEV;
		goto unlock_dev;
	}
	if (length > dev->me_clients[i].props.max_msg_length || length <= 0) {
		rets = -EINVAL;
		goto unlock_dev;
	}
	write_cb->file_private = cl;

	rets = mei_flow_ctrl_creds(dev, cl);
	if (rets < 0)
		goto unlock_dev;
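
	/*
	 * Write the first fragment immediately only if flow control
	 * credits are available and the host buffer is empty; otherwise
	 * queue the request to be sent once credits and buffer space
	 * become available.
	 */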
	if (rets && dev->mei_host_buffer_is_empty) {
		rets = 0;
		dev->mei_host_buffer_is_empty = false;
		if (length > mei_hbuf_max_data(dev)) {
			mei_hdr.length = mei_hbuf_max_data(dev);
			mei_hdr.msg_complete = 0;
		} else {
			mei_hdr.length = length;
			mei_hdr.msg_complete = 1;
		}
		mei_hdr.host_addr = cl->host_client_id;
		mei_hdr.me_addr = cl->me_client_id;
		mei_hdr.reserved = 0;
		dev_dbg(&dev->pdev->dev, "call mei_write_message header=%08x.\n",
			*((u32 *) &mei_hdr));
		if (mei_write_message(dev, &mei_hdr,
				      (unsigned char *) (write_cb->request_buffer.data),
				      mei_hdr.length)) {
			rets = -ENODEV;
			goto unlock_dev;
		}
		cl->writing_state = MEI_WRITING;
		write_cb->information = mei_hdr.length;
		if (mei_hdr.msg_complete) {
			if (mei_flow_ctrl_reduce(dev, cl)) {
				rets = -ENODEV;
				goto unlock_dev;
			}
			list_add_tail(&write_cb->cb_list,
				      &dev->write_waiting_list.mei_cb.cb_list);
		} else {
			list_add_tail(&write_cb->cb_list,
				      &dev->write_list.mei_cb.cb_list);
		}
	} else {
		write_cb->information = 0;
		cl->writing_state = MEI_WRITING;
		list_add_tail(&write_cb->cb_list,
			      &dev->write_list.mei_cb.cb_list);
	}
	mutex_unlock(&dev->device_lock);
	return length;

unlock_dev:
	mutex_unlock(&dev->device_lock);
	mei_free_cb_private(write_cb);
	return rets;
}
/**
 * mei_ioctl - the IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure
 *
 * returns 0 on success, <0 on error
 */
static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
{
	struct mei_device *dev;
	struct mei_cl *cl = file->private_data;
	struct mei_connect_client_data *connect_data = NULL;
	int rets;

	if (cmd != IOCTL_MEI_CONNECT_CLIENT)
		return -EINVAL;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	dev_dbg(&dev->pdev->dev, "IOCTL cmd = 0x%x", cmd);

	mutex_lock(&dev->device_lock);
	if (dev->mei_state != MEI_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	dev_dbg(&dev->pdev->dev, ": IOCTL_MEI_CONNECT_CLIENT.\n");

	connect_data = kzalloc(sizeof(struct mei_connect_client_data),
			       GFP_KERNEL);
	if (!connect_data) {
		rets = -ENOMEM;
		goto out;
	}
	dev_dbg(&dev->pdev->dev, "copy connect data from user\n");
	if (copy_from_user(connect_data, (char __user *)data,
			   sizeof(struct mei_connect_client_data))) {
		dev_dbg(&dev->pdev->dev, "failed to copy data from userland\n");
		rets = -EFAULT;
		goto out;
	}
	rets = mei_ioctl_connect_client(file, connect_data);

	/* if all is ok, copy the data back to user */
	if (rets)
		goto out;

	dev_dbg(&dev->pdev->dev, "copy connect data to user\n");
	if (copy_to_user((char __user *)data, connect_data,
			 sizeof(struct mei_connect_client_data))) {
		dev_dbg(&dev->pdev->dev, "failed to copy data to userland\n");
		rets = -EFAULT;
		goto out;
	}

out:
	kfree(connect_data);
	mutex_unlock(&dev->device_lock);
	return rets;
}
/**
 * mei_compat_ioctl - the compat IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure
 *
 * returns 0 on success, <0 on error
 */
#ifdef CONFIG_COMPAT
static long mei_compat_ioctl(struct file *file,
			     unsigned int cmd, unsigned long data)
{
	return mei_ioctl(file, cmd, (unsigned long)compat_ptr(data));
}
#endif
/**
 * mei_poll - the poll function
 *
 * @file: pointer to file structure
 * @wait: pointer to poll_table structure
 *
 * returns poll mask
 */
static unsigned int mei_poll(struct file *file, poll_table *wait)
{
	struct mei_cl *cl = file->private_data;
	struct mei_device *dev;
	unsigned int mask = 0;

	if (WARN_ON(!cl || !cl->dev))
		return mask;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	if (dev->mei_state != MEI_ENABLED)
		goto out;

	if (cl == &dev->iamthif_cl) {
		mutex_unlock(&dev->device_lock);
		poll_wait(file, &dev->iamthif_cl.wait, wait);
		mutex_lock(&dev->device_lock);
		if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE &&
		    dev->iamthif_file_object == file) {
			mask |= (POLLIN | POLLRDNORM);
			dev_dbg(&dev->pdev->dev, "run next amthi cb\n");
			mei_run_next_iamthif_cmd(dev);
		}
		goto out;
	}

	mutex_unlock(&dev->device_lock);
	poll_wait(file, &cl->tx_wait, wait);
	mutex_lock(&dev->device_lock);
	if (MEI_WRITE_COMPLETE == cl->writing_state)
		mask |= (POLLIN | POLLRDNORM);

out:
	mutex_unlock(&dev->device_lock);
	return mask;
}
/*
 * file operations structure will be used for mei char device.
 */
static const struct file_operations mei_fops = {
	.owner = THIS_MODULE,
	.read = mei_read,
	.unlocked_ioctl = mei_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = mei_compat_ioctl,
#endif
	.open = mei_open,
	.release = mei_release,
	.write = mei_write,
	.poll = mei_poll,
	.llseek = no_llseek
};

/*
 * Misc Device Struct
 */
static struct miscdevice mei_misc_device = {
	.name = "mei",
	.fops = &mei_fops,
	.minor = MISC_DYNAMIC_MINOR,
};
/**
 * mei_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_pci_tbl
 *
 * returns 0 on success, <0 on failure.
 */
static int __devinit mei_probe(struct pci_dev *pdev,
			       const struct pci_device_id *ent)
{
	struct mei_device *dev;
	int err;

	mutex_lock(&mei_mutex);
	if (mei_device) {
		err = -EEXIST;
		goto end;
	}
	/* enable pci dev */
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering */
	pci_set_master(pdev);
	/* pci request regions for mei driver */
	err = pci_request_regions(pdev, mei_driver_name);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto disable_device;
	}
	/* allocates and initializes the mei dev structure */
	dev = mei_device_init(pdev);
	if (!dev) {
		err = -ENOMEM;
		goto release_regions;
	}
	/* mapping IO device memory */
	dev->mem_addr = pci_iomap(pdev, 0, 0);
	if (!dev->mem_addr) {
		dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
		err = -ENOMEM;
		goto free_device;
	}
	pci_enable_msi(pdev);

	/* request and enable interrupt */
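	/*
	 * An MSI vector is exclusive to this device, so only the threaded
	 * handler is needed; a legacy interrupt line may be shared, so a
	 * quick handler first checks whether the interrupt is ours.
	 */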
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
					   NULL,
					   mei_interrupt_thread_handler,
					   IRQF_ONESHOT, mei_driver_name, dev);
	else
		err = request_threaded_irq(pdev->irq,
					   mei_interrupt_quick_handler,
					   mei_interrupt_thread_handler,
					   IRQF_SHARED, mei_driver_name, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
			pdev->irq);
		goto disable_msi;
	}
	INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
	if (mei_hw_init(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	err = misc_register(&mei_misc_device);
	if (err)
		goto release_irq;

	mei_device = pdev;
	pci_set_drvdata(pdev, dev);

	schedule_delayed_work(&dev->timer_work, HZ);

	mutex_unlock(&mei_mutex);

	pr_debug("initialization successful.\n");

	return 0;

release_irq:
	/* disable interrupts */
	dev->host_hw_state = mei_hcsr_read(dev);
	mei_disable_interrupts(dev);
	flush_scheduled_work();
	free_irq(pdev->irq, dev);
disable_msi:
	pci_disable_msi(pdev);
	pci_iounmap(pdev, dev->mem_addr);
free_device:
	kfree(dev);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
end:
	mutex_unlock(&mei_mutex);
	dev_err(&pdev->dev, "initialization failed.\n");
	return err;
}
/**
 * mei_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void __devexit mei_remove(struct pci_dev *pdev)
{
	struct mei_device *dev;

	if (mei_device != pdev)
		return;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return;

	mutex_lock(&dev->device_lock);

	mei_wd_stop(dev, false);

	mei_device = NULL;

	if (dev->iamthif_cl.state == MEI_FILE_CONNECTED) {
		dev->iamthif_cl.state = MEI_FILE_DISCONNECTING;
		mei_disconnect_host_client(dev, &dev->iamthif_cl);
	}
	if (dev->wd_cl.state == MEI_FILE_CONNECTED) {
		dev->wd_cl.state = MEI_FILE_DISCONNECTING;
		mei_disconnect_host_client(dev, &dev->wd_cl);
	}

	/* Unregistering watchdog device */
	mei_watchdog_unregister(dev);

	/* remove entry if already in list */
	dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n");
	mei_remove_client_from_file_list(dev, dev->wd_cl.host_client_id);
	mei_remove_client_from_file_list(dev, dev->iamthif_cl.host_client_id);

	dev->iamthif_current_cb = NULL;
	dev->me_clients_num = 0;

	mutex_unlock(&dev->device_lock);

	flush_scheduled_work();

	/* disable interrupts */
	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);
	pci_set_drvdata(pdev, NULL);

	if (dev->mem_addr)
		pci_iounmap(pdev, dev->mem_addr);

	kfree(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	misc_deregister(&mei_misc_device);
}
#ifdef CONFIG_PM
static int mei_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev = pci_get_drvdata(pdev);
	int err;

	if (!dev)
		return -ENODEV;
	mutex_lock(&dev->device_lock);
	/* Stop watchdog if exists */
	err = mei_wd_stop(dev, true);
	/* Set new mei state */
	if (dev->mei_state == MEI_ENABLED ||
	    dev->mei_state == MEI_RECOVERING_FROM_RESET) {
		dev->mei_state = MEI_POWER_DOWN;
		mei_reset(dev, 0);
	}
	mutex_unlock(&dev->device_lock);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	return err;
}

static int mei_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	int err;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	pci_enable_msi(pdev);

	/* request and enable interrupt */
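	/*
	 * The irq was released in mei_pci_suspend(), so it has to be
	 * requested again here.
	 */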
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
					   NULL,
					   mei_interrupt_thread_handler,
					   IRQF_ONESHOT, mei_driver_name, dev);
	else
		err = request_threaded_irq(pdev->irq,
					   mei_interrupt_quick_handler,
					   mei_interrupt_thread_handler,
					   IRQF_SHARED, mei_driver_name, dev);
	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
			pdev->irq);
		return err;
	}

	mutex_lock(&dev->device_lock);
	dev->mei_state = MEI_POWER_UP;
	mei_reset(dev, 1);
	mutex_unlock(&dev->device_lock);

	/* Start timer if stopped in suspend */
	schedule_delayed_work(&dev->timer_work, HZ);

	return err;
}

static SIMPLE_DEV_PM_OPS(mei_pm_ops, mei_pci_suspend, mei_pci_resume);
#define MEI_PM_OPS	(&mei_pm_ops)
#else
#define MEI_PM_OPS	NULL
#endif /* CONFIG_PM */

/*
 * PCI driver structure
 */
static struct pci_driver mei_driver = {
	.name = mei_driver_name,
	.id_table = mei_pci_tbl,
	.probe = mei_probe,
	.remove = __devexit_p(mei_remove),
	.shutdown = __devexit_p(mei_remove),
	.driver.pm = MEI_PM_OPS,
};

module_pci_driver(mei_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");