main.c

/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/aio.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/uuid.h>
#include <linux/compat.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>

#include "mei_dev.h"
#include <linux/mei.h>
#include "interface.h"
/* AMT device is a singleton on the platform */
static struct pci_dev *mei_pdev;

/* mei_pci_tbl - PCI Device ID Table */
static DEFINE_PCI_DEVICE_TABLE(mei_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82946GZ)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G35)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82Q965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GM965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GME965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q35)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82G33)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q33)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82X38)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_3200)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_6)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_7)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_8)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_9)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_10)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_3)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_4)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_3)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_4)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_CPT_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PBG_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_LP)},

	/* required last entry */
	{0, }
};
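
/*
 * Exporting the ID table below lets udev/modprobe autoload this module
 * when a matching PCI device is enumerated on the platform.
 */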
MODULE_DEVICE_TABLE(pci, mei_pci_tbl);

static DEFINE_MUTEX(mei_mutex);

/**
 * mei_clear_list - removes all callbacks associated with file
 *		from mei_cb_list
 *
 * @dev: device structure.
 * @file: file structure
 * @mei_cb_list: callbacks list
 *
 * mei_clear_list is called to clear resources associated with file
 * when the application calls the close function or is interrupted (e.g. Ctrl-C)
 *
 * returns true if callback removed from the list, false otherwise
 */
static bool mei_clear_list(struct mei_device *dev,
		struct file *file, struct list_head *mei_cb_list)
{
	struct mei_cl_cb *cb_pos = NULL;
	struct mei_cl_cb *cb_next = NULL;
	struct file *file_temp;
	bool removed = false;

	/* walk all list members */
	list_for_each_entry_safe(cb_pos, cb_next, mei_cb_list, cb_list) {
		file_temp = (struct file *)cb_pos->file_object;
		/* check if list member associated with a file */
		if (file_temp == file) {
			/* remove member from the list */
			list_del(&cb_pos->cb_list);
			/* check if cb equal to current iamthif cb */
			if (dev->iamthif_current_cb == cb_pos) {
				dev->iamthif_current_cb = NULL;
				/* send flow control to iamthif client */
				mei_send_flow_control(dev, &dev->iamthif_cl);
			}
			/* free all allocated buffers */
			mei_free_cb_private(cb_pos);
			cb_pos = NULL;
			removed = true;
		}
	}
	return removed;
}

/**
 * mei_clear_lists - removes all callbacks associated with file
 *
 * @dev: device structure
 * @file: file structure
 *
 * mei_clear_lists is called to clear resources associated with file
 * when the application calls the close function or is interrupted (e.g. Ctrl-C)
 *
 * returns true if callback removed from the list, false otherwise
 */
static bool mei_clear_lists(struct mei_device *dev, struct file *file)
{
	bool removed = false;

	/* remove callbacks associated with a file */
	mei_clear_list(dev, file, &dev->amthi_cmd_list.mei_cb.cb_list);
	if (mei_clear_list(dev, file,
			   &dev->amthi_read_complete_list.mei_cb.cb_list))
		removed = true;

	mei_clear_list(dev, file, &dev->ctrl_rd_list.mei_cb.cb_list);

	if (mei_clear_list(dev, file, &dev->ctrl_wr_list.mei_cb.cb_list))
		removed = true;

	if (mei_clear_list(dev, file, &dev->write_waiting_list.mei_cb.cb_list))
		removed = true;

	if (mei_clear_list(dev, file, &dev->write_list.mei_cb.cb_list))
		removed = true;

	/* check if iamthif_current_cb not NULL */
	if (dev->iamthif_current_cb && !removed) {
		/* check file and iamthif current cb association */
		if (dev->iamthif_current_cb->file_object == file) {
			/* remove cb */
			mei_free_cb_private(dev->iamthif_current_cb);
			dev->iamthif_current_cb = NULL;
			removed = true;
		}
	}
	return removed;
}

/**
 * find_read_list_entry - find read list entry
 *
 * @dev: device structure
 * @cl: host client structure
 *
 * returns cb on success, NULL on error
 */
static struct mei_cl_cb *find_read_list_entry(
		struct mei_device *dev,
		struct mei_cl *cl)
{
	struct mei_cl_cb *pos = NULL;
	struct mei_cl_cb *next = NULL;

	dev_dbg(&dev->pdev->dev, "remove read_list CB\n");
	list_for_each_entry_safe(pos, next,
			&dev->read_list.mei_cb.cb_list, cb_list) {
		struct mei_cl *cl_temp;
		cl_temp = (struct mei_cl *)pos->file_private;

		if (mei_cl_cmp_id(cl, cl_temp))
			return pos;
	}
	return NULL;
}

/**
 * mei_open - the open function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * returns 0 on success, <0 on error
 */
static int mei_open(struct inode *inode, struct file *file)
{
	struct mei_cl *cl;
	struct mei_device *dev;
	unsigned long cl_id;
	int err;

	err = -ENODEV;
	if (!mei_pdev)
		goto out;

	dev = pci_get_drvdata(mei_pdev);
	if (!dev)
		goto out;

	mutex_lock(&dev->device_lock);
	err = -ENOMEM;
	cl = mei_cl_allocate(dev);
	if (!cl)
		goto out_unlock;

	err = -ENODEV;
	if (dev->dev_state != MEI_DEV_ENABLED) {
		dev_dbg(&dev->pdev->dev, "dev_state != MEI_ENABLED dev_state = %s\n",
		    mei_dev_state_str(dev->dev_state));
		goto out_unlock;
	}
	err = -EMFILE;
	if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
		dev_err(&dev->pdev->dev, "open_handle_count exceeded %d",
			MEI_MAX_OPEN_HANDLE_COUNT);
		goto out_unlock;
	}

	cl_id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
	if (cl_id >= MEI_CLIENTS_MAX) {
		dev_err(&dev->pdev->dev, "client_id exceeded %d",
			MEI_CLIENTS_MAX);
		goto out_unlock;
	}

	cl->host_client_id = cl_id;

	dev_dbg(&dev->pdev->dev, "client_id = %d\n", cl->host_client_id);

	dev->open_handle_count++;

	list_add_tail(&cl->link, &dev->file_list);

	set_bit(cl->host_client_id, dev->host_clients_map);
	cl->state = MEI_FILE_INITIALIZING;
	cl->sm_state = 0;

	file->private_data = cl;
	mutex_unlock(&dev->device_lock);

	return nonseekable_open(inode, file);

out_unlock:
	mutex_unlock(&dev->device_lock);
	kfree(cl);
out:
	return err;
}

/**
 * mei_release - the release function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * returns 0 on success, <0 on error
 */
static int mei_release(struct inode *inode, struct file *file)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *cb;
	struct mei_device *dev;
	int rets = 0;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);
	if (cl != &dev->iamthif_cl) {
		if (cl->state == MEI_FILE_CONNECTED) {
			cl->state = MEI_FILE_DISCONNECTING;
			dev_dbg(&dev->pdev->dev,
				"disconnecting client host client = %d, "
				"ME client = %d\n",
				cl->host_client_id,
				cl->me_client_id);
			rets = mei_disconnect_host_client(dev, cl);
		}
		mei_cl_flush_queues(cl);
		dev_dbg(&dev->pdev->dev, "remove client host client = %d, ME client = %d\n",
		    cl->host_client_id,
		    cl->me_client_id);

		if (dev->open_handle_count > 0) {
			clear_bit(cl->host_client_id, dev->host_clients_map);
			dev->open_handle_count--;
		}
		mei_remove_client_from_file_list(dev, cl->host_client_id);

		/* free read cb */
		cb = NULL;
		if (cl->read_cb) {
			cb = find_read_list_entry(dev, cl);
			/* Remove entry from read list */
			if (cb)
				list_del(&cb->cb_list);
			cb = cl->read_cb;
			cl->read_cb = NULL;
		}

		file->private_data = NULL;

		if (cb) {
			mei_free_cb_private(cb);
			cb = NULL;
		}

		kfree(cl);
	} else {
		if (dev->open_handle_count > 0)
			dev->open_handle_count--;

		if (dev->iamthif_file_object == file &&
		    dev->iamthif_state != MEI_IAMTHIF_IDLE) {
			dev_dbg(&dev->pdev->dev, "amthi canceled iamthif state %d\n",
			    dev->iamthif_state);
			dev->iamthif_canceled = true;
			if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE) {
				dev_dbg(&dev->pdev->dev, "run next amthi iamthif cb\n");
				mei_run_next_iamthif_cmd(dev);
			}
		}

		if (mei_clear_lists(dev, file))
			dev->iamthif_state = MEI_IAMTHIF_IDLE;
	}
	mutex_unlock(&dev->device_lock);
	return rets;
}

/**
 * mei_read - the read function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * returns >=0 data length on success, <0 on error
 */
static ssize_t mei_read(struct file *file, char __user *ubuf,
			size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *cb_pos = NULL;
	struct mei_cl_cb *cb = NULL;
	struct mei_device *dev;
	int i;
	int rets;
	int err;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	if ((cl->sm_state & MEI_WD_STATE_INDEPENDENCE_MSG_SENT) == 0) {
		/* Do not allow to read watchdog client */
		i = mei_me_cl_by_uuid(dev, &mei_wd_guid);
		if (i >= 0) {
			struct mei_me_client *me_client = &dev->me_clients[i];
			if (cl->me_client_id == me_client->client_id) {
				rets = -EBADF;
				goto out;
			}
		}
	} else {
		cl->sm_state &= ~MEI_WD_STATE_INDEPENDENCE_MSG_SENT;
	}

	if (cl == &dev->iamthif_cl) {
		rets = amthi_read(dev, file, ubuf, length, offset);
		goto out;
	}

	if (cl->read_cb && cl->read_cb->information > *offset) {
		cb = cl->read_cb;
		goto copy_buffer;
	} else if (cl->read_cb && cl->read_cb->information > 0 &&
		   cl->read_cb->information <= *offset) {
		cb = cl->read_cb;
		rets = 0;
		goto free;
	} else if ((!cl->read_cb || !cl->read_cb->information) &&
		    *offset > 0) {
		/* Offset needs to be cleaned for contiguous reads */
		*offset = 0;
		rets = 0;
		goto out;
	}

	err = mei_start_read(dev, cl);
	if (err && err != -EBUSY) {
		dev_dbg(&dev->pdev->dev,
			"mei start read failure with status = %d\n", err);
		rets = err;
		goto out;
	}

	if (MEI_READ_COMPLETE != cl->reading_state &&
	    !waitqueue_active(&cl->rx_wait)) {
		if (file->f_flags & O_NONBLOCK) {
			rets = -EAGAIN;
			goto out;
		}

		mutex_unlock(&dev->device_lock);

		if (wait_event_interruptible(cl->rx_wait,
			(MEI_READ_COMPLETE == cl->reading_state ||
			 MEI_FILE_INITIALIZING == cl->state ||
			 MEI_FILE_DISCONNECTED == cl->state ||
			 MEI_FILE_DISCONNECTING == cl->state))) {
			if (signal_pending(current))
				return -EINTR;
			return -ERESTARTSYS;
		}

		mutex_lock(&dev->device_lock);
		if (MEI_FILE_INITIALIZING == cl->state ||
		    MEI_FILE_DISCONNECTED == cl->state ||
		    MEI_FILE_DISCONNECTING == cl->state) {
			rets = -EBUSY;
			goto out;
		}
	}

	cb = cl->read_cb;

	if (!cb) {
		rets = -ENODEV;
		goto out;
	}
	if (cl->reading_state != MEI_READ_COMPLETE) {
		rets = 0;
		goto out;
	}
	/* now copy the data to user space */
copy_buffer:
	dev_dbg(&dev->pdev->dev, "cb->response_buffer size - %d\n",
	    cb->response_buffer.size);
	dev_dbg(&dev->pdev->dev, "cb->information - %lu\n",
	    cb->information);
	if (length == 0 || ubuf == NULL || *offset > cb->information) {
		rets = -EMSGSIZE;
		goto free;
	}

	/* length is being truncated to PAGE_SIZE, however, */
	/* information size may be longer */
	length = min_t(size_t, length, (cb->information - *offset));

	if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) {
		rets = -EFAULT;
		goto free;
	}

	rets = length;
	*offset += length;
	if ((unsigned long)*offset < cb->information)
		goto out;

free:
	cb_pos = find_read_list_entry(dev, cl);
	/* Remove entry from read list */
	if (cb_pos)
		list_del(&cb_pos->cb_list);
	mei_free_cb_private(cb);
	cl->reading_state = MEI_IDLE;
	cl->read_cb = NULL;
	cl->read_pending = 0;
out:
	dev_dbg(&dev->pdev->dev, "end mei read rets= %d\n", rets);
	mutex_unlock(&dev->device_lock);
	return rets;
}

/**
 * mei_write - the write function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * returns >=0 data length on success, <0 on error
 */
static ssize_t mei_write(struct file *file, const char __user *ubuf,
			 size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *write_cb = NULL;
	struct mei_msg_hdr mei_hdr;
	struct mei_device *dev;
	unsigned long timeout = 0;
	int rets;
	int i;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	if (dev->dev_state != MEI_DEV_ENABLED) {
		mutex_unlock(&dev->device_lock);
		return -ENODEV;
	}

	if (cl == &dev->iamthif_cl) {
		write_cb = find_amthi_read_list_entry(dev, file);
		if (write_cb) {
			timeout = write_cb->read_time +
					msecs_to_jiffies(IAMTHIF_READ_TIMER);

			if (time_after(jiffies, timeout) ||
			    cl->reading_state == MEI_READ_COMPLETE) {
				*offset = 0;
				list_del(&write_cb->cb_list);
				mei_free_cb_private(write_cb);
				write_cb = NULL;
			}
		}
	}

	/* free entry used in read */
	if (cl->reading_state == MEI_READ_COMPLETE) {
		*offset = 0;
		write_cb = find_read_list_entry(dev, cl);
		if (write_cb) {
			list_del(&write_cb->cb_list);
			mei_free_cb_private(write_cb);
			write_cb = NULL;
			cl->reading_state = MEI_IDLE;
			cl->read_cb = NULL;
			cl->read_pending = 0;
		}
	} else if (cl->reading_state == MEI_IDLE && !cl->read_pending)
		*offset = 0;

	write_cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
	if (!write_cb) {
		mutex_unlock(&dev->device_lock);
		return -ENOMEM;
	}

	write_cb->file_object = file;
	write_cb->file_private = cl;
	write_cb->request_buffer.data = kmalloc(length, GFP_KERNEL);
	rets = -ENOMEM;
	if (!write_cb->request_buffer.data)
		goto unlock_dev;

	dev_dbg(&dev->pdev->dev, "length =%d\n", (int) length);

	rets = -EFAULT;
	if (copy_from_user(write_cb->request_buffer.data, ubuf, length))
		goto unlock_dev;

	cl->sm_state = 0;
	if (length == 4 &&
	    ((memcmp(mei_wd_state_independence_msg[0],
		     write_cb->request_buffer.data, 4) == 0) ||
	     (memcmp(mei_wd_state_independence_msg[1],
		     write_cb->request_buffer.data, 4) == 0) ||
	     (memcmp(mei_wd_state_independence_msg[2],
		     write_cb->request_buffer.data, 4) == 0)))
		cl->sm_state |= MEI_WD_STATE_INDEPENDENCE_MSG_SENT;

	INIT_LIST_HEAD(&write_cb->cb_list);
	if (cl == &dev->iamthif_cl) {
		write_cb->response_buffer.data =
		    kmalloc(dev->iamthif_mtu, GFP_KERNEL);
		if (!write_cb->response_buffer.data) {
			rets = -ENOMEM;
			goto unlock_dev;
		}
		if (dev->dev_state != MEI_DEV_ENABLED) {
			rets = -ENODEV;
			goto unlock_dev;
		}
		i = mei_me_cl_by_id(dev, dev->iamthif_cl.me_client_id);
		if (i < 0) {
			rets = -ENODEV;
			goto unlock_dev;
		}
		if (length > dev->me_clients[i].props.max_msg_length ||
		    length <= 0) {
			rets = -EMSGSIZE;
			goto unlock_dev;
		}

		write_cb->response_buffer.size = dev->iamthif_mtu;
		write_cb->major_file_operations = MEI_IOCTL;
		write_cb->information = 0;
		write_cb->request_buffer.size = length;
		if (dev->iamthif_cl.state != MEI_FILE_CONNECTED) {
			rets = -ENODEV;
			goto unlock_dev;
		}

		if (!list_empty(&dev->amthi_cmd_list.mei_cb.cb_list) ||
		    dev->iamthif_state != MEI_IAMTHIF_IDLE) {
			dev_dbg(&dev->pdev->dev, "amthi_state = %d\n",
			    (int) dev->iamthif_state);
			dev_dbg(&dev->pdev->dev, "add amthi cb to amthi cmd waiting list\n");
			list_add_tail(&write_cb->cb_list,
				      &dev->amthi_cmd_list.mei_cb.cb_list);
			rets = length;
		} else {
			dev_dbg(&dev->pdev->dev, "call amthi write\n");
			rets = amthi_write(dev, write_cb);

			if (rets) {
				dev_dbg(&dev->pdev->dev, "amthi write failed with status = %d\n",
				    rets);
				goto unlock_dev;
			}
			rets = length;
		}
		mutex_unlock(&dev->device_lock);
		return rets;
	}

	write_cb->major_file_operations = MEI_WRITE;
	/* make sure information is zero before we start */
	write_cb->information = 0;
	write_cb->request_buffer.size = length;

	dev_dbg(&dev->pdev->dev, "host client = %d, ME client = %d\n",
	    cl->host_client_id, cl->me_client_id);
	if (cl->state != MEI_FILE_CONNECTED) {
		rets = -ENODEV;
		dev_dbg(&dev->pdev->dev, "host client = %d, is not connected to ME client = %d",
		    cl->host_client_id,
		    cl->me_client_id);
		goto unlock_dev;
	}
	i = mei_me_cl_by_id(dev, cl->me_client_id);
	if (i < 0) {
		rets = -ENODEV;
		goto unlock_dev;
	}
	if (length > dev->me_clients[i].props.max_msg_length || length <= 0) {
		rets = -EINVAL;
		goto unlock_dev;
	}
	write_cb->file_private = cl;

	rets = mei_flow_ctrl_creds(dev, cl);
	if (rets < 0)
		goto unlock_dev;

	if (rets && dev->mei_host_buffer_is_empty) {
		rets = 0;
		dev->mei_host_buffer_is_empty = false;
		if (length > mei_hbuf_max_data(dev)) {
			mei_hdr.length = mei_hbuf_max_data(dev);
			mei_hdr.msg_complete = 0;
		} else {
			mei_hdr.length = length;
			mei_hdr.msg_complete = 1;
		}
		mei_hdr.host_addr = cl->host_client_id;
		mei_hdr.me_addr = cl->me_client_id;
		mei_hdr.reserved = 0;
		dev_dbg(&dev->pdev->dev, "call mei_write_message header=%08x.\n",
		    *((u32 *) &mei_hdr));
		if (mei_write_message(dev, &mei_hdr,
			(unsigned char *) (write_cb->request_buffer.data),
			mei_hdr.length)) {
			rets = -ENODEV;
			goto unlock_dev;
		}
		cl->writing_state = MEI_WRITING;
		write_cb->information = mei_hdr.length;
		if (mei_hdr.msg_complete) {
			if (mei_flow_ctrl_reduce(dev, cl)) {
				rets = -ENODEV;
				goto unlock_dev;
			}
			list_add_tail(&write_cb->cb_list,
				      &dev->write_waiting_list.mei_cb.cb_list);
		} else {
			list_add_tail(&write_cb->cb_list,
				      &dev->write_list.mei_cb.cb_list);
		}
	} else {
		write_cb->information = 0;
		cl->writing_state = MEI_WRITING;
		list_add_tail(&write_cb->cb_list,
			      &dev->write_list.mei_cb.cb_list);
	}
	mutex_unlock(&dev->device_lock);
	return length;

unlock_dev:
	mutex_unlock(&dev->device_lock);
	mei_free_cb_private(write_cb);
	return rets;
}

/**
 * mei_ioctl - the IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure
 *
 * returns 0 on success, <0 on error
 */
static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
{
	struct mei_device *dev;
	struct mei_cl *cl = file->private_data;
	struct mei_connect_client_data *connect_data = NULL;
	int rets;

	if (cmd != IOCTL_MEI_CONNECT_CLIENT)
		return -EINVAL;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	dev_dbg(&dev->pdev->dev, "IOCTL cmd = 0x%x", cmd);

	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	dev_dbg(&dev->pdev->dev, ": IOCTL_MEI_CONNECT_CLIENT.\n");

	connect_data = kzalloc(sizeof(struct mei_connect_client_data),
							GFP_KERNEL);
	if (!connect_data) {
		rets = -ENOMEM;
		goto out;
	}
	dev_dbg(&dev->pdev->dev, "copy connect data from user\n");
	if (copy_from_user(connect_data, (char __user *)data,
				sizeof(struct mei_connect_client_data))) {
		dev_dbg(&dev->pdev->dev, "failed to copy data from userland\n");
		rets = -EFAULT;
		goto out;
	}
	rets = mei_ioctl_connect_client(file, connect_data);

	/* if all is ok, copying the data back to user. */
	if (rets)
		goto out;

	dev_dbg(&dev->pdev->dev, "copy connect data to user\n");
	if (copy_to_user((char __user *)data, connect_data,
				sizeof(struct mei_connect_client_data))) {
		dev_dbg(&dev->pdev->dev, "failed to copy data to userland\n");
		rets = -EFAULT;
		goto out;
	}

out:
	kfree(connect_data);
	mutex_unlock(&dev->device_lock);
	return rets;
}
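
/*
 * Illustrative sketch (not part of the driver): roughly how a user space
 * program is expected to reach the ioctl above. The field names of
 * struct mei_connect_client_data are assumptions taken from linux/mei.h,
 * and the client UUID is hypothetical; this is a sketch, not a reference.
 *
 *	int fd = open("/dev/mei", O_RDWR);
 *	struct mei_connect_client_data data;
 *
 *	memset(&data, 0, sizeof(data));
 *	data.in_client_uuid = some_me_client_uuid;  // hypothetical ME client
 *	if (ioctl(fd, IOCTL_MEI_CONNECT_CLIENT, &data) == 0) {
 *		// on success the union holds the ME client properties,
 *		// e.g. the maximal message length the client accepts
 *	}
 */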

/**
 * mei_compat_ioctl - the compat IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure
 *
 * returns 0 on success, <0 on error
 */
#ifdef CONFIG_COMPAT
static long mei_compat_ioctl(struct file *file,
			unsigned int cmd, unsigned long data)
{
	return mei_ioctl(file, cmd, (unsigned long)compat_ptr(data));
}
#endif

/**
 * mei_poll - the poll function
 *
 * @file: pointer to file structure
 * @wait: pointer to poll_table structure
 *
 * returns poll mask
 */
static unsigned int mei_poll(struct file *file, poll_table *wait)
{
	struct mei_cl *cl = file->private_data;
	struct mei_device *dev;
	unsigned int mask = 0;

	if (WARN_ON(!cl || !cl->dev))
		return mask;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	if (dev->dev_state != MEI_DEV_ENABLED)
		goto out;

	if (cl == &dev->iamthif_cl) {
		mutex_unlock(&dev->device_lock);
		poll_wait(file, &dev->iamthif_cl.wait, wait);
		mutex_lock(&dev->device_lock);
		if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE &&
		    dev->iamthif_file_object == file) {
			mask |= (POLLIN | POLLRDNORM);
			dev_dbg(&dev->pdev->dev, "run next amthi cb\n");
			mei_run_next_iamthif_cmd(dev);
		}
		goto out;
	}

	mutex_unlock(&dev->device_lock);
	poll_wait(file, &cl->tx_wait, wait);
	mutex_lock(&dev->device_lock);
	if (MEI_WRITE_COMPLETE == cl->writing_state)
		mask |= (POLLIN | POLLRDNORM);

out:
	mutex_unlock(&dev->device_lock);
	return mask;
}

/*
 * file operations structure will be used for mei char device.
 */
static const struct file_operations mei_fops = {
	.owner = THIS_MODULE,
	.read = mei_read,
	.unlocked_ioctl = mei_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = mei_compat_ioctl,
#endif
	.open = mei_open,
	.release = mei_release,
	.write = mei_write,
	.poll = mei_poll,
	.llseek = no_llseek
};

/*
 * Misc Device Struct
 */
static struct miscdevice mei_misc_device = {
	.name = "mei",
	.fops = &mei_fops,
	.minor = MISC_DYNAMIC_MINOR,
};
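
/*
 * Illustrative sketch (not part of the driver): once a descriptor on the
 * /dev/mei node registered above has been connected with
 * IOCTL_MEI_CONNECT_CLIENT, a request/response exchange from user space
 * looks roughly like the snippet below. The request contents and buffer
 * sizes are hypothetical; the message must stay within the max_msg_length
 * reported by the connect ioctl.
 *
 *	unsigned char req[4] = { ... };          // client specific command
 *	unsigned char rsp[512];                  // sized from the connect reply
 *
 *	write(fd, req, sizeof(req));             // handled by mei_write()
 *	ssize_t n = read(fd, rsp, sizeof(rsp));  // blocks in mei_read() until
 *	                                         // the ME client answers
 */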

/**
 * mei_quirk_probe - probe for devices that don't have a valid ME interface
 * @pdev: PCI device structure
 * @ent: entry into pci_device_table
 *
 * returns true if ME Interface is valid, false otherwise
 */
static bool __devinit mei_quirk_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	u32 reg;
	if (ent->device == MEI_DEV_ID_PBG_1) {
		pci_read_config_dword(pdev, 0x48, &reg);
		/* make sure that bit 9 is up and bit 10 is down */
		if ((reg & 0x600) == 0x200) {
			dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
			return false;
		}
	}
	return true;
}

/**
 * mei_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_pci_tbl
 *
 * returns 0 on success, <0 on failure.
 */
static int __devinit mei_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	struct mei_device *dev;
	int err;

	mutex_lock(&mei_mutex);

	if (!mei_quirk_probe(pdev, ent)) {
		err = -ENODEV;
		goto end;
	}

	if (mei_pdev) {
		err = -EEXIST;
		goto end;
	}
	/* enable pci dev */
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering */
	pci_set_master(pdev);
	/* pci request regions for mei driver */
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto disable_device;
	}
	/* allocates and initializes the mei dev structure */
	dev = mei_device_init(pdev);
	if (!dev) {
		err = -ENOMEM;
		goto release_regions;
	}
	/* mapping IO device memory */
	dev->mem_addr = pci_iomap(pdev, 0, 0);
	if (!dev->mem_addr) {
		dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
		err = -ENOMEM;
		goto free_device;
	}
	pci_enable_msi(pdev);

	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_interrupt_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_interrupt_quick_handler,
			mei_interrupt_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
			pdev->irq);
		goto disable_msi;
	}
	INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
	if (mei_hw_init(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	err = misc_register(&mei_misc_device);
	if (err)
		goto release_irq;

	mei_pdev = pdev;
	pci_set_drvdata(pdev, dev);

	schedule_delayed_work(&dev->timer_work, HZ);

	mutex_unlock(&mei_mutex);

	pr_debug("initialization successful.\n");

	return 0;

release_irq:
	/* disable interrupts */
	dev->host_hw_state = mei_hcsr_read(dev);
	mei_disable_interrupts(dev);
	flush_scheduled_work();
	free_irq(pdev->irq, dev);
disable_msi:
	pci_disable_msi(pdev);
	pci_iounmap(pdev, dev->mem_addr);
free_device:
	kfree(dev);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
end:
	mutex_unlock(&mei_mutex);
	dev_err(&pdev->dev, "initialization failed.\n");
	return err;
}

/**
 * mei_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void __devexit mei_remove(struct pci_dev *pdev)
{
	struct mei_device *dev;

	if (mei_pdev != pdev)
		return;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return;

	mutex_lock(&dev->device_lock);

	cancel_delayed_work(&dev->timer_work);

	mei_wd_stop(dev);

	mei_pdev = NULL;

	if (dev->iamthif_cl.state == MEI_FILE_CONNECTED) {
		dev->iamthif_cl.state = MEI_FILE_DISCONNECTING;
		mei_disconnect_host_client(dev, &dev->iamthif_cl);
	}
	if (dev->wd_cl.state == MEI_FILE_CONNECTED) {
		dev->wd_cl.state = MEI_FILE_DISCONNECTING;
		mei_disconnect_host_client(dev, &dev->wd_cl);
	}

	/* Unregistering watchdog device */
	mei_watchdog_unregister(dev);

	/* remove entry if already in list */
	dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n");
	mei_remove_client_from_file_list(dev, dev->wd_cl.host_client_id);
	mei_remove_client_from_file_list(dev, dev->iamthif_cl.host_client_id);

	dev->iamthif_current_cb = NULL;
	dev->me_clients_num = 0;

	mutex_unlock(&dev->device_lock);

	flush_scheduled_work();

	/* disable interrupts */
	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);
	pci_set_drvdata(pdev, NULL);

	if (dev->mem_addr)
		pci_iounmap(pdev, dev->mem_addr);

	kfree(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	misc_deregister(&mei_misc_device);
}

#ifdef CONFIG_PM
static int mei_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev = pci_get_drvdata(pdev);
	int err;

	if (!dev)
		return -ENODEV;
	mutex_lock(&dev->device_lock);

	cancel_delayed_work(&dev->timer_work);

	/* Stop watchdog if exists */
	err = mei_wd_stop(dev);
	/* Set new mei state */
	if (dev->dev_state == MEI_DEV_ENABLED ||
	    dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET) {
		dev->dev_state = MEI_DEV_POWER_DOWN;
		mei_reset(dev, 0);
	}
	mutex_unlock(&dev->device_lock);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	return err;
}

static int mei_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	int err;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	pci_enable_msi(pdev);

	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_interrupt_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_interrupt_quick_handler,
			mei_interrupt_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
			pdev->irq);
		return err;
	}

	mutex_lock(&dev->device_lock);
	dev->dev_state = MEI_DEV_POWER_UP;
	mei_reset(dev, 1);
	mutex_unlock(&dev->device_lock);

	/* Start timer if stopped in suspend */
	schedule_delayed_work(&dev->timer_work, HZ);

	return err;
}

static SIMPLE_DEV_PM_OPS(mei_pm_ops, mei_pci_suspend, mei_pci_resume);
#define MEI_PM_OPS	(&mei_pm_ops)
#else
#define MEI_PM_OPS	NULL
#endif /* CONFIG_PM */

/*
 * PCI driver structure
 */
static struct pci_driver mei_driver = {
	.name = KBUILD_MODNAME,
	.id_table = mei_pci_tbl,
	.probe = mei_probe,
	.remove = __devexit_p(mei_remove),
	.shutdown = __devexit_p(mei_remove),
	.driver.pm = MEI_PM_OPS,
};

module_pci_driver(mei_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");