/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/aio.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/uuid.h>
#include <linux/compat.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hw-me.h"
#include "client.h"

/* AMT device is a singleton on the platform */
static struct pci_dev *mei_pdev;

/* mei_pci_tbl - PCI Device ID Table */
static DEFINE_PCI_DEVICE_TABLE(mei_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82946GZ)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G35)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82Q965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GM965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GME965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q35)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82G33)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q33)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82X38)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_3200)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_6)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_7)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_8)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_9)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_10)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_3)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_4)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_3)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_4)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_CPT_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PBG_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_LP)},

	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, mei_pci_tbl);

static DEFINE_MUTEX(mei_mutex);

/**
 * mei_open - the open function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * returns 0 on success, <0 on error
 */
static int mei_open(struct inode *inode, struct file *file)
{
	struct mei_cl *cl;
	struct mei_device *dev;
	unsigned long cl_id;
	int err;

	err = -ENODEV;
	if (!mei_pdev)
		goto out;

	dev = pci_get_drvdata(mei_pdev);
	if (!dev)
		goto out;

	mutex_lock(&dev->device_lock);
	err = -ENOMEM;
	cl = mei_cl_allocate(dev);
	if (!cl)
		goto out_unlock;

	err = -ENODEV;
	if (dev->dev_state != MEI_DEV_ENABLED) {
		dev_dbg(&dev->pdev->dev, "dev_state != MEI_ENABLED dev_state = %s\n",
			mei_dev_state_str(dev->dev_state));
		goto out_unlock;
	}

	err = -EMFILE;
	if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
		dev_err(&dev->pdev->dev, "open_handle_count exceeded %d",
			MEI_MAX_OPEN_HANDLE_COUNT);
		goto out_unlock;
	}

	cl_id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
	if (cl_id >= MEI_CLIENTS_MAX) {
		dev_err(&dev->pdev->dev, "client_id exceeded %d",
			MEI_CLIENTS_MAX);
		goto out_unlock;
	}

	cl->host_client_id = cl_id;

	dev_dbg(&dev->pdev->dev, "client_id = %d\n", cl->host_client_id);

	dev->open_handle_count++;

	list_add_tail(&cl->link, &dev->file_list);

	set_bit(cl->host_client_id, dev->host_clients_map);
	cl->state = MEI_FILE_INITIALIZING;
	cl->sm_state = 0;

	file->private_data = cl;
	mutex_unlock(&dev->device_lock);

	return nonseekable_open(inode, file);

out_unlock:
	mutex_unlock(&dev->device_lock);
	kfree(cl);
out:
	return err;
}

/**
 * mei_release - the release function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * returns 0 on success, <0 on error
 */
static int mei_release(struct inode *inode, struct file *file)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *cb;
	struct mei_device *dev;
	int rets = 0;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);
	if (cl == &dev->iamthif_cl) {
		rets = mei_amthif_release(dev, file);
		goto out;
	}
	if (cl->state == MEI_FILE_CONNECTED) {
		cl->state = MEI_FILE_DISCONNECTING;
		dev_dbg(&dev->pdev->dev,
			"disconnecting client host client = %d, ME client = %d\n",
			cl->host_client_id,
			cl->me_client_id);
		rets = mei_cl_disconnect(cl);
	}
	mei_cl_flush_queues(cl);
	dev_dbg(&dev->pdev->dev, "remove client host client = %d, ME client = %d\n",
		cl->host_client_id,
		cl->me_client_id);

	if (dev->open_handle_count > 0) {
		clear_bit(cl->host_client_id, dev->host_clients_map);
		dev->open_handle_count--;
	}
	mei_cl_unlink(cl);

	/* free read cb */
	cb = NULL;
	if (cl->read_cb) {
		cb = mei_cl_find_read_cb(cl);
		/* Remove entry from read list */
		if (cb)
			list_del(&cb->list);

		cb = cl->read_cb;
		cl->read_cb = NULL;
	}

	file->private_data = NULL;

	if (cb) {
		mei_io_cb_free(cb);
		cb = NULL;
	}

	kfree(cl);
out:
	mutex_unlock(&dev->device_lock);
	return rets;
}

/**
 * mei_read - the read function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * returns >=0 data length on success, <0 on error
 */
static ssize_t mei_read(struct file *file, char __user *ubuf,
			size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *cb_pos = NULL;
	struct mei_cl_cb *cb = NULL;
	struct mei_device *dev;
	int i;
	int rets;
	int err;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	if ((cl->sm_state & MEI_WD_STATE_INDEPENDENCE_MSG_SENT) == 0) {
		/* Do not allow to read watchdog client */
		i = mei_me_cl_by_uuid(dev, &mei_wd_guid);
		if (i >= 0) {
			struct mei_me_client *me_client = &dev->me_clients[i];
			if (cl->me_client_id == me_client->client_id) {
				rets = -EBADF;
				goto out;
			}
		}
	} else {
		cl->sm_state &= ~MEI_WD_STATE_INDEPENDENCE_MSG_SENT;
	}

	if (cl == &dev->iamthif_cl) {
		rets = mei_amthif_read(dev, file, ubuf, length, offset);
		goto out;
	}

	if (cl->read_cb && cl->read_cb->buf_idx > *offset) {
		cb = cl->read_cb;
		goto copy_buffer;
	} else if (cl->read_cb && cl->read_cb->buf_idx > 0 &&
		   cl->read_cb->buf_idx <= *offset) {
		cb = cl->read_cb;
		rets = 0;
		goto free;
	} else if ((!cl->read_cb || !cl->read_cb->buf_idx) && *offset > 0) {
		/* Offset needs to be cleaned for contiguous reads */
		*offset = 0;
		rets = 0;
		goto out;
	}

	err = mei_cl_read_start(cl);
	if (err && err != -EBUSY) {
		dev_dbg(&dev->pdev->dev,
			"mei start read failure with status = %d\n", err);
		rets = err;
		goto out;
	}

	if (MEI_READ_COMPLETE != cl->reading_state &&
	    !waitqueue_active(&cl->rx_wait)) {
		if (file->f_flags & O_NONBLOCK) {
			rets = -EAGAIN;
			goto out;
		}

		mutex_unlock(&dev->device_lock);

		if (wait_event_interruptible(cl->rx_wait,
				(MEI_READ_COMPLETE == cl->reading_state ||
				 MEI_FILE_INITIALIZING == cl->state ||
				 MEI_FILE_DISCONNECTED == cl->state ||
				 MEI_FILE_DISCONNECTING == cl->state))) {
			if (signal_pending(current))
				return -EINTR;
			return -ERESTARTSYS;
		}

		mutex_lock(&dev->device_lock);
		if (MEI_FILE_INITIALIZING == cl->state ||
		    MEI_FILE_DISCONNECTED == cl->state ||
		    MEI_FILE_DISCONNECTING == cl->state) {
			rets = -EBUSY;
			goto out;
		}
	}

	cb = cl->read_cb;

	if (!cb) {
		rets = -ENODEV;
		goto out;
	}
	if (cl->reading_state != MEI_READ_COMPLETE) {
		rets = 0;
		goto out;
	}
	/* now copy the data to user space */
copy_buffer:
	dev_dbg(&dev->pdev->dev, "cb->response_buffer size - %d\n",
		cb->response_buffer.size);
	dev_dbg(&dev->pdev->dev, "cb->buf_idx - %lu\n", cb->buf_idx);
	if (length == 0 || ubuf == NULL || *offset > cb->buf_idx) {
		rets = -EMSGSIZE;
		goto free;
	}

	/* length is being truncated to PAGE_SIZE,
	 * however buf_idx may point beyond that */
	length = min_t(size_t, length, cb->buf_idx - *offset);

	if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) {
		rets = -EFAULT;
		goto free;
	}

	rets = length;
	*offset += length;
	if ((unsigned long)*offset < cb->buf_idx)
		goto out;

free:
	cb_pos = mei_cl_find_read_cb(cl);
	/* Remove entry from read list */
	if (cb_pos)
		list_del(&cb_pos->list);
	mei_io_cb_free(cb);
	cl->reading_state = MEI_IDLE;
	cl->read_cb = NULL;
out:
	dev_dbg(&dev->pdev->dev, "end mei read rets= %d\n", rets);
	mutex_unlock(&dev->device_lock);
	return rets;
}

/**
 * mei_write - the write function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * returns >=0 data length on success, <0 on error
 */
static ssize_t mei_write(struct file *file, const char __user *ubuf,
			 size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *write_cb = NULL;
	struct mei_msg_hdr mei_hdr;
	struct mei_device *dev;
	unsigned long timeout = 0;
	int rets;
	int i;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto err;
	}

	i = mei_me_cl_by_id(dev, cl->me_client_id);
	if (i < 0) {
		rets = -ENODEV;
		goto err;
	}
	if (length > dev->me_clients[i].props.max_msg_length || length <= 0) {
		rets = -EMSGSIZE;
		goto err;
	}

	if (cl->state != MEI_FILE_CONNECTED) {
		rets = -ENODEV;
		dev_err(&dev->pdev->dev, "host client = %d, is not connected to ME client = %d",
			cl->host_client_id, cl->me_client_id);
		goto err;
	}
	if (cl == &dev->iamthif_cl) {
		write_cb = mei_amthif_find_read_list_entry(dev, file);

		if (write_cb) {
			timeout = write_cb->read_time +
				mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);

			if (time_after(jiffies, timeout) ||
			    cl->reading_state == MEI_READ_COMPLETE) {
				*offset = 0;
				list_del(&write_cb->list);
				mei_io_cb_free(write_cb);
				write_cb = NULL;
			}
		}
	}

	/* free entry used in read */
	if (cl->reading_state == MEI_READ_COMPLETE) {
		*offset = 0;
		write_cb = mei_cl_find_read_cb(cl);
		if (write_cb) {
			list_del(&write_cb->list);
			mei_io_cb_free(write_cb);
			write_cb = NULL;
			cl->reading_state = MEI_IDLE;
			cl->read_cb = NULL;
		}
	} else if (cl->reading_state == MEI_IDLE)
		*offset = 0;

	write_cb = mei_io_cb_init(cl, file);
	if (!write_cb) {
		dev_err(&dev->pdev->dev, "write cb allocation failed\n");
		rets = -ENOMEM;
		goto err;
	}
	rets = mei_io_cb_alloc_req_buf(write_cb, length);
	if (rets)
		goto err;

	dev_dbg(&dev->pdev->dev, "cb request size = %zd\n", length);

	rets = copy_from_user(write_cb->request_buffer.data, ubuf, length);
	if (rets)
		goto err;

	cl->sm_state = 0;
	if (length == 4 &&
	    ((memcmp(mei_wd_state_independence_msg[0],
		     write_cb->request_buffer.data, 4) == 0) ||
	     (memcmp(mei_wd_state_independence_msg[1],
		     write_cb->request_buffer.data, 4) == 0) ||
	     (memcmp(mei_wd_state_independence_msg[2],
		     write_cb->request_buffer.data, 4) == 0)))
		cl->sm_state |= MEI_WD_STATE_INDEPENDENCE_MSG_SENT;

	if (cl == &dev->iamthif_cl) {
		rets = mei_amthif_write(dev, write_cb);

		if (rets) {
			dev_err(&dev->pdev->dev,
				"amthi write failed with status = %d\n", rets);
			goto err;
		}
		mutex_unlock(&dev->device_lock);
		return length;
	}

	write_cb->fop_type = MEI_FOP_WRITE;

	dev_dbg(&dev->pdev->dev, "host client = %d, ME client = %d\n",
		cl->host_client_id, cl->me_client_id);
	rets = mei_cl_flow_ctrl_creds(cl);
	if (rets < 0)
		goto err;

	if (rets == 0 || dev->mei_host_buffer_is_empty == false) {
		write_cb->buf_idx = 0;
		mei_hdr.msg_complete = 0;
		cl->writing_state = MEI_WRITING;
		goto out;
	}

	dev->mei_host_buffer_is_empty = false;
	if (length > mei_hbuf_max_data(dev)) {
		mei_hdr.length = mei_hbuf_max_data(dev);
		mei_hdr.msg_complete = 0;
	} else {
		mei_hdr.length = length;
		mei_hdr.msg_complete = 1;
	}
	mei_hdr.host_addr = cl->host_client_id;
	mei_hdr.me_addr = cl->me_client_id;
	mei_hdr.reserved = 0;
	dev_dbg(&dev->pdev->dev, "write " MEI_HDR_FMT "\n",
		MEI_HDR_PRM(&mei_hdr));

	if (mei_write_message(dev, &mei_hdr, write_cb->request_buffer.data)) {
		rets = -ENODEV;
		goto err;
	}
	cl->writing_state = MEI_WRITING;
	write_cb->buf_idx = mei_hdr.length;

out:
	if (mei_hdr.msg_complete) {
		if (mei_cl_flow_ctrl_reduce(cl)) {
			rets = -ENODEV;
			goto err;
		}
		list_add_tail(&write_cb->list, &dev->write_waiting_list.list);
	} else {
		list_add_tail(&write_cb->list, &dev->write_list.list);
	}

	mutex_unlock(&dev->device_lock);
	return length;

err:
	mutex_unlock(&dev->device_lock);
	mei_io_cb_free(write_cb);
	return rets;
}

/**
 * mei_ioctl_connect_client - the connect to fw client IOCTL function
 *
 * @file: private data of the file object
 * @data: IOCTL connect data, input and output parameters
 *
 * Locking: called under "dev->device_lock" lock
 *
 * returns 0 on success, <0 on failure.
 */
static int mei_ioctl_connect_client(struct file *file,
			struct mei_connect_client_data *data)
{
	struct mei_device *dev;
	struct mei_client *client;
	struct mei_cl *cl;
	int i;
	int rets;

	cl = file->private_data;
	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto end;
	}

	if (cl->state != MEI_FILE_INITIALIZING &&
	    cl->state != MEI_FILE_DISCONNECTED) {
		rets = -EBUSY;
		goto end;
	}

	/* find ME client we're trying to connect to */
	i = mei_me_cl_by_uuid(dev, &data->in_client_uuid);
	if (i >= 0 && !dev->me_clients[i].props.fixed_address) {
		cl->me_client_id = dev->me_clients[i].client_id;
		cl->state = MEI_FILE_CONNECTING;
	}

	dev_dbg(&dev->pdev->dev, "Connect to FW Client ID = %d\n",
			cl->me_client_id);
	dev_dbg(&dev->pdev->dev, "FW Client - Protocol Version = %d\n",
			dev->me_clients[i].props.protocol_version);
	dev_dbg(&dev->pdev->dev, "FW Client - Max Msg Len = %d\n",
			dev->me_clients[i].props.max_msg_length);

	/* if we're connecting to amthi client then we will use the
	 * existing connection
	 */
	if (uuid_le_cmp(data->in_client_uuid, mei_amthi_guid) == 0) {
		dev_dbg(&dev->pdev->dev, "FW Client is amthi\n");
		if (dev->iamthif_cl.state != MEI_FILE_CONNECTED) {
			rets = -ENODEV;
			goto end;
		}
		clear_bit(cl->host_client_id, dev->host_clients_map);
		mei_cl_unlink(cl);

		kfree(cl);
		cl = NULL;
		file->private_data = &dev->iamthif_cl;

		client = &data->out_client_properties;
		client->max_msg_length =
			dev->me_clients[i].props.max_msg_length;
		client->protocol_version =
			dev->me_clients[i].props.protocol_version;
		rets = dev->iamthif_cl.status;

		goto end;
	}

	if (cl->state != MEI_FILE_CONNECTING) {
		rets = -ENODEV;
		goto end;
	}

	/* prepare the output buffer */
	client = &data->out_client_properties;
	client->max_msg_length = dev->me_clients[i].props.max_msg_length;
	client->protocol_version = dev->me_clients[i].props.protocol_version;
	dev_dbg(&dev->pdev->dev, "Can connect?\n");

	rets = mei_cl_connect(cl, file);

end:
	dev_dbg(&dev->pdev->dev, "free connect cb memory.");
	return rets;
}

/**
 * mei_ioctl - the IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure
 *
 * returns 0 on success, <0 on error
 */
static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
{
	struct mei_device *dev;
	struct mei_cl *cl = file->private_data;
	struct mei_connect_client_data *connect_data = NULL;
	int rets;

	if (cmd != IOCTL_MEI_CONNECT_CLIENT)
		return -EINVAL;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	dev_dbg(&dev->pdev->dev, "IOCTL cmd = 0x%x", cmd);

	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	dev_dbg(&dev->pdev->dev, ": IOCTL_MEI_CONNECT_CLIENT.\n");

	connect_data = kzalloc(sizeof(struct mei_connect_client_data),
							GFP_KERNEL);
	if (!connect_data) {
		rets = -ENOMEM;
		goto out;
	}
	dev_dbg(&dev->pdev->dev, "copy connect data from user\n");
	if (copy_from_user(connect_data, (char __user *)data,
				sizeof(struct mei_connect_client_data))) {
		dev_dbg(&dev->pdev->dev, "failed to copy data from userland\n");
		rets = -EFAULT;
		goto out;
	}

	rets = mei_ioctl_connect_client(file, connect_data);

	/* if all is ok, copying the data back to user. */
	if (rets)
		goto out;

	dev_dbg(&dev->pdev->dev, "copy connect data to user\n");
	if (copy_to_user((char __user *)data, connect_data,
				sizeof(struct mei_connect_client_data))) {
		dev_dbg(&dev->pdev->dev, "failed to copy data to userland\n");
		rets = -EFAULT;
		goto out;
	}

out:
	kfree(connect_data);
	mutex_unlock(&dev->device_lock);
	return rets;
}
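
/*
 * Illustrative userspace use of IOCTL_MEI_CONNECT_CLIENT (a minimal sketch,
 * not part of this driver; "client_uuid" stands for the UUID of whatever
 * firmware client the caller wants to reach):
 *
 *	struct mei_connect_client_data data = { 0 };
 *	int fd = open("/dev/mei", O_RDWR);
 *
 *	memcpy(&data.in_client_uuid, &client_uuid,
 *	       sizeof(data.in_client_uuid));
 *	if (fd >= 0 && ioctl(fd, IOCTL_MEI_CONNECT_CLIENT, &data) == 0) {
 *		... data.out_client_properties now reports max_msg_length
 *		    and protocol_version; subsequent write()/read() calls on
 *		    fd exchange messages with the connected client ...
 *	}
 */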

/**
 * mei_compat_ioctl - the compat IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure
 *
 * returns 0 on success, <0 on error
 */
#ifdef CONFIG_COMPAT
static long mei_compat_ioctl(struct file *file,
			unsigned int cmd, unsigned long data)
{
	return mei_ioctl(file, cmd, (unsigned long)compat_ptr(data));
}
#endif

/**
 * mei_poll - the poll function
 *
 * @file: pointer to file structure
 * @wait: pointer to poll_table structure
 *
 * returns poll mask
 */
static unsigned int mei_poll(struct file *file, poll_table *wait)
{
	struct mei_cl *cl = file->private_data;
	struct mei_device *dev;
	unsigned int mask = 0;

	if (WARN_ON(!cl || !cl->dev))
		return mask;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	if (dev->dev_state != MEI_DEV_ENABLED)
		goto out;

	if (cl == &dev->iamthif_cl) {
		mask = mei_amthif_poll(dev, file, wait);
		goto out;
	}

	mutex_unlock(&dev->device_lock);
	poll_wait(file, &cl->tx_wait, wait);
	mutex_lock(&dev->device_lock);
	if (MEI_WRITE_COMPLETE == cl->writing_state)
		mask |= (POLLIN | POLLRDNORM);

out:
	mutex_unlock(&dev->device_lock);
	return mask;
}

/*
 * file operations structure will be used for mei char device.
 */
static const struct file_operations mei_fops = {
	.owner = THIS_MODULE,
	.read = mei_read,
	.unlocked_ioctl = mei_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = mei_compat_ioctl,
#endif
	.open = mei_open,
	.release = mei_release,
	.write = mei_write,
	.poll = mei_poll,
	.llseek = no_llseek
};

/*
 * Misc Device Struct
 */
static struct miscdevice mei_misc_device = {
	.name = "mei",
	.fops = &mei_fops,
	.minor = MISC_DYNAMIC_MINOR,
};

/**
 * mei_quirk_probe - probe for devices that don't have a valid ME interface
 *
 * @pdev: PCI device structure
 * @ent: entry into pci_device_table
 *
 * returns true if ME Interface is valid, false otherwise
 */
static bool mei_quirk_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	u32 reg;

	if (ent->device == MEI_DEV_ID_PBG_1) {
		pci_read_config_dword(pdev, 0x48, &reg);
		/* make sure that bit 9 is up and bit 10 is down */
		if ((reg & 0x600) == 0x200) {
			dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
			return false;
		}
	}
	return true;
}

/**
 * mei_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_pci_tbl
 *
 * returns 0 on success, <0 on failure.
 */
static int mei_probe(struct pci_dev *pdev,
		     const struct pci_device_id *ent)
{
	struct mei_device *dev;
	int err;

	mutex_lock(&mei_mutex);

	if (!mei_quirk_probe(pdev, ent)) {
		err = -ENODEV;
		goto end;
	}

	if (mei_pdev) {
		err = -EEXIST;
		goto end;
	}
	/* enable pci dev */
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering */
	pci_set_master(pdev);
	/* pci request regions for mei driver */
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto disable_device;
	}
	/* allocates and initializes the mei dev structure */
	dev = mei_device_init(pdev);
	if (!dev) {
		err = -ENOMEM;
		goto release_regions;
	}
	/* mapping IO device memory */
	dev->mem_addr = pci_iomap(pdev, 0, 0);
	if (!dev->mem_addr) {
		dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
		err = -ENOMEM;
		goto free_device;
	}
	pci_enable_msi(pdev);

	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_interrupt_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_interrupt_quick_handler,
			mei_interrupt_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
			pdev->irq);
		goto disable_msi;
	}

	INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
	INIT_WORK(&dev->init_work, mei_host_client_init);

	if (mei_hw_init(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	err = misc_register(&mei_misc_device);
	if (err)
		goto release_irq;

	mei_pdev = pdev;
	pci_set_drvdata(pdev, dev);

	schedule_delayed_work(&dev->timer_work, HZ);

	mutex_unlock(&mei_mutex);

	pr_debug("initialization successful.\n");

	return 0;

release_irq:
	/* disable interrupts */
	dev->host_hw_state = mei_hcsr_read(dev);
	mei_disable_interrupts(dev);
	flush_scheduled_work();
	free_irq(pdev->irq, dev);
disable_msi:
	pci_disable_msi(pdev);
	pci_iounmap(pdev, dev->mem_addr);
free_device:
	kfree(dev);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
end:
	mutex_unlock(&mei_mutex);
	dev_err(&pdev->dev, "initialization failed.\n");
	return err;
}

/**
 * mei_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void mei_remove(struct pci_dev *pdev)
{
	struct mei_device *dev;

	if (mei_pdev != pdev)
		return;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return;

	mutex_lock(&dev->device_lock);

	cancel_delayed_work(&dev->timer_work);

	mei_wd_stop(dev);

	mei_pdev = NULL;

	if (dev->iamthif_cl.state == MEI_FILE_CONNECTED) {
		dev->iamthif_cl.state = MEI_FILE_DISCONNECTING;
		mei_cl_disconnect(&dev->iamthif_cl);
	}
	if (dev->wd_cl.state == MEI_FILE_CONNECTED) {
		dev->wd_cl.state = MEI_FILE_DISCONNECTING;
		mei_cl_disconnect(&dev->wd_cl);
	}

	/* Unregistering watchdog device */
	mei_watchdog_unregister(dev);

	/* remove entry if already in list */
	dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n");
	mei_cl_unlink(&dev->wd_cl);
	mei_cl_unlink(&dev->iamthif_cl);

	dev->iamthif_current_cb = NULL;
	dev->me_clients_num = 0;

	mutex_unlock(&dev->device_lock);

	flush_scheduled_work();

	/* disable interrupts */
	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);
	pci_set_drvdata(pdev, NULL);

	if (dev->mem_addr)
		pci_iounmap(pdev, dev->mem_addr);

	kfree(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	misc_deregister(&mei_misc_device);
}

#ifdef CONFIG_PM
static int mei_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev = pci_get_drvdata(pdev);
	int err;

	if (!dev)
		return -ENODEV;
	mutex_lock(&dev->device_lock);

	cancel_delayed_work(&dev->timer_work);

	/* Stop watchdog if exists */
	err = mei_wd_stop(dev);
	/* Set new mei state */
	if (dev->dev_state == MEI_DEV_ENABLED ||
	    dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET) {
		dev->dev_state = MEI_DEV_POWER_DOWN;
		mei_reset(dev, 0);
	}
	mutex_unlock(&dev->device_lock);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	return err;
}

static int mei_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	int err;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	pci_enable_msi(pdev);

	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_interrupt_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_interrupt_quick_handler,
			mei_interrupt_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
			pdev->irq);
		return err;
	}

	mutex_lock(&dev->device_lock);
	dev->dev_state = MEI_DEV_POWER_UP;
	mei_reset(dev, 1);
	mutex_unlock(&dev->device_lock);

	/* Start timer if stopped in suspend */
	schedule_delayed_work(&dev->timer_work, HZ);

	return err;
}

static SIMPLE_DEV_PM_OPS(mei_pm_ops, mei_pci_suspend, mei_pci_resume);
#define MEI_PM_OPS	(&mei_pm_ops)
#else
#define MEI_PM_OPS	NULL
#endif /* CONFIG_PM */

/*
 * PCI driver structure
 */
static struct pci_driver mei_driver = {
	.name = KBUILD_MODNAME,
	.id_table = mei_pci_tbl,
	.probe = mei_probe,
	.remove = mei_remove,
	.shutdown = mei_remove,
	.driver.pm = MEI_PM_OPS,
};

module_pci_driver(mei_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");