
/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/aio.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/uuid.h>
#include <linux/compat.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>

#include "mei_dev.h"
#include <linux/mei.h>
#include "interface.h"
/* AMT device is a singleton on the platform */
static struct pci_dev *mei_pdev;

/* mei_pci_tbl - PCI Device ID Table */
static DEFINE_PCI_DEVICE_TABLE(mei_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82946GZ)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G35)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82Q965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GM965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GME965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q35)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82G33)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q33)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82X38)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_3200)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_6)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_7)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_8)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_9)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_10)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_3)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_4)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_3)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_4)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_CPT_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PBG_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_LP)},

	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, mei_pci_tbl);

static DEFINE_MUTEX(mei_mutex);
/**
 * find_read_list_entry - find read list entry
 *
 * @dev: device structure
 * @cl: host client
 *
 * returns cb on success, NULL on error
 */
static struct mei_cl_cb *find_read_list_entry(
		struct mei_device *dev,
		struct mei_cl *cl)
{
	struct mei_cl_cb *pos = NULL;
	struct mei_cl_cb *next = NULL;

	dev_dbg(&dev->pdev->dev, "remove read_list CB\n");
	list_for_each_entry_safe(pos, next, &dev->read_list.list, list)
		if (mei_cl_cmp_id(cl, pos->cl))
			return pos;
	return NULL;
}
/**
 * mei_open - the open function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * returns 0 on success, <0 on error
 */
static int mei_open(struct inode *inode, struct file *file)
{
	struct mei_cl *cl;
	struct mei_device *dev;
	unsigned long cl_id;
	int err;

	err = -ENODEV;
	if (!mei_pdev)
		goto out;

	dev = pci_get_drvdata(mei_pdev);
	if (!dev)
		goto out;

	mutex_lock(&dev->device_lock);
	err = -ENOMEM;
	cl = mei_cl_allocate(dev);
	if (!cl)
		goto out_unlock;

	err = -ENODEV;
	if (dev->dev_state != MEI_DEV_ENABLED) {
		dev_dbg(&dev->pdev->dev, "dev_state != MEI_DEV_ENABLED dev_state = %s\n",
			mei_dev_state_str(dev->dev_state));
		goto out_unlock;
	}
	err = -EMFILE;
	if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
		dev_err(&dev->pdev->dev, "open_handle_count exceeded %d",
			MEI_MAX_OPEN_HANDLE_COUNT);
		goto out_unlock;
	}
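	/* pick the first free slot in the host client id bitmap; the bit
	 * is set once the client has been linked to the file list below */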
	cl_id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
	if (cl_id >= MEI_CLIENTS_MAX) {
		dev_err(&dev->pdev->dev, "client_id exceeded %d",
			MEI_CLIENTS_MAX);
		goto out_unlock;
	}
	cl->host_client_id = cl_id;

	dev_dbg(&dev->pdev->dev, "client_id = %d\n", cl->host_client_id);

	dev->open_handle_count++;

	list_add_tail(&cl->link, &dev->file_list);

	set_bit(cl->host_client_id, dev->host_clients_map);
	cl->state = MEI_FILE_INITIALIZING;
	cl->sm_state = 0;

	file->private_data = cl;
	mutex_unlock(&dev->device_lock);

	return nonseekable_open(inode, file);

out_unlock:
	mutex_unlock(&dev->device_lock);
	kfree(cl);
out:
	return err;
}
/**
 * mei_release - the release function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * returns 0 on success, <0 on error
 */
static int mei_release(struct inode *inode, struct file *file)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *cb;
	struct mei_device *dev;
	int rets = 0;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);
	if (cl == &dev->iamthif_cl) {
		rets = mei_amthif_release(dev, file);
		goto out;
	}
	if (cl->state == MEI_FILE_CONNECTED) {
		cl->state = MEI_FILE_DISCONNECTING;
		dev_dbg(&dev->pdev->dev,
			"disconnecting client host client = %d, "
			"ME client = %d\n",
			cl->host_client_id,
			cl->me_client_id);
		rets = mei_disconnect_host_client(dev, cl);
	}
	mei_cl_flush_queues(cl);
	dev_dbg(&dev->pdev->dev, "remove client host client = %d, ME client = %d\n",
		cl->host_client_id,
		cl->me_client_id);

	if (dev->open_handle_count > 0) {
		clear_bit(cl->host_client_id, dev->host_clients_map);
		dev->open_handle_count--;
	}
	mei_me_cl_unlink(dev, cl);

	/* free read cb */
	cb = NULL;
	if (cl->read_cb) {
		cb = find_read_list_entry(dev, cl);
		/* Remove entry from read list */
		if (cb)
			list_del(&cb->list);
		cb = cl->read_cb;
		cl->read_cb = NULL;
	}

	file->private_data = NULL;

	if (cb) {
		mei_io_cb_free(cb);
		cb = NULL;
	}

	kfree(cl);
out:
	mutex_unlock(&dev->device_lock);
	return rets;
}
/**
 * mei_read - the read function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * returns >=0 data length on success, <0 on error
 */
static ssize_t mei_read(struct file *file, char __user *ubuf,
			size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *cb_pos = NULL;
	struct mei_cl_cb *cb = NULL;
	struct mei_device *dev;
	int i;
	int rets;
	int err;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	if ((cl->sm_state & MEI_WD_STATE_INDEPENDENCE_MSG_SENT) == 0) {
		/* Do not allow to read watchdog client */
		i = mei_me_cl_by_uuid(dev, &mei_wd_guid);
		if (i >= 0) {
			struct mei_me_client *me_client = &dev->me_clients[i];
			if (cl->me_client_id == me_client->client_id) {
				rets = -EBADF;
				goto out;
			}
		}
	} else {
		cl->sm_state &= ~MEI_WD_STATE_INDEPENDENCE_MSG_SENT;
	}

	if (cl == &dev->iamthif_cl) {
		rets = mei_amthif_read(dev, file, ubuf, length, offset);
		goto out;
	}

	if (cl->read_cb && cl->read_cb->buf_idx > *offset) {
		cb = cl->read_cb;
		goto copy_buffer;
	} else if (cl->read_cb && cl->read_cb->buf_idx > 0 &&
		   cl->read_cb->buf_idx <= *offset) {
		cb = cl->read_cb;
		rets = 0;
		goto free;
	} else if ((!cl->read_cb || !cl->read_cb->buf_idx) && *offset > 0) {
		/* Offset needs to be cleaned for contiguous reads */
		*offset = 0;
		rets = 0;
		goto out;
	}

	err = mei_start_read(dev, cl);
	if (err && err != -EBUSY) {
		dev_dbg(&dev->pdev->dev,
			"mei start read failure with status = %d\n", err);
		rets = err;
		goto out;
	}
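	/* nothing to copy yet: return -EAGAIN for non-blocking callers,
	 * otherwise sleep until the read completes or the client state
	 * changes */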
	if (MEI_READ_COMPLETE != cl->reading_state &&
	    !waitqueue_active(&cl->rx_wait)) {
		if (file->f_flags & O_NONBLOCK) {
			rets = -EAGAIN;
			goto out;
		}
		mutex_unlock(&dev->device_lock);

		if (wait_event_interruptible(cl->rx_wait,
			(MEI_READ_COMPLETE == cl->reading_state ||
			 MEI_FILE_INITIALIZING == cl->state ||
			 MEI_FILE_DISCONNECTED == cl->state ||
			 MEI_FILE_DISCONNECTING == cl->state))) {
			if (signal_pending(current))
				return -EINTR;
			return -ERESTARTSYS;
		}

		mutex_lock(&dev->device_lock);
		if (MEI_FILE_INITIALIZING == cl->state ||
		    MEI_FILE_DISCONNECTED == cl->state ||
		    MEI_FILE_DISCONNECTING == cl->state) {
			rets = -EBUSY;
			goto out;
		}
	}

	cb = cl->read_cb;

	if (!cb) {
		rets = -ENODEV;
		goto out;
	}
	if (cl->reading_state != MEI_READ_COMPLETE) {
		rets = 0;
		goto out;
	}
	/* now copy the data to user space */
copy_buffer:
	dev_dbg(&dev->pdev->dev, "cb->response_buffer size - %d\n",
		cb->response_buffer.size);
	dev_dbg(&dev->pdev->dev, "cb->buf_idx - %lu\n", cb->buf_idx);
	if (length == 0 || ubuf == NULL || *offset > cb->buf_idx) {
		rets = -EMSGSIZE;
		goto free;
	}

	/* length is being truncated to PAGE_SIZE,
	 * however buf_idx may point beyond that */
	length = min_t(size_t, length, cb->buf_idx - *offset);

	if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) {
		rets = -EFAULT;
		goto free;
	}

	rets = length;
	*offset += length;
	if ((unsigned long)*offset < cb->buf_idx)
		goto out;

free:
	cb_pos = find_read_list_entry(dev, cl);
	/* Remove entry from read list */
	if (cb_pos)
		list_del(&cb_pos->list);
	mei_io_cb_free(cb);
	cl->reading_state = MEI_IDLE;
	cl->read_cb = NULL;
	cl->read_pending = 0;
out:
	dev_dbg(&dev->pdev->dev, "end mei read rets = %d\n", rets);
	mutex_unlock(&dev->device_lock);
	return rets;
}
/**
 * mei_write - the write function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * returns >=0 data length on success, <0 on error
 */
static ssize_t mei_write(struct file *file, const char __user *ubuf,
			 size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *write_cb = NULL;
	struct mei_msg_hdr mei_hdr;
	struct mei_device *dev;
	unsigned long timeout = 0;
	int rets;
	int i;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto err;
	}

	i = mei_me_cl_by_id(dev, cl->me_client_id);
	if (i < 0) {
		rets = -ENODEV;
		goto err;
	}
	if (length > dev->me_clients[i].props.max_msg_length || length <= 0) {
		rets = -EMSGSIZE;
		goto err;
	}

	if (cl->state != MEI_FILE_CONNECTED) {
		rets = -ENODEV;
		dev_err(&dev->pdev->dev, "host client = %d, is not connected to ME client = %d",
			cl->host_client_id, cl->me_client_id);
		goto err;
	}
	if (cl == &dev->iamthif_cl) {
		write_cb = mei_amthif_find_read_list_entry(dev, file);

		if (write_cb) {
			timeout = write_cb->read_time +
				mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);

			if (time_after(jiffies, timeout) ||
			    cl->reading_state == MEI_READ_COMPLETE) {
				*offset = 0;
				list_del(&write_cb->list);
				mei_io_cb_free(write_cb);
				write_cb = NULL;
			}
		}
	}

	/* free entry used in read */
	if (cl->reading_state == MEI_READ_COMPLETE) {
		*offset = 0;
		write_cb = find_read_list_entry(dev, cl);
		if (write_cb) {
			list_del(&write_cb->list);
			mei_io_cb_free(write_cb);
			write_cb = NULL;
			cl->reading_state = MEI_IDLE;
			cl->read_cb = NULL;
			cl->read_pending = 0;
		}
	} else if (cl->reading_state == MEI_IDLE && !cl->read_pending)
		*offset = 0;

	write_cb = mei_io_cb_init(cl, file);
	if (!write_cb) {
		dev_err(&dev->pdev->dev, "write cb allocation failed\n");
		rets = -ENOMEM;
		goto err;
	}
	rets = mei_io_cb_alloc_req_buf(write_cb, length);
	if (rets)
		goto err;

	dev_dbg(&dev->pdev->dev, "cb request size = %zd\n", length);

	if (copy_from_user(write_cb->request_buffer.data, ubuf, length)) {
		rets = -EFAULT;
		goto err;
	}

	cl->sm_state = 0;
	if (length == 4 &&
	    ((memcmp(mei_wd_state_independence_msg[0],
		     write_cb->request_buffer.data, 4) == 0) ||
	     (memcmp(mei_wd_state_independence_msg[1],
		     write_cb->request_buffer.data, 4) == 0) ||
	     (memcmp(mei_wd_state_independence_msg[2],
		     write_cb->request_buffer.data, 4) == 0)))
		cl->sm_state |= MEI_WD_STATE_INDEPENDENCE_MSG_SENT;

	if (cl == &dev->iamthif_cl) {
		rets = mei_amthif_write(dev, write_cb);

		if (rets) {
			dev_err(&dev->pdev->dev,
				"amthi write failed with status = %d\n", rets);
			goto err;
		}
		mutex_unlock(&dev->device_lock);
		return length;
	}

	write_cb->fop_type = MEI_FOP_WRITE;

	dev_dbg(&dev->pdev->dev, "host client = %d, ME client = %d\n",
		cl->host_client_id, cl->me_client_id);
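	/* the message goes out now only if the client holds a flow control
	 * credit and the host buffer is free; otherwise the callback is
	 * queued on the write list and completed later */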
	rets = mei_flow_ctrl_creds(dev, cl);
	if (rets < 0)
		goto err;

	if (rets == 0 || dev->mei_host_buffer_is_empty == false) {
		write_cb->buf_idx = 0;
		mei_hdr.msg_complete = 0;
		cl->writing_state = MEI_WRITING;
		goto out;
	}

	dev->mei_host_buffer_is_empty = false;
	if (length > mei_hbuf_max_data(dev)) {
		mei_hdr.length = mei_hbuf_max_data(dev);
		mei_hdr.msg_complete = 0;
	} else {
		mei_hdr.length = length;
		mei_hdr.msg_complete = 1;
	}
	mei_hdr.host_addr = cl->host_client_id;
	mei_hdr.me_addr = cl->me_client_id;
	mei_hdr.reserved = 0;
	dev_dbg(&dev->pdev->dev, "call mei_write_message header=%08x.\n",
		*((u32 *) &mei_hdr));
	if (mei_write_message(dev, &mei_hdr,
			      write_cb->request_buffer.data, mei_hdr.length)) {
		rets = -ENODEV;
		goto err;
	}
	cl->writing_state = MEI_WRITING;
	write_cb->buf_idx = mei_hdr.length;

out:
	if (mei_hdr.msg_complete) {
		if (mei_flow_ctrl_reduce(dev, cl)) {
			rets = -ENODEV;
			goto err;
		}
		list_add_tail(&write_cb->list, &dev->write_waiting_list.list);
	} else {
		list_add_tail(&write_cb->list, &dev->write_list.list);
	}

	mutex_unlock(&dev->device_lock);
	return length;

err:
	mutex_unlock(&dev->device_lock);
	mei_io_cb_free(write_cb);
	return rets;
}
/**
 * mei_ioctl - the IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure
 *
 * returns 0 on success, <0 on error
 */
static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
{
	struct mei_device *dev;
	struct mei_cl *cl = file->private_data;
	struct mei_connect_client_data *connect_data = NULL;
	int rets;

	if (cmd != IOCTL_MEI_CONNECT_CLIENT)
		return -EINVAL;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	dev_dbg(&dev->pdev->dev, "IOCTL cmd = 0x%x", cmd);

	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	dev_dbg(&dev->pdev->dev, ": IOCTL_MEI_CONNECT_CLIENT.\n");

	connect_data = kzalloc(sizeof(struct mei_connect_client_data),
			       GFP_KERNEL);
	if (!connect_data) {
		rets = -ENOMEM;
		goto out;
	}
	dev_dbg(&dev->pdev->dev, "copy connect data from user\n");
	if (copy_from_user(connect_data, (char __user *)data,
			   sizeof(struct mei_connect_client_data))) {
		dev_dbg(&dev->pdev->dev, "failed to copy data from userland\n");
		rets = -EFAULT;
		goto out;
	}
	rets = mei_ioctl_connect_client(file, connect_data);

	/* if all is ok, copying the data back to user. */
	if (rets)
		goto out;

	dev_dbg(&dev->pdev->dev, "copy connect data to user\n");
	if (copy_to_user((char __user *)data, connect_data,
			 sizeof(struct mei_connect_client_data))) {
		dev_dbg(&dev->pdev->dev, "failed to copy data to userland\n");
		rets = -EFAULT;
		goto out;
	}

out:
	kfree(connect_data);
	mutex_unlock(&dev->device_lock);
	return rets;
}
/**
 * mei_compat_ioctl - the compat IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure
 *
 * returns 0 on success, <0 on error
 */
#ifdef CONFIG_COMPAT
static long mei_compat_ioctl(struct file *file,
			     unsigned int cmd, unsigned long data)
{
	return mei_ioctl(file, cmd, (unsigned long)compat_ptr(data));
}
#endif
/**
 * mei_poll - the poll function
 *
 * @file: pointer to file structure
 * @wait: pointer to poll_table structure
 *
 * returns poll mask
 */
static unsigned int mei_poll(struct file *file, poll_table *wait)
{
	struct mei_cl *cl = file->private_data;
	struct mei_device *dev;
	unsigned int mask = 0;

	if (WARN_ON(!cl || !cl->dev))
		return mask;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	if (dev->dev_state != MEI_DEV_ENABLED)
		goto out;

	if (cl == &dev->iamthif_cl) {
		mask = mei_amthif_poll(dev, file, wait);
		goto out;
	}

	mutex_unlock(&dev->device_lock);
	poll_wait(file, &cl->tx_wait, wait);
	mutex_lock(&dev->device_lock);
	if (MEI_WRITE_COMPLETE == cl->writing_state)
		mask |= (POLLIN | POLLRDNORM);

out:
	mutex_unlock(&dev->device_lock);
	return mask;
}
/*
 * file operations structure will be used for mei char device.
 */
static const struct file_operations mei_fops = {
	.owner = THIS_MODULE,
	.read = mei_read,
	.unlocked_ioctl = mei_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = mei_compat_ioctl,
#endif
	.open = mei_open,
	.release = mei_release,
	.write = mei_write,
	.poll = mei_poll,
	.llseek = no_llseek
};

/*
 * Misc Device Struct
 */
static struct miscdevice mei_misc_device = {
	.name = "mei",
	.fops = &mei_fops,
	.minor = MISC_DYNAMIC_MINOR,
};
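/*
 * Illustrative usage only (not part of the driver): a userspace client
 * typically opens /dev/mei, connects to an ME client by UUID through
 * IOCTL_MEI_CONNECT_CLIENT and then exchanges messages with plain
 * read()/write(). The UUID below is a placeholder, not a real ME client.
 *
 *	struct mei_connect_client_data data = {
 *		.in_client_uuid = UUID_LE(0x12345678, 0x9abc, 0xdef0,
 *					  0x12, 0x34, 0x56, 0x78,
 *					  0x9a, 0xbc, 0xde, 0xf0),
 *	};
 *	int fd = open("/dev/mei", O_RDWR);
 *
 *	if (fd >= 0 && !ioctl(fd, IOCTL_MEI_CONNECT_CLIENT, &data)) {
 *		write(fd, req, req_len);
 *		read(fd, rsp, sizeof(rsp));
 *	}
 */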
/**
 * mei_quirk_probe - probe for devices that don't have a valid ME interface
 * @pdev: PCI device structure
 * @ent: entry into pci_device_table
 *
 * returns true if ME Interface is valid, false otherwise
 */
static bool mei_quirk_probe(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	u32 reg;

	if (ent->device == MEI_DEV_ID_PBG_1) {
		pci_read_config_dword(pdev, 0x48, &reg);
		/* make sure that bit 9 is up and bit 10 is down */
		if ((reg & 0x600) == 0x200) {
			dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
			return false;
		}
	}
	return true;
}
/**
 * mei_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_pci_tbl
 *
 * returns 0 on success, <0 on failure.
 */
static int mei_probe(struct pci_dev *pdev,
		     const struct pci_device_id *ent)
{
	struct mei_device *dev;
	int err;

	mutex_lock(&mei_mutex);

	if (!mei_quirk_probe(pdev, ent)) {
		err = -ENODEV;
		goto end;
	}

	if (mei_pdev) {
		err = -EEXIST;
		goto end;
	}
	/* enable pci dev */
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering */
	pci_set_master(pdev);
	/* pci request regions for mei driver */
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto disable_device;
	}
	/* allocates and initializes the mei dev structure */
	dev = mei_device_init(pdev);
	if (!dev) {
		err = -ENOMEM;
		goto release_regions;
	}
	/* mapping IO device memory */
	dev->mem_addr = pci_iomap(pdev, 0, 0);
	if (!dev->mem_addr) {
		dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
		err = -ENOMEM;
		goto free_device;
	}
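	/* prefer MSI; if it cannot be enabled, fall back below to a shared
	 * legacy interrupt with a quick (hard-irq) handler */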
	pci_enable_msi(pdev);

	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
					   NULL,
					   mei_interrupt_thread_handler,
					   IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
					   mei_interrupt_quick_handler,
					   mei_interrupt_thread_handler,
					   IRQF_SHARED, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
			pdev->irq);
		goto disable_msi;
	}
	INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
	INIT_WORK(&dev->init_work, mei_host_client_init);

	if (mei_hw_init(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	err = misc_register(&mei_misc_device);
	if (err)
		goto release_irq;

	mei_pdev = pdev;
	pci_set_drvdata(pdev, dev);

	schedule_delayed_work(&dev->timer_work, HZ);

	mutex_unlock(&mei_mutex);

	pr_debug("initialization successful.\n");

	return 0;

release_irq:
	/* disable interrupts */
	dev->host_hw_state = mei_hcsr_read(dev);
	mei_disable_interrupts(dev);
	flush_scheduled_work();
	free_irq(pdev->irq, dev);
disable_msi:
	pci_disable_msi(pdev);
	pci_iounmap(pdev, dev->mem_addr);
free_device:
	kfree(dev);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
end:
	mutex_unlock(&mei_mutex);
	dev_err(&pdev->dev, "initialization failed.\n");
	return err;
}
/**
 * mei_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void mei_remove(struct pci_dev *pdev)
{
	struct mei_device *dev;

	if (mei_pdev != pdev)
		return;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return;

	mutex_lock(&dev->device_lock);

	cancel_delayed_work(&dev->timer_work);

	mei_wd_stop(dev);

	mei_pdev = NULL;

	if (dev->iamthif_cl.state == MEI_FILE_CONNECTED) {
		dev->iamthif_cl.state = MEI_FILE_DISCONNECTING;
		mei_disconnect_host_client(dev, &dev->iamthif_cl);
	}
	if (dev->wd_cl.state == MEI_FILE_CONNECTED) {
		dev->wd_cl.state = MEI_FILE_DISCONNECTING;
		mei_disconnect_host_client(dev, &dev->wd_cl);
	}

	/* Unregistering watchdog device */
	mei_watchdog_unregister(dev);

	/* remove entry if already in list */
	dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n");
	mei_me_cl_unlink(dev, &dev->wd_cl);
	mei_me_cl_unlink(dev, &dev->iamthif_cl);

	dev->iamthif_current_cb = NULL;
	dev->me_clients_num = 0;

	mutex_unlock(&dev->device_lock);

	flush_scheduled_work();

	/* disable interrupts */
	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);
	pci_set_drvdata(pdev, NULL);

	if (dev->mem_addr)
		pci_iounmap(pdev, dev->mem_addr);

	kfree(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	misc_deregister(&mei_misc_device);
}
#ifdef CONFIG_PM
static int mei_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev = pci_get_drvdata(pdev);
	int err;

	if (!dev)
		return -ENODEV;
	mutex_lock(&dev->device_lock);

	cancel_delayed_work(&dev->timer_work);

	/* Stop watchdog if exists */
	err = mei_wd_stop(dev);
	/* Set new mei state */
	if (dev->dev_state == MEI_DEV_ENABLED ||
	    dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET) {
		dev->dev_state = MEI_DEV_POWER_DOWN;
		mei_reset(dev, 0);
	}
	mutex_unlock(&dev->device_lock);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	return err;
}

static int mei_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	int err;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;
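	/* the interrupt was released in suspend; re-enable MSI and request
	 * it again before bringing the device back up */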
	pci_enable_msi(pdev);

	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
					   NULL,
					   mei_interrupt_thread_handler,
					   IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
					   mei_interrupt_quick_handler,
					   mei_interrupt_thread_handler,
					   IRQF_SHARED, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
			pdev->irq);
		return err;
	}

	mutex_lock(&dev->device_lock);
	dev->dev_state = MEI_DEV_POWER_UP;
	mei_reset(dev, 1);
	mutex_unlock(&dev->device_lock);

	/* Start timer if stopped in suspend */
	schedule_delayed_work(&dev->timer_work, HZ);

	return err;
}

static SIMPLE_DEV_PM_OPS(mei_pm_ops, mei_pci_suspend, mei_pci_resume);
#define MEI_PM_OPS	(&mei_pm_ops)
#else
#define MEI_PM_OPS	NULL
#endif /* CONFIG_PM */
/*
 * PCI driver structure
 */
static struct pci_driver mei_driver = {
	.name = KBUILD_MODNAME,
	.id_table = mei_pci_tbl,
	.probe = mei_probe,
	.remove = mei_remove,
	.shutdown = mei_remove,
	.driver.pm = MEI_PM_OPS,
};

module_pci_driver(mei_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");