main.c 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041
  1. /*
  2. *
  3. * Intel Management Engine Interface (Intel MEI) Linux driver
  4. * Copyright (c) 2003-2012, Intel Corporation.
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms and conditions of the GNU General Public License,
  8. * version 2, as published by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope it will be useful, but WITHOUT
  11. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  13. * more details.
  14. *
  15. */
  16. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  17. #include <linux/module.h>
  18. #include <linux/moduleparam.h>
  19. #include <linux/kernel.h>
  20. #include <linux/device.h>
  21. #include <linux/fs.h>
  22. #include <linux/errno.h>
  23. #include <linux/types.h>
  24. #include <linux/fcntl.h>
  25. #include <linux/aio.h>
  26. #include <linux/pci.h>
  27. #include <linux/poll.h>
  28. #include <linux/init.h>
  29. #include <linux/ioctl.h>
  30. #include <linux/cdev.h>
  31. #include <linux/sched.h>
  32. #include <linux/uuid.h>
  33. #include <linux/compat.h>
  34. #include <linux/jiffies.h>
  35. #include <linux/interrupt.h>
  36. #include <linux/miscdevice.h>
  37. #include "mei_dev.h"
  38. #include <linux/mei.h>
  39. #include "interface.h"
/* AMT device is a singleton on the platform */
static struct pci_dev *mei_pdev;

/* mei_pci_tbl - PCI Device ID Table
 *
 * Every Intel chipset generation that exposes the ME interface gets an
 * entry here; the table is exported via MODULE_DEVICE_TABLE() below so
 * udev can autoload the module.
 * NOTE(review): DEFINE_PCI_DEVICE_TABLE was later deprecated in mainline
 * in favor of a plain const struct pci_device_id[] — confirm tree policy.
 */
static DEFINE_PCI_DEVICE_TABLE(mei_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82946GZ)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G35)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82Q965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GM965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GME965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q35)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82G33)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q33)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82X38)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_3200)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_6)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_7)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_8)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_9)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_10)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_3)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_4)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_3)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_4)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_CPT_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PBG_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_LP)},

	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, mei_pci_tbl);

/* serializes probe/remove against the mei_pdev singleton pointer */
static DEFINE_MUTEX(mei_mutex);
  82. /**
  83. * find_read_list_entry - find read list entry
  84. *
  85. * @dev: device structure
  86. * @file: pointer to file structure
  87. *
  88. * returns cb on success, NULL on error
  89. */
  90. static struct mei_cl_cb *find_read_list_entry(
  91. struct mei_device *dev,
  92. struct mei_cl *cl)
  93. {
  94. struct mei_cl_cb *pos = NULL;
  95. struct mei_cl_cb *next = NULL;
  96. dev_dbg(&dev->pdev->dev, "remove read_list CB\n");
  97. list_for_each_entry_safe(pos, next, &dev->read_list.list, list)
  98. if (mei_cl_cmp_id(cl, pos->cl))
  99. return pos;
  100. return NULL;
  101. }
  102. /**
  103. * mei_open - the open function
  104. *
  105. * @inode: pointer to inode structure
  106. * @file: pointer to file structure
  107. *
  108. * returns 0 on success, <0 on error
  109. */
  110. static int mei_open(struct inode *inode, struct file *file)
  111. {
  112. struct mei_cl *cl;
  113. struct mei_device *dev;
  114. unsigned long cl_id;
  115. int err;
  116. err = -ENODEV;
  117. if (!mei_pdev)
  118. goto out;
  119. dev = pci_get_drvdata(mei_pdev);
  120. if (!dev)
  121. goto out;
  122. mutex_lock(&dev->device_lock);
  123. err = -ENOMEM;
  124. cl = mei_cl_allocate(dev);
  125. if (!cl)
  126. goto out_unlock;
  127. err = -ENODEV;
  128. if (dev->dev_state != MEI_DEV_ENABLED) {
  129. dev_dbg(&dev->pdev->dev, "dev_state != MEI_ENABLED dev_state = %s\n",
  130. mei_dev_state_str(dev->dev_state));
  131. goto out_unlock;
  132. }
  133. err = -EMFILE;
  134. if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
  135. dev_err(&dev->pdev->dev, "open_handle_count exceded %d",
  136. MEI_MAX_OPEN_HANDLE_COUNT);
  137. goto out_unlock;
  138. }
  139. cl_id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
  140. if (cl_id >= MEI_CLIENTS_MAX) {
  141. dev_err(&dev->pdev->dev, "client_id exceded %d",
  142. MEI_CLIENTS_MAX) ;
  143. goto out_unlock;
  144. }
  145. cl->host_client_id = cl_id;
  146. dev_dbg(&dev->pdev->dev, "client_id = %d\n", cl->host_client_id);
  147. dev->open_handle_count++;
  148. list_add_tail(&cl->link, &dev->file_list);
  149. set_bit(cl->host_client_id, dev->host_clients_map);
  150. cl->state = MEI_FILE_INITIALIZING;
  151. cl->sm_state = 0;
  152. file->private_data = cl;
  153. mutex_unlock(&dev->device_lock);
  154. return nonseekable_open(inode, file);
  155. out_unlock:
  156. mutex_unlock(&dev->device_lock);
  157. kfree(cl);
  158. out:
  159. return err;
  160. }
  161. /**
  162. * mei_release - the release function
  163. *
  164. * @inode: pointer to inode structure
  165. * @file: pointer to file structure
  166. *
  167. * returns 0 on success, <0 on error
  168. */
  169. static int mei_release(struct inode *inode, struct file *file)
  170. {
  171. struct mei_cl *cl = file->private_data;
  172. struct mei_cl_cb *cb;
  173. struct mei_device *dev;
  174. int rets = 0;
  175. if (WARN_ON(!cl || !cl->dev))
  176. return -ENODEV;
  177. dev = cl->dev;
  178. mutex_lock(&dev->device_lock);
  179. if (cl == &dev->iamthif_cl) {
  180. rets = mei_amthif_release(dev, file);
  181. goto out;
  182. }
  183. if (cl->state == MEI_FILE_CONNECTED) {
  184. cl->state = MEI_FILE_DISCONNECTING;
  185. dev_dbg(&dev->pdev->dev,
  186. "disconnecting client host client = %d, "
  187. "ME client = %d\n",
  188. cl->host_client_id,
  189. cl->me_client_id);
  190. rets = mei_disconnect_host_client(dev, cl);
  191. }
  192. mei_cl_flush_queues(cl);
  193. dev_dbg(&dev->pdev->dev, "remove client host client = %d, ME client = %d\n",
  194. cl->host_client_id,
  195. cl->me_client_id);
  196. if (dev->open_handle_count > 0) {
  197. clear_bit(cl->host_client_id, dev->host_clients_map);
  198. dev->open_handle_count--;
  199. }
  200. mei_remove_client_from_file_list(dev, cl->host_client_id);
  201. /* free read cb */
  202. cb = NULL;
  203. if (cl->read_cb) {
  204. cb = find_read_list_entry(dev, cl);
  205. /* Remove entry from read list */
  206. if (cb)
  207. list_del(&cb->list);
  208. cb = cl->read_cb;
  209. cl->read_cb = NULL;
  210. }
  211. file->private_data = NULL;
  212. if (cb) {
  213. mei_io_cb_free(cb);
  214. cb = NULL;
  215. }
  216. kfree(cl);
  217. out:
  218. mutex_unlock(&dev->device_lock);
  219. return rets;
  220. }
/**
 * mei_read - the read function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * returns >=0 data length on success , <0 on error
 */
static ssize_t mei_read(struct file *file, char __user *ubuf,
			size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *cb_pos = NULL;
	struct mei_cl_cb *cb = NULL;
	struct mei_device *dev;
	int i;
	int rets;
	int err;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	if ((cl->sm_state & MEI_WD_STATE_INDEPENDENCE_MSG_SENT) == 0) {
		/* Do not allow to read watchdog client */
		i = mei_me_cl_by_uuid(dev, &mei_wd_guid);
		if (i >= 0) {
			struct mei_me_client *me_client = &dev->me_clients[i];
			if (cl->me_client_id == me_client->client_id) {
				rets = -EBADF;
				goto out;
			}
		}
	} else {
		/* independence message was sent: consume the one-shot flag */
		cl->sm_state &= ~MEI_WD_STATE_INDEPENDENCE_MSG_SENT;
	}

	/* AMT host interface reads go through the amthif path */
	if (cl == &dev->iamthif_cl) {
		rets = mei_amthif_read(dev, file, ubuf, length, offset);
		goto out;
	}

	/* three cases against any completed read cb:
	 * 1) unread data remains beyond *offset -> copy it out
	 * 2) cb fully consumed (buf_idx <= *offset) -> free it, return 0 (EOF)
	 * 3) no data but a stale non-zero *offset -> reset offset, return 0
	 */
	if (cl->read_cb && cl->read_cb->buf_idx > *offset) {
		cb = cl->read_cb;
		goto copy_buffer;
	} else if (cl->read_cb && cl->read_cb->buf_idx > 0 &&
		   cl->read_cb->buf_idx <= *offset) {
		cb = cl->read_cb;
		rets = 0;
		goto free;
	} else if ((!cl->read_cb || !cl->read_cb->buf_idx) && *offset > 0) {
		/*Offset needs to be cleaned for contiguous reads*/
		*offset = 0;
		rets = 0;
		goto out;
	}

	/* no buffered data: kick off a new read; -EBUSY means one is
	 * already in flight, which is fine — we wait for it below */
	err = mei_start_read(dev, cl);
	if (err && err != -EBUSY) {
		dev_dbg(&dev->pdev->dev,
			"mei start read failure with status = %d\n", err);
		rets = err;
		goto out;
	}

	if (MEI_READ_COMPLETE != cl->reading_state &&
	    !waitqueue_active(&cl->rx_wait)) {
		if (file->f_flags & O_NONBLOCK) {
			rets = -EAGAIN;
			goto out;
		}
		/* drop the device lock while sleeping for completion */
		mutex_unlock(&dev->device_lock);

		if (wait_event_interruptible(cl->rx_wait,
			(MEI_READ_COMPLETE == cl->reading_state ||
			 MEI_FILE_INITIALIZING == cl->state ||
			 MEI_FILE_DISCONNECTED == cl->state ||
			 MEI_FILE_DISCONNECTING == cl->state))) {
			/* interrupted: lock is NOT held here, return directly */
			if (signal_pending(current))
				return -EINTR;
			return -ERESTARTSYS;
		}

		mutex_lock(&dev->device_lock);
		/* client state changed underneath us (e.g. reset) */
		if (MEI_FILE_INITIALIZING == cl->state ||
		    MEI_FILE_DISCONNECTED == cl->state ||
		    MEI_FILE_DISCONNECTING == cl->state) {
			rets = -EBUSY;
			goto out;
		}
	}

	cb = cl->read_cb;

	if (!cb) {
		rets = -ENODEV;
		goto out;
	}
	if (cl->reading_state != MEI_READ_COMPLETE) {
		rets = 0;
		goto out;
	}
	/* now copy the data to user space */
copy_buffer:
	dev_dbg(&dev->pdev->dev, "cb->response_buffer size - %d\n",
		cb->response_buffer.size);
	dev_dbg(&dev->pdev->dev, "cb->buf_idx - %lu\n", cb->buf_idx);
	if (length == 0 || ubuf == NULL || *offset > cb->buf_idx) {
		rets = -EMSGSIZE;
		goto free;
	}

	/* length is being truncated to PAGE_SIZE,
	 * however buf_idx may point beyond that */
	length = min_t(size_t, length, cb->buf_idx - *offset);

	if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) {
		rets = -EFAULT;
		goto free;
	}

	rets = length;
	*offset += length;
	/* keep the cb around while unread data remains */
	if ((unsigned long)*offset < cb->buf_idx)
		goto out;

free:
	cb_pos = find_read_list_entry(dev, cl);
	/* Remove entry from read list */
	if (cb_pos)
		list_del(&cb_pos->list);
	mei_io_cb_free(cb);
	cl->reading_state = MEI_IDLE;
	cl->read_cb = NULL;
	cl->read_pending = 0;
out:
	dev_dbg(&dev->pdev->dev, "end mei read rets= %d\n", rets);
	mutex_unlock(&dev->device_lock);
	return rets;
}
  354. /**
  355. * mei_write - the write function.
  356. *
  357. * @file: pointer to file structure
  358. * @ubuf: pointer to user buffer
  359. * @length: buffer length
  360. * @offset: data offset in buffer
  361. *
  362. * returns >=0 data length on success , <0 on error
  363. */
  364. static ssize_t mei_write(struct file *file, const char __user *ubuf,
  365. size_t length, loff_t *offset)
  366. {
  367. struct mei_cl *cl = file->private_data;
  368. struct mei_cl_cb *write_cb = NULL;
  369. struct mei_msg_hdr mei_hdr;
  370. struct mei_device *dev;
  371. unsigned long timeout = 0;
  372. int rets;
  373. int i;
  374. if (WARN_ON(!cl || !cl->dev))
  375. return -ENODEV;
  376. dev = cl->dev;
  377. mutex_lock(&dev->device_lock);
  378. if (dev->dev_state != MEI_DEV_ENABLED) {
  379. rets = -ENODEV;
  380. goto err;
  381. }
  382. i = mei_me_cl_by_id(dev, cl->me_client_id);
  383. if (i < 0) {
  384. rets = -ENODEV;
  385. goto err;
  386. }
  387. if (length > dev->me_clients[i].props.max_msg_length || length <= 0) {
  388. rets = -EMSGSIZE;
  389. goto err;
  390. }
  391. if (cl->state != MEI_FILE_CONNECTED) {
  392. rets = -ENODEV;
  393. dev_err(&dev->pdev->dev, "host client = %d, is not connected to ME client = %d",
  394. cl->host_client_id, cl->me_client_id);
  395. goto err;
  396. }
  397. if (cl == &dev->iamthif_cl) {
  398. write_cb = mei_amthif_find_read_list_entry(dev, file);
  399. if (write_cb) {
  400. timeout = write_cb->read_time +
  401. mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);
  402. if (time_after(jiffies, timeout) ||
  403. cl->reading_state == MEI_READ_COMPLETE) {
  404. *offset = 0;
  405. list_del(&write_cb->list);
  406. mei_io_cb_free(write_cb);
  407. write_cb = NULL;
  408. }
  409. }
  410. }
  411. /* free entry used in read */
  412. if (cl->reading_state == MEI_READ_COMPLETE) {
  413. *offset = 0;
  414. write_cb = find_read_list_entry(dev, cl);
  415. if (write_cb) {
  416. list_del(&write_cb->list);
  417. mei_io_cb_free(write_cb);
  418. write_cb = NULL;
  419. cl->reading_state = MEI_IDLE;
  420. cl->read_cb = NULL;
  421. cl->read_pending = 0;
  422. }
  423. } else if (cl->reading_state == MEI_IDLE && !cl->read_pending)
  424. *offset = 0;
  425. write_cb = mei_io_cb_init(cl, file);
  426. if (!write_cb) {
  427. dev_err(&dev->pdev->dev, "write cb allocation failed\n");
  428. rets = -ENOMEM;
  429. goto err;
  430. }
  431. rets = mei_io_cb_alloc_req_buf(write_cb, length);
  432. if (rets)
  433. goto err;
  434. dev_dbg(&dev->pdev->dev, "cb request size = %zd\n", length);
  435. rets = copy_from_user(write_cb->request_buffer.data, ubuf, length);
  436. if (rets)
  437. goto err;
  438. cl->sm_state = 0;
  439. if (length == 4 &&
  440. ((memcmp(mei_wd_state_independence_msg[0],
  441. write_cb->request_buffer.data, 4) == 0) ||
  442. (memcmp(mei_wd_state_independence_msg[1],
  443. write_cb->request_buffer.data, 4) == 0) ||
  444. (memcmp(mei_wd_state_independence_msg[2],
  445. write_cb->request_buffer.data, 4) == 0)))
  446. cl->sm_state |= MEI_WD_STATE_INDEPENDENCE_MSG_SENT;
  447. if (cl == &dev->iamthif_cl) {
  448. rets = mei_amthif_write(dev, write_cb);
  449. if (rets) {
  450. dev_err(&dev->pdev->dev,
  451. "amthi write failed with status = %d\n", rets);
  452. goto err;
  453. }
  454. mutex_unlock(&dev->device_lock);
  455. return length;
  456. }
  457. write_cb->fop_type = MEI_FOP_WRITE;
  458. dev_dbg(&dev->pdev->dev, "host client = %d, ME client = %d\n",
  459. cl->host_client_id, cl->me_client_id);
  460. rets = mei_flow_ctrl_creds(dev, cl);
  461. if (rets < 0)
  462. goto err;
  463. if (rets == 0 || dev->mei_host_buffer_is_empty == false) {
  464. write_cb->buf_idx = 0;
  465. mei_hdr.msg_complete = 0;
  466. cl->writing_state = MEI_WRITING;
  467. goto out;
  468. }
  469. dev->mei_host_buffer_is_empty = false;
  470. if (length > mei_hbuf_max_data(dev)) {
  471. mei_hdr.length = mei_hbuf_max_data(dev);
  472. mei_hdr.msg_complete = 0;
  473. } else {
  474. mei_hdr.length = length;
  475. mei_hdr.msg_complete = 1;
  476. }
  477. mei_hdr.host_addr = cl->host_client_id;
  478. mei_hdr.me_addr = cl->me_client_id;
  479. mei_hdr.reserved = 0;
  480. dev_dbg(&dev->pdev->dev, "call mei_write_message header=%08x.\n",
  481. *((u32 *) &mei_hdr));
  482. if (mei_write_message(dev, &mei_hdr,
  483. write_cb->request_buffer.data, mei_hdr.length)) {
  484. rets = -ENODEV;
  485. goto err;
  486. }
  487. cl->writing_state = MEI_WRITING;
  488. write_cb->buf_idx = mei_hdr.length;
  489. out:
  490. if (mei_hdr.msg_complete) {
  491. if (mei_flow_ctrl_reduce(dev, cl)) {
  492. rets = -ENODEV;
  493. goto err;
  494. }
  495. list_add_tail(&write_cb->list, &dev->write_waiting_list.list);
  496. } else {
  497. list_add_tail(&write_cb->list, &dev->write_list.list);
  498. }
  499. mutex_unlock(&dev->device_lock);
  500. return length;
  501. err:
  502. mutex_unlock(&dev->device_lock);
  503. mei_io_cb_free(write_cb);
  504. return rets;
  505. }
  506. /**
  507. * mei_ioctl - the IOCTL function
  508. *
  509. * @file: pointer to file structure
  510. * @cmd: ioctl command
  511. * @data: pointer to mei message structure
  512. *
  513. * returns 0 on success , <0 on error
  514. */
  515. static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
  516. {
  517. struct mei_device *dev;
  518. struct mei_cl *cl = file->private_data;
  519. struct mei_connect_client_data *connect_data = NULL;
  520. int rets;
  521. if (cmd != IOCTL_MEI_CONNECT_CLIENT)
  522. return -EINVAL;
  523. if (WARN_ON(!cl || !cl->dev))
  524. return -ENODEV;
  525. dev = cl->dev;
  526. dev_dbg(&dev->pdev->dev, "IOCTL cmd = 0x%x", cmd);
  527. mutex_lock(&dev->device_lock);
  528. if (dev->dev_state != MEI_DEV_ENABLED) {
  529. rets = -ENODEV;
  530. goto out;
  531. }
  532. dev_dbg(&dev->pdev->dev, ": IOCTL_MEI_CONNECT_CLIENT.\n");
  533. connect_data = kzalloc(sizeof(struct mei_connect_client_data),
  534. GFP_KERNEL);
  535. if (!connect_data) {
  536. rets = -ENOMEM;
  537. goto out;
  538. }
  539. dev_dbg(&dev->pdev->dev, "copy connect data from user\n");
  540. if (copy_from_user(connect_data, (char __user *)data,
  541. sizeof(struct mei_connect_client_data))) {
  542. dev_dbg(&dev->pdev->dev, "failed to copy data from userland\n");
  543. rets = -EFAULT;
  544. goto out;
  545. }
  546. rets = mei_ioctl_connect_client(file, connect_data);
  547. /* if all is ok, copying the data back to user. */
  548. if (rets)
  549. goto out;
  550. dev_dbg(&dev->pdev->dev, "copy connect data to user\n");
  551. if (copy_to_user((char __user *)data, connect_data,
  552. sizeof(struct mei_connect_client_data))) {
  553. dev_dbg(&dev->pdev->dev, "failed to copy data to userland\n");
  554. rets = -EFAULT;
  555. goto out;
  556. }
  557. out:
  558. kfree(connect_data);
  559. mutex_unlock(&dev->device_lock);
  560. return rets;
  561. }
/**
 * mei_compat_ioctl - the compat IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure
 *
 * returns 0 on success , <0 on error
 */
#ifdef CONFIG_COMPAT
static long mei_compat_ioctl(struct file *file,
			     unsigned int cmd, unsigned long data)
{
	/* widen the 32-bit user pointer and forward to the native ioctl;
	 * struct mei_connect_client_data has identical 32/64-bit layout */
	return mei_ioctl(file, cmd, (unsigned long)compat_ptr(data));
}
#endif
  578. /**
  579. * mei_poll - the poll function
  580. *
  581. * @file: pointer to file structure
  582. * @wait: pointer to poll_table structure
  583. *
  584. * returns poll mask
  585. */
  586. static unsigned int mei_poll(struct file *file, poll_table *wait)
  587. {
  588. struct mei_cl *cl = file->private_data;
  589. struct mei_device *dev;
  590. unsigned int mask = 0;
  591. if (WARN_ON(!cl || !cl->dev))
  592. return mask;
  593. dev = cl->dev;
  594. mutex_lock(&dev->device_lock);
  595. if (dev->dev_state != MEI_DEV_ENABLED)
  596. goto out;
  597. if (cl == &dev->iamthif_cl) {
  598. mutex_unlock(&dev->device_lock);
  599. poll_wait(file, &dev->iamthif_cl.wait, wait);
  600. mutex_lock(&dev->device_lock);
  601. if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE &&
  602. dev->iamthif_file_object == file) {
  603. mask |= (POLLIN | POLLRDNORM);
  604. dev_dbg(&dev->pdev->dev, "run next amthi cb\n");
  605. mei_amthif_run_next_cmd(dev);
  606. }
  607. goto out;
  608. }
  609. mutex_unlock(&dev->device_lock);
  610. poll_wait(file, &cl->tx_wait, wait);
  611. mutex_lock(&dev->device_lock);
  612. if (MEI_WRITE_COMPLETE == cl->writing_state)
  613. mask |= (POLLIN | POLLRDNORM);
  614. out:
  615. mutex_unlock(&dev->device_lock);
  616. return mask;
  617. }
/*
 * file operations structure will be used for mei char device.
 */
static const struct file_operations mei_fops = {
	.owner = THIS_MODULE,
	.read = mei_read,
	.unlocked_ioctl = mei_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = mei_compat_ioctl,
#endif
	.open = mei_open,
	.release = mei_release,
	.write = mei_write,
	.poll = mei_poll,
	/* the device is stream-like; seeking is meaningless */
	.llseek = no_llseek
};

/*
 * Misc Device Struct
 *
 * Registered in mei_probe() as /dev/mei with a dynamically
 * assigned misc minor number.
 */
static struct miscdevice  mei_misc_device = {
	.name = "mei",
	.fops = &mei_fops,
	.minor = MISC_DYNAMIC_MINOR,
};
  642. /**
  643. * mei_quirk_probe - probe for devices that doesn't valid ME interface
  644. * @pdev: PCI device structure
  645. * @ent: entry into pci_device_table
  646. *
  647. * returns true if ME Interface is valid, false otherwise
  648. */
  649. static bool __devinit mei_quirk_probe(struct pci_dev *pdev,
  650. const struct pci_device_id *ent)
  651. {
  652. u32 reg;
  653. if (ent->device == MEI_DEV_ID_PBG_1) {
  654. pci_read_config_dword(pdev, 0x48, &reg);
  655. /* make sure that bit 9 is up and bit 10 is down */
  656. if ((reg & 0x600) == 0x200) {
  657. dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
  658. return false;
  659. }
  660. }
  661. return true;
  662. }
/**
 * mei_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_pci_tbl
 *
 * Acquires resources in order (PCI device, regions, device struct, iomap,
 * MSI + irq, hw init, misc device) and unwinds them in reverse via the
 * goto chain on failure.
 *
 * returns 0 on success, <0 on failure.
 */
static int __devinit mei_probe(struct pci_dev *pdev,
			       const struct pci_device_id *ent)
{
	struct mei_device *dev;
	int err;

	/* mei_mutex guards the mei_pdev singleton */
	mutex_lock(&mei_mutex);

	if (!mei_quirk_probe(pdev, ent)) {
		err = -ENODEV;
		goto end;
	}

	/* only one MEI device per platform is supported */
	if (mei_pdev) {
		err = -EEXIST;
		goto end;
	}
	/* enable pci dev */
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering */
	pci_set_master(pdev);
	/* pci request regions for mei driver */
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto disable_device;
	}
	/* allocates and initializes the mei dev structure */
	dev = mei_device_init(pdev);
	if (!dev) {
		err = -ENOMEM;
		goto release_regions;
	}
	/* mapping IO device memory */
	dev->mem_addr = pci_iomap(pdev, 0, 0);
	if (!dev->mem_addr) {
		dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
		err = -ENOMEM;
		goto free_device;
	}
	pci_enable_msi(pdev);

	/* request and enable interrupt; with MSI there is no line sharing,
	 * so no quick (hard-irq) handler is needed */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_interrupt_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_interrupt_quick_handler,
			mei_interrupt_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
			pdev->irq);
		goto disable_msi;
	}
	INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
	if (mei_hw_init(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	err = misc_register(&mei_misc_device);
	if (err)
		goto release_irq;

	mei_pdev = pdev;
	pci_set_drvdata(pdev, dev);

	/* periodic housekeeping timer (mei_timer), every HZ jiffies */
	schedule_delayed_work(&dev->timer_work, HZ);

	mutex_unlock(&mei_mutex);

	pr_debug("initialization successful.\n");

	return 0;

release_irq:
	/* disable interrupts */
	dev->host_hw_state = mei_hcsr_read(dev);
	mei_disable_interrupts(dev);
	flush_scheduled_work();
	free_irq(pdev->irq, dev);
disable_msi:
	pci_disable_msi(pdev);
	pci_iounmap(pdev, dev->mem_addr);
free_device:
	kfree(dev);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
end:
	mutex_unlock(&mei_mutex);
	dev_err(&pdev->dev, "initialization failed.\n");
	return err;
}
/**
 * mei_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 *
 * NOTE(review): misc_deregister() is the LAST call here, after dev has
 * been freed — an open() racing device removal could reach freed state.
 * Consider deregistering the char device before tearing down dev; confirm
 * against the misc-device core's open serialization.
 */
static void __devexit mei_remove(struct pci_dev *pdev)
{
	struct mei_device *dev;

	/* only tear down the singleton we probed */
	if (mei_pdev != pdev)
		return;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return;

	mutex_lock(&dev->device_lock);

	cancel_delayed_work(&dev->timer_work);

	mei_wd_stop(dev);

	mei_pdev = NULL;

	/* disconnect the built-in AMT and watchdog host clients */
	if (dev->iamthif_cl.state == MEI_FILE_CONNECTED) {
		dev->iamthif_cl.state = MEI_FILE_DISCONNECTING;
		mei_disconnect_host_client(dev, &dev->iamthif_cl);
	}
	if (dev->wd_cl.state == MEI_FILE_CONNECTED) {
		dev->wd_cl.state = MEI_FILE_DISCONNECTING;
		mei_disconnect_host_client(dev, &dev->wd_cl);
	}

	/* Unregistering watchdog device */
	mei_watchdog_unregister(dev);

	/* remove entry if already in list */
	dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n");
	mei_remove_client_from_file_list(dev, dev->wd_cl.host_client_id);
	mei_remove_client_from_file_list(dev, dev->iamthif_cl.host_client_id);

	dev->iamthif_current_cb = NULL;
	dev->me_clients_num = 0;

	mutex_unlock(&dev->device_lock);

	flush_scheduled_work();

	/* disable interrupts */
	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);
	pci_set_drvdata(pdev, NULL);

	if (dev->mem_addr)
		pci_iounmap(pdev, dev->mem_addr);

	kfree(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	misc_deregister(&mei_misc_device);
}
#ifdef CONFIG_PM
/**
 * mei_pci_suspend - PM suspend callback
 *
 * @device: generic device embedded in the PCI device
 *
 * Stops the housekeeping timer and watchdog, resets the hardware into
 * MEI_DEV_POWER_DOWN, then releases irq and MSI.
 *
 * returns 0 on success, or the mei_wd_stop() error code
 */
static int mei_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev = pci_get_drvdata(pdev);
	int err;

	if (!dev)
		return -ENODEV;
	mutex_lock(&dev->device_lock);

	cancel_delayed_work(&dev->timer_work);

	/* Stop watchdog if exists */
	err = mei_wd_stop(dev);
	/* Set new mei state */
	if (dev->dev_state == MEI_DEV_ENABLED ||
	    dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET) {
		dev->dev_state = MEI_DEV_POWER_DOWN;
		mei_reset(dev, 0);
	}
	mutex_unlock(&dev->device_lock);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	return err;
}

/**
 * mei_pci_resume - PM resume callback
 *
 * @device: generic device embedded in the PCI device
 *
 * Re-requests MSI/irq (mirrors the probe-time logic), resets the
 * hardware into MEI_DEV_POWER_UP, and restarts the housekeeping timer.
 *
 * returns 0 on success, <0 on error
 */
static int mei_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	int err;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	pci_enable_msi(pdev);

	/* request and enable interrupt; same handler selection as probe */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_interrupt_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_interrupt_quick_handler,
			mei_interrupt_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
			pdev->irq);
		return err;
	}

	mutex_lock(&dev->device_lock);
	dev->dev_state = MEI_DEV_POWER_UP;
	mei_reset(dev, 1);
	mutex_unlock(&dev->device_lock);

	/* Start timer if stopped in suspend */
	schedule_delayed_work(&dev->timer_work, HZ);

	return err;
}

static SIMPLE_DEV_PM_OPS(mei_pm_ops, mei_pci_suspend, mei_pci_resume);
#define MEI_PM_OPS	(&mei_pm_ops)
#else
#define MEI_PM_OPS	NULL
#endif /* CONFIG_PM */
/*
 * PCI driver structure
 *
 * .shutdown reuses mei_remove so the hardware is quiesced on
 * reboot/kexec as well as on device unbind.
 */
static struct pci_driver mei_driver = {
	.name = KBUILD_MODNAME,
	.id_table = mei_pci_tbl,
	.probe = mei_probe,
	.remove = __devexit_p(mei_remove),
	.shutdown = __devexit_p(mei_remove),
	.driver.pm = MEI_PM_OPS,
};

/* expands to module init/exit that register/unregister mei_driver */
module_pci_driver(mei_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");