main.c

/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/aio.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/uuid.h>
#include <linux/compat.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>

#include "mei_dev.h"
#include <linux/mei.h>
#include "interface.h"

static const char mei_driver_name[] = "mei";

/* The device pointer */
/* Currently this driver works as long as there is only a single AMT device. */
struct pci_dev *mei_device;

/* mei_pci_tbl - PCI Device ID Table */
static DEFINE_PCI_DEVICE_TABLE(mei_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82946GZ)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G35)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82Q965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GM965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GME965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q35)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82G33)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q33)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82X38)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_3200)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_6)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_7)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_8)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_9)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_10)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_3)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_4)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_3)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_4)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_CPT_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PBG_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)},

	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, mei_pci_tbl);

static DEFINE_MUTEX(mei_mutex);

/**
 * mei_clear_list - removes all callbacks associated with file
 *	from mei_cb_list
 *
 * @dev: device structure
 * @file: file structure
 * @mei_cb_list: callbacks list
 *
 * mei_clear_list is called to clear resources associated with a file
 * when an application calls the close function or Ctrl-C is pressed
 *
 * returns true if callback removed from the list, false otherwise
 */
static bool mei_clear_list(struct mei_device *dev,
		struct file *file, struct list_head *mei_cb_list)
{
	struct mei_cl_cb *cb_pos = NULL;
	struct mei_cl_cb *cb_next = NULL;
	struct file *file_temp;
	bool removed = false;

	/* walk all list members */
	list_for_each_entry_safe(cb_pos, cb_next, mei_cb_list, cb_list) {
		file_temp = (struct file *)cb_pos->file_object;
		/* check if this list member is associated with the file */
		if (file_temp == file) {
			/* remove member from the list */
			list_del(&cb_pos->cb_list);
			/* check if cb equal to current iamthif cb */
			if (dev->iamthif_current_cb == cb_pos) {
				dev->iamthif_current_cb = NULL;
				/* send flow control to iamthif client */
				mei_send_flow_control(dev, &dev->iamthif_cl);
			}
			/* free all allocated buffers */
			mei_free_cb_private(cb_pos);
			cb_pos = NULL;
			removed = true;
		}
	}
	return removed;
}

/**
 * mei_clear_lists - removes all callbacks associated with file
 *
 * @dev: device structure
 * @file: file structure
 *
 * mei_clear_lists is called to clear resources associated with a file
 * when an application calls the close function or Ctrl-C is pressed
 *
 * returns true if callback removed from the list, false otherwise
 */
static bool mei_clear_lists(struct mei_device *dev, struct file *file)
{
	bool removed = false;

	/* remove callbacks associated with a file */
	mei_clear_list(dev, file, &dev->amthi_cmd_list.mei_cb.cb_list);
	if (mei_clear_list(dev, file,
			&dev->amthi_read_complete_list.mei_cb.cb_list))
		removed = true;

	mei_clear_list(dev, file, &dev->ctrl_rd_list.mei_cb.cb_list);

	if (mei_clear_list(dev, file, &dev->ctrl_wr_list.mei_cb.cb_list))
		removed = true;

	if (mei_clear_list(dev, file, &dev->write_waiting_list.mei_cb.cb_list))
		removed = true;

	if (mei_clear_list(dev, file, &dev->write_list.mei_cb.cb_list))
		removed = true;

	/* check if iamthif_current_cb not NULL */
	if (dev->iamthif_current_cb && !removed) {
		/* check file and iamthif current cb association */
		if (dev->iamthif_current_cb->file_object == file) {
			/* remove cb */
			mei_free_cb_private(dev->iamthif_current_cb);
			dev->iamthif_current_cb = NULL;
			removed = true;
		}
	}
	return removed;
}

/**
 * find_read_list_entry - find read list entry
 *
 * @dev: device structure
 * @cl: host client
 *
 * returns cb on success, NULL on error
 */
static struct mei_cl_cb *find_read_list_entry(
		struct mei_device *dev,
		struct mei_cl *cl)
{
	struct mei_cl_cb *pos = NULL;
	struct mei_cl_cb *next = NULL;

	dev_dbg(&dev->pdev->dev, "remove read_list CB\n");
	list_for_each_entry_safe(pos, next,
			&dev->read_list.mei_cb.cb_list, cb_list) {
		struct mei_cl *cl_temp;
		cl_temp = (struct mei_cl *)pos->file_private;

		if (mei_cl_cmp_id(cl, cl_temp))
			return pos;
	}
	return NULL;
}

/**
 * mei_open - the open function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * returns 0 on success, <0 on error
 */
static int mei_open(struct inode *inode, struct file *file)
{
	struct mei_cl *cl;
	struct mei_device *dev;
	unsigned long cl_id;
	int err;

	err = -ENODEV;
	if (!mei_device)
		goto out;

	dev = pci_get_drvdata(mei_device);
	if (!dev)
		goto out;

	mutex_lock(&dev->device_lock);
	err = -ENOMEM;
	cl = mei_cl_allocate(dev);
	if (!cl)
		goto out_unlock;

	err = -ENODEV;
	if (dev->mei_state != MEI_ENABLED) {
		dev_dbg(&dev->pdev->dev, "mei_state != MEI_ENABLED mei_state= %d\n",
		    dev->mei_state);
		goto out_unlock;
	}
	err = -EMFILE;
	if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT)
		goto out_unlock;
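
	/* pick the first unused host client id from the clients bitmap */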
	cl_id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
	if (cl_id >= MEI_CLIENTS_MAX)
		goto out_unlock;

	cl->host_client_id = cl_id;

	dev_dbg(&dev->pdev->dev, "client_id = %d\n", cl->host_client_id);

	dev->open_handle_count++;

	list_add_tail(&cl->link, &dev->file_list);

	set_bit(cl->host_client_id, dev->host_clients_map);
	cl->state = MEI_FILE_INITIALIZING;
	cl->sm_state = 0;

	file->private_data = cl;
	mutex_unlock(&dev->device_lock);

	return nonseekable_open(inode, file);

out_unlock:
	mutex_unlock(&dev->device_lock);
	kfree(cl);
out:
	return err;
}

/**
 * mei_release - the release function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * returns 0 on success, <0 on error
 */
static int mei_release(struct inode *inode, struct file *file)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *cb;
	struct mei_device *dev;
	int rets = 0;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);
	if (cl != &dev->iamthif_cl) {
		if (cl->state == MEI_FILE_CONNECTED) {
			cl->state = MEI_FILE_DISCONNECTING;
			dev_dbg(&dev->pdev->dev,
				"disconnecting client host client = %d, "
				"ME client = %d\n",
				cl->host_client_id,
				cl->me_client_id);
			rets = mei_disconnect_host_client(dev, cl);
		}
		mei_cl_flush_queues(cl);
		dev_dbg(&dev->pdev->dev, "remove client host client = %d, ME client = %d\n",
			cl->host_client_id,
			cl->me_client_id);

		if (dev->open_handle_count > 0) {
			clear_bit(cl->host_client_id, dev->host_clients_map);
			dev->open_handle_count--;
		}
		mei_remove_client_from_file_list(dev, cl->host_client_id);

		/* free read cb */
		cb = NULL;
		if (cl->read_cb) {
			cb = find_read_list_entry(dev, cl);
			/* Remove entry from read list */
			if (cb)
				list_del(&cb->cb_list);
			cb = cl->read_cb;
			cl->read_cb = NULL;
		}

		file->private_data = NULL;

		if (cb) {
			mei_free_cb_private(cb);
			cb = NULL;
		}

		kfree(cl);
	} else {
		if (dev->open_handle_count > 0)
			dev->open_handle_count--;

		if (dev->iamthif_file_object == file &&
		    dev->iamthif_state != MEI_IAMTHIF_IDLE) {
			dev_dbg(&dev->pdev->dev, "amthi canceled iamthif state %d\n",
			    dev->iamthif_state);
			dev->iamthif_canceled = true;
			if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE) {
				dev_dbg(&dev->pdev->dev, "run next amthi iamthif cb\n");
				mei_run_next_iamthif_cmd(dev);
			}
		}

		if (mei_clear_lists(dev, file))
			dev->iamthif_state = MEI_IAMTHIF_IDLE;
	}
	mutex_unlock(&dev->device_lock);
	return rets;
}

/**
 * mei_read - the read function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * returns >=0 data length on success, <0 on error
 */
static ssize_t mei_read(struct file *file, char __user *ubuf,
			size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *cb_pos = NULL;
	struct mei_cl_cb *cb = NULL;
	struct mei_device *dev;
	int i;
	int rets;
	int err;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);
	if (dev->mei_state != MEI_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	if ((cl->sm_state & MEI_WD_STATE_INDEPENDENCE_MSG_SENT) == 0) {
		/* Do not allow to read watchdog client */
		i = mei_find_me_client_index(dev, mei_wd_guid);
		if (i >= 0) {
			struct mei_me_client *me_client = &dev->me_clients[i];

			if (cl->me_client_id == me_client->client_id) {
				rets = -EBADF;
				goto out;
			}
		}
	} else {
		cl->sm_state &= ~MEI_WD_STATE_INDEPENDENCE_MSG_SENT;
	}

	if (cl == &dev->iamthif_cl) {
		rets = amthi_read(dev, file, ubuf, length, offset);
		goto out;
	}

	if (cl->read_cb && cl->read_cb->information > *offset) {
		cb = cl->read_cb;
		goto copy_buffer;
	} else if (cl->read_cb && cl->read_cb->information > 0 &&
		   cl->read_cb->information <= *offset) {
		cb = cl->read_cb;
		rets = 0;
		goto free;
	} else if ((!cl->read_cb || !cl->read_cb->information) &&
		   *offset > 0) {
		/* Offset needs to be cleaned for contiguous reads */
		*offset = 0;
		rets = 0;
		goto out;
	}

	err = mei_start_read(dev, cl);
	if (err && err != -EBUSY) {
		dev_dbg(&dev->pdev->dev,
			"mei start read failure with status = %d\n", err);
		rets = err;
		goto out;
	}
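
	/* wait for the read to complete, unless the caller asked for
	 * non-blocking I/O */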
	if (MEI_READ_COMPLETE != cl->reading_state &&
			!waitqueue_active(&cl->rx_wait)) {
		if (file->f_flags & O_NONBLOCK) {
			rets = -EAGAIN;
			goto out;
		}
		mutex_unlock(&dev->device_lock);

		if (wait_event_interruptible(cl->rx_wait,
			(MEI_READ_COMPLETE == cl->reading_state ||
			 MEI_FILE_INITIALIZING == cl->state ||
			 MEI_FILE_DISCONNECTED == cl->state ||
			 MEI_FILE_DISCONNECTING == cl->state))) {
			if (signal_pending(current))
				return -EINTR;
			return -ERESTARTSYS;
		}

		mutex_lock(&dev->device_lock);
		if (MEI_FILE_INITIALIZING == cl->state ||
		    MEI_FILE_DISCONNECTED == cl->state ||
		    MEI_FILE_DISCONNECTING == cl->state) {
			rets = -EBUSY;
			goto out;
		}
	}

	cb = cl->read_cb;

	if (!cb) {
		rets = -ENODEV;
		goto out;
	}
	if (cl->reading_state != MEI_READ_COMPLETE) {
		rets = 0;
		goto out;
	}
	/* now copy the data to user space */
copy_buffer:
	dev_dbg(&dev->pdev->dev, "cb->response_buffer size - %d\n",
	    cb->response_buffer.size);
	dev_dbg(&dev->pdev->dev, "cb->information - %lu\n",
	    cb->information);
	if (length == 0 || ubuf == NULL || *offset > cb->information) {
		rets = -EMSGSIZE;
		goto free;
	}

	/* length is being truncated to PAGE_SIZE, however, */
	/* information size may be longer */
	length = min_t(size_t, length, (cb->information - *offset));

	if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) {
		rets = -EFAULT;
		goto free;
	}

	rets = length;
	*offset += length;
	if ((unsigned long)*offset < cb->information)
		goto out;

free:
	cb_pos = find_read_list_entry(dev, cl);
	/* Remove entry from read list */
	if (cb_pos)
		list_del(&cb_pos->cb_list);
	mei_free_cb_private(cb);
	cl->reading_state = MEI_IDLE;
	cl->read_cb = NULL;
	cl->read_pending = 0;
out:
	dev_dbg(&dev->pdev->dev, "end mei read rets= %d\n", rets);
	mutex_unlock(&dev->device_lock);
	return rets;
}

/**
 * mei_write - the write function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * returns >=0 data length on success, <0 on error
 */
static ssize_t mei_write(struct file *file, const char __user *ubuf,
			 size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *write_cb = NULL;
	struct mei_msg_hdr mei_hdr;
	struct mei_device *dev;
	unsigned long timeout = 0;
	int rets;
	int i;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	if (dev->mei_state != MEI_ENABLED) {
		mutex_unlock(&dev->device_lock);
		return -ENODEV;
	}

	if (cl == &dev->iamthif_cl) {
		write_cb = find_amthi_read_list_entry(dev, file);

		if (write_cb) {
			timeout = write_cb->read_time +
					msecs_to_jiffies(IAMTHIF_READ_TIMER);

			if (time_after(jiffies, timeout) ||
			    cl->reading_state == MEI_READ_COMPLETE) {
				*offset = 0;
				list_del(&write_cb->cb_list);
				mei_free_cb_private(write_cb);
				write_cb = NULL;
			}
		}
	}

	/* free entry used in read */
	if (cl->reading_state == MEI_READ_COMPLETE) {
		*offset = 0;
		write_cb = find_read_list_entry(dev, cl);
		if (write_cb) {
			list_del(&write_cb->cb_list);
			mei_free_cb_private(write_cb);
			write_cb = NULL;
			cl->reading_state = MEI_IDLE;
			cl->read_cb = NULL;
			cl->read_pending = 0;
		}
	} else if (cl->reading_state == MEI_IDLE && !cl->read_pending)
		*offset = 0;

	write_cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
	if (!write_cb) {
		mutex_unlock(&dev->device_lock);
		return -ENOMEM;
	}

	write_cb->file_object = file;
	write_cb->file_private = cl;
	write_cb->request_buffer.data = kmalloc(length, GFP_KERNEL);
	rets = -ENOMEM;
	if (!write_cb->request_buffer.data)
		goto unlock_dev;

	dev_dbg(&dev->pdev->dev, "length =%d\n", (int) length);

	rets = -EFAULT;
	if (copy_from_user(write_cb->request_buffer.data, ubuf, length))
		goto unlock_dev;

	cl->sm_state = 0;
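
	/* a 4-byte watchdog state-independence message sets a flag that
	 * mei_read later checks before allowing reads of the wd client */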
	if (length == 4 &&
	    ((memcmp(mei_wd_state_independence_msg[0],
				 write_cb->request_buffer.data, 4) == 0) ||
	     (memcmp(mei_wd_state_independence_msg[1],
				 write_cb->request_buffer.data, 4) == 0) ||
	     (memcmp(mei_wd_state_independence_msg[2],
				 write_cb->request_buffer.data, 4) == 0)))
		cl->sm_state |= MEI_WD_STATE_INDEPENDENCE_MSG_SENT;

	INIT_LIST_HEAD(&write_cb->cb_list);
	if (cl == &dev->iamthif_cl) {
		write_cb->response_buffer.data =
		    kmalloc(dev->iamthif_mtu, GFP_KERNEL);
		if (!write_cb->response_buffer.data) {
			rets = -ENOMEM;
			goto unlock_dev;
		}
		if (dev->mei_state != MEI_ENABLED) {
			rets = -ENODEV;
			goto unlock_dev;
		}
		for (i = 0; i < dev->me_clients_num; i++) {
			if (dev->me_clients[i].client_id ==
				dev->iamthif_cl.me_client_id)
				break;
		}

		if (WARN_ON(dev->me_clients[i].client_id != cl->me_client_id)) {
			rets = -ENODEV;
			goto unlock_dev;
		}
		if (i == dev->me_clients_num ||
		    (dev->me_clients[i].client_id !=
		     dev->iamthif_cl.me_client_id)) {
			rets = -ENODEV;
			goto unlock_dev;
		} else if (length > dev->me_clients[i].props.max_msg_length ||
			   length <= 0) {
			rets = -EMSGSIZE;
			goto unlock_dev;
		}

		write_cb->response_buffer.size = dev->iamthif_mtu;
		write_cb->major_file_operations = MEI_IOCTL;
		write_cb->information = 0;
		write_cb->request_buffer.size = length;
		if (dev->iamthif_cl.state != MEI_FILE_CONNECTED) {
			rets = -ENODEV;
			goto unlock_dev;
		}

		if (!list_empty(&dev->amthi_cmd_list.mei_cb.cb_list) ||
				dev->iamthif_state != MEI_IAMTHIF_IDLE) {
			dev_dbg(&dev->pdev->dev, "amthi_state = %d\n",
					(int) dev->iamthif_state);
			dev_dbg(&dev->pdev->dev, "add amthi cb to amthi cmd waiting list\n");
			list_add_tail(&write_cb->cb_list,
					&dev->amthi_cmd_list.mei_cb.cb_list);
			rets = length;
		} else {
			dev_dbg(&dev->pdev->dev, "call amthi write\n");
			rets = amthi_write(dev, write_cb);

			if (rets) {
				dev_dbg(&dev->pdev->dev, "amthi write failed with status = %d\n",
				    rets);
				goto unlock_dev;
			}
			rets = length;
		}
		mutex_unlock(&dev->device_lock);
		return rets;
	}

	write_cb->major_file_operations = MEI_WRITE;
	/* make sure information is zero before we start */
	write_cb->information = 0;
	write_cb->request_buffer.size = length;

	dev_dbg(&dev->pdev->dev, "host client = %d, ME client = %d\n",
	    cl->host_client_id, cl->me_client_id);
	if (cl->state != MEI_FILE_CONNECTED) {
		rets = -ENODEV;
		dev_dbg(&dev->pdev->dev, "host client = %d, is not connected to ME client = %d",
			cl->host_client_id,
			cl->me_client_id);
		goto unlock_dev;
	}
	for (i = 0; i < dev->me_clients_num; i++) {
		if (dev->me_clients[i].client_id ==
		    cl->me_client_id)
			break;
	}
	if (WARN_ON(dev->me_clients[i].client_id != cl->me_client_id)) {
		rets = -ENODEV;
		goto unlock_dev;
	}
	if (i == dev->me_clients_num) {
		rets = -ENODEV;
		goto unlock_dev;
	}
	if (length > dev->me_clients[i].props.max_msg_length || length <= 0) {
		rets = -EINVAL;
		goto unlock_dev;
	}
	write_cb->file_private = cl;
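
	/* with flow-control credits and an empty host buffer, write (part of)
	 * the message now; otherwise queue the callback on the write list */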
	rets = mei_flow_ctrl_creds(dev, cl);
	if (rets < 0)
		goto unlock_dev;

	if (rets && dev->mei_host_buffer_is_empty) {
		rets = 0;
		dev->mei_host_buffer_is_empty = false;
		if (length > ((((dev->host_hw_state & H_CBD) >> 24) *
			sizeof(u32)) - sizeof(struct mei_msg_hdr))) {

			mei_hdr.length =
				(((dev->host_hw_state & H_CBD) >> 24) *
				sizeof(u32)) -
				sizeof(struct mei_msg_hdr);
			mei_hdr.msg_complete = 0;
		} else {
			mei_hdr.length = length;
			mei_hdr.msg_complete = 1;
		}
		mei_hdr.host_addr = cl->host_client_id;
		mei_hdr.me_addr = cl->me_client_id;
		mei_hdr.reserved = 0;
		dev_dbg(&dev->pdev->dev, "call mei_write_message header=%08x.\n",
		    *((u32 *) &mei_hdr));
		if (mei_write_message(dev, &mei_hdr,
			(unsigned char *) (write_cb->request_buffer.data),
			mei_hdr.length)) {
			rets = -ENODEV;
			goto unlock_dev;
		}
		cl->writing_state = MEI_WRITING;
		write_cb->information = mei_hdr.length;
		if (mei_hdr.msg_complete) {
			if (mei_flow_ctrl_reduce(dev, cl)) {
				rets = -ENODEV;
				goto unlock_dev;
			}
			list_add_tail(&write_cb->cb_list,
				      &dev->write_waiting_list.mei_cb.cb_list);
		} else {
			list_add_tail(&write_cb->cb_list,
				      &dev->write_list.mei_cb.cb_list);
		}
	} else {
		write_cb->information = 0;
		cl->writing_state = MEI_WRITING;
		list_add_tail(&write_cb->cb_list,
			      &dev->write_list.mei_cb.cb_list);
	}
	mutex_unlock(&dev->device_lock);
	return length;

unlock_dev:
	mutex_unlock(&dev->device_lock);
	mei_free_cb_private(write_cb);
	return rets;
}

/**
 * mei_ioctl - the IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei connect client data structure
 *
 * returns 0 on success, <0 on error
 */
static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
{
	struct mei_device *dev;
	struct mei_cl *cl = file->private_data;
	struct mei_connect_client_data *connect_data = NULL;
	int rets;

	if (cmd != IOCTL_MEI_CONNECT_CLIENT)
		return -EINVAL;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	dev_dbg(&dev->pdev->dev, "IOCTL cmd = 0x%x", cmd);

	mutex_lock(&dev->device_lock);
	if (dev->mei_state != MEI_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	dev_dbg(&dev->pdev->dev, ": IOCTL_MEI_CONNECT_CLIENT.\n");

	connect_data = kzalloc(sizeof(struct mei_connect_client_data),
							GFP_KERNEL);
	if (!connect_data) {
		rets = -ENOMEM;
		goto out;
	}
	dev_dbg(&dev->pdev->dev, "copy connect data from user\n");
	if (copy_from_user(connect_data, (char __user *)data,
				sizeof(struct mei_connect_client_data))) {
		dev_dbg(&dev->pdev->dev, "failed to copy data from userland\n");
		rets = -EFAULT;
		goto out;
	}
	rets = mei_ioctl_connect_client(file, connect_data);

	/* if all is ok, copying the data back to user. */
	if (rets)
		goto out;

	dev_dbg(&dev->pdev->dev, "copy connect data to user\n");
	if (copy_to_user((char __user *)data, connect_data,
				sizeof(struct mei_connect_client_data))) {
		dev_dbg(&dev->pdev->dev, "failed to copy data to userland\n");
		rets = -EFAULT;
		goto out;
	}

out:
	kfree(connect_data);
	mutex_unlock(&dev->device_lock);
	return rets;
}

/**
 * mei_compat_ioctl - the compat IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei connect client data structure
 *
 * returns 0 on success, <0 on error
 */
#ifdef CONFIG_COMPAT
static long mei_compat_ioctl(struct file *file,
			unsigned int cmd, unsigned long data)
{
	return mei_ioctl(file, cmd, (unsigned long)compat_ptr(data));
}
#endif

/**
 * mei_poll - the poll function
 *
 * @file: pointer to file structure
 * @wait: pointer to poll_table structure
 *
 * returns poll mask
 */
static unsigned int mei_poll(struct file *file, poll_table *wait)
{
	struct mei_cl *cl = file->private_data;
	struct mei_device *dev;
	unsigned int mask = 0;

	if (WARN_ON(!cl || !cl->dev))
		return mask;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	if (dev->mei_state != MEI_ENABLED)
		goto out;
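
	/* the iamthif (AMT) client waits on its own queue; other clients
	 * wait on tx_wait and report readiness once the write completes */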
	if (cl == &dev->iamthif_cl) {
		mutex_unlock(&dev->device_lock);
		poll_wait(file, &dev->iamthif_cl.wait, wait);
		mutex_lock(&dev->device_lock);
		if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE &&
			dev->iamthif_file_object == file) {
			mask |= (POLLIN | POLLRDNORM);
			dev_dbg(&dev->pdev->dev, "run next amthi cb\n");
			mei_run_next_iamthif_cmd(dev);
		}
		goto out;
	}

	mutex_unlock(&dev->device_lock);
	poll_wait(file, &cl->tx_wait, wait);
	mutex_lock(&dev->device_lock);
	if (MEI_WRITE_COMPLETE == cl->writing_state)
		mask |= (POLLIN | POLLRDNORM);

out:
	mutex_unlock(&dev->device_lock);
	return mask;
}

/*
 * file operations structure will be used for mei char device.
 */
static const struct file_operations mei_fops = {
	.owner = THIS_MODULE,
	.read = mei_read,
	.unlocked_ioctl = mei_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = mei_compat_ioctl,
#endif
	.open = mei_open,
	.release = mei_release,
	.write = mei_write,
	.poll = mei_poll,
	.llseek = no_llseek
};

/*
 * Misc Device Struct
 */
static struct miscdevice mei_misc_device = {
	.name = "mei",
	.fops = &mei_fops,
	.minor = MISC_DYNAMIC_MINOR,
};

/**
 * mei_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_pci_tbl
 *
 * returns 0 on success, <0 on failure.
 */
static int __devinit mei_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	struct mei_device *dev;
	int err;

	mutex_lock(&mei_mutex);
	if (mei_device) {
		err = -EEXIST;
		goto end;
	}
	/* enable pci dev */
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering */
	pci_set_master(pdev);
	/* pci request regions for mei driver */
	err = pci_request_regions(pdev, mei_driver_name);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto disable_device;
	}
	/* allocates and initializes the mei dev structure */
	dev = mei_device_init(pdev);
	if (!dev) {
		err = -ENOMEM;
		goto release_regions;
	}
	/* mapping IO device memory */
	dev->mem_addr = pci_iomap(pdev, 0, 0);
	if (!dev->mem_addr) {
		dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
		err = -ENOMEM;
		goto free_device;
	}
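
	/* try MSI first; with MSI only the threaded handler is needed,
	 * otherwise register a shared quick handler plus the threaded one */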
	pci_enable_msi(pdev);

	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_interrupt_thread_handler,
			0, mei_driver_name, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_interrupt_quick_handler,
			mei_interrupt_thread_handler,
			IRQF_SHARED, mei_driver_name, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
			pdev->irq);
		goto unmap_memory;
	}
	INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
	if (mei_hw_init(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	err = misc_register(&mei_misc_device);
	if (err)
		goto release_irq;

	mei_device = pdev;
	pci_set_drvdata(pdev, dev);

	schedule_delayed_work(&dev->timer_work, HZ);

	mutex_unlock(&mei_mutex);

	pr_debug("initialization successful.\n");

	return 0;

release_irq:
	/* disable interrupts */
	dev->host_hw_state = mei_hcsr_read(dev);
	mei_disable_interrupts(dev);
	flush_scheduled_work();
	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);
unmap_memory:
	pci_iounmap(pdev, dev->mem_addr);
free_device:
	kfree(dev);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
end:
	mutex_unlock(&mei_mutex);
	dev_err(&pdev->dev, "initialization failed.\n");
	return err;
}

/**
 * mei_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void __devexit mei_remove(struct pci_dev *pdev)
{
	struct mei_device *dev;

	if (mei_device != pdev)
		return;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return;

	mutex_lock(&dev->device_lock);

	mei_wd_stop(dev, false);

	mei_device = NULL;

	if (dev->iamthif_cl.state == MEI_FILE_CONNECTED) {
		dev->iamthif_cl.state = MEI_FILE_DISCONNECTING;
		mei_disconnect_host_client(dev, &dev->iamthif_cl);
	}
	if (dev->wd_cl.state == MEI_FILE_CONNECTED) {
		dev->wd_cl.state = MEI_FILE_DISCONNECTING;
		mei_disconnect_host_client(dev, &dev->wd_cl);
	}

	/* Unregistering watchdog device */
	mei_watchdog_unregister(dev);

	/* remove entry if already in list */
	dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n");
	mei_remove_client_from_file_list(dev, dev->wd_cl.host_client_id);
	mei_remove_client_from_file_list(dev, dev->iamthif_cl.host_client_id);

	dev->iamthif_current_cb = NULL;
	dev->me_clients_num = 0;

	mutex_unlock(&dev->device_lock);

	flush_scheduled_work();

	/* disable interrupts */
	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);
	pci_set_drvdata(pdev, NULL);

	if (dev->mem_addr)
		pci_iounmap(pdev, dev->mem_addr);

	kfree(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

#ifdef CONFIG_PM
static int mei_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev = pci_get_drvdata(pdev);
	int err;

	if (!dev)
		return -ENODEV;
	mutex_lock(&dev->device_lock);
	/* Stop watchdog if exists */
	err = mei_wd_stop(dev, true);
	/* Set new mei state */
	if (dev->mei_state == MEI_ENABLED ||
	    dev->mei_state == MEI_RECOVERING_FROM_RESET) {
		dev->mei_state = MEI_POWER_DOWN;
		mei_reset(dev, 0);
	}
	mutex_unlock(&dev->device_lock);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	return err;
}

static int mei_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	int err;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	pci_enable_msi(pdev);

	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_interrupt_thread_handler,
			0, mei_driver_name, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_interrupt_quick_handler,
			mei_interrupt_thread_handler,
			IRQF_SHARED, mei_driver_name, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
			pdev->irq);
		return err;
	}

	mutex_lock(&dev->device_lock);
	dev->mei_state = MEI_POWER_UP;
	mei_reset(dev, 1);
	mutex_unlock(&dev->device_lock);

	/* Start timer if stopped in suspend */
	schedule_delayed_work(&dev->timer_work, HZ);

	return err;
}

static SIMPLE_DEV_PM_OPS(mei_pm_ops, mei_pci_suspend, mei_pci_resume);
#define MEI_PM_OPS	(&mei_pm_ops)
#else
#define MEI_PM_OPS	NULL
#endif /* CONFIG_PM */

/*
 * PCI driver structure
 */
static struct pci_driver mei_driver = {
	.name = mei_driver_name,
	.id_table = mei_pci_tbl,
	.probe = mei_probe,
	.remove = __devexit_p(mei_remove),
	.shutdown = __devexit_p(mei_remove),
	.driver.pm = MEI_PM_OPS,
};

/**
 * mei_init_module - Driver Registration Routine
 *
 * mei_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 *
 * returns 0 on success, <0 on failure.
 */
static int __init mei_init_module(void)
{
	int ret;

	pr_debug("loading.\n");
	/* init pci module */
	ret = pci_register_driver(&mei_driver);
	if (ret < 0)
		pr_err("error registering driver.\n");

	return ret;
}

module_init(mei_init_module);

/**
 * mei_exit_module - Driver Exit Cleanup Routine
 *
 * mei_exit_module is called just before the driver is removed
 * from memory.
 */
static void __exit mei_exit_module(void)
{
	misc_deregister(&mei_misc_device);
	pci_unregister_driver(&mei_driver);

	pr_debug("unloaded successfully.\n");
}

module_exit(mei_exit_module);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");