main.c 29 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199
  1. /*
  2. *
  3. * Intel Management Engine Interface (Intel MEI) Linux driver
  4. * Copyright (c) 2003-2012, Intel Corporation.
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms and conditions of the GNU General Public License,
  8. * version 2, as published by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope it will be useful, but WITHOUT
  11. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  13. * more details.
  14. *
  15. */
  16. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  17. #include <linux/module.h>
  18. #include <linux/moduleparam.h>
  19. #include <linux/kernel.h>
  20. #include <linux/device.h>
  21. #include <linux/fs.h>
  22. #include <linux/errno.h>
  23. #include <linux/types.h>
  24. #include <linux/fcntl.h>
  25. #include <linux/aio.h>
  26. #include <linux/pci.h>
  27. #include <linux/poll.h>
  28. #include <linux/init.h>
  29. #include <linux/ioctl.h>
  30. #include <linux/cdev.h>
  31. #include <linux/sched.h>
  32. #include <linux/uuid.h>
  33. #include <linux/compat.h>
  34. #include <linux/jiffies.h>
  35. #include <linux/interrupt.h>
  36. #include <linux/miscdevice.h>
  37. #include "mei_dev.h"
  38. #include <linux/mei.h>
  39. #include "interface.h"
/* AMT device is a singleton on the platform */
static struct pci_dev *mei_pdev;

/* mei_pci_tbl - PCI Device ID Table: every Intel chipset generation that
 * exposes the ME interface this driver binds to (82946GZ .. Panther Point) */
static DEFINE_PCI_DEVICE_TABLE(mei_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82946GZ)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G35)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82Q965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GM965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GME965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q35)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82G33)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q33)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82X38)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_3200)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_6)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_7)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_8)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_9)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_10)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_3)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_4)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_3)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_4)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_CPT_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PBG_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)},

	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, mei_pci_tbl);

/* serializes probe/remove against the mei_pdev singleton above */
static DEFINE_MUTEX(mei_mutex);
/**
 * mei_clear_list - removes all callbacks associated with file
 * from mei_cb_list
 *
 * @dev: device structure.
 * @file: file structure
 * @mei_cb_list: callbacks list
 *
 * mei_clear_list is called to clear resources associated with file
 * when application calls close function or Ctrl-C was pressed
 *
 * Must be called with dev->device_lock held (it mutates device lists).
 *
 * returns true if callback removed from the list, false otherwise
 */
static bool mei_clear_list(struct mei_device *dev,
		struct file *file, struct list_head *mei_cb_list)
{
	struct mei_cl_cb *cb_pos = NULL;
	struct mei_cl_cb *cb_next = NULL;
	struct file *file_temp;
	bool removed = false;

	/* safe iteration: cb_pos is unlinked and freed inside the loop */
	list_for_each_entry_safe(cb_pos, cb_next, mei_cb_list, cb_list) {
		file_temp = (struct file *)cb_pos->file_object;
		/* check if list member associated with a file */
		if (file_temp == file) {
			/* remove member from the list */
			list_del(&cb_pos->cb_list);
			/* the in-flight iamthif cb must not be left dangling */
			if (dev->iamthif_current_cb == cb_pos) {
				dev->iamthif_current_cb = NULL;
				/* send flow control to iamthif client;
				 * best effort - result is ignored */
				mei_send_flow_control(dev, &dev->iamthif_cl);
			}
			/* free all allocated buffers */
			mei_free_cb_private(cb_pos);
			cb_pos = NULL;
			removed = true;
		}
	}
	return removed;
}
/**
 * mei_clear_lists - removes all callbacks associated with file
 *
 * @dev: device structure
 * @file: file structure
 *
 * mei_clear_lists is called to clear resources associated with file
 * when application calls close function or Ctrl-C was pressed
 *
 * Must be called with dev->device_lock held.
 *
 * returns true if callback removed from the list, false otherwise
 */
static bool mei_clear_lists(struct mei_device *dev, struct file *file)
{
	bool removed = false;

	/* remove callbacks associated with a file.
	 * NOTE(review): the return values for the amthi_cmd_list and
	 * ctrl_rd_list sweeps are deliberately not folded into 'removed' -
	 * confirm this asymmetry is intentional */
	mei_clear_list(dev, file, &dev->amthi_cmd_list.mei_cb.cb_list);
	if (mei_clear_list(dev, file,
			    &dev->amthi_read_complete_list.mei_cb.cb_list))
		removed = true;

	mei_clear_list(dev, file, &dev->ctrl_rd_list.mei_cb.cb_list);

	if (mei_clear_list(dev, file, &dev->ctrl_wr_list.mei_cb.cb_list))
		removed = true;

	if (mei_clear_list(dev, file, &dev->write_waiting_list.mei_cb.cb_list))
		removed = true;

	if (mei_clear_list(dev, file, &dev->write_list.mei_cb.cb_list))
		removed = true;

	/* the current iamthif cb is not on any list; handle it separately,
	 * but only if the sweeps above did not already free it */
	if (dev->iamthif_current_cb && !removed) {
		/* check file and iamthif current cb association */
		if (dev->iamthif_current_cb->file_object == file) {
			/* remove cb */
			mei_free_cb_private(dev->iamthif_current_cb);
			dev->iamthif_current_cb = NULL;
			removed = true;
		}
	}
	return removed;
}
/**
 * find_read_list_entry - find read list entry
 *
 * @dev: device structure
 * @cl: host client whose pending read callback is searched for
 *	(matched by host/ME client id pair, not by pointer identity)
 *
 * returns cb on success, NULL on error
 */
static struct mei_cl_cb *find_read_list_entry(
		struct mei_device *dev,
		struct mei_cl *cl)
{
	struct mei_cl_cb *pos = NULL;
	struct mei_cl_cb *next = NULL;

	dev_dbg(&dev->pdev->dev, "remove read_list CB\n");
	list_for_each_entry_safe(pos, next,
			&dev->read_list.mei_cb.cb_list, cb_list) {
		struct mei_cl *cl_temp;
		cl_temp = (struct mei_cl *)pos->file_private;

		/* compare host and ME client ids of the cb owner */
		if (mei_cl_cmp_id(cl, cl_temp))
			return pos;
	}
	return NULL;
}
/**
 * mei_open - the open function
 *
 * Allocates a host client, assigns it the first free host client id and
 * registers it on the device's file list. The client is left in
 * MEI_FILE_INITIALIZING state; connection happens later via ioctl.
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * returns 0 on success, <0 on error
 */
static int mei_open(struct inode *inode, struct file *file)
{
	struct mei_cl *cl;
	struct mei_device *dev;
	unsigned long cl_id;
	int err;

	err = -ENODEV;
	/* the driver is a platform singleton; no probed device, no open */
	if (!mei_pdev)
		goto out;

	dev = pci_get_drvdata(mei_pdev);
	if (!dev)
		goto out;

	mutex_lock(&dev->device_lock);
	err = -ENOMEM;
	cl = mei_cl_allocate(dev);
	if (!cl)
		goto out_unlock;

	err = -ENODEV;
	if (dev->dev_state != MEI_DEV_ENABLED) {
		dev_dbg(&dev->pdev->dev, "dev_state != MEI_ENABLED  dev_state = %s\n",
		    mei_dev_state_str(dev->dev_state));
		goto out_unlock;
	}
	err = -EMFILE;
	if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT)
		goto out_unlock;

	/* pick the lowest unused host client id; the bitmap tracks ids in use.
	 * Falls through with err == -EMFILE when the map is exhausted. */
	cl_id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
	if (cl_id >= MEI_CLIENTS_MAX)
		goto out_unlock;

	cl->host_client_id = cl_id;

	dev_dbg(&dev->pdev->dev, "client_id = %d\n", cl->host_client_id);

	dev->open_handle_count++;

	list_add_tail(&cl->link, &dev->file_list);

	set_bit(cl->host_client_id, dev->host_clients_map);
	cl->state = MEI_FILE_INITIALIZING;
	cl->sm_state = 0;

	file->private_data = cl;

	mutex_unlock(&dev->device_lock);

	return nonseekable_open(inode, file);

out_unlock:
	mutex_unlock(&dev->device_lock);
	kfree(cl);
out:
	return err;
}
/**
 * mei_release - the release function
 *
 * Tears down the per-file host client: disconnects it from its ME client
 * if connected, flushes queues, releases its id and frees any pending
 * read callback. The shared iamthif (AMT) client takes a separate path
 * since it is not owned by a single file.
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * returns 0 on success, <0 on error
 */
static int mei_release(struct inode *inode, struct file *file)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *cb;
	struct mei_device *dev;
	int rets = 0;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);
	if (cl != &dev->iamthif_cl) {
		if (cl->state == MEI_FILE_CONNECTED) {
			cl->state = MEI_FILE_DISCONNECTING;
			dev_dbg(&dev->pdev->dev,
				"disconnecting client host client = %d, "
			    "ME client = %d\n",
			    cl->host_client_id,
			    cl->me_client_id);
			rets = mei_disconnect_host_client(dev, cl);
		}
		mei_cl_flush_queues(cl);
		dev_dbg(&dev->pdev->dev, "remove client host client = %d, ME client = %d\n",
		    cl->host_client_id,
		    cl->me_client_id);

		/* return the host client id to the pool */
		if (dev->open_handle_count > 0) {
			clear_bit(cl->host_client_id, dev->host_clients_map);
			dev->open_handle_count--;
		}
		mei_remove_client_from_file_list(dev, cl->host_client_id);

		/* free read cb */
		cb = NULL;
		if (cl->read_cb) {
			cb = find_read_list_entry(dev, cl);
			/* Remove entry from read list */
			if (cb)
				list_del(&cb->cb_list);
			/* free cl->read_cb itself, which may or may not be
			 * the same object found on the read list */
			cb = cl->read_cb;
			cl->read_cb = NULL;
		}

		file->private_data = NULL;

		if (cb) {
			mei_free_cb_private(cb);
			cb = NULL;
		}

		kfree(cl);
	} else {
		/* iamthif client: shared, so only drop the handle count and
		 * cancel/clear this file's outstanding work */
		if (dev->open_handle_count > 0)
			dev->open_handle_count--;

		if (dev->iamthif_file_object == file &&
		    dev->iamthif_state != MEI_IAMTHIF_IDLE) {

			dev_dbg(&dev->pdev->dev, "amthi canceled iamthif state %d\n",
			    dev->iamthif_state);
			dev->iamthif_canceled = true;
			if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE) {
				dev_dbg(&dev->pdev->dev, "run next amthi iamthif cb\n");
				mei_run_next_iamthif_cmd(dev);
			}
		}

		if (mei_clear_lists(dev, file))
			dev->iamthif_state = MEI_IAMTHIF_IDLE;

	}
	mutex_unlock(&dev->device_lock);
	return rets;
}
/**
 * mei_read - the read function.
 *
 * Returns data from the client's completed read callback, issuing a new
 * read request and blocking (unless O_NONBLOCK) when none is pending.
 * *offset tracks progress through a single completed message; the cb is
 * freed once fully consumed.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * returns >=0 data length on success , <0 on error
 */
static ssize_t mei_read(struct file *file, char __user *ubuf,
			size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *cb_pos = NULL;
	struct mei_cl_cb *cb = NULL;
	struct mei_device *dev;
	int i;
	int rets;
	int err;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;
	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	if ((cl->sm_state & MEI_WD_STATE_INDEPENDENCE_MSG_SENT) == 0) {
		/* Do not allow to read watchdog client */
		i = mei_me_cl_by_uuid(dev, &mei_wd_guid);
		if (i >= 0) {
			struct mei_me_client *me_client = &dev->me_clients[i];
			if (cl->me_client_id == me_client->client_id) {
				rets = -EBADF;
				goto out;
			}
		}
	} else {
		/* independence message already sent: one-shot flag, clear it */
		cl->sm_state &= ~MEI_WD_STATE_INDEPENDENCE_MSG_SENT;
	}

	if (cl == &dev->iamthif_cl) {
		/* AMT host interface has its own read path */
		rets = amthi_read(dev, file, ubuf, length, offset);
		goto out;
	}

	if (cl->read_cb && cl->read_cb->information > *offset) {
		/* partially consumed message: continue copying */
		cb = cl->read_cb;
		goto copy_buffer;
	} else if (cl->read_cb && cl->read_cb->information > 0 &&
		   cl->read_cb->information <= *offset) {
		/* message fully consumed: free the cb, report EOF */
		cb = cl->read_cb;
		rets = 0;
		goto free;
	} else if ((!cl->read_cb || !cl->read_cb->information) &&
		    *offset > 0) {
		/*Offset needs to be cleaned for contiguous reads*/
		*offset = 0;
		rets = 0;
		goto out;
	}

	err = mei_start_read(dev, cl);
	if (err && err != -EBUSY) {
		dev_dbg(&dev->pdev->dev,
			"mei start read failure with status = %d\n", err);
		rets = err;
		goto out;
	}

	if (MEI_READ_COMPLETE != cl->reading_state &&
			!waitqueue_active(&cl->rx_wait)) {
		if (file->f_flags & O_NONBLOCK) {
			rets = -EAGAIN;
			goto out;
		}

		/* drop the device lock while sleeping for data */
		mutex_unlock(&dev->device_lock);

		if (wait_event_interruptible(cl->rx_wait,
			(MEI_READ_COMPLETE == cl->reading_state ||
			 MEI_FILE_INITIALIZING == cl->state ||
			 MEI_FILE_DISCONNECTED == cl->state ||
			 MEI_FILE_DISCONNECTING == cl->state))) {
			/* interrupted by a signal: return without re-taking
			 * the lock, nothing was consumed */
			if (signal_pending(current))
				return -EINTR;
			return -ERESTARTSYS;
		}

		mutex_lock(&dev->device_lock);
		/* client got torn down while we slept */
		if (MEI_FILE_INITIALIZING == cl->state ||
		    MEI_FILE_DISCONNECTED == cl->state ||
		    MEI_FILE_DISCONNECTING == cl->state) {
			rets = -EBUSY;
			goto out;
		}
	}

	cb = cl->read_cb;

	if (!cb) {
		rets = -ENODEV;
		goto out;
	}
	if (cl->reading_state != MEI_READ_COMPLETE) {
		rets = 0;
		goto out;
	}
	/* now copy the data to user space */
copy_buffer:
	dev_dbg(&dev->pdev->dev, "cb->response_buffer size - %d\n",
	    cb->response_buffer.size);
	dev_dbg(&dev->pdev->dev, "cb->information - %lu\n",
	    cb->information);
	if (length == 0 || ubuf == NULL || *offset > cb->information) {
		rets = -EMSGSIZE;
		goto free;
	}

	/* length is being truncated to PAGE_SIZE, however, */
	/* information size may be longer */
	length = min_t(size_t, length, (cb->information - *offset));

	if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) {
		rets = -EFAULT;
		goto free;
	}

	rets = length;
	*offset += length;
	/* keep the cb around until the whole message has been read */
	if ((unsigned long)*offset < cb->information)
		goto out;

free:
	cb_pos = find_read_list_entry(dev, cl);
	/* Remove entry from read list */
	if (cb_pos)
		list_del(&cb_pos->cb_list);
	mei_free_cb_private(cb);
	cl->reading_state = MEI_IDLE;
	cl->read_cb = NULL;
	cl->read_pending = 0;
out:
	dev_dbg(&dev->pdev->dev, "end mei read rets= %d\n", rets);
	mutex_unlock(&dev->device_lock);
	return rets;
}
/**
 * mei_write - the write function.
 *
 * Copies the user message into a new callback, then either transmits it
 * immediately (when flow-control credits and the host buffer allow) or
 * queues it on the device's write list. The iamthif (AMT) client takes a
 * dedicated path through amthi_write()/the amthi command queue.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * returns >=0 data length on success , <0 on error
 */
static ssize_t mei_write(struct file *file, const char __user *ubuf,
			 size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *write_cb = NULL;
	struct mei_msg_hdr mei_hdr;
	struct mei_device *dev;
	unsigned long timeout = 0;
	int rets;
	int i;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	if (dev->dev_state != MEI_DEV_ENABLED) {
		mutex_unlock(&dev->device_lock);
		return -ENODEV;
	}

	if (cl == &dev->iamthif_cl) {
		/* reclaim a stale amthi read entry for this file: either it
		 * timed out or its data was fully read */
		write_cb = find_amthi_read_list_entry(dev, file);
		if (write_cb) {
			timeout = write_cb->read_time +
					msecs_to_jiffies(IAMTHIF_READ_TIMER);

			if (time_after(jiffies, timeout) ||
				 cl->reading_state == MEI_READ_COMPLETE) {
				*offset = 0;
				list_del(&write_cb->cb_list);
				mei_free_cb_private(write_cb);
				write_cb = NULL;
			}
		}
	}

	/* free entry used in read */
	if (cl->reading_state == MEI_READ_COMPLETE) {
		*offset = 0;
		write_cb = find_read_list_entry(dev, cl);
		if (write_cb) {
			list_del(&write_cb->cb_list);
			mei_free_cb_private(write_cb);
			write_cb = NULL;
			cl->reading_state = MEI_IDLE;
			cl->read_cb = NULL;
			cl->read_pending = 0;
		}
	} else if (cl->reading_state == MEI_IDLE && !cl->read_pending)
		*offset = 0;

	/* write_cb is reused below to hold the outgoing message */
	write_cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
	if (!write_cb) {
		mutex_unlock(&dev->device_lock);
		return -ENOMEM;
	}

	write_cb->file_object = file;
	write_cb->file_private = cl;
	write_cb->request_buffer.data = kmalloc(length, GFP_KERNEL);
	rets = -ENOMEM;
	if (!write_cb->request_buffer.data)
		goto unlock_dev;

	dev_dbg(&dev->pdev->dev, "length =%d\n", (int) length);

	rets = -EFAULT;
	if (copy_from_user(write_cb->request_buffer.data, ubuf, length))
		goto unlock_dev;

	/* a 4-byte watchdog "independence" message unlocks reads from the
	 * watchdog client (see the guard in mei_read) */
	cl->sm_state = 0;
	if (length == 4 &&
	    ((memcmp(mei_wd_state_independence_msg[0],
				 write_cb->request_buffer.data, 4) == 0) ||
	     (memcmp(mei_wd_state_independence_msg[1],
				 write_cb->request_buffer.data, 4) == 0) ||
	     (memcmp(mei_wd_state_independence_msg[2],
				 write_cb->request_buffer.data, 4) == 0)))
		cl->sm_state |= MEI_WD_STATE_INDEPENDENCE_MSG_SENT;

	INIT_LIST_HEAD(&write_cb->cb_list);
	if (cl == &dev->iamthif_cl) {
		/* amthi messages also carry a pre-allocated response buffer */
		write_cb->response_buffer.data =
		    kmalloc(dev->iamthif_mtu, GFP_KERNEL);
		if (!write_cb->response_buffer.data) {
			rets = -ENOMEM;
			goto unlock_dev;
		}
		if (dev->dev_state != MEI_DEV_ENABLED) {
			rets = -ENODEV;
			goto unlock_dev;
		}
		i = mei_me_cl_by_id(dev, dev->iamthif_cl.me_client_id);
		if (i < 0) {
			rets = -ENODEV;
			goto unlock_dev;
		}
		if (length > dev->me_clients[i].props.max_msg_length ||
			length <= 0) {
			rets = -EMSGSIZE;
			goto unlock_dev;
		}

		write_cb->response_buffer.size = dev->iamthif_mtu;
		write_cb->major_file_operations = MEI_IOCTL;
		write_cb->information = 0;
		write_cb->request_buffer.size = length;
		if (dev->iamthif_cl.state != MEI_FILE_CONNECTED) {
			rets = -ENODEV;
			goto unlock_dev;
		}

		/* amthi is strictly serialized: queue when busy, else send */
		if (!list_empty(&dev->amthi_cmd_list.mei_cb.cb_list) ||
				dev->iamthif_state != MEI_IAMTHIF_IDLE) {
			dev_dbg(&dev->pdev->dev, "amthi_state = %d\n",
					(int) dev->iamthif_state);
			dev_dbg(&dev->pdev->dev, "add amthi cb to amthi cmd waiting list\n");
			list_add_tail(&write_cb->cb_list,
					&dev->amthi_cmd_list.mei_cb.cb_list);
			rets = length;
		} else {
			dev_dbg(&dev->pdev->dev, "call amthi write\n");
			rets = amthi_write(dev, write_cb);

			if (rets) {
				dev_dbg(&dev->pdev->dev, "amthi write failed with status = %d\n",
				    rets);
				goto unlock_dev;
			}
			rets = length;
		}
		mutex_unlock(&dev->device_lock);
		return rets;
	}

	write_cb->major_file_operations = MEI_WRITE;
	/* make sure information is zero before we start */

	write_cb->information = 0;
	write_cb->request_buffer.size = length;

	dev_dbg(&dev->pdev->dev, "host client = %d, ME client = %d\n",
	    cl->host_client_id, cl->me_client_id);
	if (cl->state != MEI_FILE_CONNECTED) {
		rets = -ENODEV;
		dev_dbg(&dev->pdev->dev, "host client = %d, is not connected to ME client = %d",
			cl->host_client_id,
			cl->me_client_id);
		goto unlock_dev;
	}
	i = mei_me_cl_by_id(dev, cl->me_client_id);
	if (i < 0) {
		rets = -ENODEV;
		goto unlock_dev;
	}
	if (length > dev->me_clients[i].props.max_msg_length || length <= 0) {
		rets = -EINVAL;
		goto unlock_dev;
	}
	write_cb->file_private = cl;

	rets = mei_flow_ctrl_creds(dev, cl);
	if (rets < 0)
		goto unlock_dev;

	/* transmit now only with a flow-control credit AND a free host
	 * buffer; otherwise defer to the write list */
	if (rets && dev->mei_host_buffer_is_empty) {
		rets = 0;
		dev->mei_host_buffer_is_empty = false;
		if (length > mei_hbuf_max_data(dev)) {
			/* message exceeds one host buffer: send first
			 * fragment, interrupt path sends the rest */
			mei_hdr.length = mei_hbuf_max_data(dev);
			mei_hdr.msg_complete = 0;
		} else {
			mei_hdr.length = length;
			mei_hdr.msg_complete = 1;
		}
		mei_hdr.host_addr = cl->host_client_id;
		mei_hdr.me_addr = cl->me_client_id;
		mei_hdr.reserved = 0;
		dev_dbg(&dev->pdev->dev, "call mei_write_message header=%08x.\n",
		    *((u32 *) &mei_hdr));
		if (mei_write_message(dev, &mei_hdr,
			(unsigned char *) (write_cb->request_buffer.data),
			mei_hdr.length)) {
			rets = -ENODEV;
			goto unlock_dev;
		}
		cl->writing_state = MEI_WRITING;
		write_cb->information = mei_hdr.length;
		if (mei_hdr.msg_complete) {
			/* consumed a credit; wait for completion ack */
			if (mei_flow_ctrl_reduce(dev, cl)) {
				rets = -ENODEV;
				goto unlock_dev;
			}
			list_add_tail(&write_cb->cb_list,
				      &dev->write_waiting_list.mei_cb.cb_list);
		} else {
			list_add_tail(&write_cb->cb_list,
				      &dev->write_list.mei_cb.cb_list);
		}

	} else {

		write_cb->information = 0;
		cl->writing_state = MEI_WRITING;
		list_add_tail(&write_cb->cb_list,
			      &dev->write_list.mei_cb.cb_list);
	}
	mutex_unlock(&dev->device_lock);
	return length;

unlock_dev:
	mutex_unlock(&dev->device_lock);
	mei_free_cb_private(write_cb);
	return rets;
}
  647. /**
  648. * mei_ioctl - the IOCTL function
  649. *
  650. * @file: pointer to file structure
  651. * @cmd: ioctl command
  652. * @data: pointer to mei message structure
  653. *
  654. * returns 0 on success , <0 on error
  655. */
  656. static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
  657. {
  658. struct mei_device *dev;
  659. struct mei_cl *cl = file->private_data;
  660. struct mei_connect_client_data *connect_data = NULL;
  661. int rets;
  662. if (cmd != IOCTL_MEI_CONNECT_CLIENT)
  663. return -EINVAL;
  664. if (WARN_ON(!cl || !cl->dev))
  665. return -ENODEV;
  666. dev = cl->dev;
  667. dev_dbg(&dev->pdev->dev, "IOCTL cmd = 0x%x", cmd);
  668. mutex_lock(&dev->device_lock);
  669. if (dev->dev_state != MEI_DEV_ENABLED) {
  670. rets = -ENODEV;
  671. goto out;
  672. }
  673. dev_dbg(&dev->pdev->dev, ": IOCTL_MEI_CONNECT_CLIENT.\n");
  674. connect_data = kzalloc(sizeof(struct mei_connect_client_data),
  675. GFP_KERNEL);
  676. if (!connect_data) {
  677. rets = -ENOMEM;
  678. goto out;
  679. }
  680. dev_dbg(&dev->pdev->dev, "copy connect data from user\n");
  681. if (copy_from_user(connect_data, (char __user *)data,
  682. sizeof(struct mei_connect_client_data))) {
  683. dev_dbg(&dev->pdev->dev, "failed to copy data from userland\n");
  684. rets = -EFAULT;
  685. goto out;
  686. }
  687. rets = mei_ioctl_connect_client(file, connect_data);
  688. /* if all is ok, copying the data back to user. */
  689. if (rets)
  690. goto out;
  691. dev_dbg(&dev->pdev->dev, "copy connect data to user\n");
  692. if (copy_to_user((char __user *)data, connect_data,
  693. sizeof(struct mei_connect_client_data))) {
  694. dev_dbg(&dev->pdev->dev, "failed to copy data to userland\n");
  695. rets = -EFAULT;
  696. goto out;
  697. }
  698. out:
  699. kfree(connect_data);
  700. mutex_unlock(&dev->device_lock);
  701. return rets;
  702. }
/**
 * mei_compat_ioctl - the compat IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure
 *
 * returns 0 on success , <0 on error
 */
#ifdef CONFIG_COMPAT
static long mei_compat_ioctl(struct file *file,
			unsigned int cmd, unsigned long data)
{
	/* widen the 32-bit userspace pointer, then delegate to the
	 * native handler */
	return mei_ioctl(file, cmd, (unsigned long)compat_ptr(data));
}
#endif /* CONFIG_COMPAT */
  719. /**
  720. * mei_poll - the poll function
  721. *
  722. * @file: pointer to file structure
  723. * @wait: pointer to poll_table structure
  724. *
  725. * returns poll mask
  726. */
  727. static unsigned int mei_poll(struct file *file, poll_table *wait)
  728. {
  729. struct mei_cl *cl = file->private_data;
  730. struct mei_device *dev;
  731. unsigned int mask = 0;
  732. if (WARN_ON(!cl || !cl->dev))
  733. return mask;
  734. dev = cl->dev;
  735. mutex_lock(&dev->device_lock);
  736. if (dev->dev_state != MEI_DEV_ENABLED)
  737. goto out;
  738. if (cl == &dev->iamthif_cl) {
  739. mutex_unlock(&dev->device_lock);
  740. poll_wait(file, &dev->iamthif_cl.wait, wait);
  741. mutex_lock(&dev->device_lock);
  742. if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE &&
  743. dev->iamthif_file_object == file) {
  744. mask |= (POLLIN | POLLRDNORM);
  745. dev_dbg(&dev->pdev->dev, "run next amthi cb\n");
  746. mei_run_next_iamthif_cmd(dev);
  747. }
  748. goto out;
  749. }
  750. mutex_unlock(&dev->device_lock);
  751. poll_wait(file, &cl->tx_wait, wait);
  752. mutex_lock(&dev->device_lock);
  753. if (MEI_WRITE_COMPLETE == cl->writing_state)
  754. mask |= (POLLIN | POLLRDNORM);
  755. out:
  756. mutex_unlock(&dev->device_lock);
  757. return mask;
  758. }
/*
 * file operations structure will be used for mei char device.
 */
static const struct file_operations mei_fops = {
	.owner = THIS_MODULE,
	.read = mei_read,
	.unlocked_ioctl = mei_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = mei_compat_ioctl,
#endif
	.open = mei_open,
	.release = mei_release,
	.write = mei_write,
	.poll = mei_poll,
	.llseek = no_llseek	/* device is a message stream, not seekable */
};

/*
 * Misc Device Struct
 *
 * Registered in mei_probe as /dev/mei with a dynamically assigned minor.
 */
static struct miscdevice  mei_misc_device = {
		.name = "mei",
		.fops = &mei_fops,
		.minor = MISC_DYNAMIC_MINOR,
};
  783. /**
  784. * mei_quirk_probe - probe for devices that doesn't valid ME interface
  785. * @pdev: PCI device structure
  786. * @ent: entry into pci_device_table
  787. *
  788. * returns true if ME Interface is valid, false otherwise
  789. */
  790. static bool __devinit mei_quirk_probe(struct pci_dev *pdev,
  791. const struct pci_device_id *ent)
  792. {
  793. u32 reg;
  794. if (ent->device == MEI_DEV_ID_PBG_1) {
  795. pci_read_config_dword(pdev, 0x48, &reg);
  796. /* make sure that bit 9 is up and bit 10 is down */
  797. if ((reg & 0x600) == 0x200) {
  798. dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
  799. return false;
  800. }
  801. }
  802. return true;
  803. }
/**
 * mei_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: matching entry in mei_pci_tbl
 *
 * returns 0 on success, <0 on failure.
 */
static int __devinit mei_probe(struct pci_dev *pdev,
		const struct pci_device_id *ent)
{
	struct mei_device *dev;
	int err;

	/* serialize probe against other probes/removals of the single device */
	mutex_lock(&mei_mutex);

	/* reject hardware variants without a valid ME interface */
	if (!mei_quirk_probe(pdev, ent)) {
		err = -ENODEV;
		goto end;
	}

	/* only one ME device is supported; mei_pdev is the singleton handle */
	if (mei_pdev) {
		err = -EEXIST;
		goto end;
	}

	/* enable pci dev */
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering */
	pci_set_master(pdev);
	/* pci request regions for mei driver */
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto disable_device;
	}
	/* allocates and initializes the mei dev structure */
	dev = mei_device_init(pdev);
	if (!dev) {
		err = -ENOMEM;
		goto release_regions;
	}
	/* mapping IO device memory (BAR 0, full length) */
	dev->mem_addr = pci_iomap(pdev, 0, 0);
	if (!dev->mem_addr) {
		dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
		err = -ENOMEM;
		goto free_device;
	}
	/* MSI is best-effort; on failure we fall back to a shared legacy IRQ */
	pci_enable_msi(pdev);

	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		/* MSI line is exclusive: no quick (hard-irq) handler needed */
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_interrupt_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_interrupt_quick_handler,
			mei_interrupt_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);
	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
			pdev->irq);
		goto disable_msi;
	}

	INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
	if (mei_hw_init(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	/* expose /dev/mei only once the hardware is fully up */
	err = misc_register(&mei_misc_device);
	if (err)
		goto release_irq;

	mei_pdev = pdev;
	pci_set_drvdata(pdev, dev);

	/* start the periodic housekeeping timer */
	schedule_delayed_work(&dev->timer_work, HZ);

	mutex_unlock(&mei_mutex);

	pr_debug("initialization successful.\n");

	return 0;

	/* error unwind: release resources in reverse order of acquisition */
release_irq:
	/* disable interrupts */
	dev->host_hw_state = mei_hcsr_read(dev);
	mei_disable_interrupts(dev);
	flush_scheduled_work();
	free_irq(pdev->irq, dev);
disable_msi:
	pci_disable_msi(pdev);
	pci_iounmap(pdev, dev->mem_addr);
free_device:
	kfree(dev);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
end:
	mutex_unlock(&mei_mutex);
	dev_err(&pdev->dev, "initialization failed.\n");
	return err;
}
  905. /**
  906. * mei_remove - Device Removal Routine
  907. *
  908. * @pdev: PCI device structure
  909. *
  910. * mei_remove is called by the PCI subsystem to alert the driver
  911. * that it should release a PCI device.
  912. */
  913. static void __devexit mei_remove(struct pci_dev *pdev)
  914. {
  915. struct mei_device *dev;
  916. if (mei_pdev != pdev)
  917. return;
  918. dev = pci_get_drvdata(pdev);
  919. if (!dev)
  920. return;
  921. mutex_lock(&dev->device_lock);
  922. cancel_delayed_work(&dev->timer_work);
  923. mei_wd_stop(dev);
  924. mei_pdev = NULL;
  925. if (dev->iamthif_cl.state == MEI_FILE_CONNECTED) {
  926. dev->iamthif_cl.state = MEI_FILE_DISCONNECTING;
  927. mei_disconnect_host_client(dev, &dev->iamthif_cl);
  928. }
  929. if (dev->wd_cl.state == MEI_FILE_CONNECTED) {
  930. dev->wd_cl.state = MEI_FILE_DISCONNECTING;
  931. mei_disconnect_host_client(dev, &dev->wd_cl);
  932. }
  933. /* Unregistering watchdog device */
  934. mei_watchdog_unregister(dev);
  935. /* remove entry if already in list */
  936. dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n");
  937. mei_remove_client_from_file_list(dev, dev->wd_cl.host_client_id);
  938. mei_remove_client_from_file_list(dev, dev->iamthif_cl.host_client_id);
  939. dev->iamthif_current_cb = NULL;
  940. dev->me_clients_num = 0;
  941. mutex_unlock(&dev->device_lock);
  942. flush_scheduled_work();
  943. /* disable interrupts */
  944. mei_disable_interrupts(dev);
  945. free_irq(pdev->irq, dev);
  946. pci_disable_msi(pdev);
  947. pci_set_drvdata(pdev, NULL);
  948. if (dev->mem_addr)
  949. pci_iounmap(pdev, dev->mem_addr);
  950. kfree(dev);
  951. pci_release_regions(pdev);
  952. pci_disable_device(pdev);
  953. misc_deregister(&mei_misc_device);
  954. }
  955. #ifdef CONFIG_PM
  956. static int mei_pci_suspend(struct device *device)
  957. {
  958. struct pci_dev *pdev = to_pci_dev(device);
  959. struct mei_device *dev = pci_get_drvdata(pdev);
  960. int err;
  961. if (!dev)
  962. return -ENODEV;
  963. mutex_lock(&dev->device_lock);
  964. cancel_delayed_work(&dev->timer_work);
  965. /* Stop watchdog if exists */
  966. err = mei_wd_stop(dev);
  967. /* Set new mei state */
  968. if (dev->dev_state == MEI_DEV_ENABLED ||
  969. dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET) {
  970. dev->dev_state = MEI_DEV_POWER_DOWN;
  971. mei_reset(dev, 0);
  972. }
  973. mutex_unlock(&dev->device_lock);
  974. free_irq(pdev->irq, dev);
  975. pci_disable_msi(pdev);
  976. return err;
  977. }
  978. static int mei_pci_resume(struct device *device)
  979. {
  980. struct pci_dev *pdev = to_pci_dev(device);
  981. struct mei_device *dev;
  982. int err;
  983. dev = pci_get_drvdata(pdev);
  984. if (!dev)
  985. return -ENODEV;
  986. pci_enable_msi(pdev);
  987. /* request and enable interrupt */
  988. if (pci_dev_msi_enabled(pdev))
  989. err = request_threaded_irq(pdev->irq,
  990. NULL,
  991. mei_interrupt_thread_handler,
  992. IRQF_ONESHOT, KBUILD_MODNAME, dev);
  993. else
  994. err = request_threaded_irq(pdev->irq,
  995. mei_interrupt_quick_handler,
  996. mei_interrupt_thread_handler,
  997. IRQF_SHARED, KBUILD_MODNAME, dev);
  998. if (err) {
  999. dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
  1000. pdev->irq);
  1001. return err;
  1002. }
  1003. mutex_lock(&dev->device_lock);
  1004. dev->dev_state = MEI_DEV_POWER_UP;
  1005. mei_reset(dev, 1);
  1006. mutex_unlock(&dev->device_lock);
  1007. /* Start timer if stopped in suspend */
  1008. schedule_delayed_work(&dev->timer_work, HZ);
  1009. return err;
  1010. }
/* Bundle the suspend/resume callbacks into a dev_pm_ops structure. */
static SIMPLE_DEV_PM_OPS(mei_pm_ops, mei_pci_suspend, mei_pci_resume);
#define MEI_PM_OPS (&mei_pm_ops)
#else
/* Power management disabled: register no PM callbacks. */
#define MEI_PM_OPS NULL
#endif /* CONFIG_PM */
/*
 * PCI driver structure binding the mei driver to the IDs in
 * mei_pci_tbl.
 */
static struct pci_driver mei_driver = {
	.name = KBUILD_MODNAME,
	.id_table = mei_pci_tbl,
	.probe = mei_probe,
	.remove = __devexit_p(mei_remove),
	/* reuse the removal routine to quiesce the device on reboot */
	.shutdown = __devexit_p(mei_remove),
	.driver.pm = MEI_PM_OPS,	/* NULL when !CONFIG_PM */
};
/* Generates the module init/exit boilerplate registering mei_driver. */
module_pci_driver(mei_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");