  1. /*
  2. * Driver for HP iLO/iLO2 management processor.
  3. *
  4. * Copyright (C) 2008 Hewlett-Packard Development Company, L.P.
  5. * David Altobelli <david.altobelli@hp.com>
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License version 2 as
  9. * published by the Free Software Foundation.
  10. */
  11. #include <linux/kernel.h>
  12. #include <linux/types.h>
  13. #include <linux/module.h>
  14. #include <linux/fs.h>
  15. #include <linux/pci.h>
  16. #include <linux/ioport.h>
  17. #include <linux/device.h>
  18. #include <linux/file.h>
  19. #include <linux/cdev.h>
  20. #include <linux/spinlock.h>
  21. #include <linux/delay.h>
  22. #include <linux/uaccess.h>
  23. #include <linux/io.h>
  24. #include "hpilo.h"
/* device class backing the /dev/hpilo/dXccbN nodes */
static struct class *ilo_class;
/* char-dev major number allocated in ilo_init() */
static unsigned int ilo_major;
/* one slot per supported board; nonzero while that minor range is claimed */
static char ilo_hwdev[MAX_ILO_DEV];
  28. static inline int get_entry_id(int entry)
  29. {
  30. return (entry & ENTRY_MASK_DESCRIPTOR) >> ENTRY_BITPOS_DESCRIPTOR;
  31. }
  32. static inline int get_entry_len(int entry)
  33. {
  34. return ((entry & ENTRY_MASK_QWORDS) >> ENTRY_BITPOS_QWORDS) << 3;
  35. }
  36. static inline int mk_entry(int id, int len)
  37. {
  38. int qlen = len & 7 ? (len >> 3) + 1 : len >> 3;
  39. return id << ENTRY_BITPOS_DESCRIPTOR | qlen << ENTRY_BITPOS_QWORDS;
  40. }
/* bytes of descriptor memory consumed by nr_entry queue entries */
static inline int desc_mem_sz(int nr_entry)
{
	return nr_entry << L2_QENTRY_SZ;
}
/*
 * FIFO queues, shared with hardware.
 *
 * If a queue has empty slots, an entry is added to the queue tail,
 * and that entry is marked as occupied.
 * Entries can be dequeued from the head of the list, when the device
 * has marked the entry as consumed.
 *
 * Returns true on successful queue/dequeue, false on failure.
 */
static int fifo_enqueue(struct ilo_hwinfo *hw, char *fifobar, int entry)
{
	struct fifo *fifo_q = FIFOBARTOHANDLE(fifobar);
	int ret = 0;

	spin_lock(&hw->fifo_lock);
	/*
	 * Full test: the slot after tail must have its O (occupied) bit
	 * clear before we claim the slot at tail.  NOTE(review): checking
	 * tail+1 rather than tail looks deliberate (keeps one slot of
	 * slack between producer and consumer) — confirm against the iLO
	 * channel interface spec.
	 */
	if (!(fifo_q->fifobar[(fifo_q->tail + 1) & fifo_q->imask]
	    & ENTRY_MASK_O)) {
		/* merge carries the occupied bit to be OR'd into the entry */
		fifo_q->fifobar[fifo_q->tail & fifo_q->imask] |=
			(entry & ENTRY_MASK_NOSTATE) | fifo_q->merge;
		fifo_q->tail += 1;
		ret = 1;
	}
	spin_unlock(&hw->fifo_lock);

	return ret;
}
/*
 * Pull the entry at the fifo head if the device has marked it consumed
 * (C bit set).  On success the state-less entry bits are stored through
 * *entry (when non-NULL) and 1 is returned; otherwise returns 0.
 */
static int fifo_dequeue(struct ilo_hwinfo *hw, char *fifobar, int *entry)
{
	struct fifo *fifo_q = FIFOBARTOHANDLE(fifobar);
	int ret = 0;
	u64 c;

	spin_lock(&hw->fifo_lock);
	c = fifo_q->fifobar[fifo_q->head & fifo_q->imask];
	if (c & ENTRY_MASK_C) {
		if (entry)
			*entry = c & ENTRY_MASK_NOSTATE;

		/*
		 * NOTE(review): (c | ENTRY_MASK) + 1 saturates the state
		 * bits then increments — presumably this re-encodes the
		 * slot as free for the producer side; confirm against the
		 * channel interface spec.
		 */
		fifo_q->fifobar[fifo_q->head & fifo_q->imask] =
			(c | ENTRY_MASK) + 1;
		fifo_q->head += 1;
		ret = 1;
	}
	spin_unlock(&hw->fifo_lock);

	return ret;
}
  88. static int ilo_pkt_enqueue(struct ilo_hwinfo *hw, struct ccb *ccb,
  89. int dir, int id, int len)
  90. {
  91. char *fifobar;
  92. int entry;
  93. if (dir == SENDQ)
  94. fifobar = ccb->ccb_u1.send_fifobar;
  95. else
  96. fifobar = ccb->ccb_u3.recv_fifobar;
  97. entry = mk_entry(id, len);
  98. return fifo_enqueue(hw, fifobar, entry);
  99. }
  100. static int ilo_pkt_dequeue(struct ilo_hwinfo *hw, struct ccb *ccb,
  101. int dir, int *id, int *len, void **pkt)
  102. {
  103. char *fifobar, *desc;
  104. int entry = 0, pkt_id = 0;
  105. int ret;
  106. if (dir == SENDQ) {
  107. fifobar = ccb->ccb_u1.send_fifobar;
  108. desc = ccb->ccb_u2.send_desc;
  109. } else {
  110. fifobar = ccb->ccb_u3.recv_fifobar;
  111. desc = ccb->ccb_u4.recv_desc;
  112. }
  113. ret = fifo_dequeue(hw, fifobar, &entry);
  114. if (ret) {
  115. pkt_id = get_entry_id(entry);
  116. if (id)
  117. *id = pkt_id;
  118. if (len)
  119. *len = get_entry_len(entry);
  120. if (pkt)
  121. *pkt = (void *)(desc + desc_mem_sz(pkt_id));
  122. }
  123. return ret;
  124. }
/* ring this channel's doorbell (write 1 to its doorbell page) */
static inline void doorbell_set(struct ccb *ccb)
{
	iowrite8(1, ccb->ccb_u5.db_base);
}

/*
 * Write 2 to this channel's doorbell.  NOTE(review): presumably the
 * "clear/stop" companion of doorbell_set() — confirm the value's meaning
 * against the iLO channel interface spec.
 */
static inline void doorbell_clr(struct ccb *ccb)
{
	iowrite8(2, ccb->ccb_u5.db_base);
}
  133. static inline int ctrl_set(int l2sz, int idxmask, int desclim)
  134. {
  135. int active = 0, go = 1;
  136. return l2sz << CTRL_BITPOS_L2SZ |
  137. idxmask << CTRL_BITPOS_FIFOINDEXMASK |
  138. desclim << CTRL_BITPOS_DESCLIMIT |
  139. active << CTRL_BITPOS_A |
  140. go << CTRL_BITPOS_G;
  141. }
  142. static void ctrl_setup(struct ccb *ccb, int nr_desc, int l2desc_sz)
  143. {
  144. /* for simplicity, use the same parameters for send and recv ctrls */
  145. ccb->send_ctrl = ctrl_set(l2desc_sz, nr_desc-1, nr_desc-1);
  146. ccb->recv_ctrl = ctrl_set(l2desc_sz, nr_desc-1, nr_desc-1);
  147. }
  148. static inline int fifo_sz(int nr_entry)
  149. {
  150. /* size of a fifo is determined by the number of entries it contains */
  151. return (nr_entry * sizeof(u64)) + FIFOHANDLESIZE;
  152. }
  153. static void fifo_setup(void *base_addr, int nr_entry)
  154. {
  155. struct fifo *fifo_q = base_addr;
  156. int i;
  157. /* set up an empty fifo */
  158. fifo_q->head = 0;
  159. fifo_q->tail = 0;
  160. fifo_q->reset = 0;
  161. fifo_q->nrents = nr_entry;
  162. fifo_q->imask = nr_entry - 1;
  163. fifo_q->merge = ENTRY_MASK_O;
  164. for (i = 0; i < nr_entry; i++)
  165. fifo_q->fifobar[i] = 0;
  166. }
/*
 * Tear down a channel: ask the hw to stop, wait for it to go inactive,
 * wipe the device-resident ccb, and free the DMA backing memory.
 */
static void ilo_ccb_close(struct pci_dev *pdev, struct ccb_data *data)
{
	struct ccb *driver_ccb;
	struct ccb __iomem *device_ccb;
	int retries;

	driver_ccb = &data->driver_ccb;
	device_ccb = data->mapped_ccb;

	/* complicated dance to tell the hw we are stopping */
	doorbell_clr(driver_ccb);
	/* drop the G (go) bit on both the send and recv control words */
	iowrite32(ioread32(&device_ccb->send_ctrl) & ~(1 << CTRL_BITPOS_G),
		  &device_ccb->send_ctrl);
	iowrite32(ioread32(&device_ccb->recv_ctrl) & ~(1 << CTRL_BITPOS_G),
		  &device_ccb->recv_ctrl);

	/* give iLO some time to process stop request */
	for (retries = MAX_WAIT; retries > 0; retries--) {
		doorbell_set(driver_ccb);
		udelay(WAIT_TIME);
		/* stopped once the hw clears A (active) on both queues */
		if (!(ioread32(&device_ccb->send_ctrl) & (1 << CTRL_BITPOS_A))
		    &&
		    !(ioread32(&device_ccb->recv_ctrl) & (1 << CTRL_BITPOS_A)))
			break;
	}
	if (retries == 0)
		dev_err(&pdev->dev, "Closing, but controller still active\n");

	/* clear the hw ccb */
	memset_io(device_ccb, 0, sizeof(struct ccb));

	/* free resources used to back send/recv queues */
	pci_free_consistent(pdev, data->dma_size, data->dma_va, data->dma_pa);
}
/*
 * Bring up the channel for @slot: allocate one DMA region backing both
 * fifos and both descriptor areas, build a driver-side ccb (virtual
 * addresses) and a device-side ccb (bus addresses), copy the latter to
 * device shared memory, prime the queues, and verify iLO responds.
 * Returns 0 or a negative errno.
 */
static int ilo_ccb_open(struct ilo_hwinfo *hw, struct ccb_data *data, int slot)
{
	char *dma_va, *dma_pa;
	int pkt_id, pkt_sz, i, error;
	struct ccb *driver_ccb, *ilo_ccb;
	struct pci_dev *pdev;

	driver_ccb = &data->driver_ccb;
	ilo_ccb = &data->ilo_ccb;
	pdev = hw->ilo_dev;

	/* one allocation covers both fifos, both descriptor regions, and
	   slack for the two alignment round-ups performed below */
	data->dma_size = 2 * fifo_sz(NR_QENTRY) +
			 2 * desc_mem_sz(NR_QENTRY) +
			 ILO_START_ALIGN + ILO_CACHE_SZ;

	error = -ENOMEM;
	data->dma_va = pci_alloc_consistent(pdev, data->dma_size,
					    &data->dma_pa);
	if (!data->dma_va)
		goto out;

	/*
	 * NOTE(review): the bus address is walked through a char* so the
	 * same pointer arithmetic applies to both views; on a platform
	 * where dma_addr_t is wider than a pointer this cast would
	 * truncate — verify for the targets this driver supports.
	 */
	dma_va = (char *)data->dma_va;
	dma_pa = (char *)data->dma_pa;
	memset(dma_va, 0, data->dma_size);
	dma_va = (char *)roundup((unsigned long)dma_va, ILO_START_ALIGN);
	dma_pa = (char *)roundup((unsigned long)dma_pa, ILO_START_ALIGN);

	/*
	 * Create two ccb's, one with virt addrs, one with phys addrs.
	 * Copy the phys addr ccb to device shared mem.
	 */
	ctrl_setup(driver_ccb, NR_QENTRY, L2_QENTRY_SZ);
	ctrl_setup(ilo_ccb, NR_QENTRY, L2_QENTRY_SZ);

	/* carve out the send fifo ... */
	fifo_setup(dma_va, NR_QENTRY);
	driver_ccb->ccb_u1.send_fifobar = dma_va + FIFOHANDLESIZE;
	ilo_ccb->ccb_u1.send_fifobar = dma_pa + FIFOHANDLESIZE;
	dma_va += fifo_sz(NR_QENTRY);
	dma_pa += fifo_sz(NR_QENTRY);

	/* ... then, cache-aligned, the receive fifo ... */
	dma_va = (char *)roundup((unsigned long)dma_va, ILO_CACHE_SZ);
	dma_pa = (char *)roundup((unsigned long)dma_pa, ILO_CACHE_SZ);
	fifo_setup(dma_va, NR_QENTRY);
	driver_ccb->ccb_u3.recv_fifobar = dma_va + FIFOHANDLESIZE;
	ilo_ccb->ccb_u3.recv_fifobar = dma_pa + FIFOHANDLESIZE;
	dma_va += fifo_sz(NR_QENTRY);
	dma_pa += fifo_sz(NR_QENTRY);

	/* ... and finally the send and receive descriptor regions */
	driver_ccb->ccb_u2.send_desc = dma_va;
	ilo_ccb->ccb_u2.send_desc = dma_pa;
	dma_pa += desc_mem_sz(NR_QENTRY);
	dma_va += desc_mem_sz(NR_QENTRY);

	driver_ccb->ccb_u4.recv_desc = dma_va;
	ilo_ccb->ccb_u4.recv_desc = dma_pa;

	driver_ccb->channel = slot;
	ilo_ccb->channel = slot;

	/* each slot owns one page of the doorbell aperture */
	driver_ccb->ccb_u5.db_base = hw->db_vaddr + (slot << L2_DB_SIZE);
	ilo_ccb->ccb_u5.db_base = NULL; /* hw ccb's doorbell is not used */

	/* copy the ccb with physical addrs to device memory */
	data->mapped_ccb = (struct ccb __iomem *)
				(hw->ram_vaddr + (slot * ILOHW_CCB_SZ));
	memcpy_toio(data->mapped_ccb, ilo_ccb, sizeof(struct ccb));

	/* put packets on the send and receive queues */
	pkt_sz = 0;
	for (pkt_id = 0; pkt_id < NR_QENTRY; pkt_id++) {
		ilo_pkt_enqueue(hw, driver_ccb, SENDQ, pkt_id, pkt_sz);
		doorbell_set(driver_ccb);
	}
	pkt_sz = desc_mem_sz(1);
	for (pkt_id = 0; pkt_id < NR_QENTRY; pkt_id++)
		ilo_pkt_enqueue(hw, driver_ccb, RECVQ, pkt_id, pkt_sz);

	doorbell_clr(driver_ccb);

	/* make sure iLO is really handling requests */
	for (i = MAX_WAIT; i > 0; i--) {
		if (ilo_pkt_dequeue(hw, driver_ccb, SENDQ, &pkt_id, NULL, NULL))
			break;
		udelay(WAIT_TIME);
	}

	if (i) {
		/* got one back: re-post it so the send queue stays primed */
		ilo_pkt_enqueue(hw, driver_ccb, SENDQ, pkt_id, 0);
		doorbell_set(driver_ccb);
	} else {
		dev_err(&pdev->dev, "Open could not dequeue a packet\n");
		error = -EBUSY;
		goto free;
	}

	return 0;
free:
	ilo_ccb_close(pdev, data);
out:
	return error;
}
/* check for this particular channel needing a reset */
static inline int is_channel_reset(struct ccb *ccb)
{
	/* the flag lives in the channel's fifo handle */
	return FIFOBARTOHANDLE(ccb->ccb_u1.send_fifobar)->reset;
}

/* set a flag indicating this channel needs a reset */
static inline void set_channel_reset(struct ccb *ccb)
{
	FIFOBARTOHANDLE(ccb->ccb_u1.send_fifobar)->reset = 1;
}

/* check for global reset condition (DB_RESET bit in the outbound doorbell) */
static inline int is_device_reset(struct ilo_hwinfo *hw)
{
	return ioread32(&hw->mmio_vaddr[DB_OUT]) & (1 << DB_RESET);
}

/* clear the device (reset bits, pending channel entries) */
static inline void clear_device(struct ilo_hwinfo *hw)
{
	/* writing all-ones acknowledges/clears every outbound bit */
	iowrite32(-1, &hw->mmio_vaddr[DB_OUT]);
}
  300. static void ilo_locked_reset(struct ilo_hwinfo *hw)
  301. {
  302. int slot;
  303. /*
  304. * Mapped memory is zeroed on ilo reset, so set a per ccb flag
  305. * to indicate that this ccb needs to be closed and reopened.
  306. */
  307. for (slot = 0; slot < MAX_CCB; slot++) {
  308. if (!hw->ccb_alloc[slot])
  309. continue;
  310. set_channel_reset(&hw->ccb_alloc[slot]->driver_ccb);
  311. }
  312. clear_device(hw);
  313. }
/* take alloc_lock and process a device reset if one is still pending */
static void ilo_reset(struct ilo_hwinfo *hw)
{
	spin_lock(&hw->alloc_lock);

	/* reset might have been handled after lock was taken */
	if (is_device_reset(hw))
		ilo_locked_reset(hw);

	spin_unlock(&hw->alloc_lock);
}
/*
 * Read one received packet into the user buffer, polling the receive
 * queue briefly.  Returns the number of bytes copied, -EAGAIN when no
 * packet arrived, -ENODEV after a device/channel reset, or -EFAULT on
 * a bad user buffer.
 */
static ssize_t ilo_read(struct file *fp, char __user *buf,
			size_t len, loff_t *off)
{
	int err, found, cnt, pkt_id, pkt_len;
	struct ccb_data *data;
	struct ccb *driver_ccb;
	struct ilo_hwinfo *hw;
	void *pkt;

	data = fp->private_data;
	driver_ccb = &data->driver_ccb;
	hw = data->ilo_hw;

	if (is_device_reset(hw) || is_channel_reset(driver_ccb)) {
		/*
		 * If the device has been reset, applications
		 * need to close and reopen all ccbs.
		 */
		ilo_reset(hw);
		return -ENODEV;
	}

	/*
	 * This function is to be called when data is expected
	 * in the channel, and will return an error if no packet is found
	 * during the loop below.  The sleep/retry logic is to allow
	 * applications to call read() immediately post write(),
	 * and give iLO some time to process the sent packet.
	 */
	cnt = 20;	/* up to 20 * 100ms of polling */
	do {
		/* look for a received packet */
		found = ilo_pkt_dequeue(hw, driver_ccb, RECVQ, &pkt_id,
					&pkt_len, &pkt);
		if (found)
			break;
		cnt--;
		msleep(100);
	} while (!found && cnt);

	if (!found)
		return -EAGAIN;

	/* only copy the length of the received packet */
	if (pkt_len < len)
		len = pkt_len;

	err = copy_to_user(buf, pkt, len);

	/* return the received packet to the queue */
	ilo_pkt_enqueue(hw, driver_ccb, RECVQ, pkt_id, desc_mem_sz(1));

	return err ? -EFAULT : len;
}
  368. static ssize_t ilo_write(struct file *fp, const char __user *buf,
  369. size_t len, loff_t *off)
  370. {
  371. int err, pkt_id, pkt_len;
  372. struct ccb_data *data;
  373. struct ccb *driver_ccb;
  374. struct ilo_hwinfo *hw;
  375. void *pkt;
  376. data = fp->private_data;
  377. driver_ccb = &data->driver_ccb;
  378. hw = data->ilo_hw;
  379. if (is_device_reset(hw) || is_channel_reset(driver_ccb)) {
  380. /*
  381. * If the device has been reset, applications
  382. * need to close and reopen all ccbs.
  383. */
  384. ilo_reset(hw);
  385. return -ENODEV;
  386. }
  387. /* get a packet to send the user command */
  388. if (!ilo_pkt_dequeue(hw, driver_ccb, SENDQ, &pkt_id, &pkt_len, &pkt))
  389. return -EBUSY;
  390. /* limit the length to the length of the packet */
  391. if (pkt_len < len)
  392. len = pkt_len;
  393. /* on failure, set the len to 0 to return empty packet to the device */
  394. err = copy_from_user(pkt, buf, len);
  395. if (err)
  396. len = 0;
  397. /* send the packet */
  398. ilo_pkt_enqueue(hw, driver_ccb, SENDQ, pkt_id, len);
  399. doorbell_set(driver_ccb);
  400. return err ? -EFAULT : len;
  401. }
/* release a channel fd; tears the channel down on the last reference */
static int ilo_close(struct inode *ip, struct file *fp)
{
	int slot;
	struct ccb_data *data;
	struct ilo_hwinfo *hw;

	/* minor number maps modulo MAX_CCB onto a channel slot */
	slot = iminor(ip) % MAX_CCB;
	hw = container_of(ip->i_cdev, struct ilo_hwinfo, cdev);

	spin_lock(&hw->alloc_lock);

	/* pick up a pending device reset nobody has handled yet */
	if (is_device_reset(hw))
		ilo_locked_reset(hw);

	/* last reference: stop the channel and free its resources */
	if (hw->ccb_alloc[slot]->ccb_cnt == 1) {
		data = fp->private_data;
		ilo_ccb_close(hw->ilo_dev, data);
		kfree(data);
		hw->ccb_alloc[slot] = NULL;
	} else
		hw->ccb_alloc[slot]->ccb_cnt--;

	spin_unlock(&hw->alloc_lock);

	return 0;
}
  422. static int ilo_open(struct inode *ip, struct file *fp)
  423. {
  424. int slot, error;
  425. struct ccb_data *data;
  426. struct ilo_hwinfo *hw;
  427. slot = iminor(ip) % MAX_CCB;
  428. hw = container_of(ip->i_cdev, struct ilo_hwinfo, cdev);
  429. /* new ccb allocation */
  430. data = kzalloc(sizeof(*data), GFP_KERNEL);
  431. if (!data)
  432. return -ENOMEM;
  433. spin_lock(&hw->alloc_lock);
  434. if (is_device_reset(hw))
  435. ilo_locked_reset(hw);
  436. /* each fd private_data holds sw/hw view of ccb */
  437. if (hw->ccb_alloc[slot] == NULL) {
  438. /* create a channel control block for this minor */
  439. error = ilo_ccb_open(hw, data, slot);
  440. if (!error) {
  441. hw->ccb_alloc[slot] = data;
  442. hw->ccb_alloc[slot]->ccb_cnt = 1;
  443. hw->ccb_alloc[slot]->ccb_excl = fp->f_flags & O_EXCL;
  444. hw->ccb_alloc[slot]->ilo_hw = hw;
  445. } else
  446. kfree(data);
  447. } else {
  448. kfree(data);
  449. if (fp->f_flags & O_EXCL || hw->ccb_alloc[slot]->ccb_excl) {
  450. /*
  451. * The channel exists, and either this open
  452. * or a previous open of this channel wants
  453. * exclusive access.
  454. */
  455. error = -EBUSY;
  456. } else {
  457. hw->ccb_alloc[slot]->ccb_cnt++;
  458. error = 0;
  459. }
  460. }
  461. spin_unlock(&hw->alloc_lock);
  462. if (!error)
  463. fp->private_data = hw->ccb_alloc[slot];
  464. return error;
  465. }
/* file operations backing each hpilo channel device node */
static const struct file_operations ilo_fops = {
	.owner		= THIS_MODULE,
	.read		= ilo_read,
	.write		= ilo_write,
	.open		= ilo_open,
	.release	= ilo_close,
};
/* unmap the three BAR mappings created by ilo_map_device() */
static void ilo_unmap_device(struct pci_dev *pdev, struct ilo_hwinfo *hw)
{
	pci_iounmap(pdev, hw->db_vaddr);
	pci_iounmap(pdev, hw->ram_vaddr);
	pci_iounmap(pdev, hw->mmio_vaddr);
}
/*
 * Map the three BARs the driver uses (mmio registers, shared RAM,
 * doorbell aperture), unwinding on partial failure.  Returns 0 or
 * -ENOMEM.
 */
static int __devinit ilo_map_device(struct pci_dev *pdev, struct ilo_hwinfo *hw)
{
	int error = -ENOMEM;

	/* map the memory mapped i/o registers */
	hw->mmio_vaddr = pci_iomap(pdev, 1, 0);
	if (hw->mmio_vaddr == NULL) {
		dev_err(&pdev->dev, "Error mapping mmio\n");
		goto out;
	}

	/* map the adapter shared memory region */
	hw->ram_vaddr = pci_iomap(pdev, 2, MAX_CCB * ILOHW_CCB_SZ);
	if (hw->ram_vaddr == NULL) {
		dev_err(&pdev->dev, "Error mapping shared mem\n");
		goto mmio_free;
	}

	/* map the doorbell aperture */
	hw->db_vaddr = pci_iomap(pdev, 3, MAX_CCB * ONE_DB_SIZE);
	if (hw->db_vaddr == NULL) {
		dev_err(&pdev->dev, "Error mapping doorbell\n");
		goto ram_free;
	}

	return 0;
ram_free:
	pci_iounmap(pdev, hw->ram_vaddr);
mmio_free:
	pci_iounmap(pdev, hw->mmio_vaddr);
out:
	return error;
}
/* undo ilo_probe(): device nodes, cdev, mappings, and PCI state */
static void ilo_remove(struct pci_dev *pdev)
{
	int i, minor;
	struct ilo_hwinfo *ilo_hw = pci_get_drvdata(pdev);

	clear_device(ilo_hw);

	/* destroy this board's MAX_CCB device nodes, starting at its base minor */
	minor = MINOR(ilo_hw->cdev.dev);
	for (i = minor; i < minor + MAX_CCB; i++)
		device_destroy(ilo_class, MKDEV(ilo_major, i));

	cdev_del(&ilo_hw->cdev);
	ilo_unmap_device(pdev, ilo_hw);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	kfree(ilo_hw);

	/* release this board's device-number range for reuse by probe */
	ilo_hwdev[(minor / MAX_CCB)] = 0;
}
  523. static int __devinit ilo_probe(struct pci_dev *pdev,
  524. const struct pci_device_id *ent)
  525. {
  526. int devnum, minor, start, error;
  527. struct ilo_hwinfo *ilo_hw;
  528. /* find a free range for device files */
  529. for (devnum = 0; devnum < MAX_ILO_DEV; devnum++) {
  530. if (ilo_hwdev[devnum] == 0) {
  531. ilo_hwdev[devnum] = 1;
  532. break;
  533. }
  534. }
  535. if (devnum == MAX_ILO_DEV) {
  536. dev_err(&pdev->dev, "Error finding free device\n");
  537. return -ENODEV;
  538. }
  539. /* track global allocations for this device */
  540. error = -ENOMEM;
  541. ilo_hw = kzalloc(sizeof(*ilo_hw), GFP_KERNEL);
  542. if (!ilo_hw)
  543. goto out;
  544. ilo_hw->ilo_dev = pdev;
  545. spin_lock_init(&ilo_hw->alloc_lock);
  546. spin_lock_init(&ilo_hw->fifo_lock);
  547. error = pci_enable_device(pdev);
  548. if (error)
  549. goto free;
  550. pci_set_master(pdev);
  551. error = pci_request_regions(pdev, ILO_NAME);
  552. if (error)
  553. goto disable;
  554. error = ilo_map_device(pdev, ilo_hw);
  555. if (error)
  556. goto free_regions;
  557. pci_set_drvdata(pdev, ilo_hw);
  558. clear_device(ilo_hw);
  559. cdev_init(&ilo_hw->cdev, &ilo_fops);
  560. ilo_hw->cdev.owner = THIS_MODULE;
  561. start = devnum * MAX_CCB;
  562. error = cdev_add(&ilo_hw->cdev, MKDEV(ilo_major, start), MAX_CCB);
  563. if (error) {
  564. dev_err(&pdev->dev, "Could not add cdev\n");
  565. goto unmap;
  566. }
  567. for (minor = 0 ; minor < MAX_CCB; minor++) {
  568. struct device *dev;
  569. dev = device_create(ilo_class, &pdev->dev,
  570. MKDEV(ilo_major, minor), NULL,
  571. "hpilo!d%dccb%d", devnum, minor);
  572. if (IS_ERR(dev))
  573. dev_err(&pdev->dev, "Could not create files\n");
  574. }
  575. return 0;
  576. unmap:
  577. ilo_unmap_device(pdev, ilo_hw);
  578. free_regions:
  579. pci_release_regions(pdev);
  580. disable:
  581. pci_disable_device(pdev);
  582. free:
  583. kfree(ilo_hw);
  584. out:
  585. ilo_hwdev[devnum] = 0;
  586. return error;
  587. }
/*
 * Supported devices — presumably 0xB204 is iLO and 0x3307 is iLO2,
 * per the driver's header comment; confirm against HP documentation.
 */
static struct pci_device_id ilo_devices[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_COMPAQ, 0xB204) },
	{ PCI_DEVICE(PCI_VENDOR_ID_HP, 0x3307) },
	{ }	/* terminator */
};
MODULE_DEVICE_TABLE(pci, ilo_devices);
/* PCI driver glue */
static struct pci_driver ilo_driver = {
	.name		= ILO_NAME,
	.id_table	= ilo_devices,
	.probe		= ilo_probe,
	.remove		= __devexit_p(ilo_remove),
};
/* module init: create the class, reserve a char-dev region, register driver */
static int __init ilo_init(void)
{
	int error;
	dev_t dev;

	ilo_class = class_create(THIS_MODULE, "iLO");
	if (IS_ERR(ilo_class)) {
		error = PTR_ERR(ilo_class);
		goto out;
	}

	error = alloc_chrdev_region(&dev, 0, MAX_OPEN, ILO_NAME);
	if (error)
		goto class_destroy;

	/* remember the dynamically assigned major for probe/remove */
	ilo_major = MAJOR(dev);

	error = pci_register_driver(&ilo_driver);
	if (error)
		goto chr_remove;

	return 0;
chr_remove:
	unregister_chrdev_region(dev, MAX_OPEN);
class_destroy:
	class_destroy(ilo_class);
out:
	return error;
}
/* module exit: reverse of ilo_init() */
static void __exit ilo_exit(void)
{
	pci_unregister_driver(&ilo_driver);
	unregister_chrdev_region(MKDEV(ilo_major, 0), MAX_OPEN);
	class_destroy(ilo_class);
}
/* module metadata and entry points */
MODULE_VERSION("1.1");
MODULE_ALIAS(ILO_NAME);
MODULE_DESCRIPTION(ILO_NAME);
MODULE_AUTHOR("David Altobelli <david.altobelli@hp.com>");
MODULE_LICENSE("GPL v2");
module_init(ilo_init);
module_exit(ilo_exit);