/*
 * c67x00-sched.c: Cypress C67X00 USB Host Controller Driver - TD scheduling
 *
 * Copyright (C) 2006-2008 Barco N.V.
 *    Derived from the Cypress cy7c67200/300 ezusb linux driver and
 *    based on multiple host controller drivers inside the linux kernel.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
 * MA 02110-1301 USA.
 */

#include <linux/kthread.h>

#include "c67x00.h"
#include "c67x00-hcd.h"

/*
 * These are the stages for a control urb; they are kept
 * in both urb->interval and td->privdata.
 */
#define SETUP_STAGE		0
#define DATA_STAGE		1
#define STATUS_STAGE		2
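
/*
 * Illustrative example (not part of the original source): a GET_DESCRIPTOR
 * control transfer walks SETUP_STAGE -> DATA_STAGE -> STATUS_STAGE, while a
 * request without a data stage (e.g. SET_ADDRESS) goes straight from
 * SETUP_STAGE to STATUS_STAGE; see c67x00_add_ctrl_urb() below.
 */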
/* -------------------------------------------------------------------------- */

/**
 * struct c67x00_ep_data: Host endpoint data structure
 */
struct c67x00_ep_data {
	struct list_head queue;
	struct list_head node;
	struct usb_host_endpoint *hep;
	struct usb_device *dev;
	u16 next_frame;		/* For int/isoc transactions */
};

/**
 * struct c67x00_td
 *
 * Hardware fields are little endian; the SW part is kept in CPU endianness.
 */
struct c67x00_td {
	/* HW specific part */
	__le16 ly_base_addr;	/* Bytes 0-1 */
	__le16 port_length;	/* Bytes 2-3 */
	u8 pid_ep;		/* Byte 4 */
	u8 dev_addr;		/* Byte 5 */
	u8 ctrl_reg;		/* Byte 6 */
	u8 status;		/* Byte 7 */
	u8 retry_cnt;		/* Byte 8 */
#define TT_OFFSET		2
#define TT_CONTROL		0
#define TT_ISOCHRONOUS		1
#define TT_BULK			2
#define TT_INTERRUPT		3
	u8 residue;		/* Byte 9 */
	__le16 next_td_addr;	/* Bytes 10-11 */
	/* SW part */
	struct list_head td_list;
	u16 td_addr;
	void *data;
	struct urb *urb;
	unsigned long privdata;

	/* These are needed for handling the toggle bits:
	 * an urb can be dequeued while a td is in progress;
	 * after checking the td, the toggle bit might need to
	 * be fixed */
	struct c67x00_ep_data *ep_data;
	unsigned int pipe;
};

struct c67x00_urb_priv {
	struct list_head hep_node;
	struct urb *urb;
	int port;
	int cnt;		/* packet number for isoc */
	int status;
	struct c67x00_ep_data *ep_data;
};

#define td_udev(td)	((td)->ep_data->dev)

#define CY_TD_SIZE		12

#define TD_PIDEP_OFFSET		0x04
#define TD_PIDEPMASK_PID	0xF0
#define TD_PIDEPMASK_EP		0x0F
#define TD_PORTLENMASK_DL	0x03FF
#define TD_PORTLENMASK_PN	0xC000

#define TD_STATUS_OFFSET	0x07
#define TD_STATUSMASK_ACK	0x01
#define TD_STATUSMASK_ERR	0x02
#define TD_STATUSMASK_TMOUT	0x04
#define TD_STATUSMASK_SEQ	0x08
#define TD_STATUSMASK_SETUP	0x10
#define TD_STATUSMASK_OVF	0x20
#define TD_STATUSMASK_NAK	0x40
#define TD_STATUSMASK_STALL	0x80
#define TD_ERROR_MASK		(TD_STATUSMASK_ERR | TD_STATUSMASK_TMOUT | \
				 TD_STATUSMASK_STALL)

#define TD_RETRYCNT_OFFSET	0x08
#define TD_RETRYCNTMASK_ACT_FLG	0x10
#define TD_RETRYCNTMASK_TX_TYPE	0x0C
#define TD_RETRYCNTMASK_RTY_CNT	0x03

#define TD_RESIDUE_OVERFLOW	0x80

#define TD_PID_IN		0x90

/* Residue: signed 8 bits, neg -> OVERFLOW, pos -> UNDERFLOW */
#define td_residue(td)		((__s8)(td->residue))
#define td_ly_base_addr(td)	(__le16_to_cpu((td)->ly_base_addr))
#define td_port_length(td)	(__le16_to_cpu((td)->port_length))
#define td_next_td_addr(td)	(__le16_to_cpu((td)->next_td_addr))

#define td_active(td)		((td)->retry_cnt & TD_RETRYCNTMASK_ACT_FLG)
#define td_length(td)		(td_port_length(td) & TD_PORTLENMASK_DL)

#define td_sequence_ok(td)	(!td->status || \
				 (!(td->status & TD_STATUSMASK_SEQ) == \
				  !(td->ctrl_reg & SEQ_SEL)))

#define td_acked(td)		(!td->status || \
				 (td->status & TD_STATUSMASK_ACK))
#define td_actual_bytes(td)	(td_length(td) - td_residue(td))
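
/*
 * Illustrative example (not part of the original source): for a 64-byte IN
 * TD that receives only 60 bytes, the hardware leaves residue = 4
 * (underflow), so td_actual_bytes() yields 60.  On overflow the residue
 * goes negative and TD_RESIDUE_OVERFLOW (the sign bit) is set.
 */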
/* -------------------------------------------------------------------------- */

#ifdef DEBUG

/**
 * dbg_td - Dump the contents of the TD
 */
static void dbg_td(struct c67x00_hcd *c67x00, struct c67x00_td *td, char *msg)
{
	struct device *dev = c67x00_hcd_dev(c67x00);

	dev_dbg(dev, "### %s at 0x%04x\n", msg, td->td_addr);
	dev_dbg(dev, "urb: 0x%p\n", td->urb);
	dev_dbg(dev, "endpoint: %4d\n", usb_pipeendpoint(td->pipe));
	dev_dbg(dev, "pipeout: %4d\n", usb_pipeout(td->pipe));
	dev_dbg(dev, "ly_base_addr: 0x%04x\n", td_ly_base_addr(td));
	dev_dbg(dev, "port_length: 0x%04x\n", td_port_length(td));
	dev_dbg(dev, "pid_ep: 0x%02x\n", td->pid_ep);
	dev_dbg(dev, "dev_addr: 0x%02x\n", td->dev_addr);
	dev_dbg(dev, "ctrl_reg: 0x%02x\n", td->ctrl_reg);
	dev_dbg(dev, "status: 0x%02x\n", td->status);
	dev_dbg(dev, "retry_cnt: 0x%02x\n", td->retry_cnt);
	dev_dbg(dev, "residue: 0x%02x\n", td->residue);
	dev_dbg(dev, "next_td_addr: 0x%04x\n", td_next_td_addr(td));
	dev_dbg(dev, "data:");
	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 1,
		       td->data, td_length(td), 1);
}
#else				/* DEBUG */

static inline void
dbg_td(struct c67x00_hcd *c67x00, struct c67x00_td *td, char *msg) { }

#endif				/* DEBUG */

/* -------------------------------------------------------------------------- */
/* Helper functions */

static inline u16 c67x00_get_current_frame_number(struct c67x00_hcd *c67x00)
{
	return c67x00_ll_husb_get_frame(c67x00->sie) & HOST_FRAME_MASK;
}

/**
 * frame_add
 * Software wraparound for framenumbers.
 */
static inline u16 frame_add(u16 a, u16 b)
{
	return (a + b) & HOST_FRAME_MASK;
}

/**
 * frame_after - is frame a after frame b
 */
static inline int frame_after(u16 a, u16 b)
{
	return ((HOST_FRAME_MASK + a - b) & HOST_FRAME_MASK) <
			(HOST_FRAME_MASK / 2);
}

/**
 * frame_after_eq - is frame a after or equal to frame b
 */
static inline int frame_after_eq(u16 a, u16 b)
{
	return ((HOST_FRAME_MASK + 1 + a - b) & HOST_FRAME_MASK) <
			(HOST_FRAME_MASK / 2);
}
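
/*
 * Illustrative example (not part of the original source), assuming the
 * usual 11-bit USB frame counter (HOST_FRAME_MASK == 0x7FF):
 * frame_add(0x7FF, 1) wraps to 0, and frame_after(0x000, 0x7FF) is true
 * because the wrapped distance ((HOST_FRAME_MASK + 0x000 - 0x7FF) &
 * HOST_FRAME_MASK) == 0, which is less than half the frame space.
 */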

/* -------------------------------------------------------------------------- */

/**
 * c67x00_release_urb - remove link from all tds to this urb
 * Disconnects the urb from its tds, so that it can be given back.
 * pre: urb->hcpriv != NULL
 */
static void c67x00_release_urb(struct c67x00_hcd *c67x00, struct urb *urb)
{
	struct c67x00_td *td;
	struct c67x00_urb_priv *urbp;

	BUG_ON(!urb);

	c67x00->urb_count--;

	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
		c67x00->urb_iso_count--;
		if (c67x00->urb_iso_count == 0)
			c67x00->max_frame_bw = MAX_FRAME_BW_STD;
	}

	/* TODO this might not be so efficient when we've got many urbs!
	 * Alternatives:
	 *   * only clear when needed
	 *   * keep a list of tds with each urbp
	 */
	list_for_each_entry(td, &c67x00->td_list, td_list)
		if (urb == td->urb)
			td->urb = NULL;

	urbp = urb->hcpriv;
	urb->hcpriv = NULL;
	list_del(&urbp->hep_node);
	kfree(urbp);
}

/* -------------------------------------------------------------------------- */

static struct c67x00_ep_data *
c67x00_ep_data_alloc(struct c67x00_hcd *c67x00, struct urb *urb)
{
	struct usb_host_endpoint *hep = urb->ep;
	struct c67x00_ep_data *ep_data;
	int type;

	c67x00->current_frame = c67x00_get_current_frame_number(c67x00);

	/* Check if endpoint already has a c67x00_ep_data struct allocated */
	if (hep->hcpriv) {
		ep_data = hep->hcpriv;
		if (frame_after(c67x00->current_frame, ep_data->next_frame))
			ep_data->next_frame =
			    frame_add(c67x00->current_frame, 1);
		return hep->hcpriv;
	}

	/* Allocate and initialize a new c67x00 endpoint data structure */
	ep_data = kzalloc(sizeof(*ep_data), GFP_ATOMIC);
	if (!ep_data)
		return NULL;

	INIT_LIST_HEAD(&ep_data->queue);
	INIT_LIST_HEAD(&ep_data->node);
	ep_data->hep = hep;

	/* hold a reference to udev as long as this endpoint lives,
	 * this is needed to possibly fix the data toggle */
	ep_data->dev = usb_get_dev(urb->dev);
	hep->hcpriv = ep_data;

	/* For ISOC and INT endpoints, start ASAP: */
	ep_data->next_frame = frame_add(c67x00->current_frame, 1);

	/* Add the endpoint data to one of the pipe lists; must be added
	   in order of endpoint address */
	type = usb_pipetype(urb->pipe);
	if (list_empty(&ep_data->node)) {
		list_add(&ep_data->node, &c67x00->list[type]);
	} else {
		struct c67x00_ep_data *prev;

		list_for_each_entry(prev, &c67x00->list[type], node) {
			if (prev->hep->desc.bEndpointAddress >
			    hep->desc.bEndpointAddress) {
				list_add(&ep_data->node, prev->node.prev);
				break;
			}
		}
	}

	return ep_data;
}

static int c67x00_ep_data_free(struct usb_host_endpoint *hep)
{
	struct c67x00_ep_data *ep_data = hep->hcpriv;

	if (!ep_data)
		return 0;

	if (!list_empty(&ep_data->queue))
		return -EBUSY;

	usb_put_dev(ep_data->dev);
	list_del(&ep_data->queue);
	list_del(&ep_data->node);
	kfree(ep_data);
	hep->hcpriv = NULL;

	return 0;
}

void c67x00_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
{
	struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd);
	unsigned long flags;

	if (!list_empty(&ep->urb_list))
		dev_warn(c67x00_hcd_dev(c67x00), "error: urb list not empty\n");

	spin_lock_irqsave(&c67x00->lock, flags);

	/* loop waiting for all transfers in the endpoint queue to complete */
	while (c67x00_ep_data_free(ep)) {
		/* Drop the lock so we can sleep waiting for the hardware */
		spin_unlock_irqrestore(&c67x00->lock, flags);

		/* it could happen that we reinitialize this completion, while
		 * somebody was waiting for that completion.  The timeout and
		 * while loop handle such cases, but this might be improved */
		INIT_COMPLETION(c67x00->endpoint_disable);
		c67x00_sched_kick(c67x00);
		wait_for_completion_timeout(&c67x00->endpoint_disable, 1 * HZ);

		spin_lock_irqsave(&c67x00->lock, flags);
	}

	spin_unlock_irqrestore(&c67x00->lock, flags);
}

/* -------------------------------------------------------------------------- */

static inline int get_root_port(struct usb_device *dev)
{
	while (dev->parent->parent)
		dev = dev->parent;
	return dev->portnum;
}
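
/*
 * Illustrative example (not part of the original source): for a device
 * plugged into root port 2 through an external hub, the loop walks
 * device -> hub until the hub's parent is the root hub itself, so
 * get_root_port() still returns 2.
 */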

int c67x00_urb_enqueue(struct usb_hcd *hcd,
		       struct urb *urb, gfp_t mem_flags)
{
	int ret;
	unsigned long flags;
	struct c67x00_urb_priv *urbp;
	struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd);
	int port = get_root_port(urb->dev) - 1;

	spin_lock_irqsave(&c67x00->lock, flags);

	/* Make sure host controller is running */
	if (!HC_IS_RUNNING(hcd->state)) {
		ret = -ENODEV;
		goto err_not_linked;
	}

	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	if (ret)
		goto err_not_linked;

	/* Allocate and initialize urb private data */
	urbp = kzalloc(sizeof(*urbp), mem_flags);
	if (!urbp) {
		ret = -ENOMEM;
		goto err_urbp;
	}

	INIT_LIST_HEAD(&urbp->hep_node);
	urbp->urb = urb;
	urbp->port = port;

	urbp->ep_data = c67x00_ep_data_alloc(c67x00, urb);

	if (!urbp->ep_data) {
		ret = -ENOMEM;
		goto err_epdata;
	}

	/* TODO claim bandwidth with usb_claim_bandwidth?
	 * also release it somewhere! */

	urb->hcpriv = urbp;

	urb->actual_length = 0;	/* Nothing received/transmitted yet */

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		urb->interval = SETUP_STAGE;
		break;
	case PIPE_INTERRUPT:
		break;
	case PIPE_BULK:
		break;
	case PIPE_ISOCHRONOUS:
		if (c67x00->urb_iso_count == 0)
			c67x00->max_frame_bw = MAX_FRAME_BW_ISO;
		c67x00->urb_iso_count++;
		/* Assume always URB_ISO_ASAP, FIXME */
		if (list_empty(&urbp->ep_data->queue))
			urb->start_frame = urbp->ep_data->next_frame;
		else {
			/* Go right after the last one */
			struct urb *last_urb;

			last_urb = list_entry(urbp->ep_data->queue.prev,
					      struct c67x00_urb_priv,
					      hep_node)->urb;
			urb->start_frame =
			    frame_add(last_urb->start_frame,
				      last_urb->number_of_packets *
				      last_urb->interval);
		}
		urbp->cnt = 0;
		break;
	}

	/* Add the URB to the endpoint queue */
	list_add_tail(&urbp->hep_node, &urbp->ep_data->queue);

	/* If this is the only URB, kick start the controller */
	if (!c67x00->urb_count++)
		c67x00_ll_hpi_enable_sofeop(c67x00->sie);

	c67x00_sched_kick(c67x00);
	spin_unlock_irqrestore(&c67x00->lock, flags);

	return 0;

err_epdata:
	kfree(urbp);
err_urbp:
	usb_hcd_unlink_urb_from_ep(hcd, urb);
err_not_linked:
	spin_unlock_irqrestore(&c67x00->lock, flags);

	return ret;
}

int c67x00_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd);
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&c67x00->lock, flags);
	rc = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (rc)
		goto done;

	c67x00_release_urb(c67x00, urb);
	usb_hcd_unlink_urb_from_ep(hcd, urb);

	spin_unlock(&c67x00->lock);
	usb_hcd_giveback_urb(hcd, urb, status);
	spin_lock(&c67x00->lock);

	spin_unlock_irqrestore(&c67x00->lock, flags);

	return 0;

done:
	spin_unlock_irqrestore(&c67x00->lock, flags);
	return rc;
}

/* -------------------------------------------------------------------------- */

/*
 * pre: c67x00 locked, urb unlocked
 */
static void
c67x00_giveback_urb(struct c67x00_hcd *c67x00, struct urb *urb, int status)
{
	struct c67x00_urb_priv *urbp;

	if (!urb)
		return;

	urbp = urb->hcpriv;
	urbp->status = status;

	list_del_init(&urbp->hep_node);

	c67x00_release_urb(c67x00, urb);
	usb_hcd_unlink_urb_from_ep(c67x00_hcd_to_hcd(c67x00), urb);
	spin_unlock(&c67x00->lock);
	/* urbp was freed by c67x00_release_urb(); use the local status */
	usb_hcd_giveback_urb(c67x00_hcd_to_hcd(c67x00), urb, status);
	spin_lock(&c67x00->lock);
}

/* -------------------------------------------------------------------------- */

static int c67x00_claim_frame_bw(struct c67x00_hcd *c67x00, struct urb *urb,
				 int len, int periodic)
{
	struct c67x00_urb_priv *urbp = urb->hcpriv;
	int bit_time;

	/* According to the C67x00 BIOS user manual, pages 3-18 and 3-19,
	 * the following calculations provide the full speed bit times for
	 * a transaction.
	 *
	 * FS(in)	= 112.5 +  9.36*BC + HOST_DELAY
	 * FS(in,iso)	=  90.5 +  9.36*BC + HOST_DELAY
	 * FS(out)	= 112.5 +  9.36*BC + HOST_DELAY
	 * FS(out,iso)	=  78.4 +  9.36*BC + HOST_DELAY
	 * LS(in)	= 802.4 + 75.78*BC + HOST_DELAY
	 * LS(out)	= 802.6 + 74.67*BC + HOST_DELAY
	 *
	 * HOST_DELAY == 106 for the c67200 and c67300.
	 */

	/* make calculations in 1/100 bit times to maintain resolution */
	if (urbp->ep_data->dev->speed == USB_SPEED_LOW) {
		/* Low speed pipe */
		if (usb_pipein(urb->pipe))
			bit_time = 80240 + 7578 * len;
		else
			bit_time = 80260 + 7467 * len;
	} else {
		/* FS pipes */
		if (usb_pipeisoc(urb->pipe))
			bit_time = usb_pipein(urb->pipe) ? 9050 : 7840;
		else
			bit_time = 11250;
		bit_time += 936 * len;
	}

	/* Scale back down to integer bit times.  Use a host delay of 106.
	 * (this is the only place it is used) */
	bit_time = ((bit_time + 50) / 100) + 106;

	if (unlikely(bit_time + c67x00->bandwidth_allocated >=
		     c67x00->max_frame_bw))
		return -EMSGSIZE;

	if (unlikely(c67x00->next_td_addr + CY_TD_SIZE >=
		     c67x00->td_base_addr + SIE_TD_SIZE))
		return -EMSGSIZE;

	if (unlikely(c67x00->next_buf_addr + len >=
		     c67x00->buf_base_addr + SIE_TD_BUF_SIZE))
		return -EMSGSIZE;

	if (periodic) {
		if (unlikely(bit_time + c67x00->periodic_bw_allocated >=
			     MAX_PERIODIC_BW(c67x00->max_frame_bw)))
			return -EMSGSIZE;
		c67x00->periodic_bw_allocated += bit_time;
	}

	c67x00->bandwidth_allocated += bit_time;
	return 0;
}
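
/*
 * Worked example (not part of the original source): a 64-byte full-speed
 * bulk packet costs bit_time = 11250 + 936 * 64 = 71154 hundredths of a
 * bit time, which scales to (71154 + 50) / 100 + 106 = 818 bit times out
 * of the frame budget.
 */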

/* -------------------------------------------------------------------------- */

/**
 * td_addr and buf_addr must be word aligned
 */
static int c67x00_create_td(struct c67x00_hcd *c67x00, struct urb *urb,
			    void *data, int len, int pid, int toggle,
			    unsigned long privdata)
{
	struct c67x00_td *td;
	struct c67x00_urb_priv *urbp = urb->hcpriv;
	const __u8 active_flag = 1, retry_cnt = 1;
	__u8 cmd = 0;
	int tt = 0;

	if (c67x00_claim_frame_bw(c67x00, urb, len, usb_pipeisoc(urb->pipe)
				  || usb_pipeint(urb->pipe)))
		return -EMSGSIZE;	/* Not really an error, but expected */

	td = kzalloc(sizeof(*td), GFP_ATOMIC);
	if (!td)
		return -ENOMEM;

	td->pipe = urb->pipe;
	td->ep_data = urbp->ep_data;

	if ((td_udev(td)->speed == USB_SPEED_LOW) &&
	    !(c67x00->low_speed_ports & (1 << urbp->port)))
		cmd |= PREAMBLE_EN;

	switch (usb_pipetype(td->pipe)) {
	case PIPE_ISOCHRONOUS:
		tt = TT_ISOCHRONOUS;
		cmd |= ISO_EN;
		break;
	case PIPE_CONTROL:
		tt = TT_CONTROL;
		break;
	case PIPE_BULK:
		tt = TT_BULK;
		break;
	case PIPE_INTERRUPT:
		tt = TT_INTERRUPT;
		break;
	}

	if (toggle)
		cmd |= SEQ_SEL;

	cmd |= ARM_EN;

	/* SW part */
	td->td_addr = c67x00->next_td_addr;
	c67x00->next_td_addr = c67x00->next_td_addr + CY_TD_SIZE;

	/* HW part */
	td->ly_base_addr = __cpu_to_le16(c67x00->next_buf_addr);
	td->port_length = __cpu_to_le16((c67x00->sie->sie_num << 15) |
					(urbp->port << 14) | (len & 0x3FF));
	td->pid_ep = ((pid & 0xF) << TD_PIDEP_OFFSET) |
	    (usb_pipeendpoint(td->pipe) & 0xF);
	td->dev_addr = usb_pipedevice(td->pipe) & 0x7F;
	td->ctrl_reg = cmd;
	td->status = 0;
	td->retry_cnt = (tt << TT_OFFSET) | (active_flag << 4) | retry_cnt;
	td->residue = 0;
	td->next_td_addr = __cpu_to_le16(c67x00->next_td_addr);

	/* SW part */
	td->data = data;
	td->urb = urb;
	td->privdata = privdata;

	c67x00->next_buf_addr += (len + 1) & ~0x01;	/* properly align */

	list_add_tail(&td->td_list, &c67x00->td_list);
	return 0;
}

static inline void c67x00_release_td(struct c67x00_td *td)
{
	list_del_init(&td->td_list);
	kfree(td);
}

/* -------------------------------------------------------------------------- */

static int c67x00_add_data_urb(struct c67x00_hcd *c67x00, struct urb *urb)
{
	int remaining;
	int toggle;
	int pid;
	int ret = 0;
	int maxps;
	int need_empty;

	toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			       usb_pipeout(urb->pipe));
	remaining = urb->transfer_buffer_length - urb->actual_length;

	maxps = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));

	need_empty = (urb->transfer_flags & URB_ZERO_PACKET) &&
	    usb_pipeout(urb->pipe) && !(remaining % maxps);

	while (remaining || need_empty) {
		int len;
		char *td_buf;

		len = (remaining > maxps) ? maxps : remaining;
		if (!len)
			need_empty = 0;

		pid = usb_pipeout(urb->pipe) ? USB_PID_OUT : USB_PID_IN;
		td_buf = urb->transfer_buffer + urb->transfer_buffer_length -
		    remaining;
		ret = c67x00_create_td(c67x00, urb, td_buf, len, pid, toggle,
				       DATA_STAGE);
		if (ret)
			return ret;	/* td wasn't created */

		toggle ^= 1;
		remaining -= len;
		if (usb_pipecontrol(urb->pipe))
			break;
	}

	return 0;
}
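
/*
 * Illustrative example (not part of the original source): a 150-byte bulk
 * OUT with maxps == 64 is queued as TDs of 64, 64 and 22 bytes; with
 * URB_ZERO_PACKET set, a 128-byte OUT instead becomes 64, 64 and a final
 * zero-length TD, so the device sees a short packet as terminator.
 */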

/**
 * return 0 in case more bandwidth is available, else errorcode
 */
static int c67x00_add_ctrl_urb(struct c67x00_hcd *c67x00, struct urb *urb)
{
	int ret;
	int pid;

	switch (urb->interval) {
	default:
	case SETUP_STAGE:
		ret = c67x00_create_td(c67x00, urb, urb->setup_packet,
				       8, USB_PID_SETUP, 0, SETUP_STAGE);
		if (ret)
			return ret;
		urb->interval = SETUP_STAGE;
		usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			      usb_pipeout(urb->pipe), 1);
		break;
	case DATA_STAGE:
		if (urb->transfer_buffer_length) {
			ret = c67x00_add_data_urb(c67x00, urb);
			if (ret)
				return ret;
			break;
		}		/* else fallthrough */
	case STATUS_STAGE:
		pid = !usb_pipeout(urb->pipe) ? USB_PID_OUT : USB_PID_IN;
		ret = c67x00_create_td(c67x00, urb, NULL, 0, pid, 1,
				       STATUS_STAGE);
		if (ret)
			return ret;
		break;
	}

	return 0;
}
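
/*
 * Note (not part of the original source): after a successful SETUP the
 * data stage always starts with DATA1, which is why the toggle is forced
 * to 1 above; the status stage likewise always uses DATA1 (toggle == 1 in
 * its c67x00_create_td() call).
 */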

/*
 * return 0 in case more bandwidth is available, else errorcode
 */
static int c67x00_add_int_urb(struct c67x00_hcd *c67x00, struct urb *urb)
{
	struct c67x00_urb_priv *urbp = urb->hcpriv;

	if (frame_after_eq(c67x00->current_frame, urbp->ep_data->next_frame)) {
		urbp->ep_data->next_frame =
		    frame_add(urbp->ep_data->next_frame, urb->interval);
		return c67x00_add_data_urb(c67x00, urb);
	}
	return 0;
}

static int c67x00_add_iso_urb(struct c67x00_hcd *c67x00, struct urb *urb)
{
	struct c67x00_urb_priv *urbp = urb->hcpriv;

	if (frame_after_eq(c67x00->current_frame, urbp->ep_data->next_frame)) {
		char *td_buf;
		int len, pid, ret;

		BUG_ON(urbp->cnt >= urb->number_of_packets);

		td_buf = urb->transfer_buffer +
		    urb->iso_frame_desc[urbp->cnt].offset;
		len = urb->iso_frame_desc[urbp->cnt].length;
		pid = usb_pipeout(urb->pipe) ? USB_PID_OUT : USB_PID_IN;

		ret = c67x00_create_td(c67x00, urb, td_buf, len, pid, 0,
				       urbp->cnt);
		if (ret) {
			printk(KERN_DEBUG "create failed: %d\n", ret);
			urb->iso_frame_desc[urbp->cnt].actual_length = 0;
			urb->iso_frame_desc[urbp->cnt].status = ret;
			if (urbp->cnt + 1 == urb->number_of_packets)
				c67x00_giveback_urb(c67x00, urb, 0);
		}

		urbp->ep_data->next_frame =
		    frame_add(urbp->ep_data->next_frame, urb->interval);
		urbp->cnt++;
	}
	return 0;
}

/* -------------------------------------------------------------------------- */

static void c67x00_fill_from_list(struct c67x00_hcd *c67x00, int type,
				  int (*add)(struct c67x00_hcd *, struct urb *))
{
	struct c67x00_ep_data *ep_data;
	struct urb *urb;

	/* traverse every endpoint on the list */
	list_for_each_entry(ep_data, &c67x00->list[type], node) {
		if (!list_empty(&ep_data->queue)) {
			/* and add the first urb */
			/* isochronous transfers rely on this */
			urb = list_entry(ep_data->queue.next,
					 struct c67x00_urb_priv,
					 hep_node)->urb;
			add(c67x00, urb);
		}
	}
}

static void c67x00_fill_frame(struct c67x00_hcd *c67x00)
{
	struct c67x00_td *td, *ttd;

	/* Check if we can proceed */
	if (!list_empty(&c67x00->td_list)) {
		dev_warn(c67x00_hcd_dev(c67x00),
			 "TD list not empty! This should not happen!\n");
		list_for_each_entry_safe(td, ttd, &c67x00->td_list, td_list) {
			dbg_td(c67x00, td, "Unprocessed td");
			c67x00_release_td(td);
		}
	}

	/* Reinitialize variables */
	c67x00->bandwidth_allocated = 0;
	c67x00->periodic_bw_allocated = 0;

	c67x00->next_td_addr = c67x00->td_base_addr;
	c67x00->next_buf_addr = c67x00->buf_base_addr;

	/* Fill the list */
	c67x00_fill_from_list(c67x00, PIPE_ISOCHRONOUS, c67x00_add_iso_urb);
	c67x00_fill_from_list(c67x00, PIPE_INTERRUPT, c67x00_add_int_urb);
	c67x00_fill_from_list(c67x00, PIPE_CONTROL, c67x00_add_ctrl_urb);
	c67x00_fill_from_list(c67x00, PIPE_BULK, c67x00_add_data_urb);
}

/* -------------------------------------------------------------------------- */

/**
 * Get TD from C67X00
 */
static inline void
c67x00_parse_td(struct c67x00_hcd *c67x00, struct c67x00_td *td)
{
	c67x00_ll_read_mem_le16(c67x00->sie->dev,
				td->td_addr, td, CY_TD_SIZE);

	if (usb_pipein(td->pipe) && td_actual_bytes(td))
		c67x00_ll_read_mem_le16(c67x00->sie->dev, td_ly_base_addr(td),
					td->data, td_actual_bytes(td));
}

static int c67x00_td_to_error(struct c67x00_hcd *c67x00, struct c67x00_td *td)
{
	if (td->status & TD_STATUSMASK_ERR) {
		dbg_td(c67x00, td, "ERROR_FLAG");
		return -EILSEQ;
	}
	if (td->status & TD_STATUSMASK_STALL) {
		/* dbg_td(c67x00, td, "STALL"); */
		return -EPIPE;
	}
	if (td->status & TD_STATUSMASK_TMOUT) {
		dbg_td(c67x00, td, "TIMEOUT");
		return -ETIMEDOUT;
	}

	return 0;
}

static inline int c67x00_end_of_data(struct c67x00_td *td)
{
	int maxps, need_empty, remaining;
	struct urb *urb = td->urb;
	int act_bytes;

	act_bytes = td_actual_bytes(td);

	if (unlikely(!act_bytes))
		return 1;	/* This was an empty packet */

	maxps = usb_maxpacket(td_udev(td), td->pipe, usb_pipeout(td->pipe));

	if (unlikely(act_bytes < maxps))
		return 1;	/* Smaller than a full packet */

	remaining = urb->transfer_buffer_length - urb->actual_length;
	need_empty = (urb->transfer_flags & URB_ZERO_PACKET) &&
	    usb_pipeout(urb->pipe) && !(remaining % maxps);

	if (unlikely(!remaining && !need_empty))
		return 1;

	return 0;
}
  741. /* -------------------------------------------------------------------------- */
  742. /* Remove all td's from the list which come
  743. * after last_td and are meant for the same pipe.
  744. * This is used when a short packet has occured */
  745. static inline void c67x00_clear_pipe(struct c67x00_hcd *c67x00,
  746. struct c67x00_td *last_td)
  747. {
  748. struct c67x00_td *td, *tmp;
  749. td = last_td;
  750. tmp = last_td;
  751. while (td->td_list.next != &c67x00->td_list) {
  752. td = list_entry(td->td_list.next, struct c67x00_td, td_list);
  753. if (td->pipe == last_td->pipe) {
  754. c67x00_release_td(td);
  755. td = tmp;
  756. }
  757. tmp = td;
  758. }
  759. }
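
/*
 * Note (not part of the original source): after c67x00_release_td() frees
 * the current entry, td is reset to tmp (the last surviving node) so the
 * walk can safely continue from a link that is still on the list.
 */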

/* -------------------------------------------------------------------------- */

static void c67x00_handle_successful_td(struct c67x00_hcd *c67x00,
					struct c67x00_td *td)
{
	struct urb *urb = td->urb;

	if (!urb)
		return;

	urb->actual_length += td_actual_bytes(td);

	switch (usb_pipetype(td->pipe)) {
		/* isochronous tds are handled separately */
	case PIPE_CONTROL:
		switch (td->privdata) {
		case SETUP_STAGE:
			urb->interval =
			    urb->transfer_buffer_length ?
			    DATA_STAGE : STATUS_STAGE;
			/* Don't count setup_packet with normal data: */
			urb->actual_length = 0;
			break;

		case DATA_STAGE:
			if (c67x00_end_of_data(td)) {
				urb->interval = STATUS_STAGE;
				c67x00_clear_pipe(c67x00, td);
			}
			break;

		case STATUS_STAGE:
			urb->interval = 0;
			c67x00_giveback_urb(c67x00, urb, 0);
			break;
		}
		break;

	case PIPE_INTERRUPT:
	case PIPE_BULK:
		if (unlikely(c67x00_end_of_data(td))) {
			c67x00_clear_pipe(c67x00, td);
			c67x00_giveback_urb(c67x00, urb, 0);
		}
		break;
	}
}

static void c67x00_handle_isoc(struct c67x00_hcd *c67x00, struct c67x00_td *td)
{
	struct urb *urb = td->urb;
	struct c67x00_urb_priv *urbp;
	int cnt;

	if (!urb)
		return;

	urbp = urb->hcpriv;
	cnt = td->privdata;

	if (td->status & TD_ERROR_MASK)
		urb->error_count++;

	urb->iso_frame_desc[cnt].actual_length = td_actual_bytes(td);
	urb->iso_frame_desc[cnt].status = c67x00_td_to_error(c67x00, td);
	if (cnt + 1 == urb->number_of_packets)	/* Last packet */
		c67x00_giveback_urb(c67x00, urb, 0);
}

/* -------------------------------------------------------------------------- */

/**
 * c67x00_check_td_list - handle tds which have been processed by the c67x00
 * pre: current_td == 0
 */
static inline void c67x00_check_td_list(struct c67x00_hcd *c67x00)
{
	struct c67x00_td *td, *tmp;
	struct urb *urb;
	int ack_ok;
	int clear_endpoint;

	list_for_each_entry_safe(td, tmp, &c67x00->td_list, td_list) {
		/* get the TD */
		c67x00_parse_td(c67x00, td);
		urb = td->urb;	/* urb can be NULL! */
		ack_ok = 0;
		clear_endpoint = 1;

		/* Handle isochronous transfers separately */
		if (usb_pipeisoc(td->pipe)) {
			clear_endpoint = 0;
			c67x00_handle_isoc(c67x00, td);
			goto cont;
		}

		/* When an error occurs, all td's for that pipe go into an
		 * inactive state.  This state matches successful transfers
		 * so we must make sure not to service them. */
		if (td->status & TD_ERROR_MASK) {
			c67x00_giveback_urb(c67x00, urb,
					    c67x00_td_to_error(c67x00, td));
			goto cont;
		}

		if ((td->status & TD_STATUSMASK_NAK) || !td_sequence_ok(td) ||
		    !td_acked(td))
			goto cont;

		/* Sequence ok and acked, don't need to fix toggle */
		ack_ok = 1;

		if (unlikely(td->status & TD_STATUSMASK_OVF)) {
			if (td_residue(td) & TD_RESIDUE_OVERFLOW) {
				/* Overflow */
				c67x00_giveback_urb(c67x00, urb, -EOVERFLOW);
				goto cont;
			}
		}

		clear_endpoint = 0;
		c67x00_handle_successful_td(c67x00, td);

cont:
		if (clear_endpoint)
			c67x00_clear_pipe(c67x00, td);
		if (ack_ok)
			usb_settoggle(td_udev(td), usb_pipeendpoint(td->pipe),
				      usb_pipeout(td->pipe),
				      !(td->ctrl_reg & SEQ_SEL));
		/* next in list could have been removed, due to clear_pipe! */
		tmp = list_entry(td->td_list.next, typeof(*td), td_list);
		c67x00_release_td(td);
	}
}

/* -------------------------------------------------------------------------- */

static inline int c67x00_all_tds_processed(struct c67x00_hcd *c67x00)
{
	/* If all tds are processed, we can check the previous frame (if
	 * there was any) and start our next frame.
	 */
	return !c67x00_ll_husb_get_current_td(c67x00->sie);
}

/**
 * Send td to C67X00
 */
static void c67x00_send_td(struct c67x00_hcd *c67x00, struct c67x00_td *td)
{
	int len = td_length(td);

	if (len && ((td->pid_ep & TD_PIDEPMASK_PID) != TD_PID_IN))
		c67x00_ll_write_mem_le16(c67x00->sie->dev, td_ly_base_addr(td),
					 td->data, len);

	c67x00_ll_write_mem_le16(c67x00->sie->dev,
				 td->td_addr, td, CY_TD_SIZE);
}

static void c67x00_send_frame(struct c67x00_hcd *c67x00)
{
	struct c67x00_td *td;

	if (list_empty(&c67x00->td_list))
		dev_warn(c67x00_hcd_dev(c67x00),
			 "%s: td list should not be empty here!\n",
			 __func__);

	list_for_each_entry(td, &c67x00->td_list, td_list) {
		if (td->td_list.next == &c67x00->td_list)
			td->next_td_addr = 0;	/* Last td in list */

		c67x00_send_td(c67x00, td);
	}

	c67x00_ll_husb_set_current_td(c67x00->sie, c67x00->td_base_addr);
}
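
/*
 * Note (not part of the original source): the TDs are chained through
 * next_td_addr; zeroing it on the final entry terminates the chain, and
 * pointing the SIE's current-TD register at td_base_addr starts the
 * hardware walking the list for this frame.
 */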

/* -------------------------------------------------------------------------- */

/**
 * c67x00_do_work - Scheduler's state machine
 */
static void c67x00_do_work(struct c67x00_hcd *c67x00)
{
	spin_lock(&c67x00->lock);
	/* Make sure all tds are processed */
	if (!c67x00_all_tds_processed(c67x00))
		goto out;

	c67x00_check_td_list(c67x00);

	/* no td's are being processed (current == 0)
	 * and all have been "checked" */
	complete(&c67x00->endpoint_disable);

	if (!list_empty(&c67x00->td_list))
		goto out;

	c67x00->current_frame = c67x00_get_current_frame_number(c67x00);
	if (c67x00->current_frame == c67x00->last_frame)
		goto out;	/* Don't send tds in same frame */
	c67x00->last_frame = c67x00->current_frame;

	/* If no urbs are scheduled, our work is done */
	if (!c67x00->urb_count) {
		c67x00_ll_hpi_disable_sofeop(c67x00->sie);
		goto out;
	}

	c67x00_fill_frame(c67x00);
	if (!list_empty(&c67x00->td_list))
		/* TD's have been added to the frame */
		c67x00_send_frame(c67x00);

out:
	spin_unlock(&c67x00->lock);
}

/* -------------------------------------------------------------------------- */

static void c67x00_sched_tasklet(unsigned long __c67x00)
{
	struct c67x00_hcd *c67x00 = (struct c67x00_hcd *)__c67x00;
	c67x00_do_work(c67x00);
}

void c67x00_sched_kick(struct c67x00_hcd *c67x00)
{
	tasklet_hi_schedule(&c67x00->tasklet);
}

int c67x00_sched_start_scheduler(struct c67x00_hcd *c67x00)
{
	tasklet_init(&c67x00->tasklet, c67x00_sched_tasklet,
		     (unsigned long)c67x00);
	return 0;
}

void c67x00_sched_stop_scheduler(struct c67x00_hcd *c67x00)
{
	tasklet_kill(&c67x00->tasklet);
}