isp1760-hcd.c

  1. /*
  2. * Driver for the NXP ISP1760 chip
  3. *
  4. * However, the code might contain some bugs. What doesn't work for sure is:
  5. * - ISO
  6. * - OTG
  7. * The interrupt line is configured as active low, level.
  8. *
  9. * (c) 2007 Sebastian Siewior <bigeasy@linutronix.de>
  10. *
  11. * (c) 2011 Arvid Brodin <arvid.brodin@enea.com>
  12. *
  13. */
  14. #include <linux/module.h>
  15. #include <linux/kernel.h>
  16. #include <linux/slab.h>
  17. #include <linux/list.h>
  18. #include <linux/usb.h>
  19. #include <linux/usb/hcd.h>
  20. #include <linux/debugfs.h>
  21. #include <linux/uaccess.h>
  22. #include <linux/io.h>
  23. #include <linux/mm.h>
  24. #include <linux/timer.h>
  25. #include <asm/unaligned.h>
  26. #include <asm/cacheflush.h>
  27. #include "isp1760-hcd.h"
  28. static struct kmem_cache *qtd_cachep;
  29. static struct kmem_cache *qh_cachep;
  30. static struct kmem_cache *urb_listitem_cachep;
  31. struct isp1760_hcd {
  32. u32 hcs_params;
  33. spinlock_t lock;
  34. struct slotinfo atl_slots[32];
  35. int atl_done_map;
  36. struct slotinfo int_slots[32];
  37. int int_done_map;
  38. struct memory_chunk memory_pool[BLOCKS];
  39. struct list_head controlqhs, bulkqhs, interruptqhs;
  40. /* periodic schedule support */
  41. #define DEFAULT_I_TDPS 1024
  42. unsigned periodic_size;
  43. unsigned i_thresh;
  44. unsigned long reset_done;
  45. unsigned long next_statechange;
  46. unsigned int devflags;
  47. };
  48. static inline struct isp1760_hcd *hcd_to_priv(struct usb_hcd *hcd)
  49. {
  50. return (struct isp1760_hcd *) (hcd->hcd_priv);
  51. }
  52. /* Section 2.2 Host Controller Capability Registers */
  53. #define HC_LENGTH(p) (((p)>>00)&0x00ff) /* bits 7:0 */
  54. #define HC_VERSION(p) (((p)>>16)&0xffff) /* bits 31:16 */
  55. #define HCS_INDICATOR(p) ((p)&(1 << 16)) /* true: has port indicators */
  56. #define HCS_PPC(p) ((p)&(1 << 4)) /* true: port power control */
  57. #define HCS_N_PORTS(p) (((p)>>0)&0xf) /* bits 3:0, ports on HC */
  58. #define HCC_ISOC_CACHE(p) ((p)&(1 << 7)) /* true: can cache isoc frame */
  59. #define HCC_ISOC_THRES(p) (((p)>>4)&0x7) /* bits 6:4, uframes cached */
  60. /* Section 2.3 Host Controller Operational Registers */
  61. #define CMD_LRESET (1<<7) /* partial reset (no ports, etc) */
  62. #define CMD_RESET (1<<1) /* reset HC not bus */
  63. #define CMD_RUN (1<<0) /* start/stop HC */
  64. #define STS_PCD (1<<2) /* port change detect */
  65. #define FLAG_CF (1<<0) /* true: we'll support "high speed" */
  66. #define PORT_OWNER (1<<13) /* true: companion hc owns this port */
  67. #define PORT_POWER (1<<12) /* true: has power (see PPC) */
  68. #define PORT_USB11(x) (((x) & (3 << 10)) == (1 << 10)) /* USB 1.1 device */
  69. #define PORT_RESET (1<<8) /* reset port */
  70. #define PORT_SUSPEND (1<<7) /* suspend port */
  71. #define PORT_RESUME (1<<6) /* resume it */
  72. #define PORT_PE (1<<2) /* port enable */
  73. #define PORT_CSC (1<<1) /* connect status change */
  74. #define PORT_CONNECT (1<<0) /* device connected */
  75. #define PORT_RWC_BITS (PORT_CSC)
  76. struct isp1760_qtd {
  77. u8 packet_type;
  78. void *data_buffer;
  79. u32 payload_addr;
  80. /* the rest is HCD-private */
  81. struct list_head qtd_list;
  82. struct urb *urb;
  83. size_t length;
  84. size_t actual_length;
  85. /* QTD_ENQUEUED: waiting for transfer (inactive) */
  86. /* QTD_PAYLOAD_ALLOC: chip mem has been allocated for payload */
  87. /* QTD_XFER_STARTED: valid ptd has been written to isp176x - only
  88. interrupt handler may touch this qtd! */
  89. /* QTD_XFER_COMPLETE: payload has been transferred successfully */
  90. /* QTD_RETIRE: transfer error/abort qtd */
  91. #define QTD_ENQUEUED 0
  92. #define QTD_PAYLOAD_ALLOC 1
  93. #define QTD_XFER_STARTED 2
  94. #define QTD_XFER_COMPLETE 3
  95. #define QTD_RETIRE 4
  96. u32 status;
  97. };
  98. /* Queue head, one for each active endpoint */
  99. struct isp1760_qh {
  100. struct list_head qh_list;
  101. struct list_head qtd_list;
  102. u32 toggle;
  103. u32 ping;
  104. int slot;
  105. int tt_buffer_dirty; /* See USB2.0 spec section 11.17.5 */
  106. };
  107. struct urb_listitem {
  108. struct list_head urb_list;
  109. struct urb *urb;
  110. };
  111. /*
  112. * Access functions for isp176x registers (addresses 0..0x03FF).
  113. */
  114. static u32 reg_read32(void __iomem *base, u32 reg)
  115. {
  116. return readl(base + reg);
  117. }
  118. static void reg_write32(void __iomem *base, u32 reg, u32 val)
  119. {
  120. writel(val, base + reg);
  121. }
  122. /*
  123. * Access functions for isp176x memory (offset >= 0x0400).
  124. *
  125. * bank_reads8() reads memory locations prefetched by an earlier write to
  126. * HC_MEMORY_REG (see isp176x datasheet). Unless you want to do fancy multi-
  127. * bank optimizations, you should use the more generic mem_reads8() below.
  128. *
  129. * For access to ptd memory, use the specialized ptd_read() and ptd_write()
  130. * below.
  131. *
  132. * These functions copy via MMIO data to/from the device. memcpy_{to|from}io()
  133. * doesn't quite work because some people have to enforce 32-bit access
  134. */
  135. static void bank_reads8(void __iomem *src_base, u32 src_offset, u32 bank_addr,
  136. __u32 *dst, u32 bytes)
  137. {
  138. __u32 __iomem *src;
  139. u32 val;
  140. __u8 *src_byteptr;
  141. __u8 *dst_byteptr;
  142. src = src_base + (bank_addr | src_offset);
  143. if (src_offset < PAYLOAD_OFFSET) {
  144. while (bytes >= 4) {
  145. *dst = le32_to_cpu(__raw_readl(src));
  146. bytes -= 4;
  147. src++;
  148. dst++;
  149. }
  150. } else {
  151. while (bytes >= 4) {
  152. *dst = __raw_readl(src);
  153. bytes -= 4;
  154. src++;
  155. dst++;
  156. }
  157. }
  158. if (!bytes)
  159. return;
  160. /* in case we have 3, 2 or 1 bytes left. The dst buffer may not be fully
  161. * allocated.
  162. */
  163. if (src_offset < PAYLOAD_OFFSET)
  164. val = le32_to_cpu(__raw_readl(src));
  165. else
  166. val = __raw_readl(src);
  167. dst_byteptr = (void *) dst;
  168. src_byteptr = (void *) &val;
  169. while (bytes > 0) {
  170. *dst_byteptr = *src_byteptr;
  171. dst_byteptr++;
  172. src_byteptr++;
  173. bytes--;
  174. }
  175. }
  176. static void mem_reads8(void __iomem *src_base, u32 src_offset, void *dst,
  177. u32 bytes)
  178. {
  179. reg_write32(src_base, HC_MEMORY_REG, src_offset + ISP_BANK(0));
  180. ndelay(90);
  181. bank_reads8(src_base, src_offset, ISP_BANK(0), dst, bytes);
  182. }
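/*
 * Illustrative call sequence (not part of the driver): reading a 32-byte
 * payload at chip offset 0x1000 via mem_reads8(base, 0x1000, buf, 32)
 * expands to the banked access above, i.e. roughly:
 *
 *	reg_write32(base, HC_MEMORY_REG, 0x1000 + ISP_BANK(0));
 *	ndelay(90);
 *	bank_reads8(base, 0x1000, ISP_BANK(0), buf, 32);
 *
 * The write to HC_MEMORY_REG tells the chip which window to prefetch; the
 * short delay presumably gives it time to do so before the reads start.
 */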
  183. static void mem_writes8(void __iomem *dst_base, u32 dst_offset,
  184. __u32 const *src, u32 bytes)
  185. {
  186. __u32 __iomem *dst;
  187. dst = dst_base + dst_offset;
  188. if (dst_offset < PAYLOAD_OFFSET) {
  189. while (bytes >= 4) {
  190. __raw_writel(cpu_to_le32(*src), dst);
  191. bytes -= 4;
  192. src++;
  193. dst++;
  194. }
  195. } else {
  196. while (bytes >= 4) {
  197. __raw_writel(*src, dst);
  198. bytes -= 4;
  199. src++;
  200. dst++;
  201. }
  202. }
  203. if (!bytes)
  204. return;
  205. /* in case we have 3, 2 or 1 bytes left. The buffer is allocated and the
  206. * extra bytes should not be read by the HW.
  207. */
  208. if (dst_offset < PAYLOAD_OFFSET)
  209. __raw_writel(cpu_to_le32(*src), dst);
  210. else
  211. __raw_writel(*src, dst);
  212. }
  213. /*
  214. * Read and write ptds. 'ptd_offset' should be one of ISO_PTD_OFFSET,
  215. * INT_PTD_OFFSET, and ATL_PTD_OFFSET. 'slot' should be less than 32.
  216. */
  217. static void ptd_read(void __iomem *base, u32 ptd_offset, u32 slot,
  218. struct ptd *ptd)
  219. {
  220. reg_write32(base, HC_MEMORY_REG,
  221. ISP_BANK(0) + ptd_offset + slot*sizeof(*ptd));
  222. ndelay(90);
  223. bank_reads8(base, ptd_offset + slot*sizeof(*ptd), ISP_BANK(0),
  224. (void *) ptd, sizeof(*ptd));
  225. }
  226. static void ptd_write(void __iomem *base, u32 ptd_offset, u32 slot,
  227. struct ptd *ptd)
  228. {
  229. mem_writes8(base, ptd_offset + slot*sizeof(*ptd) + sizeof(ptd->dw0),
  230. &ptd->dw1, 7*sizeof(ptd->dw1));
  231. /* Make sure dw0 gets written last (after other dw's and after payload)
  232. since it contains the enable bit */
  233. wmb();
  234. mem_writes8(base, ptd_offset + slot*sizeof(*ptd), &ptd->dw0,
  235. sizeof(ptd->dw0));
  236. }
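/*
 * Note on the slot layout, derived from the code above: each PTD is eight
 * 32-bit dwords, i.e. sizeof(struct ptd) == 32 bytes, so slot N of a map
 * lives at <map>_PTD_OFFSET + N * 32 (ATL slot 5 starts at
 * ATL_PTD_OFFSET + 0xa0, for example).  ptd_write() writes dw1..dw7 first
 * and dw0 (with its VALID bit) last, so the chip never sees a half-written
 * but already enabled PTD.
 */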
  237. /* memory management of the 60kb on the chip from 0x1000 to 0xffff */
  238. static void init_memory(struct isp1760_hcd *priv)
  239. {
  240. int i, curr;
  241. u32 payload_addr;
  242. payload_addr = PAYLOAD_OFFSET;
  243. for (i = 0; i < BLOCK_1_NUM; i++) {
  244. priv->memory_pool[i].start = payload_addr;
  245. priv->memory_pool[i].size = BLOCK_1_SIZE;
  246. priv->memory_pool[i].free = 1;
  247. payload_addr += priv->memory_pool[i].size;
  248. }
  249. curr = i;
  250. for (i = 0; i < BLOCK_2_NUM; i++) {
  251. priv->memory_pool[curr + i].start = payload_addr;
  252. priv->memory_pool[curr + i].size = BLOCK_2_SIZE;
  253. priv->memory_pool[curr + i].free = 1;
  254. payload_addr += priv->memory_pool[curr + i].size;
  255. }
  256. curr = i;
  257. for (i = 0; i < BLOCK_3_NUM; i++) {
  258. priv->memory_pool[curr + i].start = payload_addr;
  259. priv->memory_pool[curr + i].size = BLOCK_3_SIZE;
  260. priv->memory_pool[curr + i].free = 1;
  261. payload_addr += priv->memory_pool[curr + i].size;
  262. }
  263. WARN_ON(payload_addr - priv->memory_pool[0].start > PAYLOAD_AREA_SIZE);
  264. }
  265. static void alloc_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd)
  266. {
  267. struct isp1760_hcd *priv = hcd_to_priv(hcd);
  268. int i;
  269. WARN_ON(qtd->payload_addr);
  270. if (!qtd->length)
  271. return;
  272. for (i = 0; i < BLOCKS; i++) {
  273. if (priv->memory_pool[i].size >= qtd->length &&
  274. priv->memory_pool[i].free) {
  275. priv->memory_pool[i].free = 0;
  276. qtd->payload_addr = priv->memory_pool[i].start;
  277. return;
  278. }
  279. }
  280. }
  281. static void free_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd)
  282. {
  283. struct isp1760_hcd *priv = hcd_to_priv(hcd);
  284. int i;
  285. if (!qtd->payload_addr)
  286. return;
  287. for (i = 0; i < BLOCKS; i++) {
  288. if (priv->memory_pool[i].start == qtd->payload_addr) {
  289. WARN_ON(priv->memory_pool[i].free);
  290. priv->memory_pool[i].free = 1;
  291. qtd->payload_addr = 0;
  292. return;
  293. }
  294. }
  295. dev_err(hcd->self.controller, "%s: Invalid pointer: %08x\n",
  296. __func__, qtd->payload_addr);
  297. WARN_ON(1);
  298. qtd->payload_addr = 0;
  299. }
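/*
 * The allocator above is deliberately simple: init_memory() lays out three
 * fixed block sizes contiguously from PAYLOAD_OFFSET, and alloc_mem() does a
 * first-fit scan, handing out the first free block of at least qtd->length
 * bytes.  Blocks are never split, so a short transfer that lands in a large
 * block wastes the remainder until free_mem() returns the whole block.
 */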
  300. static int handshake(struct usb_hcd *hcd, u32 reg,
  301. u32 mask, u32 done, int usec)
  302. {
  303. u32 result;
  304. do {
  305. result = reg_read32(hcd->regs, reg);
  306. if (result == ~0)
  307. return -ENODEV;
  308. result &= mask;
  309. if (result == done)
  310. return 0;
  311. udelay(1);
  312. usec--;
  313. } while (usec > 0);
  314. return -ETIMEDOUT;
  315. }
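/*
 * handshake() polls 'reg' roughly once per microsecond, up to 'usec' times:
 * it returns 0 as soon as (reg & mask) == done, -ENODEV if the register
 * reads back as all ones (the chip is most likely no longer responding), and
 * -ETIMEDOUT otherwise.  E.g. the ehci_reset() call below waits up to 250 ms
 * for CMD_RESET to self-clear.
 */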
  316. /* reset a non-running (STS_HALT == 1) controller */
  317. static int ehci_reset(struct usb_hcd *hcd)
  318. {
  319. int retval;
  320. struct isp1760_hcd *priv = hcd_to_priv(hcd);
  321. u32 command = reg_read32(hcd->regs, HC_USBCMD);
  322. command |= CMD_RESET;
  323. reg_write32(hcd->regs, HC_USBCMD, command);
  324. hcd->state = HC_STATE_HALT;
  325. priv->next_statechange = jiffies;
  326. retval = handshake(hcd, HC_USBCMD,
  327. CMD_RESET, 0, 250 * 1000);
  328. return retval;
  329. }
  330. static struct isp1760_qh *qh_alloc(gfp_t flags)
  331. {
  332. struct isp1760_qh *qh;
  333. qh = kmem_cache_zalloc(qh_cachep, flags);
  334. if (!qh)
  335. return NULL;
  336. INIT_LIST_HEAD(&qh->qh_list);
  337. INIT_LIST_HEAD(&qh->qtd_list);
  338. qh->slot = -1;
  339. return qh;
  340. }
  341. static void qh_free(struct isp1760_qh *qh)
  342. {
  343. WARN_ON(!list_empty(&qh->qtd_list));
  344. WARN_ON(qh->slot > -1);
  345. kmem_cache_free(qh_cachep, qh);
  346. }
  347. /* one-time init, only for memory state */
  348. static int priv_init(struct usb_hcd *hcd)
  349. {
  350. struct isp1760_hcd *priv = hcd_to_priv(hcd);
  351. u32 hcc_params;
  352. spin_lock_init(&priv->lock);
  353. INIT_LIST_HEAD(&priv->interruptqhs);
  354. INIT_LIST_HEAD(&priv->controlqhs);
  355. INIT_LIST_HEAD(&priv->bulkqhs);
  356. /*
  357. * hw default: 1K periodic list heads, one per frame.
  358. * periodic_size can shrink by USBCMD update if hcc_params allows.
  359. */
  360. priv->periodic_size = DEFAULT_I_TDPS;
  361. /* controllers may cache some of the periodic schedule ... */
  362. hcc_params = reg_read32(hcd->regs, HC_HCCPARAMS);
  363. /* full frame cache */
  364. if (HCC_ISOC_CACHE(hcc_params))
  365. priv->i_thresh = 8;
  366. else /* N microframes cached */
  367. priv->i_thresh = 2 + HCC_ISOC_THRES(hcc_params);
  368. return 0;
  369. }
  370. static int isp1760_hc_setup(struct usb_hcd *hcd)
  371. {
  372. struct isp1760_hcd *priv = hcd_to_priv(hcd);
  373. int result;
  374. u32 scratch, hwmode;
  375. /* Setup HW Mode Control: This assumes a level active-low interrupt */
  376. hwmode = HW_DATA_BUS_32BIT;
  377. if (priv->devflags & ISP1760_FLAG_BUS_WIDTH_16)
  378. hwmode &= ~HW_DATA_BUS_32BIT;
  379. if (priv->devflags & ISP1760_FLAG_ANALOG_OC)
  380. hwmode |= HW_ANA_DIGI_OC;
  381. if (priv->devflags & ISP1760_FLAG_DACK_POL_HIGH)
  382. hwmode |= HW_DACK_POL_HIGH;
  383. if (priv->devflags & ISP1760_FLAG_DREQ_POL_HIGH)
  384. hwmode |= HW_DREQ_POL_HIGH;
  385. if (priv->devflags & ISP1760_FLAG_INTR_POL_HIGH)
  386. hwmode |= HW_INTR_HIGH_ACT;
  387. if (priv->devflags & ISP1760_FLAG_INTR_EDGE_TRIG)
  388. hwmode |= HW_INTR_EDGE_TRIG;
  389. /*
  390. * We have to set this first in case we're in 16-bit mode.
  391. * Write it twice to ensure correct upper bits if switching
  392. * to 16-bit mode.
  393. */
  394. reg_write32(hcd->regs, HC_HW_MODE_CTRL, hwmode);
  395. reg_write32(hcd->regs, HC_HW_MODE_CTRL, hwmode);
  396. reg_write32(hcd->regs, HC_SCRATCH_REG, 0xdeadbabe);
  397. /* Change bus pattern */
  398. scratch = reg_read32(hcd->regs, HC_CHIP_ID_REG);
  399. scratch = reg_read32(hcd->regs, HC_SCRATCH_REG);
  400. if (scratch != 0xdeadbabe) {
  401. dev_err(hcd->self.controller, "Scratch test failed.\n");
  402. return -ENODEV;
  403. }
  404. /* pre reset */
  405. reg_write32(hcd->regs, HC_BUFFER_STATUS_REG, 0);
  406. reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, NO_TRANSFER_ACTIVE);
  407. reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, NO_TRANSFER_ACTIVE);
  408. reg_write32(hcd->regs, HC_ISO_PTD_SKIPMAP_REG, NO_TRANSFER_ACTIVE);
  409. /* reset */
  410. reg_write32(hcd->regs, HC_RESET_REG, SW_RESET_RESET_ALL);
  411. mdelay(100);
  412. reg_write32(hcd->regs, HC_RESET_REG, SW_RESET_RESET_HC);
  413. mdelay(100);
  414. result = ehci_reset(hcd);
  415. if (result)
  416. return result;
  417. /* Step 11 passed */
  418. dev_info(hcd->self.controller, "bus width: %d, oc: %s\n",
  419. (priv->devflags & ISP1760_FLAG_BUS_WIDTH_16) ?
  420. 16 : 32, (priv->devflags & ISP1760_FLAG_ANALOG_OC) ?
  421. "analog" : "digital");
  422. /* ATL reset */
  423. reg_write32(hcd->regs, HC_HW_MODE_CTRL, hwmode | ALL_ATX_RESET);
  424. mdelay(10);
  425. reg_write32(hcd->regs, HC_HW_MODE_CTRL, hwmode);
  426. reg_write32(hcd->regs, HC_INTERRUPT_ENABLE, INTERRUPT_ENABLE_MASK);
  427. /*
  428. * PORT 1 Control register of the ISP1760 is the OTG control
  429. * register on ISP1761. Since there is no OTG or device controller
  430. * support in this driver, we use port 1 as a "normal" USB host port on
  431. * both chips.
  432. */
  433. reg_write32(hcd->regs, HC_PORT1_CTRL, PORT1_POWER | PORT1_INIT2);
  434. mdelay(10);
  435. priv->hcs_params = reg_read32(hcd->regs, HC_HCSPARAMS);
  436. return priv_init(hcd);
  437. }
  438. static u32 base_to_chip(u32 base)
  439. {
  440. return ((base - 0x400) >> 3);
  441. }
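/*
 * Worked example: base_to_chip() converts the CPU-visible byte offset of a
 * payload into the value programmed via TO_DW2_DATA_START_ADDR(), i.e. it
 * subtracts the 0x400 register window and counts in 8-byte units.  A payload
 * at offset 0x1000 thus becomes (0x1000 - 0x400) >> 3 = 0x180.
 */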
  442. static int last_qtd_of_urb(struct isp1760_qtd *qtd, struct isp1760_qh *qh)
  443. {
  444. struct urb *urb;
  445. if (list_is_last(&qtd->qtd_list, &qh->qtd_list))
  446. return 1;
  447. urb = qtd->urb;
  448. qtd = list_entry(qtd->qtd_list.next, typeof(*qtd), qtd_list);
  449. return (qtd->urb != urb);
  450. }
  451. /* magic numbers that can affect system performance */
  452. #define EHCI_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */
  453. #define EHCI_TUNE_RL_HS 4 /* nak throttle; see 4.9 */
  454. #define EHCI_TUNE_RL_TT 0
  455. #define EHCI_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */
  456. #define EHCI_TUNE_MULT_TT 1
  457. #define EHCI_TUNE_FLS 2 /* (small) 256 frame schedule */
  458. static void create_ptd_atl(struct isp1760_qh *qh,
  459. struct isp1760_qtd *qtd, struct ptd *ptd)
  460. {
  461. u32 maxpacket;
  462. u32 multi;
  463. u32 rl = RL_COUNTER;
  464. u32 nak = NAK_COUNTER;
  465. memset(ptd, 0, sizeof(*ptd));
  466. /* according to 3.6.2, max packet len can not be > 0x400 */
  467. maxpacket = usb_maxpacket(qtd->urb->dev, qtd->urb->pipe,
  468. usb_pipeout(qtd->urb->pipe));
  469. multi = 1 + ((maxpacket >> 11) & 0x3);
  470. maxpacket &= 0x7ff;
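/*
 * usb_maxpacket() returns wMaxPacketSize, where bits 10:0 are the packet
 * size and bits 12:11 encode the additional transactions per microframe for
 * high-bandwidth endpoints.  E.g. a value of 0x1400 yields
 * maxpacket = 0x400 and multi = 3.
 */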
  471. /* DW0 */
  472. ptd->dw0 = DW0_VALID_BIT;
  473. ptd->dw0 |= TO_DW0_LENGTH(qtd->length);
  474. ptd->dw0 |= TO_DW0_MAXPACKET(maxpacket);
  475. ptd->dw0 |= TO_DW0_ENDPOINT(usb_pipeendpoint(qtd->urb->pipe));
  476. /* DW1 */
  477. ptd->dw1 = usb_pipeendpoint(qtd->urb->pipe) >> 1;
  478. ptd->dw1 |= TO_DW1_DEVICE_ADDR(usb_pipedevice(qtd->urb->pipe));
  479. ptd->dw1 |= TO_DW1_PID_TOKEN(qtd->packet_type);
  480. if (usb_pipebulk(qtd->urb->pipe))
  481. ptd->dw1 |= DW1_TRANS_BULK;
  482. else if (usb_pipeint(qtd->urb->pipe))
  483. ptd->dw1 |= DW1_TRANS_INT;
  484. if (qtd->urb->dev->speed != USB_SPEED_HIGH) {
  485. /* split transaction */
  486. ptd->dw1 |= DW1_TRANS_SPLIT;
  487. if (qtd->urb->dev->speed == USB_SPEED_LOW)
  488. ptd->dw1 |= DW1_SE_USB_LOSPEED;
  489. ptd->dw1 |= TO_DW1_PORT_NUM(qtd->urb->dev->ttport);
  490. ptd->dw1 |= TO_DW1_HUB_NUM(qtd->urb->dev->tt->hub->devnum);
  491. /* SE bit for Split INT transfers */
  492. if (usb_pipeint(qtd->urb->pipe) &&
  493. (qtd->urb->dev->speed == USB_SPEED_LOW))
  494. ptd->dw1 |= 2 << 16;
  495. rl = 0;
  496. nak = 0;
  497. } else {
  498. ptd->dw0 |= TO_DW0_MULTI(multi);
  499. if (usb_pipecontrol(qtd->urb->pipe) ||
  500. usb_pipebulk(qtd->urb->pipe))
  501. ptd->dw3 |= TO_DW3_PING(qh->ping);
  502. }
  503. /* DW2 */
  504. ptd->dw2 = 0;
  505. ptd->dw2 |= TO_DW2_DATA_START_ADDR(base_to_chip(qtd->payload_addr));
  506. ptd->dw2 |= TO_DW2_RL(rl);
  507. /* DW3 */
  508. ptd->dw3 |= TO_DW3_NAKCOUNT(nak);
  509. ptd->dw3 |= TO_DW3_DATA_TOGGLE(qh->toggle);
  510. if (usb_pipecontrol(qtd->urb->pipe)) {
  511. if (qtd->data_buffer == qtd->urb->setup_packet)
  512. ptd->dw3 &= ~TO_DW3_DATA_TOGGLE(1);
  513. else if (last_qtd_of_urb(qtd, qh))
  514. ptd->dw3 |= TO_DW3_DATA_TOGGLE(1);
  515. }
  516. ptd->dw3 |= DW3_ACTIVE_BIT;
  517. /* Cerr */
  518. ptd->dw3 |= TO_DW3_CERR(ERR_COUNTER);
  519. }
  520. static void transform_add_int(struct isp1760_qh *qh,
  521. struct isp1760_qtd *qtd, struct ptd *ptd)
  522. {
  523. u32 usof;
  524. u32 period;
  525. /*
  526. * Most of this is guessing. ISP1761 datasheet is quite unclear, and
  527. * the algorithm from the original Philips driver code, which was
  528. * pretty much used in this driver before as well, is quite horrendous
  529. * and, I believe, incorrect. The code below follows the datasheet and
  530. * USB2.0 spec as far as I can tell, and plug/unplug seems to be much
  531. * more reliable this way (fingers crossed...).
  532. */
  533. if (qtd->urb->dev->speed == USB_SPEED_HIGH) {
  534. /* urb->interval is in units of microframes (1/8 ms) */
  535. period = qtd->urb->interval >> 3;
  536. if (qtd->urb->interval > 4)
  537. usof = 0x01; /* One bit set =>
  538. interval 1 ms * uFrame-match */
  539. else if (qtd->urb->interval > 2)
  540. usof = 0x22; /* Two bits set => interval 1/2 ms */
  541. else if (qtd->urb->interval > 1)
  542. usof = 0x55; /* Four bits set => interval 1/4 ms */
  543. else
  544. usof = 0xff; /* All bits set => interval 1/8 ms */
  545. } else {
  546. /* urb->interval is in units of frames (1 ms) */
  547. period = qtd->urb->interval;
  548. usof = 0x0f; /* Execute Start Split on any of the
  549. four first uFrames */
  550. /*
  551. * First 8 bits in dw5 is uSCS and "specifies which uSOF the
  552. * complete split needs to be sent. Valid only for IN." Also,
  553. * "All bits can be set to one for every transfer." (p 82,
  554. * ISP1761 data sheet.) 0x1c is from Philips driver. Where did
  555. * that number come from? 0xff seems to work fine...
  556. */
  557. /* ptd->dw5 = 0x1c; */
  558. ptd->dw5 = 0xff; /* Execute Complete Split on any uFrame */
  559. }
  560. period = period >> 1;/* Ensure equal or shorter period than requested */
  561. period &= 0xf8; /* Mask off too large values and lowest unused 3 bits */
  562. ptd->dw2 |= period;
  563. ptd->dw4 = usof;
  564. }
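/*
 * Illustrative trace of the code above: a full-speed endpoint with
 * urb->interval = 32 frames gets usof = 0x0f (start split in any of the
 * first four uFrames), dw5 = 0xff, and - after the final
 * "period >>= 1; period &= 0xf8" steps - a period field of 16, i.e. it is
 * polled at half the requested interval, matching the "equal or shorter
 * period than requested" comment.  High-speed intervals are first converted
 * from microframes to frames (interval >> 3), and the usof mask selects how
 * many uFrames per ms are hit.
 */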
  565. static void create_ptd_int(struct isp1760_qh *qh,
  566. struct isp1760_qtd *qtd, struct ptd *ptd)
  567. {
  568. create_ptd_atl(qh, qtd, ptd);
  569. transform_add_int(qh, qtd, ptd);
  570. }
  571. static void isp1760_urb_done(struct usb_hcd *hcd, struct urb *urb)
  572. __releases(priv->lock)
  573. __acquires(priv->lock)
  574. {
  575. struct isp1760_hcd *priv = hcd_to_priv(hcd);
  576. if (!urb->unlinked) {
  577. if (urb->status == -EINPROGRESS)
  578. urb->status = 0;
  579. }
  580. if (usb_pipein(urb->pipe) && usb_pipetype(urb->pipe) != PIPE_CONTROL) {
  581. void *ptr;
  582. for (ptr = urb->transfer_buffer;
  583. ptr < urb->transfer_buffer + urb->transfer_buffer_length;
  584. ptr += PAGE_SIZE)
  585. flush_dcache_page(virt_to_page(ptr));
  586. }
  587. /* complete() can reenter this HCD */
  588. usb_hcd_unlink_urb_from_ep(hcd, urb);
  589. spin_unlock(&priv->lock);
  590. usb_hcd_giveback_urb(hcd, urb, urb->status);
  591. spin_lock(&priv->lock);
  592. }
  593. static struct isp1760_qtd *qtd_alloc(gfp_t flags, struct urb *urb,
  594. u8 packet_type)
  595. {
  596. struct isp1760_qtd *qtd;
  597. qtd = kmem_cache_zalloc(qtd_cachep, flags);
  598. if (!qtd)
  599. return NULL;
  600. INIT_LIST_HEAD(&qtd->qtd_list);
  601. qtd->urb = urb;
  602. qtd->packet_type = packet_type;
  603. qtd->status = QTD_ENQUEUED;
  604. qtd->actual_length = 0;
  605. return qtd;
  606. }
  607. static void qtd_free(struct isp1760_qtd *qtd)
  608. {
  609. WARN_ON(qtd->payload_addr);
  610. kmem_cache_free(qtd_cachep, qtd);
  611. }
  612. static void start_bus_transfer(struct usb_hcd *hcd, u32 ptd_offset, int slot,
  613. struct slotinfo *slots, struct isp1760_qtd *qtd,
  614. struct isp1760_qh *qh, struct ptd *ptd)
  615. {
  616. struct isp1760_hcd *priv = hcd_to_priv(hcd);
  617. int skip_map;
  618. WARN_ON((slot < 0) || (slot > 31));
  619. WARN_ON(qtd->length && !qtd->payload_addr);
  620. WARN_ON(slots[slot].qtd);
  621. WARN_ON(slots[slot].qh);
  622. WARN_ON(qtd->status != QTD_PAYLOAD_ALLOC);
  623. /* Make sure done map has not triggered from some unlinked transfer */
  624. if (ptd_offset == ATL_PTD_OFFSET) {
  625. priv->atl_done_map |= reg_read32(hcd->regs,
  626. HC_ATL_PTD_DONEMAP_REG);
  627. priv->atl_done_map &= ~(1 << slot);
  628. } else {
  629. priv->int_done_map |= reg_read32(hcd->regs,
  630. HC_INT_PTD_DONEMAP_REG);
  631. priv->int_done_map &= ~(1 << slot);
  632. }
  633. qh->slot = slot;
  634. qtd->status = QTD_XFER_STARTED;
  635. slots[slot].timestamp = jiffies;
  636. slots[slot].qtd = qtd;
  637. slots[slot].qh = qh;
  638. ptd_write(hcd->regs, ptd_offset, slot, ptd);
  639. if (ptd_offset == ATL_PTD_OFFSET) {
  640. skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG);
  641. skip_map &= ~(1 << qh->slot);
  642. reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, skip_map);
  643. } else {
  644. skip_map = reg_read32(hcd->regs, HC_INT_PTD_SKIPMAP_REG);
  645. skip_map &= ~(1 << qh->slot);
  646. reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, skip_map);
  647. }
  648. }
  649. static int is_short_bulk(struct isp1760_qtd *qtd)
  650. {
  651. return (usb_pipebulk(qtd->urb->pipe) &&
  652. (qtd->actual_length < qtd->length));
  653. }
  654. static void collect_qtds(struct usb_hcd *hcd, struct isp1760_qh *qh,
  655. struct list_head *urb_list)
  656. {
  657. int last_qtd;
  658. struct isp1760_qtd *qtd, *qtd_next;
  659. struct urb_listitem *urb_listitem;
  660. list_for_each_entry_safe(qtd, qtd_next, &qh->qtd_list, qtd_list) {
  661. if (qtd->status < QTD_XFER_COMPLETE)
  662. break;
  663. last_qtd = last_qtd_of_urb(qtd, qh);
  664. if ((!last_qtd) && (qtd->status == QTD_RETIRE))
  665. qtd_next->status = QTD_RETIRE;
  666. if (qtd->status == QTD_XFER_COMPLETE) {
  667. if (qtd->actual_length) {
  668. switch (qtd->packet_type) {
  669. case IN_PID:
  670. mem_reads8(hcd->regs, qtd->payload_addr,
  671. qtd->data_buffer,
  672. qtd->actual_length);
  673. /* Fall through (?) */
  674. case OUT_PID:
  675. qtd->urb->actual_length +=
  676. qtd->actual_length;
  677. /* Fall through ... */
  678. case SETUP_PID:
  679. break;
  680. }
  681. }
  682. if (is_short_bulk(qtd)) {
  683. if (qtd->urb->transfer_flags & URB_SHORT_NOT_OK)
  684. qtd->urb->status = -EREMOTEIO;
  685. if (!last_qtd)
  686. qtd_next->status = QTD_RETIRE;
  687. }
  688. }
  689. if (qtd->payload_addr)
  690. free_mem(hcd, qtd);
  691. if (last_qtd) {
  692. if ((qtd->status == QTD_RETIRE) &&
  693. (qtd->urb->status == -EINPROGRESS))
  694. qtd->urb->status = -EPIPE;
  695. /* Defer calling of urb_done() since it releases lock */
  696. urb_listitem = kmem_cache_zalloc(urb_listitem_cachep,
  697. GFP_ATOMIC);
  698. if (unlikely(!urb_listitem))
  699. break; /* Try again on next call */
  700. urb_listitem->urb = qtd->urb;
  701. list_add_tail(&urb_listitem->urb_list, urb_list);
  702. }
  703. list_del(&qtd->qtd_list);
  704. qtd_free(qtd);
  705. }
  706. }
  707. #define ENQUEUE_DEPTH 2
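/*
 * ENQUEUE_DEPTH bounds how many qtds per queue head have chip memory and a
 * PTD prepared at a time.  With a depth of 2 the next qtd's payload is
 * already on-chip when the current one completes, so handle_done_ptds() can
 * start it straight from the interrupt handler; see also the "double
 * buffering" note in schedule_ptds() below.
 */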
  708. static void enqueue_qtds(struct usb_hcd *hcd, struct isp1760_qh *qh)
  709. {
  710. struct isp1760_hcd *priv = hcd_to_priv(hcd);
  711. int ptd_offset;
  712. struct slotinfo *slots;
  713. int curr_slot, free_slot;
  714. int n;
  715. struct ptd ptd;
  716. struct isp1760_qtd *qtd;
  717. if (unlikely(list_empty(&qh->qtd_list))) {
  718. WARN_ON(1);
  719. return;
  720. }
  721. /* Make sure this endpoint's TT buffer is clean before queueing ptds */
  722. if (qh->tt_buffer_dirty)
  723. return;
  724. if (usb_pipeint(list_entry(qh->qtd_list.next, struct isp1760_qtd,
  725. qtd_list)->urb->pipe)) {
  726. ptd_offset = INT_PTD_OFFSET;
  727. slots = priv->int_slots;
  728. } else {
  729. ptd_offset = ATL_PTD_OFFSET;
  730. slots = priv->atl_slots;
  731. }
  732. free_slot = -1;
  733. for (curr_slot = 0; curr_slot < 32; curr_slot++) {
  734. if ((free_slot == -1) && (slots[curr_slot].qtd == NULL))
  735. free_slot = curr_slot;
  736. if (slots[curr_slot].qh == qh)
  737. break;
  738. }
  739. n = 0;
  740. list_for_each_entry(qtd, &qh->qtd_list, qtd_list) {
  741. if (qtd->status == QTD_ENQUEUED) {
  742. WARN_ON(qtd->payload_addr);
  743. alloc_mem(hcd, qtd);
  744. if ((qtd->length) && (!qtd->payload_addr))
  745. break;
  746. if ((qtd->length) &&
  747. ((qtd->packet_type == SETUP_PID) ||
  748. (qtd->packet_type == OUT_PID))) {
  749. mem_writes8(hcd->regs, qtd->payload_addr,
  750. qtd->data_buffer, qtd->length);
  751. }
  752. qtd->status = QTD_PAYLOAD_ALLOC;
  753. }
  754. if (qtd->status == QTD_PAYLOAD_ALLOC) {
  755. /*
  756. if ((curr_slot > 31) && (free_slot == -1))
  757. dev_dbg(hcd->self.controller, "%s: No slot "
  758. "available for transfer\n", __func__);
  759. */
  760. /* Start xfer for this endpoint if not already done */
  761. if ((curr_slot > 31) && (free_slot > -1)) {
  762. if (usb_pipeint(qtd->urb->pipe))
  763. create_ptd_int(qh, qtd, &ptd);
  764. else
  765. create_ptd_atl(qh, qtd, &ptd);
  766. start_bus_transfer(hcd, ptd_offset, free_slot,
  767. slots, qtd, qh, &ptd);
  768. curr_slot = free_slot;
  769. }
  770. n++;
  771. if (n >= ENQUEUE_DEPTH)
  772. break;
  773. }
  774. }
  775. }
  776. void schedule_ptds(struct usb_hcd *hcd)
  777. {
  778. struct isp1760_hcd *priv;
  779. struct isp1760_qh *qh, *qh_next;
  780. struct list_head *ep_queue;
  781. struct usb_host_endpoint *ep;
  782. LIST_HEAD(urb_list);
  783. struct urb_listitem *urb_listitem, *urb_listitem_next;
  784. if (!hcd) {
  785. WARN_ON(1);
  786. return;
  787. }
  788. priv = hcd_to_priv(hcd);
  789. /*
  790. * check finished/retired xfers, transfer payloads, call urb_done()
  791. */
  792. ep_queue = &priv->interruptqhs;
  793. while (ep_queue) {
  794. list_for_each_entry_safe(qh, qh_next, ep_queue, qh_list) {
  795. ep = list_entry(qh->qtd_list.next, struct isp1760_qtd,
  796. qtd_list)->urb->ep;
  797. collect_qtds(hcd, qh, &urb_list);
  798. if (list_empty(&qh->qtd_list)) {
  799. list_del(&qh->qh_list);
  800. if (ep->hcpriv == NULL) {
  801. /* Endpoint has been disabled, so we
  802. can free the associated queue head. */
  803. qh_free(qh);
  804. }
  805. }
  806. }
  807. if (ep_queue == &priv->interruptqhs)
  808. ep_queue = &priv->controlqhs;
  809. else if (ep_queue == &priv->controlqhs)
  810. ep_queue = &priv->bulkqhs;
  811. else
  812. ep_queue = NULL;
  813. }
  814. list_for_each_entry_safe(urb_listitem, urb_listitem_next, &urb_list,
  815. urb_list) {
  816. isp1760_urb_done(hcd, urb_listitem->urb);
  817. kmem_cache_free(urb_listitem_cachep, urb_listitem);
  818. }
  819. /*
  820. * Schedule packets for transfer.
  821. *
  822. * According to USB2.0 specification:
  823. *
  824. * 1st prio: interrupt xfers, up to 80 % of bandwidth
  825. * 2nd prio: control xfers
  826. * 3rd prio: bulk xfers
  827. *
  828. * ... but let's use a simpler scheme here (mostly because ISP1761 doc
  829. * is very unclear on how to prioritize traffic):
  830. *
  831. * 1) Enqueue any queued control transfers, as long as payload chip mem
  832. * and PTD ATL slots are available.
  833. * 2) Enqueue any queued INT transfers, as long as payload chip mem
  834. * and PTD INT slots are available.
  835. * 3) Enqueue any queued bulk transfers, as long as payload chip mem
  836. * and PTD ATL slots are available.
  837. *
  838. * Use double buffering (ENQUEUE_DEPTH==2) as a compromise between
  839. * conservation of chip mem and performance.
  840. *
  841. * I'm sure this scheme could be improved upon!
  842. */
  843. ep_queue = &priv->controlqhs;
  844. while (ep_queue) {
  845. list_for_each_entry_safe(qh, qh_next, ep_queue, qh_list)
  846. enqueue_qtds(hcd, qh);
  847. if (ep_queue == &priv->controlqhs)
  848. ep_queue = &priv->interruptqhs;
  849. else if (ep_queue == &priv->interruptqhs)
  850. ep_queue = &priv->bulkqhs;
  851. else
  852. ep_queue = NULL;
  853. }
  854. }
  855. #define PTD_STATE_QTD_DONE 1
  856. #define PTD_STATE_QTD_RELOAD 2
  857. #define PTD_STATE_URB_RETIRE 3
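/*
 * The check_*_transfer() helpers below map a completed PTD onto one of these
 * outcomes: QTD_DONE (harvest the transferred byte count and move on),
 * QTD_RELOAD (rewrite the same PTD with refreshed NAK/CERR counters and
 * retry - ATL only), or URB_RETIRE (fail the whole URB and clean up,
 * including a TT buffer clear for split transactions).
 */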
  858. static int check_int_transfer(struct usb_hcd *hcd, struct ptd *ptd,
  859. struct urb *urb)
  860. {
  861. u32 dw4;
  862. int i;
  863. dw4 = ptd->dw4;
  864. dw4 >>= 8;
  865. /* FIXME: ISP1761 datasheet does not say what to do with these. Do we
  866. need to handle these errors? Is it done in hardware? */
  867. if (ptd->dw3 & DW3_HALT_BIT) {
  868. urb->status = -EPROTO; /* Default unknown error */
  869. for (i = 0; i < 8; i++) {
  870. switch (dw4 & 0x7) {
  871. case INT_UNDERRUN:
  872. dev_dbg(hcd->self.controller, "%s: underrun "
  873. "during uFrame %d\n",
  874. __func__, i);
  875. urb->status = -ECOMM; /* Could not write data */
  876. break;
  877. case INT_EXACT:
  878. dev_dbg(hcd->self.controller, "%s: transaction "
  879. "error during uFrame %d\n",
  880. __func__, i);
  881. urb->status = -EPROTO; /* timeout, bad CRC, PID
  882. error etc. */
  883. break;
  884. case INT_BABBLE:
  885. dev_dbg(hcd->self.controller, "%s: babble "
  886. "error during uFrame %d\n",
  887. __func__, i);
  888. urb->status = -EOVERFLOW;
  889. break;
  890. }
  891. dw4 >>= 3;
  892. }
  893. return PTD_STATE_URB_RETIRE;
  894. }
  895. return PTD_STATE_QTD_DONE;
  896. }
  897. static int check_atl_transfer(struct usb_hcd *hcd, struct ptd *ptd,
  898. struct urb *urb)
  899. {
  900. WARN_ON(!ptd);
  901. if (ptd->dw3 & DW3_HALT_BIT) {
  902. if (ptd->dw3 & DW3_BABBLE_BIT)
  903. urb->status = -EOVERFLOW;
  904. else if (FROM_DW3_CERR(ptd->dw3))
  905. urb->status = -EPIPE; /* Stall */
  906. else if (ptd->dw3 & DW3_ERROR_BIT)
  907. urb->status = -EPROTO; /* XactErr */
  908. else
  909. urb->status = -EPROTO; /* Unknown */
  910. /*
  911. dev_dbg(hcd->self.controller, "%s: ptd error:\n"
  912. " dw0: %08x dw1: %08x dw2: %08x dw3: %08x\n"
  913. " dw4: %08x dw5: %08x dw6: %08x dw7: %08x\n",
  914. __func__,
  915. ptd->dw0, ptd->dw1, ptd->dw2, ptd->dw3,
  916. ptd->dw4, ptd->dw5, ptd->dw6, ptd->dw7);
  917. */
  918. return PTD_STATE_URB_RETIRE;
  919. }
  920. if ((ptd->dw3 & DW3_ERROR_BIT) && (ptd->dw3 & DW3_ACTIVE_BIT)) {
  921. /* Transfer Error, *but* active and no HALT -> reload */
  922. dev_dbg(hcd->self.controller, "PID error; reloading ptd\n");
  923. return PTD_STATE_QTD_RELOAD;
  924. }
  925. if (!FROM_DW3_NAKCOUNT(ptd->dw3) && (ptd->dw3 & DW3_ACTIVE_BIT)) {
  926. /*
  927. * NAKs are handled in HW by the chip. Usually if the
  928. * device is not able to send data fast enough.
  929. * This happens mostly on slower hardware.
  930. */
  931. return PTD_STATE_QTD_RELOAD;
  932. }
  933. return PTD_STATE_QTD_DONE;
  934. }
  935. static void handle_done_ptds(struct usb_hcd *hcd)
  936. {
  937. struct isp1760_hcd *priv = hcd_to_priv(hcd);
  938. struct ptd ptd;
  939. struct isp1760_qh *qh;
  940. int slot;
  941. int state;
  942. struct slotinfo *slots;
  943. u32 ptd_offset;
  944. struct isp1760_qtd *qtd;
  945. int modified;
  946. int skip_map;
  947. skip_map = reg_read32(hcd->regs, HC_INT_PTD_SKIPMAP_REG);
  948. priv->int_done_map &= ~skip_map;
  949. skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG);
  950. priv->atl_done_map &= ~skip_map;
  951. modified = priv->int_done_map || priv->atl_done_map;
  952. while (priv->int_done_map || priv->atl_done_map) {
  953. if (priv->int_done_map) {
  954. /* INT ptd */
  955. slot = __ffs(priv->int_done_map);
  956. priv->int_done_map &= ~(1 << slot);
  957. slots = priv->int_slots;
  958. /* This should not trigger, and could be removed if
  959. no one has any problems with it triggering: */
  960. if (!slots[slot].qh) {
  961. WARN_ON(1);
  962. continue;
  963. }
  964. ptd_offset = INT_PTD_OFFSET;
  965. ptd_read(hcd->regs, INT_PTD_OFFSET, slot, &ptd);
  966. state = check_int_transfer(hcd, &ptd,
  967. slots[slot].qtd->urb);
  968. } else {
  969. /* ATL ptd */
  970. slot = __ffs(priv->atl_done_map);
  971. priv->atl_done_map &= ~(1 << slot);
  972. slots = priv->atl_slots;
  973. /* This should not trigger, and could be removed if
  974. no one has any problems with it triggering: */
  975. if (!slots[slot].qh) {
  976. WARN_ON(1);
  977. continue;
  978. }
  979. ptd_offset = ATL_PTD_OFFSET;
  980. ptd_read(hcd->regs, ATL_PTD_OFFSET, slot, &ptd);
  981. state = check_atl_transfer(hcd, &ptd,
  982. slots[slot].qtd->urb);
  983. }
  984. qtd = slots[slot].qtd;
  985. slots[slot].qtd = NULL;
  986. qh = slots[slot].qh;
  987. slots[slot].qh = NULL;
  988. qh->slot = -1;
  989. WARN_ON(qtd->status != QTD_XFER_STARTED);
  990. switch (state) {
  991. case PTD_STATE_QTD_DONE:
  992. if ((usb_pipeint(qtd->urb->pipe)) &&
  993. (qtd->urb->dev->speed != USB_SPEED_HIGH))
  994. qtd->actual_length =
  995. FROM_DW3_SCS_NRBYTESTRANSFERRED(ptd.dw3);
  996. else
  997. qtd->actual_length =
  998. FROM_DW3_NRBYTESTRANSFERRED(ptd.dw3);
  999. qtd->status = QTD_XFER_COMPLETE;
  1000. if (list_is_last(&qtd->qtd_list, &qh->qtd_list) ||
  1001. is_short_bulk(qtd))
  1002. qtd = NULL;
  1003. else
  1004. qtd = list_entry(qtd->qtd_list.next,
  1005. typeof(*qtd), qtd_list);
  1006. qh->toggle = FROM_DW3_DATA_TOGGLE(ptd.dw3);
  1007. qh->ping = FROM_DW3_PING(ptd.dw3);
  1008. break;
  1009. case PTD_STATE_QTD_RELOAD: /* QTD_RETRY, for atls only */
  1010. qtd->status = QTD_PAYLOAD_ALLOC;
  1011. ptd.dw0 |= DW0_VALID_BIT;
  1012. /* RL counter = ERR counter */
  1013. ptd.dw3 &= ~TO_DW3_NAKCOUNT(0xf);
  1014. ptd.dw3 |= TO_DW3_NAKCOUNT(FROM_DW2_RL(ptd.dw2));
  1015. ptd.dw3 &= ~TO_DW3_CERR(3);
  1016. ptd.dw3 |= TO_DW3_CERR(ERR_COUNTER);
  1017. qh->toggle = FROM_DW3_DATA_TOGGLE(ptd.dw3);
  1018. qh->ping = FROM_DW3_PING(ptd.dw3);
  1019. break;
  1020. case PTD_STATE_URB_RETIRE:
  1021. qtd->status = QTD_RETIRE;
  1022. if ((qtd->urb->dev->speed != USB_SPEED_HIGH) &&
  1023. (qtd->urb->status != -EPIPE) &&
  1024. (qtd->urb->status != -EREMOTEIO)) {
  1025. qh->tt_buffer_dirty = 1;
  1026. if (usb_hub_clear_tt_buffer(qtd->urb))
  1027. /* Clear failed; let's hope things work
  1028. anyway */
  1029. qh->tt_buffer_dirty = 0;
  1030. }
  1031. qtd = NULL;
  1032. qh->toggle = 0;
  1033. qh->ping = 0;
  1034. break;
  1035. default:
  1036. WARN_ON(1);
  1037. continue;
  1038. }
  1039. if (qtd && (qtd->status == QTD_PAYLOAD_ALLOC)) {
  1040. if (slots == priv->int_slots) {
  1041. if (state == PTD_STATE_QTD_RELOAD)
  1042. dev_err(hcd->self.controller,
  1043. "%s: PTD_STATE_QTD_RELOAD on "
  1044. "interrupt packet\n", __func__);
  1045. if (state != PTD_STATE_QTD_RELOAD)
  1046. create_ptd_int(qh, qtd, &ptd);
  1047. } else {
  1048. if (state != PTD_STATE_QTD_RELOAD)
  1049. create_ptd_atl(qh, qtd, &ptd);
  1050. }
  1051. start_bus_transfer(hcd, ptd_offset, slot, slots, qtd,
  1052. qh, &ptd);
  1053. }
  1054. }
  1055. if (modified)
  1056. schedule_ptds(hcd);
  1057. }
  1058. static irqreturn_t isp1760_irq(struct usb_hcd *hcd)
  1059. {
  1060. struct isp1760_hcd *priv = hcd_to_priv(hcd);
  1061. u32 imask;
  1062. irqreturn_t irqret = IRQ_NONE;
  1063. spin_lock(&priv->lock);
  1064. if (!(hcd->state & HC_STATE_RUNNING))
  1065. goto leave;
  1066. imask = reg_read32(hcd->regs, HC_INTERRUPT_REG);
  1067. if (unlikely(!imask))
  1068. goto leave;
  1069. reg_write32(hcd->regs, HC_INTERRUPT_REG, imask); /* Clear */
  1070. priv->int_done_map |= reg_read32(hcd->regs, HC_INT_PTD_DONEMAP_REG);
  1071. priv->atl_done_map |= reg_read32(hcd->regs, HC_ATL_PTD_DONEMAP_REG);
  1072. handle_done_ptds(hcd);
  1073. irqret = IRQ_HANDLED;
  1074. leave:
  1075. spin_unlock(&priv->lock);
  1076. return irqret;
  1077. }
  1078. /*
  1079. * Workaround for problem described in chip errata 2:
  1080. *
  1081. * Sometimes interrupts are not generated when ATL (not INT?) completion occurs.
  1082. * One solution suggested in the errata is to use SOF interrupts _instead_of_
  1083. * ATL done interrupts (the "instead of" might be important since it seems
  1084. * enabling ATL interrupts also causes the chip to sometimes - rarely - "forget"
  1085. * to set the PTD's done bit in addition to not generating an interrupt!).
  1086. *
  1087. * So if we use SOF + ATL interrupts, we sometimes get stale PTDs since their
  1088. * done bit is not being set. This is bad - it blocks the endpoint until reboot.
  1089. *
  1090. * If we use SOF interrupts only, we get latency between ptd completion and the
  1091. * actual handling. This is very noticeable in testusb runs which take several
  1092. * minutes longer without ATL interrupts.
  1093. *
  1094. * A better solution is to run the code below every SLOT_CHECK_PERIOD ms. If it
  1095. * finds active ATL slots which are older than SLOT_TIMEOUT ms, it checks the
  1096. * slot's ACTIVE and VALID bits. If these are not set, the ptd is considered
  1097. * completed and its done map bit is set.
  1098. *
  1099. * The values of SLOT_TIMEOUT and SLOT_CHECK_PERIOD have been arbitrarily chosen
  1100. * not to cause too much lag when this HW bug occurs, while still hopefully
  1101. * ensuring that the check does not falsely trigger.
  1102. */
  1103. #define SLOT_TIMEOUT 300
  1104. #define SLOT_CHECK_PERIOD 200
  1105. static struct timer_list errata2_timer;
  1106. void errata2_function(unsigned long data)
  1107. {
  1108. struct usb_hcd *hcd = (struct usb_hcd *) data;
  1109. struct isp1760_hcd *priv = hcd_to_priv(hcd);
  1110. int slot;
  1111. struct ptd ptd;
  1112. unsigned long spinflags;
  1113. spin_lock_irqsave(&priv->lock, spinflags);
  1114. for (slot = 0; slot < 32; slot++)
  1115. if (priv->atl_slots[slot].qh && time_after(jiffies,
  1116. priv->atl_slots[slot].timestamp +
  1117. SLOT_TIMEOUT * HZ / 1000)) {
  1118. ptd_read(hcd->regs, ATL_PTD_OFFSET, slot, &ptd);
  1119. if (!FROM_DW0_VALID(ptd.dw0) &&
  1120. !FROM_DW3_ACTIVE(ptd.dw3))
  1121. priv->atl_done_map |= 1 << slot;
  1122. }
  1123. if (priv->atl_done_map)
  1124. handle_done_ptds(hcd);
  1125. spin_unlock_irqrestore(&priv->lock, spinflags);
  1126. errata2_timer.expires = jiffies + SLOT_CHECK_PERIOD * HZ / 1000;
  1127. add_timer(&errata2_timer);
  1128. }
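/*
 * The timer re-arms itself every SLOT_CHECK_PERIOD ms, so an ATL slot whose
 * done interrupt was swallowed by the erratum is picked up roughly within
 * SLOT_TIMEOUT + SLOT_CHECK_PERIOD ms (about 500 ms) of being started.
 */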
  1129. static int isp1760_run(struct usb_hcd *hcd)
  1130. {
  1131. int retval;
  1132. u32 temp;
  1133. u32 command;
  1134. u32 chipid;
  1135. hcd->uses_new_polling = 1;
  1136. hcd->state = HC_STATE_RUNNING;
  1137. /* Set PTD interrupt AND & OR maps */
  1138. reg_write32(hcd->regs, HC_ATL_IRQ_MASK_AND_REG, 0);
  1139. reg_write32(hcd->regs, HC_ATL_IRQ_MASK_OR_REG, 0xffffffff);
  1140. reg_write32(hcd->regs, HC_INT_IRQ_MASK_AND_REG, 0);
  1141. reg_write32(hcd->regs, HC_INT_IRQ_MASK_OR_REG, 0xffffffff);
  1142. reg_write32(hcd->regs, HC_ISO_IRQ_MASK_AND_REG, 0);
  1143. reg_write32(hcd->regs, HC_ISO_IRQ_MASK_OR_REG, 0xffffffff);
  1144. /* step 23 passed */
  1145. temp = reg_read32(hcd->regs, HC_HW_MODE_CTRL);
  1146. reg_write32(hcd->regs, HC_HW_MODE_CTRL, temp | HW_GLOBAL_INTR_EN);
  1147. command = reg_read32(hcd->regs, HC_USBCMD);
  1148. command &= ~(CMD_LRESET|CMD_RESET);
  1149. command |= CMD_RUN;
  1150. reg_write32(hcd->regs, HC_USBCMD, command);
  1151. retval = handshake(hcd, HC_USBCMD, CMD_RUN, CMD_RUN, 250 * 1000);
  1152. if (retval)
  1153. return retval;
  1154. /*
  1155. * XXX
  1156. * Spec says to write FLAG_CF as last config action, priv code grabs
  1157. * the semaphore while doing so.
  1158. */
  1159. down_write(&ehci_cf_port_reset_rwsem);
  1160. reg_write32(hcd->regs, HC_CONFIGFLAG, FLAG_CF);
  1161. retval = handshake(hcd, HC_CONFIGFLAG, FLAG_CF, FLAG_CF, 250 * 1000);
  1162. up_write(&ehci_cf_port_reset_rwsem);
  1163. if (retval)
  1164. return retval;
  1165. init_timer(&errata2_timer);
  1166. errata2_timer.function = errata2_function;
  1167. errata2_timer.data = (unsigned long) hcd;
  1168. errata2_timer.expires = jiffies + SLOT_CHECK_PERIOD * HZ / 1000;
  1169. add_timer(&errata2_timer);
  1170. chipid = reg_read32(hcd->regs, HC_CHIP_ID_REG);
  1171. dev_info(hcd->self.controller, "USB ISP %04x HW rev. %d started\n",
  1172. chipid & 0xffff, chipid >> 16);
  1173. /* PTD Register Init Part 2, Step 28 */
  1174. /* Setup registers controlling PTD checking */
  1175. reg_write32(hcd->regs, HC_ATL_PTD_LASTPTD_REG, 0x80000000);
  1176. reg_write32(hcd->regs, HC_INT_PTD_LASTPTD_REG, 0x80000000);
  1177. reg_write32(hcd->regs, HC_ISO_PTD_LASTPTD_REG, 0x00000001);
  1178. reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, 0xffffffff);
  1179. reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, 0xffffffff);
  1180. reg_write32(hcd->regs, HC_ISO_PTD_SKIPMAP_REG, 0xffffffff);
  1181. reg_write32(hcd->regs, HC_BUFFER_STATUS_REG,
  1182. ATL_BUF_FILL | INT_BUF_FILL);
  1183. /* GRR this is run-once init(), being done every time the HC starts.
  1184. * So long as they're part of class devices, we can't do it init()
  1185. * since the class device isn't created that early.
  1186. */
  1187. return 0;
  1188. }
  1189. static int qtd_fill(struct isp1760_qtd *qtd, void *databuffer, size_t len)
  1190. {
  1191. qtd->data_buffer = databuffer;
  1192. if (len > MAX_PAYLOAD_SIZE)
  1193. len = MAX_PAYLOAD_SIZE;
  1194. qtd->length = len;
  1195. return qtd->length;
  1196. }
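/*
 * qtd_fill() caps each qtd at MAX_PAYLOAD_SIZE and reports how much it took,
 * so the loop in packetize_urb() below simply keeps allocating qtds until
 * the whole transfer_buffer is covered.
 */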
  1197. static void qtd_list_free(struct list_head *qtd_list)
  1198. {
  1199. struct isp1760_qtd *qtd, *qtd_next;
  1200. list_for_each_entry_safe(qtd, qtd_next, qtd_list, qtd_list) {
  1201. list_del(&qtd->qtd_list);
  1202. qtd_free(qtd);
  1203. }
  1204. }
  1205. /*
  1206. * Packetize urb->transfer_buffer into list of packets of size wMaxPacketSize.
  1207. * Also calculate the PID type (SETUP/IN/OUT) for each packet.
  1208. */
  1209. #define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
  1210. static void packetize_urb(struct usb_hcd *hcd,
  1211. struct urb *urb, struct list_head *head, gfp_t flags)
  1212. {
  1213. struct isp1760_qtd *qtd;
  1214. void *buf;
  1215. int len, maxpacketsize;
  1216. u8 packet_type;
  1217. /*
  1218. * URBs map to sequences of QTDs: one logical transaction
  1219. */
  1220. if (!urb->transfer_buffer && urb->transfer_buffer_length) {
  1221. /* XXX This looks like usb storage / SCSI bug */
  1222. dev_err(hcd->self.controller,
  1223. "buf is null, dma is %08lx len is %d\n",
  1224. (long unsigned)urb->transfer_dma,
  1225. urb->transfer_buffer_length);
  1226. WARN_ON(1);
  1227. }
  1228. if (usb_pipein(urb->pipe))
  1229. packet_type = IN_PID;
  1230. else
  1231. packet_type = OUT_PID;
  1232. if (usb_pipecontrol(urb->pipe)) {
  1233. qtd = qtd_alloc(flags, urb, SETUP_PID);
  1234. if (!qtd)
  1235. goto cleanup;
  1236. qtd_fill(qtd, urb->setup_packet, sizeof(struct usb_ctrlrequest));
  1237. list_add_tail(&qtd->qtd_list, head);
  1238. /* for zero length DATA stages, STATUS is always IN */
  1239. if (urb->transfer_buffer_length == 0)
  1240. packet_type = IN_PID;
  1241. }
  1242. maxpacketsize = max_packet(usb_maxpacket(urb->dev, urb->pipe,
  1243. usb_pipeout(urb->pipe)));
  1244. /*
  1245. * buffer gets wrapped in one or more qtds;
  1246. * last one may be "short" (including zero len)
  1247. * and may serve as a control status ack
  1248. */
  1249. buf = urb->transfer_buffer;
  1250. len = urb->transfer_buffer_length;
  1251. for (;;) {
  1252. int this_qtd_len;
  1253. qtd = qtd_alloc(flags, urb, packet_type);
  1254. if (!qtd)
  1255. goto cleanup;
  1256. this_qtd_len = qtd_fill(qtd, buf, len);
  1257. list_add_tail(&qtd->qtd_list, head);
  1258. len -= this_qtd_len;
  1259. buf += this_qtd_len;
  1260. if (len <= 0)
  1261. break;
  1262. }
  1263. /*
  1264. * control requests may need a terminating data "status" ack;
  1265. * bulk ones may need a terminating short packet (zero length).
  1266. */
  1267. if (urb->transfer_buffer_length != 0) {
  1268. int one_more = 0;
  1269. if (usb_pipecontrol(urb->pipe)) {
  1270. one_more = 1;
  1271. if (packet_type == IN_PID)
  1272. packet_type = OUT_PID;
  1273. else
  1274. packet_type = IN_PID;
  1275. } else if (usb_pipebulk(urb->pipe)
  1276. && (urb->transfer_flags & URB_ZERO_PACKET)
  1277. && !(urb->transfer_buffer_length %
  1278. maxpacketsize)) {
  1279. one_more = 1;
  1280. }
  1281. if (one_more) {
  1282. qtd = qtd_alloc(flags, urb, packet_type);
  1283. if (!qtd)
  1284. goto cleanup;
  1285. /* never any data in such packets */
  1286. qtd_fill(qtd, NULL, 0);
  1287. list_add_tail(&qtd->qtd_list, head);
  1288. }
  1289. }
  1290. return;
  1291. cleanup:
  1292. qtd_list_free(head);
  1293. }
static int isp1760_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
		gfp_t mem_flags)
{
	struct isp1760_hcd *priv = hcd_to_priv(hcd);
	struct list_head *ep_queue;
	struct isp1760_qh *qh, *qhit;
	unsigned long spinflags;
	LIST_HEAD(new_qtds);
	int retval;
	int qh_in_queue;

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		ep_queue = &priv->controlqhs;
		break;
	case PIPE_BULK:
		ep_queue = &priv->bulkqhs;
		break;
	case PIPE_INTERRUPT:
		if (urb->interval < 0)
			return -EINVAL;
		/* FIXME: Check bandwidth */
		ep_queue = &priv->interruptqhs;
		break;
	case PIPE_ISOCHRONOUS:
		dev_err(hcd->self.controller, "%s: isochronous USB packets "
							"not yet supported\n",
							__func__);
		return -EPIPE;
	default:
		dev_err(hcd->self.controller, "%s: unknown pipe type\n",
							__func__);
		return -EPIPE;
	}

	if (usb_pipein(urb->pipe))
		urb->actual_length = 0;

	packetize_urb(hcd, urb, &new_qtds, mem_flags);
	if (list_empty(&new_qtds))
		return -ENOMEM;

	retval = 0;
	spin_lock_irqsave(&priv->lock, spinflags);

	if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
		retval = -ESHUTDOWN;
		goto out;
	}
	retval = usb_hcd_link_urb_to_ep(hcd, urb);
	if (retval)
		goto out;

	qh = urb->ep->hcpriv;
	if (qh) {
		qh_in_queue = 0;
		list_for_each_entry(qhit, ep_queue, qh_list) {
			if (qhit == qh) {
				qh_in_queue = 1;
				break;
			}
		}
		if (!qh_in_queue)
			list_add_tail(&qh->qh_list, ep_queue);
	} else {
		qh = qh_alloc(GFP_ATOMIC);
		if (!qh) {
			retval = -ENOMEM;
			usb_hcd_unlink_urb_from_ep(hcd, urb);
			goto out;
		}
		list_add_tail(&qh->qh_list, ep_queue);
		urb->ep->hcpriv = qh;
	}

	list_splice_tail(&new_qtds, &qh->qtd_list);
	schedule_ptds(hcd);

out:
	spin_unlock_irqrestore(&priv->lock, spinflags);
	return retval;
}
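/*
 * Hedged usage sketch (not part of this driver): URBs arrive at
 * isp1760_urb_enqueue() through usb_submit_urb() in the USB core.  A minimal
 * bulk-OUT submission by a class driver could look roughly like the fragment
 * below; urb, udev, ep_addr, buf, buf_len, my_complete, ctx and ret are
 * hypothetical names.
 */
#if 0
	usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, ep_addr),
			  buf, buf_len, my_complete, ctx);
	ret = usb_submit_urb(urb, GFP_KERNEL);	/* ends up in ->urb_enqueue */
#endif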
static void kill_transfer(struct usb_hcd *hcd, struct urb *urb,
		struct isp1760_qh *qh)
{
	struct isp1760_hcd *priv = hcd_to_priv(hcd);
	int skip_map;

	WARN_ON(qh->slot == -1);

	/* We need to forcefully reclaim the slot since some transfers never
	   return, e.g. interrupt transfers and NAKed bulk transfers. */
	if (usb_pipecontrol(urb->pipe) || usb_pipebulk(urb->pipe)) {
		skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG);
		skip_map |= (1 << qh->slot);
		reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, skip_map);
		priv->atl_slots[qh->slot].qh = NULL;
		priv->atl_slots[qh->slot].qtd = NULL;
	} else {
		skip_map = reg_read32(hcd->regs, HC_INT_PTD_SKIPMAP_REG);
		skip_map |= (1 << qh->slot);
		reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, skip_map);
		priv->int_slots[qh->slot].qh = NULL;
		priv->int_slots[qh->slot].qtd = NULL;
	}

	qh->slot = -1;
}
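/*
 * Illustrative sketch (an assumption, not code from this driver): the skip
 * map is a one-bit-per-PTD-slot mask, so re-enabling a slot is simply the
 * mirror image of the read-modify-write above; 'slot' here is a hypothetical
 * index.
 */
#if 0
	skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG);
	skip_map &= ~(1 << slot);	/* let the controller process this PTD again */
	reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, skip_map);
#endif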
/*
 * Retire the qtds beginning at 'qtd' and belonging all to the same urb, killing
 * any active transfer belonging to the urb in the process.
 */
static void dequeue_urb_from_qtd(struct usb_hcd *hcd, struct isp1760_qh *qh,
		struct isp1760_qtd *qtd)
{
	struct urb *urb;
	int urb_was_running;

	urb = qtd->urb;
	urb_was_running = 0;
	list_for_each_entry_from(qtd, &qh->qtd_list, qtd_list) {
		if (qtd->urb != urb)
			break;

		if (qtd->status >= QTD_XFER_STARTED)
			urb_was_running = 1;
		if (last_qtd_of_urb(qtd, qh) &&
					(qtd->status >= QTD_XFER_COMPLETE))
			urb_was_running = 0;

		if (qtd->status == QTD_XFER_STARTED)
			kill_transfer(hcd, urb, qh);
		qtd->status = QTD_RETIRE;
	}

	if ((urb->dev->speed != USB_SPEED_HIGH) && urb_was_running) {
		qh->tt_buffer_dirty = 1;
		if (usb_hub_clear_tt_buffer(urb))
			/* Clear failed; let's hope things work anyway */
			qh->tt_buffer_dirty = 0;
	}
}
static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
		int status)
{
	struct isp1760_hcd *priv = hcd_to_priv(hcd);
	unsigned long spinflags;
	struct isp1760_qh *qh;
	struct isp1760_qtd *qtd;
	int retval = 0;

	spin_lock_irqsave(&priv->lock, spinflags);
	retval = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (retval)
		goto out;

	qh = urb->ep->hcpriv;
	if (!qh) {
		retval = -EINVAL;
		goto out;
	}

	list_for_each_entry(qtd, &qh->qtd_list, qtd_list)
		if (qtd->urb == urb) {
			dequeue_urb_from_qtd(hcd, qh, qtd);
			break;
		}

	urb->status = status;
	schedule_ptds(hcd);

out:
	spin_unlock_irqrestore(&priv->lock, spinflags);
	return retval;
}
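/*
 * Hedged usage sketch (not part of this file): ->urb_dequeue is reached when
 * a class driver cancels an in-flight URB, e.g. a synchronous cancel with
 * status -ENOENT or an asynchronous one with -ECONNRESET; 'urb' below is a
 * hypothetical handle.
 */
#if 0
	usb_kill_urb(urb);	/* blocks until ->urb_dequeue has retired it */
#endif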
static void isp1760_endpoint_disable(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep)
{
	struct isp1760_hcd *priv = hcd_to_priv(hcd);
	unsigned long spinflags;
	struct isp1760_qh *qh;
	struct isp1760_qtd *qtd;

	spin_lock_irqsave(&priv->lock, spinflags);

	qh = ep->hcpriv;
	if (!qh)
		goto out;

	list_for_each_entry(qtd, &qh->qtd_list, qtd_list)
		if (qtd->status != QTD_RETIRE) {
			dequeue_urb_from_qtd(hcd, qh, qtd);
			qtd->urb->status = -ECONNRESET;
		}

	ep->hcpriv = NULL;
	/* Cannot free qh here since it will be parsed by schedule_ptds() */
	schedule_ptds(hcd);

out:
	spin_unlock_irqrestore(&priv->lock, spinflags);
}
static int isp1760_hub_status_data(struct usb_hcd *hcd, char *buf)
{
	struct isp1760_hcd *priv = hcd_to_priv(hcd);
	u32 temp, status = 0;
	u32 mask;
	int retval = 1;
	unsigned long flags;

	/* if !USB_SUSPEND, root hub timers won't get shut down ... */
	if (!HC_IS_RUNNING(hcd->state))
		return 0;

	/* init status to no-changes */
	buf[0] = 0;
	mask = PORT_CSC;

	spin_lock_irqsave(&priv->lock, flags);
	temp = reg_read32(hcd->regs, HC_PORTSC1);

	if (temp & PORT_OWNER) {
		if (temp & PORT_CSC) {
			temp &= ~PORT_CSC;
			reg_write32(hcd->regs, HC_PORTSC1, temp);
			goto done;
		}
	}

	/*
	 * Return status information even for ports with OWNER set.
	 * Otherwise khubd wouldn't see the disconnect event when a
	 * high-speed device is switched over to the companion
	 * controller by the user.
	 */
	if ((temp & mask) != 0
			|| ((temp & PORT_RESUME) != 0
				&& time_after_eq(jiffies,
					priv->reset_done))) {
		buf[0] |= 1 << (0 + 1);
		status = STS_PCD;
	}
	/* FIXME autosuspend idle root hubs */
done:
	spin_unlock_irqrestore(&priv->lock, flags);
	return status ? retval : 0;
}
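/*
 * Worked example (illustrative): in the change bitmap handed back to the hub
 * driver, bit 0 stands for the hub itself and bit N for port N, which is why
 * the single root port above reports its change as 1 << (0 + 1), i.e. 0x02.
 */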
static void isp1760_hub_descriptor(struct isp1760_hcd *priv,
		struct usb_hub_descriptor *desc)
{
	int ports = HCS_N_PORTS(priv->hcs_params);
	u16 temp;

	desc->bDescriptorType = 0x29;
	/* EHCI 1.0, 2.3.9 says 20ms max */
	desc->bPwrOn2PwrGood = 10;
	desc->bHubContrCurrent = 0;

	desc->bNbrPorts = ports;
	temp = 1 + (ports / 8);
	desc->bDescLength = 7 + 2 * temp;

	/* ports removable, and usb 1.0 legacy PortPwrCtrlMask */
	memset(&desc->u.hs.DeviceRemovable[0], 0, temp);
	memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);

	/* per-port overcurrent reporting */
	temp = 0x0008;
	if (HCS_PPC(priv->hcs_params))
		/* per-port power control */
		temp |= 0x0001;
	else
		/* no power switching */
		temp |= 0x0002;
	desc->wHubCharacteristics = cpu_to_le16(temp);
}
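/*
 * Worked example (illustrative): with HCS_N_PORTS() == 1, temp = 1 + 1/8 = 1
 * byte of port bitmap, so bDescLength = 7 + 2 * 1 = 9.  wHubCharacteristics
 * becomes 0x0009 (per-port over-current reporting plus per-port power
 * switching) when HCS_PPC() is set, and 0x000a (no power switching)
 * otherwise.
 */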
#define PORT_WAKE_BITS	(PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E)

static int check_reset_complete(struct usb_hcd *hcd, int index,
		int port_status)
{
	if (!(port_status & PORT_CONNECT))
		return port_status;

	/* if reset finished and it's still not enabled -- handoff */
	if (!(port_status & PORT_PE)) {
		dev_info(hcd->self.controller,
				"port %d full speed --> companion\n",
				index + 1);

		port_status |= PORT_OWNER;
		port_status &= ~PORT_RWC_BITS;
		reg_write32(hcd->regs, HC_PORTSC1, port_status);
	} else
		dev_info(hcd->self.controller, "port %d high speed\n",
				index + 1);

	return port_status;
}
static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
		u16 wValue, u16 wIndex, char *buf, u16 wLength)
{
	struct isp1760_hcd *priv = hcd_to_priv(hcd);
	int ports = HCS_N_PORTS(priv->hcs_params);
	u32 temp, status;
	unsigned long flags;
	int retval = 0;
	unsigned selector;

	/*
	 * FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR.
	 * HCS_INDICATOR may say we can change LEDs to off/amber/green.
	 * (track current state ourselves) ... blink for diagnostics,
	 * power, "this is the one", etc.  EHCI spec supports this.
	 */

	spin_lock_irqsave(&priv->lock, flags);
	switch (typeReq) {
	case ClearHubFeature:
		switch (wValue) {
		case C_HUB_LOCAL_POWER:
		case C_HUB_OVER_CURRENT:
			/* no hub-wide feature/status flags */
			break;
		default:
			goto error;
		}
		break;
	case ClearPortFeature:
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;
		temp = reg_read32(hcd->regs, HC_PORTSC1);

		/*
		 * Even if OWNER is set, so the port is owned by the
		 * companion controller, khubd needs to be able to clear
		 * the port-change status bits (especially
		 * USB_PORT_STAT_C_CONNECTION).
		 */
		switch (wValue) {
		case USB_PORT_FEAT_ENABLE:
			reg_write32(hcd->regs, HC_PORTSC1, temp & ~PORT_PE);
			break;
		case USB_PORT_FEAT_C_ENABLE:
			/* XXX error? */
			break;
		case USB_PORT_FEAT_SUSPEND:
			if (temp & PORT_RESET)
				goto error;

			if (temp & PORT_SUSPEND) {
				if ((temp & PORT_PE) == 0)
					goto error;
				/* resume signaling for 20 msec */
				temp &= ~(PORT_RWC_BITS);
				reg_write32(hcd->regs, HC_PORTSC1,
							temp | PORT_RESUME);
				priv->reset_done = jiffies +
						msecs_to_jiffies(20);
			}
			break;
		case USB_PORT_FEAT_C_SUSPEND:
			/* we auto-clear this feature */
			break;
		case USB_PORT_FEAT_POWER:
			if (HCS_PPC(priv->hcs_params))
				reg_write32(hcd->regs, HC_PORTSC1,
							temp & ~PORT_POWER);
			break;
		case USB_PORT_FEAT_C_CONNECTION:
			reg_write32(hcd->regs, HC_PORTSC1, temp | PORT_CSC);
			break;
		case USB_PORT_FEAT_C_OVER_CURRENT:
			/* XXX error? */
			break;
		case USB_PORT_FEAT_C_RESET:
			/* GetPortStatus clears reset */
			break;
		default:
			goto error;
		}
		reg_read32(hcd->regs, HC_USBCMD);
		break;
	case GetHubDescriptor:
		isp1760_hub_descriptor(priv, (struct usb_hub_descriptor *)
			buf);
		break;
	case GetHubStatus:
		/* no hub-wide feature/status flags */
		memset(buf, 0, 4);
		break;
	case GetPortStatus:
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;
		status = 0;
		temp = reg_read32(hcd->regs, HC_PORTSC1);

		/* wPortChange bits */
		if (temp & PORT_CSC)
			status |= USB_PORT_STAT_C_CONNECTION << 16;

		/* whoever resumes must GetPortStatus to complete it!! */
		if (temp & PORT_RESUME) {
			dev_err(hcd->self.controller, "Port resume should be skipped.\n");

			/* Remote Wakeup received? */
			if (!priv->reset_done) {
				/* resume signaling for 20 msec */
				priv->reset_done = jiffies
						+ msecs_to_jiffies(20);
				/* check the port again */
				mod_timer(&hcd->rh_timer, priv->reset_done);
			}

			/* resume completed? */
			else if (time_after_eq(jiffies,
					priv->reset_done)) {
				status |= USB_PORT_STAT_C_SUSPEND << 16;
				priv->reset_done = 0;

				/* stop resume signaling */
				temp = reg_read32(hcd->regs, HC_PORTSC1);
				reg_write32(hcd->regs, HC_PORTSC1,
					temp & ~(PORT_RWC_BITS | PORT_RESUME));
				retval = handshake(hcd, HC_PORTSC1,
					PORT_RESUME, 0, 2000 /* 2msec */);
				if (retval != 0) {
					dev_err(hcd->self.controller,
						"port %d resume error %d\n",
						wIndex + 1, retval);
					goto error;
				}
				temp &= ~(PORT_SUSPEND|PORT_RESUME|(3<<10));
			}
		}

		/* whoever resets must GetPortStatus to complete it!! */
		if ((temp & PORT_RESET)
				&& time_after_eq(jiffies,
					priv->reset_done)) {
			status |= USB_PORT_STAT_C_RESET << 16;
			priv->reset_done = 0;

			/* force reset to complete */
			reg_write32(hcd->regs, HC_PORTSC1, temp & ~PORT_RESET);
			/* REVISIT: some hardware needs 550+ usec to clear
			 * this bit; seems too long to spin routinely...
			 */
			retval = handshake(hcd, HC_PORTSC1,
					PORT_RESET, 0, 750);
			if (retval != 0) {
				dev_err(hcd->self.controller, "port %d reset error %d\n",
						wIndex + 1, retval);
				goto error;
			}

			/* see what we found out */
			temp = check_reset_complete(hcd, wIndex,
					reg_read32(hcd->regs, HC_PORTSC1));
		}
		/*
		 * Even if OWNER is set, there's no harm letting khubd
		 * see the wPortStatus values (they should all be 0 except
		 * for PORT_POWER anyway).
		 */
		if (temp & PORT_OWNER)
			dev_err(hcd->self.controller, "PORT_OWNER is set\n");

		if (temp & PORT_CONNECT) {
			status |= USB_PORT_STAT_CONNECTION;
			/* status may be from integrated TT */
			status |= USB_PORT_STAT_HIGH_SPEED;
		}
		if (temp & PORT_PE)
			status |= USB_PORT_STAT_ENABLE;
		if (temp & (PORT_SUSPEND|PORT_RESUME))
			status |= USB_PORT_STAT_SUSPEND;
		if (temp & PORT_RESET)
			status |= USB_PORT_STAT_RESET;
		if (temp & PORT_POWER)
			status |= USB_PORT_STAT_POWER;

		put_unaligned(cpu_to_le32(status), (__le32 *) buf);
		break;
	case SetHubFeature:
		switch (wValue) {
		case C_HUB_LOCAL_POWER:
		case C_HUB_OVER_CURRENT:
			/* no hub-wide feature/status flags */
			break;
		default:
			goto error;
		}
		break;
	case SetPortFeature:
		selector = wIndex >> 8;
		wIndex &= 0xff;
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;
		temp = reg_read32(hcd->regs, HC_PORTSC1);
		if (temp & PORT_OWNER)
			break;

/*		temp &= ~PORT_RWC_BITS; */
		switch (wValue) {
		case USB_PORT_FEAT_ENABLE:
			reg_write32(hcd->regs, HC_PORTSC1, temp | PORT_PE);
			break;
		case USB_PORT_FEAT_SUSPEND:
			if ((temp & PORT_PE) == 0
					|| (temp & PORT_RESET) != 0)
				goto error;

			reg_write32(hcd->regs, HC_PORTSC1, temp | PORT_SUSPEND);
			break;
		case USB_PORT_FEAT_POWER:
			if (HCS_PPC(priv->hcs_params))
				reg_write32(hcd->regs, HC_PORTSC1,
							temp | PORT_POWER);
			break;
		case USB_PORT_FEAT_RESET:
			if (temp & PORT_RESUME)
				goto error;
			/* line status bits may report this as low speed,
			 * which can be fine if this root hub has a
			 * transaction translator built in.
			 */
			if ((temp & (PORT_PE|PORT_CONNECT)) == PORT_CONNECT
					&& PORT_USB11(temp)) {
				temp |= PORT_OWNER;
			} else {
				temp |= PORT_RESET;
				temp &= ~PORT_PE;

				/*
				 * caller must wait, then call GetPortStatus
				 * usb 2.0 spec says 50 ms resets on root
				 */
				priv->reset_done = jiffies +
						msecs_to_jiffies(50);
			}
			reg_write32(hcd->regs, HC_PORTSC1, temp);
			break;
		default:
			goto error;
		}
		reg_read32(hcd->regs, HC_USBCMD);
		break;

	default:
error:
		/* "stall" on error */
		retval = -EPIPE;
	}
	spin_unlock_irqrestore(&priv->lock, flags);
	return retval;
}
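/*
 * Worked example (illustrative): for a connected, enabled, powered high-speed
 * port that just reported a connect change, the GetPortStatus path above
 * returns USB_PORT_STAT_CONNECTION | USB_PORT_STAT_ENABLE |
 * USB_PORT_STAT_POWER | USB_PORT_STAT_HIGH_SPEED in the low word and
 * USB_PORT_STAT_C_CONNECTION in the high word, i.e. cpu_to_le32(0x00010503).
 */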
static int isp1760_get_frame(struct usb_hcd *hcd)
{
	struct isp1760_hcd *priv = hcd_to_priv(hcd);
	u32 fr;

	fr = reg_read32(hcd->regs, HC_FRINDEX);
	return (fr >> 3) % priv->periodic_size;
}
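/*
 * Worked example (illustrative): HC_FRINDEX counts 125 us micro-frames, so
 * the shift by 3 converts it to 1 ms frames; a raw value of 0x50 (80
 * micro-frames) is frame 10, which the modulo then folds into the periodic
 * schedule.
 */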
static void isp1760_stop(struct usb_hcd *hcd)
{
	struct isp1760_hcd *priv = hcd_to_priv(hcd);
	u32 temp;

	del_timer(&errata2_timer);

	isp1760_hub_control(hcd, ClearPortFeature, USB_PORT_FEAT_POWER, 1,
			NULL, 0);
	mdelay(20);

	spin_lock_irq(&priv->lock);
	ehci_reset(hcd);
	/* Disable IRQ */
	temp = reg_read32(hcd->regs, HC_HW_MODE_CTRL);
	reg_write32(hcd->regs, HC_HW_MODE_CTRL, temp &= ~HW_GLOBAL_INTR_EN);
	spin_unlock_irq(&priv->lock);

	reg_write32(hcd->regs, HC_CONFIGFLAG, 0);
}
static void isp1760_shutdown(struct usb_hcd *hcd)
{
	u32 command, temp;

	isp1760_stop(hcd);
	temp = reg_read32(hcd->regs, HC_HW_MODE_CTRL);
	reg_write32(hcd->regs, HC_HW_MODE_CTRL, temp &= ~HW_GLOBAL_INTR_EN);

	command = reg_read32(hcd->regs, HC_USBCMD);
	command &= ~CMD_RUN;
	reg_write32(hcd->regs, HC_USBCMD, command);
}
static void isp1760_clear_tt_buffer_complete(struct usb_hcd *hcd,
						struct usb_host_endpoint *ep)
{
	struct isp1760_hcd *priv = hcd_to_priv(hcd);
	struct isp1760_qh *qh = ep->hcpriv;
	unsigned long spinflags;

	if (!qh)
		return;

	spin_lock_irqsave(&priv->lock, spinflags);
	qh->tt_buffer_dirty = 0;
	schedule_ptds(hcd);
	spin_unlock_irqrestore(&priv->lock, spinflags);
}
static const struct hc_driver isp1760_hc_driver = {
	.description		= "isp1760-hcd",
	.product_desc		= "NXP ISP1760 USB Host Controller",
	.hcd_priv_size		= sizeof(struct isp1760_hcd),
	.irq			= isp1760_irq,
	.flags			= HCD_MEMORY | HCD_USB2,
	.reset			= isp1760_hc_setup,
	.start			= isp1760_run,
	.stop			= isp1760_stop,
	.shutdown		= isp1760_shutdown,
	.urb_enqueue		= isp1760_urb_enqueue,
	.urb_dequeue		= isp1760_urb_dequeue,
	.endpoint_disable	= isp1760_endpoint_disable,
	.get_frame_number	= isp1760_get_frame,
	.hub_status_data	= isp1760_hub_status_data,
	.hub_control		= isp1760_hub_control,
	.clear_tt_buffer_complete	= isp1760_clear_tt_buffer_complete,
};
int __init init_kmem_once(void)
{
	urb_listitem_cachep = kmem_cache_create("isp1760 urb_listitem",
			sizeof(struct urb_listitem), 0, SLAB_TEMPORARY |
			SLAB_MEM_SPREAD, NULL);
	if (!urb_listitem_cachep)
		return -ENOMEM;

	qtd_cachep = kmem_cache_create("isp1760_qtd",
			sizeof(struct isp1760_qtd), 0, SLAB_TEMPORARY |
			SLAB_MEM_SPREAD, NULL);
	if (!qtd_cachep) {
		/* tear down the caches created so far on failure */
		kmem_cache_destroy(urb_listitem_cachep);
		return -ENOMEM;
	}

	qh_cachep = kmem_cache_create("isp1760_qh", sizeof(struct isp1760_qh),
			0, SLAB_TEMPORARY | SLAB_MEM_SPREAD, NULL);
	if (!qh_cachep) {
		kmem_cache_destroy(qtd_cachep);
		kmem_cache_destroy(urb_listitem_cachep);
		return -ENOMEM;
	}

	return 0;
}
void deinit_kmem_cache(void)
{
	kmem_cache_destroy(qtd_cachep);
	kmem_cache_destroy(qh_cachep);
	kmem_cache_destroy(urb_listitem_cachep);
}
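/*
 * Hedged usage sketch (an assumption, not code from this file): the bus glue
 * is expected to pair these helpers in its module init/exit paths, roughly
 * as below; isp1760_init/isp1760_exit and the driver-registration placeholders
 * are illustrative only.
 */
#if 0
static int __init isp1760_init(void)
{
	int ret;

	ret = init_kmem_once();
	if (ret)
		return ret;
	/* ... register the platform/PCI glue drivers here ... */
	return 0;
}

static void __exit isp1760_exit(void)
{
	/* ... unregister the glue drivers here ... */
	deinit_kmem_cache();
}
#endif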
struct usb_hcd *isp1760_register(phys_addr_t res_start, resource_size_t res_len,
				 int irq, unsigned long irqflags,
				 struct device *dev, const char *busname,
				 unsigned int devflags)
{
	struct usb_hcd *hcd;
	struct isp1760_hcd *priv;
	int ret;

	if (usb_disabled())
		return ERR_PTR(-ENODEV);

	/* prevent usb-core allocating DMA pages */
	dev->dma_mask = NULL;

	hcd = usb_create_hcd(&isp1760_hc_driver, dev, dev_name(dev));
	if (!hcd)
		return ERR_PTR(-ENOMEM);

	priv = hcd_to_priv(hcd);
	priv->devflags = devflags;
	init_memory(priv);

	hcd->regs = ioremap(res_start, res_len);
	if (!hcd->regs) {
		ret = -EIO;
		goto err_put;
	}

	hcd->irq = irq;
	hcd->rsrc_start = res_start;
	hcd->rsrc_len = res_len;

	ret = usb_add_hcd(hcd, irq, irqflags);
	if (ret)
		goto err_unmap;

	return hcd;

err_unmap:
	iounmap(hcd->regs);

err_put:
	usb_put_hcd(hcd);

	return ERR_PTR(ret);
}
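/*
 * Hedged usage sketch (not part of this file): platform bus glue would
 * typically pull the memory window and IRQ from its platform device and hand
 * them straight to isp1760_register(); pdev, mem, irq, hcd and devflags
 * below are hypothetical names.
 */
#if 0
	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	hcd = isp1760_register(mem->start, resource_size(mem), irq,
			       IRQF_SHARED, &pdev->dev, dev_name(&pdev->dev),
			       devflags);
	if (IS_ERR(hcd))
		return PTR_ERR(hcd);
#endif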
MODULE_DESCRIPTION("Driver for the ISP1760 USB-controller from NXP");
MODULE_AUTHOR("Sebastian Siewior <bigeasy@linuxtronix.de>");
MODULE_LICENSE("GPL v2");