/*
 * General Purpose functions for the global management of the
 * Communication Processor Module.
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 *
 * In addition to the individual control of the communication
 * channels, there are a few functions that globally affect the
 * communication processor.
 *
 * Buffer descriptors must be allocated from the dual ported memory
 * space. The allocator for that is here. When the communication
 * process is reset, we reclaim the memory available. There is
 * currently no deallocator for this memory.
 * The amount of space available is platform dependent. On the
 * MBX, the EPPC software loads additional microcode into the
 * communication processor, and uses some of the DP ram for this
 * purpose. Currently, the first 512 bytes and the last 256 bytes of
 * memory are used. Right now I am conservative and only use the
 * memory that can never be used for microcode. If there are
 * applications that require more DP ram, we can expand the boundaries
 * but then we have to be careful of any downloaded microcode.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>

#include <asm/mpc8xx.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/8xx_immap.h>
#include <asm/commproc.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
#include <asm/rheap.h>
#include <asm/prom.h>
#include <asm/cpm.h>
#include <asm/fs_pd.h>
#define CPM_MAP_SIZE	(0x4000)

#ifndef CONFIG_PPC_CPM_NEW_BINDING
static void m8xx_cpm_dpinit(void);
#endif
static uint host_buffer;	/* One page of host buffer */
static uint host_end;		/* end + 1 */
cpm8xx_t __iomem *cpmp;		/* Pointer to comm processor space */
immap_t __iomem *mpc8xx_immr;
static cpic8xx_t __iomem *cpic_reg;

static struct irq_host *cpm_pic_host;
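/*
 * Low-level handlers for the CPM interrupt controller (CPIC):
 * masking and unmasking set or clear the per-vector bit in CIMR,
 * and end-of-interrupt writes the vector bit to CISR to clear the
 * in-service condition.
 */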
static void cpm_mask_irq(unsigned int irq)
{
	unsigned int cpm_vec = (unsigned int)irq_map[irq].hwirq;

	clrbits32(&cpic_reg->cpic_cimr, (1 << cpm_vec));
}

static void cpm_unmask_irq(unsigned int irq)
{
	unsigned int cpm_vec = (unsigned int)irq_map[irq].hwirq;

	setbits32(&cpic_reg->cpic_cimr, (1 << cpm_vec));
}

static void cpm_end_irq(unsigned int irq)
{
	unsigned int cpm_vec = (unsigned int)irq_map[irq].hwirq;

	out_be32(&cpic_reg->cpic_cisr, (1 << cpm_vec));
}

static struct irq_chip cpm_pic = {
	.typename = " CPM PIC ",
	.mask = cpm_mask_irq,
	.unmask = cpm_unmask_irq,
	.eoi = cpm_end_irq,
};
int cpm_get_irq(void)
{
	int cpm_vec;

	/* Get the vector by setting the ACK bit and then reading
	 * the register.
	 */
	out_be16(&cpic_reg->cpic_civr, 1);
	cpm_vec = in_be16(&cpic_reg->cpic_civr);
	cpm_vec >>= 11;

	return irq_linear_revmap(cpm_pic_host, cpm_vec);
}

static int cpm_pic_host_map(struct irq_host *h, unsigned int virq,
			    irq_hw_number_t hw)
{
	pr_debug("cpm_pic_host_map(%d, 0x%lx)\n", virq, hw);

	get_irq_desc(virq)->status |= IRQ_LEVEL;
	set_irq_chip_and_handler(virq, &cpm_pic, handle_fasteoi_irq);
	return 0;
}
/* The CPM can generate the error interrupt when there is a race condition
 * between generating and masking interrupts. All we have to do is ACK it
 * and return. This is a no-op function so we don't need any special
 * tests in the interrupt handler.
 */
static irqreturn_t cpm_error_interrupt(int irq, void *dev)
{
	return IRQ_HANDLED;
}

static struct irqaction cpm_error_irqaction = {
	.handler = cpm_error_interrupt,
	.mask = CPU_MASK_NONE,
	.name = "error",
};

static struct irq_host_ops cpm_pic_host_ops = {
	.map = cpm_pic_host_map,
};
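/*
 * Set up the CPM interrupt controller: locate the PIC node in the
 * device tree, map its registers, program CICR with the SCC routing,
 * the interrupt request level derived from the cascade interrupt and
 * the highest-priority mask, create a linear irq host for the 64 CPM
 * vectors, install a handler for the CPM error interrupt, and enable
 * interrupts via CICR[IEN].  Returns the virq of the cascade interrupt,
 * or NO_IRQ on failure.
 */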
unsigned int cpm_pic_init(void)
{
	struct device_node *np = NULL;
	struct resource res;
	unsigned int sirq = NO_IRQ, hwirq, eirq;
	int ret;

	pr_debug("cpm_pic_init\n");

	np = of_find_compatible_node(NULL, NULL, "fsl,cpm1-pic");
	if (np == NULL)
		np = of_find_compatible_node(NULL, "cpm-pic", "CPM");
	if (np == NULL) {
		printk(KERN_ERR "CPM PIC init: can not find cpm-pic node\n");
		return sirq;
	}

	ret = of_address_to_resource(np, 0, &res);
	if (ret)
		goto end;

	cpic_reg = ioremap(res.start, res.end - res.start + 1);
	if (cpic_reg == NULL)
		goto end;

	sirq = irq_of_parse_and_map(np, 0);
	if (sirq == NO_IRQ)
		goto end;

	/* Initialize the CPM interrupt controller. */
	hwirq = (unsigned int)irq_map[sirq].hwirq;
	out_be32(&cpic_reg->cpic_cicr,
		 (CICR_SCD_SCC4 | CICR_SCC_SCC3 | CICR_SCB_SCC2 | CICR_SCA_SCC1) |
		 ((hwirq/2) << 13) | CICR_HP_MASK);
	out_be32(&cpic_reg->cpic_cimr, 0);

	cpm_pic_host = irq_alloc_host(of_node_get(np), IRQ_HOST_MAP_LINEAR,
				      64, &cpm_pic_host_ops, 64);
	if (cpm_pic_host == NULL) {
		printk(KERN_ERR "CPM PIC: failed to allocate irq host!\n");
		sirq = NO_IRQ;
		goto end;
	}

	/* Install our own error handler. */
	np = of_find_compatible_node(NULL, NULL, "fsl,cpm1");
	if (np == NULL)
		np = of_find_node_by_type(NULL, "cpm");
	if (np == NULL) {
		printk(KERN_ERR "CPM PIC init: can not find cpm node\n");
		goto end;
	}

	eirq = irq_of_parse_and_map(np, 0);
	if (eirq == NO_IRQ)
		goto end;

	if (setup_irq(eirq, &cpm_error_irqaction))
		printk(KERN_ERR "Could not allocate CPM error IRQ!\n");

	setbits32(&cpic_reg->cpic_cicr, CICR_IEN);

end:
	of_node_put(np);
	return sirq;
}
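/*
 * Global reset of the CPM: map the IMMR, issue the RST command through
 * CPCR and wait for it to complete, optionally load a microcode patch,
 * set the SDMA bus request priority, and initialize the dual-ported
 * RAM allocator.
 */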
void __init cpm_reset(void)
{
	sysconf8xx_t __iomem *siu_conf;

	mpc8xx_immr = ioremap(get_immrbase(), 0x4000);
	if (!mpc8xx_immr) {
		printk(KERN_CRIT "Could not map IMMR\n");
		return;
	}

	cpmp = &mpc8xx_immr->im_cpm;

#ifndef CONFIG_PPC_EARLY_DEBUG_CPM
	/* Perform a reset. */
	out_be16(&cpmp->cp_cpcr, CPM_CR_RST | CPM_CR_FLG);

	/* Wait for it. */
	while (in_be16(&cpmp->cp_cpcr) & CPM_CR_FLG);
#endif

#ifdef CONFIG_UCODE_PATCH
	cpm_load_patch(cpmp);
#endif

	/* Set SDMA Bus Request priority 5.
	 * On 860T, this also enables FEC priority 6. I am not sure
	 * this is what we really want for some applications, but the
	 * manual recommends it.
	 * Bit 25, FAM can also be set to use FEC aggressive mode (860T).
	 */
	siu_conf = immr_map(im_siu_conf);
	out_be32(&siu_conf->sc_sdcr, 1);
	immr_unmap(siu_conf);

#ifdef CONFIG_PPC_CPM_NEW_BINDING
	cpm_muram_init();
#else
	/* Reclaim the DP memory for our use. */
	m8xx_cpm_dpinit();
#endif
}
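/*
 * cmd_lock serializes access to the CP command register.  cpm_command()
 * writes the command with CPM_CR_FLG set and busy-waits (bounded by
 * MAX_CR_CMD_LOOPS) for the CPM to clear the flag; it returns -EIO if
 * the command never completes and -EINVAL for a malformed command word.
 */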
static DEFINE_SPINLOCK(cmd_lock);

#define MAX_CR_CMD_LOOPS	10000

int cpm_command(u32 command, u8 opcode)
{
	int i, ret;
	unsigned long flags;

	if (command & 0xffffff0f)
		return -EINVAL;

	spin_lock_irqsave(&cmd_lock, flags);

	ret = 0;
	out_be16(&cpmp->cp_cpcr, command | CPM_CR_FLG | (opcode << 8));
	for (i = 0; i < MAX_CR_CMD_LOOPS; i++)
		if ((in_be16(&cpmp->cp_cpcr) & CPM_CR_FLG) == 0)
			goto out;

	printk(KERN_ERR "%s(): Not able to issue CPM command\n", __FUNCTION__);
	ret = -EIO;
out:
	spin_unlock_irqrestore(&cmd_lock, flags);
	return ret;
}
EXPORT_SYMBOL(cpm_command);
/* We used to do this earlier, but have to postpone as long as possible
 * to ensure the kernel VM is now running.
 */
static void
alloc_host_memory(void)
{
	dma_addr_t physaddr;

	/* Set the host page for allocation. */
	host_buffer = (uint)dma_alloc_coherent(NULL, PAGE_SIZE, &physaddr,
					       GFP_KERNEL);
	host_end = host_buffer + PAGE_SIZE;
}
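/*
 * Note that only the kernel virtual address of the coherent page is kept;
 * the DMA handle returned in physaddr is not stored, so callers of
 * m8xx_cpm_hostalloc() below work purely with virtual addresses.
 */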
/* We also own one page of host buffer space for the allocation of
 * UART "fifos" and the like.
 */
uint
m8xx_cpm_hostalloc(uint size)
{
	uint retloc;

	if (host_buffer == 0)
		alloc_host_memory();

	if ((host_buffer + size) >= host_end)
		return(0);

	retloc = host_buffer;
	host_buffer += size;

	return(retloc);
}
/* Set a baud rate generator. This needs lots of work. There are
 * four BRGs, any of which can be wired to any channel.
 * The internal baud rate clock is the system clock divided by 16.
 * This assumes the baudrate is 16x oversampled by the uart.
 */
#define BRG_INT_CLK		(get_brgfreq())
#define BRG_UART_CLK		(BRG_INT_CLK/16)
#define BRG_UART_CLK_DIV16	(BRG_UART_CLK/16)
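/*
 * Worked example (illustrative figures, not a fixed platform value):
 * with a 49.152 MHz BRG input clock, BRG_UART_CLK is 3.072 MHz, so a
 * 9600 baud SMC needs a divider of 3072000/9600 - 1 = 319.  That fits
 * in the 12-bit counter, so the BRG is programmed with (319 << 1) and
 * CPM_BRG_EN, without CPM_BRG_DIV16.
 */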
void
cpm_setbrg(uint brg, uint rate)
{
	u32 __iomem *bp;

	/* This is good enough to get SMCs running..... */
	bp = &cpmp->cp_brgc1;
	bp += brg;

	/* The BRG has a 12-bit counter. For really slow baud rates (or
	 * really fast processors), we may have to further divide by 16.
	 */
	if (((BRG_UART_CLK / rate) - 1) < 4096)
		out_be32(bp, (((BRG_UART_CLK / rate) - 1) << 1) | CPM_BRG_EN);
	else
		out_be32(bp, (((BRG_UART_CLK_DIV16 / rate) - 1) << 1) |
			 CPM_BRG_EN | CPM_BRG_DIV16);
}
#ifndef CONFIG_PPC_CPM_NEW_BINDING
/*
 * dpalloc / dpfree bits.
 */
static spinlock_t cpm_dpmem_lock;
/*
 * 16 blocks should be enough to satisfy all requests
 * until the memory subsystem goes up...
 */
static rh_block_t cpm_boot_dpmem_rh_block[16];
static rh_info_t cpm_dpmem_info;

#define CPM_DPMEM_ALIGNMENT	8
static u8 __iomem *dpram_vbase;
static phys_addr_t dpram_pbase;

static void m8xx_cpm_dpinit(void)
{
	spin_lock_init(&cpm_dpmem_lock);

	dpram_vbase = cpmp->cp_dpmem;
	dpram_pbase = get_immrbase() + offsetof(immap_t, im_cpm.cp_dpmem);

	/* Initialize the info header */
	rh_init(&cpm_dpmem_info, CPM_DPMEM_ALIGNMENT,
		sizeof(cpm_boot_dpmem_rh_block) /
		sizeof(cpm_boot_dpmem_rh_block[0]),
		cpm_boot_dpmem_rh_block);

	/*
	 * Attach the usable dpmem area.
	 * XXX: This is actually crap. CPM_DATAONLY_BASE and
	 * CPM_DATAONLY_SIZE are a subset of the available dpram. It varies
	 * with the processor and the microcode patches applied / activated.
	 * But the following should be at least safe.
	 */
	rh_attach_region(&cpm_dpmem_info, CPM_DATAONLY_BASE, CPM_DATAONLY_SIZE);
}
/*
 * Allocate the requested size worth of DP memory.
 * This function returns an offset into the DPRAM area.
 * Use cpm_dpram_addr() to get the virtual address of the area.
 */
unsigned long cpm_dpalloc(uint size, uint align)
{
	unsigned long start;
	unsigned long flags;

	spin_lock_irqsave(&cpm_dpmem_lock, flags);
	cpm_dpmem_info.alignment = align;
	start = rh_alloc(&cpm_dpmem_info, size, "commproc");
	spin_unlock_irqrestore(&cpm_dpmem_lock, flags);

	return (uint)start;
}
EXPORT_SYMBOL(cpm_dpalloc);

int cpm_dpfree(unsigned long offset)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&cpm_dpmem_lock, flags);
	ret = rh_free(&cpm_dpmem_info, offset);
	spin_unlock_irqrestore(&cpm_dpmem_lock, flags);

	return ret;
}
EXPORT_SYMBOL(cpm_dpfree);

unsigned long cpm_dpalloc_fixed(unsigned long offset, uint size, uint align)
{
	unsigned long start;
	unsigned long flags;

	spin_lock_irqsave(&cpm_dpmem_lock, flags);
	cpm_dpmem_info.alignment = align;
	start = rh_alloc_fixed(&cpm_dpmem_info, offset, size, "commproc");
	spin_unlock_irqrestore(&cpm_dpmem_lock, flags);

	return start;
}
EXPORT_SYMBOL(cpm_dpalloc_fixed);

void cpm_dpdump(void)
{
	rh_dump(&cpm_dpmem_info);
}
EXPORT_SYMBOL(cpm_dpdump);

void *cpm_dpram_addr(unsigned long offset)
{
	return (void *)(dpram_vbase + offset);
}
EXPORT_SYMBOL(cpm_dpram_addr);

uint cpm_dpram_phys(u8 *addr)
{
	return (dpram_pbase + (uint)(addr - dpram_vbase));
}
EXPORT_SYMBOL(cpm_dpram_phys);
#endif /* !CONFIG_PPC_CPM_NEW_BINDING */
struct cpm_ioport16 {
	__be16 dir, par, odr_sor, dat, intr;
	__be16 res[3];
};

struct cpm_ioport32 {
	__be32 dir, par, sor;
};
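/*
 * On the 8xx the parallel I/O ports come in two flavours: ports A, C and D
 * are 16 bits wide and live in the SIU I/O port block (im_ioport), while
 * ports B and E are 32 bits wide and live in the CPM register block.  The
 * helpers below pick the right layout and set the direction, pin-assignment,
 * open-drain and (where present) secondary-option bits for a single pin.
 */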
static void cpm1_set_pin32(int port, int pin, int flags)
{
	struct cpm_ioport32 __iomem *iop;

	pin = 1 << (31 - pin);

	if (port == CPM_PORTB)
		iop = (struct cpm_ioport32 __iomem *)
		      &mpc8xx_immr->im_cpm.cp_pbdir;
	else
		iop = (struct cpm_ioport32 __iomem *)
		      &mpc8xx_immr->im_cpm.cp_pedir;

	if (flags & CPM_PIN_OUTPUT)
		setbits32(&iop->dir, pin);
	else
		clrbits32(&iop->dir, pin);

	if (!(flags & CPM_PIN_GPIO))
		setbits32(&iop->par, pin);
	else
		clrbits32(&iop->par, pin);

	if (port == CPM_PORTB) {
		if (flags & CPM_PIN_OPENDRAIN)
			setbits16(&mpc8xx_immr->im_cpm.cp_pbodr, pin);
		else
			clrbits16(&mpc8xx_immr->im_cpm.cp_pbodr, pin);
	}

	if (port == CPM_PORTE) {
		if (flags & CPM_PIN_SECONDARY)
			setbits32(&iop->sor, pin);
		else
			clrbits32(&iop->sor, pin);

		if (flags & CPM_PIN_OPENDRAIN)
			setbits32(&mpc8xx_immr->im_cpm.cp_peodr, pin);
		else
			clrbits32(&mpc8xx_immr->im_cpm.cp_peodr, pin);
	}
}
static void cpm1_set_pin16(int port, int pin, int flags)
{
	struct cpm_ioport16 __iomem *iop =
		(struct cpm_ioport16 __iomem *)&mpc8xx_immr->im_ioport;

	pin = 1 << (15 - pin);

	if (port != 0)
		iop += port - 1;

	if (flags & CPM_PIN_OUTPUT)
		setbits16(&iop->dir, pin);
	else
		clrbits16(&iop->dir, pin);

	if (!(flags & CPM_PIN_GPIO))
		setbits16(&iop->par, pin);
	else
		clrbits16(&iop->par, pin);

	if (port == CPM_PORTA) {
		if (flags & CPM_PIN_OPENDRAIN)
			setbits16(&iop->odr_sor, pin);
		else
			clrbits16(&iop->odr_sor, pin);
	}
	if (port == CPM_PORTC) {
		if (flags & CPM_PIN_SECONDARY)
			setbits16(&iop->odr_sor, pin);
		else
			clrbits16(&iop->odr_sor, pin);
	}
}
void cpm1_set_pin(enum cpm_port port, int pin, int flags)
{
	if (port == CPM_PORTB || port == CPM_PORTE)
		cpm1_set_pin32(port, pin, flags);
	else
		cpm1_set_pin16(port, pin, flags);
}
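/*
 * Route a clock source (BRG1-4 or an external CLK pin) to an SCC or SMC.
 * SCC routing lives in SICR (one byte per SCC, with separate 3-bit receive
 * and transmit clock-source fields), SMC routing lives in SIMODE.  clk_map
 * lists the valid target/clock pairs and the 3-bit selector value that
 * encodes each one.
 */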
int cpm1_clk_setup(enum cpm_clk_target target, int clock, int mode)
{
	int shift;
	int i, bits = 0;
	u32 __iomem *reg;
	u32 mask = 7;

	u8 clk_map[][3] = {
		{CPM_CLK_SCC1, CPM_BRG1, 0},
		{CPM_CLK_SCC1, CPM_BRG2, 1},
		{CPM_CLK_SCC1, CPM_BRG3, 2},
		{CPM_CLK_SCC1, CPM_BRG4, 3},
		{CPM_CLK_SCC1, CPM_CLK1, 4},
		{CPM_CLK_SCC1, CPM_CLK2, 5},
		{CPM_CLK_SCC1, CPM_CLK3, 6},
		{CPM_CLK_SCC1, CPM_CLK4, 7},

		{CPM_CLK_SCC2, CPM_BRG1, 0},
		{CPM_CLK_SCC2, CPM_BRG2, 1},
		{CPM_CLK_SCC2, CPM_BRG3, 2},
		{CPM_CLK_SCC2, CPM_BRG4, 3},
		{CPM_CLK_SCC2, CPM_CLK1, 4},
		{CPM_CLK_SCC2, CPM_CLK2, 5},
		{CPM_CLK_SCC2, CPM_CLK3, 6},
		{CPM_CLK_SCC2, CPM_CLK4, 7},

		{CPM_CLK_SCC3, CPM_BRG1, 0},
		{CPM_CLK_SCC3, CPM_BRG2, 1},
		{CPM_CLK_SCC3, CPM_BRG3, 2},
		{CPM_CLK_SCC3, CPM_BRG4, 3},
		{CPM_CLK_SCC3, CPM_CLK5, 4},
		{CPM_CLK_SCC3, CPM_CLK6, 5},
		{CPM_CLK_SCC3, CPM_CLK7, 6},
		{CPM_CLK_SCC3, CPM_CLK8, 7},

		{CPM_CLK_SCC4, CPM_BRG1, 0},
		{CPM_CLK_SCC4, CPM_BRG2, 1},
		{CPM_CLK_SCC4, CPM_BRG3, 2},
		{CPM_CLK_SCC4, CPM_BRG4, 3},
		{CPM_CLK_SCC4, CPM_CLK5, 4},
		{CPM_CLK_SCC4, CPM_CLK6, 5},
		{CPM_CLK_SCC4, CPM_CLK7, 6},
		{CPM_CLK_SCC4, CPM_CLK8, 7},

		{CPM_CLK_SMC1, CPM_BRG1, 0},
		{CPM_CLK_SMC1, CPM_BRG2, 1},
		{CPM_CLK_SMC1, CPM_BRG3, 2},
		{CPM_CLK_SMC1, CPM_BRG4, 3},
		{CPM_CLK_SMC1, CPM_CLK1, 4},
		{CPM_CLK_SMC1, CPM_CLK2, 5},
		{CPM_CLK_SMC1, CPM_CLK3, 6},
		{CPM_CLK_SMC1, CPM_CLK4, 7},

		{CPM_CLK_SMC2, CPM_BRG1, 0},
		{CPM_CLK_SMC2, CPM_BRG2, 1},
		{CPM_CLK_SMC2, CPM_BRG3, 2},
		{CPM_CLK_SMC2, CPM_BRG4, 3},
		{CPM_CLK_SMC2, CPM_CLK5, 4},
		{CPM_CLK_SMC2, CPM_CLK6, 5},
		{CPM_CLK_SMC2, CPM_CLK7, 6},
		{CPM_CLK_SMC2, CPM_CLK8, 7},
	};

	switch (target) {
	case CPM_CLK_SCC1:
		reg = &mpc8xx_immr->im_cpm.cp_sicr;
		shift = 0;
		break;

	case CPM_CLK_SCC2:
		reg = &mpc8xx_immr->im_cpm.cp_sicr;
		shift = 8;
		break;

	case CPM_CLK_SCC3:
		reg = &mpc8xx_immr->im_cpm.cp_sicr;
		shift = 16;
		break;

	case CPM_CLK_SCC4:
		reg = &mpc8xx_immr->im_cpm.cp_sicr;
		shift = 24;
		break;

	case CPM_CLK_SMC1:
		reg = &mpc8xx_immr->im_cpm.cp_simode;
		shift = 12;
		break;

	case CPM_CLK_SMC2:
		reg = &mpc8xx_immr->im_cpm.cp_simode;
		shift = 28;
		break;

	default:
		printk(KERN_ERR "cpm1_clk_setup: invalid clock target\n");
		return -EINVAL;
	}

	if (reg == &mpc8xx_immr->im_cpm.cp_sicr && mode == CPM_CLK_RX)
		shift += 3;

	for (i = 0; i < ARRAY_SIZE(clk_map); i++) {
		if (clk_map[i][0] == target && clk_map[i][1] == clock) {
			bits = clk_map[i][2];
			break;
		}
	}
	if (i == ARRAY_SIZE(clk_map)) {
		printk(KERN_ERR "cpm1_clk_setup: invalid clock combination\n");
		return -EINVAL;
	}

	bits <<= shift;
	mask <<= shift;
	out_be32(reg, (in_be32(reg) & ~mask) | bits);

	return 0;
}