@@ -207,30 +207,32 @@ typedef struct dmadscr_s {
 	u64 pad_b;
 } dmadscr_t;
 
-static dmadscr_t page_descr[NR_CPUS] __attribute__((aligned(SMP_CACHE_BYTES)));
+static dmadscr_t page_descr[DM_NUM_CHANNELS] __attribute__((aligned(SMP_CACHE_BYTES)));
 
 void sb1_dma_init(void)
 {
-	int cpu = smp_processor_id();
-	u64 base_val = CPHYSADDR(&page_descr[cpu]) | V_DM_DSCR_BASE_RINGSZ(1);
+	int i;
 
-	__raw_writeq(base_val,
-		     IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
-	__raw_writeq(base_val | M_DM_DSCR_BASE_RESET,
-		     IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
-	__raw_writeq(base_val | M_DM_DSCR_BASE_ENABL,
-		     IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
+	for (i = 0; i < DM_NUM_CHANNELS; i++) {
+		u64 base_val = (u64)CPHYSADDR(&page_descr[i]) | V_DM_DSCR_BASE_RINGSZ(1);
+		void *base_reg = (void *)IOADDR(A_DM_REGISTER(i, R_DM_DSCR_BASE));
+
+		__raw_writeq(base_val, base_reg);
+		__raw_writeq(base_val | M_DM_DSCR_BASE_RESET, base_reg);
+		__raw_writeq(base_val | M_DM_DSCR_BASE_ENABL, base_reg);
+	}
 }
 
 void clear_page(void *page)
 {
-	int cpu = smp_processor_id();
+	u64 to_phys = (u64)CPHYSADDR(page);
+	unsigned int cpu = smp_processor_id();
 
-	/* if the page is above Kseg0, use old way */
+	/* if the page is not in KSEG0, use old way */
 	if ((long)KSEGX(page) != (long)CKSEG0)
 		return clear_page_cpu(page);
 
-	page_descr[cpu].dscr_a = CPHYSADDR(page) | M_DM_DSCRA_ZERO_MEM | M_DM_DSCRA_L2C_DEST | M_DM_DSCRA_INTERRUPT;
+	page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_ZERO_MEM | M_DM_DSCRA_L2C_DEST | M_DM_DSCRA_INTERRUPT;
 	page_descr[cpu].dscr_b = V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
 	__raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));
 
@@ -239,32 +241,32 @@ void clear_page(void *page)
 	 * reliable way to delay completion detection.
 	 */
 	while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
-		 M_DM_DSCR_BASE_INTERRUPT)))
+		 & M_DM_DSCR_BASE_INTERRUPT))
 		;
 	__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
 }
 
 void copy_page(void *to, void *from)
 {
-	unsigned long from_phys = CPHYSADDR(from);
-	unsigned long to_phys = CPHYSADDR(to);
-	int cpu = smp_processor_id();
+	u64 from_phys = (u64)CPHYSADDR(from);
+	u64 to_phys = (u64)CPHYSADDR(to);
+	unsigned int cpu = smp_processor_id();
 
-	/* if either page is above Kseg0, use old way */
+	/* if any page is not in KSEG0, use old way */
 	if ((long)KSEGX(to) != (long)CKSEG0
 	    || (long)KSEGX(from) != (long)CKSEG0)
 		return copy_page_cpu(to, from);
 
-	page_descr[cpu].dscr_a = CPHYSADDR(to_phys) | M_DM_DSCRA_L2C_DEST | M_DM_DSCRA_INTERRUPT;
-	page_descr[cpu].dscr_b = CPHYSADDR(from_phys) | V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
-	__raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));
+	page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_L2C_DEST | M_DM_DSCRA_INTERRUPT;
+	page_descr[cpu].dscr_b = from_phys | V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
+	__raw_writeq(1, (void *)IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));
 
 	/*
 	 * Don't really want to do it this way, but there's no
 	 * reliable way to delay completion detection.
 	 */
-	while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)) &
-		 M_DM_DSCR_BASE_INTERRUPT)))
+	while (!(__raw_readq((void *)IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
+		 & M_DM_DSCR_BASE_INTERRUPT))
		;
 	__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
 }
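
For reference, the per-channel sequence the patched code relies on is sketched below. This is an illustrative summary only, not part of the patch: dm_zero_page() is a hypothetical helper name, and the register and field macros (IOADDR, A_DM_REGISTER, R_DM_DSCR_*, M_DM_DSCRA_*, M_DM_DSCR_BASE_INTERRUPT, V_DM_DSCRB_SRC_LENGTH) are the SiByte data mover definitions already used throughout the patch.

/*
 * Illustrative sketch (not part of the patch): pushing one descriptor
 * through a SiByte data mover channel, mirroring clear_page() above.
 * dm_zero_page() is a hypothetical name used only for this example.
 */
static void dm_zero_page(unsigned int chan, void *page)
{
	/* Fill in the single ring slot owned by this channel (ring size 1). */
	page_descr[chan].dscr_a = CPHYSADDR(page) | M_DM_DSCRA_ZERO_MEM |
				  M_DM_DSCRA_L2C_DEST | M_DM_DSCRA_INTERRUPT;
	page_descr[chan].dscr_b = V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);

	/* Tell the engine one new descriptor is ready. */
	__raw_writeq(1, IOADDR(A_DM_REGISTER(chan, R_DM_DSCR_COUNT)));

	/* Busy-wait on the interrupt status bit, then read DSCR_BASE,
	 * as the patched clear_page()/copy_page() do on completion. */
	while (!(__raw_readq(IOADDR(A_DM_REGISTER(chan, R_DM_DSCR_BASE_DEBUG)))
		 & M_DM_DSCR_BASE_INTERRUPT))
		;
	__raw_readq(IOADDR(A_DM_REGISTER(chan, R_DM_DSCR_BASE)));
}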