@@ -39,9 +39,11 @@ struct l2x0_regs l2x0_saved_regs;
 
 struct l2x0_of_data {
 	void (*setup)(const struct device_node *, u32 *, u32 *);
 	void (*save)(void);
-	void (*resume)(void);
+	struct outer_cache_fns outer_cache;
 };
 
+static bool of_init = false;
+
 static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
 {
 	/* wait for cache operation by line or way to complete */
@@ -380,13 +382,15 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
 	/* Save the value for resuming. */
 	l2x0_saved_regs.aux_ctrl = aux;
 
-	outer_cache.inv_range = l2x0_inv_range;
-	outer_cache.clean_range = l2x0_clean_range;
-	outer_cache.flush_range = l2x0_flush_range;
-	outer_cache.sync = l2x0_cache_sync;
-	outer_cache.flush_all = l2x0_flush_all;
-	outer_cache.inv_all = l2x0_inv_all;
-	outer_cache.disable = l2x0_disable;
+	if (!of_init) {
+		outer_cache.inv_range = l2x0_inv_range;
+		outer_cache.clean_range = l2x0_clean_range;
+		outer_cache.flush_range = l2x0_flush_range;
+		outer_cache.sync = l2x0_cache_sync;
+		outer_cache.flush_all = l2x0_flush_all;
+		outer_cache.inv_all = l2x0_inv_all;
+		outer_cache.disable = l2x0_disable;
+	}
 
 	printk(KERN_INFO "%s cache controller enabled\n", type);
 	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
@@ -537,15 +541,34 @@ static void pl310_resume(void)
 }
 
 static const struct l2x0_of_data pl310_data = {
-	pl310_of_setup,
-	pl310_save,
-	pl310_resume,
+	.setup = pl310_of_setup,
+	.save = pl310_save,
+	.outer_cache = {
+		.resume = pl310_resume,
+		.inv_range = l2x0_inv_range,
+		.clean_range = l2x0_clean_range,
+		.flush_range = l2x0_flush_range,
+		.sync = l2x0_cache_sync,
+		.flush_all = l2x0_flush_all,
+		.inv_all = l2x0_inv_all,
+		.disable = l2x0_disable,
+		.set_debug = pl310_set_debug,
+	},
 };
 
 static const struct l2x0_of_data l2x0_data = {
-	l2x0_of_setup,
-	NULL,
-	l2x0_resume,
+	.setup = l2x0_of_setup,
+	.save = NULL,
+	.outer_cache = {
+		.resume = l2x0_resume,
+		.inv_range = l2x0_inv_range,
+		.clean_range = l2x0_clean_range,
+		.flush_range = l2x0_flush_range,
+		.sync = l2x0_cache_sync,
+		.flush_all = l2x0_flush_all,
+		.inv_all = l2x0_inv_all,
+		.disable = l2x0_disable,
+	},
 };
 
 static const struct of_device_id l2x0_ids[] __initconst = {
@@ -585,9 +608,11 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
 	if (data->save)
 		data->save();
 
+	of_init = true;
 	l2x0_init(l2x0_base, aux_val, aux_mask);
 
-	outer_cache.resume = data->resume;
+	memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache));
+
 	return 0;
 }
 #endif
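
For readers following the mechanics above, here is a minimal, self-contained sketch of the pattern the patch introduces: each variant carries a complete ops table inside its per-device data, a flag keeps the legacy init path from overwriting callbacks, and the DT path installs the whole table with a single memcpy(). Every identifier below (struct ops, struct variant_data, dt_init_path(), and so on) is hypothetical plain C written only to mirror the shape of outer_cache_fns, l2x0_of_data, and l2x0_of_init(); it is not kernel code.

/*
 * Hypothetical userspace sketch of the patch's pattern; none of these
 * names exist in the kernel sources.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct ops {				/* stands in for outer_cache_fns */
	void (*flush_all)(void);
	void (*resume)(void);
};

struct variant_data {			/* stands in for l2x0_of_data */
	const char *name;
	struct ops ops;			/* complete ops table per variant */
};

static struct ops global_ops;		/* stands in for outer_cache */
static bool dt_init;			/* stands in for of_init */

static void generic_flush_all(void) { puts("generic flush_all"); }
static void variant_resume(void)    { puts("variant resume"); }

static const struct variant_data pl310_like = {
	.name = "pl310-like",
	.ops = {
		.flush_all = generic_flush_all,
		.resume = variant_resume,
	},
};

/* Legacy path: fill the global table only if the DT path has not run. */
static void legacy_init(void)
{
	if (!dt_init)
		global_ops.flush_all = generic_flush_all;
}

/*
 * DT path: raise the flag, run the shared init, then install the
 * variant's ops table wholesale.
 */
static void dt_init_path(const struct variant_data *data)
{
	dt_init = true;
	legacy_init();
	memcpy(&global_ops, &data->ops, sizeof(global_ops));
	printf("%s ops installed\n", data->name);
}

int main(void)
{
	dt_init_path(&pl310_like);
	global_ops.flush_all();
	global_ops.resume();
	return 0;
}

The ordering mirrors the hunk in l2x0_of_init(): of_init is raised before the shared init runs, so the conditional assignments in l2x0_init() are skipped, and the per-variant table (including .resume, plus .set_debug for the PL310) then replaces the defaults in one copy.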