Rename .data.read_mostly to .data..read_mostly

Signed-off-by: Denys Vlasenko <vda.linux@googlemail.com>
Signed-off-by: Michal Marek <mmarek@suse.cz>
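---
Notes, not for the commit message: gcc's -ffunction-sections and
-fdata-sections place each symbol in its own .text.<symbol> or
.data.<symbol> section, so kernel section names containing a single
dot can collide with compiler-generated ones; the double dot moves
kernel-defined sections out of that namespace. As a minimal sketch of
the kind of data that lands in the renamed section (the variable below
is hypothetical; __read_mostly itself comes from <linux/cache.h>):

	#include <linux/cache.h>	/* per-arch __read_mostly definition */

	/*
	 * Set once at boot, read constantly on hot paths: __read_mostly
	 * emits the variable into .data..read_mostly, grouping it with
	 * other rarely written data so readers do not suffer false
	 * sharing with frequently written variables.
	 */
	static unsigned int example_threshold __read_mostly = 128;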
--- a/arch/ia64/include/asm/cache.h
+++ b/arch/ia64/include/asm/cache.h
@@ -24,6 +24,6 @@
# define SMP_CACHE_BYTES (1 << 3)
#endif
-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))
#endif /* _ASM_IA64_CACHE_H */
--- a/arch/ia64/kernel/paravirtentry.S
+++ b/arch/ia64/kernel/paravirtentry.S
@@ -28,7 +28,7 @@
#include "entry.h"
#define DATA8(sym, init_value) \
- .pushsection .data.read_mostly ; \
+ .pushsection .data..read_mostly ; \
.align 8 ; \
.global sym ; \
sym: ; \
--- a/arch/ia64/xen/xensetup.S
+++ b/arch/ia64/xen/xensetup.S
@@ -14,7 +14,7 @@
#include <linux/init.h>
#include <xen/interface/elfnote.h>
- .section .data.read_mostly
+ .section .data..read_mostly
.align 8
.global xen_domain_type
xen_domain_type:
--- a/arch/parisc/include/asm/cache.h
+++ b/arch/parisc/include/asm/cache.h
#define SMP_CACHE_BYTES L1_CACHE_BYTES

-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))

void parisc_cache_init(void); /* initializes cache-flushing */
void disable_sr_hashing_asm(int); /* low level support for above */
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -345,7 +345,7 @@ smp_slave_stext:
ENDPROC(stext)
#ifndef CONFIG_64BIT
- .section .data.read_mostly
+ .section .data..read_mostly

.align 4
.export $global$,data
--- a/arch/powerpc/include/asm/cache.h
+++ b/arch/powerpc/include/asm/cache.h
@@ -38,7 +38,7 @@ extern struct ppc64_caches ppc64_caches;
#endif /* __powerpc64__ && ! __ASSEMBLY__ */
#if !defined(__ASSEMBLY__)
-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))
#endif

#endif /* __KERNEL__ */
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -233,7 +233,7 @@ SECTIONS
CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
}
- .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
+ .data..read_mostly : AT(ADDR(.data..read_mostly) - LOAD_OFFSET) {
READ_MOSTLY_DATA(L1_CACHE_BYTES)
}
--- a/arch/s390/include/asm/cache.h
+++ b/arch/s390/include/asm/cache.h
@@ -14,6 +14,6 @@
#define L1_CACHE_BYTES 256
#define L1_CACHE_SHIFT 8

-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))

#endif
--- a/arch/sh/include/asm/cache.h
+++ b/arch/sh/include/asm/cache.h
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)

-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))

#ifndef __ASSEMBLY__
struct cache_info {
--- a/arch/sparc/include/asm/cache.h
+++ b/arch/sparc/include/asm/cache.h
@@ -19,7 +19,7 @@
#define SMP_CACHE_BYTES (1 << SMP_CACHE_BYTES_SHIFT)

-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))

#ifdef CONFIG_SPARC32
#include <asm/asi.h>
--- a/arch/x86/include/asm/cache.h
+++ b/arch/x86/include/asm/cache.h
@@ -7,7 +7,7 @@
#define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)

-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))

#define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -185,7 +185,7 @@
#define READ_MOSTLY_DATA(align) \
. = ALIGN(align); \
- *(.data.read_mostly)
+ *(.data..read_mostly)
#define CACHELINE_ALIGNED_DATA(align) \