powerpc: Merge byteorder.h

Essentially adopts the 64-bit version of this file.  The 32-bit version had
been using unsigned ints for arguments/return values that were actually
only 16 bits - the new file uses __u16 for these items, as in the 64-bit
version of the header.  The order of some of the asm constraints
in the 64-bit version was slightly different from the 32-bit version,
but they produce identical code.
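
As an illustration of the prototype change, here is a minimal, portable
sketch (not the kernel's code): the merged header types the 16-bit swap as
__u16 -> __u16, where the old 32-bit header used plain unsigned int.  Below,
uint16_t stands in for __u16, and swab16() is a hypothetical stand-in for
the rlwimi-based ___arch__swab16().

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

/* uint16_t stands in for the kernel's __u16. */
static uint16_t swab16(uint16_t value)
{
	/* Exchange the two bytes: 0x1234 -> 0x3412. */
	return (uint16_t)((value >> 8) | (value << 8));
}

int main(void)
{
	assert(swab16(0x1234) == 0x3412);
	/* With the old "unsigned int" prototype, a caller could pass a
	 * 32-bit value and silently rely on truncation; __u16 makes the
	 * 16-bit contract explicit at the interface. */
	printf("0x%04x\n", swab16(0x1234));
	return 0;
}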

Signed-off-by: Becky Bruce <becky.bruce@freescale.com>
Signed-off-by: Kumar Gala <kumar.gala@freescale.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Becky Bruce, 20 years ago
parent commit a559c91d77
2 changed files with 7 additions and 80 deletions
  1. include/asm-powerpc/byteorder.h (+7, -4)
  2. include/asm-ppc/byteorder.h (+0, -76)

+ 7 - 4
include/asm-ppc64/byteorder.h → include/asm-powerpc/byteorder.h

@@ -1,5 +1,5 @@
-#ifndef _PPC64_BYTEORDER_H
-#define _PPC64_BYTEORDER_H
+#ifndef _ASM_POWERPC_BYTEORDER_H
+#define _ASM_POWERPC_BYTEORDER_H
 
 /*
  * This program is free software; you can redistribute it and/or
@@ -77,10 +77,13 @@ static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 value)
 
 #ifndef __STRICT_ANSI__
 #define __BYTEORDER_HAS_U64__
-#endif
+#ifndef __powerpc64__
+#define __SWAB_64_THRU_32__
+#endif /* __powerpc64__ */
+#endif /* __STRICT_ANSI__ */
 
 #endif /* __GNUC__ */
 
 #include <linux/byteorder/big_endian.h>
 
-#endif /* _PPC64_BYTEORDER_H */
+#endif /* _ASM_POWERPC_BYTEORDER_H */
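
The new hunk defines __SWAB_64_THRU_32__ on 32-bit builds only.  In the
generic byteorder code this macro causes the 64-bit swap to be synthesized
from two 32-bit swaps, since 32-bit PowerPC has byte-reversed 32-bit
loads/stores but no 64-bit variant.  Here is a portable sketch of that
mechanism; swab32()/swab64() are local stand-ins, not the kernel's
__swab32/__swab64.

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

static uint32_t swab32(uint32_t v)
{
	return (v >> 24) | ((v >> 8) & 0x0000ff00u) |
	       ((v << 8) & 0x00ff0000u) | (v << 24);
}

static uint64_t swab64(uint64_t v)
{
	/* Exchange the 32-bit halves, byte-swapping each one. */
	return ((uint64_t)swab32((uint32_t)v) << 32) |
	       swab32((uint32_t)(v >> 32));
}

int main(void)
{
	assert(swab64(0x0102030405060708ULL) == 0x0807060504030201ULL);
	printf("%016llx\n",
	       (unsigned long long)swab64(0x0102030405060708ULL));
	return 0;
}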

+ 0 - 76
include/asm-ppc/byteorder.h

@@ -1,76 +0,0 @@
-#ifndef _PPC_BYTEORDER_H
-#define _PPC_BYTEORDER_H
-
-#include <asm/types.h>
-#include <linux/compiler.h>
-
-#ifdef __GNUC__
-#ifdef __KERNEL__
-
-extern __inline__ unsigned ld_le16(const volatile unsigned short *addr)
-{
-	unsigned val;
-
-	__asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
-	return val;
-}
-
-extern __inline__ void st_le16(volatile unsigned short *addr, const unsigned val)
-{
-	__asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
-}
-
-extern __inline__ unsigned ld_le32(const volatile unsigned *addr)
-{
-	unsigned val;
-
-	__asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
-	return val;
-}
-
-extern __inline__ void st_le32(volatile unsigned *addr, const unsigned val)
-{
-	__asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
-}
-
-static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 value)
-{
-	__u16 result;
-
-	__asm__("rlwimi %0,%2,8,16,23" : "=&r" (result) : "0" (value >> 8), "r" (value));
-	return result;
-}
-
-static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 value)
-{
-	__u32 result;
-
-	__asm__("rlwimi %0,%2,24,16,23" : "=&r" (result) : "0" (value>>24), "r" (value));
-	__asm__("rlwimi %0,%2,8,8,15"   : "=&r" (result) : "0" (result),    "r" (value));
-	__asm__("rlwimi %0,%2,24,0,7"   : "=&r" (result) : "0" (result),    "r" (value));
-
-	return result;
-}
-#define __arch__swab32(x) ___arch__swab32(x)
-#define __arch__swab16(x) ___arch__swab16(x)
-
-/* The same, but returns converted value from the location pointer by addr. */
-#define __arch__swab16p(addr) ld_le16(addr)
-#define __arch__swab32p(addr) ld_le32(addr)
-
-/* The same, but do the conversion in situ, ie. put the value back to addr. */
-#define __arch__swab16s(addr) st_le16(addr,*addr)
-#define __arch__swab32s(addr) st_le32(addr,*addr)
-
-#endif /* __KERNEL__ */
-
-#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
-#  define __BYTEORDER_HAS_U64__
-#  define __SWAB_64_THRU_32__
-#endif
-
-#endif /* __GNUC__ */
-
-#include <linux/byteorder/big_endian.h>
-
-#endif /* _PPC_BYTEORDER_H */
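
For reference, the three rlwimi instructions in the deleted
___arch__swab32() really do compute a full byte swap.  The sketch below
emulates "rlwimi rA,rS,SH,MB,ME" in portable C using PowerPC big-endian bit
numbering (bit 0 is the MSB) and replays the three steps; rlwimi_emu() and
rotl32() are illustrative helpers, not kernel code.

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

static uint32_t rotl32(uint32_t v, unsigned sh)
{
	return (sh % 32) ? (v << sh) | (v >> (32 - sh)) : v;
}

/* Rotate rs left by sh, then insert bits mb..me (big-endian numbering)
 * of the rotated value into ra, leaving the other bits of ra intact. */
static uint32_t rlwimi_emu(uint32_t ra, uint32_t rs,
			   unsigned sh, unsigned mb, unsigned me)
{
	uint32_t mask = (0xffffffffu >> mb) & (0xffffffffu << (31 - me));
	return (rotl32(rs, sh) & mask) | (ra & ~mask);
}

int main(void)
{
	uint32_t value = 0x01020304, result;

	result = value >> 24;                            /* "0" (value>>24) */
	result = rlwimi_emu(result, value, 24, 16, 23);  /* insert byte 1 */
	result = rlwimi_emu(result, value, 8, 8, 15);    /* insert byte 2 */
	result = rlwimi_emu(result, value, 24, 0, 7);    /* insert byte 3 */

	assert(result == 0x04030201);
	printf("%08x\n", result);
	return 0;
}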