#ifndef _ASM_X86_STRING_32_H
#define _ASM_X86_STRING_32_H

#ifdef __KERNEL__

/* Let gcc decide whether to inline or use the out of line functions */

#define __HAVE_ARCH_STRCPY
extern char *strcpy(char *dest, const char *src);

#define __HAVE_ARCH_STRNCPY
extern char *strncpy(char *dest, const char *src, size_t count);

#define __HAVE_ARCH_STRCAT
extern char *strcat(char *dest, const char *src);

#define __HAVE_ARCH_STRNCAT
extern char *strncat(char *dest, const char *src, size_t count);

#define __HAVE_ARCH_STRCMP
extern int strcmp(const char *cs, const char *ct);

#define __HAVE_ARCH_STRNCMP
extern int strncmp(const char *cs, const char *ct, size_t count);

#define __HAVE_ARCH_STRCHR
extern char *strchr(const char *s, int c);

#define __HAVE_ARCH_STRLEN
extern size_t strlen(const char *s);

static __always_inline void *__memcpy(void *to, const void *from, size_t n)
{
        int d0, d1, d2;
        /*
         * Copy n/4 dwords with rep ; movsl, then pick up the remaining
         * n % 4 bytes with rep ; movsb.
         */
        asm volatile("rep ; movsl\n\t"
                     "movl %4,%%ecx\n\t"
                     "andl $3,%%ecx\n\t"
                     "jz 1f\n\t"
                     "rep ; movsb\n\t"
                     "1:"
                     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
                     : "0" (n / 4), "g" (n), "1" ((long)to), "2" ((long)from)
                     : "memory");
        return to;
}

/*
 * This looks ugly, but the compiler can optimize it totally,
 * as the count is constant.
 */
static __always_inline void *__constant_memcpy(void *to, const void *from,
                                               size_t n)
{
        long esi, edi;
        if (!n)
                return to;

        switch (n) {
        case 1:
                *(char *)to = *(char *)from;
                return to;
        case 2:
                *(short *)to = *(short *)from;
                return to;
        case 4:
                *(int *)to = *(int *)from;
                return to;
        case 3:
                *(short *)to = *(short *)from;
                *((char *)to + 2) = *((char *)from + 2);
                return to;
        case 5:
                *(int *)to = *(int *)from;
                *((char *)to + 4) = *((char *)from + 4);
                return to;
        case 6:
                *(int *)to = *(int *)from;
                *((short *)to + 2) = *((short *)from + 2);
                return to;
        case 8:
                *(int *)to = *(int *)from;
                *((int *)to + 1) = *((int *)from + 1);
                return to;
        }

        esi = (long)from;
        edi = (long)to;
        if (n >= 5 * 4) {
                /* large block: use rep prefix */
                int ecx;
                asm volatile("rep ; movsl"
                             : "=&c" (ecx), "=&D" (edi), "=&S" (esi)
                             : "0" (n / 4), "1" (edi), "2" (esi)
                             : "memory"
                );
        } else {
                /* small block: don't clobber ecx + smaller code */
                if (n >= 4 * 4)
                        asm volatile("movsl"
                                     : "=&D"(edi), "=&S"(esi)
                                     : "0"(edi), "1"(esi)
                                     : "memory");
                if (n >= 3 * 4)
                        asm volatile("movsl"
                                     : "=&D"(edi), "=&S"(esi)
                                     : "0"(edi), "1"(esi)
                                     : "memory");
                if (n >= 2 * 4)
                        asm volatile("movsl"
                                     : "=&D"(edi), "=&S"(esi)
                                     : "0"(edi), "1"(esi)
                                     : "memory");
                if (n >= 1 * 4)
                        asm volatile("movsl"
                                     : "=&D"(edi), "=&S"(esi)
                                     : "0"(edi), "1"(esi)
                                     : "memory");
        }
        switch (n % 4) {
                /* tail */
        case 0:
                return to;
        case 1:
                asm volatile("movsb"
                             : "=&D"(edi), "=&S"(esi)
                             : "0"(edi), "1"(esi)
                             : "memory");
                return to;
        case 2:
                asm volatile("movsw"
                             : "=&D"(edi), "=&S"(esi)
                             : "0"(edi), "1"(esi)
                             : "memory");
                return to;
        default:
                asm volatile("movsw\n\tmovsb"
                             : "=&D"(edi), "=&S"(esi)
                             : "0"(edi), "1"(esi)
                             : "memory");
                return to;
        }
}

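/*
 * Example (assuming the size is a visible compile-time constant): a call
 * such as
 *
 *      __constant_memcpy(dst, src, 6);
 *
 * should reduce to the "case 6" branch above, i.e. one 32-bit and one
 * 16-bit copy, while n == 14 takes the small-block path (three movsl
 * plus the movsw tail) and n >= 20 uses the rep ; movsl loop.
 */
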
#define __HAVE_ARCH_MEMCPY

#ifdef CONFIG_X86_USE_3DNOW

#include <asm/mmx.h>

/*
 *	This CPU favours 3DNow strongly (eg AMD Athlon)
 */

static inline void *__constant_memcpy3d(void *to, const void *from, size_t len)
{
        if (len < 512)
                return __constant_memcpy(to, from, len);
        return _mmx_memcpy(to, from, len);
}

static inline void *__memcpy3d(void *to, const void *from, size_t len)
{
        if (len < 512)
                return __memcpy(to, from, len);
        return _mmx_memcpy(to, from, len);
}

#define memcpy(t, f, n)				\
	(__builtin_constant_p((n))		\
	 ? __constant_memcpy3d((t), (f), (n))	\
	 : __memcpy3d((t), (f), (n)))

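/*
 * Sketch of the resulting behaviour on a 3DNow-capable build (assuming
 * CONFIG_X86_USE_3DNOW is set): copies shorter than 512 bytes stay on the
 * integer paths above, while e.g. copying a whole 4096-byte page is handed
 * to _mmx_memcpy() from <asm/mmx.h>.
 */
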
#else

/*
 *	No 3D Now!
 */

#ifndef CONFIG_KMEMCHECK

#if (__GNUC__ >= 4)
#define memcpy(t, f, n) __builtin_memcpy(t, f, n)
#else
#define memcpy(t, f, n)				\
	(__builtin_constant_p((n))		\
	 ? __constant_memcpy((t), (f), (n))	\
	 : __memcpy((t), (f), (n)))
#endif
#else
/*
 * kmemcheck becomes very happy if we use the REP instructions unconditionally,
 * because it means that we know both memory operands in advance.
 */
#define memcpy(t, f, n) __memcpy((t), (f), (n))
#endif

#endif

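/*
 * Summary sketch of the non-3DNow selection above: with gcc 4 or later
 * memcpy() maps straight to __builtin_memcpy(); on older compilers a
 * constant-sized call such as memcpy(buf, src, 16) is routed to
 * __constant_memcpy() and a runtime-sized one to __memcpy(); with
 * CONFIG_KMEMCHECK enabled every call simply uses __memcpy().
 */
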
#define __HAVE_ARCH_MEMMOVE
void *memmove(void *dest, const void *src, size_t n);

#define memcmp __builtin_memcmp

#define __HAVE_ARCH_MEMCHR
extern void *memchr(const void *cs, int c, size_t count);

static inline void *__memset_generic(void *s, char c, size_t count)
{
        int d0, d1;
        asm volatile("rep\n\t"
                     "stosb"
                     : "=&c" (d0), "=&D" (d1)
                     : "a" (c), "1" (s), "0" (count)
                     : "memory");
        return s;
}

/* we might want to write optimized versions of these later */
#define __constant_count_memset(s, c, count) __memset_generic((s), (c), (count))

/*
 * memset(x, 0, y) is a reasonably common thing to do, so we want to fill
 * things 32 bits at a time even when we don't know the size of the
 * area at compile-time..
 */
static __always_inline
void *__constant_c_memset(void *s, unsigned long c, size_t count)
{
        int d0, d1;
        asm volatile("rep ; stosl\n\t"
                     "testb $2,%b3\n\t"
                     "je 1f\n\t"
                     "stosw\n"
                     "1:\ttestb $1,%b3\n\t"
                     "je 2f\n\t"
                     "stosb\n"
                     "2:"
                     : "=&c" (d0), "=&D" (d1)
                     : "a" (c), "q" (count), "0" (count/4), "1" ((long)s)
                     : "memory");
        return s;
}

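/*
 * Worked example (assuming the fill byte was already replicated by the
 * memset() macro below): with c == 0xabababab and a run-time count of 10,
 * rep ; stosl stores two dwords, the "testb $2" branch adds a stosw for
 * the ninth and tenth bytes, and the final stosb is skipped because
 * bit 0 of count is clear.
 */
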
/* Added by Gertjan van Wingerde to make minix and sysv module work */
#define __HAVE_ARCH_STRNLEN
extern size_t strnlen(const char *s, size_t count);
/* end of additional stuff */

#define __HAVE_ARCH_STRSTR
extern char *strstr(const char *cs, const char *ct);

/*
 * This looks horribly ugly, but the compiler can optimize it totally,
 * as we by now know that both pattern and count are constant..
 */
static __always_inline
void *__constant_c_and_count_memset(void *s, unsigned long pattern,
                                    size_t count)
{
        switch (count) {
        case 0:
                return s;
        case 1:
                *(unsigned char *)s = pattern & 0xff;
                return s;
        case 2:
                *(unsigned short *)s = pattern & 0xffff;
                return s;
        case 3:
                *(unsigned short *)s = pattern & 0xffff;
                *((unsigned char *)s + 2) = pattern & 0xff;
                return s;
        case 4:
                *(unsigned long *)s = pattern;
                return s;
        }

#define COMMON(x)						\
	asm volatile("rep ; stosl"				\
		     x						\
		     : "=&c" (d0), "=&D" (d1)			\
		     : "a" (eax), "0" (count/4), "1" ((long)s)	\
		     : "memory")

        {
                int d0, d1;
#if __GNUC__ == 4 && __GNUC_MINOR__ == 0
                /* Workaround for broken gcc 4.0 */
                register unsigned long eax asm("%eax") = pattern;
#else
                unsigned long eax = pattern;
#endif

                switch (count % 4) {
                case 0:
                        COMMON("");
                        return s;
                case 1:
                        COMMON("\n\tstosb");
                        return s;
                case 2:
                        COMMON("\n\tstosw");
                        return s;
                default:
                        COMMON("\n\tstosw\n\tstosb");
                        return s;
                }
        }

#undef COMMON
}

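/*
 * Example of the constant-count path (a sketch, with both arguments
 * assumed constant): for a 7-byte fill the switch above falls through to
 * COMMON("\n\tstosw\n\tstosb"), i.e. one stosl for count/4 == 1 dword
 * followed by a stosw and a stosb for the remaining three bytes.
 */
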
#define __constant_c_x_memset(s, c, count)			\
	(__builtin_constant_p(count)				\
	 ? __constant_c_and_count_memset((s), (c), (count))	\
	 : __constant_c_memset((s), (c), (count)))

#define __memset(s, c, count)				\
	(__builtin_constant_p(count)			\
	 ? __constant_count_memset((s), (c), (count))	\
	 : __memset_generic((s), (c), (count)))

#define __HAVE_ARCH_MEMSET
#if (__GNUC__ >= 4)
#define memset(s, c, count) __builtin_memset(s, c, count)
#else
#define memset(s, c, count)						\
	(__builtin_constant_p(c)					\
	 ? __constant_c_x_memset((s), (0x01010101UL * (unsigned char)(c)), \
				 (count))				\
	 : __memset((s), (c), (count)))
#endif

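/*
 * Tying the memset() pieces together (sketch for the pre-gcc-4 macro
 * above): memset(p, 0, n) first replicates the byte, 0x01010101UL * 0,
 * and then picks __constant_c_and_count_memset() when n is a compile-time
 * constant or __constant_c_memset() when it is not; a non-constant fill
 * byte falls back to __memset().
 */
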
/*
 * find the first occurrence of byte 'c', or 1 past the area if none
 */
#define __HAVE_ARCH_MEMSCAN
extern void *memscan(void *addr, int c, size_t size);

#endif /* __KERNEL__ */

#endif /* _ASM_X86_STRING_32_H */