author     Andi Kleen <ak@suse.de>                2006-09-26 10:52:29 +0200
committer  Andi Kleen <andi@basil.nowhere.org>   2006-09-26 10:52:29 +0200
commit     b1c78c0fcc29097567e1afc39701012e6d89adb7 (patch)
tree       9b9a2a302740c7a68003ade0536ab244f20cb08c /include
parent     3f14c746a61ec932c204aca820c02c293118c5df (diff)
[PATCH] Clean up and minor fixes to TLB flush
- Convert CR* accesses to dedicated inline functions and rewrite the rest as C inlines
- Don't do a double flush for global flushes (pointed out by Zach Amsden).
  This was a bug workaround for old CPUs that don't do 64bit and is obsolete.
  (See the note after the diffstat.)
- Add a proper memory clobber to invlpg (see the sketch below)
- Remove an unused extern

Signed-off-by: Andi Kleen <ak@suse.de>
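The memory clobber matters because GCC may move memory accesses across a volatile asm that lacks one; the old definition's only memory operand was the single byte "m" (*(char *) addr), so any other pending store could legally be sunk past the invlpg. A minimal sketch of the caller pattern the clobber protects (set_pte_and_flush is a hypothetical helper for illustration, not part of this patch; it assumes the kernel's set_pte() and the new __flush_tlb_one()):

	/*
	 * Hypothetical caller, for illustration only -- not part of
	 * this patch. Without the "memory" clobber, the compiler was
	 * free to reorder the PTE store below past the invlpg,
	 * leaving a stale translation live in the TLB. The clobber
	 * forces all pending stores to be emitted before the flush.
	 */
	static inline void set_pte_and_flush(pte_t *ptep, pte_t pte,
					     unsigned long address)
	{
		set_pte(ptep, pte);		/* update the page table first */
		__flush_tlb_one(address);	/* then invalidate the old TLB entry */
	}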
Diffstat (limited to 'include')
-rw-r--r--   include/asm-x86_64/pgtable.h    2
-rw-r--r--   include/asm-x86_64/tlbflush.h  70
2 files changed, 35 insertions, 37 deletions
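A note on the double-flush removal: a write to CR4 that changes PGE already invalidates all TLB entries, global ones included, so the CR3 reload in the old __flush_tlb_global() did no additional work. A side benefit of converting the CR* accesses to inline functions is that control-register read-modify-write becomes ordinary C; a sketch of the pattern (cr4_clear_bits() is illustrative only, not part of this patch):

	/*
	 * Illustrative only -- not introduced by this patch. Composes
	 * the new get_cr4()/set_cr4() inlines into a read-modify-write,
	 * the same pattern __flush_tlb_all() uses in the diff below.
	 * Must execute at CPL 0 (kernel context).
	 */
	static inline void cr4_clear_bits(unsigned long mask)
	{
		set_cr4(get_cr4() & ~mask);
	}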
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index a31ab4e68a9b..0c1e2422400a 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -25,8 +25,6 @@ extern int nonx_setup(char *str);
extern void paging_init(void);
extern void clear_kernel_mapping(unsigned long addr, unsigned long size);
-extern unsigned long pgkern_mask;
-
/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
diff --git a/include/asm-x86_64/tlbflush.h b/include/asm-x86_64/tlbflush.h
index d16d5b60f419..983bd296c81a 100644
--- a/include/asm-x86_64/tlbflush.h
+++ b/include/asm-x86_64/tlbflush.h
@@ -4,44 +4,44 @@
#include <linux/mm.h>
#include <asm/processor.h>
-#define __flush_tlb() \
- do { \
- unsigned long tmpreg; \
- \
- __asm__ __volatile__( \
- "movq %%cr3, %0; # flush TLB \n" \
- "movq %0, %%cr3; \n" \
- : "=r" (tmpreg) \
- :: "memory"); \
- } while (0)
+static inline unsigned long get_cr3(void)
+{
+ unsigned long cr3;
+ asm volatile("mov %%cr3,%0" : "=r" (cr3));
+ return cr3;
+}
-/*
- * Global pages have to be flushed a bit differently. Not a real
- * performance problem because this does not happen often.
- */
-#define __flush_tlb_global() \
- do { \
- unsigned long tmpreg, cr4, cr4_orig; \
- \
- __asm__ __volatile__( \
- "movq %%cr4, %2; # turn off PGE \n" \
- "movq %2, %1; \n" \
- "andq %3, %1; \n" \
- "movq %1, %%cr4; \n" \
- "movq %%cr3, %0; # flush TLB \n" \
- "movq %0, %%cr3; \n" \
- "movq %2, %%cr4; # turn PGE back on \n" \
- : "=&r" (tmpreg), "=&r" (cr4), "=&r" (cr4_orig) \
- : "i" (~X86_CR4_PGE) \
- : "memory"); \
- } while (0)
-
-extern unsigned long pgkern_mask;
-
-#define __flush_tlb_all() __flush_tlb_global()
+static inline void set_cr3(unsigned long cr3)
+{
+ asm volatile("mov %0,%%cr3" :: "r" (cr3) : "memory");
+}
+
+static inline void __flush_tlb(void)
+{
+ set_cr3(get_cr3());
+}
+
+static inline unsigned long get_cr4(void)
+{
+ unsigned long cr4;
+ asm volatile("mov %%cr4,%0" : "=r" (cr4));
+ return cr4;
+}
+
+static inline void set_cr4(unsigned long cr4)
+{
+ asm volatile("mov %0,%%cr4" :: "r" (cr4) : "memory");
+}
+
+static inline void __flush_tlb_all(void)
+{
+ unsigned long cr4 = get_cr4();
+ set_cr4(cr4 & ~X86_CR4_PGE); /* clear PGE */
+ set_cr4(cr4); /* write old PGE again and flush TLBs */
+}
#define __flush_tlb_one(addr) \
- __asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))
+ __asm__ __volatile__("invlpg (%0)" :: "r" (addr) : "memory")
/*