author		Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2020-01-22 09:05:34 +0100
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2020-01-22 09:05:34 +0100
commit		c318f074d9fdeae16e19cbb2ed53b50d2bcdfdb8 (patch)
tree		44c6552fa919dfe1fc681949bf5d81fcf6a2beb8 /include/asm-generic/cacheflush.h
parent		7b2d7faa09fcbd0184634544f732f4b2da0b20a8 (diff)
parent		def9d2780727cec3313ed3522d0123158d87224d (diff)
Merge 5.5-rc7 into staging-next
We want the staging fixes in here as well
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'include/asm-generic/cacheflush.h')
-rw-r--r--	include/asm-generic/cacheflush.h	33
1 file changed, 32 insertions(+), 1 deletion(-)
diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h
index a950a22c4890..cac7404b2bdd 100644
--- a/include/asm-generic/cacheflush.h
+++ b/include/asm-generic/cacheflush.h
@@ -11,71 +11,102 @@
  * The cache doesn't need to be flushed when TLB entries change when
  * the cache is mapped to physical memory, not virtual memory
  */
+#ifndef flush_cache_all
 static inline void flush_cache_all(void)
 {
 }
+#endif
 
+#ifndef flush_cache_mm
 static inline void flush_cache_mm(struct mm_struct *mm)
 {
 }
+#endif
 
+#ifndef flush_cache_dup_mm
 static inline void flush_cache_dup_mm(struct mm_struct *mm)
 {
 }
+#endif
 
+#ifndef flush_cache_range
 static inline void flush_cache_range(struct vm_area_struct *vma,
 				     unsigned long start,
 				     unsigned long end)
 {
 }
+#endif
 
+#ifndef flush_cache_page
 static inline void flush_cache_page(struct vm_area_struct *vma,
 				    unsigned long vmaddr,
 				    unsigned long pfn)
 {
 }
+#endif
 
+#ifndef flush_dcache_page
 static inline void flush_dcache_page(struct page *page)
 {
 }
+#endif
 
+#ifndef flush_dcache_mmap_lock
 static inline void flush_dcache_mmap_lock(struct address_space *mapping)
 {
 }
+#endif
 
+#ifndef flush_dcache_mmap_unlock
 static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
 {
 }
+#endif
 
+#ifndef flush_icache_range
 static inline void flush_icache_range(unsigned long start, unsigned long end)
 {
 }
+#endif
 
+#ifndef flush_icache_page
 static inline void flush_icache_page(struct vm_area_struct *vma,
 				     struct page *page)
 {
 }
+#endif
 
+#ifndef flush_icache_user_range
 static inline void flush_icache_user_range(struct vm_area_struct *vma,
 					   struct page *page,
 					   unsigned long addr, int len)
 {
 }
+#endif
 
+#ifndef flush_cache_vmap
 static inline void flush_cache_vmap(unsigned long start, unsigned long end)
 {
 }
+#endif
 
+#ifndef flush_cache_vunmap
 static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
 {
 }
+#endif
 
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+#ifndef copy_to_user_page
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
 	do { \
 		memcpy(dst, src, len); \
 		flush_icache_user_range(vma, page, vaddr, len); \
 	} while (0)
+#endif
+
+#ifndef copy_from_user_page
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 	memcpy(dst, src, len)
+#endif
 
 #endif /* __ASM_CACHEFLUSH_H */
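
The new #ifndef guards follow the usual asm-generic override pattern: an architecture that needs real cache maintenance provides its own implementation, defines the matching macro, and then includes the generic header so every routine it did not override still falls back to the empty stub. Below is a minimal sketch of an architecture header using one of the guards; the arch name, path, and function body are hypothetical and not part of this patch:

/* arch/foo/include/asm/cacheflush.h -- hypothetical sketch */
#ifndef __ASM_FOO_CACHEFLUSH_H
#define __ASM_FOO_CACHEFLUSH_H

/* Arch-specific I-cache maintenance; the real cache ops are elided here. */
static inline void flush_icache_range(unsigned long start, unsigned long end)
{
	/* synchronize the I-cache with the D-cache for [start, end) */
}
#define flush_icache_range flush_icache_range

/* Pick up the empty stubs for everything not overridden above. */
#include <asm-generic/cacheflush.h>

#endif /* __ASM_FOO_CACHEFLUSH_H */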