author     Linus Torvalds <torvalds@linux-foundation.org>   2023-09-04 15:38:24 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2023-09-04 15:38:24 -0700
commit     3f86ed6ec0b390c033eae7f9c487a3fea268e027 (patch)
tree       a0a1fa3e5c04e146bf9414833da9bc634df10b95 /arch/arc/mm
parent     ea4f9c37f75271d8256a326c938c95733e1fec35 (diff)
parent     c40cad3b0aa47d6d0995637178fb6607ac3d45c1 (diff)
Merge tag 'arc-6.6-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc
Pull ARC updates from Vineet Gupta:
- fixes for -Wmissing-prototype warnings
- missing compiler barrier in relaxed atomics
- some uaccess simplification, declutter
- removal of massive global struct cpuinfo_arc from bootlog code
- __switch_to consolidation (removal of inline asm variant)
- use GP to cache task pointer (vs. r25)
- misc rework of entry code
* tag 'arc-6.6-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc: (24 commits)
ARC: boot log: fix warning
arc: Explicitly include correct DT includes
ARC: pt_regs: create seperate type for ecr
ARCv2: entry: rearrange pt_regs slightly
ARC: entry: replace 8 byte ADD.ne with 4 byte ADD2.ne
ARC: entry: replace 8 byte OR with 4 byte BSET
ARC: entry: Add more common chores to EXCEPTION_PROLOGUE
ARC: entry: EV_MachineCheck dont re-read ECR
ARC: entry: ARcompact EV_ProtV to use r10 directly
ARC: entry: rework (non-functional)
ARC: __switch_to: move ksp to thread_info from thread_struct
ARC: __switch_to: asm with dwarf ops (vs. inline asm)
ARC: kernel stack: INIT_THREAD need not setup @init_stack in @ksp
ARC: entry: use gp to cache task pointer (vs. r25)
ARC: boot log: eliminate struct cpuinfo_arc #4: boot log per ISA
ARC: boot log: eliminate struct cpuinfo_arc #3: don't export
ARC: boot log: eliminate struct cpuinfo_arc #2: cache
ARC: boot log: eliminate struct cpuinfo_arc #1: mm
ARCv2: memset: don't prefetch for len == 0 which happens a alot
ARC: uaccess: elide unaliged handling if hardware supports
...
Diffstat (limited to 'arch/arc/mm')
-rw-r--r--  arch/arc/mm/cache.c    | 179
-rw-r--r--  arch/arc/mm/extable.c  |  11
-rw-r--r--  arch/arc/mm/fault.c    |   7
-rw-r--r--  arch/arc/mm/init.c     |   1
-rw-r--r--  arch/arc/mm/tlb.c      |  99
5 files changed, 111 insertions, 186 deletions
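
The common thread in the cache.c and tlb.c hunks below is the boot-log rework: the per-subsystem "mumbojumbo" reporters no longer format from the global cpuinfo_arc700[] array and return the buffer pointer; each now appends its own lines into a caller-supplied buffer with scnprintf() and returns the number of bytes written, so callers can chain reporters. What follows is a minimal userspace C sketch of that accumulation idiom only; the report_*() helpers and the values they print are made-up stand-ins, not the kernel code. Note that userspace snprintf() returns the length that would have been written on truncation, whereas the kernel's scnprintf() returns the bytes actually stored, which is what keeps the in-kernel chaining safe when the buffer runs short.

#include <stdio.h>

/* Hypothetical reporters mimicking the "n += scnprintf(buf + n, len - n, ...)"
 * pattern of the reworked arc_*_mumbojumbo() functions: each appends to the
 * caller's buffer and returns the number of characters it wrote. */
static int report_cache(char *buf, int len)
{
        return snprintf(buf, len, "I-Cache\t: 32K, 4way/set, 64B Line\n");
}

static int report_mmu(char *buf, int len)
{
        return snprintf(buf, len, "MMU [v4]\t: 8k, swalk 2 lvl, JTLB 128x4\n");
}

int main(void)
{
        char str[256];
        int n = 0;
        const int len = sizeof(str);

        /* Chain the reporters: each call writes right after the previous one. */
        n += report_cache(str + n, len - n);
        n += report_mmu(str + n, len - n);

        printf("%s", str);      /* one pr_info()-style dump of the whole log */
        return 0;
}
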
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 3c16ee942a5c..f7e05c146637 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -28,6 +28,10 @@ int slc_enable = 1, ioc_enable = 1;
 unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
 unsigned long perip_end = 0xFFFFFFFF; /* legacy value */
 
+static struct cpuinfo_arc_cache {
+        unsigned int sz_k, line_len, colors;
+} ic_info, dc_info, slc_info;
+
 void (*_cache_line_loop_ic_fn)(phys_addr_t paddr, unsigned long vaddr,
                                unsigned long sz, const int op, const int full_page);
 
@@ -35,78 +39,24 @@ void (*__dma_cache_wback_inv)(phys_addr_t start, unsigned long sz);
 void (*__dma_cache_inv)(phys_addr_t start, unsigned long sz);
 void (*__dma_cache_wback)(phys_addr_t start, unsigned long sz);
 
-char *arc_cache_mumbojumbo(int c, char *buf, int len)
-{
-        int n = 0;
-        struct cpuinfo_arc_cache *p;
-
-#define PR_CACHE(p, cfg, str) \
-        if (!(p)->line_len) \
-                n += scnprintf(buf + n, len - n, str"\t\t: N/A\n"); \
-        else \
-                n += scnprintf(buf + n, len - n, \
-                        str"\t\t: %uK, %dway/set, %uB Line, %s%s%s\n", \
-                        (p)->sz_k, (p)->assoc, (p)->line_len, \
-                        (p)->vipt ? "VIPT" : "PIPT", \
-                        (p)->alias ? " aliasing" : "", \
-                        IS_USED_CFG(cfg));
-
-        PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
-        PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");
-
-        p = &cpuinfo_arc700[c].slc;
-        if (p->line_len)
-                n += scnprintf(buf + n, len - n,
-                               "SLC\t\t: %uK, %uB Line%s\n",
-                               p->sz_k, p->line_len, IS_USED_RUN(slc_enable));
-
-        n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
-                       perip_base,
-                       IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency (per-device) "));
-
-        return buf;
-}
-
-/*
- * Read the Cache Build Confuration Registers, Decode them and save into
- * the cpuinfo structure for later use.
- * No Validation done here, simply read/convert the BCRs
- */
-static void read_decode_cache_bcr_arcv2(int cpu)
+static int read_decode_cache_bcr_arcv2(int c, char *buf, int len)
 {
-        struct cpuinfo_arc_cache *p_slc = &cpuinfo_arc700[cpu].slc;
+        struct cpuinfo_arc_cache *p_slc = &slc_info;
+        struct bcr_identity ident;
         struct bcr_generic sbcr;
-
-        struct bcr_slc_cfg {
-#ifdef CONFIG_CPU_BIG_ENDIAN
-                unsigned int pad:24, way:2, lsz:2, sz:4;
-#else
-                unsigned int sz:4, lsz:2, way:2, pad:24;
-#endif
-        } slc_cfg;
-
-        struct bcr_clust_cfg {
-#ifdef CONFIG_CPU_BIG_ENDIAN
-                unsigned int pad:7, c:1, num_entries:8, num_cores:8, ver:8;
-#else
-                unsigned int ver:8, num_cores:8, num_entries:8, c:1, pad:7;
-#endif
-        } cbcr;
-
-        struct bcr_volatile {
-#ifdef CONFIG_CPU_BIG_ENDIAN
-                unsigned int start:4, limit:4, pad:22, order:1, disable:1;
-#else
-                unsigned int disable:1, order:1, pad:22, limit:4, start:4;
-#endif
-        } vol;
-
+        struct bcr_clust_cfg cbcr;
+        struct bcr_volatile vol;
+        int n = 0;
 
         READ_BCR(ARC_REG_SLC_BCR, sbcr);
         if (sbcr.ver) {
+                struct bcr_slc_cfg slc_cfg;
                 READ_BCR(ARC_REG_SLC_CFG, slc_cfg);
                 p_slc->sz_k = 128 << slc_cfg.sz;
                 l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64;
+                n += scnprintf(buf + n, len - n,
+                               "SLC\t\t: %uK, %uB Line%s\n",
+                               p_slc->sz_k, p_slc->line_len, IS_USED_RUN(slc_enable));
         }
 
         READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
@@ -129,70 +79,83 @@ static void read_decode_cache_bcr_arcv2(int cpu)
                 ioc_enable = 0;
         }
 
+        READ_BCR(AUX_IDENTITY, ident);
+
         /* HS 2.0 didn't have AUX_VOL */
-        if (cpuinfo_arc700[cpu].core.family > 0x51) {
+        if (ident.family > 0x51) {
                 READ_BCR(AUX_VOL, vol);
                 perip_base = vol.start << 28;
                 /* HS 3.0 has limit and strict-ordering fields */
-                if (cpuinfo_arc700[cpu].core.family > 0x52)
+                if (ident.family > 0x52)
                         perip_end = (vol.limit << 28) - 1;
         }
+
+        n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
+                       perip_base,
+                       IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency (per-device) "));
+
+        return n;
 }
 
-void read_decode_cache_bcr(void)
+int arc_cache_mumbojumbo(int c, char *buf, int len)
 {
-        struct cpuinfo_arc_cache *p_ic, *p_dc;
-        unsigned int cpu = smp_processor_id();
-        struct bcr_cache {
-#ifdef CONFIG_CPU_BIG_ENDIAN
-                unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
-#else
-                unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
-#endif
-        } ibcr, dbcr;
+        struct cpuinfo_arc_cache *p_ic = &ic_info, *p_dc = &dc_info;
+        struct bcr_cache ibcr, dbcr;
+        int vipt, assoc;
+        int n = 0;
 
-        p_ic = &cpuinfo_arc700[cpu].icache;
         READ_BCR(ARC_REG_IC_BCR, ibcr);
-
         if (!ibcr.ver)
                 goto dc_chk;
 
-        if (ibcr.ver <= 3) {
+        if (is_isa_arcompact() && (ibcr.ver <= 3)) {
                 BUG_ON(ibcr.config != 3);
-                p_ic->assoc = 2;                /* Fixed to 2w set assoc */
-        } else if (ibcr.ver >= 4) {
-                p_ic->assoc = 1 << ibcr.config; /* 1,2,4,8 */
+                assoc = 2;                /* Fixed to 2w set assoc */
+        } else if (is_isa_arcv2() && (ibcr.ver >= 4)) {
+                assoc = 1 << ibcr.config; /* 1,2,4,8 */
         }
 
         p_ic->line_len = 8 << ibcr.line_len;
         p_ic->sz_k = 1 << (ibcr.sz - 1);
-        p_ic->vipt = 1;
-        p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1;
+        p_ic->colors = p_ic->sz_k/assoc/TO_KB(PAGE_SIZE);
+
+        n += scnprintf(buf + n, len - n,
+                        "I-Cache\t\t: %uK, %dway/set, %uB Line, VIPT%s%s\n",
+                        p_ic->sz_k, assoc, p_ic->line_len,
+                        p_ic->colors > 1 ? " aliasing" : "",
+                        IS_USED_CFG(CONFIG_ARC_HAS_ICACHE));
 
 dc_chk:
-        p_dc = &cpuinfo_arc700[cpu].dcache;
         READ_BCR(ARC_REG_DC_BCR, dbcr);
-
         if (!dbcr.ver)
                 goto slc_chk;
 
-        if (dbcr.ver <= 3) {
+        if (is_isa_arcompact() && (dbcr.ver <= 3)) {
                 BUG_ON(dbcr.config != 2);
-                p_dc->assoc = 4;                /* Fixed to 4w set assoc */
-                p_dc->vipt = 1;
-                p_dc->alias = p_dc->sz_k/p_dc->assoc/TO_KB(PAGE_SIZE) > 1;
-        } else if (dbcr.ver >= 4) {
-                p_dc->assoc = 1 << dbcr.config; /* 1,2,4,8 */
-                p_dc->vipt = 0;
-                p_dc->alias = 0;                /* PIPT so can't VIPT alias */
+                vipt = 1;
+                assoc = 4;                /* Fixed to 4w set assoc */
+                p_dc->colors = p_dc->sz_k/assoc/TO_KB(PAGE_SIZE);
+        } else if (is_isa_arcv2() && (dbcr.ver >= 4)) {
+                vipt = 0;
+                assoc = 1 << dbcr.config; /* 1,2,4,8 */
+                p_dc->colors = 1;                /* PIPT so can't VIPT alias */
         }
 
         p_dc->line_len = 16 << dbcr.line_len;
         p_dc->sz_k = 1 << (dbcr.sz - 1);
 
+        n += scnprintf(buf + n, len - n,
+                        "D-Cache\t\t: %uK, %dway/set, %uB Line, %s%s%s\n",
+                        p_dc->sz_k, assoc, p_dc->line_len,
+                        vipt ? "VIPT" : "PIPT",
+                        p_dc->colors > 1 ? " aliasing" : "",
+                        IS_USED_CFG(CONFIG_ARC_HAS_DCACHE));
+
 slc_chk:
         if (is_isa_arcv2())
-                read_decode_cache_bcr_arcv2(cpu);
+                n += read_decode_cache_bcr_arcv2(c, buf + n, len - n);
+
+        return n;
 }
 
 /*
@@ -581,7 +544,7 @@ static void __ic_line_inv_vaddr(phys_addr_t paddr, unsigned long vaddr,
 
 #endif /* CONFIG_ARC_HAS_ICACHE */
 
-noinline void slc_op_rgn(phys_addr_t paddr, unsigned long sz, const int op)
+static noinline void slc_op_rgn(phys_addr_t paddr, unsigned long sz, const int op)
 {
 #ifdef CONFIG_ISA_ARCV2
         /*
@@ -644,7 +607,7 @@ noinline void slc_op_rgn(phys_addr_t paddr, unsigned long sz, const int op)
 #endif
 }
 
-noinline void slc_op_line(phys_addr_t paddr, unsigned long sz, const int op)
+static __maybe_unused noinline void slc_op_line(phys_addr_t paddr, unsigned long sz, const int op)
 {
 #ifdef CONFIG_ISA_ARCV2
         /*
@@ -1082,7 +1045,7 @@ SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
  * 3. All Caches need to be disabled when setting up IOC to elide any in-flight
  *    Coherency transactions
  */
-noinline void __init arc_ioc_setup(void)
+static noinline void __init arc_ioc_setup(void)
 {
         unsigned int ioc_base, mem_sz;
 
@@ -1144,12 +1107,10 @@ noinline void __init arc_ioc_setup(void)
  *    one core suffices for all
  *  - IOC setup / dma callbacks only need to be done once
  */
-void __init arc_cache_init_master(void)
+static noinline void __init arc_cache_init_master(void)
 {
-        unsigned int __maybe_unused cpu = smp_processor_id();
-
         if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
-                struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
+                struct cpuinfo_arc_cache *ic = &ic_info;
 
                 if (!ic->line_len)
                         panic("cache support enabled but non-existent cache\n");
@@ -1162,14 +1123,14 @@ void __init arc_cache_init_master(void)
                  * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
                  * pair to provide vaddr/paddr respectively, just as in MMU v3
                  */
-                if (is_isa_arcv2() && ic->alias)
+                if (is_isa_arcv2() && ic->colors > 1)
                         _cache_line_loop_ic_fn = __cache_line_loop_v3;
                 else
                         _cache_line_loop_ic_fn = __cache_line_loop;
         }
 
         if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
-                struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
+                struct cpuinfo_arc_cache *dc = &dc_info;
 
                 if (!dc->line_len)
                         panic("cache support enabled but non-existent cache\n");
@@ -1181,14 +1142,13 @@ void __init arc_cache_init_master(void)
                 /* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
                 if (is_isa_arcompact()) {
                         int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
-                        int num_colors = dc->sz_k/dc->assoc/TO_KB(PAGE_SIZE);
 
-                        if (dc->alias) {
+                        if (dc->colors > 1) {
                                 if (!handled)
                                         panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
-                                if (CACHE_COLORS_NUM != num_colors)
+                                if (CACHE_COLORS_NUM != dc->colors)
                                         panic("CACHE_COLORS_NUM not optimized for config\n");
-                        } else if (!dc->alias && handled) {
+                        } else if (handled && dc->colors == 1) {
                                 panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
                         }
                 }
@@ -1231,9 +1191,6 @@ void __init arc_cache_init_master(void)
 void __ref arc_cache_init(void)
 {
         unsigned int __maybe_unused cpu = smp_processor_id();
-        char str[256];
-
-        pr_info("%s", arc_cache_mumbojumbo(0, str, sizeof(str)));
 
         if (!cpu)
                 arc_cache_init_master();
diff --git a/arch/arc/mm/extable.c b/arch/arc/mm/extable.c
index 4e14c4244ea2..88fa3a4d4906 100644
--- a/arch/arc/mm/extable.c
+++ b/arch/arc/mm/extable.c
@@ -22,14 +22,3 @@ int fixup_exception(struct pt_regs *regs)
 
         return 0;
 }
-
-#ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
-
-unsigned long arc_clear_user_noinline(void __user *to,
-                unsigned long n)
-{
-        return __arc_clear_user(to, n);
-}
-EXPORT_SYMBOL(arc_clear_user_noinline);
-
-#endif
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index f59e722d147f..95119a5e7761 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -13,6 +13,7 @@
 #include <linux/kdebug.h>
 #include <linux/perf_event.h>
 #include <linux/mm_types.h>
+#include <asm/entry.h>
 #include <asm/mmu.h>
 
 /*
@@ -99,10 +100,10 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
         if (faulthandler_disabled() || !mm)
                 goto no_context;
 
-        if (regs->ecr_cause & ECR_C_PROTV_STORE)        /* ST/EX */
+        if (regs->ecr.cause & ECR_C_PROTV_STORE)        /* ST/EX */
                 write = 1;
-        else if ((regs->ecr_vec == ECR_V_PROTV) &&
-                 (regs->ecr_cause == ECR_C_PROTV_INST_FETCH))
+        else if ((regs->ecr.vec == ECR_V_PROTV) &&
+                 (regs->ecr.cause == ECR_C_PROTV_INST_FETCH))
                 exec = 1;
 
         flags = FAULT_FLAG_DEFAULT;
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index 9f64d729c9f8..6a71b23f1383 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -15,6 +15,7 @@
 #include <linux/highmem.h>
 #include <asm/page.h>
 #include <asm/sections.h>
+#include <asm/setup.h>
 #include <asm/arcregs.h>
 
 pgd_t swapper_pg_dir[PTRS_PER_PGD] __aligned(PAGE_SIZE);
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index 6f40f37e6550..e536b2dcd4b0 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -18,7 +18,9 @@
 /* A copy of the ASID from the PID reg is kept in asid_cache */
 DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;
 
-static int __read_mostly pae_exists;
+static struct cpuinfo_arc_mmu {
+        unsigned int ver, pg_sz_k, s_pg_sz_m, pae, sets, ways;
+} mmuinfo;
 
 /*
  * Utility Routine to erase a J-TLB entry
@@ -131,7 +133,7 @@ static void tlb_entry_insert(unsigned int pd0, phys_addr_t pd1)
 
 noinline void local_flush_tlb_all(void)
 {
-        struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
+        struct cpuinfo_arc_mmu *mmu = &mmuinfo;
         unsigned long flags;
         unsigned int entry;
         int num_tlb = mmu->sets * mmu->ways;
@@ -389,7 +391,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 /*
  * Routine to create a TLB entry
  */
-void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *ptep)
+static void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *ptep)
 {
         unsigned long flags;
         unsigned int asid_or_sasid, rwx;
@@ -564,89 +566,64 @@ void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
  * the cpuinfo structure for later use.
  * No Validation is done here, simply read/convert the BCRs
  */
-void read_decode_mmu_bcr(void)
+int arc_mmu_mumbojumbo(int c, char *buf, int len)
 {
-        struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
-        unsigned int tmp;
-        struct bcr_mmu_3 {
-#ifdef CONFIG_CPU_BIG_ENDIAN
-                unsigned int ver:8, ways:4, sets:4, res:3, sasid:1, pg_sz:4,
-                             u_itlb:4, u_dtlb:4;
-#else
-                unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, sasid:1, res:3, sets:4,
-                             ways:4, ver:8;
-#endif
-        } *mmu3;
-
-        struct bcr_mmu_4 {
-#ifdef CONFIG_CPU_BIG_ENDIAN
-                unsigned int ver:8, sasid:1, sz1:4, sz0:4, res:2, pae:1,
-                             n_ways:2, n_entry:2, n_super:2, u_itlb:3, u_dtlb:3;
-#else
-                /* DTLB ITLB JES JE JA */
-                unsigned int u_dtlb:3, u_itlb:3, n_super:2, n_entry:2, n_ways:2,
-                             pae:1, res:2, sz0:4, sz1:4, sasid:1, ver:8;
-#endif
-        } *mmu4;
+        struct cpuinfo_arc_mmu *mmu = &mmuinfo;
+        unsigned int bcr, u_dtlb, u_itlb, sasid;
+        struct bcr_mmu_3 *mmu3;
+        struct bcr_mmu_4 *mmu4;
+        char super_pg[64] = "";
+        int n = 0;
 
-        tmp = read_aux_reg(ARC_REG_MMU_BCR);
-        mmu->ver = (tmp >> 24);
+        bcr = read_aux_reg(ARC_REG_MMU_BCR);
+        mmu->ver = (bcr >> 24);
 
         if (is_isa_arcompact() && mmu->ver == 3) {
-                mmu3 = (struct bcr_mmu_3 *)&tmp;
+                mmu3 = (struct bcr_mmu_3 *)&bcr;
                 mmu->pg_sz_k = 1 << (mmu3->pg_sz - 1);
                 mmu->sets = 1 << mmu3->sets;
                 mmu->ways = 1 << mmu3->ways;
-                mmu->u_dtlb = mmu3->u_dtlb;
-                mmu->u_itlb = mmu3->u_itlb;
-                mmu->sasid = mmu3->sasid;
+                u_dtlb = mmu3->u_dtlb;
+                u_itlb = mmu3->u_itlb;
+                sasid = mmu3->sasid;
         } else {
-                mmu4 = (struct bcr_mmu_4 *)&tmp;
+                mmu4 = (struct bcr_mmu_4 *)&bcr;
                 mmu->pg_sz_k = 1 << (mmu4->sz0 - 1);
                 mmu->s_pg_sz_m = 1 << (mmu4->sz1 - 11);
                 mmu->sets = 64 << mmu4->n_entry;
                 mmu->ways = mmu4->n_ways * 2;
-                mmu->u_dtlb = mmu4->u_dtlb * 4;
-                mmu->u_itlb = mmu4->u_itlb * 4;
-                mmu->sasid = mmu4->sasid;
-                pae_exists = mmu->pae = mmu4->pae;
+                u_dtlb = mmu4->u_dtlb * 4;
+                u_itlb = mmu4->u_itlb * 4;
+                sasid = mmu4->sasid;
+                mmu->pae = mmu4->pae;
         }
-}
 
-char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
-{
-        int n = 0;
-        struct cpuinfo_arc_mmu *p_mmu = &cpuinfo_arc700[cpu_id].mmu;
-        char super_pg[64] = "";
-
-        if (p_mmu->s_pg_sz_m)
-                scnprintf(super_pg, 64, "%dM Super Page %s",
-                          p_mmu->s_pg_sz_m,
-                          IS_USED_CFG(CONFIG_TRANSPARENT_HUGEPAGE));
+        if (mmu->s_pg_sz_m)
+                scnprintf(super_pg, 64, "/%dM%s",
+                          mmu->s_pg_sz_m,
+                          IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) ? " (THP enabled)":"");
 
         n += scnprintf(buf + n, len - n,
-                      "MMU [v%x]\t: %dk PAGE, %s, swalk %d lvl, JTLB %d (%dx%d), uDTLB %d, uITLB %d%s%s\n",
-                       p_mmu->ver, p_mmu->pg_sz_k, super_pg, CONFIG_PGTABLE_LEVELS,
-                       p_mmu->sets * p_mmu->ways, p_mmu->sets, p_mmu->ways,
-                       p_mmu->u_dtlb, p_mmu->u_itlb,
-                       IS_AVAIL2(p_mmu->pae, ", PAE40 ", CONFIG_ARC_HAS_PAE40));
-
-        return buf;
+                      "MMU [v%x]\t: %dk%s, swalk %d lvl, JTLB %dx%d, uDTLB %d, uITLB %d%s%s%s\n",
+                       mmu->ver, mmu->pg_sz_k, super_pg, CONFIG_PGTABLE_LEVELS,
+                       mmu->sets, mmu->ways,
+                       u_dtlb, u_itlb,
+                       IS_AVAIL1(sasid, ", SASID"),
+                       IS_AVAIL2(mmu->pae, ", PAE40 ", CONFIG_ARC_HAS_PAE40));
+
+        return n;
 }
 
 int pae40_exist_but_not_enab(void)
 {
-        return pae_exists && !is_pae40_enabled();
+        return mmuinfo.pae && !is_pae40_enabled();
 }
 
 void arc_mmu_init(void)
 {
-        struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
-        char str[256];
+        struct cpuinfo_arc_mmu *mmu = &mmuinfo;
         int compat = 0;
 
-        pr_info("%s", arc_mmu_mumbojumbo(0, str, sizeof(str)));
-
         /*
          * Can't be done in processor.h due to header include dependencies
          */
@@ -723,7 +700,7 @@ volatile int dup_pd_silent; /* Be silent abt it or complain (default) */
 void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
                           struct pt_regs *regs)
 {
-        struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
+        struct cpuinfo_arc_mmu *mmu = &mmuinfo;
         unsigned long flags;
         int set, n_ways = mmu->ways;