/*
* Copyright (C) 2005-2012 Imagination Technologies Ltd.
*
* This file contains the architecture-dependent parts of system setup.
*
*/
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/of_fdt.h>
#include <linux/pfn.h>
#include <linux/root_dev.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/start_kernel.h>
#include <linux/string.h>
#include <asm/cachepart.h>
#include <asm/clock.h>
#include <asm/core_reg.h>
#include <asm/cpu.h>
#include <asm/da.h>
#include <asm/highmem.h>
#include <asm/hwthread.h>
#include <asm/l2cache.h>
#include <asm/mach/arch.h>
#include <asm/metag_regs.h>
#include <asm/mmu.h>
#include <asm/mmzone.h>
#include <asm/processor.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/traps.h>
/* Priv protect as many registers as possible. */
#define DEFAULT_PRIV (TXPRIVEXT_COPRO_BITS | \
TXPRIVEXT_TXTRIGGER_BIT | \
TXPRIVEXT_TXGBLCREG_BIT | \
TXPRIVEXT_ILOCK_BIT | \
TXPRIVEXT_TXITACCYC_BIT | \
TXPRIVEXT_TXDIVTIME_BIT | \
TXPRIVEXT_TXAMAREGX_BIT | \
TXPRIVEXT_TXTIMERI_BIT | \
TXPRIVEXT_TXSTATUS_BIT | \
TXPRIVEXT_TXDISABLE_BIT)
/* Meta2 specific bits. */
#ifdef CONFIG_METAG_META12
#define META2_PRIV 0
#else
#define META2_PRIV (TXPRIVEXT_TXTIMER_BIT | \
TXPRIVEXT_TRACE_BIT)
#endif
/* Unaligned access checking bits. */
#ifdef CONFIG_METAG_UNALIGNED
#define UNALIGNED_PRIV TXPRIVEXT_ALIGNREW_BIT
#else
#define UNALIGNED_PRIV 0
#endif
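/* The full privilege mask written to TXPRIVEXT by setup_txprivext() below. */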
#define PRIV_BITS (DEFAULT_PRIV | \
META2_PRIV | \
UNALIGNED_PRIV)
extern char _heap_start[];
#ifdef CONFIG_METAG_BUILTIN_DTB
extern u32 __dtb_start[];
#endif
#ifdef CONFIG_DA_CONSOLE
/* Our early channel based console driver */
extern struct console dash_console;
#endif
struct machine_desc *machine_desc __initdata;
/*
* Map a Linux CPU number to a hardware thread ID
* In SMP this will be set up with the correct mapping at startup; in UP this
* will map to the HW thread on which we are running.
*/
u8 cpu_2_hwthread_id[NR_CPUS] __read_mostly = {
[0 ... NR_CPUS-1] = BAD_HWTHREAD_ID
};
/*
* Map a hardware thread ID to a Linux CPU number
* In SMP this will be fleshed out with the correct CPU ID for a particular
* hardware thread. In UP this will be initialised with the boot CPU ID.
*/
u8 hwthread_id_2_cpu[4] __read_mostly = {
[0 ... 3] = BAD_CPU_ID
};
/* The relative offset of the MMU mapped memory (from ldlk or bootloader)
* to the real physical memory. This is needed as we have to use the
* physical addresses in the MMU tables (pte entries), and not the virtual
* addresses.
* This variable is used in the __pa() and __va() macros, and should
* probably only be used via them.
*/
unsigned int meta_memoffset;
static char __initdata *original_cmd_line;
DEFINE_PER_CPU(PTBI, pTBI);
/*
* Mappings are specified as "CPU_ID:HWTHREAD_ID", e.g.
*
* "hwthread_map=0:1,1:2,2:3,3:0"
*
* Linux CPU ID HWTHREAD_ID
* ---------------------------
* 0 1
* 1 2
* 2 3
* 3 0
*/
static int __init parse_hwthread_map(char *p)
{
int cpu;
while (*p) {
cpu = (*p++) - '0';
if (cpu < 0 || cpu > 9 || cpu >= NR_CPUS)
goto err_cpu;
p++; /* skip colon */
cpu_2_hwthread_id[cpu] = (*p++) - '0';
if (cpu_2_hwthread_id[cpu] >= 4)
goto err_thread;
hwthread_id_2_cpu[cpu_2_hwthread_id[cpu]] = cpu;
if (*p == ',')
p++; /* skip comma */
}
return 0;
err_cpu:
pr_err("%s: hwthread_map cpu argument out of range\n", __func__);
return -EINVAL;
err_thread:
pr_err("%s: hwthread_map thread argument out of range\n", __func__);
return -EINVAL;
}
early_param("hwthread_map", parse_hwthread_map);
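/*
* Illustrative boot usage (hypothetical command line): passing
* "hwthread_map=0:1,1:0" swaps the mapping for CPUs 0 and 1. The parser
* only updates the CPUs it is given; all others keep the BAD_HWTHREAD_ID
* and BAD_CPU_ID defaults until they are set elsewhere (e.g. the boot CPU
* mapping in setup_arch()).
*/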
void __init dump_machine_table(void)
{
struct machine_desc *p;
const char **compat;
pr_info("Available machine support:\n\tNAME\t\tCOMPATIBLE LIST\n");
for_each_machine_desc(p) {
pr_info("\t%s\t[", p->name);
for (compat = p->dt_compat; compat && *compat; ++compat)
printk(" '%s'", *compat);
printk(" ]\n");
}
pr_info("\nPlease check your kernel config and/or bootloader.\n");
hard_processor_halt(HALT_PANIC);
}
#ifdef CONFIG_METAG_HALT_ON_PANIC
static int metag_panic_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
hard_processor_halt(HALT_PANIC);
return NOTIFY_DONE;
}
static struct notifier_block metag_panic_block = {
.notifier_call = metag_panic_event,
};
#endif
void __init setup_arch(char **cmdline_p)
{
unsigned long start_pfn;
unsigned long text_start = (unsigned long)(&_stext);
unsigned long cpu = smp_processor_id();
unsigned long heap_start, heap_end;
unsigned long start_pte;
PTBI _pTBI;
PTBISEG p_heap;
int heap_id, i;
metag_cache_probe();
metag_da_probe();
#ifdef CONFIG_DA_CONSOLE
if (metag_da_enabled()) {
/* An early channel based console driver */
register_console(&dash_console);
add_preferred_console("ttyDA", 1, NULL);
}
#endif
/* try interpreting the argument as a device tree */
machine_desc = setup_machine_fdt(original_cmd_line);
/* if it doesn't look like a device tree it must be a command line */
if (!machine_desc) {
#ifdef CONFIG_METAG_BUILTIN_DTB
/* try the embedded device tree */
machine_desc = setup_machine_fdt(__dtb_start);
if (!machine_desc)
panic("Invalid embedded device tree.");
#else
/* use the default machine description */
machine_desc = default_machine_desc();
#endif
#ifndef CONFIG_CMDLINE_FORCE
/* append the bootloader cmdline to any builtin fdt cmdline */
if (boot_command_line[0] && original_cmd_line[0])
strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
strlcat(boot_command_line, original_cmd_line,
COMMAND_LINE_SIZE);
#endif
}
setup_meta_clocks(machine_desc->clocks);
*cmdline_p = boot_command_line;
parse_early_param();
/*
* Make sure we don't alias in dcache or icache
*/
check_for_cache_aliasing(cpu);
#ifdef CONFIG_METAG_HALT_ON_PANIC
atomic_notifier_chain_register(&panic_notifier_list,
&metag_panic_block);
#endif
#ifdef CONFIG_DUMMY_CONSOLE
conswitchp = &dummy_con;
#endif
if (!(__core_reg_get(TXSTATUS) & TXSTATUS_PSTAT_BIT))
panic("Privilege must be enabled for this thread.");
_pTBI = __TBI(TBID_ISTAT_BIT);
per_cpu(pTBI, cpu) = _pTBI;
if (!per_cpu(pTBI, cpu))
panic("No TBI found!");
/*
* Initialize all interrupt vectors to our copy of __TBIUnExpXXX,
* rather than the version from the bootloader. This makes call
* stacks easier to understand and may allow us to unmap the
* bootloader at some point.
*
* We need to keep the LWK handler that TBI installed in order to
* be able to do inter-thread comms.
*/
for (i = 0; i <= TBID_SIGNUM_MAX; i++)
if (i != TBID_SIGNUM_LWK)
_pTBI->fnSigs[i] = __TBIUnExpXXX;
/* A Meta requirement is that the kernel is loaded (virtually)
* at PAGE_OFFSET.
*/
if (PAGE_OFFSET != text_start)
panic("Kernel not loaded at PAGE_OFFSET (%#x) but at %#lx.",
PAGE_OFFSET, text_start);
start_pte = mmu_read_second_level_page(text_start);
/*
* Kernel pages should have the PRIV bit set by the bootloader.
*/
if (!(start_pte & _PAGE_KERNEL))
panic("kernel pte does not have PRIV set");
/*
* See __pa and __va in include/asm/page.h.
* This value is negative when running in local space but the
* calculations work anyway.
*/
meta_memoffset = text_start - (start_pte & PAGE_MASK);
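/*
* Worked example (illustrative addresses, assuming __pa(v) evaluates to
* v - meta_memoffset as described for meta_memoffset above): with the
* kernel linked at a PAGE_OFFSET of 0x40000000 and the boot PTE mapping
* it to physical 0x20000000, meta_memoffset is 0x20000000 and
* __pa(0x40001000) yields 0x20001000.
*/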
/* Now let's look at the heap space */
heap_id = (__TBIThreadId() & TBID_THREAD_BITS)
+ TBID_SEG(0, TBID_SEGSCOPE_LOCAL, TBID_SEGTYPE_HEAP);
p_heap = __TBIFindSeg(NULL, heap_id);
if (!p_heap)
panic("Could not find heap from TBI!");
/* The heap begins at the first full page after the kernel data. */
heap_start = (unsigned long) &_heap_start;
/* The heap ends at the end of the heap segment specified with
* ldlk.
*/
if (is_global_space(text_start)) {
pr_debug("WARNING: running in global space!\n");
heap_end = (unsigned long)p_heap->pGAddr + p_heap->Bytes;
} else {
heap_end = (unsigned long)p_heap->pLAddr + p_heap->Bytes;
}
ROOT_DEV = Root_RAM0;
/* init_mm is the mm struct used for the first task. It is then
* cloned for all other tasks spawned from that task.
*
* Note - we are using the virtual addresses here.
*/
init_mm.start_code = (unsigned long)(&_stext);
init_mm.end_code = (unsigned long)(&_etext);
init_mm.end_data = (unsigned long)(&_edata);
init_mm.brk = (unsigned long)heap_start;
min_low_pfn = PFN_UP(__pa(text_start));
max_low_pfn = PFN_DOWN(__pa(heap_end));
pfn_base = min_low_pfn;
/* Round max_pfn up to a 4Mb boundary. The free_bootmem_node()
* call later makes sure to keep the rounded up pages marked reserved.
*/
max_pfn = max_low_pfn + ((1 << MAX_ORDER) - 1);
max_pfn &= ~((1 << MAX_ORDER) - 1);
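/*
* For example (illustrative values, assuming MAX_ORDER is 10): a
* max_low_pfn of 0x1ffff rounds up to a max_pfn of 0x20000.
*/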
start_pfn = PFN_UP(__pa(heap_start));
if (min_low_pfn & ((1 << MAX_ORDER) - 1)) {
/* Theoretically, we could expand the space that the
* bootmem allocator covers - much as we do for the
* 'high' address, and then tell the bootmem system
* that the lowest chunk is 'not available'. Right
* now it is just much easier to constrain the
* user to always MAX_ORDER align their kernel space.
*/
panic("Kernel must be %d byte aligned, currently at %#lx.",
1 << (MAX_ORDER + PAGE_SHIFT),
min_low_pfn << PAGE_SHIFT);
}
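/*
* For example, with MAX_ORDER of 10 and 4 KiB pages (an illustrative
* configuration), the check above requires the kernel to start on a
* 4 MiB (1 << 22 byte) boundary.
*/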
#ifdef CONFIG_HIGHMEM
highstart_pfn = highend_pfn = max_pfn;
high_memory = (void *) __va(PFN_PHYS(highstart_pfn));
#else
high_memory = (void *)__va(PFN_PHYS(max_pfn));
#endif
paging_init(heap_end);
setup_txprivext();
/* Set up the boot CPU's mapping. The rest will be set up below. */
cpu_2_hwthread_id[smp_processor_id()] = hard_processor_id();
hwthread_id_2_cpu[hard_processor_id()] = smp_processor_id();
unflatten_device_tree();
#ifdef CONFIG_SMP
smp_init_cpus();
#endif
if (machine_desc->init_early)
machine_desc->init_early();
}
static int __init customize_machine(void)
{
/* customizes platform devices, or adds new ones */
if (machine_desc->init_machine)
machine_desc->init_machine();
return 0;
}
arch_initcall(customize_machine);
static int __init init_machine_late(void)
{
if (machine_desc->init_late)
machine_desc->init_late();
return 0;
}
late_initcall(init_machine_late);
#ifdef CONFIG_PROC_FS
/*
* Get CPU information for use by the procfs.
*/
static const char *get_cpu_capabilities(unsigned int txenable)
{
#ifdef CONFIG_METAG_META21
/* See CORE_ID in META HTP.GP TRM - Architecture Overview 2.1.238 */
int coreid = metag_in32(METAC_CORE_ID);
unsigned int dsp_type = (coreid >> 3) & 7;
unsigned int fpu_type = (coreid >> 7) & 3;
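/* Illustrative decode: dsp_type 0 with fpu_type 2 gives 0x10, i.e. "EDSP+FPU". */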
switch (dsp_type | fpu_type << 3) {
case (0x00): return "EDSP";
case (0x01): return "DSP";
case (0x08): return "EDSP+LFPU";
case (0x09): return "DSP+LFPU";
case (0x10): return "EDSP+FPU";
case (0x11): return "DSP+FPU";
}
return "UNKNOWN";
#else
if (!(txenable & TXENABLE_CLASS_BITS))
return "DSP";
else
return "";
#endif
}
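/*
* Example /proc/cpuinfo output (illustrative values only; the exact
* figures depend on the core revision, clock and calibration):
*
*   CPU:            META 2.1 (thread 0)
*   Clocking:       400.0MHz
*   BogoMips:       199.47
*   Calibration:    997376 loops
*   Capabilities:   EDSP+FPU
*/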
static int show_cpuinfo(struct seq_file *m, void *v)
{
const char *cpu;
unsigned int txenable, thread_id, major, minor;
unsigned long clockfreq = get_coreclock();
#ifdef CONFIG_SMP
int i;
unsigned long lpj;
#endif
cpu = "META";
txenable = __core_reg_get(TXENABLE);
major = (txenable & TXENABLE_MAJOR_REV_BITS) >> TXENABLE_MAJOR_REV_S;
minor = (txenable & TXENABLE_MINOR_REV_BITS) >> TXENABLE_MINOR_REV_S;
thread_id = (txenable >> 8) & 0x3;
#ifdef CONFIG_SMP
for_each_online_cpu(i) {
lpj = per_cpu(cpu_data, i).loops_per_jiffy;
txenable = core_reg_read(TXUCT_ID, TXENABLE_REGNUM,
cpu_2_hwthread_id[i]);
seq_printf(m, "CPU:\t\t%s %d.%d (thread %d)\n"
"Clocking:\t%lu.%1luMHz\n"
"BogoMips:\t%lu.%02lu\n"
"Calibration:\t%lu loops\n"
"Capabilities:\t%s\n\n",
cpu, major, minor, i,
clockfreq / 1000000, (clockfreq / 100000) % 10,
lpj / (500000 / HZ), (lpj / (5000 / HZ)) % 100,
lpj,
get_cpu_capabilities(txenable));
}
#else
seq_printf(m, "CPU:\t\t%s %d.%d (thread %d)\n"
"Clocking:\t%lu.%1luMHz\n"
"BogoMips:\t%lu.%02lu\n"
"Calibration:\t%lu loops\n"
"Capabilities:\t%s\n",
cpu, major, minor, thread_id,
clockfreq / 1000000, (clockfreq / 100000) % 10,
loops_per_jiffy / (500000 / HZ),
(loops_per_jiffy / (5000 / HZ)) % 100,
loops_per_jiffy,
get_cpu_capabilities(txenable));
#endif /* CONFIG_SMP */
#ifdef CONFIG_METAG_L2C
if (meta_l2c_is_present()) {
seq_printf(m, "L2 cache:\t%s\n"
"L2 cache size:\t%d KB\n",
meta_l2c_is_enabled() ? "enabled" : "disabled",
meta_l2c_size() >> 10);
}
#endif
return 0;
}
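/*
* All of /proc/cpuinfo is produced by a single call to show_cpuinfo(), so
* c_start() returns a non-NULL token only for position 0 and c_next()
* always ends the iteration.
*/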
static void *c_start(struct seq_file *m, loff_t *pos)
{
return (void *)(*pos == 0);
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
return NULL;
}
static void c_stop(struct seq_file *m, void *v)
{
}
const struct seq_operations cpuinfo_op = {
.start = c_start,
.next = c_next,
.stop = c_stop,
.show = show_cpuinfo,
};
#endif /* CONFIG_PROC_FS */
void __init metag_start_kernel(char *args)
{
/* Zero the timer register so timestamps are from the point at
* which the kernel started running.
*/
__core_reg_set(TXTIMER, 0);
/* Clear the bss. */
memset(__bss_start, 0,
(unsigned long)__bss_stop - (unsigned long)__bss_start);
/* Remember the command line for use in setup_arch */
original_cmd_line = args;
current_thread_info()->cpu = hard_processor_id();
start_kernel();
}
/*
* Set up the TXPRIVEXT register to prevent userland from touching our
* precious registers.
*/
void setup_txprivext(void)
{
__core_reg_set(TXPRIVEXT, PRIV_BITS);
}
PTBI pTBI_get(unsigned int cpu)
{
return per_cpu(pTBI, cpu);
}
#if defined(CONFIG_METAG_DSP) && defined(CONFIG_METAG_FPU)
char capabilites[] = "dsp fpu";
#elif defined(CONFIG_METAG_DSP)
char capabilites[] = "dsp";
#elif defined(CONFIG_METAG_FPU)
char capabilites[] = "fpu";
#else
char capabilites[] = "";
#endif
static struct ctl_table caps_kern_table[] = {
{
.procname = "capabilities",
.data = capabilites,
.maxlen = sizeof(capabilites),
.mode = 0444,
.proc_handler = proc_dostring,
},
{}
};
static struct ctl_table caps_root_table[] = {
{
.procname = "kernel",
.mode = 0555,
.child = caps_kern_table,
},
{}
};
static int __init capabilities_register_sysctl(void)
{
struct ctl_table_header *caps_table_header;
caps_table_header = register_sysctl_table(caps_root_table);
if (!caps_table_header) {
pr_err("Unable to register CAPABILITIES sysctl\n");
return -ENOMEM;
}
return 0;
}
core_initcall(capabilities_register_sysctl);
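/*
* The capability string is then readable from userspace via sysctl, e.g.
* (output depends on the kernel configuration):
*
*   $ cat /proc/sys/kernel/capabilities
*   dsp fpu
*/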