Diffstat (limited to 'scripts')
-rw-r--r--  scripts/Makefile.lib             |  13
-rwxr-xr-x  scripts/decode_stacktrace.sh     |  51
-rw-r--r--  scripts/gdb/linux/kasan.py       |  44
-rw-r--r--  scripts/gdb/linux/proc.py        |   4
-rw-r--r--  scripts/gdb/linux/rbtree.py      |  12
-rw-r--r--  scripts/gdb/linux/stackdepot.py  |  27
-rw-r--r--  scripts/gdb/linux/timerlist.py   |  31
-rw-r--r--  scripts/gdb/vmlinux-gdb.py       |   1
-rwxr-xr-x  scripts/macro_checker.py         | 131
-rwxr-xr-x  scripts/xz_wrap.sh               | 158
10 files changed, 418 insertions, 54 deletions
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 207325eaf1d1..dae2089e7bc6 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -530,14 +530,17 @@ quiet_cmd_fit = FIT $@
# XZ
# ---------------------------------------------------------------------------
-# Use xzkern to compress the kernel image and xzmisc to compress other things.
+# Use xzkern or xzkern_with_size to compress the kernel image and xzmisc to
+# compress other things.
#
# xzkern uses a big LZMA2 dictionary since it doesn't increase memory usage
# of the kernel decompressor. A BCJ filter is used if it is available for
-# the target architecture. xzkern also appends uncompressed size of the data
-# using size_append. The .xz format has the size information available at
-# the end of the file too, but it's in more complex format and it's good to
-# avoid changing the part of the boot code that reads the uncompressed size.
+# the target architecture.
+#
+# xzkern_with_size also appends the uncompressed size of the data using
+# size_append. The .xz format has the size information available at the end
+# of the file too, but it's in a more complex format and it's good to avoid
+# changing the part of the boot code that reads the uncompressed size.
# Note that the bytes added by size_append will make the xz tool think that
# the file is corrupt. This is expected.
#
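The size_append mechanism described above is easy to model outside the build system. Below is a minimal Python sketch, not the kernel's actual shell helper, assuming a 4-byte little-endian size footer; it shows how a boot stub can read the uncompressed size from the tail of the file and why the extra bytes make the xz tool report corruption.

import lzma
import struct

def compress_with_size(data):
    # xz-compress, then append the uncompressed length as a raw footer.
    compressed = lzma.compress(data, format=lzma.FORMAT_XZ, check=lzma.CHECK_CRC32)
    return compressed + struct.pack("<I", len(data))

def read_appended_size(blob):
    # What the boot code conceptually does: read the last 4 bytes.
    return struct.unpack("<I", blob[-4:])[0]

payload = b"example kernel image " * 1000
blob = compress_with_size(payload)
assert read_appended_size(blob) == len(payload)
# The trailing bytes are not part of the .xz stream, which is why running
# "xz -t" on such a file reports corruption even though the data is intact.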
diff --git a/scripts/decode_stacktrace.sh b/scripts/decode_stacktrace.sh
index a0f50a5b4f7c..826836d264c6 100755
--- a/scripts/decode_stacktrace.sh
+++ b/scripts/decode_stacktrace.sh
@@ -1,11 +1,13 @@
-#!/bin/bash
+#!/usr/bin/env bash
# SPDX-License-Identifier: GPL-2.0
# (c) 2014, Sasha Levin <sasha.levin@oracle.com>
#set -x
usage() {
echo "Usage:"
- echo " $0 -r <release> | <vmlinux> [<base path>|auto] [<modules path>]"
+ echo " $0 -r <release>"
+ echo " $0 [<vmlinux> [<base_path>|auto [<modules_path>]]]"
+ echo " $0 -h"
}
# Try to find a Rust demangler
@@ -32,7 +34,10 @@ READELF=${UTIL_PREFIX}readelf${UTIL_SUFFIX}
ADDR2LINE=${UTIL_PREFIX}addr2line${UTIL_SUFFIX}
NM=${UTIL_PREFIX}nm${UTIL_SUFFIX}
-if [[ $1 == "-r" ]] ; then
+if [[ $1 == "-h" ]] ; then
+ usage
+ exit 0
+elif [[ $1 == "-r" ]] ; then
vmlinux=""
basepath="auto"
modpath=""
@@ -89,31 +94,32 @@ find_module() {
fi
fi
- if [[ "$modpath" != "" ]] ; then
- for fn in $(find "$modpath" -name "${module//_/[-_]}.ko*") ; do
- if ${READELF} -WS "$fn" | grep -qwF .debug_line ; then
- echo $fn
- return
- fi
- done
- return 1
- fi
-
- modpath=$(dirname "$vmlinux")
- find_module && return
-
- if [[ $release == "" ]] ; then
+ if [ -z $release ] ; then
release=$(gdb -ex 'print init_uts_ns.name.release' -ex 'quit' -quiet -batch "$vmlinux" 2>/dev/null | sed -n 's/\$1 = "\(.*\)".*/\1/p')
fi
+ if [ -n "${release}" ] ; then
+ release_dirs="/usr/lib/debug/lib/modules/$release /lib/modules/$release"
+ fi
- for dn in {/usr/lib/debug,}/lib/modules/$release ; do
- if [ -e "$dn" ] ; then
- modpath="$dn"
- find_module && return
+ found_without_debug_info=false
+ for dir in "$modpath" "$(dirname "$vmlinux")" ${release_dirs}; do
+ if [ -n "${dir}" ] && [ -e "${dir}" ]; then
+ for fn in $(find "$dir" -name "${module//_/[-_]}.ko*") ; do
+ if ${READELF} -WS "$fn" | grep -qwF .debug_line ; then
+ echo $fn
+ return
+ fi
+ found_without_debug_info=true
+ done
fi
done
- modpath=""
+ if [[ ${found_without_debug_info} == true ]]; then
+ echo "WARNING! No debugging info in module ${module}, rebuild with DEBUG_KERNEL and DEBUG_INFO" >&2
+ else
+ echo "WARNING! Cannot find .ko for module ${module}, please pass a valid module path" >&2
+ fi
+
return 1
}
@@ -131,7 +137,6 @@ parse_symbol() {
else
local objfile=$(find_module)
if [[ $objfile == "" ]] ; then
- echo "WARNING! Modules path isn't set, but is needed to parse this symbol" >&2
return
fi
if [[ $aarray_support == true ]]; then
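After this change, find_module() walks a fixed list of candidate directories instead of recursing into itself by reassigning modpath, and it distinguishes "module found but without debug info" from "module not found at all". The Python sketch below only models that lookup order; the helper names are illustrative and not part of the script.

import glob
import os
import subprocess

def has_debug_line(path):
    # Equivalent of: ${READELF} -WS "$fn" | grep -qwF .debug_line
    out = subprocess.run(["readelf", "-WS", path],
                         capture_output=True, text=True).stdout
    return ".debug_line" in out

def find_module(module, modpath, vmlinux, release):
    release_dirs = []
    if release:
        release_dirs = ["/usr/lib/debug/lib/modules/" + release,
                        "/lib/modules/" + release]
    candidates = [modpath, os.path.dirname(vmlinux)] + release_dirs
    pattern = module.replace("_", "[-_]")   # foo_bar.ko may be shipped as foo-bar.ko
    found_without_debug_info = False
    for d in candidates:
        if not d or not os.path.exists(d):
            continue
        for fn in glob.glob(os.path.join(d, "**", pattern + ".ko*"), recursive=True):
            if has_debug_line(fn):
                return fn
            found_without_debug_info = True
    if found_without_debug_info:
        print("WARNING! No debugging info in module " + module)
    else:
        print("WARNING! Cannot find .ko for module " + module)
    return None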
diff --git a/scripts/gdb/linux/kasan.py b/scripts/gdb/linux/kasan.py
new file mode 100644
index 000000000000..56730b3fde0b
--- /dev/null
+++ b/scripts/gdb/linux/kasan.py
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright 2024 Canonical Ltd.
+#
+# Authors:
+# Kuan-Ying Lee <kuan-ying.lee@canonical.com>
+#
+
+import gdb
+from linux import constants, mm
+
+def help():
+    t = """Usage: lx-kasan_mem_to_shadow [Hex memory addr]
+    Example:
+        lx-kasan_mem_to_shadow 0xffff000008eca008\n"""
+    gdb.write("Unrecognized command\n")
+    raise gdb.GdbError(t)
+
+class KasanMemToShadow(gdb.Command):
+    """Translate memory address to kasan shadow address"""
+
+    p_ops = None
+
+    def __init__(self):
+        if constants.LX_CONFIG_KASAN_GENERIC or constants.LX_CONFIG_KASAN_SW_TAGS:
+            super(KasanMemToShadow, self).__init__("lx-kasan_mem_to_shadow", gdb.COMMAND_SUPPORT)
+
+    def invoke(self, args, from_tty):
+        if not (constants.LX_CONFIG_KASAN_GENERIC or constants.LX_CONFIG_KASAN_SW_TAGS):
+            raise gdb.GdbError('CONFIG_KASAN_GENERIC or CONFIG_KASAN_SW_TAGS is not set')
+
+        argv = gdb.string_to_argv(args)
+        if len(argv) == 1:
+            if self.p_ops is None:
+                self.p_ops = mm.page_ops().ops
+            addr = int(argv[0], 16)
+            shadow_addr = self.kasan_mem_to_shadow(addr)
+            gdb.write('shadow addr: 0x%x\n' % shadow_addr)
+        else:
+            help()
+    def kasan_mem_to_shadow(self, addr):
+        return (addr >> self.p_ops.KASAN_SHADOW_SCALE_SHIFT) + self.p_ops.KASAN_SHADOW_OFFSET
+
+KasanMemToShadow()
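The command boils down to one arithmetic step: shift the address right by the shadow scale and add the shadow offset. A minimal stand-alone sketch, with placeholder constants since both values depend on the architecture and kernel config (the real command reads them from the page ops):

# Example values only: scale shift 3 matches generic KASAN (8 bytes of memory
# per shadow byte); the offset below is a placeholder, not a real config value.
KASAN_SHADOW_SCALE_SHIFT = 3
KASAN_SHADOW_OFFSET = 0xdfff800000000000      # placeholder, config-dependent

def kasan_mem_to_shadow(addr):
    return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET

addr = 0xffff000008eca008                      # the address from the command's help text
print(hex(kasan_mem_to_shadow(addr)))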
diff --git a/scripts/gdb/linux/proc.py b/scripts/gdb/linux/proc.py
index 43c687e7a69d..65dd1bd12964 100644
--- a/scripts/gdb/linux/proc.py
+++ b/scripts/gdb/linux/proc.py
@@ -18,6 +18,7 @@ from linux import utils
from linux import tasks
from linux import lists
from linux import vfs
+from linux import rbtree
from struct import *
@@ -172,8 +173,7 @@ values of that process namespace"""
gdb.write("{:^18} {:^15} {:>9} {} {} options\n".format(
"mount", "super_block", "devname", "pathname", "fstype"))
- for mnt in lists.list_for_each_entry(namespace['list'],
- mount_ptr_type, "mnt_list"):
+ for mnt in rbtree.rb_inorder_for_each_entry(namespace['mounts'], mount_ptr_type, "mnt_node"):
devname = mnt['mnt_devname'].string()
devname = devname if devname else "none"
diff --git a/scripts/gdb/linux/rbtree.py b/scripts/gdb/linux/rbtree.py
index fe462855eefd..fcbcc5f4153c 100644
--- a/scripts/gdb/linux/rbtree.py
+++ b/scripts/gdb/linux/rbtree.py
@@ -9,6 +9,18 @@ from linux import utils
rb_root_type = utils.CachedType("struct rb_root")
rb_node_type = utils.CachedType("struct rb_node")
+def rb_inorder_for_each(root):
+    def inorder(node):
+        if node:
+            yield from inorder(node['rb_left'])
+            yield node
+            yield from inorder(node['rb_right'])
+
+    yield from inorder(root['rb_node'])
+
+def rb_inorder_for_each_entry(root, gdbtype, member):
+    for node in rb_inorder_for_each(root):
+        yield utils.container_of(node, gdbtype, member)
def rb_first(root):
if root.type == rb_root_type.get_type():
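The new helper is an ordinary recursive in-order generator; the only gdb-specific parts are the gdb.Value field accesses and the container_of() call in rb_inorder_for_each_entry(). A toy stand-alone version, with a made-up Node type in place of gdb.Value, just to show the yield order:

from dataclasses import dataclass
from typing import Optional

@dataclass
class Node:
    key: int
    rb_left: Optional["Node"] = None
    rb_right: Optional["Node"] = None

def rb_inorder_for_each(root):
    def inorder(node):
        if node:
            yield from inorder(node.rb_left)   # left subtree first
            yield node                         # then the node itself
            yield from inorder(node.rb_right)  # then the right subtree
    yield from inorder(root)

root = Node(2, rb_left=Node(1), rb_right=Node(3))
assert [n.key for n in rb_inorder_for_each(root)] == [1, 2, 3]

proc.py's lx-mounts uses rb_inorder_for_each_entry() in exactly this way, walking namespace['mounts'] and turning each rb_node back into its struct mount. Being recursive, the helper is bounded by Python's recursion limit, which is ample for a balanced red-black tree.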
diff --git a/scripts/gdb/linux/stackdepot.py b/scripts/gdb/linux/stackdepot.py
index bb3a0f843931..37313a5a51a0 100644
--- a/scripts/gdb/linux/stackdepot.py
+++ b/scripts/gdb/linux/stackdepot.py
@@ -13,6 +13,13 @@ if constants.LX_CONFIG_STACKDEPOT:
stack_record_type = utils.CachedType('struct stack_record')
DEPOT_STACK_ALIGN = 4
+def help():
+    t = """Usage: lx-stack_depot_lookup [Hex handle value]
+    Example:
+        lx-stack_depot_lookup 0x00c80300\n"""
+    gdb.write("Unrecognized command\n")
+    raise gdb.GdbError(t)
+
def stack_depot_fetch(handle):
global DEPOT_STACK_ALIGN
global stack_record_type
@@ -57,3 +64,23 @@ def stack_depot_print(handle):
gdb.execute("x /i 0x%x" % (int(entries[i])))
except Exception as e:
gdb.write("%s\n" % e)
+
+class StackDepotLookup(gdb.Command):
+    """Search backtrace by handle"""
+
+    def __init__(self):
+        if constants.LX_CONFIG_STACKDEPOT:
+            super(StackDepotLookup, self).__init__("lx-stack_depot_lookup", gdb.COMMAND_SUPPORT)
+
+    def invoke(self, args, from_tty):
+        if not constants.LX_CONFIG_STACKDEPOT:
+            raise gdb.GdbError('CONFIG_STACKDEPOT is not set')
+
+        argv = gdb.string_to_argv(args)
+        if len(argv) == 1:
+            handle = int(argv[0], 16)
+            stack_depot_print(gdb.Value(handle).cast(utils.get_uint_type()))
+        else:
+            help()
+
+StackDepotLookup()
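For context, a stack depot handle is an opaque 32-bit value that packs a pool index together with an offset into that pool, stored in 1 << DEPOT_STACK_ALIGN byte units. The sketch below splits a handle under assumed field widths; the authoritative layout is union handle_parts in lib/stackdepot.c and has changed between kernel versions, so treat the bit positions as illustration only.

DEPOT_STACK_ALIGN = 4            # offsets are stored in 16-byte units (1 << 4)
ASSUMED_OFFSET_BITS = 10         # illustrative width only, not the real layout

def split_handle(handle):
    offset = (handle & ((1 << ASSUMED_OFFSET_BITS) - 1)) << DEPOT_STACK_ALIGN
    pool_index = handle >> ASSUMED_OFFSET_BITS
    return pool_index, offset

pool, off = split_handle(0x00c80300)   # handle from the command's help text
print("pool %d, byte offset %d" % (pool, off))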
diff --git a/scripts/gdb/linux/timerlist.py b/scripts/gdb/linux/timerlist.py
index 64bc87191003..98445671fe83 100644
--- a/scripts/gdb/linux/timerlist.py
+++ b/scripts/gdb/linux/timerlist.py
@@ -87,21 +87,22 @@ def print_cpu(hrtimer_bases, cpu, max_clock_bases):
text += "\n"
if constants.LX_CONFIG_TICK_ONESHOT:
- fmts = [(" .{} : {}", 'nohz_mode'),
- (" .{} : {} nsecs", 'last_tick'),
- (" .{} : {}", 'tick_stopped'),
- (" .{} : {}", 'idle_jiffies'),
- (" .{} : {}", 'idle_calls'),
- (" .{} : {}", 'idle_sleeps'),
- (" .{} : {} nsecs", 'idle_entrytime'),
- (" .{} : {} nsecs", 'idle_waketime'),
- (" .{} : {} nsecs", 'idle_exittime'),
- (" .{} : {} nsecs", 'idle_sleeptime'),
- (" .{}: {} nsecs", 'iowait_sleeptime'),
- (" .{} : {}", 'last_jiffies'),
- (" .{} : {}", 'next_timer'),
- (" .{} : {} nsecs", 'idle_expires')]
- text += "\n".join([s.format(f, ts[f]) for s, f in fmts])
+        TS_FLAG_STOPPED = 1 << 1
+        TS_FLAG_NOHZ = 1 << 4
+        text += f" .{'nohz':15s}: {int(bool(ts['flags'] & TS_FLAG_NOHZ))}\n"
+        text += f" .{'last_tick':15s}: {ts['last_tick']}\n"
+        text += f" .{'tick_stopped':15s}: {int(bool(ts['flags'] & TS_FLAG_STOPPED))}\n"
+        text += f" .{'idle_jiffies':15s}: {ts['idle_jiffies']}\n"
+        text += f" .{'idle_calls':15s}: {ts['idle_calls']}\n"
+        text += f" .{'idle_sleeps':15s}: {ts['idle_sleeps']}\n"
+        text += f" .{'idle_entrytime':15s}: {ts['idle_entrytime']} nsecs\n"
+        text += f" .{'idle_waketime':15s}: {ts['idle_waketime']} nsecs\n"
+        text += f" .{'idle_exittime':15s}: {ts['idle_exittime']} nsecs\n"
+        text += f" .{'idle_sleeptime':15s}: {ts['idle_sleeptime']} nsecs\n"
+        text += f" .{'iowait_sleeptime':15s}: {ts['iowait_sleeptime']} nsecs\n"
+        text += f" .{'last_jiffies':15s}: {ts['last_jiffies']}\n"
+        text += f" .{'next_timer':15s}: {ts['next_timer']}\n"
+        text += f" .{'idle_expires':15s}: {ts['idle_expires']} nsecs\n"
text += "\njiffies: {}\n".format(jiffies)
text += "\n"
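The rewritten block tracks a change in struct tick_sched: the nohz mode and tick-stopped state are now flag bits in a single 'flags' word rather than separate fields, so the script reconstructs them with bit tests. The same decoding in isolation, using the bit positions from the hunk above:

TS_FLAG_STOPPED = 1 << 1
TS_FLAG_NOHZ = 1 << 4

def decode_tick_flags(flags):
    # Mirror the bit tests done when formatting the timer list output.
    return {
        "nohz": int(bool(flags & TS_FLAG_NOHZ)),
        "tick_stopped": int(bool(flags & TS_FLAG_STOPPED)),
    }

assert decode_tick_flags(TS_FLAG_NOHZ) == {"nohz": 1, "tick_stopped": 0}
assert decode_tick_flags(TS_FLAG_NOHZ | TS_FLAG_STOPPED) == {"nohz": 1, "tick_stopped": 1}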
diff --git a/scripts/gdb/vmlinux-gdb.py b/scripts/gdb/vmlinux-gdb.py
index fc53cdf286f1..d4eeed4506fd 100644
--- a/scripts/gdb/vmlinux-gdb.py
+++ b/scripts/gdb/vmlinux-gdb.py
@@ -49,3 +49,4 @@ else:
import linux.page_owner
import linux.slab
import linux.vmalloc
+ import linux.kasan
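With the extra import in place, the command registers itself whenever vmlinux-gdb.py is sourced. A short usage sketch from gdb's Python prompt; the address is the one from the command's help text, and this only works inside gdb with a KASAN-enabled vmlinux loaded:

import gdb

# Run inside a gdb session that has loaded vmlinux and sourced
# scripts/gdb/vmlinux-gdb.py.
output = gdb.execute("lx-kasan_mem_to_shadow 0xffff000008eca008", to_string=True)
gdb.write(output)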
diff --git a/scripts/macro_checker.py b/scripts/macro_checker.py
new file mode 100755
index 000000000000..ba550982e98f
--- /dev/null
+++ b/scripts/macro_checker.py
@@ -0,0 +1,131 @@
+#!/usr/bin/python3
+# SPDX-License-Identifier: GPL-2.0
+# Author: Julian Sun <sunjunchao2870@gmail.com>
+
+""" Find macro definitions with unused parameters. """
+
+import argparse
+import os
+import re
+
+parser = argparse.ArgumentParser()
+
+parser.add_argument("path", type=str, help="The file or dir path that needs check")
+parser.add_argument("-v", "--verbose", action="store_true",
+                    help="Check conditional macros, but may lead to more false positives")
+args = parser.parse_args()
+
+macro_pattern = r"#define\s+(\w+)\(([^)]*)\)"
+# below vars were used to reduce false positives
+fp_patterns = [r"\s*do\s*\{\s*\}\s*while\s*\(\s*0\s*\)",
+               r"\(?0\)?", r"\(?1\)?"]
+correct_macros = []
+cond_compile_mark = "#if"
+cond_compile_end = "#endif"
+
+def check_macro(macro_line, report):
+    match = re.match(macro_pattern, macro_line)
+    if match:
+        macro_def = re.sub(macro_pattern, '', macro_line)
+        identifier = match.group(1)
+        content = match.group(2)
+        arguments = [item.strip() for item in content.split(',') if item.strip()]
+
+        macro_def = macro_def.strip()
+        if not macro_def:
+            return
+        # used to reduce false positives, like #define endfor_nexthops(rt) }
+        if len(macro_def) == 1:
+            return
+
+        for fp_pattern in fp_patterns:
+            if (re.match(fp_pattern, macro_def)):
+                return
+
+        for arg in arguments:
+            # used to reduce false positives
+            if "..." in arg:
+                return
+        for arg in arguments:
+            if not arg in macro_def and report == False:
+                return
+            # if there is a correct macro with the same name, do not report it.
+            if not arg in macro_def and identifier not in correct_macros:
+                print(f"Argument {arg} is not used in function-like macro {identifier}")
+                return
+
+        correct_macros.append(identifier)
+
+
+# remove comment and whitespace
+def macro_strip(macro):
+    comment_pattern1 = r"\/\/*"
+    comment_pattern2 = r"\/\**\*\/"
+
+    macro = macro.strip()
+    macro = re.sub(comment_pattern1, '', macro)
+    macro = re.sub(comment_pattern2, '', macro)
+
+    return macro
+
+def file_check_macro(file_path, report):
+    # number of conditional compiling
+    cond_compile = 0
+    # only check .c and .h file
+    if not file_path.endswith(".c") and not file_path.endswith(".h"):
+        return
+
+    with open(file_path, "r") as f:
+        while True:
+            line = f.readline()
+            if not line:
+                break
+            line = line.strip()
+            if line.startswith(cond_compile_mark):
+                cond_compile += 1
+                continue
+            if line.startswith(cond_compile_end):
+                cond_compile -= 1
+                continue
+
+            macro = re.match(macro_pattern, line)
+            if macro:
+                macro = macro_strip(macro.string)
+                while macro[-1] == '\\':
+                    macro = macro[0:-1]
+                    macro = macro.strip()
+                    macro += f.readline()
+                    macro = macro_strip(macro)
+                if not args.verbose:
+                    if file_path.endswith(".c") and cond_compile != 0:
+                        continue
+                    # 1 is for #ifdef xxx at the beginning of the header file
+                    if file_path.endswith(".h") and cond_compile != 1:
+                        continue
+                check_macro(macro, report)
+
+def get_correct_macros(path):
+    file_check_macro(path, False)
+
+def dir_check_macro(dir_path):
+
+    for dentry in os.listdir(dir_path):
+        path = os.path.join(dir_path, dentry)
+        if os.path.isdir(path):
+            dir_check_macro(path)
+        elif os.path.isfile(path):
+            get_correct_macros(path)
+            file_check_macro(path, True)
+
+
+def main():
+    if os.path.isfile(args.path):
+        get_correct_macros(args.path)
+        file_check_macro(args.path, True)
+    elif os.path.isdir(args.path):
+        dir_check_macro(args.path)
+    else:
+        print(f"{args.path} doesn't exist or is neither a file nor a dir")
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
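A quick way to see what the checker reports: feed it a header containing a function-like macro whose parameter is never used. The snippet below is only a demo harness; it writes a throwaway header and invokes the script on it, and it assumes it is run from the top of a kernel tree. The unused 'y' argument of ADD() should be flagged, while the include-guard #define is ignored because it has no parameter list.

import subprocess
import tempfile

SAMPLE = """\
#ifndef SAMPLE_H
#define SAMPLE_H
#define ADD(x, y) ((x) + (x))
#endif
"""

with tempfile.NamedTemporaryFile("w", suffix=".h", delete=False) as f:
    f.write(SAMPLE)
    sample_path = f.name

# Path assumes this is run from the top of a kernel tree.
subprocess.run(["./scripts/macro_checker.py", sample_path], check=False)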
diff --git a/scripts/xz_wrap.sh b/scripts/xz_wrap.sh
index d06baf626abe..f19369687030 100755
--- a/scripts/xz_wrap.sh
+++ b/scripts/xz_wrap.sh
@@ -1,22 +1,162 @@
#!/bin/sh
+# SPDX-License-Identifier: 0BSD
#
# This is a wrapper for xz to compress the kernel image using appropriate
# compression options depending on the architecture.
#
# Author: Lasse Collin <lasse.collin@tukaani.org>
+
+# This has specialized settings for the following archs. However, an
+# XZ-compressed kernel isn't currently supported on every listed arch.
#
-# This file has been put into the public domain.
-# You can do whatever you want with this file.
-#
+# Arch Align Notes
+# arm 2/4 ARM and ARM-Thumb2
+# arm64 4
+# csky 2
+# loongarch 4
+# mips 2/4 MicroMIPS is 2-byte aligned
+# parisc 4
+# powerpc 4 Uses its own wrapper for compressors instead of this.
+# riscv 2/4
+# s390 2
+# sh 2
+# sparc 4
+# x86 1
+
+# A few archs use 2-byte or 4-byte aligned instructions depending on
+# the kernel config. This function is used to check if the relevant
+# config option is set to "y".
+is_enabled()
+{
+ grep -q "^$1=y$" include/config/auto.conf
+}
+
+# XZ_VERSION is needed to disable features that aren't available in
+# old XZ Utils versions.
+XZ_VERSION=$($XZ --robot --version) || exit
+XZ_VERSION=$(printf '%s\n' "$XZ_VERSION" | sed -n 's/^XZ_VERSION=//p')
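The magic numbers compared against $XZ_VERSION below come from the integer encoding that xz --robot --version prints: major * 10000000 + minor * 10000 + patch * 10 + stability, where stability 2 means a stable release. A small decoder, just to make the two constants used below readable:

def decode_xz_version(v):
    major, rest = divmod(v, 10000000)
    minor, rest = divmod(rest, 10000)
    patch, stability = divmod(rest, 10)
    suffix = {0: "alpha", 1: "beta", 2: ""}[stability]
    return "%d.%d.%d%s" % (major, minor, patch, "-" + suffix if suffix else "")

assert decode_xz_version(50040002) == "5.4.0"   # ARM64 filter requirement below
assert decode_xz_version(50060002) == "5.6.0"   # RISC-V filter requirement below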
+# Assume that no BCJ filter is available.
BCJ=
-LZMA2OPTS=
+# Set the instruction alignment to 1, 2, or 4 bytes.
+#
+# Set the BCJ filter if one is available.
+# It must match the #ifdef usage in lib/decompress_unxz.c.
case $SRCARCH in
- x86) BCJ=--x86 ;;
- powerpc) BCJ=--powerpc ;;
- arm) BCJ=--arm ;;
- sparc) BCJ=--sparc ;;
+ arm)
+ if is_enabled CONFIG_THUMB2_KERNEL; then
+ ALIGN=2
+ BCJ=--armthumb
+ else
+ ALIGN=4
+ BCJ=--arm
+ fi
+ ;;
+
+ arm64)
+ ALIGN=4
+
+ # ARM64 filter was added in XZ Utils 5.4.0.
+ if [ "$XZ_VERSION" -ge 50040002 ]; then
+ BCJ=--arm64
+ else
+ echo "$0: Upgrading to xz >= 5.4.0" \
+ "would enable the ARM64 filter" \
+ "for better compression" >&2
+ fi
+ ;;
+
+ csky)
+ ALIGN=2
+ ;;
+
+ loongarch)
+ ALIGN=4
+ ;;
+
+ mips)
+ if is_enabled CONFIG_CPU_MICROMIPS; then
+ ALIGN=2
+ else
+ ALIGN=4
+ fi
+ ;;
+
+ parisc)
+ ALIGN=4
+ ;;
+
+ powerpc)
+ ALIGN=4
+
+ # The filter is only for big endian instruction encoding.
+ if is_enabled CONFIG_CPU_BIG_ENDIAN; then
+ BCJ=--powerpc
+ fi
+ ;;
+
+ riscv)
+ if is_enabled CONFIG_RISCV_ISA_C; then
+ ALIGN=2
+ else
+ ALIGN=4
+ fi
+
+ # RISC-V filter was added in XZ Utils 5.6.0.
+ if [ "$XZ_VERSION" -ge 50060002 ]; then
+ BCJ=--riscv
+ else
+ echo "$0: Upgrading to xz >= 5.6.0" \
+ "would enable the RISC-V filter" \
+ "for better compression" >&2
+ fi
+ ;;
+
+ s390)
+ ALIGN=2
+ ;;
+
+ sh)
+ ALIGN=2
+ ;;
+
+ sparc)
+ ALIGN=4
+ BCJ=--sparc
+ ;;
+
+ x86)
+ ALIGN=1
+ BCJ=--x86
+ ;;
+
+ *)
+ echo "$0: Arch-specific tuning is missing for '$SRCARCH'" >&2
+
+ # Guess 2-byte-aligned instructions. Guessing too low
+ # should hurt less than guessing too high.
+ ALIGN=2
+ ;;
+esac
+
+# Select the LZMA2 options matching the instruction alignment.
+case $ALIGN in
+ 1) LZMA2OPTS= ;;
+ 2) LZMA2OPTS=lp=1 ;;
+ 4) LZMA2OPTS=lp=2,lc=2 ;;
+ *) echo "$0: ALIGN wrong or missing" >&2; exit 1 ;;
esac
-exec $XZ --check=crc32 $BCJ --lzma2=$LZMA2OPTS,dict=32MiB
+# Use single-threaded mode because it compresses a little better
+# (and uses less RAM) than multithreaded mode.
+#
+# For the best compression, the dictionary size shouldn't be
+# smaller than the uncompressed kernel. 128 MiB dictionary
+# needs less than 1400 MiB of RAM in single-threaded mode.
+#
+# On the archs that use this script to compress the kernel,
+# decompression in the preboot code is done in single-call mode.
+# Thus the dictionary size doesn't affect the memory requirements
+# of the preboot decompressor at all.
+exec $XZ --check=crc32 --threads=1 $BCJ --lzma2=$LZMA2OPTS,dict=128MiB
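The alignment-to-LZMA2 mapping at the end is the core of the tuning: lp tells the encoder that literals repeat with a 2^lp-byte period, and lc is lowered on 4-byte archs because liblzma rejects lc + lp > 4. A stand-alone sanity check of that mapping (the dictionary size is only echoed for context):

def lzma2_opts_for_align(align):
    # Same table as the case statement above.
    return {1: "", 2: "lp=1", 4: "lp=2,lc=2"}[align]

for align in (1, 2, 4):
    opts = lzma2_opts_for_align(align)
    lc = 2 if "lc=2" in opts else 3          # liblzma's default lc is 3
    lp = align.bit_length() - 1              # 1 -> 0, 2 -> 1, 4 -> 2
    assert lc + lp <= 4, "liblzma rejects lc + lp > 4"
    print("align=%d: --lzma2=%sdict=128MiB" % (align, opts + "," if opts else ""))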