From 72624a5f19d44046534cfefbedb5d47c9805b1c2 Mon Sep 17 00:00:00 2001
From: Suraj Jitindar Singh
Date: Wed, 6 Oct 2021 12:41:16 -0700
Subject: [PATCH 01/19] kpatch-build: Add sym->has_func_profiling support for
 aarch64

The "has_func_profiling" field in the symbol struct indicates that a
function symbol can be patched. This is needed to verify that the
functions which need to be patched actually can be.

On arm64 this means the presence of 2 NOP instructions at function
entry, which ftrace patches to call the ftrace handling code. These 2
NOPs are inserted by the compiler, and their location is recorded in a
section called "__patchable_function_entries". Check whether a symbol
has a corresponding entry in the "__patchable_function_entries" section
and if so mark it as "has_func_profiling".

Signed-off-by: Suraj Jitindar Singh

---

V1->V2:
 - Make error message standard across architectures when no patchable
   entry
 - Don't store __patchable_function_entries section in
   kpatch_find_func_profiling_calls(), instead find it each time
---
 kpatch-build/create-diff-object.c | 20 +++++++++++++++++++-
 kpatch-build/kpatch-elf.c         |  3 +++
 kpatch-build/kpatch-elf.h         |  1 +
 3 files changed, 23 insertions(+), 1 deletion(-)

diff --git a/kpatch-build/create-diff-object.c b/kpatch-build/create-diff-object.c
index 25710e921..e8ec4d121 100644
--- a/kpatch-build/create-diff-object.c
+++ b/kpatch-build/create-diff-object.c
@@ -1694,7 +1694,7 @@ static void kpatch_check_func_profiling_calls(struct kpatch_elf *kelf)
 		    (sym->parent && sym->parent->status == CHANGED))
 			continue;
 		if (!sym->twin->has_func_profiling) {
-			log_normal("function %s has no fentry/mcount call, unable to patch\n",
+			log_normal("function %s doesn't have patchable function entry, unable to patch\n",
 				   sym->name);
 			errs++;
 		}
@@ -3957,6 +3957,24 @@ static void kpatch_find_func_profiling_calls(struct kpatch_elf *kelf)
 			continue;
 
 		switch(kelf->arch) {
+		case AARCH64: {
+			struct section *sec = find_section_by_name(&kelf->sections,
+								   "__patchable_function_entries");
+			/*
+			 * If we can't find the __patchable_function_entries section or
+			 * there are no relocations in it then not patchable. 
+ */ + if (!sec || !sec->rela) + return; + list_for_each_entry(rela, &sec->rela->relas, list) { + if (rela->sym->sec && sym->sec == rela->sym->sec) { + sym->has_func_profiling = 1; + break; + } + } + + break; + } case PPC64: list_for_each_entry(rela, &sym->sec->rela->relas, list) { if (!strcmp(rela->sym->name, "_mcount")) { diff --git a/kpatch-build/kpatch-elf.c b/kpatch-build/kpatch-elf.c index 374d424cc..a01bde993 100644 --- a/kpatch-build/kpatch-elf.c +++ b/kpatch-build/kpatch-elf.c @@ -584,6 +584,9 @@ struct kpatch_elf *kpatch_elf_open(const char *name) if (!gelf_getehdr(kelf->elf, &ehdr)) ERROR("gelf_getehdr"); switch (ehdr.e_machine) { + case EM_AARCH64: + kelf->arch = AARCH64; + break; case EM_PPC64: kelf->arch = PPC64; break; diff --git a/kpatch-build/kpatch-elf.h b/kpatch-build/kpatch-elf.h index e32209b72..c1aed183b 100644 --- a/kpatch-build/kpatch-elf.h +++ b/kpatch-build/kpatch-elf.h @@ -115,6 +115,7 @@ enum architecture { PPC64 = 0x1 << 0, X86_64 = 0x1 << 1, S390 = 0x1 << 2, + AARCH64 = 0x1 << 3, }; struct kpatch_elf { From 4e3d05fdeff9e4a97d7633f367bb3dfa374a5794 Mon Sep 17 00:00:00 2001 From: Pete Swain Date: Fri, 28 Jul 2023 16:33:23 -0700 Subject: [PATCH 02/19] create-diff-object: Split kpatch_create_mcount_sections into alloc and populate The function kpatch_create_mcount_sections() allocates the __mcount_loc section and then populates it with functions which have a patchable entry. The following patch will add aarch64 support to this function where the allocation will have to be done before the kelf_patched is torn down. Thus split this function so that the allocation can be performed earlier and the populating as before. No intended functional change. Signed-off-by: Suraj Jitindar Singh --- V1->V2: - Add patch to series --- kpatch-build/create-diff-object.c | 31 ++++++++++++++++++++++--------- test/unit/objs | 2 +- 2 files changed, 23 insertions(+), 10 deletions(-) diff --git a/kpatch-build/create-diff-object.c b/kpatch-build/create-diff-object.c index e8ec4d121..b4af927cc 100644 --- a/kpatch-build/create-diff-object.c +++ b/kpatch-build/create-diff-object.c @@ -3676,6 +3676,21 @@ static void kpatch_create_callbacks_objname_rela(struct kpatch_elf *kelf, char * } } +static void kpatch_alloc_mcount_sections(struct kpatch_elf *kelf, struct kpatch_elf *kelfout) +{ + int nr; + struct symbol *sym; + + nr = 0; + list_for_each_entry(sym, &kelfout->symbols, list) + if (sym->type == STT_FUNC && sym->status != SAME && + sym->has_func_profiling) + nr++; + + /* create text/rela section pair */ + create_section_pair(kelfout, "__mcount_loc", sizeof(void *), nr); +} + /* * This function basically reimplements the functionality of the Linux * recordmcount script, so that patched functions can be recognized by ftrace. @@ -3683,7 +3698,7 @@ static void kpatch_create_callbacks_objname_rela(struct kpatch_elf *kelf, char * * TODO: Eventually we can modify recordmount so that it recognizes our bundled * sections as valid and does this work for us. 
 */
-static void kpatch_create_mcount_sections(struct kpatch_elf *kelf)
+static void kpatch_populate_mcount_sections(struct kpatch_elf *kelf)
 {
 	int nr, index;
 	struct section *sec, *relasec;
@@ -3692,15 +3707,10 @@ static void kpatch_create_mcount_sections(struct kpatch_elf *kelf)
 	void **funcs;
 	unsigned long insn_offset = 0;
 
-	nr = 0;
-	list_for_each_entry(sym, &kelf->symbols, list)
-		if (sym->type == STT_FUNC && sym->status != SAME &&
-		    sym->has_func_profiling)
-			nr++;
 
-	/* create text/rela section pair */
-	sec = create_section_pair(kelf, "__mcount_loc", sizeof(void*), nr);
+	sec = find_section_by_name(&kelf->sections, "__mcount_loc");
 	relasec = sec->rela;
+	nr = (int) (sec->data->d_size / sizeof(void *));
 
 	/* populate sections */
 	index = 0;

diff --git a/test/unit/objs b/test/unit/objs
index a51c80a60..31f16a29c 160000
--- a/test/unit/objs
+++ b/test/unit/objs
@@ -1 +1 @@
-Subproject commit a51c80a60fc8ade7e7ec8ad875b2963f3a15a494
+Subproject commit 31f16a29c6c3dc9ac101d8ca780723a6667c219e

From 90a5bddd9f193127d21558117967969ae4b14ffb Mon Sep 17 00:00:00 2001
From: Pete Swain
Date: Mon, 9 Jan 2023 02:15:58 -0800
Subject: [PATCH 03/19] create-diff-object: Create __patchable_function_entries
 section for aarch64

The __mcount_loc section contains the addresses of patchable ftrace
sites, which the ftrace infrastructure in the kernel uses to create a
list of traceable functions and to know where to patch to enable
tracing of them. On aarch64 this section is called
__patchable_function_entries and is generated by the compiler. Either
__mcount_loc or __patchable_function_entries is recognised by the
kernel, but for aarch64 use __patchable_function_entries as it is what
the kernel expects.

Add aarch64 support to kpatch_alloc_mcount_sections(). The
SHF_LINK_ORDER section flag must be copied so that it matches, to
avoid the following:
ld: __patchable_function_entries has both ordered [...] and unordered [...] sections

Add aarch64 support to kpatch_populate_mcount_sections(). Check for
the 2 required NOP instructions on function entry, which may be
preceded by a BTI C instruction depending on whether the function is a
leaf function. This determines the offset of the patch site.
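
For illustration, a patchable function entry with BTI then looks like
this (same encodings as in the ASSERT_RTNL disassembly later in this
series; offsets are from the function start):

   0: d503245f  bti  c    // present only when BTI is in effect
   4: d503201f  nop       // patch site recorded in
   8: d503201f  nop       // __patchable_function_entries

so the patch site offset is 4 when the BTI landing pad is present and
0 otherwise.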
Signed-off-by: Suraj Jitindar Singh --- V1->V2: - Don't preserve the __patchable_function_entries section from the patched elf as this is already verified by kpatch_check_func_profiling_calls() - Instead get the patch entry offset by checking for a preceding BTI C instr - Copy the section flags for __patchable_function_entries --- rebased, added sh_link fix from Suraj's later commit "kpatch-build: Enable ARM64 support" Signed-off-by: Pete Swain --- kpatch-build/create-diff-object.c | 75 +++++++++++++++++++++++++++++-- 1 file changed, 72 insertions(+), 3 deletions(-) diff --git a/kpatch-build/create-diff-object.c b/kpatch-build/create-diff-object.c index b4af927cc..d2934cdd5 100644 --- a/kpatch-build/create-diff-object.c +++ b/kpatch-build/create-diff-object.c @@ -3676,6 +3676,11 @@ static void kpatch_create_callbacks_objname_rela(struct kpatch_elf *kelf, char * } } +/* + * Allocate the mcount/patchable_function_entry sections which must be done + * before the patched object is torn down so that the section flags can be + * copied. + */ static void kpatch_alloc_mcount_sections(struct kpatch_elf *kelf, struct kpatch_elf *kelfout) { int nr; @@ -3688,10 +3693,36 @@ static void kpatch_alloc_mcount_sections(struct kpatch_elf *kelf, struct kpatch_ nr++; /* create text/rela section pair */ - create_section_pair(kelfout, "__mcount_loc", sizeof(void *), nr); + switch(kelf->arch) { + case AARCH64: { + struct section *sec, *tmp; + + sec = create_section_pair(kelfout, "__patchable_function_entries", sizeof(void *), nr); + + /* + * Depending on the compiler the __patchable_function_entries section + * can be ordered or not, copy this flag to the section we created to + * avoid: + * ld: __patchable_function_entries has both ordered [...] and unordered [...] sections + */ + tmp = find_section_by_name(&kelf->sections, "__patchable_function_entries"); + sec->sh.sh_flags |= (tmp->sh.sh_flags & SHF_LINK_ORDER); + sec->sh.sh_link = 1; + break; + } + case PPC64: + case X86_64: + case S390: + create_section_pair(kelfout, "__mcount_loc", sizeof(void *), nr); + break; + default: + ERROR("unsupported arch\n"); + } } /* + * Populate the mcount sections allocated by kpatch_alloc_mcount_sections() + * previously. * This function basically reimplements the functionality of the Linux * recordmcount script, so that patched functions can be recognized by ftrace. * @@ -3707,8 +3738,18 @@ static void kpatch_populate_mcount_sections(struct kpatch_elf *kelf) void **funcs; unsigned long insn_offset = 0; - - sec = find_section_by_name(&kelf->sections, "__mcount_loc"); + switch(kelf->arch) { + case AARCH64: + sec = find_section_by_name(&kelf->sections, "__patchable_function_entries"); + break; + case PPC64: + case X86_64: + case S390: + sec = find_section_by_name(&kelf->sections, "__mcount_loc"); + break; + default: + ERROR("unsupported arch\n"); + } relasec = sec->rela; nr = (int) (sec->data->d_size / sizeof(void *)); @@ -3725,6 +3766,34 @@ static void kpatch_populate_mcount_sections(struct kpatch_elf *kelf) } switch(kelf->arch) { + case AARCH64: { + unsigned char *insn; + int i; + + insn = sym->sec->data->d_buf; + insn_offset = 0; + + /* + * If BTI (Branch Target Identification) is enabled then there + * might be an additional 'BTI C' instruction before the two + * patchable function entry 'NOP's. + * i.e. 
0xd503245f (little endian) + */ + if (insn[0] == 0x5f) { + if (insn[1] != 0x24 || insn[2] != 0x03 || insn[3] != 0xd5) + ERROR("%s: unexpected instruction in patch section of function\n", sym->name); + insn_offset += 4; + insn += 4; + } + for (i = 0; i < 8; i += 4) { + /* We expect a NOP i.e. 0xd503201f (little endian) */ + if (insn[i] != 0x1f || insn[i + 1] != 0x20 || + insn[i + 2] != 0x03 || insn [i + 3] != 0xd5) + ERROR("%s: unexpected instruction in patch section of function\n", sym->name); + } + + break; + } case PPC64: { bool found = false; From 5dfe85a84109efc0d995ec1983978f66212b846b Mon Sep 17 00:00:00 2001 From: Pete Swain Date: Wed, 21 Dec 2022 13:38:59 -0800 Subject: [PATCH 04/19] kpatch-build: Enable ARM64 support Add the final support required for aarch64 and enable building on that arch. Signed-off-by: Suraj Jitindar Singh --- V1->V2: - Add # shellcheck disable=SC2086 - Add comment to kpatch_is_mapping_symbol() --- README.md | 2 +- kpatch-build/Makefile | 2 +- kpatch-build/create-diff-object.c | 74 +++++++++++++++++++++++++------ kpatch-build/kpatch-build | 3 ++ kpatch-build/kpatch-elf.c | 3 ++ 5 files changed, 69 insertions(+), 15 deletions(-) diff --git a/README.md b/README.md index 75995fe8c..78fd14bc4 100644 --- a/README.md +++ b/README.md @@ -40,7 +40,7 @@ Supported Architectures - [x] x86-64 - [x] ppc64le -- [ ] arm64 +- [x] arm64 - [x] s390 [upstream prerequisites](doc/s390-upstream-prerequisites.md) Installation diff --git a/kpatch-build/Makefile b/kpatch-build/Makefile index bebf3cd96..7fb223138 100644 --- a/kpatch-build/Makefile +++ b/kpatch-build/Makefile @@ -22,7 +22,7 @@ PLUGIN_CFLAGS := $(filter-out -Wconversion, $(CFLAGS)) PLUGIN_CFLAGS += -shared -I$(GCC_PLUGINS_DIR)/include \ -Igcc-plugins -fPIC -fno-rtti -O2 -Wall endif -ifeq ($(filter $(ARCH),s390x x86_64 ppc64le),) +ifeq ($(filter $(ARCH),aarch64 s390x x86_64 ppc64le),) $(error Unsupported architecture ${ARCH}, check https://github.com/dynup/kpatch/#supported-architectures) endif diff --git a/kpatch-build/create-diff-object.c b/kpatch-build/create-diff-object.c index d2934cdd5..e2a56d3eb 100644 --- a/kpatch-build/create-diff-object.c +++ b/kpatch-build/create-diff-object.c @@ -173,6 +173,8 @@ static bool is_gcc6_localentry_bundled_sym(struct kpatch_elf *kelf, struct symbol *sym) { switch(kelf->arch) { + case AARCH64: + return false; case PPC64: return ((PPC64_LOCAL_ENTRY_OFFSET(sym->sym.st_other) != 0) && sym->sym.st_value == 8); @@ -228,6 +230,24 @@ static struct rela *toc_rela(const struct rela *rela) (unsigned int)rela->addend); } +/* + * Mapping symbols are used to mark and label the transitions between code and + * data in elf files. They begin with a "$" dollar symbol. Don't correlate them + * as they often all have the same name either "$x" to mark the start of code + * or "$d" to mark the start of data. + */ +static bool kpatch_is_mapping_symbol(struct kpatch_elf *kelf, struct symbol *sym) +{ + if (kelf->arch != AARCH64) + return false; + + if (sym->name && sym->name[0] == '$' && + sym->type == STT_NOTYPE && + sym->bind == STB_LOCAL) + return true; + return false; +} + /* * When compiling with -ffunction-sections and -fdata-sections, almost every * symbol gets its own dedicated section. 
We call such symbols "bundled" @@ -622,6 +642,13 @@ static void kpatch_compare_correlated_section(struct section *sec) goto out; } + /* As above but for __p_f_e users like aarch64 */ + if (!strcmp(sec->name, ".rela__patchable_function_entries") || + !strcmp(sec->name, "__patchable_function_entries")) { + sec->status = SAME; + goto out; + } + if (sec1->sh.sh_size != sec2->sh.sh_size || sec1->data->d_size != sec2->data->d_size || (sec1->rela && !sec2->rela) || @@ -733,7 +760,7 @@ static bool insn_is_load_immediate(struct kpatch_elf *kelf, void *addr) * 51b: e8 00 00 00 00 callq 520 * 51c: R_X86_64_PC32 ___might_sleep-0x4 */ -static bool kpatch_line_macro_change_only(struct kpatch_elf *kelf, +static bool _kpatch_line_macro_change_only(struct kpatch_elf *kelf, struct section *sec) { unsigned long offset, insn1_len, insn2_len; @@ -832,6 +859,23 @@ static bool kpatch_line_macro_change_only(struct kpatch_elf *kelf, return true; } +static bool kpatch_line_macro_change_only(struct kpatch_elf *kelf, + struct section *sec) +{ + switch(kelf->arch) { + case AARCH64: + /* TODO */ + return false; + case PPC64: + case S390: + case X86_64: + return _kpatch_line_macro_change_only(kelf, sec); + default: + ERROR("unsupported arch"); + } + return false; +} + /* * Child functions with "*.cold" names don't have _fentry_ calls, but "*.part", * often do. In the later case, it is not necessary to include the parent @@ -1069,15 +1113,15 @@ static void kpatch_correlate_sections(struct list_head *seclist_orig, } } -static void kpatch_correlate_symbols(struct list_head *symlist_orig, - struct list_head *symlist_patched) +static void kpatch_correlate_symbols(struct kpatch_elf *kelf_orig, + struct kpatch_elf *kelf_patched) { struct symbol *sym_orig, *sym_patched; - list_for_each_entry(sym_orig, symlist_orig, list) { + list_for_each_entry(sym_orig, &kelf_orig->symbols, list) { if (sym_orig->twin) continue; - list_for_each_entry(sym_patched, symlist_patched, list) { + list_for_each_entry(sym_patched, &kelf_patched->symbols, list) { if (kpatch_mangled_strcmp(sym_orig->name, sym_patched->name) || sym_orig->type != sym_patched->type || sym_patched->twin) continue; @@ -1097,6 +1141,9 @@ static void kpatch_correlate_symbols(struct list_head *symlist_orig, !strncmp(sym_orig->name, ".LC", 3)) continue; + if (kpatch_is_mapping_symbol(kelf_orig, sym_orig)) + continue; + /* group section symbols must have correlated sections */ if (sym_orig->sec && sym_orig->sec->sh.sh_type == SHT_GROUP && @@ -1502,7 +1549,7 @@ static void kpatch_correlate_elfs(struct kpatch_elf *kelf_orig, struct kpatch_elf *kelf_patched) { kpatch_correlate_sections(&kelf_orig->sections, &kelf_patched->sections); - kpatch_correlate_symbols(&kelf_orig->symbols, &kelf_patched->symbols); + kpatch_correlate_symbols(kelf_orig, kelf_patched); } static void kpatch_compare_correlated_elements(struct kpatch_elf *kelf) @@ -1618,7 +1665,8 @@ static void kpatch_replace_sections_syms(struct kpatch_elf *kelf) if (is_text_section(relasec->base) && !is_text_section(sym->sec) && - rela->type == R_X86_64_32S && + (rela->type == R_X86_64_32S || + rela->type == R_AARCH64_ABS64) && rela->addend == (long)sym->sec->sh.sh_size && end == (long)sym->sec->sh.sh_size) { @@ -2415,28 +2463,28 @@ static bool static_call_sites_group_filter(struct lookup_table *lookup, static struct special_section special_sections[] = { { .name = "__bug_table", - .arch = X86_64 | PPC64 | S390, + .arch = AARCH64 | X86_64 | PPC64 | S390, .group_size = bug_table_group_size, }, { .name = ".fixup", - .arch = X86_64 | 
PPC64 | S390, + .arch = AARCH64 | X86_64 | PPC64 | S390, .group_size = fixup_group_size, }, { .name = "__ex_table", /* must come after .fixup */ - .arch = X86_64 | PPC64 | S390, + .arch = AARCH64 | X86_64 | PPC64 | S390, .group_size = ex_table_group_size, }, { .name = "__jump_table", - .arch = X86_64 | PPC64 | S390, + .arch = AARCH64 | X86_64 | PPC64 | S390, .group_size = jump_table_group_size, .group_filter = jump_table_group_filter, }, { .name = ".printk_index", - .arch = X86_64 | PPC64 | S390, + .arch = AARCH64 | X86_64 | PPC64 | S390, .group_size = printk_index_group_size, }, { @@ -2451,7 +2499,7 @@ static struct special_section special_sections[] = { }, { .name = ".altinstructions", - .arch = X86_64 | S390, + .arch = AARCH64 | X86_64 | S390, .group_size = altinstructions_group_size, }, { diff --git a/kpatch-build/kpatch-build b/kpatch-build/kpatch-build index d01a8d987..e639aaa33 100755 --- a/kpatch-build/kpatch-build +++ b/kpatch-build/kpatch-build @@ -339,6 +339,9 @@ find_special_section_data() { # Arch-specific features case "$ARCH" in + "aarch64") + check[a]=true # alt_instr + ;; "x86_64") check[a]=true # alt_instr kernel_version_gte 5.10.0 && check[s]=true # static_call_site diff --git a/kpatch-build/kpatch-elf.c b/kpatch-build/kpatch-elf.c index a01bde993..c27567525 100644 --- a/kpatch-build/kpatch-elf.c +++ b/kpatch-build/kpatch-elf.c @@ -136,6 +136,8 @@ struct rela *find_rela_by_offset(struct section *relasec, unsigned int offset) unsigned int absolute_rela_type(struct kpatch_elf *kelf) { switch(kelf->arch) { + case AARCH64: + return R_AARCH64_ABS64; case PPC64: return R_PPC64_ADDR64; case X86_64: @@ -205,6 +207,7 @@ long rela_target_offset(struct kpatch_elf *kelf, struct section *relasec, struct section *sec = relasec->base; switch(kelf->arch) { + case AARCH64: case PPC64: add_off = 0; break; From 307ceff112b641d55d6d40a31b2ddf7d1cf4a0f9 Mon Sep 17 00:00:00 2001 From: Joe Lawrence Date: Mon, 14 Feb 2022 21:37:50 -0500 Subject: [PATCH 05/19] create-diff-object: add aarch64 ASSERT_RTNL macro detection On aarch64, only the ASSERT_RTNL macro is affected by source line number changes (WARN, BUG, etc. no longer embed line numbers in the instruction stream.) A small test function that invokes the macro for a line change from 42 to 43: 0000000000000000 : 0: d503245f bti c 4: d503201f nop 8: d503201f nop c: d503233f paciasp 10: a9bf7bfd stp x29, x30, [sp, #-16]! 14: 910003fd mov x29, sp 18: 94000000 bl 0 18: R_AARCH64_CALL26 rtnl_is_locked 1c: 34000080 cbz w0, 2c 20: a8c17bfd ldp x29, x30, [sp], #16 24: d50323bf autiasp 28: d65f03c0 ret 2c: 90000000 adrp x0, 0 2c: R_AARCH64_ADR_PREL_PG_HI21 .data.once 30: 39400001 ldrb w1, [x0] 30: R_AARCH64_LDST8_ABS_LO12_NC .data.once 34: 35ffff61 cbnz w1, 20 38: 52800022 mov w2, #0x1 // #1 3c: 90000001 adrp x1, 0 3c: R_AARCH64_ADR_PREL_PG_HI21 .rodata.str1.8+0x8 40: 39000002 strb w2, [x0] 40: R_AARCH64_LDST8_ABS_LO12_NC .data.once 44: 91000021 add x1, x1, #0x0 44: R_AARCH64_ADD_ABS_LO12_NC .rodata.str1.8+0x8 - 48: 52800542 mov w2, #0x2a // #42 + 48: 52800562 mov w2, #0x2b // #43 4c: 90000000 adrp x0, 0 4c: R_AARCH64_ADR_PREL_PG_HI21 .rodata.str1.8+0x20 50: 91000000 add x0, x0, #0x0 50: R_AARCH64_ADD_ABS_LO12_NC .rodata.str1.8+0x20 54: 94000000 bl 0 <__warn_printk> 54: R_AARCH64_CALL26 __warn_printk 58: d4210000 brk #0x800 5c: 17fffff1 b 20 Create an implementation of kpatch_line_macro_change_only() for aarch64 modeled after the other architectures. Only look for relocations to __warn_printk that ASSERT_RTNL invokes. Based-on-s390x-code-by: C. 
Erastus Toe Signed-off-by: Joe Lawrence --- kpatch-build/create-diff-object.c | 65 ++++++++++++++++++++++++++++++- kpatch-build/kpatch-elf.c | 2 + 2 files changed, 65 insertions(+), 2 deletions(-) diff --git a/kpatch-build/create-diff-object.c b/kpatch-build/create-diff-object.c index e2a56d3eb..523ba2cff 100644 --- a/kpatch-build/create-diff-object.c +++ b/kpatch-build/create-diff-object.c @@ -859,13 +859,74 @@ static bool _kpatch_line_macro_change_only(struct kpatch_elf *kelf, return true; } +static bool _kpatch_line_macro_change_only_aarch64(struct kpatch_elf *kelf, + struct section *sec) +{ + unsigned char *start1, *start2; + unsigned long size, offset, insn_len; + struct rela *rela; + int lineonly = 0, found; + + insn_len = insn_length(kelf, NULL); + + if (sec->status != CHANGED || + is_rela_section(sec) || + !is_text_section(sec) || + sec->sh.sh_size != sec->twin->sh.sh_size || + !sec->rela || + sec->rela->status != SAME) + return false; + + start1 = sec->twin->data->d_buf; + start2 = sec->data->d_buf; + size = sec->sh.sh_size; + for (offset = 0; offset < size; offset += insn_len) { + if (!memcmp(start1 + offset, start2 + offset, insn_len)) + continue; + + /* Verify mov w2 */ + if (((start1[offset] & 0b11111) != 0x2) || (start1[offset+3] != 0x52) || + ((start1[offset] & 0b11111) != 0x2) || (start2[offset+3] != 0x52)) + return false; + + /* + * Verify zero or more string relas followed by a + * warn_slowpath_* or another similar rela. + */ + found = 0; + list_for_each_entry(rela, &sec->rela->relas, list) { + if (rela->offset < offset + insn_len) + continue; + if (rela->string) + continue; + if (!strncmp(rela->sym->name, "__warned.", 9) || + !strncmp(rela->sym->name, "__already_done.", 15)) + continue; + if (!strcmp(rela->sym->name, "__warn_printk")) { + found = 1; + break; + } + return false; + } + if (!found) + return false; + + lineonly = 1; + } + + if (!lineonly) + ERROR("no instruction changes detected for changed section %s", + sec->name); + + return true; +} + static bool kpatch_line_macro_change_only(struct kpatch_elf *kelf, struct section *sec) { switch(kelf->arch) { case AARCH64: - /* TODO */ - return false; + return _kpatch_line_macro_change_only_aarch64(kelf, sec); case PPC64: case S390: case X86_64: diff --git a/kpatch-build/kpatch-elf.c b/kpatch-build/kpatch-elf.c index c27567525..79eedaf3e 100644 --- a/kpatch-build/kpatch-elf.c +++ b/kpatch-build/kpatch-elf.c @@ -257,6 +257,8 @@ unsigned int insn_length(struct kpatch_elf *kelf, void *addr) char *insn = addr; switch(kelf->arch) { + case AARCH64: + return 4; case X86_64: insn_init(&decoded_insn, addr, 1); From 3eba78dcdbc8e2b632191e4b563a2b1532800fc8 Mon Sep 17 00:00:00 2001 From: Joe Lawrence Date: Tue, 4 Oct 2022 22:39:58 -0700 Subject: [PATCH 06/19] testing: add aarch unit tests Update the kpatch-unit-test-objs submodule reference to add aarch64 unit tests. 
Signed-off-by: Joe Lawrence --- .gitmodules | 3 ++- test/unit/Makefile | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.gitmodules b/.gitmodules index 49b10248a..f5b573ad6 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +1,4 @@ [submodule "test/unit/objs"] path = test/unit/objs - url = https://github.com/dynup/kpatch-unit-test-objs.git + url = https://github.com/joe-lawrence/kpatch-unit-test-objs.git + branch = initial-aarch64 diff --git a/test/unit/Makefile b/test/unit/Makefile index fde1717dd..e3ed7d718 100644 --- a/test/unit/Makefile +++ b/test/unit/Makefile @@ -1,4 +1,4 @@ -ARCHES ?= ppc64le x86_64 +ARCHES ?= aarch64 ppc64le x86_64 .PHONY: all clean submodule-check From 5c1e138d4eecc783227d2dd1ee720962836b30f7 Mon Sep 17 00:00:00 2001 From: Misono Tomohiro Date: Wed, 7 Sep 2022 10:38:01 +0900 Subject: [PATCH 07/19] create-diff-object: Fix mapping symbol handling on aarch64 It seems mapping symbols in aarch64 elf has section size of 0. So, exclude it in section symbol replacing code just like kpatch_correlate_symbols(). This fixes the data-read-mostly unit test on aarch64. Signed-off-by: Misono Tomohiro --- kpatch-build/create-diff-object.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/kpatch-build/create-diff-object.c b/kpatch-build/create-diff-object.c index 523ba2cff..1d3dc6371 100644 --- a/kpatch-build/create-diff-object.c +++ b/kpatch-build/create-diff-object.c @@ -1764,6 +1764,9 @@ static void kpatch_replace_sections_syms(struct kpatch_elf *kelf) */ } else if (target_off == start && target_off == end) { + if(kpatch_is_mapping_symbol(kelf, sym)) + continue; + /* * Allow replacement for references to * empty symbols. From 38e3a6f0f96b18aad76d370b8b232fc347391a1a Mon Sep 17 00:00:00 2001 From: Pete Swain Date: Fri, 28 Jul 2023 16:34:10 -0700 Subject: [PATCH 08/19] kpatch-syscall.h: add aarch64 helper Copy from kernel source tree. Signed-off-by: Misono Tomohiro --- kmod/patch/kpatch-syscall.h | 29 ++++++++++++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/kmod/patch/kpatch-syscall.h b/kmod/patch/kpatch-syscall.h index 3ff9189f6..fdbd4ccd5 100644 --- a/kmod/patch/kpatch-syscall.h +++ b/kmod/patch/kpatch-syscall.h @@ -176,7 +176,34 @@ # endif /* LINUX_VERSION_CODE */ -#endif /* CONFIG_X86_64 */ +#elif defined(CONFIG_ARM64) + +/* arm64/include/asm/syscall_wrapper.h versions */ + +#define SC_ARM64_REGS_TO_ARGS(x, ...) \ + __MAP(x,__SC_ARGS \ + ,,regs->regs[0],,regs->regs[1],,regs->regs[2] \ + ,,regs->regs[3],,regs->regs[4],,regs->regs[5]) + +#define __KPATCH_SYSCALL_DEFINEx(x, name, ...) 
\ + asmlinkage long __arm64_sys##name(const struct pt_regs *regs); \ + ALLOW_ERROR_INJECTION(__arm64_sys##name, ERRNO); \ + static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \ + static inline long __kpatch_do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \ + asmlinkage long __arm64_sys##name(const struct pt_regs *regs) \ + { \ + return __se_sys##name(SC_ARM64_REGS_TO_ARGS(x,__VA_ARGS__)); \ + } \ + static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \ + { \ + long ret = __kpatch_do_sys##name(__MAP(x,__SC_CAST,__VA_ARGS__)); \ + __MAP(x,__SC_TEST,__VA_ARGS__); \ + __PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__)); \ + return ret; \ + } \ + static inline long __kpatch_do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) + +#endif /* which arch */ #ifndef __KPATCH_SYSCALL_DEFINEx From 0c24960e3e91202b1b764a7c03df58bc7a2395ae Mon Sep 17 00:00:00 2001 From: Pete Swain Date: Mon, 10 Oct 2022 19:03:09 -0700 Subject: [PATCH 09/19] doc/arm64-upstream-prerequisites.md --- README.md | 2 +- doc/arm64-upstream-prerequisites.md | 11 +++++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) create mode 100644 doc/arm64-upstream-prerequisites.md diff --git a/README.md b/README.md index 78fd14bc4..b9c563000 100644 --- a/README.md +++ b/README.md @@ -40,7 +40,7 @@ Supported Architectures - [x] x86-64 - [x] ppc64le -- [x] arm64 +- [x] arm64 [upstream prerequisites](doc/arm64-upstream-prerequisites.md) - [x] s390 [upstream prerequisites](doc/s390-upstream-prerequisites.md) Installation diff --git a/doc/arm64-upstream-prerequisites.md b/doc/arm64-upstream-prerequisites.md new file mode 100644 index 000000000..3c49af860 --- /dev/null +++ b/doc/arm64-upstream-prerequisites.md @@ -0,0 +1,11 @@ +### arm64 backporting + +**Prerequisite kernel patches:** +**v5.19:** +- [Madhavan Venkataraman's [RFC PATCH v2 00/20] arm64: livepatch: Use ORC for dynamic frame pointer validation](https://lore.kernel.org/linux-arm-kernel/20220524001637.1707472-1-madvenka@linux.microsoft.com/) +- also tested against madvenka's earlier pre-objtool series up to v15 + +**v5.15 and v5.10:** +- under development, both known to work with backports of madvenka's v15, + but the objtool-using version above is likely to be the approach that + finally merges into upstream kernel From 3f1df3cc227ca4ab569e5a9310a8ce6344400c37 Mon Sep 17 00:00:00 2001 From: Pete Swain Date: Mon, 9 Jan 2023 02:16:57 -0800 Subject: [PATCH 10/19] arm64: per-func __patchable_function_entries sections new clang toolchain on arm64 produces individual __patchable_function_entries sections for each patchable func, in -ffunction-sections mode, rather than traditional single __mcount_loc section. Bend the existing logic to detect this multiplicity in the incoming kelf objects, and allocate N identical one-entry sections. These are retrieved as needed by a new function: find_nth_section_by_name() and attached to the .text sections they describe. These __pfe section are not actually arm64-specific, but a generic enhancement across gcc & clang, to allow better garbage collection of unreferenced object sections, and mcount/pfe objects which refer to them. The __pfe sections are combined in kernel-or-module final link, from 5.19.9's 9440155ccb948f8e3ce5308907a2e7378799be60. 
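
For illustration, the two layouts differ roughly as follows (foo and
bar are hypothetical functions, built with -ffunction-sections; each
per-function copy holds a single entry):

   single-section style (gcc 11/12):
       .text.foo
       .text.bar
       __patchable_function_entries      <- one section, entries for both

   per-function style (clang, gcc 13+):
       .text.foo
       __patchable_function_entries      <- SHF_LINK_ORDER -> .text.foo
       .text.bar
       __patchable_function_entries      <- SHF_LINK_ORDER -> .text.bar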
From clang-11, __pfe is supported for x86, though not yet used by kernel The split between allocate/populate phases here is necessary to enumerate/populate the outgoing section-headers before beginning to produce output sections Also adds some missing \n to log_debug()s Signed-off-by: Pete Swain --- kpatch-build/create-diff-object.c | 120 ++++++++++++++++++++++-------- kpatch-build/kpatch-elf.c | 32 ++++++-- kpatch-build/kpatch-elf.h | 3 + 3 files changed, 119 insertions(+), 36 deletions(-) mode change 100644 => 100755 kpatch-build/kpatch-elf.c diff --git a/kpatch-build/create-diff-object.c b/kpatch-build/create-diff-object.c index 1d3dc6371..f3b9b6886 100644 --- a/kpatch-build/create-diff-object.c +++ b/kpatch-build/create-diff-object.c @@ -70,6 +70,7 @@ enum subsection { enum loglevel loglevel = NORMAL; bool KLP_ARCH; +bool multi_pfe; int jump_label_errors, static_call_errors; @@ -3271,7 +3272,7 @@ static void kpatch_create_patches_sections(struct kpatch_elf *kelf, if (sym->bind == STB_LOCAL && symbol.global) ERROR("can't find local symbol '%s' in symbol table", sym->name); - log_debug("lookup for %s: obj=%s sympos=%lu size=%lu", + log_debug("lookup for %s: obj=%s sympos=%lu size=%lu\n", sym->name, symbol.objname, symbol.sympos, symbol.size); @@ -3643,7 +3644,7 @@ static void kpatch_create_intermediate_sections(struct kpatch_elf *kelf, ERROR("can't find symbol '%s' in symbol table", rela->sym->name); - log_debug("lookup for %s: obj=%s sympos=%lu", + log_debug("lookup for %s: obj=%s sympos=%lu\n", rela->sym->name, symbol.objname, symbol.sympos); @@ -3797,19 +3798,24 @@ static void kpatch_alloc_mcount_sections(struct kpatch_elf *kelf, struct kpatch_ { int nr; struct symbol *sym; + int text_idx = 0; nr = 0; - list_for_each_entry(sym, &kelfout->symbols, list) + list_for_each_entry(sym, &kelfout->symbols, list) { if (sym->type == STT_FUNC && sym->status != SAME && - sym->has_func_profiling) + sym->has_func_profiling) { + text_idx = sym->sec->index; nr++; + } + } /* create text/rela section pair */ switch(kelf->arch) { case AARCH64: { - struct section *sec, *tmp; - - sec = create_section_pair(kelfout, "__patchable_function_entries", sizeof(void *), nr); + struct section *sec; + int entries = multi_pfe ? 1 : nr; + int copies = multi_pfe ? nr : 1; + int flags = 0, rflags = 0; /* * Depending on the compiler the __patchable_function_entries section @@ -3817,9 +3823,26 @@ static void kpatch_alloc_mcount_sections(struct kpatch_elf *kelf, struct kpatch_ * avoid: * ld: __patchable_function_entries has both ordered [...] and unordered [...] 
sections */ - tmp = find_section_by_name(&kelf->sections, "__patchable_function_entries"); - sec->sh.sh_flags |= (tmp->sh.sh_flags & SHF_LINK_ORDER); - sec->sh.sh_link = 1; + sec = find_section_by_name(&kelf->sections, "__patchable_function_entries"); + if (sec) { + flags = (sec->sh.sh_flags & (SHF_LINK_ORDER|SHF_WRITE)); + if (sec->rela) + rflags = (sec->rela->sh.sh_flags & (SHF_LINK_ORDER|SHF_WRITE)); + } + + for (nr = 0; nr < copies; nr++) { + sec = create_section_pair(kelfout, + "__patchable_function_entries", + sizeof(void *), entries); + + sec->sh.sh_flags |= flags; + if (sec->rela) + sec->rela->sh.sh_flags |= rflags; + if (multi_pfe) + sec->sh.sh_link = 0; + else + sec->sh.sh_link = text_idx; + } break; } case PPC64: @@ -3848,11 +3871,14 @@ static void kpatch_populate_mcount_sections(struct kpatch_elf *kelf) struct symbol *sym; struct rela *rela, *mcount_rela; void **funcs; - unsigned long insn_offset = 0; switch(kelf->arch) { case AARCH64: - sec = find_section_by_name(&kelf->sections, "__patchable_function_entries"); + if (multi_pfe) + sec = NULL; + else + sec = find_section_by_name(&kelf->sections, + "__patchable_function_entries"); break; case PPC64: case X86_64: @@ -3862,12 +3888,20 @@ static void kpatch_populate_mcount_sections(struct kpatch_elf *kelf) default: ERROR("unsupported arch\n"); } - relasec = sec->rela; - nr = (int) (sec->data->d_size / sizeof(void *)); + + if (multi_pfe) { + relasec = NULL; + nr = 0; + } else { + relasec = sec->rela; + nr = (int) (sec->data->d_size / sizeof(void *)); + } /* populate sections */ index = 0; list_for_each_entry(sym, &kelf->symbols, list) { + unsigned long insn_offset = 0; + if (sym->type != STT_FUNC || sym->status == SAME) continue; @@ -3883,7 +3917,6 @@ static void kpatch_populate_mcount_sections(struct kpatch_elf *kelf) int i; insn = sym->sec->data->d_buf; - insn_offset = 0; /* * If BTI (Branch Target Identification) is enabled then there @@ -3974,6 +4007,18 @@ static void kpatch_populate_mcount_sections(struct kpatch_elf *kelf) ERROR("unsupported arch"); } + if (multi_pfe) { + sec = find_nth_section_by_name(&kelf->sections, nr, "__patchable_function_entries"); + if (!sec) + ERROR("cannot retrieve pre-allocated __pfe #%d\n", nr); + + relasec = sec->rela; + sym->sec->pfe = sec; + sec->sh.sh_link = sec->index; + + nr++; + } + /* * 'rela' points to the mcount/fentry call. * @@ -3983,7 +4028,13 @@ static void kpatch_populate_mcount_sections(struct kpatch_elf *kelf) mcount_rela->sym = sym; mcount_rela->type = absolute_rela_type(kelf); mcount_rela->addend = insn_offset - sym->sym.st_value; - mcount_rela->offset = (unsigned int) (index * sizeof(*funcs)); + + if (multi_pfe) { + mcount_rela->offset = 0; + sec = NULL; + } else { + mcount_rela->offset = (unsigned int) (index * sizeof(*funcs)); + } index++; } @@ -4142,6 +4193,7 @@ static void kpatch_find_func_profiling_calls(struct kpatch_elf *kelf) struct symbol *sym; struct rela *rela; unsigned char *insn; + list_for_each_entry(sym, &kelf->symbols, list) { if (sym->type != STT_FUNC || sym->is_pfx || !sym->sec || !sym->sec->rela) @@ -4149,21 +4201,23 @@ static void kpatch_find_func_profiling_calls(struct kpatch_elf *kelf) switch(kelf->arch) { case AARCH64: { - struct section *sec = find_section_by_name(&kelf->sections, - "__patchable_function_entries"); - /* - * If we can't find the __patchable_function_entries section or - * there are no relocations in it then not patchable. 
- */ - if (!sec || !sec->rela) - return; - list_for_each_entry(rela, &sec->rela->relas, list) { - if (rela->sym->sec && sym->sec == rela->sym->sec) { - sym->has_func_profiling = 1; - break; + struct section *sec; + + list_for_each_entry(sec, &kelf->sections, list) { + if (strcmp(sec->name, "__patchable_function_entries")) + continue; + if (multi_pfe && sym->sec->pfe != sec) + continue; + if (!sec->rela) + continue; + + list_for_each_entry(rela, &sec->rela->relas, list) { + if (rela->sym->sec && sym->sec == rela->sym->sec) { + sym->has_func_profiling = 1; + goto next_symbol; + } } } - break; } case PPC64: @@ -4196,6 +4250,7 @@ static void kpatch_find_func_profiling_calls(struct kpatch_elf *kelf) default: ERROR("unsupported arch"); } + next_symbol:; } } @@ -4243,6 +4298,12 @@ static error_t parse_opt (int key, char *arg, struct argp_state *state) return 0; } +static bool has_multi_pfe(struct kpatch_elf *kelf) +{ + return !!find_nth_section_by_name(&kelf->sections, 1, + "__patchable_function_entries"); +} + static struct argp argp = { options, parse_opt, args_doc, NULL }; int main(int argc, char *argv[]) @@ -4276,6 +4337,7 @@ int main(int argc, char *argv[]) kelf_orig = kpatch_elf_open(orig_obj); kelf_patched = kpatch_elf_open(patched_obj); + multi_pfe = has_multi_pfe(kelf_orig) || has_multi_pfe(kelf_patched); kpatch_find_func_profiling_calls(kelf_orig); kpatch_find_func_profiling_calls(kelf_patched); diff --git a/kpatch-build/kpatch-elf.c b/kpatch-build/kpatch-elf.c old mode 100644 new mode 100755 index 79eedaf3e..a29bcb68a --- a/kpatch-build/kpatch-elf.c +++ b/kpatch-build/kpatch-elf.c @@ -88,17 +88,29 @@ struct section *find_section_by_index(struct list_head *list, unsigned int index return NULL; } -struct section *find_section_by_name(struct list_head *list, const char *name) +struct section *find_nth_section_by_name( struct list_head *list, int nth, const char *name) { struct section *sec; - list_for_each_entry(sec, list, list) - if (!strcmp(sec->name, name)) - return sec; + if (!list || !list->next || !name) + return NULL; + + list_for_each_entry(sec, list, list) { + if (strcmp(sec->name, name)) + continue; + if (--nth >= 0) + continue; + return sec; + } return NULL; } +struct section *find_section_by_name(struct list_head *list, const char *name) +{ + return find_nth_section_by_name(list, 0, name); +} + struct symbol *find_symbol_by_index(struct list_head *list, size_t index) { struct symbol *sym; @@ -985,11 +997,17 @@ void kpatch_reindex_elements(struct kpatch_elf *kelf) index = 0; list_for_each_entry(sym, &kelf->symbols, list) { sym->index = index++; - if (sym->sec) + if (sym->sec) { sym->sym.st_shndx = (unsigned short)sym->sec->index; - else if (sym->sym.st_shndx != SHN_ABS && - sym->sym.st_shndx != SHN_LIVEPATCH) + if (sym->sec->pfe) { + sym->sec->pfe->sh.sh_link = sym->sec->index; + if (sym->sec->pfe->rela) + sym->sec->pfe->rela->sh.sh_info = sym->sec->index; + } + } else if (sym->sym.st_shndx != SHN_ABS && + sym->sym.st_shndx != SHN_LIVEPATCH) { sym->sym.st_shndx = SHN_UNDEF; + } } } diff --git a/kpatch-build/kpatch-elf.h b/kpatch-build/kpatch-elf.h index c1aed183b..0f2fbf41b 100644 --- a/kpatch-build/kpatch-elf.h +++ b/kpatch-build/kpatch-elf.h @@ -65,6 +65,7 @@ struct section { struct symbol *secsym, *sym; }; }; + struct section *pfe; /* arm64 per-func __patchable_function_entries */ }; enum symbol_strip { @@ -138,6 +139,8 @@ bool is_debug_section(struct section *sec); struct section *find_section_by_index(struct list_head *list, unsigned int index); struct section 
*find_section_by_name(struct list_head *list, const char *name); +struct section *find_nth_section_by_name(struct list_head *list, int nth, + const char *name); struct symbol *find_symbol_by_index(struct list_head *list, size_t index); struct symbol *find_symbol_by_name(struct list_head *list, const char *name); struct rela *find_rela_by_offset(struct section *relasec, unsigned int offset); From 496fc793373c9269cf4eb7c477867a66a1153d78 Mon Sep 17 00:00:00 2001 From: Pete Swain Date: Wed, 12 Jul 2023 08:13:27 -0700 Subject: [PATCH 11/19] arm64 leaf-function fix On arm64, kpatch_find_func_profiling_calls() was skipping leaf functions, with no relocations, so they weren't patchable. Here other archs need to walk a function's reloc entries to check for __fentry__ or __mcount, so it's valid to skip over functions without sym->sec->rela, because they cannot be patchable, else they would have at least an __fentry__ call relocation. But arm64 marks functions patchable in a different way, with per-func __patchable_function_entries sections referring _to_ the func, not relocations _within_ the func, so a function w/o relocations for text or data can still be patchable. Move the sym->sec->rela check to the per-arch paths. This allows gcc-static-local-var-5.patch to generate livepatch, on arm64 & x86 Suggested-By: Bill Wendling Signed-off-by: Pete Swain --- kpatch-build/create-diff-object.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/kpatch-build/create-diff-object.c b/kpatch-build/create-diff-object.c index f3b9b6886..2c2d38329 100644 --- a/kpatch-build/create-diff-object.c +++ b/kpatch-build/create-diff-object.c @@ -4195,8 +4195,7 @@ static void kpatch_find_func_profiling_calls(struct kpatch_elf *kelf) unsigned char *insn; list_for_each_entry(sym, &kelf->symbols, list) { - if (sym->type != STT_FUNC || sym->is_pfx || - !sym->sec || !sym->sec->rela) + if (sym->type != STT_FUNC || sym->is_pfx || !sym->sec) continue; switch(kelf->arch) { @@ -4221,6 +4220,8 @@ static void kpatch_find_func_profiling_calls(struct kpatch_elf *kelf) break; } case PPC64: + if (!sym->sec->rela) + continue; list_for_each_entry(rela, &sym->sec->rela->relas, list) { if (!strcmp(rela->sym->name, "_mcount")) { sym->has_func_profiling = 1; @@ -4229,6 +4230,8 @@ static void kpatch_find_func_profiling_calls(struct kpatch_elf *kelf) } break; case X86_64: + if (!sym->sec->rela) + continue; rela = list_first_entry(&sym->sec->rela->relas, struct rela, list); if ((rela->type != R_X86_64_NONE && @@ -4240,6 +4243,8 @@ static void kpatch_find_func_profiling_calls(struct kpatch_elf *kelf) sym->has_func_profiling = 1; break; case S390: + if (!sym->sec->rela) + continue; /* Check for compiler generated fentry nop - jgnop 0 */ insn = sym->sec->data->d_buf; if (insn[0] == 0xc0 && insn[1] == 0x04 && From e74447cba3f130b1b2298658176b5c83a511d23c Mon Sep 17 00:00:00 2001 From: Pete Swain Date: Sun, 16 Oct 2022 22:55:44 -0700 Subject: [PATCH 12/19] kpatch-cc skip arch/arm64/kernel/vdso*/* Signed-off-by: Pete Swain --- kpatch-build/kpatch-cc | 1 + 1 file changed, 1 insertion(+) diff --git a/kpatch-build/kpatch-cc b/kpatch-build/kpatch-cc index 17aae25b6..d5ec99362 100755 --- a/kpatch-build/kpatch-cc +++ b/kpatch-build/kpatch-cc @@ -42,6 +42,7 @@ if [[ "$TOOLCHAINCMD" =~ ^(.*-)?gcc$ || "$TOOLCHAINCMD" =~ ^(.*-)?clang$ ]] ; th arch/s390/boot/*|\ arch/s390/purgatory/*|\ arch/s390/kernel/vdso64/*|\ + arch/arm64/kernel/vdso*/*|\ drivers/firmware/efi/libstub/*|\ init/version.o|\ init/version-timestamp.o|\ From 
fce17981acf01a2a78c7b4d583862f9f3654573b Mon Sep 17 00:00:00 2001 From: Pete Swain Date: Mon, 9 Jan 2023 02:18:33 -0800 Subject: [PATCH 13/19] adapt to clang/arm64 naming New toolchain/arch, new conventions for section/label/etc names gcc's .LCx symbols point to string literals in '.rodata..str1.*' sections. Clang creates similar .Ltmp%d symbols in '.rodata.str' The function is_string_literal_section() generalized (too much?) to match either - clang's/arm64 /^\.rodata\.str$/ - gcc's /^\.rodata\./ && /\.str1\./ Various matchers for .data.unlikely .bss.unlikely replaced by is_data_unlikely_section() generalized to match - gcc's ".data.unlikely" - clang's ".(data|bss).module_name.unlikely" .data.once handled similarly Signed-off-by: Pete Swain --- kpatch-build/create-diff-object.c | 34 +++++++++++++++++++++++++++---- 1 file changed, 30 insertions(+), 4 deletions(-) diff --git a/kpatch-build/create-diff-object.c b/kpatch-build/create-diff-object.c index 2c2d38329..3a7899a24 100644 --- a/kpatch-build/create-diff-object.c +++ b/kpatch-build/create-diff-object.c @@ -352,6 +352,28 @@ static bool is_string_literal_section(struct section *sec) return !strncmp(sec->name, ".rodata.", 8) && strstr(sec->name, ".str"); } +/* gcc's ".data.unlikely" or clang's ".(data|bss).module_name.unlikely" */ +static bool is_data_unlikely_section(const char *name) +{ + size_t len = strlen(name); + + return (len >= 5 + 8 && + ((!strncmp(name, ".data.", 6) || + !strncmp(name, ".bss.", 5)) && + strstr(name + len - 9, ".unlikely"))); +} + +/* either ".data.once" or clang's ".(data|bss).module_name.once" */ +static bool is_data_once_section(const char *name) +{ + size_t len = strlen(name); + + return (len >= 5 + 4 && + (!strncmp(name, ".data.", 6) || + !strncmp(name, ".bss.", 5)) && + strstr(name + len - 5, ".once")); +} + /* * This function detects whether the given symbol is a "special" static local * variable (for lack of a better term). @@ -393,7 +415,7 @@ static bool is_special_static(struct symbol *sym) if (sym->type != STT_OBJECT || sym->bind != STB_LOCAL) return false; - if (!strcmp(sym->sec->name, ".data.once")) + if (is_data_once_section(sym->sec->name)) return true; for (var_name = var_names; *var_name; var_name++) { @@ -1198,9 +1220,11 @@ static void kpatch_correlate_symbols(struct kpatch_elf *kelf_orig, * The .LCx symbols point to string literals in * '.rodata..str1.*' sections. They get included * in kpatch_include_standard_elements(). 
+			 * Clang creates similar .Ltmp%d symbols in .rodata.str
 			 */
 			if (sym_orig->type == STT_NOTYPE &&
-			    !strncmp(sym_orig->name, ".LC", 3))
+			    !(strncmp(sym_orig->name, ".LC", 3) &&
+			      strncmp(sym_orig->name, ".Ltmp", 5)))
 				continue;
 
 			if (kpatch_is_mapping_symbol(kelf_orig, sym_orig))
@@ -1845,8 +1869,10 @@
 		 * (.data.unlikely and .data.once is ok b/c it only has __warned vars)
 		 */
 		if (sec->include && sec->status != NEW &&
-		    (!strncmp(sec->name, ".data", 5) || !strncmp(sec->name, ".bss", 4)) &&
-		    (strcmp(sec->name, ".data.unlikely") && strcmp(sec->name, ".data.once"))) {
+		    (!strncmp(sec->name, ".data", 5) ||
+		     !strncmp(sec->name, ".bss", 4)) &&
+		    !is_data_once_section(sec->name) &&
+		    !is_data_unlikely_section(sec->name)) {
 			log_normal("data section %s selected for inclusion\n",
 				   sec->name);
 			errs++;

From 516287a7b518f5aa84534451c32968e2c50a278c Mon Sep 17 00:00:00 2001
From: Pete Swain
Date: Mon, 30 Jan 2023 19:15:12 -0800
Subject: [PATCH 14/19] testing: freshen unit tests to address ppc64le fails

Merged in joe-lawrence/ppc64le-remove-eh_frame-take2; this should
resolve GitHub's test failures.
---
 .gitmodules | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.gitmodules b/.gitmodules
index f5b573ad6..824d77df9 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,4 +1,4 @@
 [submodule "test/unit/objs"]
 	path = test/unit/objs
-	url = https://github.com/joe-lawrence/kpatch-unit-test-objs.git
-	branch = initial-aarch64
+	url = https://github.com/swine/kpatch-unit-test-objs.git
+	branch = remotes/github-swine/arm64

From 838b7453a8564301e25183f86565e72c2fdd19c0 Mon Sep 17 00:00:00 2001
From: Pete Swain
Date: Tue, 31 Jan 2023 18:15:08 -0800
Subject: [PATCH 15/19] create-diff-object: merge aarch64
 kpatch_line_macro_change_only()

Generalized kpatch_line_macro_change_only() & insn_is_load_immediate()
to collapse the aarch64 support back into the parent function.

I'm assuming the 3rd start1 of the original

	/* Verify mov w2 */
	if (((start1[offset] & 0b11111) != 0x2) || (start1[offset+3] != 0x52) ||
	    ((start1[offset] & 0b11111) != 0x2) || (start2[offset+3] != 0x52))

was a typo for start2. That's now absorbed into
insn_is_load_immediate(), leaving just one aarch64-specific piece:
thinning out the match-list for diagnosing a __LINE__ reference, to
just "__warn_printk".
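
For reference, the aarch64 case in insn_is_load_immediate() keys on
the MOVZ encoding of the line-number load, as seen in the ASSERT_RTNL
disassembly from the earlier patch:

   48: 52800542  mov w2, #0x2a  // #42
   48: 52800562  mov w2, #0x2b  // #43

With little-endian byte order, insn[3] == 0x52 matches the 32-bit MOVZ
opcode byte and (insn[0] & 0x1f) == 0x2 matches destination register
w2, leaving only the immediate (the embedded line number) free to
differ.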
--- kpatch-build/create-diff-object.c | 96 ++++++------------------------- 1 file changed, 16 insertions(+), 80 deletions(-) diff --git a/kpatch-build/create-diff-object.c b/kpatch-build/create-diff-object.c index 3a7899a24..dcd8b2350 100644 --- a/kpatch-build/create-diff-object.c +++ b/kpatch-build/create-diff-object.c @@ -750,6 +750,12 @@ static bool insn_is_load_immediate(struct kpatch_elf *kelf, void *addr) break; + case AARCH64: + /* Verify mov w2 */ + if ((insn[0] & 0b11111) == 0x2 && insn[3] == 0x52) + return true; + break; + default: ERROR("unsupported arch"); } @@ -783,13 +789,14 @@ static bool insn_is_load_immediate(struct kpatch_elf *kelf, void *addr) * 51b: e8 00 00 00 00 callq 520 * 51c: R_X86_64_PC32 ___might_sleep-0x4 */ -static bool _kpatch_line_macro_change_only(struct kpatch_elf *kelf, +static bool kpatch_line_macro_change_only(struct kpatch_elf *kelf, struct section *sec) { unsigned long offset, insn1_len, insn2_len; void *data1, *data2, *insn1, *insn2; struct rela *r, *rela; bool found, found_any = false; + bool warn_printk_only = (kelf->arch == AARCH64); if (sec->status != CHANGED || is_rela_section(sec) || @@ -853,8 +860,15 @@ static bool _kpatch_line_macro_change_only(struct kpatch_elf *kelf, !strncmp(rela->sym->name, "__func__.", 9)) continue; + if (!strcmp(rela->sym->name, "__warn_printk")) { + found = true; + break; + } + + if (warn_printk_only) + return false; + if (!strncmp(rela->sym->name, "warn_slowpath_", 14) || - !strcmp(rela->sym->name, "__warn_printk") || !strcmp(rela->sym->name, "__might_sleep") || !strcmp(rela->sym->name, "___might_sleep") || !strcmp(rela->sym->name, "__might_fault") || @@ -882,84 +896,6 @@ static bool _kpatch_line_macro_change_only(struct kpatch_elf *kelf, return true; } -static bool _kpatch_line_macro_change_only_aarch64(struct kpatch_elf *kelf, - struct section *sec) -{ - unsigned char *start1, *start2; - unsigned long size, offset, insn_len; - struct rela *rela; - int lineonly = 0, found; - - insn_len = insn_length(kelf, NULL); - - if (sec->status != CHANGED || - is_rela_section(sec) || - !is_text_section(sec) || - sec->sh.sh_size != sec->twin->sh.sh_size || - !sec->rela || - sec->rela->status != SAME) - return false; - - start1 = sec->twin->data->d_buf; - start2 = sec->data->d_buf; - size = sec->sh.sh_size; - for (offset = 0; offset < size; offset += insn_len) { - if (!memcmp(start1 + offset, start2 + offset, insn_len)) - continue; - - /* Verify mov w2 */ - if (((start1[offset] & 0b11111) != 0x2) || (start1[offset+3] != 0x52) || - ((start1[offset] & 0b11111) != 0x2) || (start2[offset+3] != 0x52)) - return false; - - /* - * Verify zero or more string relas followed by a - * warn_slowpath_* or another similar rela. 
- */ - found = 0; - list_for_each_entry(rela, &sec->rela->relas, list) { - if (rela->offset < offset + insn_len) - continue; - if (rela->string) - continue; - if (!strncmp(rela->sym->name, "__warned.", 9) || - !strncmp(rela->sym->name, "__already_done.", 15)) - continue; - if (!strcmp(rela->sym->name, "__warn_printk")) { - found = 1; - break; - } - return false; - } - if (!found) - return false; - - lineonly = 1; - } - - if (!lineonly) - ERROR("no instruction changes detected for changed section %s", - sec->name); - - return true; -} - -static bool kpatch_line_macro_change_only(struct kpatch_elf *kelf, - struct section *sec) -{ - switch(kelf->arch) { - case AARCH64: - return _kpatch_line_macro_change_only_aarch64(kelf, sec); - case PPC64: - case S390: - case X86_64: - return _kpatch_line_macro_change_only(kelf, sec); - default: - ERROR("unsupported arch"); - } - return false; -} - /* * Child functions with "*.cold" names don't have _fentry_ calls, but "*.part", * often do. In the later case, it is not necessary to include the parent From e31d631e7eee3c16970d3f62599171c627052bc7 Mon Sep 17 00:00:00 2001 From: Pete Swain Date: Fri, 28 Jul 2023 16:35:45 -0700 Subject: [PATCH 16/19] create-diff-object: keep ubsan section If CONFIG_UBSAN is enabled, ubsan section (.data..Lubsan_{data,type}) can be created. Keep them unconditionally. NOTE: This patch needs to be verified. Signed-off-by: Misono Tomohiro --- kpatch-build/create-diff-object.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/kpatch-build/create-diff-object.c b/kpatch-build/create-diff-object.c index dcd8b2350..d374c577d 100644 --- a/kpatch-build/create-diff-object.c +++ b/kpatch-build/create-diff-object.c @@ -1644,6 +1644,7 @@ static void kpatch_replace_sections_syms(struct kpatch_elf *kelf) !strcmp(rela->sym->name, ".fixup") || !strcmp(rela->sym->name, ".altinstr_replacement") || !strcmp(rela->sym->name, ".altinstr_aux") || + !strncmp(rela->sym->name, ".data..Lubsan", 13) || !strcmp(rela->sym->name, ".text..refcount") || !strncmp(rela->sym->name, "__ftr_alt_", 10)) continue; @@ -1808,7 +1809,8 @@ static void kpatch_verify_patchability(struct kpatch_elf *kelf) (!strncmp(sec->name, ".data", 5) || !strncmp(sec->name, ".bss", 4)) && !is_data_once_section(sec->name) && - !is_data_unlikely_section(sec->name)) { + !is_data_unlikely_section(sec->name) && + strncmp(sec->name, ".data..Lubsan", 13)) { log_normal("data section %s selected for inclusion\n", sec->name); errs++; @@ -1904,6 +1906,7 @@ static void kpatch_include_standard_elements(struct kpatch_elf *kelf) !strcmp(sec->name, ".symtab") || !strcmp(sec->name, ".toc") || !strcmp(sec->name, ".rodata") || + !strncmp(sec->name, ".data..Lubsan", 13) || is_string_literal_section(sec)) { kpatch_include_section(sec); } From 71c3747b13300be9c61654514ff5bd5e28600728 Mon Sep 17 00:00:00 2001 From: Pete Swain Date: Thu, 10 Aug 2023 10:43:28 -0700 Subject: [PATCH 17/19] uninit var in kpatch-elf.c Initialize add_off earlier, so it's obviously never used uninitialized. Clang was warning on this, even if gcc was not. No functional change, the only path which left it undefined would call ERROR() anyway. 
---
 kpatch-build/kpatch-elf.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/kpatch-build/kpatch-elf.c b/kpatch-build/kpatch-elf.c
index a29bcb68a..4d6051362 100755
--- a/kpatch-build/kpatch-elf.c
+++ b/kpatch-build/kpatch-elf.c
@@ -215,13 +215,12 @@ static void rela_insn(const struct section *sec, const struct rela *rela,
 long rela_target_offset(struct kpatch_elf *kelf, struct section *relasec,
 			struct rela *rela)
 {
-	long add_off;
+	long add_off = 0;
 	struct section *sec = relasec->base;
 
 	switch(kelf->arch) {
 	case AARCH64:
 	case PPC64:
-		add_off = 0;
 		break;
 	case X86_64:
 		if (!is_text_section(sec) ||

From b44af0c5944f1568d052fc6eff543b9999085b93 Mon Sep 17 00:00:00 2001
From: zimao
Date: Mon, 7 Aug 2023 21:56:50 +0000
Subject: [PATCH 18/19] create-diff-object: Remove the multi_pfe flag.

On ARM64, every function section should have its own pfe section. A
bug in GCC 11/12 caused only a single pfe section to be generated for
all functions; the bug has been fixed in GCC 13.1.

As create-diff-object generates the pfe sections on its own, we should
fix this bug as well, instead of trying to reproduce it.

--
Adjusted whitespace in Zimao's proposed code.
Signed-off-by: Pete Swain

---
 kpatch-build/create-diff-object.c | 159 ++++++++++--------------------
 kpatch-build/kpatch-elf.c         |   6 +-
 2 files changed, 57 insertions(+), 108 deletions(-)

diff --git a/kpatch-build/create-diff-object.c b/kpatch-build/create-diff-object.c
index d374c577d..8ce228148 100644
--- a/kpatch-build/create-diff-object.c
+++ b/kpatch-build/create-diff-object.c
@@ -70,7 +70,6 @@ enum subsection {
 
 enum loglevel loglevel = NORMAL;
 bool KLP_ARCH;
-bool multi_pfe;
 
 int jump_label_errors, static_call_errors;
 
@@ -3754,114 +3753,68 @@ static void kpatch_create_callbacks_objname_rela(struct kpatch_elf *kelf, char *
 	}
 }
 
-/*
- * Allocate the mcount/patchable_function_entry sections which must be done
- * before the patched object is torn down so that the section flags can be
- * copied.
- */
-static void kpatch_alloc_mcount_sections(struct kpatch_elf *kelf, struct kpatch_elf *kelfout)
+static void kpatch_set_pfe_link(struct kpatch_elf *kelf)
 {
-	int nr;
-	struct symbol *sym;
-	int text_idx = 0;
+	struct section* sec;
+	struct rela *rela;
 
-	nr = 0;
-	list_for_each_entry(sym, &kelfout->symbols, list) {
-		if (sym->type == STT_FUNC && sym->status != SAME &&
-		    sym->has_func_profiling) {
-			text_idx = sym->sec->index;
-			nr++;
+	list_for_each_entry(sec, &kelf->sections, list) {
+		if (strcmp(sec->name, "__patchable_function_entries")) {
+			continue;
 		}
-	}
-
-	/* create text/rela section pair */
-	switch(kelf->arch) {
-	case AARCH64: {
-		struct section *sec;
-		int entries = multi_pfe ? 1 : nr;
-		int copies = multi_pfe ? nr : 1;
-		int flags = 0, rflags = 0;
-
-		/*
-		 * Depending on the compiler the __patchable_function_entries section
-		 * can be ordered or not, copy this flag to the section we created to
-		 * avoid:
-		 * ld: __patchable_function_entries has both ordered [...] and unordered [...] 
sections
- */
- sec = find_section_by_name(&kelf->sections, "__patchable_function_entries");
- if (sec) {
- flags = (sec->sh.sh_flags & (SHF_LINK_ORDER|SHF_WRITE));
- if (sec->rela)
- rflags = (sec->rela->sh.sh_flags & (SHF_LINK_ORDER|SHF_WRITE));
+ if (!sec->rela) {
+ continue;
 }
-
- for (nr = 0; nr < copies; nr++) {
- sec = create_section_pair(kelfout,
- "__patchable_function_entries",
- sizeof(void *), entries);
-
- sec->sh.sh_flags |= flags;
- if (sec->rela)
- sec->rela->sh.sh_flags |= rflags;
- if (multi_pfe)
- sec->sh.sh_link = 0;
- else
- sec->sh.sh_link = text_idx;
+ list_for_each_entry(rela, &sec->rela->relas, list) {
+ rela->sym->sec->pfe = sec;
 }
- break;
- }
- case PPC64:
- case X86_64:
- case S390:
- create_section_pair(kelfout, "__mcount_loc", sizeof(void *), nr);
- break;
- default:
- ERROR("unsupported arch\n");
- }
 }

 /*
- * Populate the mcount sections allocated by kpatch_alloc_mcount_sections()
- * previously.
 * This function basically reimplements the functionality of the Linux
 * recordmcount script, so that patched functions can be recognized by ftrace.
 *
 * TODO: Eventually we can modify recordmount so that it recognizes our bundled
 * sections as valid and does this work for us.
 */
-static void kpatch_populate_mcount_sections(struct kpatch_elf *kelf)
+static void kpatch_create_mcount_sections(struct kpatch_elf *kelf)
 {
 int nr, index;
- struct section *sec, *relasec;
+ struct section *relasec;
 struct symbol *sym;
 struct rela *rela, *mcount_rela;
 void **funcs;
+ bool pfe_per_function;

- switch(kelf->arch) {
+ nr = 0;
+ list_for_each_entry(sym, &kelf->symbols, list)
+ if (sym->type == STT_FUNC && sym->status != SAME &&
+ sym->has_func_profiling)
+ nr++;
+
+ switch (kelf->arch) {
 case AARCH64:
- if (multi_pfe)
- sec = NULL;
- else
- sec = find_section_by_name(&kelf->sections,
- "__patchable_function_entries");
+ /* For aarch64, create a separate __patchable_function_entries section for each symbol. */
+ pfe_per_function = true;
+ relasec = NULL;
 break;
 case PPC64:
 case X86_64:
 case S390:
- sec = find_section_by_name(&kelf->sections, "__mcount_loc");
+ {
+ struct section *sec;
+
+ /* create text/rela section pair */
+ sec = create_section_pair(kelf, "__mcount_loc", sizeof(void*), nr);
+ relasec = sec->rela;
 break;
+ }
 default:
 ERROR("unsupported arch\n");
 }

- if (multi_pfe) {
- relasec = NULL;
- nr = 0;
- } else {
- relasec = sec->rela;
- nr = (int) (sec->data->d_size / sizeof(void *));
- }
-
 /* populate sections */
 index = 0;
 list_for_each_entry(sym, &kelf->symbols, list) {
@@ -3878,6 +3831,7 @@ static void kpatch_populate_mcount_sections(struct kpatch_elf *kelf)
 switch(kelf->arch) {
 case AARCH64: {
+ struct section *sec;
 unsigned char *insn;
 int i;
@@ -3902,6 +3856,14 @@ static void kpatch_populate_mcount_sections(struct kpatch_elf *kelf)
 ERROR("%s: unexpected instruction in patch section of function\n", sym->name);
 }

+ /* Allocate __patchable_function_entries for this symbol */
+ sec = create_section_pair(kelf, "__patchable_function_entries", sizeof(void *), 1);
+ sec->sh.sh_flags |= SHF_WRITE | SHF_LINK_ORDER;
+ /* We will reset this sh_link in the reindex function.
*/ + sec->sh.sh_link = 0; + + relasec = sec->rela; + sym->sec->pfe = sec; break; } case PPC64: { @@ -3972,18 +3934,6 @@ static void kpatch_populate_mcount_sections(struct kpatch_elf *kelf) ERROR("unsupported arch"); } - if (multi_pfe) { - sec = find_nth_section_by_name(&kelf->sections, nr, "__patchable_function_entries"); - if (!sec) - ERROR("cannot retrieve pre-allocated __pfe #%d\n", nr); - - relasec = sec->rela; - sym->sec->pfe = sec; - sec->sh.sh_link = sec->index; - - nr++; - } - /* * 'rela' points to the mcount/fentry call. * @@ -3994,9 +3944,8 @@ static void kpatch_populate_mcount_sections(struct kpatch_elf *kelf) mcount_rela->type = absolute_rela_type(kelf); mcount_rela->addend = insn_offset - sym->sym.st_value; - if (multi_pfe) { + if (pfe_per_function) { mcount_rela->offset = 0; - sec = NULL; } else { mcount_rela->offset = (unsigned int) (index * sizeof(*funcs)); } @@ -4166,19 +4115,21 @@ static void kpatch_find_func_profiling_calls(struct kpatch_elf *kelf) switch(kelf->arch) { case AARCH64: { struct section *sec; - list_for_each_entry(sec, &kelf->sections, list) { - if (strcmp(sec->name, "__patchable_function_entries")) + if (strcmp(sec->name, "__patchable_function_entries")) { continue; - if (multi_pfe && sym->sec->pfe != sec) + } + if (sym->sec->pfe != sec) { continue; - if (!sec->rela) + } + if (!sec->rela) { continue; + } list_for_each_entry(rela, &sec->rela->relas, list) { if (rela->sym->sec && sym->sec == rela->sym->sec) { sym->has_func_profiling = 1; - goto next_symbol; + goto next_symbol; } } } @@ -4268,12 +4219,6 @@ static error_t parse_opt (int key, char *arg, struct argp_state *state) return 0; } -static bool has_multi_pfe(struct kpatch_elf *kelf) -{ - return !!find_nth_section_by_name(&kelf->sections, 1, - "__patchable_function_entries"); -} - static struct argp argp = { options, parse_opt, args_doc, NULL }; int main(int argc, char *argv[]) @@ -4307,7 +4252,10 @@ int main(int argc, char *argv[]) kelf_orig = kpatch_elf_open(orig_obj); kelf_patched = kpatch_elf_open(patched_obj); - multi_pfe = has_multi_pfe(kelf_orig) || has_multi_pfe(kelf_patched); + + kpatch_set_pfe_link(kelf_orig); + kpatch_set_pfe_link(kelf_patched); + kpatch_find_func_profiling_calls(kelf_orig); kpatch_find_func_profiling_calls(kelf_patched); @@ -4369,9 +4317,6 @@ int main(int argc, char *argv[]) /* this is destructive to kelf_patched */ kpatch_migrate_included_elements(kelf_patched, &kelf_out); - /* this must be done before kelf_patched is torn down */ - kpatch_alloc_mcount_sections(kelf_patched, kelf_out); - /* * Teardown kelf_patched since we shouldn't access sections or symbols * through it anymore. Don't free however, since our section and symbol @@ -4390,7 +4335,7 @@ int main(int argc, char *argv[]) kpatch_create_callbacks_objname_rela(kelf_out, parent_name); kpatch_build_strings_section_data(kelf_out); - kpatch_populate_mcount_sections(kelf_out); + kpatch_create_mcount_sections(kelf_out); /* * At this point, the set of output sections and symbols is diff --git a/kpatch-build/kpatch-elf.c b/kpatch-build/kpatch-elf.c index 4d6051362..4f3386743 100755 --- a/kpatch-build/kpatch-elf.c +++ b/kpatch-build/kpatch-elf.c @@ -663,6 +663,7 @@ void kpatch_dump_kelf(struct kpatch_elf *kelf) if (sec->rela) printf(", rela-> %s", sec->rela->name); } + printf(", pfe-> [%d]", (sec->pfe) == NULL ? 
-1 : (int)sec->pfe->index);
 next:
 printf("\n");
 }
@@ -672,8 +673,10 @@ void kpatch_dump_kelf(struct kpatch_elf *kelf)
 printf("sym %02d, type %d, bind %d, ndx %02d, name %s (%s)",
 sym->index, sym->type, sym->bind, sym->sym.st_shndx,
 sym->name, status_str(sym->status));
- if (sym->sec && (sym->type == STT_FUNC || sym->type == STT_OBJECT))
+ if (sym->sec && (sym->type == STT_FUNC || sym->type == STT_OBJECT)) {
 printf(" -> %s", sym->sec->name);
+ printf(", profiling: %d", sym->has_func_profiling);
+ }
 printf("\n");
 }
 }
@@ -942,6 +945,7 @@ struct section *create_section_pair(struct kpatch_elf *kelf, char *name,
 relasec->sh.sh_type = SHT_RELA;
 relasec->sh.sh_entsize = sizeof(GElf_Rela);
 relasec->sh.sh_addralign = 8;
+ relasec->sh.sh_flags = SHF_INFO_LINK;

 /* set text rela section pointer */
 sec->rela = relasec;

From 56386a92113ab2994419e35d3e603652170fd086 Mon Sep 17 00:00:00 2001
From: zimao
Date: Wed, 8 Nov 2023 07:57:08 +0000
Subject: [PATCH 19/19] create-diff-object: initialize pfe_per_function flag

Initialize pfe_per_function to false. The flag is only assigned in the
AARCH64 case of kpatch_create_mcount_sections(), so on every other
architecture it was previously read uninitialized.
---
 kpatch-build/create-diff-object.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kpatch-build/create-diff-object.c b/kpatch-build/create-diff-object.c
index 8ce228148..91f329fb7 100644
--- a/kpatch-build/create-diff-object.c
+++ b/kpatch-build/create-diff-object.c
@@ -3786,7 +3786,7 @@ static void kpatch_create_mcount_sections(struct kpatch_elf *kelf)
 struct symbol *sym;
 struct rela *rela, *mcount_rela;
 void **funcs;
- bool pfe_per_function;
+ bool pfe_per_function = false;

 nr = 0;
 list_for_each_entry(sym, &kelf->symbols, list)
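The hazard is easiest to see in reduced form. Here is a minimal,
hypothetical reduction of the pre-patch bug (stand-in code, not the
kpatch sources): only the AARCH64 arm ever assigns the flag, so every
other architecture branches on an indeterminate value.

    #include <stdbool.h>
    #include <stdio.h>

    enum arch { AARCH64, PPC64, X86_64, S390 };

    static const char *loc_section(enum arch a)
    {
        bool pfe_per_function = false;  /* the fix: a defined default */

        if (a == AARCH64)
            pfe_per_function = true;    /* one pfe section per function */

        return pfe_per_function ? "__patchable_function_entries"
                                : "__mcount_loc";
    }

    int main(void)
    {
        printf("%s\n", loc_section(X86_64));
        return 0;
    }

Without the "= false" initializer, the ppc64/x86_64/s390 paths read an
uninitialized bool, which is undefined behavior even when it happens to
produce the right answer.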
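For reference, the section-linking idea behind kpatch_set_pfe_link() in
patch 18 can be sketched with simplified stand-in types (the real kpatch
structures keep their relocations behind a separate sec->rela section;
this sketch compresses that detail):

    #include <string.h>

    struct section;

    struct rela {
        struct section *target_sec; /* function section the entry points into */
        struct rela *next;
    };

    struct section {
        const char *name;
        struct rela *relas;         /* relocations, if any */
        struct section *pfe;        /* back-pointer to the owning pfe section */
        struct section *next;
    };

    /*
     * Every relocation in a __patchable_function_entries section targets
     * the entry of some function, so walking the relocations reveals
     * which pfe section belongs to which function section.
     */
    static void set_pfe_links(struct section *sections)
    {
        for (struct section *sec = sections; sec; sec = sec->next) {
            if (strcmp(sec->name, "__patchable_function_entries"))
                continue;
            for (struct rela *rela = sec->relas; rela; rela = rela->next)
                rela->target_sec->pfe = sec;
        }
    }

With the back-pointer in place, kpatch_find_func_profiling_calls() can
simply require sym->sec->pfe == sec before marking a symbol, which is
what allows the multi_pfe special-casing to be deleted.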