Skip to content

Commit 56db97a

Browse files
seehearfeelgregkh
authored and committed
LoongArch: Rework KASAN initialization for PTW-enabled systems
commit 5ec5ac4 upstream. kasan_init_generic() indicates that kasan is fully initialized, so it should be put at end of kasan_init(). Otherwise bringing up the primary CPU failed when CONFIG_KASAN is set on PTW-enabled systems, here are the call chains: kernel_entry() start_kernel() setup_arch() kasan_init() kasan_init_generic() The reason is PTW-enabled systems have speculative accesses which means memory accesses to the shadow memory after kasan_init() may be executed by hardware before. However, accessing shadow memory is safe only after kasan is fully initialized because kasan_init() uses a temporary PGD table until we have populated all levels of shadow page tables and written the PGD register. Moving kasan_init_generic() later can defer the occasion of kasan_enabled(), so as to avoid speculative accesses on shadow pages. After moving kasan_init_generic() to the end, kasan_init() can no longer call kasan_mem_to_shadow() for shadow address conversion because it will always return kasan_early_shadow_page. On the other hand, we should keep the current logic of kasan_mem_to_shadow() for both the early and final stage because there may be instrumentation before kasan_init(). To solve this, we factor out a new mem_to_shadow() function from current kasan_mem_to_shadow() for the shadow address conversion in kasan_init(). Cc: stable@vger.kernel.org Signed-off-by: Tiezhu Yang <yangtiezhu@loongson.cn> Signed-off-by: Huacai Chen <chenhuacai@loongson.cn> [ Huacai: To backport from upstream to 6.6 & 6.12, kasan_enabled() is replaced with kasan_arch_is_ready() and kasan_init_generic() is replaced with "kasan_early_stage = false". ] Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1 parent da06bb0 commit 56db97a

1 file changed

Lines changed: 40 additions & 37 deletions

File tree

arch/loongarch/mm/kasan_init.c

Lines changed: 40 additions & 37 deletions
Original file line numberDiff line numberDiff line change
@@ -42,39 +42,43 @@ static pgd_t kasan_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
4242

4343
bool kasan_early_stage = true;
4444

45-
void *kasan_mem_to_shadow(const void *addr)
45+
static void *mem_to_shadow(const void *addr)
4646
{
47-
if (!kasan_arch_is_ready()) {
47+
unsigned long offset = 0;
48+
unsigned long maddr = (unsigned long)addr;
49+
unsigned long xrange = (maddr >> XRANGE_SHIFT) & 0xffff;
50+
51+
if (maddr >= FIXADDR_START)
4852
return (void *)(kasan_early_shadow_page);
49-
} else {
50-
unsigned long maddr = (unsigned long)addr;
51-
unsigned long xrange = (maddr >> XRANGE_SHIFT) & 0xffff;
52-
unsigned long offset = 0;
53-
54-
if (maddr >= FIXADDR_START)
55-
return (void *)(kasan_early_shadow_page);
56-
57-
maddr &= XRANGE_SHADOW_MASK;
58-
switch (xrange) {
59-
case XKPRANGE_CC_SEG:
60-
offset = XKPRANGE_CC_SHADOW_OFFSET;
61-
break;
62-
case XKPRANGE_UC_SEG:
63-
offset = XKPRANGE_UC_SHADOW_OFFSET;
64-
break;
65-
case XKPRANGE_WC_SEG:
66-
offset = XKPRANGE_WC_SHADOW_OFFSET;
67-
break;
68-
case XKVRANGE_VC_SEG:
69-
offset = XKVRANGE_VC_SHADOW_OFFSET;
70-
break;
71-
default:
72-
WARN_ON(1);
73-
return NULL;
74-
}
7553

76-
return (void *)((maddr >> KASAN_SHADOW_SCALE_SHIFT) + offset);
54+
maddr &= XRANGE_SHADOW_MASK;
55+
switch (xrange) {
56+
case XKPRANGE_CC_SEG:
57+
offset = XKPRANGE_CC_SHADOW_OFFSET;
58+
break;
59+
case XKPRANGE_UC_SEG:
60+
offset = XKPRANGE_UC_SHADOW_OFFSET;
61+
break;
62+
case XKPRANGE_WC_SEG:
63+
offset = XKPRANGE_WC_SHADOW_OFFSET;
64+
break;
65+
case XKVRANGE_VC_SEG:
66+
offset = XKVRANGE_VC_SHADOW_OFFSET;
67+
break;
68+
default:
69+
WARN_ON(1);
70+
return NULL;
7771
}
72+
73+
return (void *)((maddr >> KASAN_SHADOW_SCALE_SHIFT) + offset);
74+
}
75+
76+
void *kasan_mem_to_shadow(const void *addr)
77+
{
78+
if (kasan_arch_is_ready())
79+
return mem_to_shadow(addr);
80+
else
81+
return (void *)(kasan_early_shadow_page);
7882
}
7983

8084
const void *kasan_shadow_to_mem(const void *shadow_addr)
@@ -295,10 +299,8 @@ void __init kasan_init(void)
295299
/* Maps everything to a single page of zeroes */
296300
kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE, true);
297301

298-
kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_START),
299-
kasan_mem_to_shadow((void *)KFENCE_AREA_END));
300-
301-
kasan_early_stage = false;
302+
kasan_populate_early_shadow(mem_to_shadow((void *)VMALLOC_START),
303+
mem_to_shadow((void *)KFENCE_AREA_END));
302304

303305
/* Populate the linear mapping */
304306
for_each_mem_range(i, &pa_start, &pa_end) {
@@ -308,13 +310,13 @@ void __init kasan_init(void)
308310
if (start >= end)
309311
break;
310312

311-
kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
312-
(unsigned long)kasan_mem_to_shadow(end), NUMA_NO_NODE);
313+
kasan_map_populate((unsigned long)mem_to_shadow(start),
314+
(unsigned long)mem_to_shadow(end), NUMA_NO_NODE);
313315
}
314316

315317
/* Populate modules mapping */
316-
kasan_map_populate((unsigned long)kasan_mem_to_shadow((void *)MODULES_VADDR),
317-
(unsigned long)kasan_mem_to_shadow((void *)MODULES_END), NUMA_NO_NODE);
318+
kasan_map_populate((unsigned long)mem_to_shadow((void *)MODULES_VADDR),
319+
(unsigned long)mem_to_shadow((void *)MODULES_END), NUMA_NO_NODE);
318320
/*
319321
* KAsan may reuse the contents of kasan_early_shadow_pte directly, so we
320322
* should make sure that it maps the zero page read-only.
@@ -329,5 +331,6 @@ void __init kasan_init(void)
329331

330332
/* At this point kasan is fully initialized. Enable error messages */
331333
init_task.kasan_depth = 0;
334+
kasan_early_stage = false;
332335
pr_info("KernelAddressSanitizer initialized.\n");
333336
}

0 commit comments

Comments
 (0)