Skip to content

Commit 613352e

Browse files
committed
Always use kpptr_to_paddr for kernel addresses
With this change, even architectures which do not use a distinct region to map the kernel ELF will use `kpptr_to_paddr` to translate any address from the kernel region. This ensures that these accesses are correctly checked for bounds when used and will make it easier to move the ELF mapping into a distinct region of virtual address space. Signed-off-by: Curtis Millar <[email protected]>
1 parent 3b6a63b commit 613352e

File tree

7 files changed

+35
-35
lines changed

7 files changed

+35
-35
lines changed

include/arch/arm/armv/armv7-a/armv/context_switch.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,7 @@ static inline void armv_contextSwitch_HWASID(pde_t *cap_pd, hw_asid_t hw_asid)
3838
* do does not need a DSB
3939
*/
4040
dsb();
41-
writeTTBR0Ptr(addrFromPPtr(armKSGlobalPD));
41+
writeTTBR0Ptr(addrFromKPPtr(armKSGlobalPD));
4242
isb();
4343
setHardwareASID(hw_asid);
4444
writeTTBR0Ptr(addrFromPPtr(cap_pd));

src/arch/arm/32/kernel/vspace.c

Lines changed: 17 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -257,7 +257,7 @@ BOOT_CODE void map_kernel_window(void)
257257
/* map log buffer page table. PTEs to be filled by user later by calling seL4_BenchmarkSetLogBuffer() */
258258
armKSGlobalPD[idx] =
259259
pde_pde_coarse_new(
260-
addrFromPPtr(armKSGlobalLogPT), /* address */
260+
addrFromKPPtr(armKSGlobalLogPT), /* address */
261261
true, /* P */
262262
0 /* Domain */
263263
);
@@ -271,7 +271,7 @@ BOOT_CODE void map_kernel_window(void)
271271
/* map page table covering last 1M of virtual address space to page directory */
272272
armKSGlobalPD[idx] =
273273
pde_pde_coarse_new(
274-
addrFromPPtr(armKSGlobalPT), /* address */
274+
addrFromKPPtr(armKSGlobalPT), /* address */
275275
true, /* P */
276276
0 /* Domain */
277277
);
@@ -281,7 +281,7 @@ BOOT_CODE void map_kernel_window(void)
281281

282282
/* map vector table */
283283
map_kernel_frame(
284-
addrFromPPtr(arm_vector_table),
284+
addrFromKPPtr(arm_vector_table),
285285
PPTR_VECTOR_TABLE,
286286
VMKernelOnly,
287287
vm_attributes_new(
@@ -294,7 +294,7 @@ BOOT_CODE void map_kernel_window(void)
294294
#ifdef CONFIG_KERNEL_GLOBALS_FRAME
295295
/* map globals frame */
296296
map_kernel_frame(
297-
addrFromPPtr(armKSGlobalsFrame),
297+
addrFromKPPtr(armKSGlobalsFrame),
298298
seL4_GlobalsFrame,
299299
VMReadOnly,
300300
vm_attributes_new(
@@ -322,7 +322,7 @@ BOOT_CODE void map_kernel_window(void)
322322
pde = pdeS1_pdeS1_invalid_new();
323323
armHSGlobalPGD[idx] = pde;
324324
}
325-
pde = pdeS1_pdeS1_coarse_new(0, 0, 0, 0, addrFromPPtr(armHSGlobalPD));
325+
pde = pdeS1_pdeS1_coarse_new(0, 0, 0, 0, addrFromKPPtr(armHSGlobalPD));
326326
armHSGlobalPGD[3] = pde;
327327

328328
/* Initialise PMD */
@@ -351,7 +351,7 @@ BOOT_CODE void map_kernel_window(void)
351351
phys += BIT(PT_INDEX_BITS + PAGE_BITS);
352352
}
353353
/* map page table covering last 2M of virtual address space */
354-
pde = pdeS1_pdeS1_coarse_new(0, 0, 0, 0, addrFromPPtr(armHSGlobalPT));
354+
pde = pdeS1_pdeS1_coarse_new(0, 0, 0, 0, addrFromKPPtr(armHSGlobalPT));
355355
armHSGlobalPD[idx] = pde;
356356

357357
/* now start initialising the page table */
@@ -375,7 +375,7 @@ BOOT_CODE void map_kernel_window(void)
375375
}
376376
/* map vector table */
377377
map_kernel_frame(
378-
addrFromPPtr(arm_vector_table),
378+
addrFromKPPtr(arm_vector_table),
379379
PPTR_VECTOR_TABLE,
380380
VMKernelOnly,
381381
vm_attributes_new(
@@ -388,7 +388,7 @@ BOOT_CODE void map_kernel_window(void)
388388
#ifdef CONFIG_KERNEL_GLOBALS_FRAME
389389
/* map globals frame */
390390
map_kernel_frame(
391-
addrFromPPtr(armKSGlobalsFrame),
391+
addrFromKPPtr(armKSGlobalsFrame),
392392
seL4_GlobalsFrame,
393393
VMReadOnly,
394394
vm_attributes_new(
@@ -401,7 +401,7 @@ BOOT_CODE void map_kernel_window(void)
401401
pteS2 = pte_pte_small_new(
402402
1, /* Not Executeable */
403403
0, /* Not contiguous */
404-
addrFromPPtr(armKSGlobalsFrame),
404+
addrFromKPPtr(armKSGlobalsFrame),
405405
1, /* AF -- always set */
406406
0, /* Not shared */
407407
HAPFromVMRights(VMReadOnly),
@@ -589,7 +589,7 @@ BOOT_CODE void activate_global_pd(void)
589589
that everything we've written (particularly the kernel page tables)
590590
is committed. */
591591
cleanInvalidateL1Caches();
592-
setCurrentPD(addrFromPPtr(armKSGlobalPD));
592+
setCurrentPD(addrFromKPPtr(armKSGlobalPD));
593593
invalidateLocalTLB();
594594
lockTLBEntry(PPTR_BASE);
595595
lockTLBEntry(PPTR_VECTOR_TABLE);
@@ -606,7 +606,7 @@ BOOT_CODE void activate_global_pd(void)
606606
cleanInvalidateL1Caches();
607607
/* Setup the memory attributes: We use 2 indicies (cachable/non-cachable) */
608608
setHMAIR((ATTRINDX_NONCACHEABLE << 0) | (ATTRINDX_CACHEABLE << 8), 0);
609-
setCurrentHypPD(addrFromPPtr(armHSGlobalPGD));
609+
setCurrentHypPD(addrFromKPPtr(armHSGlobalPGD));
610610
invalidateHypTLB();
611611
#if 0 /* Can't lock entries on A15 */
612612
lockTLBEntry(PPTR_BASE);
@@ -1050,9 +1050,9 @@ void setVMRoot(tcb_t *tcb)
10501050
if (cap_get_capType(threadRoot) != cap_page_directory_cap ||
10511051
!cap_page_directory_cap_get_capPDIsMapped(threadRoot)) {
10521052
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
1053-
setCurrentPD(addrFromPPtr(armUSGlobalPD));
1053+
setCurrentPD(addrFromKPPtr(armUSGlobalPD));
10541054
#else
1055-
setCurrentPD(addrFromPPtr(armKSGlobalPD));
1055+
setCurrentPD(addrFromKPPtr(armKSGlobalPD));
10561056
#endif
10571057
return;
10581058
}
@@ -1062,9 +1062,9 @@ void setVMRoot(tcb_t *tcb)
10621062
find_ret = findPDForASID(asid);
10631063
if (unlikely(find_ret.status != EXCEPTION_NONE || find_ret.pd != pd)) {
10641064
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
1065-
setCurrentPD(addrFromPPtr(armUSGlobalPD));
1065+
setCurrentPD(addrFromKPPtr(armUSGlobalPD));
10661066
#else
1067-
setCurrentPD(addrFromPPtr(armKSGlobalPD));
1067+
setCurrentPD(addrFromKPPtr(armKSGlobalPD));
10681068
#endif
10691069
return;
10701070
}
@@ -1248,7 +1248,7 @@ void copyGlobalMappings(pde_t *newPD)
12481248
/* Kernel and user MMUs are completely independent, however,
12491249
* we still need to share the globals page. */
12501250
pde_t pde;
1251-
pde = pde_pde_coarse_new(addrFromPPtr(armUSGlobalPT));
1251+
pde = pde_pde_coarse_new(addrFromKPPtr(armUSGlobalPT));
12521252
newPD[BIT(PD_INDEX_BITS) - 1] = pde;
12531253
#endif /* CONFIG_KERNEL_GLOBALS_FRAME */
12541254
#endif
@@ -2765,7 +2765,7 @@ exception_t benchmark_arch_map_logBuffer(word_t frame_cptr)
27652765
0 /* executable */
27662766
);
27672767

2768-
cleanByVA_PoU((vptr_t)&armKSGlobalLogPT[idx], pptr_to_paddr(&armKSGlobalLogPT[idx]));
2768+
cleanByVA_PoU((vptr_t)&armKSGlobalLogPT[idx], addrFromKPPtr(&armKSGlobalLogPT[idx]));
27692769
invalidateTranslationSingle(KS_LOG_PPTR + (idx * BIT(seL4_PageBits)));
27702770
}
27712771

src/arch/arm/32/object/objecttype.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -199,7 +199,7 @@ finaliseCap_ret_t Arch_finaliseCap(cap_t cap, bool_t final)
199199

200200
cleanCacheRange_PoU((pptr_t) &armKSGlobalLogPT[0],
201201
(pptr_t) &armKSGlobalLogPT[0] + BIT(seL4_PageTableBits),
202-
addrFromPPtr((void *)&armKSGlobalLogPT[0]));
202+
addrFromKPPtr((void *)&armKSGlobalLogPT[0]));
203203

204204
for (int idx = 0; idx < BIT(PT_INDEX_BITS); idx++) {
205205
invalidateTranslationSingle(KS_LOG_PPTR + (idx << seL4_PageBits));

src/arch/arm/64/kernel/thread.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,7 @@ void Arch_switchToIdleThread(void)
3030
if (config_set(CONFIG_ARM_HYPERVISOR_SUPPORT)) {
3131
vcpu_switch(NULL);
3232
}
33-
setCurrentUserVSpaceRoot(ttbr_new(0, pptr_to_paddr(armKSGlobalUserVSpace)));
33+
setCurrentUserVSpaceRoot(ttbr_new(0, addrFromKPPtr(armKSGlobalUserVSpace)));
3434
}
3535

3636
void Arch_activateIdleThread(tcb_t *tcb)

src/arch/arm/64/kernel/vspace.c

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -249,12 +249,12 @@ BOOT_CODE void map_kernel_window(void)
249249

250250
/* place the PUD into the PGD */
251251
armKSGlobalKernelPGD[GET_PGD_INDEX(PPTR_BASE)] = pgde_pgde_pud_new(
252-
pptr_to_paddr(armKSGlobalKernelPUD));
252+
addrFromKPPtr(armKSGlobalKernelPUD));
253253

254254
/* place all PDs except the last one in PUD */
255255
for (idx = GET_PUD_INDEX(PPTR_BASE); idx < GET_PUD_INDEX(PPTR_TOP); idx++) {
256256
armKSGlobalKernelPUD[idx] = pude_pude_pd_new(
257-
pptr_to_paddr(&armKSGlobalKernelPDs[idx][0])
257+
addrFromKPPtr(&armKSGlobalKernelPDs[idx][0])
258258
);
259259
}
260260

@@ -279,12 +279,12 @@ BOOT_CODE void map_kernel_window(void)
279279

280280
/* put the PD into the PUD for device window */
281281
armKSGlobalKernelPUD[GET_PUD_INDEX(PPTR_TOP)] = pude_pude_pd_new(
282-
pptr_to_paddr(&armKSGlobalKernelPDs[BIT(PUD_INDEX_BITS) - 1][0])
282+
addrFromKPPtr(&armKSGlobalKernelPDs[BIT(PUD_INDEX_BITS) - 1][0])
283283
);
284284

285285
/* put the PT into the PD for device window */
286286
armKSGlobalKernelPDs[BIT(PUD_INDEX_BITS) - 1][BIT(PD_INDEX_BITS) - 1] = pde_pde_small_new(
287-
pptr_to_paddr(armKSGlobalKernelPT)
287+
addrFromKPPtr(armKSGlobalKernelPT)
288288
);
289289

290290
map_kernel_devices();
@@ -534,10 +534,10 @@ BOOT_CODE cap_t create_mapped_it_frame_cap(cap_t pd_cap, pptr_t pptr, vptr_t vpt
534534
BOOT_CODE void activate_kernel_vspace(void)
535535
{
536536
cleanInvalidateL1Caches();
537-
setCurrentKernelVSpaceRoot(ttbr_new(0, pptr_to_paddr(armKSGlobalKernelPGD)));
537+
setCurrentKernelVSpaceRoot(ttbr_new(0, addrFromKPPtr(armKSGlobalKernelPGD)));
538538

539539
/* Prevent elf-loader address translation to fill up TLB */
540-
setCurrentUserVSpaceRoot(ttbr_new(0, pptr_to_paddr(armKSGlobalUserVSpace)));
540+
setCurrentUserVSpaceRoot(ttbr_new(0, addrFromKPPtr(armKSGlobalUserVSpace)));
541541

542542
invalidateLocalTLB();
543543
lockTLBEntry(KERNEL_ELF_BASE);
@@ -991,15 +991,15 @@ void setVMRoot(tcb_t *tcb)
991991
threadRoot = TCB_PTR_CTE_PTR(tcb, tcbVTable)->cap;
992992

993993
if (!isValidNativeRoot(threadRoot)) {
994-
setCurrentUserVSpaceRoot(ttbr_new(0, pptr_to_paddr(armKSGlobalUserVSpace)));
994+
setCurrentUserVSpaceRoot(ttbr_new(0, addrFromKPPtr(armKSGlobalUserVSpace)));
995995
return;
996996
}
997997

998998
vspaceRoot = VSPACE_PTR(cap_vtable_root_get_basePtr(threadRoot));
999999
asid = cap_vtable_root_get_mappedASID(threadRoot);
10001000
find_ret = findVSpaceForASID(asid);
10011001
if (unlikely(find_ret.status != EXCEPTION_NONE || find_ret.vspace_root != vspaceRoot)) {
1002-
setCurrentUserVSpaceRoot(ttbr_new(0, pptr_to_paddr(armKSGlobalUserVSpace)));
1002+
setCurrentUserVSpaceRoot(ttbr_new(0, addrFromKPPtr(armKSGlobalUserVSpace)));
10031003
return;
10041004
}
10051005

@@ -2560,7 +2560,7 @@ exception_t benchmark_arch_map_logBuffer(word_t frame_cptr)
25602560
0, /* VMKernelOnly */
25612561
NORMAL);
25622562

2563-
cleanByVA_PoU((vptr_t)armKSGlobalLogPDE, pptr_to_paddr(armKSGlobalLogPDE));
2563+
cleanByVA_PoU((vptr_t)armKSGlobalLogPDE, addrFromKPPtr(armKSGlobalLogPDE));
25642564
invalidateTranslationSingle(KS_LOG_PPTR);
25652565
return EXCEPTION_NONE;
25662566
}

src/arch/x86/32/kernel/vspace.c

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -245,7 +245,7 @@ BOOT_CODE bool_t map_kernel_window(
245245
#ifdef CONFIG_KERNEL_LOG_BUFFER
246246
/* Map global page table for the log buffer */
247247
pde = pde_pde_pt_new(
248-
pptr_to_paddr(ia32KSGlobalLogPT), /* pt_base_address */
248+
kpptr_to_paddr(ia32KSGlobalLogPT), /* pt_base_address */
249249
0, /* avl */
250250
0, /* accessed */
251251
0, /* cache_disabled */
@@ -271,7 +271,7 @@ BOOT_CODE bool_t map_kernel_window(
271271

272272
/* map page table of last 4M of virtual address space to page directory */
273273
pde = pde_pde_pt_new(
274-
pptr_to_paddr(ia32KSGlobalPT), /* pt_base_address */
274+
kpptr_to_paddr(ia32KSGlobalPT), /* pt_base_address */
275275
0, /* avl */
276276
0, /* accessed */
277277
0, /* cache_disabled */
@@ -597,15 +597,15 @@ void setVMRoot(tcb_t *tcb)
597597
vspace_root = getValidNativeRoot(threadRoot);
598598
if (!vspace_root) {
599599
SMP_COND_STATEMENT(tlb_bitmap_unset(paddr_to_pptr(getCurrentPD()), getCurrentCPUIndex());)
600-
setCurrentPD(pptr_to_paddr(ia32KSGlobalPD));
600+
setCurrentPD(kpptr_to_paddr(ia32KSGlobalPD));
601601
return;
602602
}
603603

604604
asid = cap_get_capMappedASID(threadRoot);
605605
find_ret = findVSpaceForASID(asid);
606606
if (find_ret.status != EXCEPTION_NONE || find_ret.vspace_root != vspace_root) {
607607
SMP_COND_STATEMENT(tlb_bitmap_unset(paddr_to_pptr(getCurrentPD()), getCurrentCPUIndex());)
608-
setCurrentPD(pptr_to_paddr(ia32KSGlobalPD));
608+
setCurrentPD(kpptr_to_paddr(ia32KSGlobalPD));
609609
return;
610610
}
611611

src/plat/tk1/machine/smmu.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -85,7 +85,7 @@ static inline void smmu_disable(void)
8585
if (config_set(CONFIG_ARM_HYPERVISOR_SUPPORT)) {
8686
/* in hyp mode, we need call the hook in monitor mode */
8787
/* we need physical address here */
88-
paddr_t addr = addrFromPPtr(&do_smmu_disable);
88+
paddr_t addr = addrFromKPPtr(&do_smmu_disable);
8989
asm(".arch_extension sec\n");
9090
asm volatile("mov r0, %0\n\t"
9191
"dsb\nisb\n"
@@ -102,7 +102,7 @@ static inline void smmu_disable(void)
102102
static inline void smmu_enable(void)
103103
{
104104
if (config_set(CONFIG_ARM_HYPERVISOR_SUPPORT)) {
105-
paddr_t addr = addrFromPPtr(&do_smmu_enable);
105+
paddr_t addr = addrFromKPPtr(&do_smmu_enable);
106106
asm(".arch_extension sec\n");
107107
asm volatile("mov r0, %0\n\t"
108108
"dsb\nisb\n"

0 commit comments

Comments
 (0)