-use memory::{MmiRef, VirtualAddress, PhysicalAddress, MappedPages, PteFlags};
+use memory::{create_identity_mapping, MmiRef, VirtualAddress, PhysicalAddress, PteFlags};
 use memory_aarch64::{read_mmu_config, asm_set_mmu_config_x2_x3};
 use kernel_config::memory::{PAGE_SIZE, KERNEL_STACK_SIZE_IN_PAGES};
 use psci::{cpu_on, error::Error::*};
@@ -8,6 +8,7 @@ use volatile::Volatile;
 use core::arch::asm;
 use cpu::{CpuId, MpidrValue, current_cpu};
 use arm_boards::BOARD_CONFIG;
+use mod_mgmt::get_initial_kernel_namespace;
 
 /// The data items used when an AP core is booting up in ap_entry_point & ap_stage_two.
 #[cfg(target_arch = "aarch64")]
@@ -42,79 +43,56 @@ pub fn handle_ap_cores(
 ) -> Result<u32, &'static str> {
     let mut online_secondary_cpus = 0;
 
-    // This ApTrampolineData & MmuConfig will be read and written-to
-    // by all detected CPU cores, via both its physical
-    // and virtual addresses.
+    // This ApTrampolineData & MmuConfig will be read and written to
+    // by all detected CPU cores, via both its physical and virtual addresses.
     let mmu_config = read_mmu_config();
     let mut ap_data: ApTrampolineData = Default::default();
     ap_data.ap_data_virt_addr.write(VirtualAddress::new_canonical(&ap_data as *const _ as usize));
     ap_data.ap_stage_two_virt_addr.write(VirtualAddress::new_canonical(ap_stage_two as usize));
 
-    let entry_point_phys_addr: PhysicalAddress;
-    let ap_data_phys_addr: PhysicalAddress;
-    let mut ap_startup_mapped_pages: MappedPages; // must be held throughout APs being booted up
+    // Identity map one page of memory and copy the executable code of `ap_entry_point` into it.
+    // This ensures that when we run that `ap_entry_point` code from the identity-mapped page,
+    // it can safely enable the MMU, as the program counter will be valid
+    // (and have the same value) both before and after the MMU is enabled.
+    let rwx = PteFlags::new().valid(true).writable(true).executable(true);
+    let mut ap_startup_mapped_pages = create_identity_mapping(1, rwx)?;
+    let virt_addr = ap_startup_mapped_pages.start_address();
+
     {
+        let kernel_text_pages_ref = get_initial_kernel_namespace()
+            .ok_or("BUG: couldn't get the initial kernel CrateNamespace")
+            .and_then(|namespace| namespace.get_crate("nano_core").ok_or("BUG: couldn't get the 'nano_core' crate"))
+            .and_then(|nano_core_crate| nano_core_crate.lock_as_ref().text_pages.clone().ok_or("BUG: nano_core crate had no text pages"))?;
+        let kernel_text_pages = kernel_text_pages_ref.0.lock();
+
+        let ap_entry_point = VirtualAddress::new_canonical(ap_entry_point as usize);
+        let src = kernel_text_pages.offset_of_address(ap_entry_point)
+            .ok_or("BUG: the 'ap_entry_point' virtual address was not covered by the kernel's text pages")
+            .and_then(|offset| kernel_text_pages.as_slice(offset, PAGE_SIZE))?;
+
+        let dst: &mut [u8] = ap_startup_mapped_pages.as_slice_mut(0, PAGE_SIZE)?;
+        dst.copy_from_slice(src);
+
+        // After copying the content into the identity page, remap it to remove write permissions.
+        let mut kernel_mmi = kernel_mmi_ref.lock();
+        let rx = PteFlags::new().valid(true).executable(true);
+        ap_startup_mapped_pages.remap(&mut kernel_mmi.page_table, rx)?;
+    }
+
+    // We identity mapped the `ap_entry_point` above, but we need to translate
+    // the virtual address of the `ap_data` in order to obtain its physical address.
+    let entry_point_phys_addr = PhysicalAddress::new_canonical(virt_addr.value());
+    let ap_data_phys_addr = {
         let mut kernel_mmi = kernel_mmi_ref.lock();
         let page_table = &mut kernel_mmi.page_table;
 
-        // get the physical address of the MmuConfig
+        // Write the physical address of the MmuConfig struct into the ApData struct.
         let mmu_config_virt_addr = VirtualAddress::new_canonical(&mmu_config as *const _ as usize);
         ap_data.ap_mmu_config.write(page_table.translate(mmu_config_virt_addr).unwrap());
 
         // get physical address of the ApTrampolineData structure
-        ap_data_phys_addr = page_table.translate(ap_data.ap_data_virt_addr.read()).unwrap();
-
-        // This block identity-maps one page of writable+executable memory and
-        // copies the machine code of `ap_entry_point` to it. The fact that the
-        // mapping is identity-mapped allows this code to enable paging
-        // transparently without messing up with the PC register.
-        //
-        // However, there is currently no way in Theseus to dynamically find a
-        // memory range that is identity mappable, so the address is hardcoded.
-        //
-        // Another solution was implemented instead, which is why this one is
-        // soft-commented. The CPU has instruction caches, so when paging gets
-        // enabled, it can still execute some instructions located near PC without
-        // using the page directory. This allows us to branch to a physical address,
-        // enable paging, and then branch to a virtual address (even though at the
-        // time of the second branch, PC has an invalid address) where the
-        // initialization can continue.
-        // Note: caches must not be flushed before this second branch.
-        //
-        // It would still be preferable to use the first solution, as it doesn't
-        // rely on a cache trick.
-        if false {
-            const ID_MAPPED_ENTRY_POINT: usize = 0x5001_0000;
-
-            entry_point_phys_addr = PhysicalAddress::new_canonical(ID_MAPPED_ENTRY_POINT);
-
-            // When the AArch64 core will enter startup code, it will do so with MMU disabled,
-            // which means these frames MUST be identity mapped.
-            let ap_startup_frames = memory::allocate_frames_by_bytes_at(entry_point_phys_addr, PAGE_SIZE)
-                .map_err(|_e| "handle_ap_cores(): failed to allocate AP startup frames")?;
-            let ap_startup_pages = memory::allocate_pages_at(VirtualAddress::new_canonical(ID_MAPPED_ENTRY_POINT), ap_startup_frames.size_in_frames())
-                .map_err(|_e| "handle_ap_cores(): failed to allocate AP startup pages")?;
-
-            // map this RWX
-            let flags = PteFlags::new().valid(true).writable(true).executable(true);
-            ap_startup_mapped_pages = page_table.map_allocated_pages_to(
-                ap_startup_pages,
-                ap_startup_frames,
-                flags,
-            )?;
-
-            // copy the entry point code
-            // instructions must have an alignment of four bytes; a page's alignment will always be larger.
-            let dst = ap_startup_mapped_pages.as_slice_mut(0, PAGE_SIZE).unwrap();
-            let src = unsafe { (ap_entry_point as *const [u8; PAGE_SIZE]).as_ref() }.unwrap();
-            dst.copy_from_slice(src);
-
-        } else {
-            // get the physical address of ap_entry_point
-            let entry_point_virt_addr = VirtualAddress::new_canonical(ap_entry_point as usize);
-            entry_point_phys_addr = page_table.translate(entry_point_virt_addr).unwrap();
-        }
-    }
+        page_table.translate(ap_data.ap_data_virt_addr.read()).unwrap()
+    };
 
     let mut ap_stack = None;
     for def_mpidr in BOARD_CONFIG.cpu_ids {
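The change above replaces the hardcoded-address scheme with memory::create_identity_mapping. As a recap of the pattern it introduces, here is the identity-map/copy/remap sequence pulled together into one standalone sketch; copy_code_to_identity_page is a hypothetical helper (not part of this change) that only recombines the calls shown in the diff:

use memory::{create_identity_mapping, MappedPages, MmiRef, PteFlags, VirtualAddress};
use kernel_config::memory::PAGE_SIZE;

/// Sketch: identity-map one page as RWX, copy `PAGE_SIZE` bytes of code
/// (starting at `code`, which must lie within `src_pages`) into it,
/// then remap the page as read+execute only (W^X).
fn copy_code_to_identity_page(
    kernel_mmi_ref: &MmiRef,
    src_pages: &MappedPages,
    code: VirtualAddress,
) -> Result<MappedPages, &'static str> {
    let rwx = PteFlags::new().valid(true).writable(true).executable(true);
    let mut id_page = create_identity_mapping(1, rwx)?;

    // Locate the code within the source mapping and copy one page of it.
    let offset = src_pages.offset_of_address(code)
        .ok_or("address not covered by the given source pages")?;
    let src: &[u8] = src_pages.as_slice(offset, PAGE_SIZE)?;
    let dst: &mut [u8] = id_page.as_slice_mut(0, PAGE_SIZE)?;
    dst.copy_from_slice(src);

    // Drop write permission once the code is in place.
    let rx = PteFlags::new().valid(true).executable(true);
    id_page.remap(&mut kernel_mmi_ref.lock().page_table, rx)?;
    Ok(id_page)
}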
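The per-CPU loop that begins at the end of this hunk is truncated here. For context, the psci::cpu_on import at the top corresponds to the standard PSCI CPU_ON call, which takes the target core's MPIDR, the physical address at which that core should start executing (with the MMU still off, hence the identity-mapped entry point above), and an opaque context value delivered to the new core. A rough, hypothetical sketch of such a call; target_mpidr is a placeholder, and the exact signature of cpu_on in the psci crate is an assumption:

// Ask firmware to power on one secondary core at the identity-mapped
// `ap_entry_point`, handing it the physical address of the shared
// ApTrampolineData as its context argument.
cpu_on(
    target_mpidr,                          // target core, by MPIDR value
    entry_point_phys_addr.value() as u64,  // code entry point, physical
    ap_data_phys_addr.value() as u64,      // context arg for the new core
).map_err(|_e| "cpu_on failed to power on the target CPU")?;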