@@ -38,7 +38,7 @@ use static_array_rb_tree::*;
 
 
 /// Certain regions are pre-designated for special usage, specifically the kernel's initial identity mapping.
-/// They will be allocated from if an address within them is specifically requested; 
+/// They will be allocated from if an address within them is specifically requested;
 /// otherwise, they will only be allocated from as a "last resort" if all other non-designated address ranges are exhausted.
 ///
 /// Any virtual addresses **less than or equal** to this address are considered "designated".
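
Expressed as code, the rule in that last doc line is a single inclusive comparison against the boundary. A minimal sketch (the function name and the raw-`usize` addresses are illustrative, not the crate's actual API):

```rust
/// Illustrative only: per the doc comment above, a virtual address is
/// "designated" iff it is less than or equal to the designated boundary.
fn is_designated(vaddr: usize, designated_pages_low_end: usize) -> bool {
    vaddr <= designated_pages_low_end
}

fn main() {
    let boundary = 0x20_0000_usize; // hypothetical boundary address
    assert!(is_designated(0x10_0000, boundary));
    assert!(!is_designated(0x30_0000, boundary));
}
```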
@@ -536,10 +536,15 @@ fn find_specific_chunk(
 /// If no range is specified, this function first attempts to find a suitable chunk
 /// that is **not** within the designated regions,
 /// and only allocates from the designated regions as a backup option.
+///
+/// If an alignment is specified (in terms of the number of 4KiB pages), then the starting page
+/// in the allocated range must be aligned to that number of pages.
+/// If no specific alignment is needed, the default alignment of 1 page should be used.
 fn find_any_chunk(
     list: &mut StaticArrayRBTree<Chunk>,
     num_pages: usize,
     within_range: Option<&PageRange>,
+    alignment_4k_pages: usize,
 ) -> Result<(AllocatedPages, DeferredAllocAction<'static>), AllocationError> {
     let designated_low_end = DESIGNATED_PAGES_LOW_END.get()
         .ok_or(AllocationError::NotInitialized)?;
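
For intuition about the new parameter: `align_up` rounds a page up to the next page whose number is a multiple of the requested alignment, measured in 4KiB pages. A minimal sketch of that arithmetic (the `Page` struct and `align_up` signature below are illustrative stand-ins, not necessarily the crate's actual definitions):

```rust
/// Illustrative stand-in for the allocator's `Page` type,
/// identified by its index among 4KiB pages.
#[derive(Clone, Copy, Debug, PartialEq)]
struct Page {
    number: usize,
}

impl Page {
    /// Rounds this page up to the nearest page whose number is a
    /// multiple of `alignment_4k_pages` (an alignment of 1 is a no-op).
    fn align_up(self, alignment_4k_pages: usize) -> Page {
        let a = alignment_4k_pages;
        Page { number: ((self.number + a - 1) / a) * a }
    }
}

fn main() {
    // Page 0x1005 rounded up to a 16-page boundary is page 0x1010.
    assert_eq!(Page { number: 0x1005 }.align_up(16), Page { number: 0x1010 });
    // Already-aligned pages are unchanged.
    assert_eq!(Page { number: 0x1010 }.align_up(16), Page { number: 0x1010 });
}
```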
@@ -555,7 +560,8 @@ fn find_any_chunk(
                 if let Some(chunk) = elem {
                     // Use max and min below to ensure that the range of pages we allocate from
                     // is within *both* the current chunk's bounds and the range's bounds.
-                    let lowest_possible_start_page = *max(chunk.start(), range.start());
+                    let lowest_possible_start_page = max(chunk.start(), range.start())
+                        .align_up(alignment_4k_pages);
                     let highest_possible_end_page = *min(chunk.end(), range.end());
                     if lowest_possible_start_page + num_pages <= highest_possible_end_page {
                         return adjust_chosen_chunk(
@@ -589,7 +595,8 @@ fn find_any_chunk(
             while let Some(chunk) = cursor.get().map(|w| w.deref()) {
                 // Use max and min below to ensure that the range of pages we allocate from
                 // is within *both* the current chunk's bounds and the range's bounds.
-                let lowest_possible_start_page = *max(chunk.start(), range.start());
+                let lowest_possible_start_page = max(chunk.start(), range.start())
+                    .align_up(alignment_4k_pages);
                 let highest_possible_end_page = *min(chunk.end(), range.end());
                 if lowest_possible_start_page + num_pages <= highest_possible_end_page {
                     return adjust_chosen_chunk(
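
To see why the bounds check above must use the aligned start rather than the chunk's raw size: a chunk can hold `num_pages` pages in total and still fail the request once its start is rounded up. A small sketch of that situation, using hypothetical raw page numbers in place of `Page` values:

```rust
use core::cmp::{max, min};

fn main() {
    // Hypothetical chunk and requested range, as raw 4KiB page numbers.
    let (chunk_start, chunk_end) = (0x1005_usize, 0x1025_usize); // 33 pages, inclusive
    let (range_start, range_end) = (0x1000_usize, 0x2000_usize);
    let num_pages = 30;
    let alignment_4k_pages = 16;

    // Same computation as the diff: constrain to both bounds, then align up.
    let lowest = max(chunk_start, range_start);
    let aligned =
        ((lowest + alignment_4k_pages - 1) / alignment_4k_pages) * alignment_4k_pages;
    let highest = min(chunk_end, range_end);

    // The chunk holds 33 pages, which is enough for 30 by raw size...
    assert!(chunk_end - chunk_start + 1 >= num_pages);
    // ...but aligning the start up to 0x1010 leaves only
    // 0x1025 - 0x1010 + 1 = 22 pages, so the aligned check fails.
    assert_eq!(aligned, 0x1010);
    assert!(aligned + num_pages > highest); // this chunk is skipped
}
```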
@@ -621,8 +628,14 @@ fn find_any_chunk(
         Inner::Array(ref mut arr) => {
             for elem in arr.iter_mut() {
                 if let Some(chunk) = elem {
-                    if num_pages <= chunk.size_in_pages() {
-                        return adjust_chosen_chunk(*chunk.start(), num_pages, &chunk.clone(), ValueRefMut::Array(elem));
+                    let lowest_possible_start_page = chunk.start().align_up(alignment_4k_pages);
+                    if lowest_possible_start_page + num_pages <= *chunk.end() {
+                        return adjust_chosen_chunk(
+                            lowest_possible_start_page,
+                            num_pages,
+                            &chunk.clone(),
+                            ValueRefMut::Array(elem),
+                        );
                     }
                 }
             }
@@ -644,8 +657,14 @@ fn find_any_chunk(
             // The first iterates over the lower designated region, from higher addresses to lower, down to zero.
             let mut cursor = tree.upper_bound_mut(Bound::Included(designated_low_end));
             while let Some(chunk) = cursor.get().map(|w| w.deref()) {
-                if num_pages < chunk.size_in_pages() {
-                    return adjust_chosen_chunk(*chunk.start(), num_pages, &chunk.clone(), ValueRefMut::RBTree(cursor));
+                let lowest_possible_start_page = chunk.start().align_up(alignment_4k_pages);
+                if lowest_possible_start_page + num_pages <= *chunk.end() {
+                    return adjust_chosen_chunk(
+                        lowest_possible_start_page,
+                        num_pages,
+                        &chunk.clone(),
+                        ValueRefMut::RBTree(cursor),
+                    );
                 }
                 cursor.move_prev();
             }
@@ -657,8 +676,14 @@ fn find_any_chunk(
                     // we already iterated over non-designated pages in the first match statement above, so we're out of memory.
                     break;
                 }
-                if num_pages < chunk.size_in_pages() {
-                    return adjust_chosen_chunk(*chunk.start(), num_pages, &chunk.clone(), ValueRefMut::RBTree(cursor));
+                let lowest_possible_start_page = chunk.start().align_up(alignment_4k_pages);
+                if lowest_possible_start_page + num_pages <= *chunk.end() {
+                    return adjust_chosen_chunk(
+                        lowest_possible_start_page,
+                        num_pages,
+                        &chunk.clone(),
+                        ValueRefMut::RBTree(cursor),
+                    );
                 }
                 cursor.move_prev();
             }
@@ -729,23 +754,31 @@ fn adjust_chosen_chunk(
 }
 
 
-/// Possible options when requested pages from the page allocator.
+/// Possible options when requesting pages from the page allocator.
 pub enum AllocationRequest<'r> {
-    /// The allocated pages can be located at any virtual address.
-    Any,
     /// The allocated pages must start exactly at the given `VirtualAddress`.
     AtVirtualAddress(VirtualAddress),
+    /// The allocated pages may be located at any virtual address,
+    /// but the starting page must be aligned to a multiple of `alignment_4k_pages`.
+    /// An alignment of `1` page is equivalent to specifying no alignment requirement.
+    ///
+    /// Note: alignment is specified in number of 4KiB pages, not number of bytes.
+    AlignedTo { alignment_4k_pages: usize },
     /// The allocated pages can be located anywhere within the given range.
     WithinRange(&'r PageRange),
+    /// The allocated pages can be located at any virtual address
+    /// and have no special alignment requirements beyond a single page.
+    Any,
 }
 
+
 /// The core page allocation routine that allocates the given number of virtual pages,
 /// optionally at the requested starting `VirtualAddress`.
 ///
 /// This simply reserves a range of virtual addresses, it does not allocate
 /// actual physical memory frames nor do any memory mapping.
 /// Thus, the returned `AllocatedPages` aren't directly usable until they are mapped to physical frames.
-/// 
+///
 /// Allocation is based on a red-black tree and is thus `O(log(n))`.
 /// Fragmentation isn't cleaned up until we're out of address space, but that's not really a big deal.
 ///
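
As a usage sketch of the new variant: requesting a 2MiB-aligned, 2MiB-sized region means asking for 512 pages whose starting page number is a multiple of 512. This hypothetical call site assumes the crate's items are in scope, that the request comes before `num_pages` in `allocate_pages_deferred`'s parameter list, and it elides what the caller does with the `DeferredAllocAction`:

```rust
// Hypothetical call site; parameter order and error type are assumptions
// based on this diff, not a verbatim API reference.
fn allocate_2mib_aligned() -> Result<AllocatedPages, &'static str> {
    let (pages, _deferred_action) = allocate_pages_deferred(
        // Start page must be a multiple of 512 4KiB pages (a 2MiB boundary).
        AllocationRequest::AlignedTo { alignment_4k_pages: 512 },
        512, // num_pages: 512 * 4KiB = 2MiB of virtual address space
    )?;
    // `pages` only reserves virtual addresses; they must still be mapped
    // to physical frames before use (per the doc comment above).
    Ok(pages)
}
```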
@@ -780,11 +813,14 @@ pub fn allocate_pages_deferred(
         AllocationRequest::AtVirtualAddress(vaddr) => {
             find_specific_chunk(&mut locked_list, Page::containing_address(vaddr), num_pages)
         }
-        AllocationRequest::Any => {
-            find_any_chunk(&mut locked_list, num_pages, None)
+        AllocationRequest::AlignedTo { alignment_4k_pages } => {
+            find_any_chunk(&mut locked_list, num_pages, None, alignment_4k_pages)
         }
         AllocationRequest::WithinRange(range) => {
-            find_any_chunk(&mut locked_list, num_pages, Some(range))
+            find_any_chunk(&mut locked_list, num_pages, Some(range), 1)
+        }
+        AllocationRequest::Any => {
+            find_any_chunk(&mut locked_list, num_pages, None, 1)
         }
     };
     res.map_err(From::from) // convert from AllocationError to &str
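
A brief note on the `1` arguments above: aligning up to a multiple of one page is the identity operation, which is why `WithinRange` and `Any` keep their previous behavior. A quick check of that equivalence in plain page-number arithmetic:

```rust
fn align_up(page_number: usize, alignment_4k_pages: usize) -> usize {
    let a = alignment_4k_pages;
    ((page_number + a - 1) / a) * a
}

fn main() {
    // With an alignment of 1 page, align_up never moves the start page,
    // so passing `1` preserves the old unaligned allocation behavior.
    for page in [0_usize, 1, 0x1005, 0xdead_beef] {
        assert_eq!(align_up(page, 1), page);
    }
}
```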