@@ -56,6 +56,7 @@ pub struct MmapRegion {
     capacity: usize,
     freelist: Mutex<Vec<Slot>>,
     limits: Limits,
+    min_heap_alignment: usize,
 }
 
 impl Region for MmapRegion {}
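A `min_heap_alignment` of 0, which the existing `create` constructor sets below, means no alignment constraint: `create_slot` falls back to a plain `mmap` in that case.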
@@ -281,6 +282,48 @@ impl MmapRegion {
             capacity: instance_capacity,
             freelist: Mutex::new(Vec::with_capacity(instance_capacity)),
             limits: limits.clone(),
+            min_heap_alignment: 0, // no constraints on heap alignment by default
+        });
+        {
+            let mut freelist = region.freelist.lock().unwrap();
+            for _ in 0..instance_capacity {
+                freelist.push(MmapRegion::create_slot(&region)?);
+            }
+        }
+
+        Ok(region)
+    }
+
+    /// Create a new `MmapRegion` that can support a given number of instances, each subject
+    /// to the same runtime limits. Additionally, ensure that each instance's heap is aligned
+    /// to at least the given amount. `heap_alignment` must be a power of 2.
+    ///
+    /// The region is returned in an `Arc`, because any instances created from it carry a
+    /// reference back to the region.
+    pub fn create_aligned(
+        instance_capacity: usize,
+        limits: &Limits,
+        heap_alignment: usize,
+    ) -> Result<Arc<Self>, Error> {
+        assert!(
+            SIGSTKSZ % host_page_size() == 0,
+            "signal stack size is a multiple of host page size"
+        );
+        limits.validate()?;
+
+        // `usize::is_power_of_two` returns false for 0, so a zero alignment is rejected too
+        if !heap_alignment.is_power_of_two() {
+            return Err(Error::InvalidArgument(
+                "heap_alignment must be a power of 2",
+            ));
+        }
+
+        let region = Arc::new(MmapRegion {
+            capacity: instance_capacity,
+            freelist: Mutex::new(Vec::with_capacity(instance_capacity)),
+            limits: limits.clone(),
+            min_heap_alignment: heap_alignment,
         });
         {
             let mut freelist = region.freelist.lock().unwrap();
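From a caller's perspective, `create_aligned` is a drop-in alternative to `MmapRegion::create`. A minimal usage sketch, assuming `Limits` implements `Default` and the usual `lucet_runtime` re-exports (the 2 MiB alignment is illustrative):

```rust
use lucet_runtime::{Limits, MmapRegion};

fn main() {
    // a region of 8 slots whose instance heaps each start on a 2 MiB boundary
    let region = MmapRegion::create_aligned(8, &Limits::default(), 2 * 1024 * 1024)
        .expect("create_aligned failed");
    // `region` hands out slots exactly like one built by `MmapRegion::create`
    drop(region);
}
```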
@@ -294,15 +337,27 @@ impl MmapRegion {
 
     fn create_slot(region: &Arc<MmapRegion>) -> Result<Slot, Error> {
         // get the chunk of virtual memory that the `Slot` will manage
-        let mem = unsafe {
-            mmap(
-                ptr::null_mut(),
-                region.limits.total_memory_size(),
-                ProtFlags::PROT_NONE,
-                MapFlags::MAP_ANON | MapFlags::MAP_PRIVATE,
-                0,
-                0,
-            )?
+        let mem = if region.min_heap_alignment == 0 {
+            // no alignment requested: let the kernel place the mapping anywhere
+            unsafe {
+                mmap(
+                    ptr::null_mut(),
+                    region.limits.total_memory_size(),
+                    ProtFlags::PROT_NONE,
+                    MapFlags::MAP_ANON | MapFlags::MAP_PRIVATE,
+                    0,
+                    0,
+                )?
+            }
+        } else {
+            unsafe {
+                mmap_aligned(
+                    region.limits.total_memory_size(),
+                    ProtFlags::PROT_NONE,
+                    MapFlags::MAP_ANON | MapFlags::MAP_PRIVATE,
+                    region.min_heap_alignment, // requested alignment
+                    instance_heap_offset(),    // offset that must be aligned
+                )?
+            }
         };
 
         // set the first part of the memory to read/write so that the `Instance` can be stored there
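The `mmap_aligned` helper added in the next hunk uses the classic over-allocate-and-trim approach: it maps `requested_length + alignment + alignment_offset` bytes, rounds the returned address up to the next multiple of `alignment`, shifts the result so the alignment holds at `alignment_offset`, and finally `munmap`s the unused head and tail of the mapping. The padding also guarantees the arithmetic cannot run out of room: in both branches of the offset adjustment, `aligned` lands in `[unaligned, unaligned + alignment)`, so the requested span always fits inside the padded mapping and the sanity check is purely defensive.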
@@ -343,7 +398,138 @@ impl MmapRegion {
     }
 }
 
+// Note: `alignment` must be a power of 2, and `alignment_offset` must be a multiple of the
+// host page size (4 KiB)
+unsafe fn mmap_aligned(
+    requested_length: usize,
+    prot: ProtFlags,
+    flags: MapFlags,
+    alignment: usize,
+    alignment_offset: usize,
+) -> Result<*mut c_void, Error> {
+    let addr = ptr::null_mut();
+    let fd = 0;
+    let offset = 0;
+
+    // over-allocate so that an address with the right alignment is guaranteed to exist
+    // somewhere in the mapping
+    let padded_length = requested_length + alignment + alignment_offset;
+    let unaligned = mmap(addr, padded_length, prot, flags, fd, offset)? as usize;
+
+    // round up to the next address with addr % alignment == 0
+    let aligned_nonoffset = (unaligned + (alignment - 1)) & !(alignment - 1);
+
+    // `aligned_nonoffset` is aligned at offset 0; shift it so the alignment holds at
+    // `alignment_offset` instead, without leaving the mapping
+    let aligned = if aligned_nonoffset - alignment_offset >= unaligned {
+        aligned_nonoffset - alignment_offset
+    } else {
+        aligned_nonoffset - alignment_offset + alignment
+    };
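+
+    // Worked example (hypothetical numbers): if `mmap` returns 0x7f00_0040_3000 and the caller
+    // requests alignment = 0x20_0000 (2 MiB) at alignment_offset = 0x1000 (4 KiB), then
+    // aligned_nonoffset = 0x7f00_0060_0000 and aligned = 0x7f00_005f_f000: the alignment holds
+    // at the offset, and the span stays inside the padded mapping.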
+
+    // sanity check that the alignment and offset hold and the requested span is in bounds
+    if aligned < unaligned
+        || (aligned + (requested_length - 1)) > (unaligned + (padded_length - 1))
+        || (aligned + alignment_offset) % alignment != 0
+    {
+        // explicitly ignore failures here, as this is just a best-effort clean up after the
+        // previous failure
+        let _ = munmap(unaligned as *mut c_void, padded_length);
+        return Err(Error::Unsupported("Could not align memory".to_string()));
+    }
+
+    {
+        // trim the unused pages in front of the aligned span
+        let unused_front = aligned - unaligned;
+        if unused_front != 0 {
+            if munmap(unaligned as *mut c_void, unused_front).is_err() {
+                // explicitly ignore failures here, as this is just a best-effort clean up
+                let _ = munmap(unaligned as *mut c_void, padded_length);
+                return Err(Error::Unsupported("Could not align memory".to_string()));
+            }
+        }
+    }
+
+    {
+        // trim the unused pages behind the aligned span
+        let unused_back = (unaligned + (padded_length - 1)) - (aligned + (requested_length - 1));
+        if unused_back != 0 {
+            if munmap((aligned + requested_length) as *mut c_void, unused_back).is_err() {
+                // explicitly ignore failures here, as this is just a best-effort clean up
+                let _ = munmap(unaligned as *mut c_void, padded_length);
+                return Err(Error::Unsupported("Could not align memory".to_string()));
+            }
+        }
+    }
+
+    Ok(aligned as *mut c_void)
+}
+
 // TODO: remove this once `nix` PR https://github.com/nix-rust/nix/pull/991 is merged
 unsafe fn mprotect(addr: *mut c_void, length: libc::size_t, prot: ProtFlags) -> nix::Result<()> {
     nix::errno::Errno::result(libc::mprotect(addr, length, prot.bits())).map(drop)
 }
+
+#[cfg(test)]
+mod tests2 {
+    use super::*;
+    use nix::sys::mman::{munmap, MapFlags, ProtFlags};
+
+    #[test]
+    fn test_aligned_mem() {
+        let kb: usize = 1024;
+        let mb: usize = 1024 * kb;
+
+        struct TestProps {
+            pub mem_size: usize,
+            pub mem_align: usize,
+            pub offset: usize,
+        }
+
+        let tests = vec![
+            TestProps {
+                mem_size: 1 * mb,
+                mem_align: 1 * mb,
+                offset: 0,
+            },
+            TestProps {
+                mem_size: 1 * mb,
+                mem_align: 2 * mb,
+                offset: 0,
+            },
+            TestProps {
+                mem_size: 32 * mb,
+                mem_align: 32 * mb,
+                offset: 0,
+            },
+            TestProps {
+                mem_size: 32 * mb,
+                mem_align: 32 * mb,
+                offset: 4 * kb,
+            },
+        ];
+
+        for test in tests {
+            let mem = unsafe {
+                mmap_aligned(
+                    test.mem_size,
+                    ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
+                    MapFlags::MAP_ANON | MapFlags::MAP_PRIVATE,
+                    test.mem_align,
+                    test.offset,
+                )
+                .unwrap()
+            };
+
+            // check alignment at the requested offset
+            let actual_align = ((mem as usize) + test.offset) % test.mem_align;
+            assert_eq!(actual_align, 0);
+
+            // make sure the memory is accessible
+            let mem_slice =
+                unsafe { std::slice::from_raw_parts_mut(mem as *mut u8, test.mem_size) };
+            for loc in mem_slice {
+                *loc = 1;
+            }
+
+            unsafe {
+                munmap(mem, test.mem_size).unwrap();
+            }
+        }
+    }
+}
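A negative test would round this out; a hypothetical sketch in the same style as `tests2`, again assuming `Limits: Default`:

```rust
#[test]
fn rejects_non_power_of_two_alignment() {
    // 3 MiB is not a power of 2, so `create_aligned` should return an
    // `Error::InvalidArgument` instead of building a region
    let limits = Limits::default();
    assert!(MmapRegion::create_aligned(1, &limits, 3 * 1024 * 1024).is_err());
}
```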