
Run rustfmt on all files

Tom Almeida, 5 years ago
commit 92ca1d73ef
22 files changed, 541 insertions and 291 deletions
  1. benches/mpsc.rs (+1 -1)
  2. benches/sbrk.rs (+1 -3)
  3. benches/vec.rs (+3 -1)
  4. rustfmt.toml (+3 -0)
  5. src/allocator.rs (+141 -84)
  6. src/block.rs (+82 -47)
  7. src/bookkeeper.rs (+174 -81)
  8. src/brk.rs (+23 -14)
  9. src/cell.rs (+9 -6)
  10. src/fail.rs (+1 -1)
  11. src/lib.rs (+24 -10)
  12. src/log.rs (+18 -10)
  13. src/prelude.rs (+1 -1)
  14. src/ptr.rs (+1 -1)
  15. src/sync.rs (+6 -5)
  16. src/tls.rs (+4 -2)
  17. src/vec.rs (+15 -10)
  18. tests/arc.rs (+2 -1)
  19. tests/cross_thread_drop.rs (+2 -1)
  20. tests/mpsc.rs (+1 -1)
  21. tests/util/mod.rs (+27 -9)
  22. tests/vec.rs (+2 -2)

+ 1 - 1
benches/mpsc.rs

@@ -3,8 +3,8 @@
 extern crate ralloc;
 extern crate test;
 
-use std::thread;
 use std::sync::mpsc;
+use std::thread;
 
 #[bench]
 fn bench_mpsc(b: &mut test::Bencher) {

+ 1 - 3
benches/sbrk.rs

@@ -5,7 +5,5 @@ extern crate test;
 
 #[bench]
 fn bench_sbrk(b: &mut test::Bencher) {
-    b.iter(|| {
-        ralloc::sbrk(200).unwrap()
-    });
+    b.iter(|| ralloc::sbrk(200).unwrap());
 }

+ 3 - 1
benches/vec.rs

@@ -8,7 +8,9 @@ fn bench_vec(b: &mut test::Bencher) {
     b.iter(|| {
         let mut stuff = Vec::with_capacity(10);
 
-        for i in 0..10000 { stuff.push(i) }
+        for i in 0..10000 {
+            stuff.push(i)
+        }
 
         stuff.reserve(100000);
 

+ 3 - 0
rustfmt.toml

@@ -0,0 +1,3 @@
+wrap_comments = true
+max_width = 80
+error_on_line_overflow = true
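
The three options above account for essentially all of the churn in this commit: `wrap_comments = true` rewraps doc and line comments to fit `max_width = 80`, and `error_on_line_overflow = true` makes rustfmt report an error for any line it cannot bring under that width. As a minimal sketch of the effect (mirroring the first hunk of src/allocator.rs below, and assuming rustfmt is invoked via `cargo fmt` with this rustfmt.toml at the crate root):

    // Before: a doc comment longer than max_width = 80.
    /// Alias for the wrapper type of the thread-local variable holding the local allocator.

    // After rustfmt with wrap_comments = true: the comment is rewrapped at the
    // 80-column limit.
    /// Alias for the wrapper type of the thread-local variable holding the local
    /// allocator.

Note that the rewrapping is purely textual; as some hunks below show, it can run numbered list items in doc comments together, so the output occasionally needs manual touch-up.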

+ 141 - 84
src/allocator.rs

@@ -6,22 +6,25 @@ use prelude::*;
 
 use core::{mem, ops};
 
+use bookkeeper::{self, Allocator, Bookkeeper};
 use {brk, sync};
-use bookkeeper::{self, Bookkeeper, Allocator};
 
 use shim::config;
 
 #[cfg(feature = "tls")]
 use tls;
 
-/// Alias for the wrapper type of the thread-local variable holding the local allocator.
+/// Alias for the wrapper type of the thread-local variable holding the local
+/// allocator.
 #[cfg(feature = "tls")]
-type ThreadLocalAllocator = MoveCell<Option<LazyInit<fn() -> LocalAllocator, LocalAllocator>>>;
+type ThreadLocalAllocator =
+    MoveCell<Option<LazyInit<fn() -> LocalAllocator, LocalAllocator>>>;
 
 /// The global default allocator.
 // TODO: Remove these filthy function pointers.
-static GLOBAL_ALLOCATOR: sync::Mutex<LazyInit<fn() -> GlobalAllocator, GlobalAllocator>> =
-    sync::Mutex::new(LazyInit::new(GlobalAllocator::init));
+static GLOBAL_ALLOCATOR: sync::Mutex<
+    LazyInit<fn() -> GlobalAllocator, GlobalAllocator>,
+> = sync::Mutex::new(LazyInit::new(GlobalAllocator::init));
 #[cfg(feature = "tls")]
 tls! {
     /// The thread-local allocator.
@@ -30,18 +33,19 @@ tls! {
 
 /// Temporarily get the allocator.
 ///
-/// This is simply to avoid repeating ourself, so we let this take care of the hairy stuff:
+/// This is simply to avoid repeating ourself, so we let this take care of the
+/// hairy stuff:
 ///
 /// 1. Initialize the allocator if needed.
-/// 2. If the allocator is not yet initialized, fallback to the global allocator.
-/// 3. Unlock/move temporarily out of reference.
+/// 2. If the allocator is not yet initialized, fallback to the global
+/// allocator. 3. Unlock/move temporarily out of reference.
 ///
-/// This is a macro due to the lack of generic closure, which makes it impossible to have one
-/// closure for both cases (global and local).
-// TODO: Instead of falling back to the global allocator, the thread dtor should be set such that
-// it run after the TLS keys that might be declared.
+/// This is a macro due to the lack of generic closure, which makes it
+/// impossible to have one closure for both cases (global and local).
+// TODO: Instead of falling back to the global allocator, the thread dtor
+// should be set such that it run after the TLS keys that might be declared.
 macro_rules! get_allocator {
-    (|$v:ident| $b:expr) => {{
+    (| $v:ident | $b:expr) => {{
         // Get the thread allocator, if TLS is enabled
         #[cfg(feature = "tls")]
         {
@@ -58,9 +62,12 @@ macro_rules! get_allocator {
 
                     res
                 } else {
-                    // The local allocator seems to have been deinitialized, for this reason we fallback to
-                    // the global allocator.
-                    log!(WARNING, "Accessing the allocator after deinitialization of the local allocator.");
+                    // The local allocator seems to have been deinitialized, for this reason we
+                    // fallback to the global allocator.
+                    log!(
+                        WARNING,
+                        "Accessing the allocator after deinitialization of the local allocator."
+                    );
 
                     // Lock the global allocator.
                     let mut guard = GLOBAL_ALLOCATOR.lock();
@@ -82,7 +89,7 @@ macro_rules! get_allocator {
             let $v = guard.get();
             $b
         }
-    }}
+    }};
 }
 
 /// Derives `Deref` and `DerefMut` to the `inner` field.
@@ -108,9 +115,9 @@ macro_rules! derive_deref {
 
 /// Global SBRK-based allocator.
 ///
-/// This will extend the data segment whenever new memory is needed. Since this includes leaving
-/// userspace, this shouldn't be used when other allocators are available (i.e. the bookkeeper is
-/// local).
+/// This will extend the data segment whenever new memory is needed. Since this
+/// includes leaving userspace, this shouldn't be used when other allocators
+/// are available (i.e. the bookkeeper is local).
 struct GlobalAllocator {
     // The inner bookkeeper.
     inner: Bookkeeper,
@@ -123,8 +130,10 @@ impl GlobalAllocator {
         log!(NOTE, "Initializing the global allocator.");
 
         // The initial acquired segment.
-        let (aligner, initial_segment, excessive) =
-            brk::lock().canonical_brk(4 * bookkeeper::EXTRA_ELEMENTS * mem::size_of::<Block>(), mem::align_of::<Block>());
+        let (aligner, initial_segment, excessive) = brk::lock().canonical_brk(
+            4 * bookkeeper::EXTRA_ELEMENTS * mem::size_of::<Block>(),
+            mem::align_of::<Block>(),
+        );
 
         // Initialize the new allocator.
         let mut res = GlobalAllocator {
@@ -149,11 +158,13 @@ impl Allocator for GlobalAllocator {
     #[inline]
     fn alloc_fresh(&mut self, size: usize, align: usize) -> Block {
         // Obtain what you need.
-        let (alignment_block, res, excessive) = brk::lock().canonical_brk(size, align);
+        let (alignment_block, res, excessive) =
+            brk::lock().canonical_brk(size, align);
 
-        // Add it to the list. This will not change the order, since the pointer is higher than all
-        // the previous blocks (BRK extends the data segment). Although, it is worth noting that
-        // the stack is higher than the program break.
+        // Add it to the list. This will not change the order, since the
+        // pointer is higher than all the previous blocks (BRK extends
+        // the data segment). Although, it is worth noting that the
+        // stack is higher than the program break.
         self.push(alignment_block);
         self.push(excessive);
 
@@ -165,7 +176,9 @@ impl Allocator for GlobalAllocator {
             // memtrim the fack outta 'em.
 
             // Pop the last block.
-            let block = self.pop().expect("The byte count on the global allocator is invalid.");
+            let block = self
+                .pop()
+                .expect("The byte count on the global allocator is invalid.");
 
             // Check if the memtrim is worth it.
             if block.size() >= config::OS_MEMTRIM_WORTHY {
@@ -179,9 +192,10 @@ impl Allocator for GlobalAllocator {
                     self.push(block);
                 }
 
-                // Note that this block is the only block next to the program break, due to the
-                // segments being as long as possible. For that reason, repeating to push and
-                // release would fail.
+            // Note that this block is the only block next to the program
+            // break, due to the segments being as long as
+            // possible. For that reason, repeating to push and
+            // release would fail.
             } else {
                 /// Logging...
                 log!(WARNING, "Memtrimming for the global allocator failed.");
@@ -196,7 +210,8 @@ impl Allocator for GlobalAllocator {
 
 /// A local allocator.
 ///
-/// This acquires memory from the upstream (global) allocator, which is protected by a `Mutex`.
+/// This acquires memory from the upstream (global) allocator, which is
+/// protected by a `Mutex`.
 #[cfg(feature = "tls")]
 pub struct LocalAllocator {
     // The inner bookkeeper.
@@ -211,36 +226,45 @@ impl LocalAllocator {
         /// The destructor of the local allocator.
         ///
         /// This will simply free everything to the global allocator.
-        extern fn dtor(alloc: &ThreadLocalAllocator) {
+        extern "C" fn dtor(alloc: &ThreadLocalAllocator) {
             /// Logging...
             log!(NOTE, "Deinitializing and freeing the local allocator.");
 
-            // This is important! The thread destructors guarantee no other, and thus one could use the
-            // allocator _after_ this destructor have been finished. In fact, this is a real problem,
-            // and happens when using `Arc` and terminating the main thread, for this reason we place
-            // `None` as a permanent marker indicating that the allocator is deinitialized. After such
-            // a state is in place, all allocation calls will be redirected to the global allocator,
-            // which is of course still usable at this moment.
-            let alloc = alloc.replace(None).expect("Thread-local allocator is already freed.");
+            // This is important! The thread destructors guarantee no other,
+            // and thus one could use the allocator _after_ this
+            // destructor have been finished. In fact, this is a
+            // real problem, and happens when using `Arc` and
+            // terminating the main thread, for this reason we place
+            // `None` as a permanent marker indicating that the allocator is
+            // deinitialized. After such a state is in place, all
+            // allocation calls will be redirected to the global
+            // allocator, which is of course still usable at this
+            // moment.
+            let alloc = alloc
+                .replace(None)
+                .expect("Thread-local allocator is already freed.");
 
             // Lock the global allocator.
             let mut global_alloc = GLOBAL_ALLOCATOR.lock();
             let global_alloc = global_alloc.get();
 
-            // TODO: we know this is sorted, so we could abuse that fact to faster insertion in the
-            // global allocator.
+            // TODO: we know this is sorted, so we could abuse that fact to
+            // faster insertion in the global allocator.
 
-            alloc.into_inner().inner.for_each(move |block| global_alloc.free(block));
+            alloc
+                .into_inner()
+                .inner
+                .for_each(move |block| global_alloc.free(block));
         }
 
         /// Logging...
         log!(NOTE, "Initializing the local allocator.");
 
         // The initial acquired segment.
-        let initial_segment = GLOBAL_ALLOCATOR
-            .lock()
-            .get()
-            .alloc(4 * bookkeeper::EXTRA_ELEMENTS * mem::size_of::<Block>(), mem::align_of::<Block>());
+        let initial_segment = GLOBAL_ALLOCATOR.lock().get().alloc(
+            4 * bookkeeper::EXTRA_ELEMENTS * mem::size_of::<Block>(),
+            mem::align_of::<Block>(),
+        );
 
         unsafe {
             // LAST AUDIT: 2016-08-21 (Ticki).
@@ -262,17 +286,19 @@ derive_deref!(LocalAllocator, Bookkeeper);
 impl Allocator for LocalAllocator {
     #[inline]
     fn alloc_fresh(&mut self, size: usize, align: usize) -> Block {
-        // Get the block from the global allocator. Please note that we cannot canonicalize `size`,
-        // due to freeing excessive blocks would change the order.
+        // Get the block from the global allocator. Please note that we cannot
+        // canonicalize `size`, due to freeing excessive blocks would change
+        // the order.
         GLOBAL_ALLOCATOR.lock().get().alloc(size, align)
     }
 
     #[inline]
     fn on_new_memory(&mut self) {
-        // The idea is to free memory to the global allocator to unify small stubs and avoid
-        // fragmentation and thread accumulation.
+        // The idea is to free memory to the global allocator to unify small
+        // stubs and avoid fragmentation and thread accumulation.
         if self.total_bytes() < config::FRAGMENTATION_SCALE * self.len()
-           || self.total_bytes() > config::LOCAL_MEMTRIM_LIMIT {
+            || self.total_bytes() > config::LOCAL_MEMTRIM_LIMIT
+        {
             // Log stuff.
             log!(NOTE, "Memtrimming the local allocator.");
 
@@ -285,7 +311,9 @@ impl Allocator for LocalAllocator {
                 global_alloc.free(block);
 
                 // Memtrim 'till we won't memtrim anymore.
-                if self.total_bytes() < config::LOCAL_MEMTRIM_STOP { break; }
+                if self.total_bytes() < config::LOCAL_MEMTRIM_STOP {
+                    break;
+                }
             }
         }
     }
@@ -298,20 +326,25 @@ impl Allocator for LocalAllocator {
 /// The OOM handler handles out-of-memory conditions.
 #[inline]
 pub fn alloc(size: usize, align: usize) -> *mut u8 {
-    log!(CALL, "Allocating buffer of size {} (align {}).", size, align);
+    log!(
+        CALL,
+        "Allocating buffer of size {} (align {}).",
+        size,
+        align
+    );
 
     get_allocator!(|alloc| Pointer::from(alloc.alloc(size, align)).get())
 }
 
 /// Free a buffer.
 ///
-/// Note that this do not have to be a buffer allocated through ralloc. The only requirement is
-/// that it is not used after the free.
+/// Note that this do not have to be a buffer allocated through ralloc. The
+/// only requirement is that it is not used after the free.
 ///
 /// # Important!
 ///
-/// You should only allocate buffers allocated through `ralloc`. Anything else is considered
-/// invalid.
+/// You should only allocate buffers allocated through `ralloc`. Anything else
+/// is considered invalid.
 ///
 /// # Errors
 ///
@@ -319,26 +352,28 @@ pub fn alloc(size: usize, align: usize) -> *mut u8 {
 ///
 /// # Safety
 ///
-/// Rust assume that the allocation symbols returns correct values. For this reason, freeing
-/// invalid pointers might introduce memory unsafety.
+/// Rust assume that the allocation symbols returns correct values. For this
+/// reason, freeing invalid pointers might introduce memory unsafety.
 ///
 /// Secondly, freeing an used buffer can introduce use-after-free.
 #[inline]
 pub unsafe fn free(ptr: *mut u8, size: usize) {
     log!(CALL, "Freeing buffer of size {}.", size);
 
-    get_allocator!(|alloc| alloc.free(Block::from_raw_parts(Pointer::new(ptr), size)))
+    get_allocator!(
+        |alloc| alloc.free(Block::from_raw_parts(Pointer::new(ptr), size))
+    )
 }
 
 /// Reallocate memory.
 ///
-/// Reallocate the buffer starting at `ptr` with size `old_size`, to a buffer starting at the
-/// returned pointer with size `size`.
+/// Reallocate the buffer starting at `ptr` with size `old_size`, to a buffer
+/// starting at the returned pointer with size `size`.
 ///
 /// # Important!
 ///
-/// You should only reallocate buffers allocated through `ralloc`. Anything else is considered
-/// invalid.
+/// You should only reallocate buffers allocated through `ralloc`. Anything
+/// else is considered invalid.
 ///
 /// # Errors
 ///
@@ -346,39 +381,61 @@ pub unsafe fn free(ptr: *mut u8, size: usize) {
 ///
 /// # Safety
 ///
-/// Due to being able to potentially memcpy an arbitrary buffer, as well as shrinking a buffer,
-/// this is marked unsafe.
+/// Due to being able to potentially memcpy an arbitrary buffer, as well as
+/// shrinking a buffer, this is marked unsafe.
 #[inline]
-pub unsafe fn realloc(ptr: *mut u8, old_size: usize, size: usize, align: usize) -> *mut u8 {
-    log!(CALL, "Reallocating buffer of size {} to new size {}.", old_size, size);
-
-    get_allocator!(|alloc| {
-        Pointer::from(alloc.realloc(
-            Block::from_raw_parts(Pointer::new(ptr), old_size),
-            size,
-            align
-        )).get()
-    })
+pub unsafe fn realloc(
+    ptr: *mut u8,
+    old_size: usize,
+    size: usize,
+    align: usize,
+) -> *mut u8 {
+    log!(
+        CALL,
+        "Reallocating buffer of size {} to new size {}.",
+        old_size,
+        size
+    );
+
+    get_allocator!(|alloc| Pointer::from(alloc.realloc(
+        Block::from_raw_parts(Pointer::new(ptr), old_size),
+        size,
+        align
+    )).get())
 }
 
 /// Try to reallocate the buffer _inplace_.
 ///
-/// In case of success, return the new buffer's size. On failure, return the old size.
+/// In case of success, return the new buffer's size. On failure, return the
+/// old size.
 ///
 /// This can be used to shrink (truncate) a buffer as well.
 ///
 /// # Safety
 ///
-/// Due to being able to shrink (and thus free) the buffer, this is marked unsafe.
+/// Due to being able to shrink (and thus free) the buffer, this is marked
+/// unsafe.
 #[inline]
-pub unsafe fn realloc_inplace(ptr: *mut u8, old_size: usize, size: usize) -> Result<(), ()> {
-    log!(CALL, "Inplace reallocating buffer of size {} to new size {}.", old_size, size);
+pub unsafe fn realloc_inplace(
+    ptr: *mut u8,
+    old_size: usize,
+    size: usize,
+) -> Result<(), ()> {
+    log!(
+        CALL,
+        "Inplace reallocating buffer of size {} to new size {}.",
+        old_size,
+        size
+    );
 
     get_allocator!(|alloc| {
-        if alloc.realloc_inplace(
-            Block::from_raw_parts(Pointer::new(ptr), old_size),
-            size
-        ).is_ok() {
+        if alloc
+            .realloc_inplace(
+                Block::from_raw_parts(Pointer::new(ptr), old_size),
+                size,
+            )
+            .is_ok()
+        {
             Ok(())
         } else {
             Err(())

+ 82 - 47
src/block.rs

@@ -1,30 +1,32 @@
 //! Memory blocks.
 //!
-//! Blocks are the main unit for the memory bookkeeping. A block is a simple construct with a
-//! `Pointer` pointer and a size. Occupied (non-free) blocks are represented by a zero-sized block.
+//! Blocks are the main unit for the memory bookkeeping. A block is a simple
+//! construct with a `Pointer` pointer and a size. Occupied (non-free) blocks
+//! are represented by a zero-sized block.
 
 // TODO: Check the allow(cast_possible_wrap)s again.
 
 use prelude::*;
 
-use core::{ptr, cmp, mem, fmt};
+use core::{cmp, fmt, mem, ptr};
 
 /// A contiguous memory block.
 ///
 /// This provides a number of guarantees,
 ///
-/// 1. The buffer is valid for the block's lifetime, but not necessarily initialized.
-/// 2. The Block "owns" the inner data.
-/// 3. There is no interior mutability. Mutation requires either mutable access or ownership over
-///    the block.
-/// 4. The buffer is not aliased. That is, it do not overlap with other blocks or is aliased in any
-///    way.
+/// 1. The buffer is valid for the block's lifetime, but not necessarily
+/// initialized. 2. The Block "owns" the inner data.
+/// 3. There is no interior mutability. Mutation requires either mutable access
+/// or ownership over    the block.
+/// 4. The buffer is not aliased. That is, it do not overlap with other blocks
+/// or is aliased in any    way.
 ///
-/// All this is enforced through the type system. These invariants can only be broken through
-/// unsafe code.
+/// All this is enforced through the type system. These invariants can only be
+/// broken through unsafe code.
 ///
-/// Accessing it through an immutable reference does not break these guarantees. That is, you are
-/// not able to read/mutate without acquiring a _mutable_ reference.
+/// Accessing it through an immutable reference does not break these
+/// guarantees. That is, you are not able to read/mutate without acquiring a
+/// _mutable_ reference.
 #[must_use]
 pub struct Block {
     /// The size of this block, in bytes.
@@ -67,8 +69,8 @@ impl Block {
             ptr: unsafe {
                 // LAST AUDIT: 2016-08-21 (Ticki).
 
-                // By the invariants of this type (the end is addressable), this conversion isn't
-                // overflowing.
+                // By the invariants of this type (the end is addressable),
+                // this conversion isn't overflowing.
                 self.ptr.clone().offset(self.size as isize)
             },
         }
@@ -76,23 +78,26 @@ impl Block {
 
     /// Merge this block with a block to the right.
     ///
-    /// This will simply extend the block, adding the size of the block, and then set the size to
-    /// zero. The return value is `Ok(())` on success, and `Err(())` on failure (e.g., the blocks
-    /// are not adjacent).
+    /// This will simply extend the block, adding the size of the block, and
+    /// then set the size to zero. The return value is `Ok(())` on success,
+    /// and `Err(())` on failure (e.g., the blocks are not adjacent).
     ///
-    /// If you merge with a zero sized block, it will succeed, even if they are not adjacent.
+    /// If you merge with a zero sized block, it will succeed, even if they are
+    /// not adjacent.
     #[inline]
     pub fn merge_right(&mut self, block: &mut Block) -> Result<(), ()> {
         if block.is_empty() {
             Ok(())
         } else if self.left_to(block) {
-            // Since the end of `block` is bounded by the address space, adding them cannot
-            // overflow.
+            // Since the end of `block` is bounded by the address space, adding
+            // them cannot overflow.
             self.size += block.pop().size;
             // We pop it to make sure it isn't aliased.
 
             Ok(())
-        } else { Err(()) }
+        } else {
+            Err(())
+        }
     }
 
     /// Is this block empty/free?
@@ -128,7 +133,11 @@ impl Block {
             // LAST AUDIT: 2016-08-21 (Ticki).
 
             // From the invariants of `Block`, this copy is well-defined.
-            ptr::copy_nonoverlapping(self.ptr.get(), block.ptr.get(), self.size);
+            ptr::copy_nonoverlapping(
+                self.ptr.get(),
+                block.ptr.get(),
+                self.size,
+            );
         }
     }
 
@@ -142,8 +151,8 @@ impl Block {
             unsafe {
                 // LAST AUDIT: 2016-08-21 (Ticki).
 
-                // Since the memory of the block is inaccessible (read-wise), zeroing it is fully
-                // safe.
+                // Since the memory of the block is inaccessible (read-wise),
+                // zeroing it is fully safe.
                 intrinsics::volatile_set_memory(self.ptr.get(), 0, self.size);
             }
         }
@@ -160,7 +169,8 @@ impl Block {
     /// Is this block placed left to the given other block?
     #[inline]
     pub fn left_to(&self, to: &Block) -> bool {
-        // This won't overflow due to the end being bounded by the address space.
+        // This won't overflow due to the end being bounded by the address
+        // space.
         self.size + self.ptr.get() as usize == to.ptr.get() as usize
     }
 
@@ -171,7 +181,12 @@ impl Block {
     /// Panics if `pos` is out of bound.
     #[inline]
     pub fn split(self, pos: usize) -> (Block, Block) {
-        assert!(pos <= self.size, "Split {} out of bound (size is {})!", pos, self.size);
+        assert!(
+            pos <= self.size,
+            "Split {} out of bound (size is {})!",
+            pos,
+            self.size
+        );
 
         (
             Block {
@@ -183,11 +198,12 @@ impl Block {
                 ptr: unsafe {
                     // LAST AUDIT: 2016-08-21 (Ticki).
 
-                    // This won't overflow due to the assertion above, ensuring that it is bounded
-                    // by the address space. See the `split_at_mut` source from libcore.
+                    // This won't overflow due to the assertion above, ensuring
+                    // that it is bounded by the address
+                    // space. See the `split_at_mut` source from libcore.
                     self.ptr.offset(pos as isize)
                 },
-            }
+            },
         )
     }
 
@@ -199,15 +215,15 @@ impl Block {
         // Logging.
         log!(INTERNAL, "Padding {:?} to align {}", self, align);
 
-        // TODO: This functions suffers from external fragmentation. Leaving bigger segments might
-        // increase performance.
+        // TODO: This functions suffers from external fragmentation. Leaving
+        // bigger segments might increase performance.
 
-        // Calculate the aligner, which defines the smallest size required as precursor to align
-        // the block to `align`.
+        // Calculate the aligner, which defines the smallest size required as
+        // precursor to align the block to `align`.
         let aligner = (align - self.ptr.get() as usize % align) % align;
         //                                                 ^^^^^^^^
-        // To avoid wasting space on the case where the block is already aligned, we calculate it
-        // modulo `align`.
+        // To avoid wasting space on the case where the block is already
+        // aligned, we calculate it modulo `align`.
 
         // Bound check.
         if aligner < self.size {
@@ -224,11 +240,12 @@ impl Block {
                     ptr: unsafe {
                         // LAST AUDIT: 2016-08-21 (Ticki).
 
-                        // The aligner is bounded by the size, which itself is bounded by the
-                        // address space. Therefore, this conversion cannot overflow.
+                        // The aligner is bounded by the size, which itself is
+                        // bounded by the address space.
+                        // Therefore, this conversion cannot overflow.
                         old.ptr.offset(aligner as isize)
                     },
-                }
+                },
             ))
         } else {
             // Logging.
@@ -240,8 +257,8 @@ impl Block {
 
     /// Mark this block free to the debugger.
     ///
-    /// The debugger might do things like memleak and use-after-free checks. This methods informs
-    /// the debugger that this block is freed.
+    /// The debugger might do things like memleak and use-after-free checks.
+    /// This methods informs the debugger that this block is freed.
     #[inline]
     pub fn mark_free(self) -> Block {
         #[cfg(feature = "debugger")]
@@ -307,7 +324,10 @@ mod test {
     fn test_array() {
         let arr = b"Lorem ipsum dolor sit amet";
         let block = unsafe {
-            Block::from_raw_parts(Pointer::new(arr.as_ptr() as *mut u8), arr.len())
+            Block::from_raw_parts(
+                Pointer::new(arr.as_ptr() as *mut u8),
+                arr.len(),
+            )
         };
 
         // Test split.
@@ -320,14 +340,20 @@ mod test {
         assert!(!rest.is_empty());
         assert!(lorem.align(2).unwrap().1.aligned_to(2));
         assert!(rest.align(15).unwrap().1.aligned_to(15));
-        assert_eq!(Pointer::from(lorem).get() as usize + 5, Pointer::from(rest).get() as usize);
+        assert_eq!(
+            Pointer::from(lorem).get() as usize + 5,
+            Pointer::from(rest).get() as usize
+        );
     }
 
     #[test]
     fn test_merge() {
         let arr = b"Lorem ipsum dolor sit amet";
         let block = unsafe {
-            Block::from_raw_parts(Pointer::new(arr.as_ptr() as *mut u8), arr.len())
+            Block::from_raw_parts(
+                Pointer::new(arr.as_ptr() as *mut u8),
+                arr.len(),
+            )
         };
 
         let (mut lorem, mut rest) = block.split(5);
@@ -343,7 +369,10 @@ mod test {
     fn test_oob() {
         let arr = b"lorem";
         let block = unsafe {
-            Block::from_raw_parts(Pointer::new(arr.as_ptr() as *mut u8), arr.len())
+            Block::from_raw_parts(
+                Pointer::new(arr.as_ptr() as *mut u8),
+                arr.len(),
+            )
         };
 
         // Test OOB.
@@ -368,12 +397,18 @@ mod test {
     fn test_empty_lr() {
         let arr = b"Lorem ipsum dolor sit amet";
         let block = unsafe {
-            Block::from_raw_parts(Pointer::new(arr.as_ptr() as *mut u8), arr.len())
+            Block::from_raw_parts(
+                Pointer::new(arr.as_ptr() as *mut u8),
+                arr.len(),
+            )
         };
 
         assert!(block.empty_left().is_empty());
         assert!(block.empty_right().is_empty());
-        assert_eq!(Pointer::from(block.empty_left()).get() as *const u8, arr.as_ptr());
+        assert_eq!(
+            Pointer::from(block.empty_left()).get() as *const u8,
+            arr.as_ptr()
+        );
         assert_eq!(block.empty_right(), block.split(arr.len()).1);
     }
 }

+ 174 - 81
src/bookkeeper.rs

@@ -3,7 +3,7 @@
 use prelude::*;
 
 use core::ops::Range;
-use core::{ptr, mem, ops};
+use core::{mem, ops, ptr};
 
 use shim::config;
 
@@ -73,7 +73,10 @@ impl Bookkeeper {
     /// Create a new bookkeeper with some initial vector.
     pub fn new(vec: Vec<Block>) -> Bookkeeper {
         // Make sure the assumptions are satisfied.
-        debug_assert!(vec.capacity() >= EXTRA_ELEMENTS, "Not enough initial capacity of the vector.");
+        debug_assert!(
+            vec.capacity() >= EXTRA_ELEMENTS,
+            "Not enough initial capacity of the vector."
+        );
         debug_assert!(vec.is_empty(), "Initial vector isn't empty.");
 
         // TODO: When added use expr field attributes.
@@ -113,7 +116,9 @@ impl Bookkeeper {
         let len = self.pool.len();
 
         // Move left.
-        ind - self.pool.iter_mut()
+        ind - self
+            .pool
+            .iter_mut()
             .rev()
             .skip(len - ind)
             .take_while(|x| x.is_empty())
@@ -136,7 +141,9 @@ impl Bookkeeper {
         let len = self.pool.len();
 
         // Move left.
-        left_ind -= self.pool.iter_mut()
+        left_ind -= self
+            .pool
+            .iter_mut()
             .rev()
             .skip(len - left_ind)
             .take_while(|x| x.is_empty())
@@ -147,7 +154,9 @@ impl Bookkeeper {
         };
 
         // Move right.
-        right_ind += self.pool.iter()
+        right_ind += self
+            .pool
+            .iter()
             .skip(right_ind)
             .take_while(|x| x.is_empty())
             .count();
@@ -214,9 +223,11 @@ impl Bookkeeper {
             let mut it = self.pool.iter().enumerate().rev();
 
             // Check that the capacity is large enough.
-            assert!(self.reserving || self.pool.len() + EXTRA_ELEMENTS <= self.pool.capacity(),
-                    "The capacity should be at least {} more than the length of the pool.",
-                    EXTRA_ELEMENTS);
+            assert!(
+                self.reserving || self.pool.len() + EXTRA_ELEMENTS <= self.pool.capacity(),
+                "The capacity should be at least {} more than the length of the pool.",
+                EXTRA_ELEMENTS
+            );
 
             if let Some((_, x)) = it.next() {
                 // Make sure there are no leading empty blocks.
@@ -229,26 +240,51 @@ impl Bookkeeper {
                     total_bytes += i.size();
 
                     // Check if sorted.
-                    assert!(next >= i, "The block pool is not sorted at index, {} ({:?} < {:?}).",
-                            n, next, i);
+                    assert!(
+                        next >= i,
+                        "The block pool is not sorted at index, {} ({:?} < {:?}).",
+                        n,
+                        next,
+                        i
+                    );
                     // Make sure no blocks are adjacent.
-                    assert!(!i.left_to(next) || i.is_empty(), "Adjacent blocks at index, {} ({:?} and \
-                            {:?})", n, i, next);
+                    assert!(
+                        !i.left_to(next) || i.is_empty(),
+                        "Adjacent blocks at index, {} ({:?} and \
+                         {:?})",
+                        n,
+                        i,
+                        next
+                    );
                     // Make sure an empty block has the same address as its right neighbor.
-                    assert!(!i.is_empty() || i == next, "Empty block not adjacent to right neighbor \
-                            at index {} ({:?} and {:?})", n, i, next);
+                    assert!(
+                        !i.is_empty() || i == next,
+                        "Empty block not adjacent to right neighbor \
+                         at index {} ({:?} and {:?})",
+                        n,
+                        i,
+                        next
+                    );
 
                     // Set the variable tracking the previous block.
                     next = i;
                 }
 
                 // Check for trailing empty blocks.
-                assert!(!self.pool.last().unwrap().is_empty(), "Trailing empty blocks.");
+                assert!(
+                    !self.pool.last().unwrap().is_empty(),
+                    "Trailing empty blocks."
+                );
             }
 
             // Make sure the sum is maintained properly.
-            assert!(total_bytes == self.total_bytes, "The sum is not equal to the 'total_bytes' \
-                    field: {} ≠ {}.", total_bytes, self.total_bytes);
+            assert!(
+                total_bytes == self.total_bytes,
+                "The sum is not equal to the 'total_bytes' \
+                 field: {} ≠ {}.",
+                total_bytes,
+                self.total_bytes
+            );
         }
     }
 }
@@ -331,25 +367,31 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> {
         // Logging.
         bk_log!(self, "Allocating {} bytes with alignment {}.", size, align);
 
-        if let Some((n, b)) = self.pool.iter_mut().enumerate().filter_map(|(n, i)| {
-            if i.size() >= size {
-                // Try to split at the aligner.
-                i.align(align).and_then(|(mut a, mut b)| {
-                    if b.size() >= size {
-                        // Override the old block.
-                        *i = a;
-                        Some((n, b))
-                    } else {
-                        // Put the split block back together and place it back in its spot.
-                        a.merge_right(&mut b).expect("Unable to merge block right.");
-                        *i = a;
-                        None
-                    }
-                })
-            } else {
-                None
-            }
-        }).next() {
+        if let Some((n, b)) = self
+            .pool
+            .iter_mut()
+            .enumerate()
+            .filter_map(|(n, i)| {
+                if i.size() >= size {
+                    // Try to split at the aligner.
+                    i.align(align).and_then(|(mut a, mut b)| {
+                        if b.size() >= size {
+                            // Override the old block.
+                            *i = a;
+                            Some((n, b))
+                        } else {
+                            // Put the split block back together and place it back in its spot.
+                            a.merge_right(&mut b).expect("Unable to merge block right.");
+                            *i = a;
+                            None
+                        }
+                    })
+                } else {
+                    None
+                }
+            })
+            .next()
+        {
             // Update the pool byte count.
             self.total_bytes -= b.size();
 
@@ -368,8 +410,11 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> {
             // Check consistency.
             self.check();
             debug_assert!(res.aligned_to(align), "Alignment failed.");
-            debug_assert!(res.size() == size, "Requested space does not match with the returned \
-                          block.");
+            debug_assert!(
+                res.size() == size,
+                "Requested space does not match with the returned \
+                 block."
+            );
 
             res
         } else {
@@ -489,11 +534,14 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> {
                 // Check consistency.
                 self.check();
                 debug_assert!(res.aligned_to(align), "Alignment failed.");
-                debug_assert!(res.size() >= new_size, "Requested space does not match with the \
-                              returned block.");
+                debug_assert!(
+                    res.size() >= new_size,
+                    "Requested space does not match with the \
+                     returned block."
+                );
 
                 res
-            },
+            }
         }
     }
 
@@ -519,8 +567,11 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> {
         let res = self.realloc_inplace_bound(bound, block, new_size);
 
         // Check consistency.
-        debug_assert!(res.as_ref().ok().map_or(true, |x| x.size() == new_size), "Requested space \
-                      does not match with the returned block.");
+        debug_assert!(
+            res.as_ref().ok().map_or(true, |x| x.size() == new_size),
+            "Requested space \
+             does not match with the returned block."
+        );
 
         res
     }
@@ -528,13 +579,21 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> {
     /// Reallocate a block on a know index bound inplace.
     ///
     /// See [`realloc_inplace`](#method.realloc_inplace.html) for more information.
-    fn realloc_inplace_bound(&mut self, ind: Range<usize>, mut block: Block, new_size: usize) -> Result<Block, Block> {
+    fn realloc_inplace_bound(
+        &mut self,
+        ind: Range<usize>,
+        mut block: Block,
+        new_size: usize,
+    ) -> Result<Block, Block> {
         // Logging.
         bk_log!(self;ind, "Try inplace reallocating {:?} to size {}.", block, new_size);
 
         /// Assertions...
-        debug_assert!(self.find(&block) == ind.start, "Block is not inserted at the appropriate \
-                      index.");
+        debug_assert!(
+            self.find(&block) == ind.start,
+            "Block is not inserted at the appropriate \
+             index."
+        );
 
         if new_size <= block.size() {
             // Shrink the block.
@@ -553,7 +612,7 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> {
 
             return Ok(block);
 
-            // We check if `ind` is the end of the array.
+        // We check if `ind` is the end of the array.
         } else {
             let mut mergable = false;
             if let Some(entry) = self.pool.get_mut(ind.end) {
@@ -567,7 +626,8 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> {
                 bk_log!(self;ind, "Merging {:?} to the right.", block);
 
                 // We'll merge it with the block at the end of the range.
-                block.merge_right(&mut self.remove_at(ind.end))
+                block
+                    .merge_right(&mut self.remove_at(ind.end))
                     .expect("Unable to merge block right, to the end of the range.");
                 // Merge succeeded.
 
@@ -603,7 +663,9 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> {
         bk_log!(self;ind, "Freeing {:?}.", block);
 
         // Short circuit in case of empty block.
-        if block.is_empty() { return; }
+        if block.is_empty() {
+            return;
+        }
 
         // When compiled with `security`, we zero this block.
         block.sec_zero();
@@ -614,13 +676,17 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> {
         }
 
         // Assertions...
-        debug_assert!(self.find(&block) == ind.start, "Block is not inserted at the appropriate \
-                      index.");
+        debug_assert!(
+            self.find(&block) == ind.start,
+            "Block is not inserted at the appropriate \
+             index."
+        );
 
         // Try to merge it with the block to the right.
         if ind.end < self.pool.len() && block.left_to(&self.pool[ind.end]) {
             // Merge the block with the rightmost block in the range.
-            block.merge_right(&mut self.remove_at(ind.end))
+            block
+                .merge_right(&mut self.remove_at(ind.end))
                 .expect("Unable to merge block right to the block at the end of the range");
 
             // The merging succeeded. We proceed to try to close in the possible gap.
@@ -652,7 +718,12 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> {
     /// The returned pointer is guaranteed to be aligned to `align`.
     fn alloc_external(&mut self, size: usize, align: usize) -> Block {
         // Logging.
-        bk_log!(self, "Fresh allocation of size {} with alignment {}.", size, align);
+        bk_log!(
+            self,
+            "Fresh allocation of size {} with alignment {}.",
+            size,
+            align
+        );
 
         // Break it to me!
         let res = self.alloc_fresh(size, align);
@@ -681,8 +752,11 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> {
             self.total_bytes += block.size();
 
             // Some assertions...
-            debug_assert!(self.pool.is_empty() || &block > self.pool.last().unwrap(), "Pushing will \
-                          make the list unsorted.");
+            debug_assert!(
+                self.pool.is_empty() || &block > self.pool.last().unwrap(),
+                "Pushing will \
+                 make the list unsorted."
+            );
 
             // We will try to simply merge it with the last block.
             if let Some(x) = self.pool.last_mut() {
@@ -707,7 +781,6 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> {
                 }
             }
 
-
             // Merging failed. Note that trailing empty blocks are not allowed, hence the last block is
             // the only non-empty candidate which may be adjacent to `block`.
 
@@ -742,7 +815,10 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> {
         // Logging.
         bk_log!(self;min_cap, "Reserving {}.", min_cap);
 
-        if !self.reserving && (self.pool.capacity() < self.pool.len() + EXTRA_ELEMENTS || self.pool.capacity() < min_cap + EXTRA_ELEMENTS) {
+        if !self.reserving
+            && (self.pool.capacity() < self.pool.len() + EXTRA_ELEMENTS
+                || self.pool.capacity() < min_cap + EXTRA_ELEMENTS)
+        {
             // Reserve a little extra for performance reasons.
             // TODO: This should be moved to some new method.
             let new_cap = min_cap + EXTRA_ELEMENTS + config::extra_fresh(min_cap);
@@ -754,7 +830,8 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> {
             self.reserving = true;
 
             // Break it to me!
-            let new_buf = self.alloc_external(new_cap * mem::size_of::<Block>(), mem::align_of::<Block>());
+            let new_buf =
+                self.alloc_external(new_cap * mem::size_of::<Block>(), mem::align_of::<Block>());
 
             // Go back to the original state.
             self.reserving = false;
@@ -841,9 +918,16 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> {
         assert!(self.pool.len() >= ind, "Insertion out of bounds.");
 
         // Some assertions...
-        debug_assert!(self.pool.len() <= ind || block <= self.pool[ind], "Inserting at {} will make \
-                      the list unsorted.", ind);
-        debug_assert!(self.find(&block) == ind, "Block is not inserted at the appropriate index.");
+        debug_assert!(
+            self.pool.len() <= ind || block <= self.pool[ind],
+            "Inserting at {} will make \
+             the list unsorted.",
+            ind
+        );
+        debug_assert!(
+            self.find(&block) == ind,
+            "Block is not inserted at the appropriate index."
+        );
         debug_assert!(!block.is_empty(), "Inserting an empty block.");
 
         // Trigger the new memory event handler.
@@ -871,29 +955,31 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> {
             // LAST AUDIT: 2016-08-21 (Ticki).
 
             // Memmove the elements to make a gap to the new block.
-            ptr::copy(self.pool.get_unchecked(ind) as *const Block,
-                      self.pool.get_unchecked_mut(ind + 1) as *mut Block,
-                      // The gap defaults to the end of the pool.
-                      gap.unwrap_or_else(|| {
-                          // We will only extend the length if we were unable to fit it into the current length.
+            ptr::copy(
+                self.pool.get_unchecked(ind) as *const Block,
+                self.pool.get_unchecked_mut(ind + 1) as *mut Block,
+                // The gap defaults to the end of the pool.
+                gap.unwrap_or_else(|| {
+                    // We will only extend the length if we were unable to fit it into the current length.
 
-                          // Loooooooging...
-                          bk_log!(self;ind, "Block pool not long enough for shift. Extending.");
+                    // Loooooooging...
+                    bk_log!(self;ind, "Block pool not long enough for shift. Extending.");
 
-                          // Reserve space. This does not break order, due to the assumption that
-                          // `reserve` never breaks order.
-                          old_buf = unborrow!(self.reserve(self.pool.len() + 1));
+                    // Reserve space. This does not break order, due to the assumption that
+                    // `reserve` never breaks order.
+                    old_buf = unborrow!(self.reserve(self.pool.len() + 1));
 
-                          // We will move a block into reserved memory but outside of the vec's bounds. For
-                          // that reason, we push an uninitialized element to extend the length, which will
-                          // be assigned in the memcpy.
-                          let res = self.pool.push(mem::uninitialized());
+                    // We will move a block into reserved memory but outside of the vec's bounds. For
+                    // that reason, we push an uninitialized element to extend the length, which will
+                    // be assigned in the memcpy.
+                    let res = self.pool.push(mem::uninitialized());
 
-                          // Just some assertions...
-                          debug_assert!(res.is_ok(), "Push failed (buffer full).");
+                    // Just some assertions...
+                    debug_assert!(res.is_ok(), "Push failed (buffer full).");
 
-                          self.pool.len() - 1
-                      }) - ind);
+                    self.pool.len() - 1
+                }) - ind,
+            );
 
             // Update the pool byte count.
             self.total_bytes += block.size();
@@ -919,7 +1005,8 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> {
         let res = if ind + 1 == self.pool.len() {
             let block = self.pool[ind].pop();
             // Make sure there are no trailing empty blocks.
-            let new_len = self.pool.len() - self.pool.iter().rev().take_while(|x| x.is_empty()).count();
+            let new_len =
+                self.pool.len() - self.pool.iter().rev().take_while(|x| x.is_empty()).count();
 
             // Truncate the vector.
             self.pool.truncate(new_len);
@@ -935,7 +1022,13 @@ pub trait Allocator: ops::DerefMut<Target = Bookkeeper> {
 
             // Iterate over the pool from `ind` and down and set it to the  empty of our block.
             let skip = self.pool.len() - ind;
-            for place in self.pool.iter_mut().rev().skip(skip).take_while(|x| x.is_empty()) {
+            for place in self
+                .pool
+                .iter_mut()
+                .rev()
+                .skip(skip)
+                .take_while(|x| x.is_empty())
+            {
                 // Empty the blocks.
                 *place = empty2.empty_left();
             }

+ 23 - 14
src/brk.rs

@@ -4,19 +4,17 @@
 
 use prelude::*;
 
-use core::ptr;
 use core::convert::TryInto;
+use core::ptr;
 
-use shim::{syscalls, config};
+use shim::{config, syscalls};
 
-use {sync, fail};
+use {fail, sync};
 
 /// The BRK mutex.
 ///
 /// This is used for avoiding data races in multiple allocator.
-static BRK_MUTEX: Mutex<BrkState> = Mutex::new(BrkState {
-    current_brk: None,
-});
+static BRK_MUTEX: Mutex<BrkState> = Mutex::new(BrkState { current_brk: None });
 
 /// A cache of the BRK state.
 ///
@@ -108,9 +106,12 @@ impl BrkLock {
         if let Some(ref cur) = self.state.current_brk {
             let res = cur.clone();
             // Make sure that the break is set properly (i.e. there is no libc interference).
-            debug_assert!(res == current_brk(), "The cached program break is out of sync with the \
-                          actual program break. Are you interfering with BRK? If so, prefer the \
-                          provided 'sbrk' instead, then.");
+            debug_assert!(
+                res == current_brk(),
+                "The cached program break is out of sync with the \
+                 actual program break. Are you interfering with BRK? If so, prefer the \
+                 provided 'sbrk' instead, then."
+            );
 
             return res;
         }
@@ -146,17 +147,22 @@ impl BrkLock {
             Block::from_raw_parts(
                 // Important! The conversion is failable to avoid arithmetic overflow-based
                 // attacks.
-                self.sbrk(brk_size.try_into().unwrap()).unwrap_or_else(|()| fail::oom()),
+                self.sbrk(brk_size.try_into().unwrap())
+                    .unwrap_or_else(|()| fail::oom()),
                 brk_size,
             )
-        }.align(align).unwrap();
+        }.align(align)
+            .unwrap();
 
         // Split the block to leave the excessive space.
         let (res, excessive) = rest.split(size);
 
         // Make some assertions.
         debug_assert!(res.aligned_to(align), "Alignment failed.");
-        debug_assert!(res.size() + alignment_block.size() + excessive.size() == brk_size, "BRK memory leak.");
+        debug_assert!(
+            res.size() + alignment_block.size() + excessive.size() == brk_size,
+            "BRK memory leak."
+        );
 
         (alignment_block, res, excessive)
     }
@@ -180,8 +186,11 @@ pub fn lock() -> BrkLock {
 /// # Failure
 ///
 /// On failure the maximum pointer (`!0 as *mut u8`) is returned.
-pub unsafe extern fn sbrk(size: isize) -> *mut u8 {
-    lock().sbrk(size).unwrap_or_else(|()| Pointer::new(!0 as *mut u8)).get()
+pub unsafe extern "C" fn sbrk(size: isize) -> *mut u8 {
+    lock()
+        .sbrk(size)
+        .unwrap_or_else(|()| Pointer::new(!0 as *mut u8))
+        .get()
 }
 
 /// Get the current program break.

+ 9 - 6
src/cell.rs

@@ -25,13 +25,16 @@ impl<T> MoveCell<T> {
     /// Replace the inner data and return the old.
     #[inline]
     pub fn replace(&self, new: T) -> T {
-        mem::replace(unsafe {
-            // LAST AUDIT: 2016-08-21 (Ticki).
+        mem::replace(
+            unsafe {
+                // LAST AUDIT: 2016-08-21 (Ticki).
 
-            // This is safe due to never aliasing the value, but simply transfering ownership to
-            // the caller.
-            &mut *self.inner.get()
-        }, new)
+                // This is safe due to never aliasing the value, but simply transfering ownership to
+                // the caller.
+                &mut *self.inner.get()
+            },
+            new,
+        )
     }
 }
 

+ 1 - 1
src/fail.rs

@@ -2,8 +2,8 @@
 
 use prelude::*;
 
-use core::sync::atomic::{self, AtomicPtr};
 use core::mem;
+use core::sync::atomic::{self, AtomicPtr};
 
 use shim::config;
 

+ 24 - 10
src/lib.rs

@@ -11,13 +11,12 @@
 
 #![cfg_attr(feature = "clippy", feature(plugin))]
 #![cfg_attr(feature = "clippy", plugin(clippy))]
-
 #![no_std]
-
-#![feature(allocator_api, const_fn, core_intrinsics, stmt_expr_attributes,
-           optin_builtin_traits, type_ascription, thread_local, linkage,
-           try_from, const_unsafe_cell_new, const_atomic_bool_new,
-           const_nonzero_new, const_atomic_ptr_new)]
+#![feature(
+    allocator_api, const_fn, core_intrinsics, stmt_expr_attributes, optin_builtin_traits,
+    type_ascription, thread_local, linkage, try_from, const_unsafe_cell_new, const_atomic_bool_new,
+    const_nonzero_new, const_atomic_ptr_new
+)]
 #![warn(missing_docs)]
 
 extern crate ralloc_shim as shim;
@@ -44,8 +43,8 @@ mod ptr;
 mod sync;
 mod vec;
 
-use core::alloc::{Alloc, AllocErr, Layout, CannotReallocInPlace};
 use core::alloc::GlobalAlloc;
+use core::alloc::{Alloc, AllocErr, CannotReallocInPlace, Layout};
 use core::ptr::NonNull;
 
 pub use allocator::{alloc, free, realloc, realloc_inplace};
@@ -71,7 +70,12 @@ unsafe impl<'a> Alloc for &'a Allocator {
         allocator::free(ptr.as_ptr(), layout.size());
     }
 
-    unsafe fn realloc(&mut self, ptr: NonNull<u8>, layout: Layout, new_size: usize) -> Result<NonNull<u8>, AllocErr> {
+    unsafe fn realloc(
+        &mut self,
+        ptr: NonNull<u8>,
+        layout: Layout,
+        new_size: usize,
+    ) -> Result<NonNull<u8>, AllocErr> {
         let ptr = allocator::realloc(ptr.as_ptr(), layout.size(), new_size, layout.align());
         if ptr.is_null() {
             Err(AllocErr)
@@ -80,7 +84,12 @@ unsafe impl<'a> Alloc for &'a Allocator {
         }
     }
 
-    unsafe fn grow_in_place(&mut self, ptr: NonNull<u8>, layout: Layout, new_size: usize) -> Result<(), CannotReallocInPlace> {
+    unsafe fn grow_in_place(
+        &mut self,
+        ptr: NonNull<u8>,
+        layout: Layout,
+        new_size: usize,
+    ) -> Result<(), CannotReallocInPlace> {
         if allocator::realloc_inplace(ptr.as_ptr(), layout.size(), new_size).is_ok() {
             Ok(())
         } else {
@@ -88,7 +97,12 @@ unsafe impl<'a> Alloc for &'a Allocator {
         }
     }
 
-    unsafe fn shrink_in_place(&mut self, ptr: NonNull<u8>, layout: Layout, new_size: usize) -> Result<(), CannotReallocInPlace> {
+    unsafe fn shrink_in_place(
+        &mut self,
+        ptr: NonNull<u8>,
+        layout: Layout,
+        new_size: usize,
+    ) -> Result<(), CannotReallocInPlace> {
         if allocator::realloc_inplace(ptr.as_ptr(), layout.size(), new_size).is_ok() {
             Ok(())
         } else {

+ 18 - 10
src/log.rs

@@ -125,13 +125,13 @@ macro_rules! debug_assert {
 #[cfg(feature = "write")]
 #[macro_export]
 macro_rules! assert_eq {
-    ($left:expr, $right:expr) => ({
+    ($left:expr, $right:expr) => {{
         // We evaluate _once_.
         let left = &$left;
         let right = &$right;
 
         assert!(left == right, "(left: '{:?}', right: '{:?}')", left, right)
-    })
+    }};
 }
 
 /// Top-secret module.
@@ -139,8 +139,8 @@ macro_rules! assert_eq {
 pub mod internal {
     use prelude::*;
 
-    use core::fmt;
     use core::cell::Cell;
+    use core::fmt;
     use core::ops::Range;
 
     use shim::config;
@@ -179,7 +179,11 @@ pub mod internal {
 
     impl fmt::Write for LogWriter {
         fn write_str(&mut self, s: &str) -> fmt::Result {
-            if config::log(s) == !0 { Err(fmt::Error) } else { Ok(()) }
+            if config::log(s) == !0 {
+                Err(fmt::Error)
+            } else {
+                Ok(())
+            }
         }
     }
 
@@ -254,9 +258,13 @@ pub mod internal {
     }
 
     impl Cursor for () {
-        fn at(&self, _: &mut fmt::Formatter, _: usize) -> fmt::Result { Ok(()) }
+        fn at(&self, _: &mut fmt::Formatter, _: usize) -> fmt::Result {
+            Ok(())
+        }
 
-        fn after(&self, _: &mut fmt::Formatter) -> fmt::Result { Ok(()) }
+        fn after(&self, _: &mut fmt::Formatter) -> fmt::Result {
+            Ok(())
+        }
     }
 
     impl IntoCursor for () {
@@ -286,16 +294,16 @@ pub mod internal {
             Ok(())
         }
 
-        fn after(&self, _: &mut fmt::Formatter) -> fmt::Result { Ok(()) }
+        fn after(&self, _: &mut fmt::Formatter) -> fmt::Result {
+            Ok(())
+        }
     }
 
     impl IntoCursor for Range<usize> {
         type Cursor = RangeCursor;
 
         fn into_cursor(self) -> RangeCursor {
-            RangeCursor {
-                range: self,
-            }
+            RangeCursor { range: self }
         }
     }
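
The `assert_eq!` hunk above keeps the crate's evaluate-once discipline: both arguments are bound to locals before being compared, so side effects run exactly once even though each value is used twice (in the comparison and in the panic message). A standalone sketch of the same pattern on plain `std`, under a hypothetical macro name:

```rust
// Sketch of the evaluate-once pattern from the diff's `assert_eq!`.
macro_rules! check_eq {
    ($left:expr, $right:expr) => {{
        // We evaluate _once_.
        let left = &$left;
        let right = &$right;
        assert!(left == right, "(left: '{:?}', right: '{:?}')", left, right);
    }};
}

fn main() {
    let mut calls = 0;
    let mut bump = || {
        calls += 1;
        calls
    };
    // `bump()` runs once, not twice, despite the double use above.
    check_eq!(bump(), 1);
}
```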
 

+ 1 - 1
src/prelude.rs

@@ -5,6 +5,6 @@
 pub use block::Block;
 pub use cell::MoveCell;
 pub use lazy_init::LazyInit;
+pub use ptr::Pointer;
 pub use sync::Mutex;
 pub use vec::Vec;
-pub use ptr::Pointer;

+ 1 - 1
src/ptr.rs

@@ -1,7 +1,7 @@
 //! Pointer wrappers.
 
-use core::ptr::NonNull;
 use core::marker;
+use core::ptr::NonNull;
 
 /// A pointer wrapper type.
 ///

+ 6 - 5
src/sync.rs

@@ -1,8 +1,8 @@
 //! Synchronization primitives.
 
 use core::cell::UnsafeCell;
-use core::sync::atomic::{self, AtomicBool};
 use core::ops;
+use core::sync::atomic::{self, AtomicBool};
 
 use shim;
 
@@ -37,7 +37,10 @@ impl<T> Mutex<T> {
     pub fn lock(&self) -> MutexGuard<T> {
         // Lock the mutex.
         #[cfg(not(feature = "unsafe_no_mutex_lock"))]
-        while self.locked.compare_and_swap(false, true, atomic::Ordering::SeqCst) {
+        while self
+            .locked
+            .compare_and_swap(false, true, atomic::Ordering::SeqCst)
+        {
             // ,___,
             // {O,o}
             // |)``)
@@ -45,9 +48,7 @@ impl<T> Mutex<T> {
             shim::syscalls::sched_yield();
         }
 
-        MutexGuard {
-            mutex: self,
-        }
+        MutexGuard { mutex: self }
     }
 }
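
The reformatted `lock` above is a spinlock: loop on an atomic flag and yield the timeslice while contended. A self-contained sketch of the same idea on stable `std` — using `compare_exchange` in place of the (since deprecated) `compare_and_swap` shown in the diff, and `thread::yield_now` standing in for the shim's `sched_yield`:

```rust
use std::cell::UnsafeCell;
use std::ops::{Deref, DerefMut};
use std::sync::atomic::{AtomicBool, Ordering};
use std::thread;

// A minimal spinlock mirroring the `Mutex` in the diff.
struct SpinLock<T> {
    locked: AtomicBool,
    inner: UnsafeCell<T>,
}

// Safe because access to `inner` is serialized by `locked`.
unsafe impl<T: Send> Sync for SpinLock<T> {}

struct Guard<'a, T> {
    lock: &'a SpinLock<T>,
}

impl<T> SpinLock<T> {
    const fn new(inner: T) -> Self {
        SpinLock {
            locked: AtomicBool::new(false),
            inner: UnsafeCell::new(inner),
        }
    }

    fn lock(&self) -> Guard<'_, T> {
        // Spin until we flip the flag from `false` to `true`.
        while self
            .locked
            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_err()
        {
            thread::yield_now(); // stands in for `sched_yield`
        }
        Guard { lock: self }
    }
}

impl<'a, T> Deref for Guard<'a, T> {
    type Target = T;
    fn deref(&self) -> &T {
        unsafe { &*self.lock.inner.get() }
    }
}

impl<'a, T> DerefMut for Guard<'a, T> {
    fn deref_mut(&mut self) -> &mut T {
        unsafe { &mut *self.lock.inner.get() }
    }
}

impl<'a, T> Drop for Guard<'a, T> {
    fn drop(&mut self) {
        // Release the lock when the guard goes out of scope.
        self.lock.locked.store(false, Ordering::Release);
    }
}

fn main() {
    static LOCK: SpinLock<u32> = SpinLock::new(0);
    *LOCK.lock() += 1;
    assert_eq!(*LOCK.lock(), 1);
}
```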
 

+ 4 - 2
src/tls.rs

@@ -30,7 +30,9 @@ impl<T: 'static> Key<T> {
     /// another thread.
     #[inline]
     pub fn with<F, R>(&self, f: F) -> R
-        where F: FnOnce(&T) -> R {
+    where
+        F: FnOnce(&T) -> R,
+    {
         // Logging.
         log!(INTERNAL, "Accessing TLS variable.");
 
@@ -42,7 +44,7 @@ impl<T: 'static> Key<T> {
 /// Note that this has to be registered for every thread it is needed for.
     // TODO: Make this automatic on `Drop`.
     #[inline]
-    pub fn register_thread_destructor(&self, dtor: extern fn(&T)) {
+    pub fn register_thread_destructor(&self, dtor: extern "C" fn(&T)) {
         // Logging.
         log!(INTERNAL, "Registering thread destructor.");
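
The `Key::with` signature reformatted above is the same closure-based access pattern that std's `thread_local!` exposes: the per-thread value is only ever lent to the callback, so no reference to it can outlive the thread. A std-only sketch of that pattern:

```rust
use std::cell::Cell;

// std's `thread_local!` mirrors the crate's `Key::with`: the value is
// handed to a closure as a borrow and never escapes.
thread_local! {
    static COUNTER: Cell<u32> = Cell::new(0);
}

fn bump() -> u32 {
    COUNTER.with(|c| {
        c.set(c.get() + 1);
        c.get()
    })
}

fn main() {
    assert_eq!(bump(), 1);
    assert_eq!(bump(), 2);

    // A fresh thread sees its own zero-initialized copy.
    std::thread::spawn(|| assert_eq!(bump(), 1))
        .join()
        .unwrap();
}
```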
 

+ 15 - 10
src/vec.rs

@@ -2,7 +2,7 @@
 
 use prelude::*;
 
-use core::{slice, ops, mem, ptr};
+use core::{mem, ops, ptr, slice};
 
 use leak::Leak;
 
@@ -54,7 +54,10 @@ impl<T: Leak> Vec<T> {
         let new_cap = block.size() / mem::size_of::<T>();
 
         // Make some assertions.
-        assert!(self.len <= new_cap, "Block not large enough to cover the vector.");
+        assert!(
+            self.len <= new_cap,
+            "Block not large enough to cover the vector."
+        );
         assert!(block.aligned_to(mem::align_of::<T>()), "Block not aligned.");
 
         let old = mem::replace(self, Vec::default());
@@ -140,9 +143,7 @@ impl<T: Leak> Vec<T> {
 
     /// Yield an iterator popping from the vector.
     pub fn pop_iter(&mut self) -> PopIter<T> {
-        PopIter {
-            vec: self,
-        }
+        PopIter { vec: self }
     }
 }
 
@@ -219,7 +220,7 @@ mod test {
         let mut vec = unsafe {
             Vec::from_raw_parts(
                 Block::from_raw_parts(Pointer::new(&mut buffer[0] as *mut u8), 32),
-                16
+                16,
             )
         };
 
@@ -232,8 +233,11 @@ mod test {
         assert_eq!(&*vec, b".aaaaaaaaaaaaaaabc");
 
         unsafe {
-            assert_eq!(vec.refill(
-                Block::from_raw_parts(Pointer::new(&mut buffer[0] as *mut u8), 32)).size(),
+            assert_eq!(
+                vec.refill(Block::from_raw_parts(
+                    Pointer::new(&mut buffer[0] as *mut u8),
+                    32
+                )).size(),
                 32
             );
         }
@@ -246,13 +250,14 @@ mod test {
         assert_eq!(vec.pop().unwrap(), b'_');
         vec.push(b'@').unwrap();
 
-
         vec.push(b'!').unwrap_err();
 
         assert_eq!(&*vec, b".aaaaaaaaaaaaaaabc_____________@");
         assert_eq!(vec.capacity(), 32);
 
-        for _ in 0..32 { vec.pop().unwrap(); }
+        for _ in 0..32 {
+            vec.pop().unwrap();
+        }
 
         assert!(vec.pop().is_none());
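
The `refill` test above leans on the invariant asserted earlier in this file's diff: a backing block of `size` bytes holds `size / mem::size_of::<T>()` elements, and a `push` past that capacity must fail rather than reallocate. A tiny sketch of that arithmetic (hypothetical helper, plain `std`, zero-sized types ignored):

```rust
use std::mem;

// Capacity rule the `refill` test exercises: how many `T`s fit in a
// backing block of `block_size` bytes.
fn capacity_for<T>(block_size: usize) -> usize {
    block_size / mem::size_of::<T>()
}

fn main() {
    // The test refills the vector with a 32-byte block of `u8`s...
    assert_eq!(capacity_for::<u8>(32), 32);
    // ...while the same block would hold only four `u64`s.
    assert_eq!(capacity_for::<u64>(32), 4);
}
```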
         assert!(vec.pop().is_none());

+ 2 - 1
tests/arc.rs

@@ -1,4 +1,5 @@
-//! This test is a more subtle one. It is one which can hit thread destructors unexpectedly.
+//! This test is a more subtle one. It is one which can hit thread destructors
+//! unexpectedly.
 
 extern crate ralloc;
 

+ 2 - 1
tests/cross_thread_drop.rs

@@ -30,7 +30,8 @@ fn cross_thread_drop() {
 fn cross_thread_drop_2() {
     util::multiply(|| {
         for _ in 0..10 {
-            let bx = thread::spawn(|| Box::new(0x11FE15C001u64)).join().unwrap();
+            let bx =
+                thread::spawn(|| Box::new(0x11FE15C001u64)).join().unwrap();
 
             thread::spawn(move || {
                 util::acid(|| {

+ 1 - 1
tests/mpsc.rs

@@ -2,8 +2,8 @@ extern crate ralloc;
 
 mod util;
 
-use std::thread;
 use std::sync::mpsc;
+use std::thread;
 
 #[test]
 fn mpsc_queue() {

+ 27 - 9
tests/util/mod.rs

@@ -1,6 +1,6 @@
 //! Test automation.
 
-use std::{thread, mem};
+use std::{mem, thread};
 
 /// Magic trait for boxed `FnOnce`s.
 ///
@@ -11,11 +11,15 @@ trait FnBox {
 }
 
 impl<F: FnOnce()> FnBox for F {
-    fn call_box(self: Box<Self>) { (*self)() }
+    fn call_box(self: Box<Self>) {
+        (*self)()
+    }
 }
 
 /// Like `std::thread::spawn`, but without the closure bounds.
-unsafe fn spawn_unsafe<'a, F: FnOnce() + Send + 'a>(func: F) -> thread::JoinHandle<()> {
+unsafe fn spawn_unsafe<'a, F: FnOnce() + Send + 'a>(
+    func: F,
+) -> thread::JoinHandle<()> {
     let closure: Box<FnBox + 'a> = Box::new(func);
     let closure: Box<FnBox + Send> = mem::transmute(closure);
     thread::spawn(move || closure.call_box())
@@ -46,12 +50,14 @@ pub fn multiply<F: Fn() + Sync + Send + 'static>(func: F) {
 
 /// Wrap a block in acid tests.
 ///
-/// This performs a number of temporary allocations to try to detect inconsistency.
+/// This performs a number of temporary allocations to try to detect
+/// inconsistency.
 ///
-/// The basic idea is that if the allocator is broken, it might allocate the same memory twice, or
-/// corrupt when allocating. Thus, we allocate some temporary segment and override it. This way we
-/// might be able to detect memory corruption through asserting memory consistency after the
-/// closure is completed.
+/// The basic idea is that if the allocator is broken, it might allocate the
+/// same memory twice, or corrupt when allocating. Thus, we allocate some
+/// temporary segment and override it. This way we might be able to detect
+/// memory corruption through asserting memory consistency after the closure is
+/// completed.
 #[allow(dead_code)]
 pub fn acid<F: FnOnce()>(func: F) {
     let mut vec = vec!["something", "yep", "yup"];
@@ -70,7 +76,19 @@ pub fn acid<F: FnOnce()>(func: F) {
     vec.push("heyaya");
     *bx = 55;
 
-    assert_eq!(vec, ["something", "yep", "yup", "lol", "lulz", "we", "are", "heyaya"]);
+    assert_eq!(
+        vec,
+        [
+            "something",
+            "yep",
+            "yup",
+            "lol",
+            "lulz",
+            "we",
+            "are",
+            "heyaya"
+        ]
+    );
     assert_eq!(*bx, 55);
     assert_eq!(*abc, "abc");
 }
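
The `FnBox` helper reformatted above works around the old compiler's inability to call a boxed `FnOnce` directly: `call_box` takes `self: Box<Self>`, so the closure can be moved out of the box and invoked through a trait object. A standalone sketch of just that part (the `transmute`-based lifetime erasure in `spawn_unsafe` is deliberately omitted):

```rust
// Sketch of the `FnBox` pattern from the diff: on older compilers a
// `Box<dyn FnOnce()>` could not be called directly, so the call goes
// through a method with a `Box<Self>` receiver.
trait FnBox {
    fn call_box(self: Box<Self>);
}

impl<F: FnOnce()> FnBox for F {
    fn call_box(self: Box<Self>) {
        (*self)()
    }
}

fn main() {
    let closure: Box<dyn FnBox> = Box::new(|| println!("called once"));
    closure.call_box();

    // On current Rust the workaround is unnecessary:
    let direct: Box<dyn FnOnce()> = Box::new(|| println!("also once"));
    direct();
}
```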

+ 2 - 2
tests/vec.rs

@@ -8,7 +8,8 @@ fn simple_vec() {
         let mut vec = Vec::new();
 
         for i in 0..0xFFFF {
-            // We're going to annoy the allocator by allocating a small chunk, after which we push.
+            // We're going to annoy the allocator by allocating a small chunk,
+            // after which we push.
             let _bx = Box::new(4);
             vec.push(i);
         }
@@ -30,5 +31,4 @@ fn simple_vec() {
             assert_eq!(vec[i], 0);
         }
     });
-
 }