Initialize a crate for memory-related tools (#89)

Initialize a crate for memory-related tools, mainly heap-size measurement through the `malloc_size_of` traits.
cheme 2019-01-16 10:43:43 +01:00 committed by GitHub
parent 5b56003075
commit e388dcfaa6
11 changed files with 1838 additions and 0 deletions

.travis.yml

@@ -24,3 +24,4 @@ script:
- cd uint/ && cargo test --features=std,quickcheck --release && cd ..
- cd hashdb/ && cargo test --no-default-features && cd ..
- cd plain_hasher/ && cargo test --no-default-features && cd ..
- cd parity-util-mem/ && cargo test --features=estimate-heapsize && cd ..

Cargo.toml

@@ -18,5 +18,6 @@ members = [
"trie-standardmap",
"triehash",
"uint",
"parity-util-mem",
"primitive-types",
]

parity-util-mem/Cargo.toml

@@ -0,0 +1,38 @@
[package]
name = "parity-util-mem"
version = "0.1.0"
authors = ["Parity Technologies <admin@parity.io>"]
repository = "https://github.com/paritytech/parity-common"
description = "Collection of memory related utilities"
license = "GPL-3.0"
[dependencies]
clear_on_drop = "0.2"
cfg-if = "0.1.6"
malloc_size_of_derive = "0.1.0"
dlmalloc = { version = "0.1", features = ["global"], optional = true }
wee_alloc = { version = "0.4", optional = true }
jemallocator = { version = "0.1", optional = true }
elastic-array = { version = "*", optional = true }
ethereum-types = { version = "*", optional = true }
parking_lot = { version = "*", optional = true }
[target."cfg(windows)".dependencies.winapi]
version = "0.3.4"
[features]
default = ["std", "ethereum-impls"]
std = []
# when activated, memory is erased through a volatile write primitive instead of the clear_on_drop crate
volatile-erase = []
# use dlmalloc as global allocator
dlmalloc-global = ["dlmalloc", "estimate-heapsize"]
# use wee_alloc as global allocator
weealloc-global = ["wee_alloc", "estimate-heapsize"]
# use jemalloc as global allocator
jemalloc-global = ["jemallocator"]
# implement additional types
ethereum-impls = ["ethereum-types", "elastic-array", "parking_lot"]
# Full estimate: no call to allocator
estimate-heapsize = []

parity-util-mem/README.md

@@ -0,0 +1,16 @@
# parity-util-mem
Collection of memory related utilities.
## Features
- volatile-erase : not set by default; the `Memzero` struct erases memory through a simple [`write_volatile`](https://doc.rust-lang.org/std/ptr/fn.write_volatile.html) call.
- estimate-heapsize : do not query the allocator; estimate sizes with `size_of` or `size_of_val` instead.
Other features select a global allocator; see the `allocators` module (`src/allocators.rs`).
## Dependencies
This crate groups common dependencies: [`clear_on_drop`](https://crates.io/crates/clear_on_drop) is re-exported, and a patched copy of the unpublished [`malloc_size_of`](https://github.com/servo/servo/tree/master/components/malloc_size_of) crate from the Servo project is vendored and partially re-exported.
The `malloc_size_of` code is used internally as a module, with a few modifications so that the traits can be implemented for local types.
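## Example
A minimal usage sketch (assuming the default features; `MallocSizeOfExt` is re-exported at the crate root):
```rust
extern crate parity_util_mem;

use parity_util_mem::MallocSizeOfExt;

fn main() {
    // `malloc_size_of` runs a measurement with a fresh default
    // `MallocSizeOfOps`; it reports descendant heap allocations,
    // not the inline size of the value itself.
    let val: Vec<u8> = vec![0u8; 1024];
    println!("heap size: {} bytes", val.malloc_size_of());
}
```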


@@ -0,0 +1,12 @@
#!/bin/bash
# Script to update the vendored code from the Servo project (malloc_size_of).
# Untested; note that we do not use a git submodule due to the size of the Servo repository.
git clone https://github.com/servo/servo.git
cd servo
git checkout 5bdea7dc1c80790a852a3fb03edfb2b8fbd403dc
git apply ../slim_malloc_size_of.patch
#git merge master
#cp components/malloc_size_of/lib.rs ../src/malloc_size.rs
#cd ..
#rm -rf ./servo

parity-util-mem/slim_malloc_size_of.patch

@@ -0,0 +1,716 @@
diff --git a/components/malloc_size_of/lib.rs b/components/malloc_size_of/lib.rs
index 778082b5f0..7f527c930e 100644
--- a/components/malloc_size_of/lib.rs
+++ b/components/malloc_size_of/lib.rs
@@ -43,55 +43,39 @@
//! measured as well as the thing it points to. E.g.
//! `<Box<_> as MallocSizeOf>::size_of(field, ops)`.
-extern crate app_units;
-#[cfg(feature = "servo")]
-extern crate crossbeam_channel;
-extern crate cssparser;
-extern crate euclid;
-extern crate hashglobe;
-#[cfg(feature = "servo")]
-extern crate hyper;
-#[cfg(feature = "servo")]
-extern crate hyper_serde;
-#[cfg(feature = "servo")]
-extern crate keyboard_types;
-#[cfg(feature = "servo")]
-extern crate mozjs as js;
-extern crate selectors;
-#[cfg(feature = "servo")]
-extern crate serde;
-#[cfg(feature = "servo")]
-extern crate serde_bytes;
-extern crate servo_arc;
-extern crate smallbitvec;
-extern crate smallvec;
-#[cfg(feature = "servo")]
-extern crate string_cache;
-extern crate thin_slice;
-#[cfg(feature = "servo")]
-extern crate time;
-#[cfg(feature = "url")]
-extern crate url;
-extern crate void;
-#[cfg(feature = "webrender_api")]
-extern crate webrender_api;
-#[cfg(feature = "servo")]
-extern crate xml5ever;
-
-#[cfg(feature = "servo")]
-use serde_bytes::ByteBuf;
+
+// This file is patched at commit 5bdea7dc1c80790a852a3fb03edfb2b8fbd403dc DO NOT EDIT.
+
+
+#[cfg(not(feature = "std"))]
+use alloc::vec::Vec;
+#[cfg(not(feature = "std"))]
+use alloc::string::String;
+#[cfg(not(feature = "std"))]
+mod std {
+ pub use core::*;
+ pub use alloc::collections;
+}
+
+#[cfg(feature = "std")]
+use std::sync::Arc;
+
use std::hash::{BuildHasher, Hash};
use std::mem::size_of;
use std::ops::Range;
use std::ops::{Deref, DerefMut};
+#[cfg(feature = "std")]
use std::os::raw::c_void;
-use void::Void;
+#[cfg(not(feature = "std"))]
+use core::ffi::c_void;
+#[cfg(not(feature = "std"))]
+pub use alloc::boxed::Box;
/// A C function that takes a pointer to a heap allocation and returns its size.
-type VoidPtrToSizeFn = unsafe extern "C" fn(ptr: *const c_void) -> usize;
+pub type VoidPtrToSizeFn = unsafe extern "C" fn(ptr: *const c_void) -> usize;
/// A closure implementing a stateful predicate on pointers.
-type VoidPtrToBoolFnMut = FnMut(*const c_void) -> bool;
+pub type VoidPtrToBoolFnMut = FnMut(*const c_void) -> bool;
/// Operations used when measuring heap usage of data structures.
pub struct MallocSizeOfOps {
@@ -216,6 +200,7 @@ pub trait MallocConditionalShallowSizeOf {
fn conditional_shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize;
}
+#[cfg(not(feature = "estimate-heapsize"))]
impl MallocSizeOf for String {
fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
unsafe { ops.malloc_size_of(self.as_ptr()) }
@@ -229,6 +214,7 @@ impl<'a, T: ?Sized> MallocSizeOf for &'a T {
}
}
+#[cfg(not(feature = "estimate-heapsize"))]
impl<T: ?Sized> MallocShallowSizeOf for Box<T> {
fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
unsafe { ops.malloc_size_of(&**self) }
@@ -241,24 +227,6 @@ impl<T: MallocSizeOf + ?Sized> MallocSizeOf for Box<T> {
}
}
-impl<T> MallocShallowSizeOf for thin_slice::ThinBoxedSlice<T> {
- fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
- let mut n = 0;
- unsafe {
- n += thin_slice::ThinBoxedSlice::spilled_storage(self)
- .map_or(0, |ptr| ops.malloc_size_of(ptr));
- n += ops.malloc_size_of(&**self);
- }
- n
- }
-}
-
-impl<T: MallocSizeOf> MallocSizeOf for thin_slice::ThinBoxedSlice<T> {
- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
- self.shallow_size_of(ops) + (**self).size_of(ops)
- }
-}
-
impl MallocSizeOf for () {
fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize {
0
@@ -329,6 +297,7 @@ impl<T: MallocSizeOf> MallocSizeOf for std::cell::RefCell<T> {
}
}
+#[cfg(feature = "std")]
impl<'a, B: ?Sized + ToOwned> MallocSizeOf for std::borrow::Cow<'a, B>
where
B::Owned: MallocSizeOf,
@@ -351,24 +320,7 @@ impl<T: MallocSizeOf> MallocSizeOf for [T] {
}
}
-#[cfg(feature = "servo")]
-impl MallocShallowSizeOf for ByteBuf {
- fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
- unsafe { ops.malloc_size_of(self.as_ptr()) }
- }
-}
-
-#[cfg(feature = "servo")]
-impl MallocSizeOf for ByteBuf {
- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
- let mut n = self.shallow_size_of(ops);
- for elem in self.iter() {
- n += elem.size_of(ops);
- }
- n
- }
-}
-
+#[cfg(not(feature = "estimate-heapsize"))]
impl<T> MallocShallowSizeOf for Vec<T> {
fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
unsafe { ops.malloc_size_of(self.as_ptr()) }
@@ -412,30 +364,7 @@ impl<T: MallocSizeOf> MallocSizeOf for std::collections::VecDeque<T> {
}
}
-impl<A: smallvec::Array> MallocShallowSizeOf for smallvec::SmallVec<A> {
- fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
- if self.spilled() {
- unsafe { ops.malloc_size_of(self.as_ptr()) }
- } else {
- 0
- }
- }
-}
-
-impl<A> MallocSizeOf for smallvec::SmallVec<A>
-where
- A: smallvec::Array,
- A::Item: MallocSizeOf,
-{
- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
- let mut n = self.shallow_size_of(ops);
- for elem in self.iter() {
- n += elem.size_of(ops);
- }
- n
- }
-}
-
+#[cfg(feature = "std")]
impl<T, S> MallocShallowSizeOf for std::collections::HashSet<T, S>
where
T: Eq + Hash,
@@ -457,6 +386,7 @@ where
}
}
+#[cfg(feature = "std")]
impl<T, S> MallocSizeOf for std::collections::HashSet<T, S>
where
T: Eq + Hash + MallocSizeOf,
@@ -471,59 +401,7 @@ where
}
}
-impl<T, S> MallocShallowSizeOf for hashglobe::hash_set::HashSet<T, S>
-where
- T: Eq + Hash,
- S: BuildHasher,
-{
- fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
- // See the implementation for std::collections::HashSet for details.
- if ops.has_malloc_enclosing_size_of() {
- self.iter()
- .next()
- .map_or(0, |t| unsafe { ops.malloc_enclosing_size_of(t) })
- } else {
- self.capacity() * (size_of::<T>() + size_of::<usize>())
- }
- }
-}
-
-impl<T, S> MallocSizeOf for hashglobe::hash_set::HashSet<T, S>
-where
- T: Eq + Hash + MallocSizeOf,
- S: BuildHasher,
-{
- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
- let mut n = self.shallow_size_of(ops);
- for t in self.iter() {
- n += t.size_of(ops);
- }
- n
- }
-}
-
-impl<T, S> MallocShallowSizeOf for hashglobe::fake::HashSet<T, S>
-where
- T: Eq + Hash,
- S: BuildHasher,
-{
- fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
- use std::ops::Deref;
- self.deref().shallow_size_of(ops)
- }
-}
-
-impl<T, S> MallocSizeOf for hashglobe::fake::HashSet<T, S>
-where
- T: Eq + Hash + MallocSizeOf,
- S: BuildHasher,
-{
- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
- use std::ops::Deref;
- self.deref().size_of(ops)
- }
-}
-
+#[cfg(feature = "std")]
impl<K, V, S> MallocShallowSizeOf for std::collections::HashMap<K, V, S>
where
K: Eq + Hash,
@@ -541,6 +419,7 @@ where
}
}
+#[cfg(feature = "std")]
impl<K, V, S> MallocSizeOf for std::collections::HashMap<K, V, S>
where
K: Eq + Hash + MallocSizeOf,
@@ -587,62 +466,6 @@ where
}
}
-impl<K, V, S> MallocShallowSizeOf for hashglobe::hash_map::HashMap<K, V, S>
-where
- K: Eq + Hash,
- S: BuildHasher,
-{
- fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
- // See the implementation for std::collections::HashSet for details.
- if ops.has_malloc_enclosing_size_of() {
- self.values()
- .next()
- .map_or(0, |v| unsafe { ops.malloc_enclosing_size_of(v) })
- } else {
- self.capacity() * (size_of::<V>() + size_of::<K>() + size_of::<usize>())
- }
- }
-}
-
-impl<K, V, S> MallocSizeOf for hashglobe::hash_map::HashMap<K, V, S>
-where
- K: Eq + Hash + MallocSizeOf,
- V: MallocSizeOf,
- S: BuildHasher,
-{
- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
- let mut n = self.shallow_size_of(ops);
- for (k, v) in self.iter() {
- n += k.size_of(ops);
- n += v.size_of(ops);
- }
- n
- }
-}
-
-impl<K, V, S> MallocShallowSizeOf for hashglobe::fake::HashMap<K, V, S>
-where
- K: Eq + Hash,
- S: BuildHasher,
-{
- fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
- use std::ops::Deref;
- self.deref().shallow_size_of(ops)
- }
-}
-
-impl<K, V, S> MallocSizeOf for hashglobe::fake::HashMap<K, V, S>
-where
- K: Eq + Hash + MallocSizeOf,
- V: MallocSizeOf,
- S: BuildHasher,
-{
- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
- use std::ops::Deref;
- self.deref().size_of(ops)
- }
-}
-
// PhantomData is always 0.
impl<T> MallocSizeOf for std::marker::PhantomData<T> {
fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize {
@@ -657,21 +480,43 @@ impl<T> MallocSizeOf for std::marker::PhantomData<T> {
//impl<T> !MallocSizeOf for Arc<T> { }
//impl<T> !MallocShallowSizeOf for Arc<T> { }
-impl<T> MallocUnconditionalShallowSizeOf for servo_arc::Arc<T> {
+#[cfg(feature = "std")]
+fn arc_ptr<T>(s: &Arc<T>) -> * const T {
+ &(**s) as *const T
+}
+
+
+// currently this seems only fine with jemalloc
+#[cfg(feature = "std")]
+#[cfg(not(feature = "estimate-heapsize"))]
+#[cfg(any(prefixed_jemalloc, target_os = "macos", target_os = "ios", target_os = "android", feature = "jemalloc-global"))]
+impl<T> MallocUnconditionalShallowSizeOf for Arc<T> {
fn unconditional_shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
- unsafe { ops.malloc_size_of(self.heap_ptr()) }
+ unsafe { ops.malloc_size_of(arc_ptr(self)) }
}
}
-impl<T: MallocSizeOf> MallocUnconditionalSizeOf for servo_arc::Arc<T> {
+#[cfg(feature = "std")]
+#[cfg(not(feature = "estimate-heapsize"))]
+#[cfg(not(any(prefixed_jemalloc, target_os = "macos", target_os = "ios", target_os = "android", feature = "jemalloc-global")))]
+impl<T> MallocUnconditionalShallowSizeOf for Arc<T> {
+ fn unconditional_shallow_size_of(&self, _ops: &mut MallocSizeOfOps) -> usize {
+ size_of::<T>()
+ }
+}
+
+
+#[cfg(feature = "std")]
+impl<T: MallocSizeOf> MallocUnconditionalSizeOf for Arc<T> {
fn unconditional_size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
self.unconditional_shallow_size_of(ops) + (**self).size_of(ops)
}
}
-impl<T> MallocConditionalShallowSizeOf for servo_arc::Arc<T> {
+#[cfg(feature = "std")]
+impl<T> MallocConditionalShallowSizeOf for Arc<T> {
fn conditional_shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
- if ops.have_seen_ptr(self.heap_ptr()) {
+ if ops.have_seen_ptr(arc_ptr(self)) {
0
} else {
self.unconditional_shallow_size_of(ops)
@@ -679,9 +524,10 @@ impl<T> MallocConditionalShallowSizeOf for servo_arc::Arc<T> {
}
}
-impl<T: MallocSizeOf> MallocConditionalSizeOf for servo_arc::Arc<T> {
+#[cfg(feature = "std")]
+impl<T: MallocSizeOf> MallocConditionalSizeOf for Arc<T> {
fn conditional_size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
- if ops.have_seen_ptr(self.heap_ptr()) {
+ if ops.have_seen_ptr(arc_ptr(self)) {
0
} else {
self.unconditional_size_of(ops)
@@ -695,203 +541,13 @@ impl<T: MallocSizeOf> MallocConditionalSizeOf for servo_arc::Arc<T> {
/// If a mutex is stored inside of an Arc value as a member of a data type that is being measured,
/// the Arc will not be automatically measured so there is no risk of overcounting the mutex's
/// contents.
+#[cfg(feature = "std")]
impl<T: MallocSizeOf> MallocSizeOf for std::sync::Mutex<T> {
fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
(*self.lock().unwrap()).size_of(ops)
}
}
-impl MallocSizeOf for smallbitvec::SmallBitVec {
- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
- if let Some(ptr) = self.heap_ptr() {
- unsafe { ops.malloc_size_of(ptr) }
- } else {
- 0
- }
- }
-}
-
-impl<T: MallocSizeOf, Unit> MallocSizeOf for euclid::Length<T, Unit> {
- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
- self.0.size_of(ops)
- }
-}
-
-impl<T: MallocSizeOf, Src, Dst> MallocSizeOf for euclid::TypedScale<T, Src, Dst> {
- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
- self.0.size_of(ops)
- }
-}
-
-impl<T: MallocSizeOf, U> MallocSizeOf for euclid::TypedPoint2D<T, U> {
- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
- self.x.size_of(ops) + self.y.size_of(ops)
- }
-}
-
-impl<T: MallocSizeOf, U> MallocSizeOf for euclid::TypedRect<T, U> {
- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
- self.origin.size_of(ops) + self.size.size_of(ops)
- }
-}
-
-impl<T: MallocSizeOf, U> MallocSizeOf for euclid::TypedSideOffsets2D<T, U> {
- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
- self.top.size_of(ops) +
- self.right.size_of(ops) +
- self.bottom.size_of(ops) +
- self.left.size_of(ops)
- }
-}
-
-impl<T: MallocSizeOf, U> MallocSizeOf for euclid::TypedSize2D<T, U> {
- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
- self.width.size_of(ops) + self.height.size_of(ops)
- }
-}
-
-impl<T: MallocSizeOf, Src, Dst> MallocSizeOf for euclid::TypedTransform2D<T, Src, Dst> {
- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
- self.m11.size_of(ops) +
- self.m12.size_of(ops) +
- self.m21.size_of(ops) +
- self.m22.size_of(ops) +
- self.m31.size_of(ops) +
- self.m32.size_of(ops)
- }
-}
-
-impl<T: MallocSizeOf, Src, Dst> MallocSizeOf for euclid::TypedTransform3D<T, Src, Dst> {
- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
- self.m11.size_of(ops) +
- self.m12.size_of(ops) +
- self.m13.size_of(ops) +
- self.m14.size_of(ops) +
- self.m21.size_of(ops) +
- self.m22.size_of(ops) +
- self.m23.size_of(ops) +
- self.m24.size_of(ops) +
- self.m31.size_of(ops) +
- self.m32.size_of(ops) +
- self.m33.size_of(ops) +
- self.m34.size_of(ops) +
- self.m41.size_of(ops) +
- self.m42.size_of(ops) +
- self.m43.size_of(ops) +
- self.m44.size_of(ops)
- }
-}
-
-impl<T: MallocSizeOf, U> MallocSizeOf for euclid::TypedVector2D<T, U> {
- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
- self.x.size_of(ops) + self.y.size_of(ops)
- }
-}
-
-impl MallocSizeOf for selectors::parser::AncestorHashes {
- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
- let selectors::parser::AncestorHashes { ref packed_hashes } = *self;
- packed_hashes.size_of(ops)
- }
-}
-
-impl<Impl: selectors::parser::SelectorImpl> MallocSizeOf for selectors::parser::Selector<Impl>
-where
- Impl::NonTSPseudoClass: MallocSizeOf,
- Impl::PseudoElement: MallocSizeOf,
-{
- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
- let mut n = 0;
-
- // It's OK to measure this ThinArc directly because it's the
- // "primary" reference. (The secondary references are on the
- // Stylist.)
- n += unsafe { ops.malloc_size_of(self.thin_arc_heap_ptr()) };
- for component in self.iter_raw_match_order() {
- n += component.size_of(ops);
- }
-
- n
- }
-}
-
-impl<Impl: selectors::parser::SelectorImpl> MallocSizeOf for selectors::parser::Component<Impl>
-where
- Impl::NonTSPseudoClass: MallocSizeOf,
- Impl::PseudoElement: MallocSizeOf,
-{
- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
- use selectors::parser::Component;
-
- match self {
- Component::AttributeOther(ref attr_selector) => attr_selector.size_of(ops),
- Component::Negation(ref components) => components.size_of(ops),
- Component::NonTSPseudoClass(ref pseudo) => (*pseudo).size_of(ops),
- Component::Slotted(ref selector) | Component::Host(Some(ref selector)) => {
- selector.size_of(ops)
- },
- Component::PseudoElement(ref pseudo) => (*pseudo).size_of(ops),
- Component::Combinator(..) |
- Component::ExplicitAnyNamespace |
- Component::ExplicitNoNamespace |
- Component::DefaultNamespace(..) |
- Component::Namespace(..) |
- Component::ExplicitUniversalType |
- Component::LocalName(..) |
- Component::ID(..) |
- Component::Class(..) |
- Component::AttributeInNoNamespaceExists { .. } |
- Component::AttributeInNoNamespace { .. } |
- Component::FirstChild |
- Component::LastChild |
- Component::OnlyChild |
- Component::Root |
- Component::Empty |
- Component::Scope |
- Component::NthChild(..) |
- Component::NthLastChild(..) |
- Component::NthOfType(..) |
- Component::NthLastOfType(..) |
- Component::FirstOfType |
- Component::LastOfType |
- Component::OnlyOfType |
- Component::Host(None) => 0,
- }
- }
-}
-
-impl<Impl: selectors::parser::SelectorImpl> MallocSizeOf
- for selectors::attr::AttrSelectorWithOptionalNamespace<Impl>
-{
- fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize {
- 0
- }
-}
-
-impl MallocSizeOf for Void {
- #[inline]
- fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize {
- void::unreachable(*self)
- }
-}
-
-#[cfg(feature = "servo")]
-impl<Static: string_cache::StaticAtomSet> MallocSizeOf for string_cache::Atom<Static> {
- fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize {
- 0
- }
-}
-
-// This is measured properly by the heap measurement implemented in
-// SpiderMonkey.
-#[cfg(feature = "servo")]
-impl<T: Copy + js::rust::GCMethods> MallocSizeOf for js::jsapi::Heap<T> {
- fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize {
- 0
- }
-}
-
-/// For use on types where size_of() returns 0.
#[macro_export]
macro_rules! malloc_size_of_is_0(
($($ty:ty),+) => (
@@ -929,117 +585,6 @@ malloc_size_of_is_0!(Range<u8>, Range<u16>, Range<u32>, Range<u64>, Range<usize>
malloc_size_of_is_0!(Range<i8>, Range<i16>, Range<i32>, Range<i64>, Range<isize>);
malloc_size_of_is_0!(Range<f32>, Range<f64>);
-malloc_size_of_is_0!(app_units::Au);
-
-malloc_size_of_is_0!(cssparser::RGBA, cssparser::TokenSerializationType);
-
-#[cfg(feature = "url")]
-impl MallocSizeOf for url::Host {
- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
- match *self {
- url::Host::Domain(ref s) => s.size_of(ops),
- _ => 0,
- }
- }
-}
-#[cfg(feature = "webrender_api")]
-malloc_size_of_is_0!(webrender_api::BorderRadius);
-#[cfg(feature = "webrender_api")]
-malloc_size_of_is_0!(webrender_api::BorderStyle);
-#[cfg(feature = "webrender_api")]
-malloc_size_of_is_0!(webrender_api::BoxShadowClipMode);
-#[cfg(feature = "webrender_api")]
-malloc_size_of_is_0!(webrender_api::ClipAndScrollInfo);
-#[cfg(feature = "webrender_api")]
-malloc_size_of_is_0!(webrender_api::ColorF);
-#[cfg(feature = "webrender_api")]
-malloc_size_of_is_0!(webrender_api::ComplexClipRegion);
-#[cfg(feature = "webrender_api")]
-malloc_size_of_is_0!(webrender_api::ExtendMode);
-#[cfg(feature = "webrender_api")]
-malloc_size_of_is_0!(webrender_api::FilterOp);
-#[cfg(feature = "webrender_api")]
-malloc_size_of_is_0!(webrender_api::ExternalScrollId);
-#[cfg(feature = "webrender_api")]
-malloc_size_of_is_0!(webrender_api::FontInstanceKey);
-#[cfg(feature = "webrender_api")]
-malloc_size_of_is_0!(webrender_api::GradientStop);
-#[cfg(feature = "webrender_api")]
-malloc_size_of_is_0!(webrender_api::GlyphInstance);
-#[cfg(feature = "webrender_api")]
-malloc_size_of_is_0!(webrender_api::NinePatchBorder);
-#[cfg(feature = "webrender_api")]
-malloc_size_of_is_0!(webrender_api::ImageKey);
-#[cfg(feature = "webrender_api")]
-malloc_size_of_is_0!(webrender_api::ImageRendering);
-#[cfg(feature = "webrender_api")]
-malloc_size_of_is_0!(webrender_api::LineStyle);
-#[cfg(feature = "webrender_api")]
-malloc_size_of_is_0!(webrender_api::MixBlendMode);
-#[cfg(feature = "webrender_api")]
-malloc_size_of_is_0!(webrender_api::NormalBorder);
-#[cfg(feature = "webrender_api")]
-malloc_size_of_is_0!(webrender_api::RepeatMode);
-#[cfg(feature = "webrender_api")]
-malloc_size_of_is_0!(webrender_api::ScrollSensitivity);
-#[cfg(feature = "webrender_api")]
-malloc_size_of_is_0!(webrender_api::StickyOffsetBounds);
-#[cfg(feature = "webrender_api")]
-malloc_size_of_is_0!(webrender_api::TransformStyle);
-
-#[cfg(feature = "servo")]
-impl MallocSizeOf for keyboard_types::Key {
- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
- match self {
- keyboard_types::Key::Character(ref s) => s.size_of(ops),
- _ => 0,
- }
- }
-}
-
-#[cfg(feature = "servo")]
-malloc_size_of_is_0!(keyboard_types::Modifiers);
-
-#[cfg(feature = "servo")]
-impl MallocSizeOf for xml5ever::QualName {
- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
- self.prefix.size_of(ops) + self.ns.size_of(ops) + self.local.size_of(ops)
- }
-}
-
-#[cfg(feature = "servo")]
-malloc_size_of_is_0!(time::Duration);
-#[cfg(feature = "servo")]
-malloc_size_of_is_0!(time::Tm);
-
-#[cfg(feature = "servo")]
-impl<T> MallocSizeOf for hyper_serde::Serde<T>
-where
- for<'de> hyper_serde::De<T>: serde::Deserialize<'de>,
- for<'a> hyper_serde::Ser<'a, T>: serde::Serialize,
- T: MallocSizeOf,
-{
- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
- self.0.size_of(ops)
- }
-}
-
-// Placeholder for unique case where internals of Sender cannot be measured.
-// malloc size of is 0 macro complains about type supplied!
-#[cfg(feature = "servo")]
-impl<T> MallocSizeOf for crossbeam_channel::Sender<T> {
- fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize {
- 0
- }
-}
-
-#[cfg(feature = "servo")]
-impl MallocSizeOf for hyper::StatusCode {
- fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize {
- 0
- }
-}
-
/// Measurable that defers to inner value and used to verify MallocSizeOf implementation in a
/// struct.
#[derive(Clone)]

parity-util-mem/src/allocators.rs

@@ -0,0 +1,145 @@
// Copyright 2015-2019 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Default allocator management.
//! Features are:
//! - windows:
//! - no features: default implementation from servo `heapsize` crate
//! - weealloc: default to `estimate_size`
//! - dlmalloc: default to `estimate_size`
//! - jemalloc: default windows allocator is used instead
//! - arch x86:
//! - no features: use default alloc
//! - jemalloc: use jemallocator crate
//! - weealloc: default to `estimate_size`
//! - dlmalloc: default to `estimate_size`
//! - arch wasm32:
//! - no features: default to `estimate_size`
//! - weealloc: default to `estimate_size`
//! - dlmalloc: default to `estimate_size`
//! - jemalloc: compile error
use malloc_size::{MallocSizeOfOps, VoidPtrToSizeFn, MallocSizeOf};
#[cfg(feature = "std")]
use malloc_size::MallocUnconditionalSizeOf;
#[cfg(feature = "std")]
use std::os::raw::c_void;
#[cfg(not(feature = "std"))]
use core::ffi::c_void;
#[cfg(not(feature = "std"))]
use alloc::collections::btree_set::BTreeSet;
mod usable_size {
use super::*;
cfg_if! {
if #[cfg(any(
target_arch = "wasm32",
feature = "estimate-heapsize",
feature = "weealloc-global",
feature = "dlmalloc-global",
))] {
// do not try system allocator
/// Warning: this is for compatibility only.
/// This function panics when called; the `estimate-heapsize` feature needs
/// to be activated so that this call is never reached.
pub unsafe extern "C" fn malloc_usable_size(_ptr: *const c_void) -> usize {
unreachable!("estimate heapsize only")
}
} else if #[cfg(target_os = "windows")] {
// default windows allocator
extern crate winapi;
use self::winapi::um::heapapi::{GetProcessHeap, HeapSize, HeapValidate};
/// Get the size of a heap block.
/// Call windows allocator through `winapi` crate
pub unsafe extern "C" fn malloc_usable_size(mut ptr: *const c_void) -> usize {
let heap = GetProcessHeap();
if HeapValidate(heap, 0, ptr) == 0 {
ptr = *(ptr as *const *const c_void).offset(-1);
}
HeapSize(heap, 0, ptr) as usize
}
} else if #[cfg(feature = "jemalloc-global")] {
/// Use jemalloc's usable-size C function through the jemallocator crate.
pub unsafe extern "C" fn malloc_usable_size(ptr: *const c_void) -> usize {
jemallocator::usable_size(ptr)
}
} else {
// default allocator used
/// macOS, iOS and Android call jemalloc.
/// Linux calls the system allocator (currently malloc).
extern "C" {
#[cfg_attr(any(prefixed_jemalloc, target_os = "macos", target_os = "ios", target_os = "android"), link_name = "je_malloc_usable_size")]
pub fn malloc_usable_size(ptr: *const c_void) -> usize;
}
}
}
/// No enclosing function defined.
#[inline]
pub fn new_enclosing_size_fn() -> Option<VoidPtrToSizeFn> {
None
}
}
/// Get a new instance of a MallocSizeOfOps
pub fn new_malloc_size_ops() -> MallocSizeOfOps {
MallocSizeOfOps::new(
usable_size::malloc_usable_size,
usable_size::new_enclosing_size_fn(),
None,
)
}
/// Extension trait for `MallocSizeOf`; do not implement it directly.
/// It allows getting the heap size without exposing `MallocSizeOfOps`
/// (a fresh default `MallocSizeOfOps` is used for each call).
pub trait MallocSizeOfExt: MallocSizeOf {
/// Method to launch a heapsize measurement with a
/// fresh state.
fn malloc_size_of(&self) -> usize {
let mut ops = new_malloc_size_ops();
<Self as MallocSizeOf>::size_of(self, &mut ops)
}
}
impl<T: MallocSizeOf> MallocSizeOfExt for T { }
#[cfg(feature = "std")]
impl<T: MallocSizeOf> MallocSizeOf for std::sync::Arc<T> {
fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
self.unconditional_size_of(ops)
}
}
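To illustrate how these pieces fit together, here is a hedged sketch of a downstream caller that builds a `MallocSizeOfOps` explicitly with `new_malloc_size_ops` and hands it to `MallocSizeOf::size_of` — which is what `MallocSizeOfExt::malloc_size_of` does per call (the `allocators` path assumes the `pub mod allocators;` declaration in `lib.rs` below):
```rust
extern crate parity_util_mem;

use parity_util_mem::allocators::new_malloc_size_ops;
use parity_util_mem::MallocSizeOf;

fn main() {
    // Build the ops once and reuse them across measurements;
    // `MallocSizeOfExt::malloc_size_of` instead creates a fresh one per call.
    let mut ops = new_malloc_size_ops();
    let data = vec![1u64; 100];
    println!("vec heap size: {} bytes", data.size_of(&mut ops));
}
```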

parity-util-mem/src/impls.rs

@@ -0,0 +1,96 @@
// Copyright 2015-2019 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Implementations of `MallocSizeOf` for common types:
//! - ethereum-types uints and fixed hashes.
//! - elastic_array arrays.
//! - parking_lot mutex structures.
extern crate elastic_array;
extern crate ethereum_types;
extern crate parking_lot;
use self::ethereum_types::{
U64, U128, U256, U512, H32, H64,
H128, H160, H256, H264, H512, H520,
Bloom
};
use self::elastic_array::{
ElasticArray2,
ElasticArray4,
ElasticArray8,
ElasticArray16,
ElasticArray32,
ElasticArray36,
ElasticArray64,
ElasticArray128,
ElasticArray256,
ElasticArray512,
ElasticArray1024,
ElasticArray2048,
};
use self::parking_lot::{Mutex, RwLock};
use super::{MallocSizeOf, MallocSizeOfOps};
#[cfg(not(feature = "std"))]
use core as std;
#[cfg(feature = "std")]
malloc_size_of_is_0!(std::time::Instant);
malloc_size_of_is_0!(std::time::Duration);
malloc_size_of_is_0!(
U64, U128, U256, U512, H32, H64,
H128, H160, H256, H264, H512, H520,
Bloom
);
macro_rules! impl_elastic_array {
($name: ident, $dummy: ident, $size: expr) => (
impl<T> MallocSizeOf for $name<T>
where T: MallocSizeOf {
fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
self[..].size_of(ops)
}
}
)
}
impl_elastic_array!(ElasticArray2, ElasticArray2Dummy, 2);
impl_elastic_array!(ElasticArray4, ElasticArray4Dummy, 4);
impl_elastic_array!(ElasticArray8, ElasticArray8Dummy, 8);
impl_elastic_array!(ElasticArray16, ElasticArray16Dummy, 16);
impl_elastic_array!(ElasticArray32, ElasticArray32Dummy, 32);
impl_elastic_array!(ElasticArray36, ElasticArray36Dummy, 36);
impl_elastic_array!(ElasticArray64, ElasticArray64Dummy, 64);
impl_elastic_array!(ElasticArray128, ElasticArray128Dummy, 128);
impl_elastic_array!(ElasticArray256, ElasticArray256Dummy, 256);
impl_elastic_array!(ElasticArray512, ElasticArray512Dummy, 512);
impl_elastic_array!(ElasticArray1024, ElasticArray1024Dummy, 1024);
impl_elastic_array!(ElasticArray2048, ElasticArray2048Dummy, 2048);
impl<T: MallocSizeOf> MallocSizeOf for Mutex<T> {
fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
(*self.lock()).size_of(ops)
}
}
impl<T: MallocSizeOf> MallocSizeOf for RwLock<T> {
fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
self.read().size_of(ops)
}
}
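A short sketch of what these impls enable under the default `ethereum-impls` feature: values guarded by parking_lot locks become measurable (this assumes the downstream crate links the same `parking_lot` version as the wildcard dependency above):
```rust
extern crate parity_util_mem;
extern crate parking_lot;

use parity_util_mem::MallocSizeOfExt;
use parking_lot::RwLock;

fn main() {
    // The RwLock impl above takes a read lock and measures the guarded
    // value, so the reported size is the inner Vec's heap buffer.
    let cache = RwLock::new(vec![0u8; 4096]);
    println!("cache heap size: {} bytes", cache.malloc_size_of());
}
```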

parity-util-mem/src/lib.rs

@@ -0,0 +1,157 @@
// Copyright 2015-2019 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Crate for Parity memory-management-related utilities.
//! It includes global allocator choice, heap measurement and
//! memory erasure.
#![cfg_attr(not(feature = "std"), no_std)]
#![cfg_attr(not(feature = "std"), feature(core_intrinsics))]
#![cfg_attr(not(feature = "std"), feature(alloc))]
#[macro_use]
extern crate cfg_if;
#[cfg(not(feature = "std"))]
extern crate alloc;
extern crate clear_on_drop as cod;
#[macro_use] extern crate malloc_size_of_derive as malloc_size_derive;
use std::ops::{Deref, DerefMut};
#[cfg(feature = "volatile-erase")]
use std::ptr;
#[cfg(not(feature = "volatile-erase"))]
pub use cod::clear::Clear;
cfg_if! {
if #[cfg(all(
feature = "jemalloc-global",
feature = "jemalloc-global",
not(target_os = "windows"),
not(target_arch = "wasm32")
))] {
extern crate jemallocator;
#[global_allocator]
/// Global allocator
pub static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc;
} else if #[cfg(feature = "dlmalloc-global")] {
extern crate dlmalloc;
#[global_allocator]
/// Global allocator
pub static ALLOC: dlmalloc::GlobalDlmalloc = dlmalloc::GlobalDlmalloc;
} else if #[cfg(feature = "weealloc-global")] {
extern crate wee_alloc;
#[global_allocator]
/// Global allocator
pub static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT;
} else {
// default allocator used
}
}
pub mod allocators;
#[cfg(feature = "estimate-heapsize")]
pub mod sizeof;
#[cfg(not(feature = "std"))]
use core as std;
/// This is a copy of the patched crate `malloc_size_of` as a module.
/// We need it as an inner module to be able to define our own trait implementations;
/// if at some point the trait becomes standard enough, we could do this the right way
/// by implementing it in our type-traits crates. At this time, moving this trait to the
/// primitive-types level would impact too many of the dependencies to be easily manageable.
#[macro_use] mod malloc_size;
#[cfg(feature = "ethereum-impls")]
pub mod impls;
/// Reexport clear_on_drop crate.
pub mod clear_on_drop {
pub use cod::*;
}
pub use malloc_size_derive::*;
pub use malloc_size::{
MallocSizeOfOps,
MallocSizeOf,
};
pub use allocators::MallocSizeOfExt;
/// Wrapper to zero out memory when dropped.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Memzero<T: AsMut<[u8]>> {
mem: T,
}
impl<T: AsMut<[u8]>> From<T> for Memzero<T> {
fn from(mem: T) -> Memzero<T> {
Memzero { mem }
}
}
#[cfg(feature = "volatile-erase")]
impl<T: AsMut<[u8]>> Drop for Memzero<T> {
fn drop(&mut self) {
unsafe {
for byte_ref in self.mem.as_mut() {
ptr::write_volatile(byte_ref, 0)
}
}
}
}
#[cfg(not(feature = "volatile-erase"))]
impl<T: AsMut<[u8]>> Drop for Memzero<T> {
fn drop(&mut self) {
self.as_mut().clear();
}
}
impl<T: AsMut<[u8]>> Deref for Memzero<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.mem
}
}
impl<T: AsMut<[u8]>> DerefMut for Memzero<T> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.mem
}
}
#[cfg(feature = "std")]
#[cfg(test)]
mod test {
use std::sync::Arc;
use super::MallocSizeOfExt;
#[test]
fn test_arc() {
let val = Arc::new("test".to_string());
let s = val.malloc_size_of();
assert!(s > 0);
}
}
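A brief sketch of the `Memzero` wrapper defined above: it dereferences to the wrapped buffer for normal use, and zeroes the bytes on drop (via volatile writes under `volatile-erase`, via `clear_on_drop` otherwise):
```rust
extern crate parity_util_mem;

use parity_util_mem::Memzero;

fn main() {
    // Deref/DerefMut expose the inner Vec for normal use.
    let mut secret = Memzero::from(vec![1u8, 2, 3]);
    secret[0] = 0xff;
    assert_eq!(*secret, vec![0xff, 2, 3]);
    // When `secret` goes out of scope, its bytes are zeroed.
}
```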

parity-util-mem/src/malloc_size.rs

@@ -0,0 +1,605 @@
// Copyright 2016-2017 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A crate for measuring the heap usage of data structures in a way that
//! integrates with Firefox's memory reporting, particularly the use of
//! mozjemalloc and DMD. In particular, it has the following features.
//! - It isn't bound to a particular heap allocator.
//! - It provides traits for both "shallow" and "deep" measurement, which gives
//! flexibility in the cases where the traits can't be used.
//! - It allows for measuring blocks even when only an interior pointer can be
//! obtained for heap allocations, e.g. `HashSet` and `HashMap`. (This relies
//! on the heap allocator having suitable support, which mozjemalloc has.)
//! - It allows handling of types like `Rc` and `Arc` by providing traits that
//! are different to the ones for non-graph structures.
//!
//! Suggested uses are as follows.
//! - When possible, use the `MallocSizeOf` trait. (Deriving support is
//! provided by the `malloc_size_of_derive` crate.)
//! - If you need an additional synchronization argument, provide a function
//! that is like the standard trait method, but with the extra argument.
//! - If you need multiple measurements for a type, provide a function named
//! `add_size_of` that takes a mutable reference to a struct that contains
//! the multiple measurement fields.
//! - When deep measurement (via `MallocSizeOf`) cannot be implemented for a
//! type, shallow measurement (via `MallocShallowSizeOf`) in combination with
//! iteration can be a useful substitute.
//! - `Rc` and `Arc` are always tricky, which is why `MallocSizeOf` is not (and
//! should not be) implemented for them.
//! - If an `Rc` or `Arc` is known to be a "primary" reference and can always
//! be measured, it should be measured via the `MallocUnconditionalSizeOf`
//! trait.
//! - If an `Rc` or `Arc` should be measured only if it hasn't been seen
//! before, it should be measured via the `MallocConditionalSizeOf` trait.
//! - Using universal function call syntax is a good idea when measuring boxed
//! fields in structs, because it makes it clear that the Box is being
//! measured as well as the thing it points to. E.g.
//! `<Box<_> as MallocSizeOf>::size_of(field, ops)`.
// This file is patched at commit 5bdea7dc1c80790a852a3fb03edfb2b8fbd403dc DO NOT EDIT.
#[cfg(not(feature = "std"))]
use alloc::vec::Vec;
#[cfg(not(feature = "std"))]
use alloc::string::String;
#[cfg(not(feature = "std"))]
mod std {
pub use core::*;
pub use alloc::collections;
}
#[cfg(feature = "std")]
use std::sync::Arc;
use std::hash::{BuildHasher, Hash};
use std::mem::size_of;
use std::ops::Range;
use std::ops::{Deref, DerefMut};
#[cfg(feature = "std")]
use std::os::raw::c_void;
#[cfg(not(feature = "std"))]
use core::ffi::c_void;
#[cfg(not(feature = "std"))]
pub use alloc::boxed::Box;
/// A C function that takes a pointer to a heap allocation and returns its size.
pub type VoidPtrToSizeFn = unsafe extern "C" fn(ptr: *const c_void) -> usize;
/// A closure implementing a stateful predicate on pointers.
pub type VoidPtrToBoolFnMut = FnMut(*const c_void) -> bool;
/// Operations used when measuring heap usage of data structures.
pub struct MallocSizeOfOps {
/// A function that returns the size of a heap allocation.
size_of_op: VoidPtrToSizeFn,
/// Like `size_of_op`, but can take an interior pointer. Optional because
/// not all allocators support this operation. If it's not provided, some
/// memory measurements will actually be computed estimates rather than
/// real and accurate measurements.
enclosing_size_of_op: Option<VoidPtrToSizeFn>,
/// Check if a pointer has been seen before, and remember it for next time.
/// Useful when measuring `Rc`s and `Arc`s. Optional, because many places
/// don't need it.
have_seen_ptr_op: Option<Box<VoidPtrToBoolFnMut>>,
}
impl MallocSizeOfOps {
pub fn new(
size_of: VoidPtrToSizeFn,
malloc_enclosing_size_of: Option<VoidPtrToSizeFn>,
have_seen_ptr: Option<Box<VoidPtrToBoolFnMut>>,
) -> Self {
MallocSizeOfOps {
size_of_op: size_of,
enclosing_size_of_op: malloc_enclosing_size_of,
have_seen_ptr_op: have_seen_ptr,
}
}
/// Check if an allocation is empty. This relies on knowledge of how Rust
/// handles empty allocations, which may change in the future.
fn is_empty<T: ?Sized>(ptr: *const T) -> bool {
// The correct condition is this:
// `ptr as usize <= ::std::mem::align_of::<T>()`
// But we can't call align_of() on a ?Sized T. So we approximate it
// with the following. 256 is large enough that it should always be
// larger than the required alignment, but small enough that it is
// always in the first page of memory and therefore not a legitimate
// address.
return ptr as *const usize as usize <= 256;
}
/// Call `size_of_op` on `ptr`, first checking that the allocation isn't
/// empty, because some types (such as `Vec`) utilize empty allocations.
pub unsafe fn malloc_size_of<T: ?Sized>(&self, ptr: *const T) -> usize {
if MallocSizeOfOps::is_empty(ptr) {
0
} else {
(self.size_of_op)(ptr as *const c_void)
}
}
/// Is an `enclosing_size_of_op` available?
pub fn has_malloc_enclosing_size_of(&self) -> bool {
self.enclosing_size_of_op.is_some()
}
/// Call `enclosing_size_of_op`, which must be available, on `ptr`, which
/// must not be empty.
pub unsafe fn malloc_enclosing_size_of<T>(&self, ptr: *const T) -> usize {
assert!(!MallocSizeOfOps::is_empty(ptr));
(self.enclosing_size_of_op.unwrap())(ptr as *const c_void)
}
/// Call `have_seen_ptr_op` on `ptr`.
pub fn have_seen_ptr<T>(&mut self, ptr: *const T) -> bool {
let have_seen_ptr_op = self
.have_seen_ptr_op
.as_mut()
.expect("missing have_seen_ptr_op");
have_seen_ptr_op(ptr as *const c_void)
}
}
/// Trait for measuring the "deep" heap usage of a data structure. This is the
/// most commonly-used of the traits.
pub trait MallocSizeOf {
/// Measure the heap usage of all descendant heap-allocated structures, but
/// not the space taken up by the value itself.
fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize;
}
/// Trait for measuring the "shallow" heap usage of a container.
pub trait MallocShallowSizeOf {
/// Measure the heap usage of immediate heap-allocated descendant
/// structures, but not the space taken up by the value itself. Anything
/// beyond the immediate descendants must be measured separately, using
/// iteration.
fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize;
}
/// Like `MallocSizeOf`, but with a different name so it cannot be used
/// accidentally with derive(MallocSizeOf). For use with types like `Rc` and
/// `Arc` when appropriate (e.g. when measuring a "primary" reference).
pub trait MallocUnconditionalSizeOf {
/// Measure the heap usage of all heap-allocated descendant structures, but
/// not the space taken up by the value itself.
fn unconditional_size_of(&self, ops: &mut MallocSizeOfOps) -> usize;
}
/// `MallocUnconditionalSizeOf` combined with `MallocShallowSizeOf`.
pub trait MallocUnconditionalShallowSizeOf {
/// `unconditional_size_of` combined with `shallow_size_of`.
fn unconditional_shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize;
}
/// Like `MallocSizeOf`, but only measures if the value hasn't already been
/// measured. For use with types like `Rc` and `Arc` when appropriate (e.g.
/// when there is no "primary" reference).
pub trait MallocConditionalSizeOf {
/// Measure the heap usage of all heap-allocated descendant structures, but
/// not the space taken up by the value itself, and only if that heap usage
/// hasn't already been measured.
fn conditional_size_of(&self, ops: &mut MallocSizeOfOps) -> usize;
}
/// `MallocConditionalSizeOf` combined with `MallocShallowSizeOf`.
pub trait MallocConditionalShallowSizeOf {
/// `conditional_size_of` combined with `shallow_size_of`.
fn conditional_shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize;
}
#[cfg(not(feature = "estimate-heapsize"))]
impl MallocSizeOf for String {
fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
unsafe { ops.malloc_size_of(self.as_ptr()) }
}
}
impl<'a, T: ?Sized> MallocSizeOf for &'a T {
fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize {
// Zero makes sense for a non-owning reference.
0
}
}
#[cfg(not(feature = "estimate-heapsize"))]
impl<T: ?Sized> MallocShallowSizeOf for Box<T> {
fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
unsafe { ops.malloc_size_of(&**self) }
}
}
impl<T: MallocSizeOf + ?Sized> MallocSizeOf for Box<T> {
fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
self.shallow_size_of(ops) + (**self).size_of(ops)
}
}
impl MallocSizeOf for () {
fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize {
0
}
}
impl<T1, T2> MallocSizeOf for (T1, T2)
where
T1: MallocSizeOf,
T2: MallocSizeOf,
{
fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
self.0.size_of(ops) + self.1.size_of(ops)
}
}
impl<T1, T2, T3> MallocSizeOf for (T1, T2, T3)
where
T1: MallocSizeOf,
T2: MallocSizeOf,
T3: MallocSizeOf,
{
fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
self.0.size_of(ops) + self.1.size_of(ops) + self.2.size_of(ops)
}
}
impl<T1, T2, T3, T4> MallocSizeOf for (T1, T2, T3, T4)
where
T1: MallocSizeOf,
T2: MallocSizeOf,
T3: MallocSizeOf,
T4: MallocSizeOf,
{
fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
self.0.size_of(ops) + self.1.size_of(ops) + self.2.size_of(ops) + self.3.size_of(ops)
}
}
impl<T: MallocSizeOf> MallocSizeOf for Option<T> {
fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
if let Some(val) = self.as_ref() {
val.size_of(ops)
} else {
0
}
}
}
impl<T: MallocSizeOf, E: MallocSizeOf> MallocSizeOf for Result<T, E> {
fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
match *self {
Ok(ref x) => x.size_of(ops),
Err(ref e) => e.size_of(ops),
}
}
}
impl<T: MallocSizeOf + Copy> MallocSizeOf for std::cell::Cell<T> {
fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
self.get().size_of(ops)
}
}
impl<T: MallocSizeOf> MallocSizeOf for std::cell::RefCell<T> {
fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
self.borrow().size_of(ops)
}
}
#[cfg(feature = "std")]
impl<'a, B: ?Sized + ToOwned> MallocSizeOf for std::borrow::Cow<'a, B>
where
B::Owned: MallocSizeOf,
{
fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
match *self {
std::borrow::Cow::Borrowed(_) => 0,
std::borrow::Cow::Owned(ref b) => b.size_of(ops),
}
}
}
impl<T: MallocSizeOf> MallocSizeOf for [T] {
fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
let mut n = 0;
for elem in self.iter() {
n += elem.size_of(ops);
}
n
}
}
#[cfg(not(feature = "estimate-heapsize"))]
impl<T> MallocShallowSizeOf for Vec<T> {
fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
unsafe { ops.malloc_size_of(self.as_ptr()) }
}
}
impl<T: MallocSizeOf> MallocSizeOf for Vec<T> {
fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
let mut n = self.shallow_size_of(ops);
for elem in self.iter() {
n += elem.size_of(ops);
}
n
}
}
impl<T> MallocShallowSizeOf for std::collections::VecDeque<T> {
fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
if ops.has_malloc_enclosing_size_of() {
if let Some(front) = self.front() {
// The front element is an interior pointer.
unsafe { ops.malloc_enclosing_size_of(&*front) }
} else {
// This assumes that no memory is allocated when the VecDeque is empty.
0
}
} else {
// An estimate.
self.capacity() * size_of::<T>()
}
}
}
impl<T: MallocSizeOf> MallocSizeOf for std::collections::VecDeque<T> {
fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
let mut n = self.shallow_size_of(ops);
for elem in self.iter() {
n += elem.size_of(ops);
}
n
}
}
#[cfg(feature = "std")]
impl<T, S> MallocShallowSizeOf for std::collections::HashSet<T, S>
where
T: Eq + Hash,
S: BuildHasher,
{
fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
if ops.has_malloc_enclosing_size_of() {
// The first value from the iterator gives us an interior pointer.
// `ops.malloc_enclosing_size_of()` then gives us the storage size.
// This assumes that the `HashSet`'s contents (values and hashes)
// are all stored in a single contiguous heap allocation.
self.iter()
.next()
.map_or(0, |t| unsafe { ops.malloc_enclosing_size_of(t) })
} else {
// An estimate.
self.capacity() * (size_of::<T>() + size_of::<usize>())
}
}
}
#[cfg(feature = "std")]
impl<T, S> MallocSizeOf for std::collections::HashSet<T, S>
where
T: Eq + Hash + MallocSizeOf,
S: BuildHasher,
{
fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
let mut n = self.shallow_size_of(ops);
for t in self.iter() {
n += t.size_of(ops);
}
n
}
}
#[cfg(feature = "std")]
impl<K, V, S> MallocShallowSizeOf for std::collections::HashMap<K, V, S>
where
K: Eq + Hash,
S: BuildHasher,
{
fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
// See the implementation for std::collections::HashSet for details.
if ops.has_malloc_enclosing_size_of() {
self.values()
.next()
.map_or(0, |v| unsafe { ops.malloc_enclosing_size_of(v) })
} else {
self.capacity() * (size_of::<V>() + size_of::<K>() + size_of::<usize>())
}
}
}
#[cfg(feature = "std")]
impl<K, V, S> MallocSizeOf for std::collections::HashMap<K, V, S>
where
K: Eq + Hash + MallocSizeOf,
V: MallocSizeOf,
S: BuildHasher,
{
fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
let mut n = self.shallow_size_of(ops);
for (k, v) in self.iter() {
n += k.size_of(ops);
n += v.size_of(ops);
}
n
}
}
impl<K, V> MallocShallowSizeOf for std::collections::BTreeMap<K, V>
where
K: Eq + Hash,
{
fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
if ops.has_malloc_enclosing_size_of() {
self.values()
.next()
.map_or(0, |v| unsafe { ops.malloc_enclosing_size_of(v) })
} else {
self.len() * (size_of::<V>() + size_of::<K>() + size_of::<usize>())
}
}
}
impl<K, V> MallocSizeOf for std::collections::BTreeMap<K, V>
where
K: Eq + Hash + MallocSizeOf,
V: MallocSizeOf,
{
fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
let mut n = self.shallow_size_of(ops);
for (k, v) in self.iter() {
n += k.size_of(ops);
n += v.size_of(ops);
}
n
}
}
// PhantomData is always 0.
impl<T> MallocSizeOf for std::marker::PhantomData<T> {
fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize {
0
}
}
// XXX: we don't want MallocSizeOf to be defined for Rc and Arc. If negative
// trait bounds are ever allowed, this code should be uncommented.
// (We do have a compile-fail test for this:
// rc_arc_must_not_derive_malloc_size_of.rs)
//impl<T> !MallocSizeOf for Arc<T> { }
//impl<T> !MallocShallowSizeOf for Arc<T> { }
#[cfg(feature = "std")]
fn arc_ptr<T>(s: &Arc<T>) -> * const T {
&(**s) as *const T
}
// currently this seems only fine with jemalloc
#[cfg(feature = "std")]
#[cfg(not(feature = "estimate-heapsize"))]
#[cfg(any(prefixed_jemalloc, target_os = "macos", target_os = "ios", target_os = "android", feature = "jemalloc-global"))]
impl<T> MallocUnconditionalShallowSizeOf for Arc<T> {
fn unconditional_shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
unsafe { ops.malloc_size_of(arc_ptr(self)) }
}
}
#[cfg(feature = "std")]
#[cfg(not(feature = "estimate-heapsize"))]
#[cfg(not(any(prefixed_jemalloc, target_os = "macos", target_os = "ios", target_os = "android", feature = "jemalloc-global")))]
impl<T> MallocUnconditionalShallowSizeOf for Arc<T> {
fn unconditional_shallow_size_of(&self, _ops: &mut MallocSizeOfOps) -> usize {
size_of::<T>()
}
}
#[cfg(feature = "std")]
impl<T: MallocSizeOf> MallocUnconditionalSizeOf for Arc<T> {
fn unconditional_size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
self.unconditional_shallow_size_of(ops) + (**self).size_of(ops)
}
}
#[cfg(feature = "std")]
impl<T> MallocConditionalShallowSizeOf for Arc<T> {
fn conditional_shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
if ops.have_seen_ptr(arc_ptr(self)) {
0
} else {
self.unconditional_shallow_size_of(ops)
}
}
}
#[cfg(feature = "std")]
impl<T: MallocSizeOf> MallocConditionalSizeOf for Arc<T> {
fn conditional_size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
if ops.have_seen_ptr(arc_ptr(self)) {
0
} else {
self.unconditional_size_of(ops)
}
}
}
/// If a mutex is stored directly as a member of a data type that is being measured,
/// it is the unique owner of its contents and deserves to be measured.
///
/// If a mutex is stored inside of an Arc value as a member of a data type that is being measured,
/// the Arc will not be automatically measured so there is no risk of overcounting the mutex's
/// contents.
#[cfg(feature = "std")]
impl<T: MallocSizeOf> MallocSizeOf for std::sync::Mutex<T> {
fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
(*self.lock().unwrap()).size_of(ops)
}
}
#[macro_export]
macro_rules! malloc_size_of_is_0(
($($ty:ty),+) => (
$(
impl $crate::MallocSizeOf for $ty {
#[inline(always)]
fn size_of(&self, _: &mut $crate::MallocSizeOfOps) -> usize {
0
}
}
)+
);
($($ty:ident<$($gen:ident),+>),+) => (
$(
impl<$($gen: $crate::MallocSizeOf),+> $crate::MallocSizeOf for $ty<$($gen),+> {
#[inline(always)]
fn size_of(&self, _: &mut $crate::MallocSizeOfOps) -> usize {
0
}
}
)+
);
);
malloc_size_of_is_0!(bool, char, str);
malloc_size_of_is_0!(u8, u16, u32, u64, u128, usize);
malloc_size_of_is_0!(i8, i16, i32, i64, i128, isize);
malloc_size_of_is_0!(f32, f64);
malloc_size_of_is_0!(std::sync::atomic::AtomicBool);
malloc_size_of_is_0!(std::sync::atomic::AtomicIsize);
malloc_size_of_is_0!(std::sync::atomic::AtomicUsize);
malloc_size_of_is_0!(Range<u8>, Range<u16>, Range<u32>, Range<u64>, Range<usize>);
malloc_size_of_is_0!(Range<i8>, Range<i16>, Range<i32>, Range<i64>, Range<isize>);
malloc_size_of_is_0!(Range<f32>, Range<f64>);
/// Measurable that defers to inner value and used to verify MallocSizeOf implementation in a
/// struct.
#[derive(Clone)]
pub struct Measurable<T: MallocSizeOf>(pub T);
impl<T: MallocSizeOf> Deref for Measurable<T> {
type Target = T;
fn deref(&self) -> &T {
&self.0
}
}
impl<T: MallocSizeOf> DerefMut for Measurable<T> {
fn deref_mut(&mut self) -> &mut T {
&mut self.0
}
}
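To connect the traits: a downstream type typically implements `MallocSizeOf` by summing its fields (deriving via the re-exported `malloc_size_of_derive` macro is the usual alternative). A hedged sketch with a hypothetical `Cache` type:
```rust
extern crate parity_util_mem;

use parity_util_mem::{MallocSizeOf, MallocSizeOfExt, MallocSizeOfOps};

// Hypothetical downstream type used only for illustration.
struct Cache {
    id: u64,
    entries: Vec<String>,
}

impl MallocSizeOf for Cache {
    fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
        // `u64` contributes 0 (see malloc_size_of_is_0! above); the Vec
        // reports its buffer plus each String's heap allocation.
        self.id.size_of(ops) + self.entries.size_of(ops)
    }
}

fn main() {
    let cache = Cache { id: 1, entries: vec!["entry".to_string(); 4] };
    assert!(cache.malloc_size_of() > 0);
}
```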

parity-util-mem/src/sizeof.rs

@@ -0,0 +1,51 @@
// Copyright 2015-2019 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Estimation for heap-size calculation. Usable to replace calls to the allocator
//! (for some allocators, or simply when we need a deterministic consumption measurement).
use crate::malloc_size::{
MallocSizeOf,
MallocShallowSizeOf,
MallocUnconditionalShallowSizeOf,
MallocSizeOfOps
};
use std::mem::{size_of, size_of_val};
impl<T: ?Sized> MallocShallowSizeOf for Box<T> {
fn shallow_size_of(&self, _ops: &mut MallocSizeOfOps) -> usize {
size_of_val(&**self)
}
}
impl MallocSizeOf for String {
fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize {
self.capacity() * size_of::<u8>()
}
}
impl<T> MallocShallowSizeOf for Vec<T> {
fn shallow_size_of(&self, _ops: &mut MallocSizeOfOps) -> usize {
self.capacity() * size_of::<T>()
}
}
impl<T> MallocUnconditionalShallowSizeOf for std::sync::Arc<T> {
fn unconditional_shallow_size_of(&self, _ops: &mut MallocSizeOfOps) -> usize {
size_of::<T>()
}
}
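These estimation impls replace the allocator query with a capacity-based formula; as a standalone illustration (not crate code), a `Vec`'s shallow size under `estimate-heapsize` is computed like this:
```rust
use std::mem::size_of;

// Mirrors the Vec impl above: capacity() elements of size_of::<T>() each,
// regardless of what the allocator actually handed out.
fn estimated_vec_shallow_size<T>(v: &Vec<T>) -> usize {
    v.capacity() * size_of::<T>()
}

fn main() {
    let v: Vec<u64> = Vec::with_capacity(10);
    assert_eq!(estimated_vec_shallow_size(&v), 10 * size_of::<u64>());
}
```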