Add tools

2024-12-16 20:57:17 -08:00
parent fbb975674f
commit 3207a2c7ee
3678 changed files with 2074383 additions and 0 deletions


@@ -0,0 +1,5 @@
rust-std-x86_64-unknown-linux-gnu
rust-std-x86_64-unknown-redox
rustc
cargo
rust-src


@@ -0,0 +1,15 @@
# Add this folder to the Python sys.path; GDB's Python interpreter will then find modules in this path
import sys
from os import path
self_dir = path.dirname(path.realpath(__file__))
sys.path.append(self_dir)
# ruff: noqa: E402
import gdb
import gdb_lookup
# current_objfile can be None (even with `gdb foo-app`); sourcing this file after gdb init now works
try:
gdb_lookup.register_printers(gdb.current_objfile())
except Exception:
gdb_lookup.register_printers(gdb.selected_inferior().progspace)
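# Usage sketch (paths are assumptions): source this file from a running gdb,
#   (gdb) source /path/to/this_script.py
# or load it at startup:
#   gdb -q -x /path/to/this_script.py ./foo-app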


@@ -0,0 +1,118 @@
import gdb
import gdb.printing
import re
from gdb_providers import *
from rust_types import *
_gdb_version_matched = re.search(r"([0-9]+)\.([0-9]+)", gdb.VERSION)
gdb_version = [int(num) for num in _gdb_version_matched.groups()] if _gdb_version_matched else []
def register_printers(objfile):
objfile.pretty_printers.append(printer)
# BACKCOMPAT: rust 1.35
def is_hashbrown_hashmap(hash_map):
return len(hash_map.type.fields()) == 1
def classify_rust_type(type):
type_class = type.code
if type_class == gdb.TYPE_CODE_STRUCT:
return classify_struct(type.tag, type.fields())
if type_class == gdb.TYPE_CODE_UNION:
return classify_union(type.fields())
return RustType.OTHER
def check_enum_discriminant(valobj):
content = valobj[valobj.type.fields()[0]]
fields = content.type.fields()
if len(fields) > 1:
discriminant = int(content[fields[0]]) + 1
if discriminant > len(fields):
# invalid discriminant
return False
return True
# Helper for enum printing that checks the discriminant. Only used in
# older gdb.
def enum_provider(valobj):
if check_enum_discriminant(valobj):
return EnumProvider(valobj)
return None
# Helper to handle both old and new hash maps.
def hashmap_provider(valobj):
if is_hashbrown_hashmap(valobj):
return StdHashMapProvider(valobj)
else:
return StdOldHashMapProvider(valobj)
# Helper to handle both old and new hash sets.
def hashset_provider(valobj):
hash_map = valobj[valobj.type.fields()[0]]
if is_hashbrown_hashmap(hash_map):
return StdHashMapProvider(valobj, show_values=False)
else:
return StdOldHashMapProvider(hash_map, show_values=False)
class PrintByRustType(gdb.printing.SubPrettyPrinter):
def __init__(self, name, provider):
super(PrintByRustType, self).__init__(name)
self.provider = provider
def __call__(self, val):
if self.enabled:
return self.provider(val)
return None
class RustPrettyPrinter(gdb.printing.PrettyPrinter):
def __init__(self, name):
super(RustPrettyPrinter, self).__init__(name, [])
self.type_map = {}
def add(self, rust_type, provider):
# Just use the rust_type as the name.
printer = PrintByRustType(rust_type, provider)
self.type_map[rust_type] = printer
self.subprinters.append(printer)
def __call__(self, valobj):
rust_type = classify_rust_type(valobj.type)
if rust_type in self.type_map:
return self.type_map[rust_type](valobj)
return None
printer = RustPrettyPrinter("rust")
# use enum provider only for GDB <7.12
if gdb_version[0] < 7 or (gdb_version[0] == 7 and gdb_version[1] < 12):
printer.add(RustType.ENUM, enum_provider)
printer.add(RustType.STD_STRING, StdStringProvider)
printer.add(RustType.STD_OS_STRING, StdOsStringProvider)
printer.add(RustType.STD_STR, StdStrProvider)
printer.add(RustType.STD_SLICE, StdSliceProvider)
printer.add(RustType.STD_VEC, StdVecProvider)
printer.add(RustType.STD_VEC_DEQUE, StdVecDequeProvider)
printer.add(RustType.STD_BTREE_SET, StdBTreeSetProvider)
printer.add(RustType.STD_BTREE_MAP, StdBTreeMapProvider)
printer.add(RustType.STD_HASH_MAP, hashmap_provider)
printer.add(RustType.STD_HASH_SET, hashset_provider)
printer.add(RustType.STD_RC, StdRcProvider)
printer.add(RustType.STD_ARC, lambda valobj: StdRcProvider(valobj, is_atomic=True))
printer.add(RustType.STD_CELL, StdCellProvider)
printer.add(RustType.STD_REF, StdRefProvider)
printer.add(RustType.STD_REF_MUT, StdRefProvider)
printer.add(RustType.STD_REF_CELL, StdRefCellProvider)
printer.add(RustType.STD_NONZERO_NUMBER, StdNonZeroNumberProvider)
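# Registration sketch (mirrors the loader shown earlier in this commit):
#   register_printers(gdb.current_objfile())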


@@ -0,0 +1,458 @@
from sys import version_info
import gdb
if version_info[0] >= 3:
xrange = range
ZERO_FIELD = "__0"
FIRST_FIELD = "__1"
def unwrap_unique_or_non_null(unique_or_nonnull):
# BACKCOMPAT: rust 1.32
# https://github.com/rust-lang/rust/commit/7a0911528058e87d22ea305695f4047572c5e067
# BACKCOMPAT: rust 1.60
# https://github.com/rust-lang/rust/commit/2a91eeac1a2d27dd3de1bf55515d765da20fd86f
ptr = unique_or_nonnull["pointer"]
return ptr if ptr.type.code == gdb.TYPE_CODE_PTR else ptr[ptr.type.fields()[0]]
# GDB 14 has a tag class that indicates that extension methods are ok
# to call. Use of this tag only requires that printers hide local
# attributes and methods by prefixing them with "_".
if hasattr(gdb, 'ValuePrinter'):
printer_base = gdb.ValuePrinter
else:
printer_base = object
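# Illustrative only (hypothetical provider, not part of the set below): with
# gdb.ValuePrinter as the base, a provider keeps its state in "_"-prefixed
# attributes so GDB does not mistake them for extension methods.
class _ExamplePrinter(printer_base):
    def __init__(self, valobj):
        self._valobj = valobj  # underscore prefix: hidden from GDB
    def to_string(self):
        return str(self._valobj.type)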
class EnumProvider(printer_base):
def __init__(self, valobj):
content = valobj[valobj.type.fields()[0]]
fields = content.type.fields()
self._empty = len(fields) == 0
if not self._empty:
if len(fields) == 1:
discriminant = 0
else:
discriminant = int(content[fields[0]]) + 1
self._active_variant = content[fields[discriminant]]
self._name = fields[discriminant].name
self._full_name = "{}::{}".format(valobj.type.name, self._name)
else:
self._full_name = valobj.type.name
def to_string(self):
return self._full_name
def children(self):
if not self._empty:
yield self._name, self._active_variant
class StdStringProvider(printer_base):
def __init__(self, valobj):
self._valobj = valobj
vec = valobj["vec"]
self._length = int(vec["len"])
self._data_ptr = unwrap_unique_or_non_null(vec["buf"]["ptr"])
def to_string(self):
return self._data_ptr.lazy_string(encoding="utf-8", length=self._length)
@staticmethod
def display_hint():
return "string"
class StdOsStringProvider(printer_base):
def __init__(self, valobj):
self._valobj = valobj
buf = self._valobj["inner"]["inner"]
is_windows = "Wtf8Buf" in buf.type.name
vec = buf[ZERO_FIELD] if is_windows else buf
self._length = int(vec["len"])
self._data_ptr = unwrap_unique_or_non_null(vec["buf"]["ptr"])
def to_string(self):
return self._data_ptr.lazy_string(encoding="utf-8", length=self._length)
def display_hint(self):
return "string"
class StdStrProvider(printer_base):
def __init__(self, valobj):
self._valobj = valobj
self._length = int(valobj["length"])
self._data_ptr = valobj["data_ptr"]
def to_string(self):
return self._data_ptr.lazy_string(encoding="utf-8", length=self._length)
@staticmethod
def display_hint():
return "string"
def _enumerate_array_elements(element_ptrs):
for (i, element_ptr) in enumerate(element_ptrs):
key = "[{}]".format(i)
element = element_ptr.dereference()
try:
# rust-lang/rust#64343: passing deref expr to `str` allows
# catching exception on garbage pointer
str(element)
except RuntimeError:
yield key, "inaccessible"
break
yield key, element
class StdSliceProvider(printer_base):
def __init__(self, valobj):
self._valobj = valobj
self._length = int(valobj["length"])
self._data_ptr = valobj["data_ptr"]
def to_string(self):
return "{}(size={})".format(self._valobj.type, self._length)
def children(self):
return _enumerate_array_elements(
self._data_ptr + index for index in xrange(self._length)
)
@staticmethod
def display_hint():
return "array"
class StdVecProvider(printer_base):
def __init__(self, valobj):
self._valobj = valobj
self._length = int(valobj["len"])
self._data_ptr = unwrap_unique_or_non_null(valobj["buf"]["ptr"])
def to_string(self):
return "Vec(size={})".format(self._length)
def children(self):
return _enumerate_array_elements(
self._data_ptr + index for index in xrange(self._length)
)
@staticmethod
def display_hint():
return "array"
class StdVecDequeProvider(printer_base):
def __init__(self, valobj):
self._valobj = valobj
self._head = int(valobj["head"])
self._size = int(valobj["len"])
# BACKCOMPAT: rust 1.75
cap = valobj["buf"]["cap"]
if cap.type.code != gdb.TYPE_CODE_INT:
cap = cap[ZERO_FIELD]
self._cap = int(cap)
self._data_ptr = unwrap_unique_or_non_null(valobj["buf"]["ptr"])
def to_string(self):
return "VecDeque(size={})".format(self._size)
def children(self):
return _enumerate_array_elements(
(self._data_ptr + ((self._head + index) % self._cap)) for index in xrange(self._size)
)
@staticmethod
def display_hint():
return "array"
class StdRcProvider(printer_base):
def __init__(self, valobj, is_atomic=False):
self._valobj = valobj
self._is_atomic = is_atomic
self._ptr = unwrap_unique_or_non_null(valobj["ptr"])
self._value = self._ptr["data" if is_atomic else "value"]
self._strong = self._ptr["strong"]["v" if is_atomic else "value"]["value"]
self._weak = self._ptr["weak"]["v" if is_atomic else "value"]["value"] - 1
def to_string(self):
if self._is_atomic:
return "Arc(strong={}, weak={})".format(int(self._strong), int(self._weak))
else:
return "Rc(strong={}, weak={})".format(int(self._strong), int(self._weak))
def children(self):
yield "value", self._value
yield "strong", self._strong
yield "weak", self._weak
class StdCellProvider(printer_base):
def __init__(self, valobj):
self._value = valobj["value"]["value"]
def to_string(self):
return "Cell"
def children(self):
yield "value", self._value
class StdRefProvider(printer_base):
def __init__(self, valobj):
self._value = valobj["value"].dereference()
self._borrow = valobj["borrow"]["borrow"]["value"]["value"]
def to_string(self):
borrow = int(self._borrow)
if borrow >= 0:
return "Ref(borrow={})".format(borrow)
else:
return "Ref(borrow_mut={})".format(-borrow)
def children(self):
yield "*value", self._value
yield "borrow", self._borrow
class StdRefCellProvider(printer_base):
def __init__(self, valobj):
self._value = valobj["value"]["value"]
self._borrow = valobj["borrow"]["value"]["value"]
def to_string(self):
borrow = int(self._borrow)
if borrow >= 0:
return "RefCell(borrow={})".format(borrow)
else:
return "RefCell(borrow_mut={})".format(-borrow)
def children(self):
yield "value", self._value
yield "borrow", self._borrow
class StdNonZeroNumberProvider(printer_base):
def __init__(self, valobj):
fields = valobj.type.fields()
assert len(fields) == 1
field = list(fields)[0]
inner_valobj = valobj[field.name]
inner_fields = inner_valobj.type.fields()
assert len(inner_fields) == 1
inner_field = list(inner_fields)[0]
self._value = str(inner_valobj[inner_field.name])
def to_string(self):
return self._value
# Yields children (in a provider's sense of the word) for a BTreeMap.
def children_of_btree_map(map):
# Yields each key/value pair in the node and in any child nodes.
def children_of_node(node_ptr, height):
def cast_to_internal(node):
internal_type_name = node.type.target().name.replace("LeafNode", "InternalNode", 1)
internal_type = gdb.lookup_type(internal_type_name)
return node.cast(internal_type.pointer())
if node_ptr.type.name.startswith("alloc::collections::btree::node::BoxedNode<"):
# BACKCOMPAT: rust 1.49
node_ptr = node_ptr["ptr"]
node_ptr = unwrap_unique_or_non_null(node_ptr)
leaf = node_ptr.dereference()
keys = leaf["keys"]
vals = leaf["vals"]
edges = cast_to_internal(node_ptr)["edges"] if height > 0 else None
length = leaf["len"]
for i in xrange(0, length + 1):
if height > 0:
child_ptr = edges[i]["value"]["value"]
for child in children_of_node(child_ptr, height - 1):
yield child
if i < length:
# Avoid "Cannot perform pointer math on incomplete type" on zero-sized arrays.
key_type_size = keys.type.sizeof
val_type_size = vals.type.sizeof
key = keys[i]["value"]["value"] if key_type_size > 0 else gdb.parse_and_eval("()")
val = vals[i]["value"]["value"] if val_type_size > 0 else gdb.parse_and_eval("()")
yield key, val
if map["length"] > 0:
root = map["root"]
if root.type.name.startswith("core::option::Option<"):
root = root.cast(gdb.lookup_type(root.type.name[21:-1]))
node_ptr = root["node"]
height = root["height"]
for child in children_of_node(node_ptr, height):
yield child
class StdBTreeSetProvider(printer_base):
def __init__(self, valobj):
self._valobj = valobj
def to_string(self):
return "BTreeSet(size={})".format(self._valobj["map"]["length"])
def children(self):
inner_map = self._valobj["map"]
for i, (child, _) in enumerate(children_of_btree_map(inner_map)):
yield "[{}]".format(i), child
@staticmethod
def display_hint():
return "array"
class StdBTreeMapProvider(printer_base):
def __init__(self, valobj):
self._valobj = valobj
def to_string(self):
return "BTreeMap(size={})".format(self._valobj["length"])
def children(self):
for i, (key, val) in enumerate(children_of_btree_map(self._valobj)):
yield "key{}".format(i), key
yield "val{}".format(i), val
@staticmethod
def display_hint():
return "map"
# BACKCOMPAT: rust 1.35
class StdOldHashMapProvider(printer_base):
def __init__(self, valobj, show_values=True):
self._valobj = valobj
self._show_values = show_values
self._table = self._valobj["table"]
self._size = int(self._table["size"])
self._hashes = self._table["hashes"]
self._hash_uint_type = self._hashes.type
self._hash_uint_size = self._hashes.type.sizeof
self._modulo = 2 ** self._hash_uint_size
self._data_ptr = self._hashes[ZERO_FIELD]["pointer"]
self._capacity_mask = int(self._table["capacity_mask"])
self._capacity = (self._capacity_mask + 1) % self._modulo
marker = self._table["marker"].type
self._pair_type = marker.template_argument(0)
self._pair_type_size = self._pair_type.sizeof
self._valid_indices = []
for idx in range(self._capacity):
data_ptr = self._data_ptr.cast(self._hash_uint_type.pointer())
address = data_ptr + idx
hash_uint = address.dereference()
hash_ptr = hash_uint[ZERO_FIELD]["pointer"]
if int(hash_ptr) != 0:
self._valid_indices.append(idx)
def to_string(self):
if self._show_values:
return "HashMap(size={})".format(self._size)
else:
return "HashSet(size={})".format(self._size)
def children(self):
start = int(self._data_ptr) & ~1
hashes = self._hash_uint_size * self._capacity
align = self._pair_type_size
len_rounded_up = (((((hashes + align) % self._modulo - 1) % self._modulo) & ~(
(align - 1) % self._modulo)) % self._modulo - hashes) % self._modulo
pairs_offset = hashes + len_rounded_up
pairs_start = gdb.Value(start + pairs_offset).cast(self._pair_type.pointer())
for index in range(self._size):
table_index = self._valid_indices[index]
idx = table_index & self._capacity_mask
element = (pairs_start + idx).dereference()
if self._show_values:
yield "key{}".format(index), element[ZERO_FIELD]
yield "val{}".format(index), element[FIRST_FIELD]
else:
yield "[{}]".format(index), element[ZERO_FIELD]
def display_hint(self):
return "map" if self._show_values else "array"
class StdHashMapProvider(printer_base):
def __init__(self, valobj, show_values=True):
self._valobj = valobj
self._show_values = show_values
table = self._table()
table_inner = table["table"]
capacity = int(table_inner["bucket_mask"]) + 1
ctrl = table_inner["ctrl"]["pointer"]
self._size = int(table_inner["items"])
self._pair_type = table.type.template_argument(0).strip_typedefs()
self._new_layout = not table_inner.type.has_key("data")
if self._new_layout:
self._data_ptr = ctrl.cast(self._pair_type.pointer())
else:
self._data_ptr = table_inner["data"]["pointer"]
self._valid_indices = []
for idx in range(capacity):
address = ctrl + idx
value = address.dereference()
            is_present = value & 128 == 0
            if is_present:
self._valid_indices.append(idx)
def _table(self):
if self._show_values:
hashbrown_hashmap = self._valobj["base"]
elif self._valobj.type.fields()[0].name == "map":
# BACKCOMPAT: rust 1.47
# HashSet wraps std::collections::HashMap, which wraps hashbrown::HashMap
hashbrown_hashmap = self._valobj["map"]["base"]
else:
# HashSet wraps hashbrown::HashSet, which wraps hashbrown::HashMap
hashbrown_hashmap = self._valobj["base"]["map"]
return hashbrown_hashmap["table"]
def to_string(self):
if self._show_values:
return "HashMap(size={})".format(self._size)
else:
return "HashSet(size={})".format(self._size)
def children(self):
pairs_start = self._data_ptr
for index in range(self._size):
idx = self._valid_indices[index]
if self._new_layout:
idx = -(idx + 1)
element = (pairs_start + idx).dereference()
if self._show_values:
yield "key{}".format(index), element[ZERO_FIELD]
yield "val{}".format(index), element[FIRST_FIELD]
else:
yield "[{}]".format(index), element[ZERO_FIELD]
def display_hint(self):
return "map" if self._show_values else "array"


@@ -0,0 +1,22 @@
type synthetic add -l lldb_lookup.synthetic_lookup -x ".*" --category Rust
type summary add -F lldb_lookup.summary_lookup -e -x -h "^(alloc::([a-z_]+::)+)String$" --category Rust
type summary add -F lldb_lookup.summary_lookup -e -x -h "^&(mut )?str$" --category Rust
type summary add -F lldb_lookup.summary_lookup -e -x -h "^&(mut )?\\[.+\\]$" --category Rust
type summary add -F lldb_lookup.summary_lookup -e -x -h "^(std::ffi::([a-z_]+::)+)OsString$" --category Rust
type summary add -F lldb_lookup.summary_lookup -e -x -h "^(alloc::([a-z_]+::)+)Vec<.+>$" --category Rust
type summary add -F lldb_lookup.summary_lookup -e -x -h "^(alloc::([a-z_]+::)+)VecDeque<.+>$" --category Rust
type summary add -F lldb_lookup.summary_lookup -e -x -h "^(alloc::([a-z_]+::)+)BTreeSet<.+>$" --category Rust
type summary add -F lldb_lookup.summary_lookup -e -x -h "^(alloc::([a-z_]+::)+)BTreeMap<.+>$" --category Rust
type summary add -F lldb_lookup.summary_lookup -e -x -h "^(std::collections::([a-z_]+::)+)HashMap<.+>$" --category Rust
type summary add -F lldb_lookup.summary_lookup -e -x -h "^(std::collections::([a-z_]+::)+)HashSet<.+>$" --category Rust
type summary add -F lldb_lookup.summary_lookup -e -x -h "^(alloc::([a-z_]+::)+)Rc<.+>$" --category Rust
type summary add -F lldb_lookup.summary_lookup -e -x -h "^(alloc::([a-z_]+::)+)Arc<.+>$" --category Rust
type summary add -F lldb_lookup.summary_lookup -e -x -h "^(core::([a-z_]+::)+)Cell<.+>$" --category Rust
type summary add -F lldb_lookup.summary_lookup -e -x -h "^(core::([a-z_]+::)+)Ref<.+>$" --category Rust
type summary add -F lldb_lookup.summary_lookup -e -x -h "^(core::([a-z_]+::)+)RefMut<.+>$" --category Rust
type summary add -F lldb_lookup.summary_lookup -e -x -h "^(core::([a-z_]+::)+)RefCell<.+>$" --category Rust
type summary add -F lldb_lookup.summary_lookup -e -x -h "^(core::([a-z_]+::)+)NonZero<.+>$" --category Rust
type summary add -F lldb_lookup.summary_lookup -e -x -h "^core::num::([a-z_]+::)*NonZero.+$" --category Rust
type summary add -F lldb_lookup.summary_lookup -e -x -h "^(std::([a-z_]+::)+)PathBuf$" --category Rust
type summary add -F lldb_lookup.summary_lookup -e -x -h "^&(mut )?(std::([a-z_]+::)+)Path$" --category Rust
type category enable Rust
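# Loading sketch (paths are assumptions): import the lookup module, then
# source this command file from an interactive lldb session:
#   (lldb) command script import /path/to/lldb_lookup.py
#   (lldb) command source /path/to/this_command_file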


@@ -0,0 +1,124 @@
import lldb
from lldb_providers import *
from rust_types import RustType, classify_struct, classify_union
# BACKCOMPAT: rust 1.35
def is_hashbrown_hashmap(hash_map):
return len(hash_map.type.fields) == 1
def classify_rust_type(type):
type_class = type.GetTypeClass()
if type_class == lldb.eTypeClassStruct:
return classify_struct(type.name, type.fields)
if type_class == lldb.eTypeClassUnion:
return classify_union(type.fields)
return RustType.OTHER
def summary_lookup(valobj, dict):
# type: (SBValue, dict) -> str
"""Returns the summary provider for the given value"""
rust_type = classify_rust_type(valobj.GetType())
if rust_type == RustType.STD_STRING:
return StdStringSummaryProvider(valobj, dict)
if rust_type == RustType.STD_OS_STRING:
return StdOsStringSummaryProvider(valobj, dict)
if rust_type == RustType.STD_STR:
return StdStrSummaryProvider(valobj, dict)
if rust_type == RustType.STD_VEC:
return SizeSummaryProvider(valobj, dict)
if rust_type == RustType.STD_VEC_DEQUE:
return SizeSummaryProvider(valobj, dict)
if rust_type == RustType.STD_SLICE:
return SizeSummaryProvider(valobj, dict)
if rust_type == RustType.STD_HASH_MAP:
return SizeSummaryProvider(valobj, dict)
if rust_type == RustType.STD_HASH_SET:
return SizeSummaryProvider(valobj, dict)
if rust_type == RustType.STD_RC:
return StdRcSummaryProvider(valobj, dict)
if rust_type == RustType.STD_ARC:
return StdRcSummaryProvider(valobj, dict)
if rust_type == RustType.STD_REF:
return StdRefSummaryProvider(valobj, dict)
if rust_type == RustType.STD_REF_MUT:
return StdRefSummaryProvider(valobj, dict)
if rust_type == RustType.STD_REF_CELL:
return StdRefSummaryProvider(valobj, dict)
if rust_type == RustType.STD_NONZERO_NUMBER:
return StdNonZeroNumberSummaryProvider(valobj, dict)
if rust_type == RustType.STD_PATHBUF:
return StdPathBufSummaryProvider(valobj, dict)
if rust_type == RustType.STD_PATH:
return StdPathSummaryProvider(valobj, dict)
return ""
def synthetic_lookup(valobj, dict):
# type: (SBValue, dict) -> object
"""Returns the synthetic provider for the given value"""
rust_type = classify_rust_type(valobj.GetType())
if rust_type == RustType.STRUCT:
return StructSyntheticProvider(valobj, dict)
if rust_type == RustType.STRUCT_VARIANT:
return StructSyntheticProvider(valobj, dict, is_variant=True)
if rust_type == RustType.TUPLE:
return TupleSyntheticProvider(valobj, dict)
if rust_type == RustType.TUPLE_VARIANT:
return TupleSyntheticProvider(valobj, dict, is_variant=True)
if rust_type == RustType.EMPTY:
return EmptySyntheticProvider(valobj, dict)
if rust_type == RustType.REGULAR_ENUM:
discriminant = valobj.GetChildAtIndex(0).GetChildAtIndex(0).GetValueAsUnsigned()
return synthetic_lookup(valobj.GetChildAtIndex(discriminant), dict)
if rust_type == RustType.SINGLETON_ENUM:
return synthetic_lookup(valobj.GetChildAtIndex(0), dict)
if rust_type == RustType.ENUM:
return ClangEncodedEnumProvider(valobj, dict)
if rust_type == RustType.STD_VEC:
return StdVecSyntheticProvider(valobj, dict)
if rust_type == RustType.STD_VEC_DEQUE:
return StdVecDequeSyntheticProvider(valobj, dict)
if rust_type == RustType.STD_SLICE:
return StdSliceSyntheticProvider(valobj, dict)
if rust_type == RustType.STD_HASH_MAP:
if is_hashbrown_hashmap(valobj):
return StdHashMapSyntheticProvider(valobj, dict)
else:
return StdOldHashMapSyntheticProvider(valobj, dict)
if rust_type == RustType.STD_HASH_SET:
hash_map = valobj.GetChildAtIndex(0)
if is_hashbrown_hashmap(hash_map):
return StdHashMapSyntheticProvider(valobj, dict, show_values=False)
else:
return StdOldHashMapSyntheticProvider(hash_map, dict, show_values=False)
if rust_type == RustType.STD_RC:
return StdRcSyntheticProvider(valobj, dict)
if rust_type == RustType.STD_ARC:
return StdRcSyntheticProvider(valobj, dict, is_atomic=True)
if rust_type == RustType.STD_CELL:
return StdCellSyntheticProvider(valobj, dict)
if rust_type == RustType.STD_REF:
return StdRefSyntheticProvider(valobj, dict)
if rust_type == RustType.STD_REF_MUT:
return StdRefSyntheticProvider(valobj, dict)
if rust_type == RustType.STD_REF_CELL:
return StdRefSyntheticProvider(valobj, dict, is_cell=True)
return DefaultSyntheticProvider(valobj, dict)


@@ -0,0 +1,835 @@
import sys
from lldb import SBData, SBError, eBasicTypeLong, eBasicTypeUnsignedLong, \
eBasicTypeUnsignedChar
# from lldb.formatters import Logger
####################################################################################################
# This file contains two kinds of pretty-printers: summary and synthetic.
#
# Important classes from LLDB module:
# SBValue: the value of a variable, a register, or an expression
# SBType: the data type; each SBValue has a corresponding SBType
#
# Summary provider is a function with the type `(SBValue, dict) -> str`.
# The first parameter is the object encapsulating the actual variable being displayed;
# The second parameter is an internal support parameter used by LLDB, and you should not touch it.
#
# Synthetic children is the way to provide a children-based representation of the object's value.
# Synthetic provider is a class that implements the following interface:
#
# class SyntheticChildrenProvider:
# def __init__(self, SBValue, dict)
# def num_children(self)
# def get_child_index(self, str)
# def get_child_at_index(self, int)
# def update(self)
# def has_children(self)
# def get_value(self)
#
#
# You can find more information and examples here:
# 1. https://lldb.llvm.org/varformats.html
# 2. https://lldb.llvm.org/use/python-reference.html
# 3. https://lldb.llvm.org/python_reference/lldb.formatters.cpp.libcxx-pysrc.html
# 4. https://github.com/llvm-mirror/lldb/tree/master/examples/summaries/cocoa
####################################################################################################
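# Minimal illustrative summary provider (hypothetical and unregistered); for
# the synthetic kind, DefaultSyntheticProvider below is the simplest case.
def _example_summary_provider(valobj, dict):
    # type: (SBValue, dict) -> str
    return "children=" + str(valobj.GetNumChildren())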
PY3 = sys.version_info[0] == 3
class ValueBuilder:
def __init__(self, valobj):
# type: (SBValue) -> ValueBuilder
self.valobj = valobj
process = valobj.GetProcess()
self.endianness = process.GetByteOrder()
self.pointer_size = process.GetAddressByteSize()
def from_int(self, name, value):
# type: (str, int) -> SBValue
type = self.valobj.GetType().GetBasicType(eBasicTypeLong)
data = SBData.CreateDataFromSInt64Array(self.endianness, self.pointer_size, [value])
return self.valobj.CreateValueFromData(name, data, type)
def from_uint(self, name, value):
# type: (str, int) -> SBValue
type = self.valobj.GetType().GetBasicType(eBasicTypeUnsignedLong)
data = SBData.CreateDataFromUInt64Array(self.endianness, self.pointer_size, [value])
return self.valobj.CreateValueFromData(name, data, type)
def unwrap_unique_or_non_null(unique_or_nonnull):
# BACKCOMPAT: rust 1.32
# https://github.com/rust-lang/rust/commit/7a0911528058e87d22ea305695f4047572c5e067
# BACKCOMPAT: rust 1.60
# https://github.com/rust-lang/rust/commit/2a91eeac1a2d27dd3de1bf55515d765da20fd86f
ptr = unique_or_nonnull.GetChildMemberWithName("pointer")
return ptr if ptr.TypeIsPointerType() else ptr.GetChildAtIndex(0)
class DefaultSyntheticProvider:
def __init__(self, valobj, dict):
# type: (SBValue, dict) -> DefaultSyntheticProvider
# logger = Logger.Logger()
# logger >> "Default synthetic provider for " + str(valobj.GetName())
self.valobj = valobj
def num_children(self):
# type: () -> int
return self.valobj.GetNumChildren()
def get_child_index(self, name):
# type: (str) -> int
return self.valobj.GetIndexOfChildWithName(name)
def get_child_at_index(self, index):
# type: (int) -> SBValue
return self.valobj.GetChildAtIndex(index)
def update(self):
# type: () -> None
pass
def has_children(self):
# type: () -> bool
return self.valobj.MightHaveChildren()
class EmptySyntheticProvider:
def __init__(self, valobj, dict):
# type: (SBValue, dict) -> EmptySyntheticProvider
# logger = Logger.Logger()
# logger >> "[EmptySyntheticProvider] for " + str(valobj.GetName())
self.valobj = valobj
def num_children(self):
# type: () -> int
return 0
def get_child_index(self, name):
# type: (str) -> int
return None
def get_child_at_index(self, index):
# type: (int) -> SBValue
return None
def update(self):
# type: () -> None
pass
def has_children(self):
# type: () -> bool
return False
def SizeSummaryProvider(valobj, dict):
# type: (SBValue, dict) -> str
return 'size=' + str(valobj.GetNumChildren())
def vec_to_string(vec):
length = vec.GetNumChildren()
chars = [vec.GetChildAtIndex(i).GetValueAsUnsigned() for i in range(length)]
return bytes(chars).decode(errors='replace') if PY3 else "".join(chr(char) for char in chars)
def StdStringSummaryProvider(valobj, dict):
# type: (SBValue, dict) -> str
# logger = Logger.Logger()
# logger >> "[StdStringSummaryProvider] for " + str(valobj.GetName())
vec = valobj.GetChildAtIndex(0)
return '"%s"' % vec_to_string(vec)
def StdOsStringSummaryProvider(valobj, dict):
# type: (SBValue, dict) -> str
# logger = Logger.Logger()
# logger >> "[StdOsStringSummaryProvider] for " + str(valobj.GetName())
buf = valobj.GetChildAtIndex(0).GetChildAtIndex(0)
is_windows = "Wtf8Buf" in buf.type.name
vec = buf.GetChildAtIndex(0) if is_windows else buf
return '"%s"' % vec_to_string(vec)
def StdStrSummaryProvider(valobj, dict):
# type: (SBValue, dict) -> str
# logger = Logger.Logger()
# logger >> "[StdStrSummaryProvider] for " + str(valobj.GetName())
length = valobj.GetChildMemberWithName("length").GetValueAsUnsigned()
if length == 0:
return '""'
data_ptr = valobj.GetChildMemberWithName("data_ptr")
start = data_ptr.GetValueAsUnsigned()
error = SBError()
process = data_ptr.GetProcess()
data = process.ReadMemory(start, length, error)
data = data.decode(encoding='UTF-8') if PY3 else data
return '"%s"' % data
def StdPathBufSummaryProvider(valobj, dict):
# type: (SBValue, dict) -> str
# logger = Logger.Logger()
# logger >> "[StdPathBufSummaryProvider] for " + str(valobj.GetName())
return StdOsStringSummaryProvider(valobj.GetChildMemberWithName("inner"), dict)
def StdPathSummaryProvider(valobj, dict):
# type: (SBValue, dict) -> str
# logger = Logger.Logger()
# logger >> "[StdPathSummaryProvider] for " + str(valobj.GetName())
length = valobj.GetChildMemberWithName("length").GetValueAsUnsigned()
if length == 0:
return '""'
data_ptr = valobj.GetChildMemberWithName("data_ptr")
start = data_ptr.GetValueAsUnsigned()
error = SBError()
process = data_ptr.GetProcess()
data = process.ReadMemory(start, length, error)
if PY3:
try:
data = data.decode(encoding='UTF-8')
except UnicodeDecodeError:
return '%r' % data
return '"%s"' % data
class StructSyntheticProvider:
"""Pretty-printer for structs and struct enum variants"""
def __init__(self, valobj, dict, is_variant=False):
# type: (SBValue, dict, bool) -> StructSyntheticProvider
# logger = Logger.Logger()
self.valobj = valobj
self.is_variant = is_variant
self.type = valobj.GetType()
self.fields = {}
if is_variant:
self.fields_count = self.type.GetNumberOfFields() - 1
real_fields = self.type.fields[1:]
else:
self.fields_count = self.type.GetNumberOfFields()
real_fields = self.type.fields
for number, field in enumerate(real_fields):
self.fields[field.name] = number
def num_children(self):
# type: () -> int
return self.fields_count
def get_child_index(self, name):
# type: (str) -> int
return self.fields.get(name, -1)
def get_child_at_index(self, index):
# type: (int) -> SBValue
if self.is_variant:
field = self.type.GetFieldAtIndex(index + 1)
else:
field = self.type.GetFieldAtIndex(index)
return self.valobj.GetChildMemberWithName(field.name)
def update(self):
# type: () -> None
pass
def has_children(self):
# type: () -> bool
return True
class ClangEncodedEnumProvider:
"""Pretty-printer for 'clang-encoded' enums support implemented in LLDB"""
DISCRIMINANT_MEMBER_NAME = "$discr$"
VALUE_MEMBER_NAME = "value"
def __init__(self, valobj, dict):
self.valobj = valobj
self.update()
def has_children(self):
return True
def num_children(self):
if self.is_default:
return 1
return 2
def get_child_index(self, name):
if name == ClangEncodedEnumProvider.VALUE_MEMBER_NAME:
return 0
if name == ClangEncodedEnumProvider.DISCRIMINANT_MEMBER_NAME:
return 1
return -1
def get_child_at_index(self, index):
if index == 0:
return self.variant.GetChildMemberWithName(ClangEncodedEnumProvider.VALUE_MEMBER_NAME)
if index == 1:
return self.variant.GetChildMemberWithName(
ClangEncodedEnumProvider.DISCRIMINANT_MEMBER_NAME)
def update(self):
all_variants = self.valobj.GetChildAtIndex(0)
index = self._getCurrentVariantIndex(all_variants)
self.variant = all_variants.GetChildAtIndex(index)
self.is_default = self.variant.GetIndexOfChildWithName(
ClangEncodedEnumProvider.DISCRIMINANT_MEMBER_NAME) == -1
def _getCurrentVariantIndex(self, all_variants):
default_index = 0
for i in range(all_variants.GetNumChildren()):
variant = all_variants.GetChildAtIndex(i)
discr = variant.GetChildMemberWithName(
ClangEncodedEnumProvider.DISCRIMINANT_MEMBER_NAME)
if discr.IsValid():
discr_unsigned_value = discr.GetValueAsUnsigned()
if variant.GetName() == f"$variant${discr_unsigned_value}":
return i
else:
default_index = i
return default_index
class TupleSyntheticProvider:
"""Pretty-printer for tuples and tuple enum variants"""
def __init__(self, valobj, dict, is_variant=False):
# type: (SBValue, dict, bool) -> TupleSyntheticProvider
# logger = Logger.Logger()
self.valobj = valobj
self.is_variant = is_variant
self.type = valobj.GetType()
if is_variant:
self.size = self.type.GetNumberOfFields() - 1
else:
self.size = self.type.GetNumberOfFields()
def num_children(self):
# type: () -> int
return self.size
def get_child_index(self, name):
# type: (str) -> int
if name.isdigit():
return int(name)
else:
return -1
def get_child_at_index(self, index):
# type: (int) -> SBValue
if self.is_variant:
field = self.type.GetFieldAtIndex(index + 1)
else:
field = self.type.GetFieldAtIndex(index)
element = self.valobj.GetChildMemberWithName(field.name)
return self.valobj.CreateValueFromData(str(index), element.GetData(), element.GetType())
def update(self):
# type: () -> None
pass
def has_children(self):
# type: () -> bool
return True
class StdVecSyntheticProvider:
"""Pretty-printer for alloc::vec::Vec<T>
struct Vec<T> { buf: RawVec<T>, len: usize }
rust 1.75: struct RawVec<T> { ptr: Unique<T>, cap: usize, ... }
rust 1.76: struct RawVec<T> { ptr: Unique<T>, cap: Cap(usize), ... }
rust 1.31.1: struct Unique<T: ?Sized> { pointer: NonZero<*const T>, ... }
rust 1.33.0: struct Unique<T: ?Sized> { pointer: *const T, ... }
rust 1.62.0: struct Unique<T: ?Sized> { pointer: NonNull<T>, ... }
struct NonZero<T>(T)
struct NonNull<T> { pointer: *const T }
"""
def __init__(self, valobj, dict):
# type: (SBValue, dict) -> StdVecSyntheticProvider
# logger = Logger.Logger()
# logger >> "[StdVecSyntheticProvider] for " + str(valobj.GetName())
self.valobj = valobj
self.update()
def num_children(self):
# type: () -> int
return self.length
def get_child_index(self, name):
# type: (str) -> int
index = name.lstrip('[').rstrip(']')
if index.isdigit():
return int(index)
else:
return -1
def get_child_at_index(self, index):
# type: (int) -> SBValue
start = self.data_ptr.GetValueAsUnsigned()
address = start + index * self.element_type_size
element = self.data_ptr.CreateValueFromAddress("[%s]" % index, address, self.element_type)
return element
def update(self):
# type: () -> None
self.length = self.valobj.GetChildMemberWithName("len").GetValueAsUnsigned()
self.buf = self.valobj.GetChildMemberWithName("buf")
self.data_ptr = unwrap_unique_or_non_null(self.buf.GetChildMemberWithName("ptr"))
self.element_type = self.data_ptr.GetType().GetPointeeType()
self.element_type_size = self.element_type.GetByteSize()
def has_children(self):
# type: () -> bool
return True
class StdSliceSyntheticProvider:
def __init__(self, valobj, dict):
self.valobj = valobj
self.update()
def num_children(self):
# type: () -> int
return self.length
def get_child_index(self, name):
# type: (str) -> int
index = name.lstrip('[').rstrip(']')
if index.isdigit():
return int(index)
else:
return -1
def get_child_at_index(self, index):
# type: (int) -> SBValue
start = self.data_ptr.GetValueAsUnsigned()
address = start + index * self.element_type_size
element = self.data_ptr.CreateValueFromAddress("[%s]" % index, address, self.element_type)
return element
def update(self):
# type: () -> None
self.length = self.valobj.GetChildMemberWithName("length").GetValueAsUnsigned()
self.data_ptr = self.valobj.GetChildMemberWithName("data_ptr")
self.element_type = self.data_ptr.GetType().GetPointeeType()
self.element_type_size = self.element_type.GetByteSize()
def has_children(self):
# type: () -> bool
return True
class StdVecDequeSyntheticProvider:
"""Pretty-printer for alloc::collections::vec_deque::VecDeque<T>
struct VecDeque<T> { head: usize, len: usize, buf: RawVec<T> }
"""
def __init__(self, valobj, dict):
# type: (SBValue, dict) -> StdVecDequeSyntheticProvider
# logger = Logger.Logger()
# logger >> "[StdVecDequeSyntheticProvider] for " + str(valobj.GetName())
self.valobj = valobj
self.update()
def num_children(self):
# type: () -> int
return self.size
def get_child_index(self, name):
# type: (str) -> int
index = name.lstrip('[').rstrip(']')
if index.isdigit() and int(index) < self.size:
return int(index)
else:
return -1
def get_child_at_index(self, index):
# type: (int) -> SBValue
start = self.data_ptr.GetValueAsUnsigned()
address = start + ((index + self.head) % self.cap) * self.element_type_size
element = self.data_ptr.CreateValueFromAddress("[%s]" % index, address, self.element_type)
return element
def update(self):
# type: () -> None
self.head = self.valobj.GetChildMemberWithName("head").GetValueAsUnsigned()
self.size = self.valobj.GetChildMemberWithName("len").GetValueAsUnsigned()
self.buf = self.valobj.GetChildMemberWithName("buf")
cap = self.buf.GetChildMemberWithName("cap")
if cap.GetType().num_fields == 1:
cap = cap.GetChildAtIndex(0)
self.cap = cap.GetValueAsUnsigned()
self.data_ptr = unwrap_unique_or_non_null(self.buf.GetChildMemberWithName("ptr"))
self.element_type = self.data_ptr.GetType().GetPointeeType()
self.element_type_size = self.element_type.GetByteSize()
def has_children(self):
# type: () -> bool
return True
# BACKCOMPAT: rust 1.35
class StdOldHashMapSyntheticProvider:
"""Pretty-printer for std::collections::hash::map::HashMap<K, V, S>
struct HashMap<K, V, S> {..., table: RawTable<K, V>, ... }
struct RawTable<K, V> { capacity_mask: usize, size: usize, hashes: TaggedHashUintPtr, ... }
"""
def __init__(self, valobj, dict, show_values=True):
# type: (SBValue, dict, bool) -> StdOldHashMapSyntheticProvider
self.valobj = valobj
self.show_values = show_values
self.update()
def num_children(self):
# type: () -> int
return self.size
def get_child_index(self, name):
# type: (str) -> int
index = name.lstrip('[').rstrip(']')
if index.isdigit():
return int(index)
else:
return -1
def get_child_at_index(self, index):
# type: (int) -> SBValue
# logger = Logger.Logger()
start = self.data_ptr.GetValueAsUnsigned() & ~1
        # See `libstd/collections/hash/table.rs:raw_bucket_at`
hashes = self.hash_uint_size * self.capacity
align = self.pair_type_size
# See `libcore/alloc.rs:padding_needed_for`
len_rounded_up = (((((hashes + align) % self.modulo - 1) % self.modulo) & ~(
(align - 1) % self.modulo)) % self.modulo - hashes) % self.modulo
# len_rounded_up = ((hashes + align - 1) & ~(align - 1)) - hashes
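        # i.e. round `hashes` up to the next multiple of `align`; the extra
        # `% self.modulo` terms mimic wrapping word-sized (usize) arithmetic.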
pairs_offset = hashes + len_rounded_up
pairs_start = start + pairs_offset
table_index = self.valid_indices[index]
idx = table_index & self.capacity_mask
address = pairs_start + idx * self.pair_type_size
element = self.data_ptr.CreateValueFromAddress("[%s]" % index, address, self.pair_type)
if self.show_values:
return element
else:
key = element.GetChildAtIndex(0)
return self.valobj.CreateValueFromData("[%s]" % index, key.GetData(), key.GetType())
def update(self):
# type: () -> None
# logger = Logger.Logger()
self.table = self.valobj.GetChildMemberWithName("table") # type: SBValue
self.size = self.table.GetChildMemberWithName("size").GetValueAsUnsigned()
self.hashes = self.table.GetChildMemberWithName("hashes")
self.hash_uint_type = self.hashes.GetType()
self.hash_uint_size = self.hashes.GetType().GetByteSize()
self.modulo = 2 ** self.hash_uint_size
self.data_ptr = self.hashes.GetChildAtIndex(0).GetChildAtIndex(0)
self.capacity_mask = self.table.GetChildMemberWithName("capacity_mask").GetValueAsUnsigned()
self.capacity = (self.capacity_mask + 1) % self.modulo
marker = self.table.GetChildMemberWithName("marker").GetType() # type: SBType
self.pair_type = marker.template_args[0]
self.pair_type_size = self.pair_type.GetByteSize()
self.valid_indices = []
for idx in range(self.capacity):
address = self.data_ptr.GetValueAsUnsigned() + idx * self.hash_uint_size
hash_uint = self.data_ptr.CreateValueFromAddress("[%s]" % idx, address,
self.hash_uint_type)
hash_ptr = hash_uint.GetChildAtIndex(0).GetChildAtIndex(0)
if hash_ptr.GetValueAsUnsigned() != 0:
self.valid_indices.append(idx)
# logger >> "Valid indices: {}".format(str(self.valid_indices))
def has_children(self):
# type: () -> bool
return True
class StdHashMapSyntheticProvider:
"""Pretty-printer for hashbrown's HashMap"""
def __init__(self, valobj, dict, show_values=True):
# type: (SBValue, dict, bool) -> StdHashMapSyntheticProvider
self.valobj = valobj
self.show_values = show_values
self.update()
def num_children(self):
# type: () -> int
return self.size
def get_child_index(self, name):
# type: (str) -> int
index = name.lstrip('[').rstrip(']')
if index.isdigit():
return int(index)
else:
return -1
def get_child_at_index(self, index):
# type: (int) -> SBValue
pairs_start = self.data_ptr.GetValueAsUnsigned()
idx = self.valid_indices[index]
if self.new_layout:
idx = -(idx + 1)
address = pairs_start + idx * self.pair_type_size
element = self.data_ptr.CreateValueFromAddress("[%s]" % index, address, self.pair_type)
if self.show_values:
return element
else:
key = element.GetChildAtIndex(0)
return self.valobj.CreateValueFromData("[%s]" % index, key.GetData(), key.GetType())
def update(self):
# type: () -> None
table = self.table()
inner_table = table.GetChildMemberWithName("table")
capacity = inner_table.GetChildMemberWithName("bucket_mask").GetValueAsUnsigned() + 1
ctrl = inner_table.GetChildMemberWithName("ctrl").GetChildAtIndex(0)
self.size = inner_table.GetChildMemberWithName("items").GetValueAsUnsigned()
self.pair_type = table.type.template_args[0]
if self.pair_type.IsTypedefType():
self.pair_type = self.pair_type.GetTypedefedType()
self.pair_type_size = self.pair_type.GetByteSize()
self.new_layout = not inner_table.GetChildMemberWithName("data").IsValid()
if self.new_layout:
self.data_ptr = ctrl.Cast(self.pair_type.GetPointerType())
else:
self.data_ptr = inner_table.GetChildMemberWithName("data").GetChildAtIndex(0)
u8_type = self.valobj.GetTarget().GetBasicType(eBasicTypeUnsignedChar)
u8_type_size = self.valobj.GetTarget().GetBasicType(eBasicTypeUnsignedChar).GetByteSize()
self.valid_indices = []
for idx in range(capacity):
address = ctrl.GetValueAsUnsigned() + idx * u8_type_size
value = ctrl.CreateValueFromAddress("ctrl[%s]" % idx, address,
u8_type).GetValueAsUnsigned()
is_present = value & 128 == 0
if is_present:
self.valid_indices.append(idx)
def table(self):
# type: () -> SBValue
if self.show_values:
hashbrown_hashmap = self.valobj.GetChildMemberWithName("base")
else:
# BACKCOMPAT: rust 1.47
# HashSet wraps either std HashMap or hashbrown::HashSet, which both
# wrap hashbrown::HashMap, so either way we "unwrap" twice.
hashbrown_hashmap = self.valobj.GetChildAtIndex(0).GetChildAtIndex(0)
return hashbrown_hashmap.GetChildMemberWithName("table")
def has_children(self):
# type: () -> bool
return True
def StdRcSummaryProvider(valobj, dict):
# type: (SBValue, dict) -> str
strong = valobj.GetChildMemberWithName("strong").GetValueAsUnsigned()
weak = valobj.GetChildMemberWithName("weak").GetValueAsUnsigned()
return "strong={}, weak={}".format(strong, weak)
class StdRcSyntheticProvider:
"""Pretty-printer for alloc::rc::Rc<T> and alloc::sync::Arc<T>
struct Rc<T> { ptr: NonNull<RcBox<T>>, ... }
rust 1.31.1: struct NonNull<T> { pointer: NonZero<*const T> }
rust 1.33.0: struct NonNull<T> { pointer: *const T }
struct NonZero<T>(T)
struct RcBox<T> { strong: Cell<usize>, weak: Cell<usize>, value: T }
struct Cell<T> { value: UnsafeCell<T> }
struct UnsafeCell<T> { value: T }
struct Arc<T> { ptr: NonNull<ArcInner<T>>, ... }
struct ArcInner<T> { strong: atomic::AtomicUsize, weak: atomic::AtomicUsize, data: T }
struct AtomicUsize { v: UnsafeCell<usize> }
"""
def __init__(self, valobj, dict, is_atomic=False):
# type: (SBValue, dict, bool) -> StdRcSyntheticProvider
self.valobj = valobj
self.ptr = unwrap_unique_or_non_null(self.valobj.GetChildMemberWithName("ptr"))
self.value = self.ptr.GetChildMemberWithName("data" if is_atomic else "value")
self.strong = self.ptr.GetChildMemberWithName("strong").GetChildAtIndex(
0).GetChildMemberWithName("value")
self.weak = self.ptr.GetChildMemberWithName("weak").GetChildAtIndex(
0).GetChildMemberWithName("value")
self.value_builder = ValueBuilder(valobj)
self.update()
def num_children(self):
# type: () -> int
# Actually there are 3 children, but only the `value` should be shown as a child
return 1
def get_child_index(self, name):
# type: (str) -> int
if name == "value":
return 0
if name == "strong":
return 1
if name == "weak":
return 2
return -1
def get_child_at_index(self, index):
# type: (int) -> SBValue
if index == 0:
return self.value
if index == 1:
return self.value_builder.from_uint("strong", self.strong_count)
if index == 2:
return self.value_builder.from_uint("weak", self.weak_count)
return None
def update(self):
# type: () -> None
self.strong_count = self.strong.GetValueAsUnsigned()
self.weak_count = self.weak.GetValueAsUnsigned() - 1
def has_children(self):
# type: () -> bool
return True
class StdCellSyntheticProvider:
"""Pretty-printer for std::cell::Cell"""
def __init__(self, valobj, dict):
# type: (SBValue, dict) -> StdCellSyntheticProvider
self.valobj = valobj
self.value = valobj.GetChildMemberWithName("value").GetChildAtIndex(0)
def num_children(self):
# type: () -> int
return 1
def get_child_index(self, name):
# type: (str) -> int
if name == "value":
return 0
return -1
def get_child_at_index(self, index):
# type: (int) -> SBValue
if index == 0:
return self.value
return None
def update(self):
# type: () -> None
pass
def has_children(self):
# type: () -> bool
return True
def StdRefSummaryProvider(valobj, dict):
# type: (SBValue, dict) -> str
borrow = valobj.GetChildMemberWithName("borrow").GetValueAsSigned()
return "borrow={}".format(borrow) if borrow >= 0 else "borrow_mut={}".format(-borrow)
class StdRefSyntheticProvider:
"""Pretty-printer for std::cell::Ref, std::cell::RefMut, and std::cell::RefCell"""
def __init__(self, valobj, dict, is_cell=False):
# type: (SBValue, dict, bool) -> StdRefSyntheticProvider
self.valobj = valobj
borrow = valobj.GetChildMemberWithName("borrow")
value = valobj.GetChildMemberWithName("value")
if is_cell:
self.borrow = borrow.GetChildMemberWithName("value").GetChildMemberWithName("value")
self.value = value.GetChildMemberWithName("value")
else:
self.borrow = borrow.GetChildMemberWithName("borrow").GetChildMemberWithName(
"value").GetChildMemberWithName("value")
self.value = value.Dereference()
self.value_builder = ValueBuilder(valobj)
self.update()
def num_children(self):
# type: () -> int
# Actually there are 2 children, but only the `value` should be shown as a child
return 1
def get_child_index(self, name):
if name == "value":
return 0
if name == "borrow":
return 1
return -1
def get_child_at_index(self, index):
# type: (int) -> SBValue
if index == 0:
return self.value
if index == 1:
return self.value_builder.from_int("borrow", self.borrow_count)
return None
def update(self):
# type: () -> None
self.borrow_count = self.borrow.GetValueAsSigned()
def has_children(self):
# type: () -> bool
return True
def StdNonZeroNumberSummaryProvider(valobj, _dict):
# type: (SBValue, dict) -> str
inner = valobj.GetChildAtIndex(0)
inner_inner = inner.GetChildAtIndex(0)
# FIXME: Avoid printing as character literal,
# see https://github.com/llvm/llvm-project/issues/65076.
if inner_inner.GetTypeName() in ['char', 'unsigned char']:
return str(inner_inner.GetValueAsSigned())
else:
return inner_inner.GetValue()


@@ -0,0 +1,127 @@
import re
class RustType(object):
OTHER = "Other"
STRUCT = "Struct"
TUPLE = "Tuple"
CSTYLE_VARIANT = "CStyleVariant"
TUPLE_VARIANT = "TupleVariant"
STRUCT_VARIANT = "StructVariant"
ENUM = "Enum"
EMPTY = "Empty"
SINGLETON_ENUM = "SingletonEnum"
REGULAR_ENUM = "RegularEnum"
COMPRESSED_ENUM = "CompressedEnum"
REGULAR_UNION = "RegularUnion"
STD_STRING = "StdString"
STD_OS_STRING = "StdOsString"
STD_STR = "StdStr"
STD_SLICE = "StdSlice"
STD_VEC = "StdVec"
STD_VEC_DEQUE = "StdVecDeque"
STD_BTREE_SET = "StdBTreeSet"
STD_BTREE_MAP = "StdBTreeMap"
STD_HASH_MAP = "StdHashMap"
STD_HASH_SET = "StdHashSet"
STD_RC = "StdRc"
STD_ARC = "StdArc"
STD_CELL = "StdCell"
STD_REF = "StdRef"
STD_REF_MUT = "StdRefMut"
STD_REF_CELL = "StdRefCell"
STD_NONZERO_NUMBER = "StdNonZeroNumber"
STD_PATH = "StdPath"
STD_PATHBUF = "StdPathBuf"
STD_STRING_REGEX = re.compile(r"^(alloc::([a-z_]+::)+)String$")
STD_STR_REGEX = re.compile(r"^&(mut )?str$")
STD_SLICE_REGEX = re.compile(r"^&(mut )?\[.+\]$")
STD_OS_STRING_REGEX = re.compile(r"^(std::ffi::([a-z_]+::)+)OsString$")
STD_VEC_REGEX = re.compile(r"^(alloc::([a-z_]+::)+)Vec<.+>$")
STD_VEC_DEQUE_REGEX = re.compile(r"^(alloc::([a-z_]+::)+)VecDeque<.+>$")
STD_BTREE_SET_REGEX = re.compile(r"^(alloc::([a-z_]+::)+)BTreeSet<.+>$")
STD_BTREE_MAP_REGEX = re.compile(r"^(alloc::([a-z_]+::)+)BTreeMap<.+>$")
STD_HASH_MAP_REGEX = re.compile(r"^(std::collections::([a-z_]+::)+)HashMap<.+>$")
STD_HASH_SET_REGEX = re.compile(r"^(std::collections::([a-z_]+::)+)HashSet<.+>$")
STD_RC_REGEX = re.compile(r"^(alloc::([a-z_]+::)+)Rc<.+>$")
STD_ARC_REGEX = re.compile(r"^(alloc::([a-z_]+::)+)Arc<.+>$")
STD_CELL_REGEX = re.compile(r"^(core::([a-z_]+::)+)Cell<.+>$")
STD_REF_REGEX = re.compile(r"^(core::([a-z_]+::)+)Ref<.+>$")
STD_REF_MUT_REGEX = re.compile(r"^(core::([a-z_]+::)+)RefMut<.+>$")
STD_REF_CELL_REGEX = re.compile(r"^(core::([a-z_]+::)+)RefCell<.+>$")
STD_NONZERO_NUMBER_REGEX = re.compile(r"^(core::([a-z_]+::)+)NonZero<.+>$")
STD_PATHBUF_REGEX = re.compile(r"^(std::([a-z_]+::)+)PathBuf$")
STD_PATH_REGEX = re.compile(r"^&(mut )?(std::([a-z_]+::)+)Path$")
TUPLE_ITEM_REGEX = re.compile(r"__\d+$")
ENCODED_ENUM_PREFIX = "RUST$ENCODED$ENUM$"
ENUM_DISR_FIELD_NAME = "<<variant>>"
ENUM_LLDB_ENCODED_VARIANTS = "$variants$"
STD_TYPE_TO_REGEX = {
RustType.STD_STRING: STD_STRING_REGEX,
RustType.STD_OS_STRING: STD_OS_STRING_REGEX,
RustType.STD_STR: STD_STR_REGEX,
RustType.STD_SLICE: STD_SLICE_REGEX,
RustType.STD_VEC: STD_VEC_REGEX,
RustType.STD_VEC_DEQUE: STD_VEC_DEQUE_REGEX,
RustType.STD_HASH_MAP: STD_HASH_MAP_REGEX,
RustType.STD_HASH_SET: STD_HASH_SET_REGEX,
RustType.STD_BTREE_SET: STD_BTREE_SET_REGEX,
RustType.STD_BTREE_MAP: STD_BTREE_MAP_REGEX,
RustType.STD_RC: STD_RC_REGEX,
RustType.STD_ARC: STD_ARC_REGEX,
RustType.STD_REF: STD_REF_REGEX,
RustType.STD_REF_MUT: STD_REF_MUT_REGEX,
RustType.STD_REF_CELL: STD_REF_CELL_REGEX,
RustType.STD_CELL: STD_CELL_REGEX,
RustType.STD_NONZERO_NUMBER: STD_NONZERO_NUMBER_REGEX,
RustType.STD_PATHBUF: STD_PATHBUF_REGEX,
RustType.STD_PATH: STD_PATH_REGEX,
}
def is_tuple_fields(fields):
# type: (list) -> bool
return all(TUPLE_ITEM_REGEX.match(str(field.name)) for field in fields)
def classify_struct(name, fields):
if len(fields) == 0:
return RustType.EMPTY
for ty, regex in STD_TYPE_TO_REGEX.items():
if regex.match(name):
return ty
    # <<variant>> is emitted by GDB, while LLDB (18.1+) emits "$variants$"
if (
fields[0].name == ENUM_DISR_FIELD_NAME
or fields[0].name == ENUM_LLDB_ENCODED_VARIANTS
):
return RustType.ENUM
if is_tuple_fields(fields):
return RustType.TUPLE
return RustType.STRUCT
def classify_union(fields):
if len(fields) == 0:
return RustType.EMPTY
first_variant_name = fields[0].name
if first_variant_name is None:
if len(fields) == 1:
return RustType.SINGLETON_ENUM
else:
return RustType.REGULAR_ENUM
elif first_variant_name.startswith(ENCODED_ENUM_PREFIX):
assert len(fields) == 1
return RustType.COMPRESSED_ENUM
else:
return RustType.REGULAR_UNION

File diff suppressed because it is too large.


@@ -0,0 +1,43 @@
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/bin/cargo
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/etc/bash_completion.d/cargo
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/share/doc/cargo/LICENSE-APACHE
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/share/doc/cargo/LICENSE-MIT
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/share/doc/cargo/LICENSE-THIRD-PARTY
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/share/doc/cargo/README.md
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/share/man/man1/cargo-add.1
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/share/man/man1/cargo-bench.1
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/share/man/man1/cargo-build.1
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/share/man/man1/cargo-check.1
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/share/man/man1/cargo-clean.1
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/share/man/man1/cargo-doc.1
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/share/man/man1/cargo-fetch.1
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/share/man/man1/cargo-fix.1
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/share/man/man1/cargo-generate-lockfile.1
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/share/man/man1/cargo-help.1
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/share/man/man1/cargo-init.1
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/share/man/man1/cargo-install.1
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/share/man/man1/cargo-locate-project.1
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/share/man/man1/cargo-login.1
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/share/man/man1/cargo-logout.1
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/share/man/man1/cargo-metadata.1
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/share/man/man1/cargo-new.1
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/share/man/man1/cargo-owner.1
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/share/man/man1/cargo-package.1
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/share/man/man1/cargo-pkgid.1
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/share/man/man1/cargo-publish.1
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/share/man/man1/cargo-remove.1
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/share/man/man1/cargo-report.1
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/share/man/man1/cargo-run.1
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/share/man/man1/cargo-rustc.1
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/share/man/man1/cargo-rustdoc.1
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/share/man/man1/cargo-search.1
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/share/man/man1/cargo-test.1
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/share/man/man1/cargo-tree.1
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/share/man/man1/cargo-uninstall.1
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/share/man/man1/cargo-update.1
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/share/man/man1/cargo-vendor.1
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/share/man/man1/cargo-verify-project.1
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/share/man/man1/cargo-version.1
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/share/man/man1/cargo-yank.1
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/share/man/man1/cargo.1
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/share/zsh/site-functions/_cargo

File diff suppressed because it is too large.


@@ -0,0 +1,27 @@
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-linux-gnu/lib/libaddr2line-4cedd46c8fe71ed2.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-linux-gnu/lib/libadler-d4c79732e171d3a7.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-linux-gnu/lib/liballoc-9d49d41f0562822c.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-linux-gnu/lib/libcfg_if-ff2b7da4e0d0ab57.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-linux-gnu/lib/libcompiler_builtins-aa4f3746d6ca7ab9.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-linux-gnu/lib/libcore-676cf12434975822.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-linux-gnu/lib/libgetopts-f0255baadbad58d5.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-linux-gnu/lib/libgimli-a12b04ee154c7d90.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-linux-gnu/lib/libhashbrown-bc9cb01546955737.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-linux-gnu/lib/liblibc-ed2e44769842094d.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-linux-gnu/lib/libmemchr-6dd6946c48e6b075.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-linux-gnu/lib/libminiz_oxide-a15b106983ce6dd5.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-linux-gnu/lib/libobject-6cd682687136d976.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-linux-gnu/lib/libpanic_abort-8b5cb80252bd6f78.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-linux-gnu/lib/libpanic_unwind-18a344403809bc25.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-linux-gnu/lib/libproc_macro-2e816780bb8f66d3.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-linux-gnu/lib/librustc_demangle-ccac9e6a632e8b3e.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-linux-gnu/lib/librustc_std_workspace_alloc-1b927735a92f2ee4.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-linux-gnu/lib/librustc_std_workspace_core-c746aab067ded71e.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-linux-gnu/lib/librustc_std_workspace_std-d170c8249e99464e.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-linux-gnu/lib/libstd-16ad1350712368b9.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-linux-gnu/lib/libstd-16ad1350712368b9.so
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-linux-gnu/lib/libstd_detect-99ac4d3eb0cc7ebc.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-linux-gnu/lib/libsysroot-248945c060cd0acf.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-linux-gnu/lib/libtest-95a683f11cc4de96.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-linux-gnu/lib/libunicode_width-55f5c5a3f3b21d26.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-linux-gnu/lib/libunwind-7114cbcad6775962.rlib

View File

@ -0,0 +1,27 @@
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-redox/lib/libaddr2line-fde09826c65c794e.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-redox/lib/libadler-0ec997fbd84c7ae8.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-redox/lib/liballoc-313f892011fb7765.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-redox/lib/libcfg_if-aca4f48c3a15d8b0.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-redox/lib/libcompiler_builtins-9f607a544622e911.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-redox/lib/libcore-9b9e0d7ebf62a136.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-redox/lib/libgetopts-dc4505e324740ece.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-redox/lib/libgimli-2a24dd6cac9f61a4.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-redox/lib/libhashbrown-ee4dd7adc6c61bb2.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-redox/lib/liblibc-a580e978fdfc509e.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-redox/lib/libmemchr-af21bb8f0863910e.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-redox/lib/libminiz_oxide-2353b30ecc128dc2.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-redox/lib/libobject-af6c463d8756245e.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-redox/lib/libpanic_abort-3ab1894a4de1398e.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-redox/lib/libpanic_unwind-9236a22b311f3888.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-redox/lib/libproc_macro-4543cb6a6104871d.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-redox/lib/librustc_demangle-f92b4cd2af767a34.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-redox/lib/librustc_std_workspace_alloc-d8b3e362cfdbe426.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-redox/lib/librustc_std_workspace_core-fe3b2fe89881613b.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-redox/lib/librustc_std_workspace_std-f8a12704732d7091.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-redox/lib/libstd-ab55a832b4ed887c.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-redox/lib/libstd-ab55a832b4ed887c.so
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-redox/lib/libstd_detect-36d958d43a531111.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-redox/lib/libsysroot-f69cfae8521490dd.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-redox/lib/libtest-dd4ba117863b56ab.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-redox/lib/libunicode_width-c4ba38c408a4d721.rlib
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-redox/lib/libunwind-857e2c60f789b1b5.rlib

View File

@ -0,0 +1,24 @@
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/bin/rust-gdb
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/bin/rust-gdbgui
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/bin/rust-lldb
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/bin/rustc
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/librustc_driver-3cf91a42ef25efec.so
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/libstd-16ad1350712368b9.so
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/etc/gdb_load_rust_pretty_printers.py
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/etc/gdb_lookup.py
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/etc/gdb_providers.py
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/etc/lldb_commands
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/etc/lldb_lookup.py
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/etc/lldb_providers.py
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/etc/rust_types.py
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-linux-gnu/bin/gcc-ld/ld.lld
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-linux-gnu/bin/gcc-ld/ld64.lld
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-linux-gnu/bin/gcc-ld/lld-link
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-linux-gnu/bin/gcc-ld/wasm-ld
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/lib/rustlib/x86_64-unknown-linux-gnu/bin/rust-lld
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/share/doc/rustc/COPYRIGHT
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/share/doc/rustc/LICENSE-APACHE
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/share/doc/rustc/LICENSE-MIT
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/share/doc/rustc/README.md
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/share/man/man1/rustc.1
file:/home/redox/redox/prefix/x86_64-unknown-redox/rust-install.partial/share/man/man1/rustdoc.1

File diff suppressed because it is too large

View File

@ -0,0 +1,39 @@
[package]
name = "alloc"
version = "0.0.0"
license = "MIT OR Apache-2.0"
repository = "https://github.com/rust-lang/rust.git"
description = "The Rust core allocation and collections library"
autotests = false
autobenches = false
edition = "2021"

[dependencies]
core = { path = "../core" }
compiler_builtins = { version = "0.1.110", features = ['rustc-dep-of-std', 'no-f16-f128'] }

[dev-dependencies]
rand = { version = "0.8.5", default-features = false, features = ["alloc"] }
rand_xorshift = "0.3.0"

[[test]]
name = "alloctests"
path = "tests/lib.rs"

[[bench]]
name = "allocbenches"
path = "benches/lib.rs"
test = true

[[bench]]
name = "vec_deque_append_bench"
path = "benches/vec_deque_append.rs"
harness = false

[features]
compiler-builtins-mem = ['compiler_builtins/mem']
compiler-builtins-c = ["compiler_builtins/c"]
compiler-builtins-no-asm = ["compiler_builtins/no-asm"]
compiler-builtins-mangled-names = ["compiler_builtins/mangled-names"]
# Make panics and failed asserts immediately abort without formatting any message
panic_immediate_abort = []

View File

@ -0,0 +1,91 @@
use std::collections::BinaryHeap;
use rand::seq::SliceRandom;
use test::{black_box, Bencher};
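// Keeps the 1000 smallest of 100_000 shuffled values in a bounded max-heap:
// the root is replaced only when a smaller value shows up.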
#[bench]
fn bench_find_smallest_1000(b: &mut Bencher) {
    let mut rng = crate::bench_rng();
    let mut vec: Vec<u32> = (0..100_000).collect();
    vec.shuffle(&mut rng);
    b.iter(|| {
        let mut iter = vec.iter().copied();
        let mut heap: BinaryHeap<_> = iter.by_ref().take(1000).collect();
        for x in iter {
            let mut max = heap.peek_mut().unwrap();
            // This comparison should be true only 1% of the time.
            // Unnecessary `sift_down`s will degrade performance
            if x < *max {
                *max = x;
            }
        }
        heap
    })
}
#[bench]
fn bench_peek_mut_deref_mut(b: &mut Bencher) {
    let mut bheap = BinaryHeap::from(vec![42]);
    let vec: Vec<u32> = (0..1_000_000).collect();
    b.iter(|| {
        let vec = black_box(&vec);
        let mut peek_mut = bheap.peek_mut().unwrap();
        // The compiler shouldn't be able to optimize away the `sift_down`
        // assignment in `PeekMut`'s `DerefMut` implementation since
        // the loop might not run.
        for &i in vec.iter() {
            *peek_mut = i;
        }
        // Remove the already minimal overhead of the sift_down
        std::mem::forget(peek_mut);
    })
}
#[bench]
fn bench_from_vec(b: &mut Bencher) {
    let mut rng = crate::bench_rng();
    let mut vec: Vec<u32> = (0..100_000).collect();
    vec.shuffle(&mut rng);
    b.iter(|| BinaryHeap::from(vec.clone()))
}
#[bench]
fn bench_into_sorted_vec(b: &mut Bencher) {
    let bheap: BinaryHeap<i32> = (0..10_000).collect();
    b.iter(|| bheap.clone().into_sorted_vec())
}
#[bench]
fn bench_push(b: &mut Bencher) {
    let mut bheap = BinaryHeap::with_capacity(50_000);
    let mut rng = crate::bench_rng();
    let mut vec: Vec<u32> = (0..50_000).collect();
    vec.shuffle(&mut rng);
    b.iter(|| {
        for &i in vec.iter() {
            bheap.push(i);
        }
        black_box(&mut bheap);
        bheap.clear();
    })
}
#[bench]
fn bench_pop(b: &mut Bencher) {
    let mut bheap = BinaryHeap::with_capacity(10_000);
    b.iter(|| {
        bheap.extend((0..10_000).rev());
        black_box(&mut bheap);
        while let Some(elem) = bheap.pop() {
            black_box(elem);
        }
    })
}

View File

@ -0,0 +1,583 @@
use std::collections::BTreeMap;
use std::ops::RangeBounds;
use rand::{seq::SliceRandom, Rng};
use test::{black_box, Bencher};
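// Each macro below expands to one `#[bench]` function; `$n` sets the map size
// and `$map` the map type, so one body serves several size/type combinations.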
macro_rules! map_insert_rand_bench {
    ($name: ident, $n: expr, $map: ident) => {
        #[bench]
        pub fn $name(b: &mut Bencher) {
            let n: usize = $n;
            let mut map = $map::new();
            // setup
            let mut rng = crate::bench_rng();
            for _ in 0..n {
                let i = rng.gen::<usize>() % n;
                map.insert(i, i);
            }
            // measure
            b.iter(|| {
                let k = rng.gen::<usize>() % n;
                map.insert(k, k);
                map.remove(&k);
            });
            black_box(map);
        }
    };
}
macro_rules! map_insert_seq_bench {
    ($name: ident, $n: expr, $map: ident) => {
        #[bench]
        pub fn $name(b: &mut Bencher) {
            let mut map = $map::new();
            let n: usize = $n;
            // setup
            for i in 0..n {
                map.insert(i * 2, i * 2);
            }
            // measure
            let mut i = 1;
            b.iter(|| {
                map.insert(i, i);
                map.remove(&i);
                i = (i + 2) % n;
            });
            black_box(map);
        }
    };
}
macro_rules! map_from_iter_rand_bench {
    ($name: ident, $n: expr, $map: ident) => {
        #[bench]
        pub fn $name(b: &mut Bencher) {
            let n: usize = $n;
            // setup
            let mut rng = crate::bench_rng();
            let mut vec = Vec::with_capacity(n);
            for _ in 0..n {
                let i = rng.gen::<usize>() % n;
                vec.push((i, i));
            }
            // measure
            b.iter(|| {
                let map: $map<_, _> = vec.iter().copied().collect();
                black_box(map);
            });
        }
    };
}
macro_rules! map_from_iter_seq_bench {
    ($name: ident, $n: expr, $map: ident) => {
        #[bench]
        pub fn $name(b: &mut Bencher) {
            let n: usize = $n;
            // setup
            let mut vec = Vec::with_capacity(n);
            for i in 0..n {
                vec.push((i, i));
            }
            // measure
            b.iter(|| {
                let map: $map<_, _> = vec.iter().copied().collect();
                black_box(map);
            });
        }
    };
}
macro_rules! map_find_rand_bench {
    ($name: ident, $n: expr, $map: ident) => {
        #[bench]
        pub fn $name(b: &mut Bencher) {
            let mut map = $map::new();
            let n: usize = $n;
            // setup
            let mut rng = crate::bench_rng();
            let mut keys: Vec<_> = (0..n).map(|_| rng.gen::<usize>() % n).collect();
            for &k in &keys {
                map.insert(k, k);
            }
            keys.shuffle(&mut rng);
            // measure
            let mut i = 0;
            b.iter(|| {
                let t = map.get(&keys[i]);
                i = (i + 1) % n;
                black_box(t);
            })
        }
    };
}
macro_rules! map_find_seq_bench {
    ($name: ident, $n: expr, $map: ident) => {
        #[bench]
        pub fn $name(b: &mut Bencher) {
            let mut map = $map::new();
            let n: usize = $n;
            // setup
            for i in 0..n {
                map.insert(i, i);
            }
            // measure
            let mut i = 0;
            b.iter(|| {
                let x = map.get(&i);
                i = (i + 1) % n;
                black_box(x);
            })
        }
    };
}
map_insert_rand_bench! {insert_rand_100, 100, BTreeMap}
map_insert_rand_bench! {insert_rand_10_000, 10_000, BTreeMap}
map_insert_seq_bench! {insert_seq_100, 100, BTreeMap}
map_insert_seq_bench! {insert_seq_10_000, 10_000, BTreeMap}
map_from_iter_rand_bench! {from_iter_rand_100, 100, BTreeMap}
map_from_iter_rand_bench! {from_iter_rand_10_000, 10_000, BTreeMap}
map_from_iter_seq_bench! {from_iter_seq_100, 100, BTreeMap}
map_from_iter_seq_bench! {from_iter_seq_10_000, 10_000, BTreeMap}
map_find_rand_bench! {find_rand_100, 100, BTreeMap}
map_find_rand_bench! {find_rand_10_000, 10_000, BTreeMap}
map_find_seq_bench! {find_seq_100, 100, BTreeMap}
map_find_seq_bench! {find_seq_10_000, 10_000, BTreeMap}
fn bench_iteration(b: &mut Bencher, size: i32) {
    let mut map = BTreeMap::<i32, i32>::new();
    let mut rng = crate::bench_rng();
    for _ in 0..size {
        map.insert(rng.gen(), rng.gen());
    }
    b.iter(|| {
        for entry in &map {
            black_box(entry);
        }
    });
}
#[bench]
pub fn iteration_20(b: &mut Bencher) {
    bench_iteration(b, 20);
}
#[bench]
pub fn iteration_1000(b: &mut Bencher) {
    bench_iteration(b, 1000);
}
#[bench]
pub fn iteration_100000(b: &mut Bencher) {
    bench_iteration(b, 100000);
}
fn bench_iteration_mut(b: &mut Bencher, size: i32) {
    let mut map = BTreeMap::<i32, i32>::new();
    let mut rng = crate::bench_rng();
    for _ in 0..size {
        map.insert(rng.gen(), rng.gen());
    }
    b.iter(|| {
        for kv in map.iter_mut() {
            black_box(kv);
        }
    });
}
#[bench]
pub fn iteration_mut_20(b: &mut Bencher) {
    bench_iteration_mut(b, 20);
}
#[bench]
pub fn iteration_mut_1000(b: &mut Bencher) {
    bench_iteration_mut(b, 1000);
}
#[bench]
pub fn iteration_mut_100000(b: &mut Bencher) {
    bench_iteration_mut(b, 100000);
}
fn bench_first_and_last_nightly(b: &mut Bencher, size: i32) {
    let map: BTreeMap<_, _> = (0..size).map(|i| (i, i)).collect();
    b.iter(|| {
        for _ in 0..10 {
            black_box(map.first_key_value());
            black_box(map.last_key_value());
        }
    });
}
fn bench_first_and_last_stable(b: &mut Bencher, size: i32) {
    let map: BTreeMap<_, _> = (0..size).map(|i| (i, i)).collect();
    b.iter(|| {
        for _ in 0..10 {
            black_box(map.iter().next());
            black_box(map.iter().next_back());
        }
    });
}
#[bench]
pub fn first_and_last_0_nightly(b: &mut Bencher) {
    bench_first_and_last_nightly(b, 0);
}
#[bench]
pub fn first_and_last_0_stable(b: &mut Bencher) {
    bench_first_and_last_stable(b, 0);
}
#[bench]
pub fn first_and_last_100_nightly(b: &mut Bencher) {
    bench_first_and_last_nightly(b, 100);
}
#[bench]
pub fn first_and_last_100_stable(b: &mut Bencher) {
    bench_first_and_last_stable(b, 100);
}
#[bench]
pub fn first_and_last_10k_nightly(b: &mut Bencher) {
    bench_first_and_last_nightly(b, 10_000);
}
#[bench]
pub fn first_and_last_10k_stable(b: &mut Bencher) {
    bench_first_and_last_stable(b, 10_000);
}
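// Every (i, j) pair with i < j is queried once per iteration, so the number
// of `range` calls per iteration is the triangular number below.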
const BENCH_RANGE_SIZE: i32 = 145;
const BENCH_RANGE_COUNT: i32 = BENCH_RANGE_SIZE * (BENCH_RANGE_SIZE - 1) / 2;
fn bench_range<F, R>(b: &mut Bencher, f: F)
where
    F: Fn(i32, i32) -> R,
    R: RangeBounds<i32>,
{
    let map: BTreeMap<_, _> = (0..BENCH_RANGE_SIZE).map(|i| (i, i)).collect();
    b.iter(|| {
        let mut c = 0;
        for i in 0..BENCH_RANGE_SIZE {
            for j in i + 1..BENCH_RANGE_SIZE {
                let _ = black_box(map.range(f(i, j)));
                c += 1;
            }
        }
        debug_assert_eq!(c, BENCH_RANGE_COUNT);
    });
}
#[bench]
pub fn range_included_excluded(b: &mut Bencher) {
    bench_range(b, |i, j| i..j);
}
#[bench]
pub fn range_included_included(b: &mut Bencher) {
    bench_range(b, |i, j| i..=j);
}
#[bench]
pub fn range_included_unbounded(b: &mut Bencher) {
    bench_range(b, |i, _| i..);
}
#[bench]
pub fn range_unbounded_unbounded(b: &mut Bencher) {
    bench_range(b, |_, _| ..);
}
fn bench_iter(b: &mut Bencher, repeats: i32, size: i32) {
    let map: BTreeMap<_, _> = (0..size).map(|i| (i, i)).collect();
    b.iter(|| {
        for _ in 0..repeats {
            let _ = black_box(map.iter());
        }
    });
}
/// Contrast range_unbounded_unbounded with `iter()`.
#[bench]
pub fn range_unbounded_vs_iter(b: &mut Bencher) {
    bench_iter(b, BENCH_RANGE_COUNT, BENCH_RANGE_SIZE);
}
#[bench]
pub fn iter_0(b: &mut Bencher) {
    bench_iter(b, 1_000, 0);
}
#[bench]
pub fn iter_1(b: &mut Bencher) {
    bench_iter(b, 1_000, 1);
}
#[bench]
pub fn iter_100(b: &mut Bencher) {
    bench_iter(b, 1_000, 100);
}
#[bench]
pub fn iter_10k(b: &mut Bencher) {
    bench_iter(b, 1_000, 10_000);
}
#[bench]
pub fn iter_1m(b: &mut Bencher) {
    bench_iter(b, 1_000, 1_000_000);
}
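// Number of `usize`s in the large values used by `fat_val_map` below
// (2 KiB per value on targets with an 8-byte `usize`).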
const FAT: usize = 256;
// The returned map has small keys and values.
// Benchmarks on it have a counterpart in set.rs with the same keys and no values at all.
fn slim_map(n: usize) -> BTreeMap<usize, usize> {
    (0..n).map(|i| (i, i)).collect::<BTreeMap<_, _>>()
}
// The returned map has small keys and large values.
fn fat_val_map(n: usize) -> BTreeMap<usize, [usize; FAT]> {
    (0..n).map(|i| (i, [i; FAT])).collect::<BTreeMap<_, _>>()
}
#[bench]
pub fn clone_slim_100(b: &mut Bencher) {
    let src = slim_map(100);
    b.iter(|| src.clone())
}
#[bench]
pub fn clone_slim_100_and_clear(b: &mut Bencher) {
    let src = slim_map(100);
    b.iter(|| src.clone().clear())
}
#[bench]
pub fn clone_slim_100_and_drain_all(b: &mut Bencher) {
    let src = slim_map(100);
    b.iter(|| src.clone().extract_if(|_, _| true).count())
}
#[bench]
pub fn clone_slim_100_and_drain_half(b: &mut Bencher) {
    let src = slim_map(100);
    b.iter(|| {
        let mut map = src.clone();
        assert_eq!(map.extract_if(|i, _| i % 2 == 0).count(), 100 / 2);
        assert_eq!(map.len(), 100 / 2);
    })
}
#[bench]
pub fn clone_slim_100_and_into_iter(b: &mut Bencher) {
    let src = slim_map(100);
    b.iter(|| src.clone().into_iter().count())
}
#[bench]
pub fn clone_slim_100_and_pop_all(b: &mut Bencher) {
    let src = slim_map(100);
    b.iter(|| {
        let mut map = src.clone();
        while map.pop_first().is_some() {}
        map
    });
}
#[bench]
pub fn clone_slim_100_and_remove_all(b: &mut Bencher) {
    let src = slim_map(100);
    b.iter(|| {
        let mut map = src.clone();
        while let Some(elt) = map.iter().map(|(&i, _)| i).next() {
            let v = map.remove(&elt);
            debug_assert!(v.is_some());
        }
        map
    });
}
#[bench]
pub fn clone_slim_100_and_remove_half(b: &mut Bencher) {
    let src = slim_map(100);
    b.iter(|| {
        let mut map = src.clone();
        for i in (0..100).step_by(2) {
            let v = map.remove(&i);
            debug_assert!(v.is_some());
        }
        assert_eq!(map.len(), 100 / 2);
        map
    })
}
#[bench]
pub fn clone_slim_10k(b: &mut Bencher) {
    let src = slim_map(10_000);
    b.iter(|| src.clone())
}
#[bench]
pub fn clone_slim_10k_and_clear(b: &mut Bencher) {
    let src = slim_map(10_000);
    b.iter(|| src.clone().clear())
}
#[bench]
pub fn clone_slim_10k_and_drain_all(b: &mut Bencher) {
    let src = slim_map(10_000);
    b.iter(|| src.clone().extract_if(|_, _| true).count())
}
#[bench]
pub fn clone_slim_10k_and_drain_half(b: &mut Bencher) {
    let src = slim_map(10_000);
    b.iter(|| {
        let mut map = src.clone();
        assert_eq!(map.extract_if(|i, _| i % 2 == 0).count(), 10_000 / 2);
        assert_eq!(map.len(), 10_000 / 2);
    })
}
#[bench]
pub fn clone_slim_10k_and_into_iter(b: &mut Bencher) {
    let src = slim_map(10_000);
    b.iter(|| src.clone().into_iter().count())
}
#[bench]
pub fn clone_slim_10k_and_pop_all(b: &mut Bencher) {
    let src = slim_map(10_000);
    b.iter(|| {
        let mut map = src.clone();
        while map.pop_first().is_some() {}
        map
    });
}
#[bench]
pub fn clone_slim_10k_and_remove_all(b: &mut Bencher) {
    let src = slim_map(10_000);
    b.iter(|| {
        let mut map = src.clone();
        while let Some(elt) = map.iter().map(|(&i, _)| i).next() {
            let v = map.remove(&elt);
            debug_assert!(v.is_some());
        }
        map
    });
}
#[bench]
pub fn clone_slim_10k_and_remove_half(b: &mut Bencher) {
    let src = slim_map(10_000);
    b.iter(|| {
        let mut map = src.clone();
        for i in (0..10_000).step_by(2) {
            let v = map.remove(&i);
            debug_assert!(v.is_some());
        }
        assert_eq!(map.len(), 10_000 / 2);
        map
    })
}
#[bench]
pub fn clone_fat_val_100(b: &mut Bencher) {
    let src = fat_val_map(100);
    b.iter(|| src.clone())
}
#[bench]
pub fn clone_fat_val_100_and_clear(b: &mut Bencher) {
    let src = fat_val_map(100);
    b.iter(|| src.clone().clear())
}
#[bench]
pub fn clone_fat_val_100_and_drain_all(b: &mut Bencher) {
    let src = fat_val_map(100);
    b.iter(|| src.clone().extract_if(|_, _| true).count())
}
#[bench]
pub fn clone_fat_val_100_and_drain_half(b: &mut Bencher) {
    let src = fat_val_map(100);
    b.iter(|| {
        let mut map = src.clone();
        assert_eq!(map.extract_if(|i, _| i % 2 == 0).count(), 100 / 2);
        assert_eq!(map.len(), 100 / 2);
    })
}
#[bench]
pub fn clone_fat_val_100_and_into_iter(b: &mut Bencher) {
    let src = fat_val_map(100);
    b.iter(|| src.clone().into_iter().count())
}
#[bench]
pub fn clone_fat_val_100_and_pop_all(b: &mut Bencher) {
    let src = fat_val_map(100);
    b.iter(|| {
        let mut map = src.clone();
        while map.pop_first().is_some() {}
        map
    });
}
#[bench]
pub fn clone_fat_val_100_and_remove_all(b: &mut Bencher) {
    let src = fat_val_map(100);
    b.iter(|| {
        let mut map = src.clone();
        while let Some(elt) = map.iter().map(|(&i, _)| i).next() {
            let v = map.remove(&elt);
            debug_assert!(v.is_some());
        }
        map
    });
}
#[bench]
pub fn clone_fat_val_100_and_remove_half(b: &mut Bencher) {
    let src = fat_val_map(100);
    b.iter(|| {
        let mut map = src.clone();
        for i in (0..100).step_by(2) {
            let v = map.remove(&i);
            debug_assert!(v.is_some());
        }
        assert_eq!(map.len(), 100 / 2);
        map
    })
}

View File

@ -0,0 +1,224 @@
use std::collections::BTreeSet;
use rand::Rng;
use test::Bencher;
fn random(n: usize) -> BTreeSet<usize> {
    let mut rng = crate::bench_rng();
    let mut set = BTreeSet::new();
    while set.len() < n {
        set.insert(rng.gen());
    }
    assert_eq!(set.len(), n);
    set
}
fn neg(n: usize) -> BTreeSet<i32> {
    let set: BTreeSet<i32> = (-(n as i32)..=-1).collect();
    assert_eq!(set.len(), n);
    set
}
fn pos(n: usize) -> BTreeSet<i32> {
    let set: BTreeSet<i32> = (1..=(n as i32)).collect();
    assert_eq!(set.len(), n);
    set
}
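// Builds two interleaved sets: every (factor + 1)-th integer goes into the
// first set and the rest into the second, so their elements alternate.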
fn stagger(n1: usize, factor: usize) -> [BTreeSet<u32>; 2] {
    let n2 = n1 * factor;
    let mut sets = [BTreeSet::new(), BTreeSet::new()];
    for i in 0..(n1 + n2) {
        let b = i % (factor + 1) != 0;
        sets[b as usize].insert(i as u32);
    }
    assert_eq!(sets[0].len(), n1);
    assert_eq!(sets[1].len(), n2);
    sets
}
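// Expands to a `#[bench]` fn that applies `$set_func` (e.g. `intersection`)
// to the two `$sets` and consumes the result with `$result_func` (e.g. `count`).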
macro_rules! set_bench {
    ($name: ident, $set_func: ident, $result_func: ident, $sets: expr) => {
        #[bench]
        pub fn $name(b: &mut Bencher) {
            // setup
            let sets = $sets;
            // measure
            b.iter(|| sets[0].$set_func(&sets[1]).$result_func())
        }
    };
}
fn slim_set(n: usize) -> BTreeSet<usize> {
    (0..n).collect::<BTreeSet<_>>()
}
#[bench]
pub fn clone_100(b: &mut Bencher) {
    let src = slim_set(100);
    b.iter(|| src.clone())
}
#[bench]
pub fn clone_100_and_clear(b: &mut Bencher) {
    let src = slim_set(100);
    b.iter(|| src.clone().clear())
}
#[bench]
pub fn clone_100_and_drain_all(b: &mut Bencher) {
    let src = slim_set(100);
    b.iter(|| src.clone().extract_if(|_| true).count())
}
#[bench]
pub fn clone_100_and_drain_half(b: &mut Bencher) {
    let src = slim_set(100);
    b.iter(|| {
        let mut set = src.clone();
        assert_eq!(set.extract_if(|i| i % 2 == 0).count(), 100 / 2);
        assert_eq!(set.len(), 100 / 2);
    })
}
#[bench]
pub fn clone_100_and_into_iter(b: &mut Bencher) {
    let src = slim_set(100);
    b.iter(|| src.clone().into_iter().count())
}
#[bench]
pub fn clone_100_and_pop_all(b: &mut Bencher) {
    let src = slim_set(100);
    b.iter(|| {
        let mut set = src.clone();
        while set.pop_first().is_some() {}
        set
    });
}
#[bench]
pub fn clone_100_and_remove_all(b: &mut Bencher) {
    let src = slim_set(100);
    b.iter(|| {
        let mut set = src.clone();
        while let Some(elt) = set.iter().copied().next() {
            let ok = set.remove(&elt);
            debug_assert!(ok);
        }
        set
    });
}
#[bench]
pub fn clone_100_and_remove_half(b: &mut Bencher) {
    let src = slim_set(100);
    b.iter(|| {
        let mut set = src.clone();
        for i in (0..100).step_by(2) {
            let ok = set.remove(&i);
            debug_assert!(ok);
        }
        assert_eq!(set.len(), 100 / 2);
        set
    })
}
#[bench]
pub fn clone_10k(b: &mut Bencher) {
    let src = slim_set(10_000);
    b.iter(|| src.clone())
}
#[bench]
pub fn clone_10k_and_clear(b: &mut Bencher) {
    let src = slim_set(10_000);
    b.iter(|| src.clone().clear())
}
#[bench]
pub fn clone_10k_and_drain_all(b: &mut Bencher) {
    let src = slim_set(10_000);
    b.iter(|| src.clone().extract_if(|_| true).count())
}
#[bench]
pub fn clone_10k_and_drain_half(b: &mut Bencher) {
    let src = slim_set(10_000);
    b.iter(|| {
        let mut set = src.clone();
        assert_eq!(set.extract_if(|i| i % 2 == 0).count(), 10_000 / 2);
        assert_eq!(set.len(), 10_000 / 2);
    })
}
#[bench]
pub fn clone_10k_and_into_iter(b: &mut Bencher) {
    let src = slim_set(10_000);
    b.iter(|| src.clone().into_iter().count())
}
#[bench]
pub fn clone_10k_and_pop_all(b: &mut Bencher) {
    let src = slim_set(10_000);
    b.iter(|| {
        let mut set = src.clone();
        while set.pop_first().is_some() {}
        set
    });
}
#[bench]
pub fn clone_10k_and_remove_all(b: &mut Bencher) {
    let src = slim_set(10_000);
    b.iter(|| {
        let mut set = src.clone();
        while let Some(elt) = set.iter().copied().next() {
            let ok = set.remove(&elt);
            debug_assert!(ok);
        }
        set
    });
}
#[bench]
pub fn clone_10k_and_remove_half(b: &mut Bencher) {
    let src = slim_set(10_000);
    b.iter(|| {
        let mut set = src.clone();
        for i in (0..10_000).step_by(2) {
            let ok = set.remove(&i);
            debug_assert!(ok);
        }
        assert_eq!(set.len(), 10_000 / 2);
        set
    })
}
set_bench! {intersection_100_neg_vs_100_pos, intersection, count, [neg(100), pos(100)]}
set_bench! {intersection_100_neg_vs_10k_pos, intersection, count, [neg(100), pos(10_000)]}
set_bench! {intersection_100_pos_vs_100_neg, intersection, count, [pos(100), neg(100)]}
set_bench! {intersection_100_pos_vs_10k_neg, intersection, count, [pos(100), neg(10_000)]}
set_bench! {intersection_10k_neg_vs_100_pos, intersection, count, [neg(10_000), pos(100)]}
set_bench! {intersection_10k_neg_vs_10k_pos, intersection, count, [neg(10_000), pos(10_000)]}
set_bench! {intersection_10k_pos_vs_100_neg, intersection, count, [pos(10_000), neg(100)]}
set_bench! {intersection_10k_pos_vs_10k_neg, intersection, count, [pos(10_000), neg(10_000)]}
set_bench! {intersection_random_100_vs_100, intersection, count, [random(100), random(100)]}
set_bench! {intersection_random_100_vs_10k, intersection, count, [random(100), random(10_000)]}
set_bench! {intersection_random_10k_vs_100, intersection, count, [random(10_000), random(100)]}
set_bench! {intersection_random_10k_vs_10k, intersection, count, [random(10_000), random(10_000)]}
set_bench! {intersection_staggered_100_vs_100, intersection, count, stagger(100, 1)}
set_bench! {intersection_staggered_10k_vs_10k, intersection, count, stagger(10_000, 1)}
set_bench! {intersection_staggered_100_vs_10k, intersection, count, stagger(100, 100)}
set_bench! {difference_random_100_vs_100, difference, count, [random(100), random(100)]}
set_bench! {difference_random_100_vs_10k, difference, count, [random(100), random(10_000)]}
set_bench! {difference_random_10k_vs_100, difference, count, [random(10_000), random(100)]}
set_bench! {difference_random_10k_vs_10k, difference, count, [random(10_000), random(10_000)]}
set_bench! {difference_staggered_100_vs_100, difference, count, stagger(100, 1)}
set_bench! {difference_staggered_10k_vs_10k, difference, count, stagger(10_000, 1)}
set_bench! {difference_staggered_100_vs_10k, difference, count, stagger(100, 100)}
set_bench! {is_subset_100_vs_100, is_subset, clone, [pos(100), pos(100)]}
set_bench! {is_subset_100_vs_10k, is_subset, clone, [pos(100), pos(10_000)]}
set_bench! {is_subset_10k_vs_100, is_subset, clone, [pos(10_000), pos(100)]}
set_bench! {is_subset_10k_vs_10k, is_subset, clone, [pos(10_000), pos(10_000)]}

View File

@ -0,0 +1,31 @@
// Disabling on android for the time being
// See https://github.com/rust-lang/rust/issues/73535#event-3477699747
#![cfg(not(target_os = "android"))]
// Disabling in Miri as these would take too long.
#![cfg(not(miri))]
#![feature(btree_extract_if)]
#![feature(iter_next_chunk)]
#![feature(repr_simd)]
#![feature(slice_partition_dedup)]
#![feature(strict_provenance)]
#![feature(test)]
#![deny(fuzzy_provenance_casts)]
extern crate test;
mod binary_heap;
mod btree;
mod linked_list;
mod slice;
mod str;
mod string;
mod vec;
mod vec_deque;
/// Returns a `rand::Rng` seeded with a consistent seed.
///
/// This is done to avoid introducing nondeterminism in benchmark results.
fn bench_rng() -> rand_xorshift::XorShiftRng {
    const SEED: [u8; 16] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15];
    rand::SeedableRng::from_seed(SEED)
}

View File

@ -0,0 +1,77 @@
use std::collections::LinkedList;
use test::Bencher;
#[bench]
fn bench_collect_into(b: &mut Bencher) {
    let v = &[0; 64];
    b.iter(|| {
        let _: LinkedList<_> = v.iter().cloned().collect();
    })
}
#[bench]
fn bench_push_front(b: &mut Bencher) {
    let mut m: LinkedList<_> = LinkedList::new();
    b.iter(|| {
        m.push_front(0);
    })
}
#[bench]
fn bench_push_back(b: &mut Bencher) {
    let mut m: LinkedList<_> = LinkedList::new();
    b.iter(|| {
        m.push_back(0);
    })
}
#[bench]
fn bench_push_back_pop_back(b: &mut Bencher) {
    let mut m: LinkedList<_> = LinkedList::new();
    b.iter(|| {
        m.push_back(0);
        m.pop_back();
    })
}
#[bench]
fn bench_push_front_pop_front(b: &mut Bencher) {
    let mut m: LinkedList<_> = LinkedList::new();
    b.iter(|| {
        m.push_front(0);
        m.pop_front();
    })
}
#[bench]
fn bench_iter(b: &mut Bencher) {
    let v = &[0; 128];
    let m: LinkedList<_> = v.iter().cloned().collect();
    b.iter(|| {
        assert!(m.iter().count() == 128);
    })
}
#[bench]
fn bench_iter_mut(b: &mut Bencher) {
    let v = &[0; 128];
    let mut m: LinkedList<_> = v.iter().cloned().collect();
    b.iter(|| {
        assert!(m.iter_mut().count() == 128);
    })
}
#[bench]
fn bench_iter_rev(b: &mut Bencher) {
    let v = &[0; 128];
    let m: LinkedList<_> = v.iter().cloned().collect();
    b.iter(|| {
        assert!(m.iter().rev().count() == 128);
    })
}
#[bench]
fn bench_iter_mut_rev(b: &mut Bencher) {
    let v = &[0; 128];
    let mut m: LinkedList<_> = v.iter().cloned().collect();
    b.iter(|| {
        assert!(m.iter_mut().rev().count() == 128);
    })
}

View File

@ -0,0 +1,379 @@
use std::{mem, ptr};
use rand::distributions::{Alphanumeric, DistString, Standard};
use rand::Rng;
use test::{black_box, Bencher};
#[bench]
fn iterator(b: &mut Bencher) {
    // peculiar numbers to stop LLVM from optimising the summation
    // out.
    let v: Vec<_> = (0..100).map(|i| i ^ (i << 1) ^ (i >> 1)).collect();
    b.iter(|| {
        let mut sum = 0;
        for x in &v {
            sum += *x;
        }
        // sum == 11806, to stop dead code elimination.
        if sum == 0 {
            panic!()
        }
    })
}
#[bench]
fn mut_iterator(b: &mut Bencher) {
    let mut v = vec![0; 100];
    b.iter(|| {
        let mut i = 0;
        for x in &mut v {
            *x = i;
            i += 1;
        }
    })
}
#[bench]
fn concat(b: &mut Bencher) {
    let xss: Vec<Vec<i32>> = (0..100).map(|i| (0..i).collect()).collect();
    b.iter(|| {
        xss.concat();
    });
}
#[bench]
fn join(b: &mut Bencher) {
    let xss: Vec<Vec<i32>> = (0..100).map(|i| (0..i).collect()).collect();
    b.iter(|| xss.join(&0));
}
#[bench]
fn push(b: &mut Bencher) {
    let mut vec = Vec::<i32>::new();
    b.iter(|| {
        vec.push(0);
        black_box(&vec);
    });
}
#[bench]
fn starts_with_same_vector(b: &mut Bencher) {
    let vec: Vec<_> = (0..100).collect();
    b.iter(|| vec.starts_with(&vec))
}
#[bench]
fn starts_with_single_element(b: &mut Bencher) {
    let vec: Vec<_> = vec![0];
    b.iter(|| vec.starts_with(&vec))
}
#[bench]
fn starts_with_diff_one_element_at_end(b: &mut Bencher) {
    let vec: Vec<_> = (0..100).collect();
    let mut match_vec: Vec<_> = (0..99).collect();
    match_vec.push(0);
    b.iter(|| vec.starts_with(&match_vec))
}
#[bench]
fn ends_with_same_vector(b: &mut Bencher) {
    let vec: Vec<_> = (0..100).collect();
    b.iter(|| vec.ends_with(&vec))
}
#[bench]
fn ends_with_single_element(b: &mut Bencher) {
    let vec: Vec<_> = vec![0];
    b.iter(|| vec.ends_with(&vec))
}
#[bench]
fn ends_with_diff_one_element_at_beginning(b: &mut Bencher) {
    let vec: Vec<_> = (0..100).collect();
    let mut match_vec: Vec<_> = (0..100).collect();
    match_vec[0] = 200;
    // use `ends_with` to match the name; the mismatch sits at the far end
    b.iter(|| vec.ends_with(&match_vec))
}
#[bench]
fn contains_last_element(b: &mut Bencher) {
    let vec: Vec<_> = (0..100).collect();
    b.iter(|| vec.contains(&99))
}
#[bench]
fn zero_1kb_from_elem(b: &mut Bencher) {
    b.iter(|| vec![0u8; 1024]);
}
#[bench]
fn zero_1kb_set_memory(b: &mut Bencher) {
    b.iter(|| {
        let mut v = Vec::<u8>::with_capacity(1024);
        unsafe {
            let vp = v.as_mut_ptr();
            ptr::write_bytes(vp, 0, 1024);
            v.set_len(1024);
        }
        v
    });
}
#[bench]
fn zero_1kb_loop_set(b: &mut Bencher) {
    b.iter(|| {
        let mut v = Vec::<u8>::with_capacity(1024);
        unsafe {
            v.set_len(1024);
        }
        for i in 0..1024 {
            v[i] = 0;
        }
    });
}
#[bench]
fn zero_1kb_mut_iter(b: &mut Bencher) {
    b.iter(|| {
        let mut v = Vec::<u8>::with_capacity(1024);
        unsafe {
            v.set_len(1024);
        }
        for x in &mut v {
            *x = 0;
        }
        v
    });
}
#[bench]
fn random_inserts(b: &mut Bencher) {
    let mut rng = crate::bench_rng();
    b.iter(|| {
        let mut v = vec![(0, 0); 30];
        for _ in 0..100 {
            let l = v.len();
            v.insert(rng.gen::<usize>() % (l + 1), (1, 1));
        }
    })
}
#[bench]
fn random_removes(b: &mut Bencher) {
    let mut rng = crate::bench_rng();
    b.iter(|| {
        let mut v = vec![(0, 0); 130];
        for _ in 0..100 {
            let l = v.len();
            v.remove(rng.gen::<usize>() % l);
        }
    })
}
fn gen_ascending(len: usize) -> Vec<u64> {
    (0..len as u64).collect()
}
fn gen_descending(len: usize) -> Vec<u64> {
    (0..len as u64).rev().collect()
}
fn gen_random(len: usize) -> Vec<u64> {
    let mut rng = crate::bench_rng();
    (&mut rng).sample_iter(&Standard).take(len).collect()
}
fn gen_random_bytes(len: usize) -> Vec<u8> {
    let mut rng = crate::bench_rng();
    (&mut rng).sample_iter(&Standard).take(len).collect()
}
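// The "mostly" generators start from sorted data and perform roughly
// sqrt(len) random swaps, leaving the slice almost (but not quite) sorted.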
fn gen_mostly_ascending(len: usize) -> Vec<u64> {
    let mut rng = crate::bench_rng();
    let mut v = gen_ascending(len);
    for _ in (0usize..).take_while(|x| x * x <= len) {
        let x = rng.gen::<usize>() % len;
        let y = rng.gen::<usize>() % len;
        v.swap(x, y);
    }
    v
}
fn gen_mostly_descending(len: usize) -> Vec<u64> {
    let mut rng = crate::bench_rng();
    let mut v = gen_descending(len);
    for _ in (0usize..).take_while(|x| x * x <= len) {
        let x = rng.gen::<usize>() % len;
        let y = rng.gen::<usize>() % len;
        v.swap(x, y);
    }
    v
}
fn gen_strings(len: usize) -> Vec<String> {
    let mut rng = crate::bench_rng();
    let mut v = vec![];
    for _ in 0..len {
        let n = rng.gen::<usize>() % 20 + 1;
        v.push(Alphanumeric.sample_string(&mut rng, n));
    }
    v
}
fn gen_big_random(len: usize) -> Vec<[u64; 16]> {
    let mut rng = crate::bench_rng();
    (&mut rng).sample_iter(&Standard).map(|x| [x; 16]).take(len).collect()
}
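// The sort macros below clone the input on every iteration, so the sort
// itself rather than the setup dominates; `b.bytes` makes the results report
// throughput rather than raw time.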
macro_rules! sort {
    ($f:ident, $name:ident, $gen:expr, $len:expr) => {
        #[bench]
        fn $name(b: &mut Bencher) {
            let v = $gen($len);
            b.iter(|| v.clone().$f());
            b.bytes = $len * mem::size_of_val(&$gen(1)[0]) as u64;
        }
    };
}
macro_rules! sort_strings {
    ($f:ident, $name:ident, $gen:expr, $len:expr) => {
        #[bench]
        fn $name(b: &mut Bencher) {
            let v = $gen($len);
            let v = v.iter().map(|s| &**s).collect::<Vec<&str>>();
            b.iter(|| v.clone().$f());
            b.bytes = $len * mem::size_of::<&str>() as u64;
        }
    };
}
macro_rules! sort_expensive {
    ($f:ident, $name:ident, $gen:expr, $len:expr) => {
        #[bench]
        fn $name(b: &mut Bencher) {
            let v = $gen($len);
            b.iter(|| {
                let mut v = v.clone();
                let mut count = 0;
                v.$f(|a: &u64, b: &u64| {
                    count += 1;
                    if count % 1_000_000_000 == 0 {
                        panic!("should not happen");
                    }
                    (*a as f64).cos().partial_cmp(&(*b as f64).cos()).unwrap()
                });
                black_box(count);
            });
            b.bytes = $len * mem::size_of_val(&$gen(1)[0]) as u64;
        }
    };
}
macro_rules! sort_lexicographic {
    ($f:ident, $name:ident, $gen:expr, $len:expr) => {
        #[bench]
        fn $name(b: &mut Bencher) {
            let v = $gen($len);
            b.iter(|| v.clone().$f(|x| x.to_string()));
            b.bytes = $len * mem::size_of_val(&$gen(1)[0]) as u64;
        }
    };
}
sort!(sort, sort_small_ascending, gen_ascending, 10);
sort!(sort, sort_small_descending, gen_descending, 10);
sort!(sort, sort_small_random, gen_random, 10);
sort!(sort, sort_small_big, gen_big_random, 10);
sort!(sort, sort_medium_random, gen_random, 100);
sort!(sort, sort_large_ascending, gen_ascending, 10000);
sort!(sort, sort_large_descending, gen_descending, 10000);
sort!(sort, sort_large_mostly_ascending, gen_mostly_ascending, 10000);
sort!(sort, sort_large_mostly_descending, gen_mostly_descending, 10000);
sort!(sort, sort_large_random, gen_random, 10000);
sort!(sort, sort_large_big, gen_big_random, 10000);
sort_strings!(sort, sort_large_strings, gen_strings, 10000);
sort_expensive!(sort_by, sort_large_expensive, gen_random, 10000);
sort!(sort_unstable, sort_unstable_small_ascending, gen_ascending, 10);
sort!(sort_unstable, sort_unstable_small_descending, gen_descending, 10);
sort!(sort_unstable, sort_unstable_small_random, gen_random, 10);
sort!(sort_unstable, sort_unstable_small_big, gen_big_random, 10);
sort!(sort_unstable, sort_unstable_medium_random, gen_random, 100);
sort!(sort_unstable, sort_unstable_large_ascending, gen_ascending, 10000);
sort!(sort_unstable, sort_unstable_large_descending, gen_descending, 10000);
sort!(sort_unstable, sort_unstable_large_mostly_ascending, gen_mostly_ascending, 10000);
sort!(sort_unstable, sort_unstable_large_mostly_descending, gen_mostly_descending, 10000);
sort!(sort_unstable, sort_unstable_large_random, gen_random, 10000);
sort!(sort_unstable, sort_unstable_large_big, gen_big_random, 10000);
sort_strings!(sort_unstable, sort_unstable_large_strings, gen_strings, 10000);
sort_expensive!(sort_unstable_by, sort_unstable_large_expensive, gen_random, 10000);
sort_lexicographic!(sort_by_key, sort_by_key_lexicographic, gen_random, 10000);
sort_lexicographic!(sort_unstable_by_key, sort_unstable_by_key_lexicographic, gen_random, 10000);
sort_lexicographic!(sort_by_cached_key, sort_by_cached_key_lexicographic, gen_random, 10000);
macro_rules! reverse {
    ($name:ident, $ty:ty, $f:expr) => {
        #[bench]
        fn $name(b: &mut Bencher) {
            // odd length and offset by 1 to be as unaligned as possible
            let n = 0xFFFFF;
            let mut v: Vec<_> = (0..1 + (n / mem::size_of::<$ty>() as u64)).map($f).collect();
            b.iter(|| black_box(&mut v[1..]).reverse());
            b.bytes = n;
        }
    };
}
reverse!(reverse_u8, u8, |x| x as u8);
reverse!(reverse_u16, u16, |x| x as u16);
reverse!(reverse_u8x3, [u8; 3], |x| [x as u8, (x >> 8) as u8, (x >> 16) as u8]);
reverse!(reverse_u32, u32, |x| x as u32);
reverse!(reverse_u64, u64, |x| x as u64);
reverse!(reverse_u128, u128, |x| x as u128);
#[repr(simd)]
struct F64x4(f64, f64, f64, f64);
reverse!(reverse_simd_f64x4, F64x4, |x| {
    let x = x as f64;
    F64x4(x, x, x, x)
});
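// `$len` and `$mid` are given in 8-byte units and scaled by the element size,
// so every instantiation rotates roughly the same number of bytes.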
macro_rules! rotate {
    ($name:ident, $gen:expr, $len:expr, $mid:expr) => {
        #[bench]
        fn $name(b: &mut Bencher) {
            let size = mem::size_of_val(&$gen(1)[0]);
            let mut v = $gen($len * 8 / size);
            b.iter(|| black_box(&mut v).rotate_left(($mid * 8 + size - 1) / size));
            b.bytes = (v.len() * size) as u64;
        }
    };
}
rotate!(rotate_tiny_by1, gen_random, 16, 1);
rotate!(rotate_tiny_half, gen_random, 16, 16 / 2);
rotate!(rotate_tiny_half_plus_one, gen_random, 16, 16 / 2 + 1);
rotate!(rotate_medium_by1, gen_random, 9158, 1);
rotate!(rotate_medium_by727_u64, gen_random, 9158, 727);
rotate!(rotate_medium_by727_bytes, gen_random_bytes, 9158, 727);
rotate!(rotate_medium_by727_strings, gen_strings, 9158, 727);
rotate!(rotate_medium_half, gen_random, 9158, 9158 / 2);
rotate!(rotate_medium_half_plus_one, gen_random, 9158, 9158 / 2 + 1);
// Intended to use more RAM than the machine has cache
rotate!(rotate_huge_by1, gen_random, 5 * 1024 * 1024, 1);
rotate!(rotate_huge_by9199_u64, gen_random, 5 * 1024 * 1024, 9199);
rotate!(rotate_huge_by9199_bytes, gen_random_bytes, 5 * 1024 * 1024, 9199);
rotate!(rotate_huge_by9199_strings, gen_strings, 5 * 1024 * 1024, 9199);
rotate!(rotate_huge_by9199_big, gen_big_random, 5 * 1024 * 1024, 9199);
rotate!(rotate_huge_by1234577_u64, gen_random, 5 * 1024 * 1024, 1234577);
rotate!(rotate_huge_by1234577_bytes, gen_random_bytes, 5 * 1024 * 1024, 1234577);
rotate!(rotate_huge_by1234577_strings, gen_strings, 5 * 1024 * 1024, 1234577);
rotate!(rotate_huge_by1234577_big, gen_big_random, 5 * 1024 * 1024, 1234577);
rotate!(rotate_huge_half, gen_random, 5 * 1024 * 1024, 5 * 1024 * 1024 / 2);
rotate!(rotate_huge_half_plus_one, gen_random, 5 * 1024 * 1024, 5 * 1024 * 1024 / 2 + 1);

View File

@ -0,0 +1,349 @@
use test::{black_box, Bencher};
#[bench]
fn char_iterator(b: &mut Bencher) {
    let s = "ศไทย中华Việt Nam; Mary had a little lamb, Little lamb";
    b.iter(|| s.chars().count());
}
#[bench]
fn char_iterator_for(b: &mut Bencher) {
    let s = "ศไทย中华Việt Nam; Mary had a little lamb, Little lamb";
    b.iter(|| {
        for ch in s.chars() {
            black_box(ch);
        }
    });
}
#[bench]
fn char_iterator_ascii(b: &mut Bencher) {
    let s = "Mary had a little lamb, Little lamb
Mary had a little lamb, Little lamb
Mary had a little lamb, Little lamb
Mary had a little lamb, Little lamb
Mary had a little lamb, Little lamb
Mary had a little lamb, Little lamb";
    b.iter(|| s.chars().count());
}
#[bench]
fn char_iterator_rev(b: &mut Bencher) {
    let s = "ศไทย中华Việt Nam; Mary had a little lamb, Little lamb";
    b.iter(|| s.chars().rev().count());
}
#[bench]
fn char_iterator_rev_for(b: &mut Bencher) {
    let s = "ศไทย中华Việt Nam; Mary had a little lamb, Little lamb";
    b.iter(|| {
        for ch in s.chars().rev() {
            black_box(ch);
        }
    });
}
#[bench]
fn char_indicesator(b: &mut Bencher) {
    let s = "ศไทย中华Việt Nam; Mary had a little lamb, Little lamb";
    let len = s.chars().count();
    b.iter(|| assert_eq!(s.char_indices().count(), len));
}
#[bench]
fn char_indicesator_rev(b: &mut Bencher) {
    let s = "ศไทย中华Việt Nam; Mary had a little lamb, Little lamb";
    let len = s.chars().count();
    b.iter(|| assert_eq!(s.char_indices().rev().count(), len));
}
#[bench]
fn split_unicode_ascii(b: &mut Bencher) {
    let s = "ประเทศไทย中华Việt Namประเทศไทย中华Việt Nam";
    b.iter(|| assert_eq!(s.split('V').count(), 3));
}
#[bench]
fn split_ascii(b: &mut Bencher) {
    let s = "Mary had a little lamb, Little lamb, little-lamb.";
    let len = s.split(' ').count();
    b.iter(|| assert_eq!(s.split(' ').count(), len));
}
#[bench]
fn split_extern_fn(b: &mut Bencher) {
    let s = "Mary had a little lamb, Little lamb, little-lamb.";
    let len = s.split(' ').count();
    fn pred(c: char) -> bool {
        c == ' '
    }
    b.iter(|| assert_eq!(s.split(pred).count(), len));
}
#[bench]
fn split_closure(b: &mut Bencher) {
    let s = "Mary had a little lamb, Little lamb, little-lamb.";
    let len = s.split(' ').count();
    b.iter(|| assert_eq!(s.split(|c: char| c == ' ').count(), len));
}
#[bench]
fn split_slice(b: &mut Bencher) {
    let s = "Mary had a little lamb, Little lamb, little-lamb.";
    let len = s.split(' ').count();
    let c: &[char] = &[' '];
    b.iter(|| assert_eq!(s.split(c).count(), len));
}
#[bench]
fn bench_join(b: &mut Bencher) {
    let s = "ศไทย中华Việt Nam; Mary had a little lamb, Little lamb";
    let sep = "";
    let v = vec![s, s, s, s, s, s, s, s, s, s];
    b.iter(|| {
        assert_eq!(v.join(sep).len(), s.len() * 10 + sep.len() * 9);
    })
}
#[bench]
fn bench_contains_short_short(b: &mut Bencher) {
    let haystack = "Lorem ipsum dolor sit amet, consectetur adipiscing elit.";
    let needle = "sit";
    b.bytes = haystack.len() as u64;
    b.iter(|| {
        assert!(black_box(haystack).contains(black_box(needle)));
    })
}
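// A few KiB of Lorem-ipsum text; none of the needles used below occur in it,
// so these benches measure unsuccessful searches of various needle lengths.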
static LONG_HAYSTACK: &str = "\
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Suspendisse quis lorem sit amet dolor \
ultricies condimentum. Praesent iaculis purus elit, ac malesuada quam malesuada in. Duis sed orci \
eros. Suspendisse sit amet magna mollis, mollis nunc luctus, imperdiet mi. Integer fringilla non \
sem ut lacinia. Fusce varius tortor a risus porttitor hendrerit. Morbi mauris dui, ultricies nec \
tempus vel, gravida nec quam.
In est dui, tincidunt sed tempus interdum, adipiscing laoreet ante. Etiam tempor, tellus quis \
sagittis interdum, nulla purus mattis sem, quis auctor erat odio ac tellus. In nec nunc sit amet \
diam volutpat molestie at sed ipsum. Vestibulum laoreet consequat vulputate. Integer accumsan \
lorem ac dignissim placerat. Suspendisse convallis faucibus lorem. Aliquam erat volutpat. In vel \
eleifend felis. Sed suscipit nulla lorem, sed mollis est sollicitudin et. Nam fermentum egestas \
interdum. Curabitur ut nisi justo.
Sed sollicitudin ipsum tellus, ut condimentum leo eleifend nec. Cras ut velit ante. Phasellus nec \
mollis odio. Mauris molestie erat in arcu mattis, at aliquet dolor vehicula. Quisque malesuada \
lectus sit amet nisi pretium, a condimentum ipsum porta. Morbi at dapibus diam. Praesent egestas \
est sed risus elementum, eu rutrum metus ultrices. Etiam fermentum consectetur magna, id rutrum \
felis accumsan a. Aliquam ut pellentesque libero. Sed mi nulla, lobortis eu tortor id, suscipit \
ultricies neque. Morbi iaculis sit amet risus at iaculis. Praesent eget ligula quis turpis \
feugiat suscipit vel non arcu. Interdum et malesuada fames ac ante ipsum primis in faucibus. \
Aliquam sit amet placerat lorem.
Cras a lacus vel ante posuere elementum. Nunc est leo, bibendum ut facilisis vel, bibendum at \
mauris. Nullam adipiscing diam vel odio ornare, luctus adipiscing mi luctus. Nulla facilisi. \
Mauris adipiscing bibendum neque, quis adipiscing lectus tempus et. Sed feugiat erat et nisl \
lobortis pharetra. Donec vitae erat enim. Nullam sit amet felis et quam lacinia tincidunt. Aliquam \
suscipit dapibus urna. Sed volutpat urna in magna pulvinar volutpat. Phasellus nec tellus ac diam \
cursus accumsan.
Nam lectus enim, dapibus non nisi tempor, consectetur convallis massa. Maecenas eleifend dictum \
feugiat. Etiam quis mauris vel risus luctus mattis a a nunc. Nullam orci quam, imperdiet id \
vehicula in, porttitor ut nibh. Duis sagittis adipiscing nisl vitae congue. Donec mollis risus eu \
leo suscipit, varius porttitor nulla porta. Pellentesque ut sem nec nisi euismod vehicula. Nulla \
malesuada sollicitudin quam eu fermentum.";
#[bench]
fn bench_contains_2b_repeated_long(b: &mut Bencher) {
    let haystack = LONG_HAYSTACK;
    let needle = "::";
    b.bytes = haystack.len() as u64;
    b.iter(|| {
        assert!(!black_box(haystack).contains(black_box(needle)));
    })
}
#[bench]
fn bench_contains_short_long(b: &mut Bencher) {
    let haystack = LONG_HAYSTACK;
    let needle = "english";
    b.bytes = haystack.len() as u64;
    b.iter(|| {
        assert!(!black_box(haystack).contains(black_box(needle)));
    })
}
#[bench]
fn bench_contains_16b_in_long(b: &mut Bencher) {
    let haystack = LONG_HAYSTACK;
    let needle = "english language";
    b.bytes = haystack.len() as u64;
    b.iter(|| {
        assert!(!black_box(haystack).contains(black_box(needle)));
    })
}
#[bench]
fn bench_contains_32b_in_long(b: &mut Bencher) {
    let haystack = LONG_HAYSTACK;
    let needle = "the english language sample text";
    b.bytes = haystack.len() as u64;
    b.iter(|| {
        assert!(!black_box(haystack).contains(black_box(needle)));
    })
}
#[bench]
fn bench_contains_bad_naive(b: &mut Bencher) {
    let haystack = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
    let needle = "aaaaaaaab";
    b.bytes = haystack.len() as u64;
    b.iter(|| {
        assert!(!black_box(haystack).contains(black_box(needle)));
    })
}
#[bench]
fn bench_contains_bad_simd(b: &mut Bencher) {
    let haystack = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
    let needle = "aaabaaaa";
    b.bytes = haystack.len() as u64;
    b.iter(|| {
        assert!(!black_box(haystack).contains(black_box(needle)));
    })
}
#[bench]
fn bench_contains_equal(b: &mut Bencher) {
    let haystack = "Lorem ipsum dolor sit amet, consectetur adipiscing elit.";
    let needle = "Lorem ipsum dolor sit amet, consectetur adipiscing elit.";
    b.bytes = haystack.len() as u64;
    b.iter(|| {
        assert!(black_box(haystack).contains(black_box(needle)));
    })
}
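// `make_test_inner!` expands to one `#[bench]` fn that evaluates `$code`
// against a fixed string `$iters` times per iteration; `make_test!`
// instantiates it for several string shapes (short ASCII, short mixed-script,
// emoji, and a long Lorem-ipsum text).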
macro_rules! make_test_inner {
    ($s:ident, $code:expr, $name:ident, $str:expr, $iters:expr) => {
        #[bench]
        fn $name(bencher: &mut Bencher) {
            let mut $s = $str;
            black_box(&mut $s);
            bencher.iter(|| {
                for _ in 0..$iters {
                    black_box($code);
                }
            });
        }
    };
}
macro_rules! make_test {
    ($name:ident, $s:ident, $code:expr) => {
        make_test!($name, $s, $code, 1);
    };
    ($name:ident, $s:ident, $code:expr, $iters:expr) => {
        mod $name {
            use test::Bencher;
            use test::black_box;
            // Short strings: 65 bytes each
            make_test_inner!($s, $code, short_ascii,
                "Mary had a little lamb, Little lamb Mary had a littl lamb, lamb!", $iters);
            make_test_inner!($s, $code, short_mixed,
                "ศไทย中华Việt Nam; Mary had a little lamb, Little lam!", $iters);
            make_test_inner!($s, $code, short_pile_of_poo,
                "💩💩💩💩💩💩💩💩💩💩💩💩💩💩💩💩!", $iters);
            make_test_inner!($s, $code, long_lorem_ipsum, "\
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Suspendisse quis lorem sit amet dolor \
ultricies condimentum. Praesent iaculis purus elit, ac malesuada quam malesuada in. Duis sed orci \
eros. Suspendisse sit amet magna mollis, mollis nunc luctus, imperdiet mi. Integer fringilla non \
sem ut lacinia. Fusce varius tortor a risus porttitor hendrerit. Morbi mauris dui, ultricies nec \
tempus vel, gravida nec quam.
In est dui, tincidunt sed tempus interdum, adipiscing laoreet ante. Etiam tempor, tellus quis \
sagittis interdum, nulla purus mattis sem, quis auctor erat odio ac tellus. In nec nunc sit amet \
diam volutpat molestie at sed ipsum. Vestibulum laoreet consequat vulputate. Integer accumsan \
lorem ac dignissim placerat. Suspendisse convallis faucibus lorem. Aliquam erat volutpat. In vel \
eleifend felis. Sed suscipit nulla lorem, sed mollis est sollicitudin et. Nam fermentum egestas \
interdum. Curabitur ut nisi justo.
Sed sollicitudin ipsum tellus, ut condimentum leo eleifend nec. Cras ut velit ante. Phasellus nec \
mollis odio. Mauris molestie erat in arcu mattis, at aliquet dolor vehicula. Quisque malesuada \
lectus sit amet nisi pretium, a condimentum ipsum porta. Morbi at dapibus diam. Praesent egestas \
est sed risus elementum, eu rutrum metus ultrices. Etiam fermentum consectetur magna, id rutrum \
felis accumsan a. Aliquam ut pellentesque libero. Sed mi nulla, lobortis eu tortor id, suscipit \
ultricies neque. Morbi iaculis sit amet risus at iaculis. Praesent eget ligula quis turpis \
feugiat suscipit vel non arcu. Interdum et malesuada fames ac ante ipsum primis in faucibus. \
Aliquam sit amet placerat lorem.
Cras a lacus vel ante posuere elementum. Nunc est leo, bibendum ut facilisis vel, bibendum at \
mauris. Nullam adipiscing diam vel odio ornare, luctus adipiscing mi luctus. Nulla facilisi. \
Mauris adipiscing bibendum neque, quis adipiscing lectus tempus et. Sed feugiat erat et nisl \
lobortis pharetra. Donec vitae erat enim. Nullam sit amet felis et quam lacinia tincidunt. Aliquam \
suscipit dapibus urna. Sed volutpat urna in magna pulvinar volutpat. Phasellus nec tellus ac diam \
cursus accumsan.
Nam lectus enim, dapibus non nisi tempor, consectetur convallis massa. Maecenas eleifend dictum \
feugiat. Etiam quis mauris vel risus luctus mattis a a nunc. Nullam orci quam, imperdiet id \
vehicula in, porttitor ut nibh. Duis sagittis adipiscing nisl vitae congue. Donec mollis risus eu \
leo suscipit, varius porttitor nulla porta. Pellentesque ut sem nec nisi euismod vehicula. Nulla \
malesuada sollicitudin quam eu fermentum!", $iters);
        }
    }
}
make_test!(chars_count, s, s.chars().count());
make_test!(contains_bang_str, s, s.contains("!"));
make_test!(contains_bang_char, s, s.contains('!'));
make_test!(match_indices_a_str, s, s.match_indices("a").count());
make_test!(split_a_str, s, s.split("a").count());
make_test!(trim_ascii_char, s, { s.trim_matches(|c: char| c.is_ascii()) });
make_test!(trim_start_ascii_char, s, { s.trim_start_matches(|c: char| c.is_ascii()) });
make_test!(trim_end_ascii_char, s, { s.trim_end_matches(|c: char| c.is_ascii()) });
make_test!(find_underscore_char, s, s.find('_'));
make_test!(rfind_underscore_char, s, s.rfind('_'));
make_test!(find_underscore_str, s, s.find("_"));
make_test!(find_zzz_char, s, s.find('\u{1F4A4}'));
make_test!(rfind_zzz_char, s, s.rfind('\u{1F4A4}'));
make_test!(find_zzz_str, s, s.find("\u{1F4A4}"));
make_test!(starts_with_ascii_char, s, s.starts_with('/'), 1024);
make_test!(ends_with_ascii_char, s, s.ends_with('/'), 1024);
make_test!(starts_with_unichar, s, s.starts_with('\u{1F4A4}'), 1024);
make_test!(ends_with_unichar, s, s.ends_with('\u{1F4A4}'), 1024);
make_test!(starts_with_str, s, s.starts_with("💩💩💩💩💩💩💩💩💩💩💩💩💩💩💩💩"), 1024);
make_test!(ends_with_str, s, s.ends_with("💩💩💩💩💩💩💩💩💩💩💩💩💩💩💩💩"), 1024);
make_test!(split_space_char, s, s.split(' ').count());
make_test!(split_terminator_space_char, s, s.split_terminator(' ').count());
make_test!(splitn_space_char, s, s.splitn(10, ' ').count());
make_test!(rsplitn_space_char, s, s.rsplitn(10, ' ').count());
make_test!(split_space_str, s, s.split(" ").count());
make_test!(split_ad_str, s, s.split("ad").count());

View File

@ -0,0 +1,164 @@
use std::iter::repeat;
use test::{black_box, Bencher};
#[bench]
fn bench_with_capacity(b: &mut Bencher) {
    b.iter(|| String::with_capacity(100));
}
#[bench]
fn bench_push_str(b: &mut Bencher) {
    let s = "ศไทย中华Việt Nam; Mary had a little lamb, Little lamb";
    b.iter(|| {
        let mut r = String::new();
        r.push_str(s);
    });
}
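// The push benches below build a string one element at a time; `b.bytes` is
// set to the bytes produced per iteration so the variants are comparable.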
const REPETITIONS: u64 = 10_000;
#[bench]
fn bench_push_str_one_byte(b: &mut Bencher) {
    b.bytes = REPETITIONS;
    b.iter(|| {
        let mut r = String::new();
        for _ in 0..REPETITIONS {
            r.push_str("a")
        }
    });
}
#[bench]
fn bench_push_char_one_byte(b: &mut Bencher) {
    b.bytes = REPETITIONS;
    b.iter(|| {
        let mut r = String::new();
        for _ in 0..REPETITIONS {
            r.push('a')
        }
    });
}
#[bench]
fn bench_push_char_two_bytes(b: &mut Bencher) {
    b.bytes = REPETITIONS * 2;
    b.iter(|| {
        let mut r = String::new();
        for _ in 0..REPETITIONS {
            r.push('â')
        }
    });
}
#[bench]
fn from_utf8_lossy_100_ascii(b: &mut Bencher) {
    let s = b"Hello there, the quick brown fox jumped over the lazy dog! \
Lorem ipsum dolor sit amet, consectetur. ";
    assert_eq!(100, s.len());
    b.iter(|| {
        let _ = String::from_utf8_lossy(s);
    });
}
#[bench]
fn from_utf8_lossy_100_multibyte(b: &mut Bencher) {
    let s = "𐌀𐌖𐌋𐌄𐌑𐌉ปรدولة الكويتทศไทย中华𐍅𐌿𐌻𐍆𐌹𐌻𐌰".as_bytes();
    assert_eq!(100, s.len());
    b.iter(|| {
        let _ = String::from_utf8_lossy(s);
    });
}
#[bench]
fn from_utf8_lossy_invalid(b: &mut Bencher) {
    let s = b"Hello\xC0\x80 There\xE6\x83 Goodbye";
    b.iter(|| {
        let _ = String::from_utf8_lossy(s);
    });
}
#[bench]
fn from_utf8_lossy_100_invalid(b: &mut Bencher) {
    let s = repeat(0xf5).take(100).collect::<Vec<_>>();
    b.iter(|| {
        let _ = String::from_utf8_lossy(&s);
    });
}
#[bench]
fn bench_exact_size_shrink_to_fit(b: &mut Bencher) {
    let s = "Hello there, the quick brown fox jumped over the lazy dog! \
Lorem ipsum dolor sit amet, consectetur. ";
    // ensure our operation produces an exact-size string before we benchmark it
    let mut r = String::with_capacity(s.len());
    r.push_str(s);
    assert_eq!(r.len(), r.capacity());
    b.iter(|| {
        let mut r = String::with_capacity(s.len());
        r.push_str(s);
        r.shrink_to_fit();
        r
    });
}
#[bench]
fn bench_from_str(b: &mut Bencher) {
    let s = "Hello there, the quick brown fox jumped over the lazy dog! \
Lorem ipsum dolor sit amet, consectetur. ";
    b.iter(|| String::from(s))
}
#[bench]
fn bench_from(b: &mut Bencher) {
    let s = "Hello there, the quick brown fox jumped over the lazy dog! \
Lorem ipsum dolor sit amet, consectetur. ";
    b.iter(|| String::from(s))
}
#[bench]
fn bench_to_string(b: &mut Bencher) {
    let s = "Hello there, the quick brown fox jumped over the lazy dog! \
Lorem ipsum dolor sit amet, consectetur. ";
    b.iter(|| s.to_string())
}
#[bench]
fn bench_insert_char_short(b: &mut Bencher) {
    let s = "Hello, World!";
    b.iter(|| {
        let mut x = String::from(s);
        black_box(&mut x).insert(6, black_box(' '));
        x
    })
}
#[bench]
fn bench_insert_char_long(b: &mut Bencher) {
    let s = "Hello, World!";
    b.iter(|| {
        let mut x = String::from(s);
        black_box(&mut x).insert(6, black_box('❤'));
        x
    })
}
#[bench]
fn bench_insert_str_short(b: &mut Bencher) {
    let s = "Hello, World!";
    b.iter(|| {
        let mut x = String::from(s);
        black_box(&mut x).insert_str(6, black_box(" "));
        x
    })
}
#[bench]
fn bench_insert_str_long(b: &mut Bencher) {
    let s = "Hello, World!";
    b.iter(|| {
        let mut x = String::from(s);
        black_box(&mut x).insert_str(6, black_box(" rustic "));
        x
    })
}

View File

@ -0,0 +1,872 @@
use rand::RngCore;
use std::iter::repeat;
use test::{black_box, Bencher};
#[bench]
fn bench_new(b: &mut Bencher) {
b.iter(|| Vec::<u32>::new())
}
fn do_bench_with_capacity(b: &mut Bencher, src_len: usize) {
b.bytes = src_len as u64;
b.iter(|| Vec::<u32>::with_capacity(src_len))
}
#[bench]
fn bench_with_capacity_0000(b: &mut Bencher) {
do_bench_with_capacity(b, 0)
}
#[bench]
fn bench_with_capacity_0010(b: &mut Bencher) {
do_bench_with_capacity(b, 10)
}
#[bench]
fn bench_with_capacity_0100(b: &mut Bencher) {
do_bench_with_capacity(b, 100)
}
#[bench]
fn bench_with_capacity_1000(b: &mut Bencher) {
do_bench_with_capacity(b, 1000)
}
fn do_bench_from_fn(b: &mut Bencher, src_len: usize) {
b.bytes = src_len as u64;
b.iter(|| (0..src_len).collect::<Vec<_>>())
}
#[bench]
fn bench_from_fn_0000(b: &mut Bencher) {
do_bench_from_fn(b, 0)
}
#[bench]
fn bench_from_fn_0010(b: &mut Bencher) {
do_bench_from_fn(b, 10)
}
#[bench]
fn bench_from_fn_0100(b: &mut Bencher) {
do_bench_from_fn(b, 100)
}
#[bench]
fn bench_from_fn_1000(b: &mut Bencher) {
do_bench_from_fn(b, 1000)
}
fn do_bench_from_elem(b: &mut Bencher, src_len: usize) {
b.bytes = src_len as u64;
b.iter(|| repeat(5).take(src_len).collect::<Vec<usize>>())
}
#[bench]
fn bench_from_elem_0000(b: &mut Bencher) {
do_bench_from_elem(b, 0)
}
#[bench]
fn bench_from_elem_0010(b: &mut Bencher) {
do_bench_from_elem(b, 10)
}
#[bench]
fn bench_from_elem_0100(b: &mut Bencher) {
do_bench_from_elem(b, 100)
}
#[bench]
fn bench_from_elem_1000(b: &mut Bencher) {
do_bench_from_elem(b, 1000)
}
fn do_bench_from_slice(b: &mut Bencher, src_len: usize) {
let src: Vec<_> = FromIterator::from_iter(0..src_len);
b.bytes = src_len as u64;
b.iter(|| src.as_slice().to_vec());
}
#[bench]
fn bench_from_slice_0000(b: &mut Bencher) {
do_bench_from_slice(b, 0)
}
#[bench]
fn bench_from_slice_0010(b: &mut Bencher) {
do_bench_from_slice(b, 10)
}
#[bench]
fn bench_from_slice_0100(b: &mut Bencher) {
do_bench_from_slice(b, 100)
}
#[bench]
fn bench_from_slice_1000(b: &mut Bencher) {
do_bench_from_slice(b, 1000)
}
fn do_bench_from_iter(b: &mut Bencher, src_len: usize) {
let src: Vec<_> = FromIterator::from_iter(0..src_len);
b.bytes = src_len as u64;
b.iter(|| {
let dst: Vec<_> = FromIterator::from_iter(src.iter().cloned());
dst
});
}
#[bench]
fn bench_from_iter_0000(b: &mut Bencher) {
do_bench_from_iter(b, 0)
}
#[bench]
fn bench_from_iter_0010(b: &mut Bencher) {
do_bench_from_iter(b, 10)
}
#[bench]
fn bench_from_iter_0100(b: &mut Bencher) {
do_bench_from_iter(b, 100)
}
#[bench]
fn bench_from_iter_1000(b: &mut Bencher) {
do_bench_from_iter(b, 1000)
}
fn do_bench_extend(b: &mut Bencher, dst_len: usize, src_len: usize) {
let dst: Vec<_> = FromIterator::from_iter(0..dst_len);
let src: Vec<_> = FromIterator::from_iter(dst_len..dst_len + src_len);
b.bytes = src_len as u64;
b.iter(|| {
let mut dst = dst.clone();
dst.extend(src.clone());
dst
});
}
#[bench]
fn bench_extend_0000_0000(b: &mut Bencher) {
do_bench_extend(b, 0, 0)
}
#[bench]
fn bench_extend_0000_0010(b: &mut Bencher) {
do_bench_extend(b, 0, 10)
}
#[bench]
fn bench_extend_0000_0100(b: &mut Bencher) {
do_bench_extend(b, 0, 100)
}
#[bench]
fn bench_extend_0000_1000(b: &mut Bencher) {
do_bench_extend(b, 0, 1000)
}
#[bench]
fn bench_extend_0010_0010(b: &mut Bencher) {
do_bench_extend(b, 10, 10)
}
#[bench]
fn bench_extend_0100_0100(b: &mut Bencher) {
do_bench_extend(b, 100, 100)
}
#[bench]
fn bench_extend_1000_1000(b: &mut Bencher) {
do_bench_extend(b, 1000, 1000)
}
fn do_bench_extend_from_slice(b: &mut Bencher, dst_len: usize, src_len: usize) {
let dst: Vec<_> = FromIterator::from_iter(0..dst_len);
let src: Vec<_> = FromIterator::from_iter(dst_len..dst_len + src_len);
b.bytes = src_len as u64;
b.iter(|| {
let mut dst = dst.clone();
dst.extend_from_slice(&src);
dst
});
}
#[bench]
fn bench_extend_recycle(b: &mut Bencher) {
let mut data = vec![0; 1000];
b.iter(|| {
let tmp = std::mem::take(&mut data);
let mut to_extend = black_box(Vec::new());
to_extend.extend(tmp.into_iter());
data = black_box(to_extend);
});
black_box(data);
}
#[bench]
fn bench_extend_from_slice_0000_0000(b: &mut Bencher) {
do_bench_extend_from_slice(b, 0, 0)
}
#[bench]
fn bench_extend_from_slice_0000_0010(b: &mut Bencher) {
do_bench_extend_from_slice(b, 0, 10)
}
#[bench]
fn bench_extend_from_slice_0000_0100(b: &mut Bencher) {
do_bench_extend_from_slice(b, 0, 100)
}
#[bench]
fn bench_extend_from_slice_0000_1000(b: &mut Bencher) {
do_bench_extend_from_slice(b, 0, 1000)
}
#[bench]
fn bench_extend_from_slice_0010_0010(b: &mut Bencher) {
do_bench_extend_from_slice(b, 10, 10)
}
#[bench]
fn bench_extend_from_slice_0100_0100(b: &mut Bencher) {
do_bench_extend_from_slice(b, 100, 100)
}
#[bench]
fn bench_extend_from_slice_1000_1000(b: &mut Bencher) {
do_bench_extend_from_slice(b, 1000, 1000)
}
fn do_bench_clone(b: &mut Bencher, src_len: usize) {
let src: Vec<usize> = FromIterator::from_iter(0..src_len);
b.bytes = src_len as u64;
b.iter(|| src.clone());
}
#[bench]
fn bench_clone_0000(b: &mut Bencher) {
do_bench_clone(b, 0)
}
#[bench]
fn bench_clone_0010(b: &mut Bencher) {
do_bench_clone(b, 10)
}
#[bench]
fn bench_clone_0100(b: &mut Bencher) {
do_bench_clone(b, 100)
}
#[bench]
fn bench_clone_1000(b: &mut Bencher) {
do_bench_clone(b, 1000)
}
fn do_bench_clone_from(b: &mut Bencher, times: usize, dst_len: usize, src_len: usize) {
let dst: Vec<_> = FromIterator::from_iter(0..src_len);
let src: Vec<_> = FromIterator::from_iter(dst_len..dst_len + src_len);
b.bytes = (times * src_len) as u64;
b.iter(|| {
let mut dst = dst.clone();
for _ in 0..times {
dst.clone_from(&src);
dst = black_box(dst);
}
dst
});
}
#[bench]
fn bench_clone_from_01_0000_0000(b: &mut Bencher) {
do_bench_clone_from(b, 1, 0, 0)
}
#[bench]
fn bench_clone_from_01_0000_0010(b: &mut Bencher) {
do_bench_clone_from(b, 1, 0, 10)
}
#[bench]
fn bench_clone_from_01_0000_0100(b: &mut Bencher) {
do_bench_clone_from(b, 1, 0, 100)
}
#[bench]
fn bench_clone_from_01_0000_1000(b: &mut Bencher) {
do_bench_clone_from(b, 1, 0, 1000)
}
#[bench]
fn bench_clone_from_01_0010_0010(b: &mut Bencher) {
do_bench_clone_from(b, 1, 10, 10)
}
#[bench]
fn bench_clone_from_01_0100_0100(b: &mut Bencher) {
do_bench_clone_from(b, 1, 100, 100)
}
#[bench]
fn bench_clone_from_01_1000_1000(b: &mut Bencher) {
do_bench_clone_from(b, 1, 1000, 1000)
}
#[bench]
fn bench_clone_from_01_0010_0100(b: &mut Bencher) {
do_bench_clone_from(b, 1, 10, 100)
}
#[bench]
fn bench_clone_from_01_0100_1000(b: &mut Bencher) {
do_bench_clone_from(b, 1, 100, 1000)
}
#[bench]
fn bench_clone_from_01_0010_0000(b: &mut Bencher) {
do_bench_clone_from(b, 1, 10, 0)
}
#[bench]
fn bench_clone_from_01_0100_0010(b: &mut Bencher) {
do_bench_clone_from(b, 1, 100, 10)
}
#[bench]
fn bench_clone_from_01_1000_0100(b: &mut Bencher) {
do_bench_clone_from(b, 1, 1000, 100)
}
#[bench]
fn bench_clone_from_10_0000_0000(b: &mut Bencher) {
do_bench_clone_from(b, 10, 0, 0)
}
#[bench]
fn bench_clone_from_10_0000_0010(b: &mut Bencher) {
do_bench_clone_from(b, 10, 0, 10)
}
#[bench]
fn bench_clone_from_10_0000_0100(b: &mut Bencher) {
do_bench_clone_from(b, 10, 0, 100)
}
#[bench]
fn bench_clone_from_10_0000_1000(b: &mut Bencher) {
do_bench_clone_from(b, 10, 0, 1000)
}
#[bench]
fn bench_clone_from_10_0010_0010(b: &mut Bencher) {
do_bench_clone_from(b, 10, 10, 10)
}
#[bench]
fn bench_clone_from_10_0100_0100(b: &mut Bencher) {
do_bench_clone_from(b, 10, 100, 100)
}
#[bench]
fn bench_clone_from_10_1000_1000(b: &mut Bencher) {
do_bench_clone_from(b, 10, 1000, 1000)
}
#[bench]
fn bench_clone_from_10_0010_0100(b: &mut Bencher) {
do_bench_clone_from(b, 10, 10, 100)
}
#[bench]
fn bench_clone_from_10_0100_1000(b: &mut Bencher) {
do_bench_clone_from(b, 10, 100, 1000)
}
#[bench]
fn bench_clone_from_10_0010_0000(b: &mut Bencher) {
do_bench_clone_from(b, 10, 10, 0)
}
#[bench]
fn bench_clone_from_10_0100_0010(b: &mut Bencher) {
do_bench_clone_from(b, 10, 100, 10)
}
#[bench]
fn bench_clone_from_10_1000_0100(b: &mut Bencher) {
do_bench_clone_from(b, 10, 1000, 100)
}
macro_rules! bench_in_place {
($($fname:ident, $type:ty, $count:expr, $init:expr);*) => {
$(
#[bench]
fn $fname(b: &mut Bencher) {
b.iter(|| {
let src: Vec<$type> = black_box(vec![$init; $count]);
src.into_iter()
.enumerate()
.map(|(idx, e)| idx as $type ^ e)
.collect::<Vec<$type>>()
});
}
)+
};
}
bench_in_place![
bench_in_place_xxu8_0010_i0, u8, 10, 0;
bench_in_place_xxu8_0100_i0, u8, 100, 0;
bench_in_place_xxu8_1000_i0, u8, 1000, 0;
bench_in_place_xxu8_0010_i1, u8, 10, 1;
bench_in_place_xxu8_0100_i1, u8, 100, 1;
bench_in_place_xxu8_1000_i1, u8, 1000, 1;
bench_in_place_xu32_0010_i0, u32, 10, 0;
bench_in_place_xu32_0100_i0, u32, 100, 0;
bench_in_place_xu32_1000_i0, u32, 1000, 0;
bench_in_place_xu32_0010_i1, u32, 10, 1;
bench_in_place_xu32_0100_i1, u32, 100, 1;
bench_in_place_xu32_1000_i1, u32, 1000, 1;
bench_in_place_u128_0010_i0, u128, 10, 0;
bench_in_place_u128_0100_i0, u128, 100, 0;
bench_in_place_u128_1000_i0, u128, 1000, 0;
bench_in_place_u128_0010_i1, u128, 10, 1;
bench_in_place_u128_0100_i1, u128, 100, 1;
bench_in_place_u128_1000_i1, u128, 1000, 1
];
#[bench]
fn bench_in_place_recycle(b: &mut Bencher) {
let mut data = vec![0; 1000];
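// `mem::take` below lets every iteration recycle the previous Vec's allocation,
// so the benchmark measures the in-place collect itself rather than allocator traffic.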
b.iter(|| {
let tmp = std::mem::take(&mut data);
data = black_box(
tmp.into_iter()
.enumerate()
.map(|(idx, e)| idx.wrapping_add(e))
.fuse()
.collect::<Vec<usize>>(),
);
});
}
#[bench]
fn bench_in_place_zip_recycle(b: &mut Bencher) {
let mut data = vec![0u8; 1000];
let mut rng = crate::bench_rng();
let mut subst = vec![0u8; 1000];
rng.fill_bytes(&mut subst[..]);
b.iter(|| {
let tmp = std::mem::take(&mut data);
let mangled = tmp
.into_iter()
.zip(subst.iter().copied())
.enumerate()
.map(|(i, (d, s))| d.wrapping_add(i as u8) ^ s)
.collect::<Vec<_>>();
data = black_box(mangled);
});
}
#[bench]
fn bench_in_place_zip_iter_mut(b: &mut Bencher) {
let mut data = vec![0u8; 256];
let mut rng = crate::bench_rng();
let mut subst = vec![0u8; 1000];
rng.fill_bytes(&mut subst[..]);
b.iter(|| {
data.iter_mut().enumerate().for_each(|(i, d)| {
*d = d.wrapping_add(i as u8) ^ subst[i];
});
});
black_box(data);
}
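// Note: `vec_cast` relies on `T` and `U` having identical size and layout
// (here `u32` <-> `i32`); `transmute_copy` itself performs no such check.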
pub fn vec_cast<T, U>(input: Vec<T>) -> Vec<U> {
input.into_iter().map(|e| unsafe { std::mem::transmute_copy(&e) }).collect()
}
#[bench]
fn bench_transmute(b: &mut Bencher) {
let mut vec = vec![10u32; 100];
b.bytes = 800; // 2 casts x 4 bytes x 100
b.iter(|| {
let v = std::mem::take(&mut vec);
let v = black_box(vec_cast::<u32, i32>(v));
let v = black_box(vec_cast::<i32, u32>(v));
vec = v;
});
}
#[derive(Clone)]
struct Droppable(usize);
impl Drop for Droppable {
fn drop(&mut self) {
black_box(self);
}
}
#[bench]
fn bench_in_place_collect_droppable(b: &mut Bencher) {
let v: Vec<Droppable> = std::iter::repeat_with(|| Droppable(0)).take(1000).collect();
b.iter(|| {
v.clone()
.into_iter()
.skip(100)
.enumerate()
.map(|(i, e)| Droppable(i ^ e.0))
.collect::<Vec<_>>()
})
}
const LEN: usize = 16384;
#[bench]
fn bench_chain_collect(b: &mut Bencher) {
let data = black_box([0; LEN]);
b.iter(|| data.iter().cloned().chain([1]).collect::<Vec<_>>());
}
#[bench]
fn bench_chain_chain_collect(b: &mut Bencher) {
let data = black_box([0; LEN]);
b.iter(|| data.iter().cloned().chain([1]).chain([2]).collect::<Vec<_>>());
}
#[bench]
fn bench_nest_chain_chain_collect(b: &mut Bencher) {
let data = black_box([0; LEN]);
b.iter(|| {
data.iter().cloned().chain([1].iter().chain([2].iter()).cloned()).collect::<Vec<_>>()
});
}
#[bench]
fn bench_range_map_collect(b: &mut Bencher) {
b.iter(|| (0..LEN).map(|_| u32::default()).collect::<Vec<_>>());
}
#[bench]
fn bench_chain_extend_ref(b: &mut Bencher) {
let data = black_box([0; LEN]);
b.iter(|| {
let mut v = Vec::<u32>::with_capacity(data.len() + 1);
v.extend(data.iter().chain([1].iter()));
v
});
}
#[bench]
fn bench_chain_extend_value(b: &mut Bencher) {
let data = black_box([0; LEN]);
b.iter(|| {
let mut v = Vec::<u32>::with_capacity(data.len() + 1);
v.extend(data.iter().cloned().chain(Some(1)));
v
});
}
#[bench]
fn bench_rev_1(b: &mut Bencher) {
let data = black_box([0; LEN]);
b.iter(|| {
let mut v = Vec::<u32>::new();
v.extend(data.iter().rev());
v
});
}
#[bench]
fn bench_rev_2(b: &mut Bencher) {
let data = black_box([0; LEN]);
b.iter(|| {
let mut v = Vec::<u32>::with_capacity(data.len());
v.extend(data.iter().rev());
v
});
}
#[bench]
fn bench_map_regular(b: &mut Bencher) {
let data = black_box([(0, 0); LEN]);
b.iter(|| {
let mut v = Vec::<u32>::new();
v.extend(data.iter().map(|t| t.1));
v
});
}
#[bench]
fn bench_map_fast(b: &mut Bencher) {
let data = black_box([(0, 0); LEN]);
b.iter(|| {
let mut result: Vec<u32> = Vec::with_capacity(data.len());
for i in 0..data.len() {
unsafe {
*result.as_mut_ptr().add(i) = data[i].0;
result.set_len(i);
}
}
result
});
}
fn random_sorted_fill(mut seed: u32, buf: &mut [u32]) {
let mask = if buf.len() < 8192 {
0xFF
} else if buf.len() < 200_000 {
0xFFFF
} else {
0xFFFF_FFFF
};
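// xorshift32 step: cheap, deterministic pseudo-randomness; the mask chosen above
// bounds the value range, which controls how many duplicates survive the sort below.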
for item in buf.iter_mut() {
seed ^= seed << 13;
seed ^= seed >> 17;
seed ^= seed << 5;
*item = seed & mask;
}
buf.sort();
}
// Measures performance of the slice dedup impl.
// This was used to justify a separate dedup implementation for Vec.
// This algorithm was used for Vec prior to Rust 1.52.
fn bench_dedup_slice_truncate(b: &mut Bencher, sz: usize) {
let mut template = vec![0u32; sz];
b.bytes = std::mem::size_of_val(template.as_slice()) as u64;
random_sorted_fill(0x43, &mut template);
let mut vec = template.clone();
b.iter(|| {
let vec = black_box(&mut vec);
let len = {
let (dedup, _) = vec.partition_dedup();
dedup.len()
};
vec.truncate(len);
black_box(vec.first());
let vec = black_box(vec);
vec.clear();
vec.extend_from_slice(&template);
});
}
// Measures performance of Vec::dedup on random data.
fn bench_vec_dedup_random(b: &mut Bencher, sz: usize) {
let mut template = vec![0u32; sz];
b.bytes = std::mem::size_of_val(template.as_slice()) as u64;
random_sorted_fill(0x43, &mut template);
let mut vec = template.clone();
b.iter(|| {
let vec = black_box(&mut vec);
vec.dedup();
black_box(vec.first());
let vec = black_box(vec);
vec.clear();
vec.extend_from_slice(&template);
});
}
// Measures performance of Vec::dedup when no items are removed
fn bench_vec_dedup_none(b: &mut Bencher, sz: usize) {
let mut template = vec![0u32; sz];
b.bytes = std::mem::size_of_val(template.as_slice()) as u64;
template.chunks_exact_mut(2).for_each(|w| {
w[0] = black_box(0);
w[1] = black_box(5);
});
let mut vec = template.clone();
b.iter(|| {
let vec = black_box(&mut vec);
vec.dedup();
black_box(vec.first());
// Unlike the other `dedup` benches,
// this one doesn't reinitialize the vec,
// because we are measuring how efficient `dedup` is
// when no memory is written.
});
}
// Measures performance of Vec::dedup when all items are removed
fn bench_vec_dedup_all(b: &mut Bencher, sz: usize) {
let mut template = vec![0u32; sz];
b.bytes = std::mem::size_of_val(template.as_slice()) as u64;
template.iter_mut().for_each(|w| {
*w = black_box(0);
});
let mut vec = template.clone();
b.iter(|| {
let vec = black_box(&mut vec);
vec.dedup();
black_box(vec.first());
let vec = black_box(vec);
vec.clear();
vec.extend_from_slice(&template);
});
}
#[bench]
fn bench_dedup_slice_truncate_100(b: &mut Bencher) {
bench_dedup_slice_truncate(b, 100);
}
#[bench]
fn bench_dedup_random_100(b: &mut Bencher) {
bench_vec_dedup_random(b, 100);
}
#[bench]
fn bench_dedup_none_100(b: &mut Bencher) {
bench_vec_dedup_none(b, 100);
}
#[bench]
fn bench_dedup_all_100(b: &mut Bencher) {
bench_vec_dedup_all(b, 100);
}
#[bench]
fn bench_dedup_slice_truncate_1000(b: &mut Bencher) {
bench_dedup_slice_truncate(b, 1000);
}
#[bench]
fn bench_dedup_random_1000(b: &mut Bencher) {
bench_vec_dedup_random(b, 1000);
}
#[bench]
fn bench_dedup_none_1000(b: &mut Bencher) {
bench_vec_dedup_none(b, 1000);
}
#[bench]
fn bench_dedup_all_1000(b: &mut Bencher) {
bench_vec_dedup_all(b, 1000);
}
#[bench]
fn bench_dedup_slice_truncate_10000(b: &mut Bencher) {
bench_dedup_slice_truncate(b, 10000);
}
#[bench]
fn bench_dedup_random_10000(b: &mut Bencher) {
bench_vec_dedup_random(b, 10000);
}
#[bench]
fn bench_dedup_none_10000(b: &mut Bencher) {
bench_vec_dedup_none(b, 10000);
}
#[bench]
fn bench_dedup_all_10000(b: &mut Bencher) {
bench_vec_dedup_all(b, 10000);
}
#[bench]
fn bench_dedup_slice_truncate_100000(b: &mut Bencher) {
bench_dedup_slice_truncate(b, 100000);
}
#[bench]
fn bench_dedup_random_100000(b: &mut Bencher) {
bench_vec_dedup_random(b, 100000);
}
#[bench]
fn bench_dedup_none_100000(b: &mut Bencher) {
bench_vec_dedup_none(b, 100000);
}
#[bench]
fn bench_dedup_all_100000(b: &mut Bencher) {
bench_vec_dedup_all(b, 100000);
}
#[bench]
fn bench_flat_map_collect(b: &mut Bencher) {
let v = vec![777u32; 500000];
b.iter(|| v.iter().flat_map(|color| color.rotate_left(8).to_be_bytes()).collect::<Vec<_>>());
}
/// Reference benchmark that `retain` has to compete with.
#[bench]
fn bench_retain_iter_100000(b: &mut Bencher) {
let mut v = Vec::with_capacity(100000);
b.iter(|| {
let mut tmp = std::mem::take(&mut v);
tmp.clear();
tmp.extend(black_box(1..=100000));
v = tmp.into_iter().filter(|x| x & 1 == 0).collect();
});
}
#[bench]
fn bench_retain_100000(b: &mut Bencher) {
let mut v = Vec::with_capacity(100000);
b.iter(|| {
v.clear();
v.extend(black_box(1..=100000));
v.retain(|x| x & 1 == 0)
});
}
#[bench]
fn bench_retain_whole_100000(b: &mut Bencher) {
let mut v = black_box(vec![826u32; 100000]);
b.iter(|| v.retain(|x| *x == 826u32));
}
#[bench]
fn bench_next_chunk(b: &mut Bencher) {
let v = vec![13u8; 2048];
b.iter(|| {
const CHUNK: usize = 8;
let mut sum = [0u32; CHUNK];
let mut iter = black_box(v.clone()).into_iter();
while let Ok(chunk) = iter.next_chunk::<CHUNK>() {
for i in 0..CHUNK {
sum[i] += chunk[i] as u32;
}
}
sum
})
}

View File

@ -0,0 +1,268 @@
use std::{
collections::{vec_deque, VecDeque},
mem,
};
use test::{black_box, Bencher};
#[bench]
fn bench_new(b: &mut Bencher) {
b.iter(|| {
let ring: VecDeque<i32> = VecDeque::new();
black_box(ring);
})
}
#[bench]
fn bench_grow_1025(b: &mut Bencher) {
b.iter(|| {
let mut deq = VecDeque::new();
for i in 0..1025 {
deq.push_front(i);
}
black_box(deq);
})
}
#[bench]
fn bench_iter_1000(b: &mut Bencher) {
let ring: VecDeque<_> = (0..1000).collect();
b.iter(|| {
let mut sum = 0;
for &i in &ring {
sum += i;
}
black_box(sum);
})
}
#[bench]
fn bench_mut_iter_1000(b: &mut Bencher) {
let mut ring: VecDeque<_> = (0..1000).collect();
b.iter(|| {
let mut sum = 0;
for i in &mut ring {
sum += *i;
}
black_box(sum);
})
}
#[bench]
fn bench_try_fold(b: &mut Bencher) {
let ring: VecDeque<_> = (0..1000).collect();
b.iter(|| black_box(ring.iter().try_fold(0, |a, b| Some(a + b))))
}
/// Does the memory bookkeeping to reuse the Vec's buffer between iterations.
/// `setup` must not modify its argument's length or capacity. `g` must not move out of its argument.
fn into_iter_helper<
T: Copy,
F: FnOnce(&mut VecDeque<T>),
G: FnOnce(&mut vec_deque::IntoIter<T>),
>(
v: &mut Vec<T>,
setup: F,
g: G,
) {
let ptr = v.as_mut_ptr();
let len = v.len();
// ensure that the vec is full, to make sure that any wrapping from the deque doesn't
// access uninitialized memory.
assert_eq!(v.len(), v.capacity());
let mut deque = VecDeque::from(mem::take(v));
setup(&mut deque);
let mut it = deque.into_iter();
g(&mut it);
mem::forget(it);
// SAFETY: the provided functions are not allowed to modify the allocation, so the buffer is still alive.
// len and capacity are accurate due to the above assertion.
// All the elements in the buffer are still valid, because of `T: Copy` which implies `T: !Drop`.
mem::forget(mem::replace(v, unsafe { Vec::from_raw_parts(ptr, len, len) }));
}
#[bench]
fn bench_into_iter(b: &mut Bencher) {
let len = 1024;
// we reuse this allocation for every run
let mut vec: Vec<usize> = (0..len).collect();
vec.shrink_to_fit();
b.iter(|| {
let mut sum = 0;
into_iter_helper(
&mut vec,
|_| {},
|it| {
for i in it {
sum += i;
}
},
);
black_box(sum);
let mut sum = 0;
// rotating a full deque doesn't move any memory.
into_iter_helper(
&mut vec,
|d| d.rotate_left(len / 2),
|it| {
for i in it {
sum += i;
}
},
);
black_box(sum);
});
}
#[bench]
fn bench_into_iter_fold(b: &mut Bencher) {
let len = 1024;
// because `fold` takes ownership of the iterator,
// we can't prevent it from dropping the memory,
// so we have to bite the bullet and reallocate
// for every iteration.
b.iter(|| {
let deque: VecDeque<usize> = (0..len).collect();
assert_eq!(deque.len(), deque.capacity());
let sum = deque.into_iter().fold(0, |a, b| a + b);
black_box(sum);
// rotating a full deque doesn't move any memory.
let mut deque: VecDeque<usize> = (0..len).collect();
assert_eq!(deque.len(), deque.capacity());
deque.rotate_left(len / 2);
let sum = deque.into_iter().fold(0, |a, b| a + b);
black_box(sum);
});
}
#[bench]
fn bench_into_iter_try_fold(b: &mut Bencher) {
let len = 1024;
// we reuse this allocation for every run
let mut vec: Vec<usize> = (0..len).collect();
vec.shrink_to_fit();
// Iterator::any uses Iterator::try_fold under the hood
b.iter(|| {
let mut b = false;
into_iter_helper(&mut vec, |_| {}, |it| b = it.any(|i| i == len - 1));
black_box(b);
into_iter_helper(&mut vec, |d| d.rotate_left(len / 2), |it| b = it.any(|i| i == len - 1));
black_box(b);
});
}
#[bench]
fn bench_into_iter_next_chunk(b: &mut Bencher) {
let len = 1024;
// we reuse this allocation for every run
let mut vec: Vec<usize> = (0..len).collect();
vec.shrink_to_fit();
b.iter(|| {
let mut buf = [0; 64];
into_iter_helper(
&mut vec,
|_| {},
|it| {
while let Ok(a) = it.next_chunk() {
buf = a;
}
},
);
black_box(buf);
into_iter_helper(
&mut vec,
|d| d.rotate_left(len / 2),
|it| {
while let Ok(a) = it.next_chunk() {
buf = a;
}
},
);
black_box(buf);
});
}
#[bench]
fn bench_from_array_1000(b: &mut Bencher) {
const N: usize = 1000;
let mut array: [usize; N] = [0; N];
for i in 0..N {
array[i] = i;
}
b.iter(|| {
let deq: VecDeque<_> = array.into();
black_box(deq);
})
}
#[bench]
fn bench_extend_bytes(b: &mut Bencher) {
let mut ring: VecDeque<u8> = VecDeque::with_capacity(1000);
let input: &[u8] = &[128; 512];
b.iter(|| {
ring.clear();
ring.extend(black_box(input));
});
}
#[bench]
fn bench_extend_vec(b: &mut Bencher) {
let mut ring: VecDeque<u8> = VecDeque::with_capacity(1000);
let input = vec![128; 512];
b.iter(|| {
ring.clear();
let input = input.clone();
ring.extend(black_box(input));
});
}
#[bench]
fn bench_extend_trustedlen(b: &mut Bencher) {
let mut ring: VecDeque<u16> = VecDeque::with_capacity(1000);
b.iter(|| {
ring.clear();
ring.extend(black_box(0..512));
});
}
#[bench]
fn bench_extend_chained_trustedlen(b: &mut Bencher) {
let mut ring: VecDeque<u16> = VecDeque::with_capacity(1000);
b.iter(|| {
ring.clear();
ring.extend(black_box((0..256).chain(768..1024)));
});
}
#[bench]
fn bench_extend_chained_bytes(b: &mut Bencher) {
let mut ring: VecDeque<u16> = VecDeque::with_capacity(1000);
let input1: &[u16] = &[128; 256];
let input2: &[u16] = &[255; 256];
b.iter(|| {
ring.clear();
ring.extend(black_box(input1.iter().chain(input2.iter())));
});
}

View File

@ -0,0 +1,39 @@
use std::{collections::VecDeque, time::Instant};
const VECDEQUE_LEN: i32 = 100000;
const WARMUP_N: usize = 100;
const BENCH_N: usize = 1000;
fn main() {
if cfg!(miri) {
// Don't benchmark Miri...
// (Due to bootstrap quirks, this gets picked up by `x.py miri library/alloc --no-doc`.)
return;
}
let a: VecDeque<i32> = (0..VECDEQUE_LEN).collect();
let b: VecDeque<i32> = (0..VECDEQUE_LEN).collect();
for _ in 0..WARMUP_N {
let mut c = a.clone();
let mut d = b.clone();
c.append(&mut d);
}
let mut durations = Vec::with_capacity(BENCH_N);
for _ in 0..BENCH_N {
let mut c = a.clone();
let mut d = b.clone();
let before = Instant::now();
c.append(&mut d);
let after = Instant::now();
durations.push(after.duration_since(before));
}
let l = durations.len();
durations.sort();
assert!(BENCH_N % 2 == 0);
let median = (durations[(l / 2) - 1] + durations[l / 2]) / 2;
println!("\ncustom-bench vec_deque_append {:?} ns/iter\n", median.as_nanos());
}

View File

@ -0,0 +1,452 @@
//! Memory allocation APIs
#![stable(feature = "alloc_module", since = "1.28.0")]
#[cfg(not(test))]
use core::hint;
#[cfg(not(test))]
use core::ptr::{self, NonNull};
#[stable(feature = "alloc_module", since = "1.28.0")]
#[doc(inline)]
pub use core::alloc::*;
#[cfg(test)]
mod tests;
extern "Rust" {
// These are the magic symbols to call the global allocator. rustc generates
// them to call `__rg_alloc` etc. if there is a `#[global_allocator]` attribute
// (the code expanding that attribute macro generates those functions), or to call
// the default implementations in std (`__rdl_alloc` etc. in `library/std/src/alloc.rs`)
// otherwise.
// The rustc fork of LLVM 14 and earlier also special-cases these function names to be able to optimize them
// like `malloc`, `realloc`, and `free`, respectively.
#[rustc_allocator]
#[rustc_nounwind]
fn __rust_alloc(size: usize, align: usize) -> *mut u8;
#[rustc_deallocator]
#[rustc_nounwind]
fn __rust_dealloc(ptr: *mut u8, size: usize, align: usize);
#[rustc_reallocator]
#[rustc_nounwind]
fn __rust_realloc(ptr: *mut u8, old_size: usize, align: usize, new_size: usize) -> *mut u8;
#[rustc_allocator_zeroed]
#[rustc_nounwind]
fn __rust_alloc_zeroed(size: usize, align: usize) -> *mut u8;
static __rust_no_alloc_shim_is_unstable: u8;
}
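// For context, a minimal sketch of the user-facing side of this mechanism
// (an illustrative allocator, not part of this module). A crate registers an
// allocator with `#[global_allocator]`, and rustc generates the symbols above
// to forward to it:
//
//     use std::alloc::{GlobalAlloc, Layout, System};
//
//     struct Forwarding;
//
//     unsafe impl GlobalAlloc for Forwarding {
//         unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
//             System.alloc(layout)
//         }
//         unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
//             System.dealloc(ptr, layout)
//         }
//     }
//
//     #[global_allocator]
//     static GLOBAL: Forwarding = Forwarding;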
/// The global memory allocator.
///
/// This type implements the [`Allocator`] trait by forwarding calls
/// to the allocator registered with the `#[global_allocator]` attribute
/// if there is one, or the `std` crate's default.
///
/// Note: while this type is unstable, the functionality it provides can be
/// accessed through the [free functions in `alloc`](self#functions).
#[unstable(feature = "allocator_api", issue = "32838")]
#[derive(Copy, Clone, Default, Debug)]
#[cfg(not(test))]
// the compiler needs to know when a Box uses the global allocator vs a custom one
#[lang = "global_alloc_ty"]
pub struct Global;
#[cfg(test)]
pub use std::alloc::Global;
/// Allocate memory with the global allocator.
///
/// This function forwards calls to the [`GlobalAlloc::alloc`] method
/// of the allocator registered with the `#[global_allocator]` attribute
/// if there is one, or the `std` crate's default.
///
/// This function is expected to be deprecated in favor of the `alloc` method
/// of the [`Global`] type when it and the [`Allocator`] trait become stable.
///
/// # Safety
///
/// See [`GlobalAlloc::alloc`].
///
/// # Examples
///
/// ```
/// use std::alloc::{alloc, dealloc, handle_alloc_error, Layout};
///
/// unsafe {
/// let layout = Layout::new::<u16>();
/// let ptr = alloc(layout);
/// if ptr.is_null() {
/// handle_alloc_error(layout);
/// }
///
/// *(ptr as *mut u16) = 42;
/// assert_eq!(*(ptr as *mut u16), 42);
///
/// dealloc(ptr, layout);
/// }
/// ```
#[stable(feature = "global_alloc", since = "1.28.0")]
#[must_use = "losing the pointer will leak memory"]
#[inline]
pub unsafe fn alloc(layout: Layout) -> *mut u8 {
unsafe {
// Make sure we don't accidentally allow omitting the allocator shim in
// stable code until it is actually stabilized.
core::ptr::read_volatile(&__rust_no_alloc_shim_is_unstable);
__rust_alloc(layout.size(), layout.align())
}
}
/// Deallocate memory with the global allocator.
///
/// This function forwards calls to the [`GlobalAlloc::dealloc`] method
/// of the allocator registered with the `#[global_allocator]` attribute
/// if there is one, or the `std` crate's default.
///
/// This function is expected to be deprecated in favor of the `dealloc` method
/// of the [`Global`] type when it and the [`Allocator`] trait become stable.
///
/// # Safety
///
/// See [`GlobalAlloc::dealloc`].
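///
/// # Examples
///
/// A minimal allocate/deallocate round trip; `dealloc` must be called with the
/// same layout that the block was allocated with:
///
/// ```
/// use std::alloc::{alloc, dealloc, handle_alloc_error, Layout};
///
/// unsafe {
///     let layout = Layout::new::<u32>();
///     let ptr = alloc(layout);
///     if ptr.is_null() {
///         handle_alloc_error(layout);
///     }
///     dealloc(ptr, layout);
/// }
/// ```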
#[stable(feature = "global_alloc", since = "1.28.0")]
#[inline]
pub unsafe fn dealloc(ptr: *mut u8, layout: Layout) {
unsafe { __rust_dealloc(ptr, layout.size(), layout.align()) }
}
/// Reallocate memory with the global allocator.
///
/// This function forwards calls to the [`GlobalAlloc::realloc`] method
/// of the allocator registered with the `#[global_allocator]` attribute
/// if there is one, or the `std` crate's default.
///
/// This function is expected to be deprecated in favor of the `realloc` method
/// of the [`Global`] type when it and the [`Allocator`] trait become stable.
///
/// # Safety
///
/// See [`GlobalAlloc::realloc`].
#[stable(feature = "global_alloc", since = "1.28.0")]
#[must_use = "losing the pointer will leak memory"]
#[inline]
pub unsafe fn realloc(ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
unsafe { __rust_realloc(ptr, layout.size(), layout.align(), new_size) }
}
/// Allocate zero-initialized memory with the global allocator.
///
/// This function forwards calls to the [`GlobalAlloc::alloc_zeroed`] method
/// of the allocator registered with the `#[global_allocator]` attribute
/// if there is one, or the `std` crate's default.
///
/// This function is expected to be deprecated in favor of the `alloc_zeroed` method
/// of the [`Global`] type when it and the [`Allocator`] trait become stable.
///
/// # Safety
///
/// See [`GlobalAlloc::alloc_zeroed`].
///
/// # Examples
///
/// ```
/// use std::alloc::{alloc_zeroed, dealloc, Layout};
///
/// unsafe {
/// let layout = Layout::new::<u16>();
/// let ptr = alloc_zeroed(layout);
///
/// assert_eq!(*(ptr as *mut u16), 0);
///
/// dealloc(ptr, layout);
/// }
/// ```
#[stable(feature = "global_alloc", since = "1.28.0")]
#[must_use = "losing the pointer will leak memory"]
#[inline]
pub unsafe fn alloc_zeroed(layout: Layout) -> *mut u8 {
unsafe { __rust_alloc_zeroed(layout.size(), layout.align()) }
}
#[cfg(not(test))]
impl Global {
#[inline]
fn alloc_impl(&self, layout: Layout, zeroed: bool) -> Result<NonNull<[u8]>, AllocError> {
match layout.size() {
0 => Ok(NonNull::slice_from_raw_parts(layout.dangling(), 0)),
// SAFETY: `layout` is non-zero in size,
size => unsafe {
let raw_ptr = if zeroed { alloc_zeroed(layout) } else { alloc(layout) };
let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
Ok(NonNull::slice_from_raw_parts(ptr, size))
},
}
}
// SAFETY: Same as `Allocator::grow`
#[inline]
unsafe fn grow_impl(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
zeroed: bool,
) -> Result<NonNull<[u8]>, AllocError> {
debug_assert!(
new_layout.size() >= old_layout.size(),
"`new_layout.size()` must be greater than or equal to `old_layout.size()`"
);
match old_layout.size() {
0 => self.alloc_impl(new_layout, zeroed),
// SAFETY: `new_size` is non-zero, because it is greater than or equal to `old_size`,
// which is non-zero in this branch. Other conditions must be upheld by the caller.
old_size if old_layout.align() == new_layout.align() => unsafe {
let new_size = new_layout.size();
// `realloc` probably checks for `new_size >= old_layout.size()` or something similar.
hint::assert_unchecked(new_size >= old_layout.size());
let raw_ptr = realloc(ptr.as_ptr(), old_layout, new_size);
let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
if zeroed {
raw_ptr.add(old_size).write_bytes(0, new_size - old_size);
}
Ok(NonNull::slice_from_raw_parts(ptr, new_size))
},
// SAFETY: because `new_layout.size()` must be greater than or equal to `old_size`,
// both the old and new memory allocation are valid for reads and writes for `old_size`
// bytes. Also, because the old allocation wasn't yet deallocated, it cannot overlap
// `new_ptr`. Thus, the call to `copy_nonoverlapping` is safe. The safety contract
// for `dealloc` must be upheld by the caller.
old_size => unsafe {
let new_ptr = self.alloc_impl(new_layout, zeroed)?;
ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_mut_ptr(), old_size);
self.deallocate(ptr, old_layout);
Ok(new_ptr)
},
}
}
}
#[unstable(feature = "allocator_api", issue = "32838")]
#[cfg(not(test))]
unsafe impl Allocator for Global {
#[inline]
fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
self.alloc_impl(layout, false)
}
#[inline]
fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
self.alloc_impl(layout, true)
}
#[inline]
unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
if layout.size() != 0 {
// SAFETY: `layout` is non-zero in size,
// other conditions must be upheld by the caller
unsafe { dealloc(ptr.as_ptr(), layout) }
}
}
#[inline]
unsafe fn grow(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
// SAFETY: all conditions must be upheld by the caller
unsafe { self.grow_impl(ptr, old_layout, new_layout, false) }
}
#[inline]
unsafe fn grow_zeroed(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
// SAFETY: all conditions must be upheld by the caller
unsafe { self.grow_impl(ptr, old_layout, new_layout, true) }
}
#[inline]
unsafe fn shrink(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
debug_assert!(
new_layout.size() <= old_layout.size(),
"`new_layout.size()` must be smaller than or equal to `old_layout.size()`"
);
match new_layout.size() {
// SAFETY: conditions must be upheld by the caller
0 => unsafe {
self.deallocate(ptr, old_layout);
Ok(NonNull::slice_from_raw_parts(new_layout.dangling(), 0))
},
// SAFETY: `new_size` is non-zero. Other conditions must be upheld by the caller
new_size if old_layout.align() == new_layout.align() => unsafe {
// `realloc` probably checks for `new_size <= old_layout.size()` or something similar.
hint::assert_unchecked(new_size <= old_layout.size());
let raw_ptr = realloc(ptr.as_ptr(), old_layout, new_size);
let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
Ok(NonNull::slice_from_raw_parts(ptr, new_size))
},
// SAFETY: because `new_size` must be smaller than or equal to `old_layout.size()`,
// both the old and new memory allocation are valid for reads and writes for `new_size`
// bytes. Also, because the old allocation wasn't yet deallocated, it cannot overlap
// `new_ptr`. Thus, the call to `copy_nonoverlapping` is safe. The safety contract
// for `dealloc` must be upheld by the caller.
new_size => unsafe {
let new_ptr = self.allocate(new_layout)?;
ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_mut_ptr(), new_size);
self.deallocate(ptr, old_layout);
Ok(new_ptr)
},
}
}
}
/// The allocator for unique pointers (this backs `Box` allocations).
#[cfg(all(not(no_global_oom_handling), not(test)))]
#[lang = "exchange_malloc"]
#[inline]
unsafe fn exchange_malloc(size: usize, align: usize) -> *mut u8 {
let layout = unsafe { Layout::from_size_align_unchecked(size, align) };
match Global.allocate(layout) {
Ok(ptr) => ptr.as_mut_ptr(),
Err(_) => handle_alloc_error(layout),
}
}
// # Allocation error handler
#[cfg(not(no_global_oom_handling))]
extern "Rust" {
// This is the magic symbol to call the global alloc error handler. rustc generates
// it to call `__rg_oom` if there is a `#[alloc_error_handler]`, or to call the
// default implementations below (`__rdl_oom`) otherwise.
fn __rust_alloc_error_handler(size: usize, align: usize) -> !;
}
/// Signal a memory allocation error.
///
/// Callers of memory allocation APIs wishing to cease execution
/// in response to an allocation error are encouraged to call this function,
/// rather than directly invoking [`panic!`] or similar.
///
/// This function is guaranteed to diverge (not return normally with a value), but depending on
/// global configuration, it may either panic (resulting in unwinding or aborting as per
/// configuration for all panics), or abort the process (with no unwinding).
///
/// The default behavior is:
///
/// * If the binary links against `std` (typically the case), then
/// print a message to standard error and abort the process.
/// This behavior can be replaced with [`set_alloc_error_hook`] and [`take_alloc_error_hook`].
/// Future versions of Rust may panic by default instead.
///
/// * If the binary does not link against `std` (all of its crates are marked
/// [`#![no_std]`][no_std]), then call [`panic!`] with a message.
/// [The panic handler] applies as to any panic.
///
/// [`set_alloc_error_hook`]: ../../std/alloc/fn.set_alloc_error_hook.html
/// [`take_alloc_error_hook`]: ../../std/alloc/fn.take_alloc_error_hook.html
/// [The panic handler]: https://doc.rust-lang.org/reference/runtime.html#the-panic_handler-attribute
/// [no_std]: https://doc.rust-lang.org/reference/names/preludes.html#the-no_std-attribute
#[stable(feature = "global_alloc", since = "1.28.0")]
#[rustc_const_unstable(feature = "const_alloc_error", issue = "92523")]
#[cfg(all(not(no_global_oom_handling), not(test)))]
#[cold]
pub const fn handle_alloc_error(layout: Layout) -> ! {
const fn ct_error(_: Layout) -> ! {
panic!("allocation failed");
}
#[inline]
fn rt_error(layout: Layout) -> ! {
unsafe {
__rust_alloc_error_handler(layout.size(), layout.align());
}
}
#[cfg(not(feature = "panic_immediate_abort"))]
{
core::intrinsics::const_eval_select((layout,), ct_error, rt_error)
}
#[cfg(feature = "panic_immediate_abort")]
ct_error(layout)
}
// For the alloc test, `std::alloc::handle_alloc_error` can be used directly.
#[cfg(all(not(no_global_oom_handling), test))]
pub use std::alloc::handle_alloc_error;
#[cfg(all(not(no_global_oom_handling), not(test)))]
#[doc(hidden)]
#[allow(unused_attributes)]
#[unstable(feature = "alloc_internals", issue = "none")]
pub mod __alloc_error_handler {
// Called via the generated `__rust_alloc_error_handler` if there is no
// `#[alloc_error_handler]`.
#[rustc_std_internal_symbol]
pub unsafe fn __rdl_oom(size: usize, _align: usize) -> ! {
extern "Rust" {
// This symbol is emitted by rustc next to __rust_alloc_error_handler.
// Its value depends on the -Zoom={panic,abort} compiler option.
static __rust_alloc_error_handler_should_panic: u8;
}
if unsafe { __rust_alloc_error_handler_should_panic != 0 } {
panic!("memory allocation of {size} bytes failed")
} else {
core::panicking::panic_nounwind_fmt(
format_args!("memory allocation of {size} bytes failed"),
/* force_no_backtrace */ false,
)
}
}
}
#[cfg(not(no_global_oom_handling))]
/// Specialize clones into pre-allocated, uninitialized memory.
/// Used by `Box::clone` and `Rc`/`Arc::make_mut`.
pub(crate) trait WriteCloneIntoRaw: Sized {
unsafe fn write_clone_into_raw(&self, target: *mut Self);
}
#[cfg(not(no_global_oom_handling))]
impl<T: Clone> WriteCloneIntoRaw for T {
#[inline]
default unsafe fn write_clone_into_raw(&self, target: *mut Self) {
// Having allocated *first* may allow the optimizer to create
// the cloned value in-place, skipping the local and move.
unsafe { target.write(self.clone()) };
}
}
#[cfg(not(no_global_oom_handling))]
impl<T: Copy> WriteCloneIntoRaw for T {
#[inline]
unsafe fn write_clone_into_raw(&self, target: *mut Self) {
// We can always copy in-place, without ever involving a local value.
unsafe { target.copy_from_nonoverlapping(self, 1) };
}
}
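// A hedged sketch of the intended call pattern (hypothetical caller shown for
// illustration; the real users are `Box::clone` and `Rc`/`Arc::make_mut`):
//
//     // `dst` points to freshly allocated, uninitialized memory for one `T`.
//     unsafe { value.write_clone_into_raw(dst) };
//     // `dst` now holds a clone; for `T: Copy` the call lowers to a raw copy.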

View File

@ -0,0 +1,29 @@
use super::*;
extern crate test;
use crate::boxed::Box;
use test::Bencher;
#[test]
fn allocate_zeroed() {
unsafe {
let layout = Layout::from_size_align(1024, 1).unwrap();
let ptr =
Global.allocate_zeroed(layout.clone()).unwrap_or_else(|_| handle_alloc_error(layout));
let mut i = ptr.as_non_null_ptr().as_ptr();
let end = i.add(layout.size());
while i < end {
assert_eq!(*i, 0);
i = i.add(1);
}
Global.deallocate(ptr.as_non_null_ptr(), layout);
}
}
#[bench]
fn alloc_owned_small(b: &mut Bencher) {
b.iter(|| {
let _: Box<_> = Box::new(10);
})
}

View File

@ -0,0 +1,497 @@
//! A module for working with borrowed data.
#![stable(feature = "rust1", since = "1.0.0")]
use core::cmp::Ordering;
use core::hash::{Hash, Hasher};
#[cfg(not(no_global_oom_handling))]
use core::ops::{Add, AddAssign};
use core::ops::{Deref, DerefPure};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::borrow::{Borrow, BorrowMut};
use crate::fmt;
#[cfg(not(no_global_oom_handling))]
use crate::string::String;
use Cow::*;
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, B: ?Sized> Borrow<B> for Cow<'a, B>
where
B: ToOwned,
{
fn borrow(&self) -> &B {
&**self
}
}
/// A generalization of `Clone` to borrowed data.
///
/// Some types make it possible to go from borrowed to owned, usually by
/// implementing the `Clone` trait. But `Clone` works only for going from `&T`
/// to `T`. The `ToOwned` trait generalizes `Clone` to construct owned data
/// from any borrow of a given type.
#[cfg_attr(not(test), rustc_diagnostic_item = "ToOwned")]
#[stable(feature = "rust1", since = "1.0.0")]
pub trait ToOwned {
/// The resulting type after obtaining ownership.
#[stable(feature = "rust1", since = "1.0.0")]
type Owned: Borrow<Self>;
/// Creates owned data from borrowed data, usually by cloning.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let s: &str = "a";
/// let ss: String = s.to_owned();
///
/// let v: &[i32] = &[1, 2];
/// let vv: Vec<i32> = v.to_owned();
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[must_use = "cloning is often expensive and is not expected to have side effects"]
#[cfg_attr(not(test), rustc_diagnostic_item = "to_owned_method")]
fn to_owned(&self) -> Self::Owned;
/// Uses borrowed data to replace owned data, usually by cloning.
///
/// This is a borrow-generalized version of [`Clone::clone_from`].
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut s: String = String::new();
/// "hello".clone_into(&mut s);
///
/// let mut v: Vec<i32> = Vec::new();
/// [1, 2][..].clone_into(&mut v);
/// ```
#[stable(feature = "toowned_clone_into", since = "1.63.0")]
fn clone_into(&self, target: &mut Self::Owned) {
*target = self.to_owned();
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ToOwned for T
where
T: Clone,
{
type Owned = T;
fn to_owned(&self) -> T {
self.clone()
}
fn clone_into(&self, target: &mut T) {
target.clone_from(self);
}
}
/// A clone-on-write smart pointer.
///
/// The type `Cow` is a smart pointer providing clone-on-write functionality: it
/// can enclose and provide immutable access to borrowed data, and clone the
/// data lazily when mutation or ownership is required. The type is designed to
/// work with general borrowed data via the `Borrow` trait.
///
/// `Cow` implements `Deref`, which means that you can call
/// non-mutating methods directly on the data it encloses. If mutation
/// is desired, `to_mut` will obtain a mutable reference to an owned
/// value, cloning if necessary.
///
/// If you need reference-counting pointers, note that
/// [`Rc::make_mut`][crate::rc::Rc::make_mut] and
/// [`Arc::make_mut`][crate::sync::Arc::make_mut] can provide clone-on-write
/// functionality as well.
///
/// # Examples
///
/// ```
/// use std::borrow::Cow;
///
/// fn abs_all(input: &mut Cow<'_, [i32]>) {
/// for i in 0..input.len() {
/// let v = input[i];
/// if v < 0 {
/// // Clones into a vector if not already owned.
/// input.to_mut()[i] = -v;
/// }
/// }
/// }
///
/// // No clone occurs because `input` doesn't need to be mutated.
/// let slice = [0, 1, 2];
/// let mut input = Cow::from(&slice[..]);
/// abs_all(&mut input);
///
/// // Clone occurs because `input` needs to be mutated.
/// let slice = [-1, 0, 1];
/// let mut input = Cow::from(&slice[..]);
/// abs_all(&mut input);
///
/// // No clone occurs because `input` is already owned.
/// let mut input = Cow::from(vec![-1, 0, 1]);
/// abs_all(&mut input);
/// ```
///
/// Another example showing how to keep `Cow` in a struct:
///
/// ```
/// use std::borrow::Cow;
///
/// struct Items<'a, X> where [X]: ToOwned<Owned = Vec<X>> {
/// values: Cow<'a, [X]>,
/// }
///
/// impl<'a, X: Clone + 'a> Items<'a, X> where [X]: ToOwned<Owned = Vec<X>> {
/// fn new(v: Cow<'a, [X]>) -> Self {
/// Items { values: v }
/// }
/// }
///
/// // Creates a container from borrowed values of a slice
/// let readonly = [1, 2];
/// let borrowed = Items::new((&readonly[..]).into());
/// match borrowed {
/// Items { values: Cow::Borrowed(b) } => println!("borrowed {b:?}"),
/// _ => panic!("expect borrowed value"),
/// }
///
/// let mut clone_on_write = borrowed;
/// // Mutates the data from slice into owned vec and pushes a new value on top
/// clone_on_write.values.to_mut().push(3);
/// println!("clone_on_write = {:?}", clone_on_write.values);
///
/// // The data was mutated. Let's check it out.
/// match clone_on_write {
/// Items { values: Cow::Owned(_) } => println!("clone_on_write contains owned data"),
/// _ => panic!("expect owned data"),
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "Cow")]
pub enum Cow<'a, B: ?Sized + 'a>
where
B: ToOwned,
{
/// Borrowed data.
#[stable(feature = "rust1", since = "1.0.0")]
Borrowed(#[stable(feature = "rust1", since = "1.0.0")] &'a B),
/// Owned data.
#[stable(feature = "rust1", since = "1.0.0")]
Owned(#[stable(feature = "rust1", since = "1.0.0")] <B as ToOwned>::Owned),
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<B: ?Sized + ToOwned> Clone for Cow<'_, B> {
fn clone(&self) -> Self {
match *self {
Borrowed(b) => Borrowed(b),
Owned(ref o) => {
let b: &B = o.borrow();
Owned(b.to_owned())
}
}
}
fn clone_from(&mut self, source: &Self) {
match (self, source) {
(&mut Owned(ref mut dest), &Owned(ref o)) => o.borrow().clone_into(dest),
(t, s) => *t = s.clone(),
}
}
}
impl<B: ?Sized + ToOwned> Cow<'_, B> {
/// Returns `true` if the data is borrowed, i.e. if `to_mut` would require additional work.
///
/// # Examples
///
/// ```
/// #![feature(cow_is_borrowed)]
/// use std::borrow::Cow;
///
/// let cow = Cow::Borrowed("moo");
/// assert!(cow.is_borrowed());
///
/// let bull: Cow<'_, str> = Cow::Owned("...moo?".to_string());
/// assert!(!bull.is_borrowed());
/// ```
#[unstable(feature = "cow_is_borrowed", issue = "65143")]
#[rustc_const_unstable(feature = "const_cow_is_borrowed", issue = "65143")]
pub const fn is_borrowed(&self) -> bool {
match *self {
Borrowed(_) => true,
Owned(_) => false,
}
}
/// Returns `true` if the data is owned, i.e. if `to_mut` would be a no-op.
///
/// # Examples
///
/// ```
/// #![feature(cow_is_borrowed)]
/// use std::borrow::Cow;
///
/// let cow: Cow<'_, str> = Cow::Owned("moo".to_string());
/// assert!(cow.is_owned());
///
/// let bull = Cow::Borrowed("...moo?");
/// assert!(!bull.is_owned());
/// ```
#[unstable(feature = "cow_is_borrowed", issue = "65143")]
#[rustc_const_unstable(feature = "const_cow_is_borrowed", issue = "65143")]
pub const fn is_owned(&self) -> bool {
!self.is_borrowed()
}
/// Acquires a mutable reference to the owned form of the data.
///
/// Clones the data if it is not already owned.
///
/// # Examples
///
/// ```
/// use std::borrow::Cow;
///
/// let mut cow = Cow::Borrowed("foo");
/// cow.to_mut().make_ascii_uppercase();
///
/// assert_eq!(
/// cow,
/// Cow::Owned(String::from("FOO")) as Cow<'_, str>
/// );
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn to_mut(&mut self) -> &mut <B as ToOwned>::Owned {
match *self {
Borrowed(borrowed) => {
*self = Owned(borrowed.to_owned());
match *self {
Borrowed(..) => unreachable!(),
Owned(ref mut owned) => owned,
}
}
Owned(ref mut owned) => owned,
}
}
/// Extracts the owned data.
///
/// Clones the data if it is not already owned.
///
/// # Examples
///
/// Calling `into_owned` on a `Cow::Borrowed` returns a clone of the borrowed data:
///
/// ```
/// use std::borrow::Cow;
///
/// let s = "Hello world!";
/// let cow = Cow::Borrowed(s);
///
/// assert_eq!(
/// cow.into_owned(),
/// String::from(s)
/// );
/// ```
///
/// Calling `into_owned` on a `Cow::Owned` returns the owned data. The data is moved out of the
/// `Cow` without being cloned.
///
/// ```
/// use std::borrow::Cow;
///
/// let s = "Hello world!";
/// let cow: Cow<'_, str> = Cow::Owned(String::from(s));
///
/// assert_eq!(
/// cow.into_owned(),
/// String::from(s)
/// );
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn into_owned(self) -> <B as ToOwned>::Owned {
match self {
Borrowed(borrowed) => borrowed.to_owned(),
Owned(owned) => owned,
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<B: ?Sized + ToOwned> Deref for Cow<'_, B>
where
B::Owned: Borrow<B>,
{
type Target = B;
fn deref(&self) -> &B {
match *self {
Borrowed(borrowed) => borrowed,
Owned(ref owned) => owned.borrow(),
}
}
}
#[unstable(feature = "deref_pure_trait", issue = "87121")]
unsafe impl<B: ?Sized + ToOwned> DerefPure for Cow<'_, B> where B::Owned: Borrow<B> {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<B: ?Sized> Eq for Cow<'_, B> where B: Eq + ToOwned {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<B: ?Sized> Ord for Cow<'_, B>
where
B: Ord + ToOwned,
{
#[inline]
fn cmp(&self, other: &Self) -> Ordering {
Ord::cmp(&**self, &**other)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, 'b, B: ?Sized, C: ?Sized> PartialEq<Cow<'b, C>> for Cow<'a, B>
where
B: PartialEq<C> + ToOwned,
C: ToOwned,
{
#[inline]
fn eq(&self, other: &Cow<'b, C>) -> bool {
PartialEq::eq(&**self, &**other)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, B: ?Sized> PartialOrd for Cow<'a, B>
where
B: PartialOrd + ToOwned,
{
#[inline]
fn partial_cmp(&self, other: &Cow<'a, B>) -> Option<Ordering> {
PartialOrd::partial_cmp(&**self, &**other)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<B: ?Sized> fmt::Debug for Cow<'_, B>
where
B: fmt::Debug + ToOwned<Owned: fmt::Debug>,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
Borrowed(ref b) => fmt::Debug::fmt(b, f),
Owned(ref o) => fmt::Debug::fmt(o, f),
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<B: ?Sized> fmt::Display for Cow<'_, B>
where
B: fmt::Display + ToOwned<Owned: fmt::Display>,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
Borrowed(ref b) => fmt::Display::fmt(b, f),
Owned(ref o) => fmt::Display::fmt(o, f),
}
}
}
#[stable(feature = "default", since = "1.11.0")]
impl<B: ?Sized> Default for Cow<'_, B>
where
B: ToOwned<Owned: Default>,
{
/// Creates an owned `Cow<'a, B>` with the default value for the contained owned value.
fn default() -> Self {
Owned(<B as ToOwned>::Owned::default())
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<B: ?Sized> Hash for Cow<'_, B>
where
B: Hash + ToOwned,
{
#[inline]
fn hash<H: Hasher>(&self, state: &mut H) {
Hash::hash(&**self, state)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + ToOwned> AsRef<T> for Cow<'_, T> {
fn as_ref(&self) -> &T {
self
}
}
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "cow_add", since = "1.14.0")]
impl<'a> Add<&'a str> for Cow<'a, str> {
type Output = Cow<'a, str>;
#[inline]
fn add(mut self, rhs: &'a str) -> Self::Output {
self += rhs;
self
}
}
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "cow_add", since = "1.14.0")]
impl<'a> Add<Cow<'a, str>> for Cow<'a, str> {
type Output = Cow<'a, str>;
#[inline]
fn add(mut self, rhs: Cow<'a, str>) -> Self::Output {
self += rhs;
self
}
}
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "cow_add", since = "1.14.0")]
impl<'a> AddAssign<&'a str> for Cow<'a, str> {
fn add_assign(&mut self, rhs: &'a str) {
if self.is_empty() {
*self = Cow::Borrowed(rhs)
} else if !rhs.is_empty() {
if let Cow::Borrowed(lhs) = *self {
let mut s = String::with_capacity(lhs.len() + rhs.len());
s.push_str(lhs);
*self = Cow::Owned(s);
}
self.to_mut().push_str(rhs);
}
}
}
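// For illustration: appending promotes a borrowed `Cow` to the owned variant
// only when the result is non-empty.
//
//     let mut c: Cow<'_, str> = Cow::Borrowed("foo");
//     c += "bar";
//     assert_eq!(c, "foobar");
//     assert!(matches!(c, Cow::Owned(_)));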
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "cow_add", since = "1.14.0")]
impl<'a> AddAssign<Cow<'a, str>> for Cow<'a, str> {
fn add_assign(&mut self, rhs: Cow<'a, str>) {
if self.is_empty() {
*self = rhs
} else if !rhs.is_empty() {
if let Cow::Borrowed(lhs) = *self {
let mut s = String::with_capacity(lhs.len() + rhs.len());
s.push_str(lhs);
*self = Cow::Owned(s);
}
self.to_mut().push_str(&rhs);
}
}
}

View File

@ -0,0 +1,436 @@
// Based on
// https://github.com/matthieu-m/rfc2580/blob/b58d1d3cba0d4b5e859d3617ea2d0943aaa31329/examples/thin.rs
// by matthieu-m
use crate::alloc::{self, Layout, LayoutError};
use core::error::Error;
use core::fmt::{self, Debug, Display, Formatter};
#[cfg(not(no_global_oom_handling))]
use core::intrinsics::const_allocate;
use core::marker::PhantomData;
#[cfg(not(no_global_oom_handling))]
use core::marker::Unsize;
use core::mem;
#[cfg(not(no_global_oom_handling))]
use core::mem::SizedTypeProperties;
use core::ops::{Deref, DerefMut};
use core::ptr::Pointee;
use core::ptr::{self, NonNull};
/// ThinBox.
///
/// A thin pointer for heap allocation, regardless of T.
///
/// # Examples
///
/// ```
/// #![feature(thin_box)]
/// use std::boxed::ThinBox;
///
/// let five = ThinBox::new(5);
/// let thin_slice = ThinBox::<[i32]>::new_unsize([1, 2, 3, 4]);
///
/// use std::mem::{size_of, size_of_val};
/// let size_of_ptr = size_of::<*const ()>();
/// assert_eq!(size_of_ptr, size_of_val(&five));
/// assert_eq!(size_of_ptr, size_of_val(&thin_slice));
/// ```
#[unstable(feature = "thin_box", issue = "92791")]
pub struct ThinBox<T: ?Sized> {
// This is essentially `WithHeader<<T as Pointee>::Metadata>`,
// but that would be invariant in `T`, and we want covariance.
ptr: WithOpaqueHeader,
_marker: PhantomData<T>,
}
/// `ThinBox<T>` is `Send` if `T` is `Send` because the data is owned.
#[unstable(feature = "thin_box", issue = "92791")]
unsafe impl<T: ?Sized + Send> Send for ThinBox<T> {}
/// `ThinBox<T>` is `Sync` if `T` is `Sync` because the data is owned.
#[unstable(feature = "thin_box", issue = "92791")]
unsafe impl<T: ?Sized + Sync> Sync for ThinBox<T> {}
#[unstable(feature = "thin_box", issue = "92791")]
impl<T> ThinBox<T> {
/// Moves a type to the heap with its [`Metadata`] stored in the heap allocation instead of on
/// the stack.
///
/// # Examples
///
/// ```
/// #![feature(thin_box)]
/// use std::boxed::ThinBox;
///
/// let five = ThinBox::new(5);
/// ```
///
/// [`Metadata`]: core::ptr::Pointee::Metadata
#[cfg(not(no_global_oom_handling))]
pub fn new(value: T) -> Self {
let meta = ptr::metadata(&value);
let ptr = WithOpaqueHeader::new(meta, value);
ThinBox { ptr, _marker: PhantomData }
}
/// Moves a type to the heap with its [`Metadata`] stored in the heap allocation instead of on
/// the stack. Returns an error if allocation fails, instead of aborting.
///
/// # Examples
///
/// ```
/// #![feature(allocator_api)]
/// #![feature(thin_box)]
/// use std::boxed::ThinBox;
///
/// let five = ThinBox::try_new(5)?;
/// # Ok::<(), std::alloc::AllocError>(())
/// ```
///
/// [`Metadata`]: core::ptr::Pointee::Metadata
pub fn try_new(value: T) -> Result<Self, core::alloc::AllocError> {
let meta = ptr::metadata(&value);
WithOpaqueHeader::try_new(meta, value).map(|ptr| ThinBox { ptr, _marker: PhantomData })
}
}
#[unstable(feature = "thin_box", issue = "92791")]
impl<Dyn: ?Sized> ThinBox<Dyn> {
/// Moves a type to the heap with its [`Metadata`] stored in the heap allocation instead of on
/// the stack.
///
/// # Examples
///
/// ```
/// #![feature(thin_box)]
/// use std::boxed::ThinBox;
///
/// let thin_slice = ThinBox::<[i32]>::new_unsize([1, 2, 3, 4]);
/// ```
///
/// [`Metadata`]: core::ptr::Pointee::Metadata
#[cfg(not(no_global_oom_handling))]
pub fn new_unsize<T>(value: T) -> Self
where
T: Unsize<Dyn>,
{
if mem::size_of::<T>() == 0 {
let ptr = WithOpaqueHeader::new_unsize_zst::<Dyn, T>(value);
ThinBox { ptr, _marker: PhantomData }
} else {
let meta = ptr::metadata(&value as &Dyn);
let ptr = WithOpaqueHeader::new(meta, value);
ThinBox { ptr, _marker: PhantomData }
}
}
}
#[unstable(feature = "thin_box", issue = "92791")]
impl<T: ?Sized + Debug> Debug for ThinBox<T> {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
Debug::fmt(self.deref(), f)
}
}
#[unstable(feature = "thin_box", issue = "92791")]
impl<T: ?Sized + Display> Display for ThinBox<T> {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
Display::fmt(self.deref(), f)
}
}
#[unstable(feature = "thin_box", issue = "92791")]
impl<T: ?Sized> Deref for ThinBox<T> {
type Target = T;
fn deref(&self) -> &T {
let value = self.data();
let metadata = self.meta();
let pointer = ptr::from_raw_parts(value as *const (), metadata);
unsafe { &*pointer }
}
}
#[unstable(feature = "thin_box", issue = "92791")]
impl<T: ?Sized> DerefMut for ThinBox<T> {
fn deref_mut(&mut self) -> &mut T {
let value = self.data();
let metadata = self.meta();
let pointer = ptr::from_raw_parts_mut::<T>(value as *mut (), metadata);
unsafe { &mut *pointer }
}
}
#[unstable(feature = "thin_box", issue = "92791")]
impl<T: ?Sized> Drop for ThinBox<T> {
fn drop(&mut self) {
unsafe {
let value = self.deref_mut();
let value = value as *mut T;
self.with_header().drop::<T>(value);
}
}
}
#[unstable(feature = "thin_box", issue = "92791")]
impl<T: ?Sized> ThinBox<T> {
fn meta(&self) -> <T as Pointee>::Metadata {
// Safety:
// - NonNull and valid.
unsafe { *self.with_header().header() }
}
fn data(&self) -> *mut u8 {
self.with_header().value()
}
fn with_header(&self) -> &WithHeader<<T as Pointee>::Metadata> {
// SAFETY: both types are transparent to `NonNull<u8>`
unsafe { &*(core::ptr::addr_of!(self.ptr) as *const WithHeader<_>) }
}
}
/// A pointer to type-erased data, guaranteed to either be:
/// 1. `NonNull::dangling()`, in the case where both the pointee (`T`) and
/// metadata (`H`) are ZSTs.
/// 2. A pointer to a valid `T` that has a header `H` directly before the
/// pointed-to location.
#[repr(transparent)]
struct WithHeader<H>(NonNull<u8>, PhantomData<H>);
/// An opaque representation of `WithHeader<H>` to avoid the
/// projection invariance of `<T as Pointee>::Metadata`.
#[repr(transparent)]
struct WithOpaqueHeader(NonNull<u8>);
impl WithOpaqueHeader {
#[cfg(not(no_global_oom_handling))]
fn new<H, T>(header: H, value: T) -> Self {
let ptr = WithHeader::new(header, value);
Self(ptr.0)
}
#[cfg(not(no_global_oom_handling))]
fn new_unsize_zst<Dyn, T>(value: T) -> Self
where
Dyn: ?Sized,
T: Unsize<Dyn>,
{
let ptr = WithHeader::<<Dyn as Pointee>::Metadata>::new_unsize_zst::<Dyn, T>(value);
Self(ptr.0)
}
fn try_new<H, T>(header: H, value: T) -> Result<Self, core::alloc::AllocError> {
WithHeader::try_new(header, value).map(|ptr| Self(ptr.0))
}
}
impl<H> WithHeader<H> {
#[cfg(not(no_global_oom_handling))]
fn new<T>(header: H, value: T) -> WithHeader<H> {
let value_layout = Layout::new::<T>();
let Ok((layout, value_offset)) = Self::alloc_layout(value_layout) else {
// We pass an empty layout here because we do not know which layout caused the
// arithmetic overflow in `Layout::extend` and `handle_alloc_error` takes `Layout` as
// its argument rather than `Result<Layout, LayoutError>`; also, this function has been
// stable since 1.28.
//
// On the other hand, look at this gorgeous turbofish!
alloc::handle_alloc_error(Layout::new::<()>());
};
unsafe {
// Note: It's UB to pass a layout with a zero size to `alloc::alloc`, so
// we use `layout.dangling()` for this case, which should have a valid
// alignment for both `T` and `H`.
let ptr = if layout.size() == 0 {
// Some paranoia checking, mostly so that the ThinBox tests are
// better able to catch issues.
debug_assert!(value_offset == 0 && T::IS_ZST && H::IS_ZST);
layout.dangling()
} else {
let ptr = alloc::alloc(layout);
if ptr.is_null() {
alloc::handle_alloc_error(layout);
}
// Safety:
// - The size is at least `aligned_header_size`.
let ptr = ptr.add(value_offset) as *mut _;
NonNull::new_unchecked(ptr)
};
let result = WithHeader(ptr, PhantomData);
ptr::write(result.header(), header);
ptr::write(result.value().cast(), value);
result
}
}
/// Non-panicking version of `new`.
/// Any error is returned as `Err(core::alloc::AllocError)`.
fn try_new<T>(header: H, value: T) -> Result<WithHeader<H>, core::alloc::AllocError> {
let value_layout = Layout::new::<T>();
let Ok((layout, value_offset)) = Self::alloc_layout(value_layout) else {
return Err(core::alloc::AllocError);
};
unsafe {
// Note: It's UB to pass a layout with a zero size to `alloc::alloc`, so
// we use `layout.dangling()` for this case, which should have a valid
// alignment for both `T` and `H`.
let ptr = if layout.size() == 0 {
// Some paranoia checking, mostly so that the ThinBox tests are
// better able to catch issues.
debug_assert!(
value_offset == 0 && mem::size_of::<T>() == 0 && mem::size_of::<H>() == 0
);
layout.dangling()
} else {
let ptr = alloc::alloc(layout);
if ptr.is_null() {
return Err(core::alloc::AllocError);
}
// Safety:
// - The size is at least `aligned_header_size`.
let ptr = ptr.add(value_offset) as *mut _;
NonNull::new_unchecked(ptr)
};
let result = WithHeader(ptr, PhantomData);
ptr::write(result.header(), header);
ptr::write(result.value().cast(), value);
Ok(result)
}
}
// `Dyn` is a `?Sized` type like `[u32]`, and `T` is a ZST like `[u32; 0]`.
#[cfg(not(no_global_oom_handling))]
fn new_unsize_zst<Dyn, T>(value: T) -> WithHeader<H>
where
Dyn: Pointee<Metadata = H> + ?Sized,
T: Unsize<Dyn>,
{
assert!(mem::size_of::<T>() == 0);
const fn max(a: usize, b: usize) -> usize {
if a > b { a } else { b }
}
// Compute a pointer to the right metadata. This will point to the beginning
// of the header, past the padding, so the assigned type makes sense.
// It also ensures that the address at the end of the header is sufficiently
// aligned for T.
let alloc: &<Dyn as Pointee>::Metadata = const {
// FIXME: just call `WithHeader::alloc_layout` with size reset to 0.
// Currently that's blocked on `Layout::extend` not being `const fn`.
let alloc_align =
max(mem::align_of::<T>(), mem::align_of::<<Dyn as Pointee>::Metadata>());
let alloc_size =
max(mem::align_of::<T>(), mem::size_of::<<Dyn as Pointee>::Metadata>());
unsafe {
// SAFETY: align is power of two because it is the maximum of two alignments.
let alloc: *mut u8 = const_allocate(alloc_size, alloc_align);
let metadata_offset =
alloc_size.checked_sub(mem::size_of::<<Dyn as Pointee>::Metadata>()).unwrap();
// SAFETY: adding offset within the allocation.
let metadata_ptr: *mut <Dyn as Pointee>::Metadata =
alloc.add(metadata_offset).cast();
// SAFETY: `*metadata_ptr` is within the allocation.
metadata_ptr.write(ptr::metadata::<Dyn>(ptr::dangling::<T>() as *const Dyn));
// SAFETY: we have just written the metadata.
&*(metadata_ptr)
}
};
// SAFETY: `alloc` points to `<Dyn as Pointee>::Metadata`, so addition stays in-bounds.
let value_ptr =
unsafe { (alloc as *const <Dyn as Pointee>::Metadata).add(1) }.cast::<T>().cast_mut();
debug_assert!(value_ptr.is_aligned());
mem::forget(value);
WithHeader(NonNull::new(value_ptr.cast()).unwrap(), PhantomData)
}
// Safety:
// - Assumes that either `value` can be dereferenced, or is the
// `NonNull::dangling()` we use when both `T` and `H` are ZSTs.
unsafe fn drop<T: ?Sized>(&self, value: *mut T) {
struct DropGuard<H> {
ptr: NonNull<u8>,
value_layout: Layout,
_marker: PhantomData<H>,
}
impl<H> Drop for DropGuard<H> {
fn drop(&mut self) {
// All ZSTs are allocated statically.
if self.value_layout.size() == 0 {
return;
}
unsafe {
// SAFETY: Layout must have been computable if we're in drop
let (layout, value_offset) =
WithHeader::<H>::alloc_layout(self.value_layout).unwrap_unchecked();
// Since we only allocate for non-ZSTs, the layout size cannot be zero.
debug_assert!(layout.size() != 0);
alloc::dealloc(self.ptr.as_ptr().sub(value_offset), layout);
}
}
}
unsafe {
// `_guard` will deallocate the memory when dropped, even if `drop_in_place` unwinds.
let _guard = DropGuard {
ptr: self.0,
value_layout: Layout::for_value_raw(value),
_marker: PhantomData::<H>,
};
// We only drop the value because the `Pointee` trait requires that the metadata is `Copy`,
// i.e., trivially droppable.
ptr::drop_in_place::<T>(value);
}
}
fn header(&self) -> *mut H {
// Safety:
// - At least `size_of::<H>()` bytes are allocated ahead of the pointer.
// - We know that H will be aligned because the middle pointer is aligned to the greater
// of the alignment of the header and the data and the header size includes the padding
// needed to align the header. Subtracting the header size from the aligned data pointer
// will always result in an aligned header pointer, it just may not point to the
// beginning of the allocation.
let hp = unsafe { self.0.as_ptr().sub(Self::header_size()) as *mut H };
debug_assert!(hp.is_aligned());
hp
}
fn value(&self) -> *mut u8 {
self.0.as_ptr()
}
const fn header_size() -> usize {
mem::size_of::<H>()
}
fn alloc_layout(value_layout: Layout) -> Result<(Layout, usize), LayoutError> {
Layout::new::<H>().extend(value_layout)
}
}
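The header-before-value scheme above rests on `alloc_layout`: `Layout::extend` returns both the combined layout and the offset at which the value starts, and everything before that offset holds the (possibly padded) header. A minimal standalone sketch of that arithmetic, using `u16`/`u64` as stand-in header and value types:

```
use std::alloc::Layout;

fn main() {
    // Header `u16` (size 2, align 2) extended by value `u64` (size 8, align 8):
    // the value must start at offset 8, leaving 6 bytes of padding after the header.
    let (layout, value_offset) = Layout::new::<u16>().extend(Layout::new::<u64>()).unwrap();
    assert_eq!(value_offset, 8);
    assert_eq!(layout.size(), 16);
    assert_eq!(layout.align(), 8);
    // `WithHeader` stores the pointer to the value; `header()` subtracts
    // `size_of::<H>()`, so here the header would live at offset 6: aligned
    // for `u16`, but not at the start of the allocation, exactly as the
    // safety comment on `header()` describes.
}
```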
#[unstable(feature = "thin_box", issue = "92791")]
impl<T: ?Sized + Error> Error for ThinBox<T> {
fn source(&self) -> Option<&(dyn Error + 'static)> {
self.deref().source()
}
}
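The payoff of all this machinery is that a `ThinBox` to an unsized type is a single pointer wide, since the metadata lives in the allocation rather than in the pointer. A small usage sketch, on a nightly toolchain with the unstable `thin_box` feature (issue 92791):

```
#![feature(thin_box)]
use std::boxed::ThinBox;
use std::mem::size_of;

fn main() {
    // The slice length (the metadata) is stored in the heap block, before the data.
    let slice: ThinBox<[i32]> = ThinBox::new_unsize([1, 2, 3, 4]);
    assert_eq!(slice.len(), 4); // reads the length back through the stored metadata
    assert_eq!(slice[0], 1);

    // Unlike `Box<[i32]>` (a wide pointer), `ThinBox<[i32]>` is pointer-sized.
    assert_eq!(size_of::<ThinBox<[i32]>>(), size_of::<*const ()>());
    assert!(size_of::<ThinBox<[i32]>>() < size_of::<Box<[i32]>>());
}
```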


@ -0,0 +1,578 @@
use super::*;
use crate::boxed::Box;
use crate::testing::crash_test::{CrashTestDummy, Panic};
use std::panic::{catch_unwind, AssertUnwindSafe};
#[test]
fn test_iterator() {
let data = vec![5, 9, 3];
let iterout = [9, 5, 3];
let heap = BinaryHeap::from(data);
let mut i = 0;
for el in &heap {
assert_eq!(*el, iterout[i]);
i += 1;
}
}
#[test]
fn test_iter_rev_cloned_collect() {
let data = vec![5, 9, 3];
let iterout = vec![3, 5, 9];
let pq = BinaryHeap::from(data);
let v: Vec<_> = pq.iter().rev().cloned().collect();
assert_eq!(v, iterout);
}
#[test]
fn test_into_iter_collect() {
let data = vec![5, 9, 3];
let iterout = vec![9, 5, 3];
let pq = BinaryHeap::from(data);
let v: Vec<_> = pq.into_iter().collect();
assert_eq!(v, iterout);
}
#[test]
fn test_into_iter_size_hint() {
let data = vec![5, 9];
let pq = BinaryHeap::from(data);
let mut it = pq.into_iter();
assert_eq!(it.size_hint(), (2, Some(2)));
assert_eq!(it.next(), Some(9));
assert_eq!(it.size_hint(), (1, Some(1)));
assert_eq!(it.next(), Some(5));
assert_eq!(it.size_hint(), (0, Some(0)));
assert_eq!(it.next(), None);
}
#[test]
fn test_into_iter_rev_collect() {
let data = vec![5, 9, 3];
let iterout = vec![3, 5, 9];
let pq = BinaryHeap::from(data);
let v: Vec<_> = pq.into_iter().rev().collect();
assert_eq!(v, iterout);
}
#[test]
fn test_into_iter_sorted_collect() {
let heap = BinaryHeap::from(vec![2, 4, 6, 2, 1, 8, 10, 3, 5, 7, 0, 9, 1]);
let it = heap.into_iter_sorted();
let sorted = it.collect::<Vec<_>>();
assert_eq!(sorted, vec![10, 9, 8, 7, 6, 5, 4, 3, 2, 2, 1, 1, 0]);
}
#[test]
fn test_drain_sorted_collect() {
let mut heap = BinaryHeap::from(vec![2, 4, 6, 2, 1, 8, 10, 3, 5, 7, 0, 9, 1]);
let it = heap.drain_sorted();
let sorted = it.collect::<Vec<_>>();
assert_eq!(sorted, vec![10, 9, 8, 7, 6, 5, 4, 3, 2, 2, 1, 1, 0]);
}
fn check_exact_size_iterator<I: ExactSizeIterator>(len: usize, it: I) {
let mut it = it;
for i in 0..it.len() {
let (lower, upper) = it.size_hint();
assert_eq!(Some(lower), upper);
assert_eq!(lower, len - i);
assert_eq!(it.len(), len - i);
it.next();
}
assert_eq!(it.len(), 0);
assert!(it.is_empty());
}
#[test]
fn test_exact_size_iterator() {
let heap = BinaryHeap::from(vec![2, 4, 6, 2, 1, 8, 10, 3, 5, 7, 0, 9, 1]);
check_exact_size_iterator(heap.len(), heap.iter());
check_exact_size_iterator(heap.len(), heap.clone().into_iter());
check_exact_size_iterator(heap.len(), heap.clone().into_iter_sorted());
check_exact_size_iterator(heap.len(), heap.clone().drain());
check_exact_size_iterator(heap.len(), heap.clone().drain_sorted());
}
fn check_trusted_len<I: TrustedLen>(len: usize, it: I) {
let mut it = it;
for i in 0..len {
let (lower, upper) = it.size_hint();
if upper.is_some() {
assert_eq!(Some(lower), upper);
assert_eq!(lower, len - i);
}
it.next();
}
}
#[test]
fn test_trusted_len() {
let heap = BinaryHeap::from(vec![2, 4, 6, 2, 1, 8, 10, 3, 5, 7, 0, 9, 1]);
check_trusted_len(heap.len(), heap.clone().into_iter_sorted());
check_trusted_len(heap.len(), heap.clone().drain_sorted());
}
#[test]
fn test_peek_and_pop() {
let data = vec![2, 4, 6, 2, 1, 8, 10, 3, 5, 7, 0, 9, 1];
let mut sorted = data.clone();
sorted.sort();
let mut heap = BinaryHeap::from(data);
while !heap.is_empty() {
assert_eq!(heap.peek().unwrap(), sorted.last().unwrap());
assert_eq!(heap.pop().unwrap(), sorted.pop().unwrap());
}
}
#[test]
fn test_peek_mut() {
let data = vec![2, 4, 6, 2, 1, 8, 10, 3, 5, 7, 0, 9, 1];
let mut heap = BinaryHeap::from(data);
assert_eq!(heap.peek(), Some(&10));
{
let mut top = heap.peek_mut().unwrap();
*top -= 2;
}
assert_eq!(heap.peek(), Some(&9));
}
#[test]
fn test_peek_mut_leak() {
let data = vec![4, 2, 7];
let mut heap = BinaryHeap::from(data);
let mut max = heap.peek_mut().unwrap();
*max = -1;
// The PeekMut object's Drop impl would have been responsible for moving the
// -1 out of the max position of the BinaryHeap, but we don't run it.
mem::forget(max);
// Absent some mitigation like leak amplification, the -1 would incorrectly
// end up in the last position of the returned Vec, with the rest of the
// heap's original contents in front of it in sorted order.
let sorted_vec = heap.into_sorted_vec();
assert!(sorted_vec.is_sorted(), "{:?}", sorted_vec);
}
#[test]
fn test_peek_mut_pop() {
let data = vec![2, 4, 6, 2, 1, 8, 10, 3, 5, 7, 0, 9, 1];
let mut heap = BinaryHeap::from(data);
assert_eq!(heap.peek(), Some(&10));
{
let mut top = heap.peek_mut().unwrap();
*top -= 2;
assert_eq!(PeekMut::pop(top), 8);
}
assert_eq!(heap.peek(), Some(&9));
}
#[test]
fn test_push() {
let mut heap = BinaryHeap::from(vec![2, 4, 9]);
assert_eq!(heap.len(), 3);
assert!(*heap.peek().unwrap() == 9);
heap.push(11);
assert_eq!(heap.len(), 4);
assert!(*heap.peek().unwrap() == 11);
heap.push(5);
assert_eq!(heap.len(), 5);
assert!(*heap.peek().unwrap() == 11);
heap.push(27);
assert_eq!(heap.len(), 6);
assert!(*heap.peek().unwrap() == 27);
heap.push(3);
assert_eq!(heap.len(), 7);
assert!(*heap.peek().unwrap() == 27);
heap.push(103);
assert_eq!(heap.len(), 8);
assert!(*heap.peek().unwrap() == 103);
}
#[test]
fn test_push_unique() {
let mut heap = BinaryHeap::<Box<_>>::from(vec![Box::new(2), Box::new(4), Box::new(9)]);
assert_eq!(heap.len(), 3);
assert!(**heap.peek().unwrap() == 9);
heap.push(Box::new(11));
assert_eq!(heap.len(), 4);
assert!(**heap.peek().unwrap() == 11);
heap.push(Box::new(5));
assert_eq!(heap.len(), 5);
assert!(**heap.peek().unwrap() == 11);
heap.push(Box::new(27));
assert_eq!(heap.len(), 6);
assert!(**heap.peek().unwrap() == 27);
heap.push(Box::new(3));
assert_eq!(heap.len(), 7);
assert!(**heap.peek().unwrap() == 27);
heap.push(Box::new(103));
assert_eq!(heap.len(), 8);
assert!(**heap.peek().unwrap() == 103);
}
fn check_to_vec(mut data: Vec<i32>) {
let heap = BinaryHeap::from(data.clone());
let mut v = heap.clone().into_vec();
v.sort();
data.sort();
assert_eq!(v, data);
assert_eq!(heap.into_sorted_vec(), data);
}
#[test]
fn test_to_vec() {
check_to_vec(vec![]);
check_to_vec(vec![5]);
check_to_vec(vec![3, 2]);
check_to_vec(vec![2, 3]);
check_to_vec(vec![5, 1, 2]);
check_to_vec(vec![1, 100, 2, 3]);
check_to_vec(vec![1, 3, 5, 7, 9, 2, 4, 6, 8, 0]);
check_to_vec(vec![2, 4, 6, 2, 1, 8, 10, 3, 5, 7, 0, 9, 1]);
check_to_vec(vec![9, 11, 9, 9, 9, 9, 11, 2, 3, 4, 11, 9, 0, 0, 0, 0]);
check_to_vec(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
check_to_vec(vec![10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]);
check_to_vec(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 1, 2]);
check_to_vec(vec![5, 4, 3, 2, 1, 5, 4, 3, 2, 1, 5, 4, 3, 2, 1]);
}
#[test]
fn test_in_place_iterator_specialization() {
let src: Vec<usize> = vec![1, 2, 3];
let src_ptr = src.as_ptr();
let heap: BinaryHeap<_> = src.into_iter().map(std::convert::identity).collect();
let heap_ptr = heap.iter().next().unwrap() as *const usize;
assert_eq!(src_ptr, heap_ptr);
let sink: Vec<_> = heap.into_iter().map(std::convert::identity).collect();
let sink_ptr = sink.as_ptr();
assert_eq!(heap_ptr, sink_ptr);
}
#[test]
fn test_empty_pop() {
let mut heap = BinaryHeap::<i32>::new();
assert!(heap.pop().is_none());
}
#[test]
fn test_empty_peek() {
let empty = BinaryHeap::<i32>::new();
assert!(empty.peek().is_none());
}
#[test]
fn test_empty_peek_mut() {
let mut empty = BinaryHeap::<i32>::new();
assert!(empty.peek_mut().is_none());
}
#[test]
fn test_from_iter() {
let xs = vec![9, 8, 7, 6, 5, 4, 3, 2, 1];
let mut q: BinaryHeap<_> = xs.iter().rev().cloned().collect();
for &x in &xs {
assert_eq!(q.pop().unwrap(), x);
}
}
#[test]
fn test_drain() {
let mut q: BinaryHeap<_> = [9, 8, 7, 6, 5, 4, 3, 2, 1].iter().cloned().collect();
assert_eq!(q.drain().take(5).count(), 5);
assert!(q.is_empty());
}
#[test]
fn test_drain_sorted() {
let mut q: BinaryHeap<_> = [9, 8, 7, 6, 5, 4, 3, 2, 1].iter().cloned().collect();
assert_eq!(q.drain_sorted().take(5).collect::<Vec<_>>(), vec![9, 8, 7, 6, 5]);
assert!(q.is_empty());
}
#[test]
#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
fn test_drain_sorted_leak() {
let d0 = CrashTestDummy::new(0);
let d1 = CrashTestDummy::new(1);
let d2 = CrashTestDummy::new(2);
let d3 = CrashTestDummy::new(3);
let d4 = CrashTestDummy::new(4);
let d5 = CrashTestDummy::new(5);
let mut q = BinaryHeap::from(vec![
d0.spawn(Panic::Never),
d1.spawn(Panic::Never),
d2.spawn(Panic::Never),
d3.spawn(Panic::InDrop),
d4.spawn(Panic::Never),
d5.spawn(Panic::Never),
]);
catch_unwind(AssertUnwindSafe(|| drop(q.drain_sorted()))).unwrap_err();
assert_eq!(d0.dropped(), 1);
assert_eq!(d1.dropped(), 1);
assert_eq!(d2.dropped(), 1);
assert_eq!(d3.dropped(), 1);
assert_eq!(d4.dropped(), 1);
assert_eq!(d5.dropped(), 1);
assert!(q.is_empty());
}
#[test]
fn test_drain_forget() {
let a = CrashTestDummy::new(0);
let b = CrashTestDummy::new(1);
let c = CrashTestDummy::new(2);
let mut q =
BinaryHeap::from(vec![a.spawn(Panic::Never), b.spawn(Panic::Never), c.spawn(Panic::Never)]);
catch_unwind(AssertUnwindSafe(|| {
let mut it = q.drain();
it.next();
mem::forget(it);
}))
.unwrap();
// Behaviour after leaking is explicitly unspecified and order is arbitrary,
// so it's fine if these start failing, but probably worth knowing.
assert!(q.is_empty());
assert_eq!(a.dropped() + b.dropped() + c.dropped(), 1);
assert_eq!(a.dropped(), 0);
assert_eq!(b.dropped(), 0);
assert_eq!(c.dropped(), 1);
drop(q);
assert_eq!(a.dropped(), 0);
assert_eq!(b.dropped(), 0);
assert_eq!(c.dropped(), 1);
}
#[test]
fn test_drain_sorted_forget() {
let a = CrashTestDummy::new(0);
let b = CrashTestDummy::new(1);
let c = CrashTestDummy::new(2);
let mut q =
BinaryHeap::from(vec![a.spawn(Panic::Never), b.spawn(Panic::Never), c.spawn(Panic::Never)]);
catch_unwind(AssertUnwindSafe(|| {
let mut it = q.drain_sorted();
it.next();
mem::forget(it);
}))
.unwrap();
// Behaviour after leaking is explicitly unspecified,
// so it's fine if these start failing, but probably worth knowing.
assert_eq!(q.len(), 2);
assert_eq!(a.dropped(), 0);
assert_eq!(b.dropped(), 0);
assert_eq!(c.dropped(), 1);
drop(q);
assert_eq!(a.dropped(), 1);
assert_eq!(b.dropped(), 1);
assert_eq!(c.dropped(), 1);
}
#[test]
fn test_extend_ref() {
let mut a = BinaryHeap::new();
a.push(1);
a.push(2);
a.extend(&[3, 4, 5]);
assert_eq!(a.len(), 5);
assert_eq!(a.into_sorted_vec(), [1, 2, 3, 4, 5]);
let mut a = BinaryHeap::new();
a.push(1);
a.push(2);
let mut b = BinaryHeap::new();
b.push(3);
b.push(4);
b.push(5);
a.extend(&b);
assert_eq!(a.len(), 5);
assert_eq!(a.into_sorted_vec(), [1, 2, 3, 4, 5]);
}
#[test]
fn test_append() {
let mut a = BinaryHeap::from(vec![-10, 1, 2, 3, 3]);
let mut b = BinaryHeap::from(vec![-20, 5, 43]);
a.append(&mut b);
assert_eq!(a.into_sorted_vec(), [-20, -10, 1, 2, 3, 3, 5, 43]);
assert!(b.is_empty());
}
#[test]
fn test_append_to_empty() {
let mut a = BinaryHeap::new();
let mut b = BinaryHeap::from(vec![-20, 5, 43]);
a.append(&mut b);
assert_eq!(a.into_sorted_vec(), [-20, 5, 43]);
assert!(b.is_empty());
}
#[test]
fn test_extend_specialization() {
let mut a = BinaryHeap::from(vec![-10, 1, 2, 3, 3]);
let b = BinaryHeap::from(vec![-20, 5, 43]);
a.extend(b);
assert_eq!(a.into_sorted_vec(), [-20, -10, 1, 2, 3, 3, 5, 43]);
}
#[allow(dead_code)]
fn assert_covariance() {
fn drain<'new>(d: Drain<'static, &'static str>) -> Drain<'new, &'new str> {
d
}
}
#[test]
fn test_retain() {
let mut a = BinaryHeap::from(vec![100, 10, 50, 1, 2, 20, 30]);
a.retain(|&x| x != 2);
// Check that 20 moved into 10's place.
assert_eq!(a.clone().into_vec(), [100, 20, 50, 1, 10, 30]);
a.retain(|_| true);
assert_eq!(a.clone().into_vec(), [100, 20, 50, 1, 10, 30]);
a.retain(|&x| x < 50);
assert_eq!(a.clone().into_vec(), [30, 20, 10, 1]);
a.retain(|_| false);
assert!(a.is_empty());
}
#[test]
#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
fn test_retain_catch_unwind() {
let mut heap = BinaryHeap::from(vec![3, 1, 2]);
// Removes the 3, then unwinds out of retain.
let _ = catch_unwind(AssertUnwindSafe(|| {
heap.retain(|e| {
if *e == 1 {
panic!();
}
false
});
}));
// Naively this would be [1, 2] (an invalid heap) if BinaryHeap delegates to
// Vec's retain impl and then does not rebuild the heap after that unwinds.
assert_eq!(heap.into_vec(), [2, 1]);
}
// The old BinaryHeap implementation failed this test.
//
// Integrity means that all elements are present after a comparison panics,
// even if the order might not be correct.
//
// Destructors must be called exactly once per element.
// FIXME: re-enable emscripten once it can unwind again
#[test]
#[cfg(not(target_os = "emscripten"))]
#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
fn panic_safe() {
use rand::seq::SliceRandom;
use std::cmp;
use std::panic::{self, AssertUnwindSafe};
use std::sync::atomic::{AtomicUsize, Ordering};
static DROP_COUNTER: AtomicUsize = AtomicUsize::new(0);
#[derive(Eq, PartialEq, Ord, Clone, Debug)]
struct PanicOrd<T>(T, bool);
impl<T> Drop for PanicOrd<T> {
fn drop(&mut self) {
// update global drop count
DROP_COUNTER.fetch_add(1, Ordering::SeqCst);
}
}
impl<T: PartialOrd> PartialOrd for PanicOrd<T> {
fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
if self.1 || other.1 {
panic!("Panicking comparison");
}
self.0.partial_cmp(&other.0)
}
}
let mut rng = crate::test_helpers::test_rng();
const DATASZ: usize = 32;
// Miri is too slow
let ntest = if cfg!(miri) { 1 } else { 10 };
// don't use 0 in the data -- we want to catch the zeroed-out case.
let data = (1..=DATASZ).collect::<Vec<_>>();
// since it's a fuzzy test, run several tries.
for _ in 0..ntest {
for i in 1..=DATASZ {
DROP_COUNTER.store(0, Ordering::SeqCst);
let mut panic_ords: Vec<_> =
data.iter().filter(|&&x| x != i).map(|&x| PanicOrd(x, false)).collect();
let panic_item = PanicOrd(i, true);
// heapify the sane items
panic_ords.shuffle(&mut rng);
let mut heap = BinaryHeap::from(panic_ords);
let inner_data;
{
// push the panicking item to the heap and catch the panic
let thread_result = {
let mut heap_ref = AssertUnwindSafe(&mut heap);
panic::catch_unwind(move || {
heap_ref.push(panic_item);
})
};
assert!(thread_result.is_err());
// Assert no elements were dropped
let drops = DROP_COUNTER.load(Ordering::SeqCst);
assert!(drops == 0, "Must not drop items. drops={}", drops);
inner_data = heap.clone().into_vec();
drop(heap);
}
let drops = DROP_COUNTER.load(Ordering::SeqCst);
assert_eq!(drops, DATASZ);
let mut data_sorted = inner_data.into_iter().map(|p| p.0).collect::<Vec<_>>();
data_sorted.sort();
assert_eq!(data_sorted, data);
}
}
}


@ -0,0 +1,107 @@
use super::merge_iter::MergeIterInner;
use super::node::{self, Root};
use core::alloc::Allocator;
use core::iter::FusedIterator;
impl<K, V> Root<K, V> {
/// Appends all key-value pairs from the union of two ascending iterators,
/// incrementing a `length` variable along the way. The latter makes it
/// easier for the caller to avoid a leak when a drop handler panics.
///
/// If both iterators produce the same key, this method drops the pair from
/// the left iterator and appends the pair from the right iterator.
///
/// If you want the tree to end up in a strictly ascending order, like for
/// a `BTreeMap`, both iterators should produce keys in strictly ascending
/// order, each greater than all keys in the tree, including any keys
/// already in the tree upon entry.
pub fn append_from_sorted_iters<I, A: Allocator + Clone>(
&mut self,
left: I,
right: I,
length: &mut usize,
alloc: A,
) where
K: Ord,
I: Iterator<Item = (K, V)> + FusedIterator,
{
// We prepare to merge `left` and `right` into a sorted sequence in linear time.
let iter = MergeIter(MergeIterInner::new(left, right));
// Meanwhile, we build a tree from the sorted sequence in linear time.
self.bulk_push(iter, length, alloc)
}
/// Pushes all key-value pairs to the end of the tree, incrementing a
/// `length` variable along the way. The latter makes it easier for the
/// caller to avoid a leak when the iterator panics.
pub fn bulk_push<I, A: Allocator + Clone>(&mut self, iter: I, length: &mut usize, alloc: A)
where
I: Iterator<Item = (K, V)>,
{
let mut cur_node = self.borrow_mut().last_leaf_edge().into_node();
// Iterate through all key-value pairs, pushing them into nodes at the right level.
for (key, value) in iter {
// Try to push key-value pair into the current leaf node.
if cur_node.len() < node::CAPACITY {
cur_node.push(key, value);
} else {
// No space left, go up and push there.
let mut open_node;
let mut test_node = cur_node.forget_type();
loop {
match test_node.ascend() {
Ok(parent) => {
let parent = parent.into_node();
if parent.len() < node::CAPACITY {
// Found a node with space left, push here.
open_node = parent;
break;
} else {
// Go up again.
test_node = parent.forget_type();
}
}
Err(_) => {
// We are at the top, create a new root node and push there.
open_node = self.push_internal_level(alloc.clone());
break;
}
}
}
// Push key-value pair and new right subtree.
let tree_height = open_node.height() - 1;
let mut right_tree = Root::new(alloc.clone());
for _ in 0..tree_height {
right_tree.push_internal_level(alloc.clone());
}
open_node.push(key, value, right_tree);
// Go down to the right-most leaf again.
cur_node = open_node.forget_type().last_leaf_edge().into_node();
}
// Increment length every iteration, to make sure the map drops
// the appended elements even if advancing the iterator panics.
*length += 1;
}
self.fix_right_border_of_plentiful();
}
}
// An iterator for merging two sorted sequences into one
struct MergeIter<K, V, I: Iterator<Item = (K, V)>>(MergeIterInner<I>);
impl<K: Ord, V, I> Iterator for MergeIter<K, V, I>
where
I: Iterator<Item = (K, V)> + FusedIterator,
{
type Item = (K, V);
/// If two keys are equal, returns the key-value pair from the right source.
fn next(&mut self) -> Option<(K, V)> {
let (a_next, b_next) = self.0.nexts(|a: &(K, V), b: &(K, V)| K::cmp(&a.0, &b.0));
b_next.or(a_next)
}
}
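The right-source-wins rule in `MergeIter::next` is observable through the public API: `BTreeMap::append` keeps the incoming map's value for a duplicated key. A short sketch:

```
use std::collections::BTreeMap;

fn main() {
    let mut left = BTreeMap::from([(1, "left"), (2, "left")]);
    let mut right = BTreeMap::from([(2, "right"), (3, "right")]);
    // `append` drains `right` into `left`; for the duplicate key 2 the pair
    // from the right-hand map wins, matching `MergeIter::next` above.
    left.append(&mut right);
    assert!(right.is_empty());
    assert_eq!(left, BTreeMap::from([(1, "left"), (2, "right"), (3, "right")]));
}
```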


@ -0,0 +1,69 @@
use core::marker::PhantomData;
use core::ptr::NonNull;
/// Models a reborrow of some unique reference, when you know that the reborrow
/// and all its descendants (i.e., all pointers and references derived from it)
/// will not be used any more at some point, after which you want to use the
/// original unique reference again.
///
/// The borrow checker usually handles this stacking of borrows for you, but
/// some control flows that accomplish this stacking are too complicated for
/// the compiler to follow. A `DormantMutRef` allows you to check borrowing
/// yourself, while still expressing its stacked nature, and encapsulating
/// the raw pointer code needed to do this without undefined behavior.
pub struct DormantMutRef<'a, T> {
ptr: NonNull<T>,
_marker: PhantomData<&'a mut T>,
}
unsafe impl<'a, T> Sync for DormantMutRef<'a, T> where &'a mut T: Sync {}
unsafe impl<'a, T> Send for DormantMutRef<'a, T> where &'a mut T: Send {}
impl<'a, T> DormantMutRef<'a, T> {
/// Capture a unique borrow, and immediately reborrow it. For the compiler,
/// the lifetime of the new reference is the same as the lifetime of the
/// original reference, but you promise to use it for a shorter period.
pub fn new(t: &'a mut T) -> (&'a mut T, Self) {
let ptr = NonNull::from(t);
// SAFETY: we hold the borrow throughout 'a via `_marker`, and we expose
// only this reference, so it is unique.
let new_ref = unsafe { &mut *ptr.as_ptr() };
(new_ref, Self { ptr, _marker: PhantomData })
}
/// Revert to the unique borrow initially captured.
///
/// # Safety
///
/// The reborrow must have ended, i.e., the reference returned by `new` and
/// all pointers and references derived from it, must not be used anymore.
pub unsafe fn awaken(self) -> &'a mut T {
// SAFETY: our own safety conditions imply this reference is again unique.
unsafe { &mut *self.ptr.as_ptr() }
}
/// Borrows a new mutable reference from the unique borrow initially captured.
///
/// # Safety
///
/// The reborrow must have ended, i.e., the reference returned by `new` and
/// all pointers and references derived from it, must not be used anymore.
pub unsafe fn reborrow(&mut self) -> &'a mut T {
// SAFETY: our own safety conditions imply this reference is again unique.
unsafe { &mut *self.ptr.as_ptr() }
}
/// Borrows a new shared reference from the unique borrow initially captured.
///
/// # Safety
///
/// The reborrow must have ended, i.e., the reference returned by `new` and
/// all pointers and references derived from it, must not be used anymore.
pub unsafe fn reborrow_shared(&self) -> &'a T {
// SAFETY: our own safety conditions imply this reference is again unique.
unsafe { &*self.ptr.as_ptr() }
}
}
#[cfg(test)]
mod tests;


@ -0,0 +1,19 @@
use super::DormantMutRef;
#[test]
fn test_borrow() {
let mut data = 1;
let mut stack = vec![];
let mut rr = &mut data;
for factor in [2, 3, 7].iter() {
let (r, dormant_r) = DormantMutRef::new(rr);
rr = r;
assert_eq!(*rr, 1);
stack.push((factor, dormant_r));
}
while let Some((factor, dormant_r)) = stack.pop() {
let r = unsafe { dormant_r.awaken() };
*r *= factor;
}
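// Pops come out in LIFO order, so the multiplications run 1 * 7 * 3 * 2 == 42.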
assert_eq!(data, 42);
}


@ -0,0 +1,49 @@
use core::iter::Peekable;
/// An iterator that deduplicates the keys of a sorted iterator.
/// When a duplicated key is encountered, only the last key-value pair is yielded.
///
/// Used by [`BTreeMap::bulk_build_from_sorted_iter`][1].
///
/// [1]: crate::collections::BTreeMap::bulk_build_from_sorted_iter
pub struct DedupSortedIter<K, V, I>
where
I: Iterator<Item = (K, V)>,
{
iter: Peekable<I>,
}
impl<K, V, I> DedupSortedIter<K, V, I>
where
I: Iterator<Item = (K, V)>,
{
pub fn new(iter: I) -> Self {
Self { iter: iter.peekable() }
}
}
impl<K, V, I> Iterator for DedupSortedIter<K, V, I>
where
K: Eq,
I: Iterator<Item = (K, V)>,
{
type Item = (K, V);
fn next(&mut self) -> Option<(K, V)> {
loop {
let next = match self.iter.next() {
Some(next) => next,
None => return None,
};
let peeked = match self.iter.peek() {
Some(peeked) => peeked,
None => return Some(next),
};
if next.0 != peeked.0 {
return Some(next);
}
}
}
}
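`DedupSortedIter` itself is crate-private, but its last-pair-wins rule is visible through the public constructors, which agree with sequential insertion. A short sketch:

```
use std::collections::BTreeMap;

fn main() {
    // Sorted input with a duplicated key: only the last pair per key survives,
    // which is exactly what DedupSortedIter yields during bulk building.
    let map = BTreeMap::from([(1, "a"), (2, "b"), (2, "c"), (3, "d")]);
    assert_eq!(map.len(), 3);
    assert_eq!(map[&2], "c");
}
```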


@ -0,0 +1,179 @@
use super::map::MIN_LEN;
use super::node::{marker, ForceResult::*, Handle, LeftOrRight::*, NodeRef, Root};
use core::alloc::Allocator;
impl<'a, K: 'a, V: 'a> NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal> {
/// Stocks up a possibly underfull node by merging with or stealing from a
/// sibling. If successful but at the cost of shrinking the parent node,
/// returns that shrunk parent node. Returns an `Err` if the node is
/// an empty root.
fn fix_node_through_parent<A: Allocator + Clone>(
self,
alloc: A,
) -> Result<Option<NodeRef<marker::Mut<'a>, K, V, marker::Internal>>, Self> {
let len = self.len();
if len >= MIN_LEN {
Ok(None)
} else {
match self.choose_parent_kv() {
Ok(Left(mut left_parent_kv)) => {
if left_parent_kv.can_merge() {
let parent = left_parent_kv.merge_tracking_parent(alloc);
Ok(Some(parent))
} else {
left_parent_kv.bulk_steal_left(MIN_LEN - len);
Ok(None)
}
}
Ok(Right(mut right_parent_kv)) => {
if right_parent_kv.can_merge() {
let parent = right_parent_kv.merge_tracking_parent(alloc);
Ok(Some(parent))
} else {
right_parent_kv.bulk_steal_right(MIN_LEN - len);
Ok(None)
}
}
Err(root) => {
if len > 0 {
Ok(None)
} else {
Err(root)
}
}
}
}
}
}
impl<'a, K: 'a, V: 'a> NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal> {
/// Stocks up a possibly underfull node, and if that causes its parent node
/// to shrink, stocks up the parent, recursively.
/// Returns `true` if it fixed the tree, `false` if it couldn't because the
/// root node became empty.
///
/// This method does not expect ancestors to already be underfull upon entry
/// and panics if it encounters an empty ancestor.
pub fn fix_node_and_affected_ancestors<A: Allocator + Clone>(mut self, alloc: A) -> bool {
loop {
match self.fix_node_through_parent(alloc.clone()) {
Ok(Some(parent)) => self = parent.forget_type(),
Ok(None) => return true,
Err(_) => return false,
}
}
}
}
impl<K, V> Root<K, V> {
/// Removes empty levels on the top, but keeps an empty leaf if the entire tree is empty.
pub fn fix_top<A: Allocator + Clone>(&mut self, alloc: A) {
while self.height() > 0 && self.len() == 0 {
self.pop_internal_level(alloc.clone());
}
}
/// Stocks up or merges away any underfull nodes on the right border of the
/// tree. The other nodes, those that are neither the root nor a rightmost edge,
/// must already have at least MIN_LEN elements.
pub fn fix_right_border<A: Allocator + Clone>(&mut self, alloc: A) {
self.fix_top(alloc.clone());
if self.len() > 0 {
self.borrow_mut().last_kv().fix_right_border_of_right_edge(alloc.clone());
self.fix_top(alloc);
}
}
/// The symmetric clone of `fix_right_border`.
pub fn fix_left_border<A: Allocator + Clone>(&mut self, alloc: A) {
self.fix_top(alloc.clone());
if self.len() > 0 {
self.borrow_mut().first_kv().fix_left_border_of_left_edge(alloc.clone());
self.fix_top(alloc);
}
}
/// Stocks up any underfull nodes on the right border of the tree.
/// The other nodes, those that are neither the root nor a rightmost edge,
/// must be prepared to have up to MIN_LEN elements stolen.
pub fn fix_right_border_of_plentiful(&mut self) {
let mut cur_node = self.borrow_mut();
while let Internal(internal) = cur_node.force() {
// Check if right-most child is underfull.
let mut last_kv = internal.last_kv().consider_for_balancing();
debug_assert!(last_kv.left_child_len() >= MIN_LEN * 2);
let right_child_len = last_kv.right_child_len();
if right_child_len < MIN_LEN {
// We need to steal.
last_kv.bulk_steal_left(MIN_LEN - right_child_len);
}
// Go further down.
cur_node = last_kv.into_right_child();
}
}
}
impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>, marker::KV> {
fn fix_left_border_of_left_edge<A: Allocator + Clone>(mut self, alloc: A) {
while let Internal(internal_kv) = self.force() {
self = internal_kv.fix_left_child(alloc.clone()).first_kv();
debug_assert!(self.reborrow().into_node().len() > MIN_LEN);
}
}
fn fix_right_border_of_right_edge<A: Allocator + Clone>(mut self, alloc: A) {
while let Internal(internal_kv) = self.force() {
self = internal_kv.fix_right_child(alloc.clone()).last_kv();
debug_assert!(self.reborrow().into_node().len() > MIN_LEN);
}
}
}
impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::KV> {
/// Stocks up the left child, assuming the right child isn't underfull, and
/// provisions an extra element to allow merging its children in turn
/// without becoming underfull.
/// Returns the left child.
fn fix_left_child<A: Allocator + Clone>(
self,
alloc: A,
) -> NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal> {
let mut internal_kv = self.consider_for_balancing();
let left_len = internal_kv.left_child_len();
debug_assert!(internal_kv.right_child_len() >= MIN_LEN);
if internal_kv.can_merge() {
internal_kv.merge_tracking_child(alloc)
} else {
// `MIN_LEN + 1` to avoid readjust if merge happens on the next level.
let count = (MIN_LEN + 1).saturating_sub(left_len);
if count > 0 {
internal_kv.bulk_steal_right(count);
}
internal_kv.into_left_child()
}
}
/// Stocks up the right child, assuming the left child isn't underfull, and
/// provisions an extra element to allow merging its children in turn
/// without becoming underfull.
/// Returns wherever the right child ended up.
fn fix_right_child<A: Allocator + Clone>(
self,
alloc: A,
) -> NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal> {
let mut internal_kv = self.consider_for_balancing();
let right_len = internal_kv.right_child_len();
debug_assert!(internal_kv.left_child_len() >= MIN_LEN);
if internal_kv.can_merge() {
internal_kv.merge_tracking_child(alloc)
} else {
// `MIN_LEN + 1` to avoid readjust if merge happens on the next level.
let count = (MIN_LEN + 1).saturating_sub(right_len);
if count > 0 {
internal_kv.bulk_steal_left(count);
}
internal_kv.into_right_child()
}
}
}


@ -0,0 +1,570 @@
use core::fmt::{self, Debug};
use core::marker::PhantomData;
use core::mem;
use crate::alloc::{Allocator, Global};
use super::super::borrow::DormantMutRef;
use super::super::node::{marker, Handle, NodeRef};
use super::BTreeMap;
use Entry::*;
/// A view into a single entry in a map, which may either be vacant or occupied.
///
/// This `enum` is constructed from the [`entry`] method on [`BTreeMap`].
///
/// [`entry`]: BTreeMap::entry
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "BTreeEntry")]
pub enum Entry<
'a,
K: 'a,
V: 'a,
#[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + Clone = Global,
> {
/// A vacant entry.
#[stable(feature = "rust1", since = "1.0.0")]
Vacant(#[stable(feature = "rust1", since = "1.0.0")] VacantEntry<'a, K, V, A>),
/// An occupied entry.
#[stable(feature = "rust1", since = "1.0.0")]
Occupied(#[stable(feature = "rust1", since = "1.0.0")] OccupiedEntry<'a, K, V, A>),
}
#[stable(feature = "debug_btree_map", since = "1.12.0")]
impl<K: Debug + Ord, V: Debug, A: Allocator + Clone> Debug for Entry<'_, K, V, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
Vacant(ref v) => f.debug_tuple("Entry").field(v).finish(),
Occupied(ref o) => f.debug_tuple("Entry").field(o).finish(),
}
}
}
/// A view into a vacant entry in a `BTreeMap`.
/// It is part of the [`Entry`] enum.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct VacantEntry<
'a,
K,
V,
#[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + Clone = Global,
> {
pub(super) key: K,
/// `None` for an (empty) map without a root
pub(super) handle: Option<Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge>>,
pub(super) dormant_map: DormantMutRef<'a, BTreeMap<K, V, A>>,
/// The BTreeMap will outlive this entry, so we don't care about drop order for `alloc`.
pub(super) alloc: A,
// Be invariant in `K` and `V`
pub(super) _marker: PhantomData<&'a mut (K, V)>,
}
#[stable(feature = "debug_btree_map", since = "1.12.0")]
impl<K: Debug + Ord, V, A: Allocator + Clone> Debug for VacantEntry<'_, K, V, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("VacantEntry").field(self.key()).finish()
}
}
/// A view into an occupied entry in a `BTreeMap`.
/// It is part of the [`Entry`] enum.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct OccupiedEntry<
'a,
K,
V,
#[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + Clone = Global,
> {
pub(super) handle: Handle<NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>, marker::KV>,
pub(super) dormant_map: DormantMutRef<'a, BTreeMap<K, V, A>>,
/// The BTreeMap will outlive this entry, so we don't care about drop order for `alloc`.
pub(super) alloc: A,
// Be invariant in `K` and `V`
pub(super) _marker: PhantomData<&'a mut (K, V)>,
}
#[stable(feature = "debug_btree_map", since = "1.12.0")]
impl<K: Debug + Ord, V: Debug, A: Allocator + Clone> Debug for OccupiedEntry<'_, K, V, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("OccupiedEntry").field("key", self.key()).field("value", self.get()).finish()
}
}
/// The error returned by [`try_insert`](BTreeMap::try_insert) when the key already exists.
///
/// Contains the occupied entry, and the value that was not inserted.
#[unstable(feature = "map_try_insert", issue = "82766")]
pub struct OccupiedError<'a, K: 'a, V: 'a, A: Allocator + Clone = Global> {
/// The entry in the map that was already occupied.
pub entry: OccupiedEntry<'a, K, V, A>,
/// The value which was not inserted, because the entry was already occupied.
pub value: V,
}
#[unstable(feature = "map_try_insert", issue = "82766")]
impl<K: Debug + Ord, V: Debug, A: Allocator + Clone> Debug for OccupiedError<'_, K, V, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("OccupiedError")
.field("key", self.entry.key())
.field("old_value", self.entry.get())
.field("new_value", &self.value)
.finish()
}
}
#[unstable(feature = "map_try_insert", issue = "82766")]
impl<'a, K: Debug + Ord, V: Debug, A: Allocator + Clone> fmt::Display
for OccupiedError<'a, K, V, A>
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"failed to insert {:?}, key {:?} already exists with value {:?}",
self.value,
self.entry.key(),
self.entry.get(),
)
}
}
#[unstable(feature = "map_try_insert", issue = "82766")]
impl<'a, K: core::fmt::Debug + Ord, V: core::fmt::Debug> core::error::Error
for crate::collections::btree_map::OccupiedError<'a, K, V>
{
#[allow(deprecated)]
fn description(&self) -> &str {
"key already exists"
}
}
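A sketch of how `OccupiedError` surfaces through `BTreeMap::try_insert`, on a nightly toolchain with the unstable `map_try_insert` feature (issue 82766):

```
#![feature(map_try_insert)]
use std::collections::BTreeMap;

fn main() {
    let mut map = BTreeMap::new();
    map.try_insert("poneyland", 12).unwrap();
    // The key already exists: the error hands back both the occupied entry
    // and the value that was not inserted.
    let err = map.try_insert("poneyland", 37).unwrap_err();
    assert_eq!(err.entry.get(), &12);
    assert_eq!(err.value, 37);
    assert_eq!(map["poneyland"], 12);
}
```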
impl<'a, K: Ord, V, A: Allocator + Clone> Entry<'a, K, V, A> {
/// Ensures a value is in the entry by inserting the default if empty, and returns
/// a mutable reference to the value in the entry.
///
/// # Examples
///
/// ```
/// use std::collections::BTreeMap;
///
/// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
/// map.entry("poneyland").or_insert(12);
///
/// assert_eq!(map["poneyland"], 12);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn or_insert(self, default: V) -> &'a mut V {
match self {
Occupied(entry) => entry.into_mut(),
Vacant(entry) => entry.insert(default),
}
}
/// Ensures a value is in the entry by inserting the result of the default function if empty,
/// and returns a mutable reference to the value in the entry.
///
/// # Examples
///
/// ```
/// use std::collections::BTreeMap;
///
/// let mut map: BTreeMap<&str, String> = BTreeMap::new();
/// let s = "hoho".to_string();
///
/// map.entry("poneyland").or_insert_with(|| s);
///
/// assert_eq!(map["poneyland"], "hoho".to_string());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn or_insert_with<F: FnOnce() -> V>(self, default: F) -> &'a mut V {
match self {
Occupied(entry) => entry.into_mut(),
Vacant(entry) => entry.insert(default()),
}
}
/// Ensures a value is in the entry by inserting, if empty, the result of the default function.
/// This method allows for generating key-derived values for insertion by providing the default
/// function a reference to the key that was moved during the `.entry(key)` method call.
///
/// The reference to the moved key is provided so that cloning or copying the key is
/// unnecessary, unlike with `.or_insert_with(|| ... )`.
///
/// # Examples
///
/// ```
/// use std::collections::BTreeMap;
///
/// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
///
/// map.entry("poneyland").or_insert_with_key(|key| key.chars().count());
///
/// assert_eq!(map["poneyland"], 9);
/// ```
#[inline]
#[stable(feature = "or_insert_with_key", since = "1.50.0")]
pub fn or_insert_with_key<F: FnOnce(&K) -> V>(self, default: F) -> &'a mut V {
match self {
Occupied(entry) => entry.into_mut(),
Vacant(entry) => {
let value = default(entry.key());
entry.insert(value)
}
}
}
/// Returns a reference to this entry's key.
///
/// # Examples
///
/// ```
/// use std::collections::BTreeMap;
///
/// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
/// assert_eq!(map.entry("poneyland").key(), &"poneyland");
/// ```
#[stable(feature = "map_entry_keys", since = "1.10.0")]
pub fn key(&self) -> &K {
match *self {
Occupied(ref entry) => entry.key(),
Vacant(ref entry) => entry.key(),
}
}
/// Provides in-place mutable access to an occupied entry before any
/// potential inserts into the map.
///
/// # Examples
///
/// ```
/// use std::collections::BTreeMap;
///
/// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
///
/// map.entry("poneyland")
/// .and_modify(|e| { *e += 1 })
/// .or_insert(42);
/// assert_eq!(map["poneyland"], 42);
///
/// map.entry("poneyland")
/// .and_modify(|e| { *e += 1 })
/// .or_insert(42);
/// assert_eq!(map["poneyland"], 43);
/// ```
#[stable(feature = "entry_and_modify", since = "1.26.0")]
pub fn and_modify<F>(self, f: F) -> Self
where
F: FnOnce(&mut V),
{
match self {
Occupied(mut entry) => {
f(entry.get_mut());
Occupied(entry)
}
Vacant(entry) => Vacant(entry),
}
}
}
impl<'a, K: Ord, V: Default, A: Allocator + Clone> Entry<'a, K, V, A> {
#[stable(feature = "entry_or_default", since = "1.28.0")]
/// Ensures a value is in the entry by inserting the default value if empty,
/// and returns a mutable reference to the value in the entry.
///
/// # Examples
///
/// ```
/// use std::collections::BTreeMap;
///
/// let mut map: BTreeMap<&str, Option<usize>> = BTreeMap::new();
/// map.entry("poneyland").or_default();
///
/// assert_eq!(map["poneyland"], None);
/// ```
pub fn or_default(self) -> &'a mut V {
match self {
Occupied(entry) => entry.into_mut(),
Vacant(entry) => entry.insert(Default::default()),
}
}
}
impl<'a, K: Ord, V, A: Allocator + Clone> VacantEntry<'a, K, V, A> {
/// Gets a reference to the key that would be used when inserting a value
/// through the VacantEntry.
///
/// # Examples
///
/// ```
/// use std::collections::BTreeMap;
///
/// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
/// assert_eq!(map.entry("poneyland").key(), &"poneyland");
/// ```
#[stable(feature = "map_entry_keys", since = "1.10.0")]
pub fn key(&self) -> &K {
&self.key
}
/// Take ownership of the key.
///
/// # Examples
///
/// ```
/// use std::collections::BTreeMap;
/// use std::collections::btree_map::Entry;
///
/// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
///
/// if let Entry::Vacant(v) = map.entry("poneyland") {
/// v.into_key();
/// }
/// ```
#[stable(feature = "map_entry_recover_keys2", since = "1.12.0")]
pub fn into_key(self) -> K {
self.key
}
/// Sets the value of the entry with the `VacantEntry`'s key,
/// and returns a mutable reference to it.
///
/// # Examples
///
/// ```
/// use std::collections::BTreeMap;
/// use std::collections::btree_map::Entry;
///
/// let mut map: BTreeMap<&str, u32> = BTreeMap::new();
///
/// if let Entry::Vacant(o) = map.entry("poneyland") {
/// o.insert(37);
/// }
/// assert_eq!(map["poneyland"], 37);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_confusables("push", "put")]
pub fn insert(mut self, value: V) -> &'a mut V {
let out_ptr = match self.handle {
None => {
// SAFETY: There is no tree yet so no reference to it exists.
let map = unsafe { self.dormant_map.awaken() };
let mut root = NodeRef::new_leaf(self.alloc.clone());
let val_ptr = root.borrow_mut().push(self.key, value);
map.root = Some(root.forget_type());
map.length = 1;
val_ptr
}
Some(handle) => {
let new_handle =
handle.insert_recursing(self.key, value, self.alloc.clone(), |ins| {
drop(ins.left);
// SAFETY: Pushing a new root node doesn't invalidate
// handles to existing nodes.
let map = unsafe { self.dormant_map.reborrow() };
let root = map.root.as_mut().unwrap(); // same as ins.left
root.push_internal_level(self.alloc).push(ins.kv.0, ins.kv.1, ins.right)
});
// Get the pointer to the value
let val_ptr = new_handle.into_val_mut();
// SAFETY: We have consumed self.handle.
let map = unsafe { self.dormant_map.awaken() };
map.length += 1;
val_ptr
}
};
// Now that we have finished growing the tree using borrowed references,
// dereference the pointer to a part of it, that we picked up along the way.
unsafe { &mut *out_ptr }
}
}
impl<'a, K: Ord, V, A: Allocator + Clone> OccupiedEntry<'a, K, V, A> {
/// Gets a reference to the key in the entry.
///
/// # Examples
///
/// ```
/// use std::collections::BTreeMap;
///
/// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
/// map.entry("poneyland").or_insert(12);
/// assert_eq!(map.entry("poneyland").key(), &"poneyland");
/// ```
#[must_use]
#[stable(feature = "map_entry_keys", since = "1.10.0")]
pub fn key(&self) -> &K {
self.handle.reborrow().into_kv().0
}
/// Take ownership of the key and value from the map.
///
/// # Examples
///
/// ```
/// use std::collections::BTreeMap;
/// use std::collections::btree_map::Entry;
///
/// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
/// map.entry("poneyland").or_insert(12);
///
/// if let Entry::Occupied(o) = map.entry("poneyland") {
/// // We delete the entry from the map.
/// o.remove_entry();
/// }
///
/// // If we now try to get the value, it will panic:
/// // println!("{}", map["poneyland"]);
/// ```
#[stable(feature = "map_entry_recover_keys2", since = "1.12.0")]
pub fn remove_entry(self) -> (K, V) {
self.remove_kv()
}
/// Gets a reference to the value in the entry.
///
/// # Examples
///
/// ```
/// use std::collections::BTreeMap;
/// use std::collections::btree_map::Entry;
///
/// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
/// map.entry("poneyland").or_insert(12);
///
/// if let Entry::Occupied(o) = map.entry("poneyland") {
/// assert_eq!(o.get(), &12);
/// }
/// ```
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn get(&self) -> &V {
self.handle.reborrow().into_kv().1
}
/// Gets a mutable reference to the value in the entry.
///
/// If you need a reference to the `OccupiedEntry` that may outlive the
/// destruction of the `Entry` value, see [`into_mut`].
///
/// [`into_mut`]: OccupiedEntry::into_mut
///
/// # Examples
///
/// ```
/// use std::collections::BTreeMap;
/// use std::collections::btree_map::Entry;
///
/// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
/// map.entry("poneyland").or_insert(12);
///
/// assert_eq!(map["poneyland"], 12);
/// if let Entry::Occupied(mut o) = map.entry("poneyland") {
/// *o.get_mut() += 10;
/// assert_eq!(*o.get(), 22);
///
/// // We can use the same Entry multiple times.
/// *o.get_mut() += 2;
/// }
/// assert_eq!(map["poneyland"], 24);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn get_mut(&mut self) -> &mut V {
self.handle.kv_mut().1
}
/// Converts the entry into a mutable reference to its value.
///
/// If you need multiple references to the `OccupiedEntry`, see [`get_mut`].
///
/// [`get_mut`]: OccupiedEntry::get_mut
///
/// # Examples
///
/// ```
/// use std::collections::BTreeMap;
/// use std::collections::btree_map::Entry;
///
/// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
/// map.entry("poneyland").or_insert(12);
///
/// assert_eq!(map["poneyland"], 12);
/// if let Entry::Occupied(o) = map.entry("poneyland") {
/// *o.into_mut() += 10;
/// }
/// assert_eq!(map["poneyland"], 22);
/// ```
#[must_use = "`self` will be dropped if the result is not used"]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn into_mut(self) -> &'a mut V {
self.handle.into_val_mut()
}
/// Sets the value of the entry with the `OccupiedEntry`'s key,
/// and returns the entry's old value.
///
/// # Examples
///
/// ```
/// use std::collections::BTreeMap;
/// use std::collections::btree_map::Entry;
///
/// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
/// map.entry("poneyland").or_insert(12);
///
/// if let Entry::Occupied(mut o) = map.entry("poneyland") {
/// assert_eq!(o.insert(15), 12);
/// }
/// assert_eq!(map["poneyland"], 15);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_confusables("push", "put")]
pub fn insert(&mut self, value: V) -> V {
mem::replace(self.get_mut(), value)
}
/// Takes the value of the entry out of the map, and returns it.
///
/// # Examples
///
/// ```
/// use std::collections::BTreeMap;
/// use std::collections::btree_map::Entry;
///
/// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
/// map.entry("poneyland").or_insert(12);
///
/// if let Entry::Occupied(o) = map.entry("poneyland") {
/// assert_eq!(o.remove(), 12);
/// }
/// // If we try to get "poneyland"'s value, it'll panic:
/// // println!("{}", map["poneyland"]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_confusables("delete", "take")]
pub fn remove(self) -> V {
self.remove_kv().1
}
// Body of `remove_entry`, probably separate because the name reflects the returned pair.
pub(super) fn remove_kv(self) -> (K, V) {
let mut emptied_internal_root = false;
let (old_kv, _) =
self.handle.remove_kv_tracking(|| emptied_internal_root = true, self.alloc.clone());
// SAFETY: we consumed the intermediate root borrow, `self.handle`.
let map = unsafe { self.dormant_map.awaken() };
map.length -= 1;
if emptied_internal_root {
let root = map.root.as_mut().unwrap();
root.pop_internal_level(self.alloc);
}
old_kv
}
}


@ -0,0 +1,35 @@
use core::intrinsics;
use core::mem;
use core::ptr;
/// This replaces the value behind the `v` unique reference by calling the
/// relevant function.
///
/// If a panic occurs in the `change` closure, the entire process will be aborted.
#[allow(dead_code)] // keep as illustration and for future use
#[inline]
pub fn take_mut<T>(v: &mut T, change: impl FnOnce(T) -> T) {
replace(v, |value| (change(value), ()))
}
/// This replaces the value behind the `v` unique reference by calling the
/// relevant function, and returns a result obtained along the way.
///
/// If a panic occurs in the `change` closure, the entire process will be aborted.
#[inline]
pub fn replace<T, R>(v: &mut T, change: impl FnOnce(T) -> (T, R)) -> R {
struct PanicGuard;
impl Drop for PanicGuard {
fn drop(&mut self) {
intrinsics::abort()
}
}
let guard = PanicGuard;
let value = unsafe { ptr::read(v) };
let (new_value, ret) = change(value);
unsafe {
ptr::write(v, new_value);
}
mem::forget(guard);
ret
}
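These helpers are private to the btree module, but the abort-on-panic replace pattern is easy to reproduce. A standalone copy (not exported anywhere), shown here purely as illustration:

```
use core::{mem, ptr};

/// Replaces the value behind a unique reference with a by-value function,
/// aborting the process if the closure panics mid-replacement.
fn take_mut<T>(v: &mut T, change: impl FnOnce(T) -> T) {
    struct PanicGuard;
    impl Drop for PanicGuard {
        fn drop(&mut self) {
            // Between the `read` and the `write`, `*v` is logically moved out;
            // if `change` unwinds here, unwinding would drop `*v` again, so abort.
            std::process::abort()
        }
    }
    let guard = PanicGuard;
    let value = unsafe { ptr::read(v) };
    let new_value = change(value);
    unsafe { ptr::write(v, new_value) };
    mem::forget(guard);
}

fn main() {
    let mut s = String::from("hello");
    take_mut(&mut s, |s| s + ", world");
    assert_eq!(s, "hello, world");
}
```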


@ -0,0 +1,98 @@
use core::cmp::Ordering;
use core::fmt::{self, Debug};
use core::iter::FusedIterator;
/// Core of an iterator that merges the output of two strictly ascending iterators,
/// for instance a union or a symmetric difference.
pub struct MergeIterInner<I: Iterator> {
a: I,
b: I,
peeked: Option<Peeked<I>>,
}
/// Benchmarks faster than wrapping both iterators in a Peekable,
/// probably because we can afford to impose a FusedIterator bound.
#[derive(Clone, Debug)]
enum Peeked<I: Iterator> {
A(I::Item),
B(I::Item),
}
impl<I: Iterator> Clone for MergeIterInner<I>
where
I: Clone,
I::Item: Clone,
{
fn clone(&self) -> Self {
Self { a: self.a.clone(), b: self.b.clone(), peeked: self.peeked.clone() }
}
}
impl<I: Iterator> Debug for MergeIterInner<I>
where
I: Debug,
I::Item: Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("MergeIterInner").field(&self.a).field(&self.b).field(&self.peeked).finish()
}
}
impl<I: Iterator> MergeIterInner<I> {
/// Creates a new core for an iterator merging a pair of sources.
pub fn new(a: I, b: I) -> Self {
MergeIterInner { a, b, peeked: None }
}
/// Returns the next pair of items stemming from the pair of sources
/// being merged. If both returned options contain a value, that value
/// is equal and occurs in both sources. If one of the returned options
/// contains a value, that value doesn't occur in the other source (or
/// the sources are not strictly ascending). If neither returned option
/// contains a value, iteration has finished and subsequent calls will
/// return the same empty pair.
pub fn nexts<Cmp: Fn(&I::Item, &I::Item) -> Ordering>(
&mut self,
cmp: Cmp,
) -> (Option<I::Item>, Option<I::Item>)
where
I: FusedIterator,
{
let mut a_next;
let mut b_next;
match self.peeked.take() {
Some(Peeked::A(next)) => {
a_next = Some(next);
b_next = self.b.next();
}
Some(Peeked::B(next)) => {
b_next = Some(next);
a_next = self.a.next();
}
None => {
a_next = self.a.next();
b_next = self.b.next();
}
}
if let (Some(ref a1), Some(ref b1)) = (&a_next, &b_next) {
match cmp(a1, b1) {
Ordering::Less => self.peeked = b_next.take().map(Peeked::B),
Ordering::Greater => self.peeked = a_next.take().map(Peeked::A),
Ordering::Equal => (),
}
}
(a_next, b_next)
}
/// Returns a pair of upper bounds for the `size_hint` of the final iterator.
pub fn lens(&self) -> (usize, usize)
where
I: ExactSizeIterator,
{
match self.peeked {
Some(Peeked::A(_)) => (1 + self.a.len(), self.b.len()),
Some(Peeked::B(_)) => (self.a.len(), 1 + self.b.len()),
_ => (self.a.len(), self.b.len()),
}
}
}
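As its doc comment notes, `MergeIterInner` backs merging iterators such as `BTreeSet`'s union and symmetric difference; the `nexts` contract (both options filled exactly when an item occurs in both sources) is what lets `union` emit a shared element once while `symmetric_difference` drops it. Observable via the public API:

```
use std::collections::BTreeSet;

fn main() {
    let a = BTreeSet::from([1, 2, 3]);
    let b = BTreeSet::from([3, 4]);
    // 3 occurs in both sources: union yields it once...
    assert_eq!(a.union(&b).copied().collect::<Vec<_>>(), [1, 2, 3, 4]);
    // ...and symmetric_difference skips it entirely.
    assert_eq!(a.symmetric_difference(&b).copied().collect::<Vec<_>>(), [1, 2, 4]);
}
```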


@ -0,0 +1,22 @@
mod append;
mod borrow;
mod dedup_sorted_iter;
mod fix;
pub mod map;
mod mem;
mod merge_iter;
mod navigate;
mod node;
mod remove;
mod search;
pub mod set;
mod set_val;
mod split;
trait Recover<Q: ?Sized> {
type Key;
fn get(&self, key: &Q) -> Option<&Self::Key>;
fn take(&mut self, key: &Q) -> Option<Self::Key>;
fn replace(&mut self, key: Self::Key) -> Option<Self::Key>;
}


@ -0,0 +1,782 @@
use core::borrow::Borrow;
use core::hint;
use core::ops::RangeBounds;
use core::ptr;
use super::node::{marker, ForceResult::*, Handle, NodeRef};
use super::search::SearchBound;
use crate::alloc::Allocator;
// `front` and `back` are always both `None` or both `Some`.
pub struct LeafRange<BorrowType, K, V> {
front: Option<Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge>>,
back: Option<Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge>>,
}
impl<'a, K: 'a, V: 'a> Clone for LeafRange<marker::Immut<'a>, K, V> {
fn clone(&self) -> Self {
LeafRange { front: self.front.clone(), back: self.back.clone() }
}
}
impl<B, K, V> Default for LeafRange<B, K, V> {
fn default() -> Self {
LeafRange { front: None, back: None }
}
}
impl<BorrowType, K, V> LeafRange<BorrowType, K, V> {
pub fn none() -> Self {
LeafRange { front: None, back: None }
}
fn is_empty(&self) -> bool {
self.front == self.back
}
/// Temporarily takes out another, immutable equivalent of the same range.
pub fn reborrow(&self) -> LeafRange<marker::Immut<'_>, K, V> {
LeafRange {
front: self.front.as_ref().map(|f| f.reborrow()),
back: self.back.as_ref().map(|b| b.reborrow()),
}
}
}
impl<'a, K, V> LeafRange<marker::Immut<'a>, K, V> {
#[inline]
pub fn next_checked(&mut self) -> Option<(&'a K, &'a V)> {
self.perform_next_checked(|kv| kv.into_kv())
}
#[inline]
pub fn next_back_checked(&mut self) -> Option<(&'a K, &'a V)> {
self.perform_next_back_checked(|kv| kv.into_kv())
}
}
impl<'a, K, V> LeafRange<marker::ValMut<'a>, K, V> {
#[inline]
pub fn next_checked(&mut self) -> Option<(&'a K, &'a mut V)> {
self.perform_next_checked(|kv| unsafe { ptr::read(kv) }.into_kv_valmut())
}
#[inline]
pub fn next_back_checked(&mut self) -> Option<(&'a K, &'a mut V)> {
self.perform_next_back_checked(|kv| unsafe { ptr::read(kv) }.into_kv_valmut())
}
}
impl<BorrowType: marker::BorrowType, K, V> LeafRange<BorrowType, K, V> {
/// If possible, extract some result from the following KV and move to the edge beyond it.
fn perform_next_checked<F, R>(&mut self, f: F) -> Option<R>
where
F: Fn(&Handle<NodeRef<BorrowType, K, V, marker::LeafOrInternal>, marker::KV>) -> R,
{
if self.is_empty() {
None
} else {
super::mem::replace(self.front.as_mut().unwrap(), |front| {
let kv = front.next_kv().ok().unwrap();
let result = f(&kv);
(kv.next_leaf_edge(), Some(result))
})
}
}
/// If possible, extract some result from the preceding KV and move to the edge beyond it.
fn perform_next_back_checked<F, R>(&mut self, f: F) -> Option<R>
where
F: Fn(&Handle<NodeRef<BorrowType, K, V, marker::LeafOrInternal>, marker::KV>) -> R,
{
if self.is_empty() {
None
} else {
super::mem::replace(self.back.as_mut().unwrap(), |back| {
let kv = back.next_back_kv().ok().unwrap();
let result = f(&kv);
(kv.next_back_leaf_edge(), Some(result))
})
}
}
}
enum LazyLeafHandle<BorrowType, K, V> {
Root(NodeRef<BorrowType, K, V, marker::LeafOrInternal>), // not yet descended
Edge(Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge>),
}
impl<'a, K: 'a, V: 'a> Clone for LazyLeafHandle<marker::Immut<'a>, K, V> {
fn clone(&self) -> Self {
match self {
LazyLeafHandle::Root(root) => LazyLeafHandle::Root(*root),
LazyLeafHandle::Edge(edge) => LazyLeafHandle::Edge(*edge),
}
}
}
impl<BorrowType, K, V> LazyLeafHandle<BorrowType, K, V> {
fn reborrow(&self) -> LazyLeafHandle<marker::Immut<'_>, K, V> {
match self {
LazyLeafHandle::Root(root) => LazyLeafHandle::Root(root.reborrow()),
LazyLeafHandle::Edge(edge) => LazyLeafHandle::Edge(edge.reborrow()),
}
}
}
// `front` and `back` are always both `None` or both `Some`.
pub struct LazyLeafRange<BorrowType, K, V> {
front: Option<LazyLeafHandle<BorrowType, K, V>>,
back: Option<LazyLeafHandle<BorrowType, K, V>>,
}
impl<B, K, V> Default for LazyLeafRange<B, K, V> {
fn default() -> Self {
LazyLeafRange { front: None, back: None }
}
}
impl<'a, K: 'a, V: 'a> Clone for LazyLeafRange<marker::Immut<'a>, K, V> {
fn clone(&self) -> Self {
LazyLeafRange { front: self.front.clone(), back: self.back.clone() }
}
}
impl<BorrowType, K, V> LazyLeafRange<BorrowType, K, V> {
pub fn none() -> Self {
LazyLeafRange { front: None, back: None }
}
/// Temporarily takes out another, immutable equivalent of the same range.
pub fn reborrow(&self) -> LazyLeafRange<marker::Immut<'_>, K, V> {
LazyLeafRange {
front: self.front.as_ref().map(|f| f.reborrow()),
back: self.back.as_ref().map(|b| b.reborrow()),
}
}
}
impl<'a, K, V> LazyLeafRange<marker::Immut<'a>, K, V> {
#[inline]
pub unsafe fn next_unchecked(&mut self) -> (&'a K, &'a V) {
unsafe { self.init_front().unwrap().next_unchecked() }
}
#[inline]
pub unsafe fn next_back_unchecked(&mut self) -> (&'a K, &'a V) {
unsafe { self.init_back().unwrap().next_back_unchecked() }
}
}
impl<'a, K, V> LazyLeafRange<marker::ValMut<'a>, K, V> {
#[inline]
pub unsafe fn next_unchecked(&mut self) -> (&'a K, &'a mut V) {
unsafe { self.init_front().unwrap().next_unchecked() }
}
#[inline]
pub unsafe fn next_back_unchecked(&mut self) -> (&'a K, &'a mut V) {
unsafe { self.init_back().unwrap().next_back_unchecked() }
}
}
impl<K, V> LazyLeafRange<marker::Dying, K, V> {
fn take_front(
&mut self,
) -> Option<Handle<NodeRef<marker::Dying, K, V, marker::Leaf>, marker::Edge>> {
match self.front.take()? {
LazyLeafHandle::Root(root) => Some(root.first_leaf_edge()),
LazyLeafHandle::Edge(edge) => Some(edge),
}
}
#[inline]
pub unsafe fn deallocating_next_unchecked<A: Allocator + Clone>(
&mut self,
alloc: A,
) -> Handle<NodeRef<marker::Dying, K, V, marker::LeafOrInternal>, marker::KV> {
debug_assert!(self.front.is_some());
let front = self.init_front().unwrap();
unsafe { front.deallocating_next_unchecked(alloc) }
}
#[inline]
pub unsafe fn deallocating_next_back_unchecked<A: Allocator + Clone>(
&mut self,
alloc: A,
) -> Handle<NodeRef<marker::Dying, K, V, marker::LeafOrInternal>, marker::KV> {
debug_assert!(self.back.is_some());
let back = self.init_back().unwrap();
unsafe { back.deallocating_next_back_unchecked(alloc) }
}
#[inline]
pub fn deallocating_end<A: Allocator + Clone>(&mut self, alloc: A) {
if let Some(front) = self.take_front() {
front.deallocating_end(alloc)
}
}
}
impl<BorrowType: marker::BorrowType, K, V> LazyLeafRange<BorrowType, K, V> {
fn init_front(
&mut self,
) -> Option<&mut Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge>> {
if let Some(LazyLeafHandle::Root(root)) = &self.front {
self.front = Some(LazyLeafHandle::Edge(unsafe { ptr::read(root) }.first_leaf_edge()));
}
match &mut self.front {
None => None,
Some(LazyLeafHandle::Edge(edge)) => Some(edge),
// SAFETY: the code above would have replaced it.
Some(LazyLeafHandle::Root(_)) => unsafe { hint::unreachable_unchecked() },
}
}
fn init_back(
&mut self,
) -> Option<&mut Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge>> {
if let Some(LazyLeafHandle::Root(root)) = &self.back {
self.back = Some(LazyLeafHandle::Edge(unsafe { ptr::read(root) }.last_leaf_edge()));
}
match &mut self.back {
None => None,
Some(LazyLeafHandle::Edge(edge)) => Some(edge),
// SAFETY: the code above would have replaced it.
Some(LazyLeafHandle::Root(_)) => unsafe { hint::unreachable_unchecked() },
}
}
}
impl<BorrowType: marker::BorrowType, K, V> NodeRef<BorrowType, K, V, marker::LeafOrInternal> {
/// Finds the distinct leaf edges delimiting a specified range in a tree.
///
/// If such distinct edges exist, returns them in ascending order, meaning
/// that a non-zero number of calls to `next_unchecked` on the `front` of
/// the result and/or calls to `next_back_unchecked` on the `back` of the
/// result will eventually reach the same edge.
///
/// If there are no such edges, i.e., if the tree contains no key within
/// the range, returns an empty `front` and `back`.
///
/// # Safety
/// Unless `BorrowType` is `Immut`, do not use the handles to visit the same
/// KV twice.
unsafe fn find_leaf_edges_spanning_range<Q: ?Sized, R>(
self,
range: R,
) -> LeafRange<BorrowType, K, V>
where
Q: Ord,
K: Borrow<Q>,
R: RangeBounds<Q>,
{
match self.search_tree_for_bifurcation(&range) {
Err(_) => LeafRange::none(),
Ok((
node,
lower_edge_idx,
upper_edge_idx,
mut lower_child_bound,
mut upper_child_bound,
)) => {
let mut lower_edge = unsafe { Handle::new_edge(ptr::read(&node), lower_edge_idx) };
let mut upper_edge = unsafe { Handle::new_edge(node, upper_edge_idx) };
loop {
match (lower_edge.force(), upper_edge.force()) {
(Leaf(f), Leaf(b)) => return LeafRange { front: Some(f), back: Some(b) },
(Internal(f), Internal(b)) => {
(lower_edge, lower_child_bound) =
f.descend().find_lower_bound_edge(lower_child_bound);
(upper_edge, upper_child_bound) =
b.descend().find_upper_bound_edge(upper_child_bound);
}
_ => unreachable!("BTreeMap has different depths"),
}
}
}
}
}
}
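// An illustrative sketch added for this write-up (not in the original file):
// the bifurcation search above is the machinery behind the public
// `BTreeMap::range` iterator, whose front and back ends are the two leaf
// edges it finds.
#[cfg(test)]
mod range_search_sketch {
    use crate::collections::BTreeMap;
    use crate::vec::Vec;

    #[test]
    fn range_yields_only_keys_between_the_leaf_edges() {
        let map = BTreeMap::from([(1, 'a'), (3, 'b'), (5, 'c'), (7, 'd')]);
        // Only keys inside the requested bounds are visited.
        let keys: Vec<i32> = map.range(2..=5).map(|(k, _)| *k).collect();
        assert_eq!(keys, [3, 5]);
    }
}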
fn full_range<BorrowType: marker::BorrowType, K, V>(
root1: NodeRef<BorrowType, K, V, marker::LeafOrInternal>,
root2: NodeRef<BorrowType, K, V, marker::LeafOrInternal>,
) -> LazyLeafRange<BorrowType, K, V> {
LazyLeafRange {
front: Some(LazyLeafHandle::Root(root1)),
back: Some(LazyLeafHandle::Root(root2)),
}
}
impl<'a, K: 'a, V: 'a> NodeRef<marker::Immut<'a>, K, V, marker::LeafOrInternal> {
/// Finds the pair of leaf edges delimiting a specific range in a tree.
///
/// The result is meaningful only if the tree is ordered by key, like the tree
/// in a `BTreeMap` is.
pub fn range_search<Q, R>(self, range: R) -> LeafRange<marker::Immut<'a>, K, V>
where
Q: ?Sized + Ord,
K: Borrow<Q>,
R: RangeBounds<Q>,
{
// SAFETY: our borrow type is immutable.
unsafe { self.find_leaf_edges_spanning_range(range) }
}
/// Finds the pair of leaf edges delimiting an entire tree.
pub fn full_range(self) -> LazyLeafRange<marker::Immut<'a>, K, V> {
full_range(self, self)
}
}
impl<'a, K: 'a, V: 'a> NodeRef<marker::ValMut<'a>, K, V, marker::LeafOrInternal> {
/// Splits a unique reference into a pair of leaf edges delimiting a specified range.
/// The results are non-unique references allowing (some) mutation, which must be used
/// carefully.
///
/// The result is meaningful only if the tree is ordered by key, like the tree
/// in a `BTreeMap` is.
///
/// # Safety
/// Do not use the duplicate handles to visit the same KV twice.
pub fn range_search<Q, R>(self, range: R) -> LeafRange<marker::ValMut<'a>, K, V>
where
Q: ?Sized + Ord,
K: Borrow<Q>,
R: RangeBounds<Q>,
{
unsafe { self.find_leaf_edges_spanning_range(range) }
}
/// Splits a unique reference into a pair of leaf edges delimiting the full range of the tree.
/// The results are non-unique references allowing mutation (of values only), so must be used
/// with care.
pub fn full_range(self) -> LazyLeafRange<marker::ValMut<'a>, K, V> {
// We duplicate the root NodeRef here -- we will never visit the same KV
// twice, and never end up with overlapping value references.
let self2 = unsafe { ptr::read(&self) };
full_range(self, self2)
}
}
impl<K, V> NodeRef<marker::Dying, K, V, marker::LeafOrInternal> {
/// Splits a unique reference into a pair of leaf edges delimiting the full range of the tree.
/// The results are non-unique references allowing massively destructive mutation, so must be
/// used with the utmost care.
pub fn full_range(self) -> LazyLeafRange<marker::Dying, K, V> {
// We duplicate the root NodeRef here -- we will never access it in a way
// that overlaps references obtained from the root.
let self2 = unsafe { ptr::read(&self) };
full_range(self, self2)
}
}
impl<BorrowType: marker::BorrowType, K, V>
Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge>
{
/// Given a leaf edge handle, returns [`Result::Ok`] with a handle to the neighboring KV
/// on the right side, which is either in the same leaf node or in an ancestor node.
/// If the leaf edge is the last one in the tree, returns [`Result::Err`] with the root node.
pub fn next_kv(
self,
) -> Result<
Handle<NodeRef<BorrowType, K, V, marker::LeafOrInternal>, marker::KV>,
NodeRef<BorrowType, K, V, marker::LeafOrInternal>,
> {
let mut edge = self.forget_node_type();
loop {
edge = match edge.right_kv() {
Ok(kv) => return Ok(kv),
Err(last_edge) => match last_edge.into_node().ascend() {
Ok(parent_edge) => parent_edge.forget_node_type(),
Err(root) => return Err(root),
},
}
}
}
/// Given a leaf edge handle, returns [`Result::Ok`] with a handle to the neighboring KV
/// on the left side, which is either in the same leaf node or in an ancestor node.
/// If the leaf edge is the first one in the tree, returns [`Result::Err`] with the root node.
pub fn next_back_kv(
self,
) -> Result<
Handle<NodeRef<BorrowType, K, V, marker::LeafOrInternal>, marker::KV>,
NodeRef<BorrowType, K, V, marker::LeafOrInternal>,
> {
let mut edge = self.forget_node_type();
loop {
edge = match edge.left_kv() {
Ok(kv) => return Ok(kv),
Err(last_edge) => match last_edge.into_node().ascend() {
Ok(parent_edge) => parent_edge.forget_node_type(),
Err(root) => return Err(root),
},
}
}
}
}
impl<BorrowType: marker::BorrowType, K, V>
Handle<NodeRef<BorrowType, K, V, marker::Internal>, marker::Edge>
{
/// Given an internal edge handle, returns [`Result::Ok`] with a handle to the neighboring KV
/// on the right side, which is either in the same internal node or in an ancestor node.
/// If the internal edge is the last one in the tree, returns [`Result::Err`] with the root node.
fn next_kv(
self,
) -> Result<
Handle<NodeRef<BorrowType, K, V, marker::Internal>, marker::KV>,
NodeRef<BorrowType, K, V, marker::Internal>,
> {
let mut edge = self;
loop {
edge = match edge.right_kv() {
Ok(internal_kv) => return Ok(internal_kv),
Err(last_edge) => match last_edge.into_node().ascend() {
Ok(parent_edge) => parent_edge,
Err(root) => return Err(root),
},
}
}
}
}
impl<K, V> Handle<NodeRef<marker::Dying, K, V, marker::Leaf>, marker::Edge> {
/// Given a leaf edge handle into a dying tree, returns the next leaf edge
/// on the right side, and the key-value pair in between, if they exist.
///
/// If the given edge is the last one in a leaf, this method deallocates
/// the leaf, as well as any ancestor nodes whose last edge was reached.
/// This implies that if no more key-value pair follows, the entire tree
/// will have been deallocated and there is nothing left to return.
///
/// # Safety
/// - The given edge must not have been previously returned by counterpart
/// `deallocating_next_back`.
/// - The returned KV handle is only valid to access the key and value,
/// and only valid until the next call to a `deallocating_` method.
unsafe fn deallocating_next<A: Allocator + Clone>(
self,
alloc: A,
) -> Option<(Self, Handle<NodeRef<marker::Dying, K, V, marker::LeafOrInternal>, marker::KV>)>
{
let mut edge = self.forget_node_type();
loop {
edge = match edge.right_kv() {
Ok(kv) => return Some((unsafe { ptr::read(&kv) }.next_leaf_edge(), kv)),
Err(last_edge) => {
match unsafe { last_edge.into_node().deallocate_and_ascend(alloc.clone()) } {
Some(parent_edge) => parent_edge.forget_node_type(),
None => return None,
}
}
}
}
}
/// Given a leaf edge handle into a dying tree, returns the next leaf edge
/// on the left side, and the key-value pair in between, if they exist.
///
/// If the given edge is the first one in a leaf, this method deallocates
/// the leaf, as well as any ancestor nodes whose first edge was reached.
/// This implies that if no more key-value pair follows, the entire tree
/// will have been deallocated and there is nothing left to return.
///
/// # Safety
/// - The given edge must not have been previously returned by counterpart
/// `deallocating_next`.
/// - The returned KV handle is only valid to access the key and value,
/// and only valid until the next call to a `deallocating_` method.
unsafe fn deallocating_next_back<A: Allocator + Clone>(
self,
alloc: A,
) -> Option<(Self, Handle<NodeRef<marker::Dying, K, V, marker::LeafOrInternal>, marker::KV>)>
{
let mut edge = self.forget_node_type();
loop {
edge = match edge.left_kv() {
Ok(kv) => return Some((unsafe { ptr::read(&kv) }.next_back_leaf_edge(), kv)),
Err(last_edge) => {
match unsafe { last_edge.into_node().deallocate_and_ascend(alloc.clone()) } {
Some(parent_edge) => parent_edge.forget_node_type(),
None => return None,
}
}
}
}
}
/// Deallocates a pile of nodes from the leaf up to the root.
/// This is the only way to deallocate the remainder of a tree after
/// `deallocating_next` and `deallocating_next_back` have been nibbling at
/// both sides of the tree, and have hit the same edge. As it is intended
/// only to be called when all keys and values have been returned,
/// no cleanup is done on any of the keys or values.
fn deallocating_end<A: Allocator + Clone>(self, alloc: A) {
let mut edge = self.forget_node_type();
while let Some(parent_edge) =
unsafe { edge.into_node().deallocate_and_ascend(alloc.clone()) }
{
edge = parent_edge.forget_node_type();
}
}
}
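// A short sketch for illustration (not in the original file): this
// deallocating traversal is what `BTreeMap`'s `IntoIter` is built on;
// draining the iterator hands every pair out by value and frees the nodes
// as it goes.
#[cfg(test)]
mod dealloc_iter_sketch {
    use crate::collections::BTreeMap;
    use crate::vec::Vec;

    #[test]
    fn into_iter_yields_owned_pairs_in_ascending_order() {
        let map = BTreeMap::from([(2, 'b'), (1, 'a')]);
        let pairs: Vec<(i32, char)> = map.into_iter().collect();
        assert_eq!(pairs, [(1, 'a'), (2, 'b')]);
    }
}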
impl<'a, K, V> Handle<NodeRef<marker::Immut<'a>, K, V, marker::Leaf>, marker::Edge> {
/// Moves the leaf edge handle to the next leaf edge and returns references to the
/// key and value in between.
///
/// # Safety
/// There must be another KV in the direction travelled.
unsafe fn next_unchecked(&mut self) -> (&'a K, &'a V) {
super::mem::replace(self, |leaf_edge| {
let kv = leaf_edge.next_kv().ok().unwrap();
(kv.next_leaf_edge(), kv.into_kv())
})
}
/// Moves the leaf edge handle to the previous leaf edge and returns references to the
/// key and value in between.
///
/// # Safety
/// There must be another KV in the direction travelled.
unsafe fn next_back_unchecked(&mut self) -> (&'a K, &'a V) {
super::mem::replace(self, |leaf_edge| {
let kv = leaf_edge.next_back_kv().ok().unwrap();
(kv.next_back_leaf_edge(), kv.into_kv())
})
}
}
impl<'a, K, V> Handle<NodeRef<marker::ValMut<'a>, K, V, marker::Leaf>, marker::Edge> {
/// Moves the leaf edge handle to the next leaf edge and returns references to the
/// key and value in between.
///
/// # Safety
/// There must be another KV in the direction travelled.
unsafe fn next_unchecked(&mut self) -> (&'a K, &'a mut V) {
let kv = super::mem::replace(self, |leaf_edge| {
let kv = leaf_edge.next_kv().ok().unwrap();
(unsafe { ptr::read(&kv) }.next_leaf_edge(), kv)
});
// Doing this last is faster, according to benchmarks.
kv.into_kv_valmut()
}
/// Moves the leaf edge handle to the previous leaf edge and returns references to the
/// key and value in between.
///
/// # Safety
/// There must be another KV in the direction travelled.
unsafe fn next_back_unchecked(&mut self) -> (&'a K, &'a mut V) {
let kv = super::mem::replace(self, |leaf_edge| {
let kv = leaf_edge.next_back_kv().ok().unwrap();
(unsafe { ptr::read(&kv) }.next_back_leaf_edge(), kv)
});
// Doing this last is faster, according to benchmarks.
kv.into_kv_valmut()
}
}
impl<K, V> Handle<NodeRef<marker::Dying, K, V, marker::Leaf>, marker::Edge> {
/// Moves the leaf edge handle to the next leaf edge and returns the key and value
/// in between, deallocating any node left behind while leaving the corresponding
/// edge in its parent node dangling.
///
/// # Safety
/// - There must be another KV in the direction travelled.
/// - That KV was not previously returned by counterpart
/// `deallocating_next_back_unchecked` on any copy of the handles
/// being used to traverse the tree.
///
/// The only safe way to proceed with the updated handle is to compare it, drop it,
/// or call this method or counterpart `deallocating_next_back_unchecked` again.
unsafe fn deallocating_next_unchecked<A: Allocator + Clone>(
&mut self,
alloc: A,
) -> Handle<NodeRef<marker::Dying, K, V, marker::LeafOrInternal>, marker::KV> {
super::mem::replace(self, |leaf_edge| unsafe {
leaf_edge.deallocating_next(alloc).unwrap()
})
}
/// Moves the leaf edge handle to the previous leaf edge and returns the key and value
/// in between, deallocating any node left behind while leaving the corresponding
/// edge in its parent node dangling.
///
/// # Safety
/// - There must be another KV in the direction travelled.
/// - That leaf edge was not previously returned by counterpart
/// `deallocating_next_unchecked` on any copy of the handles
/// being used to traverse the tree.
///
/// The only safe way to proceed with the updated handle is to compare it, drop it,
/// or call this method or counterpart `deallocating_next_unchecked` again.
unsafe fn deallocating_next_back_unchecked<A: Allocator + Clone>(
&mut self,
alloc: A,
) -> Handle<NodeRef<marker::Dying, K, V, marker::LeafOrInternal>, marker::KV> {
super::mem::replace(self, |leaf_edge| unsafe {
leaf_edge.deallocating_next_back(alloc).unwrap()
})
}
}
impl<BorrowType: marker::BorrowType, K, V> NodeRef<BorrowType, K, V, marker::LeafOrInternal> {
/// Returns the leftmost leaf edge in or underneath a node - in other words, the edge
/// you need first when navigating forward (or last when navigating backward).
#[inline]
pub fn first_leaf_edge(self) -> Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge> {
let mut node = self;
loop {
match node.force() {
Leaf(leaf) => return leaf.first_edge(),
Internal(internal) => node = internal.first_edge().descend(),
}
}
}
/// Returns the rightmost leaf edge in or underneath a node - in other words, the edge
/// you need last when navigating forward (or first when navigating backward).
#[inline]
pub fn last_leaf_edge(self) -> Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge> {
let mut node = self;
loop {
match node.force() {
Leaf(leaf) => return leaf.last_edge(),
Internal(internal) => node = internal.last_edge().descend(),
}
}
}
}
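// Sketch for illustration only (not in the original file): the outermost leaf
// edges located above are where `BTreeMap`'s double-ended iteration begins
// from either direction.
#[cfg(test)]
mod leaf_edge_sketch {
    use crate::collections::BTreeMap;

    #[test]
    fn iteration_starts_at_the_outermost_leaf_edges() {
        let map = BTreeMap::from([(2, ()), (4, ()), (6, ())]);
        let mut iter = map.iter();
        // Forward iteration starts at the first (leftmost) leaf edge...
        assert_eq!(iter.next().map(|(k, _)| *k), Some(2));
        // ...and backward iteration starts at the last (rightmost) one.
        assert_eq!(iter.next_back().map(|(k, _)| *k), Some(6));
    }
}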
pub enum Position<BorrowType, K, V> {
Leaf(NodeRef<BorrowType, K, V, marker::Leaf>),
Internal(NodeRef<BorrowType, K, V, marker::Internal>),
InternalKV,
}
impl<'a, K: 'a, V: 'a> NodeRef<marker::Immut<'a>, K, V, marker::LeafOrInternal> {
/// Visits leaf nodes and internal KVs in order of ascending keys, and also
/// visits internal nodes as a whole in a depth first order, meaning that
/// internal nodes precede their individual KVs and their child nodes.
pub fn visit_nodes_in_order<F>(self, mut visit: F)
where
F: FnMut(Position<marker::Immut<'a>, K, V>),
{
match self.force() {
Leaf(leaf) => visit(Position::Leaf(leaf)),
Internal(internal) => {
visit(Position::Internal(internal));
let mut edge = internal.first_edge();
loop {
edge = match edge.descend().force() {
Leaf(leaf) => {
visit(Position::Leaf(leaf));
match edge.next_kv() {
Ok(kv) => {
visit(Position::InternalKV);
kv.right_edge()
}
Err(_) => return,
}
}
Internal(internal) => {
visit(Position::Internal(internal));
internal.first_edge()
}
}
}
}
}
}
/// Calculates the number of elements in a (sub)tree.
pub fn calc_length(self) -> usize {
let mut result = 0;
self.visit_nodes_in_order(|pos| match pos {
Position::Leaf(node) => result += node.len(),
Position::Internal(node) => result += node.len(),
Position::InternalKV => (),
});
result
}
}
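// Hedged sketch (not in the original file): `calc_length` recounts a subtree
// by visiting every node; `split_off`, which has to recount at least one of
// the resulting halves, is a plausible consumer of this walk.
#[cfg(test)]
mod calc_length_sketch {
    use crate::collections::BTreeMap;

    #[test]
    fn split_off_leaves_both_halves_with_correct_lengths() {
        let mut left: BTreeMap<u32, ()> = (0..10).map(|i| (i, ())).collect();
        let right = left.split_off(&6);
        assert_eq!(left.len(), 6); // keys 0..6
        assert_eq!(right.len(), 4); // keys 6..10
    }
}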
impl<BorrowType: marker::BorrowType, K, V>
Handle<NodeRef<BorrowType, K, V, marker::LeafOrInternal>, marker::KV>
{
/// Returns the leaf edge closest to a KV for forward navigation.
pub fn next_leaf_edge(self) -> Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge> {
match self.force() {
Leaf(leaf_kv) => leaf_kv.right_edge(),
Internal(internal_kv) => {
let next_internal_edge = internal_kv.right_edge();
next_internal_edge.descend().first_leaf_edge()
}
}
}
/// Returns the leaf edge closest to a KV for backward navigation.
pub fn next_back_leaf_edge(
self,
) -> Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge> {
match self.force() {
Leaf(leaf_kv) => leaf_kv.left_edge(),
Internal(internal_kv) => {
let next_internal_edge = internal_kv.left_edge();
next_internal_edge.descend().last_leaf_edge()
}
}
}
}
impl<BorrowType: marker::BorrowType, K, V> NodeRef<BorrowType, K, V, marker::LeafOrInternal> {
/// Returns the leaf edge corresponding to the first point at which the
/// given bound is true.
pub fn lower_bound<Q: ?Sized>(
self,
mut bound: SearchBound<&Q>,
) -> Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge>
where
Q: Ord,
K: Borrow<Q>,
{
let mut node = self;
loop {
let (edge, new_bound) = node.find_lower_bound_edge(bound);
match edge.force() {
Leaf(edge) => return edge,
Internal(edge) => {
node = edge.descend();
bound = new_bound;
}
}
}
}
/// Returns the leaf edge corresponding to the last point at which the
/// given bound is true.
pub fn upper_bound<Q: ?Sized>(
self,
mut bound: SearchBound<&Q>,
) -> Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge>
where
Q: Ord,
K: Borrow<Q>,
{
let mut node = self;
loop {
let (edge, new_bound) = node.find_upper_bound_edge(bound);
match edge.force() {
Leaf(edge) => return edge,
Internal(edge) => {
node = edge.descend();
bound = new_bound;
}
}
}
}
}

View File

@ -0,0 +1,99 @@
use super::super::navigate;
use super::*;
use crate::alloc::Global;
use crate::fmt::Debug;
use crate::string::String;
impl<'a, K: 'a, V: 'a> NodeRef<marker::Immut<'a>, K, V, marker::LeafOrInternal> {
// Asserts that the back pointer in each reachable node points to its parent.
pub fn assert_back_pointers(self) {
if let ForceResult::Internal(node) = self.force() {
for idx in 0..=node.len() {
let edge = unsafe { Handle::new_edge(node, idx) };
let child = edge.descend();
assert!(child.ascend().ok() == Some(edge));
child.assert_back_pointers();
}
}
}
// Renders a multi-line display of the keys in order and in tree hierarchy,
// picturing the tree growing sideways from its root on the left to its
// leaves on the right.
pub fn dump_keys(self) -> String
where
K: Debug,
{
let mut result = String::new();
self.visit_nodes_in_order(|pos| match pos {
navigate::Position::Leaf(leaf) => {
let depth = self.height();
let indent = " ".repeat(depth);
result += &format!("\n{}{:?}", indent, leaf.keys());
}
navigate::Position::Internal(_) => {}
navigate::Position::InternalKV => {}
});
result
}
}
#[test]
fn test_splitpoint() {
for idx in 0..=CAPACITY {
let (middle_kv_idx, insertion) = splitpoint(idx);
// Simulate performing the split:
let mut left_len = middle_kv_idx;
let mut right_len = CAPACITY - middle_kv_idx - 1;
match insertion {
LeftOrRight::Left(edge_idx) => {
assert!(edge_idx <= left_len);
left_len += 1;
}
LeftOrRight::Right(edge_idx) => {
assert!(edge_idx <= right_len);
right_len += 1;
}
}
assert!(left_len >= MIN_LEN_AFTER_SPLIT);
assert!(right_len >= MIN_LEN_AFTER_SPLIT);
assert!(left_len + right_len == CAPACITY);
}
}
#[test]
fn test_partial_eq() {
let mut root1 = NodeRef::new_leaf(Global);
root1.borrow_mut().push(1, ());
let mut root1 = NodeRef::new_internal(root1.forget_type(), Global).forget_type();
let root2 = Root::new(Global);
root1.reborrow().assert_back_pointers();
root2.reborrow().assert_back_pointers();
let leaf_edge_1a = root1.reborrow().first_leaf_edge().forget_node_type();
let leaf_edge_1b = root1.reborrow().last_leaf_edge().forget_node_type();
let top_edge_1 = root1.reborrow().first_edge();
let top_edge_2 = root2.reborrow().first_edge();
assert!(leaf_edge_1a == leaf_edge_1a);
assert!(leaf_edge_1a != leaf_edge_1b);
assert!(leaf_edge_1a != top_edge_1);
assert!(leaf_edge_1a != top_edge_2);
assert!(top_edge_1 == top_edge_1);
assert!(top_edge_1 != top_edge_2);
root1.pop_internal_level(Global);
unsafe { root1.into_dying().deallocate_and_ascend(Global) };
unsafe { root2.into_dying().deallocate_and_ascend(Global) };
}
#[test]
#[cfg(target_arch = "x86_64")]
#[cfg_attr(miri, ignore)] // We'd like to run Miri with layout randomization
fn test_sizes() {
assert_eq!(core::mem::size_of::<LeafNode<(), ()>>(), 16);
assert_eq!(core::mem::size_of::<LeafNode<i64, i64>>(), 16 + CAPACITY * 2 * 8);
assert_eq!(core::mem::size_of::<InternalNode<(), ()>>(), 16 + (CAPACITY + 1) * 8);
assert_eq!(core::mem::size_of::<InternalNode<i64, i64>>(), 16 + (CAPACITY * 3 + 1) * 8);
}

View File

@ -0,0 +1,95 @@
use super::map::MIN_LEN;
use super::node::{marker, ForceResult::*, Handle, LeftOrRight::*, NodeRef};
use core::alloc::Allocator;
impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>, marker::KV> {
/// Removes a key-value pair from the tree, and returns that pair, as well as
/// the leaf edge corresponding to that former pair. It's possible this empties
/// a root node that is internal, which the caller should pop from the map
/// holding the tree. The caller should also decrement the map's length.
pub fn remove_kv_tracking<F: FnOnce(), A: Allocator + Clone>(
self,
handle_emptied_internal_root: F,
alloc: A,
) -> ((K, V), Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge>) {
match self.force() {
Leaf(node) => node.remove_leaf_kv(handle_emptied_internal_root, alloc),
Internal(node) => node.remove_internal_kv(handle_emptied_internal_root, alloc),
}
}
}
impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::KV> {
fn remove_leaf_kv<F: FnOnce(), A: Allocator + Clone>(
self,
handle_emptied_internal_root: F,
alloc: A,
) -> ((K, V), Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge>) {
let (old_kv, mut pos) = self.remove();
let len = pos.reborrow().into_node().len();
if len < MIN_LEN {
let idx = pos.idx();
// We have to temporarily forget the child type, because there is no
// distinct node type for the immediate parents of a leaf.
let new_pos = match pos.into_node().forget_type().choose_parent_kv() {
Ok(Left(left_parent_kv)) => {
debug_assert!(left_parent_kv.right_child_len() == MIN_LEN - 1);
if left_parent_kv.can_merge() {
left_parent_kv.merge_tracking_child_edge(Right(idx), alloc.clone())
} else {
debug_assert!(left_parent_kv.left_child_len() > MIN_LEN);
left_parent_kv.steal_left(idx)
}
}
Ok(Right(right_parent_kv)) => {
debug_assert!(right_parent_kv.left_child_len() == MIN_LEN - 1);
if right_parent_kv.can_merge() {
right_parent_kv.merge_tracking_child_edge(Left(idx), alloc.clone())
} else {
debug_assert!(right_parent_kv.right_child_len() > MIN_LEN);
right_parent_kv.steal_right(idx)
}
}
Err(pos) => unsafe { Handle::new_edge(pos, idx) },
};
// SAFETY: `new_pos` is the leaf we started from or a sibling.
pos = unsafe { new_pos.cast_to_leaf_unchecked() };
// The parent (if any) has shrunk only if we merged, but skipping
// the following step otherwise does not pay off in benchmarks.
//
// SAFETY: We won't destroy or rearrange the leaf where `pos` is at
// by handling its parent recursively; at worst we will destroy or
// rearrange the parent through the grandparent, thus change the
// link to the parent inside the leaf.
if let Ok(parent) = unsafe { pos.reborrow_mut() }.into_node().ascend() {
if !parent.into_node().forget_type().fix_node_and_affected_ancestors(alloc) {
handle_emptied_internal_root();
}
}
}
(old_kv, pos)
}
}
impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::KV> {
fn remove_internal_kv<F: FnOnce(), A: Allocator + Clone>(
self,
handle_emptied_internal_root: F,
alloc: A,
) -> ((K, V), Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge>) {
// Remove an adjacent KV from its leaf and then put it back in place of
// the element we were asked to remove. Prefer the left adjacent KV,
// for the reasons listed in `choose_parent_kv`.
let left_leaf_kv = self.left_edge().descend().last_leaf_edge().left_kv();
let left_leaf_kv = unsafe { left_leaf_kv.ok().unwrap_unchecked() };
let (left_kv, left_hole) = left_leaf_kv.remove_leaf_kv(handle_emptied_internal_root, alloc);
// The internal node may have been stolen from or merged. Go back right
// to find where the original KV ended up.
let mut internal = unsafe { left_hole.next_kv().ok().unwrap_unchecked() };
let old_kv = internal.replace_kv(left_kv.0, left_kv.1);
let pos = internal.next_leaf_edge();
(old_kv, pos)
}
}
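// Illustrative sketch appended here (not in the original file): the steal and
// merge rebalancing above is invisible through the public API; removing most
// of a large map must leave the surviving entries intact and ordered.
#[cfg(test)]
mod remove_rebalance_sketch {
    use crate::collections::BTreeMap;
    use crate::vec::Vec;

    #[test]
    fn removal_keeps_remaining_keys_ordered() {
        // Enough entries for a multi-level tree, so removals exercise
        // underflow handling in internal nodes as well as in leaves.
        let mut map: BTreeMap<u32, u32> = (0..200).map(|i| (i, i)).collect();
        for i in 0..200 {
            if i % 3 != 0 {
                assert_eq!(map.remove(&i), Some(i));
            }
        }
        let keys: Vec<u32> = map.keys().copied().collect();
        assert!(keys.windows(2).all(|w| w[0] < w[1]));
        assert_eq!(map.len(), keys.len());
    }
}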

View File

@ -0,0 +1,285 @@
use core::borrow::Borrow;
use core::cmp::Ordering;
use core::ops::{Bound, RangeBounds};
use super::node::{marker, ForceResult::*, Handle, NodeRef};
use SearchBound::*;
use SearchResult::*;
pub enum SearchBound<T> {
/// An inclusive bound to look for, just like `Bound::Included(T)`.
Included(T),
/// An exclusive bound to look for, just like `Bound::Excluded(T)`.
Excluded(T),
/// An unconditional inclusive bound, just like `Bound::Unbounded`.
AllIncluded,
/// An unconditional exclusive bound.
AllExcluded,
}
impl<T> SearchBound<T> {
pub fn from_range(range_bound: Bound<T>) -> Self {
match range_bound {
Bound::Included(t) => Included(t),
Bound::Excluded(t) => Excluded(t),
Bound::Unbounded => AllIncluded,
}
}
}
pub enum SearchResult<BorrowType, K, V, FoundType, GoDownType> {
Found(Handle<NodeRef<BorrowType, K, V, FoundType>, marker::KV>),
GoDown(Handle<NodeRef<BorrowType, K, V, GoDownType>, marker::Edge>),
}
pub enum IndexResult {
KV(usize),
Edge(usize),
}
impl<BorrowType: marker::BorrowType, K, V> NodeRef<BorrowType, K, V, marker::LeafOrInternal> {
/// Looks up a given key in a (sub)tree headed by the node, recursively.
/// Returns a `Found` with the handle of the matching KV, if any. Otherwise,
/// returns a `GoDown` with the handle of the leaf edge where the key belongs.
///
/// The result is meaningful only if the tree is ordered by key, like the tree
/// in a `BTreeMap` is.
pub fn search_tree<Q: ?Sized>(
mut self,
key: &Q,
) -> SearchResult<BorrowType, K, V, marker::LeafOrInternal, marker::Leaf>
where
Q: Ord,
K: Borrow<Q>,
{
loop {
self = match self.search_node(key) {
Found(handle) => return Found(handle),
GoDown(handle) => match handle.force() {
Leaf(leaf) => return GoDown(leaf),
Internal(internal) => internal.descend(),
},
}
}
}
/// Descends to the nearest node where the edge matching the lower bound
/// of the range is different from the edge matching the upper bound, i.e.,
/// the nearest node that has at least one key contained in the range.
///
/// If found, returns an `Ok` with that node, the strictly ascending pair of
/// edge indices in the node delimiting the range, and the corresponding
/// pair of bounds for continuing the search in the child nodes, in case
/// the node is internal.
///
/// If not found, returns an `Err` with the leaf edge matching the entire
/// range.
///
/// As a diagnostic service, panics if the range specifies impossible bounds.
///
/// The result is meaningful only if the tree is ordered by key.
pub fn search_tree_for_bifurcation<'r, Q: ?Sized, R>(
mut self,
range: &'r R,
) -> Result<
(
NodeRef<BorrowType, K, V, marker::LeafOrInternal>,
usize,
usize,
SearchBound<&'r Q>,
SearchBound<&'r Q>,
),
Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge>,
>
where
Q: Ord,
K: Borrow<Q>,
R: RangeBounds<Q>,
{
// Determine if map or set is being searched
let is_set = <V as super::set_val::IsSetVal>::is_set_val();
// Inlining these variables should be avoided. We assume the bounds reported by `range`
// remain the same, but an adversarial implementation could change them between calls (#81138).
let (start, end) = (range.start_bound(), range.end_bound());
match (start, end) {
(Bound::Excluded(s), Bound::Excluded(e)) if s == e => {
if is_set {
panic!("range start and end are equal and excluded in BTreeSet")
} else {
panic!("range start and end are equal and excluded in BTreeMap")
}
}
(Bound::Included(s) | Bound::Excluded(s), Bound::Included(e) | Bound::Excluded(e))
if s > e =>
{
if is_set {
panic!("range start is greater than range end in BTreeSet")
} else {
panic!("range start is greater than range end in BTreeMap")
}
}
_ => {}
}
let mut lower_bound = SearchBound::from_range(start);
let mut upper_bound = SearchBound::from_range(end);
loop {
let (lower_edge_idx, lower_child_bound) = self.find_lower_bound_index(lower_bound);
let (upper_edge_idx, upper_child_bound) =
unsafe { self.find_upper_bound_index(upper_bound, lower_edge_idx) };
if lower_edge_idx < upper_edge_idx {
return Ok((
self,
lower_edge_idx,
upper_edge_idx,
lower_child_bound,
upper_child_bound,
));
}
debug_assert_eq!(lower_edge_idx, upper_edge_idx);
let common_edge = unsafe { Handle::new_edge(self, lower_edge_idx) };
match common_edge.force() {
Leaf(common_edge) => return Err(common_edge),
Internal(common_edge) => {
self = common_edge.descend();
lower_bound = lower_child_bound;
upper_bound = upper_child_bound;
}
}
}
}
/// Finds an edge in the node delimiting the lower bound of a range.
/// Also returns the lower bound to be used for continuing the search in
/// the matching child node, if `self` is an internal node.
///
/// The result is meaningful only if the tree is ordered by key.
pub fn find_lower_bound_edge<'r, Q>(
self,
bound: SearchBound<&'r Q>,
) -> (Handle<Self, marker::Edge>, SearchBound<&'r Q>)
where
Q: ?Sized + Ord,
K: Borrow<Q>,
{
let (edge_idx, bound) = self.find_lower_bound_index(bound);
let edge = unsafe { Handle::new_edge(self, edge_idx) };
(edge, bound)
}
/// Clone of `find_lower_bound_edge` for the upper bound.
pub fn find_upper_bound_edge<'r, Q>(
self,
bound: SearchBound<&'r Q>,
) -> (Handle<Self, marker::Edge>, SearchBound<&'r Q>)
where
Q: ?Sized + Ord,
K: Borrow<Q>,
{
let (edge_idx, bound) = unsafe { self.find_upper_bound_index(bound, 0) };
let edge = unsafe { Handle::new_edge(self, edge_idx) };
(edge, bound)
}
}
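// Small sketch (not in the original file): `search_tree` is the descent
// behind point lookups such as `BTreeMap::get`, which also accepts any
// borrowed form `Q` of the key type.
#[cfg(test)]
mod search_tree_sketch {
    use crate::collections::BTreeMap;
    use crate::string::String;

    #[test]
    fn lookup_by_borrowed_key() {
        let map = BTreeMap::from([(String::from("k"), 1)]);
        // `K = String` borrows as `Q = str`, matching the `K: Borrow<Q>` bound.
        assert_eq!(map.get("k"), Some(&1));
        assert_eq!(map.get("missing"), None);
    }
}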
impl<BorrowType, K, V, Type> NodeRef<BorrowType, K, V, Type> {
/// Looks up a given key in the node, without recursion.
/// Returns a `Found` with the handle of the matching KV, if any. Otherwise,
/// returns a `GoDown` with the handle of the edge where the key might be found
/// (if the node is internal) or where the key can be inserted.
///
/// The result is meaningful only if the tree is ordered by key, like the tree
/// in a `BTreeMap` is.
pub fn search_node<Q: ?Sized>(self, key: &Q) -> SearchResult<BorrowType, K, V, Type, Type>
where
Q: Ord,
K: Borrow<Q>,
{
match unsafe { self.find_key_index(key, 0) } {
IndexResult::KV(idx) => Found(unsafe { Handle::new_kv(self, idx) }),
IndexResult::Edge(idx) => GoDown(unsafe { Handle::new_edge(self, idx) }),
}
}
/// Returns either the KV index in the node at which the key (or an equivalent)
/// exists, or the edge index where the key belongs, starting from a particular index.
///
/// The result is meaningful only if the tree is ordered by key, like the tree
/// in a `BTreeMap` is.
///
/// # Safety
/// `start_index` must be a valid edge index for the node.
unsafe fn find_key_index<Q: ?Sized>(&self, key: &Q, start_index: usize) -> IndexResult
where
Q: Ord,
K: Borrow<Q>,
{
let node = self.reborrow();
let keys = node.keys();
debug_assert!(start_index <= keys.len());
for (offset, k) in unsafe { keys.get_unchecked(start_index..) }.iter().enumerate() {
match key.cmp(k.borrow()) {
Ordering::Greater => {}
Ordering::Equal => return IndexResult::KV(start_index + offset),
Ordering::Less => return IndexResult::Edge(start_index + offset),
}
}
IndexResult::Edge(keys.len())
}
/// Finds an edge index in the node delimiting the lower bound of a range.
/// Also returns the lower bound to be used for continuing the search in
/// the matching child node, if `self` is an internal node.
///
/// The result is meaningful only if the tree is ordered by key.
fn find_lower_bound_index<'r, Q>(
&self,
bound: SearchBound<&'r Q>,
) -> (usize, SearchBound<&'r Q>)
where
Q: ?Sized + Ord,
K: Borrow<Q>,
{
match bound {
Included(key) => match unsafe { self.find_key_index(key, 0) } {
IndexResult::KV(idx) => (idx, AllExcluded),
IndexResult::Edge(idx) => (idx, bound),
},
Excluded(key) => match unsafe { self.find_key_index(key, 0) } {
IndexResult::KV(idx) => (idx + 1, AllIncluded),
IndexResult::Edge(idx) => (idx, bound),
},
AllIncluded => (0, AllIncluded),
AllExcluded => (self.len(), AllExcluded),
}
}
/// Mirror image of `find_lower_bound_index` for the upper bound,
/// with an additional parameter to skip part of the key array.
///
/// # Safety
/// `start_index` must be a valid edge index for the node.
unsafe fn find_upper_bound_index<'r, Q>(
&self,
bound: SearchBound<&'r Q>,
start_index: usize,
) -> (usize, SearchBound<&'r Q>)
where
Q: ?Sized + Ord,
K: Borrow<Q>,
{
match bound {
Included(key) => match unsafe { self.find_key_index(key, start_index) } {
IndexResult::KV(idx) => (idx + 1, AllExcluded),
IndexResult::Edge(idx) => (idx, bound),
},
Excluded(key) => match unsafe { self.find_key_index(key, start_index) } {
IndexResult::KV(idx) => (idx, AllIncluded),
IndexResult::Edge(idx) => (idx, bound),
},
AllIncluded => (self.len(), AllIncluded),
AllExcluded => (start_index, AllExcluded),
}
}
}
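// Illustrative sketch (not in the original file) of the lower/upper bound
// index rules above, observed through the public `range` API: `Included`
// keeps an exact match on its side of the range, `Excluded` steps past it.
#[cfg(test)]
mod search_bound_sketch {
    use crate::collections::BTreeMap;
    use crate::vec::Vec;
    use core::ops::Bound::{Excluded, Included};

    #[test]
    fn included_and_excluded_bounds() {
        let map = BTreeMap::from([(10, ()), (20, ()), (30, ())]);
        let keys: Vec<i32> =
            map.range((Excluded(10), Included(30))).map(|(k, _)| *k).collect();
        assert_eq!(keys, [20, 30]);
    }
}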

View File

@ -0,0 +1,855 @@
use super::*;
use crate::testing::crash_test::{CrashTestDummy, Panic};
use crate::testing::rng::DeterministicRng;
use std::ops::Bound::{Excluded, Included};
use std::panic::{catch_unwind, AssertUnwindSafe};
#[test]
fn test_clone_eq() {
let mut m = BTreeSet::new();
m.insert(1);
m.insert(2);
assert_eq!(m.clone(), m);
}
#[test]
fn test_iter_min_max() {
let mut a = BTreeSet::new();
assert_eq!(a.iter().min(), None);
assert_eq!(a.iter().max(), None);
assert_eq!(a.range(..).min(), None);
assert_eq!(a.range(..).max(), None);
assert_eq!(a.difference(&BTreeSet::new()).min(), None);
assert_eq!(a.difference(&BTreeSet::new()).max(), None);
assert_eq!(a.intersection(&a).min(), None);
assert_eq!(a.intersection(&a).max(), None);
assert_eq!(a.symmetric_difference(&BTreeSet::new()).min(), None);
assert_eq!(a.symmetric_difference(&BTreeSet::new()).max(), None);
assert_eq!(a.union(&a).min(), None);
assert_eq!(a.union(&a).max(), None);
a.insert(1);
a.insert(2);
assert_eq!(a.iter().min(), Some(&1));
assert_eq!(a.iter().max(), Some(&2));
assert_eq!(a.range(..).min(), Some(&1));
assert_eq!(a.range(..).max(), Some(&2));
assert_eq!(a.difference(&BTreeSet::new()).min(), Some(&1));
assert_eq!(a.difference(&BTreeSet::new()).max(), Some(&2));
assert_eq!(a.intersection(&a).min(), Some(&1));
assert_eq!(a.intersection(&a).max(), Some(&2));
assert_eq!(a.symmetric_difference(&BTreeSet::new()).min(), Some(&1));
assert_eq!(a.symmetric_difference(&BTreeSet::new()).max(), Some(&2));
assert_eq!(a.union(&a).min(), Some(&1));
assert_eq!(a.union(&a).max(), Some(&2));
}
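// Helper for the set-operation tests below: builds two `BTreeSet`s from `a`
// and `b`, runs the given operation with a per-element callback, and asserts
// that the callback sees exactly the elements of `expected`, in order.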
fn check<F>(a: &[i32], b: &[i32], expected: &[i32], f: F)
where
F: FnOnce(&BTreeSet<i32>, &BTreeSet<i32>, &mut dyn FnMut(&i32) -> bool) -> bool,
{
let mut set_a = BTreeSet::new();
let mut set_b = BTreeSet::new();
for x in a {
assert!(set_a.insert(*x))
}
for y in b {
assert!(set_b.insert(*y))
}
let mut i = 0;
f(&set_a, &set_b, &mut |&x| {
if i < expected.len() {
assert_eq!(x, expected[i]);
}
i += 1;
true
});
assert_eq!(i, expected.len());
}
#[test]
fn test_intersection() {
fn check_intersection(a: &[i32], b: &[i32], expected: &[i32]) {
check(a, b, expected, |x, y, f| x.intersection(y).all(f))
}
check_intersection(&[], &[], &[]);
check_intersection(&[1, 2, 3], &[], &[]);
check_intersection(&[], &[1, 2, 3], &[]);
check_intersection(&[2], &[1, 2, 3], &[2]);
check_intersection(&[1, 2, 3], &[2], &[2]);
check_intersection(&[11, 1, 3, 77, 103, 5, -5], &[2, 11, 77, -9, -42, 5, 3], &[3, 5, 11, 77]);
if cfg!(miri) {
// Miri is too slow
return;
}
let large = Vec::from_iter(0..100);
check_intersection(&[], &large, &[]);
check_intersection(&large, &[], &[]);
check_intersection(&[-1], &large, &[]);
check_intersection(&large, &[-1], &[]);
check_intersection(&[0], &large, &[0]);
check_intersection(&large, &[0], &[0]);
check_intersection(&[99], &large, &[99]);
check_intersection(&large, &[99], &[99]);
check_intersection(&[100], &large, &[]);
check_intersection(&large, &[100], &[]);
check_intersection(&[11, 5000, 1, 3, 77, 8924], &large, &[1, 3, 11, 77]);
}
#[test]
fn test_intersection_size_hint() {
let x = BTreeSet::from([3, 4]);
let y = BTreeSet::from([1, 2, 3]);
let mut iter = x.intersection(&y);
assert_eq!(iter.size_hint(), (1, Some(1)));
assert_eq!(iter.next(), Some(&3));
assert_eq!(iter.size_hint(), (0, Some(0)));
assert_eq!(iter.next(), None);
iter = y.intersection(&y);
assert_eq!(iter.size_hint(), (0, Some(3)));
assert_eq!(iter.next(), Some(&1));
assert_eq!(iter.size_hint(), (0, Some(2)));
}
#[test]
fn test_difference() {
fn check_difference(a: &[i32], b: &[i32], expected: &[i32]) {
check(a, b, expected, |x, y, f| x.difference(y).all(f))
}
check_difference(&[], &[], &[]);
check_difference(&[1, 12], &[], &[1, 12]);
check_difference(&[], &[1, 2, 3, 9], &[]);
check_difference(&[1, 3, 5, 9, 11], &[3, 9], &[1, 5, 11]);
check_difference(&[1, 3, 5, 9, 11], &[3, 6, 9], &[1, 5, 11]);
check_difference(&[1, 3, 5, 9, 11], &[0, 1], &[3, 5, 9, 11]);
check_difference(&[1, 3, 5, 9, 11], &[11, 12], &[1, 3, 5, 9]);
check_difference(
&[-5, 11, 22, 33, 40, 42],
&[-12, -5, 14, 23, 34, 38, 39, 50],
&[11, 22, 33, 40, 42],
);
if cfg!(miri) {
// Miri is too slow
return;
}
let large = Vec::from_iter(0..100);
check_difference(&[], &large, &[]);
check_difference(&[-1], &large, &[-1]);
check_difference(&[0], &large, &[]);
check_difference(&[99], &large, &[]);
check_difference(&[100], &large, &[100]);
check_difference(&[11, 5000, 1, 3, 77, 8924], &large, &[5000, 8924]);
check_difference(&large, &[], &large);
check_difference(&large, &[-1], &large);
check_difference(&large, &[100], &large);
}
#[test]
fn test_difference_size_hint() {
let s246 = BTreeSet::from([2, 4, 6]);
let s23456 = BTreeSet::from_iter(2..=6);
let mut iter = s246.difference(&s23456);
assert_eq!(iter.size_hint(), (0, Some(3)));
assert_eq!(iter.next(), None);
let s12345 = BTreeSet::from_iter(1..=5);
iter = s246.difference(&s12345);
assert_eq!(iter.size_hint(), (0, Some(3)));
assert_eq!(iter.next(), Some(&6));
assert_eq!(iter.size_hint(), (0, Some(0)));
assert_eq!(iter.next(), None);
let s34567 = BTreeSet::from_iter(3..=7);
iter = s246.difference(&s34567);
assert_eq!(iter.size_hint(), (0, Some(3)));
assert_eq!(iter.next(), Some(&2));
assert_eq!(iter.size_hint(), (0, Some(2)));
assert_eq!(iter.next(), None);
let s1 = BTreeSet::from_iter(-9..=1);
iter = s246.difference(&s1);
assert_eq!(iter.size_hint(), (3, Some(3)));
let s2 = BTreeSet::from_iter(-9..=2);
iter = s246.difference(&s2);
assert_eq!(iter.size_hint(), (2, Some(2)));
assert_eq!(iter.next(), Some(&4));
assert_eq!(iter.size_hint(), (1, Some(1)));
let s23 = BTreeSet::from([2, 3]);
iter = s246.difference(&s23);
assert_eq!(iter.size_hint(), (1, Some(3)));
assert_eq!(iter.next(), Some(&4));
assert_eq!(iter.size_hint(), (1, Some(1)));
let s4 = BTreeSet::from([4]);
iter = s246.difference(&s4);
assert_eq!(iter.size_hint(), (2, Some(3)));
assert_eq!(iter.next(), Some(&2));
assert_eq!(iter.size_hint(), (1, Some(2)));
assert_eq!(iter.next(), Some(&6));
assert_eq!(iter.size_hint(), (0, Some(0)));
assert_eq!(iter.next(), None);
let s56 = BTreeSet::from([5, 6]);
iter = s246.difference(&s56);
assert_eq!(iter.size_hint(), (1, Some(3)));
assert_eq!(iter.next(), Some(&2));
assert_eq!(iter.size_hint(), (0, Some(2)));
let s6 = BTreeSet::from_iter(6..=19);
iter = s246.difference(&s6);
assert_eq!(iter.size_hint(), (2, Some(2)));
assert_eq!(iter.next(), Some(&2));
assert_eq!(iter.size_hint(), (1, Some(1)));
let s7 = BTreeSet::from_iter(7..=19);
iter = s246.difference(&s7);
assert_eq!(iter.size_hint(), (3, Some(3)));
}
#[test]
fn test_symmetric_difference() {
fn check_symmetric_difference(a: &[i32], b: &[i32], expected: &[i32]) {
check(a, b, expected, |x, y, f| x.symmetric_difference(y).all(f))
}
check_symmetric_difference(&[], &[], &[]);
check_symmetric_difference(&[1, 2, 3], &[2], &[1, 3]);
check_symmetric_difference(&[2], &[1, 2, 3], &[1, 3]);
check_symmetric_difference(&[1, 3, 5, 9, 11], &[-2, 3, 9, 14, 22], &[-2, 1, 5, 11, 14, 22]);
}
#[test]
fn test_symmetric_difference_size_hint() {
let x = BTreeSet::from([2, 4]);
let y = BTreeSet::from([1, 2, 3]);
let mut iter = x.symmetric_difference(&y);
assert_eq!(iter.size_hint(), (0, Some(5)));
assert_eq!(iter.next(), Some(&1));
assert_eq!(iter.size_hint(), (0, Some(4)));
assert_eq!(iter.next(), Some(&3));
assert_eq!(iter.size_hint(), (0, Some(1)));
}
#[test]
fn test_union() {
fn check_union(a: &[i32], b: &[i32], expected: &[i32]) {
check(a, b, expected, |x, y, f| x.union(y).all(f))
}
check_union(&[], &[], &[]);
check_union(&[1, 2, 3], &[2], &[1, 2, 3]);
check_union(&[2], &[1, 2, 3], &[1, 2, 3]);
check_union(
&[1, 3, 5, 9, 11, 16, 19, 24],
&[-2, 1, 5, 9, 13, 19],
&[-2, 1, 3, 5, 9, 11, 13, 16, 19, 24],
);
}
#[test]
fn test_union_size_hint() {
let x = BTreeSet::from([2, 4]);
let y = BTreeSet::from([1, 2, 3]);
let mut iter = x.union(&y);
assert_eq!(iter.size_hint(), (3, Some(5)));
assert_eq!(iter.next(), Some(&1));
assert_eq!(iter.size_hint(), (2, Some(4)));
assert_eq!(iter.next(), Some(&2));
assert_eq!(iter.size_hint(), (1, Some(2)));
}
#[test]
// Only tests the simple function definition with respect to intersection
fn test_is_disjoint() {
let one = BTreeSet::from([1]);
let two = BTreeSet::from([2]);
assert!(one.is_disjoint(&two));
}
#[test]
// Also implicitly tests the trivial function definition of is_superset
fn test_is_subset() {
fn is_subset(a: &[i32], b: &[i32]) -> bool {
let set_a = BTreeSet::from_iter(a.iter());
let set_b = BTreeSet::from_iter(b.iter());
set_a.is_subset(&set_b)
}
assert_eq!(is_subset(&[], &[]), true);
assert_eq!(is_subset(&[], &[1, 2]), true);
assert_eq!(is_subset(&[0], &[1, 2]), false);
assert_eq!(is_subset(&[1], &[1, 2]), true);
assert_eq!(is_subset(&[2], &[1, 2]), true);
assert_eq!(is_subset(&[3], &[1, 2]), false);
assert_eq!(is_subset(&[1, 2], &[1]), false);
assert_eq!(is_subset(&[1, 2], &[1, 2]), true);
assert_eq!(is_subset(&[1, 2], &[2, 3]), false);
assert_eq!(
is_subset(&[-5, 11, 22, 33, 40, 42], &[-12, -5, 11, 14, 22, 23, 33, 34, 38, 39, 40, 42]),
true
);
assert_eq!(is_subset(&[-5, 11, 22, 33, 40, 42], &[-12, -5, 11, 14, 22, 23, 34, 38]), false);
if cfg!(miri) {
// Miri is too slow
return;
}
let large = Vec::from_iter(0..100);
assert_eq!(is_subset(&[], &large), true);
assert_eq!(is_subset(&large, &[]), false);
assert_eq!(is_subset(&[-1], &large), false);
assert_eq!(is_subset(&[0], &large), true);
assert_eq!(is_subset(&[1, 2], &large), true);
assert_eq!(is_subset(&[99, 100], &large), false);
}
#[test]
fn test_is_superset() {
fn is_superset(a: &[i32], b: &[i32]) -> bool {
let set_a = BTreeSet::from_iter(a.iter());
let set_b = BTreeSet::from_iter(b.iter());
set_a.is_superset(&set_b)
}
assert_eq!(is_superset(&[], &[]), true);
assert_eq!(is_superset(&[], &[1, 2]), false);
assert_eq!(is_superset(&[0], &[1, 2]), false);
assert_eq!(is_superset(&[1], &[1, 2]), false);
assert_eq!(is_superset(&[4], &[1, 2]), false);
assert_eq!(is_superset(&[1, 4], &[1, 2]), false);
assert_eq!(is_superset(&[1, 2], &[1, 2]), true);
assert_eq!(is_superset(&[1, 2, 3], &[1, 3]), true);
assert_eq!(is_superset(&[1, 2, 3], &[]), true);
assert_eq!(is_superset(&[-1, 1, 2, 3], &[-1, 3]), true);
if cfg!(miri) {
// Miri is too slow
return;
}
let large = Vec::from_iter(0..100);
assert_eq!(is_superset(&[], &large), false);
assert_eq!(is_superset(&large, &[]), true);
assert_eq!(is_superset(&large, &[1]), true);
assert_eq!(is_superset(&large, &[50, 99]), true);
assert_eq!(is_superset(&large, &[100]), false);
assert_eq!(is_superset(&large, &[0, 99]), true);
assert_eq!(is_superset(&[-1], &large), false);
assert_eq!(is_superset(&[0], &large), false);
assert_eq!(is_superset(&[99, 100], &large), false);
}
#[test]
fn test_retain() {
let mut set = BTreeSet::from([1, 2, 3, 4, 5, 6]);
set.retain(|&k| k % 2 == 0);
assert_eq!(set.len(), 3);
assert!(set.contains(&2));
assert!(set.contains(&4));
assert!(set.contains(&6));
}
#[test]
fn test_extract_if() {
let mut x = BTreeSet::from([1]);
let mut y = BTreeSet::from([1]);
x.extract_if(|_| true).for_each(drop);
y.extract_if(|_| false).for_each(drop);
assert_eq!(x.len(), 0);
assert_eq!(y.len(), 1);
}
#[test]
#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
fn test_extract_if_drop_panic_leak() {
let a = CrashTestDummy::new(0);
let b = CrashTestDummy::new(1);
let c = CrashTestDummy::new(2);
let mut set = BTreeSet::new();
set.insert(a.spawn(Panic::Never));
set.insert(b.spawn(Panic::InDrop));
set.insert(c.spawn(Panic::Never));
catch_unwind(move || set.extract_if(|dummy| dummy.query(true)).for_each(drop)).ok();
assert_eq!(a.queried(), 1);
assert_eq!(b.queried(), 1);
assert_eq!(c.queried(), 0);
assert_eq!(a.dropped(), 1);
assert_eq!(b.dropped(), 1);
assert_eq!(c.dropped(), 1);
}
#[test]
#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
fn test_extract_if_pred_panic_leak() {
let a = CrashTestDummy::new(0);
let b = CrashTestDummy::new(1);
let c = CrashTestDummy::new(2);
let mut set = BTreeSet::new();
set.insert(a.spawn(Panic::Never));
set.insert(b.spawn(Panic::InQuery));
set.insert(c.spawn(Panic::InQuery));
catch_unwind(AssertUnwindSafe(|| set.extract_if(|dummy| dummy.query(true)).for_each(drop)))
.ok();
assert_eq!(a.queried(), 1);
assert_eq!(b.queried(), 1);
assert_eq!(c.queried(), 0);
assert_eq!(a.dropped(), 1);
assert_eq!(b.dropped(), 0);
assert_eq!(c.dropped(), 0);
assert_eq!(set.len(), 2);
assert_eq!(set.first().unwrap().id(), 1);
assert_eq!(set.last().unwrap().id(), 2);
}
#[test]
fn test_clear() {
let mut x = BTreeSet::new();
x.insert(1);
x.clear();
assert!(x.is_empty());
}
#[test]
fn test_remove() {
let mut x = BTreeSet::new();
assert!(x.is_empty());
x.insert(1);
x.insert(2);
x.insert(3);
x.insert(4);
assert_eq!(x.remove(&2), true);
assert_eq!(x.remove(&0), false);
assert_eq!(x.remove(&5), false);
assert_eq!(x.remove(&1), true);
assert_eq!(x.remove(&2), false);
assert_eq!(x.remove(&3), true);
assert_eq!(x.remove(&4), true);
assert_eq!(x.remove(&4), false);
assert!(x.is_empty());
}
#[test]
fn test_zip() {
let mut x = BTreeSet::new();
x.insert(5);
x.insert(12);
x.insert(11);
let mut y = BTreeSet::new();
y.insert("foo");
y.insert("bar");
let x = x;
let y = y;
let mut z = x.iter().zip(&y);
assert_eq!(z.next().unwrap(), (&5, &("bar")));
assert_eq!(z.next().unwrap(), (&11, &("foo")));
assert!(z.next().is_none());
}
#[test]
fn test_from_iter() {
let xs = [1, 2, 3, 4, 5, 6, 7, 8, 9];
let set = BTreeSet::from_iter(xs.iter());
for x in &xs {
assert!(set.contains(x));
}
}
#[test]
fn test_show() {
let mut set = BTreeSet::new();
let empty = BTreeSet::<i32>::new();
set.insert(1);
set.insert(2);
let set_str = format!("{set:?}");
assert_eq!(set_str, "{1, 2}");
assert_eq!(format!("{empty:?}"), "{}");
}
#[test]
fn test_extend_ref() {
let mut a = BTreeSet::new();
a.insert(1);
a.extend(&[2, 3, 4]);
assert_eq!(a.len(), 4);
assert!(a.contains(&1));
assert!(a.contains(&2));
assert!(a.contains(&3));
assert!(a.contains(&4));
let mut b = BTreeSet::new();
b.insert(5);
b.insert(6);
a.extend(&b);
assert_eq!(a.len(), 6);
assert!(a.contains(&1));
assert!(a.contains(&2));
assert!(a.contains(&3));
assert!(a.contains(&4));
assert!(a.contains(&5));
assert!(a.contains(&6));
}
#[test]
fn test_recovery() {
#[derive(Debug)]
struct Foo(&'static str, #[allow(dead_code)] i32);
impl PartialEq for Foo {
fn eq(&self, other: &Self) -> bool {
self.0 == other.0
}
}
impl Eq for Foo {}
impl PartialOrd for Foo {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
self.0.partial_cmp(&other.0)
}
}
impl Ord for Foo {
fn cmp(&self, other: &Self) -> Ordering {
self.0.cmp(&other.0)
}
}
let mut s = BTreeSet::new();
assert_eq!(s.replace(Foo("a", 1)), None);
assert_eq!(s.len(), 1);
assert_eq!(s.replace(Foo("a", 2)), Some(Foo("a", 1)));
assert_eq!(s.len(), 1);
{
let mut it = s.iter();
assert_eq!(it.next(), Some(&Foo("a", 2)));
assert_eq!(it.next(), None);
}
assert_eq!(s.get(&Foo("a", 1)), Some(&Foo("a", 2)));
assert_eq!(s.take(&Foo("a", 1)), Some(Foo("a", 2)));
assert_eq!(s.len(), 0);
assert_eq!(s.get(&Foo("a", 1)), None);
assert_eq!(s.take(&Foo("a", 1)), None);
assert_eq!(s.iter().next(), None);
}
#[allow(dead_code)]
fn assert_covariance() {
fn set<'new>(v: BTreeSet<&'static str>) -> BTreeSet<&'new str> {
v
}
fn iter<'a, 'new>(v: Iter<'a, &'static str>) -> Iter<'a, &'new str> {
v
}
fn into_iter<'new>(v: IntoIter<&'static str>) -> IntoIter<&'new str> {
v
}
fn range<'a, 'new>(v: Range<'a, &'static str>) -> Range<'a, &'new str> {
v
}
// not applied to Difference, Intersection, SymmetricDifference, Union
}
#[allow(dead_code)]
fn assert_sync() {
fn set<T: Sync>(v: &BTreeSet<T>) -> impl Sync + '_ {
v
}
fn iter<T: Sync>(v: &BTreeSet<T>) -> impl Sync + '_ {
v.iter()
}
fn into_iter<T: Sync>(v: BTreeSet<T>) -> impl Sync {
v.into_iter()
}
fn range<T: Sync + Ord>(v: &BTreeSet<T>) -> impl Sync + '_ {
v.range(..)
}
fn extract_if<T: Sync + Ord>(v: &mut BTreeSet<T>) -> impl Sync + '_ {
v.extract_if(|_| false)
}
fn difference<T: Sync + Ord>(v: &BTreeSet<T>) -> impl Sync + '_ {
v.difference(&v)
}
fn intersection<T: Sync + Ord>(v: &BTreeSet<T>) -> impl Sync + '_ {
v.intersection(&v)
}
fn symmetric_difference<T: Sync + Ord>(v: &BTreeSet<T>) -> impl Sync + '_ {
v.symmetric_difference(&v)
}
fn union<T: Sync + Ord>(v: &BTreeSet<T>) -> impl Sync + '_ {
v.union(&v)
}
}
#[allow(dead_code)]
fn assert_send() {
fn set<T: Send>(v: BTreeSet<T>) -> impl Send {
v
}
fn iter<T: Send + Sync>(v: &BTreeSet<T>) -> impl Send + '_ {
v.iter()
}
fn into_iter<T: Send>(v: BTreeSet<T>) -> impl Send {
v.into_iter()
}
fn range<T: Send + Sync + Ord>(v: &BTreeSet<T>) -> impl Send + '_ {
v.range(..)
}
fn extract_if<T: Send + Ord>(v: &mut BTreeSet<T>) -> impl Send + '_ {
v.extract_if(|_| false)
}
fn difference<T: Send + Sync + Ord>(v: &BTreeSet<T>) -> impl Send + '_ {
v.difference(&v)
}
fn intersection<T: Send + Sync + Ord>(v: &BTreeSet<T>) -> impl Send + '_ {
v.intersection(&v)
}
fn symmetric_difference<T: Send + Sync + Ord>(v: &BTreeSet<T>) -> impl Send + '_ {
v.symmetric_difference(&v)
}
fn union<T: Send + Sync + Ord>(v: &BTreeSet<T>) -> impl Send + '_ {
v.union(&v)
}
}
#[allow(dead_code)]
// Check that the member-like functions conditionally provided by #[derive()]
// are not overridden by genuine member functions with a different signature.
fn assert_derives() {
fn hash<T: Hash, H: Hasher>(v: BTreeSet<T>, state: &mut H) {
v.hash(state);
// Tested much more thoroughly outside the crate in btree_set_hash.rs
}
fn eq<T: PartialEq>(v: BTreeSet<T>) {
let _ = v.eq(&v);
}
fn ne<T: PartialEq>(v: BTreeSet<T>) {
let _ = v.ne(&v);
}
fn cmp<T: Ord>(v: BTreeSet<T>) {
let _ = v.cmp(&v);
}
fn min<T: Ord>(v: BTreeSet<T>, w: BTreeSet<T>) {
let _ = v.min(w);
}
fn max<T: Ord>(v: BTreeSet<T>, w: BTreeSet<T>) {
let _ = v.max(w);
}
fn clamp<T: Ord>(v: BTreeSet<T>, w: BTreeSet<T>, x: BTreeSet<T>) {
let _ = v.clamp(w, x);
}
fn partial_cmp<T: PartialOrd>(v: &BTreeSet<T>) {
let _ = v.partial_cmp(&v);
}
}
#[test]
fn test_ord_absence() {
fn set<K>(mut set: BTreeSet<K>) {
let _ = set.is_empty();
let _ = set.len();
set.clear();
let _ = set.iter();
let _ = set.into_iter();
}
fn set_debug<K: Debug>(set: BTreeSet<K>) {
format!("{set:?}");
format!("{:?}", set.iter());
format!("{:?}", set.into_iter());
}
fn set_clone<K: Clone>(mut set: BTreeSet<K>) {
set.clone_from(&set.clone());
}
#[derive(Debug, Clone)]
struct NonOrd;
set(BTreeSet::<NonOrd>::new());
set_debug(BTreeSet::<NonOrd>::new());
set_clone(BTreeSet::<NonOrd>::default());
}
#[test]
fn test_append() {
let mut a = BTreeSet::new();
a.insert(1);
a.insert(2);
a.insert(3);
let mut b = BTreeSet::new();
b.insert(3);
b.insert(4);
b.insert(5);
a.append(&mut b);
assert_eq!(a.len(), 5);
assert_eq!(b.len(), 0);
assert_eq!(a.contains(&1), true);
assert_eq!(a.contains(&2), true);
assert_eq!(a.contains(&3), true);
assert_eq!(a.contains(&4), true);
assert_eq!(a.contains(&5), true);
}
#[test]
fn test_first_last() {
let mut a = BTreeSet::new();
assert_eq!(a.first(), None);
assert_eq!(a.last(), None);
a.insert(1);
assert_eq!(a.first(), Some(&1));
assert_eq!(a.last(), Some(&1));
a.insert(2);
assert_eq!(a.first(), Some(&1));
assert_eq!(a.last(), Some(&2));
for i in 3..=12 {
a.insert(i);
}
assert_eq!(a.first(), Some(&1));
assert_eq!(a.last(), Some(&12));
assert_eq!(a.pop_first(), Some(1));
assert_eq!(a.pop_last(), Some(12));
assert_eq!(a.pop_first(), Some(2));
assert_eq!(a.pop_last(), Some(11));
assert_eq!(a.pop_first(), Some(3));
assert_eq!(a.pop_last(), Some(10));
assert_eq!(a.pop_first(), Some(4));
assert_eq!(a.pop_first(), Some(5));
assert_eq!(a.pop_first(), Some(6));
assert_eq!(a.pop_first(), Some(7));
assert_eq!(a.pop_first(), Some(8));
assert_eq!(a.clone().pop_last(), Some(9));
assert_eq!(a.pop_first(), Some(9));
assert_eq!(a.pop_first(), None);
assert_eq!(a.pop_last(), None);
}
// Unlike the function with the same name in map/tests, this returns no values,
// which also means it yields different predetermined pseudo-random keys,
// so the test cases using this function explore slightly different trees.
fn rand_data(len: usize) -> Vec<u32> {
let mut rng = DeterministicRng::new();
Vec::from_iter((0..len).map(|_| rng.next()))
}
#[test]
fn test_split_off_empty_right() {
let mut data = rand_data(173);
let mut set = BTreeSet::from_iter(data.clone());
let right = set.split_off(&(data.iter().max().unwrap() + 1));
data.sort();
assert!(set.into_iter().eq(data));
assert!(right.into_iter().eq(None));
}
#[test]
fn test_split_off_empty_left() {
let mut data = rand_data(314);
let mut set = BTreeSet::from_iter(data.clone());
let right = set.split_off(data.iter().min().unwrap());
data.sort();
assert!(set.into_iter().eq(None));
assert!(right.into_iter().eq(data));
}
#[test]
fn test_split_off_large_random_sorted() {
// Miri is too slow
let mut data = if cfg!(miri) { rand_data(529) } else { rand_data(1529) };
// special case with maximum height.
data.sort();
let mut set = BTreeSet::from_iter(data.clone());
let key = data[data.len() / 2];
let right = set.split_off(&key);
assert!(set.into_iter().eq(data.clone().into_iter().filter(|x| *x < key)));
assert!(right.into_iter().eq(data.into_iter().filter(|x| *x >= key)));
}
#[test]
fn from_array() {
let set = BTreeSet::from([1, 2, 3, 4]);
let unordered_duplicates = BTreeSet::from([4, 1, 4, 3, 2]);
assert_eq!(set, unordered_duplicates);
}
#[should_panic(expected = "range start is greater than range end in BTreeSet")]
#[test]
fn test_range_panic_1() {
let mut set = BTreeSet::new();
set.insert(3);
set.insert(5);
set.insert(8);
let _invalid_range = set.range((Included(&8), Included(&3)));
}
#[should_panic(expected = "range start and end are equal and excluded in BTreeSet")]
#[test]
fn test_range_panic_2() {
let mut set = BTreeSet::new();
set.insert(3);
set.insert(5);
set.insert(8);
let _invalid_range = set.range((Excluded(&5), Excluded(&5)));
}

View File

@ -0,0 +1,29 @@
/// Zero-Sized Type (ZST) for internal `BTreeSet` values.
/// Used instead of `()` to differentiate between:
/// * `BTreeMap<T, ()>` (possible user-defined map)
/// * `BTreeMap<T, SetValZST>` (internal set representation)
#[derive(Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Clone, Default)]
pub struct SetValZST;
/// A trait to differentiate between `BTreeMap` and `BTreeSet` values.
/// Returns `true` only for type `SetValZST`, `false` for all other types (blanket implementation).
/// `TypeId` requires a `'static` lifetime; use of this trait avoids that restriction.
///
/// [`TypeId`]: std::any::TypeId
pub trait IsSetVal {
fn is_set_val() -> bool;
}
// Blanket implementation
impl<V> IsSetVal for V {
default fn is_set_val() -> bool {
false
}
}
// Specialization
impl IsSetVal for SetValZST {
fn is_set_val() -> bool {
true
}
}
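// A sketch of how this dispatches for a hypothetical generic caller
// (`debug_kind` is illustrative and not part of this module):
//
//     fn debug_kind<V: IsSetVal>() -> &'static str {
//         if V::is_set_val() { "set value (SetValZST)" } else { "map value" }
//     }
//
//     assert_eq!(debug_kind::<SetValZST>(), "set value (SetValZST)");
//     assert_eq!(debug_kind::<()>(), "map value");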

View File

@ -0,0 +1,73 @@
use super::node::{ForceResult::*, Root};
use super::search::SearchResult::*;
use core::alloc::Allocator;
use core::borrow::Borrow;
impl<K, V> Root<K, V> {
/// Calculates the length of both trees that result from splitting up
/// a given number of distinct key-value pairs.
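///
/// For example, when splitting 10 pairs where `root_a` is the shorter tree,
/// `root_a`'s length is counted directly (say 3) and `root_b`'s is inferred
/// as `10 - 3 = 7`; debug builds cross-check the inferred value.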
pub fn calc_split_length(
total_num: usize,
root_a: &Root<K, V>,
root_b: &Root<K, V>,
) -> (usize, usize) {
let (length_a, length_b);
if root_a.height() < root_b.height() {
length_a = root_a.reborrow().calc_length();
length_b = total_num - length_a;
debug_assert_eq!(length_b, root_b.reborrow().calc_length());
} else {
length_b = root_b.reborrow().calc_length();
length_a = total_num - length_b;
debug_assert_eq!(length_a, root_a.reborrow().calc_length());
}
(length_a, length_b)
}
/// Split off a tree with key-value pairs at and after the given key.
/// The result is meaningful only if the tree is ordered by key,
/// and if the ordering of `Q` corresponds to that of `K`.
/// If `self` respects all `BTreeMap` tree invariants, then both
/// `self` and the returned tree will respect those invariants.
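///
/// For reference, the public `BTreeMap::split_off` built on top of this
/// behaves as in the following sketch:
///
/// ```
/// use std::collections::BTreeMap;
///
/// let mut a = BTreeMap::from([(1, "a"), (2, "b"), (3, "c")]);
/// let b = a.split_off(&2); // keys >= 2 move to the returned map
/// assert_eq!(a.len(), 1);
/// assert_eq!(b.len(), 2);
/// ```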
pub fn split_off<Q: ?Sized + Ord, A: Allocator + Clone>(&mut self, key: &Q, alloc: A) -> Self
where
K: Borrow<Q>,
{
let left_root = self;
let mut right_root = Root::new_pillar(left_root.height(), alloc.clone());
let mut left_node = left_root.borrow_mut();
let mut right_node = right_root.borrow_mut();
loop {
let mut split_edge = match left_node.search_node(key) {
// key is going to the right tree
Found(kv) => kv.left_edge(),
GoDown(edge) => edge,
};
split_edge.move_suffix(&mut right_node);
match (split_edge.force(), right_node.force()) {
(Internal(edge), Internal(node)) => {
left_node = edge.descend();
right_node = node.first_edge().descend();
}
(Leaf(_), Leaf(_)) => break,
_ => unreachable!(),
}
}
left_root.fix_right_border(alloc.clone());
right_root.fix_left_border(alloc);
right_root
}
/// Creates a tree consisting of empty nodes.
fn new_pillar<A: Allocator + Clone>(height: usize, alloc: A) -> Self {
let mut root = Root::new(alloc.clone());
for _ in 0..height {
root.push_internal_level(alloc.clone());
}
root
}
}

View File

@ -0,0 +1,158 @@
//! Collection types.
#![stable(feature = "rust1", since = "1.0.0")]
#[cfg(not(no_global_oom_handling))]
pub mod binary_heap;
#[cfg(not(no_global_oom_handling))]
mod btree;
#[cfg(not(no_global_oom_handling))]
pub mod linked_list;
#[cfg(not(no_global_oom_handling))]
pub mod vec_deque;
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
pub mod btree_map {
//! An ordered map based on a B-Tree.
#[stable(feature = "rust1", since = "1.0.0")]
pub use super::btree::map::*;
}
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
pub mod btree_set {
//! An ordered set based on a B-Tree.
#[stable(feature = "rust1", since = "1.0.0")]
pub use super::btree::set::*;
}
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
#[doc(no_inline)]
pub use binary_heap::BinaryHeap;
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
#[doc(no_inline)]
pub use btree_map::BTreeMap;
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
#[doc(no_inline)]
pub use btree_set::BTreeSet;
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
#[doc(no_inline)]
pub use linked_list::LinkedList;
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
#[doc(no_inline)]
pub use vec_deque::VecDeque;
use crate::alloc::{Layout, LayoutError};
use core::fmt::Display;
/// The error type for `try_reserve` methods.
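///
/// # Examples
///
/// A minimal sketch of how this error surfaces through the stable
/// `try_reserve` methods on collections:
///
/// ```
/// use std::collections::TryReserveError;
///
/// fn grow(buf: &mut Vec<u8>, additional: usize) -> Result<(), TryReserveError> {
///     buf.try_reserve(additional)
/// }
///
/// assert!(grow(&mut Vec::new(), 16).is_ok());
/// ```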
#[derive(Clone, PartialEq, Eq, Debug)]
#[stable(feature = "try_reserve", since = "1.57.0")]
pub struct TryReserveError {
kind: TryReserveErrorKind,
}
impl TryReserveError {
/// Details about the allocation that caused the error
#[inline]
#[must_use]
#[unstable(
feature = "try_reserve_kind",
reason = "Uncertain how much info should be exposed",
issue = "48043"
)]
pub fn kind(&self) -> TryReserveErrorKind {
self.kind.clone()
}
}
/// Details of the allocation that caused a `TryReserveError`
#[derive(Clone, PartialEq, Eq, Debug)]
#[unstable(
feature = "try_reserve_kind",
reason = "Uncertain how much info should be exposed",
issue = "48043"
)]
pub enum TryReserveErrorKind {
/// Error due to the computed capacity exceeding the collection's maximum
/// (usually `isize::MAX` bytes).
CapacityOverflow,
/// The memory allocator returned an error
AllocError {
/// The layout of allocation request that failed
layout: Layout,
#[doc(hidden)]
#[unstable(
feature = "container_error_extra",
issue = "none",
reason = "\
Enable exposing the allocator's custom error value \
if an associated type is added in the future: \
https://github.com/rust-lang/wg-allocators/issues/23"
)]
non_exhaustive: (),
},
}
#[unstable(
feature = "try_reserve_kind",
reason = "Uncertain how much info should be exposed",
issue = "48043"
)]
impl From<TryReserveErrorKind> for TryReserveError {
#[inline]
fn from(kind: TryReserveErrorKind) -> Self {
Self { kind }
}
}
#[unstable(feature = "try_reserve_kind", reason = "new API", issue = "48043")]
impl From<LayoutError> for TryReserveErrorKind {
/// Always evaluates to [`TryReserveErrorKind::CapacityOverflow`].
#[inline]
fn from(_: LayoutError) -> Self {
TryReserveErrorKind::CapacityOverflow
}
}
#[stable(feature = "try_reserve", since = "1.57.0")]
impl Display for TryReserveError {
fn fmt(
&self,
fmt: &mut core::fmt::Formatter<'_>,
) -> core::result::Result<(), core::fmt::Error> {
fmt.write_str("memory allocation failed")?;
let reason = match self.kind {
TryReserveErrorKind::CapacityOverflow => {
" because the computed capacity exceeded the collection's maximum"
}
TryReserveErrorKind::AllocError { .. } => {
" because the memory allocator returned an error"
}
};
fmt.write_str(reason)
}
}
/// An intermediate trait for specialization of `Extend`.
#[doc(hidden)]
#[cfg(not(no_global_oom_handling))]
trait SpecExtend<I: IntoIterator> {
/// Extends `self` with the contents of the given iterator.
fn spec_extend(&mut self, iter: I);
}
#[stable(feature = "try_reserve", since = "1.57.0")]
impl core::error::Error for TryReserveError {}

View File

@ -0,0 +1,275 @@
use core::iter::FusedIterator;
use core::marker::PhantomData;
use core::mem::{self, SizedTypeProperties};
use core::ptr::NonNull;
use core::{fmt, ptr};
use crate::alloc::{Allocator, Global};
use super::VecDeque;
/// A draining iterator over the elements of a `VecDeque`.
///
/// This `struct` is created by the [`drain`] method on [`VecDeque`]. See its
/// documentation for more.
///
/// [`drain`]: VecDeque::drain
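///
/// # Examples
///
/// A brief sketch of typical use:
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut deque = VecDeque::from([1, 2, 3, 4]);
/// let drained: Vec<_> = deque.drain(1..3).collect();
/// assert_eq!(drained, [2, 3]);
/// assert_eq!(deque, [1, 4]);
/// ```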
#[stable(feature = "drain", since = "1.6.0")]
pub struct Drain<
'a,
T: 'a,
#[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
// We can't just use a &mut VecDeque<T, A>, as that would make Drain invariant over T
// and we want it to be covariant instead
deque: NonNull<VecDeque<T, A>>,
// drain_start is stored in deque.len
drain_len: usize,
// index into the logical array, not the physical one (always lies in [0..deque.len))
idx: usize,
// number of elements remaining after dropping the drain
new_len: usize,
remaining: usize,
// Needed to make Drain covariant over T
_marker: PhantomData<&'a T>,
}
impl<'a, T, A: Allocator> Drain<'a, T, A> {
pub(super) unsafe fn new(
deque: &'a mut VecDeque<T, A>,
drain_start: usize,
drain_len: usize,
) -> Self {
let orig_len = mem::replace(&mut deque.len, drain_start);
let new_len = orig_len - drain_len;
Drain {
deque: NonNull::from(deque),
drain_len,
idx: drain_start,
new_len,
remaining: drain_len,
_marker: PhantomData,
}
}
// Only returns pointers to the slices, as that's all we need
// to drop them. May only be called if `self.remaining != 0`.
unsafe fn as_slices(&self) -> (*mut [T], *mut [T]) {
unsafe {
let deque = self.deque.as_ref();
// We know that `self.idx + self.remaining <= deque.len <= usize::MAX`, so this won't overflow.
let logical_remaining_range = self.idx..self.idx + self.remaining;
// SAFETY: `logical_remaining_range` represents the
// range into the logical buffer of elements that
// haven't been drained yet, so they're all initialized,
// and `slice::range(start..end, end) == start..end`,
// so the preconditions for `slice_ranges` are met.
let (a_range, b_range) =
deque.slice_ranges(logical_remaining_range.clone(), logical_remaining_range.end);
(deque.buffer_range(a_range), deque.buffer_range(b_range))
}
}
}
#[stable(feature = "collection_debug", since = "1.17.0")]
impl<T: fmt::Debug, A: Allocator> fmt::Debug for Drain<'_, T, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("Drain")
.field(&self.drain_len)
.field(&self.idx)
.field(&self.new_len)
.field(&self.remaining)
.finish()
}
}
#[stable(feature = "drain", since = "1.6.0")]
unsafe impl<T: Sync, A: Allocator + Sync> Sync for Drain<'_, T, A> {}
#[stable(feature = "drain", since = "1.6.0")]
unsafe impl<T: Send, A: Allocator + Send> Send for Drain<'_, T, A> {}
#[stable(feature = "drain", since = "1.6.0")]
impl<T, A: Allocator> Drop for Drain<'_, T, A> {
fn drop(&mut self) {
struct DropGuard<'r, 'a, T, A: Allocator>(&'r mut Drain<'a, T, A>);
let guard = DropGuard(self);
if mem::needs_drop::<T>() && guard.0.remaining != 0 {
unsafe {
// SAFETY: We just checked that `self.remaining != 0`.
let (front, back) = guard.0.as_slices();
// since idx is a logical index, we don't need to worry about wrapping.
guard.0.idx += front.len();
guard.0.remaining -= front.len();
ptr::drop_in_place(front);
guard.0.remaining = 0;
ptr::drop_in_place(back);
}
}
// Dropping `guard` handles moving the remaining elements into place.
impl<'r, 'a, T, A: Allocator> Drop for DropGuard<'r, 'a, T, A> {
#[inline]
fn drop(&mut self) {
if mem::needs_drop::<T>() && self.0.remaining != 0 {
unsafe {
// SAFETY: We just checked that `self.remaining != 0`.
let (front, back) = self.0.as_slices();
ptr::drop_in_place(front);
ptr::drop_in_place(back);
}
}
let source_deque = unsafe { self.0.deque.as_mut() };
let drain_len = self.0.drain_len;
let new_len = self.0.new_len;
if T::IS_ZST {
// no need to copy around any memory if T is a ZST
source_deque.len = new_len;
return;
}
let head_len = source_deque.len; // #elements in front of the drain
let tail_len = new_len - head_len; // #elements behind the drain
// Next, we will fill the hole left by the drain with as few writes as possible.
// The code below handles the following control flow and reduces the amount of
// branches under the assumption that `head_len == 0 || tail_len == 0`, i.e.
// draining at the front or at the back of the deque is especially common.
//
// H = "head index" = `deque.head`
// h = elements in front of the drain
// d = elements in the drain
// t = elements behind the drain
//
// Note that the buffer may wrap at any point and the wrapping is handled by
// `wrap_copy` and `to_physical_idx`.
//
// Case 1: if `head_len == 0 && tail_len == 0`
// Everything was drained, reset the head index back to 0.
//             H
// [ . . . . . d d d d . . . . . ]
//   H
// [ . . . . . . . . . . . . . . ]
//
// Case 2: else if `tail_len == 0`
// Don't move data or the head index.
//         H
// [ . . . h h h h d d d d . . . ]
//         H
// [ . . . h h h h . . . . . . . ]
//
// Case 3: else if `head_len == 0`
// Don't move data, but move the head index.
//         H
// [ . . . d d d d t t t t . . . ]
//                 H
// [ . . . . . . . t t t t . . . ]
//
// Case 4: else if `tail_len <= head_len`
// Move data, but not the head index.
//       H
// [ . . h h h h d d d d t t . . ]
//       H
// [ . . h h h h t t . . . . . . ]
//
// Case 5: else
// Move data and the head index.
//     H
// [ . . h h d d d d t t t t . . ]
//               H
// [ . . . . . . h h t t t t . . ]
// When draining at the front (`.drain(..n)`) or at the back (`.drain(n..)`),
// we don't need to copy any data. The number of elements copied would be 0.
if head_len != 0 && tail_len != 0 {
join_head_and_tail_wrapping(source_deque, drain_len, head_len, tail_len);
// Marking this function as cold helps LLVM to eliminate it entirely if
// this branch is never taken.
// We use `#[cold]` instead of `#[inline(never)]`, because inlining this
// function into the general case (`.drain(n..m)`) is fine.
// See `tests/codegen/vecdeque-drain.rs` for a test.
#[cold]
fn join_head_and_tail_wrapping<T, A: Allocator>(
source_deque: &mut VecDeque<T, A>,
drain_len: usize,
head_len: usize,
tail_len: usize,
) {
// Pick whether to move the head or the tail here.
let (src, dst, len);
if head_len < tail_len {
src = source_deque.head;
dst = source_deque.to_physical_idx(drain_len);
len = head_len;
} else {
src = source_deque.to_physical_idx(head_len + drain_len);
dst = source_deque.to_physical_idx(head_len);
len = tail_len;
};
unsafe {
source_deque.wrap_copy(src, dst, len);
}
}
}
if new_len == 0 {
// Special case: If the entire deque was drained, reset the head back to 0,
// like `.clear()` does.
source_deque.head = 0;
} else if head_len < tail_len {
// If we moved the head above, then we need to adjust the head index here.
source_deque.head = source_deque.to_physical_idx(drain_len);
}
source_deque.len = new_len;
}
}
}
}
#[stable(feature = "drain", since = "1.6.0")]
impl<T, A: Allocator> Iterator for Drain<'_, T, A> {
type Item = T;
#[inline]
fn next(&mut self) -> Option<T> {
if self.remaining == 0 {
return None;
}
let wrapped_idx = unsafe { self.deque.as_ref().to_physical_idx(self.idx) };
self.idx += 1;
self.remaining -= 1;
Some(unsafe { self.deque.as_mut().buffer_read(wrapped_idx) })
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.remaining;
(len, Some(len))
}
}
#[stable(feature = "drain", since = "1.6.0")]
impl<T, A: Allocator> DoubleEndedIterator for Drain<'_, T, A> {
#[inline]
fn next_back(&mut self) -> Option<T> {
if self.remaining == 0 {
return None;
}
self.remaining -= 1;
let wrapped_idx = unsafe { self.deque.as_ref().to_physical_idx(self.idx + self.remaining) };
Some(unsafe { self.deque.as_mut().buffer_read(wrapped_idx) })
}
}
#[stable(feature = "drain", since = "1.6.0")]
impl<T, A: Allocator> ExactSizeIterator for Drain<'_, T, A> {}
#[stable(feature = "fused", since = "1.26.0")]
impl<T, A: Allocator> FusedIterator for Drain<'_, T, A> {}

View File

@ -0,0 +1,261 @@
use core::iter::{FusedIterator, TrustedLen};
use core::num::NonZero;
use core::{array, fmt, mem::MaybeUninit, ops::Try, ptr};
use crate::alloc::{Allocator, Global};
use super::VecDeque;
/// An owning iterator over the elements of a `VecDeque`.
///
/// This `struct` is created by the [`into_iter`] method on [`VecDeque`]
/// (provided by the [`IntoIterator`] trait). See its documentation for more.
///
/// [`into_iter`]: VecDeque::into_iter
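///
/// # Examples
///
/// A brief sketch of typical use:
///
/// ```
/// use std::collections::VecDeque;
///
/// let deque = VecDeque::from([1, 2, 3]);
/// let mut iter = deque.into_iter();
/// assert_eq!(iter.next(), Some(1));
/// assert_eq!(iter.collect::<Vec<_>>(), [2, 3]);
/// ```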
#[derive(Clone)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IntoIter<
T,
#[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
inner: VecDeque<T, A>,
}
impl<T, A: Allocator> IntoIter<T, A> {
pub(super) fn new(inner: VecDeque<T, A>) -> Self {
IntoIter { inner }
}
pub(super) fn into_vecdeque(self) -> VecDeque<T, A> {
self.inner
}
}
#[stable(feature = "collection_debug", since = "1.17.0")]
impl<T: fmt::Debug, A: Allocator> fmt::Debug for IntoIter<T, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("IntoIter").field(&self.inner).finish()
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T, A: Allocator> Iterator for IntoIter<T, A> {
type Item = T;
#[inline]
fn next(&mut self) -> Option<T> {
self.inner.pop_front()
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.inner.len();
(len, Some(len))
}
#[inline]
fn advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
let len = self.inner.len;
let rem = if len < n {
self.inner.clear();
n - len
} else {
self.inner.drain(..n);
0
};
NonZero::new(rem).map_or(Ok(()), Err)
}
#[inline]
fn count(self) -> usize {
self.inner.len
}
fn try_fold<B, F, R>(&mut self, mut init: B, mut f: F) -> R
where
F: FnMut(B, Self::Item) -> R,
R: Try<Output = B>,
{
struct Guard<'a, T, A: Allocator> {
deque: &'a mut VecDeque<T, A>,
// `consumed <= deque.len` always holds.
consumed: usize,
}
impl<'a, T, A: Allocator> Drop for Guard<'a, T, A> {
fn drop(&mut self) {
self.deque.len -= self.consumed;
self.deque.head = self.deque.to_physical_idx(self.consumed);
}
}
let mut guard = Guard { deque: &mut self.inner, consumed: 0 };
let (head, tail) = guard.deque.as_slices();
init = head
.iter()
.map(|elem| {
guard.consumed += 1;
// SAFETY: Because we incremented `guard.consumed`, the
// deque effectively forgot the element, so we can take
// ownership
unsafe { ptr::read(elem) }
})
.try_fold(init, &mut f)?;
tail.iter()
.map(|elem| {
guard.consumed += 1;
// SAFETY: Same as above.
unsafe { ptr::read(elem) }
})
.try_fold(init, &mut f)
}
#[inline]
fn fold<B, F>(mut self, init: B, mut f: F) -> B
where
F: FnMut(B, Self::Item) -> B,
{
match self.try_fold(init, |b, item| Ok::<B, !>(f(b, item))) {
Ok(b) => b,
Err(e) => match e {},
}
}
#[inline]
fn last(mut self) -> Option<Self::Item> {
self.inner.pop_back()
}
fn next_chunk<const N: usize>(
&mut self,
) -> Result<[Self::Item; N], array::IntoIter<Self::Item, N>> {
let mut raw_arr = MaybeUninit::uninit_array();
let raw_arr_ptr = raw_arr.as_mut_ptr().cast();
let (head, tail) = self.inner.as_slices();
if head.len() >= N {
// SAFETY: By manually adjusting the head and length of the deque, we effectively
// make it forget the first `N` elements, so taking ownership of them is safe.
unsafe { ptr::copy_nonoverlapping(head.as_ptr(), raw_arr_ptr, N) };
self.inner.head = self.inner.to_physical_idx(N);
self.inner.len -= N;
// SAFETY: We initialized the entire array with items from `head`
return Ok(unsafe { raw_arr.transpose().assume_init() });
}
// SAFETY: Same argument as above.
unsafe { ptr::copy_nonoverlapping(head.as_ptr(), raw_arr_ptr, head.len()) };
let remaining = N - head.len();
if tail.len() >= remaining {
// SAFETY: Same argument as above.
unsafe {
ptr::copy_nonoverlapping(tail.as_ptr(), raw_arr_ptr.add(head.len()), remaining)
};
self.inner.head = self.inner.to_physical_idx(N);
self.inner.len -= N;
// SAFETY: We initialized the entire array with items from `head` and `tail`
Ok(unsafe { raw_arr.transpose().assume_init() })
} else {
// SAFETY: Same argument as above.
unsafe {
ptr::copy_nonoverlapping(tail.as_ptr(), raw_arr_ptr.add(head.len()), tail.len())
};
let init = head.len() + tail.len();
// We completely drained all the deque's elements.
self.inner.head = 0;
self.inner.len = 0;
// SAFETY: We copied all elements from both slices to the beginning of the array, so
// the given range is initialized.
Err(unsafe { array::IntoIter::new_unchecked(raw_arr, 0..init) })
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
#[inline]
fn next_back(&mut self) -> Option<T> {
self.inner.pop_back()
}
#[inline]
fn advance_back_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
let len = self.inner.len;
let rem = if len < n {
self.inner.clear();
n - len
} else {
self.inner.truncate(len - n);
0
};
NonZero::new(rem).map_or(Ok(()), Err)
}
fn try_rfold<B, F, R>(&mut self, mut init: B, mut f: F) -> R
where
F: FnMut(B, Self::Item) -> R,
R: Try<Output = B>,
{
struct Guard<'a, T, A: Allocator> {
deque: &'a mut VecDeque<T, A>,
// `consumed <= deque.len` always holds.
consumed: usize,
}
impl<'a, T, A: Allocator> Drop for Guard<'a, T, A> {
fn drop(&mut self) {
self.deque.len -= self.consumed;
}
}
let mut guard = Guard { deque: &mut self.inner, consumed: 0 };
let (head, tail) = guard.deque.as_slices();
init = tail
.iter()
.map(|elem| {
guard.consumed += 1;
// SAFETY: See `try_fold`'s safety comment.
unsafe { ptr::read(elem) }
})
.try_rfold(init, &mut f)?;
head.iter()
.map(|elem| {
guard.consumed += 1;
// SAFETY: Same as above.
unsafe { ptr::read(elem) }
})
.try_rfold(init, &mut f)
}
#[inline]
fn rfold<B, F>(mut self, init: B, mut f: F) -> B
where
F: FnMut(B, Self::Item) -> B,
{
match self.try_rfold(init, |b, item| Ok::<B, !>(f(b, item))) {
Ok(b) => b,
Err(e) => match e {},
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T, A: Allocator> ExactSizeIterator for IntoIter<T, A> {
#[inline]
fn is_empty(&self) -> bool {
self.inner.is_empty()
}
}
#[stable(feature = "fused", since = "1.26.0")]
impl<T, A: Allocator> FusedIterator for IntoIter<T, A> {}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T, A: Allocator> TrustedLen for IntoIter<T, A> {}

View File

@ -0,0 +1,184 @@
use core::iter::{FusedIterator, TrustedLen, TrustedRandomAccess, TrustedRandomAccessNoCoerce};
use core::num::NonZero;
use core::ops::Try;
use core::{fmt, mem, slice};
/// An iterator over the elements of a `VecDeque`.
///
/// This `struct` is created by the [`iter`] method on [`super::VecDeque`]. See its
/// documentation for more.
///
/// [`iter`]: super::VecDeque::iter
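///
/// # Examples
///
/// A brief sketch of typical use:
///
/// ```
/// use std::collections::VecDeque;
///
/// let deque = VecDeque::from([1, 2, 3]);
/// let sum: i32 = deque.iter().sum();
/// assert_eq!(sum, 6);
/// ```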
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Iter<'a, T: 'a> {
i1: slice::Iter<'a, T>,
i2: slice::Iter<'a, T>,
}
impl<'a, T> Iter<'a, T> {
pub(super) fn new(i1: slice::Iter<'a, T>, i2: slice::Iter<'a, T>) -> Self {
Self { i1, i2 }
}
}
#[stable(feature = "collection_debug", since = "1.17.0")]
impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("Iter").field(&self.i1.as_slice()).field(&self.i2.as_slice()).finish()
}
}
// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Clone for Iter<'_, T> {
fn clone(&self) -> Self {
Iter { i1: self.i1.clone(), i2: self.i2.clone() }
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Iterator for Iter<'a, T> {
type Item = &'a T;
#[inline]
fn next(&mut self) -> Option<&'a T> {
match self.i1.next() {
Some(val) => Some(val),
None => {
// most of the time, the iterator will either always
// call next(), or always call next_back(). By swapping
// the iterators once the first one is empty, we ensure
// that the first branch is taken as often as possible,
// without sacrificing correctness, as i1 is empty anyway
mem::swap(&mut self.i1, &mut self.i2);
self.i1.next()
}
}
}
fn advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
match self.i1.advance_by(n) {
Ok(()) => return Ok(()),
Err(n) => {
mem::swap(&mut self.i1, &mut self.i2);
self.i1.advance_by(n.get())
}
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.len();
(len, Some(len))
}
fn fold<Acc, F>(self, accum: Acc, mut f: F) -> Acc
where
F: FnMut(Acc, Self::Item) -> Acc,
{
let accum = self.i1.fold(accum, &mut f);
self.i2.fold(accum, &mut f)
}
fn try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R
where
F: FnMut(B, Self::Item) -> R,
R: Try<Output = B>,
{
let acc = self.i1.try_fold(init, &mut f)?;
self.i2.try_fold(acc, &mut f)
}
#[inline]
fn last(mut self) -> Option<&'a T> {
self.next_back()
}
#[inline]
unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item {
// Safety: The TrustedRandomAccess contract requires that callers only pass an index
// that is in bounds.
unsafe {
let i1_len = self.i1.len();
if idx < i1_len {
self.i1.__iterator_get_unchecked(idx)
} else {
self.i2.__iterator_get_unchecked(idx - i1_len)
}
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> DoubleEndedIterator for Iter<'a, T> {
#[inline]
fn next_back(&mut self) -> Option<&'a T> {
match self.i2.next_back() {
Some(val) => Some(val),
None => {
// most of the time, the iterator will either always
// call next(), or always call next_back(). By swapping
// the iterators once the second one is empty, we ensure
// that the first branch is taken as often as possible,
// without sacrificing correctness, as i2 is empty anyway
mem::swap(&mut self.i1, &mut self.i2);
self.i2.next_back()
}
}
}
fn advance_back_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
match self.i2.advance_back_by(n) {
Ok(()) => return Ok(()),
Err(n) => {
mem::swap(&mut self.i1, &mut self.i2);
self.i2.advance_back_by(n.get())
}
}
}
fn rfold<Acc, F>(self, accum: Acc, mut f: F) -> Acc
where
F: FnMut(Acc, Self::Item) -> Acc,
{
let accum = self.i2.rfold(accum, &mut f);
self.i1.rfold(accum, &mut f)
}
fn try_rfold<B, F, R>(&mut self, init: B, mut f: F) -> R
where
F: FnMut(B, Self::Item) -> R,
R: Try<Output = B>,
{
let acc = self.i2.try_rfold(init, &mut f)?;
self.i1.try_rfold(acc, &mut f)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ExactSizeIterator for Iter<'_, T> {
fn len(&self) -> usize {
self.i1.len() + self.i2.len()
}
fn is_empty(&self) -> bool {
self.i1.is_empty() && self.i2.is_empty()
}
}
#[stable(feature = "fused", since = "1.26.0")]
impl<T> FusedIterator for Iter<'_, T> {}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T> TrustedLen for Iter<'_, T> {}
#[doc(hidden)]
#[unstable(feature = "trusted_random_access", issue = "none")]
unsafe impl<T> TrustedRandomAccess for Iter<'_, T> {}
#[doc(hidden)]
#[unstable(feature = "trusted_random_access", issue = "none")]
unsafe impl<T> TrustedRandomAccessNoCoerce for Iter<'_, T> {
const MAY_HAVE_SIDE_EFFECT: bool = false;
}

View File

@ -0,0 +1,175 @@
use core::iter::{FusedIterator, TrustedLen, TrustedRandomAccess, TrustedRandomAccessNoCoerce};
use core::num::NonZero;
use core::ops::Try;
use core::{fmt, mem, slice};
/// A mutable iterator over the elements of a `VecDeque`.
///
/// This `struct` is created by the [`iter_mut`] method on [`super::VecDeque`]. See its
/// documentation for more.
///
/// [`iter_mut`]: super::VecDeque::iter_mut
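///
/// # Examples
///
/// A brief sketch of typical use:
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut deque = VecDeque::from([1, 2, 3]);
/// for x in deque.iter_mut() {
///     *x *= 10;
/// }
/// assert_eq!(deque, [10, 20, 30]);
/// ```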
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IterMut<'a, T: 'a> {
i1: slice::IterMut<'a, T>,
i2: slice::IterMut<'a, T>,
}
impl<'a, T> IterMut<'a, T> {
pub(super) fn new(i1: slice::IterMut<'a, T>, i2: slice::IterMut<'a, T>) -> Self {
Self { i1, i2 }
}
}
#[stable(feature = "collection_debug", since = "1.17.0")]
impl<T: fmt::Debug> fmt::Debug for IterMut<'_, T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("IterMut").field(&self.i1.as_slice()).field(&self.i2.as_slice()).finish()
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Iterator for IterMut<'a, T> {
type Item = &'a mut T;
#[inline]
fn next(&mut self) -> Option<&'a mut T> {
match self.i1.next() {
Some(val) => Some(val),
None => {
// most of the time, the iterator will either always
// call next(), or always call next_back(). By swapping
// the iterators once the first one is empty, we ensure
// that the first branch is taken as often as possible,
// without sacrificing correctness, as i1 is empty anyway
mem::swap(&mut self.i1, &mut self.i2);
self.i1.next()
}
}
}
fn advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
match self.i1.advance_by(n) {
Ok(()) => return Ok(()),
Err(remaining) => {
mem::swap(&mut self.i1, &mut self.i2);
self.i1.advance_by(remaining.get())
}
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.len();
(len, Some(len))
}
fn fold<Acc, F>(self, accum: Acc, mut f: F) -> Acc
where
F: FnMut(Acc, Self::Item) -> Acc,
{
let accum = self.i1.fold(accum, &mut f);
self.i2.fold(accum, &mut f)
}
fn try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R
where
F: FnMut(B, Self::Item) -> R,
R: Try<Output = B>,
{
let acc = self.i1.try_fold(init, &mut f)?;
self.i2.try_fold(acc, &mut f)
}
#[inline]
fn last(mut self) -> Option<&'a mut T> {
self.next_back()
}
#[inline]
unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item {
// Safety: The TrustedRandomAccess contract requires that callers only pass an index
// that is in bounds.
unsafe {
let i1_len = self.i1.len();
if idx < i1_len {
self.i1.__iterator_get_unchecked(idx)
} else {
self.i2.__iterator_get_unchecked(idx - i1_len)
}
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> DoubleEndedIterator for IterMut<'a, T> {
#[inline]
fn next_back(&mut self) -> Option<&'a mut T> {
match self.i2.next_back() {
Some(val) => Some(val),
None => {
// most of the time, the iterator will either always
// call next(), or always call next_back(). By swapping
// the iterators once the second one is empty, we ensure
// that the first branch is taken as often as possible,
// without sacrificing correctness, as i2 is empty anyway
mem::swap(&mut self.i1, &mut self.i2);
self.i2.next_back()
}
}
}
fn advance_back_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
match self.i2.advance_back_by(n) {
Ok(()) => return Ok(()),
Err(remaining) => {
mem::swap(&mut self.i1, &mut self.i2);
self.i2.advance_back_by(remaining.get())
}
}
}
fn rfold<Acc, F>(self, accum: Acc, mut f: F) -> Acc
where
F: FnMut(Acc, Self::Item) -> Acc,
{
let accum = self.i2.rfold(accum, &mut f);
self.i1.rfold(accum, &mut f)
}
fn try_rfold<B, F, R>(&mut self, init: B, mut f: F) -> R
where
F: FnMut(B, Self::Item) -> R,
R: Try<Output = B>,
{
let acc = self.i2.try_rfold(init, &mut f)?;
self.i1.try_rfold(acc, &mut f)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ExactSizeIterator for IterMut<'_, T> {
fn len(&self) -> usize {
self.i1.len() + self.i2.len()
}
fn is_empty(&self) -> bool {
self.i1.is_empty() && self.i2.is_empty()
}
}
#[stable(feature = "fused", since = "1.26.0")]
impl<T> FusedIterator for IterMut<'_, T> {}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T> TrustedLen for IterMut<'_, T> {}
#[doc(hidden)]
#[unstable(feature = "trusted_random_access", issue = "none")]
unsafe impl<T> TrustedRandomAccess for IterMut<'_, T> {}
#[doc(hidden)]
#[unstable(feature = "trusted_random_access", issue = "none")]
unsafe impl<T> TrustedRandomAccessNoCoerce for IterMut<'_, T> {
const MAY_HAVE_SIDE_EFFECT: bool = false;
}

View File

@ -0,0 +1,19 @@
macro_rules! __impl_slice_eq1 {
([$($vars:tt)*] $lhs:ty, $rhs:ty, $($constraints:tt)*) => {
#[stable(feature = "vec_deque_partial_eq_slice", since = "1.17.0")]
impl<T, U, A: Allocator, $($vars)*> PartialEq<$rhs> for $lhs
where
T: PartialEq<U>,
$($constraints)*
{
fn eq(&self, other: &$rhs) -> bool {
if self.len() != other.len() {
return false;
}
let (sa, sb) = self.as_slices();
let (oa, ob) = other[..].split_at(sa.len());
sa == oa && sb == ob
}
}
}
}
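// A sketch of the invocation shape this macro expects (illustrative; the
// concrete invocations live alongside `VecDeque`'s trait impls):
//
//     __impl_slice_eq1! { [const N: usize] VecDeque<T, A>, [U; N], }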

View File

@ -0,0 +1,123 @@
use crate::alloc::Allocator;
use crate::vec;
use core::iter::TrustedLen;
use core::slice;
use super::VecDeque;
// Specialization trait used for VecDeque::extend
pub(super) trait SpecExtend<T, I> {
fn spec_extend(&mut self, iter: I);
}
impl<T, I, A: Allocator> SpecExtend<T, I> for VecDeque<T, A>
where
I: Iterator<Item = T>,
{
default fn spec_extend(&mut self, mut iter: I) {
// This function should be the moral equivalent of:
//
// for item in iter {
// self.push_back(item);
// }
// May only be called if `deque.len() < deque.capacity()`
unsafe fn push_unchecked<T, A: Allocator>(deque: &mut VecDeque<T, A>, element: T) {
// SAFETY: Because of the precondition, it's guaranteed that there is space
// in the logical array after the last element.
unsafe { deque.buffer_write(deque.to_physical_idx(deque.len), element) };
// This can't overflow because `deque.len() < deque.capacity() <= usize::MAX`.
deque.len += 1;
}
while let Some(element) = iter.next() {
let (lower, _) = iter.size_hint();
self.reserve(lower.saturating_add(1));
// SAFETY: We just reserved space for at least one element.
unsafe { push_unchecked(self, element) };
// Inner loop to avoid repeatedly calling `reserve`.
while self.len < self.capacity() {
let Some(element) = iter.next() else {
return;
};
// SAFETY: The loop condition guarantees that `self.len() < self.capacity()`.
unsafe { push_unchecked(self, element) };
}
}
}
}
impl<T, I, A: Allocator> SpecExtend<T, I> for VecDeque<T, A>
where
I: TrustedLen<Item = T>,
{
default fn spec_extend(&mut self, iter: I) {
// This is the case for a TrustedLen iterator.
let (low, high) = iter.size_hint();
if let Some(additional) = high {
debug_assert_eq!(
low,
additional,
"TrustedLen iterator's size hint is not exact: {:?}",
(low, high)
);
self.reserve(additional);
let written = unsafe {
self.write_iter_wrapping(self.to_physical_idx(self.len), iter, additional)
};
debug_assert_eq!(
additional, written,
"The number of items written to VecDeque doesn't match the TrustedLen size hint"
);
} else {
// Per TrustedLen contract a `None` upper bound means that the iterator length
// truly exceeds usize::MAX, which would eventually lead to a capacity overflow anyway.
// Since the other branch already panics eagerly (via `reserve()`) we do the same here.
// This avoids additional codegen for a fallback code path which would eventually
// panic anyway.
panic!("capacity overflow");
}
}
}
impl<T, A: Allocator> SpecExtend<T, vec::IntoIter<T>> for VecDeque<T, A> {
fn spec_extend(&mut self, mut iterator: vec::IntoIter<T>) {
let slice = iterator.as_slice();
self.reserve(slice.len());
unsafe {
self.copy_slice(self.to_physical_idx(self.len), slice);
self.len += slice.len();
}
iterator.forget_remaining_elements();
}
}
impl<'a, T: 'a, I, A: Allocator> SpecExtend<&'a T, I> for VecDeque<T, A>
where
I: Iterator<Item = &'a T>,
T: Copy,
{
default fn spec_extend(&mut self, iterator: I) {
self.spec_extend(iterator.copied())
}
}
impl<'a, T: 'a, A: Allocator> SpecExtend<&'a T, slice::Iter<'a, T>> for VecDeque<T, A>
where
T: Copy,
{
fn spec_extend(&mut self, iterator: slice::Iter<'a, T>) {
let slice = iterator.as_slice();
self.reserve(slice.len());
unsafe {
self.copy_slice(self.to_physical_idx(self.len), slice);
self.len += slice.len();
}
}
}

View File

@ -0,0 +1,33 @@
use super::{IntoIter, VecDeque};
/// Specialization trait used for `VecDeque::from_iter`
pub(super) trait SpecFromIter<T, I> {
fn spec_from_iter(iter: I) -> Self;
}
impl<T, I> SpecFromIter<T, I> for VecDeque<T>
where
I: Iterator<Item = T>,
{
default fn spec_from_iter(iterator: I) -> Self {
// Since converting is O(1) now, just re-use the `Vec` logic for
// anything where we can't do something extra-special for `VecDeque`,
// especially as that could save us some monomorphization work
// if one uses the same iterators (like slice ones) with both.
crate::vec::Vec::from_iter(iterator).into()
}
}
impl<T> SpecFromIter<T, crate::vec::IntoIter<T>> for VecDeque<T> {
#[inline]
fn spec_from_iter(iterator: crate::vec::IntoIter<T>) -> Self {
iterator.into_vecdeque()
}
}
impl<T> SpecFromIter<T, IntoIter<T>> for VecDeque<T> {
#[inline]
fn spec_from_iter(iterator: IntoIter<T>) -> Self {
iterator.into_vecdeque()
}
}

View File

@ -0,0 +1,226 @@
use super::*;
use core::assert_matches::assert_matches;
use core::ffi::FromBytesUntilNulError;
use core::hash::{Hash, Hasher};
#[allow(deprecated)]
use core::hash::SipHasher13 as DefaultHasher;
#[test]
fn c_to_rust() {
let data = b"123\0";
let ptr = data.as_ptr() as *const c_char;
unsafe {
assert_eq!(CStr::from_ptr(ptr).to_bytes(), b"123");
assert_eq!(CStr::from_ptr(ptr).to_bytes_with_nul(), b"123\0");
}
}
#[test]
fn simple() {
let s = CString::new("1234").unwrap();
assert_eq!(s.as_bytes(), b"1234");
assert_eq!(s.as_bytes_with_nul(), b"1234\0");
}
#[test]
fn build_with_zero1() {
assert!(CString::new(&b"\0"[..]).is_err());
}
#[test]
fn build_with_zero2() {
assert!(CString::new(vec![0]).is_err());
}
#[test]
fn formatted() {
let s = CString::new(&b"abc\x01\x02\n\xE2\x80\xA6\xFF"[..]).unwrap();
assert_eq!(format!("{s:?}"), r#""abc\x01\x02\n\xe2\x80\xa6\xff""#);
}
#[test]
fn borrowed() {
unsafe {
let s = CStr::from_ptr(b"12\0".as_ptr() as *const _);
assert_eq!(s.to_bytes(), b"12");
assert_eq!(s.to_bytes_with_nul(), b"12\0");
}
}
#[test]
fn to_owned() {
let data = b"123\0";
let ptr = data.as_ptr() as *const c_char;
let owned = unsafe { CStr::from_ptr(ptr).to_owned() };
assert_eq!(owned.as_bytes_with_nul(), data);
}
#[test]
fn equal_hash() {
let data = b"123\xE2\xFA\xA6\0";
let ptr = data.as_ptr() as *const c_char;
let cstr: &'static CStr = unsafe { CStr::from_ptr(ptr) };
#[allow(deprecated)]
let mut s = DefaultHasher::new();
cstr.hash(&mut s);
let cstr_hash = s.finish();
#[allow(deprecated)]
let mut s = DefaultHasher::new();
CString::new(&data[..data.len() - 1]).unwrap().hash(&mut s);
let cstring_hash = s.finish();
assert_eq!(cstr_hash, cstring_hash);
}
#[test]
fn from_bytes_with_nul() {
let data = b"123\0";
let cstr = CStr::from_bytes_with_nul(data);
assert_eq!(cstr.map(CStr::to_bytes), Ok(&b"123"[..]));
let cstr = CStr::from_bytes_with_nul(data);
assert_eq!(cstr.map(CStr::to_bytes_with_nul), Ok(&b"123\0"[..]));
unsafe {
let cstr = CStr::from_bytes_with_nul(data);
let cstr_unchecked = CStr::from_bytes_with_nul_unchecked(data);
assert_eq!(cstr, Ok(cstr_unchecked));
}
}
#[test]
fn from_bytes_with_nul_unterminated() {
let data = b"123";
let cstr = CStr::from_bytes_with_nul(data);
assert!(cstr.is_err());
}
#[test]
fn from_bytes_with_nul_interior() {
let data = b"1\023\0";
let cstr = CStr::from_bytes_with_nul(data);
assert!(cstr.is_err());
}
#[test]
fn cstr_from_bytes_until_nul() {
// Test an empty slice. This should fail because it
// does not contain a nul byte.
let b = b"";
assert_matches!(CStr::from_bytes_until_nul(&b[..]), Err(FromBytesUntilNulError { .. }));
// Test a non-empty slice, that does not contain a nul byte.
let b = b"hello";
assert_matches!(CStr::from_bytes_until_nul(&b[..]), Err(FromBytesUntilNulError { .. }));
// Test an empty nul-terminated string
let b = b"\0";
let r = CStr::from_bytes_until_nul(&b[..]).unwrap();
assert_eq!(r.to_bytes(), b"");
// Test a slice with the nul byte in the middle
let b = b"hello\0world!";
let r = CStr::from_bytes_until_nul(&b[..]).unwrap();
assert_eq!(r.to_bytes(), b"hello");
// Test a slice with the nul byte at the end
let b = b"hello\0";
let r = CStr::from_bytes_until_nul(&b[..]).unwrap();
assert_eq!(r.to_bytes(), b"hello");
// Test a slice with two nul bytes at the end
let b = b"hello\0\0";
let r = CStr::from_bytes_until_nul(&b[..]).unwrap();
assert_eq!(r.to_bytes(), b"hello");
// Test a slice containing lots of nul bytes
let b = b"\0\0\0\0";
let r = CStr::from_bytes_until_nul(&b[..]).unwrap();
assert_eq!(r.to_bytes(), b"");
}
#[test]
fn into_boxed() {
let orig: &[u8] = b"Hello, world!\0";
let cstr = CStr::from_bytes_with_nul(orig).unwrap();
let boxed: Box<CStr> = Box::from(cstr);
let cstring = cstr.to_owned().into_boxed_c_str().into_c_string();
assert_eq!(cstr, &*boxed);
assert_eq!(&*boxed, &*cstring);
assert_eq!(&*cstring, cstr);
}
#[test]
fn boxed_default() {
let boxed = <Box<CStr>>::default();
assert_eq!(boxed.to_bytes_with_nul(), &[0]);
}
#[test]
fn test_c_str_clone_into() {
let mut c_string = CString::new("lorem").unwrap();
let c_ptr = c_string.as_ptr();
let c_str = CStr::from_bytes_with_nul(b"ipsum\0").unwrap();
c_str.clone_into(&mut c_string);
assert_eq!(c_str, c_string.as_c_str());
// Cloning into a buffer of the exact same size shouldn't need to move the allocation
assert_eq!(c_ptr, c_string.as_ptr());
}
#[test]
fn into_rc() {
let orig: &[u8] = b"Hello, world!\0";
let cstr = CStr::from_bytes_with_nul(orig).unwrap();
let rc: Rc<CStr> = Rc::from(cstr);
let arc: Arc<CStr> = Arc::from(cstr);
assert_eq!(&*rc, cstr);
assert_eq!(&*arc, cstr);
let rc2: Rc<CStr> = Rc::from(cstr.to_owned());
let arc2: Arc<CStr> = Arc::from(cstr.to_owned());
assert_eq!(&*rc2, cstr);
assert_eq!(&*arc2, cstr);
}
#[test]
fn cstr_const_constructor() {
const CSTR: &CStr = unsafe { CStr::from_bytes_with_nul_unchecked(b"Hello, world!\0") };
assert_eq!(CSTR.to_str().unwrap(), "Hello, world!");
}
#[test]
fn cstr_index_from() {
let original = b"Hello, world!\0";
let cstr = CStr::from_bytes_with_nul(original).unwrap();
let result = CStr::from_bytes_with_nul(&original[7..]).unwrap();
assert_eq!(&cstr[7..], result);
}
#[test]
#[should_panic]
fn cstr_index_from_empty() {
let original = b"Hello, world!\0";
let cstr = CStr::from_bytes_with_nul(original).unwrap();
let _ = &cstr[original.len()..];
}
#[test]
fn c_string_from_empty_string() {
let original = "";
let cstring = CString::new(original).unwrap();
assert_eq!(original.as_bytes(), cstring.as_bytes());
assert_eq!([b'\0'], cstring.as_bytes_with_nul());
}
#[test]
fn c_str_from_empty_string() {
let original = b"\0";
let cstr = CStr::from_bytes_with_nul(original).unwrap();
assert_eq!([] as [u8; 0], cstr.to_bytes());
assert_eq!([b'\0'], cstr.to_bytes_with_nul());
}

View File

@ -0,0 +1,92 @@
//! Utilities related to FFI bindings.
//!
//! This module provides utilities to handle data across non-Rust
//! interfaces, like other programming languages and the underlying
//! operating system. It is mainly of use for FFI (Foreign Function
//! Interface) bindings and code that needs to exchange C-like strings
//! with other languages.
//!
//! # Overview
//!
//! Rust represents owned strings with the [`String`] type, and
//! borrowed slices of strings with the [`str`] primitive. Both are
//! always in UTF-8 encoding, and may contain nul bytes in the middle,
//! i.e., if you look at the bytes that make up the string, there may
//! be a `\0` among them. Both `String` and `str` store their length
//! explicitly; there are no nul terminators at the end of strings
//! like in C.
//!
//! C strings are different from Rust strings:
//!
//! * **Encodings** - Rust strings are UTF-8, but C strings may use
//! other encodings. If you are using a string from C, you should
//! check its encoding explicitly, rather than just assuming that it
//! is UTF-8 like you can do in Rust.
//!
//! * **Character size** - C strings may use `char` or `wchar_t`-sized
//! characters; please **note** that C's `char` is different from Rust's.
//! The C standard leaves the actual sizes of those types open to
//! interpretation, but defines different APIs for strings made up of
//! each character type. Rust strings are always UTF-8, so different
//! Unicode characters will be encoded in a variable number of bytes
//! each. The Rust type [`char`] represents a '[Unicode scalar
//! value]', which is similar to, but not the same as, a '[Unicode
//! code point]'.
//!
//! * **Nul terminators and implicit string lengths** - Often, C
//! strings are nul-terminated, i.e., they have a `\0` character at the
//! end. The length of a string buffer is not stored, but has to be
//! calculated; to compute the length of a string, C code must
//! manually call a function like `strlen()` for `char`-based strings,
//! or `wcslen()` for `wchar_t`-based ones. Those functions return
//! the number of characters in the string excluding the nul
//! terminator, so the buffer length is really `len+1` characters.
//! Rust strings don't have a nul terminator; their length is always
//! stored and does not need to be calculated. While in Rust
//! accessing a string's length is an *O*(1) operation (because the
//! length is stored), in C it is an *O*(*n*) operation because the
//! length needs to be computed by scanning the string for the nul
//! terminator.
//!
//! * **Internal nul characters** - When C strings have a nul
//! terminator character, this usually means that they cannot have nul
//! characters in the middle — a nul character would essentially
//! truncate the string. Rust strings *can* have nul characters in
//! the middle, because nul does not have to mark the end of the
//! string in Rust.
//!
//! # Representations of non-Rust strings
//!
//! [`CString`] and [`CStr`] are useful when you need to transfer
//! UTF-8 strings to and from languages with a C ABI, like Python.
//!
//! * **From Rust to C:** [`CString`] represents an owned, C-friendly
//! string: it is nul-terminated, and has no internal nul characters.
//! Rust code can create a [`CString`] out of a normal string (provided
//! that the string doesn't have nul characters in the middle), and
//! then use a variety of methods to obtain a raw <code>\*mut [u8]</code> that can
//! then be passed as an argument to functions which use the C
//! conventions for strings.
//!
//! * **From C to Rust:** [`CStr`] represents a borrowed C string; it
//! is what you would use to wrap a raw <code>\*const [u8]</code> that you got from
//! a C function. A [`CStr`] is guaranteed to be a nul-terminated array
//! of bytes. Once you have a [`CStr`], you can convert it to a Rust
//! <code>&[str]</code> if it's valid UTF-8, or lossily convert it by adding
//! replacement characters.
//!
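//! A short round-trip sketch of the two directions (using only the stable
//! `CString`/`CStr` API):
//!
//! ```
//! use std::ffi::{CStr, CString};
//!
//! // Rust -> C: an owned, nul-terminated string with no interior nuls.
//! let owned = CString::new("hello").expect("no interior nul bytes");
//!
//! // C -> Rust: borrow it back as a `CStr` and recover a `&str`.
//! let borrowed: &CStr = owned.as_c_str();
//! assert_eq!(borrowed.to_str(), Ok("hello"));
//! ```
//!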
//! [`String`]: crate::string::String
//! [`CStr`]: core::ffi::CStr
#![stable(feature = "alloc_ffi", since = "1.64.0")]
#[doc(no_inline)]
#[stable(feature = "alloc_c_string", since = "1.64.0")]
pub use self::c_str::{FromVecWithNulError, IntoStringError, NulError};
#[doc(inline)]
#[stable(feature = "alloc_c_string", since = "1.64.0")]
pub use self::c_str::CString;
#[unstable(feature = "c_str_module", issue = "112134")]
pub mod c_str;

View File

@ -0,0 +1,638 @@
//! Utilities for formatting and printing `String`s.
//!
//! This module contains the runtime support for the [`format!`] syntax extension.
//! This macro is implemented in the compiler to emit calls to this module in
//! order to format arguments at runtime into strings.
//!
//! # Usage
//!
//! The [`format!`] macro is intended to be familiar to those coming from C's
//! `printf`/`fprintf` functions or Python's `str.format` function.
//!
//! Some examples of the [`format!`] extension are:
//!
//! ```
//! format!("Hello"); // => "Hello"
//! format!("Hello, {}!", "world"); // => "Hello, world!"
//! format!("The number is {}", 1); // => "The number is 1"
//! format!("{:?}", (3, 4)); // => "(3, 4)"
//! format!("{value}", value=4); // => "4"
//! let people = "Rustaceans";
//! format!("Hello {people}!"); // => "Hello Rustaceans!"
//! format!("{} {}", 1, 2); // => "1 2"
//! format!("{:04}", 42); // => "0042" with leading zeros
//! format!("{:#?}", (100, 200)); // => "(
//! // 100,
//! // 200,
//! // )"
//! ```
//!
//! From these, you can see that the first argument is a format string. It is
//! required by the compiler for this to be a string literal; it cannot be a
//! variable passed in (in order to perform validity checking). The compiler
//! will then parse the format string and determine if the list of arguments
//! provided is suitable to pass to this format string.
//!
//! To convert a single value to a string, use the [`to_string`] method. This
//! will use the [`Display`] formatting trait.
//!
//! ## Positional parameters
//!
//! Each formatting argument is allowed to specify which value argument it's
//! referencing, and if omitted it is assumed to be "the next argument". For
//! example, the format string `{} {} {}` would take three parameters, and they
//! would be formatted in the same order as they're given. The format string
//! `{2} {1} {0}`, however, would format arguments in reverse order.
//!
//! Things can get a little tricky once you start intermingling the two types of
//! positional specifiers. The "next argument" specifier can be thought of as an
//! iterator over the arguments. Each time a "next argument" specifier is seen,
//! the iterator advances. This leads to behavior like this:
//!
//! ```
//! format!("{1} {} {0} {}", 1, 2); // => "2 1 1 2"
//! ```
//!
//! The internal iterator over the arguments has not been advanced by the time
//! the first `{}` is seen, so it prints the first argument. Then upon reaching
//! the second `{}`, the iterator has advanced forward to the second argument.
//! Essentially, parameters that explicitly name their argument do not affect
//! parameters that do not name an argument in terms of positional specifiers.
//!
//! A format string is required to use all of its arguments; otherwise it is a
//! compile-time error. You may refer to the same argument more than once in the
//! format string.
//!
//! ## Named parameters
//!
//! Rust itself does not have a Python-like equivalent of named parameters to a
//! function, but the [`format!`] macro is a syntax extension that allows it to
//! leverage named parameters. Named parameters are listed at the end of the
//! argument list and have the syntax:
//!
//! ```text
//! identifier '=' expression
//! ```
//!
//! For example, the following [`format!`] expressions all use named arguments:
//!
//! ```
//! format!("{argument}", argument = "test"); // => "test"
//! format!("{name} {}", 1, name = 2); // => "2 1"
//! format!("{a} {c} {b}", a="a", b='b', c=3); // => "a 3 b"
//! ```
//!
//! If a named parameter does not appear in the argument list, `format!` will
//! reference a variable with that name in the current scope.
//!
//! ```
//! let argument = 2 + 2;
//! format!("{argument}"); // => "4"
//!
//! fn make_string(a: u32, b: &str) -> String {
//! format!("{b} {a}")
//! }
//! make_string(927, "label"); // => "label 927"
//! ```
//!
//! It is not valid to put positional parameters (those without names) after
//! arguments that have names. Like with positional parameters, it is not
//! valid to provide named parameters that are unused by the format string.
//!
//! # Formatting Parameters
//!
//! Each argument being formatted can be transformed by a number of formatting
//! parameters (corresponding to `format_spec` in [the syntax](#syntax)). These
//! parameters affect the string representation of what's being formatted.
//!
//! ## Width
//!
//! ```
//! // All of these print "Hello x    !"
//! println!("Hello {:5}!", "x");
//! println!("Hello {:1$}!", "x", 5);
//! println!("Hello {1:0$}!", 5, "x");
//! println!("Hello {:width$}!", "x", width = 5);
//! let width = 5;
//! println!("Hello {:width$}!", "x");
//! ```
//!
//! This is a parameter for the "minimum width" that the format should take up.
//! If the value's string does not fill up this many characters, then the
//! padding specified by fill/alignment will be used to take up the required
//! space (see below).
//!
//! The value for the width can also be provided as a [`usize`] in the list of
//! parameters by adding a postfix `$`, indicating that the second argument is
//! a [`usize`] specifying the width.
//!
//! Referring to an argument with the dollar syntax does not affect the "next
//! argument" counter, so it's usually a good idea to refer to arguments by
//! position, or use named arguments.
//!
//! ## Fill/Alignment
//!
//! ```
//! assert_eq!(format!("Hello {:<5}!", "x"), "Hello x !");
//! assert_eq!(format!("Hello {:-<5}!", "x"), "Hello x----!");
//! assert_eq!(format!("Hello {:^5}!", "x"), "Hello x !");
//! assert_eq!(format!("Hello {:>5}!", "x"), "Hello x!");
//! ```
//!
//! The optional fill character and alignment is provided normally in conjunction with the
//! [`width`](#width) parameter. It must be defined before `width`, right after the `:`.
//! This indicates that if the value being formatted is smaller than
//! `width` some extra characters will be printed around it.
//! Filling comes in the following variants for different alignments:
//!
//! * `[fill]<` - the argument is left-aligned in `width` columns
//! * `[fill]^` - the argument is center-aligned in `width` columns
//! * `[fill]>` - the argument is right-aligned in `width` columns
//!
//! The default [fill/alignment](#fillalignment) for non-numerics is a space and
//! left-aligned. The
//! default for numeric formatters is also a space character but with right-alignment. If
//! the `0` flag (see below) is specified for numerics, then the implicit fill character is
//! `0`.
//!
//! Note that alignment might not be implemented by some types. In particular, it
//! is not generally implemented for the `Debug` trait. A good way to ensure
//! padding is applied is to format your input, then pad this resulting string
//! to obtain your output:
//!
//! ```
//! println!("Hello {:^15}!", format!("{:?}", Some("hi"))); // => "Hello Some("hi") !"
//! ```
//!
//! ## Sign/`#`/`0`
//!
//! ```
//! assert_eq!(format!("Hello {:+}!", 5), "Hello +5!");
//! assert_eq!(format!("{:#x}!", 27), "0x1b!");
//! assert_eq!(format!("Hello {:05}!", 5), "Hello 00005!");
//! assert_eq!(format!("Hello {:05}!", -5), "Hello -0005!");
//! assert_eq!(format!("{:#010x}!", 27), "0x0000001b!");
//! ```
//!
//! These are all flags altering the behavior of the formatter.
//!
//! * `+` - This is intended for numeric types and indicates that the sign
//! should always be printed. By default only the negative sign of signed values
//! is printed, and the sign of positive or unsigned values is omitted.
//! This flag indicates that the correct sign (`+` or `-`) should always be printed.
//! * `-` - Currently not used
//! * `#` - This flag indicates that the "alternate" form of printing should
//! be used. The alternate forms are:
//! * `#?` - pretty-print the [`Debug`] formatting (adds linebreaks and indentation)
//! * `#x` - precedes the argument with a `0x`
//! * `#X` - precedes the argument with a `0x`
//! * `#b` - precedes the argument with a `0b`
//! * `#o` - precedes the argument with a `0o`
//!
//! See [Formatting traits](#formatting-traits) for a description of what the `?`, `x`, `X`,
//! `b`, and `o` flags do.
//!
//! * `0` - This is used to indicate for integer formats that the padding to `width` should
//! both be done with a `0` character as well as be sign-aware. A format
//! like `{:08}` would yield `00000001` for the integer `1`, while the
//! same format would yield `-0000001` for the integer `-1`. Notice that
//! the negative version has one fewer zero than the positive version.
//! Note that padding zeros are always placed after the sign (if any)
//! and before the digits. When used together with the `#` flag, a similar
//! rule applies: padding zeros are inserted after the prefix but before
//! the digits. The prefix is included in the total width.
//! This flag overrides the [fill character and alignment flag](#fillalignment);
//! a short check follows this list.
//!
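//! A minimal check of the sign-aware zero padding and prefix rules described
//! above:
//!
//! ```
//! assert_eq!(format!("{:08}", 1), "00000001");
//! assert_eq!(format!("{:08}", -1), "-0000001"); // the sign consumes one zero
//! assert_eq!(format!("{:#06x}", 27), "0x001b"); // zeros go after the `0x` prefix
//! ```
//!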
//! ## Precision
//!
//! For non-numeric types, this can be considered a "maximum width". If the resulting string is
//! longer than this width, then it is truncated down to this many characters and that truncated
//! value is emitted with proper `fill`, `alignment` and `width` if those parameters are set.
//!
//! For integral types, this is ignored.
//!
//! For floating-point types, this indicates how many digits after the decimal point should be
//! printed.
//!
//! There are three possible ways to specify the desired `precision`:
//!
//! 1. An integer `.N`:
//!
//! the integer `N` itself is the precision.
//!
//! 2. An integer or name followed by dollar sign `.N$`:
//!
//! use format *argument* `N` (which must be a `usize`) as the precision.
//!
//! 3. An asterisk `.*`:
//!
//! `.*` means that this `{...}` is associated with *two* format inputs rather than one:
//! - If a format string in the fashion of `{:<spec>.*}` is used, then the first input holds
//! the `usize` precision, and the second holds the value to print.
//! - If a format string in the fashion of `{<arg>:<spec>.*}` is used, then the `<arg>` part
//! refers to the value to print, and the `precision` is taken like it was specified with an
//! omitted positional parameter (`{}` instead of `{<arg>:}`).
//!
//! For example, the following calls all print the same thing `Hello x is 0.01000`:
//!
//! ```
//! // Hello {arg 0 ("x")} is {arg 1 (0.01) with precision specified inline (5)}
//! println!("Hello {0} is {1:.5}", "x", 0.01);
//!
//! // Hello {arg 1 ("x")} is {arg 2 (0.01) with precision specified in arg 0 (5)}
//! println!("Hello {1} is {2:.0$}", 5, "x", 0.01);
//!
//! // Hello {arg 0 ("x")} is {arg 2 (0.01) with precision specified in arg 1 (5)}
//! println!("Hello {0} is {2:.1$}", "x", 5, 0.01);
//!
//! // Hello {next arg -> arg 0 ("x")} is {second of next two args -> arg 2 (0.01) with precision
//! // specified in first of next two args -> arg 1 (5)}
//! println!("Hello {} is {:.*}", "x", 5, 0.01);
//!
//! // Hello {arg 1 ("x")} is {arg 2 (0.01) with precision
//! // specified in next arg -> arg 0 (5)}
//! println!("Hello {1} is {2:.*}", 5, "x", 0.01);
//!
//! // Hello {next arg -> arg 0 ("x")} is {arg 2 (0.01) with precision
//! // specified in next arg -> arg 1 (5)}
//! println!("Hello {} is {2:.*}", "x", 5, 0.01);
//!
//! // Hello {next arg -> arg 0 ("x")} is {arg "number" (0.01) with precision specified
//! // in arg "prec" (5)}
//! println!("Hello {} is {number:.prec$}", "x", prec = 5, number = 0.01);
//! ```
//!
//! While these:
//!
//! ```
//! println!("{}, `{name:.*}` has 3 fractional digits", "Hello", 3, name=1234.56);
//! println!("{}, `{name:.*}` has 3 characters", "Hello", 3, name="1234.56");
//! println!("{}, `{name:>8.*}` has 3 right-aligned characters", "Hello", 3, name="1234.56");
//! ```
//!
//! print three significantly different things:
//!
//! ```text
//! Hello, `1234.560` has 3 fractional digits
//! Hello, `123` has 3 characters
//! Hello, `     123` has 3 right-aligned characters
//! ```
//!
//! When truncating these values, Rust uses [round half-to-even](https://en.wikipedia.org/wiki/Rounding#Rounding_half_to_even),
//! which is the default rounding mode in IEEE 754.
//! For example,
//!
//! ```
//! print!("{0:.1$e}", 12345, 3);
//! print!("{0:.1$e}", 12355, 3);
//! ```
//!
//! would print:
//!
//! ```text
//! 1.234e4
//! 1.236e4
//! ```
//!
//! ## Localization
//!
//! In some programming languages, the behavior of string formatting functions
//! depends on the operating system's locale setting. The format functions
//! provided by Rust's standard library do not have any concept of locale and
//! will produce the same results on all systems regardless of user
//! configuration.
//!
//! For example, the following code will always print `1.5` even if the system
//! locale uses a decimal separator other than a dot.
//!
//! ```
//! println!("The value is {}", 1.5);
//! ```
//!
//! # Escaping
//!
//! The literal characters `{` and `}` may be included in a string by preceding
//! them with the same character. For example, the `{` character is escaped with
//! `{{` and the `}` character is escaped with `}}`.
//!
//! ```
//! assert_eq!(format!("Hello {{}}"), "Hello {}");
//! assert_eq!(format!("{{ Hello"), "{ Hello");
//! ```
//!
//! # Syntax
//!
//! To summarize, here you can find the full grammar of format strings.
//! The syntax for the formatting language used is drawn from other languages,
//! so it should not be too alien. Arguments are formatted with Python-like
//! syntax, meaning that arguments are surrounded by `{}` instead of the C-like
//! `%`. The actual grammar for the formatting syntax is:
//!
//! ```text
//! format_string := text [ maybe_format text ] *
//! maybe_format := '{' '{' | '}' '}' | format
//! format := '{' [ argument ] [ ':' format_spec ] [ ws ] * '}'
//! argument := integer | identifier
//!
//! format_spec := [[fill]align][sign]['#']['0'][width]['.' precision]type
//! fill := character
//! align := '<' | '^' | '>'
//! sign := '+' | '-'
//! width := count
//! precision := count | '*'
//! type := '' | '?' | 'x?' | 'X?' | identifier
//! count := parameter | integer
//! parameter := argument '$'
//! ```
//! In the above grammar,
//! - `text` must not contain any `'{'` or `'}'` characters,
//! - `ws` is any character for which [`char::is_whitespace`] returns `true`, has no semantic
//! meaning and is completely optional,
//! - `integer` is a decimal integer that may contain leading zeroes and must fit into a `usize` and
//! - `identifier` is an `IDENTIFIER_OR_KEYWORD` (not an `IDENTIFIER`) as defined by the [Rust language reference](https://doc.rust-lang.org/reference/identifiers.html).
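//!
//! As a sketch, here is one specifier exercising most of the grammar at once
//! (fill `*`, align `^`, sign `+`, width `10`, precision `2`):
//!
//! ```
//! assert_eq!(format!("{:*^+10.2}", 3.5), "**+3.50***");
//! ```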
//!
//! # Formatting traits
//!
//! When requesting that an argument be formatted with a particular type, you
//! are actually requesting that the argument's type implement a particular trait.
//! This allows multiple actual types to be formatted via `{:x}` (like [`i8`] as
//! well as [`isize`]). The current mapping of types to traits is:
//!
//! * *nothing* ⇒ [`Display`]
//! * `?` ⇒ [`Debug`]
//! * `x?` ⇒ [`Debug`] with lower-case hexadecimal integers
//! * `X?` ⇒ [`Debug`] with upper-case hexadecimal integers
//! * `o` ⇒ [`Octal`]
//! * `x` ⇒ [`LowerHex`]
//! * `X` ⇒ [`UpperHex`]
//! * `p` ⇒ [`Pointer`]
//! * `b` ⇒ [`Binary`]
//! * `e` ⇒ [`LowerExp`]
//! * `E` ⇒ [`UpperExp`]
//!
//! What this means is that any type of argument which implements the
//! [`fmt::Binary`][`Binary`] trait can then be formatted with `{:b}`. Implementations
//! are provided for these traits for a number of primitive types by the
//! standard library as well. If no format is specified (as in `{}` or `{:6}`),
//! then the format trait used is the [`Display`] trait.
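//!
//! For example, one value rendered through several of these traits:
//!
//! ```
//! assert_eq!(format!("{0} {0:b} {0:o} {0:x} {0:e}", 10), "10 1010 12 a 1e1");
//! ```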
//!
//! When implementing a format trait for your own type, you will have to
//! implement a method of the signature:
//!
//! ```
//! # #![allow(dead_code)]
//! # use std::fmt;
//! # struct Foo; // our custom type
//! # impl fmt::Display for Foo {
//! fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
//! # write!(f, "testing, testing")
//! # } }
//! ```
//!
//! Your type will be passed as `self` by-reference, and then the function
//! should emit output into the Formatter `f` which implements `fmt::Write`. It is up to each
//! format trait implementation to correctly adhere to the requested formatting parameters.
//! The values of these parameters can be accessed with methods of the
//! [`Formatter`] struct. In order to help with this, the [`Formatter`] struct also
//! provides some helper methods.
//!
//! Additionally, the return value of this function is [`fmt::Result`] which is a
//! type alias of <code>[Result]<(), [std::fmt::Error]></code>. Formatting implementations
//! should ensure that they propagate errors from the [`Formatter`] (e.g., when
//! calling [`write!`]). However, they should never return errors spuriously. That
//! is, a formatting implementation must and may only return an error if the
//! passed-in [`Formatter`] returns an error. This is because, contrary to what
//! the function signature might suggest, string formatting is an infallible
//! operation. This function only returns a result because writing to the
//! underlying stream might fail and it must provide a way to propagate the fact
//! that an error has occurred back up the stack.
//!
//! An example of implementing the formatting traits would look
//! like:
//!
//! ```
//! use std::fmt;
//!
//! #[derive(Debug)]
//! struct Vector2D {
//!     x: isize,
//!     y: isize,
//! }
//!
//! impl fmt::Display for Vector2D {
//!     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
//!         // The `f` value implements the `Write` trait, which is what the
//!         // write! macro is expecting. Note that this formatting ignores the
//!         // various flags provided to format strings.
//!         write!(f, "({}, {})", self.x, self.y)
//!     }
//! }
//!
//! // Different traits allow different forms of output of a type. The meaning
//! // of this format is to print the magnitude of a vector.
//! impl fmt::Binary for Vector2D {
//!     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
//!         let magnitude = (self.x * self.x + self.y * self.y) as f64;
//!         let magnitude = magnitude.sqrt();
//!
//!         // Respect the formatting flags by using the helper method
//!         // `pad_integral` on the Formatter object. See the method
//!         // documentation for details, and the function `pad` can be used
//!         // to pad strings.
//!         let decimals = f.precision().unwrap_or(3);
//!         let string = format!("{magnitude:.decimals$}");
//!         f.pad_integral(true, "", &string)
//!     }
//! }
//!
//! fn main() {
//!     let myvector = Vector2D { x: 3, y: 4 };
//!
//!     println!("{myvector}"); // => "(3, 4)"
//!     println!("{myvector:?}"); // => "Vector2D { x: 3, y: 4 }"
//!     println!("{myvector:10.3b}"); // => "     5.000"
//! }
//! ```
//!
//! ### `fmt::Display` vs `fmt::Debug`
//!
//! These two formatting traits have distinct purposes:
//!
//! - [`fmt::Display`][`Display`] implementations assert that the type can be faithfully
//! represented as a UTF-8 string at all times. It is **not** expected that
//! all types implement the [`Display`] trait.
//! - [`fmt::Debug`][`Debug`] implementations should be implemented for **all** public types.
//! Output will typically represent the internal state as faithfully as possible.
//! The purpose of the [`Debug`] trait is to facilitate debugging Rust code. In
//! most cases, using `#[derive(Debug)]` is sufficient and recommended.
//!
//! Some examples of the output from both traits:
//!
//! ```
//! assert_eq!(format!("{} {:?}", 3, 4), "3 4");
//! assert_eq!(format!("{} {:?}", 'a', 'b'), "a 'b'");
//! assert_eq!(format!("{} {:?}", "foo\n", "bar\n"), "foo\n \"bar\\n\"");
//! ```
//!
//! # Related macros
//!
//! There are a number of related macros in the [`format!`] family. The ones that
//! are currently implemented are:
//!
//! ```ignore (only-for-syntax-highlight)
//! format! // described above
//! write! // first argument is either a &mut io::Write or a &mut fmt::Write, the destination
//! writeln! // same as write but appends a newline
//! print! // the format string is printed to the standard output
//! println! // same as print but appends a newline
//! eprint! // the format string is printed to the standard error
//! eprintln! // same as eprint but appends a newline
//! format_args! // described below.
//! ```
//!
//! ### `write!`
//!
//! [`write!`] and [`writeln!`] are two macros which are used to emit the format string
//! to a specified stream. This is used to prevent intermediate allocations of
//! format strings and instead directly write the output. Under the hood, these
//! macros invoke the [`write_fmt`] method defined on the
//! [`std::io::Write`] and [`std::fmt::Write`] traits. Example usage is:
//!
//! ```
//! # #![allow(unused_must_use)]
//! use std::io::Write;
//! let mut w = Vec::new();
//! write!(&mut w, "Hello {}!", "world");
//! ```
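//!
//! The same call works against a [`std::fmt::Write`] sink; a `String` is the
//! simplest one:
//!
//! ```
//! # #![allow(unused_must_use)]
//! use std::fmt::Write;
//! let mut s = String::new();
//! write!(&mut s, "Hello {}!", "world");
//! assert_eq!(s, "Hello world!");
//! ```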
//!
//! ### `print!`
//!
//! This and [`println!`] emit their output to stdout. Similarly to the [`write!`]
//! macro, the goal of these macros is to avoid intermediate allocations when
//! printing output. Example usage is:
//!
//! ```
//! print!("Hello {}!", "world");
//! println!("I have a newline {}", "character at the end");
//! ```
//!
//! ### `eprint!`
//!
//! The [`eprint!`] and [`eprintln!`] macros are identical to
//! [`print!`] and [`println!`], respectively, except they emit their
//! output to stderr.
//!
//! ### `format_args!`
//!
//! [`format_args!`] is a curious macro used to safely pass around
//! an opaque object describing the format string. This object
//! does not require any heap allocations to create, and it only
//! references information on the stack. Under the hood, all of
//! the related macros are implemented in terms of this. First
//! off, some example usage is:
//!
//! ```
//! # #![allow(unused_must_use)]
//! use std::fmt;
//! use std::io::{self, Write};
//!
//! let mut some_writer = io::stdout();
//! write!(&mut some_writer, "{}", format_args!("print with a {}", "macro"));
//!
//! fn my_fmt_fn(args: fmt::Arguments<'_>) {
//!     write!(&mut io::stdout(), "{args}");
//! }
//! my_fmt_fn(format_args!(", or a {} too", "function"));
//! ```
//!
//! The result of the [`format_args!`] macro is a value of type [`fmt::Arguments`].
//! This structure can then be passed to the [`write`] and [`format`] functions
//! inside this module in order to process the format string.
//! The goal of this macro is to further avoid intermediate allocations when
//! dealing with format strings.
//!
//! For example, a logging library could use the standard formatting syntax, but
//! it would internally pass around this structure until it has been determined
//! where the output should go.
//!
//! [`fmt::Result`]: Result "fmt::Result"
//! [Result]: core::result::Result "std::result::Result"
//! [std::fmt::Error]: Error "fmt::Error"
//! [`write`]: write() "fmt::write"
//! [`to_string`]: crate::string::ToString::to_string "ToString::to_string"
//! [`write_fmt`]: ../../std/io/trait.Write.html#method.write_fmt
//! [`std::io::Write`]: ../../std/io/trait.Write.html
//! [`std::fmt::Write`]: ../../std/fmt/trait.Write.html
//! [`print!`]: ../../std/macro.print.html "print!"
//! [`println!`]: ../../std/macro.println.html "println!"
//! [`eprint!`]: ../../std/macro.eprint.html "eprint!"
//! [`eprintln!`]: ../../std/macro.eprintln.html "eprintln!"
//! [`format_args!`]: ../../std/macro.format_args.html "format_args!"
//! [`fmt::Arguments`]: Arguments "fmt::Arguments"
//! [`format`]: format() "fmt::format"
#![stable(feature = "rust1", since = "1.0.0")]
#[stable(feature = "fmt_flags_align", since = "1.28.0")]
pub use core::fmt::Alignment;
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::Error;
#[unstable(feature = "debug_closure_helpers", issue = "117729")]
pub use core::fmt::FormatterFn;
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{write, Arguments};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{Binary, Octal};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{Debug, Display};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{DebugList, DebugMap, DebugSet, DebugStruct, DebugTuple};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{Formatter, Result, Write};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{LowerExp, UpperExp};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{LowerHex, Pointer, UpperHex};
#[cfg(not(no_global_oom_handling))]
use crate::string;
/// The `format` function takes an [`Arguments`] struct and returns the resulting
/// formatted string.
///
/// The [`Arguments`] instance can be created with the [`format_args!`] macro.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::fmt;
///
/// let s = fmt::format(format_args!("Hello, {}!", "world"));
/// assert_eq!(s, "Hello, world!");
/// ```
///
/// Please note that using [`format!`] might be preferable.
/// Example:
///
/// ```
/// let s = format!("Hello, {}!", "world");
/// assert_eq!(s, "Hello, world!");
/// ```
///
/// [`format_args!`]: core::format_args
/// [`format!`]: crate::format
#[cfg(not(no_global_oom_handling))]
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn format(args: Arguments<'_>) -> string::String {
fn format_inner(args: Arguments<'_>) -> string::String {
let capacity = args.estimated_capacity();
let mut output = string::String::with_capacity(capacity);
output.write_fmt(args).expect("a formatting trait implementation returned an error");
output
}
args.as_str().map_or_else(|| format_inner(args), crate::borrow::ToOwned::to_owned)
}

View File

@ -0,0 +1,4 @@
//! Grep bootstrap for `MIRI_REPLACE_LIBRS_IF_NOT_TEST` to learn what this is about.
#![no_std]
extern crate alloc as realalloc;
pub use realalloc::*;

View File

@ -0,0 +1,279 @@
//! # The Rust core allocation and collections library
//!
//! This library provides smart pointers and collections for managing
//! heap-allocated values.
//!
//! This library, like core, normally doesn't need to be used directly
//! since its contents are re-exported in the [`std` crate](../std/index.html).
//! Crates that use the `#![no_std]` attribute however will typically
//! not depend on `std`, so they'd use this crate instead.
//!
//! ## Boxed values
//!
//! The [`Box`] type is a smart pointer type. There can only be one owner of a
//! [`Box`], and the owner can decide to mutate the contents, which live on the
//! heap.
//!
//! This type can be sent among threads efficiently as the size of a `Box` value
//! is the same as that of a pointer. Tree-like data structures are often built
//! with boxes because each node often has only one owner, the parent.
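//!
//! A two-line sketch of single ownership through a `Box`:
//!
//! ```
//! let boxed = Box::new(5); // the value lives on the heap
//! assert_eq!(*boxed, 5); // `boxed` is its sole owner and frees it on drop
//! ```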
//!
//! ## Reference counted pointers
//!
//! The [`Rc`] type is a non-threadsafe reference-counted pointer type intended
//! for sharing memory within a thread. An [`Rc`] pointer wraps a type, `T`, and
//! only allows access to `&T`, a shared reference.
//!
//! This type is useful when inherited mutability (such as using [`Box`]) is too
//! constraining for an application, and is often paired with the [`Cell`] or
//! [`RefCell`] types in order to allow mutation.
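//!
//! A short sketch of that pairing:
//!
//! ```
//! use std::cell::RefCell;
//! use std::rc::Rc;
//!
//! let shared = Rc::new(RefCell::new(0));
//! let alias = Rc::clone(&shared); // both handles point at one allocation
//! *alias.borrow_mut() += 1; // `RefCell` restores mutability behind the `&T`
//! assert_eq!(*shared.borrow(), 1);
//! ```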
//!
//! ## Atomically reference counted pointers
//!
//! The [`Arc`] type is the threadsafe equivalent of the [`Rc`] type. It
//! provides all the same functionality of [`Rc`], except it requires that the
//! contained type `T` is shareable. Additionally, [`Arc<T>`][`Arc`] is itself
//! sendable while [`Rc<T>`][`Rc`] is not.
//!
//! This type allows for shared access to the contained data, and is often
//! paired with synchronization primitives such as mutexes to allow mutation of
//! shared resources.
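//!
//! For instance, paired with a `Mutex` (a sketch using `std` types for brevity):
//!
//! ```
//! use std::sync::{Arc, Mutex};
//! use std::thread;
//!
//! let counter = Arc::new(Mutex::new(0));
//! let worker = Arc::clone(&counter); // an `Arc` clone is a cheap pointer copy
//! thread::spawn(move || *worker.lock().unwrap() += 1).join().unwrap();
//! assert_eq!(*counter.lock().unwrap(), 1);
//! ```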
//!
//! ## Collections
//!
//! Implementations of the most common general purpose data structures are
//! defined in this library. They are re-exported through the
//! [standard collections library](../std/collections/index.html).
//!
//! ## Heap interfaces
//!
//! The [`alloc`](alloc/index.html) module defines the low-level interface to the
//! default global allocator. It is not compatible with the libc allocator API.
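//!
//! A hedged sketch of that low-level interface (allocate, write, deallocate;
//! real code must handle a null return from the allocator):
//!
//! ```
//! use std::alloc::{alloc, dealloc, Layout};
//!
//! let layout = Layout::new::<u32>();
//! unsafe {
//!     let p = alloc(layout) as *mut u32;
//!     assert!(!p.is_null());
//!     p.write(42);
//!     assert_eq!(p.read(), 42);
//!     dealloc(p as *mut u8, layout);
//! }
//! ```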
//!
//! [`Arc`]: sync
//! [`Box`]: boxed
//! [`Cell`]: core::cell
//! [`Rc`]: rc
//! [`RefCell`]: core::cell
#![allow(unused_attributes)]
#![stable(feature = "alloc", since = "1.36.0")]
#![doc(
html_playground_url = "https://play.rust-lang.org/",
issue_tracker_base_url = "https://github.com/rust-lang/rust/issues/",
test(no_crate_inject, attr(allow(unused_variables), deny(warnings)))
)]
#![doc(cfg_hide(
not(test),
not(any(test, bootstrap)),
no_global_oom_handling,
not(no_global_oom_handling),
not(no_rc),
not(no_sync),
target_has_atomic = "ptr"
))]
#![doc(rust_logo)]
#![feature(rustdoc_internals)]
#![no_std]
#![needs_allocator]
// Lints:
#![deny(unsafe_op_in_unsafe_fn)]
#![deny(fuzzy_provenance_casts)]
#![warn(deprecated_in_future)]
#![warn(missing_debug_implementations)]
#![warn(missing_docs)]
#![allow(explicit_outlives_requirements)]
#![warn(multiple_supertrait_upcastable)]
#![allow(internal_features)]
#![allow(rustdoc::redundant_explicit_links)]
#![deny(ffi_unwind_calls)]
//
// Library features:
// tidy-alphabetical-start
#![cfg_attr(not(no_global_oom_handling), feature(const_alloc_error))]
#![cfg_attr(not(no_global_oom_handling), feature(const_btree_len))]
#![cfg_attr(test, feature(is_sorted))]
#![cfg_attr(test, feature(new_uninit))]
#![feature(alloc_layout_extra)]
#![feature(allocator_api)]
#![feature(array_chunks)]
#![feature(array_into_iter_constructors)]
#![feature(array_windows)]
#![feature(ascii_char)]
#![feature(assert_matches)]
#![feature(async_fn_traits)]
#![feature(async_iterator)]
#![feature(coerce_unsized)]
#![feature(const_align_of_val)]
#![feature(const_box)]
#![feature(const_cow_is_borrowed)]
#![feature(const_eval_select)]
#![feature(const_heap)]
#![feature(const_maybe_uninit_as_mut_ptr)]
#![feature(const_maybe_uninit_write)]
#![feature(const_option)]
#![feature(const_pin)]
#![feature(const_refs_to_cell)]
#![feature(const_size_of_val)]
#![feature(const_waker)]
#![feature(core_intrinsics)]
#![feature(deprecated_suggestion)]
#![feature(deref_pure_trait)]
#![feature(dispatch_from_dyn)]
#![feature(error_generic_member_access)]
#![feature(error_in_core)]
#![feature(exact_size_is_empty)]
#![feature(extend_one)]
#![feature(fmt_internals)]
#![feature(fn_traits)]
#![feature(hasher_prefixfree_extras)]
#![feature(hint_assert_unchecked)]
#![feature(inplace_iteration)]
#![feature(iter_advance_by)]
#![feature(iter_next_chunk)]
#![feature(iter_repeat_n)]
#![feature(layout_for_ptr)]
#![feature(local_waker)]
#![feature(maybe_uninit_slice)]
#![feature(maybe_uninit_uninit_array)]
#![feature(maybe_uninit_uninit_array_transpose)]
#![feature(panic_internals)]
#![feature(pattern)]
#![feature(ptr_internals)]
#![feature(ptr_metadata)]
#![feature(ptr_sub_ptr)]
#![feature(receiver_trait)]
#![feature(set_ptr_value)]
#![feature(sized_type_properties)]
#![feature(slice_from_ptr_range)]
#![feature(slice_index_methods)]
#![feature(slice_ptr_get)]
#![feature(slice_range)]
#![feature(std_internals)]
#![feature(str_internals)]
#![feature(strict_provenance)]
#![feature(trusted_fused)]
#![feature(trusted_len)]
#![feature(trusted_random_access)]
#![feature(try_trait_v2)]
#![feature(try_with_capacity)]
#![feature(tuple_trait)]
#![feature(unicode_internals)]
#![feature(unsize)]
#![feature(vec_pop_if)]
// tidy-alphabetical-end
//
// Language features:
// tidy-alphabetical-start
#![cfg_attr(bootstrap, feature(exclusive_range_pattern))]
#![cfg_attr(not(test), feature(coroutine_trait))]
#![cfg_attr(test, feature(panic_update_hook))]
#![cfg_attr(test, feature(test))]
#![feature(allocator_internals)]
#![feature(allow_internal_unstable)]
#![feature(c_unwind)]
#![feature(cfg_sanitize)]
#![feature(const_mut_refs)]
#![feature(const_precise_live_drops)]
#![feature(const_ptr_write)]
#![feature(const_trait_impl)]
#![feature(const_try)]
#![feature(decl_macro)]
#![feature(dropck_eyepatch)]
#![feature(fundamental)]
#![feature(hashmap_internals)]
#![feature(lang_items)]
#![feature(min_specialization)]
#![feature(multiple_supertrait_upcastable)]
#![feature(negative_impls)]
#![feature(never_type)]
#![feature(rustc_allow_const_fn_unstable)]
#![feature(rustc_attrs)]
#![feature(slice_internals)]
#![feature(staged_api)]
#![feature(stmt_expr_attributes)]
#![feature(unboxed_closures)]
#![feature(unsized_fn_params)]
#![feature(with_negative_coherence)]
#![rustc_preserve_ub_checks]
// tidy-alphabetical-end
//
// Rustdoc features:
#![feature(doc_cfg)]
#![feature(doc_cfg_hide)]
// Technically, this is a bug in rustdoc: rustdoc sees the documentation on `#[lang = slice_alloc]`
// blocks is for `&[T]`, which also has documentation using this feature in `core`, and gets mad
// that the feature-gate isn't enabled. Ideally, it wouldn't check for the feature gate for docs
// from other crates, but since this can only appear for lang items, it doesn't seem worth fixing.
#![feature(intra_doc_pointers)]
// Allow testing this library
#[cfg(test)]
#[macro_use]
extern crate std;
#[cfg(test)]
extern crate test;
#[cfg(test)]
mod testing;
// Module with internal macros used by other modules (needs to be included before other modules).
#[macro_use]
mod macros;
mod raw_vec;
// Heaps provided for low-level allocation strategies
pub mod alloc;
// Primitive types using the heaps above
// Need to conditionally define the mod from `boxed.rs` to avoid
// duplicating the lang-items when building in test cfg; but also need
// to allow code to have `use boxed::Box;` declarations.
#[cfg(not(test))]
pub mod boxed;
#[cfg(test)]
mod boxed {
pub use std::boxed::Box;
}
pub mod borrow;
pub mod collections;
#[cfg(all(not(no_rc), not(no_sync), not(no_global_oom_handling)))]
pub mod ffi;
pub mod fmt;
#[cfg(not(no_rc))]
pub mod rc;
pub mod slice;
pub mod str;
pub mod string;
#[cfg(all(not(no_rc), not(no_sync), target_has_atomic = "ptr"))]
pub mod sync;
#[cfg(all(not(no_global_oom_handling), not(no_rc), not(no_sync)))]
pub mod task;
#[cfg(test)]
mod tests;
pub mod vec;
#[doc(hidden)]
#[unstable(feature = "liballoc_internals", issue = "none", reason = "implementation detail")]
pub mod __export {
pub use core::format_args;
}
#[cfg(test)]
#[allow(dead_code)] // Not used in all configurations
pub(crate) mod test_helpers {
/// Copied from `std::test_helpers::test_rng`, since these tests rely on the
/// seed not being the same for every RNG invocation too.
pub(crate) fn test_rng() -> rand_xorshift::XorShiftRng {
use std::hash::{BuildHasher, Hash, Hasher};
let mut hasher = std::hash::RandomState::new().build_hasher();
std::panic::Location::caller().hash(&mut hasher);
let hc64 = hasher.finish();
let seed_vec =
hc64.to_le_bytes().into_iter().chain(0u8..8).collect::<crate::vec::Vec<u8>>();
let seed: [u8; 16] = seed_vec.as_slice().try_into().unwrap();
rand::SeedableRng::from_seed(seed)
}
}

View File

@ -0,0 +1,138 @@
/// Creates a [`Vec`] containing the arguments.
///
/// `vec!` allows `Vec`s to be defined with the same syntax as array expressions.
/// There are two forms of this macro:
///
/// - Create a [`Vec`] containing a given list of elements:
///
/// ```
/// let v = vec![1, 2, 3];
/// assert_eq!(v[0], 1);
/// assert_eq!(v[1], 2);
/// assert_eq!(v[2], 3);
/// ```
///
/// - Create a [`Vec`] from a given element and size:
///
/// ```
/// let v = vec![1; 3];
/// assert_eq!(v, [1, 1, 1]);
/// ```
///
/// Note that unlike array expressions this syntax supports all elements
/// which implement [`Clone`] and the number of elements doesn't have to be
/// a constant.
///
/// This will use `clone` to duplicate an expression, so one should be careful
/// using this with types having a nonstandard `Clone` implementation. For
/// example, `vec![Rc::new(1); 5]` will create a vector of five references
/// to the same boxed integer value, not five references pointing to independently
/// boxed integers.
///
/// Also, note that `vec![expr; 0]` is allowed, and produces an empty vector.
/// This will still evaluate `expr`, however, and immediately drop the resulting value, so
/// be mindful of side effects.
///
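/// For instance, a quick check of the `Rc` sharing described above, via the
/// strong count (`Rc` appears here only for illustration):
///
/// ```
/// use std::rc::Rc;
///
/// let v = vec![Rc::new(1); 5];
/// // All five elements are clones of one `Rc` and share a single allocation.
/// assert_eq!(Rc::strong_count(&v[0]), 5);
/// ```
///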
/// [`Vec`]: crate::vec::Vec
#[cfg(all(not(no_global_oom_handling), not(test)))]
#[macro_export]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_diagnostic_item = "vec_macro"]
#[allow_internal_unstable(rustc_attrs, liballoc_internals)]
macro_rules! vec {
() => (
$crate::__rust_force_expr!($crate::vec::Vec::new())
);
($elem:expr; $n:expr) => (
$crate::__rust_force_expr!($crate::vec::from_elem($elem, $n))
);
($($x:expr),+ $(,)?) => (
$crate::__rust_force_expr!(<[_]>::into_vec(
// This rustc_box is not required, but it produces a dramatic improvement in compile
// time when constructing arrays with many elements.
#[rustc_box]
$crate::boxed::Box::new([$($x),+])
))
);
}
// HACK(japaric): with cfg(test) the inherent `[T]::into_vec` method, which is
// required for this macro definition, is not available. Instead use the
// `slice::into_vec` function which is only available with cfg(test)
// NB see the slice::hack module in slice.rs for more information
#[cfg(all(not(no_global_oom_handling), test))]
#[allow(unused_macro_rules)]
macro_rules! vec {
() => (
$crate::vec::Vec::new()
);
($elem:expr; $n:expr) => (
$crate::vec::from_elem($elem, $n)
);
($($x:expr),*) => (
$crate::slice::into_vec($crate::boxed::Box::new([$($x),*]))
);
($($x:expr,)*) => (vec![$($x),*])
}
/// Creates a `String` using interpolation of runtime expressions.
///
/// The first argument `format!` receives is a format string. This must be a string
/// literal. The power of the formatting string is in the `{}`s contained.
/// Additional parameters passed to `format!` replace the `{}`s within the
/// formatting string in the order given unless named or positional parameters
/// are used.
///
/// See [the formatting syntax documentation in `std::fmt`](../std/fmt/index.html)
/// for details.
///
/// A common use for `format!` is concatenation and interpolation of strings.
/// The same convention is used with [`print!`] and [`write!`] macros,
/// depending on the intended destination of the string; all these macros internally use [`format_args!`].
///
/// To convert a single value to a string, use the [`to_string`] method. This
/// will use the [`Display`] formatting trait.
///
/// To concatenate literals into a `&'static str`, use the [`concat!`] macro.
///
/// [`print!`]: ../std/macro.print.html
/// [`write!`]: core::write
/// [`format_args!`]: core::format_args
/// [`to_string`]: crate::string::ToString
/// [`Display`]: core::fmt::Display
/// [`concat!`]: core::concat
///
/// # Panics
///
/// `format!` panics if a formatting trait implementation returns an error.
/// This indicates an incorrect implementation
/// since `fmt::Write for String` never returns an error itself.
///
/// # Examples
///
/// ```
/// format!("test"); // => "test"
/// format!("hello {}", "world!"); // => "hello world!"
/// format!("x = {}, y = {val}", 10, val = 30); // => "x = 10, y = 30"
/// let (x, y) = (1, 2);
/// format!("{x} + {y} = 3"); // => "1 + 2 = 3"
/// ```
#[macro_export]
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "format_macro")]
macro_rules! format {
($($arg:tt)*) => {{
let res = $crate::fmt::format($crate::__export::format_args!($($arg)*));
res
}}
}
/// Force AST node to an expression to improve diagnostics in pattern position.
#[doc(hidden)]
#[macro_export]
#[unstable(feature = "liballoc_internals", issue = "none", reason = "implementation detail")]
macro_rules! __rust_force_expr {
($e:expr) => {
$e
};
}

View File

@ -0,0 +1,613 @@
#![unstable(feature = "raw_vec_internals", reason = "unstable const warnings", issue = "none")]
use core::alloc::LayoutError;
use core::cmp;
use core::hint;
use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties};
use core::ptr::{self, NonNull, Unique};
#[cfg(not(no_global_oom_handling))]
use crate::alloc::handle_alloc_error;
use crate::alloc::{Allocator, Global, Layout};
use crate::boxed::Box;
use crate::collections::TryReserveError;
use crate::collections::TryReserveErrorKind::*;
#[cfg(test)]
mod tests;
// One central function responsible for reporting capacity overflows. This'll
// ensure that the code generation related to these panics is minimal as there's
// only one location which panics rather than a bunch throughout the module.
#[cfg(not(no_global_oom_handling))]
#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
fn capacity_overflow() -> ! {
panic!("capacity overflow");
}
enum AllocInit {
/// The contents of the new memory are uninitialized.
Uninitialized,
#[cfg(not(no_global_oom_handling))]
/// The new memory is guaranteed to be zeroed.
Zeroed,
}
#[repr(transparent)]
#[cfg_attr(target_pointer_width = "16", rustc_layout_scalar_valid_range_end(0x7fff))]
#[cfg_attr(target_pointer_width = "32", rustc_layout_scalar_valid_range_end(0x7fff_ffff))]
#[cfg_attr(target_pointer_width = "64", rustc_layout_scalar_valid_range_end(0x7fff_ffff_ffff_ffff))]
struct Cap(usize);
impl Cap {
const ZERO: Cap = unsafe { Cap(0) };
}
/// A low-level utility for more ergonomically allocating, reallocating, and deallocating
/// a buffer of memory on the heap without having to worry about all the corner cases
/// involved. This type is excellent for building your own data structures like Vec and VecDeque.
/// In particular:
///
/// * Produces `Unique::dangling()` on zero-sized types.
/// * Produces `Unique::dangling()` on zero-length allocations.
/// * Avoids freeing `Unique::dangling()`.
/// * Catches all overflows in capacity computations (promotes them to "capacity overflow" panics).
/// * Guards against 32-bit systems allocating more than isize::MAX bytes.
/// * Guards against overflowing your length.
/// * Calls `handle_alloc_error` for fallible allocations.
/// * Contains a `ptr::Unique` and thus endows the user with all related benefits.
/// * Uses the excess returned from the allocator to use the largest available capacity.
///
/// This type does not in any way inspect the memory that it manages. When dropped it *will*
/// free its memory, but it *won't* try to drop its contents. It is up to the user of `RawVec`
/// to handle the actual things *stored* inside of a `RawVec`.
///
/// Note that the excess of a zero-sized type is always infinite, so `capacity()` always returns
/// `usize::MAX`. This means that you need to be careful when round-tripping this type with a
/// `Box<[T]>`, since `capacity()` won't yield the length.
#[allow(missing_debug_implementations)]
pub(crate) struct RawVec<T, A: Allocator = Global> {
ptr: Unique<T>,
/// Never used for ZSTs; it's `capacity()`'s responsibility to return usize::MAX in that case.
///
/// # Safety
///
/// `cap` must be in the `0..=isize::MAX` range.
cap: Cap,
alloc: A,
}
impl<T> RawVec<T, Global> {
/// HACK(Centril): This exists because stable `const fn` can only call stable `const fn`, so
/// they cannot call `Self::new()`.
///
/// If you change `RawVec<T>::new` or dependencies, please take care to not introduce anything
/// that would truly const-call something unstable.
pub const NEW: Self = Self::new();
/// Creates the biggest possible `RawVec` (on the system heap)
/// without allocating. If `T` has positive size, then this makes a
/// `RawVec` with capacity `0`. If `T` is zero-sized, then it makes a
/// `RawVec` with capacity `usize::MAX`. Useful for implementing
/// delayed allocation.
#[must_use]
pub const fn new() -> Self {
Self::new_in(Global)
}
/// Creates a `RawVec` (on the system heap) with exactly the
/// capacity and alignment requirements for a `[T; capacity]`. This is
/// equivalent to calling `RawVec::new` when `capacity` is `0` or `T` is
/// zero-sized. Note that if `T` is zero-sized this means you will
/// *not* get a `RawVec` with the requested capacity.
///
/// Non-fallible version of `try_with_capacity`
///
/// # Panics
///
/// Panics if the requested capacity exceeds `isize::MAX` bytes.
///
/// # Aborts
///
/// Aborts on OOM.
#[cfg(not(any(no_global_oom_handling, test)))]
#[must_use]
#[inline]
pub fn with_capacity(capacity: usize) -> Self {
match Self::try_allocate_in(capacity, AllocInit::Uninitialized, Global) {
Ok(res) => res,
Err(err) => handle_error(err),
}
}
/// Like `with_capacity`, but guarantees the buffer is zeroed.
#[cfg(not(any(no_global_oom_handling, test)))]
#[must_use]
#[inline]
pub fn with_capacity_zeroed(capacity: usize) -> Self {
Self::with_capacity_zeroed_in(capacity, Global)
}
}
impl<T, A: Allocator> RawVec<T, A> {
// Tiny Vecs are dumb. Skip to:
    // - 8 if the element size is 1, because any heap allocator is likely
// to round up a request of less than 8 bytes to at least 8 bytes.
// - 4 if elements are moderate-sized (<= 1 KiB).
// - 1 otherwise, to avoid wasting too much space for very short Vecs.
pub(crate) const MIN_NON_ZERO_CAP: usize = if mem::size_of::<T>() == 1 {
8
} else if mem::size_of::<T>() <= 1024 {
4
} else {
1
};
/// Like `new`, but parameterized over the choice of allocator for
/// the returned `RawVec`.
pub const fn new_in(alloc: A) -> Self {
        // `cap: 0` means "unallocated". Zero-sized types are ignored.
Self { ptr: Unique::dangling(), cap: Cap::ZERO, alloc }
}
/// Like `with_capacity`, but parameterized over the choice of
/// allocator for the returned `RawVec`.
#[cfg(not(no_global_oom_handling))]
#[inline]
pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
match Self::try_allocate_in(capacity, AllocInit::Uninitialized, alloc) {
Ok(res) => res,
Err(err) => handle_error(err),
}
}
/// Like `try_with_capacity`, but parameterized over the choice of
/// allocator for the returned `RawVec`.
#[inline]
pub fn try_with_capacity_in(capacity: usize, alloc: A) -> Result<Self, TryReserveError> {
Self::try_allocate_in(capacity, AllocInit::Uninitialized, alloc)
}
/// Like `with_capacity_zeroed`, but parameterized over the choice
/// of allocator for the returned `RawVec`.
#[cfg(not(no_global_oom_handling))]
#[inline]
pub fn with_capacity_zeroed_in(capacity: usize, alloc: A) -> Self {
match Self::try_allocate_in(capacity, AllocInit::Zeroed, alloc) {
Ok(res) => res,
Err(err) => handle_error(err),
}
}
/// Converts the entire buffer into `Box<[MaybeUninit<T>]>` with the specified `len`.
///
/// Note that this will correctly reconstitute any `cap` changes
/// that may have been performed. (See description of type for details.)
///
/// # Safety
///
/// * `len` must be greater than or equal to the most recently requested capacity, and
/// * `len` must be less than or equal to `self.capacity()`.
///
/// Note, that the requested capacity and `self.capacity()` could differ, as
/// an allocator could overallocate and return a greater memory block than requested.
pub unsafe fn into_box(self, len: usize) -> Box<[MaybeUninit<T>], A> {
// Sanity-check one half of the safety requirement (we cannot check the other half).
debug_assert!(
len <= self.capacity(),
"`len` must be smaller than or equal to `self.capacity()`"
);
let me = ManuallyDrop::new(self);
unsafe {
let slice = ptr::slice_from_raw_parts_mut(me.ptr() as *mut MaybeUninit<T>, len);
Box::from_raw_in(slice, ptr::read(&me.alloc))
}
}
fn try_allocate_in(
capacity: usize,
init: AllocInit,
alloc: A,
) -> Result<Self, TryReserveError> {
// Don't allocate here because `Drop` will not deallocate when `capacity` is 0.
if T::IS_ZST || capacity == 0 {
Ok(Self::new_in(alloc))
} else {
// We avoid `unwrap_or_else` here because it bloats the amount of
// LLVM IR generated.
let layout = match Layout::array::<T>(capacity) {
Ok(layout) => layout,
Err(_) => return Err(CapacityOverflow.into()),
};
if let Err(err) = alloc_guard(layout.size()) {
return Err(err);
}
let result = match init {
AllocInit::Uninitialized => alloc.allocate(layout),
#[cfg(not(no_global_oom_handling))]
AllocInit::Zeroed => alloc.allocate_zeroed(layout),
};
let ptr = match result {
Ok(ptr) => ptr,
Err(_) => return Err(AllocError { layout, non_exhaustive: () }.into()),
};
// Allocators currently return a `NonNull<[u8]>` whose length
// matches the size requested. If that ever changes, the capacity
// here should change to `ptr.len() / mem::size_of::<T>()`.
Ok(Self { ptr: Unique::from(ptr.cast()), cap: unsafe { Cap(capacity) }, alloc })
}
}
/// Reconstitutes a `RawVec` from a pointer, capacity, and allocator.
///
/// # Safety
///
/// The `ptr` must be allocated (via the given allocator `alloc`), and with the given
/// `capacity`.
    /// The `capacity` cannot exceed `isize::MAX` for sized types (only a concern on 32-bit
    /// systems). For ZSTs, the capacity is ignored.
/// If the `ptr` and `capacity` come from a `RawVec` created via `alloc`, then this is
/// guaranteed.
#[inline]
pub unsafe fn from_raw_parts_in(ptr: *mut T, capacity: usize, alloc: A) -> Self {
let cap = if T::IS_ZST { Cap::ZERO } else { unsafe { Cap(capacity) } };
Self { ptr: unsafe { Unique::new_unchecked(ptr) }, cap, alloc }
}
/// A convenience method for hoisting the non-null precondition out of [`RawVec::from_raw_parts_in`].
///
/// # Safety
///
/// See [`RawVec::from_raw_parts_in`].
#[inline]
pub(crate) unsafe fn from_nonnull_in(ptr: NonNull<T>, capacity: usize, alloc: A) -> Self {
let cap = if T::IS_ZST { Cap::ZERO } else { unsafe { Cap(capacity) } };
Self { ptr: Unique::from(ptr), cap, alloc }
}
/// Gets a raw pointer to the start of the allocation. Note that this is
/// `Unique::dangling()` if `capacity == 0` or `T` is zero-sized. In the former case, you must
/// be careful.
#[inline]
pub fn ptr(&self) -> *mut T {
self.ptr.as_ptr()
}
#[inline]
pub fn non_null(&self) -> NonNull<T> {
NonNull::from(self.ptr)
}
/// Gets the capacity of the allocation.
///
/// This will always be `usize::MAX` if `T` is zero-sized.
#[inline(always)]
pub fn capacity(&self) -> usize {
if T::IS_ZST { usize::MAX } else { self.cap.0 }
}
/// Returns a shared reference to the allocator backing this `RawVec`.
pub fn allocator(&self) -> &A {
&self.alloc
}
fn current_memory(&self) -> Option<(NonNull<u8>, Layout)> {
if T::IS_ZST || self.cap.0 == 0 {
None
} else {
// We could use Layout::array here which ensures the absence of isize and usize overflows
// and could hypothetically handle differences between stride and size, but this memory
// has already been allocated so we know it can't overflow and currently Rust does not
            // support such types. So we can do better by skipping some checks and avoiding an unwrap.
const { assert!(mem::size_of::<T>() % mem::align_of::<T>() == 0) };
unsafe {
let align = mem::align_of::<T>();
let size = mem::size_of::<T>().unchecked_mul(self.cap.0);
let layout = Layout::from_size_align_unchecked(size, align);
Some((self.ptr.cast().into(), layout))
}
}
}
/// Ensures that the buffer contains at least enough space to hold `len +
/// additional` elements. If it doesn't already have enough capacity, will
/// reallocate enough space plus comfortable slack space to get amortized
/// *O*(1) behavior. Will limit this behavior if it would needlessly cause
/// itself to panic.
///
/// If `len` exceeds `self.capacity()`, this may fail to actually allocate
/// the requested space. This is not really unsafe, but the unsafe
/// code *you* write that relies on the behavior of this function may break.
///
/// This is ideal for implementing a bulk-push operation like `extend`.
///
/// # Panics
///
/// Panics if the new capacity exceeds `isize::MAX` _bytes_.
///
/// # Aborts
///
/// Aborts on OOM.
#[cfg(not(no_global_oom_handling))]
#[inline]
pub fn reserve(&mut self, len: usize, additional: usize) {
// Callers expect this function to be very cheap when there is already sufficient capacity.
// Therefore, we move all the resizing and error-handling logic from grow_amortized and
// handle_reserve behind a call, while making sure that this function is likely to be
// inlined as just a comparison and a call if the comparison fails.
#[cold]
fn do_reserve_and_handle<T, A: Allocator>(
slf: &mut RawVec<T, A>,
len: usize,
additional: usize,
) {
if let Err(err) = slf.grow_amortized(len, additional) {
handle_error(err);
}
}
if self.needs_to_grow(len, additional) {
do_reserve_and_handle(self, len, additional);
}
}
/// A specialized version of `self.reserve(len, 1)` which requires the
/// caller to ensure `len == self.capacity()`.
#[cfg(not(no_global_oom_handling))]
#[inline(never)]
pub fn grow_one(&mut self) {
if let Err(err) = self.grow_amortized(self.cap.0, 1) {
handle_error(err);
}
}
/// The same as `reserve`, but returns on errors instead of panicking or aborting.
pub fn try_reserve(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
if self.needs_to_grow(len, additional) {
self.grow_amortized(len, additional)?;
}
unsafe {
// Inform the optimizer that the reservation has succeeded or wasn't needed
hint::assert_unchecked(!self.needs_to_grow(len, additional));
}
Ok(())
}
/// Ensures that the buffer contains at least enough space to hold `len +
/// additional` elements. If it doesn't already, will reallocate the
/// minimum possible amount of memory necessary. Generally this will be
/// exactly the amount of memory necessary, but in principle the allocator
/// is free to give back more than we asked for.
///
/// If `len` exceeds `self.capacity()`, this may fail to actually allocate
/// the requested space. This is not really unsafe, but the unsafe code
/// *you* write that relies on the behavior of this function may break.
///
/// # Panics
///
/// Panics if the new capacity exceeds `isize::MAX` _bytes_.
///
/// # Aborts
///
/// Aborts on OOM.
#[cfg(not(no_global_oom_handling))]
pub fn reserve_exact(&mut self, len: usize, additional: usize) {
if let Err(err) = self.try_reserve_exact(len, additional) {
handle_error(err);
}
}
/// The same as `reserve_exact`, but returns on errors instead of panicking or aborting.
pub fn try_reserve_exact(
&mut self,
len: usize,
additional: usize,
) -> Result<(), TryReserveError> {
if self.needs_to_grow(len, additional) {
self.grow_exact(len, additional)?;
}
unsafe {
// Inform the optimizer that the reservation has succeeded or wasn't needed
hint::assert_unchecked(!self.needs_to_grow(len, additional));
}
Ok(())
}
/// Shrinks the buffer down to the specified capacity. If the given amount
/// is 0, actually completely deallocates.
///
/// # Panics
///
/// Panics if the given amount is *larger* than the current capacity.
///
/// # Aborts
///
/// Aborts on OOM.
#[cfg(not(no_global_oom_handling))]
pub fn shrink_to_fit(&mut self, cap: usize) {
if let Err(err) = self.shrink(cap) {
handle_error(err);
}
}
}
impl<T, A: Allocator> RawVec<T, A> {
    /// Returns `true` if the buffer needs to grow to fulfill the needed extra capacity.
/// Mainly used to make inlining reserve-calls possible without inlining `grow`.
fn needs_to_grow(&self, len: usize, additional: usize) -> bool {
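        // `wrapping_sub` keeps this branch-free: if `len` exceeds `self.capacity()`,
        // the subtraction wraps to a huge value and this returns `false`, which is
        // the "may fail to actually allocate" caveat documented on `reserve`.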
additional > self.capacity().wrapping_sub(len)
}
    /// # Safety
///
/// `cap` must not exceed `isize::MAX`.
unsafe fn set_ptr_and_cap(&mut self, ptr: NonNull<[u8]>, cap: usize) {
// Allocators currently return a `NonNull<[u8]>` whose length matches
// the size requested. If that ever changes, the capacity here should
// change to `ptr.len() / mem::size_of::<T>()`.
self.ptr = Unique::from(ptr.cast());
self.cap = unsafe { Cap(cap) };
}
// This method is usually instantiated many times. So we want it to be as
// small as possible, to improve compile times. But we also want as much of
// its contents to be statically computable as possible, to make the
// generated code run faster. Therefore, this method is carefully written
// so that all of the code that depends on `T` is within it, while as much
// of the code that doesn't depend on `T` as possible is in functions that
// are non-generic over `T`.
fn grow_amortized(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
// This is ensured by the calling contexts.
debug_assert!(additional > 0);
if T::IS_ZST {
            // Since we return a capacity of `usize::MAX` when the element size is
            // 0, getting to here necessarily means the `RawVec` is overfull.
return Err(CapacityOverflow.into());
}
// Nothing we can really do about these checks, sadly.
let required_cap = len.checked_add(additional).ok_or(CapacityOverflow)?;
// This guarantees exponential growth. The doubling cannot overflow
// because `cap <= isize::MAX` and the type of `cap` is `usize`.
let cap = cmp::max(self.cap.0 * 2, required_cap);
let cap = cmp::max(Self::MIN_NON_ZERO_CAP, cap);
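        // E.g. a `Vec<u32>` (`MIN_NON_ZERO_CAP` is 4 for moderate-sized elements)
        // pushed one element at a time moves through capacities 4, 8, 16, 32, ...
        // rather than 1, 2, 3, 4, ...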
let new_layout = Layout::array::<T>(cap);
// `finish_grow` is non-generic over `T`.
let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?;
// SAFETY: finish_grow would have resulted in a capacity overflow if we tried to allocate more than isize::MAX items
unsafe { self.set_ptr_and_cap(ptr, cap) };
Ok(())
}
// The constraints on this method are much the same as those on
// `grow_amortized`, but this method is usually instantiated less often so
// it's less critical.
fn grow_exact(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
if T::IS_ZST {
// Since we return a capacity of `usize::MAX` when the type size is
// 0, getting to here necessarily means the `RawVec` is overfull.
return Err(CapacityOverflow.into());
}
let cap = len.checked_add(additional).ok_or(CapacityOverflow)?;
let new_layout = Layout::array::<T>(cap);
// `finish_grow` is non-generic over `T`.
let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?;
// SAFETY: finish_grow would have resulted in a capacity overflow if we tried to allocate more than isize::MAX items
unsafe {
self.set_ptr_and_cap(ptr, cap);
}
Ok(())
}
#[cfg(not(no_global_oom_handling))]
fn shrink(&mut self, cap: usize) -> Result<(), TryReserveError> {
assert!(cap <= self.capacity(), "Tried to shrink to a larger capacity");
let (ptr, layout) = if let Some(mem) = self.current_memory() { mem } else { return Ok(()) };
        // See current_memory() for why this assert is here
const { assert!(mem::size_of::<T>() % mem::align_of::<T>() == 0) };
// If shrinking to 0, deallocate the buffer. We don't reach this point
// for the T::IS_ZST case since current_memory() will have returned
// None.
if cap == 0 {
unsafe { self.alloc.deallocate(ptr, layout) };
self.ptr = Unique::dangling();
self.cap = Cap::ZERO;
} else {
let ptr = unsafe {
// `Layout::array` cannot overflow here because it would have
// overflowed earlier when capacity was larger.
let new_size = mem::size_of::<T>().unchecked_mul(cap);
let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
self.alloc
.shrink(ptr, layout, new_layout)
.map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })?
};
// SAFETY: if the allocation is valid, then the capacity is too
unsafe {
self.set_ptr_and_cap(ptr, cap);
}
}
Ok(())
}
}
// This function is outside `RawVec` to minimize compile times. See the comment
// above `RawVec::grow_amortized` for details. (The `A` parameter isn't
// significant, because the number of different `A` types seen in practice is
// much smaller than the number of `T` types.)
#[inline(never)]
fn finish_grow<A>(
new_layout: Result<Layout, LayoutError>,
current_memory: Option<(NonNull<u8>, Layout)>,
alloc: &mut A,
) -> Result<NonNull<[u8]>, TryReserveError>
where
A: Allocator,
{
// Check for the error here to minimize the size of `RawVec::grow_*`.
let new_layout = new_layout.map_err(|_| CapacityOverflow)?;
alloc_guard(new_layout.size())?;
let memory = if let Some((ptr, old_layout)) = current_memory {
debug_assert_eq!(old_layout.align(), new_layout.align());
unsafe {
// The allocator checks for alignment equality
hint::assert_unchecked(old_layout.align() == new_layout.align());
alloc.grow(ptr, old_layout, new_layout)
}
} else {
alloc.allocate(new_layout)
};
memory.map_err(|_| AllocError { layout: new_layout, non_exhaustive: () }.into())
}
unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawVec<T, A> {
/// Frees the memory owned by the `RawVec` *without* trying to drop its contents.
fn drop(&mut self) {
if let Some((ptr, layout)) = self.current_memory() {
unsafe { self.alloc.deallocate(ptr, layout) }
}
}
}
// Central function for reserve error handling.
#[cfg(not(no_global_oom_handling))]
#[cold]
fn handle_error(e: TryReserveError) -> ! {
match e.kind() {
CapacityOverflow => capacity_overflow(),
AllocError { layout, .. } => handle_alloc_error(layout),
}
}
// We need to guarantee the following:
// * We don't ever allocate `> isize::MAX` byte-size objects.
// * We don't overflow `usize::MAX` and actually allocate too little.
//
// On 64-bit we just need to check for overflow since trying to allocate
// `> isize::MAX` bytes will surely fail. On 32-bit and 16-bit we need to add
// an extra guard for this in case we're running on a platform which can use
// all 4GB in user-space, e.g., PAE or x32.
#[inline]
fn alloc_guard(alloc_size: usize) -> Result<(), TryReserveError> {
if usize::BITS < 64 && alloc_size > isize::MAX as usize {
Err(CapacityOverflow.into())
} else {
Ok(())
}
}

View File

@ -0,0 +1,173 @@
use super::*;
use core::mem::size_of;
use std::cell::Cell;
#[test]
fn allocator_param() {
use crate::alloc::AllocError;
// Writing a test of integration between third-party
// allocators and `RawVec` is a little tricky because the `RawVec`
// API does not expose fallible allocation methods, so we
// cannot check what happens when allocator is exhausted
// (beyond detecting a panic).
//
// Instead, this just checks that the `RawVec` methods do at
// least go through the Allocator API when it reserves
// storage.
// A dumb allocator that consumes a fixed amount of fuel
// before allocation attempts start failing.
struct BoundedAlloc {
fuel: Cell<usize>,
}
unsafe impl Allocator for BoundedAlloc {
fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
let size = layout.size();
if size > self.fuel.get() {
return Err(AllocError);
}
match Global.allocate(layout) {
ok @ Ok(_) => {
self.fuel.set(self.fuel.get() - size);
ok
}
err @ Err(_) => err,
}
}
unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
unsafe { Global.deallocate(ptr, layout) }
}
}
let a = BoundedAlloc { fuel: Cell::new(500) };
let mut v: RawVec<u8, _> = RawVec::with_capacity_in(50, a);
assert_eq!(v.alloc.fuel.get(), 450);
v.reserve(50, 150); // (causes a realloc, thus using 50 + 150 = 200 units of fuel)
assert_eq!(v.alloc.fuel.get(), 250);
}
#[test]
fn reserve_does_not_overallocate() {
{
let mut v: RawVec<u32> = RawVec::new();
// First, `reserve` allocates like `reserve_exact`.
v.reserve(0, 9);
assert_eq!(9, v.capacity());
}
{
let mut v: RawVec<u32> = RawVec::new();
v.reserve(0, 7);
assert_eq!(7, v.capacity());
// 97 is more than double of 7, so `reserve` should work
// like `reserve_exact`.
v.reserve(7, 90);
assert_eq!(97, v.capacity());
}
{
let mut v: RawVec<u32> = RawVec::new();
v.reserve(0, 12);
assert_eq!(12, v.capacity());
v.reserve(12, 3);
// 3 is less than half of 12, so `reserve` must grow
// exponentially. At the time of writing this test grow
// factor is 2, so new capacity is 24, however, grow factor
// of 1.5 is OK too. Hence `>= 18` in assert.
assert!(v.capacity() >= 12 + 12 / 2);
}
}
struct ZST;
// A `RawVec` holding zero-sized elements should always look like this.
fn zst_sanity<T>(v: &RawVec<T>) {
assert_eq!(v.capacity(), usize::MAX);
assert_eq!(v.ptr(), core::ptr::Unique::<T>::dangling().as_ptr());
assert_eq!(v.current_memory(), None);
}
#[test]
fn zst() {
let cap_err = Err(crate::collections::TryReserveErrorKind::CapacityOverflow.into());
assert_eq!(std::mem::size_of::<ZST>(), 0);
// All these different ways of creating the RawVec produce the same thing.
let v: RawVec<ZST> = RawVec::new();
zst_sanity(&v);
let v: RawVec<ZST> = RawVec::with_capacity_in(100, Global);
zst_sanity(&v);
let v: RawVec<ZST> = RawVec::with_capacity_in(100, Global);
zst_sanity(&v);
let v: RawVec<ZST> = RawVec::try_allocate_in(0, AllocInit::Uninitialized, Global).unwrap();
zst_sanity(&v);
let v: RawVec<ZST> = RawVec::try_allocate_in(100, AllocInit::Uninitialized, Global).unwrap();
zst_sanity(&v);
let mut v: RawVec<ZST> =
RawVec::try_allocate_in(usize::MAX, AllocInit::Uninitialized, Global).unwrap();
zst_sanity(&v);
// Check all these operations work as expected with zero-sized elements.
assert!(!v.needs_to_grow(100, usize::MAX - 100));
assert!(v.needs_to_grow(101, usize::MAX - 100));
zst_sanity(&v);
v.reserve(100, usize::MAX - 100);
//v.reserve(101, usize::MAX - 100); // panics, in `zst_reserve_panic` below
zst_sanity(&v);
v.reserve_exact(100, usize::MAX - 100);
//v.reserve_exact(101, usize::MAX - 100); // panics, in `zst_reserve_exact_panic` below
zst_sanity(&v);
assert_eq!(v.try_reserve(100, usize::MAX - 100), Ok(()));
assert_eq!(v.try_reserve(101, usize::MAX - 100), cap_err);
zst_sanity(&v);
assert_eq!(v.try_reserve_exact(100, usize::MAX - 100), Ok(()));
assert_eq!(v.try_reserve_exact(101, usize::MAX - 100), cap_err);
zst_sanity(&v);
assert_eq!(v.grow_amortized(100, usize::MAX - 100), cap_err);
assert_eq!(v.grow_amortized(101, usize::MAX - 100), cap_err);
zst_sanity(&v);
assert_eq!(v.grow_exact(100, usize::MAX - 100), cap_err);
assert_eq!(v.grow_exact(101, usize::MAX - 100), cap_err);
zst_sanity(&v);
}
#[test]
#[should_panic(expected = "capacity overflow")]
fn zst_reserve_panic() {
let mut v: RawVec<ZST> = RawVec::new();
zst_sanity(&v);
v.reserve(101, usize::MAX - 100);
}
#[test]
#[should_panic(expected = "capacity overflow")]
fn zst_reserve_exact_panic() {
let mut v: RawVec<ZST> = RawVec::new();
zst_sanity(&v);
v.reserve_exact(101, usize::MAX - 100);
}
#[test]
fn niches() {
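// `RawVec`'s fields carry niches: the pointer is non-null, and (in this
// vintage of the code) the capacity is stored in a type restricted to at
// most `isize::MAX`, so several layers of `Option` can encode their
// discriminants in spare bit patterns instead of adding a tag.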
let baseline = size_of::<RawVec<u8>>();
assert_eq!(size_of::<Option<RawVec<u8>>>(), baseline);
assert_eq!(size_of::<Option<Option<RawVec<u8>>>>(), baseline);
assert_eq!(size_of::<Option<Option<Option<RawVec<u8>>>>>(), baseline);
}

View File

@ -0,0 +1,616 @@
use super::*;
use std::cell::RefCell;
use std::clone::Clone;
#[test]
fn test_clone() {
let x = Rc::new(RefCell::new(5));
let y = x.clone();
*x.borrow_mut() = 20;
assert_eq!(*y.borrow(), 20);
}
#[test]
fn test_simple() {
let x = Rc::new(5);
assert_eq!(*x, 5);
}
#[test]
fn test_simple_clone() {
let x = Rc::new(5);
let y = x.clone();
assert_eq!(*x, 5);
assert_eq!(*y, 5);
}
#[test]
fn test_destructor() {
let x: Rc<Box<_>> = Rc::new(Box::new(5));
assert_eq!(**x, 5);
}
#[test]
fn test_live() {
let x = Rc::new(5);
let y = Rc::downgrade(&x);
assert!(y.upgrade().is_some());
}
#[test]
fn test_dead() {
let x = Rc::new(5);
let y = Rc::downgrade(&x);
drop(x);
assert!(y.upgrade().is_none());
}
#[test]
fn weak_self_cyclic() {
struct Cycle {
x: RefCell<Option<Weak<Cycle>>>,
}
let a = Rc::new(Cycle { x: RefCell::new(None) });
let b = Rc::downgrade(&a.clone());
*a.x.borrow_mut() = Some(b);
// hopefully we don't double-free (or leak)...
}
#[test]
fn is_unique() {
let x = Rc::new(3);
assert!(Rc::is_unique(&x));
let y = x.clone();
assert!(!Rc::is_unique(&x));
drop(y);
assert!(Rc::is_unique(&x));
let w = Rc::downgrade(&x);
assert!(!Rc::is_unique(&x));
drop(w);
assert!(Rc::is_unique(&x));
}
#[test]
fn test_strong_count() {
let a = Rc::new(0);
assert!(Rc::strong_count(&a) == 1);
let w = Rc::downgrade(&a);
assert!(Rc::strong_count(&a) == 1);
let b = w.upgrade().expect("upgrade of live rc failed");
assert!(Rc::strong_count(&b) == 2);
assert!(Rc::strong_count(&a) == 2);
drop(w);
drop(a);
assert!(Rc::strong_count(&b) == 1);
let c = b.clone();
assert!(Rc::strong_count(&b) == 2);
assert!(Rc::strong_count(&c) == 2);
}
#[test]
fn test_weak_count() {
let a = Rc::new(0);
assert!(Rc::strong_count(&a) == 1);
assert!(Rc::weak_count(&a) == 0);
let w = Rc::downgrade(&a);
assert!(Rc::strong_count(&a) == 1);
assert!(Rc::weak_count(&a) == 1);
drop(w);
assert!(Rc::strong_count(&a) == 1);
assert!(Rc::weak_count(&a) == 0);
let c = a.clone();
assert!(Rc::strong_count(&a) == 2);
assert!(Rc::weak_count(&a) == 0);
drop(c);
}
#[test]
fn weak_counts() {
assert_eq!(Weak::weak_count(&Weak::<u64>::new()), 0);
assert_eq!(Weak::strong_count(&Weak::<u64>::new()), 0);
let a = Rc::new(0);
let w = Rc::downgrade(&a);
assert_eq!(Weak::strong_count(&w), 1);
assert_eq!(Weak::weak_count(&w), 1);
let w2 = w.clone();
assert_eq!(Weak::strong_count(&w), 1);
assert_eq!(Weak::weak_count(&w), 2);
assert_eq!(Weak::strong_count(&w2), 1);
assert_eq!(Weak::weak_count(&w2), 2);
drop(w);
assert_eq!(Weak::strong_count(&w2), 1);
assert_eq!(Weak::weak_count(&w2), 1);
let a2 = a.clone();
assert_eq!(Weak::strong_count(&w2), 2);
assert_eq!(Weak::weak_count(&w2), 1);
drop(a2);
drop(a);
assert_eq!(Weak::strong_count(&w2), 0);
assert_eq!(Weak::weak_count(&w2), 0);
drop(w2);
}
#[test]
fn try_unwrap() {
let x = Rc::new(3);
assert_eq!(Rc::try_unwrap(x), Ok(3));
let x = Rc::new(4);
let _y = x.clone();
assert_eq!(Rc::try_unwrap(x), Err(Rc::new(4)));
let x = Rc::new(5);
let _w = Rc::downgrade(&x);
assert_eq!(Rc::try_unwrap(x), Ok(5));
}
#[test]
fn into_inner() {
let x = Rc::new(3);
assert_eq!(Rc::into_inner(x), Some(3));
let x = Rc::new(4);
let y = Rc::clone(&x);
assert_eq!(Rc::into_inner(x), None);
assert_eq!(Rc::into_inner(y), Some(4));
let x = Rc::new(5);
let _w = Rc::downgrade(&x);
assert_eq!(Rc::into_inner(x), Some(5));
}
#[test]
fn into_from_raw() {
let x = Rc::new(Box::new("hello"));
let y = x.clone();
let x_ptr = Rc::into_raw(x);
drop(y);
unsafe {
assert_eq!(**x_ptr, "hello");
let x = Rc::from_raw(x_ptr);
assert_eq!(**x, "hello");
assert_eq!(Rc::try_unwrap(x).map(|x| *x), Ok("hello"));
}
}
#[test]
fn test_into_from_raw_unsized() {
use std::fmt::Display;
use std::string::ToString;
let rc: Rc<str> = Rc::from("foo");
let ptr = Rc::into_raw(rc.clone());
let rc2 = unsafe { Rc::from_raw(ptr) };
assert_eq!(unsafe { &*ptr }, "foo");
assert_eq!(rc, rc2);
let rc: Rc<dyn Display> = Rc::new(123);
let ptr = Rc::into_raw(rc.clone());
let rc2 = unsafe { Rc::from_raw(ptr) };
assert_eq!(unsafe { &*ptr }.to_string(), "123");
assert_eq!(rc2.to_string(), "123");
}
#[test]
fn into_from_weak_raw() {
let x = Rc::new(Box::new("hello"));
let y = Rc::downgrade(&x);
let y_ptr = Weak::into_raw(y);
unsafe {
assert_eq!(**y_ptr, "hello");
let y = Weak::from_raw(y_ptr);
let y_up = Weak::upgrade(&y).unwrap();
assert_eq!(**y_up, "hello");
drop(y_up);
assert_eq!(Rc::try_unwrap(x).map(|x| *x), Ok("hello"));
}
}
#[test]
fn test_into_from_weak_raw_unsized() {
use std::fmt::Display;
use std::string::ToString;
let arc: Rc<str> = Rc::from("foo");
let weak: Weak<str> = Rc::downgrade(&arc);
let ptr = Weak::into_raw(weak.clone());
let weak2 = unsafe { Weak::from_raw(ptr) };
assert_eq!(unsafe { &*ptr }, "foo");
assert!(weak.ptr_eq(&weak2));
let arc: Rc<dyn Display> = Rc::new(123);
let weak: Weak<dyn Display> = Rc::downgrade(&arc);
let ptr = Weak::into_raw(weak.clone());
let weak2 = unsafe { Weak::from_raw(ptr) };
assert_eq!(unsafe { &*ptr }.to_string(), "123");
assert!(weak.ptr_eq(&weak2));
}
#[test]
fn get_mut() {
let mut x = Rc::new(3);
*Rc::get_mut(&mut x).unwrap() = 4;
assert_eq!(*x, 4);
let y = x.clone();
assert!(Rc::get_mut(&mut x).is_none());
drop(y);
assert!(Rc::get_mut(&mut x).is_some());
let _w = Rc::downgrade(&x);
assert!(Rc::get_mut(&mut x).is_none());
}
#[test]
fn test_cowrc_clone_make_unique() {
let mut cow0 = Rc::new(75);
let mut cow1 = cow0.clone();
let mut cow2 = cow1.clone();
assert!(75 == *Rc::make_mut(&mut cow0));
assert!(75 == *Rc::make_mut(&mut cow1));
assert!(75 == *Rc::make_mut(&mut cow2));
*Rc::make_mut(&mut cow0) += 1;
*Rc::make_mut(&mut cow1) += 2;
*Rc::make_mut(&mut cow2) += 3;
assert!(76 == *cow0);
assert!(77 == *cow1);
assert!(78 == *cow2);
// none should point to the same backing memory
assert!(*cow0 != *cow1);
assert!(*cow0 != *cow2);
assert!(*cow1 != *cow2);
}
#[test]
fn test_cowrc_clone_unique2() {
let mut cow0 = Rc::new(75);
let cow1 = cow0.clone();
let cow2 = cow1.clone();
assert!(75 == *cow0);
assert!(75 == *cow1);
assert!(75 == *cow2);
*Rc::make_mut(&mut cow0) += 1;
assert!(76 == *cow0);
assert!(75 == *cow1);
assert!(75 == *cow2);
// cow1 and cow2 should share the same contents
// cow0 should have a unique reference
assert!(*cow0 != *cow1);
assert!(*cow0 != *cow2);
assert!(*cow1 == *cow2);
}
#[test]
fn test_cowrc_clone_weak() {
let mut cow0 = Rc::new(75);
let cow1_weak = Rc::downgrade(&cow0);
assert!(75 == *cow0);
assert!(75 == *cow1_weak.upgrade().unwrap());
*Rc::make_mut(&mut cow0) += 1;
assert!(76 == *cow0);
assert!(cow1_weak.upgrade().is_none());
}
#[test]
fn test_show() {
let foo = Rc::new(75);
assert_eq!(format!("{foo:?}"), "75");
}
#[test]
fn test_unsized() {
let foo: Rc<[i32]> = Rc::new([1, 2, 3]);
assert_eq!(foo, foo.clone());
}
#[test]
fn test_maybe_thin_unsized() {
// If/when custom thin DSTs exist, this test should be updated to use one
use std::ffi::{CStr, CString};
let x: Rc<CStr> = Rc::from(CString::new("swordfish").unwrap().into_boxed_c_str());
assert_eq!(format!("{x:?}"), "\"swordfish\"");
let y: Weak<CStr> = Rc::downgrade(&x);
drop(x);
// At this point, the weak points to a dropped DST
assert!(y.upgrade().is_none());
// But we still need to be able to get the alloc layout to drop.
// CStr has no drop glue, but custom DSTs might, and need to work.
drop(y);
}
#[test]
fn test_from_owned() {
let foo = 123;
let foo_rc = Rc::from(foo);
assert!(123 == *foo_rc);
}
#[test]
fn test_new_weak() {
let foo: Weak<usize> = Weak::new();
assert!(foo.upgrade().is_none());
}
#[test]
fn test_ptr_eq() {
let five = Rc::new(5);
let same_five = five.clone();
let other_five = Rc::new(5);
assert!(Rc::ptr_eq(&five, &same_five));
assert!(!Rc::ptr_eq(&five, &other_five));
}
#[test]
fn test_from_str() {
let r: Rc<str> = Rc::from("foo");
assert_eq!(&r[..], "foo");
}
#[test]
fn test_copy_from_slice() {
let s: &[u32] = &[1, 2, 3];
let r: Rc<[u32]> = Rc::from(s);
assert_eq!(&r[..], [1, 2, 3]);
}
#[test]
fn test_clone_from_slice() {
#[derive(Clone, Debug, Eq, PartialEq)]
struct X(u32);
let s: &[X] = &[X(1), X(2), X(3)];
let r: Rc<[X]> = Rc::from(s);
assert_eq!(&r[..], s);
}
#[test]
#[should_panic]
fn test_clone_from_slice_panic() {
use std::string::{String, ToString};
struct Fail(u32, String);
impl Clone for Fail {
fn clone(&self) -> Fail {
if self.0 == 2 {
panic!();
}
Fail(self.0, self.1.clone())
}
}
let s: &[Fail] =
&[Fail(0, "foo".to_string()), Fail(1, "bar".to_string()), Fail(2, "baz".to_string())];
// Should panic, but not cause memory corruption
let _r: Rc<[Fail]> = Rc::from(s);
}
#[test]
fn test_from_box() {
let b: Box<u32> = Box::new(123);
let r: Rc<u32> = Rc::from(b);
assert_eq!(*r, 123);
}
#[test]
fn test_from_box_str() {
use std::string::String;
let s = String::from("foo").into_boxed_str();
let r: Rc<str> = Rc::from(s);
assert_eq!(&r[..], "foo");
}
#[test]
fn test_from_box_slice() {
let s = vec![1, 2, 3].into_boxed_slice();
let r: Rc<[u32]> = Rc::from(s);
assert_eq!(&r[..], [1, 2, 3]);
}
#[test]
fn test_from_box_trait() {
use std::fmt::Display;
use std::string::ToString;
let b: Box<dyn Display> = Box::new(123);
let r: Rc<dyn Display> = Rc::from(b);
assert_eq!(r.to_string(), "123");
}
#[test]
fn test_from_box_trait_zero_sized() {
use std::fmt::Debug;
let b: Box<dyn Debug> = Box::new(());
let r: Rc<dyn Debug> = Rc::from(b);
assert_eq!(format!("{r:?}"), "()");
}
#[test]
fn test_from_vec() {
let v = vec![1, 2, 3];
let r: Rc<[u32]> = Rc::from(v);
assert_eq!(&r[..], [1, 2, 3]);
}
#[test]
fn test_downcast() {
use std::any::Any;
let r1: Rc<dyn Any> = Rc::new(i32::MAX);
let r2: Rc<dyn Any> = Rc::new("abc");
assert!(r1.clone().downcast::<u32>().is_err());
let r1i32 = r1.downcast::<i32>();
assert!(r1i32.is_ok());
assert_eq!(r1i32.unwrap(), Rc::new(i32::MAX));
assert!(r2.clone().downcast::<i32>().is_err());
let r2str = r2.downcast::<&'static str>();
assert!(r2str.is_ok());
assert_eq!(r2str.unwrap(), Rc::new("abc"));
}
#[test]
fn test_array_from_slice() {
let v = vec![1, 2, 3];
let r: Rc<[u32]> = Rc::from(v);
let a: Result<Rc<[u32; 3]>, _> = r.clone().try_into();
assert!(a.is_ok());
let a: Result<Rc<[u32; 2]>, _> = r.clone().try_into();
assert!(a.is_err());
}
#[test]
fn test_rc_cyclic_with_zero_refs() {
struct ZeroRefs {
inner: Weak<ZeroRefs>,
}
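// `Rc::new_cyclic` hands the closure a `Weak` to the not-yet-initialized
// allocation; it cannot be upgraded during construction. Here the closure
// discards that `Weak`, so the finished `Rc` ends up with no weak refs.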
let zero_refs = Rc::new_cyclic(|inner| {
assert_eq!(inner.strong_count(), 0);
assert!(inner.upgrade().is_none());
ZeroRefs { inner: Weak::new() }
});
assert_eq!(Rc::strong_count(&zero_refs), 1);
assert_eq!(Rc::weak_count(&zero_refs), 0);
assert_eq!(zero_refs.inner.strong_count(), 0);
assert_eq!(zero_refs.inner.weak_count(), 0);
}
#[test]
fn test_rc_cyclic_with_one_ref() {
struct OneRef {
inner: Weak<OneRef>,
}
let one_ref = Rc::new_cyclic(|inner| {
assert_eq!(inner.strong_count(), 0);
assert!(inner.upgrade().is_none());
OneRef { inner: inner.clone() }
});
assert_eq!(Rc::strong_count(&one_ref), 1);
assert_eq!(Rc::weak_count(&one_ref), 1);
let one_ref2 = Weak::upgrade(&one_ref.inner).unwrap();
assert!(Rc::ptr_eq(&one_ref, &one_ref2));
assert_eq!(one_ref.inner.strong_count(), 2);
assert_eq!(one_ref.inner.weak_count(), 1);
}
#[test]
fn test_rc_cyclic_with_two_ref() {
struct TwoRefs {
inner: Weak<TwoRefs>,
inner1: Weak<TwoRefs>,
}
let two_refs = Rc::new_cyclic(|inner| {
assert_eq!(inner.strong_count(), 0);
assert!(inner.upgrade().is_none());
TwoRefs { inner: inner.clone(), inner1: inner.clone() }
});
assert_eq!(Rc::strong_count(&two_refs), 1);
assert_eq!(Rc::weak_count(&two_refs), 2);
let two_ref3 = Weak::upgrade(&two_refs.inner).unwrap();
assert!(Rc::ptr_eq(&two_refs, &two_ref3));
let two_ref2 = Weak::upgrade(&two_refs.inner1).unwrap();
assert!(Rc::ptr_eq(&two_refs, &two_ref2));
assert_eq!(Rc::strong_count(&two_refs), 3);
assert_eq!(Rc::weak_count(&two_refs), 2);
}
#[test]
fn test_unique_rc_weak() {
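// A `Weak` taken from a `UniqueRc` stays un-upgradeable until the
// `UniqueRc` is converted into a plain `Rc`; only then does the value
// become reachable through it.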
let rc = UniqueRc::new(42);
let weak = UniqueRc::downgrade(&rc);
assert!(weak.upgrade().is_none());
let _rc = UniqueRc::into_rc(rc);
assert_eq!(*weak.upgrade().unwrap(), 42);
}
#[test]
fn test_unique_rc_drop_weak() {
let rc = UniqueRc::new(42);
let weak = UniqueRc::downgrade(&rc);
mem::drop(weak);
let rc = UniqueRc::into_rc(rc);
assert_eq!(*rc, 42);
}
#[test]
fn test_unique_rc_drops_contents() {
let mut dropped = false;
struct DropMe<'a>(&'a mut bool);
impl Drop for DropMe<'_> {
fn drop(&mut self) {
*self.0 = true;
}
}
{
let rc = UniqueRc::new(DropMe(&mut dropped));
drop(rc);
}
assert!(dropped);
}
#[test]
fn test_unique_rc_weak_clone_holding_ref() {
let mut v = UniqueRc::new(0u8);
let w = UniqueRc::downgrade(&v);
let r = &mut *v;
let _ = w.clone(); // touch weak count
*r = 123;
}

View File

@ -0,0 +1,888 @@
//! Utilities for the slice primitive type.
//!
//! *[See also the slice primitive type](slice).*
//!
//! Most of the structs in this module are iterator types which can only be created
//! using a certain function. For example, `slice.iter()` yields an [`Iter`].
//!
//! A few functions are provided to create a slice from a value reference
//! or from a raw pointer.
#![stable(feature = "rust1", since = "1.0.0")]
// Many of the `use` imports in this module are only used in the test configuration.
// It's cleaner to just turn off the unused_imports warning than to fix them.
#![cfg_attr(test, allow(unused_imports, dead_code))]
use core::borrow::{Borrow, BorrowMut};
#[cfg(not(no_global_oom_handling))]
use core::cmp::Ordering::{self, Less};
#[cfg(not(no_global_oom_handling))]
use core::mem::{self, SizedTypeProperties};
#[cfg(not(no_global_oom_handling))]
use core::ptr;
#[cfg(not(no_global_oom_handling))]
use core::slice::sort;
use crate::alloc::Allocator;
#[cfg(not(no_global_oom_handling))]
use crate::alloc::{self, Global};
#[cfg(not(no_global_oom_handling))]
use crate::borrow::ToOwned;
use crate::boxed::Box;
use crate::vec::Vec;
#[cfg(test)]
mod tests;
#[unstable(feature = "array_chunks", issue = "74985")]
pub use core::slice::ArrayChunks;
#[unstable(feature = "array_chunks", issue = "74985")]
pub use core::slice::ArrayChunksMut;
#[unstable(feature = "array_windows", issue = "75027")]
pub use core::slice::ArrayWindows;
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
pub use core::slice::EscapeAscii;
#[stable(feature = "slice_get_slice", since = "1.28.0")]
pub use core::slice::SliceIndex;
#[stable(feature = "from_ref", since = "1.28.0")]
pub use core::slice::{from_mut, from_ref};
#[unstable(feature = "slice_from_ptr_range", issue = "89792")]
pub use core::slice::{from_mut_ptr_range, from_ptr_range};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::slice::{from_raw_parts, from_raw_parts_mut};
#[unstable(feature = "slice_range", issue = "76393")]
pub use core::slice::{range, try_range};
#[stable(feature = "slice_group_by", since = "1.77.0")]
pub use core::slice::{ChunkBy, ChunkByMut};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::slice::{Chunks, Windows};
#[stable(feature = "chunks_exact", since = "1.31.0")]
pub use core::slice::{ChunksExact, ChunksExactMut};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::slice::{ChunksMut, Split, SplitMut};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::slice::{Iter, IterMut};
#[stable(feature = "rchunks", since = "1.31.0")]
pub use core::slice::{RChunks, RChunksExact, RChunksExactMut, RChunksMut};
#[stable(feature = "slice_rsplit", since = "1.27.0")]
pub use core::slice::{RSplit, RSplitMut};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::slice::{RSplitN, RSplitNMut, SplitN, SplitNMut};
#[stable(feature = "split_inclusive", since = "1.51.0")]
pub use core::slice::{SplitInclusive, SplitInclusiveMut};
////////////////////////////////////////////////////////////////////////////////
// Basic slice extension methods
////////////////////////////////////////////////////////////////////////////////
// HACK(japaric) needed for the implementation of `vec!` macro during testing
// N.B., see the `hack` module in this file for more details.
#[cfg(test)]
pub use hack::into_vec;
// HACK(japaric) needed for the implementation of `Vec::clone` during testing
// N.B., see the `hack` module in this file for more details.
#[cfg(test)]
pub use hack::to_vec;
// HACK(japaric): With cfg(test) `impl [T]` is not available, these three
// functions are actually methods that are in `impl [T]` but not in
// `core::slice::SliceExt` - we need to supply these functions for the
// `test_permutations` test
pub(crate) mod hack {
use core::alloc::Allocator;
use crate::boxed::Box;
use crate::vec::Vec;
// We shouldn't add an `#[inline]` attribute to this since it is mostly used
// by the `vec!` macro, where inlining causes a perf regression. See #71204
// for discussion and perf results.
pub fn into_vec<T, A: Allocator>(b: Box<[T], A>) -> Vec<T, A> {
unsafe {
let len = b.len();
let (b, alloc) = Box::into_raw_with_allocator(b);
Vec::from_raw_parts_in(b as *mut T, len, len, alloc)
}
}
#[cfg(not(no_global_oom_handling))]
#[inline]
pub fn to_vec<T: ConvertVec, A: Allocator>(s: &[T], alloc: A) -> Vec<T, A> {
T::to_vec(s, alloc)
}
#[cfg(not(no_global_oom_handling))]
pub trait ConvertVec {
fn to_vec<A: Allocator>(s: &[Self], alloc: A) -> Vec<Self, A>
where
Self: Sized;
}
#[cfg(not(no_global_oom_handling))]
impl<T: Clone> ConvertVec for T {
#[inline]
default fn to_vec<A: Allocator>(s: &[Self], alloc: A) -> Vec<Self, A> {
struct DropGuard<'a, T, A: Allocator> {
vec: &'a mut Vec<T, A>,
num_init: usize,
}
impl<'a, T, A: Allocator> Drop for DropGuard<'a, T, A> {
#[inline]
fn drop(&mut self) {
// SAFETY:
// items were marked initialized in the loop below
unsafe {
self.vec.set_len(self.num_init);
}
}
}
let mut vec = Vec::with_capacity_in(s.len(), alloc);
let mut guard = DropGuard { vec: &mut vec, num_init: 0 };
let slots = guard.vec.spare_capacity_mut();
// .take(slots.len()) is necessary for LLVM to remove bounds checks
// and has better codegen than zip.
for (i, b) in s.iter().enumerate().take(slots.len()) {
guard.num_init = i;
slots[i].write(b.clone());
}
core::mem::forget(guard);
// SAFETY:
// the vec was allocated and initialized above to at least this length.
unsafe {
vec.set_len(s.len());
}
vec
}
}
#[cfg(not(no_global_oom_handling))]
impl<T: Copy> ConvertVec for T {
#[inline]
fn to_vec<A: Allocator>(s: &[Self], alloc: A) -> Vec<Self, A> {
let mut v = Vec::with_capacity_in(s.len(), alloc);
// SAFETY:
// allocated above with the capacity of `s`, and initialized to `s.len()` by
// `copy_to_nonoverlapping` below.
unsafe {
s.as_ptr().copy_to_nonoverlapping(v.as_mut_ptr(), s.len());
v.set_len(s.len());
}
v
}
}
}
#[cfg(not(test))]
impl<T> [T] {
/// Sorts the slice.
///
/// This sort is stable (i.e., does not reorder equal elements) and *O*(*n* \* log(*n*)) worst-case.
///
/// When applicable, unstable sorting is preferred because it is generally faster than stable
/// sorting and it doesn't allocate auxiliary memory.
/// See [`sort_unstable`](slice::sort_unstable).
///
/// # Current implementation
///
/// The current algorithm is an adaptive, iterative merge sort inspired by
/// [timsort](https://en.wikipedia.org/wiki/Timsort).
/// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
/// two or more sorted sequences concatenated one after another.
///
/// Also, it allocates temporary storage half the size of `self`, but for short slices a
/// non-allocating insertion sort is used instead.
///
/// # Examples
///
/// ```
/// let mut v = [-5, 4, 1, -3, 2];
///
/// v.sort();
/// assert!(v == [-5, -3, 1, 2, 4]);
/// ```
#[cfg(not(no_global_oom_handling))]
#[rustc_allow_incoherent_impl]
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn sort(&mut self)
where
T: Ord,
{
stable_sort(self, T::lt);
}
/// Sorts the slice with a comparator function.
///
/// This sort is stable (i.e., does not reorder equal elements) and *O*(*n* \* log(*n*)) worst-case.
///
/// The comparator function must define a total ordering for the elements in the slice. If
/// the ordering is not total, the order of the elements is unspecified. An order is a
/// total order if it is (for all `a`, `b` and `c`):
///
/// * total and antisymmetric: exactly one of `a < b`, `a == b` or `a > b` is true, and
/// * transitive, `a < b` and `b < c` implies `a < c`. The same must hold for both `==` and `>`.
///
/// For example, while [`f64`] doesn't implement [`Ord`] because `NaN != NaN`, we can use
/// `partial_cmp` as our sort function when we know the slice doesn't contain a `NaN`.
///
/// ```
/// let mut floats = [5f64, 4.0, 1.0, 3.0, 2.0];
/// floats.sort_by(|a, b| a.partial_cmp(b).unwrap());
/// assert_eq!(floats, [1.0, 2.0, 3.0, 4.0, 5.0]);
/// ```
///
/// When applicable, unstable sorting is preferred because it is generally faster than stable
/// sorting and it doesn't allocate auxiliary memory.
/// See [`sort_unstable_by`](slice::sort_unstable_by).
///
/// # Current implementation
///
/// The current algorithm is an adaptive, iterative merge sort inspired by
/// [timsort](https://en.wikipedia.org/wiki/Timsort).
/// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
/// two or more sorted sequences concatenated one after another.
///
/// Also, it allocates temporary storage half the size of `self`, but for short slices a
/// non-allocating insertion sort is used instead.
///
/// # Examples
///
/// ```
/// let mut v = [5, 4, 1, 3, 2];
/// v.sort_by(|a, b| a.cmp(b));
/// assert!(v == [1, 2, 3, 4, 5]);
///
/// // reverse sorting
/// v.sort_by(|a, b| b.cmp(a));
/// assert!(v == [5, 4, 3, 2, 1]);
/// ```
#[cfg(not(no_global_oom_handling))]
#[rustc_allow_incoherent_impl]
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn sort_by<F>(&mut self, mut compare: F)
where
F: FnMut(&T, &T) -> Ordering,
{
stable_sort(self, |a, b| compare(a, b) == Less);
}
/// Sorts the slice with a key extraction function.
///
/// This sort is stable (i.e., does not reorder equal elements) and *O*(*m* \* *n* \* log(*n*))
/// worst-case, where the key function is *O*(*m*).
///
/// For expensive key functions (e.g. functions that are not simple property accesses or
/// basic operations), [`sort_by_cached_key`](slice::sort_by_cached_key) is likely to be
/// significantly faster, as it does not recompute element keys.
///
/// When applicable, unstable sorting is preferred because it is generally faster than stable
/// sorting and it doesn't allocate auxiliary memory.
/// See [`sort_unstable_by_key`](slice::sort_unstable_by_key).
///
/// # Current implementation
///
/// The current algorithm is an adaptive, iterative merge sort inspired by
/// [timsort](https://en.wikipedia.org/wiki/Timsort).
/// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
/// two or more sorted sequences concatenated one after another.
///
/// Also, it allocates temporary storage half the size of `self`, but for short slices a
/// non-allocating insertion sort is used instead.
///
/// # Examples
///
/// ```
/// let mut v = [-5i32, 4, 1, -3, 2];
///
/// v.sort_by_key(|k| k.abs());
/// assert!(v == [1, 2, -3, 4, -5]);
/// ```
#[cfg(not(no_global_oom_handling))]
#[rustc_allow_incoherent_impl]
#[stable(feature = "slice_sort_by_key", since = "1.7.0")]
#[inline]
pub fn sort_by_key<K, F>(&mut self, mut f: F)
where
F: FnMut(&T) -> K,
K: Ord,
{
stable_sort(self, |a, b| f(a).lt(&f(b)));
}
/// Sorts the slice with a key extraction function.
///
/// During sorting, the key function is called at most once per element, by using
/// temporary storage to remember the results of key evaluation.
/// The order of calls to the key function is unspecified and may change in future versions
/// of the standard library.
///
/// This sort is stable (i.e., does not reorder equal elements) and *O*(*m* \* *n* + *n* \* log(*n*))
/// worst-case, where the key function is *O*(*m*).
///
/// For simple key functions (e.g., functions that are property accesses or
/// basic operations), [`sort_by_key`](slice::sort_by_key) is likely to be
/// faster.
///
/// # Current implementation
///
/// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
/// which combines the fast average case of randomized quicksort with the fast worst case of
/// heapsort, while achieving linear time on slices with certain patterns. It uses some
/// randomization to avoid degenerate cases, but with a fixed seed to always provide
/// deterministic behavior.
///
/// In the worst case, the algorithm allocates temporary storage in a `Vec<(K, usize)>` the
/// length of the slice.
///
/// # Examples
///
/// ```
/// let mut v = [-5i32, 4, 32, -3, 2];
///
/// v.sort_by_cached_key(|k| k.to_string());
/// assert!(v == [-3, -5, 2, 32, 4]);
/// ```
///
/// [pdqsort]: https://github.com/orlp/pdqsort
#[cfg(not(no_global_oom_handling))]
#[rustc_allow_incoherent_impl]
#[stable(feature = "slice_sort_by_cached_key", since = "1.34.0")]
#[inline]
pub fn sort_by_cached_key<K, F>(&mut self, f: F)
where
F: FnMut(&T) -> K,
K: Ord,
{
// Helper macro for indexing our vector by the smallest possible type, to reduce allocation.
macro_rules! sort_by_key {
($t:ty, $slice:ident, $f:ident) => {{
let mut indices: Vec<_> =
$slice.iter().map($f).enumerate().map(|(i, k)| (k, i as $t)).collect();
// The elements of `indices` are unique, as they are indexed, so any sort will be
// stable with respect to the original slice. We use `sort_unstable` here because
// it avoids the auxiliary memory a stable sort would allocate.
indices.sort_unstable();
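// Apply the sorted permutation in place. Initially `indices[i].1` is the
// original position of the i-th smallest element; entries before `i` have
// already been processed and act as forwarding pointers to wherever their
// occupants were swapped. Chase those forwards to the element's current
// slot, then swap it into position `i`.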
for i in 0..$slice.len() {
let mut index = indices[i].1;
while (index as usize) < i {
index = indices[index as usize].1;
}
indices[i].1 = index;
$slice.swap(i, index as usize);
}
}};
}
let sz_u8 = mem::size_of::<(K, u8)>();
let sz_u16 = mem::size_of::<(K, u16)>();
let sz_u32 = mem::size_of::<(K, u32)>();
let sz_usize = mem::size_of::<(K, usize)>();
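// Pick the narrowest index type that can address every element and whose
// pairing with `K` actually shrinks the temporary allocation.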
let len = self.len();
if len < 2 {
return;
}
if sz_u8 < sz_u16 && len <= (u8::MAX as usize) {
return sort_by_key!(u8, self, f);
}
if sz_u16 < sz_u32 && len <= (u16::MAX as usize) {
return sort_by_key!(u16, self, f);
}
if sz_u32 < sz_usize && len <= (u32::MAX as usize) {
return sort_by_key!(u32, self, f);
}
sort_by_key!(usize, self, f)
}
/// Copies `self` into a new `Vec`.
///
/// # Examples
///
/// ```
/// let s = [10, 40, 30];
/// let x = s.to_vec();
/// // Here, `s` and `x` can be modified independently.
/// ```
#[cfg(not(no_global_oom_handling))]
#[rustc_allow_incoherent_impl]
#[rustc_conversion_suggestion]
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn to_vec(&self) -> Vec<T>
where
T: Clone,
{
self.to_vec_in(Global)
}
/// Copies `self` into a new `Vec` with an allocator.
///
/// # Examples
///
/// ```
/// #![feature(allocator_api)]
///
/// use std::alloc::System;
///
/// let s = [10, 40, 30];
/// let x = s.to_vec_in(System);
/// // Here, `s` and `x` can be modified independently.
/// ```
#[cfg(not(no_global_oom_handling))]
#[rustc_allow_incoherent_impl]
#[inline]
#[unstable(feature = "allocator_api", issue = "32838")]
pub fn to_vec_in<A: Allocator>(&self, alloc: A) -> Vec<T, A>
where
T: Clone,
{
// N.B., see the `hack` module in this file for more details.
hack::to_vec(self, alloc)
}
/// Converts `self` into a vector without clones or allocation.
///
/// The resulting vector can be converted back into a box via
/// `Vec<T>`'s `into_boxed_slice` method.
///
/// # Examples
///
/// ```
/// let s: Box<[i32]> = Box::new([10, 40, 30]);
/// let x = s.into_vec();
/// // `s` cannot be used anymore because it has been converted into `x`.
///
/// assert_eq!(x, vec![10, 40, 30]);
/// ```
#[rustc_allow_incoherent_impl]
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn into_vec<A: Allocator>(self: Box<Self, A>) -> Vec<T, A> {
// N.B., see the `hack` module in this file for more details.
hack::into_vec(self)
}
/// Creates a vector by copying a slice `n` times.
///
/// # Panics
///
/// This function will panic if the capacity would overflow.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// assert_eq!([1, 2].repeat(3), vec![1, 2, 1, 2, 1, 2]);
/// ```
///
/// A panic upon overflow:
///
/// ```should_panic
/// // this will panic at runtime
/// b"0123456789abcdef".repeat(usize::MAX);
/// ```
#[rustc_allow_incoherent_impl]
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "repeat_generic_slice", since = "1.40.0")]
pub fn repeat(&self, n: usize) -> Vec<T>
where
T: Copy,
{
if n == 0 {
return Vec::new();
}
// If `n` is larger than zero, it can be split as
// `n = 2^expn + rem (2^expn > rem, expn >= 0, rem >= 0)`.
// `2^expn` is the number represented by the leftmost '1' bit of `n`,
// and `rem` is the remaining part of `n`.
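// For example, `n = 13 = 0b1101` splits as `2^3 + 5`: the buffer is doubled
// three times (1 -> 2 -> 4 -> 8 repetitions), and the remaining 5
// repetitions are then copied from the front of the buffer itself.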
// Using `Vec` to access `set_len()`.
let capacity = self.len().checked_mul(n).expect("capacity overflow");
let mut buf = Vec::with_capacity(capacity);
// `2^expn` repetition is done by doubling `buf` `expn`-times.
buf.extend(self);
{
let mut m = n >> 1;
// If `m > 0`, there are remaining bits up to the leftmost '1'.
while m > 0 {
// `buf.extend(buf)`:
unsafe {
ptr::copy_nonoverlapping::<T>(
buf.as_ptr(),
(buf.as_mut_ptr()).add(buf.len()),
buf.len(),
);
// `buf` has capacity of `self.len() * n`.
let buf_len = buf.len();
buf.set_len(buf_len * 2);
}
m >>= 1;
}
}
// `rem` (`= n - 2^expn`) repetition is done by copying
// first `rem` repetitions from `buf` itself.
let rem_len = capacity - buf.len(); // `self.len() * rem`
if rem_len > 0 {
// `buf.extend(buf[0 .. rem_len])`:
unsafe {
// This is non-overlapping since `2^expn > rem`.
ptr::copy_nonoverlapping::<T>(
buf.as_ptr(),
(buf.as_mut_ptr()).add(buf.len()),
rem_len,
);
// `buf.len() + rem_len` equals `buf.capacity()` (`= self.len() * n`).
buf.set_len(capacity);
}
}
buf
}
/// Flattens a slice of `T` into a single value `Self::Output`.
///
/// # Examples
///
/// ```
/// assert_eq!(["hello", "world"].concat(), "helloworld");
/// assert_eq!([[1, 2], [3, 4]].concat(), [1, 2, 3, 4]);
/// ```
#[rustc_allow_incoherent_impl]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn concat<Item: ?Sized>(&self) -> <Self as Concat<Item>>::Output
where
Self: Concat<Item>,
{
Concat::concat(self)
}
/// Flattens a slice of `T` into a single value `Self::Output`, placing a
/// given separator between each.
///
/// # Examples
///
/// ```
/// assert_eq!(["hello", "world"].join(" "), "hello world");
/// assert_eq!([[1, 2], [3, 4]].join(&0), [1, 2, 0, 3, 4]);
/// assert_eq!([[1, 2], [3, 4]].join(&[0, 0][..]), [1, 2, 0, 0, 3, 4]);
/// ```
#[rustc_allow_incoherent_impl]
#[stable(feature = "rename_connect_to_join", since = "1.3.0")]
pub fn join<Separator>(&self, sep: Separator) -> <Self as Join<Separator>>::Output
where
Self: Join<Separator>,
{
Join::join(self, sep)
}
/// Flattens a slice of `T` into a single value `Self::Output`, placing a
/// given separator between each.
///
/// # Examples
///
/// ```
/// # #![allow(deprecated)]
/// assert_eq!(["hello", "world"].connect(" "), "hello world");
/// assert_eq!([[1, 2], [3, 4]].connect(&0), [1, 2, 0, 3, 4]);
/// ```
#[rustc_allow_incoherent_impl]
#[stable(feature = "rust1", since = "1.0.0")]
#[deprecated(since = "1.3.0", note = "renamed to join", suggestion = "join")]
pub fn connect<Separator>(&self, sep: Separator) -> <Self as Join<Separator>>::Output
where
Self: Join<Separator>,
{
Join::join(self, sep)
}
}
#[cfg(not(test))]
impl [u8] {
/// Returns a vector containing a copy of this slice where each byte
/// is mapped to its ASCII upper case equivalent.
///
/// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
/// but non-ASCII letters are unchanged.
///
/// To uppercase the value in-place, use [`make_ascii_uppercase`].
///
/// [`make_ascii_uppercase`]: slice::make_ascii_uppercase
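///
/// # Examples
///
/// ```
/// let s = b"Hello, World!";
/// assert_eq!(s.to_ascii_uppercase(), b"HELLO, WORLD!");
/// ```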
#[cfg(not(no_global_oom_handling))]
#[rustc_allow_incoherent_impl]
#[must_use = "this returns the uppercase bytes as a new Vec, \
without modifying the original"]
#[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
#[inline]
pub fn to_ascii_uppercase(&self) -> Vec<u8> {
let mut me = self.to_vec();
me.make_ascii_uppercase();
me
}
/// Returns a vector containing a copy of this slice where each byte
/// is mapped to its ASCII lower case equivalent.
///
/// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
/// but non-ASCII letters are unchanged.
///
/// To lowercase the value in-place, use [`make_ascii_lowercase`].
///
/// [`make_ascii_lowercase`]: slice::make_ascii_lowercase
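///
/// # Examples
///
/// ```
/// let s = b"Hello, World!";
/// assert_eq!(s.to_ascii_lowercase(), b"hello, world!");
/// ```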
#[cfg(not(no_global_oom_handling))]
#[rustc_allow_incoherent_impl]
#[must_use = "this returns the lowercase bytes as a new Vec, \
without modifying the original"]
#[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
#[inline]
pub fn to_ascii_lowercase(&self) -> Vec<u8> {
let mut me = self.to_vec();
me.make_ascii_lowercase();
me
}
}
////////////////////////////////////////////////////////////////////////////////
// Extension traits for slices over specific kinds of data
////////////////////////////////////////////////////////////////////////////////
/// Helper trait for [`[T]::concat`](slice::concat).
///
/// Note: the `Item` type parameter is not used in this trait,
/// but it allows impls to be more generic.
/// Without it, we get this error:
///
/// ```error
/// error[E0207]: the type parameter `T` is not constrained by the impl trait, self type, or predicates
/// --> library/alloc/src/slice.rs:608:6
/// |
/// 608 | impl<T: Clone, V: Borrow<[T]>> Concat for [V] {
/// | ^ unconstrained type parameter
/// ```
///
/// This is because there could exist `V` types with multiple `Borrow<[_]>` impls,
/// such that multiple `T` types would apply:
///
/// ```
/// # #[allow(dead_code)]
/// pub struct Foo(Vec<u32>, Vec<String>);
///
/// impl std::borrow::Borrow<[u32]> for Foo {
/// fn borrow(&self) -> &[u32] { &self.0 }
/// }
///
/// impl std::borrow::Borrow<[String]> for Foo {
/// fn borrow(&self) -> &[String] { &self.1 }
/// }
/// ```
#[unstable(feature = "slice_concat_trait", issue = "27747")]
pub trait Concat<Item: ?Sized> {
#[unstable(feature = "slice_concat_trait", issue = "27747")]
/// The resulting type after concatenation
type Output;
/// Implementation of [`[T]::concat`](slice::concat)
#[unstable(feature = "slice_concat_trait", issue = "27747")]
fn concat(slice: &Self) -> Self::Output;
}
/// Helper trait for [`[T]::join`](slice::join)
#[unstable(feature = "slice_concat_trait", issue = "27747")]
pub trait Join<Separator> {
#[unstable(feature = "slice_concat_trait", issue = "27747")]
/// The resulting type after concatenation
type Output;
/// Implementation of [`[T]::join`](slice::join)
#[unstable(feature = "slice_concat_trait", issue = "27747")]
fn join(slice: &Self, sep: Separator) -> Self::Output;
}
#[cfg(not(no_global_oom_handling))]
#[unstable(feature = "slice_concat_ext", issue = "27747")]
impl<T: Clone, V: Borrow<[T]>> Concat<T> for [V] {
type Output = Vec<T>;
fn concat(slice: &Self) -> Vec<T> {
let size = slice.iter().map(|slice| slice.borrow().len()).sum();
let mut result = Vec::with_capacity(size);
for v in slice {
result.extend_from_slice(v.borrow())
}
result
}
}
#[cfg(not(no_global_oom_handling))]
#[unstable(feature = "slice_concat_ext", issue = "27747")]
impl<T: Clone, V: Borrow<[T]>> Join<&T> for [V] {
type Output = Vec<T>;
fn join(slice: &Self, sep: &T) -> Vec<T> {
let mut iter = slice.iter();
let first = match iter.next() {
Some(first) => first,
None => return vec![],
};
let size = slice.iter().map(|v| v.borrow().len()).sum::<usize>() + slice.len() - 1;
let mut result = Vec::with_capacity(size);
result.extend_from_slice(first.borrow());
for v in iter {
result.push(sep.clone());
result.extend_from_slice(v.borrow())
}
result
}
}
#[cfg(not(no_global_oom_handling))]
#[unstable(feature = "slice_concat_ext", issue = "27747")]
impl<T: Clone, V: Borrow<[T]>> Join<&[T]> for [V] {
type Output = Vec<T>;
fn join(slice: &Self, sep: &[T]) -> Vec<T> {
let mut iter = slice.iter();
let first = match iter.next() {
Some(first) => first,
None => return vec![],
};
let size =
slice.iter().map(|v| v.borrow().len()).sum::<usize>() + sep.len() * (slice.len() - 1);
let mut result = Vec::with_capacity(size);
result.extend_from_slice(first.borrow());
for v in iter {
result.extend_from_slice(sep);
result.extend_from_slice(v.borrow())
}
result
}
}
////////////////////////////////////////////////////////////////////////////////
// Standard trait implementations for slices
////////////////////////////////////////////////////////////////////////////////
#[stable(feature = "rust1", since = "1.0.0")]
impl<T, A: Allocator> Borrow<[T]> for Vec<T, A> {
fn borrow(&self) -> &[T] {
&self[..]
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T, A: Allocator> BorrowMut<[T]> for Vec<T, A> {
fn borrow_mut(&mut self) -> &mut [T] {
&mut self[..]
}
}
// Specializable trait for implementing ToOwned::clone_into. This is
// public in the crate and has the Allocator parameter so that
// `vec::clone_from` can use it too.
#[cfg(not(no_global_oom_handling))]
pub(crate) trait SpecCloneIntoVec<T, A: Allocator> {
fn clone_into(&self, target: &mut Vec<T, A>);
}
#[cfg(not(no_global_oom_handling))]
impl<T: Clone, A: Allocator> SpecCloneIntoVec<T, A> for [T] {
default fn clone_into(&self, target: &mut Vec<T, A>) {
// drop anything in target that will not be overwritten
target.truncate(self.len());
// target.len <= self.len due to the truncate above, so the
// slices here are always in-bounds.
let (init, tail) = self.split_at(target.len());
// reuse the contained values' allocations/resources.
target.clone_from_slice(init);
target.extend_from_slice(tail);
}
}
#[cfg(not(no_global_oom_handling))]
impl<T: Copy, A: Allocator> SpecCloneIntoVec<T, A> for [T] {
fn clone_into(&self, target: &mut Vec<T, A>) {
target.clear();
target.extend_from_slice(self);
}
}
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Clone> ToOwned for [T] {
type Owned = Vec<T>;
#[cfg(not(test))]
fn to_owned(&self) -> Vec<T> {
self.to_vec()
}
#[cfg(test)]
fn to_owned(&self) -> Vec<T> {
hack::to_vec(self, Global)
}
fn clone_into(&self, target: &mut Vec<T>) {
SpecCloneIntoVec::clone_into(self, target);
}
}
////////////////////////////////////////////////////////////////////////////////
// Sorting
////////////////////////////////////////////////////////////////////////////////
#[inline]
#[cfg(not(no_global_oom_handling))]
fn stable_sort<T, F>(v: &mut [T], mut is_less: F)
where
F: FnMut(&T, &T) -> bool,
{
if T::IS_ZST {
// Sorting has no meaningful behavior on zero-sized types. Do nothing.
return;
}
let elem_alloc_fn = |len: usize| -> *mut T {
// SAFETY: Creating the layout is safe as long as merge_sort never calls this with len >
// v.len(). Alloc in general will only be used as 'shadow-region' to store temporary swap
// elements.
unsafe { alloc::alloc(alloc::Layout::array::<T>(len).unwrap_unchecked()) as *mut T }
};
let elem_dealloc_fn = |buf_ptr: *mut T, len: usize| {
// SAFETY: Creating the layout is safe as long as merge_sort never calls this with len >
// v.len(). The caller must ensure that buf_ptr was created by elem_alloc_fn with the same
// len.
unsafe {
alloc::dealloc(buf_ptr as *mut u8, alloc::Layout::array::<T>(len).unwrap_unchecked());
}
};
let run_alloc_fn = |len: usize| -> *mut sort::TimSortRun {
// SAFETY: Creating the layout is safe as long as merge_sort never calls this with an
// obscene length or 0.
unsafe {
alloc::alloc(alloc::Layout::array::<sort::TimSortRun>(len).unwrap_unchecked())
as *mut sort::TimSortRun
}
};
let run_dealloc_fn = |buf_ptr: *mut sort::TimSortRun, len: usize| {
// SAFETY: The caller must ensure that buf_ptr was created by run_alloc_fn with the same
// len.
unsafe {
alloc::dealloc(
buf_ptr as *mut u8,
alloc::Layout::array::<sort::TimSortRun>(len).unwrap_unchecked(),
);
}
};
sort::merge_sort(v, &mut is_less, elem_alloc_fn, elem_dealloc_fn, run_alloc_fn, run_dealloc_fn);
}

View File

@ -0,0 +1,360 @@
use crate::borrow::ToOwned;
use crate::rc::Rc;
use crate::string::ToString;
use crate::test_helpers::test_rng;
use crate::vec::Vec;
use core::cell::Cell;
use core::cmp::Ordering::{self, Equal, Greater, Less};
use core::convert::identity;
use core::fmt;
use core::mem;
use core::sync::atomic::{AtomicUsize, Ordering::Relaxed};
use rand::{distributions::Standard, prelude::*, Rng, RngCore};
use std::panic;
macro_rules! do_test {
($input:ident, $func:ident) => {
let len = $input.len();
// Work out the total number of comparisons required to sort
// this array...
let mut count = 0usize;
$input.to_owned().$func(|a, b| {
count += 1;
a.cmp(b)
});
// ... and then panic on each and every single one.
for panic_countdown in 0..count {
// Refresh the counters.
VERSIONS.store(0, Relaxed);
for i in 0..len {
DROP_COUNTS[i].store(0, Relaxed);
}
let v = $input.to_owned();
let _ = std::panic::catch_unwind(move || {
let mut v = v;
let mut panic_countdown = panic_countdown;
v.$func(|a, b| {
if panic_countdown == 0 {
SILENCE_PANIC.with(|s| s.set(true));
panic!();
}
panic_countdown -= 1;
a.cmp(b)
})
});
// Check that the number of things dropped is exactly
// what we expect (i.e., the contents of `v`).
for (i, c) in DROP_COUNTS.iter().enumerate().take(len) {
let count = c.load(Relaxed);
assert!(count == 1, "found drop count == {} for i == {}, len == {}", count, i, len);
}
// Check that the most recent versions of values were dropped.
assert_eq!(VERSIONS.load(Relaxed), 0);
}
};
}
const MAX_LEN: usize = 80;
static DROP_COUNTS: [AtomicUsize; MAX_LEN] = {
// FIXME(RFC 1109): `AtomicUsize` is not `Copy`, so plain `[expr; N]` repeat
// syntax is rejected; repeating a `const` item sidesteps that, since the
// initializer is re-evaluated for every element.
const ZERO: AtomicUsize = AtomicUsize::new(0);
[ZERO; MAX_LEN]
};
static VERSIONS: AtomicUsize = AtomicUsize::new(0);
#[derive(Clone, Eq)]
struct DropCounter {
x: u32,
id: usize,
version: Cell<usize>,
}
impl PartialEq for DropCounter {
fn eq(&self, other: &Self) -> bool {
self.partial_cmp(other) == Some(Ordering::Equal)
}
}
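// Every comparison bumps both operands' `version` and the global `VERSIONS`
// counter; `Drop` subtracts the dropped value's recorded version, so after a
// full run `VERSIONS` drains back to zero only if the copies that get dropped
// carry every comparison that was counted — i.e. the most recent versions of
// the values, not stale clones, were the ones dropped.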
impl PartialOrd for DropCounter {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
self.version.set(self.version.get() + 1);
other.version.set(other.version.get() + 1);
VERSIONS.fetch_add(2, Relaxed);
self.x.partial_cmp(&other.x)
}
}
impl Ord for DropCounter {
fn cmp(&self, other: &Self) -> Ordering {
self.partial_cmp(other).unwrap()
}
}
impl Drop for DropCounter {
fn drop(&mut self) {
DROP_COUNTS[self.id].fetch_add(1, Relaxed);
VERSIONS.fetch_sub(self.version.get(), Relaxed);
}
}
std::thread_local!(static SILENCE_PANIC: Cell<bool> = Cell::new(false));
#[test]
#[cfg_attr(target_os = "emscripten", ignore)] // no threads
#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
fn panic_safe() {
panic::update_hook(move |prev, info| {
if !SILENCE_PANIC.with(|s| s.get()) {
prev(info);
}
});
let mut rng = test_rng();
// Miri is too slow (but still need to `chain` to make the types match)
let lens = if cfg!(miri) { (1..10).chain(0..0) } else { (1..20).chain(70..MAX_LEN) };
let moduli: &[u32] = if cfg!(miri) { &[5] } else { &[5, 20, 50] };
for len in lens {
for &modulus in moduli {
for &has_runs in &[false, true] {
let mut input = (0..len)
.map(|id| DropCounter {
x: rng.next_u32() % modulus,
id,
version: Cell::new(0),
})
.collect::<Vec<_>>();
if has_runs {
for c in &mut input {
c.x = c.id as u32;
}
for _ in 0..5 {
let a = rng.gen::<usize>() % len;
let b = rng.gen::<usize>() % len;
if a < b {
input[a..b].reverse();
} else {
input.swap(a, b);
}
}
}
do_test!(input, sort_by);
do_test!(input, sort_unstable_by);
}
}
}
// Set default panic hook again.
drop(panic::take_hook());
}
#[test]
#[cfg_attr(miri, ignore)] // Miri is too slow
fn test_sort() {
let mut rng = test_rng();
for len in (2..25).chain(500..510) {
for &modulus in &[5, 10, 100, 1000] {
for _ in 0..10 {
let orig: Vec<_> = (&mut rng)
.sample_iter::<i32, _>(&Standard)
.map(|x| x % modulus)
.take(len)
.collect();
// Sort in default order.
let mut v = orig.clone();
v.sort();
assert!(v.windows(2).all(|w| w[0] <= w[1]));
// Sort in ascending order.
let mut v = orig.clone();
v.sort_by(|a, b| a.cmp(b));
assert!(v.windows(2).all(|w| w[0] <= w[1]));
// Sort in descending order.
let mut v = orig.clone();
v.sort_by(|a, b| b.cmp(a));
assert!(v.windows(2).all(|w| w[0] >= w[1]));
// Sort in lexicographic order.
let mut v1 = orig.clone();
let mut v2 = orig.clone();
v1.sort_by_key(|x| x.to_string());
v2.sort_by_cached_key(|x| x.to_string());
assert!(v1.windows(2).all(|w| w[0].to_string() <= w[1].to_string()));
assert!(v1 == v2);
// Sort with many pre-sorted runs.
let mut v = orig.clone();
v.sort();
v.reverse();
for _ in 0..5 {
let a = rng.gen::<usize>() % len;
let b = rng.gen::<usize>() % len;
if a < b {
v[a..b].reverse();
} else {
v.swap(a, b);
}
}
v.sort();
assert!(v.windows(2).all(|w| w[0] <= w[1]));
}
}
}
// Sort using a completely random comparison function.
// This will reorder the elements *somehow*, but won't panic.
let mut v = [0; 500];
for i in 0..v.len() {
v[i] = i as i32;
}
v.sort_by(|_, _| *[Less, Equal, Greater].choose(&mut rng).unwrap());
v.sort();
for i in 0..v.len() {
assert_eq!(v[i], i as i32);
}
// Should not panic.
[0i32; 0].sort();
[(); 10].sort();
[(); 100].sort();
let mut v = [0xDEADBEEFu64];
v.sort();
assert!(v == [0xDEADBEEF]);
}
#[test]
fn test_sort_stability() {
// Miri is too slow
let large_range = if cfg!(miri) { 0..0 } else { 500..510 };
let rounds = if cfg!(miri) { 1 } else { 10 };
let mut rng = test_rng();
for len in (2..25).chain(large_range) {
for _ in 0..rounds {
let mut counts = [0; 10];
// create a vector like [(6, 1), (5, 1), (6, 2), ...],
// where the first item of each tuple is random, but
// the second item represents which occurrence of that
// number this element is, i.e., the second elements
// will occur in sorted order.
let orig: Vec<_> = (0..len)
.map(|_| {
let n = rng.gen::<usize>() % 10;
counts[n] += 1;
(n, counts[n])
})
.collect();
let mut v = orig.clone();
// Only sort on the first element, so an unstable sort
// may mix up the counts.
v.sort_by(|&(a, _), &(b, _)| a.cmp(&b));
// This comparison includes the count (the second item
// of the tuple), so elements with equal first items
// will need to be ordered with increasing
// counts... i.e., exactly asserting that this sort is
// stable.
assert!(v.windows(2).all(|w| w[0] <= w[1]));
let mut v = orig.clone();
v.sort_by_cached_key(|&(x, _)| x);
assert!(v.windows(2).all(|w| w[0] <= w[1]));
}
}
}

View File

@ -0,0 +1,664 @@
//! Utilities for the `str` primitive type.
//!
//! *[See also the `str` primitive type](str).*
#![stable(feature = "rust1", since = "1.0.0")]
// Many of the `use` imports in this module are only used in the test configuration.
// It's cleaner to just turn off the unused_imports warning than to fix them.
#![allow(unused_imports)]
use core::borrow::{Borrow, BorrowMut};
use core::iter::FusedIterator;
use core::mem;
use core::ptr;
use core::str::pattern::{DoubleEndedSearcher, Pattern, ReverseSearcher, Searcher};
use core::unicode::conversions;
use crate::borrow::ToOwned;
use crate::boxed::Box;
use crate::slice::{Concat, Join, SliceIndex};
use crate::string::String;
use crate::vec::Vec;
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::str::pattern;
#[stable(feature = "encode_utf16", since = "1.8.0")]
pub use core::str::EncodeUtf16;
#[stable(feature = "split_ascii_whitespace", since = "1.34.0")]
pub use core::str::SplitAsciiWhitespace;
#[stable(feature = "split_inclusive", since = "1.51.0")]
pub use core::str::SplitInclusive;
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::str::SplitWhitespace;
#[unstable(feature = "str_from_raw_parts", issue = "119206")]
pub use core::str::{from_raw_parts, from_raw_parts_mut};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::str::{from_utf8, from_utf8_mut, Bytes, CharIndices, Chars};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::str::{from_utf8_unchecked, from_utf8_unchecked_mut, ParseBoolError};
#[stable(feature = "str_escape", since = "1.34.0")]
pub use core::str::{EscapeDebug, EscapeDefault, EscapeUnicode};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::str::{FromStr, Utf8Error};
#[allow(deprecated)]
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::str::{Lines, LinesAny};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::str::{MatchIndices, RMatchIndices};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::str::{Matches, RMatches};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::str::{RSplit, Split};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::str::{RSplitN, SplitN};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::str::{RSplitTerminator, SplitTerminator};
#[stable(feature = "utf8_chunks", since = "1.79.0")]
pub use core::str::{Utf8Chunk, Utf8Chunks};
/// Note: `str` in `Concat<str>` is not meaningful here.
/// This type parameter of the trait only exists to enable another impl.
#[cfg(not(no_global_oom_handling))]
#[unstable(feature = "slice_concat_ext", issue = "27747")]
impl<S: Borrow<str>> Concat<str> for [S] {
type Output = String;
fn concat(slice: &Self) -> String {
Join::join(slice, "")
}
}
#[cfg(not(no_global_oom_handling))]
#[unstable(feature = "slice_concat_ext", issue = "27747")]
impl<S: Borrow<str>> Join<&str> for [S] {
type Output = String;
fn join(slice: &Self, sep: &str) -> String {
unsafe { String::from_utf8_unchecked(join_generic_copy(slice, sep.as_bytes())) }
}
}
#[cfg(not(no_global_oom_handling))]
macro_rules! specialize_for_lengths {
($separator:expr, $target:expr, $iter:expr; $($num:expr),*) => {{
let mut target = $target;
let iter = $iter;
let sep_bytes = $separator;
match $separator.len() {
$(
// loops with hardcoded sizes run much faster
// specialize the cases with small separator lengths
$num => {
for s in iter {
copy_slice_and_advance!(target, sep_bytes);
let content_bytes = s.borrow().as_ref();
copy_slice_and_advance!(target, content_bytes);
}
},
)*
_ => {
// arbitrary non-zero size fallback
for s in iter {
copy_slice_and_advance!(target, sep_bytes);
let content_bytes = s.borrow().as_ref();
copy_slice_and_advance!(target, content_bytes);
}
}
}
target
}}
}
#[cfg(not(no_global_oom_handling))]
macro_rules! copy_slice_and_advance {
($target:expr, $bytes:expr) => {
let len = $bytes.len();
let (head, tail) = { $target }.split_at_mut(len);
head.copy_from_slice($bytes);
$target = tail;
};
}
// Optimized join implementation that works for both Vec<T> (T: Copy) and String's inner vec
// Currently (2018-05-13) there is a bug with type inference and specialization (see issue #36262)
// For this reason SliceConcat<T> is not specialized for T: Copy and SliceConcat<str> is the
// only user of this function. It is left in place for the time when that is fixed.
//
// the bounds for String-join are S: Borrow<str> and for Vec-join Borrow<[T]>
// [T] and str both impl AsRef<[T]> for some T
// => s.borrow().as_ref() and we always have slices
#[cfg(not(no_global_oom_handling))]
fn join_generic_copy<B, T, S>(slice: &[S], sep: &[T]) -> Vec<T>
where
T: Copy,
B: AsRef<[T]> + ?Sized,
S: Borrow<B>,
{
let sep_len = sep.len();
let mut iter = slice.iter();
// the first slice is the only one without a separator preceding it
let first = match iter.next() {
Some(first) => first,
None => return vec![],
};
// compute the exact total length of the joined Vec
// if the `len` calculation overflows, we'll panic
// we would have run out of memory anyway and the rest of the function requires
// the entire Vec pre-allocated for safety
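// i.e. `reserved_len = sep.len() * (slice.len() - 1) + Σ slice[i].len()`;
// `iter` has already yielded `first`, so `iter.len()` is exactly the number
// of separators that will be inserted.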
let reserved_len = sep_len
.checked_mul(iter.len())
.and_then(|n| {
slice.iter().map(|s| s.borrow().as_ref().len()).try_fold(n, usize::checked_add)
})
.expect("attempt to join into collection with len > usize::MAX");
// prepare an uninitialized buffer
let mut result = Vec::with_capacity(reserved_len);
debug_assert!(result.capacity() >= reserved_len);
result.extend_from_slice(first.borrow().as_ref());
unsafe {
let pos = result.len();
let target = result.spare_capacity_mut().get_unchecked_mut(..reserved_len - pos);
// Convert the separator and slices to slices of MaybeUninit
// to simplify implementation in specialize_for_lengths
let sep_uninit = core::slice::from_raw_parts(sep.as_ptr().cast(), sep.len());
let iter_uninit = iter.map(|it| {
let it = it.borrow().as_ref();
core::slice::from_raw_parts(it.as_ptr().cast(), it.len())
});
// copy separator and slices over without bounds checks
// generate loops with hardcoded offsets for small separators
// massive improvements possible (~ x2)
let remain = specialize_for_lengths!(sep_uninit, target, iter_uninit; 0, 1, 2, 3, 4);
// A weird borrow implementation may return different
// slices for the length calculation and the actual copy.
// Make sure we don't expose uninitialized bytes to the caller.
let result_len = reserved_len - remain.len();
result.set_len(result_len);
}
result
}
#[stable(feature = "rust1", since = "1.0.0")]
impl Borrow<str> for String {
#[inline]
fn borrow(&self) -> &str {
&self[..]
}
}
#[stable(feature = "string_borrow_mut", since = "1.36.0")]
impl BorrowMut<str> for String {
#[inline]
fn borrow_mut(&mut self) -> &mut str {
&mut self[..]
}
}
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
impl ToOwned for str {
type Owned = String;
#[inline]
fn to_owned(&self) -> String {
unsafe { String::from_utf8_unchecked(self.as_bytes().to_owned()) }
}
fn clone_into(&self, target: &mut String) {
let mut b = mem::take(target).into_bytes();
self.as_bytes().clone_into(&mut b);
*target = unsafe { String::from_utf8_unchecked(b) }
}
}
/// Methods for string slices.
#[cfg(not(test))]
impl str {
/// Converts a `Box<str>` into a `Box<[u8]>` without copying or allocating.
///
/// # Examples
///
/// ```
/// let s = "this is a string";
/// let boxed_str = s.to_owned().into_boxed_str();
/// let boxed_bytes = boxed_str.into_boxed_bytes();
/// assert_eq!(*boxed_bytes, *s.as_bytes());
/// ```
#[rustc_allow_incoherent_impl]
#[stable(feature = "str_box_extras", since = "1.20.0")]
#[must_use = "`self` will be dropped if the result is not used"]
#[inline]
pub fn into_boxed_bytes(self: Box<str>) -> Box<[u8]> {
self.into()
}
/// Replaces all matches of a pattern with another string.
///
/// `replace` creates a new [`String`], and copies the data from this string slice into it.
/// While doing so, it attempts to find matches of a pattern. If it finds any, it
/// replaces them with the replacement string slice.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let s = "this is old";
///
/// assert_eq!("this is new", s.replace("old", "new"));
/// assert_eq!("than an old", s.replace("is", "an"));
/// ```
///
/// When the pattern doesn't match, it returns this string slice as [`String`]:
///
/// ```
/// let s = "this is old";
/// assert_eq!(s, s.replace("cookie monster", "little lamb"));
/// ```
#[cfg(not(no_global_oom_handling))]
#[rustc_allow_incoherent_impl]
#[must_use = "this returns the replaced string as a new allocation, \
without modifying the original"]
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn replace<'a, P: Pattern<'a>>(&'a self, from: P, to: &str) -> String {
let mut result = String::new();
let mut last_end = 0;
for (start, part) in self.match_indices(from) {
result.push_str(unsafe { self.get_unchecked(last_end..start) });
result.push_str(to);
last_end = start + part.len();
}
result.push_str(unsafe { self.get_unchecked(last_end..self.len()) });
result
}
/// Replaces first N matches of a pattern with another string.
///
/// `replacen` creates a new [`String`], and copies the data from this string slice into it.
/// While doing so, it attempts to find matches of a pattern. If it finds any, it
/// replaces them with the replacement string slice at most `count` times.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let s = "foo foo 123 foo";
/// assert_eq!("new new 123 foo", s.replacen("foo", "new", 2));
/// assert_eq!("faa fao 123 foo", s.replacen('o', "a", 3));
/// assert_eq!("foo foo new23 foo", s.replacen(char::is_numeric, "new", 1));
/// ```
///
/// When the pattern doesn't match, it returns this string slice as [`String`]:
///
/// ```
/// let s = "this is old";
/// assert_eq!(s, s.replacen("cookie monster", "little lamb", 10));
/// ```
#[cfg(not(no_global_oom_handling))]
#[rustc_allow_incoherent_impl]
#[must_use = "this returns the replaced string as a new allocation, \
without modifying the original"]
#[stable(feature = "str_replacen", since = "1.16.0")]
pub fn replacen<'a, P: Pattern<'a>>(&'a self, pat: P, to: &str, count: usize) -> String {
// Optimistically pre-allocate to reduce the number of re-allocations.
let mut result = String::with_capacity(32);
let mut last_end = 0;
for (start, part) in self.match_indices(pat).take(count) {
result.push_str(unsafe { self.get_unchecked(last_end..start) });
result.push_str(to);
last_end = start + part.len();
}
result.push_str(unsafe { self.get_unchecked(last_end..self.len()) });
result
}
/// Returns the lowercase equivalent of this string slice, as a new [`String`].
///
/// 'Lowercase' is defined according to the terms of the Unicode Derived Core Property
/// `Lowercase`.
///
/// Since some characters can expand into multiple characters when changing
/// the case, this function returns a [`String`] instead of modifying the
/// parameter in-place.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let s = "HELLO";
///
/// assert_eq!("hello", s.to_lowercase());
/// ```
///
/// A tricky example, with sigma:
///
/// ```
/// let sigma = "Σ";
///
/// assert_eq!("σ", sigma.to_lowercase());
///
/// // but at the end of a word, it's ς, not σ:
/// let odysseus = "ὈΔΥΣΣΕΎΣ";
///
/// assert_eq!("ὀδυσσεύς", odysseus.to_lowercase());
/// ```
///
/// Languages without case are not changed:
///
/// ```
/// let new_year = "农历新年";
///
/// assert_eq!(new_year, new_year.to_lowercase());
/// ```
#[cfg(not(no_global_oom_handling))]
#[rustc_allow_incoherent_impl]
#[must_use = "this returns the lowercase string as a new String, \
without modifying the original"]
#[stable(feature = "unicode_case_mapping", since = "1.2.0")]
pub fn to_lowercase(&self) -> String {
let out = convert_while_ascii(self.as_bytes(), u8::to_ascii_lowercase);
// Safety: we know this is a valid char boundary since
// out.len() only advances while ascii bytes are found
let rest = unsafe { self.get_unchecked(out.len()..) };
// Safety: We have written only valid ASCII to our vec
let mut s = unsafe { String::from_utf8_unchecked(out) };
for (i, c) in rest.char_indices() {
if c == 'Σ' {
// Σ maps to σ, except at the end of a word where it maps to ς.
// This is the only conditional (contextual) but language-independent mapping
// in `SpecialCasing.txt`,
// so hard-code it rather than have a generic "condition" mechanism.
// See https://github.com/rust-lang/rust/issues/26035
let out_len = self.len() - rest.len();
let sigma_lowercase = map_uppercase_sigma(&self, i + out_len);
s.push(sigma_lowercase);
} else {
match conversions::to_lower(c) {
[a, '\0', _] => s.push(a),
[a, b, '\0'] => {
s.push(a);
s.push(b);
}
[a, b, c] => {
s.push(a);
s.push(b);
s.push(c);
}
}
}
}
return s;
fn map_uppercase_sigma(from: &str, i: usize) -> char {
// See https://www.unicode.org/versions/Unicode7.0.0/ch03.pdf#G33992
// for the definition of `Final_Sigma`.
debug_assert!('Σ'.len_utf8() == 2);
let is_word_final = case_ignorable_then_cased(from[..i].chars().rev())
&& !case_ignorable_then_cased(from[i + 2..].chars());
if is_word_final { 'ς' } else { 'σ' }
}
fn case_ignorable_then_cased<I: Iterator<Item = char>>(iter: I) -> bool {
use core::unicode::{Case_Ignorable, Cased};
match iter.skip_while(|&c| Case_Ignorable(c)).next() {
Some(c) => Cased(c),
None => false,
}
}
}
/// Returns the uppercase equivalent of this string slice, as a new [`String`].
///
/// 'Uppercase' is defined according to the terms of the Unicode Derived Core Property
/// `Uppercase`.
///
/// Since some characters can expand into multiple characters when changing
/// the case, this function returns a [`String`] instead of modifying the
/// parameter in-place.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let s = "hello";
///
/// assert_eq!("HELLO", s.to_uppercase());
/// ```
///
/// Scripts without case are not changed:
///
/// ```
/// let new_year = "农历新年";
///
/// assert_eq!(new_year, new_year.to_uppercase());
/// ```
///
/// One character can become multiple:
/// ```
/// let s = "tschüß";
///
/// assert_eq!("TSCHÜSS", s.to_uppercase());
/// ```
#[cfg(not(no_global_oom_handling))]
#[rustc_allow_incoherent_impl]
#[must_use = "this returns the uppercase string as a new String, \
without modifying the original"]
#[stable(feature = "unicode_case_mapping", since = "1.2.0")]
pub fn to_uppercase(&self) -> String {
let out = convert_while_ascii(self.as_bytes(), u8::to_ascii_uppercase);
// Safety: we know this is a valid char boundary since
// out.len() only advances while ascii bytes are found
let rest = unsafe { self.get_unchecked(out.len()..) };
// Safety: We have written only valid ASCII to our vec
let mut s = unsafe { String::from_utf8_unchecked(out) };
for c in rest.chars() {
match conversions::to_upper(c) {
[a, '\0', _] => s.push(a),
[a, b, '\0'] => {
s.push(a);
s.push(b);
}
[a, b, c] => {
s.push(a);
s.push(b);
s.push(c);
}
}
}
s
}
/// Converts a [`Box<str>`] into a [`String`] without copying or allocating.
///
/// # Examples
///
/// ```
/// let string = String::from("birthday gift");
/// let boxed_str = string.clone().into_boxed_str();
///
/// assert_eq!(boxed_str.into_string(), string);
/// ```
#[stable(feature = "box_str", since = "1.4.0")]
#[rustc_allow_incoherent_impl]
#[must_use = "`self` will be dropped if the result is not used"]
#[inline]
pub fn into_string(self: Box<str>) -> String {
let slice = Box::<[u8]>::from(self);
unsafe { String::from_utf8_unchecked(slice.into_vec()) }
}
/// Creates a new [`String`] by repeating a string `n` times.
///
/// # Panics
///
/// This function will panic if the capacity would overflow.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// assert_eq!("abc".repeat(4), String::from("abcabcabcabc"));
/// ```
///
/// A panic upon overflow:
///
/// ```should_panic
/// // this will panic at runtime
/// let huge = "0123456789abcdef".repeat(usize::MAX);
/// ```
#[cfg(not(no_global_oom_handling))]
#[rustc_allow_incoherent_impl]
#[must_use]
#[stable(feature = "repeat_str", since = "1.16.0")]
pub fn repeat(&self, n: usize) -> String {
unsafe { String::from_utf8_unchecked(self.as_bytes().repeat(n)) }
}
/// Returns a copy of this string where each character is mapped to its
/// ASCII upper case equivalent.
///
/// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
/// but non-ASCII letters are unchanged.
///
/// To uppercase the value in-place, use [`make_ascii_uppercase`].
///
/// To uppercase ASCII characters in addition to non-ASCII characters, use
/// [`to_uppercase`].
///
/// # Examples
///
/// ```
/// let s = "Grüße, Jürgen ❤";
///
/// assert_eq!("GRüßE, JüRGEN ❤", s.to_ascii_uppercase());
/// ```
///
/// [`make_ascii_uppercase`]: str::make_ascii_uppercase
/// [`to_uppercase`]: #method.to_uppercase
#[cfg(not(no_global_oom_handling))]
#[rustc_allow_incoherent_impl]
#[must_use = "to uppercase the value in-place, use `make_ascii_uppercase()`"]
#[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
#[inline]
pub fn to_ascii_uppercase(&self) -> String {
let mut s = self.to_owned();
s.make_ascii_uppercase();
s
}
/// Returns a copy of this string where each character is mapped to its
/// ASCII lower case equivalent.
///
/// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
/// but non-ASCII letters are unchanged.
///
/// To lowercase the value in-place, use [`make_ascii_lowercase`].
///
/// To lowercase ASCII characters in addition to non-ASCII characters, use
/// [`to_lowercase`].
///
/// # Examples
///
/// ```
/// let s = "Grüße, Jürgen ❤";
///
/// assert_eq!("grüße, jürgen ❤", s.to_ascii_lowercase());
/// ```
///
/// [`make_ascii_lowercase`]: str::make_ascii_lowercase
/// [`to_lowercase`]: #method.to_lowercase
#[cfg(not(no_global_oom_handling))]
#[rustc_allow_incoherent_impl]
#[must_use = "to lowercase the value in-place, use `make_ascii_lowercase()`"]
#[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
#[inline]
pub fn to_ascii_lowercase(&self) -> String {
let mut s = self.to_owned();
s.make_ascii_lowercase();
s
}
}
/// Converts a boxed slice of bytes to a boxed string slice without checking
/// that the string contains valid UTF-8.
///
/// # Examples
///
/// ```
/// let smile_utf8 = Box::new([226, 152, 186]);
/// let smile = unsafe { std::str::from_boxed_utf8_unchecked(smile_utf8) };
///
/// assert_eq!("☺", &*smile);
/// ```
#[stable(feature = "str_box_extras", since = "1.20.0")]
#[must_use]
#[inline]
pub unsafe fn from_boxed_utf8_unchecked(v: Box<[u8]>) -> Box<str> {
unsafe { Box::from_raw(Box::into_raw(v) as *mut str) }
}
/// Converts the leading ASCII bytes of `b` using `convert`.
/// For better average performance, this processes whole chunks of `2 * size_of::<usize>()` bytes,
/// stopping at the first chunk that contains a non-ASCII byte (any shorter tail is left alone).
/// Returns a vec holding only the converted prefix; the caller converts the remainder.
#[inline]
#[cfg(not(test))]
#[cfg(not(no_global_oom_handling))]
fn convert_while_ascii(b: &[u8], convert: fn(&u8) -> u8) -> Vec<u8> {
let mut out = Vec::with_capacity(b.len());
const USIZE_SIZE: usize = mem::size_of::<usize>();
const MAGIC_UNROLL: usize = 2;
const N: usize = USIZE_SIZE * MAGIC_UNROLL;
const NONASCII_MASK: usize = usize::from_ne_bytes([0x80; USIZE_SIZE]);
let mut i = 0;
unsafe {
while i + N <= b.len() {
// Safety: we have checked the sizes of `b` and `out`, so `i..i + N` is in bounds for both
let in_chunk = b.get_unchecked(i..i + N);
let out_chunk = out.spare_capacity_mut().get_unchecked_mut(i..i + N);
let mut bits = 0;
for j in 0..MAGIC_UNROLL {
// read the bytes 1 usize at a time (unaligned since we haven't checked the alignment)
// safety: in_chunk is valid bytes in the range
bits |= in_chunk.as_ptr().cast::<usize>().add(j).read_unaligned();
}
// if this chunk contains non-ascii bytes, stop; only the prior chunks are treated as initialized
if bits & NONASCII_MASK != 0 {
break;
}
// perform the case conversions on N bytes (gets heavily autovec'd)
for j in 0..N {
// safety: in_chunk and out_chunk is valid bytes in the range
let out = out_chunk.get_unchecked_mut(j);
out.write(convert(in_chunk.get_unchecked(j)));
}
// mark these bytes as initialised
i += N;
}
out.set_len(i);
}
out
}
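// Note on granularity (illustrative numbers): on a 64-bit target `N` is 16
// bytes, so an input shorter than 16 bytes yields an empty `Vec` here and is
// converted char-by-char by the caller, while a long ASCII prefix is
// converted in bulk 16 bytes at a time.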

View File

@ -0,0 +1,717 @@
use super::*;
use std::clone::Clone;
use std::mem::MaybeUninit;
use std::option::Option::None;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::SeqCst;
use std::sync::mpsc::channel;
use std::sync::Mutex;
use std::thread;
struct Canary(*mut AtomicUsize);
impl Drop for Canary {
fn drop(&mut self) {
unsafe {
match *self {
Canary(c) => {
(*c).fetch_add(1, SeqCst);
}
}
}
}
}
struct AllocCanary<'a>(&'a AtomicUsize);
impl<'a> AllocCanary<'a> {
fn new(counter: &'a AtomicUsize) -> Self {
counter.fetch_add(1, SeqCst);
Self(counter)
}
}
unsafe impl Allocator for AllocCanary<'_> {
fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
std::alloc::Global.allocate(layout)
}
unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
unsafe { std::alloc::Global.deallocate(ptr, layout) }
}
}
impl Clone for AllocCanary<'_> {
fn clone(&self) -> Self {
Self::new(self.0)
}
}
impl Drop for AllocCanary<'_> {
fn drop(&mut self) {
self.0.fetch_sub(1, SeqCst);
}
}
#[test]
#[cfg_attr(target_os = "emscripten", ignore)]
fn manually_share_arc() {
let v = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
let arc_v = Arc::new(v);
let (tx, rx) = channel();
let _t = thread::spawn(move || {
let arc_v: Arc<Vec<i32>> = rx.recv().unwrap();
assert_eq!((*arc_v)[3], 4);
});
tx.send(arc_v.clone()).unwrap();
assert_eq!((*arc_v)[2], 3);
assert_eq!((*arc_v)[4], 5);
}
#[test]
fn test_arc_get_mut() {
let mut x = Arc::new(3);
*Arc::get_mut(&mut x).unwrap() = 4;
assert_eq!(*x, 4);
let y = x.clone();
assert!(Arc::get_mut(&mut x).is_none());
drop(y);
assert!(Arc::get_mut(&mut x).is_some());
let _w = Arc::downgrade(&x);
assert!(Arc::get_mut(&mut x).is_none());
}
#[test]
fn weak_counts() {
assert_eq!(Weak::weak_count(&Weak::<u64>::new()), 0);
assert_eq!(Weak::strong_count(&Weak::<u64>::new()), 0);
let a = Arc::new(0);
let w = Arc::downgrade(&a);
assert_eq!(Weak::strong_count(&w), 1);
assert_eq!(Weak::weak_count(&w), 1);
let w2 = w.clone();
assert_eq!(Weak::strong_count(&w), 1);
assert_eq!(Weak::weak_count(&w), 2);
assert_eq!(Weak::strong_count(&w2), 1);
assert_eq!(Weak::weak_count(&w2), 2);
drop(w);
assert_eq!(Weak::strong_count(&w2), 1);
assert_eq!(Weak::weak_count(&w2), 1);
let a2 = a.clone();
assert_eq!(Weak::strong_count(&w2), 2);
assert_eq!(Weak::weak_count(&w2), 1);
drop(a2);
drop(a);
assert_eq!(Weak::strong_count(&w2), 0);
assert_eq!(Weak::weak_count(&w2), 0);
drop(w2);
}
#[test]
fn try_unwrap() {
let x = Arc::new(3);
assert_eq!(Arc::try_unwrap(x), Ok(3));
let x = Arc::new(4);
let _y = x.clone();
assert_eq!(Arc::try_unwrap(x), Err(Arc::new(4)));
let x = Arc::new(5);
let _w = Arc::downgrade(&x);
assert_eq!(Arc::try_unwrap(x), Ok(5));
}
#[test]
fn into_inner() {
for _ in 0..100
// ^ Increase chances of hitting potential race conditions
{
let x = Arc::new(3);
let y = Arc::clone(&x);
let r_thread = std::thread::spawn(|| Arc::into_inner(x));
let s_thread = std::thread::spawn(|| Arc::into_inner(y));
let r = r_thread.join().expect("r_thread panicked");
let s = s_thread.join().expect("s_thread panicked");
assert!(
matches!((r, s), (None, Some(3)) | (Some(3), None)),
"assertion failed: unexpected result `{:?}`\
\n expected `(None, Some(3))` or `(Some(3), None)`",
(r, s),
);
}
let x = Arc::new(3);
assert_eq!(Arc::into_inner(x), Some(3));
let x = Arc::new(4);
let y = Arc::clone(&x);
assert_eq!(Arc::into_inner(x), None);
assert_eq!(Arc::into_inner(y), Some(4));
let x = Arc::new(5);
let _w = Arc::downgrade(&x);
assert_eq!(Arc::into_inner(x), Some(5));
}
#[test]
fn into_from_raw() {
let x = Arc::new(Box::new("hello"));
let y = x.clone();
let x_ptr = Arc::into_raw(x);
drop(y);
unsafe {
assert_eq!(**x_ptr, "hello");
let x = Arc::from_raw(x_ptr);
assert_eq!(**x, "hello");
assert_eq!(Arc::try_unwrap(x).map(|x| *x), Ok("hello"));
}
}
#[test]
fn test_into_from_raw_unsized() {
use std::fmt::Display;
use std::string::ToString;
let arc: Arc<str> = Arc::from("foo");
let ptr = Arc::into_raw(arc.clone());
let arc2 = unsafe { Arc::from_raw(ptr) };
assert_eq!(unsafe { &*ptr }, "foo");
assert_eq!(arc, arc2);
let arc: Arc<dyn Display> = Arc::new(123);
let ptr = Arc::into_raw(arc.clone());
let arc2 = unsafe { Arc::from_raw(ptr) };
assert_eq!(unsafe { &*ptr }.to_string(), "123");
assert_eq!(arc2.to_string(), "123");
}
#[test]
fn into_from_weak_raw() {
let x = Arc::new(Box::new("hello"));
let y = Arc::downgrade(&x);
let y_ptr = Weak::into_raw(y);
unsafe {
assert_eq!(**y_ptr, "hello");
let y = Weak::from_raw(y_ptr);
let y_up = Weak::upgrade(&y).unwrap();
assert_eq!(**y_up, "hello");
drop(y_up);
assert_eq!(Arc::try_unwrap(x).map(|x| *x), Ok("hello"));
}
}
#[test]
fn test_into_from_weak_raw_unsized() {
use std::fmt::Display;
use std::string::ToString;
let arc: Arc<str> = Arc::from("foo");
let weak: Weak<str> = Arc::downgrade(&arc);
let ptr = Weak::into_raw(weak.clone());
let weak2 = unsafe { Weak::from_raw(ptr) };
assert_eq!(unsafe { &*ptr }, "foo");
assert!(weak.ptr_eq(&weak2));
let arc: Arc<dyn Display> = Arc::new(123);
let weak: Weak<dyn Display> = Arc::downgrade(&arc);
let ptr = Weak::into_raw(weak.clone());
let weak2 = unsafe { Weak::from_raw(ptr) };
assert_eq!(unsafe { &*ptr }.to_string(), "123");
assert!(weak.ptr_eq(&weak2));
}
#[test]
fn test_cowarc_clone_make_mut() {
let mut cow0 = Arc::new(75);
let mut cow1 = cow0.clone();
let mut cow2 = cow1.clone();
assert!(75 == *Arc::make_mut(&mut cow0));
assert!(75 == *Arc::make_mut(&mut cow1));
assert!(75 == *Arc::make_mut(&mut cow2));
*Arc::make_mut(&mut cow0) += 1;
*Arc::make_mut(&mut cow1) += 2;
*Arc::make_mut(&mut cow2) += 3;
assert!(76 == *cow0);
assert!(77 == *cow1);
assert!(78 == *cow2);
// none should point to the same backing memory
assert!(*cow0 != *cow1);
assert!(*cow0 != *cow2);
assert!(*cow1 != *cow2);
}
#[test]
fn test_cowarc_clone_unique2() {
let mut cow0 = Arc::new(75);
let cow1 = cow0.clone();
let cow2 = cow1.clone();
assert!(75 == *cow0);
assert!(75 == *cow1);
assert!(75 == *cow2);
*Arc::make_mut(&mut cow0) += 1;
assert!(76 == *cow0);
assert!(75 == *cow1);
assert!(75 == *cow2);
// cow1 and cow2 should share the same contents
// cow0 should have a unique reference
assert!(*cow0 != *cow1);
assert!(*cow0 != *cow2);
assert!(*cow1 == *cow2);
}
#[test]
fn test_cowarc_clone_weak() {
let mut cow0 = Arc::new(75);
let cow1_weak = Arc::downgrade(&cow0);
assert!(75 == *cow0);
assert!(75 == *cow1_weak.upgrade().unwrap());
*Arc::make_mut(&mut cow0) += 1;
assert!(76 == *cow0);
assert!(cow1_weak.upgrade().is_none());
}
#[test]
fn test_live() {
let x = Arc::new(5);
let y = Arc::downgrade(&x);
assert!(y.upgrade().is_some());
}
#[test]
fn test_dead() {
let x = Arc::new(5);
let y = Arc::downgrade(&x);
drop(x);
assert!(y.upgrade().is_none());
}
#[test]
fn weak_self_cyclic() {
struct Cycle {
x: Mutex<Option<Weak<Cycle>>>,
}
let a = Arc::new(Cycle { x: Mutex::new(None) });
let b = Arc::downgrade(&a.clone());
*a.x.lock().unwrap() = Some(b);
// hopefully we don't double-free (or leak)...
}
#[test]
fn drop_arc() {
let mut canary = AtomicUsize::new(0);
let x = Arc::new(Canary(&mut canary as *mut AtomicUsize));
drop(x);
assert!(canary.load(Acquire) == 1);
}
#[test]
fn drop_arc_weak() {
let mut canary = AtomicUsize::new(0);
let arc = Arc::new(Canary(&mut canary as *mut AtomicUsize));
let arc_weak = Arc::downgrade(&arc);
assert!(canary.load(Acquire) == 0);
drop(arc);
assert!(canary.load(Acquire) == 1);
drop(arc_weak);
}
#[test]
fn test_strong_count() {
let a = Arc::new(0);
assert!(Arc::strong_count(&a) == 1);
let w = Arc::downgrade(&a);
assert!(Arc::strong_count(&a) == 1);
let b = w.upgrade().expect("");
assert!(Arc::strong_count(&b) == 2);
assert!(Arc::strong_count(&a) == 2);
drop(w);
drop(a);
assert!(Arc::strong_count(&b) == 1);
let c = b.clone();
assert!(Arc::strong_count(&b) == 2);
assert!(Arc::strong_count(&c) == 2);
}
#[test]
fn test_weak_count() {
let a = Arc::new(0);
assert!(Arc::strong_count(&a) == 1);
assert!(Arc::weak_count(&a) == 0);
let w = Arc::downgrade(&a);
assert!(Arc::strong_count(&a) == 1);
assert!(Arc::weak_count(&a) == 1);
let x = w.clone();
assert!(Arc::weak_count(&a) == 2);
drop(w);
drop(x);
assert!(Arc::strong_count(&a) == 1);
assert!(Arc::weak_count(&a) == 0);
let c = a.clone();
assert!(Arc::strong_count(&a) == 2);
assert!(Arc::weak_count(&a) == 0);
let d = Arc::downgrade(&c);
assert!(Arc::weak_count(&c) == 1);
assert!(Arc::strong_count(&c) == 2);
drop(a);
drop(c);
drop(d);
}
#[test]
fn show_arc() {
let a = Arc::new(5);
assert_eq!(format!("{a:?}"), "5");
}
// Make sure deriving works with Arc<T>
#[derive(Eq, Ord, PartialEq, PartialOrd, Clone, Debug, Default)]
struct Foo {
inner: Arc<i32>,
}
#[test]
fn test_unsized() {
let x: Arc<[i32]> = Arc::new([1, 2, 3]);
assert_eq!(format!("{x:?}"), "[1, 2, 3]");
let y = Arc::downgrade(&x.clone());
drop(x);
assert!(y.upgrade().is_none());
}
#[test]
fn test_maybe_thin_unsized() {
// If/when custom thin DSTs exist, this test should be updated to use one
use std::ffi::{CStr, CString};
let x: Arc<CStr> = Arc::from(CString::new("swordfish").unwrap().into_boxed_c_str());
assert_eq!(format!("{x:?}"), "\"swordfish\"");
let y: Weak<CStr> = Arc::downgrade(&x);
drop(x);
// At this point, the weak points to a dropped DST
assert!(y.upgrade().is_none());
// But we still need to be able to get the alloc layout to drop.
// CStr has no drop glue, but custom DSTs might, and need to work.
drop(y);
}
#[test]
fn test_from_owned() {
let foo = 123;
let foo_arc = Arc::from(foo);
assert!(123 == *foo_arc);
}
#[test]
fn test_new_weak() {
let foo: Weak<usize> = Weak::new();
assert!(foo.upgrade().is_none());
}
#[test]
fn test_ptr_eq() {
let five = Arc::new(5);
let same_five = five.clone();
let other_five = Arc::new(5);
assert!(Arc::ptr_eq(&five, &same_five));
assert!(!Arc::ptr_eq(&five, &other_five));
}
#[test]
#[cfg_attr(target_os = "emscripten", ignore)]
fn test_weak_count_locked() {
let mut a = Arc::new(atomic::AtomicBool::new(false));
let a2 = a.clone();
let t = thread::spawn(move || {
// Miri is too slow
let count = if cfg!(miri) { 1000 } else { 1000000 };
for _i in 0..count {
Arc::get_mut(&mut a);
}
a.store(true, SeqCst);
});
while !a2.load(SeqCst) {
let n = Arc::weak_count(&a2);
assert!(n < 2, "bad weak count: {}", n);
#[cfg(miri)] // Miri's scheduler does not guarantee liveness, and thus needs this hint.
std::hint::spin_loop();
}
t.join().unwrap();
}
#[test]
fn test_from_str() {
let r: Arc<str> = Arc::from("foo");
assert_eq!(&r[..], "foo");
}
#[test]
fn test_copy_from_slice() {
let s: &[u32] = &[1, 2, 3];
let r: Arc<[u32]> = Arc::from(s);
assert_eq!(&r[..], [1, 2, 3]);
}
#[test]
fn test_clone_from_slice() {
#[derive(Clone, Debug, Eq, PartialEq)]
struct X(u32);
let s: &[X] = &[X(1), X(2), X(3)];
let r: Arc<[X]> = Arc::from(s);
assert_eq!(&r[..], s);
}
#[test]
#[should_panic]
fn test_clone_from_slice_panic() {
use std::string::{String, ToString};
struct Fail(u32, String);
impl Clone for Fail {
fn clone(&self) -> Fail {
if self.0 == 2 {
panic!();
}
Fail(self.0, self.1.clone())
}
}
let s: &[Fail] =
&[Fail(0, "foo".to_string()), Fail(1, "bar".to_string()), Fail(2, "baz".to_string())];
// Should panic, but not cause memory corruption
let _r: Arc<[Fail]> = Arc::from(s);
}
#[test]
fn test_from_box() {
let b: Box<u32> = Box::new(123);
let r: Arc<u32> = Arc::from(b);
assert_eq!(*r, 123);
}
#[test]
fn test_from_box_str() {
use std::string::String;
let s = String::from("foo").into_boxed_str();
let r: Arc<str> = Arc::from(s);
assert_eq!(&r[..], "foo");
}
#[test]
fn test_from_box_slice() {
let s = vec![1, 2, 3].into_boxed_slice();
let r: Arc<[u32]> = Arc::from(s);
assert_eq!(&r[..], [1, 2, 3]);
}
#[test]
fn test_from_box_trait() {
use std::fmt::Display;
use std::string::ToString;
let b: Box<dyn Display> = Box::new(123);
let r: Arc<dyn Display> = Arc::from(b);
assert_eq!(r.to_string(), "123");
}
#[test]
fn test_from_box_trait_zero_sized() {
use std::fmt::Debug;
let b: Box<dyn Debug> = Box::new(());
let r: Arc<dyn Debug> = Arc::from(b);
assert_eq!(format!("{r:?}"), "()");
}
#[test]
fn test_from_vec() {
let v = vec![1, 2, 3];
let r: Arc<[u32]> = Arc::from(v);
assert_eq!(&r[..], [1, 2, 3]);
}
#[test]
fn test_downcast() {
use std::any::Any;
let r1: Arc<dyn Any + Send + Sync> = Arc::new(i32::MAX);
let r2: Arc<dyn Any + Send + Sync> = Arc::new("abc");
assert!(r1.clone().downcast::<u32>().is_err());
let r1i32 = r1.downcast::<i32>();
assert!(r1i32.is_ok());
assert_eq!(r1i32.unwrap(), Arc::new(i32::MAX));
assert!(r2.clone().downcast::<i32>().is_err());
let r2str = r2.downcast::<&'static str>();
assert!(r2str.is_ok());
assert_eq!(r2str.unwrap(), Arc::new("abc"));
}
#[test]
fn test_array_from_slice() {
let v = vec![1, 2, 3];
let r: Arc<[u32]> = Arc::from(v);
let a: Result<Arc<[u32; 3]>, _> = r.clone().try_into();
assert!(a.is_ok());
let a: Result<Arc<[u32; 2]>, _> = r.clone().try_into();
assert!(a.is_err());
}
#[test]
fn test_arc_cyclic_with_zero_refs() {
struct ZeroRefs {
inner: Weak<ZeroRefs>,
}
let zero_refs = Arc::new_cyclic(|inner| {
assert_eq!(inner.strong_count(), 0);
assert!(inner.upgrade().is_none());
ZeroRefs { inner: Weak::new() }
});
assert_eq!(Arc::strong_count(&zero_refs), 1);
assert_eq!(Arc::weak_count(&zero_refs), 0);
assert_eq!(zero_refs.inner.strong_count(), 0);
assert_eq!(zero_refs.inner.weak_count(), 0);
}
#[test]
fn test_arc_new_cyclic_one_ref() {
struct OneRef {
inner: Weak<OneRef>,
}
let one_ref = Arc::new_cyclic(|inner| {
assert_eq!(inner.strong_count(), 0);
assert!(inner.upgrade().is_none());
OneRef { inner: inner.clone() }
});
assert_eq!(Arc::strong_count(&one_ref), 1);
assert_eq!(Arc::weak_count(&one_ref), 1);
let one_ref2 = Weak::upgrade(&one_ref.inner).unwrap();
assert!(Arc::ptr_eq(&one_ref, &one_ref2));
assert_eq!(Arc::strong_count(&one_ref), 2);
assert_eq!(Arc::weak_count(&one_ref), 1);
}
#[test]
fn test_arc_cyclic_two_refs() {
struct TwoRefs {
inner1: Weak<TwoRefs>,
inner2: Weak<TwoRefs>,
}
let two_refs = Arc::new_cyclic(|inner| {
assert_eq!(inner.strong_count(), 0);
assert!(inner.upgrade().is_none());
let inner1 = inner.clone();
let inner2 = inner1.clone();
TwoRefs { inner1, inner2 }
});
assert_eq!(Arc::strong_count(&two_refs), 1);
assert_eq!(Arc::weak_count(&two_refs), 2);
let two_refs1 = Weak::upgrade(&two_refs.inner1).unwrap();
assert!(Arc::ptr_eq(&two_refs, &two_refs1));
let two_refs2 = Weak::upgrade(&two_refs.inner2).unwrap();
assert!(Arc::ptr_eq(&two_refs, &two_refs2));
assert_eq!(Arc::strong_count(&two_refs), 3);
assert_eq!(Arc::weak_count(&two_refs), 2);
}
/// Test for Arc::drop bug (https://github.com/rust-lang/rust/issues/55005)
#[test]
#[cfg(miri)] // relies on Stacked Borrows in Miri
fn arc_drop_dereferenceable_race() {
// The bug seems to take up to 700 iterations to reproduce with most seeds (tested 0-9).
for _ in 0..750 {
let arc_1 = Arc::new(());
let arc_2 = arc_1.clone();
let thread = thread::spawn(|| drop(arc_2));
// Spin a bit; makes the race more likely to appear
let mut i = 0;
while i < 256 {
i += 1;
}
drop(arc_1);
thread.join().unwrap();
}
}
#[test]
fn arc_doesnt_leak_allocator() {
let counter = AtomicUsize::new(0);
{
let arc: Arc<dyn Any + Send + Sync, _> = Arc::new_in(5usize, AllocCanary::new(&counter));
drop(arc.downcast::<usize>().unwrap());
let arc: Arc<dyn Any + Send + Sync, _> = Arc::new_in(5usize, AllocCanary::new(&counter));
drop(unsafe { arc.downcast_unchecked::<usize>() });
let arc = Arc::new_in(MaybeUninit::<usize>::new(5usize), AllocCanary::new(&counter));
drop(unsafe { arc.assume_init() });
let arc: Arc<[MaybeUninit<usize>], _> =
Arc::new_zeroed_slice_in(5, AllocCanary::new(&counter));
drop(unsafe { arc.assume_init() });
}
assert_eq!(counter.load(SeqCst), 0);
}

View File

@ -0,0 +1,349 @@
#![stable(feature = "wake_trait", since = "1.51.0")]
//! Types and Traits for working with asynchronous tasks.
//!
//! **Note**: Some of the types in this module are only available
//! on platforms that support atomic loads and stores of pointers.
//! This may be detected at compile time using
//! `#[cfg(target_has_atomic = "ptr")]`.
use crate::rc::Rc;
use core::mem::ManuallyDrop;
use core::task::{LocalWaker, RawWaker, RawWakerVTable};
#[cfg(target_has_atomic = "ptr")]
use crate::sync::Arc;
#[cfg(target_has_atomic = "ptr")]
use core::task::Waker;
/// The implementation of waking a task on an executor.
///
/// This trait can be used to create a [`Waker`]. An executor can define an
/// implementation of this trait, and use that to construct a [`Waker`] to pass
/// to the tasks that are executed on that executor.
///
/// This trait is a memory-safe and ergonomic alternative to constructing a
/// [`RawWaker`]. It supports the common executor design in which the data used
/// to wake up a task is stored in an [`Arc`]. Some executors (especially
/// those for embedded systems) cannot use this API, which is why [`RawWaker`]
/// exists as an alternative for those systems.
///
/// To construct a [`Waker`] from some type `W` implementing this trait,
/// wrap it in an [`Arc<W>`](Arc) and call `Waker::from()` on that.
/// It is also possible to convert to [`RawWaker`] in the same way.
///
/// <!-- Ideally we'd link to the `From` impl, but rustdoc doesn't generate any page for it within
/// `alloc` because `alloc` neither defines nor re-exports `From` or `Waker`, and we can't
/// link ../../std/task/struct.Waker.html#impl-From%3CArc%3CW,+Global%3E%3E-for-Waker
/// without getting a link-checking error in CI. -->
///
/// # Examples
///
/// A basic `block_on` function that takes a future and runs it to completion on
/// the current thread.
///
/// **Note:** This example trades correctness for simplicity. In order to prevent
/// deadlocks, production-grade implementations will also need to handle
/// intermediate calls to `thread::unpark` as well as nested invocations.
///
/// ```rust
/// use std::future::Future;
/// use std::sync::Arc;
/// use std::task::{Context, Poll, Wake};
/// use std::thread::{self, Thread};
/// use core::pin::pin;
///
/// /// A waker that wakes up the current thread when called.
/// struct ThreadWaker(Thread);
///
/// impl Wake for ThreadWaker {
/// fn wake(self: Arc<Self>) {
/// self.0.unpark();
/// }
/// }
///
/// /// Run a future to completion on the current thread.
/// fn block_on<T>(fut: impl Future<Output = T>) -> T {
/// // Pin the future so it can be polled.
/// let mut fut = pin!(fut);
///
/// // Create a new context to be passed to the future.
/// let t = thread::current();
/// let waker = Arc::new(ThreadWaker(t)).into();
/// let mut cx = Context::from_waker(&waker);
///
/// // Run the future to completion.
/// loop {
/// match fut.as_mut().poll(&mut cx) {
/// Poll::Ready(res) => return res,
/// Poll::Pending => thread::park(),
/// }
/// }
/// }
///
/// block_on(async {
/// println!("Hi from inside a future!");
/// });
/// ```
#[cfg(target_has_atomic = "ptr")]
#[stable(feature = "wake_trait", since = "1.51.0")]
pub trait Wake {
/// Wake this task.
#[stable(feature = "wake_trait", since = "1.51.0")]
fn wake(self: Arc<Self>);
/// Wake this task without consuming the waker.
///
/// If an executor supports a cheaper way to wake without consuming the
/// waker, it should override this method. By default, it clones the
/// [`Arc`] and calls [`wake`] on the clone.
///
/// [`wake`]: Wake::wake
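///
/// # Examples
///
/// A sketch of a waker that can wake without cloning the [`Arc`]
/// (`FlagWaker` is a hypothetical type, used only for illustration):
///
/// ```
/// use std::sync::Arc;
/// use std::sync::atomic::{AtomicBool, Ordering};
/// use std::task::Wake;
///
/// struct FlagWaker(AtomicBool);
///
/// impl Wake for FlagWaker {
///     fn wake(self: Arc<Self>) {
///         self.wake_by_ref();
///     }
///     fn wake_by_ref(self: &Arc<Self>) {
///         // No `Arc` clone needed: just mark the task as ready.
///         self.0.store(true, Ordering::Release);
///     }
/// }
///
/// let waker = Arc::new(FlagWaker(AtomicBool::new(false)));
/// waker.wake_by_ref();
/// assert!(waker.0.load(Ordering::Acquire));
/// ```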
#[stable(feature = "wake_trait", since = "1.51.0")]
fn wake_by_ref(self: &Arc<Self>) {
self.clone().wake();
}
}
#[cfg(target_has_atomic = "ptr")]
#[stable(feature = "wake_trait", since = "1.51.0")]
impl<W: Wake + Send + Sync + 'static> From<Arc<W>> for Waker {
/// Use a [`Wake`]-able type as a `Waker`.
///
/// No heap allocations or atomic operations are used for this conversion.
fn from(waker: Arc<W>) -> Waker {
// SAFETY: This is safe because raw_waker safely constructs
// a RawWaker from Arc<W>.
unsafe { Waker::from_raw(raw_waker(waker)) }
}
}
#[cfg(target_has_atomic = "ptr")]
#[stable(feature = "wake_trait", since = "1.51.0")]
impl<W: Wake + Send + Sync + 'static> From<Arc<W>> for RawWaker {
/// Use a `Wake`-able type as a `RawWaker`.
///
/// No heap allocations or atomic operations are used for this conversion.
fn from(waker: Arc<W>) -> RawWaker {
raw_waker(waker)
}
}
// NB: This private function for constructing a RawWaker is used, rather than
// inlining this into the `From<Arc<W>> for RawWaker` impl, to ensure that
// the safety of `From<Arc<W>> for Waker` does not depend on the correct
// trait dispatch - instead both impls call this function directly and
// explicitly.
#[cfg(target_has_atomic = "ptr")]
#[inline(always)]
fn raw_waker<W: Wake + Send + Sync + 'static>(waker: Arc<W>) -> RawWaker {
// Increment the reference count of the arc to clone it.
//
// The #[inline(always)] is to ensure that raw_waker and clone_waker are
// always generated in the same code generation unit as one another, and
// therefore that the structurally identical const-promoted RawWakerVTable
// within both functions is deduplicated at LLVM IR code generation time.
// This allows optimizing Waker::will_wake to a single pointer comparison of
// the vtable pointers, rather than comparing all four function pointers
// within the vtables.
#[inline(always)]
unsafe fn clone_waker<W: Wake + Send + Sync + 'static>(waker: *const ()) -> RawWaker {
unsafe { Arc::increment_strong_count(waker as *const W) };
RawWaker::new(
waker,
&RawWakerVTable::new(clone_waker::<W>, wake::<W>, wake_by_ref::<W>, drop_waker::<W>),
)
}
// Wake by value, moving the Arc into the Wake::wake function
unsafe fn wake<W: Wake + Send + Sync + 'static>(waker: *const ()) {
let waker = unsafe { Arc::from_raw(waker as *const W) };
<W as Wake>::wake(waker);
}
// Wake by reference, wrap the waker in ManuallyDrop to avoid dropping it
unsafe fn wake_by_ref<W: Wake + Send + Sync + 'static>(waker: *const ()) {
let waker = unsafe { ManuallyDrop::new(Arc::from_raw(waker as *const W)) };
<W as Wake>::wake_by_ref(&waker);
}
// Decrement the reference count of the Arc on drop
unsafe fn drop_waker<W: Wake + Send + Sync + 'static>(waker: *const ()) {
unsafe { Arc::decrement_strong_count(waker as *const W) };
}
RawWaker::new(
Arc::into_raw(waker) as *const (),
&RawWakerVTable::new(clone_waker::<W>, wake::<W>, wake_by_ref::<W>, drop_waker::<W>),
)
}
/// An analogous trait to `Wake` but used to construct a `LocalWaker`. This API
/// works in exactly the same way as `Wake`, except that it uses an `Rc` instead
/// of an `Arc`, and the result is a `LocalWaker` instead of a `Waker`.
///
/// The benefit of using `LocalWaker` over `Waker` is that it allows the local waker
/// to hold data that does not implement `Send` or `Sync`. Additionally, it avoids
/// calls to `Arc::clone`, which require atomic synchronization.
///
/// # Examples
///
/// This is a simplified example of a `spawn` and a `block_on` function. The `spawn` function
/// is used to push new tasks onto the run queue, while the `block_on` function removes and
/// polls them. When a task is woken, it puts itself back on the run queue to be polled by
/// the executor.
///
/// **Note:** This example trades correctness for simplicity. A real-world implementation
/// would interleave poll calls with calls to an I/O reactor to wait for events instead of
/// spinning in a loop.
///
/// ```rust
/// #![feature(local_waker)]
/// #![feature(noop_waker)]
/// use std::task::{LocalWake, ContextBuilder, LocalWaker, Waker};
/// use std::future::Future;
/// use std::pin::Pin;
/// use std::rc::Rc;
/// use std::cell::RefCell;
/// use std::collections::VecDeque;
///
///
/// thread_local! {
/// // A queue containing all tasks ready to do progress
/// static RUN_QUEUE: RefCell<VecDeque<Rc<Task>>> = RefCell::default();
/// }
///
/// type BoxedFuture = Pin<Box<dyn Future<Output = ()>>>;
///
/// struct Task(RefCell<BoxedFuture>);
///
/// impl LocalWake for Task {
/// fn wake(self: Rc<Self>) {
/// RUN_QUEUE.with_borrow_mut(|queue| {
/// queue.push_back(self)
/// })
/// }
/// }
///
/// fn spawn<F>(future: F)
/// where
/// F: Future<Output=()> + 'static
/// {
/// let task = RefCell::new(Box::pin(future));
/// RUN_QUEUE.with_borrow_mut(|queue| {
/// queue.push_back(Rc::new(Task(task)));
/// });
/// }
///
/// fn block_on<F>(future: F)
/// where
/// F: Future<Output=()> + 'static
/// {
/// spawn(future);
/// loop {
/// let Some(task) = RUN_QUEUE.with_borrow_mut(|queue| queue.pop_front()) else {
/// // we exit, since there are no more tasks remaining on the queue
/// return;
/// };
///
/// // cast the Rc<Task> into a `LocalWaker`
/// let local_waker: LocalWaker = task.clone().into();
/// // Build the context using `ContextBuilder`
/// let mut cx = ContextBuilder::from_waker(Waker::noop())
/// .local_waker(&local_waker)
/// .build();
///
/// // Poll the task
/// let _ = task.0
/// .borrow_mut()
/// .as_mut()
/// .poll(&mut cx);
/// }
/// }
///
/// block_on(async {
/// println!("hello world");
/// });
/// ```
///
#[unstable(feature = "local_waker", issue = "118959")]
pub trait LocalWake {
/// Wake this task.
#[unstable(feature = "local_waker", issue = "118959")]
fn wake(self: Rc<Self>);
/// Wake this task without consuming the local waker.
///
/// If an executor supports a cheaper way to wake without consuming the
/// waker, it should override this method. By default, it clones the
/// [`Rc`] and calls [`wake`] on the clone.
///
/// [`wake`]: LocalWake::wake
#[unstable(feature = "local_waker", issue = "118959")]
fn wake_by_ref(self: &Rc<Self>) {
self.clone().wake();
}
}
#[unstable(feature = "local_waker", issue = "118959")]
impl<W: LocalWake + 'static> From<Rc<W>> for LocalWaker {
/// Use a `Wake`-able type as a `LocalWaker`.
///
/// No heap allocations or atomic operations are used for this conversion.
fn from(waker: Rc<W>) -> LocalWaker {
// SAFETY: This is safe because raw_waker safely constructs
// a RawWaker from Rc<W>.
unsafe { LocalWaker::from_raw(local_raw_waker(waker)) }
}
}
#[allow(ineffective_unstable_trait_impl)]
#[unstable(feature = "local_waker", issue = "118959")]
impl<W: LocalWake + 'static> From<Rc<W>> for RawWaker {
/// Use a `Wake`-able type as a `RawWaker`.
///
/// No heap allocations or atomic operations are used for this conversion.
fn from(waker: Rc<W>) -> RawWaker {
local_raw_waker(waker)
}
}
// NB: This private function for constructing a RawWaker is used, rather than
// inlining this into the `From<Rc<W>> for RawWaker` impl, to ensure that
// the safety of `From<Rc<W>> for LocalWaker` does not depend on the correct
// trait dispatch - instead both impls call this function directly and
// explicitly.
#[inline(always)]
fn local_raw_waker<W: LocalWake + 'static>(waker: Rc<W>) -> RawWaker {
// Increment the reference count of the Rc to clone it.
//
// Refer to the comment on raw_waker's clone_waker regarding why this is
// always inline.
#[inline(always)]
unsafe fn clone_waker<W: LocalWake + 'static>(waker: *const ()) -> RawWaker {
unsafe { Rc::increment_strong_count(waker as *const W) };
RawWaker::new(
waker,
&RawWakerVTable::new(clone_waker::<W>, wake::<W>, wake_by_ref::<W>, drop_waker::<W>),
)
}
// Wake by value, moving the Rc into the LocalWake::wake function
unsafe fn wake<W: LocalWake + 'static>(waker: *const ()) {
let waker = unsafe { Rc::from_raw(waker as *const W) };
<W as LocalWake>::wake(waker);
}
// Wake by reference, wrap the waker in ManuallyDrop to avoid dropping it
unsafe fn wake_by_ref<W: LocalWake + 'static>(waker: *const ()) {
let waker = unsafe { ManuallyDrop::new(Rc::from_raw(waker as *const W)) };
<W as LocalWake>::wake_by_ref(&waker);
}
// Decrement the reference count of the Rc on drop
unsafe fn drop_waker<W: LocalWake + 'static>(waker: *const ()) {
unsafe { Rc::decrement_strong_count(waker as *const W) };
}
RawWaker::new(
Rc::into_raw(waker) as *const (),
&RawWakerVTable::new(clone_waker::<W>, wake::<W>, wake_by_ref::<W>, drop_waker::<W>),
)
}

View File

@ -0,0 +1,119 @@
// We avoid relying on anything else in the crate, apart from the `Debug` trait.
use crate::fmt::Debug;
use std::cmp::Ordering;
use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};
/// A blueprint for crash test dummy instances that monitor particular events.
/// Some instances may be configured to panic at some point.
/// Events are `clone`, `drop` or some anonymous `query`.
///
/// Crash test dummies are identified and ordered by an id, so they can be used
/// as keys in a BTreeMap.
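///
/// For example:
///
/// ```ignore (internal test-support code)
/// let dummy = CrashTestDummy::new(0);
/// drop(dummy.spawn(Panic::Never));
/// assert_eq!(dummy.dropped(), 1);
/// ```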
#[derive(Debug)]
pub struct CrashTestDummy {
pub id: usize,
cloned: AtomicUsize,
dropped: AtomicUsize,
queried: AtomicUsize,
}
impl CrashTestDummy {
/// Creates a crash test dummy design. The `id` determines order and equality of instances.
pub fn new(id: usize) -> CrashTestDummy {
CrashTestDummy {
id,
cloned: AtomicUsize::new(0),
dropped: AtomicUsize::new(0),
queried: AtomicUsize::new(0),
}
}
/// Creates an instance of a crash test dummy that records what events it experiences
/// and optionally panics.
pub fn spawn(&self, panic: Panic) -> Instance<'_> {
Instance { origin: self, panic }
}
/// Returns how many times instances of the dummy have been cloned.
pub fn cloned(&self) -> usize {
self.cloned.load(SeqCst)
}
/// Returns how many times instances of the dummy have been dropped.
pub fn dropped(&self) -> usize {
self.dropped.load(SeqCst)
}
/// Returns how many times instances of the dummy have had their `query` member invoked.
pub fn queried(&self) -> usize {
self.queried.load(SeqCst)
}
}
#[derive(Debug)]
pub struct Instance<'a> {
origin: &'a CrashTestDummy,
panic: Panic,
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum Panic {
Never,
InClone,
InDrop,
InQuery,
}
impl Instance<'_> {
pub fn id(&self) -> usize {
self.origin.id
}
/// Performs some anonymous query, the result of which is supplied by the caller.
pub fn query<R>(&self, result: R) -> R {
self.origin.queried.fetch_add(1, SeqCst);
if self.panic == Panic::InQuery {
panic!("panic in `query`");
}
result
}
}
impl Clone for Instance<'_> {
fn clone(&self) -> Self {
self.origin.cloned.fetch_add(1, SeqCst);
if self.panic == Panic::InClone {
panic!("panic in `clone`");
}
Self { origin: self.origin, panic: Panic::Never }
}
}
impl Drop for Instance<'_> {
fn drop(&mut self) {
self.origin.dropped.fetch_add(1, SeqCst);
if self.panic == Panic::InDrop {
panic!("panic in `drop`");
}
}
}
impl PartialOrd for Instance<'_> {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
self.id().partial_cmp(&other.id())
}
}
impl Ord for Instance<'_> {
fn cmp(&self, other: &Self) -> Ordering {
self.id().cmp(&other.id())
}
}
impl PartialEq for Instance<'_> {
fn eq(&self, other: &Self) -> bool {
self.id().eq(&other.id())
}
}
impl Eq for Instance<'_> {}

View File

@ -0,0 +1,3 @@
pub mod crash_test;
pub mod ord_chaos;
pub mod rng;

View File

@ -0,0 +1,81 @@
use std::cell::Cell;
use std::cmp::Ordering::{self, *};
use std::ptr;
// Minimal type with an `Ord` implementation violating transitivity.
#[derive(Debug)]
pub enum Cyclic3 {
A,
B,
C,
}
use Cyclic3::*;
impl PartialOrd for Cyclic3 {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Ord for Cyclic3 {
fn cmp(&self, other: &Self) -> Ordering {
match (self, other) {
(A, A) | (B, B) | (C, C) => Equal,
(A, B) | (B, C) | (C, A) => Less,
(A, C) | (B, A) | (C, B) => Greater,
}
}
}
impl PartialEq for Cyclic3 {
fn eq(&self, other: &Self) -> bool {
self.cmp(&other) == Equal
}
}
impl Eq for Cyclic3 {}
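// For example: `A < B` and `B < C`, yet `C < A`. Feeding `Cyclic3` keys to a
// sort or a `BTreeMap` therefore exercises how those structures cope with a
// broken (non-transitive) `Ord`, without any unsafe code involved.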
// Controls the ordering of values wrapped by `Governed`.
#[derive(Debug)]
pub struct Governor {
flipped: Cell<bool>,
}
impl Governor {
pub fn new() -> Self {
Governor { flipped: Cell::new(false) }
}
pub fn flip(&self) {
self.flipped.set(!self.flipped.get());
}
}
// Type with an `Ord` implementation that forms a total order at any moment
// (assuming that `T` respects total order), but can suddenly be made to invert
// that total order.
#[derive(Debug)]
pub struct Governed<'a, T>(pub T, pub &'a Governor);
impl<T: Ord> PartialOrd for Governed<'_, T> {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl<T: Ord> Ord for Governed<'_, T> {
fn cmp(&self, other: &Self) -> Ordering {
assert!(ptr::eq(self.1, other.1));
let ord = self.0.cmp(&other.0);
if self.1.flipped.get() { ord.reverse() } else { ord }
}
}
impl<T: PartialEq> PartialEq for Governed<'_, T> {
fn eq(&self, other: &Self) -> bool {
assert!(ptr::eq(self.1, other.1));
self.0.eq(&other.0)
}
}
impl<T: Eq> Eq for Governed<'_, T> {}
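// For example, flipping the governor inverts every subsequent comparison:
//
//     let g = Governor::new();
//     let (a, b) = (Governed(1, &g), Governed(2, &g));
//     assert!(a < b);
//     g.flip();
//     assert!(a > b);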

View File

@ -0,0 +1,28 @@
/// XorShiftRng
pub struct DeterministicRng {
count: usize,
x: u32,
y: u32,
z: u32,
w: u32,
}
impl DeterministicRng {
pub fn new() -> Self {
DeterministicRng { count: 0, x: 0x193a6754, y: 0xa8a7d469, z: 0x97830e05, w: 0x113ba7bb }
}
/// Returns the next pseudorandom number. The first 70029 results are known to be
/// unique; the assertion below keeps callers within that range.
pub fn next(&mut self) -> u32 {
self.count += 1;
assert!(self.count <= 70029);
let x = self.x;
let t = x ^ (x << 11);
self.x = self.y;
self.y = self.z;
self.z = self.w;
let w_ = self.w;
self.w = w_ ^ (w_ >> 19) ^ (t ^ (t >> 8));
self.w
}
}
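// Example usage (illustrative):
//
//     let mut rng = DeterministicRng::new();
//     let (a, b) = (rng.next(), rng.next());
//     assert_ne!(a, b); // distinct, as guaranteed for the first 70029 draws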

View File

@ -0,0 +1,141 @@
//! Test for `boxed` mod.
use core::any::Any;
use core::ops::Deref;
use std::boxed::Box;
#[test]
fn test_owned_clone() {
let a = Box::new(5);
let b: Box<i32> = a.clone();
assert!(a == b);
}
#[derive(Debug, PartialEq, Eq)]
struct Test;
#[test]
fn any_move() {
let a = Box::new(8) as Box<dyn Any>;
let b = Box::new(Test) as Box<dyn Any>;
let a: Box<i32> = a.downcast::<i32>().unwrap();
assert_eq!(*a, 8);
let b: Box<Test> = b.downcast::<Test>().unwrap();
assert_eq!(*b, Test);
let a = Box::new(8) as Box<dyn Any>;
let b = Box::new(Test) as Box<dyn Any>;
assert!(a.downcast::<Box<i32>>().is_err());
assert!(b.downcast::<Box<Test>>().is_err());
}
#[test]
fn test_show() {
let a = Box::new(8) as Box<dyn Any>;
let b = Box::new(Test) as Box<dyn Any>;
let a_str = format!("{a:?}");
let b_str = format!("{b:?}");
assert_eq!(a_str, "Any { .. }");
assert_eq!(b_str, "Any { .. }");
static EIGHT: usize = 8;
static TEST: Test = Test;
let a = &EIGHT as &dyn Any;
let b = &TEST as &dyn Any;
let s = format!("{a:?}");
assert_eq!(s, "Any { .. }");
let s = format!("{b:?}");
assert_eq!(s, "Any { .. }");
}
#[test]
fn deref() {
fn homura<T: Deref<Target = i32>>(_: T) {}
homura(Box::new(765));
}
#[test]
fn raw_sized() {
let x = Box::new(17);
let p = Box::into_raw(x);
unsafe {
assert_eq!(17, *p);
*p = 19;
let y = Box::from_raw(p);
assert_eq!(19, *y);
}
}
#[test]
fn raw_trait() {
trait Foo {
fn get(&self) -> u32;
fn set(&mut self, value: u32);
}
struct Bar(u32);
impl Foo for Bar {
fn get(&self) -> u32 {
self.0
}
fn set(&mut self, value: u32) {
self.0 = value;
}
}
let x: Box<dyn Foo> = Box::new(Bar(17));
let p = Box::into_raw(x);
unsafe {
assert_eq!(17, (*p).get());
(*p).set(19);
let y: Box<dyn Foo> = Box::from_raw(p);
assert_eq!(19, y.get());
}
}
#[test]
fn f64_slice() {
let slice: &[f64] = &[-1.0, 0.0, 1.0, f64::INFINITY];
let boxed: Box<[f64]> = Box::from(slice);
assert_eq!(&*boxed, slice)
}
#[test]
fn i64_slice() {
let slice: &[i64] = &[i64::MIN, -2, -1, 0, 1, 2, i64::MAX];
let boxed: Box<[i64]> = Box::from(slice);
assert_eq!(&*boxed, slice)
}
#[test]
fn str_slice() {
let s = "Hello, world!";
let boxed: Box<str> = Box::from(s);
assert_eq!(&*boxed, s)
}
#[test]
fn boxed_slice_from_iter() {
let iter = 0..100;
let boxed: Box<[u32]> = iter.collect();
assert_eq!(boxed.len(), 100);
assert_eq!(boxed[7], 7);
}
#[test]
fn test_array_from_slice() {
let v = vec![1, 2, 3];
let r: Box<[u32]> = v.into_boxed_slice();
let a: Result<Box<[u32; 3]>, _> = r.clone().try_into();
assert!(a.is_ok());
let a: Result<Box<[u32; 2]>, _> = r.clone().try_into();
assert!(a.is_err());
}

View File

@ -0,0 +1,65 @@
use crate::borrow::Cow;
use super::Vec;
#[stable(feature = "cow_from_vec", since = "1.8.0")]
impl<'a, T: Clone> From<&'a [T]> for Cow<'a, [T]> {
/// Creates a [`Borrowed`] variant of [`Cow`]
/// from a slice.
///
/// This conversion does not allocate or clone the data.
///
/// [`Borrowed`]: crate::borrow::Cow::Borrowed
fn from(s: &'a [T]) -> Cow<'a, [T]> {
Cow::Borrowed(s)
}
}
#[stable(feature = "cow_from_array_ref", since = "1.77.0")]
impl<'a, T: Clone, const N: usize> From<&'a [T; N]> for Cow<'a, [T]> {
/// Creates a [`Borrowed`] variant of [`Cow`]
/// from a reference to an array.
///
/// This conversion does not allocate or clone the data.
///
/// [`Borrowed`]: crate::borrow::Cow::Borrowed
fn from(s: &'a [T; N]) -> Cow<'a, [T]> {
Cow::Borrowed(s as &[_])
}
}
#[stable(feature = "cow_from_vec", since = "1.8.0")]
impl<'a, T: Clone> From<Vec<T>> for Cow<'a, [T]> {
/// Creates an [`Owned`] variant of [`Cow`]
/// from an owned instance of [`Vec`].
///
/// This conversion does not allocate or clone the data.
///
/// [`Owned`]: crate::borrow::Cow::Owned
fn from(v: Vec<T>) -> Cow<'a, [T]> {
Cow::Owned(v)
}
}
#[stable(feature = "cow_from_vec_ref", since = "1.28.0")]
impl<'a, T: Clone> From<&'a Vec<T>> for Cow<'a, [T]> {
/// Creates a [`Borrowed`] variant of [`Cow`]
/// from a reference to [`Vec`].
///
/// This conversion does not allocate or clone the data.
///
/// [`Borrowed`]: crate::borrow::Cow::Borrowed
fn from(v: &'a Vec<T>) -> Cow<'a, [T]> {
Cow::Borrowed(v.as_slice())
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> FromIterator<T> for Cow<'a, [T]>
where
T: Clone,
{
fn from_iter<I: IntoIterator<Item = T>>(it: I) -> Cow<'a, [T]> {
Cow::Owned(FromIterator::from_iter(it))
}
}
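// Collecting an iterator always produces the `Owned` variant, for example:
//
//     use std::borrow::Cow;
//     let c: Cow<'_, [i32]> = (0..3).collect();
//     assert!(matches!(c, Cow::Owned(_)));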

View File

@ -0,0 +1,253 @@
use crate::alloc::{Allocator, Global};
use core::fmt;
use core::iter::{FusedIterator, TrustedLen};
use core::mem::{self, ManuallyDrop, SizedTypeProperties};
use core::ptr::{self, NonNull};
use core::slice::{self};
use super::Vec;
/// A draining iterator for `Vec<T>`.
///
/// This `struct` is created by [`Vec::drain`].
/// See its documentation for more.
///
/// # Example
///
/// ```
/// let mut v = vec![0, 1, 2];
/// let iter: std::vec::Drain<'_, _> = v.drain(..);
/// ```
#[stable(feature = "drain", since = "1.6.0")]
pub struct Drain<
'a,
T: 'a,
#[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + 'a = Global,
> {
/// Index of tail to preserve
pub(super) tail_start: usize,
/// Length of tail
pub(super) tail_len: usize,
/// Current remaining range to remove
pub(super) iter: slice::Iter<'a, T>,
pub(super) vec: NonNull<Vec<T, A>>,
}
#[stable(feature = "collection_debug", since = "1.17.0")]
impl<T: fmt::Debug, A: Allocator> fmt::Debug for Drain<'_, T, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("Drain").field(&self.iter.as_slice()).finish()
}
}
impl<'a, T, A: Allocator> Drain<'a, T, A> {
/// Returns the remaining items of this iterator as a slice.
///
/// # Examples
///
/// ```
/// let mut vec = vec!['a', 'b', 'c'];
/// let mut drain = vec.drain(..);
/// assert_eq!(drain.as_slice(), &['a', 'b', 'c']);
/// let _ = drain.next().unwrap();
/// assert_eq!(drain.as_slice(), &['b', 'c']);
/// ```
#[must_use]
#[stable(feature = "vec_drain_as_slice", since = "1.46.0")]
pub fn as_slice(&self) -> &[T] {
self.iter.as_slice()
}
/// Returns a reference to the underlying allocator.
#[unstable(feature = "allocator_api", issue = "32838")]
#[must_use]
#[inline]
pub fn allocator(&self) -> &A {
unsafe { self.vec.as_ref().allocator() }
}
/// Keep unyielded elements in the source `Vec`.
///
/// # Examples
///
/// ```
/// #![feature(drain_keep_rest)]
///
/// let mut vec = vec!['a', 'b', 'c'];
/// let mut drain = vec.drain(..);
///
/// assert_eq!(drain.next().unwrap(), 'a');
///
/// // This call keeps 'b' and 'c' in the vec.
/// drain.keep_rest();
///
/// // If we wouldn't call `keep_rest()`,
/// // `vec` would be empty.
/// assert_eq!(vec, ['b', 'c']);
/// ```
#[unstable(feature = "drain_keep_rest", issue = "101122")]
pub fn keep_rest(self) {
// At this moment layout looks like this:
//
// [head] [yielded by next] [unyielded] [yielded by next_back] [tail]
// ^-- start \_________/-- unyielded_len \____/-- self.tail_len
// ^-- unyielded_ptr ^-- tail
//
// Normally `Drop` impl would drop [unyielded] and then move [tail] to the `start`.
// Here we want to
// 1. Move [unyielded] to `start`
// 2. Move [tail] to a new start at `start + len(unyielded)`
// 3. Update length of the original vec to `len(head) + len(unyielded) + len(tail)`
// a. In case of ZST, this is the only thing we want to do
// 4. Do *not* drop `self`; everything is already in a consistent state, so there is nothing to do
let mut this = ManuallyDrop::new(self);
unsafe {
let source_vec = this.vec.as_mut();
let start = source_vec.len();
let tail = this.tail_start;
let unyielded_len = this.iter.len();
let unyielded_ptr = this.iter.as_slice().as_ptr();
// ZSTs have no identity, so we don't need to move them around.
if !T::IS_ZST {
let start_ptr = source_vec.as_mut_ptr().add(start);
// memmove back unyielded elements
if unyielded_ptr != start_ptr {
let src = unyielded_ptr;
let dst = start_ptr;
ptr::copy(src, dst, unyielded_len);
}
// memmove back untouched tail
if tail != (start + unyielded_len) {
let src = source_vec.as_ptr().add(tail);
let dst = start_ptr.add(unyielded_len);
ptr::copy(src, dst, this.tail_len);
}
}
source_vec.set_len(start + unyielded_len + this.tail_len);
}
}
}
#[stable(feature = "vec_drain_as_slice", since = "1.46.0")]
impl<'a, T, A: Allocator> AsRef<[T]> for Drain<'a, T, A> {
fn as_ref(&self) -> &[T] {
self.as_slice()
}
}
#[stable(feature = "drain", since = "1.6.0")]
unsafe impl<T: Sync, A: Sync + Allocator> Sync for Drain<'_, T, A> {}
#[stable(feature = "drain", since = "1.6.0")]
unsafe impl<T: Send, A: Send + Allocator> Send for Drain<'_, T, A> {}
#[stable(feature = "drain", since = "1.6.0")]
impl<T, A: Allocator> Iterator for Drain<'_, T, A> {
type Item = T;
#[inline]
fn next(&mut self) -> Option<T> {
self.iter.next().map(|elt| unsafe { ptr::read(elt as *const _) })
}
fn size_hint(&self) -> (usize, Option<usize>) {
self.iter.size_hint()
}
}
#[stable(feature = "drain", since = "1.6.0")]
impl<T, A: Allocator> DoubleEndedIterator for Drain<'_, T, A> {
#[inline]
fn next_back(&mut self) -> Option<T> {
self.iter.next_back().map(|elt| unsafe { ptr::read(elt as *const _) })
}
}
#[stable(feature = "drain", since = "1.6.0")]
impl<T, A: Allocator> Drop for Drain<'_, T, A> {
fn drop(&mut self) {
/// Moves back the un-`Drain`ed elements to restore the original `Vec`.
struct DropGuard<'r, 'a, T, A: Allocator>(&'r mut Drain<'a, T, A>);
impl<'r, 'a, T, A: Allocator> Drop for DropGuard<'r, 'a, T, A> {
fn drop(&mut self) {
if self.0.tail_len > 0 {
unsafe {
let source_vec = self.0.vec.as_mut();
// memmove back untouched tail, update to new length
let start = source_vec.len();
let tail = self.0.tail_start;
if tail != start {
let src = source_vec.as_ptr().add(tail);
let dst = source_vec.as_mut_ptr().add(start);
ptr::copy(src, dst, self.0.tail_len);
}
source_vec.set_len(start + self.0.tail_len);
}
}
}
}
let iter = mem::take(&mut self.iter);
let drop_len = iter.len();
let mut vec = self.vec;
if T::IS_ZST {
// ZSTs have no identity, so we don't need to move them around; we only need to drop the correct amount,
// which can be achieved by manipulating the Vec length instead of moving values out of `iter`.
unsafe {
let vec = vec.as_mut();
let old_len = vec.len();
vec.set_len(old_len + drop_len + self.tail_len);
vec.truncate(old_len + self.tail_len);
}
return;
}
// ensure elements are moved back into their appropriate places, even when drop_in_place panics
let _guard = DropGuard(self);
if drop_len == 0 {
return;
}
// as_slice() must only be called when iter.len() > 0 because it also gets
// touched by vec::Splice, which may turn it into a dangling pointer; that would
// make it and the vec pointer point to different allocations, leading to
// invalid pointer arithmetic below.
let drop_ptr = iter.as_slice().as_ptr();
unsafe {
// drop_ptr comes from a slice::Iter which only gives us a &[T] but for drop_in_place
// a pointer with mutable provenance is necessary. Therefore we must reconstruct
// it from the original vec but also avoid creating a &mut to the front since that could
// invalidate raw pointers to it which some unsafe code might rely on.
let vec_ptr = vec.as_mut().as_mut_ptr();
let drop_offset = drop_ptr.sub_ptr(vec_ptr);
let to_drop = ptr::slice_from_raw_parts_mut(vec_ptr.add(drop_offset), drop_len);
ptr::drop_in_place(to_drop);
}
}
}
#[stable(feature = "drain", since = "1.6.0")]
impl<T, A: Allocator> ExactSizeIterator for Drain<'_, T, A> {
fn is_empty(&self) -> bool {
self.iter.is_empty()
}
}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T, A: Allocator> TrustedLen for Drain<'_, T, A> {}
#[stable(feature = "fused", since = "1.26.0")]
impl<T, A: Allocator> FusedIterator for Drain<'_, T, A> {}

View File

@ -0,0 +1,113 @@
use crate::alloc::{Allocator, Global};
use core::ptr;
use core::slice;
use super::Vec;
/// An iterator which uses a closure to determine if an element should be removed.
///
/// This struct is created by [`Vec::extract_if`].
/// See its documentation for more.
///
/// # Example
///
/// ```
/// #![feature(extract_if)]
///
/// let mut v = vec![0, 1, 2];
/// let iter: std::vec::ExtractIf<'_, _, _> = v.extract_if(|x| *x % 2 == 0);
/// ```
#[unstable(feature = "extract_if", reason = "recently added", issue = "43244")]
#[derive(Debug)]
#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct ExtractIf<
'a,
T,
F,
#[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> where
F: FnMut(&mut T) -> bool,
{
pub(super) vec: &'a mut Vec<T, A>,
/// The index of the item that will be inspected by the next call to `next`.
pub(super) idx: usize,
/// The number of items that have been drained (removed) thus far.
pub(super) del: usize,
/// The original length of `vec` prior to draining.
pub(super) old_len: usize,
/// The filter test predicate.
pub(super) pred: F,
}
impl<T, F, A: Allocator> ExtractIf<'_, T, F, A>
where
F: FnMut(&mut T) -> bool,
{
/// Returns a reference to the underlying allocator.
#[unstable(feature = "allocator_api", issue = "32838")]
#[inline]
pub fn allocator(&self) -> &A {
self.vec.allocator()
}
}
#[unstable(feature = "extract_if", reason = "recently added", issue = "43244")]
impl<T, F, A: Allocator> Iterator for ExtractIf<'_, T, F, A>
where
F: FnMut(&mut T) -> bool,
{
type Item = T;
fn next(&mut self) -> Option<T> {
unsafe {
while self.idx < self.old_len {
let i = self.idx;
let v = slice::from_raw_parts_mut(self.vec.as_mut_ptr(), self.old_len);
let drained = (self.pred)(&mut v[i]);
                // Update the index *after* the predicate is called. If the index
                // were updated first and the predicate panicked, the element at
                // this index would be leaked.
self.idx += 1;
if drained {
self.del += 1;
return Some(ptr::read(&v[i]));
} else if self.del > 0 {
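                    // Backshift the retained element over the gap left by
                    // previously drained items, keeping the retained prefix contiguous.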
let del = self.del;
let src: *const T = &v[i];
let dst: *mut T = &mut v[i - del];
ptr::copy_nonoverlapping(src, dst, 1);
}
}
None
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
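        // The lower bound is zero because the predicate may reject every
        // remaining element; at most `old_len - idx` items can still be yielded.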
(0, Some(self.old_len - self.idx))
}
}
#[unstable(feature = "extract_if", reason = "recently added", issue = "43244")]
impl<T, F, A: Allocator> Drop for ExtractIf<'_, T, F, A>
where
F: FnMut(&mut T) -> bool,
{
fn drop(&mut self) {
unsafe {
if self.idx < self.old_len && self.del > 0 {
// This is a pretty messed up state, and there isn't really an
// obviously right thing to do. We don't want to keep trying
// to execute `pred`, so we just backshift all the unprocessed
// elements and tell the vec that they still exist. The backshift
// is required to prevent a double-drop of the last successfully
// drained item prior to a panic in the predicate.
let ptr = self.vec.as_mut_ptr();
let src = ptr.add(self.idx);
let dst = src.sub(self.del);
let tail_len = self.old_len - self.idx;
src.copy_to(dst, tail_len);
}
self.vec.set_len(self.old_len - self.del);
}
}
}

View File

@ -0,0 +1,428 @@
//! Inplace iterate-and-collect specialization for `Vec`
//!
//! Note: This documents Vec internals; some of the following sections explain implementation
//! details and are best read together with the source of this module.
//!
//! The specialization in this module applies to iterators in the shape of
//! `source.adapter().adapter().adapter().collect::<Vec<U>>()`
//! where `source` is an owning iterator obtained from [`Vec<T>`], [`Box<[T]>`][box] (by conversion to `Vec`)
//! or [`BinaryHeap<T>`], the adapters guarantee to consume enough items per step to make room
//! for the results (represented by [`InPlaceIterable`]), provide transitive access to `source`
//! (via [`SourceIter`]) and thus the underlying allocation.
//! And finally there are alignment and size constraints to consider; these are currently ensured via
//! const eval instead of trait bounds in the specialized [`SpecFromIter`] implementation.
//!
//! [`BinaryHeap<T>`]: crate::collections::BinaryHeap
//! [box]: crate::boxed::Box
//!
//! By extension some other collections which use `collect::<Vec<_>>()` internally in their
//! `FromIterator` implementation benefit from this too.
//!
//! Access to the underlying source goes through a further layer of indirection via the private
//! trait [`AsVecIntoIter`] to hide the implementation detail that other collections may use
//! `vec::IntoIter` internally.
//!
//! In-place iteration depends on the interaction of several unsafe traits, implementation
//! details of multiple parts in the iterator pipeline and often requires holistic reasoning
//! across multiple structs since iterators are executed cooperatively rather than having
//! a central evaluator/visitor struct executing all iterator components.
//!
//! # Reading from and writing to the same allocation
//!
//! By its nature collecting in place means that the reader and writer side of the iterator
//! use the same allocation. Since `try_fold()` (used in [`SpecInPlaceCollect`]) takes a
//! reference to the iterator for the duration of the iteration, we can't interleave
//! the step of reading a value and getting a reference to write to. Instead, raw pointers
//! must be used on both the reader and writer side.
//!
//! That writes never clobber yet-to-be-read items is ensured by the [`InPlaceIterable`] requirements.
//!
//! # Layout constraints
//!
//! When recycling an allocation between different types we must uphold the [`Allocator`] contract
//! which means that the input and output Layouts have to "fit".
//!
//! To complicate things further, `InPlaceIterable` supports splitting or merging items into smaller/
//! larger ones to enable (de)aggregation of arrays.
//!
//! Ultimately each step of the iterator must free up enough *bytes* in the source to make room
//! for the next output item.
//! If `T` and `U` have the same size no fixup is needed.
//! If `T`'s size is a multiple of `U`'s we can compensate by multiplying the capacity accordingly.
//! Otherwise the input capacity (and thus layout) in bytes may not be representable by the output
//! `Vec<U>`. In that case `alloc.shrink()` is used to update the allocation's layout.
//!
//! The alignment of `T` must be the same as or larger than that of `U`. Since alignments are
//! always a power of two, _larger_ implies _is a multiple of_.
//!
//! See `in_place_collectible()` for the current conditions.
//!
//! Additionally this specialization doesn't make sense for ZSTs as there is no reallocation to
//! avoid and it would make pointer arithmetic more difficult.
//!
//! [`Allocator`]: core::alloc::Allocator
//!
//! # Drop- and panic-safety
//!
//! Iteration can panic, requiring dropping the already written parts but also the remainder of
//! the source. Iteration can also leave some source items unconsumed which must be dropped.
//! All those drops in turn can panic which then must either leak the allocation or abort to avoid
//! double-drops.
//!
//! This is handled by the [`InPlaceDrop`] guard for sink items (`U`) and by
//! [`vec::IntoIter::forget_allocation_drop_remaining()`] for remaining source items (`T`).
//!
//! If dropping any remaining source item (`T`) panics then [`InPlaceDstDataSrcBufDrop`] will handle dropping
//! the already collected sink items (`U`) and freeing the allocation.
//!
//! [`vec::IntoIter::forget_allocation_drop_remaining()`]: super::IntoIter::forget_allocation_drop_remaining()
//!
//! # O(1) collect
//!
//! The main iteration itself is further specialized when the iterator implements
//! [`TrustedRandomAccessNoCoerce`] to let the optimizer see that it is a counted loop with a single
//! [induction variable]. This can turn some iterators into a noop, i.e. it reduces them from O(n) to
//! O(1). This particular optimization is quite fickle and doesn't always work; see [#79308].
//!
//! [#79308]: https://github.com/rust-lang/rust/issues/79308
//! [induction variable]: https://en.wikipedia.org/wiki/Induction_variable
//!
//! Since unchecked accesses through that trait do not advance the read pointer of `IntoIter`,
//! this would interact unsoundly with the requirements about dropping the tail described above.
//! But since the normal `Drop` implementation of `IntoIter` would suffer from the same problem, it
//! is only correct for `TrustedRandomAccessNoCoerce` to be implemented when the items don't
//! have a destructor. Thus that implicit requirement also makes the specialization safe to use for
//! in-place collection.
//! Note that this safety concern is about the correctness of `impl Drop for IntoIter`,
//! not the guarantees of `InPlaceIterable`.
//!
//! # Adapter implementations
//!
//! The invariants for adapters are documented in [`SourceIter`] and [`InPlaceIterable`], but
//! getting them right can be rather subtle for multiple, sometimes non-local reasons.
//! For example, `InPlaceIterable` would be valid to implement for [`Peekable`], except
//! that it is stateful and cloneable, and `IntoIter`'s clone implementation shortens the
//! underlying allocation; if the iterator has been peeked and is then cloned, there is no
//! longer enough room, thus breaking an invariant ([#85322]).
//!
//! [#85322]: https://github.com/rust-lang/rust/issues/85322
//! [`Peekable`]: core::iter::Peekable
//!
//!
//! # Examples
//!
//! Some cases that are optimized by this specialization; more can be found in the `Vec`
//! benchmarks:
//!
//! ```rust
//! # #[allow(dead_code)]
//! /// Converts a usize vec into an isize one.
//! pub fn cast(vec: Vec<usize>) -> Vec<isize> {
//! // Does not allocate, free or panic. On optlevel>=2 it does not loop.
//! // Of course this particular case could and should be written with `into_raw_parts` and
//! // `from_raw_parts` instead.
//! vec.into_iter().map(|u| u as isize).collect()
//! }
//! ```
//!
//! ```rust
//! # #[allow(dead_code)]
//! /// Drops remaining items in `src` and if the layouts of `T` and `U` match it
//! /// returns an empty Vec backed by the original allocation. Otherwise it returns a new
//! /// empty vec.
//! pub fn recycle_allocation<T, U>(src: Vec<T>) -> Vec<U> {
//! src.into_iter().filter_map(|_| None).collect()
//! }
//! ```
//!
//! ```rust
//! let vec = vec![13usize; 1024];
//! let _ = vec.into_iter()
//! .enumerate()
//! .filter_map(|(idx, val)| if idx % 2 == 0 { Some(val+idx) } else {None})
//! .collect::<Vec<_>>();
//!
//! // is equivalent to the following, but doesn't require bounds checks
//!
//! let mut vec = vec![13usize; 1024];
//! let mut write_idx = 0;
//! for idx in 0..vec.len() {
//! if idx % 2 == 0 {
//! vec[write_idx] = vec[idx] + idx;
//! write_idx += 1;
//! }
//! }
//! vec.truncate(write_idx);
//! ```
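//!
//! Merging array items into their elements is also covered; a sketch of the flatten case
//! mentioned under "Layout constraints" (whether the allocation is actually reused still
//! depends on the capacity fit checked by `needs_realloc`):
//!
//! ```rust
//! # #[allow(dead_code)]
//! /// Flattens a vec of arrays. `[u8; 4]` and `u8` share an alignment and have
//! /// sizes in an integer ratio, so this can reuse the source allocation.
//! pub fn flatten(vec: Vec<[u8; 4]>) -> Vec<u8> {
//!     vec.into_iter().flatten().collect()
//! }
//! ```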
use crate::alloc::{handle_alloc_error, Global};
use core::alloc::Allocator;
use core::alloc::Layout;
use core::iter::{InPlaceIterable, SourceIter, TrustedRandomAccessNoCoerce};
use core::marker::PhantomData;
use core::mem::{self, ManuallyDrop, SizedTypeProperties};
use core::num::NonZero;
use core::ptr;
use super::{InPlaceDrop, InPlaceDstDataSrcBufDrop, SpecFromIter, SpecFromIterNested, Vec};
const fn in_place_collectible<DEST, SRC>(
step_merge: Option<NonZero<usize>>,
step_expand: Option<NonZero<usize>>,
) -> bool {
// Require matching alignments because an alignment-changing realloc is inefficient on many
// system allocators and better implementations would require the unstable Allocator trait.
if const { SRC::IS_ZST || DEST::IS_ZST || mem::align_of::<SRC>() != mem::align_of::<DEST>() } {
return false;
}
match (step_merge, step_expand) {
(Some(step_merge), Some(step_expand)) => {
// At least N merged source items -> at most M expanded destination items
// e.g.
// - 1 x [u8; 4] -> 4x u8, via flatten
// - 4 x u8 -> 1x [u8; 4], via array_chunks
mem::size_of::<SRC>() * step_merge.get() >= mem::size_of::<DEST>() * step_expand.get()
}
// Fall back to other from_iter impls if an overflow occurred in the step merge/expansion
// tracking.
_ => false,
}
}
const fn needs_realloc<SRC, DEST>(src_cap: usize, dst_cap: usize) -> bool {
if const { mem::align_of::<SRC>() != mem::align_of::<DEST>() } {
// FIXME: use unreachable! once that works in const
panic!("in_place_collectible() prevents this");
}
// If src type size is an integer multiple of the destination type size then
// the caller will have calculated a `dst_cap` that is an integer multiple of
// `src_cap` without remainder.
if const {
let src_sz = mem::size_of::<SRC>();
let dest_sz = mem::size_of::<DEST>();
dest_sz != 0 && src_sz % dest_sz == 0
} {
return false;
}
// type layouts don't guarantee a fit, so do a runtime check to see if
// the allocations happen to match
return src_cap > 0 && src_cap * mem::size_of::<SRC>() != dst_cap * mem::size_of::<DEST>();
}
/// This provides a shorthand for the source type since local type aliases aren't a thing.
#[rustc_specialization_trait]
trait InPlaceCollect: SourceIter<Source: AsVecIntoIter> + InPlaceIterable {
type Src;
}
impl<T> InPlaceCollect for T
where
T: SourceIter<Source: AsVecIntoIter> + InPlaceIterable,
{
type Src = <<T as SourceIter>::Source as AsVecIntoIter>::Item;
}
impl<T, I> SpecFromIter<T, I> for Vec<T>
where
I: Iterator<Item = T> + InPlaceCollect,
<I as SourceIter>::Source: AsVecIntoIter,
{
default fn from_iter(iterator: I) -> Self {
        // Select the implementation in const eval to avoid codegen of the dead branch, which improves compile times.
let fun: fn(I) -> Vec<T> = const {
// See "Layout constraints" section in the module documentation. We use const conditions here
// since these conditions currently cannot be expressed as trait bounds
if in_place_collectible::<T, I::Src>(I::MERGE_BY, I::EXPAND_BY) {
from_iter_in_place
} else {
// fallback
SpecFromIterNested::<T, I>::from_iter
}
};
fun(iterator)
}
}
fn from_iter_in_place<I, T>(mut iterator: I) -> Vec<T>
where
I: Iterator<Item = T> + InPlaceCollect,
<I as SourceIter>::Source: AsVecIntoIter,
{
let (src_buf, src_ptr, src_cap, mut dst_buf, dst_end, dst_cap) = unsafe {
let inner = iterator.as_inner().as_into_iter();
(
inner.buf,
inner.ptr,
inner.cap,
inner.buf.cast::<T>(),
inner.end as *const T,
inner.cap * mem::size_of::<I::Src>() / mem::size_of::<T>(),
)
};
// SAFETY: `dst_buf` and `dst_end` are the start and end of the buffer.
let len = unsafe {
SpecInPlaceCollect::collect_in_place(&mut iterator, dst_buf.as_ptr() as *mut T, dst_end)
};
let src = unsafe { iterator.as_inner().as_into_iter() };
    // check that the SourceIter contract was upheld
    // caveat: if it wasn't, we might not even make it to this point
debug_assert_eq!(src_buf, src.buf);
// check InPlaceIterable contract. This is only possible if the iterator advanced the
// source pointer at all. If it uses unchecked access via TrustedRandomAccess
    // then the source pointer will stay in its initial position and we can't use it as a reference
if src.ptr != src_ptr {
debug_assert!(
unsafe { dst_buf.add(len).cast() } <= src.ptr,
"InPlaceIterable contract violation, write pointer advanced beyond read pointer"
);
}
// The ownership of the source allocation and the new `T` values is temporarily moved into `dst_guard`.
// This is safe because
// * `forget_allocation_drop_remaining` immediately forgets the allocation
// before any panic can occur in order to avoid any double free, and then proceeds to drop
// any remaining values at the tail of the source.
// * the shrink either panics without invalidating the allocation, aborts or
// succeeds. In the last case we disarm the guard.
//
    // Note: This access to the source wouldn't be allowed by the TrustedRandomAccessNoCoerce
// contract (used by SpecInPlaceCollect below). But see the "O(1) collect" section in the
// module documentation why this is ok anyway.
let dst_guard =
InPlaceDstDataSrcBufDrop { ptr: dst_buf, len, src_cap, src: PhantomData::<I::Src> };
src.forget_allocation_drop_remaining();
// Adjust the allocation if the source had a capacity in bytes that wasn't a multiple
// of the destination type size.
// Since the discrepancy should generally be small this should only result in some
// bookkeeping updates and no memmove.
if needs_realloc::<I::Src, T>(src_cap, dst_cap) {
let alloc = Global;
debug_assert_ne!(src_cap, 0);
debug_assert_ne!(dst_cap, 0);
unsafe {
// The old allocation exists, therefore it must have a valid layout.
let src_align = mem::align_of::<I::Src>();
let src_size = mem::size_of::<I::Src>().unchecked_mul(src_cap);
let old_layout = Layout::from_size_align_unchecked(src_size, src_align);
            // The allocation must be equal or smaller for in-place iteration to be possible,
            // therefore the new layout must be ≤ the old one and therefore valid.
let dst_align = mem::align_of::<T>();
let dst_size = mem::size_of::<T>().unchecked_mul(dst_cap);
let new_layout = Layout::from_size_align_unchecked(dst_size, dst_align);
let result = alloc.shrink(dst_buf.cast(), old_layout, new_layout);
let Ok(reallocated) = result else { handle_alloc_error(new_layout) };
dst_buf = reallocated.cast::<T>();
}
} else {
debug_assert_eq!(src_cap * mem::size_of::<I::Src>(), dst_cap * mem::size_of::<T>());
}
mem::forget(dst_guard);
let vec = unsafe { Vec::from_nonnull(dst_buf, len, dst_cap) };
vec
}
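/// Creates the fold closure used by the default `collect_in_place` below: it writes each
/// item to `sink.dst` and bumps the pointer, relying on the `InPlaceDrop` sink to drop
/// the already-written head if a later step panics.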
fn write_in_place_with_drop<T>(
src_end: *const T,
) -> impl FnMut(InPlaceDrop<T>, T) -> Result<InPlaceDrop<T>, !> {
move |mut sink, item| {
unsafe {
            // the InPlaceIterable contract cannot be verified precisely here since
            // try_fold has an exclusive reference to the source pointer;
            // all we can do is check that it's still in range
debug_assert!(sink.dst as *const _ <= src_end, "InPlaceIterable contract violation");
ptr::write(sink.dst, item);
// Since this executes user code which can panic we have to bump the pointer
// after each step.
sink.dst = sink.dst.add(1);
}
Ok(sink)
}
}
/// Helper trait to hold specialized implementations of the in-place iterate-collect loop
trait SpecInPlaceCollect<T, I>: Iterator<Item = T> {
/// Collects an iterator (`self`) into the destination buffer (`dst`) and returns the number of items
/// collected. `end` is the last writable element of the allocation and used for bounds checks.
///
/// This method is specialized and one of its implementations makes use of
/// `Iterator::__iterator_get_unchecked` calls with a `TrustedRandomAccessNoCoerce` bound
/// on `I` which means the caller of this method must take the safety conditions
/// of that trait into consideration.
unsafe fn collect_in_place(&mut self, dst: *mut T, end: *const T) -> usize;
}
impl<T, I> SpecInPlaceCollect<T, I> for I
where
I: Iterator<Item = T>,
{
#[inline]
default unsafe fn collect_in_place(&mut self, dst_buf: *mut T, end: *const T) -> usize {
// use try-fold since
// - it vectorizes better for some iterator adapters
// - unlike most internal iteration methods, it only takes a &mut self
// - it lets us thread the write pointer through its innards and get it back in the end
let sink = InPlaceDrop { inner: dst_buf, dst: dst_buf };
let sink =
self.try_fold::<_, _, Result<_, !>>(sink, write_in_place_with_drop(end)).unwrap();
// iteration succeeded, don't drop head
unsafe { ManuallyDrop::new(sink).dst.sub_ptr(dst_buf) }
}
}
impl<T, I> SpecInPlaceCollect<T, I> for I
where
I: Iterator<Item = T> + TrustedRandomAccessNoCoerce,
{
#[inline]
unsafe fn collect_in_place(&mut self, dst_buf: *mut T, end: *const T) -> usize {
let len = self.size();
let mut drop_guard = InPlaceDrop { inner: dst_buf, dst: dst_buf };
for i in 0..len {
            // Safety: the InPlaceIterable contract guarantees that for every element we read
// one slot in the underlying storage will have been freed up and we can immediately
// write back the result.
unsafe {
let dst = dst_buf.add(i);
debug_assert!(dst as *const _ <= end, "InPlaceIterable contract violation");
ptr::write(dst, self.__iterator_get_unchecked(i));
// Since this executes user code which can panic we have to bump the pointer
// after each step.
drop_guard.dst = dst.add(1);
}
}
mem::forget(drop_guard);
len
}
}
/// Internal helper trait for in-place iteration specialization.
///
/// Currently this is only implemented by [`vec::IntoIter`] - returning a reference to itself - and
/// [`binary_heap::IntoIter`] which returns a reference to its inner representation.
///
/// Since this is an internal trait it hides the implementation detail `binary_heap::IntoIter`
/// uses `vec::IntoIter` internally.
///
/// [`vec::IntoIter`]: super::IntoIter
/// [`binary_heap::IntoIter`]: crate::collections::binary_heap::IntoIter
///
/// # Safety
///
/// In-place iteration relies on implementation details of `vec::IntoIter`, most importantly that
/// it does not create references to the whole allocation during iteration, only raw pointers
#[rustc_specialization_trait]
pub(crate) unsafe trait AsVecIntoIter {
type Item;
fn as_into_iter(&mut self) -> &mut super::IntoIter<Self::Item>;
}

View File

@ -0,0 +1,50 @@
use core::marker::PhantomData;
use core::ptr::NonNull;
use core::ptr::{self, drop_in_place};
use core::slice::{self};
use crate::alloc::Global;
use crate::raw_vec::RawVec;
// A helper struct for in-place iteration that drops the destination slice of iteration,
// i.e. the head. The source slice (the tail) is dropped by IntoIter.
pub(super) struct InPlaceDrop<T> {
pub(super) inner: *mut T,
pub(super) dst: *mut T,
}
impl<T> InPlaceDrop<T> {
fn len(&self) -> usize {
unsafe { self.dst.sub_ptr(self.inner) }
}
}
impl<T> Drop for InPlaceDrop<T> {
#[inline]
fn drop(&mut self) {
unsafe {
ptr::drop_in_place(slice::from_raw_parts_mut(self.inner, self.len()));
}
}
}
// A helper struct for in-place collection that drops the destination items together with
// the source allocation - i.e. before the reallocation happened - to avoid leaking them
// if some other destructor panics.
pub(super) struct InPlaceDstDataSrcBufDrop<Src, Dest> {
pub(super) ptr: NonNull<Dest>,
pub(super) len: usize,
pub(super) src_cap: usize,
pub(super) src: PhantomData<Src>,
}
impl<Src, Dest> Drop for InPlaceDstDataSrcBufDrop<Src, Dest> {
#[inline]
fn drop(&mut self) {
unsafe {
let _drop_allocation =
RawVec::<Src>::from_nonnull_in(self.ptr.cast::<Src>(), self.src_cap, Global);
drop_in_place(core::ptr::slice_from_raw_parts_mut::<Dest>(self.ptr.as_ptr(), self.len));
};
}
}

View File

@ -0,0 +1,477 @@
#[cfg(not(no_global_oom_handling))]
use super::AsVecIntoIter;
use crate::alloc::{Allocator, Global};
#[cfg(not(no_global_oom_handling))]
use crate::collections::VecDeque;
use crate::raw_vec::RawVec;
use core::array;
use core::fmt;
use core::iter::{
FusedIterator, InPlaceIterable, SourceIter, TrustedFused, TrustedLen,
TrustedRandomAccessNoCoerce,
};
use core::marker::PhantomData;
use core::mem::{ManuallyDrop, MaybeUninit, SizedTypeProperties};
use core::num::NonZero;
#[cfg(not(no_global_oom_handling))]
use core::ops::Deref;
use core::ptr::{self, NonNull};
use core::slice::{self};
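// Reinterprets `end: *const T` as a `NonNull<T>` in the non-ZST case, where it is
// derived from the buffer's `NonNull` pointer and therefore never null.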
macro non_null {
(mut $place:expr, $t:ident) => {{
#![allow(unused_unsafe)] // we're sometimes used within an unsafe block
unsafe { &mut *(ptr::addr_of_mut!($place) as *mut NonNull<$t>) }
}},
($place:expr, $t:ident) => {{
#![allow(unused_unsafe)] // we're sometimes used within an unsafe block
unsafe { *(ptr::addr_of!($place) as *const NonNull<$t>) }
}},
}
/// An iterator that moves out of a vector.
///
/// This `struct` is created by the `into_iter` method on [`Vec`](super::Vec)
/// (provided by the [`IntoIterator`] trait).
///
/// # Example
///
/// ```
/// let v = vec![0, 1, 2];
/// let iter: std::vec::IntoIter<_> = v.into_iter();
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_insignificant_dtor]
pub struct IntoIter<
T,
#[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
pub(super) buf: NonNull<T>,
pub(super) phantom: PhantomData<T>,
pub(super) cap: usize,
    // the drop impl reconstructs a RawVec from buf, cap and alloc;
    // to avoid dropping the allocator twice we need to wrap it into ManuallyDrop
pub(super) alloc: ManuallyDrop<A>,
pub(super) ptr: NonNull<T>,
/// If T is a ZST, this is actually ptr+len. This encoding is picked so that
    /// ptr == end is a quick test for the iterator being empty that works
    /// for both ZST and non-ZST.
/// For non-ZSTs the pointer is treated as `NonNull<T>`
pub(super) end: *const T,
}
#[stable(feature = "vec_intoiter_debug", since = "1.13.0")]
impl<T: fmt::Debug, A: Allocator> fmt::Debug for IntoIter<T, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("IntoIter").field(&self.as_slice()).finish()
}
}
impl<T, A: Allocator> IntoIter<T, A> {
/// Returns the remaining items of this iterator as a slice.
///
/// # Examples
///
/// ```
/// let vec = vec!['a', 'b', 'c'];
/// let mut into_iter = vec.into_iter();
/// assert_eq!(into_iter.as_slice(), &['a', 'b', 'c']);
/// let _ = into_iter.next().unwrap();
/// assert_eq!(into_iter.as_slice(), &['b', 'c']);
/// ```
#[stable(feature = "vec_into_iter_as_slice", since = "1.15.0")]
pub fn as_slice(&self) -> &[T] {
unsafe { slice::from_raw_parts(self.ptr.as_ptr(), self.len()) }
}
/// Returns the remaining items of this iterator as a mutable slice.
///
/// # Examples
///
/// ```
/// let vec = vec!['a', 'b', 'c'];
/// let mut into_iter = vec.into_iter();
/// assert_eq!(into_iter.as_slice(), &['a', 'b', 'c']);
/// into_iter.as_mut_slice()[2] = 'z';
/// assert_eq!(into_iter.next().unwrap(), 'a');
/// assert_eq!(into_iter.next().unwrap(), 'b');
/// assert_eq!(into_iter.next().unwrap(), 'z');
/// ```
#[stable(feature = "vec_into_iter_as_slice", since = "1.15.0")]
pub fn as_mut_slice(&mut self) -> &mut [T] {
unsafe { &mut *self.as_raw_mut_slice() }
}
/// Returns a reference to the underlying allocator.
#[unstable(feature = "allocator_api", issue = "32838")]
#[inline]
pub fn allocator(&self) -> &A {
&self.alloc
}
fn as_raw_mut_slice(&mut self) -> *mut [T] {
ptr::slice_from_raw_parts_mut(self.ptr.as_ptr(), self.len())
}
/// Drops remaining elements and relinquishes the backing allocation.
/// This method guarantees it won't panic before relinquishing
/// the backing allocation.
///
/// This is roughly equivalent to the following, but more efficient
///
/// ```
/// # let mut into_iter = Vec::<u8>::with_capacity(10).into_iter();
/// let mut into_iter = std::mem::replace(&mut into_iter, Vec::new().into_iter());
/// (&mut into_iter).for_each(drop);
/// std::mem::forget(into_iter);
/// ```
///
/// This method is used by in-place iteration, refer to the vec::in_place_collect
/// documentation for an overview.
#[cfg(not(no_global_oom_handling))]
pub(super) fn forget_allocation_drop_remaining(&mut self) {
let remaining = self.as_raw_mut_slice();
// overwrite the individual fields instead of creating a new
// struct and then overwriting &mut self.
// this creates less assembly
self.cap = 0;
self.buf = RawVec::NEW.non_null();
self.ptr = self.buf;
self.end = self.buf.as_ptr();
// Dropping the remaining elements can panic, so this needs to be
// done only after updating the other fields.
unsafe {
ptr::drop_in_place(remaining);
}
}
    /// Forgets the remaining elements without running their `Drop` impls, while still allowing the backing allocation to be freed.
pub(crate) fn forget_remaining_elements(&mut self) {
// For the ZST case, it is crucial that we mutate `end` here, not `ptr`.
// `ptr` must stay aligned, while `end` may be unaligned.
self.end = self.ptr.as_ptr();
}
#[cfg(not(no_global_oom_handling))]
#[inline]
pub(crate) fn into_vecdeque(self) -> VecDeque<T, A> {
// Keep our `Drop` impl from dropping the elements and the allocator
let mut this = ManuallyDrop::new(self);
// SAFETY: This allocation originally came from a `Vec`, so it passes
// all those checks. We have `this.buf` ≤ `this.ptr` ≤ `this.end`,
// so the `sub_ptr`s below cannot wrap, and will produce a well-formed
// range. `end` ≤ `buf + cap`, so the range will be in-bounds.
// Taking `alloc` is ok because nothing else is going to look at it,
// since our `Drop` impl isn't going to run so there's no more code.
unsafe {
let buf = this.buf.as_ptr();
let initialized = if T::IS_ZST {
// All the pointers are the same for ZSTs, so it's fine to
// say that they're all at the beginning of the "allocation".
0..this.len()
} else {
this.ptr.sub_ptr(this.buf)..this.end.sub_ptr(buf)
};
let cap = this.cap;
let alloc = ManuallyDrop::take(&mut this.alloc);
VecDeque::from_contiguous_raw_parts_in(buf, initialized, cap, alloc)
}
}
}
#[stable(feature = "vec_intoiter_as_ref", since = "1.46.0")]
impl<T, A: Allocator> AsRef<[T]> for IntoIter<T, A> {
fn as_ref(&self) -> &[T] {
self.as_slice()
}
}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: Send, A: Allocator + Send> Send for IntoIter<T, A> {}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: Sync, A: Allocator + Sync> Sync for IntoIter<T, A> {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T, A: Allocator> Iterator for IntoIter<T, A> {
type Item = T;
#[inline]
fn next(&mut self) -> Option<T> {
let ptr = if T::IS_ZST {
if self.ptr.as_ptr() == self.end as *mut T {
return None;
}
// `ptr` has to stay where it is to remain aligned, so we reduce the length by 1 by
// reducing the `end`.
self.end = self.end.wrapping_byte_sub(1);
self.ptr
} else {
if self.ptr == non_null!(self.end, T) {
return None;
}
let old = self.ptr;
self.ptr = unsafe { old.add(1) };
old
};
Some(unsafe { ptr.read() })
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
let exact = if T::IS_ZST {
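            // For ZSTs the remaining length is encoded as the byte offset
            // between `ptr` and `end` (see the field docs on `end`).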
self.end.addr().wrapping_sub(self.ptr.as_ptr().addr())
} else {
unsafe { non_null!(self.end, T).sub_ptr(self.ptr) }
};
(exact, Some(exact))
}
#[inline]
fn advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
let step_size = self.len().min(n);
let to_drop = ptr::slice_from_raw_parts_mut(self.ptr.as_ptr(), step_size);
if T::IS_ZST {
// See `next` for why we sub `end` here.
self.end = self.end.wrapping_byte_sub(step_size);
} else {
// SAFETY: the min() above ensures that step_size is in bounds
self.ptr = unsafe { self.ptr.add(step_size) };
}
// SAFETY: the min() above ensures that step_size is in bounds
unsafe {
ptr::drop_in_place(to_drop);
}
NonZero::new(n - step_size).map_or(Ok(()), Err)
}
#[inline]
fn count(self) -> usize {
self.len()
}
#[inline]
fn next_chunk<const N: usize>(&mut self) -> Result<[T; N], core::array::IntoIter<T, N>> {
let mut raw_ary = MaybeUninit::uninit_array();
let len = self.len();
if T::IS_ZST {
if len < N {
self.forget_remaining_elements();
                // Safety: ZSTs can be conjured ex nihilo; only the amount has to be correct
return Err(unsafe { array::IntoIter::new_unchecked(raw_ary, 0..len) });
}
self.end = self.end.wrapping_byte_sub(N);
// Safety: ditto
return Ok(unsafe { raw_ary.transpose().assume_init() });
}
if len < N {
// Safety: `len` indicates that this many elements are available and we just checked that
// it fits into the array.
unsafe {
ptr::copy_nonoverlapping(self.ptr.as_ptr(), raw_ary.as_mut_ptr() as *mut T, len);
self.forget_remaining_elements();
return Err(array::IntoIter::new_unchecked(raw_ary, 0..len));
}
}
        // Safety: `len` is at least the array size. Copy a fixed amount here to fully initialize
// the array.
return unsafe {
ptr::copy_nonoverlapping(self.ptr.as_ptr(), raw_ary.as_mut_ptr() as *mut T, N);
self.ptr = self.ptr.add(N);
Ok(raw_ary.transpose().assume_init())
};
}
unsafe fn __iterator_get_unchecked(&mut self, i: usize) -> Self::Item
where
Self: TrustedRandomAccessNoCoerce,
{
// SAFETY: the caller must guarantee that `i` is in bounds of the
// `Vec<T>`, so `i` cannot overflow an `isize`, and the `self.ptr.add(i)`
        // is guaranteed to point to an element of the `Vec<T>` and
// thus guaranteed to be valid to dereference.
//
// Also note the implementation of `Self: TrustedRandomAccess` requires
// that `T: Copy` so reading elements from the buffer doesn't invalidate
// them for `Drop`.
unsafe { self.ptr.add(i).read() }
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
#[inline]
fn next_back(&mut self) -> Option<T> {
if T::IS_ZST {
if self.ptr.as_ptr() == self.end as *mut _ {
return None;
}
// See above for why 'ptr.offset' isn't used
self.end = self.end.wrapping_byte_sub(1);
// Note that even though this is next_back() we're reading from `self.ptr`, not
// `self.end`. We track our length using the byte offset from `self.ptr` to `self.end`,
// so the end pointer may not be suitably aligned for T.
Some(unsafe { ptr::read(self.ptr.as_ptr()) })
} else {
if self.ptr == non_null!(self.end, T) {
return None;
}
unsafe {
self.end = self.end.sub(1);
Some(ptr::read(self.end))
}
}
}
#[inline]
fn advance_back_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
let step_size = self.len().min(n);
if T::IS_ZST {
// SAFETY: same as for advance_by()
self.end = self.end.wrapping_byte_sub(step_size);
} else {
// SAFETY: same as for advance_by()
self.end = unsafe { self.end.sub(step_size) };
}
let to_drop = ptr::slice_from_raw_parts_mut(self.end as *mut T, step_size);
// SAFETY: same as for advance_by()
unsafe {
ptr::drop_in_place(to_drop);
}
NonZero::new(n - step_size).map_or(Ok(()), Err)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T, A: Allocator> ExactSizeIterator for IntoIter<T, A> {
fn is_empty(&self) -> bool {
if T::IS_ZST {
self.ptr.as_ptr() == self.end as *mut _
} else {
self.ptr == non_null!(self.end, T)
}
}
}
#[stable(feature = "fused", since = "1.26.0")]
impl<T, A: Allocator> FusedIterator for IntoIter<T, A> {}
#[doc(hidden)]
#[unstable(issue = "none", feature = "trusted_fused")]
unsafe impl<T, A: Allocator> TrustedFused for IntoIter<T, A> {}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T, A: Allocator> TrustedLen for IntoIter<T, A> {}
#[stable(feature = "default_iters", since = "1.70.0")]
impl<T, A> Default for IntoIter<T, A>
where
A: Allocator + Default,
{
/// Creates an empty `vec::IntoIter`.
///
/// ```
/// # use std::vec;
/// let iter: vec::IntoIter<u8> = Default::default();
/// assert_eq!(iter.len(), 0);
/// assert_eq!(iter.as_slice(), &[]);
/// ```
fn default() -> Self {
super::Vec::new_in(Default::default()).into_iter()
}
}
#[doc(hidden)]
#[unstable(issue = "none", feature = "std_internals")]
#[rustc_unsafe_specialization_marker]
pub trait NonDrop {}
// T: Copy as approximation for !Drop since get_unchecked does not advance self.ptr
// and thus we can't implement drop-handling
#[unstable(issue = "none", feature = "std_internals")]
impl<T: Copy> NonDrop for T {}
#[doc(hidden)]
#[unstable(issue = "none", feature = "std_internals")]
// TrustedRandomAccess (without NoCoerce) must not be implemented because
// subtypes/supertypes of `T` might not be `NonDrop`
unsafe impl<T, A: Allocator> TrustedRandomAccessNoCoerce for IntoIter<T, A>
where
T: NonDrop,
{
const MAY_HAVE_SIDE_EFFECT: bool = false;
}
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "vec_into_iter_clone", since = "1.8.0")]
impl<T: Clone, A: Allocator + Clone> Clone for IntoIter<T, A> {
#[cfg(not(test))]
fn clone(&self) -> Self {
self.as_slice().to_vec_in(self.alloc.deref().clone()).into_iter()
}
#[cfg(test)]
fn clone(&self) -> Self {
crate::slice::to_vec(self.as_slice(), self.alloc.deref().clone()).into_iter()
}
}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<#[may_dangle] T, A: Allocator> Drop for IntoIter<T, A> {
fn drop(&mut self) {
struct DropGuard<'a, T, A: Allocator>(&'a mut IntoIter<T, A>);
impl<T, A: Allocator> Drop for DropGuard<'_, T, A> {
fn drop(&mut self) {
unsafe {
// `IntoIter::alloc` is not used anymore after this and will be dropped by RawVec
let alloc = ManuallyDrop::take(&mut self.0.alloc);
// RawVec handles deallocation
let _ = RawVec::from_nonnull_in(self.0.buf, self.0.cap, alloc);
}
}
}
let guard = DropGuard(self);
// destroy the remaining elements
unsafe {
ptr::drop_in_place(guard.0.as_raw_mut_slice());
}
// now `guard` will be dropped and do the rest
}
}
// In addition to the SAFETY invariants of the following three unsafe traits
// also refer to the vec::in_place_collect module documentation to get an overview
#[unstable(issue = "none", feature = "inplace_iteration")]
#[doc(hidden)]
unsafe impl<T, A: Allocator> InPlaceIterable for IntoIter<T, A> {
const EXPAND_BY: Option<NonZero<usize>> = NonZero::new(1);
const MERGE_BY: Option<NonZero<usize>> = NonZero::new(1);
}
#[unstable(issue = "none", feature = "inplace_iteration")]
#[doc(hidden)]
unsafe impl<T, A: Allocator> SourceIter for IntoIter<T, A> {
type Source = Self;
#[inline]
unsafe fn as_inner(&mut self) -> &mut Self::Source {
self
}
}
#[cfg(not(no_global_oom_handling))]
unsafe impl<T> AsVecIntoIter for IntoIter<T> {
type Item = T;
fn as_into_iter(&mut self) -> &mut IntoIter<Self::Item> {
self
}
}

View File

@ -0,0 +1,189 @@
use core::num::{NonZero, Saturating, Wrapping};
use crate::boxed::Box;
#[rustc_specialization_trait]
pub(super) unsafe trait IsZero {
/// Whether this value's representation is all zeros,
/// or can be represented with all zeroes.
fn is_zero(&self) -> bool;
}
macro_rules! impl_is_zero {
($t:ty, $is_zero:expr) => {
unsafe impl IsZero for $t {
#[inline]
fn is_zero(&self) -> bool {
$is_zero(*self)
}
}
};
}
impl_is_zero!(i8, |x| x == 0); // Needed so that arrays and tuples of i8 also get an impl.
impl_is_zero!(i16, |x| x == 0);
impl_is_zero!(i32, |x| x == 0);
impl_is_zero!(i64, |x| x == 0);
impl_is_zero!(i128, |x| x == 0);
impl_is_zero!(isize, |x| x == 0);
impl_is_zero!(u8, |x| x == 0); // Needed so that arrays and tuples of u8 also get an impl.
impl_is_zero!(u16, |x| x == 0);
impl_is_zero!(u32, |x| x == 0);
impl_is_zero!(u64, |x| x == 0);
impl_is_zero!(u128, |x| x == 0);
impl_is_zero!(usize, |x| x == 0);
impl_is_zero!(bool, |x| x == false);
impl_is_zero!(char, |x| x == '\0');
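// `-0.0` has its sign bit set, so `to_bits() == 0` accepts only positive zero,
// the one value that zeroed memory can represent.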
impl_is_zero!(f32, |x: f32| x.to_bits() == 0);
impl_is_zero!(f64, |x: f64| x.to_bits() == 0);
unsafe impl<T> IsZero for *const T {
#[inline]
fn is_zero(&self) -> bool {
(*self).is_null()
}
}
unsafe impl<T> IsZero for *mut T {
#[inline]
fn is_zero(&self) -> bool {
(*self).is_null()
}
}
unsafe impl<T: IsZero, const N: usize> IsZero for [T; N] {
#[inline]
fn is_zero(&self) -> bool {
// Because this is generated as a runtime check, it's not obvious that
// it's worth doing if the array is really long. The threshold here
// is largely arbitrary, but was picked because as of 2022-07-01 LLVM
// fails to const-fold the check in `vec![[1; 32]; n]`
// See https://github.com/rust-lang/rust/pull/97581#issuecomment-1166628022
// Feel free to tweak if you have better evidence.
N <= 16 && self.iter().all(IsZero::is_zero)
}
}
// This is a recursive macro.
macro_rules! impl_is_zero_tuples {
// Stopper
() => {
        // No use in implementing this for the empty tuple, because it is a ZST.
};
($first_arg:ident $(,$rest:ident)*) => {
unsafe impl <$first_arg: IsZero, $($rest: IsZero,)*> IsZero for ($first_arg, $($rest,)*){
#[inline]
fn is_zero(&self) -> bool{
                // Destructure the tuple into N references.
                // Rust allows generic params to be shadowed by local variable names.
#[allow(non_snake_case)]
let ($first_arg, $($rest,)*) = self;
$first_arg.is_zero()
$( && $rest.is_zero() )*
}
}
impl_is_zero_tuples!($($rest),*);
}
}
impl_is_zero_tuples!(A, B, C, D, E, F, G, H);
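// For reference, a sketch of what one expansion of the macro above produces
// (illustrative only; names simplified, the real impl reuses the generic-param names):
//
//     unsafe impl<A: IsZero, B: IsZero> IsZero for (A, B) {
//         #[inline]
//         fn is_zero(&self) -> bool {
//             let (a, b) = self;
//             a.is_zero() && b.is_zero()
//         }
//     }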
// `Option<&T>` and `Option<Box<T>>` are guaranteed to represent `None` as null.
// For fat pointers, the bytes that would be the pointer metadata in the `Some`
// variant are padding in the `None` variant, so ignoring them and
// zero-initializing instead is ok.
// `Option<&mut T>` never implements `Clone`, so there's no need for an impl of
// `SpecFromElem`.
unsafe impl<T: ?Sized> IsZero for Option<&T> {
#[inline]
fn is_zero(&self) -> bool {
self.is_none()
}
}
unsafe impl<T: ?Sized> IsZero for Option<Box<T>> {
#[inline]
fn is_zero(&self) -> bool {
self.is_none()
}
}
// `Option<NonZero<u32>>` and similar have a representation guarantee that
// they're the same size as the corresponding `u32` type, as well as a guarantee
// that transmuting between `NonZero<u32>` and `Option<NonZero<u32>>` works.
// While the documentation officially makes it UB to transmute from `None`,
// we're the standard library so we can make extra inferences, and we know that
// the only niche available to represent `None` is the one that's all zeros.
macro_rules! impl_is_zero_option_of_nonzero_int {
($($t:ty),+ $(,)?) => {$(
unsafe impl IsZero for Option<NonZero<$t>> {
#[inline]
fn is_zero(&self) -> bool {
self.is_none()
}
}
)+};
}
impl_is_zero_option_of_nonzero_int!(u8, u16, u32, u64, u128, usize, i8, i16, i32, i64, i128, isize);
macro_rules! impl_is_zero_option_of_int {
($($t:ty),+ $(,)?) => {$(
unsafe impl IsZero for Option<$t> {
#[inline]
fn is_zero(&self) -> bool {
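                // Compile-time check that the all-zero bit pattern really is the
                // `None` niche for this integer type; the build fails otherwise.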
const {
let none: Self = unsafe { core::mem::MaybeUninit::zeroed().assume_init() };
assert!(none.is_none());
}
self.is_none()
}
}
)+};
}
impl_is_zero_option_of_int!(u8, u16, u32, u64, u128, i8, i16, i32, i64, i128, usize, isize);
unsafe impl<T: IsZero> IsZero for Wrapping<T> {
#[inline]
fn is_zero(&self) -> bool {
self.0.is_zero()
}
}
unsafe impl<T: IsZero> IsZero for Saturating<T> {
#[inline]
fn is_zero(&self) -> bool {
self.0.is_zero()
}
}
macro_rules! impl_is_zero_option_of_bool {
($($t:ty),+ $(,)?) => {$(
unsafe impl IsZero for $t {
#[inline]
fn is_zero(&self) -> bool {
// SAFETY: This is *not* a stable layout guarantee, but
// inside `core` we're allowed to rely on the current rustc
// behaviour that options of bools will be one byte with
// no padding, so long as they're nested less than 254 deep.
let raw: u8 = unsafe { core::mem::transmute(*self) };
raw == 0
}
}
)+};
}
impl_is_zero_option_of_bool! {
Option<bool>,
Option<Option<bool>>,
Option<Option<Option<bool>>>,
// Could go further, but not worth the metadata overhead.
}

View File

@ -0,0 +1,47 @@
use crate::alloc::Allocator;
#[cfg(not(no_global_oom_handling))]
use crate::borrow::Cow;
use super::Vec;
macro_rules! __impl_slice_eq1 {
([$($vars:tt)*] $lhs:ty, $rhs:ty $(where $ty:ty: $bound:ident)?, #[$stability:meta]) => {
#[$stability]
impl<T, U, $($vars)*> PartialEq<$rhs> for $lhs
where
T: PartialEq<U>,
$($ty: $bound)?
{
#[inline]
fn eq(&self, other: &$rhs) -> bool { self[..] == other[..] }
#[inline]
fn ne(&self, other: &$rhs) -> bool { self[..] != other[..] }
}
}
}
__impl_slice_eq1! { [A1: Allocator, A2: Allocator] Vec<T, A1>, Vec<U, A2>, #[stable(feature = "rust1", since = "1.0.0")] }
__impl_slice_eq1! { [A: Allocator] Vec<T, A>, &[U], #[stable(feature = "rust1", since = "1.0.0")] }
__impl_slice_eq1! { [A: Allocator] Vec<T, A>, &mut [U], #[stable(feature = "rust1", since = "1.0.0")] }
__impl_slice_eq1! { [A: Allocator] &[T], Vec<U, A>, #[stable(feature = "partialeq_vec_for_ref_slice", since = "1.46.0")] }
__impl_slice_eq1! { [A: Allocator] &mut [T], Vec<U, A>, #[stable(feature = "partialeq_vec_for_ref_slice", since = "1.46.0")] }
__impl_slice_eq1! { [A: Allocator] Vec<T, A>, [U], #[stable(feature = "partialeq_vec_for_slice", since = "1.48.0")] }
__impl_slice_eq1! { [A: Allocator] [T], Vec<U, A>, #[stable(feature = "partialeq_vec_for_slice", since = "1.48.0")] }
#[cfg(not(no_global_oom_handling))]
__impl_slice_eq1! { [A: Allocator] Cow<'_, [T]>, Vec<U, A> where T: Clone, #[stable(feature = "rust1", since = "1.0.0")] }
#[cfg(not(no_global_oom_handling))]
__impl_slice_eq1! { [] Cow<'_, [T]>, &[U] where T: Clone, #[stable(feature = "rust1", since = "1.0.0")] }
#[cfg(not(no_global_oom_handling))]
__impl_slice_eq1! { [] Cow<'_, [T]>, &mut [U] where T: Clone, #[stable(feature = "rust1", since = "1.0.0")] }
__impl_slice_eq1! { [A: Allocator, const N: usize] Vec<T, A>, [U; N], #[stable(feature = "rust1", since = "1.0.0")] }
__impl_slice_eq1! { [A: Allocator, const N: usize] Vec<T, A>, &[U; N], #[stable(feature = "rust1", since = "1.0.0")] }
// NOTE: some less important impls are omitted to reduce code bloat
// FIXME(Centril): Reconsider this?
//__impl_slice_eq1! { [const N: usize] Vec<A>, &mut [B; N], }
//__impl_slice_eq1! { [const N: usize] [A; N], Vec<B>, }
//__impl_slice_eq1! { [const N: usize] &[A; N], Vec<B>, }
//__impl_slice_eq1! { [const N: usize] &mut [A; N], Vec<B>, }
//__impl_slice_eq1! { [const N: usize] Cow<'a, [A]>, [B; N], }
//__impl_slice_eq1! { [const N: usize] Cow<'a, [A]>, &[B; N], }
//__impl_slice_eq1! { [const N: usize] Cow<'a, [A]>, &mut [B; N], }
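// The impls above allow mixed comparisons such as (an illustrative sketch):
//
//     let v = vec![1, 2, 3];
//     assert!(v == [1, 2, 3]);       // Vec<T, A> == [U; N]
//     assert!(v == &[1, 2, 3][..]);  // Vec<T, A> == &[U]
//     assert!(&[1, 2, 3][..] == v);  // &[T] == Vec<U, A>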
