# HG changeset patch
# User Mark Goodwin <mgoodwin@mozilla.com>
# Date 1556566178 0
# lun. avril 29 19:29:38 2019 +0000
# Node ID ea8bdd612f43f22fa90fe1f87245e73fd1c5319d
# Parent 53ffb5f187119724ba5942a4ef7470dc7a961221
Bug 1538161 - Vendor rust_cascade r=keeler
Differential Revision: https://phabricator.services.mozilla.com/D24557
diff --git a/Cargo.lock b/Cargo.lock
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -173,26 +173,26 @@ dependencies = [
"target-lexicon 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "base64"
version = "0.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "byteorder 1.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
"safemem 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "base64"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "byteorder 1.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "bench-collections-gtest"
version = "0.1.0"
dependencies = [
"fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
"fxhash 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -216,17 +216,17 @@ dependencies = [
"yaml-rust 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "bincode"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "byteorder 1.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.88 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "bindgen"
version = "0.49.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
@@ -319,32 +319,37 @@ dependencies = [
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"moz_task 0.1.0",
"nserror 0.1.0",
"nsstring 0.1.0",
"xpcom 0.1.0",
]
[[package]]
+name = "bitvec"
+version = "0.10.0"
+source = "git+https://github.com/mozmark/bitvec?branch=20190429-bitvec-vendor-issues#b1f842cf24600a87f818d11547a6920c26eb3c38"
+
+[[package]]
name = "blake2-rfc"
version = "0.2.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"arrayvec 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"constant_time_eq 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "block-buffer"
version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"block-padding 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"byte-tools 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "byteorder 1.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
"generic-array 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "block-padding"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
@@ -381,25 +386,25 @@ source = "registry+https://github.com/ru
[[package]]
name = "byte-tools"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "byteorder"
-version = "1.2.7"
+version = "1.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "bytes"
version = "0.4.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "byteorder 1.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
"iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "bzip2"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
@@ -433,16 +438,17 @@ dependencies = [
"base64 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)",
"crossbeam-utils 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)",
"lmdb-rkv 0.11.2 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"moz_task 0.1.0",
"nserror 0.1.0",
"nsstring 0.1.0",
"rkv 0.9.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rust_cascade 0.3.2 (git+https://github.com/mozmark/rust-cascade?branch=20190426-bitvec-vendor-issues)",
"sha2 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
"style 0.0.1",
"thin-vec 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"time 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)",
"xpcom 0.1.0",
]
[[package]]
@@ -836,17 +842,17 @@ dependencies = [
]
[[package]]
name = "deflate"
version = "0.7.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"adler32 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "byteorder 1.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "derive_common"
version = "0.0.1"
dependencies = [
"darling 0.8.6 (registry+https://github.com/rust-lang/crates.io-index)",
"proc-macro2 0.4.27 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1138,17 +1144,17 @@ dependencies = [
"num_cpus 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "fxhash"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "byteorder 1.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "gcc"
version = "0.3.54"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
@@ -1310,34 +1316,34 @@ dependencies = [
"winapi 0.3.6 (git+https://github.com/froydnj/winapi-rs?branch=aarch64)",
]
[[package]]
name = "h2"
version = "0.1.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "byteorder 1.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
"bytes 0.4.9 (registry+https://github.com/rust-lang/crates.io-index)",
"fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
"futures 0.1.23 (registry+https://github.com/rust-lang/crates.io-index)",
"http 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
"indexmap 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"slab 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
"string 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-io 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "hashbrown"
version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "byteorder 1.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
"scopeguard 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "hashglobe"
version = "0.1.0"
dependencies = [
"libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1406,17 +1412,17 @@ dependencies = [
"unicode-normalization 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "image"
version = "0.21.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "byteorder 1.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
"lzw 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)",
"num-iter 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)",
"num-rational 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
"png 0.14.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@@ -1875,31 +1881,31 @@ dependencies = [
"semver 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "mp4parse"
version = "0.11.2"
dependencies = [
"bitreader 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "byteorder 1.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"mp4parse_fallible 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
"num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "mp4parse-gtest"
version = "0.1.0"
[[package]]
name = "mp4parse_capi"
version = "0.11.2"
dependencies = [
- "byteorder 1.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"mp4parse 0.11.2",
"num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "mp4parse_fallible"
version = "0.0.1"
@@ -1910,16 +1916,21 @@ name = "msdos_time"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"time 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.3.6 (git+https://github.com/froydnj/winapi-rs?branch=aarch64)",
]
[[package]]
+name = "murmurhash3"
+version = "0.0.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
name = "net2"
version = "0.2.33"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"cfg-if 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.3.6 (git+https://github.com/froydnj/winapi-rs?branch=aarch64)",
]
@@ -2180,17 +2191,17 @@ dependencies = [
]
[[package]]
name = "plist"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"base64 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "byteorder 1.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
"humantime 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.88 (registry+https://github.com/rust-lang/crates.io-index)",
"xml-rs 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "png"
version = "0.14.0"
@@ -2435,16 +2446,28 @@ version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "rust-ini"
version = "0.10.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
+name = "rust_cascade"
+version = "0.3.2"
+source = "git+https://github.com/mozmark/rust-cascade?branch=20190426-bitvec-vendor-issues#ef8d33d924e8dc865a97a86b745e64365e3c1363"
+dependencies = [
+ "bitvec 0.10.0 (git+https://github.com/mozmark/bitvec?branch=20190429-bitvec-vendor-issues)",
+ "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "digest 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "murmurhash3 0.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
name = "rustc-demangle"
version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "rustc_version"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -2714,17 +2737,17 @@ source = "registry+https://github.com/ru
name = "style"
version = "0.0.1"
dependencies = [
"app_units 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
"arrayvec 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"atomic_refcell 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"bindgen 0.49.0 (registry+https://github.com/rust-lang/crates.io-index)",
"bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
- "byteorder 1.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
"cssparser 0.25.3 (registry+https://github.com/rust-lang/crates.io-index)",
"derive_more 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)",
"euclid 0.19.5 (registry+https://github.com/rust-lang/crates.io-index)",
"fallible 0.0.1",
"fxhash 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"hashglobe 0.1.0",
"indexmap 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
"itertools 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -3316,17 +3339,17 @@ dependencies = [
[[package]]
name = "webrender"
version = "0.60.0"
dependencies = [
"base64 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)",
"bincode 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
- "byteorder 1.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
"cfg-if 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"core-foundation 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)",
"core-graphics 0.17.1 (registry+https://github.com/rust-lang/crates.io-index)",
"core-text 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"cstr 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
"dwrote 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
"freetype 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"fxhash 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -3354,17 +3377,17 @@ dependencies = [
[[package]]
name = "webrender_api"
version = "0.60.0"
dependencies = [
"app_units 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
"bincode 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
- "byteorder 1.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
"core-foundation 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)",
"core-graphics 0.17.1 (registry+https://github.com/rust-lang/crates.io-index)",
"derive_more 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)",
"euclid 0.19.5 (registry+https://github.com/rust-lang/crates.io-index)",
"malloc_size_of_derive 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.88 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_bytes 0.10.4 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.88 (git+https://github.com/servo/serde?branch=deserialize_from_enums10)",
@@ -3463,17 +3486,17 @@ dependencies = [
"euclid 0.19.5 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "ws"
version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "byteorder 1.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
"bytes 0.4.9 (registry+https://github.com/rust-lang/crates.io-index)",
"httparse 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
"mio 0.6.15 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.22 (registry+https://github.com/rust-lang/crates.io-index)",
"sha1 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -3581,23 +3604,24 @@ dependencies = [
"checksum binary-space-partition 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "88ceb0d16c4fd0e42876e298d7d3ce3780dd9ebdcbe4199816a32c77e08597ff"
"checksum bincode 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bda13183df33055cbb84b847becce220d392df502ebe7a4a78d7021771ed94d0"
"checksum bindgen 0.49.0 (registry+https://github.com/rust-lang/crates.io-index)" = "33e1b67a27bca31fd12a683b2a3618e275311117f48cfcc892e18403ff889026"
"checksum binjs_meta 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "430239e4551e42b80fa5d92322ac80ea38c9dda56e5d5582e057e2288352b71a"
"checksum bit-set 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6f1efcc46c18245a69c38fcc5cc650f16d3a59d034f3106e9ed63748f695730a"
"checksum bit-vec 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4440d5cb623bb7390ae27fec0bb6c61111969860f8e3ae198bfa0663645e67cf"
"checksum bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "228047a76f468627ca71776ecdebd732a3423081fcf5125585bcd7c49886ce12"
"checksum bitreader 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "80b13e2ab064ff3aa0bdbf1eff533f9822dc37899821f5f98c67f263eab51707"
+"checksum bitvec 0.10.0 (git+https://github.com/mozmark/bitvec?branch=20190429-bitvec-vendor-issues)" = "<none>"
"checksum blake2-rfc 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)" = "5d6d530bdd2d52966a6d03b7a964add7ae1a288d25214066fd4b600f0f796400"
"checksum block-buffer 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b"
"checksum block-padding 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "4fc4358306e344bf9775d0197fd00d2603e5afb0771bb353538630f022068ea3"
"checksum boxfnonce 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "8380105befe91099e6f69206164072c05bc92427ff6aa8a5171388317346dd75"
"checksum build_const 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e90dc84f5e62d2ebe7676b83c22d33b6db8bd27340fb6ffbff0a364efa0cb9c9"
"checksum byte-tools 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "980479e6fde23246dfb54d47580d66b4e99202e7579c5eaa9fe10ecb5ebd2182"
-"checksum byteorder 1.2.7 (registry+https://github.com/rust-lang/crates.io-index)" = "94f88df23a25417badc922ab0f5716cc1330e87f71ddd9203b3a3ccd9cedf75d"
+"checksum byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a019b10a2a7cdeb292db131fc8113e57ea2a908f6e7894b0c3c671893b65dbeb"
"checksum bytes 0.4.9 (registry+https://github.com/rust-lang/crates.io-index)" = "e178b8e0e239e844b083d5a0d4a156b2654e67f9f80144d48398fcd736a24fb8"
"checksum bzip2 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c3eafc42c44e0d827de6b1c131175098fe7fb53b8ce8a47e65cb3ea94688be24"
"checksum bzip2-sys 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "2c5162604199bbb17690ede847eaa6120a3f33d5ab4dcc8e7c25b16d849ae79b"
"checksum cast 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "926013f2860c46252efceabb19f4a6b308197505082c609025aa6706c011d427"
"checksum cc 1.0.34 (registry+https://github.com/rust-lang/crates.io-index)" = "30f813bf45048a18eda9190fd3c6b78644146056740c43172a5a3699118588fd"
"checksum cexpr 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "8fc0086be9ca82f7fc89fc873435531cb898b86e850005850de1f820e2db6e9b"
"checksum cfg-if 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "082bb9b28e00d3c9d39cc03e64ce4cea0f1bb9b3fde493f0cbc008472d22bdf4"
"checksum chrono 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "45912881121cb26fad7c38c17ba7daa18764771836b34fab7d3fbd93ed633878"
@@ -3722,16 +3746,17 @@ dependencies = [
"checksum miniz_oxide_c_api 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "92d98fdbd6145645828069b37ea92ca3de225e000d80702da25c20d3584b38a5"
"checksum mio 0.6.15 (registry+https://github.com/rust-lang/crates.io-index)" = "4fcfcb32d63961fb6f367bfd5d21e4600b92cd310f71f9dca25acae196eb1560"
"checksum mio-named-pipes 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "82f43a815b57d2d652550f3d20cec88a495bb2d0956aa873dc43040278455677"
"checksum mio-uds 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)" = "1731a873077147b626d89cc6c2a0db6288d607496c5d10c0cfcf3adc697ec673"
"checksum miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919"
"checksum moz_cbor 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "20c82a57087fd5990d7122dbff1607c3b20c3d2958e9d9ad9765aab415e2c91c"
"checksum mp4parse_fallible 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "6626c2aef76eb8f984eef02e475883d3fe9112e114720446c5810fc5f045cd30"
"checksum msdos_time 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "aad9dfe950c057b1bfe9c1f2aa51583a8468ef2a5baba2ebbe06d775efeb7729"
+"checksum murmurhash3 0.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "a2983372caf4480544083767bf2d27defafe32af49ab4df3a0b7fc90793a3664"
"checksum net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)" = "42550d9fb7b6684a6d404d9fa7250c2eb2646df731d1c06afc06dcee9e1bcf88"
"checksum new_debug_unreachable 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0cdc457076c78ab54d5e0d6fa7c47981757f1e34dc39ff92787f217dede586c4"
"checksum nodrop 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "9a2228dca57108069a5262f2ed8bd2e82496d2e074a06d1ccc7ce1687b6ae0a2"
"checksum nom 3.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "05aec50c70fd288702bcd93284a8444607f3292dbdf2a30de5ea5dcdbe72287b"
"checksum nom 4.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "9c349f68f25f596b9f44cf0e7c69752a5c633b0550c3ff849518bfba0233774a"
"checksum num-derive 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "0d2c31b75c36a993d30c7a13d70513cb93f02acafdd5b7ba250f9b0e18615de7"
"checksum num-integer 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)" = "e83d528d2677f0518c570baf2b7abdcf0cd2d248860b68507bdcb3e91d4c0cea"
"checksum num-iter 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)" = "af3fdbbc3291a5464dc57b03860ec37ca6bf915ed6ee385e7c6c052c422b2124"
@@ -3777,16 +3802,17 @@ dependencies = [
"checksum regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1731164734096285ec2a5ec7fea5248ae2f5485b3feeb0115af4fda2183b2d1b"
"checksum regex 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3d8c9f33201f46669484bacc312b00e7541bed6aaf296dffe2bb4e0ac6b8ce2a"
"checksum regex-syntax 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ad890a5eef7953f55427c50575c680c42841653abd2b028b68cd223d157f62db"
"checksum regex-syntax 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8f1ac0f60d675cc6cf13a20ec076568254472551051ad5dd050364d70671bf6b"
"checksum rkv 0.9.4 (registry+https://github.com/rust-lang/crates.io-index)" = "238764bd8750927754d91e4a27155ac672ba88934a2bf698c992d55e5ae25e5b"
"checksum ron 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "da06feaa07f69125ab9ddc769b11de29090122170b402547f64b86fe16ebc399"
"checksum runloop 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5d79b4b604167921892e84afbbaad9d5ad74e091bf6c511d9dbfb0593f09fabd"
"checksum rust-ini 0.10.3 (registry+https://github.com/rust-lang/crates.io-index)" = "8a654c5bda722c699be6b0fe4c0d90de218928da5b724c3e467fc48865c37263"
+"checksum rust_cascade 0.3.2 (git+https://github.com/mozmark/rust-cascade?branch=20190426-bitvec-vendor-issues)" = "<none>"
"checksum rustc-demangle 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "76d7ba1feafada44f2d38eed812bd2489a03c0f5abb975799251518b68848649"
"checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a"
"checksum ryu 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)" = "fd0568787116e13c652377b6846f5931454a363a8fdf8ae50463ee40935b278b"
"checksum safemem 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8dca453248a96cb0749e36ccdfe2b0b4e54a61bfef89fb97ec621eb8e0a93dd9"
"checksum same-file 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "cfb6eded0b06a0b512c8ddbcf04089138c9b4362c2f696f3c3d76039d68f3637"
"checksum scoped-tls 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f417c22df063e9450888a7561788e9bd46d3bb3c1466435b4eccb903807f147d"
"checksum scoped_threadpool 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "1d51f5df5af43ab3f1360b429fa5e0152ac5ce8c0bd6485cae490332e96846a8"
"checksum scopeguard 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c79eb2c3ac4bc2507cda80e7f3ac5b88bd8eae4c0914d5663e6a8933994be918"
diff --git a/security/manager/ssl/cert_storage/Cargo.toml b/security/manager/ssl/cert_storage/Cargo.toml
--- a/security/manager/ssl/cert_storage/Cargo.toml
+++ b/security/manager/ssl/cert_storage/Cargo.toml
@@ -7,13 +7,14 @@ authors = ["Dana Keeler <dkeeler@mozilla
base64 = "0.10"
crossbeam-utils = "0.6.3"
lmdb-rkv = "0.11"
log = "0.4"
moz_task = { path = "../../../../xpcom/rust/moz_task" }
nserror = { path = "../../../../xpcom/rust/nserror" }
nsstring = { path = "../../../../xpcom/rust/nsstring" }
rkv = "^0.9"
+rust_cascade = { git = "https://github.com/mozmark/rust-cascade", branch = "20190426-bitvec-vendor-issues" }
sha2 = "^0.8"
style = { path = "../../../../servo/components/style" }
thin-vec = { version = "0.1.0", features = ["gecko-ffi"] }
time = "0.1"
xpcom = { path = "../../../../xpcom/rust/xpcom" }
diff --git a/third_party/rust/bitvec/.cargo-checksum.json b/third_party/rust/bitvec/.cargo-checksum.json
new file mode 100644
--- /dev/null
+++ b/third_party/rust/bitvec/.cargo-checksum.json
@@ -0,0 +1,1 @@
+{"files":{".editorconfig":"ad2ddd2235a1ca26d8be9df333baae560b8f696f9475a8084d5169bc598feced",".gitmodules":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".travis.yml":"e3d5a6154d4ad058f85387f2bd0ff369185c74eab6c877222bffa26cb94cbb43","AUTHORS.txt":"2bdfa77827b6fd352f398752c30acbca48ada04284656b6d38fb97b85534d5bf","CHANGELOG.md":"46f8137f204277534810bc28a79ecc91677a42dda09516a66e2d60f56f7177cb","CODE_OF_CONDUCT.md":"bae3bf173a3748f4b62eb808133bb1597f811f0fa919e997149882125f91746d","CONTRIBUTING.md":"035ebfb837464470c7649e6b306c7f0b831c242cbbf25fb97a0b3f40ea4acde6","Cargo.toml":"2fbab41dae1f1de7bbf5c207125b166708f49043e83e41591c3a251f3c890f75","Justfile":"2c3a45b4a5673a0239f6d2bb06f520581e9112a565007856a1cd482afd48c360","LICENSE.txt":"411781fd38700f2357a14126d0ab048164ab881f1dcb335c1bb932e232c9a2f5","README.md":"d8d30b17771d40ed82979ea5c568e5a5f0dc220d2e7a3a44b1ea23d0dda99de3","doc/Bit Patterns.md":"cfd36f3ec70df5541c37f4c25a59211a3eccc3cc5d189efb4a61eb5d4f2357df","examples/readme.rs":"8ad2cde83012592ce0d00cc82ba6a71fa94c870c69c323475ff3fceb827604a4","examples/sieve.rs":"71bb127bf9496d608c73052d8a204a04ddb2f9717fd853d7794be51ae82e2757","examples/tour.rs":"3eef2e25cd61e67c15b22a93b9c673caa331777117752613781cf1b311200357","src/bits.rs":"c6d3195b432d358686d2b806e6de99961f455ccfabe0575bdab0eabb67f05083","src/boxed.rs":"91070b7eb5d7c4434127daca983eef69f0848f3e16c5fb764c159b90e79aeb0f","src/cursor.rs":"60aa4b518d7453a4045ec6a9941ce275eb84b547316828b300a234a515d9c471","src/lib.rs":"9a04a5474324ea796d582d7d9b24bfaf67c67370c3eb6a656c64bb9865679bf4","src/macros.rs":"b86294a6d6cbc03688844d377ee18e2170b7909ed62bfcd574f8082df55aa316","src/pointer.rs":"b2bc0e9bd884defe546054160eb2cc41d9f4f023a28c7c20604164c4d8da1296","src/slice.rs":"ad3994f52bf8c89c2bdf0816adc88f1847e3005f620990106b293e45c28b16ec","src/vec.rs":"c5a2e5624d8c518cfbba53b6ebc77bdbf79af01ce6510f9f3051e569061b039f"},"package":null}
\ No newline at end of file
diff --git a/third_party/rust/bitvec/.editorconfig b/third_party/rust/bitvec/.editorconfig
new file mode 100644
--- /dev/null
+++ b/third_party/rust/bitvec/.editorconfig
@@ -0,0 +1,21 @@
+[*]
+charset = utf-8
+end_of_line = lf
+insert_final_newline = true
+trim_trailing_whitespace = true
+
+[*.md]
+indent_size = 2
+indent_style = space
+
+[*.rs]
+indent_size = 4
+indent_style = tab
+
+[*.toml]
+indent_size = 8
+indent_style = tab
+
+[*.yml]
+indent_size = 2
+indent_style = space
diff --git a/third_party/rust/bitvec/.gitmodules b/third_party/rust/bitvec/.gitmodules
new file mode 100644
diff --git a/third_party/rust/bitvec/.travis.yml b/third_party/rust/bitvec/.travis.yml
new file mode 100644
--- /dev/null
+++ b/third_party/rust/bitvec/.travis.yml
@@ -0,0 +1,40 @@
+language: rust
+
+sudo: required
+
+rust:
+ - stable
+ - beta
+ - nightly
+
+matrix:
+ allow_failures:
+ - rust: nightly
+
+# codecov
+addons:
+ apt:
+ packages:
+ - libcurl4-openssl-dev
+ - libelf-dev
+ - libdw-dev
+ - cmake
+ - gcc
+ - binutils-dev
+ - libiberty-dev
+
+# codecov
+after_success: |
+ wget https://github.com/SimonKagstrom/kcov/archive/master.tar.gz &&
+ tar xzf master.tar.gz &&
+ cd kcov-master &&
+ mkdir build &&
+ cd build &&
+ cmake .. &&
+ make &&
+ make install DESTDIR=../../kcov-build &&
+ cd ../.. &&
+ rm -rf kcov-master &&
+ for file in target/debug/bitvec-*[^\.d]; do mkdir -p "target/cov/$(basename $file)"; ./kcov-build/usr/local/bin/kcov --exclude-pattern=/.cargo,/usr/lib --verify "target/cov/$(basename $file)" "$file"; done &&
+ bash <(curl -s https://codecov.io/bash) &&
+ echo "Uploaded code coverage"
diff --git a/third_party/rust/bitvec/AUTHORS.txt b/third_party/rust/bitvec/AUTHORS.txt
new file mode 100644
--- /dev/null
+++ b/third_party/rust/bitvec/AUTHORS.txt
@@ -0,0 +1,1 @@
+myrrlyn <self@myrrlyn.dev>
diff --git a/third_party/rust/bitvec/CHANGELOG.md b/third_party/rust/bitvec/CHANGELOG.md
new file mode 100644
--- /dev/null
+++ b/third_party/rust/bitvec/CHANGELOG.md
@@ -0,0 +1,215 @@
+# Changelog
+
+All notable changes will be documented in this file.
+
+## 0.10.0
+
+This version was a complete rewrite of the entire crate. The minimum compiler
+version has been upgraded to `1.31.0`. The crate is written against the Rust
+2018 edition of the language. It will be a `1.0` release after polishing.
+
+### Added
+
+- `BitPtr` custom pointer representation. This is the most important component
+ of the rewrite, and what enabled the expanded feature set and API surface.
+ This structure allows `BitSlice` and `BitVec` to have head cursors at any bit,
+ not just at the front edge of an element. This allows the crate to support
+ arbitrary range slicing and slice splitting, and in turn greatly expand the
+ usability of the slice and vector types.
+
+ The `BitPtr` type is wholly crate-internal, and renders the `&BitSlice` and
+ `BitVec` handle types ***wholly incompatible*** with standard Rust slice and
+ vector handles. With great power comes great responsibility to never, ever,
+ interchange these types through any means except the provided translation API.
+
+- Range indexing and more powerful iteration. Bit-precision addressing allows
+ arbitrary subslices and enables more of the slice API from `core`.
+
+### Changed
+
+- Almost everything has been rewritten. The git diff for this version is
+ horrifying.
+
+- Formatting traits better leverage the builtin printing structures available
+ from `core::fmt`, and are made available on `no_std`.
+
+### Removed
+
+- `u64` is only usable as the storage type on 64-bit systems; it has 32-bit
+ alignment on 32-bit systems and as such is unusable there.
+
+## 0.9.0
+
+### Changed
+
+- The trait `Endian` has been renamed to `Cursor`, and all type variables
+ `E: Endian` have been renamed to `C: Cursor`.
+
+- The `Bits` trait is no longer bound by `Default`.
+
+## 0.8.0
+
+### Added
+
+- `std` and `alloc` features, which can be disabled for use in `#![no_std]`
+ libraries. This was implemented by Robert Habermeier, `rphmeier@gmail.com`.
+
+ Note that the `BitSlice` tests and all the examples are disabled when the
+ `alloc` feature is not present. They will function normally when `alloc` is
+ present but `std` is not.
+
+### Changed
+
+- Compute `Bits::WIDTH` as `size_of::<Self>() * 8` instead of `1 << Bits::BITS`.
+
+## 0.7.0
+
+### Added
+
+- `examples/readme.rs` tracks the contents of the example code in `README.md`.
+ It will continue to do so until the `external_doc` feature stabilizes so that
+ the contents of the README can be included in the module documentation of
+ `src/lib.rs`.
+
+- Officially use the Rust community code of conduct.
+
+- README sections describe why a user might want this library, and what makes it
+ different than `bit-vec`.
+
+### Changed
+
+- Update minimum Rust version to `1.30.0`.
+
+ Internally, this permits use of `std` rather than `::std`. This compiler
+ edition does not change *intra-crate* macro usage. Clients at `1.30.0` and
+ above no longer need `#[macro_use]` above `extern crate bitvec;`, and are able
+ to import the `bitvec!` macro directly with `use bitvec::bitvec;` or
+ `use bitvec::*;`.
+
+ Implementation note: References to literals stabilized at *some* point between
+ `1.20.0` and `1.30.0`, so the static bool items used for indexing are no
+ longer needed.
+
+- Include numeric arithmetic as well as set arithmetic in the README.
+
+## 0.6.0
+
+### Changed
+
+- Update minimum Rust version to `1.25.0` in order to use nested imports.
+- Fix logic in `Endian::prev`, and re-enabled edge tests.
+- Pluralize `BitSlice::count_one()` and `BitSlice::count_zero()` function names.
+- Fix documentation and comments.
+- Consolidate implementation of `bitvec!` to not use any other macros.
+
+## 0.5.0
+
+### Added
+
+- `BitVec` and `BitSlice` implement `Hash`.
+
+- `BitVec` fully implements addition, negation, and subtraction.
+
+- `BitSlice` implements in-place addition and negation.
+ - `impl AddAssign for BitSlice`
+ - `impl Neg for &mut BitSlice`
+
+ This distinction is required in order to match the expectations of the
+ arithmetic traits and the realities of immovable `BitSlice`.
+
+- `BitSlice` offers `.all()`, `.any()`, `.not_all()`, `.not_any()`, and
+ `.some()` methods to perform n-ary Boolean logic.
+ - `.all()` tests if all bits are set high
+ - `.any()` tests if any bits are set high (includes `.all()`)
+ - `.not_all()` tests if any bits are set low (includes `.not_all()`)
+ - `.not_any()` tests if all bits are set low
+ - `.some()` tests if any bits are high and any are low (excludes `.all()` and
+ `.not_all()`)
+
+- `BitSlice` can count how many bits are set high or low with `.count_one()` and
+ `.count_zero()`.
+
+## 0.4.0
+
+### Added
+
+`BitSlice::for_each` provides mutable iteration over a slice. It yields each
+successive `(index: usize, bit: bool)` pair to a closure, and stores the return
+value of that closure at the yielded index.
+
+`BitVec` now implements `Eq` and `Ord` against other `BitVec`s. It is impossible
+at this time to make `BitVec` generic over anything that is `Borrow<BitSlice>`,
+which would allow comparisons over different ownership types. The declaration
+
+```rust
+impl<A, B, C, D, E> PartialEq<C> for BitVec<A, B>
+where A: Endian,
+ B: Bits,
+ C: Borrow<BitSlice<D, E>>,
+ D: Endian,
+ E: Bits,
+{
+ fn eq(&self, rhs: E) { … }
+}
+```
+
+is impossible to write, so `BitVec == BitSlice` will be rejected.
+
+As with many other traits on `BitVec`, the implementations are just a thin
+wrapper over the corresponding `BitSlice` implementations.
+
+### Changed
+
+Refine the API documentation. Rust guidelines recommend imperative rather than
+descriptive summaries for function documentation, which largely meant stripping
+the trailing -s from the first verb in each function document.
+
+I also moved the example code from the trait-level documentation to the
+function-level documentation, so that it would show up as `type::func` in the
+`rustdoc` output rather than just `type`. This makes it much clearer what is
+being tested.
+
+### Removed
+
+`BitVec` methods `iter` and `raw_len` moved to `BitSlice` in `0.3.0` but were
+not removed in that release.
+
+The remaining debugging `eprintln!` calls have been stripped.
+
+## 0.3.0
+
+Split `BitVec` off into `BitSlice` wherever possible.
+
+### Added
+
+- The `BitSlice` type is the `[T]` to `BitVec`'s `Vec<T>`. `BitVec` now `Deref`s
+ to it, and has offloaded all the work that does not require managing allocated
+ memory.
+- Almost all of the public API on both types has documentation and example code.
+
+### Changed
+
+- The implementations of left- and right- shift are now faster.
+- `BitVec` can `Borrow` and `Deref` down to `BitSlice`, and offloads as much
+ work as possible to it.
+- `Clone` is more intelligent.
+
+## 0.2.0
+
+Improved the `bitvec!` macro.
+
+### Changed
+
+- `bitvec!` takes more syntaxes to better match `vec!`, and has better
+ runtime performance. The increased static memory used by `bitvec!` should be
+ more than counterbalanced by the vastly better generated code.
+
+## 0.1.0
+
+Initial implementation and release.
+
+### Added
+
+- `Endian` and `Bits` traits
+- `BitVec` type with basic `Vec` idioms and parallel trait implementations
+- `bitvec!` generator macro
diff --git a/third_party/rust/bitvec/CODE_OF_CONDUCT.md b/third_party/rust/bitvec/CODE_OF_CONDUCT.md
new file mode 100644
--- /dev/null
+++ b/third_party/rust/bitvec/CODE_OF_CONDUCT.md
@@ -0,0 +1,5 @@
+# Code of Conduct
+
+See the official [Rust code of conduct][coc].
+
+[coc]: https://www.rust-lang.org/policies/code-of-conduct
diff --git a/third_party/rust/bitvec/CONTRIBUTING.md b/third_party/rust/bitvec/CONTRIBUTING.md
new file mode 100644
--- /dev/null
+++ b/third_party/rust/bitvec/CONTRIBUTING.md
@@ -0,0 +1,45 @@
+# Contributing Guide
+
+Contributions are absolutely welcome!
+
+## Contact Information
+
+In order of likelihood that I will actionably receive your contact, my
+information is:
+
+- [appayne@outlook.com](mailto:appayne@outlook.com)
+- [@myrrlyn](//github.com/myrrlyn)
+- [@myrrlyn](//twitter.com/myrrlyn)
+- [@myrrlyn@cybre.space](//cybre.space/myrrlyn)
+- [/u/myrrlyn](//reddit.com/u/myrrlyn)
+
+I am not active on any IRC channels at this time. I am on Discord in the Rust
+channel, so you may be able to reach me there, but I don’t know offhand how to
+give out Discord profile links. I have a very consistent username scheme and so
+anywhere you see my name, it’s *probably* me and I’ll *probably* respond to it.
+
+## Preconditions
+
+Be able to make a Rust project compile. I will happily help you learn how to do
+this, but this particular crate is probably not something you want to explore as
+a beginner.
+
+Be comfortable using `U+0009 CHARACTER TABULATION` as your indentation setting.
+
+That’s about it for prerequisites! This crate intends to power the lowest-level
+of memory manipulation while also offering a convenient, powerful, and idiomatic
+high-level API, so I encourage and welcome inputs on any aspect of this crate’s
+construction. I know that I personally am much more capable at the low end than
+the high, and so the user-facing API may not be as strong as it should be.
+
+## Contributing
+
+If you have a patch you think is worth inspecting right away, opening a pull
+request without prelude is fine, although I would certainly appreciate an
+accompanying explanation of what the patch does and why.
+
+If you have questions, bugs, suggestions, or other contributions of any kind
+that do not immediately touch the codebase, you can reach me informally to talk
+about them or open an issue.
+
+I will do my best to respond to all contacts in a timely manner.
diff --git a/third_party/rust/bitvec/Cargo.toml b/third_party/rust/bitvec/Cargo.toml
new file mode 100644
--- /dev/null
+++ b/third_party/rust/bitvec/Cargo.toml
@@ -0,0 +1,49 @@
+[package]
+name = "bitvec"
+version = "0.10.0"
+authors = [
+ "myrrlyn <myrrlyn@outlook.com>",
+]
+categories = [
+ "data-structures",
+ "embedded",
+ "no-std",
+ "rust-patterns",
+]
+description = "A crate for manipulating memory, bit by bit"
+documentation = "https://docs.rs/bitvec"
+edition = "2018"
+homepage = "https://myrrlyn.net/bitvec"
+keywords = [
+ "bits",
+ "bitvec",
+]
+license = "MIT"
+readme = "README.md"
+repository = "https://github.com/myrrlyn/bitvec"
+
+[features]
+alloc = []
+default = [
+ "std",
+]
+std = [
+ "alloc",
+]
+testing = [
+ "std",
+]
+
+[badges.codecov]
+repository = "myrrlyn/bitvec"
+branch = "master"
+service = "github"
+
+[badges.is-it-maintained-issue-resolution]
+repository = "myrrlyn/bitvec"
+
+[badges.is-it-maintained-open-issues]
+repository = "myrrlyn/bitvec"
+
+[badges.maintenance]
+status = "actively-developed"
diff --git a/third_party/rust/bitvec/Justfile b/third_party/rust/bitvec/Justfile
new file mode 100644
--- /dev/null
+++ b/third_party/rust/bitvec/Justfile
@@ -0,0 +1,16 @@
+checkout:
+ cargo check
+ cargo doc --features testing --document-private-items
+ cargo build
+ cargo build --example sieve
+ cargo build --example tour
+ cargo test --features testing
+ cargo package --allow-dirty
+
+dev:
+ # cargo check
+ cargo test --no-default-features --features testing
+ cargo doc --features testing --document-private-items
+
+ci:
+ watchexec -- just dev
diff --git a/third_party/rust/bitvec/LICENSE.txt b/third_party/rust/bitvec/LICENSE.txt
new file mode 100644
--- /dev/null
+++ b/third_party/rust/bitvec/LICENSE.txt
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2018 myrrlyn (Alexander Payne)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/third_party/rust/bitvec/README.md b/third_party/rust/bitvec/README.md
new file mode 100644
--- /dev/null
+++ b/third_party/rust/bitvec/README.md
@@ -0,0 +1,243 @@
+# `BitVec` – Managing memory bit by bit
+
+[![Crate][crate_img]][crate]
+[![Documentation][docs_img]][docs]
+[![License][license_img]][license_file]
+[![Continuous Integration][travis_img]][travis]
+[![Code Coverage][codecov_img]][codecov]
+[![Crate Downloads][downloads_img]][crate]
+
+This crate provides packed bit-level analogues to `[T]` and `Vec<T>`. The slice
+type `BitSlice` and the vector type `BitVec` allow bitwise access to a region of
+memory in any endian ordering or underlying primitive type. This permits
+construction of space-efficient sets or fine-grained control over the values in
+a region of memory.
+
+`BitVec` is a strict expansion of `BitSlice` to include allocation management.
+Since `BitVec` is shorter to type, the rest of this document will use it by
+default, and mark out sections that apply *only* to the vector type and not to
+the slice type. Unless marked, assume that the text applies to both.
+
+`BitVec` is generic over an ordering cursor, using the trait `Cursor`, and the
+primitive type, using the trait `Bits`. This means that `BitVec` structures can
+be built with a great deal of flexibility over how they manage their memory and
+translate between the in-memory representation and their semantic contents.
+
+`BitVec` acts as closely to a standard `Vec` as possible, and can be assumed by
+default to be what a `Vec<u1>` would be if such a type were possible to express
+in Rust. It has stack semantics, in that push and pop operations take place only
+on one end of the `BitVec`’s buffer. It supports iteration, bitwise operations,
+and rendering for `Display` and `Debug`.
+
+## How Is This Different Than the `bit_vec` Crate
+
+- It is more recently actively maintained (I may, in the future as of this
+ writing, let it lapse)
+- It doesn’t have a hyphen in the name, so you don’t have to deal with the
+ hyphen/underscore dichotomy.
+- My `BitVec` structure is exactly the size of a `Vec`; theirs is larger.
+- I have a `BitSlice` borrowed view.
+- My types implement all of the standard library’s slice and vector APIs
+
+## Why Would You Use This
+
+- You need to directly control a bitstream’s representation in memory.
+- You need to do unpleasant things with communications protocols.
+- You need a list of `bool`s that doesn’t waste 7 bits for every bit used.
+- You need to do set arithmetic, or numeric arithmetic, on those lists.
+
+## Usage
+
+**Minimum Rust Version**: `1.31.0`
+
+I wrote this crate because I was unhappy with the other bit-vector crates
+available. I specifically need to manage raw memory in bit-level precision, and
+this is not a behavior pattern the other bit-vector crates made easily available
+to me. This served as the guiding star for my development process on this crate,
+and remains the crate’s primary goal.
+
+To this end, the default type parameters for the `BitVec` type use `u8` as the
+storage primitive and use big-endian ordering of bits: the forwards direction is
+from MSb to LSb, and the backwards direction is from LSb to MSb.
+
+To use this crate, you need to depend on it in `Cargo.toml`:
+
+```toml
+[dependencies]
+bitvec = "0.10"
+```
+
+and include it in your crate root `src/main.rs` or `src/lib.rs`:
+
+```rust,no-run
+extern crate bitvec;
+
+use bitvec::*;
+```
+
+This imports the following symbols:
+
+- `bitvec!` – a macro similar to `vec!`, which allows the creation of `BitVec`s
+ of any desired endianness, storage type, and contents. The documentation page
+ has a detailed explanation of its syntax.
+
+- `BitSlice<C: Cursor, T: Bits>` – the actual bit-slice reference type. It is
+ generic over a cursor type (`C`) and storage type (`T`). Note that `BitSlice`
+ is unsized, and can never be held directly; it must always be behind a
+ reference such as `&BitSlice` or `&mut BitSlice`.
+
+ Furthermore, it is *impossible* to put `BitSlice` into any kind of intelligent
+ pointer such as a `Box` or `Rc`! Any work that involves managing the memory
+ behind a bitwise type *must* go through `BitBox` or `BitVec` instead. This may
+ change in the future as I learn how to better manage this library, but for now
+ this limitation stands.
+
+- `BitVec<C: Cursor, T: Bits>` – the actual bit-vector structure type. It is
+ generic over a cursor type (`C`) and storage type (`T`).
+
+- `Cursor` – an open trait that defines an ordering schema for `BitVec` to use.
+ Little and big endian orderings are provided by default. If you wish to
+ implement other ordering types, the `Cursor` trait requires one function:
+
+ - `fn at<T: Bits>(index: u8) -> u8` takes a semantic index and computes a bit
+ offset into the primitive `T` for it.
+
+- `BigEndian` – a zero-sized struct that implements `Cursor` by defining the
+ forward direction as towards LSb and the backward direction as towards MSb.
+
+- `LittleEndian` – a zero-sized struct that implements `Cursor` by defining the
+ forward direction as towards MSb and the backward direction as towards LSb.
+
+- `Bits` – a sealed trait that provides generic access to the four Rust
+ primitives usable as storage types: `u8`, `u16`, `u32`, and `u64`. `usize`
+ and the signed integers do *not* implement `Bits` and cannot be used as the
+ storage type. `u128` also does not implement `Bits`, as I am not confident in
+ its memory representation.
+
+`BitVec` has the same API as `Vec`, and should be easy to use.
+
+The `bitvec!` macro requires type information as its first two arguments.
+Because macros do not have access to the type checker, this currently only
+accepts the literal tokens `BigEndian` or `LittleEndian` as the first argument,
+one of the four unsigned integer primitives as the second argument, and then as
+many values as you wish to insert into the `BitVec`. It accepts any integer
+value, and maps them to bits by comparing against 0. `0` becomes `0` and any
+other integer, whether it is odd or not, becomes `1`. While the syntax is loose,
+you should only use `0` and `1` to fill the macro, for readability and lack of
+surprise.
+
+### `no_std`
+
+This crate can be used in `#![no_std]` libraries, by disabling the default
+feature set. In your `Cargo.toml`, write:
+
+```toml
+[dependencies]
+bitvec = { version = "0.10", default-features = false }
+```
+
+or
+
+```toml
+[dependencies.bitvec]
+version = "0.10"
+default-features = false
+```
+
+This turns off the standard library imports *and* all usage of dynamic memory
+allocation. Without an allocator, the `bitvec!` macro and the `BitVec` type are
+both disabled and removed from the library, leaving only the `BitSlice` type.
+
+To use `bitvec` in a `#![no_std]` environment that *does* have an allocator,
+re-enable the `alloc` feature, like so:
+
+```toml
+[dependencies.bitvec]
+version = "0.10"
+default-features = false
+features = ["alloc"]
+```
+
+The `alloc` feature restores `bitvec!` and `BitVec`, as well as the `BitSlice`
+interoperability with `BitVec`. The only difference between `alloc` and `std` is
+the presence of the standard library façade and runtime support.
+
+The `std` feature turns on `alloc`, so using this crate without any feature
+flags *or* by explicitly enabling the `std` feature will enable full
+functionality.
+
+## Example
+
+```rust
+extern crate bitvec;
+
+use bitvec::*;
+
+use std::iter::repeat;
+
+fn main() {
+ let mut bv = bitvec![BigEndian, u8; 0, 1, 0, 1];
+ bv.reserve(8);
+ bv.extend(repeat(false).take(4).chain(repeat(true).take(4)));
+
+ // Memory access
+ assert_eq!(bv.as_slice(), &[0b0101_0000, 0b1111_0000]);
+ // index 0 -^ ^- index 11
+ assert_eq!(bv.len(), 12);
+ assert!(bv.capacity() >= 16);
+
+ // Set operations
+ bv &= repeat(true);
+ bv = bv | repeat(false);
+ bv ^= repeat(true);
+ bv = !bv;
+
+ // Arithmetic operations
+ let one = bitvec![1];
+ bv += one.clone();
+ assert_eq!(bv.as_slice(), &[0b0101_0001, 0b0000_0000]);
+ bv -= one.clone();
+ assert_eq!(bv.as_slice(), &[0b0101_0000, 0b1111_0000]);
+
+ // Borrowing iteration
+ let mut iter = bv.iter();
+ // index 0
+ assert_eq!(iter.next().unwrap(), false);
+ // index 11
+ assert_eq!(iter.next_back().unwrap(), true);
+ assert_eq!(iter.len(), 10);
+}
+```
+
+Immutable and mutable access to the underlying memory is provided by the `AsRef`
+and `AsMut` implementations, so the `BitVec` can be readily passed to transport
+functions.
+
+`BitVec` implements `Borrow` down to `BitSlice`, and `BitSlice` implements
+`ToOwned` up to `BitVec`, so they can be used in a `Cow` or wherever this API
+is desired. Any case where a `Vec`/`[T]` pair cannot be replaced with a
+`BitVec`/`BitSlice` pair is a bug in this library, and a bug report is
+appropriate.
+
+`BitVec` can relinquish its owned memory as a `Box<[T]>` via the
+`.into_boxed_slice()` method, and `BitSlice` can relinquish access to its memory
+simply by going out of scope.
+
+## Planned Features
+
+Contributions of items in this list are *absolutely* welcome! Contributions of
+other features are also welcome, but I’ll have to be sold on them.
+
+- Creation of specialized pointers `Rc<BitSlice>` and `Arc<BitSlice>`.
+
+[codecov]: https://codecov.io/gh/myrrlyn/bitvec "Code Coverage"
+[codecov_img]: https://img.shields.io/codecov/c/github/myrrlyn/bitvec.svg?logo=codecov "Code Coverage Display"
+[crate]: https://crates.io/crates/bitvec "Crate Link"
+[crate_img]: https://img.shields.io/crates/v/bitvec.svg?logo=rust "Crate Page"
+[docs]: https://docs.rs/bitvec "Documentation"
+[docs_img]: https://docs.rs/bitvec/badge.svg "Documentation Display"
+[downloads_img]: https://img.shields.io/crates/dv/bitvec.svg?logo=rust "Crate Downloads"
+[license_file]: https://github.com/myrrlyn/bitvec/blob/master/LICENSE.txt "License File"
+[license_img]: https://img.shields.io/crates/l/bitvec.svg "License Display"
+[travis]: https://travis-ci.org/myrrlyn/bitvec "Travis CI"
+[travis_img]: https://img.shields.io/travis/myrrlyn/bitvec.svg?logo=travis "Travis CI Display"
diff --git a/third_party/rust/bitvec/doc/Bit Patterns.md b/third_party/rust/bitvec/doc/Bit Patterns.md
new file mode 100644
--- /dev/null
+++ b/third_party/rust/bitvec/doc/Bit Patterns.md
@@ -0,0 +1,118 @@
+# Bit Patterns
+
+This table displays the *bit index*, in [base64], of each position in a
+`BitSlice<Cursor, Fundamental>` on a little-endian machine.
+
+```text
+byte | 00000000 11111111 22222222 33333333 44444444 55555555 66666666 77777777
+bit | 76543210 76543210 76543210 76543210 76543210 76543210 76543210 76543210
+------+------------------------------------------------------------------------
+LEu__ | HGFEDCBA PONMLKJI XWVUTSRQ fedcbaZY nmlkjihg vutsrqpo 3210zyxw /+987654
+BEu64 | 456789+/ wxyz0123 opqrstuv ghijklmn YZabcdef QRSTUVWX IJKLMNOP ABCDEFGH
+BEu32 | YZabcdef QRSTUVWX IJKLMNOP ABCDEFGH 456789+/ wxyz0123 opqrstuv ghijklmn
+BEu16 | IJKLMNOP ABCDEFGH YZabcdef QRSTUVWX opqrstuv ghijklmn 456789+/ wxyz0123
+BEu8 | ABCDEFGH IJKLMNOP QRSTUVWX YZabcdef ghijklmn opqrstuv wxyz0123 456789+/
+```
+
+This table displays the bit index in [base64] of each position in a
+`BitSlice<Cursor, Fundamental>` on a big-endian machine.
+
+```text
+byte | 00000000 11111111 22222222 33333333 44444444 55555555 66666666 77777777
+bit | 76543210 76543210 76543210 76543210 76543210 76543210 76543210 76543210
+------+------------------------------------------------------------------------
+BEu__ | ABCDEFGH IJKLMNOP QRSTUVWX YZabcdef ghijklmn opqrstuv wxyz0123 456789+/
+LEu64 | /+987654 3210zyxw vutsrqpo nmlkjihg fedcbaZY XWVUTSRQ PONMLKJI HGFEDCBA
+LEu32 | fedcbaZY XWVUTSRQ PONMLKJI HGFEDCBA /+987654 3210zyxw vutsrqpo nmlkjihg
+LEu16 | PONMLKJI HGFEDCBA fedcbaZY XWVUTSRQ vutsrqpo nmlkjihg /+987654 3210zyxw
+LEu8 | HGFEDCBA PONMLKJI XWVUTSRQ fedcbaZY nmlkjihg vutsrqpo 3210zyxw /+987654
+```
+
+`<BigEndian, u8>` and `<LittleEndian, u8>` will always have the same
+representation in memory on all machines. The wider cursors will not.
+
+# Pointer Representation
+
+Currently, the bitslice pointer uses the `len` field to address an individual
+bit in the slice. This means that all bitslices can address `usize::MAX` bits,
+regardless of the underlying storage fundamental. The bottom `3 <= n <= 6` bits
+of `len` address the bit in the fundamental, and the high bits address the
+fundamental in the slice.
+
+The next representation of bitslice pointer will permit the data pointer to
+address any *byte*, regardless of fundamental type, and address any bit in that
+byte by storing the bit position in `len`. This reduces the bit storage capacity
+of bitslice from `usize::MAX` to `usize::MAX / 8`. 2<sup>29</sup> is still a
+very large number, so I do not anticipate 32-bit machines being too limited by
+this.
+
+This means that bitslice pointers will have the following representation, in C++
+because Rust lacks bitfield syntax.
+
+```cpp
+template<typename T>
+struct WidePtr {
+ size_t ptr_byte : __builtin_ctzll(alignof(T)); // 0 ... 3
+ size_t ptr_data : sizeof(T*) * 8
+ - __builtin_ctzll(alignof(T)); // 64 ... 61
+
+ size_t len_head : 3;
+ size_t len_tail : 3 + __builtin_ctzll(alignof(T)); // 3 ... 6
+ size_t len_data : sizeof(size_t) * 8
+ - 6 - __builtin_ctzll(alignof(T)); // 58 ... 55
+};
+```
+
+So, for any storage fundamental, its bitslice pointer representation has:
+
+- the low `alignof` bits of the pointer for selecting a byte, and the rest of
+ the pointer for selecting the fundamental. This is just a `*const u8` except
+ the type remembers how to find the correctly aligned pointer.
+
+- the lowest 3 bits of the length counter for selecting the bit under the head
+ pointer
+- the *next* (3 + log<sub>2</sub>(bit size)) bits of the length counter address
+ the final bit within the final *storage fundamental* of the slice.
+- the remaining high bits address the final *storage fundamental* of the slice,
+ counting from the correctly aligned address in the pointer.
+
+# Calculations
+
+Given an arbitrary `WidePtr<T>` value,
+
+- the initial `*const T` pointer is retrieved by masking away the low bits of
+ the `ptr` value
+
+- the number of `T` elements *between* the first and the last is found by taking
+ the `len` value, masking away the low bits, and shifting right/down.
+
+- the number of `T` elements in the slice is found by taking the above and
+ adding one
+
+- the address of the last `T` element in the slice is found by taking the
+ initial pointer, and adding the `T`-element-count to it
+
+- the slot number of the first live bit in the slice is found by masking away
+ the high bits of `ptr` and shifting the result left/up by three, then adding
+ the low three bits of `len`
+
+# Values
+
+## Uninhabited Domains
+
+All pointers whose non-`data` members are fully zeroed are considered
+uninhabited. When the `data` member is the null pointer, then the slice is
+*empty*; when it is non-null, the slice points to a validly allocated region of
+memory and is merely uninhabited. This distinction is important for vectors.
+
+## Full Domains
+
+The longest possible domain has `!0` as its `elts` and `tail` values, and `0`
+as its `head` value.
+
+When `elts` and `tail` are both `!0`, then the `!0`th element has `!0 - 1` live
+bits. The final bit in the final element is a tombstone that cannot be used.
+This is a regrettable consequence of the need to distinguish between the nil and
+uninhabited slices.
+
+[base64]: https://en.wikipedia.org/wiki/Base64
diff --git a/third_party/rust/bitvec/examples/readme.rs b/third_party/rust/bitvec/examples/readme.rs
new file mode 100644
--- /dev/null
+++ b/third_party/rust/bitvec/examples/readme.rs
@@ -0,0 +1,50 @@
+/*! Prove that the example code in `README.md` executes.
+!*/
+
+#[cfg(feature = "alloc")]
+extern crate bitvec;
+
+#[cfg(feature = "alloc")]
+use bitvec::*;
+
+#[cfg(feature = "alloc")]
+use std::iter::repeat;
+
+#[cfg(feature = "alloc")]
+fn main() {
+ let mut bv = bitvec![BigEndian, u8; 0, 1, 0, 1];
+ bv.reserve(8);
+ bv.extend(repeat(false).take(4).chain(repeat(true).take(4)));
+
+ // Memory access
+ assert_eq!(bv.as_slice(), &[0b0101_0000, 0b1111_0000]);
+ // index 0 -^ ^- index 11
+ assert_eq!(bv.len(), 12);
+ assert!(bv.capacity() >= 16);
+
+ // Set operations
+ bv &= repeat(true);
+ bv = bv | repeat(false);
+ bv ^= repeat(true);
+ bv = !bv;
+
+ // Arithmetic operations
+ let one = bitvec![1];
+ bv += one.clone();
+ assert_eq!(bv.as_slice(), &[0b0101_0001, 0b0000_0000]);
+ bv -= one.clone();
+ assert_eq!(bv.as_slice(), &[0b0101_0000, 0b1111_0000]);
+
+ // Borrowing iteration
+ let mut iter = bv.iter();
+ // index 0
+ assert_eq!(iter.next().unwrap(), false);
+ // index 11
+ assert_eq!(iter.next_back().unwrap(), true);
+ assert_eq!(iter.len(), 10);
+}
+
+#[cfg(not(feature = "alloc"))]
+fn main() {
+ println!("This example only runs when an allocator is present");
+}
diff --git a/third_party/rust/bitvec/examples/sieve.rs b/third_party/rust/bitvec/examples/sieve.rs
new file mode 100644
--- /dev/null
+++ b/third_party/rust/bitvec/examples/sieve.rs
@@ -0,0 +1,125 @@
+/*! Sieve of Eratosthenes
+
+The `bit_vec` crate had this as an example, so I do too, I guess.
+
+Run with
+
+```sh
+$ cargo run --release --example sieve -- [max] [count]
+```
+
+where max is an optional maximum number below which all primes will be found,
+and count is an optional number whose square will be used to display the bottom
+primes.
+
+For example,
+
+```sh
+$ cargo run --release --example sieve -- 10000000 25
+```
+
+will find all primes less than ten million, and print the primes below 625 in a
+square 25x25.
+!*/
+
+#[cfg(feature = "alloc")]
+extern crate bitvec;
+
+#[cfg(feature = "alloc")]
+use bitvec::{
+ BitVec,
+ BigEndian,
+};
+#[cfg(feature = "alloc")]
+use std::env;
+
+#[cfg(feature = "alloc")]
+fn main() {
+ let max_prime: usize = env::args()
+ .nth(1)
+ .unwrap_or("1000000".into())
+ .parse()
+ .unwrap_or(1_000_000);
+
+ let primes = {
+ let mut bv = BitVec::<BigEndian, u64>::with_capacity(max_prime);
+ bv.set_elements(!0u64);
+
+ // Consider the vector fully populated
+ unsafe { bv.set_len(max_prime); }
+
+ // 0 and 1 are not primes
+ bv.set(0, false);
+ bv.set(1, false);
+
+ for n in 2 .. (1 + (max_prime as f64).sqrt() as usize) {
+ // Adjust the frequency of log statements vaguely logarithmically.
+ if n < 20_000 && n % 1_000 == 0
+ || n < 50_000 && n % 5_000 == 0
+ || n < 100_000 && n % 10_000 == 0 {
+ println!("Calculating {}…", n);
+ }
+ // If n is prime, mark all multiples as non-prime
+ if bv[n] {
+ if n < 50 {
+ println!("Calculating {}…", n);
+ }
+ 'inner:
+ for i in n .. {
+ let j = n * i;
+ if j >= max_prime {
+ break 'inner;
+ }
+ bv.set(j, false);
+ }
+ }
+ }
+ println!("Calculation complete!");
+
+ bv
+ };
+
+ // Count primes and non-primes.
+ let (mut one, mut zero) = (0u64, 0u64);
+ for n in primes.iter() {
+ if n {
+ one += 1;
+ }
+ else {
+ zero += 1;
+ }
+ }
+ println!("Counting complete!");
+ println!("There are {} primes and {} non-primes below {}", one, zero, max_prime);
+
+ let dim: usize = env::args()
+ .nth(2)
+ .unwrap_or("10".into())
+ .parse()
+ .unwrap_or(10);
+
+ println!("The primes smaller than {} are:", dim * dim);
+ let len = primes.len();
+ 'outer:
+ for i in 0 .. dim {
+ for j in 0 .. dim {
+ let k = i * dim + j;
+ if k >= len {
+ println!();
+ break 'outer;
+ }
+ if primes[k] {
+ print!("{:>4} ", k);
+ }
+ else {
+ print!(" ");
+ }
+ }
+ println!();
+ }
+}
+
+#[cfg(not(feature = "alloc"))]
+fn main() {
+ println!("This example only runs when an allocator is present");
+}
diff --git a/third_party/rust/bitvec/examples/tour.rs b/third_party/rust/bitvec/examples/tour.rs
new file mode 100644
--- /dev/null
+++ b/third_party/rust/bitvec/examples/tour.rs
@@ -0,0 +1,113 @@
+/*! Demonstrates construction and use of a big-endian, u8, `BitVec`
+
+This example uses `bitvec!` to construct a `BitVec` from literals, then shows
+a sample of the various operations that can be applied to it.
+
+This example prints **a lot** of text to the console.
+!*/
+
+#[cfg(feature = "alloc")]
+extern crate bitvec;
+
+#[cfg(feature = "alloc")]
+use bitvec::{
+ // `bitvec!` macro
+ bitvec,
+ // trait unifying the primitives (you shouldn’t explicitly need this)
+ Bits,
+ // primary type of the whole crate! this is where the magic happens
+ BitVec,
+ // element-traversal trait (you shouldn’t explicitly need this)
+ Cursor,
+ // directionality type marker (the default for `BitVec`; you will rarely
+ // explicitly need this)
+ BigEndian,
+ // directionality type marker (you will explicitly need this if you want
+ // this ordering)
+ LittleEndian,
+};
+#[cfg(feature = "alloc")]
+use std::iter::repeat;
+
+#[cfg(feature = "alloc")]
+fn main() {
+ let bv = bitvec![ // BigEndian, u8; // default type values
+ 0, 0, 0, 0, 0, 0, 0, 1,
+ 0, 0, 0, 0, 0, 0, 1, 0,
+ 0, 0, 0, 0, 0, 1, 0, 0,
+ 0, 0, 0, 0, 1, 0, 0, 0,
+ 0, 0, 0, 1, 0, 0, 0, 0,
+ 0, 0, 1, 0, 0, 0, 0, 0,
+ 0, 1, 0, 0, 0, 0, 0, 0,
+ 1, 0, 0, 0, 0, 0, 0, 0,
+ 1, 0, 0, 0, 0, 0, 0, 0,
+ 0, 1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 1, 0, 0, 0, 0, 0,
+ 0, 0, 0, 1, 0, 0, 0, 0,
+ 0, 0, 0, 0, 1, 0, 0, 0,
+ 0, 0, 0, 0, 0, 1, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1,
+ 1, 0, 1, 0,
+ ];
+ println!("A BigEndian BitVec has the same layout in memory as it does \
+ semantically");
+ render(&bv);
+
+ // BitVec can turn into iterators, and be built from iterators.
+ let bv: BitVec<LittleEndian, u8> = bv.into_iter().collect();
+ println!("A LittleEndian BitVec has the opposite layout in memory as it \
+ does semantically");
+ render(&bv);
+
+ let bv: BitVec<BigEndian, u16> = bv.into_iter().collect();
+ println!("A BitVec can use storage other than u8");
+ render(&bv);
+
+ println!("BitVec can participate in Boolean arithmetic");
+ let full = bv.clone() | repeat(true);
+ render(&full);
+ let empty = full & repeat(false);
+ render(&empty);
+ let flip = bv ^ repeat(true);
+ render(&flip);
+ let bv = !flip;
+ render(&bv);
+
+ println!("\
+Notice that `^` did not affect the parts of the tail that were not in
+use, while `!` did affect them. `^` requires a second source, while `!`
+can just flip all elements. `!` is faster, but `^` is less likely to
+break your assumptions about what the memory looks like.\
+ ");
+
+ // Push and pop to the bitvec
+ let mut bv = bv;
+ for _ in 0 .. 12 {
+ bv.push(false);
+ }
+ for _ in 0 .. 12 {
+ bv.pop();
+ }
+ render(&bv);
+
+ println!("End example");
+
+ fn render<C: Cursor, T: Bits>(bv: &BitVec<C, T>) {
+ println!("Memory information: {} elements, {}", bv.as_slice().len(), bv.len());
+ println!("Print out the semantic contents");
+ println!("{:#?}", bv);
+ println!("Print out the memory contents");
+ println!("{:?}", bv.as_slice());
+ println!("Show the bits in memory");
+ for elt in bv.as_slice() {
+ println!("{:0w$b} ", elt, w=std::mem::size_of::<T>() * 8);
+ }
+ println!();
+ }
+}
+
+#[cfg(not(feature = "alloc"))]
+fn main() {
+ println!("This example only runs when an allocator is present");
+}
diff --git a/third_party/rust/bitvec/src/bits.rs b/third_party/rust/bitvec/src/bits.rs
new file mode 100644
--- /dev/null
+++ b/third_party/rust/bitvec/src/bits.rs
@@ -0,0 +1,724 @@
+/*! Bit Management
+
+The `Bits` trait defines constants and free functions suitable for managing bit
+storage of a fundamental, and is the constraint for the storage type of the data
+structures of the rest of the crate.
+!*/
+
+use crate::Cursor;
+use core::{
+ cmp::Eq,
+ convert::From,
+ fmt::{
+ self,
+ Binary,
+ Debug,
+ Display,
+ Formatter,
+ LowerHex,
+ UpperHex,
+ },
+ mem::size_of,
+ ops::{
+ BitAnd,
+ BitAndAssign,
+ BitOrAssign,
+ Deref,
+ DerefMut,
+ Not,
+ Shl,
+ ShlAssign,
+ Shr,
+ ShrAssign,
+ },
+};
+
+/** Generalizes over the fundamental types for use in `bitvec` data structures.
+
+This trait must only be implemented on unsigned integer primitives with full
+alignment. It cannot be implemented on `u128` on any architecture, or on `u64`
+on 32-bit systems.
+
+The `Sealed` supertrait ensures that this can only be implemented locally, and
+will never be implemented by downstream crates on new types.
+**/
+pub trait Bits:
+ // Forbid external implementation
+ Sealed
+ + Binary
+ // Element-wise binary manipulation
+ + BitAnd<Self, Output=Self>
+ + BitAndAssign<Self>
+ + BitOrAssign<Self>
+ // Permit indexing into a generic array
+ + Copy
+ + Debug
+ + Display
+ // Permit testing a value against 1 in `get()`.
+ + Eq
+ // Rust treats numeric literals in code as vaguely typed and does not make
+ // them concrete until long after trait expansion, so this enables building
+ // a concrete Self value from a numeric literal.
+ + From<u8>
+ // Permit extending into a `u64`.
+ + Into<u64>
+ + LowerHex
+ + Not<Output=Self>
+ + Shl<u8, Output=Self>
+ + ShlAssign<u8>
+ + Shr<u8, Output=Self>
+ + ShrAssign<u8>
+ // Allow direct access to a concrete implementor type.
+ + Sized
+ + UpperHex
+{
+ /// The size, in bits, of this type.
+ const SIZE: u8 = size_of::<Self>() as u8 * 8;
+
+ /// The number of bits required to index the type. This is always
+ /// log<sub>2</sub> of the type’s bit size.
+ ///
+ /// Incidentally, this can be computed as `Self::SIZE.trailing_zeros()` once
+ /// that becomes a valid constexpr.
+ const BITS: u8; // = Self::SIZE.trailing_zeros() as u8;
+
+ /// The bitmask to turn an arbitrary `usize` into a bit index. Bit indices
+ /// are always stored in the lowest bits of an index value.
+ const MASK: u8 = Self::SIZE - 1;
+
+ /// Name of the implementing type.
+ const TYPENAME: &'static str;
+
+ /// Sets a specific bit in an element to a given value.
+ ///
+ /// # Parameters
+ ///
+ /// - `place`: A bit index in the element, from `0` at `LSb` to `Self::MASK`
+ /// at `MSb`. The bit under this index will be set according to `value`.
+ /// - `value`: A Boolean value, which sets the bit on `true` and unsets it
+ /// on `false`.
+ ///
+ /// # Type Parameters
+ ///
+ /// - `C: Cursor`: A `Cursor` implementation to translate the index into a
+ /// position.
+ ///
+ /// # Panics
+ ///
+ /// This function panics if `place` is not less than `T::SIZE`, in order
+ /// to avoid index out of range errors.
+ ///
+ /// # Examples
+ ///
+ /// This example sets and unsets bits in a byte.
+ ///
+ /// ```rust
+ /// use bitvec::{Bits, LittleEndian};
+ /// let mut elt: u8 = 0;
+ /// elt.set::<LittleEndian>(0.into(), true);
+ /// assert_eq!(elt, 0b0000_0001);
+ /// elt.set::<LittleEndian>(4.into(), true);
+ /// assert_eq!(elt, 0b0001_0001);
+ /// elt.set::<LittleEndian>(0.into(), false);
+ /// assert_eq!(elt, 0b0001_0000);
+ /// ```
+ ///
+ /// This example overruns the index, and panics.
+ ///
+ /// ```rust,should_panic
+ /// use bitvec::{Bits, LittleEndian};
+ /// let mut elt: u8 = 0;
+ /// elt.set::<LittleEndian>(8.into(), true);
+ /// ```
+ fn set<C: Cursor>(&mut self, place: BitIdx, value: bool) {
+ let place: BitPos = C::at::<Self>(place);
+ assert!(
+ *place < Self::SIZE,
+ "Index out of range: {} overflows {}",
+ *place,
+ Self::SIZE,
+ );
+ // Blank the selected bit
+ *self &= !(Self::from(1u8) << *place);
+ // Set the selected bit
+ *self |= Self::from(value as u8) << *place;
+ }
+
+ /// Gets a specific bit in an element.
+ ///
+ /// # Parameters
+ ///
+ /// - `place`: A bit index in the element, from `0` at `LSb` to `Self::MASK`
+ /// at `MSb`. The bit under this index will be retrieved as a `bool`.
+ ///
+ /// # Returns
+ ///
+ /// The value of the bit under `place`, as a `bool`.
+ ///
+ /// # Type Parameters
+ ///
+ /// - `C: Cursor`: A `Cursor` implementation to translate the index into a
+ /// position.
+ ///
+ /// # Panics
+ ///
+ /// This function panics if `place` is not less than `T::SIZE`, in order
+ /// to avoid index out of range errors.
+ ///
+ /// # Examples
+ ///
+ /// This example gets two bits from a byte.
+ ///
+ /// ```rust
+ /// use bitvec::{Bits, LittleEndian};
+ /// let elt: u8 = 0b0000_0100;
+ /// assert!(!elt.get::<LittleEndian>(1.into()));
+ /// assert!(elt.get::<LittleEndian>(2.into()));
+ /// assert!(!elt.get::<LittleEndian>(3.into()));
+ /// ```
+ ///
+ /// This example overruns the index, and panics.
+ ///
+ /// ```rust,should_panic
+ /// use bitvec::{Bits, LittleEndian};
+ /// 0u8.get::<LittleEndian>(8.into());
+ /// ```
+ fn get<C: Cursor>(&self, place: BitIdx) -> bool {
+ let place: BitPos = C::at::<Self>(place);
+ assert!(
+ *place < Self::SIZE,
+ "Index out of range: {} overflows {}",
+ *place,
+ Self::SIZE,
+ );
+ // Shift down so the targeted bit is in LSb, then blank all other bits.
+ (*self >> *place) & Self::from(1) == Self::from(1)
+ }
+
+ /// Counts how many bits in `self` are set to `1`.
+ ///
+ /// This zero-extends `self` to `u64`, and uses the [`u64::count_ones`]
+ /// inherent method.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// The number of bits in `self` set to `1`. This is a `usize` instead of a
+ /// `u32` in order to ease arithmetic throughout the crate.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::Bits;
+ /// assert_eq!(Bits::count_ones(&0u8), 0);
+ /// assert_eq!(Bits::count_ones(&128u8), 1);
+ /// assert_eq!(Bits::count_ones(&192u8), 2);
+ /// assert_eq!(Bits::count_ones(&224u8), 3);
+ /// assert_eq!(Bits::count_ones(&240u8), 4);
+ /// assert_eq!(Bits::count_ones(&248u8), 5);
+ /// assert_eq!(Bits::count_ones(&252u8), 6);
+ /// assert_eq!(Bits::count_ones(&254u8), 7);
+ /// assert_eq!(Bits::count_ones(&255u8), 8);
+ /// ```
+ ///
+ /// [`u64::count_ones`]: https://doc.rust-lang.org/stable/std/primitive.u64.html#method.count_ones
+ #[inline(always)]
+ fn count_ones(&self) -> usize {
+ u64::count_ones((*self).into()) as usize
+ }
+
+ /// Counts how many bits in `self` are set to `0`.
+ ///
+ /// This inverts `self`, so all `0` bits are `1` and all `1` bits are `0`,
+ /// then zero-extends `self` to `u64` and uses the [`u64::count_ones`]
+ /// inherent method.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// The number of bits in `self` set to `0`. This is a `usize` instead of a
+ /// `u32` in order to ease arithmetic throughout the crate.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::Bits;
+ /// assert_eq!(Bits::count_zeros(&0u8), 8);
+ /// assert_eq!(Bits::count_zeros(&1u8), 7);
+ /// assert_eq!(Bits::count_zeros(&3u8), 6);
+ /// assert_eq!(Bits::count_zeros(&7u8), 5);
+ /// assert_eq!(Bits::count_zeros(&15u8), 4);
+ /// assert_eq!(Bits::count_zeros(&31u8), 3);
+ /// assert_eq!(Bits::count_zeros(&63u8), 2);
+ /// assert_eq!(Bits::count_zeros(&127u8), 1);
+ /// assert_eq!(Bits::count_zeros(&255u8), 0);
+ /// ```
+ ///
+ /// [`u64::count_ones`]: https://doc.rust-lang.org/stable/std/primitive.u64.html#method.count_ones
+ #[inline(always)]
+ fn count_zeros(&self) -> usize {
+ u64::count_ones((!*self).into()) as usize
+ }
+}
+
+/** Newtype indicating a semantic index into an element.
+
+This type is consumed by [`Cursor`] implementors, which use it to produce a
+concrete bit position inside an element.
+
+`BitIdx` is a semantic counter which has a defined, constant, and predictable
+ordering. Values of `BitIdx` refer strictly to abstract ordering, and not to the
+actual position in an element, so `BitIdx(0)` is the first bit in an element,
+but is not required to be the electrical `LSb`, `MSb`, or any other.
+
+[`Cursor`]: ../trait.Cursor.html
+**/
+#[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
+pub struct BitIdx(pub(crate) u8);
+
+impl BitIdx {
+ /// Checks if the index is valid for a type.
+ ///
+ /// # Parameters
+ ///
+ /// - `self`: The index to validate.
+ ///
+ /// # Returns
+ ///
+ /// Whether the index is valid for the storage type in question.
+ ///
+ /// # Type Parameters
+ ///
+ /// - `T: Bits`: The storage type used to determine index validity.
+ pub fn is_valid<T: Bits>(self) -> bool {
+ *self < T::SIZE
+ }
+
+ /// Increments a cursor to the next value, wrapping if needed.
+ ///
+ /// # Parameters
+ ///
+ /// - `self`: The original cursor.
+ ///
+ /// # Returns
+ ///
+ /// - `Self`: An incremented cursor.
+ /// - `bool`: Marks whether the increment crossed an element boundary.
+ ///
+ /// # Type Parameters
+ ///
+ /// - `T: Bits`: The storage type for which the increment will be
+ /// calculated.
+ ///
+ /// # Panics
+ ///
+ /// This method panics if `self` is not less than `T::SIZE`, in order to
+ /// avoid index out of range errors.
+ ///
+ /// # Examples
+ ///
+ /// This example increments inside an element.
+ ///
+ /// ```rust
+ /// # #[cfg(feature = "testing")] {
+ /// use bitvec::testing::BitIdx;
+ /// assert_eq!(BitIdx::from(3).incr::<u8>(), (4.into(), false));
+ /// # }
+ /// ```
+ ///
+ /// This example increments at the high edge, and wraps to the next element.
+ ///
+ /// ```rust
+ /// # #[cfg(feature = "testing")] {
+ /// use bitvec::testing::BitIdx;
+ /// assert_eq!(BitIdx::from(7).incr::<u8>(), (0.into(), true));
+ /// # }
+ /// ```
+ pub fn incr<T: Bits>(self) -> (Self, bool) {
+ assert!(
+ *self < T::SIZE,
+ "Index out of range: {} overflows {}",
+ *self,
+ T::SIZE,
+ );
+ let next = (*self).wrapping_add(1) & T::MASK;
+ (next.into(), next == 0)
+ }
+
+ /// Decrements a cursor to the previous value, wrapping if needed.
+ ///
+ /// # Parameters
+ ///
+ /// - `self`: The original cursor.
+ ///
+ /// # Returns
+ ///
+ /// - `Self`: A decremented cursor.
+ /// - `bool`: Marks whether the decrement crossed an element boundary.
+ ///
+ /// # Type Parameters
+ ///
+ /// - `T: Bits`: The storage type for which the decrement will be
+ /// calculated.
+ ///
+ /// # Panics
+ ///
+ /// This method panics if `self` is not less than `T::SIZE`, in order to
+ /// avoid index out of range errors.
+ ///
+ /// # Examples
+ ///
+ /// This example decrements inside an element.
+ ///
+ /// ```rust
+ /// # #[cfg(feature = "testing")] {
+ /// use bitvec::testing::BitIdx;
+ /// assert_eq!(BitIdx::from(5).decr::<u8>(), (4.into(), false));
+ /// # }
+ /// ```
+ ///
+ /// This example decrements at the low edge, and wraps to the previous
+ /// element.
+ ///
+ /// ```rust
+ /// # #[cfg(feature = "testing")] {
+ /// use bitvec::testing::BitIdx;
+ /// assert_eq!(BitIdx::from(0).decr::<u8>(), (7.into(), true));
+ /// # }
+ pub fn decr<T: Bits>(self) -> (Self, bool) {
+ assert!(
+ *self < T::SIZE,
+ "Index out of range: {} overflows {}",
+ *self,
+ T::SIZE,
+ );
+ let (prev, wrap) = (*self).overflowing_sub(1);
+ ((prev & T::MASK).into(), wrap)
+ }
+
+ /// Finds the destination bit a certain distance away from a starting bit.
+ ///
+ /// This produces the number of elements to move, and then the bit index of
+ /// the destination bit in the destination element.
+ ///
+ /// # Parameters
+ ///
+ /// - `self`: The bit index in an element of the starting position. This
+ /// must be in the domain `0 .. T::SIZE`.
+ /// - `by`: The number of bits by which to move. Negative values move
+ /// downwards in memory: towards `LSb`, then starting again at `MSb` of
+ /// the prior element in memory (decreasing address). Positive values move
+ /// upwards in memory: towards `MSb`, then starting again at `LSb` of the
+ /// subsequent element in memory (increasing address).
+ ///
+ /// # Returns
+ ///
+ /// - `isize`: The number of elements by which to change the caller’s
+ /// element cursor. This value can be passed directly into [`ptr::offset`]
+ /// - `BitIdx`: The bit index of the destination bit in the newly selected
+ /// element. This will always be in the domain `0 .. T::SIZE`. This
+ /// value can be passed directly into [`Cursor`] functions to compute the
+ /// correct place in the element.
+ ///
+ /// # Type Parameters
+ ///
+ /// - `T: Bits`: The storage type with which the offset will be calculated.
+ ///
+ /// # Panics
+ ///
+ /// This function panics if `from` is not less than `T::SIZE`, in order
+ /// to avoid index out of range errors.
+ ///
+ /// # Safety
+ ///
+ /// `by` must not be large enough to cause the returned `isize` value to,
+ /// when applied to [`ptr::offset`], produce a reference out of bounds of
+ /// the original allocation. This method has no means of checking this
+ /// requirement.
+ ///
+ /// # Examples
+ ///
+ /// This example calculates offsets within the same element.
+ ///
+ /// ```rust
+ /// # #[cfg(feature = "testing")] {
+ /// use bitvec::testing::BitIdx;
+ /// assert_eq!(BitIdx::from(1).offset::<u32>(4isize), (0, 5.into()));
+ /// assert_eq!(BitIdx::from(6).offset::<u32>(-3isize), (0, 3.into()));
+ /// # }
+ /// ```
+ ///
+ /// This example calculates offsets that cross into other elements. It uses
+ /// `u32`, so the bit index domain is `0 ..= 31`.
+ ///
+ /// `7 - 18`, modulo 32, wraps down from 0 to 31 and continues decreasing.
+ /// `23 + 68`, modulo 32, wraps up from 31 to 0 and continues increasing.
+ ///
+ /// ```rust
+ /// # #[cfg(feature = "testing")] {
+ /// use bitvec::testing::BitIdx;
+ /// assert_eq!(BitIdx::from(7).offset::<u32>(-18isize), (-1, 21.into()));
+ /// assert_eq!(BitIdx::from(23).offset::<u32>(68isize), (2, 27.into()));
+ /// # }
+ /// ```
+ ///
+ /// [`Cursor`]: ../trait.Cursor.html
+ /// [`ptr::offset`]: https://doc.rust-lang.org/stable/std/primitive.pointer.html#method.offset
+ pub fn offset<T: Bits>(self, by: isize) -> (isize, Self) {
+ assert!(
+ *self < T::SIZE,
+ "Index out of range: {} overflows {}",
+ *self,
+ T::SIZE,
+ );
+ // If the `isize` addition does not overflow, then the sum can be used
+ // directly.
+ if let (far, false) = by.overflowing_add(*self as isize) {
+ // If `far` is in the domain `0 .. T::SIZE`, then the offset did
+ // not depart the element.
+ if far >= 0 && far < T::SIZE as isize {
+ (0, (far as u8).into())
+ }
+ // If `far` is negative, then the offset leaves the initial element
+ // going down. If `far` is not less than `T::SIZE`, then the
+ // offset leaves the initial element going up.
+ else {
+ // `Shr` on `isize` sign-extends
+ (
+ far >> T::BITS,
+ ((far & (T::MASK as isize)) as u8).into(),
+ )
+ }
+ }
+ // If the `isize` addition overflows, then the `by` offset is positive.
+ // Add as `usize` and use that. This is guaranteed not to overflow,
+ // because `isize -> usize` doubles the domain, but `self` is limited
+ // to `0 .. T::SIZE`.
+ else {
+ let far = *self as usize + by as usize;
+ // This addition will always result in a `usize` whose lowest
+ // `T::BITS` bits are the bit index in the destination element,
+ // and the rest of the high bits (shifted down) are the number of
+ // elements by which to advance.
+ (
+ (far >> T::BITS) as isize,
+ ((far & (T::MASK as usize)) as u8).into(),
+ )
+ }
+ }
+
+ /// Computes the size of a span from `self` for `len` bits.
+ ///
+ /// # Parameters
+ ///
+ /// - `self`
+ /// - `len`: The number of bits to include in the span.
+ ///
+ /// # Returns
+ ///
+ /// - `usize`: The number of elements `T` included in the span. This will
+ /// be in the domain `1 .. usize::max_value()`.
+ /// - `BitIdx`: The index of the first bit *after* the span. This will be in
+ /// the domain `1 ..= T::SIZE`.
+ ///
+ /// # Type Parameters
+ ///
+ /// - `T: Bits`: The type of the elements for which this span is computed.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// # #[cfg(feature = "testing")] {
+ /// use bitvec::testing::{BitIdx, Bits};
+ ///
+ /// let h: BitIdx = 0.into();
+ /// assert_eq!(BitIdx::from(0).span::<u8>(8), (1, 8.into()))
+ /// # }
+ /// ```
+ pub fn span<T: Bits>(self, len: usize) -> (usize, BitIdx) {
+ // Number of bits in the head *element*. Domain 32 .. 0.
+ let bits_in_head = (T::SIZE - *self) as usize;
+ // If there are n bits live between the head cursor (which marks the
+ // address of the first live bit) and the back edge of the element,
+ // then when len is <= n, the span covers one element.
+ // When len == n, the tail will be T::SIZE, which is valid for a tail.
+ // TODO(myrrlyn): Separate BitIdx into Head and Tail types, which have
+ // their proper range enforcements.
+ if len <= bits_in_head {
+ (1, (*self + len as u8).into())
+ }
+ // If there are more bits in the span than n, then subtract n from len
+ // and use the difference to count elements and bits.
+ else {
+ // 1 ..
+ let bits_after_head = len - bits_in_head;
+ // Count the number of wholly filled elements
+ let whole_elts = bits_after_head >> T::BITS;
+ // Count the number of bits in the *next* element. If this is zero,
+ // become T::SIZE; if it is nonzero, add one more to elts. elts
+ // must have one added to it by default to account for the head
+ // element.
+ let tail_bits = bits_after_head as u8 & T::MASK;
+ if tail_bits == 0 {
+ (whole_elts + 1, T::SIZE.into())
+ }
+ else {
+ (whole_elts + 2, tail_bits.into())
+ }
+ }
+ }
+}
+
+/// Wraps a `u8` as a `BitIdx`.
+impl From<u8> for BitIdx {
+ fn from(src: u8) -> Self {
+ BitIdx(src)
+ }
+}
+
+/// Unwraps a `BitIdx` to a `u8`.
+impl Into<u8> for BitIdx {
+ fn into(self) -> u8 {
+ self.0
+ }
+}
+
+impl Display for BitIdx {
+ fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+ write!(f, "BitIdx({})", self.0)
+ }
+}
+
+impl Deref for BitIdx {
+ type Target = u8;
+ fn deref(&self) -> &Self::Target { &self.0 } }
+
+impl DerefMut for BitIdx {
+ fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 }
+}
+
+/** Newtype indicating a concrete index into an element.
+
+This type is produced by [`Cursor`] implementors, and denotes a concrete bit in
+an element rather than a semantic bit.
+
+`Cursor` implementors translate `BitIdx` values, which are semantic places, into
+`BitPos` values, which are concrete electrical positions.
+
+[`Cursor`]: ../trait.Cursor.html
+**/
+#[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
+pub struct BitPos(u8);
+
+impl BitPos {
+ /// Checks if the position is valid for a type.
+ ///
+ /// # Parameters
+ ///
+ /// - `self`: The position to validate.
+ ///
+ /// # Returns
+ ///
+ /// Whether the position is valid for the storage type in question.
+ ///
+ /// # Type Parameters
+ ///
+ /// - `T: Bits`: The storage type used to determine position validity.
+ pub fn is_valid<T: Bits>(self) -> bool {
+ *self < T::SIZE
+ }
+}
+
+/// Wraps a `u8` as a `BitPos`.
+impl From<u8> for BitPos {
+ fn from(src: u8) -> Self {
+ BitPos(src)
+ }
+}
+
+/// Unwraps a `BitPos` to a `u8`.
+impl Into<u8> for BitPos {
+ fn into(self) -> u8 {
+ self.0
+ }
+}
+
+impl Display for BitPos {
+ fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+ write!(f, "BitPos({})", self.0)
+ }
+}
+
+impl Deref for BitPos {
+ type Target = u8;
+ fn deref(&self) -> &Self::Target { &self.0 }
+}
+
+impl DerefMut for BitPos {
+ fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 }
+}
+
+impl Bits for u8 { const BITS: u8 = 3; const TYPENAME: &'static str = "u8"; }
+impl Bits for u16 { const BITS: u8 = 4; const TYPENAME: &'static str = "u16"; }
+impl Bits for u32 { const BITS: u8 = 5; const TYPENAME: &'static str = "u32"; }
+
+#[cfg(target_pointer_width = "64")]
+impl Bits for u64 { const BITS: u8 = 6; const TYPENAME: &'static str = "u64"; }
+
+/// Marker trait to seal `Bits` against downstream implementation.
+///
+/// This trait is public in the module, so that other modules in the crate can
+/// use it, but so long as it is not exported by the crate root and this module
+/// is private, this trait effectively forbids downstream implementation of the
+/// `Bits` trait.
+#[doc(hidden)]
+pub trait Sealed {}
+
+impl Sealed for u8 {}
+impl Sealed for u16 {}
+impl Sealed for u32 {}
+
+#[cfg(target_pointer_width = "64")]
+impl Sealed for u64 {}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn jump_far_up() {
+ // isize::max_value() is 0x7f...ff, so the result bit will be one less
+ // than the start bit.
+ for n in 1 .. 8 {
+ let (elt, bit) = BitIdx::from(n).offset::<u8>(isize::max_value());
+ assert_eq!(elt, (isize::max_value() >> u8::BITS) + 1);
+ assert_eq!(*bit, n - 1);
+ }
+ let (elt, bit) = BitIdx::from(0).offset::<u8>(isize::max_value());
+ assert_eq!(elt, isize::max_value() >> u8::BITS);
+ assert_eq!(*bit, 7);
+ }
+
+ #[test]
+ fn jump_far_down() {
+ // isize::min_value() is 0x80...00, so the result bit will be equal to
+ // the start bit
+ for n in 0 .. 8 {
+ let (elt, bit) = BitIdx::from(n).offset::<u8>(isize::min_value());
+ assert_eq!(elt, isize::min_value() >> u8::BITS);
+ assert_eq!(*bit, n);
+ }
+ }
+
+ #[test]
+ #[should_panic]
+ fn offset_out_of_bound() {
+ BitIdx::from(64).offset::<u64>(isize::max_value());
+ }
+}
diff --git a/third_party/rust/bitvec/src/boxed.rs b/third_party/rust/bitvec/src/boxed.rs
new file mode 100644
--- /dev/null
+++ b/third_party/rust/bitvec/src/boxed.rs
@@ -0,0 +1,917 @@
+/*! `BitBox` structure
+
+This module holds the type for an owned but ungrowable bit sequence. `BitVec` is
+the more appropriate and useful type for most collections.
+!*/
+
+#![cfg(feature = "alloc")]
+
+use crate::{
+ BigEndian,
+ BitPtr,
+ BitSlice,
+ BitVec,
+ Bits,
+ Cursor,
+};
+#[cfg(all(feature = "alloc", not(feature = "std")))]
+use alloc::{
+ borrow::{
+ Borrow,
+ BorrowMut,
+ ToOwned,
+ },
+ boxed::Box,
+ vec::Vec,
+};
+use core::{
+ clone::Clone,
+ cmp::{
+ Eq,
+ PartialEq,
+ PartialOrd,
+ Ord,
+ Ordering,
+ },
+ convert::{
+ AsMut,
+ AsRef,
+ From,
+ Into,
+ },
+ default::Default,
+ fmt::{
+ self,
+ Debug,
+ Display,
+ Formatter,
+ },
+ hash::{
+ Hash,
+ Hasher,
+ },
+ iter::{
+ DoubleEndedIterator,
+ ExactSizeIterator,
+ FusedIterator,
+ Iterator,
+ IntoIterator,
+ },
+ marker::PhantomData,
+ mem,
+ ops::{
+ Add,
+ AddAssign,
+ BitAnd,
+ BitAndAssign,
+ BitOr,
+ BitOrAssign,
+ BitXor,
+ BitXorAssign,
+ Deref,
+ DerefMut,
+ Drop,
+ Index,
+ IndexMut,
+ Range,
+ RangeFrom,
+ RangeFull,
+ RangeInclusive,
+ RangeTo,
+ RangeToInclusive,
+ Neg,
+ Not,
+ Shl,
+ ShlAssign,
+ Shr,
+ ShrAssign,
+ },
+};
+#[cfg(feature = "std")]
+use std::{
+ borrow::{
+ Borrow,
+ BorrowMut,
+ ToOwned,
+ },
+ boxed::Box,
+ vec::Vec,
+};
+
+/** A pointer type for owned bit sequences.
+
+This type is essentially a `&BitSlice` that owns its own memory. It can change
+the contents of its domain, but it cannot change its own domain like `BitVec`
+can. It is useful for fixed-size collections without lifetime tracking.
+
+# Type Parameters
+
+- `C: Cursor`: An implementor of the [`Cursor`] trait. This type is used to
+ convert semantic indices into concrete bit positions in elements, and store or
+ retrieve bit values from the storage type.
+- `T: Bits`: An implementor of the [`Bits`] trait: `u8`, `u16`, `u32`, or `u64`.
+ This is the actual type in memory that the box will use to store data.
+
+# Safety
+
+The `BitBox` handle has the same *size* as standard Rust `Box<[T]>` handles, but
+it is ***extremely binary incompatible*** with them. Attempting to treat
+`BitBox<_, T>` as `Box<[T]>` in any manner except through the provided APIs is
+***catastrophically*** unsafe and unsound.
+
+# Trait Implementations
+
+`BitBox<C, T>` implements all the traits that `BitSlice<C, T>` does, by
+deferring to the `BitSlice` implementation. It also implements conversion traits
+to and from `BitSlice`, and to/from `BitVec`.
+**/
+#[repr(C)]
+pub struct BitBox<C = BigEndian, T = u8>
+where C: Cursor, T: Bits {
+ _cursor: PhantomData<C>,
+ pointer: BitPtr<T>,
+}
+
+impl<C, T> BitBox<C, T>
+where C: Cursor, T: Bits {
+ /// Constructs an empty slice at a given location.
+ ///
+ /// # Parameters
+ ///
+ /// - `data`: The address of the empty `BitBox` to construct.
+ ///
+ /// # Returns
+ ///
+ /// An empty `BitBox` at the given location.
+ pub fn uninhabited(data: *const T) -> Self {
+ Self {
+ _cursor: PhantomData,
+ pointer: BitPtr::uninhabited(data),
+ }
+ }
+
+ /// Copies a `BitSlice` into an owned `BitBox`.
+ ///
+ /// # Parameters
+ ///
+ /// - `src`: The `&BitSlice` to make owned.
+ ///
+ /// # Returns
+ ///
+ /// An owned clone of the given bit slice.
+ pub fn new(src: &BitSlice<C, T>) -> Self {
+ let store: Box<[T]> = src.as_ref().to_owned().into_boxed_slice();
+ let data = store.as_ptr();
+ let (_, elts, head, tail) = src.bitptr().raw_parts();
+ let out = Self {
+ _cursor: PhantomData,
+ pointer: BitPtr::new(data, elts, head, tail),
+ };
+ mem::forget(store);
+ out
+ }
+
+ /// Constructs a `BitBox` from a raw `BitPtr`.
+ ///
+ /// After calling this function, the raw pointer is owned by the resulting
+ /// `BitBox`. The `BitBox` will deallocate the memory region it describes.
+ ///
+ /// # Parameters
+ ///
+ /// - `pointer`: A `BitPtr<T>` describing a region of owned memory. This
+ /// must have previously produced by `BitBox` constructors; it is unsound
+ /// to even pass in `BitPtr<T>` values taken from `BitVec<C, T>` handles.
+ ///
+ /// # Returns
+ ///
+ /// An owned `BitBox` over the given pointer.
+ ///
+ /// # Safety
+ ///
+ /// Because Rust does not specify the allocation scheme used, the only
+ /// valid pointer to pass into this function is one that had previously been
+ /// produced by `BitBox` constructors and extracted by [`BitBox::into_raw`].
+ ///
+ /// This function is unsafe because improper use can lead to double-free
+ /// errors (constructing multiple `BitBox`es from the same `BitPtr`) or
+ /// allocator inconsistencies (arbitrary pointers).
+ ///
+ /// [`BitBox::into_raw`]: #method.into_raw
+ pub unsafe fn from_raw(pointer: BitPtr<T>) -> Self {
+ Self {
+ _cursor: PhantomData,
+ pointer,
+ }
+ }
+
+ /// Consumes the `BitBox`, returning the wrapped `BitPtr` directly.
+ ///
+ /// After calling this function, the caller is responsible for the memory
+ /// previously managed by the `BitBox`. In particular, the caller must
+ /// properly release the memory region to which the `BitPtr` refers.
+ /// The proper way to do so is to convert the `BitPtr` back into a `BitBox`
+ /// with the [`BitBox::from_raw`] function.
+ ///
+ /// Note: this is an associated function, which means that you must call it
+ /// as `BitBox::into_raw(b)` instead of `b.into_raw()`. This is to match the
+ /// API of [`Box`]; there is no method conflict with [`BitSlice`].
+ ///
+ /// [`BitBox::from_raw`]: #method.from_raw
+ /// [`BitSlice`]: ../struct.BitSlice.html
+ #[cfg_attr(not(feature = "std"), doc = "[`Box`]: https://doc.rust-lang.org/stable/alloc/boxed/struct.Box.html")]
+ #[cfg_attr(feature = "std", doc = "[`Box`]: https://doc.rust-lang.org/stable/std/boxed/struct.Box.html")]
+ pub unsafe fn into_raw(b: BitBox<C, T>) -> BitPtr<T> {
+ let out = b.bitptr();
+ mem::forget(b);
+ out
+ }
+
+ /// Consumes and leaks the `BitBox`, returning a mutable reference,
+ /// `&'a mut BitSlice<C, T>`. Note that the memory region `[T]` must outlive
+ /// the chosen lifetime `'a`.
+ ///
+ /// This function is mainly useful for bit regions that live for the
+ /// remainder of the program’s life. Dropping the returned reference will
+ /// cause a memory leak. If this is not acceptable, the reference should
+ /// first be wrapped with the [`Box::from_raw`] function, producing a
+ /// `BitBox`. This `BitBox` can then be dropped which will properly
+ /// deallocate the memory.
+ ///
+ /// Note: this is an associated function, which means that you must call it
+ /// as `BitBox::leak(b)` instead of `b.leak()`. This is to match the API of
+ /// [`Box`]; there is no method conflict with [`BitSlice`].
+ ///
+ /// # Parameters
+ ///
+ /// - `b`: The `BitBox` to deconstruct.
+ ///
+ /// # Returns
+ ///
+ /// The raw pointer from inside the `BitBox`.
+ ///
+ /// [`BitBox::from_raw`]: #method.from_raw
+ /// [`BitSlice`]: ../struct.BitSlice.html
+ #[cfg_attr(not(feature = "std"), doc = "[`Box`]: https://doc.rust-lang.org/stable/alloc/boxed/struct.Box.html")]
+ #[cfg_attr(feature = "std", doc = "[`Box`]: https://doc.rust-lang.org/stable/std/boxed/struct.Box.html")]
+ pub fn leak<'a>(b: BitBox<C, T>) -> &'a mut BitSlice<C, T> {
+ let out = b.bitptr();
+ mem::forget(b);
+ out.into()
+ }
+
+ /// Accesses the `BitSlice<C, T>` to which the `BitBox` refers.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// The slice of bits behind the box.
+ pub fn as_bitslice(&self) -> &BitSlice<C, T> {
+ self.pointer.into()
+ }
+
+ /// Accesses the `BitSlice<C, T>` to which the `BitBox` refers.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ ///
+ /// # Returns
+ ///
+ /// The slice of bits behind the box.
+ pub fn as_mut_bitslice(&mut self) -> &mut BitSlice<C, T> {
+ self.pointer.into()
+ }
+
+ /// Gives read access to the `BitPtr<T>` structure powering the box.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// A copy of the interior `BitPtr<T>`.
+ pub(crate) fn bitptr(&self) -> BitPtr<T> {
+ self.pointer
+ }
+
+ /// Allows a function to access the `Box<[T]>` that the `BitBox` is using
+ /// under the hood.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ /// - `func`: A function which works with a borrowed `Box<[T]>` representing
+ /// the actual memory held by the `BitBox`.
+ ///
+ /// # Type Parameters
+ ///
+ /// - `F: FnOnce(&Box<[T]>) -> R`: A function which borrows a box.
+ /// - `R`: The return value of the function.
+ ///
+ /// # Returns
+ ///
+ /// The return value of the provided function.
+ fn do_with_box<F, R>(&self, func: F) -> R
+ where F: FnOnce(&Box<[T]>) -> R {
+ let (data, elts, _, _) = self.bitptr().raw_parts();
+ let b: Box<[T]> = unsafe {
+ Vec::from_raw_parts(data as *mut T, elts, elts)
+ }.into_boxed_slice();
+ let out = func(&b);
+ mem::forget(b);
+ out
+ }
+}
+
+impl<C, T> Borrow<BitSlice<C, T>> for BitBox<C, T>
+where C: Cursor, T: Bits {
+ fn borrow(&self) -> &BitSlice<C, T> {
+ &*self
+ }
+}
+
+impl<C, T> BorrowMut<BitSlice<C, T>> for BitBox<C, T>
+where C: Cursor, T: Bits {
+ fn borrow_mut(&mut self) -> &mut BitSlice<C, T> {
+ &mut *self
+ }
+}
+
+impl<C, T> Clone for BitBox<C, T>
+where C: Cursor, T: Bits {
+ fn clone(&self) -> Self {
+ let (_, e, h, t) = self.bitptr().raw_parts();
+ let new_box = self.do_with_box(Clone::clone);
+ let ptr = new_box.as_ptr();
+ mem::forget(new_box);
+ Self {
+ _cursor: PhantomData,
+ pointer: BitPtr::new(ptr, e, h, t),
+ }
+ }
+}
+
+impl<C, T> Eq for BitBox<C, T>
+where C: Cursor, T: Bits {}
+
+impl<C, T> Ord for BitBox<C, T>
+where C: Cursor, T: Bits {
+ fn cmp(&self, rhs: &Self) -> Ordering {
+ (&**self).cmp(&**rhs)
+ }
+}
+
+impl<A, B, C, D> PartialEq<BitBox<C, D>> for BitBox<A, B>
+where A: Cursor, B: Bits, C: Cursor, D: Bits {
+ fn eq(&self, rhs: &BitBox<C, D>) -> bool {
+ (&**self).eq(&**rhs)
+ }
+}
+
+impl<A, B, C, D> PartialEq<BitSlice<C, D>> for BitBox<A, B>
+where A: Cursor, B: Bits, C: Cursor, D: Bits {
+ fn eq(&self, rhs: &BitSlice<C, D>) -> bool {
+ (&**self).eq(rhs)
+ }
+}
+
+impl<A, B, C, D> PartialEq<BitBox<C, D>> for BitSlice<A, B>
+where A: Cursor, B: Bits, C: Cursor, D: Bits {
+ fn eq(&self, rhs: &BitBox<C, D>) -> bool {
+ self.eq(&**rhs)
+ }
+}
+
+impl<A, B, C, D> PartialOrd<BitBox<C, D>> for BitBox<A, B>
+where A: Cursor, B: Bits, C: Cursor, D: Bits {
+ fn partial_cmp(&self, rhs: &BitBox<C, D>) -> Option<Ordering> {
+ (&**self).partial_cmp(&**rhs)
+ }
+}
+
+impl<A, B, C, D> PartialOrd<BitSlice<C, D>> for BitBox<A, B>
+where A: Cursor, B: Bits, C: Cursor, D: Bits {
+ fn partial_cmp(&self, rhs: &BitSlice<C, D>) -> Option<Ordering> {
+ (&**self).partial_cmp(rhs)
+ }
+}
+
+impl<A, B, C, D> PartialOrd<BitBox<C, D>> for BitSlice<A, B>
+where A: Cursor, B: Bits, C: Cursor, D: Bits {
+ fn partial_cmp(&self, rhs: &BitBox<C, D>) -> Option<Ordering> {
+ self.partial_cmp(&**rhs)
+ }
+}
+
+impl<C, T> AsMut<BitSlice<C, T>> for BitBox<C, T>
+where C: Cursor, T: Bits {
+ fn as_mut(&mut self) -> &mut BitSlice<C, T> {
+ self.as_mut_bitslice()
+ }
+}
+
+impl<C, T> AsMut<[T]> for BitBox<C, T>
+where C: Cursor, T: Bits {
+ fn as_mut(&mut self) -> &mut [T] {
+ (&mut **self).as_mut()
+ }
+}
+
+impl<C, T> AsRef<BitSlice<C, T>> for BitBox<C, T>
+where C: Cursor, T: Bits {
+ fn as_ref(&self) -> &BitSlice<C, T> {
+ self.as_bitslice()
+ }
+}
+
+impl<C, T> AsRef<[T]> for BitBox<C, T>
+where C: Cursor, T: Bits {
+ fn as_ref(&self) -> &[T] {
+ (&**self).as_ref()
+ }
+}
+
+impl<C, T> From<&BitSlice<C, T>> for BitBox<C, T>
+where C: Cursor, T: Bits {
+ fn from(src: &BitSlice<C, T>) -> Self {
+ let (_, elts, head, tail) = src.bitptr().raw_parts();
+ let b: Box<[T]> = src.as_ref().to_owned().into_boxed_slice();
+ let out = Self {
+ _cursor: PhantomData,
+ pointer: BitPtr::new(b.as_ptr(), elts, head, tail),
+ };
+ mem::forget(b);
+ out
+ }
+}
+
+/// Builds a `BitBox` out of a borrowed slice of elements.
+///
+/// This copies the memory as-is from the source buffer into the new `BitBox`.
+/// The source buffer will be unchanged by this operation, so you don't need to
+/// worry about using the correct cursor type for the read.
+///
+/// This operation does a copy from the source buffer into a new allocation, as
+/// it can only borrow the source and not take ownership.
+impl<C, T> From<&[T]> for BitBox<C, T>
+where C: Cursor, T: Bits {
+ /// Builds a `BitBox<C: Cursor, T: Bits>` from a borrowed `&[T]`.
+ ///
+ /// # Parameters
+ ///
+ /// - `src`: The elements to use as the values for the new vector.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let src: &[u8] = &[5, 10];
+ /// let bv: BitBox = src.into();
+ /// assert!(bv[5]);
+ /// assert!(bv[7]);
+ /// assert!(bv[12]);
+ /// assert!(bv[14]);
+ /// ```
+ fn from(src: &[T]) -> Self {
+ assert!(src.len() < BitPtr::<T>::MAX_ELTS, "Box overflow");
+ <&BitSlice<C, T>>::from(src).into()
+ }
+}
+
+impl<C, T> From<BitVec<C, T>> for BitBox<C, T>
+where C: Cursor, T: Bits {
+ fn from(mut src: BitVec<C, T>) -> Self {
+ src.shrink_to_fit();
+ let pointer = src.bitptr();
+ mem::forget(src);
+ unsafe { Self::from_raw(pointer) }
+ }
+}
+
+/// Builds a `BitBox` out of an owned slice of elements.
+///
+/// This moves the memory as-is from the source buffer into the new `BitBox`.
+/// The source buffer will be unchanged by this operation, so you don't need to
+/// worry about using the correct cursor type.
+impl<C, T> From<Box<[T]>> for BitBox<C, T>
+where C: Cursor, T: Bits {
+ /// Consumes a `Box<[T: Bits]>` and creates a `BitBox<C: Cursor, T>` from
+ /// it.
+ ///
+ /// # Parameters
+ ///
+ /// - `src`: The source box whose memory will be used.
+ ///
+ /// # Returns
+ ///
+ /// A new `BitBox` using the `src` `Box`’s memory.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let src: Box<[u8]> = Box::new([3, 6, 9, 12, 15]);
+ /// let bv: BitBox = src.into();
+ /// ```
+ fn from(src: Box<[T]>) -> Self {
+ assert!(src.len() < BitPtr::<T>::MAX_ELTS, "Box overflow");
+ let out = Self {
+ _cursor: PhantomData,
+ pointer: BitPtr::new(src.as_ptr(), src.len(), 0, T::SIZE)
+ };
+ mem::forget(src);
+ out
+ }
+}
+
+impl<C, T> Into<Box<[T]>> for BitBox<C, T>
+where C: Cursor, T: Bits {
+ fn into(self) -> Box<[T]> {
+ let (ptr, len, _, _) = self.bitptr().raw_parts();
+ let out = unsafe { Vec::from_raw_parts(ptr as *mut T, len, len) }
+ .into_boxed_slice();
+ mem::forget(self);
+ out
+ }
+}
+
+impl<C, T> Default for BitBox<C, T>
+where C: Cursor, T: Bits {
+ fn default() -> Self {
+ Self {
+ _cursor: PhantomData,
+ pointer: BitPtr::default(),
+ }
+ }
+}
+
+impl<C, T> Debug for BitBox<C, T>
+where C: Cursor, T: Bits {
+ fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+ f.write_str("BitBox<")?;
+ f.write_str(C::TYPENAME)?;
+ f.write_str(", ")?;
+ f.write_str(T::TYPENAME)?;
+ f.write_str("> ")?;
+ Display::fmt(&**self, f)
+ }
+}
+
+impl<C, T> Display for BitBox<C, T>
+where C: Cursor, T: Bits {
+ fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+ Display::fmt(&**self, f)
+ }
+}
+
+impl<C, T> Hash for BitBox<C, T>
+where C: Cursor, T: Bits {
+ fn hash<H: Hasher>(&self, hasher: &mut H) {
+ (&**self).hash(hasher)
+ }
+}
+
+impl<C, T> IntoIterator for BitBox<C, T>
+where C: Cursor, T: Bits {
+ type Item = bool;
+ type IntoIter = IntoIter<C, T>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ IntoIter {
+ iterator: self.bitptr(),
+ _original: self,
+ }
+ }
+}
+
+impl<'a, C, T> IntoIterator for &'a BitBox<C, T>
+where C: Cursor, T: 'a + Bits {
+ type Item = bool;
+ type IntoIter = <&'a BitSlice<C, T> as IntoIterator>::IntoIter;
+
+ fn into_iter(self) -> Self::IntoIter {
+ (&**self).into_iter()
+ }
+}
+
+impl<C, T> Add<Self> for BitBox<C, T>
+where C: Cursor, T: Bits {
+ type Output = Self;
+
+ fn add(mut self, addend: Self) -> Self::Output {
+ self += addend;
+ self
+ }
+}
+
+impl<C, T> AddAssign for BitBox<C, T>
+where C: Cursor, T: Bits {
+ fn add_assign(&mut self, addend: Self) {
+ **self += &*addend
+ }
+}
+
+impl<C, T, I> BitAnd<I> for BitBox<C, T>
+where C: Cursor, T: Bits, I: IntoIterator<Item=bool> {
+ type Output = Self;
+
+ fn bitand(mut self, rhs: I) -> Self::Output {
+ self &= rhs;
+ self
+ }
+}
+
+impl<C, T, I> BitAndAssign<I> for BitBox<C, T>
+where C: Cursor, T: Bits, I: IntoIterator<Item=bool> {
+ fn bitand_assign(&mut self, rhs: I) {
+ **self &= rhs;
+ }
+}
+
+impl<C, T, I> BitOr<I> for BitBox<C, T>
+where C: Cursor, T: Bits, I: IntoIterator<Item=bool> {
+ type Output = Self;
+
+ fn bitor(mut self, rhs: I) -> Self::Output {
+ self |= rhs;
+ self
+ }
+}
+
+impl<C, T, I> BitOrAssign<I> for BitBox<C, T>
+where C: Cursor, T: Bits, I: IntoIterator<Item=bool> {
+ fn bitor_assign(&mut self, rhs: I) {
+ **self |= rhs;
+ }
+}
+
+impl<C, T, I> BitXor<I> for BitBox<C, T>
+where C: Cursor, T: Bits, I: IntoIterator<Item=bool> {
+ type Output = Self;
+
+ fn bitxor(mut self, rhs: I) -> Self::Output {
+ self ^= rhs;
+ self
+ }
+}
+
+impl<C, T, I> BitXorAssign<I> for BitBox<C, T>
+where C: Cursor, T: Bits, I: IntoIterator<Item=bool> {
+ fn bitxor_assign(&mut self, rhs: I) {
+ **self ^= rhs;
+ }
+}
+
+impl<C, T> Deref for BitBox<C, T>
+where C: Cursor, T: Bits {
+ type Target = BitSlice<C, T>;
+
+ fn deref(&self) -> &Self::Target {
+ self.pointer.into()
+ }
+}
+
+impl<C, T> DerefMut for BitBox<C, T>
+where C: Cursor, T: Bits {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ self.pointer.into()
+ }
+}
+
+impl<C, T> Drop for BitBox<C, T>
+where C: Cursor, T: Bits {
+ fn drop(&mut self) {
+ let ptr = self.as_mut_bitslice().as_mut_ptr();
+ let len = self.as_bitslice().len();
+ // Run the `Box<[T]>` destructor.
+ drop(unsafe { Vec::from_raw_parts(ptr, len, len).into_boxed_slice() });
+ }
+}
+
+impl<C, T> Index<usize> for BitBox<C, T>
+where C: Cursor, T: Bits {
+ type Output = bool;
+
+ fn index(&self, index: usize) -> &Self::Output {
+ &(**self)[index]
+ }
+}
+
+impl<C, T> Index<Range<usize>> for BitBox<C, T>
+where C: Cursor, T: Bits {
+ type Output = BitSlice<C, T>;
+
+ fn index(&self, range: Range<usize>) -> &Self::Output {
+ &(**self)[range]
+ }
+}
+
+impl<C, T> IndexMut<Range<usize>> for BitBox<C, T>
+where C: Cursor, T: Bits {
+ fn index_mut(&mut self, range: Range<usize>) -> &mut Self::Output {
+ &mut (**self)[range]
+ }
+}
+
+impl<C, T> Index<RangeFrom<usize>> for BitBox<C, T>
+where C: Cursor, T: Bits {
+ type Output = BitSlice<C, T>;
+
+ fn index(&self, range: RangeFrom<usize>) -> &Self::Output {
+ &(**self)[range]
+ }
+}
+
+impl<C, T> IndexMut<RangeFrom<usize>> for BitBox<C, T>
+where C: Cursor, T: Bits {
+ fn index_mut(&mut self, range: RangeFrom<usize>) -> &mut Self::Output {
+ &mut (**self)[range]
+ }
+}
+
+impl<C, T> Index<RangeFull> for BitBox<C, T>
+where C: Cursor, T: Bits {
+ type Output = BitSlice<C, T>;
+
+ fn index(&self, range: RangeFull) -> &Self::Output {
+ &(**self)[range]
+ }
+}
+
+impl<C, T> IndexMut<RangeFull> for BitBox<C, T>
+where C: Cursor, T: Bits {
+ fn index_mut(&mut self, range: RangeFull) -> &mut Self::Output {
+ &mut (**self)[range]
+ }
+}
+
+impl<C, T> Index<RangeInclusive<usize>> for BitBox<C, T>
+where C: Cursor, T: Bits {
+ type Output = BitSlice<C, T>;
+
+ fn index(&self, range: RangeInclusive<usize>) -> &Self::Output {
+ &(**self)[range]
+ }
+}
+
+impl<C, T> IndexMut<RangeInclusive<usize>> for BitBox<C, T>
+where C: Cursor, T: Bits {
+ fn index_mut(&mut self, range: RangeInclusive<usize>) -> &mut Self::Output {
+ &mut (**self)[range]
+ }
+}
+
+impl<C, T> Index<RangeTo<usize>> for BitBox<C, T>
+where C: Cursor, T: Bits {
+ type Output = BitSlice<C, T>;
+
+ fn index(&self, range: RangeTo<usize>) -> &Self::Output {
+ &(**self)[range]
+ }
+}
+
+impl<C, T> IndexMut<RangeTo<usize>> for BitBox<C, T>
+where C: Cursor, T: Bits {
+ fn index_mut(&mut self, range: RangeTo<usize>) -> &mut Self::Output {
+ &mut (**self)[range]
+ }
+}
+
+impl<C, T> Index<RangeToInclusive<usize>> for BitBox<C, T>
+where C: Cursor, T: Bits {
+ type Output = BitSlice<C, T>;
+
+ fn index(&self, range: RangeToInclusive<usize>) -> &Self::Output {
+ &(**self)[range]
+ }
+}
+
+impl<C, T> IndexMut<RangeToInclusive<usize>> for BitBox<C, T>
+where C: Cursor, T: Bits {
+ fn index_mut(&mut self, range: RangeToInclusive<usize>) -> &mut Self::Output {
+ &mut (**self)[range]
+ }
+}
+
+impl<C, T> Neg for BitBox<C, T>
+where C: Cursor, T: Bits {
+ type Output = Self;
+
+ fn neg(mut self) -> Self::Output {
+ let _ = -(&mut *self);
+ self
+ }
+}
+
+impl<C, T> Not for BitBox<C, T>
+where C: Cursor, T: Bits {
+ type Output = Self;
+
+ fn not(mut self) -> Self::Output {
+ let _ = !(&mut *self);
+ self
+ }
+}
+
+impl<C, T> Shl<usize> for BitBox<C, T>
+where C: Cursor, T: Bits {
+ type Output = Self;
+
+ fn shl(mut self, shamt: usize) -> Self::Output {
+ self <<= shamt;
+ self
+ }
+}
+
+impl<C, T> ShlAssign<usize> for BitBox<C, T>
+where C: Cursor, T: Bits {
+ fn shl_assign(&mut self, shamt: usize) {
+ **self <<= shamt;
+ }
+}
+
+impl<C, T> Shr<usize> for BitBox<C, T>
+where C: Cursor, T: Bits {
+ type Output = Self;
+
+ fn shr(mut self, shamt: usize) -> Self::Output {
+ self >>= shamt;
+ self
+ }
+}
+
+impl<C, T> ShrAssign<usize> for BitBox<C, T>
+where C: Cursor, T: Bits {
+ fn shr_assign(&mut self, shamt: usize) {
+ **self >>= shamt;
+ }
+}
+
+#[repr(C)]
+pub struct IntoIter<C, T>
+where C: Cursor, T: Bits {
+ /// Owning pointer to the full slab
+ _original: BitBox<C, T>,
+ /// Slice descriptor for the region undergoing iteration.
+ iterator: BitPtr<T>,
+}
+
+impl<C, T> IntoIter<C, T>
+where C: Cursor, T: Bits {
+ fn iterator(&self) -> <&BitSlice<C, T> as IntoIterator>::IntoIter {
+ <&BitSlice<C, T>>::from(self.iterator).into_iter()
+ }
+}
+
+impl<C, T> DoubleEndedIterator for IntoIter<C, T>
+where C: Cursor, T: Bits {
+ fn next_back(&mut self) -> Option<Self::Item> {
+ let mut slice_iter = self.iterator();
+ let out = slice_iter.next_back();
+ self.iterator = slice_iter.bitptr();
+ out
+ }
+}
+
+impl<C, T> ExactSizeIterator for IntoIter<C, T>
+where C: Cursor, T: Bits {}
+
+impl<C, T> FusedIterator for IntoIter<C, T>
+where C: Cursor, T: Bits {}
+
+impl<C, T> Iterator for IntoIter<C, T>
+where C: Cursor, T: Bits {
+ type Item = bool;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ let mut slice_iter = self.iterator();
+ let out = slice_iter.next();
+ self.iterator = slice_iter.bitptr();
+ out
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iterator().size_hint()
+ }
+
+ fn count(self) -> usize {
+ self.len()
+ }
+
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ let mut slice_iter = self.iterator();
+ let out = slice_iter.nth(n);
+ self.iterator = slice_iter.bitptr();
+ out
+ }
+
+ fn last(mut self) -> Option<Self::Item> {
+ self.next_back()
+ }
+}
diff --git a/third_party/rust/bitvec/src/cursor.rs b/third_party/rust/bitvec/src/cursor.rs
new file mode 100644
--- /dev/null
+++ b/third_party/rust/bitvec/src/cursor.rs
@@ -0,0 +1,97 @@
+/*! Bit Cursors
+
+`bitvec` structures are parametric over any ordering of bits within an element.
+The `Cursor` trait maps a cursor position (indicated by the `BitIdx` type) to an
+electrical position (indicated by the `BitPos` type) within that element, and
+also defines the order of traversal over an element.
+
+The only requirement on implementors of `Cursor` is that the transform function
+from cursor (`BitIdx`) to position (`BitPos`) is *total* (every integer in the
+domain `0 .. T::SIZE` is used) and *unique* (each cursor maps to one and only
+one position, and each position is mapped by one and only one cursor).
+Contiguity is not required.
+
+`Cursor` is a stateless trait, and implementors should be zero-sized types.
+!*/
+
+use super::bits::{
+ BitIdx,
+ BitPos,
+ Bits,
+};
+
+/// Traverses an element from `MSb` to `LSb`.
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub struct BigEndian;
+
+/// Traverses an element from `LSb` to `MSb`.
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub struct LittleEndian;
+
+/** A cursor over an element.
+
+# Usage
+
+`bitvec` structures store and operate on semantic counts, not bit positions. The
+`Cursor::at` function takes a semantic cursor, `BitIdx`, and produces an
+electrical position, `BitPos`.
+**/
+pub trait Cursor {
+ const TYPENAME: &'static str;
+
+ /// Translate a semantic bit index into an electrical bit position.
+ ///
+ /// # Parameters
+ ///
+ /// - `cursor`: The semantic bit value. This must be in the domain
+ /// `0 .. T::SIZE`.
+ ///
+ /// # Returns
+ ///
+ /// - A concrete position. This value can be used for shifting and masking
+ /// to extract a bit from an element.
+ ///
+ /// # Type Parameters
+ ///
+ /// - `T: Bits`: The storage type for which the position will be calculated.
+ ///
+ /// # Safety
+ ///
+ /// This function requires that `cursor` be in the domain `0 .. T::SIZE`.
+ /// Implementors must check this themselves.
+ fn at<T: Bits>(cursor: BitIdx) -> BitPos;
+}
+
+impl Cursor for BigEndian {
+ const TYPENAME: &'static str = "BigEndian";
+
+ /// Maps a semantic count to a concrete position.
+ ///
+ /// `BigEndian` order moves from `MSb` first to `LSb` last.
+ fn at<T: Bits>(cursor: BitIdx) -> BitPos {
+ assert!(
+ *cursor < T::SIZE,
+ "Index out of range: {} overflows {}",
+ *cursor,
+ T::SIZE,
+ );
+ (T::MASK - *cursor).into()
+ }
+}
+
+impl Cursor for LittleEndian {
+ const TYPENAME: &'static str = "LittleEndian";
+
+ /// Maps a semantic count to a concrete position.
+ ///
+ /// `LittleEndian` order moves from `LSb` first to `MSb` last.
+ fn at<T: Bits>(cursor: BitIdx) -> BitPos {
+ assert!(
+ *cursor < T::SIZE,
+ "Index out of range: {} overflows {}",
+ *cursor,
+ T::SIZE,
+ );
+ (*cursor).into()
+ }
+}
diff --git a/third_party/rust/bitvec/src/lib.rs b/third_party/rust/bitvec/src/lib.rs
new file mode 100644
--- /dev/null
+++ b/third_party/rust/bitvec/src/lib.rs
@@ -0,0 +1,91 @@
+/*! `bitvec` – `[bool]` in overdrive.
+
+This crate provides views into slices of bits that are truly `[u1]`. Each bit in
+the data segment is used, unlike `[bool]` which ignores seven bits out of every
+byte.
+
+`bitvec`’s data structures provide strong guarantees about, and fine-grained
+control of, the bit-level representation of a sequence of memory. The user is
+empowered to choose the fundamental type underlying the store – `u8`, `u16`,
+`u32`, or `u64` – and the order in which each primitive is traversed –
+big-endian, from the most significant bit to the least, or little-endian, from
+the least significant bit to the most.
+
+This level of control is not necessary for most use cases where users just want
+to put bits in a sequence, but it is critically important for users making
+packets that leave main memory and hit some external device like a peripheral
+controller or a network socket. In order to provide convenience to users for
+whom the storage details do not matter, `bitvec` types default to using
+big-endian bit order on `u8`. This means that the bits you would write down on
+paper match up with the bits as they are stored in memory.
+
+For example, the bit sequence `[0, 1, 1, 0, 1, 0, 0, 1]` inserted into `bitvec`
+structures with no extra type specification will produce the `<BigEndian, u8>`
+variant, so the bits in memory are `0b01101001`. With little-endian bit order,
+the memory value would be `0b10010110` (reversed order!).
+
+In addition to providing compact, efficient, and powerful storage and
+manipulation of bits in memory, the `bitvec` structures are capable of acting as
+a queue, set, or stream of bits. They implement the bit-wise operators for
+Boolean arithmetic, arithmetic operators for 2’s-complement numeric arithmetic,
+read indexing, bit shifts, and access to the underlying storage fundamental
+elements as a slice.
+
+(Write indexing is impossible in Rust semantics.)
+!*/
+
+#![cfg_attr(not(feature = "std"), no_std)]
+#![cfg_attr(all(feature = "alloc", not(feature = "std")), feature(alloc))]
+
+#[cfg(all(feature = "alloc", not(feature = "std")))]
+extern crate alloc;
+
+#[cfg(feature = "std")]
+extern crate core;
+
+#[macro_use]
+mod macros;
+
+mod bits;
+mod cursor;
+mod pointer;
+mod slice;
+
+#[cfg(feature = "alloc")]
+mod boxed;
+
+#[cfg(feature = "alloc")]
+mod vec;
+
+use crate::{
+ bits::BitIdx,
+ pointer::BitPtr,
+};
+
+pub use crate::{
+ bits::Bits,
+ cursor::{
+ Cursor,
+ BigEndian,
+ LittleEndian,
+ },
+ slice::BitSlice,
+};
+
+#[cfg(feature = "alloc")]
+pub use crate::{
+ boxed::BitBox,
+ vec::BitVec,
+};
+
+/// Expose crate internals for use in doctests and external tests.
+#[cfg(feature = "testing")]
+pub mod testing {
+ pub use crate::{
+ bits::*,
+ macros::*,
+ pointer::*,
+ slice::*,
+ vec::*,
+ };
+}
diff --git a/third_party/rust/bitvec/src/macros.rs b/third_party/rust/bitvec/src/macros.rs
new file mode 100644
--- /dev/null
+++ b/third_party/rust/bitvec/src/macros.rs
@@ -0,0 +1,205 @@
+/*! Utility macros for constructing data structures and implementing bulk types.
+
+The only public macro is `bitvec`; this module also provides convenience macros
+for code generation.
+!*/
+
+/** Construct a `BitVec` out of a literal array in source code, like `vec!`.
+
+`bitvec!` can be invoked in a number of ways. It takes the name of a `Cursor`
+implementation, the name of a `Bits`-implementing fundamental, and zero or more
+fundamentals (integer, floating-point, or boolean) which are used to build the
+bits. Each fundamental literal corresponds to one bit, and is considered to
+represent `1` if it is any other value than exactly zero.
+
+`bitvec!` can be invoked with no specifiers, a `Cursor` specifier, or a `Cursor`
+and a `Bits` specifier. It cannot be invoked with a `Bits` specifier but no
+`Cursor` specifier, due to overlap in how those tokens are matched by the macro
+system.
+
+Like `vec!`, `bitvec!` supports bit lists `[0, 1, …]` and repetition markers
+`[1; n]`.
+
+# All Syntaxes
+
+```rust
+use bitvec::*;
+
+bitvec![BigEndian, u8; 0, 1];
+bitvec![LittleEndian, u8; 0, 1,];
+bitvec![BigEndian; 0, 1];
+bitvec![LittleEndian; 0, 1,];
+bitvec![0, 1];
+bitvec![0, 1,];
+bitvec![BigEndian, u8; 1; 5];
+bitvec![LittleEndian; 0; 5];
+bitvec![1; 5];
+```
+**/
+#[cfg(feature = "alloc")]
+#[macro_export]
+macro_rules! bitvec {
+ // bitvec![ endian , type ; 0 , 1 , … ]
+ ( $endian:path , $bits:ty ; $( $element:expr ),* ) => {
+ bitvec![ __bv_impl__ $endian , $bits ; $( $element ),* ]
+ };
+ // bitvec![ endian , type ; 0 , 1 , … , ]
+ ( $endian:path , $bits:ty ; $( $element:expr , )* ) => {
+ bitvec![ __bv_impl__ $endian , $bits ; $( $element ),* ]
+ };
+
+ // bitvec![ endian ; 0 , 1 , … ]
+ ( $endian:path ; $( $element:expr ),* ) => {
+ bitvec![ __bv_impl__ $endian , u8 ; $( $element ),* ]
+ };
+ // bitvec![ endian ; 0 , 1 , … , ]
+ ( $endian:path ; $( $element:expr , )* ) => {
+ bitvec![ __bv_impl__ $endian , u8 ; $( $element ),* ]
+ };
+
+ // bitvec![ 0 , 1 , … ]
+ ( $( $element:expr ),* ) => {
+ bitvec![ __bv_impl__ $crate::BigEndian , u8 ; $( $element ),* ]
+ };
+ // bitvec![ 0 , 1 , … , ]
+ ( $( $element:expr , )* ) => {
+ bitvec![ __bv_impl__ $crate::BigEndian , u8 ; $( $element ),* ]
+ };
+
+ // bitvec![ endian , type ; bit ; rep ]
+ ( $endian:path , $bits:ty ; $element:expr ; $rep:expr ) => {
+ bitvec![ __bv_impl__ $endian , $bits ; $element; $rep ]
+ };
+ // bitvec![ endian ; bit ; rep ]
+ ( $endian:path ; $element:expr ; $rep:expr ) => {
+ bitvec![ __bv_impl__ $endian , u8 ; $element ; $rep ]
+ };
+ // bitvec![ bit ; rep ]
+ ( $element:expr ; $rep:expr ) => {
+ bitvec![ __bv_impl__ $crate::BigEndian , u8 ; $element ; $rep ]
+ };
+
+ // Build an array of `bool` (one bit per byte) and then build a `BitVec`
+ // from that (one bit per bit). I have yet to think of a way to make the
+ // source array be binary-compatible with a `BitSlice` data representation,
+ // so the static source is 8x larger than it needs to be.
+ //
+ // I’m sure there is a way, but I don’t think I need to spend the effort
+ // yet. Maybe a proc-macro.
+
+ ( __bv_impl__ $endian:path , $bits:ty ; $( $element:expr ),* ) => {{
+ let init: &[bool] = &[ $( $element != 0 ),* ];
+ $crate :: BitVec :: < $endian , $bits > :: from ( init )
+ }};
+
+ ( __bv_impl__ $endian:path , $bits:ty ; $element:expr ; $rep:expr ) => {{
+ core :: iter :: repeat ( $element != 0 )
+ .take ( $rep )
+ .collect :: < $crate :: BitVec < $endian , $bits > > ( )
+ }};
+}
+
+#[doc(hidden)]
+macro_rules! __bitslice_shift {
+ ( $( $t:ty ),+ ) => { $(
+ #[doc(hidden)]
+ impl<C: $crate :: Cursor, T: $crate :: Bits> core::ops::ShlAssign< $t >
+ for $crate :: BitSlice<C, T>
+ {
+ fn shl_assign(&mut self, shamt: $t ) {
+ core::ops::ShlAssign::<usize>::shl_assign(self, shamt as usize);
+ }
+ }
+
+ #[doc(hidden)]
+ impl<C: $crate :: Cursor, T: $crate :: Bits> core::ops::ShrAssign< $t >
+ for $crate :: BitSlice<C, T>
+ {
+ fn shr_assign(&mut self, shamt: $t ) {
+ core::ops::ShrAssign::<usize>::shr_assign(self, shamt as usize);
+ }
+ }
+ )+ };
+}
+
+#[cfg(feature = "alloc")]
+#[doc(hidden)]
+macro_rules! __bitvec_shift {
+ ( $( $t:ty ),+ ) => { $(
+ #[doc(hidden)]
+ impl<C: $crate :: Cursor, T: $crate :: Bits> core::ops::Shl< $t >
+ for $crate ::BitVec<C, T>
+ {
+ type Output = <Self as core::ops::Shl<usize>>::Output;
+
+ fn shl(self, shamt: $t ) -> Self::Output {
+ core::ops::Shl::<usize>::shl(self, shamt as usize)
+ }
+ }
+
+ #[doc(hidden)]
+ impl<C: $crate :: Cursor, T: $crate :: Bits> core::ops::ShlAssign< $t >
+ for $crate ::BitVec<C, T>
+ {
+ fn shl_assign(&mut self, shamt: $t ) {
+ core::ops::ShlAssign::<usize>::shl_assign(self, shamt as usize)
+ }
+ }
+
+ #[doc(hidden)]
+ impl<C: $crate ::Cursor, T: $crate ::Bits> core::ops::Shr< $t >
+ for $crate ::BitVec<C, T>
+ {
+ type Output = <Self as core::ops::Shr<usize>>::Output;
+
+ fn shr(self, shamt: $t ) -> Self::Output {
+ core::ops::Shr::<usize>::shr(self, shamt as usize)
+ }
+ }
+
+ #[doc(hidden)]
+ impl<C: $crate ::Cursor, T: $crate ::Bits> core::ops::ShrAssign< $t >
+ for $crate ::BitVec<C, T>
+ {
+ fn shr_assign(&mut self, shamt: $t ) {
+ core::ops::ShrAssign::<usize>::shr_assign(self, shamt as usize)
+ }
+ }
+ )+ };
+}
+
+#[cfg(all(test, feature = "alloc"))]
+mod tests {
+ #[allow(unused_imports)]
+ use crate::{
+ BigEndian,
+ LittleEndian,
+ };
+
+ #[test]
+ fn compile_macros() {
+ bitvec![0, 1];
+ bitvec![BigEndian; 0, 1];
+ bitvec![LittleEndian; 0, 1];
+ bitvec![BigEndian, u8; 0, 1];
+ bitvec![LittleEndian, u8; 0, 1];
+ bitvec![BigEndian, u16; 0, 1];
+ bitvec![LittleEndian, u16; 0, 1];
+ bitvec![BigEndian, u32; 0, 1];
+ bitvec![LittleEndian, u32; 0, 1];
+ bitvec![BigEndian, u64; 0, 1];
+ bitvec![LittleEndian, u64; 0, 1];
+
+ bitvec![1; 70];
+ bitvec![BigEndian; 0; 70];
+ bitvec![LittleEndian; 1; 70];
+ bitvec![BigEndian, u8; 0; 70];
+ bitvec![LittleEndian, u8; 1; 70];
+ bitvec![BigEndian, u16; 0; 70];
+ bitvec![LittleEndian, u16; 1; 70];
+ bitvec![BigEndian, u32; 0; 70];
+ bitvec![LittleEndian, u32; 1; 70];
+ bitvec![BigEndian, u64; 0; 70];
+ bitvec![LittleEndian, u64; 1; 70];
+ }
+}
diff --git a/third_party/rust/bitvec/src/pointer.rs b/third_party/rust/bitvec/src/pointer.rs
new file mode 100644
--- /dev/null
+++ b/third_party/rust/bitvec/src/pointer.rs
@@ -0,0 +1,1031 @@
+/*! Raw Pointer Representation
+
+This module defines the binary representation of the handle to a `BitSlice`
+region. This structure is crate-internal, and defines the methods required to
+store a `BitSlice` pointer in memory and retrieve values from it suitable for
+work.
+!*/
+
+use crate::{
+ BitIdx,
+ BitSlice,
+ Bits,
+ Cursor,
+};
+use core::{
+ convert::{
+ AsMut,
+ AsRef,
+ From,
+ },
+ default::Default,
+ fmt::{
+ self,
+ Debug,
+ Formatter,
+ },
+ marker::PhantomData,
+ mem,
+ ptr::NonNull,
+ slice,
+};
+
+
+/// Width in bits of a pointer on the target machine.
+const PTR_BITS: usize = mem::size_of::<*const u8>() * 8;
+
+/// Width in bits of a processor word on the target machine.
+const USZ_BITS: usize = mem::size_of::<usize>() * 8;
+
+/** In-memory representation of `&BitSlice` handles.
+
+# Layout
+
+This structure is a more complex version of the `*const T`/`usize` tuple that
+Rust uses to represent slices throughout the language. It breaks the pointer and
+counter fundamentals into sub-field components. Rust does not have bitfield
+syntax, so the below description of the element layout is in C++.
+
+```cpp
+template<typename T>
+struct BitPtr<T> {
+ size_t ptr_head : __builtin_ctzll(alignof(T));
+ size_t ptr_data : sizeof(T*) * 8
+ - __builtin_ctzll(alignof(T));
+
+ size_t len_head : 3;
+ size_t len_tail : 3
+ + __builtin_ctzll(alignof(T));
+ size_t len_elts : sizeof(size_t) * 8
+ - 6
+ - __builtin_ctzll(alignof(T));
+};
+```
+
+This means that the `BitPtr<T>` structure has four *logical* fields, stored in
+five segments across the two *structural* fields of the type. The widths and
+placements of each segment are functions of the size of `*const T` and `usize`,
+and the alignment of `T`.
+
+# Fields
+
+This section describes the purpose, meaning, and layout of the four logical
+fields.
+
+## Data Pointer
+
+Aligned pointers to `T` always have low bits available for use to refine the
+address of a `T` to the address of a `u8`. It is stored in the high bits of the
+`ptr` field, running from MSb down to (inclusive)
+`core::mem::align_of::<T>().trailing_zeros()`.
+
+## Element Counter
+
+The memory representation stores counters that run from
+`1 ... (Self::MAX_ELTS)`, where the bit pattern is `n - 1` when `n` is the true
+number of elements in the slice’s domain. It is stored in the high bits of the
+`len` field, running from `MSb` down to (inclusive)
+`core::mem::align_of::<T>().trailing_zeros() + 6`.
+
+## Head Bit Counter
+
+For any fundamental type `T`, `core::mem::align_of::<T>().trailing_zeros() + 3`
+bits are required to count the bit positions inside it.
+
+|Type |Alignment|Trailing Zeros|Count Bits|
+|:----|--------:|-------------:|---------:|
+|`u8` | 1| 0| 3|
+|`u16`| 2| 1| 4|
+|`u32`| 4| 2| 5|
+|`u64`| 8| 3| 6|
+
+The head bit counter is split such that its bottom three bits are stored in the
+low bits of the `len` field and the remaining high bits are stored in the low
+bits of `ptr`.
+
+The counter is a value in the range `0 .. (1 << Count)` that serves as a cursor
+into the zeroth storage element to find the first live bit.
+
+## Tail Bit Counter
+
+This counter is the same bit width as the head bit counter. It is stored
+contiguously in the middle section of the `len` field, running from (exclusive)
+`core::mem::align_of::<T>().trailing_zeros() + 6` down to (inclusive) `3`. The
+value in it is a cursor to the next bit *after* the last live bit of the slice.
+
+The tail bit counter and the element counter operate together; when the tail bit
+counter is `0`, then the element counter is also incremented to cover the next
+element *after* the last live element in the slice domain.
+
+# Edge Cases
+
+The following value sets are edge cases of valid `BitPtr` structures.
+
+## Empty Slice
+
+The empty slice is canonically represented by a wholly zeroed slot:
+
+- `data`: `core::ptr::null::<T>()`
+- `elts`: `0usize`
+- `head`: `0u8`
+- `tail`: `0u8`
+- `ptr`: `core::ptr::null::<u8>()`
+- `len`: `0usize`
+
+ All `BitPtr` values whose `data` pointer is `null` represent the empty slice,
+regardless of other field contents, but the normalized form zeros all other
+fields also.
+
+## Allocated, Uninhabited, Slice
+
+An allocated, owned, region of memory that is uninhabited. This is functionally
+the empty slice, but it must retain its pointer information. All other fields in
+the slot are zeroed.
+
+- `data`: (any valid `*const T`)
+- `elts`: `0usize`
+- `head`: `0u8`
+- `tail`: `0u8`
+- `ptr`: (any valid `*const u8`)
+- `len`: `0usize`
+
+## Maximum Elements, Maximum Tail
+
+This, unfortunately, cannot be represented. The largest domain that can be
+represented has `elts` and `tail` of `!0`, which leaves the last bit in the
+element unavailable.
+
+# Type Parameters
+
+- `T: Bits` is the storage type over which the pointer governs.
+
+# Safety
+
+A `BitPtr` must never be constructed such that the element addressed by
+`self.pointer().offset(self.elements())` causes an addition overflow. This will
+be checked in `new()`.
+
+A `BitPtr` must never be constructed such that the tail bit is lower in memory
+than the head bit. This will be checked in `new()`.
+
+# Undefined Behavior
+
+Using values of this type directly as pointers or counters will result in
+undefined behavior. The pointer value will be invalid for the type, and both the
+pointer and length values will be invalid for the memory model and allocation
+regime.
+**/
+#[repr(C)]
+#[derive(Clone, Copy, Eq, Hash, PartialEq, PartialOrd, Ord)]
+pub struct BitPtr<T>
+where T: Bits {
+ _ty: PhantomData<T>,
+ /// Pointer to the first storage element of the slice.
+ ///
+ /// This will always be a pointer to one byte, regardless of the storage
+ /// type of the `BitSlice` or the type parameter of `Self`. It is a
+ /// combination of a correctly typed and aligned pointer to `T`, and the
+ /// index of a byte within that element.
+ ///
+ /// It is not necessarily the address of the byte with the first live bit.
+ /// The location of the first live bit within the first element is governed
+ /// by the [`Cursor`] type of the `BitSlice` using this structure.
+ ///
+ /// [`Cursor`]: ../trait.Cursor.html
+ ptr: NonNull<u8>,
+ /// Three-element bitfield structure, holding length and place information.
+ ///
+ /// This stores the element count in its highest bits, the tail [`BitIdx`]
+ /// cursor in the middle segment, and the low three bits of the head
+ /// `BitIdx` in the lowest three bits.
+ ///
+ /// [`BitIdx`]: ../struct.BitIdx.html
+ len: usize,
+}
+
+impl<T> BitPtr<T>
+where T: Bits {
+ /// The number of high bits in `self.ptr` that are actually the address of
+ /// the zeroth `T`.
+ pub const PTR_DATA_BITS: usize = PTR_BITS - Self::PTR_HEAD_BITS;
+ /// Marks the bits of `self.ptr` that are the `data` section.
+ pub const PTR_DATA_MASK: usize = !0 & !Self::PTR_HEAD_MASK;
+
+ /// The number of low bits in `self.ptr` that are the high bits of the head
+ /// `BitIdx` cursor.
+ pub const PTR_HEAD_BITS: usize = T::BITS as usize - Self::LEN_HEAD_BITS;
+ /// Marks the bits of `self.ptr` that are the `head` section.
+ pub const PTR_HEAD_MASK: usize = T::MASK as usize >> Self::LEN_HEAD_BITS;
+
+ /// The number of low bits in `self.len` that are the low bits of the head
+ /// `BitIdx` cursor.
+ ///
+ /// This is always `3`, until Rust tries to target a machine whose bytes are
+ /// not eight bits wide.
+ pub const LEN_HEAD_BITS: usize = 3;
+ /// Marks the bits of `self.len` that are the `head` section.
+ pub const LEN_HEAD_MASK: usize = 7;
+
+ /// The number of middle bits in `self.len` that are the tail `BitIdx`
+ /// cursor.
+ pub const LEN_TAIL_BITS: usize = T::BITS as usize;
+ /// Marks the bits of `self.len` that are the `tail` section.
+ pub const LEN_TAIL_MASK: usize = (T::MASK as usize) << Self::LEN_HEAD_BITS;
+
+ /// The number of high bits in `self.len` that are used to count `T`
+ /// elements in the slice.
+ pub const LEN_DATA_BITS: usize = USZ_BITS - Self::LEN_INDX_BITS;
+ /// Marks the bits of `self.len` that are the `data` section.
+ pub const LEN_DATA_MASK: usize = !0 & !Self::LEN_INDX_MASK;
+
+ /// The number of bits occupied by the `tail` `BitIdx` and the low 3 bits of
+ /// `head`.
+ pub const LEN_INDX_BITS: usize = Self::LEN_TAIL_BITS + Self::LEN_HEAD_BITS;
+ /// Marks the bits of `self.len` that are either `tail` or `head`.
+ pub const LEN_INDX_MASK: usize = Self::LEN_TAIL_MASK | Self::LEN_HEAD_MASK;
+
+ /// The maximum number of elements that can be stored in a `BitPtr` domain.
+ pub const MAX_ELTS: usize = 1 << Self::LEN_DATA_BITS;
+
+ /// The maximum number of bits that can be stored in a `BitPtr` domain.
+ pub const MAX_BITS: usize = !0 >> Self::LEN_HEAD_BITS;
+
+ /// Produces an empty-slice representation.
+ ///
+ /// This has no live bits, and has a dangling pointer. It is useful as a
+ /// default value (and is the function used by `Default`) to indicate
+ /// arbitrary null slices.
+ ///
+ /// # Returns
+ ///
+ /// An uninhabited, uninhabitable, empty slice.
+ ///
+ /// # Safety
+ ///
+ /// The `BitPtr` returned by this function must never be dereferenced.
+ pub fn empty() -> Self {
+ Self {
+ _ty: PhantomData,
+ ptr: NonNull::dangling(),
+ len: 0,
+ }
+ }
+
+ /// Produces an uninhabited slice from a bare pointer.
+ ///
+ /// # Parameters
+ ///
+ /// - `ptr`: A pointer to `T`.
+ ///
+ /// # Returns
+ ///
+ /// If `ptr` is null, then this returns the empty slice; otherwise, the
+ /// returned slice is uninhabited and points to the given address.
+ ///
+ /// # Panics
+ ///
+ /// This function panics if the given pointer is not well aligned to its
+ /// type.
+ ///
+ /// # Safety
+ ///
+ /// The provided pointer must be either null, or valid in the caller’s
+ /// memory model and allocation regime.
+ pub fn uninhabited(ptr: *const T) -> Self {
+ // Check that the pointer is properly aligned for the storage type.
+ // Null pointers are always well aligned.
+ assert!(
+ (ptr as usize).trailing_zeros() as usize >= Self::PTR_HEAD_BITS,
+ "BitPtr domain pointers must be well aligned",
+ );
+ Self {
+ _ty: PhantomData,
+ ptr: NonNull::new(ptr as *mut u8).unwrap_or_else(NonNull::dangling),
+ len: 0,
+ }
+ }
+
+ /// Creates a new `BitPtr` from its components.
+ ///
+ /// # Parameters
+ ///
+ /// - `data`: A well-aligned pointer to a storage element. If this is null,
+ /// then the empty-slice representation is returned, regardless of other
+ /// parameter values.
+ /// - `elts`: A number of storage elements in the domain of the new
+ /// `BitPtr`. This number must be in `0 .. Self::MAX_ELTS`.
+ /// - `head`: The bit index of the first live bit in the domain. This must
+ /// be in the domain `0 .. T::SIZE`.
+ /// - `tail`: The bit index of the first dead bit after the domain. This
+ /// must be:
+ /// - equal to `head` when `elts` is `1`, to create an empty slice.
+ /// - in `head + 1 ..= T::SIZE` when `elts` is `1` to create a
+ /// single-element slice.
+ /// - in `1 ..= T::SIZE` when `elts` is greater than `1`.
+ /// - in `1 .. T::SIZE` when `elts` is `Self::MAX_ELTS - 1`.
+ ///
+ /// # Returns
+ ///
+ /// If `data` is null, then the empty slice is returned.
+ ///
+ /// If either of the following conditions are true, then the uninhabited
+ /// slice is returned:
+ ///
+ /// - `elts` is `0`,
+ /// - `elts` is `1` **and** `head` is equal to `tail`.
+ ///
+ /// Otherwise, a `BitPtr` structure representing the given domain is
+ /// returned.
+ ///
+ /// # Type Parameters
+ ///
+ /// - `Head: Into<BitIdx>`: A type which can be used as a `BitIdx`.
+ /// - `Tail: Into<BitIdx>`: A type which can be used as a `BitIdx`.
+ ///
+ /// # Panics
+ ///
+ /// This function happily panics at the slightest whiff of impropriety.
+ ///
+ /// - If the `data` pointer is not aligned to at least the type `T`,
+ /// - If the `elts` counter is not within the countable elements domain,
+ /// `0 .. Self::MAX_ELTS`,
+ /// - If the `data` pointer is so high in the address space that addressing
+ /// the last element would cause the pointer to wrap,
+ /// - If `head` or `tail` are too large for indexing bits within `T`,
+ /// - If `tail` is not correctly placed relative to `head`.
+ ///
+ /// # Safety
+ ///
+ /// The `data` pointer and `elts` counter must describe a correctly aligned,
+ /// validly allocated, region of memory. The caller is responsible for
+ /// ensuring that the slice of memory that the new `BitPtr` will govern is
+ /// all governable.
+ pub fn new<Head: Into<BitIdx>, Tail: Into<BitIdx>>(
+ data: *const T,
+ elts: usize,
+ head: Head,
+ tail: Tail,
+ ) -> Self {
+ let (head, tail) = (head.into(), tail.into());
+ // null pointers, and pointers to empty regions, are run through the
+ // uninhabited constructor instead
+ if data.is_null() || elts == 0 || (elts == 1 && head == tail) {
+ return Self::uninhabited(data);
+ }
+
+ // Check that the pointer is properly aligned for the storage type.
+ assert!(
+ (data as usize).trailing_zeros() as usize >= Self::PTR_HEAD_BITS,
+ "BitPtr domain pointers must be well aligned",
+ );
+
+ // Check that the slice domain is below the ceiling.
+ assert!(
+ elts < Self::MAX_ELTS,
+ "BitPtr domain regions must have at most {} elements",
+ Self::MAX_ELTS - 1,
+ );
+
+ // Check that the pointer is not so high in the address space that the
+ // slice domain wraps.
+ if data.wrapping_offset(elts as isize) < data {
+ panic!("BitPtr slices MUST NOT wrap around the address space");
+ }
+
+ // Check that the head cursor index is within the storage element.
+ assert!(
+ head.is_valid::<T>(),
+ "BitPtr head cursors must be in the domain 0 .. {}",
+ T::SIZE,
+ );
+
+ // Check that the tail cursor index is in the appropriate domain.
+ assert!(
+ BitIdx::from(*tail - 1).is_valid::<T>(),
+ "BitPtr tail cursors must be in the domain 1 ..= {}",
+ T::SIZE,
+ );
+
+ // For single-element slices, check that the tail cursor is after the
+ // head cursor (single-element, head == tail, is checked above).
+ if elts == 1 {
+ assert!(
+ tail > head,
+ "BitPtr domains with one element must have the tail cursor \
+ beyond the head cursor",
+ );
+ }
+ else if elts == Self::MAX_ELTS - 1 {
+ assert!(
+ tail.is_valid::<T>(),
+ "BitPtr domains with maximum elements must have the tail \
+ cursor in 1 .. {}",
+ T::SIZE,
+ );
+ }
+
+ // All invariants satisfied; build the fields
+ let ptr_data = data as usize & Self::PTR_DATA_MASK;
+ let ptr_head = *head as usize >> Self::LEN_HEAD_BITS;
+
+ let len_elts = elts << Self::LEN_INDX_BITS;
+ // Store tail. Note that this wraps T::SIZE to 0. This must be
+ // reconstructed during retrieval.
+ let len_tail
+ = ((*tail as usize) << Self::LEN_HEAD_BITS)
+ & Self::LEN_TAIL_MASK;
+ let len_head = *head as usize & Self::LEN_HEAD_MASK;
+
+ Self {
+ _ty: PhantomData,
+ ptr: unsafe {
+ NonNull::new_unchecked((ptr_data | ptr_head) as *mut u8)
+ },
+ len: len_elts | len_tail | len_head,
+ }
+ }
+
+ /// Extracts the pointer to the first storage element.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// The `*const T` address of the first storage element in the slice domain.
+ ///
+ /// # Safety
+ ///
+ /// This pointer must be valid in the user’s memory model and allocation
+ /// regime.
+ pub fn pointer(&self) -> *const T {
+ (self.ptr.as_ptr() as usize & Self::PTR_DATA_MASK) as *const T
+ }
+
+ /// Produces the count of all elements in the slice domain.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// The number of `T` elements in the slice domain.
+ ///
+ /// # Safety
+ ///
+ /// This size must be valid in the user’s memory model and allocation
+ /// regime.
+ pub fn elements(&self) -> usize {
+ self.len >> Self::LEN_INDX_BITS
+ }
+
+ /// Extracts the element cursor of the head bit.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// A `BitIdx` that is the index of the first live bit in the first element.
+ /// This will be in the domain `0 .. T::SIZE`.
+ pub fn head(&self) -> BitIdx {
+ ((((self.ptr.as_ptr() as usize & Self::PTR_HEAD_MASK) << 3)
+ | (self.len & Self::LEN_HEAD_MASK)) as u8).into()
+ }
+
+ /// Extracts the element cursor of the first dead bit *after* the tail bit.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// A `BitIdx` that is the index of the first dead bit after the last live
+ /// bit in the last element. This will be in the domain `1 ..= T::SIZE`.
+ pub fn tail(&self) -> BitIdx {
+ let bits = (self.len & Self::LEN_TAIL_MASK) >> Self::LEN_HEAD_BITS;
+ if bits == 0 { T::SIZE } else { bits as u8 }.into()
+ }
+
+ /// Decomposes the pointer into raw components.
+ ///
+ /// The values returned from this can be immediately passed into `::new` in
+ /// order to rebuild the pointer.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// - `*const T`: A well aligned pointer to the first element of the slice.
+ /// - `usize`: The number of elements in the slice.
+ /// - `head`: The index of the first live bit in the first element.
+ /// - `tail`: The index of the first dead bit in the last element.
+ pub fn raw_parts(&self) -> (*const T, usize, BitIdx, BitIdx) {
+ (self.pointer(), self.elements(), self.head(), self.tail())
+ }
+
+ /// Checks if the pointer represents the empty slice.
+ ///
+ /// The empty slice has a dangling `data` pointer and zeroed `elts`, `head`,
+ /// and `tail` elements.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// Whether the slice is empty or inhabited.
+ pub fn is_empty(&self) -> bool {
+ self.len >> Self::LEN_INDX_BITS == 0
+ }
+
+ /// Checks if the pointer represents the full slice.
+ ///
+ /// The full slice is marked by `!0` values for `elts` and `tail`, when
+ /// `data` is not null. The full slice does not need `head` to be `0`.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// Whether the slice is fully extended or not.
+ pub fn is_full(&self) -> bool {
+ // Self must be:
+ // - not empty
+ // - `!0` in `elts` and `tail`
+ !self.is_empty()
+ && ((self.len | Self::LEN_HEAD_MASK) == !0)
+ }
+
+ /// Counts how many bits are in the domain of a `BitPtr` slice.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// A count of the live bits in the slice.
+ pub fn bits(&self) -> usize {
+ if self.is_empty() {
+ return 0;
+ }
+ let (_, elts, head, tail) = self.raw_parts();
+ if elts == 1 {
+ return *tail as usize - *head as usize;
+ }
+ // The number of bits in a domain is calculated by decrementing `elts`,
+ // multiplying it by the number of bits per element, then subtracting
+ // `head` (which is the number of dead bits in the front of the first
+ // element), and adding `tail` (which is the number of live bits in the
+ // front of the last element).
+ ((elts - 1) << T::BITS)
+ .saturating_add(*tail as usize)
+ .saturating_sub(*head as usize)
+ }
+
+ /// Produces the head element, if and only if it is partially live.
+ ///
+ /// If the head element is completely live, this returns `None`, because the
+ /// head element is returned in `body_elts()`.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// `Some(&T)` if the slice has at least one element, and the first element
+ /// has at least one bit dead.
+ ///
+ /// `None` if the slice is empty, or if the first element is completely
+ /// live.
+ pub fn head_elt(&self) -> Option<&T> {
+ if !self.is_empty() && *self.head() > 0 {
+ return Some(&self.as_ref()[0]);
+ }
+ None
+ }
+
+ /// Produces the slice of middle elements that are all fully live.
+ ///
+ /// This may produce the empty slice, if the `BitPtr` slice domain has zero,
+ /// one, or two elements, and the outer elements are only partially live.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// A slice of fully live storage elements.
+ pub fn body_elts(&self) -> &[T] {
+ let w: u8 = 1 << Self::LEN_TAIL_BITS;
+ let (_, e, h, t) = self.raw_parts();
+ match (e, *h, *t) {
+ // Empty slice
+ (0, _, _) => & [ ],
+ // Single-element slice, with cursors at the far edges
+ (1, 0, t) if t == w => &self.as_ref()[0 .. e - 0],
+ // Single-element slice, with partial cursors
+ (1, _, _) => & [ ],
+ // Multiple-element slice, with cursors at the far edges
+ (_, 0, t) if t == w => &self.as_ref()[0 .. e - 0],
+ // Multiple-element slice, with full head and partial tail
+ (_, 0, _) => &self.as_ref()[0 .. e - 1],
+ // Multiple-element slice, with partial head and full tail
+ (_, _, t) if t == w => &self.as_ref()[1 .. e - 0],
+ // Multiple-element slice, with partial cursors
+ (_, _, _) => &self.as_ref()[1 .. e - 1],
+ }
+ }
+
+ /// Produces the tail element, if and only if it is partially live.
+ ///
+ /// If the tail element is completely live, this returns `None`, because the
+ /// tail element is returned in `body_elts()`.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// `Some(&T)` if the slice has at least one element, and the last element
+ /// has at least one bit dead.
+ ///
+ /// `None` if the slice is empty, or if the last element is completely live.
+ pub fn tail_elt(&self) -> Option<&T> {
+ if !self.is_empty() && *self.tail() < T::SIZE {
+ return Some(&self.as_ref()[self.elements() - 1]);
+ }
+ None
+ }
+
+ pub fn set_head<Head: Into<BitIdx>>(&mut self, head: Head) {
+ if self.is_empty() {
+ return;
+ }
+ let head = head.into();
+ assert!(
+ head.is_valid::<T>(),
+ "Head indices must be in the domain 0 .. {}",
+ T::SIZE,
+ );
+ if self.elements() == 1 {
+ assert!(
+ head <= self.tail(),
+ "Single-element slices must have head below tail",
+ );
+ }
+ self.ptr = unsafe {
+ let ptr = self.ptr.as_ptr() as usize;
+ NonNull::new_unchecked(
+ ((ptr & !Self::PTR_HEAD_MASK)
+ | ((*head as usize >> Self::LEN_HEAD_BITS) & Self::PTR_HEAD_MASK)
+ ) as *mut u8
+ )
+ };
+ self.len &= !Self::LEN_HEAD_MASK;
+ self.len |= *head as usize & Self::LEN_HEAD_MASK;
+ }
+
+ /// Moves the `head` cursor upwards by one.
+ ///
+ /// If `head` is at the back edge of the first element, then it will be set
+ /// to the front edge of the second element, and the pointer will be moved
+ /// upwards.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ ///
+ /// # Safety
+ ///
+ /// This method is unsafe when `self` is directly, solely, managing owned
+ /// memory. It mutates the pointer and element count, so if this pointer is
+ /// solely responsible for owned memory, its conception of the allocation
+ /// will differ from the allocator’s.
+ pub unsafe fn incr_head(&mut self) {
+ let (data, elts, head, tail) = self.raw_parts();
+ let (new_head, wrap) = head.incr::<T>();
+ if wrap {
+ *self = Self::new(data.offset(1), elts - 1, new_head, tail);
+ }
+ else {
+ *self = Self::new(data, elts, new_head, tail);
+ }
+ }
+
+ /// Moves the `head` cursor downwards by one.
+ ///
+ /// If `head` is at the front edge of the first element, then it will be set
+ /// to the back edge of the zeroth element, and the pointer will be moved
+ /// downwards.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ ///
+ /// # Safety
+ ///
+ /// This function is unsafe when `self` is directly, solely, managing owned
+ /// memory. It mutates the pointer and element count, so if this pointer is
+ /// solely responsible for owned memory, its conception of the allocation
+ /// will differ from the allocator’s.
+ pub unsafe fn decr_head(&mut self) {
+ let (data, elts, head, tail) = self.raw_parts();
+ let (new_head, wrap) = head.decr::<T>();
+ if wrap {
+ *self = Self::new(data.offset(-1), elts + 1, new_head, tail);
+ }
+ else {
+ *self = Self::new(data, elts, new_head, tail);
+ }
+ }
+
+ pub fn set_tail<Tail: Into<BitIdx>>(&mut self, tail: Tail) {
+ if self.is_empty() {
+ return;
+ }
+ let tail = tail.into();
+ assert!(
+ BitIdx::from(*tail - 1).is_valid::<T>(),
+ "Tail indices must be in the domain 1 ..= {}",
+ T::SIZE,
+ );
+ if self.elements() == 1 {
+ assert!(
+ tail >= self.head(),
+ "Single-element slices must have tail above head",
+ );
+ }
+ self.len &= !Self::LEN_TAIL_MASK;
+ self.len |= ((*tail as usize) << Self::LEN_HEAD_BITS) & Self::LEN_TAIL_MASK; // shift into the tail field (T::SIZE wraps to 0, matching `new()`)
+ }
+
+ /// Moves the `tail` cursor upwards by one.
+ ///
+ /// If `tail` is at the back edge of the last element, then it will be set
+ /// to the front edge of the next element beyond, and the element count will
+ /// be increased.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ ///
+ /// # Safety
+ ///
+ /// This function is unsafe when `self` is directly, solely, managing owned
+ /// memory. It mutates the element count, so if this pointer is solely
+ /// responsible for owned memory, its conception of the allocation will
+ /// differ from the allocator’s.
+ pub unsafe fn incr_tail(&mut self) {
+ let (data, elts, head, tail) = self.raw_parts();
+ let decr = BitIdx::from(*tail - 1);
+ let (mut new_tail, wrap) = decr.incr::<T>();
+ new_tail = BitIdx::from(*new_tail + 1);
+ *self = Self::new(data, elts + wrap as usize, head, new_tail);
+ }
+
+ /// Moves the `tail` cursor downwards by one.
+ ///
+ /// If `tail` is at the front edge of the back element, then it will be set
+ /// to the back edge of the next element forward, and the element count will
+ /// be decreased.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ ///
+ /// # Safety
+ ///
+ /// This function is unsafe when `self` is directly, solely, managing owned
+ /// memory. It mutates the element count, so if this pointer is solely
+ /// responsible for owned memory, its conception of the allocation will
+ /// differ from the allocator’s.
+ pub unsafe fn decr_tail(&mut self) {
+ let (data, elts, head, tail) = self.raw_parts();
+ let decr = BitIdx::from(*tail - 1);
+ let (mut new_tail, wrap) = decr.decr::<T>();
+ new_tail = BitIdx::from(*new_tail + 1);
+ *self = Self::new(data, elts - wrap as usize, head, new_tail);
+ }
+}
+
+/// Gets write access to all elements in the underlying storage, including the
+/// partial head and tail elements.
+///
+/// # Safety
+///
+/// This is *unsafe* to use except from known mutable `BitSlice` structures.
+/// Mutability is not encoded in the `BitPtr` type system at this time, and thus
+/// is not enforced by the compiler yet.
+impl<T> AsMut<[T]> for BitPtr<T>
+where T: Bits {
+ fn as_mut(&mut self) -> &mut [T] {
+ let ptr = self.pointer() as *mut T;
+ let len = self.elements();
+ unsafe { slice::from_raw_parts_mut(ptr, len) }
+ }
+}
+
+/// Gets read access to all elements in the underlying storage, including the
+/// partial head and tail elements.
+impl<T> AsRef<[T]> for BitPtr<T>
+where T: Bits {
+ fn as_ref(&self) -> &[T] {
+ unsafe { slice::from_raw_parts(self.pointer(), self.elements()) }
+ }
+}
+
+/// Constructs from an immutable `BitSlice` reference handle.
+impl<'a, C, T> From<&'a BitSlice<C, T>> for BitPtr<T>
+where C: Cursor, T: 'a + Bits {
+ fn from(src: &'a BitSlice<C, T>) -> Self {
+ let src: &[()] = unsafe {
+ mem::transmute::<&'a BitSlice<C, T>, &[()]>(src)
+ };
+ let (ptr, len) = match (src.as_ptr() as usize, src.len()) {
+ (_, 0) => (NonNull::dangling(), 0),
+ (0, _) => unreachable!(
+ "Slices cannot have a length when they begin at address 0"
+ ),
+ (p, l) => (unsafe { NonNull::new_unchecked(p as *mut u8) }, l),
+ };
+ Self { ptr, len, _ty: PhantomData }
+ }
+}
+
+/// Constructs from a mutable `BitSlice` reference handle.
+impl<'a, C, T> From<&'a mut BitSlice<C, T>> for BitPtr<T>
+where C: Cursor, T: 'a + Bits {
+ fn from(src: &'a mut BitSlice<C, T>) -> Self {
+ let src: &[()] = unsafe {
+ mem::transmute::<&'a mut BitSlice<C, T>, &[()]>(src)
+ };
+ let (ptr, len) = match (src.as_ptr() as usize, src.len()) {
+ (_, 0) => (NonNull::dangling(), 0),
+ (0, _) => unreachable!(
+ "Slices cannot have a length when they begin at address 0"
+ ),
+ (p, l) => (unsafe { NonNull::new_unchecked(p as *mut u8) }, l),
+ };
+ Self { ptr, len, _ty: PhantomData }
+ }
+}
+
+/// Produces the empty-slice representation.
+impl<T> Default for BitPtr<T>
+where T: Bits {
+ /// Produces an empty-slice representation.
+ ///
+ /// The empty slice has no size or cursors, and its pointer is the alignment
+ /// of the type. The non-null pointer allows this structure to be null-value
+ /// optimized.
+ fn default() -> Self {
+ Self::empty()
+ }
+}
+
+/// Prints the `BitPtr` data structure for debugging.
+impl<T> Debug for BitPtr<T>
+where T: Bits {
+ fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+ struct HexPtr<T: Bits>(*const T);
+ impl<T: Bits> Debug for HexPtr<T> {
+ fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+ f.write_fmt(format_args!("0x{:0>1$X}", self.0 as usize, PTR_BITS >> 2))
+ }
+ }
+ struct HexAddr(usize);
+ impl Debug for HexAddr {
+ fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+ f.write_fmt(format_args!("{:#X}", self.0))
+ }
+ }
+ struct BinAddr<T: Bits>(BitIdx, PhantomData<T>);
+ impl<T: Bits> Debug for BinAddr<T> {
+ fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+ f.write_fmt(format_args!("0b{:0>1$b}", *self.0, T::BITS as usize))
+ }
+ }
+ write!(f, "BitPtr<{}>", T::TYPENAME)?;
+ f.debug_struct("")
+ .field("data", &HexPtr::<T>(self.pointer()))
+ .field("elts", &HexAddr(self.elements()))
+ .field("head", &BinAddr::<T>(self.head(), PhantomData))
+ .field("tail", &BinAddr::<T>(self.tail(), PhantomData))
+ .finish()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn associated_consts_u8() {
+ assert_eq!(BitPtr::<u8>::PTR_DATA_BITS, PTR_BITS);
+ assert_eq!(BitPtr::<u8>::PTR_HEAD_BITS, 0);
+ assert_eq!(BitPtr::<u8>::LEN_DATA_BITS, USZ_BITS - 6);
+ assert_eq!(BitPtr::<u8>::LEN_TAIL_BITS, 3);
+
+ assert_eq!(BitPtr::<u8>::PTR_DATA_MASK, !0);
+ assert_eq!(BitPtr::<u8>::PTR_HEAD_MASK, 0);
+ assert_eq!(BitPtr::<u8>::LEN_DATA_MASK, !0 << 6);
+ assert_eq!(BitPtr::<u8>::LEN_TAIL_MASK, 7 << 3);
+ assert_eq!(BitPtr::<u8>::LEN_INDX_MASK, 63);
+ }
+
+ #[test]
+ fn associated_consts_u16() {
+ assert_eq!(BitPtr::<u16>::PTR_DATA_BITS, PTR_BITS - 1);
+ assert_eq!(BitPtr::<u16>::PTR_HEAD_BITS, 1);
+ assert_eq!(BitPtr::<u16>::LEN_DATA_BITS, USZ_BITS - 7);
+ assert_eq!(BitPtr::<u16>::LEN_TAIL_BITS, 4);
+
+ assert_eq!(BitPtr::<u16>::PTR_DATA_MASK, !0 << 1);
+ assert_eq!(BitPtr::<u16>::PTR_HEAD_MASK, 1);
+ assert_eq!(BitPtr::<u16>::LEN_DATA_MASK, !0 << 7);
+ assert_eq!(BitPtr::<u16>::LEN_TAIL_MASK, 15 << 3);
+ assert_eq!(BitPtr::<u16>::LEN_INDX_MASK, 127);
+ }
+
+ #[test]
+ fn associated_consts_u32() {
+ assert_eq!(BitPtr::<u32>::PTR_DATA_BITS, PTR_BITS - 2);
+ assert_eq!(BitPtr::<u32>::PTR_HEAD_BITS, 2);
+ assert_eq!(BitPtr::<u32>::LEN_DATA_BITS, USZ_BITS - 8);
+ assert_eq!(BitPtr::<u32>::LEN_TAIL_BITS, 5);
+
+ assert_eq!(BitPtr::<u32>::PTR_DATA_MASK, !0 << 2);
+ assert_eq!(BitPtr::<u32>::PTR_HEAD_MASK, 3);
+ assert_eq!(BitPtr::<u32>::LEN_DATA_MASK, !0 << 8);
+ assert_eq!(BitPtr::<u32>::LEN_TAIL_MASK, 31 << 3);
+ assert_eq!(BitPtr::<u32>::LEN_INDX_MASK, 255);
+ }
+
+ #[test]
+ fn associated_consts_u64() {
+ assert_eq!(BitPtr::<u64>::PTR_DATA_BITS, PTR_BITS - 3);
+ assert_eq!(BitPtr::<u64>::PTR_HEAD_BITS, 3);
+ assert_eq!(BitPtr::<u64>::LEN_DATA_BITS, USZ_BITS - 9);
+ assert_eq!(BitPtr::<u64>::LEN_TAIL_BITS, 6);
+
+ assert_eq!(BitPtr::<u64>::PTR_DATA_MASK, !0 << 3);
+ assert_eq!(BitPtr::<u64>::PTR_HEAD_MASK, 7);
+ assert_eq!(BitPtr::<u64>::LEN_DATA_MASK, !0 << 9);
+ assert_eq!(BitPtr::<u64>::LEN_TAIL_MASK, 63 << 3);
+ assert_eq!(BitPtr::<u64>::LEN_INDX_MASK, 511);
+ }
+
+ #[test]
+ fn ctors() {
+ let data: [u32; 4] = [0x756c6153, 0x2c6e6f74, 0x6e6f6d20, 0x00216f64];
+ let bp = BitPtr::<u32>::new(&data as *const u32, 4, 0, 32);
+ assert_eq!(bp.pointer(), &data as *const u32);
+ assert_eq!(bp.elements(), 4);
+ assert_eq!(*bp.head(), 0);
+ assert_eq!(*bp.tail(), 32);
+ }
+
+ #[test]
+ fn empty() {
+ let data = [0u8; 4];
+ // anything with 0 elements is unconditionally empty
+ assert!(BitPtr::<u8>::new(&data as *const u8, 0, 2, 4).is_empty());
+ }
+
+ #[test]
+ fn full() {
+ let elt_ct = BitPtr::<u64>::MAX_ELTS - 1;
+ // maximum elements, maximum bits
+ let bp = BitPtr::<u64>::new(8 as *const u64, elt_ct, 0, 63);
+ assert!(bp.is_full());
+
+ // one bit fewer
+ let bp = BitPtr::<u64>::new(8 as *const u64, elt_ct, 0, 62);
+ assert!(!bp.is_full());
+ assert_eq!(*bp.tail(), 62.into());
+
+ // one element fewer
+ let bp = BitPtr::<u64>::new(8 as *const u64, elt_ct - 1, 0, 64);
+ assert!(!bp.is_full());
+ }
+
+ #[test]
+ #[should_panic]
+ fn overfull() {
+ BitPtr::<u64>::new(8 as *const u64, BitPtr::<u64>::MAX_ELTS - 1, 0, 64);
+ }
+}
diff --git a/third_party/rust/bitvec/src/slice.rs b/third_party/rust/bitvec/src/slice.rs
new file mode 100644
--- /dev/null
+++ b/third_party/rust/bitvec/src/slice.rs
@@ -0,0 +1,5012 @@
+/*! `BitSlice` Wide Reference
+
+This module defines semantic operations on `[u1]`, in contrast to the mechanical
+operations defined in `BitPtr`.
+
+The `&BitSlice` handle has the same size and general layout as the standard Rust
+slice handle `&[T]`. Its binary layout is wholly incompatible with the layout of
+Rust slices, and must never be interchanged except through the provided APIs.
+!*/
+
+use crate::{
+ BigEndian,
+ BitIdx,
+ BitPtr,
+ Bits,
+ Cursor,
+};
+use core::{
+ cmp::{
+ Eq,
+ Ord,
+ Ordering,
+ PartialEq,
+ PartialOrd,
+ },
+ convert::{
+ AsMut,
+ AsRef,
+ From,
+ },
+ fmt::{
+ self,
+ Debug,
+ DebugList,
+ Display,
+ Formatter,
+ },
+ hash::{
+ Hash,
+ Hasher,
+ },
+ iter::{
+ DoubleEndedIterator,
+ ExactSizeIterator,
+ FusedIterator,
+ Iterator,
+ IntoIterator,
+ },
+ marker::PhantomData,
+ mem,
+ ops::{
+ AddAssign,
+ BitAndAssign,
+ BitOrAssign,
+ BitXorAssign,
+ Index,
+ IndexMut,
+ Neg,
+ Not,
+ Range,
+ RangeFrom,
+ RangeFull,
+ RangeInclusive,
+ RangeTo,
+ RangeToInclusive,
+ ShlAssign,
+ ShrAssign,
+ },
+ ptr,
+ slice,
+ str,
+};
+
+#[cfg(feature = "alloc")]
+use crate::BitVec;
+
+#[cfg(all(feature = "alloc", not(feature = "std")))]
+use alloc::borrow::ToOwned;
+
+#[cfg(feature = "std")]
+use std::borrow::ToOwned;
+
+/** A compact slice of bits, whose cursor and storage types can be customized.
+
+`BitSlice` is a newtype wrapper over [`[()]`], with a specialized reference
+handle. As an unsized slice, it can only ever be held by reference. The
+reference type is **binary incompatible** with any other Rust slice handles.
+
+`BitSlice` can only be dynamically allocated by this library. Creation of any
+other `BitSlice` collections will result in catastrophically incorrect behavior.
+
+A `BitSlice` reference can be created through the [`bitvec!`] macro, from a
+[`BitVec`] collection, or from any slice of elements by using the appropriate
+[`From`] implementation.
+
+`BitSlice`s are a view into a block of memory at bit-level resolution. They are
+represented by a crate-internal pointer structure that ***cannot*** be used with
+other Rust code except through the provided conversion APIs.
+
+```rust
+use bitvec::*;
+
+let store: &[u8] = &[0x69];
+// slicing a bitvec
+let bslice: &BitSlice = store.into();
+// coercing an array to a bitslice
+let bslice: &BitSlice = (&[1u8, 254u8][..]).into();
+```
+
+Bit slices are either mutable or shared. The shared slice type is
+`&BitSlice<C, T>`, while the mutable slice type is `&mut BitSlice<C, T>`. For
+example, you can mutate bits in the memory to which a mutable `BitSlice` points:
+
+```rust
+use bitvec::*;
+let mut base = [0u8, 0, 0, 0];
+{
+ let bs: &mut BitSlice = (&mut base[..]).into();
+ bs.set(13, true);
+ eprintln!("{:?}", bs.as_ref());
+ assert!(bs[13]);
+}
+assert_eq!(base[1], 4);
+```
+
+# Type Parameters
+
+- `C: Cursor`: An implementor of the `Cursor` trait. This type is used to
+ convert semantic indices into concrete bit positions in elements, and store or
+ retrieve bit values from the storage type.
+- `T: Bits`: An implementor of the `Bits` trait: `u8`, `u16`, `u32`, `u64`. This
+ is the actual type in memory the slice will use to store data.
+
+# Safety
+
+The `&BitSlice` reference handle has the same *size* as standard Rust slice
+handles, but it is ***extremely binary incompatible*** with them. Attempting to
+treat `&BitSlice<_, T>` as `&[T]` in any manner except through the provided APIs
+is ***catastrophically*** unsafe and unsound.
+
+[`BitVec`]: ../struct.BitVec.html
+[`From`]: https://doc.rust-lang.org/stable/std/convert/trait.From.html
+[`bitvec!`]: ../macro.bitvec.html
+[`[()]`]: https://doc.rust-lang.org/stable/std/primitive.slice.html
+**/
+#[repr(transparent)]
+pub struct BitSlice<C = BigEndian, T = u8>
+where C: Cursor, T: Bits {
+ /// Cursor type for selecting bits inside an element.
+ _kind: PhantomData<C>,
+ /// Element type of the slice.
+ ///
+ /// eddyb recommends using `PhantomData<T>` and `[()]` instead of `[T]`
+ /// alone.
+ _type: PhantomData<T>,
+ /// Slice of elements `T` over which the `BitSlice` has usage.
+ _elts: [()],
+}
+
+impl<C, T> BitSlice<C, T>
+where C: Cursor, T: Bits {
+ /// Produces the empty slice. This is equivalent to `&[]` for Rust slices.
+ ///
+ /// # Returns
+ ///
+ /// An empty `&BitSlice` handle.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let bv: &BitSlice = BitSlice::empty();
+ /// ```
+ pub fn empty<'a>() -> &'a Self {
+ BitPtr::empty().into()
+ }
+
+ /// Produces the empty mutable slice. This is equivalent to `&mut []` for
+ /// Rust slices.
+ ///
+ /// # Returns
+ ///
+ /// An empty `&mut BitSlice` handle.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let bv: &mut BitSlice = BitSlice::empty_mut();
+ /// ```
+ pub fn empty_mut<'a>() -> &'a mut Self {
+ BitPtr::empty().into()
+ }
+
+ /// Returns the number of bits contained in the `BitSlice`.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// The number of live bits in the slice domain.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0];
+ /// let bv: &BitSlice = store.into();
+ /// assert_eq!(bv.len(), 8);
+ /// ```
+ pub fn len(&self) -> usize {
+ self.bitptr().bits()
+ }
+
+ /// Tests if the slice is empty.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// Whether the slice has no live bits.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let bv: &BitSlice = BitSlice::empty();
+ /// assert!(bv.is_empty());
+ /// let bv: &BitSlice = (&[0u8] as &[u8]).into();;
+ /// assert!(!bv.is_empty());
+ /// ```
+ pub fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
+ /// Gets the first element of the slice, if present.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// `None` if the slice is empty, or `Some(bit)` if it is not.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// assert!(BitSlice::<BigEndian, u8>::empty().first().is_none());
+ /// let bv: &BitSlice = (&[128u8] as &[u8]).into();
+ /// assert!(bv.first().unwrap());
+ /// ```
+ pub fn first(&self) -> Option<bool> {
+ if self.is_empty() { None }
+ else { Some(self[0]) }
+ }
+
+ /// Returns the first and all the rest of the bits of the slice, or `None`
+ /// if it is empty.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// If the slice is empty, this returns `None`, otherwise, it returns `Some`
+ /// of:
+ ///
+ /// - the first bit
+ /// - a `&BitSlice` of all the rest of the bits (this may be empty)
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// assert!(BitSlice::<BigEndian, u8>::empty().split_first().is_none());
+ ///
+ /// let store: &[u8] = &[128];
+ /// let bv: &BitSlice = store.into();
+ /// let (h, t) = bv.split_first().unwrap();
+ /// assert!(h);
+ /// assert!(t.not_any());
+ ///
+ /// let bv = &bv[0 .. 1];
+ /// let (h, t) = bv.split_first().unwrap();
+ /// assert!(h);
+ /// assert!(t.is_empty());
+ /// ```
+ pub fn split_first(&self) -> Option<(bool, &Self)> {
+ if self.is_empty() {
+ return None;
+ }
+ Some((self[0], &self[1 ..]))
+ }
+
+ /// Returns the first and all the rest of the bits of the slice, or `None`
+ /// if it is empty.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// If the slice is empty, this returns `None`, otherwise, it returns `Some`
+ /// of:
+ ///
+ /// - the first bit
+ /// - a `&mut BitSlice` of all the rest of the bits (this may be empty)
+ pub fn split_first_mut(&mut self) -> Option<(bool, &mut Self)> {
+ if self.is_empty() {
+ return None;
+ }
+ Some((self[0], &mut self[1 ..]))
+ }
+
+ /// Returns the last and all the rest of the bits in the slice, or `None`
+ /// if it is empty.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// If the slice is empty, this returns `None`, otherwise, it returns `Some`
+ /// of:
+ ///
+ /// - the last bit
+ /// - a `&BitSlice` of all the rest of the bits (this may be empty)
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// assert!(BitSlice::<BigEndian, u8>::empty().split_last().is_none());
+ ///
+ /// let bv: &BitSlice = (&[1u8] as &[u8]).into();
+ /// let (t, h) = bv.split_last().unwrap();
+ /// assert!(t);
+ /// assert!(h.not_any());
+ ///
+ /// let bv = &bv[7 .. 8];
+ /// let (t, h) = bv.split_last().unwrap();
+ /// assert!(t);
+ /// assert!(h.is_empty());
+ /// ```
+ pub fn split_last(&self) -> Option<(bool, &Self)> {
+ if self.is_empty() {
+ return None;
+ }
+ let len = self.len();
+ Some((self[len - 1], &self[.. len - 1]))
+ }
+
+ /// Returns the last and all the rest of the bits in the slice, or `None`
+ /// if it is empty.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// If the slice is empty, this returns `None`, otherwise, it returns `Some`
+ /// of:
+ ///
+ /// - the last bit
+ /// - a `&BitSlice` of all the rest of the bits (this may be empty)
+ pub fn split_last_mut(&mut self) -> Option<(bool, &mut Self)> {
+ if self.is_empty() {
+ return None;
+ }
+ let len = self.len();
+ Some((self[len - 1], &mut self[.. len - 1]))
+ }
+
+ /// Gets the last element of the slice, or `None` if it is empty.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// `None` if the slice is empty, or `Some(bit)` if it is not.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// assert!(BitSlice::<BigEndian, u8>::empty().last().is_none());
+ /// let bv: &BitSlice = (&[1u8] as &[u8]).into();
+ /// assert!(bv.last().unwrap());
+ /// ```
+ pub fn last(&self) -> Option<bool> {
+ if self.is_empty() { None }
+ else { Some(self[self.len() - 1]) }
+ }
+
+ /// Gets the bit value at the given position.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ /// - `index`: The bit index to retrieve.
+ ///
+ /// # Returns
+ ///
+ /// The bit at the specified index, if any.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let bv: &BitSlice = (&[8u8] as &[u8]).into();
+ /// assert!(bv.get(4).unwrap());
+ /// assert!(!bv.get(3).unwrap());
+ /// assert!(bv.get(10).is_none());
+ /// ```
+ pub fn get(&self, index: usize) -> Option<bool> {
+ if index >= self.len() {
+ return None;
+ }
+ Some(self[index])
+ }
+
+ /// Sets the bit value at the given position.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ /// - `index`: The bit index to set. It must be in the domain
+ /// `0 .. self.len()`.
+ /// - `value`: The value to be set, `true` for `1` and `false` for `0`.
+ ///
+ /// # Panics
+ ///
+ /// This method panics if `index` is outside the slice domain.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &mut [u8] = &mut [8u8];
+ /// let bv: &mut BitSlice = store.into();
+ /// assert!(!bv[3]);
+ /// bv.set(3, true);
+ /// assert!(bv[3]);
+ /// ```
+ pub fn set(&mut self, index: usize, value: bool) {
+ let len = self.len();
+ assert!(index < len, "Index out of range: {} >= {}", index, len);
+
+ let h = self.bitptr().head();
+ // Find the index of the containing element, and of the bit within it.
+ let (elt, bit) = h.offset::<T>(index as isize);
+ self.as_mut()[elt as usize].set::<C>(bit, value);
+ }
+
+ /// Retrieves a read pointer to the start of the underlying data slice.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// A pointer to the first element, partial or not, in the underlying store.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that the slice outlives the pointer this function
+ /// returns, or else it will dangle and point to garbage.
+ ///
+ /// Modifying the container referenced by this slice may cause its buffer to
+ /// reallocate, which would also make any pointers to it invalid.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0; 4];
+ /// let bv: &BitSlice = store.into();
+ /// assert_eq!(store.as_ptr(), bv.as_ptr());
+ /// ```
+ pub fn as_ptr(&self) -> *const T {
+ self.bitptr().pointer()
+ }
+
+ /// Retrieves a write pointer to the start of the underlying data slice.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ ///
+ /// # Returns
+ ///
+ /// A pointer to the first element, partial or not, in the underlying store.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that the slice outlives the pointer this function
+ /// returns, or else it will dangle and point to garbage.
+ ///
+ /// Modifying the container referenced by this slice may cause its buffer to
+ /// reallocate, which would also make any pointers to it invalid.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &mut [u8] = &mut[0; 4];
+ /// let store_ptr = store.as_mut_ptr();
+ /// let bv: &mut BitSlice = store.into();
+ /// assert_eq!(store_ptr, bv.as_mut_ptr());
+ /// ```
+ pub fn as_mut_ptr(&mut self) -> *mut T {
+ self.bitptr().pointer() as *mut T
+ }
+
+ /// Swaps two bits in the slice.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ /// - `a`: The first index to be swapped.
+ /// - `b`: The second index to be swapped.
+ ///
+ /// # Panics
+ ///
+ /// Panics if either `a` or `b` are out of bounds.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &mut [u8] = &mut[32u8];
+ /// let bv: &mut BitSlice = store.into();
+ /// assert!(!bv[0]);
+ /// assert!(bv[2]);
+ /// bv.swap(0, 2);
+ /// assert!(bv[0]);
+ /// assert!(!bv[2]);
+ /// ```
+ pub fn swap(&mut self, a: usize, b: usize) {
+ assert!(a < self.len(), "Index {} out of bounds: {}", a, self.len());
+ assert!(b < self.len(), "Index {} out of bounds: {}", b, self.len());
+ let bit_a = self[a];
+ let bit_b = self[b];
+ self.set(a, bit_b);
+ self.set(b, bit_a);
+ }
+
+ /// Reverses the order of bits in the slice, in place.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &mut [u8] = &mut[0b1010_1010];
+ /// {
+ /// let bv: &mut BitSlice = store.into();
+ /// bv[1 .. 7].reverse();
+ /// }
+ /// eprintln!("{:b}", store[0]);
+ /// assert_eq!(store[0], 0b1101_0100);
+ /// ```
+ pub fn reverse(&mut self) {
+ let mut cur: &mut Self = self;
+ loop {
+ let len = cur.len();
+ if len < 2 {
+ return;
+ }
+ let (h, t) = (cur[0], cur[len - 1]);
+ cur.set(0, t);
+ cur.set(len - 1, h);
+ cur = &mut cur[1 .. len - 1];
+ }
+ }
+
+ /// Provides read-only iteration across the slice domain.
+ ///
+ /// The iterator returned from this method implements `ExactSizeIterator`
+ /// and `DoubleEndedIterator` just as the consuming `.into_iter()` method’s
+ /// iterator does.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// An iterator over all bits in the slice domain, in `C` and `T` ordering.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[64];
+ /// let bv: &BitSlice = store.into();
+ /// let mut iter = bv[.. 2].iter();
+ /// assert!(!iter.next().unwrap());
+ /// assert!(iter.next().unwrap());
+ /// assert!(iter.next().is_none());
+ /// ```
+ pub fn iter(&self) -> Iter<C, T> {
+ self.into_iter()
+ }
+
+ /// Produces a sliding iterator over consecutive windows in the slice. Each
+ /// windows has the width `size`. The windows overlap. If the slice is
+ /// shorter than `size`, the produced iterator is empty.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ /// - `size`: The width of each window.
+ ///
+ /// # Returns
+ ///
+ /// An iterator which yields sliding views into the slice.
+ ///
+ /// # Panics
+ ///
+ /// This function panics if the `size` is zero.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0b0100_1011];
+ /// let bv: &BitSlice = store.into();
+ /// let mut windows = bv.windows(4);
+ /// assert_eq!(windows.next(), Some(&bv[0 .. 4]));
+ /// assert_eq!(windows.next(), Some(&bv[1 .. 5]));
+ /// assert_eq!(windows.next(), Some(&bv[2 .. 6]));
+ /// assert_eq!(windows.next(), Some(&bv[3 .. 7]));
+ /// assert_eq!(windows.next(), Some(&bv[4 .. 8]));
+ /// assert!(windows.next().is_none());
+ /// ```
+ pub fn windows(&self, size: usize) -> Windows<C, T> {
+ assert_ne!(size, 0, "Window width cannot be zero");
+ Windows {
+ inner: self,
+ width: size,
+ }
+ }
+
+ /// Produces a galloping iterator over consecutive chunks in the slice. Each
+ /// chunk, except possibly the last, has the width `size`. The chunks do not
+ /// overlap. If the slice is shorter than `size`, the produced iterator
+ /// produces only one chunk.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ /// - `size`: The width of each chunk.
+ ///
+ /// # Returns
+ ///
+ /// An iterator which yields consecutive chunks of the slice.
+ ///
+ /// # Panics
+ ///
+ /// This function panics if the `size` is zero.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0b0100_1011];
+ /// let bv: &BitSlice = store.into();
+ /// let mut chunks = bv.chunks(3);
+ /// assert_eq!(chunks.next(), Some(&bv[0 .. 3]));
+ /// assert_eq!(chunks.next(), Some(&bv[3 .. 6]));
+ /// assert_eq!(chunks.next(), Some(&bv[6 .. 8]));
+ /// assert!(chunks.next().is_none());
+ /// ```
+ pub fn chunks(&self, size: usize) -> Chunks<C, T> {
+ assert_ne!(size, 0, "Chunk width cannot be zero");
+ Chunks {
+ inner: self,
+ width: size,
+ }
+ }
+
+ /// Produces a galloping iterator over consecutive chunks in the slice. Each
+ /// chunk, except possibly the last, has the width `size`. The chunks do not
+ /// overlap. If the slice is shorter than `size`, the produced iterator
+ /// produces only one chunk.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ /// - `size`: The width of each chunk.
+ ///
+ /// # Returns
+ ///
+ /// An iterator which yields consecutive mutable chunks of the slice.
+ ///
+ /// # Panics
+ ///
+ /// This function panics if the `size` is zero.
+ pub fn chunks_mut(&mut self, size: usize) -> ChunksMut<C, T> {
+ assert_ne!(size, 0, "Chunk width cannot be zero");
+ ChunksMut {
+ inner: self,
+ width: size,
+ }
+ }
+
+ /// Produces a galloping iterator over consecutive chunks in the slice. Each
+ /// chunk has the width `size`. If `size` does not evenly divide the slice,
+ /// then the remainder is not part of the iteration, and can be accessed
+ /// separately with the `.remainder()` method.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ /// - `size`: The width of each chunk.
+ ///
+ /// # Returns
+ ///
+ /// An iterator which yields consecutive chunks of the slice.
+ ///
+ /// # Panics
+ ///
+ /// This function panics if `size` is zero.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0b0100_1011];
+ /// let bv: &BitSlice = store.into();
+ /// let mut chunks_exact = bv.chunks_exact(3);
+ /// assert_eq!(chunks_exact.next(), Some(&bv[0 .. 3]));
+ /// assert_eq!(chunks_exact.next(), Some(&bv[3 .. 6]));
+ /// assert!(chunks_exact.next().is_none());
+ /// assert_eq!(chunks_exact.remainder(), &bv[6 .. 8]);
+ /// ```
+ pub fn chunks_exact(&self, size: usize) -> ChunksExact<C, T> {
+ assert_ne!(size, 0, "Chunk size cannot be zero");
+ let rem = self.len() % size;
+ let len = self.len() - rem;
+ let (inner, extra) = self.split_at(len);
+ ChunksExact {
+ inner,
+ extra,
+ width: size,
+ }
+ }
+
+ /// Produces a galloping iterator over consecutive chunks in the slice. Each
+ /// chunk has the width `size`. If `size` does not evenly divide the slice,
+ /// then the remainder is not part of the iteration, and can be accessed
+ /// separately with the `.remainder()` method.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ /// - `size`: The width of each chunk.
+ ///
+ /// # Returns
+ ///
+ /// An iterator which yields consecutive mutable chunks of the slice.
+ ///
+ /// # Panics
+ ///
+ /// This function panics if `size` is zero.
+ pub fn chunks_exact_mut(&mut self, size: usize) -> ChunksExactMut<C, T> {
+ assert_ne!(size, 0, "Chunk size cannot be zero");
+ let rem = self.len() % size;
+ let len = self.len() - rem;
+ let (inner, extra) = self.split_at_mut(len);
+ ChunksExactMut {
+ inner,
+ extra,
+ width: size,
+ }
+ }
+
+ /// Produces a galloping iterator over consecutive chunks in the slice, from
+ /// the back to the front. Each chunk, except possibly the front, has the
+ /// width `size`. The chunks do not overlap. If the slice is shorter than
+ /// `size`, then the iterator produces one item.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ /// - `size`: The width of each chunk.
+ ///
+ /// # Returns
+ ///
+ /// An iterator which yields consecutive chunks of the slice, from the back
+ /// to the front.
+ ///
+ /// # Panics
+ ///
+ /// This function panics if `size` is zero.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0b0100_1011];
+ /// let bv: &BitSlice = store.into();
+ /// let mut rchunks = bv.rchunks(3);
+ /// assert_eq!(rchunks.next(), Some(&bv[5 .. 8]));
+ /// assert_eq!(rchunks.next(), Some(&bv[2 .. 5]));
+ /// assert_eq!(rchunks.next(), Some(&bv[0 .. 2]));
+ /// assert!(rchunks.next().is_none());
+ /// ```
+ pub fn rchunks(&self, size: usize) -> RChunks<C, T> {
+ assert_ne!(size, 0, "Chunk size cannot be zero");
+ RChunks {
+ inner: self,
+ width: size,
+ }
+ }
+
+ /// Produces a galloping iterator over consecutive chunks in the slice, from
+ /// the back to the front. Each chunk, except possibly the front, has the
+ /// width `size`. The chunks do not overlap. If the slice is shorter than
+ /// `size`, then the iterator produces one item.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ /// - `size`: The width of each chunk.
+ ///
+ /// # Returns
+ ///
+ /// An iterator which yields consecutive mutable chunks of the slice, from
+ /// the back to the front.
+ ///
+ /// # Panics
+ ///
+ /// This function panics if `size` is zero.
+ pub fn rchunks_mut(&mut self, size: usize) -> RChunksMut<C, T> {
+ assert_ne!(size, 0, "Chunk size cannot be zero");
+ RChunksMut {
+ inner: self,
+ width: size,
+ }
+ }
+
+ /// Produces a galloping iterator over consecutive chunks in the slice, from
+ /// the back to the front. Each chunk has the width `size`. If `size` does
+ /// not evenly divide the slice, then the remainder is not part of the
+ /// iteration, and can be accessed separately with the `.remainder()`
+ /// method.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ /// - `size`: The width of each chunk.
+ ///
+ /// # Returns
+ ///
+ /// An iterator which yields consecutive chunks of the slice, from the back
+ /// to the front.
+ ///
+ /// # Panics
+ ///
+ /// This function panics if `size` is zero.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0b0100_1011];
+ /// let bv: &BitSlice = store.into();
+ /// let mut rchunks_exact = bv.rchunks_exact(3);
+ /// assert_eq!(rchunks_exact.next(), Some(&bv[5 .. 8]));
+ /// assert_eq!(rchunks_exact.next(), Some(&bv[2 .. 5]));
+ /// assert!(rchunks_exact.next().is_none());
+ /// assert_eq!(rchunks_exact.remainder(), &bv[0 .. 2]);
+ /// ```
+ pub fn rchunks_exact(&self, size: usize) -> RChunksExact<C, T> {
+ assert_ne!(size, 0, "Chunk size cannot be zero");
+ let (extra, inner) = self.split_at(self.len() % size);
+ RChunksExact {
+ inner,
+ extra,
+ width: size,
+ }
+ }
+
+ /// Produces a galloping iterator over consecutive chunks in the slice, from
+ /// the back to the front. Each chunk has the width `size`. If `size` does
+ /// not evenly divide the slice, then the remainder is not part of the
+ /// iteration, and can be accessed separately with the `.remainder()`
+ /// method.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ /// - `size`: The width of each chunk.
+ ///
+ /// # Returns
+ ///
+ /// An iterator which yields consecutive mutable chunks of the slice, from
+ /// the back to the front.
+ ///
+ /// # Panics
+ ///
+ /// This function panics if `size` is zero.
+ pub fn rchunks_exact_mut(&mut self, size: usize) -> RChunksExactMut<C, T> {
+ assert_ne!(size, 0, "Chunk size cannot be zero");
+ let (extra, inner) = self.split_at_mut(self.len() % size);
+ RChunksExactMut {
+ inner,
+ extra,
+ width: size,
+ }
+ }
+
+ /// Divides one slice into two at an index.
+ ///
+ /// The first will contain all indices from `[0, mid)` (excluding the index
+ /// `mid` itself) and the second will contain all indices from `[mid, len)`
+ /// (excluding the index `len` itself).
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ /// - `mid`: The index at which to split
+ ///
+ /// # Returns
+ ///
+ /// - The bits up to but not including `mid`.
+ /// - The bits from mid onwards.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `mid > self.len()`.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0x0F];
+ /// let bv: &BitSlice = store.into();
+ ///
+ /// let (l, r) = bv.split_at(0);
+ /// assert!(l.is_empty());
+ /// assert_eq!(r, bv);
+ ///
+ /// let (l, r) = bv.split_at(4);
+ /// assert_eq!(l, &bv[0 .. 4]);
+ /// assert_eq!(r, &bv[4 .. 8]);
+ ///
+ /// let (l, r) = bv.split_at(8);
+ /// assert_eq!(l, bv);
+ /// assert!(r.is_empty());
+ /// ```
+ pub fn split_at(&self, mid: usize) -> (&Self, &Self) {
+ assert!(mid <= self.len(), "Index {} out of bounds: {}", mid, self.len());
+ if mid == self.len() {
+ (&self, Self::empty())
+ }
+ else {
+ (&self[.. mid], &self[mid ..])
+ }
+ }
+
+ /// Divides one slice into two at an index.
+ ///
+ /// The first will contain all indices from `[0, mid)` (excluding the index
+ /// `mid` itself) and the second will contain all indices from `[mid, len)`
+ /// (excluding the index `len` itself).
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ /// - `mid`: The index at which to split
+ ///
+ /// # Returns
+ ///
+ /// - The bits up to but not including `mid`.
+ /// - The bits from mid onwards.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `mid > self.len()`.
+ pub fn split_at_mut(&mut self, mid: usize) -> (&mut Self, &mut Self) {
+ let (head, tail) = self.split_at(mid);
+ let h_mut = {
+ let (p, e, h, t) = head.bitptr().raw_parts();
+ BitPtr::new(p, e, h, t)
+ };
+ let t_mut = {
+ let (p, e, h, t) = tail.bitptr().raw_parts();
+ BitPtr::new(p, e, h, t)
+ };
+ (h_mut.into(), t_mut.into())
+ }
+
+ /// Tests if the slice begins with the given prefix.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ /// - `prefix`: Any `BitSlice` against which `self` is tested. This is not
+ /// required to have the same cursor or storage types as `self`.
+ ///
+ /// # Returns
+ ///
+ /// Whether `self` begins with `prefix`. This is true only if `self` is at
+ /// least as long as `prefix` and their bits are semantically equal.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0xA6];
+ /// let bv: &BitSlice = store.into();;
+ /// assert!(bv.starts_with(&bv[.. 3]));
+ /// assert!(!bv.starts_with(&bv[3 ..]));
+ /// ```
+ pub fn starts_with<D, U>(&self, prefix: &BitSlice<D, U>) -> bool
+ where D: Cursor, U: Bits {
+ let plen = prefix.len();
+ self.len() >= plen && prefix == &self[.. plen]
+ }
+
+ /// Tests if the slice ends with the given suffix.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ /// - `suffix`: Any `BitSlice` against which `self` is tested. This is not
+ /// required to have the same cursor or storage types as `self`.
+ ///
+ /// # Returns
+ ///
+ /// Whether `self` ends with `suffix`. This is true only if `self` is at
+ /// least as long as `suffix` and their bits are semantically equal.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0xA6];
+ /// let bv: &BitSlice = store.into();
+ /// assert!(bv.ends_with(&bv[5 ..]));
+ /// assert!(!bv.ends_with(&bv[.. 5]));
+ /// ```
+ pub fn ends_with<D, U>(&self, suffix: &BitSlice<D, U>) -> bool
+ where D: Cursor, U: Bits {
+ let slen = suffix.len();
+ let len = self.len();
+ len >= slen && suffix == &self[len - slen ..]
+ }
+
+ /// Rotates the slice, in place, to the left.
+ ///
+ /// After calling this method, the bits from `[.. by]` will be at the back
+ /// of the slice, and the bits from `[by ..]` will be at the front. This
+ /// operates fully in-place.
+ ///
+ /// In-place rotation of bits requires this method to take `O(k × n)` time.
+ /// It is impossible to use machine intrinsics to perform galloping rotation
+ /// on bits.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ /// - `by`: The number of bits by which to rotate left. This must be in the
+ /// range `0 ..= self.len()`. If it is `0` or `self.len()`, then this
+ /// method is a no-op.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &mut [u8] = &mut [0xF0];
+ /// let bv: &mut BitSlice = store.into();
+ /// bv.rotate_left(2);
+ /// assert_eq!(bv.as_ref()[0], 0xC3);
+ /// ```
+ pub fn rotate_left(&mut self, by: usize) {
+ let len = self.len();
+ assert!(by <= len, "Slices cannot be rotated by more than their length");
+ if by == len {
+ return;
+ }
+
+ for _ in 0 .. by {
+ let tmp = self[0];
+ for n in 1 .. len {
+ let bit = self[n];
+ self.set(n - 1, bit);
+ }
+ self.set(len - 1, tmp);
+ }
+ }
+
+ /// Rotates the slice, in place, to the right.
+ ///
+ /// After calling this method, the bits from `[self.len() - by ..]` will be
+ /// at the front of the slice, and the bits from `[.. self.len() - by]` will
+ /// be at the back. This operates fully in-place.
+ ///
+ /// In-place rotation of bits requires this method to take `O(k × n)` time.
+ /// It is impossible to use machine intrinsics to perform galloping rotation
+ /// on bits.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ /// - `by`: The number of bits by which to rotate right. This must be in the
+ /// range `0 ..= self.len()`. If it is `0` or `self.len`, then this method
+ /// is a no-op.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &mut [u8] = &mut [0xF0];
+ /// let bv: &mut BitSlice = store.into();
+ /// bv.rotate_right(2);
+ /// assert_eq!(bv.as_ref()[0], 0x3C);
+ /// ```
+ pub fn rotate_right(&mut self, by: usize) {
+ let len = self.len();
+ assert!(by <= len, "Slices cannot be rotated by more than their length");
+ if by == len {
+ return;
+ }
+
+ for _ in 0 .. by {
+ let tmp = self[len - 1];
+ for n in (0 .. len - 1).rev() {
+ let bit = self[n];
+ self.set(n + 1, bit);
+ }
+ self.set(0, tmp);
+ }
+ }
+
+ /// Tests if *all* bits in the slice domain are set (logical `∧`).
+ ///
+ /// # Truth Table
+ ///
+ /// ```text
+ /// 0 0 => 0
+ /// 0 1 => 0
+ /// 1 0 => 0
+ /// 1 1 => 1
+ /// ```
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// Whether all bits in the slice domain are set.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0xFD];
+ /// let bv: &BitSlice = store.into();
+ /// assert!(bv[.. 4].all());
+ /// assert!(!bv[4 ..].all());
+ /// ```
+ pub fn all(&self) -> bool {
+ match self.inner() {
+ Inner::Minor(head, elt, tail) => {
+ for n in *head .. *tail {
+ if !elt.get::<C>(n.into()) {
+ return false;
+ }
+ }
+ },
+ Inner::Major(head, body, tail) => {
+ if let Some(elt) = head {
+ for n in *self.bitptr().head() .. T::SIZE {
+ if !elt.get::<C>(n.into()) {
+ return false;
+ }
+ }
+ }
+ for elt in body {
+ if *elt != T::from(!0) {
+ return false;
+ }
+ }
+ if let Some(elt) = tail {
+ for n in 0 .. *self.bitptr().tail() {
+ if !elt.get::<C>(n.into()) {
+ return false;
+ }
+ }
+ }
+ },
+ }
+ true
+ }
+
+ /// Tests if *any* bit in the slice is set (logical `∨`).
+ ///
+ /// # Truth Table
+ ///
+ /// ```text
+ /// 0 0 => 0
+ /// 0 1 => 1
+ /// 1 0 => 1
+ /// 1 1 => 1
+ /// ```
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// Whether any bit in the slice domain is set.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0x40];
+ /// let bv: &BitSlice = store.into();
+ /// assert!(bv[.. 4].any());
+ /// assert!(!bv[4 ..].any());
+ /// ```
+ pub fn any(&self) -> bool {
+ match self.inner() {
+ Inner::Minor(head, elt, tail) => {
+ for n in *head .. *tail {
+ if elt.get::<C>(n.into()) {
+ return true;
+ }
+ }
+ },
+ Inner::Major(head, body, tail) => {
+ if let Some(elt) = head {
+ for n in *self.bitptr().head() .. T::SIZE {
+ if elt.get::<C>(n.into()) {
+ return true;
+ }
+ }
+ }
+ for elt in body {
+ if *elt != T::from(0) {
+ return true;
+ }
+ }
+ if let Some(elt) = tail {
+ for n in 0 .. *self.bitptr().tail() {
+ if elt.get::<C>(n.into()) {
+ return true;
+ }
+ }
+ }
+ },
+ }
+ false
+ }
+
+ /// Tests if *any* bit in the slice is unset (logical `¬∧`).
+ ///
+ /// # Truth Table
+ ///
+ /// ```text
+ /// 0 0 => 1
+ /// 0 1 => 1
+ /// 1 0 => 1
+ /// 1 1 => 0
+ /// ```
+ ///
+ /// # Parameters
+ ///
+	/// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// Whether any bit in the slice domain is unset.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0xFD];
+ /// let bv: &BitSlice = store.into();
+ /// assert!(!bv[.. 4].not_all());
+ /// assert!(bv[4 ..].not_all());
+ /// ```
+ pub fn not_all(&self) -> bool {
+ !self.all()
+ }
+
+ /// Tests if *all* bits in the slice are unset (logical `¬∨`).
+ ///
+ /// # Truth Table
+ ///
+ /// ```text
+ /// 0 0 => 1
+ /// 0 1 => 0
+ /// 1 0 => 0
+ /// 1 1 => 0
+ /// ```
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// Whether all bits in the slice domain are unset.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0x40];
+ /// let bv: &BitSlice = store.into();
+ /// assert!(!bv[.. 4].not_any());
+ /// assert!(bv[4 ..].not_any());
+ /// ```
+ pub fn not_any(&self) -> bool {
+ !self.any()
+ }
+
+ /// Tests whether the slice has some, but not all, bits set and some, but
+ /// not all, bits unset.
+ ///
+ /// This is `false` if either `all()` or `not_any()` are `true`.
+ ///
+ /// # Truth Table
+ ///
+ /// ```text
+ /// 0 0 => 0
+ /// 0 1 => 1
+ /// 1 0 => 1
+ /// 1 1 => 0
+ /// ```
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// Whether the slice domain has mixed content.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0b111_000_10];
+ /// let bv: &BitSlice = store.into();
+ /// assert!(!bv[0 .. 3].some());
+ /// assert!(!bv[3 .. 6].some());
+ /// assert!(bv[6 ..].some());
+ /// ```
+ pub fn some(&self) -> bool {
+ self.any() && self.not_all()
+ }
+
+	/// Counts how many bits are set high.
+	///
+	/// # Parameters
+	///
+	/// - `&self`
+	///
+	/// # Returns
+	///
+	/// The number of high bits in the slice domain.
+	///
+	/// # Examples
+	///
+	/// ```rust
+	/// use bitvec::*;
+	///
+	/// let store: &[u8] = &[0xFD, 0x25];
+	/// let bv: &BitSlice = store.into();
+	/// assert_eq!(bv.count_ones(), 10);
+	/// ```
+	pub fn count_ones(&self) -> usize {
+		match self.inner() {
+			Inner::Minor(head, elt, tail) => {
+				(*head .. *tail).filter(|&n| elt.get::<C>(n.into())).count()
+			},
+			Inner::Major(head, body, tail) => {
+				head.map(|t| (*self.bitptr().head() .. T::SIZE)
+					.map(|n| t.get::<C>(n.into())).filter(|b| *b).count()
+				).unwrap_or(0) +
+				body.iter().map(T::count_ones).sum::<usize>() +
+				tail.map(|t| (0 .. *self.bitptr().tail())
+					.map(|n| t.get::<C>(n.into())).filter(|b| *b).count()
+				).unwrap_or(0)
+			},
+		}
+	}
+
+	/// Counts how many bits are set low.
+	///
+	/// # Parameters
+	///
+	/// - `&self`
+	///
+	/// # Returns
+	///
+	/// The number of low bits in the slice domain.
+	///
+	/// # Examples
+	///
+	/// ```rust
+	/// use bitvec::*;
+	///
+	/// let store: &[u8] = &[0xFD, 0x25];
+	/// let bv: &BitSlice = store.into();
+	/// assert_eq!(bv.count_zeros(), 6);
+	/// ```
+	pub fn count_zeros(&self) -> usize {
+		match self.inner() {
+			Inner::Minor(head, elt, tail) => {
+				(*head .. *tail).filter(|&n| !elt.get::<C>(n.into())).count()
+			},
+			Inner::Major(head, body, tail) => {
+				head.map(|t| (*self.bitptr().head() .. T::SIZE)
+					.map(|n| t.get::<C>(n.into())).filter(|b| !*b).count()
+				).unwrap_or(0) +
+				body.iter().map(T::count_zeros).sum::<usize>() +
+				tail.map(|t| (0 .. *self.bitptr().tail())
+					.map(|n| t.get::<C>(n.into())).filter(|b| !*b).count()
+				).unwrap_or(0)
+			},
+		}
+	}
+
+	/// Set all bits in the slice to a value.
+	///
+	/// # Parameters
+	///
+	/// - `&mut self`
+	/// - `value`: The bit value to which all bits in the slice will be set.
+	///
+	/// # Examples
+	///
+	/// ```rust
+	/// use bitvec::*;
+	///
+	/// let store: &mut [u8] = &mut [0];
+	/// let bv: &mut BitSlice = store.into();
+	/// bv[2 .. 6].set_all(true);
+	/// assert_eq!(bv.as_ref(), &[0b0011_1100]);
+	/// bv[3 .. 5].set_all(false);
+	/// assert_eq!(bv.as_ref(), &[0b0010_0100]);
+	/// bv[.. 1].set_all(true);
+	/// assert_eq!(bv.as_ref(), &[0b1010_0100]);
+	/// ```
+	pub fn set_all(&mut self, value: bool) {
+		match self.inner() {
+			Inner::Minor(head, _, tail) => {
+				let elt = &mut self.as_mut()[0];
+				for n in *head .. *tail {
+					elt.set::<C>(n.into(), value);
+				}
+			},
+			Inner::Major(_, _, _) => {
+				let (h, t) = (self.bitptr().head(), self.bitptr().tail());
+				if let Some(head) = self.head_mut() {
+					for n in *h .. T::SIZE {
+						head.set::<C>(n.into(), value);
+					}
+				}
+				for elt in self.body_mut() {
+					*elt = if value { T::from(!0) } else { T::from(0) };
+				}
+				if let Some(tail) = self.tail_mut() {
+					for n in 0 .. *t {
+						tail.set::<C>(n.into(), value);
+					}
+				}
+			}
+		}
+	}
+
+ /// Provides mutable traversal of the collection.
+ ///
+ /// It is impossible to implement `IndexMut` on `BitSlice`, because bits do
+ /// not have addresses, so there can be no `&mut u1`. This method allows the
+ /// client to receive an enumerated bit, and provide a new bit to set at
+ /// each index.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ /// - `func`: A function which receives a `(usize, bool)` pair of index and
+ /// value, and returns a bool. It receives the bit at each position, and
+ /// the return value is written back at that position.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ /// ```
+ pub fn for_each<F>(&mut self, func: F)
+ where F: Fn(usize, bool) -> bool {
+ for idx in 0 .. self.len() {
+ let tmp = self[idx];
+ self.set(idx, func(idx, tmp));
+ }
+ }
+
+ pub fn as_slice(&self) -> &[T] {
+ // Get the `BitPtr` structure.
+ let bp = self.bitptr();
+ // Get the pointer and element counts from it.
+ let (ptr, len) = (bp.pointer(), bp.elements());
+ // Create a slice from them.
+ unsafe { slice::from_raw_parts(ptr, len) }
+ }
+
+ /// Accesses the underlying store.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let mut bv: BitVec = bitvec![0, 0, 0, 0, 0, 0, 0, 0, 1];
+ /// for elt in bv.as_mut_slice() {
+ /// *elt += 2;
+ /// }
+ /// assert_eq!(&[2, 0b1000_0010], bv.as_slice());
+ /// ```
+ pub fn as_mut_slice(&mut self) -> &mut [T] {
+ // Get the `BitPtr` structure.
+ let bp = self.bitptr();
+ // Get the pointer and element counts from it.
+ let (ptr, len) = (bp.pointer() as *mut T, bp.elements());
+ // Create a slice from them.
+ unsafe { slice::from_raw_parts_mut(ptr, len) }
+ }
+
+ pub fn head(&self) -> Option<&T> {
+ // Transmute into the correct lifetime.
+ unsafe { mem::transmute(self.bitptr().head_elt()) }
+ }
+
+ pub fn head_mut(&mut self) -> Option<&mut T> {
+ unsafe { mem::transmute(self.bitptr().head_elt()) }
+ }
+
+ pub fn body(&self) -> &[T] {
+ // Transmute into the correct lifetime.
+ unsafe { mem::transmute(self.bitptr().body_elts()) }
+ }
+
+ pub fn body_mut(&mut self) -> &mut [T] {
+ // Reattach the correct lifetime and mutability
+ #[allow(mutable_transmutes)]
+ unsafe { mem::transmute(self.bitptr().body_elts()) }
+ }
+
+ pub fn tail(&self) -> Option<&T> {
+ // Transmute into the correct lifetime.
+ unsafe { mem::transmute(self.bitptr().tail_elt()) }
+ }
+
+ pub fn tail_mut(&mut self) -> Option<&mut T> {
+ unsafe { mem::transmute(self.bitptr().tail_elt()) }
+ }
+
+ /// Accesses the underlying pointer structure.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// The [`BitPtr`] structure of the slice handle.
+ ///
+ /// [`BitPtr`]: ../pointer/struct.BitPtr.html
+ pub fn bitptr(&self) -> BitPtr<T> {
+ self.into()
+ }
+
+ /// Splits the slice domain into its logical parts.
+ ///
+ /// Produces either the single-element partial domain, or the edge and
+ /// center elements of a multiple-element domain.
+ fn inner(&self) -> Inner<T> {
+ let bp = self.bitptr();
+ let (h, t) = (bp.head(), bp.tail());
+ // single-element, cursors not at both edges
+ if self.as_ref().len() == 1 && !(*h == 0 && *t == T::SIZE) {
+ Inner::Minor(h, &self.as_ref()[0], t)
+ }
+ else {
+ Inner::Major(self.head(), self.body(), self.tail())
+ }
+ }
+}
+
+enum Inner<'a, T: 'a + Bits> {
+ Minor(BitIdx, &'a T, BitIdx),
+ Major(Option<&'a T>, &'a [T], Option<&'a T>),
+}
+
+/// Creates an owned `BitVec<C, T>` from a borrowed `BitSlice<C, T>`.
+#[cfg(feature = "alloc")]
+impl<C, T> ToOwned for BitSlice<C, T>
+where C: Cursor, T: Bits {
+ type Owned = BitVec<C, T>;
+
+ /// Clones a borrowed `BitSlice` into an owned `BitVec`.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// # #[cfg(feature = "alloc")] {
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0; 2];
+ /// let src: &BitSlice = store.into();
+ /// let dst = src.to_owned();
+ /// assert_eq!(src, dst);
+ /// # }
+ /// ```
+ fn to_owned(&self) -> Self::Owned {
+ self.into()
+ }
+}
+
+impl<C, T> Eq for BitSlice<C, T>
+where C: Cursor, T: Bits {}
+
+impl<C, T> Ord for BitSlice<C, T>
+where C: Cursor, T: Bits {
+ fn cmp(&self, rhs: &Self) -> Ordering {
+ self.partial_cmp(rhs)
+ .unwrap_or_else(|| unreachable!("`BitSlice` has a total ordering"))
+ }
+}
+
+/// Tests if two `BitSlice`s are semantically — not bitwise — equal.
+///
+/// It is valid to compare two slices of different endianness or element types.
+///
+/// The equality condition requires that they have the same number of total bits
+/// and that each pair of bits in semantic order are identical.
+impl<A, B, C, D> PartialEq<BitSlice<C, D>> for BitSlice<A, B>
+where A: Cursor, B: Bits, C: Cursor, D: Bits {
+	/// Performs a comparison by `==`.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ /// - `rhs`: Another `BitSlice` against which to compare. This slice can
+ /// have different cursor or storage types.
+ ///
+ /// # Returns
+ ///
+ /// If the two slices are equal, by comparing the lengths and bit values at
+ /// each semantic index.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let lstore: &[u8] = &[8, 16, 32, 0];
+ /// let rstore: &[u32] = &[0x10080400];
+ /// let lbv: &BitSlice<LittleEndian, u8> = lstore.into();
+ /// let rbv: &BitSlice<BigEndian, u32> = rstore.into();
+ ///
+ /// assert_eq!(lbv, rbv);
+ /// ```
+ fn eq(&self, rhs: &BitSlice<C, D>) -> bool {
+ if self.len() != rhs.len() {
+ return false;
+ }
+ self.iter().zip(rhs.iter()).all(|(l, r)| l == r)
+ }
+}
+
+/// Allow comparison against the allocated form.
+#[cfg(feature = "alloc")]
+impl<A, B, C, D> PartialEq<BitVec<C, D>> for BitSlice<A, B>
+where A: Cursor, B: Bits, C: Cursor, D: Bits {
+ fn eq(&self, rhs: &BitVec<C, D>) -> bool {
+ <Self as PartialEq<BitSlice<C, D>>>::eq(self, &*rhs)
+ }
+}
+
+#[cfg(feature = "alloc")]
+impl<A, B, C, D> PartialEq<BitVec<C, D>> for &BitSlice<A, B>
+where A: Cursor, B: Bits, C: Cursor, D: Bits {
+ fn eq(&self, rhs: &BitVec<C, D>) -> bool {
+ <BitSlice<A, B> as PartialEq<BitSlice<C, D>>>::eq(self, &*rhs)
+ }
+}
+
+/// Compares two `BitSlice`s by semantic — not bitwise — ordering.
+///
+/// The comparison sorts by testing each index for one slice to have a set bit
+/// where the other has an unset bit. If the slices are different, the slice
+/// with the set bit sorts greater than the slice with the unset bit.
+///
+/// If one of the slices is exhausted before they differ, the longer slice is
+/// greater.
+impl<A, B, C, D> PartialOrd<BitSlice<C, D>> for BitSlice<A, B>
+where A: Cursor, B: Bits, C: Cursor, D: Bits {
+ /// Performs a comparison by `<` or `>`.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ /// - `rhs`: Another `BitSlice` against which to compare. This slice can
+ /// have different cursor or storage types.
+ ///
+ /// # Returns
+ ///
+ /// The relative ordering of `self` against `rhs`. `self` is greater if it
+ /// has a `true` bit at an index where `rhs` has a `false`; `self` is lesser
+ /// if it has a `false` bit at an index where `rhs` has a `true`; if the two
+ /// slices do not disagree then they are compared by length.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0x45];
+ /// let slice: &BitSlice = store.into();
+ /// let a = &slice[0 .. 3]; // 010
+ /// let b = &slice[0 .. 4]; // 0100
+ /// let c = &slice[0 .. 5]; // 01000
+ /// let d = &slice[4 .. 8]; // 0101
+ ///
+ /// assert!(a < b);
+ /// assert!(b < c);
+ /// assert!(c < d);
+ /// ```
+ fn partial_cmp(&self, rhs: &BitSlice<C, D>) -> Option<Ordering> {
+ for (l, r) in self.iter().zip(rhs.iter()) {
+ match (l, r) {
+ (true, false) => return Some(Ordering::Greater),
+ (false, true) => return Some(Ordering::Less),
+ _ => continue,
+ }
+ }
+ self.len().partial_cmp(&rhs.len())
+ }
+}
+
+#[cfg(feature = "alloc")]
+impl<A, B, C, D> PartialOrd<BitVec<C, D>> for BitSlice<A, B>
+where A: Cursor, B: Bits, C: Cursor, D: Bits {
+ fn partial_cmp(&self, rhs: &BitVec<C, D>) -> Option<Ordering> {
+ self.partial_cmp(&**rhs)
+ }
+}
+
+#[cfg(feature = "alloc")]
+impl<A, B, C, D> PartialOrd<BitVec<C, D>> for &BitSlice<A, B>
+where A: Cursor, B: Bits, C: Cursor, D: Bits {
+ fn partial_cmp(&self, rhs: &BitVec<C, D>) -> Option<Ordering> {
+ (*self).partial_cmp(&**rhs)
+ }
+}
+
+/// Provides write access to all elements in the underlying storage, including
+/// the partial head and tail elements if present.
+impl<C, T> AsMut<[T]> for BitSlice<C, T>
+where C: Cursor, T: Bits {
+ /// Accesses the underlying store.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ ///
+ /// # Returns
+ ///
+ /// A mutable slice of all storage elements.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &mut [u8] = &mut [0, 128];
+ /// let bv: &mut BitSlice = store.into();
+ /// let bv = &mut bv[1 .. 9];
+ ///
+ /// for elt in bv.as_mut() {
+ /// *elt += 2;
+ /// }
+ ///
+ /// assert_eq!(&[2, 130], bv.as_ref());
+ /// ```
+ fn as_mut(&mut self) -> &mut [T] {
+ self.as_mut_slice()
+ }
+}
+
+/// Provides read access to all elements in the underlying storage, including
+/// the partial head and tail elements if present.
+impl<C, T> AsRef<[T]> for BitSlice<C, T>
+where C: Cursor, T: Bits {
+ /// Accesses the underlying store.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// An immutable slice of all storage elements.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0, 128];
+ /// let bv: &BitSlice = store.into();
+ /// let bv = &bv[1 .. 9];
+ /// assert_eq!(&[0, 128], bv.as_ref());
+ /// ```
+ fn as_ref(&self) -> &[T] {
+ self.as_slice()
+ }
+}
+
+/// Builds a `BitSlice` from a slice of elements. The resulting `BitSlice` will
+/// always completely fill the original slice.
+impl<'a, C, T> From<&'a [T]> for &'a BitSlice<C, T>
+where C: Cursor, T: 'a + Bits {
+ /// Wraps a `&[T: Bits]` in a `&BitSlice<C: Cursor, T>`. The endianness must
+ /// be specified at the call site. The element type cannot be changed.
+ ///
+ /// # Parameters
+ ///
+ /// - `src`: The elements over which the new `BitSlice` will operate.
+ ///
+ /// # Returns
+ ///
+ /// A `BitSlice` representing the original element slice.
+ ///
+ /// # Panics
+ ///
+ /// The source slice must not exceed the maximum number of elements that a
+ /// `BitSlice` can contain. This value is documented in [`BitPtr`].
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let src: &[u8] = &[1, 2, 3];
+ /// let bits: &BitSlice = src.into();
+ /// assert_eq!(bits.len(), 24);
+ /// assert_eq!(bits.as_ref().len(), 3);
+ /// assert!(bits[7]); // src[0] == 0b0000_0001
+ /// assert!(bits[14]); // src[1] == 0b0000_0010
+ /// assert!(bits[22]); // src[2] == 0b0000_0011
+ /// assert!(bits[23]);
+ /// ```
+ ///
+ /// [`BitPtr`]: ../pointer/struct.BitPtr.html
+ fn from(src: &'a [T]) -> Self {
+ BitPtr::new(src.as_ptr(), src.len(), 0, T::SIZE).into()
+ }
+}
+
+/// Builds a mutable `BitSlice` from a slice of mutable elements. The resulting
+/// `BitSlice` will always completely fill the original slice.
+impl<'a, C, T> From<&'a mut [T]> for &'a mut BitSlice<C, T>
+where C: Cursor, T: 'a + Bits {
+ /// Wraps a `&mut [T: Bits]` in a `&mut BitSlice<C: Cursor, T>`. The
+ /// endianness must be specified by the call site. The element type cannot
+ /// be changed.
+ ///
+ /// # Parameters
+ ///
+ /// - `src`: The elements over which the new `BitSlice` will operate.
+ ///
+ /// # Returns
+ ///
+ /// A `BitSlice` representing the original element slice.
+ ///
+ /// # Panics
+ ///
+ /// The source slice must not exceed the maximum number of elements that a
+ /// `BitSlice` can contain. This value is documented in [`BitPtr`].
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let src: &mut [u8] = &mut [1, 2, 3];
+ /// let bits: &mut BitSlice<LittleEndian, _> = src.into();
+ /// // The first bit is the LSb of the first element.
+ /// assert!(bits[0]);
+ /// bits.set(0, false);
+ /// assert!(!bits[0]);
+ /// assert_eq!(bits.as_ref(), &[0, 2, 3]);
+ /// ```
+ ///
+ /// [`BitPtr`]: ../pointer/struct.BitPtr.html
+ fn from(src: &'a mut [T]) -> Self {
+ BitPtr::new(src.as_ptr(), src.len(), 0, T::SIZE).into()
+ }
+}
+
+/// Converts a `BitPtr` representation into a `BitSlice` handle.
+impl<'a, C, T> From<BitPtr<T>> for &'a BitSlice<C, T>
+where C: Cursor, T: 'a + Bits {
+ /// Converts a `BitPtr` representation into a `BitSlice` handle.
+ ///
+ /// # Parameters
+ ///
+ /// - `src`: The `BitPtr` representation for the slice.
+ ///
+ /// # Returns
+ ///
+ /// A `BitSlice` handle for the slice domain the `BitPtr` represents.
+ ///
+ /// # Examples
+ ///
+ /// This example is crate-internal, and cannot be used by clients.
+ ///
+ /// ```rust
+ /// # #[cfg(feature = "testing")] {
+ /// use bitvec::testing::*;
+ ///
+ /// let store: &[u8] = &[1, 2, 3];
+ /// let bp = BitPtr::new(store.as_ptr(), 3, 2, 6);
+ /// let bv: &BitSlice = bp.into();
+ /// assert_eq!(bv.len(), 20);
+ /// assert_eq!(bv.as_ref(), store);
+ /// # }
+ /// ```
+ fn from(src: BitPtr<T>) -> Self { unsafe {
+ let (ptr, len) = mem::transmute::<BitPtr<T>, (*const (), usize)>(src);
+ let store = slice::from_raw_parts(ptr, len);
+ mem::transmute::<&[()], &'a BitSlice<C, T>>(store)
+ } }
+}
+
+/// Converts a `BitPtr` representation into a `BitSlice` handle.
+impl<C, T> From<BitPtr<T>> for &mut BitSlice<C, T>
+where C: Cursor, T: Bits {
+ /// Converts a `BitPtr` representation into a `BitSlice` handle.
+ ///
+ /// # Parameters
+ ///
+ /// - `src`: The `BitPtr` representation for the slice.
+ ///
+ /// # Returns
+ ///
+ /// A `BitSlice` handle for the slice domain the `BitPtr` represents.
+ ///
+ /// # Examples
+ ///
+ /// This example is crate-internal, and cannot be used by clients.
+ ///
+ /// ```rust
+ /// # #[cfg(feature = "testing")] {
+ /// use bitvec::testing::*;
+ ///
+ /// let store: &mut [u8] = &mut [1, 2, 3];
+ /// let bp = BitPtr::new(store.as_ptr(), 3, 2, 6);
+ /// let bv: &mut BitSlice = bp.into();
+ /// assert_eq!(bv.len(), 20);
+ /// assert_eq!(bv.as_ref(), store);
+ /// # }
+ /// ```
+ fn from(src: BitPtr<T>) -> Self { unsafe {
+ let (ptr, len) = mem::transmute::<BitPtr<T>, (*mut (), usize)>(src);
+ let store = slice::from_raw_parts_mut(ptr, len);
+ mem::transmute::<&mut [()], &mut BitSlice<C, T>>(store)
+ } }
+}
+
+/// Prints the `BitSlice` for debugging.
+///
+/// The output is of the form `BitSlice<C, T> [ELT, *]` where `<C, T>` is the
+/// cursor and element type, with square brackets on each end of the bits and
+/// all the elements of the array printed in binary. The printout is always in
+/// semantic order, and may not reflect the underlying buffer. To see the
+/// underlying buffer, use `.as_ref()`.
+///
+/// The alternate character `{:#?}` prints each element on its own line, rather
+/// than having all elements on the same line.
+impl<C, T> Debug for BitSlice<C, T>
+where C: Cursor, T: Bits {
+ /// Renders the `BitSlice` type header and contents for debug.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// # #[cfg(feature = "alloc")] {
+ /// use bitvec::*;
+ /// let bits: &BitSlice<LittleEndian, u16> = &bitvec![
+ /// LittleEndian, u16;
+ /// 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1,
+ /// 0, 1
+ /// ];
+ /// assert_eq!(
+ /// "BitSlice<LittleEndian, u16> [0101000011110101, 01]",
+ /// &format!("{:?}", bits)
+ /// );
+ /// # }
+ /// ```
+ fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+ f.write_str("BitSlice<")?;
+ f.write_str(C::TYPENAME)?;
+ f.write_str(", ")?;
+ f.write_str(T::TYPENAME)?;
+ f.write_str("> ")?;
+ Display::fmt(self, f)
+ }
+}
+
+/// Prints the `BitSlice` for displaying.
+///
+/// This prints each element in turn, formatted in binary in semantic order (so
+/// the first bit seen is printed first and the last bit seen is printed last).
+/// Each element of storage is separated by a space for ease of reading.
+///
+/// The alternate character `{:#}` prints each element on its own line.
+///
+/// To see the in-memory representation, use `.as_ref()` to get access to the
+/// raw elements and print that slice instead.
+impl<C, T> Display for BitSlice<C, T>
+where C: Cursor, T: Bits {
+ /// Renders the `BitSlice` contents for display.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ /// - `f`: The formatter into which `self` is written.
+ ///
+ /// # Returns
+ ///
+ /// The result of the formatting operation.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// # #[cfg(feature = "alloc")] {
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0b01001011, 0b0100_0000];
+ /// let bits: &BitSlice = store.into();
+ /// assert_eq!("[01001011, 01]", &format!("{}", &bits[.. 10]));
+ /// # }
+ /// ```
+ fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+ struct Part<'a>(&'a str);
+ impl<'a> Debug for Part<'a> {
+ fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+ f.write_str(&self.0)
+ }
+ }
+ let mut dbg = f.debug_list();
+ // Empty slice
+ if self.is_empty() {
+ return dbg.finish();
+ }
+ else {
+ // Unfortunately, T::SIZE cannot be used as the size for the array,
+ // due to limitations in the type system. As such, set
+ // it to the maximum used size.
+ //
+ // This allows the writes to target a static buffer, rather
+ // than a dynamic string, making the formatter usable in
+ // `no-std` contexts.
+ let mut w: [u8; 64] = [0; 64];
+ let writer =
+ |l: &mut DebugList, w: &mut [u8; 64], e: &T, from: u8, to: u8| {
+ let (from, to) = (from as usize, to as usize);
+ for n in from .. to {
+ w[n] = if e.get::<C>((n as u8).into()) { b'1' }
+ else { b'0' };
+ }
+ l.entry(&Part(unsafe {
+ str::from_utf8_unchecked(&w[from .. to])
+ }));
+ };
+ match self.inner() {
+ // Single-element slice
+ Inner::Minor(head, elt, tail) => {
+ writer(&mut dbg, &mut w, elt, *head, *tail)
+ },
+ // Multi-element slice
+ Inner::Major(head, body, tail) => {
+ if let Some(head) = head {
+ let hc = self.bitptr().head();
+ writer(&mut dbg, &mut w, head, *hc, T::SIZE);
+ }
+ for elt in body {
+ writer(&mut dbg, &mut w, elt, 0, T::SIZE);
+ }
+ if let Some(tail) = tail {
+ let tc = self.bitptr().tail();
+ writer(&mut dbg, &mut w, tail, 0, *tc);
+ }
+ },
+ }
+ dbg.finish()
+ }
+ }
+}
+
+/// Writes the contents of the `BitSlice`, in semantic bit order, into a hasher.
+impl<C, T> Hash for BitSlice<C, T>
+where C: Cursor, T: Bits {
+ /// Writes each bit of the `BitSlice`, as a full `bool`, into the hasher.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ /// - `hasher`: The hashing state into which the slice will be written.
+ ///
+ /// # Type Parameters
+ ///
+ /// - `H: Hasher`: The type of the hashing algorithm which receives the bits
+ /// of `self`.
+ fn hash<H>(&self, hasher: &mut H)
+ where H: Hasher {
+ for bit in self {
+ hasher.write_u8(bit as u8);
+ }
+ }
+}
+
+/// Produces a read-only iterator over all the bits in the `BitSlice`.
+///
+/// This iterator follows the ordering in the `BitSlice` type, and implements
+/// `ExactSizeIterator` as `BitSlice` has a known, fixed, length, and
+/// `DoubleEndedIterator` as it has known ends.
+impl<'a, C, T> IntoIterator for &'a BitSlice<C, T>
+where C: Cursor, T: 'a + Bits {
+ type Item = bool;
+ type IntoIter = Iter<'a, C, T>;
+
+ /// Iterates over the slice.
+ ///
+ /// # Parameters
+ ///
+ /// - `self`
+ ///
+ /// # Returns
+ ///
+ /// An iterator over the slice domain.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0b1010_1100];
+ /// let bits: &BitSlice = store.into();
+ /// let mut count = 0;
+ /// for bit in bits {
+ /// if bit { count += 1; }
+ /// }
+ /// assert_eq!(count, 4);
+ /// ```
+ fn into_iter(self) -> Self::IntoIter {
+ Iter {
+ inner: self
+ }
+ }
+}
+
+/// Performs unsigned addition in place on a `BitSlice`.
+///
+/// If the addend bitstream is shorter than `self`, the addend is zero-extended
+/// at the left (so that its final bit matches with `self`’s final bit). If the
+/// addend is longer, the excess front length is unused.
+///
+/// Addition proceeds from the right ends of each slice towards the left.
+/// Because this trait is forbidden from returning anything, the final carry-out
+/// bit is discarded.
+///
+/// Note that, unlike `BitVec`, there is no subtraction implementation until I
+/// find a subtraction algorithm that does not require modifying the subtrahend.
+///
+/// Subtraction can be implemented by negating the intended subtrahend yourself
+/// and then using addition, or by using `BitVec`s instead of `BitSlice`s.
+///
+/// # Type Parameters
+///
+/// - `I: IntoIterator<Item=bool, IntoIter: DoubleEndedIterator>`: The bitstream
+/// to add into `self`. It must be finite and double-ended, since addition
+/// operates in reverse.
+impl<C, T, I> AddAssign<I> for BitSlice<C, T>
+where C: Cursor, T: Bits,
+ I: IntoIterator<Item=bool>, I::IntoIter: DoubleEndedIterator {
+ /// Performs unsigned wrapping addition in place.
+ ///
+ /// # Examples
+ ///
+ /// This example shows addition of a slice wrapping from max to zero.
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &mut [u8] = &mut [0b1110_1111, 0b0000_0001];
+ /// let bv: &mut BitSlice = store.into();
+ /// let (nums, one) = bv.split_at_mut(12);
+ /// let (accum, steps) = nums.split_at_mut(4);
+ /// *accum += &*one;
+ /// assert_eq!(accum, &steps[.. 4]);
+ /// *accum += &*one;
+ /// assert_eq!(accum, &steps[4 ..]);
+ /// ```
+ fn add_assign(&mut self, addend: I) {
+ use core::iter::repeat;
+ // zero-extend the addend if it’s shorter than self
+ let mut addend_iter = addend.into_iter().rev().chain(repeat(false));
+ let mut c = false;
+ for place in (0 .. self.len()).rev() {
+ // See `BitVec::AddAssign`
+ static JUMP: [u8; 8] = [0, 2, 2, 1, 2, 1, 1, 3];
+ let a = self[place];
+ let b = addend_iter.next().unwrap(); // addend is an infinite source
+ let idx = ((c as u8) << 2) | ((a as u8) << 1) | (b as u8);
+ let yz = JUMP[idx as usize];
+ let (y, z) = (yz & 2 != 0, yz & 1 != 0);
+ self.set(place, y);
+ c = z;
+ }
+ }
+}
+
+/// Performs the Boolean `AND` operation against another bitstream and writes
+/// the result into `self`. If the other bitstream ends before `self`, the
+/// remaining bits of `self` are cleared.
+///
+/// # Type Parameters
+///
+/// - `I: IntoIterator<Item=bool>`: A stream of bits, which may be a `BitSlice`
+/// or some other bit producer as desired.
+impl<C, T, I> BitAndAssign<I> for BitSlice<C, T>
+where C: Cursor, T: Bits, I: IntoIterator<Item=bool> {
+ /// `AND`s a bitstream into a slice.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ /// - `rhs`: The bitstream to `AND` into `self`.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &mut [u8] = &mut [0b0101_0100];
+ /// let other: & [u8] = & [0b0011_0000];
+ /// let lhs: &mut BitSlice = store.into();
+ /// let rhs: & BitSlice = other.into();
+ /// lhs[.. 6] &= &rhs[.. 4];
+ /// assert_eq!(store[0], 0b0001_0000);
+ /// ```
+ fn bitand_assign(&mut self, rhs: I) {
+ use core::iter;
+ rhs.into_iter()
+ .chain(iter::repeat(false))
+ .enumerate()
+ .take(self.len())
+ .for_each(|(idx, bit)| {
+ let val = self[idx] & bit;
+ self.set(idx, val);
+ });
+ }
+}
+
+/// Performs the Boolean `OR` operation against another bitstream and writes the
+/// result into `self`. If the other bitstream ends before `self`, the remaining
+/// bits of `self` are not affected.
+///
+/// # Type Parameters
+///
+/// - `I: IntoIterator<Item=bool>`: A stream of bits, which may be a `BitSlice`
+/// or some other bit producer as desired.
+impl<C, T, I> BitOrAssign<I> for BitSlice<C, T>
+where C: Cursor, T: Bits, I: IntoIterator<Item=bool> {
+ /// `OR`s a bitstream into a slice.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ /// - `rhs`: The bitstream to `OR` into `self`.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ /// let store: &mut [u8] = &mut [0b0101_0100];
+ /// let other: & [u8] = & [0b0011_0000];
+ /// let lhs: &mut BitSlice = store.into();
+ /// let rhs: & BitSlice = other.into();
+ /// lhs[.. 6] |= &rhs[.. 4];
+ /// assert_eq!(store[0], 0b0111_0100);
+ /// ```
+ fn bitor_assign(&mut self, rhs: I) {
+ for (idx, bit) in rhs.into_iter().enumerate().take(self.len()) {
+ let val = self[idx] | bit;
+ self.set(idx, val);
+ }
+ }
+}
+
+/// Performs the Boolean `XOR` operation against another bitstream and writes
+/// the result into `self`. If the other bitstream ends before `self`, the
+/// remaining bits of `self` are not affected.
+///
+/// # Type Parameters
+///
+/// - `I: IntoIterator<Item=bool>`: A stream of bits, which may be a `BitSlice`
+/// or some other bit producer as desired.
+impl<C, T, I> BitXorAssign<I> for BitSlice<C, T>
+where C: Cursor, T: Bits, I: IntoIterator<Item=bool> {
+ /// `XOR`s a bitstream into a slice.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ /// - `rhs`: The bitstream to `XOR` into `self`.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &mut [u8] = &mut [0b0101_0100];
+ /// let other: & [u8] = & [0b0011_0000];
+ /// let lhs: &mut BitSlice = store.into();
+ /// let rhs: & BitSlice = other.into();
+ /// lhs[.. 6] ^= &rhs[.. 4];
+ /// assert_eq!(store[0], 0b0110_0100);
+ /// ```
+ fn bitxor_assign(&mut self, rhs: I) {
+ rhs.into_iter()
+ .enumerate()
+ .take(self.len())
+ .for_each(|(idx, bit)| {
+ let val = self[idx] ^ bit;
+ self.set(idx, val);
+ })
+ }
+}
+
+/// Indexes a single bit by semantic count. The index must be less than the
+/// length of the `BitSlice`.
+impl<C, T> Index<usize> for BitSlice<C, T>
+where C: Cursor, T: Bits {
+ type Output = bool;
+
+ /// Looks up a single bit by semantic index.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ /// - `index`: The semantic index of the bit to look up.
+ ///
+ /// # Returns
+ ///
+ /// The value of the bit at the requested index.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0b0010_0000];
+ /// let bits: &BitSlice = store.into();
+ /// assert!(bits[2]);
+ /// assert!(!bits[3]);
+ /// ```
+ fn index(&self, index: usize) -> &Self::Output {
+ let len = self.len();
+ assert!(index < len, "Index out of range: {} >= {}", index, len);
+
+ let h = self.bitptr().head();
+ let (elt, bit) = h.offset::<T>(index as isize);
+ if self.as_ref()[elt as usize].get::<C>(bit) { &true } else { &false }
+ }
+}
+
+/// Indexes a subslice by a half-open range of bit indices. Both endpoints
+/// must lie within the slice, and the range may not be reversed.
+impl<C, T> Index<Range<usize>> for BitSlice<C, T>
+where C: Cursor, T: Bits {
+ type Output = Self;
+
+ fn index(&self, Range { start, end }: Range<usize>) -> &Self::Output {
+ let len = self.len();
+ assert!(
+ start <= len,
+ "Index {} out of range: {}",
+ start,
+ len,
+ );
+ assert!(end <= len, "Index {} out of range: {}", end, len);
+ assert!(end >= start, "Ranges can only run from low to high");
+ let (data, _, head, _) = self.bitptr().raw_parts();
+ // Find the number of elements to drop from the front, and the index of
+ // the new head
+ let (skip, new_head) = head.offset::<T>(start as isize);
+ // Find the number of elements contained in the new span, and the index
+ // of the new tail.
+ let (new_elts, new_tail) = new_head.span::<T>(end - start);
+ // Rebuild a `BitPtr` describing the narrowed region and reinterpret
+ // it as a slice reference.
+ BitPtr::new(
+ unsafe { data.offset(skip) },
+ new_elts,
+ new_head,
+ new_tail,
+ ).into()
+ }
+}
+
+/// Mutably indexes a subslice by a half-open range of bit indices.
+impl<C, T> IndexMut<Range<usize>> for BitSlice<C, T>
+where C: Cursor, T: Bits {
+ fn index_mut(
+ &mut self,
+ Range { start, end }: Range<usize>,
+ ) -> &mut Self::Output {
+ // Get an immutable slice, and then type-hack mutability back in.
+ // NOTE(review): this relies on the receiver being `&mut self`, so the
+ // reborrowed region is exclusively held — confirm `bitptr().into()`
+ // preserves provenance.
+ (&self[start .. end]).bitptr().into()
+ }
+}
+
+/// Indexes a subslice by an inclusive range of bit indices.
+impl<C, T> Index<RangeInclusive<usize>> for BitSlice<C, T>
+where C: Cursor, T: Bits {
+ type Output = Self;
+
+ fn index(&self, index: RangeInclusive<usize>) -> &Self::Output {
+ // Defer to the half-open implementation.
+ // NOTE(review): `*index.end() + 1` overflows when `end == usize::MAX`.
+ &self[*index.start() .. *index.end() + 1]
+ }
+}
+
+/// Mutably indexes a subslice by an inclusive range of bit indices.
+impl<C, T> IndexMut<RangeInclusive<usize>> for BitSlice<C, T>
+where C: Cursor, T: Bits {
+ fn index_mut(&mut self, index: RangeInclusive<usize>) -> &mut Self::Output {
+ // NOTE(review): `*index.end() + 1` overflows when `end == usize::MAX`.
+ &mut self[*index.start() .. *index.end() + 1]
+ }
+}
+
+/// Indexes a subslice from a start bit to the end of the slice.
+impl<C, T> Index<RangeFrom<usize>> for BitSlice<C, T>
+where C: Cursor, T: Bits {
+ type Output = Self;
+
+ fn index(&self, RangeFrom { start }: RangeFrom<usize>) -> &Self::Output {
+ &self[start .. self.len()]
+ }
+}
+
+/// Mutably indexes a subslice from a start bit to the end of the slice.
+impl<C, T> IndexMut<RangeFrom<usize>> for BitSlice<C, T>
+where C: Cursor, T: Bits {
+ fn index_mut(
+ &mut self,
+ RangeFrom { start }: RangeFrom<usize>,
+ ) -> &mut Self::Output {
+ // Compute the length up front to avoid overlapping borrows of `self`.
+ let len = self.len();
+ &mut self[start .. len]
+ }
+}
+
+/// Indexes the full slice; `[..]` is the identity.
+impl<C, T> Index<RangeFull> for BitSlice<C, T>
+where C: Cursor, T: Bits {
+ type Output = Self;
+
+ fn index(&self, _: RangeFull) -> &Self::Output {
+ self
+ }
+}
+
+/// Mutably indexes the full slice; `[..]` is the identity.
+impl<C, T> IndexMut<RangeFull> for BitSlice<C, T>
+where C: Cursor, T: Bits {
+ fn index_mut(&mut self, _: RangeFull) -> &mut Self::Output {
+ self
+ }
+}
+
+/// Indexes a subslice from the front of the slice to an exclusive end bit.
+impl<C, T> Index<RangeTo<usize>> for BitSlice<C, T>
+where C: Cursor, T: Bits {
+ type Output = Self;
+
+ fn index(&self, RangeTo { end }: RangeTo<usize>) -> &Self::Output {
+ &self[0 .. end]
+ }
+}
+
+/// Mutably indexes a subslice from the front of the slice to an exclusive
+/// end bit.
+impl<C, T> IndexMut<RangeTo<usize>> for BitSlice<C, T>
+where C: Cursor, T: Bits {
+ fn index_mut(
+ &mut self,
+ RangeTo { end }: RangeTo<usize>,
+ ) -> &mut Self::Output {
+ &mut self[0 .. end]
+ }
+}
+
+/// Indexes a subslice from the front of the slice to an inclusive end bit.
+impl<C, T> Index<RangeToInclusive<usize>> for BitSlice<C, T>
+where C: Cursor, T: Bits {
+ type Output = Self;
+
+ fn index(
+ &self,
+ RangeToInclusive { end }: RangeToInclusive<usize>,
+ ) -> &Self::Output {
+ // NOTE(review): `end + 1` overflows when `end == usize::MAX`.
+ &self[0 .. end + 1]
+ }
+}
+
+/// Mutably indexes a subslice from the front of the slice to an inclusive
+/// end bit.
+impl<C, T> IndexMut<RangeToInclusive<usize>> for BitSlice<C, T>
+where C: Cursor, T: Bits {
+ fn index_mut(
+ &mut self,
+ RangeToInclusive { end }: RangeToInclusive<usize>,
+ ) -> &mut Self::Output {
+ &mut self[0 .. end + 1]
+ }
+}
+
+/// Performs fixed-width 2’s-complement negation of a `BitSlice`.
+///
+/// Unlike the `!` operator (`Not` trait), the unary `-` operator treats the
+/// `BitSlice` as if it represents a signed 2’s-complement integer of fixed
+/// width. The negation of a number in 2’s complement is defined as its
+/// inversion (using `!`) plus one, and on fixed-width numbers has the following
+/// discontinuities:
+///
+/// - A slice whose bits are all zero is considered to represent the number zero
+/// which negates as itself.
+/// - A slice whose bits are all one is considered to represent the most
+/// negative number, which has no corresponding positive number, and thus
+/// negates as zero.
+///
+/// This behavior was chosen so that all possible values would have *some*
+/// output, and so that repeated application converges at idempotence. The most
+/// negative input can never be reached by negation, but `--MOST_NEG` converges
+/// at the least unreasonable fallback value, 0.
+///
+/// Because `BitSlice` cannot move, the negation is performed in place.
+impl<'a, C, T> Neg for &'a mut BitSlice<C, T>
+where C: Cursor, T: 'a + Bits {
+ type Output = Self;
+
+ /// Perform 2’s-complement fixed-width negation.
+ ///
+ /// Negation is accomplished by inverting the bits and adding one. This has
+ /// one edge case: `1000…`, the most negative number for its width, will
+ /// negate to zero instead of itself. It has no corresponding positive
+ /// number to which it can negate.
+ ///
+ /// # Parameters
+ ///
+ /// - `self`
+ ///
+ /// # Examples
+ ///
+ /// The contortions shown here are a result of this operator applying to a
+ /// mutable reference, and this example balancing access to the original
+ /// `BitVec` for comparison with acquiring a mutable borrow *as a slice* to
+ /// ensure that the `BitSlice` implementation is used, not the `BitVec`.
+ ///
+ /// Negate an arbitrary positive number (first bit unset).
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &mut [u8] = &mut [0b0110_1010];
+ /// let bv: &mut BitSlice = store.into();
+ /// eprintln!("{:?}", bv.split_at(4));
+ /// let num = &mut bv[.. 4];
+ /// -num;
+ /// eprintln!("{:?}", bv.split_at(4));
+ /// assert_eq!(&bv[.. 4], &bv[4 ..]);
+ /// ```
+ ///
+ /// Negate an arbitrary negative number. This example will use the above
+ /// result to demonstrate round-trip correctness.
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &mut [u8] = &mut [0b1010_0110];
+ /// let bv: &mut BitSlice = store.into();
+ /// let num = &mut bv[.. 4];
+ /// -num;
+ /// assert_eq!(&bv[.. 4], &bv[4 ..]);
+ /// ```
+ ///
+ /// Negate the most negative number, which will become zero, and show
+ /// convergence at zero.
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &mut [u8] = &mut [128];
+ /// let bv: &mut BitSlice = store.into();
+ /// let num = &mut bv[..];
+ /// -num;
+ /// assert!(bv.not_any());
+ /// let num = &mut bv[..];
+ /// -num;
+ /// assert!(bv.not_any());
+ /// ```
+ fn neg(self) -> Self::Output {
+ // negative zero is zero. The invert-and-add will result in zero, but
+ // this case can be detected quickly.
+ if self.is_empty() || self.not_any() {
+ return self;
+ }
+ // The most negative number (leading one, all zeroes else) negates to
+ // zero.
+ if self[0] {
+ // Testing the whole range, rather than [1 ..], is more likely to
+ // hit the fast path.
+ self.set(0, false);
+ if self.not_any() {
+ return self;
+ }
+ self.set(0, true);
+ }
+ let _ = Not::not(&mut *self);
+ // Build a one-bit slice holding `1` to add. The source element is
+ // all-ones, so its first bit is set regardless of which cursor `C`
+ // is in use.
+ let one: &[T] = &[T::from(!0)];
+ let one_bs: &BitSlice<C, T> = one.into();
+ AddAssign::add_assign(&mut *self, &one_bs[.. 1]);
+ self
+ }
+}
+
+/// Flips all bits in the slice, in place.
+impl<'a, C, T> Not for &'a mut BitSlice<C, T>
+where C: Cursor, T: 'a + Bits {
+ type Output = Self;
+
+ /// Inverts all bits in the slice.
+ ///
+ /// This will not affect bits outside the slice in slice storage elements.
+ ///
+ /// # Parameters
+ ///
+ /// - `self`
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &mut [u8] = &mut [0; 2];
+ /// let bv: &mut BitSlice = store.into();
+ /// let bits = &mut bv[2 .. 14];
+ /// let new_bits = !bits;
+ /// // The `bits` binding is consumed by the `!` operator, and a new reference
+ /// // is returned.
+ /// // assert_eq!(bits.as_ref(), &[!0, !0]);
+ /// assert_eq!(new_bits.as_ref(), &[0x3F, 0xFC]);
+ /// ```
+ fn not(self) -> Self::Output {
+ match self.inner() {
+ // The slice lives entirely within one storage element: flip only
+ // the bits between the head and tail cursors.
+ Inner::Minor(head, _, tail) => {
+ let elt = &mut self.as_mut()[0];
+ for n in *head .. *tail {
+ let tmp = elt.get::<C>(n.into());
+ elt.set::<C>(n.into(), !tmp);
+ }
+ },
+ // The slice spans elements: flip the live bits of the partial head
+ // element, invert the full body elements wholesale, then flip the
+ // live bits of the partial tail element.
+ Inner::Major(_, _, _) => {
+ let head_bit = self.bitptr().head();
+ let tail_bit = self.bitptr().tail();
+ if let Some(head) = self.head_mut() {
+ for n in *head_bit .. T::SIZE {
+ let tmp = head.get::<C>(n.into());
+ head.set::<C>(n.into(), !tmp);
+ }
+ }
+ for elt in self.body_mut() {
+ *elt = !*elt;
+ }
+ if let Some(tail) = self.tail_mut() {
+ for n in 0 .. *tail_bit {
+ let tmp = tail.get::<C>(n.into());
+ tail.set::<C>(n.into(), !tmp);
+ }
+ }
+ },
+ }
+ self
+ }
+}
+
+// NOTE(review): `__bitslice_shift!` is defined elsewhere in this crate; it
+// appears to generate `Shl`/`Shr` (and assign) impls whose shift amount is
+// each of these primitive types, forwarding to the `ShlAssign`/`ShrAssign`
+// impls below — confirm against the macro definition.
+__bitslice_shift!(u8, u16, u32, u64, i8, i16, i32, i64);
+
+/// Shifts all bits in the array to the left — **DOWN AND TOWARDS THE FRONT**.
+///
+/// On primitives, the left-shift operator `<<` moves bits away from the origin
+/// and towards the ceiling. This is because we label the bits in a primitive
+/// with the minimum on the right and the maximum on the left, which is
+/// big-endian bit order. This increases the value of the primitive being
+/// shifted.
+///
+/// **THAT IS NOT HOW `BitSlice` WORKS!**
+///
+/// `BitSlice` defines its layout with the minimum on the left and the maximum
+/// on the right! Thus, left-shifting moves bits towards the **minimum**.
+///
+/// In BigEndian order, the effect in memory will be what you expect the `<<`
+/// operator to do.
+///
+/// **In LittleEndian order, the effect will be equivalent to using `>>` on**
+/// **the primitives in memory!**
+///
+/// # Notes
+///
+/// In order to preserve the effects in memory that this operator traditionally
+/// expects, the bits that are emptied by this operation are zeroed rather than
+/// left to their old value.
+///
+/// The shift amount is modulated against the array length, so it is not an
+/// error to pass a shift amount greater than the array length.
+///
+/// A shift amount of zero is a no-op, and returns immediately.
+impl<C, T> ShlAssign<usize> for BitSlice<C, T>
+where C: Cursor, T: Bits {
+ /// Shifts a slice left, in place.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ /// - `shamt`: The shift amount. If this is greater than the length, then
+ /// the slice is zeroed immediately.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &mut [u8] = &mut[0x4B, 0xA5];
+ /// let bv: &mut BitSlice = store.into();
+ /// let bits = &mut bv[2 .. 14];
+ /// *bits <<= 3;
+ /// assert_eq!(bits.as_ref(), &[0b01_011_101, 0b001_000_01]);
+ /// ```
+ fn shl_assign(&mut self, shamt: usize) {
+ // A zero shift is a no-op.
+ if shamt == 0 {
+ return;
+ }
+ let len = self.len();
+ // Shifting by the full bit length or more clears the whole slice.
+ if shamt >= len {
+ self.set_all(false);
+ return;
+ }
+ // If the shift amount is an even multiple of the element width, use
+ // `ptr::copy` instead of a bitwise crawl.
+ if shamt & T::MASK as usize == 0 {
+ // Compute the shift distance measured in elements.
+ let offset = shamt >> T::BITS;
+ // Compute the number of elements that will remain.
+ let rem = self.as_ref().len() - offset;
+ // Clear the bits after the tail cursor before the move. After the
+ // element copy, those physical bits land inside the live region and
+ // so must already be zero. They live in the last storage *element*;
+ // the previous code indexed with the bit length (`len - 1`), which
+ // panicked for any slice occupying more than one element.
+ let last_elt = self.as_ref().len() - 1;
+ for n in *self.bitptr().tail() .. T::SIZE {
+ self.as_mut()[last_elt].set::<C>(n.into(), false);
+ }
+ // Memory model: suppose we have this slice of sixteen elements,
+ // that is shifted five elements to the left. We have three
+ // pointers and two lengths to manage.
+ // - rem is 11
+ // - offset is 5
+ // - head is [0]
+ // - body is [5; 11]
+ // - tail is [11]
+ // [ 0 1 2 3 4 5 6 7 8 9 a b c d e f ]
+ // ^-------before------^
+ // ^-------after-------^ 0 0 0 0 0
+ // Pointer to the front of the slice
+ let head: *mut T = self.as_mut_ptr();
+ // Pointer to the front of the section that will move and be
+ // retained
+ let body: *const T = &self.as_ref()[offset];
+ // Pointer to the back of the slice that will be zero-filled.
+ let tail: *mut T = &mut self.as_mut()[rem];
+ unsafe {
+ ptr::copy(body, head, rem);
+ ptr::write_bytes(tail, 0, offset);
+ }
+ return;
+ }
+ // Otherwise, crawl: move each retained bit down by `shamt`, then zero
+ // the vacated region at the back.
+ for (to, from) in (shamt .. len).enumerate() {
+ let val = self[from];
+ self.set(to, val);
+ }
+ for bit in (len - shamt) .. len {
+ self.set(bit, false);
+ }
+ }
+}
+
+/// Shifts all bits in the array to the right — **UP AND TOWARDS THE BACK**.
+///
+/// On primitives, the right-shift operator `>>` moves bits towards the origin
+/// and away from the ceiling. This is because we label the bits in a primitive
+/// with the minimum on the right and the maximum on the left, which is
+/// big-endian bit order. This decreases the value of the primitive being
+/// shifted.
+///
+/// **THAT IS NOT HOW `BitSlice` WORKS!**
+///
+/// `BitSlice` defines its layout with the minimum on the left and the maximum
+/// on the right! Thus, right-shifting moves bits towards the **maximum**.
+///
+/// In Big-Endian order, the effect in memory will be what you expect the `>>`
+/// operator to do.
+///
+/// **In LittleEndian order, the effect will be equivalent to using `<<` on**
+/// **the primitives in memory!**
+///
+/// # Notes
+///
+/// In order to preserve the effects in memory that this operator traditionally
+/// expects, the bits that are emptied by this operation are zeroed rather than
+/// left to their old value.
+///
+/// The shift amount is modulated against the array length, so it is not an
+/// error to pass a shift amount greater than the array length.
+///
+/// A shift amount of zero is a no-op, and returns immediately.
+impl<C, T> ShrAssign<usize> for BitSlice<C, T>
+where C: Cursor, T: Bits {
+ /// Shifts a slice right, in place.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ /// - `shamt`: The shift amount. If this is greater than the length, then
+ /// the slice is zeroed immediately.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &mut [u8] = &mut[0x4B, 0xA5];
+ /// let bv: &mut BitSlice = store.into();
+ /// let bits = &mut bv[2 .. 14];
+ /// *bits >>= 3;
+ /// assert_eq!(bits.as_ref(), &[0b01_000_00_1, 0b011_101_01])
+ /// ```
+ fn shr_assign(&mut self, shamt: usize) {
+ if shamt == 0 {
+ return;
+ }
+ let len = self.len();
+ if shamt >= len {
+ self.set_all(false);
+ return;
+ }
+ // If the shift amount is an even multiple of the element width, use
+ // `ptr::copy` instead of a bitwise crawl.
+ if shamt & T::MASK as usize == 0 {
+ // Compute the shift amount measured in elements.
+ let offset = shamt >> T::BITS;
+ // Compute the number of elements that will remain.
+ let rem = self.as_ref().len() - offset;
+ // Clear the bits ahead of the head cursor before the move. After
+ // the element copy those physical bits land inside the live
+ // region, so they must be zero first. They live in element 0.
+ for n in 0 .. *self.bitptr().head() {
+ self.as_mut()[0].set::<C>(n.into(), false);
+ }
+ // Memory model: suppose we have this slice of sixteen elements,
+ // that is shifted five elements to the right. We have two pointers
+ // and two lengths to manage.
+ // - rem is 11
+ // - offset is 5
+ // - head is [0; 11]
+ // - body is [5]
+ // [ 0 1 2 3 4 5 6 7 8 9 a b c d e f ]
+ // ^-------before------^
+ // 0 0 0 0 0 ^-------after-------^
+ let head: *mut T = self.as_mut_ptr();
+ let body: *mut T = &mut self.as_mut()[offset];
+ unsafe {
+ ptr::copy(head, body, rem);
+ ptr::write_bytes(head, 0, offset);
+ }
+ return;
+ }
+ // Otherwise, crawl. Iterate from the high end downward so that each
+ // source bit is read before its own position is overwritten.
+ for (from, to) in (shamt .. len).enumerate().rev() {
+ let val = self[from];
+ self.set(to, val);
+ }
+ // Zero the vacated region at the front.
+ for bit in 0 .. shamt {
+ self.set(bit.into(), false);
+ }
+ }
+}
+
+/// State keeper for chunked iteration over a `BitSlice`.
+///
+/// # Type Parameters
+///
+/// - `C: Cursor`: The bit-order type of the underlying `BitSlice`.
+/// - `T: 'a + Bits`: The storage type of the underlying `BitSlice`.
+///
+/// # Lifetimes
+///
+/// - `'a`: The lifetime of the underlying `BitSlice`.
+#[derive(Clone, Debug)]
+pub struct Chunks<'a, C, T>
+where C: Cursor, T: 'a + Bits {
+ /// The `BitSlice` being iterated.
+ inner: &'a BitSlice<C, T>,
+ /// The width of the chunks.
+ /// NOTE(review): iteration divides and mods by this value; the
+ /// constructor presumably rejects `width == 0` — confirm at call site.
+ width: usize,
+}
+
+impl<'a, C, T> DoubleEndedIterator for Chunks<'a, C, T>
+where C: Cursor, T: 'a + Bits {
+ /// Produces the next chunk from the back of the slice.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ ///
+ /// # Returns
+ ///
+ /// The last chunk in the slice, if any.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[1];
+ /// let bv: &BitSlice = store.into();
+ /// let mut chunks = bv.chunks(5);
+ /// assert_eq!(chunks.next_back(), Some(&bv[5 ..]));
+ /// assert_eq!(chunks.next_back(), Some(&bv[.. 5]));
+ /// assert!(chunks.next_back().is_none());
+ /// ```
+ fn next_back(&mut self) -> Option<Self::Item> {
+ if self.inner.is_empty() {
+ return None;
+ }
+ let len = self.inner.len();
+ // The trailing chunk is partial (`rem` bits) unless the length is an
+ // exact multiple of the chunk width.
+ let rem = len % self.width;
+ let size = if rem == 0 { self.width } else { rem };
+ let (head, tail) = self.inner.split_at(len - size);
+ self.inner = head;
+ Some(tail)
+ }
+}
+
+/// Mark that the iterator has an exact size: `size_hint` is exact, so
+/// `len()` may be trusted.
+impl<'a, C, T> ExactSizeIterator for Chunks<'a, C, T>
+where C: Cursor, T: 'a + Bits {}
+
+/// Mark that the iterator will not resume after halting: once `next` returns
+/// `None`, it returns `None` forever.
+impl<'a, C, T> FusedIterator for Chunks<'a, C, T>
+where C: Cursor, T: 'a + Bits {}
+
+impl<'a, C, T> Iterator for Chunks<'a, C, T>
+where C: Cursor, T: 'a + Bits {
+ type Item = &'a BitSlice<C, T>;
+
+ /// Advances the iterator by one, returning the first chunk in it (if any).
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ ///
+ /// # Returns
+ ///
+ /// The leading chunk in the iterator, if any.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0x80];
+ /// let bv: &BitSlice = store.into();
+ /// let mut chunks = bv.chunks(5);
+ /// assert_eq!(chunks.next(), Some(&bv[.. 5]));
+ /// assert_eq!(chunks.next(), Some(&bv[5 ..]));
+ /// assert!(chunks.next().is_none());
+ /// ```
+ fn next(&mut self) -> Option<Self::Item> {
+ use core::cmp::min;
+ if self.inner.is_empty() {
+ return None;
+ }
+ // Take at most `width` bits from the front; the final chunk may be
+ // shorter.
+ let size = min(self.inner.len(), self.width);
+ let (head, tail) = self.inner.split_at(size);
+ self.inner = tail;
+ Some(head)
+ }
+
+ /// Hints at the number of chunks remaining in the iterator.
+ ///
+ /// Because the exact size is always known, this always produces
+ /// `(len, Some(len))`.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// - `usize`: The minimum chunks remaining.
+ /// - `Option<usize>`: The maximum chunks remaining.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0x4B];
+ /// let bv: &BitSlice = store.into();
+ /// let mut chunks = bv.chunks(5);
+ /// assert_eq!(chunks.size_hint(), (2, Some(2)));
+ /// chunks.next();
+ /// assert_eq!(chunks.size_hint(), (1, Some(1)));
+ /// chunks.next();
+ /// assert_eq!(chunks.size_hint(), (0, Some(0)));
+ /// ```
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ if self.inner.is_empty() {
+ return (0, Some(0));
+ }
+ let len = self.inner.len();
+ // Count the full chunks, plus one for a trailing partial chunk.
+ let (n, r) = (len / self.width, len % self.width);
+ let len = n + (r > 0) as usize;
+ (len, Some(len))
+ }
+
+ /// Counts how many chunks are live in the iterator, consuming it.
+ ///
+ /// # Parameters
+ ///
+ /// - `self`
+ ///
+ /// # Returns
+ ///
+ /// The number of chunks remaining in the iterator.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0x4B];
+ /// let bv: &BitSlice = store.into();
+ /// assert_eq!(bv.chunks(3).count(), 3);
+ /// ```
+ fn count(self) -> usize {
+ self.len()
+ }
+
+ /// Advances the iterator by `n` chunks, starting from zero.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ /// - `n`: The number of chunks to skip, before producing the next bit after
+ /// skips. If this overshoots the iterator’s remaining length, then the
+ /// iterator is marked empty before returning `None`.
+ ///
+ /// # Returns
+ ///
+ /// If `n` does not overshoot the iterator’s bounds, this produces the `n`th
+ /// bit after advancing the iterator to it, discarding the intermediate
+ /// chunks.
+ ///
+ /// If `n` does overshoot the iterator’s bounds, this empties the iterator
+ /// and returns `None`.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0x4B];
+ /// let bv: &BitSlice = store.into();
+ /// let mut chunks = bv.chunks(3);
+ /// assert_eq!(chunks.nth(1), Some(&bv[3 .. 6]));
+ /// assert_eq!(chunks.nth(0), Some(&bv[6 ..]));
+ /// assert!(chunks.nth(0).is_none());
+ /// ```
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ use core::cmp::min;
+ // Guard against `n * width` overflowing `usize`.
+ let (start, ovf) = n.overflowing_mul(self.width);
+ let len = self.inner.len();
+ if start >= len || ovf {
+ self.inner = BitSlice::empty();
+ return None;
+ }
+ // Clamp the chunk end to the slice length for a trailing partial
+ // chunk.
+ let end = start.checked_add(self.width)
+ .map(|s| min(s, len))
+ .unwrap_or(len);
+ let out = &self.inner[start .. end];
+ self.inner = &self.inner[end ..];
+ Some(out)
+ }
+
+ /// Consumes the iterator, returning only the final chunk.
+ ///
+ /// # Parameters
+ ///
+ /// - `self`
+ ///
+ /// # Returns
+ ///
+ /// The last chunk in the iterator slice, if any.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0x4B];
+ /// let bv: &BitSlice = store.into();
+ /// assert_eq!(bv.chunks(3).last(), Some(&bv[6 ..]));
+ /// ```
+ fn last(mut self) -> Option<Self::Item> {
+ self.next_back()
+ }
+}
+
+/// State keeper for mutable chunked iteration over a `BitSlice`.
+///
+/// # Type Parameters
+///
+/// - `C: Cursor`: The bit-order type of the underlying `BitSlice`.
+/// - `T: 'a + Bits`: The storage type of the underlying `BitSlice`.
+///
+/// # Lifetimes
+///
+/// - `'a`: The lifetime of the underlying `BitSlice`.
+#[derive(Debug)]
+pub struct ChunksMut<'a, C, T>
+where C: Cursor, T: 'a + Bits {
+ /// The `BitSlice` being iterated.
+ inner: &'a mut BitSlice<C, T>,
+ /// The width of the chunks.
+ /// NOTE(review): iteration divides and mods by this value; the
+ /// constructor presumably rejects `width == 0` — confirm at call site.
+ width: usize,
+}
+
+impl<'a, C, T> DoubleEndedIterator for ChunksMut<'a, C, T>
+where C: Cursor, T: 'a + Bits {
+ /// Produces the next chunk from the back of the slice.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ ///
+ /// # Returns
+ ///
+ /// The last chunk in the slice, if any.
+ fn next_back(&mut self) -> Option<Self::Item> {
+ if self.inner.is_empty() {
+ return None;
+ }
+ let len = self.inner.len();
+ // The trailing chunk is partial (`rem` bits) unless the length is an
+ // exact multiple of the chunk width.
+ let rem = len % self.width;
+ let size = if rem == 0 { self.width } else { rem };
+ // Detach `inner` from `self` so the yielded chunk can carry the full
+ // `'a` lifetime rather than a reborrow of `self`.
+ let tmp = mem::replace(&mut self.inner, BitSlice::empty_mut());
+ let (head, tail) = tmp.split_at_mut(len - size);
+ self.inner = head;
+ Some(tail)
+ }
+}
+
+/// Mark that the iterator has an exact size (`size_hint` is exact).
+impl<'a, C, T> ExactSizeIterator for ChunksMut<'a, C, T>
+where C: Cursor, T: 'a + Bits {}
+
+/// Mark that the iterator will not resume after yielding `None`.
+impl<'a, C, T> FusedIterator for ChunksMut<'a, C, T>
+where C: Cursor, T: 'a + Bits {}
+
+impl<'a, C, T> Iterator for ChunksMut<'a, C, T>
+where C: Cursor, T: 'a + Bits {
+ type Item = &'a mut BitSlice<C, T>;
+
+ /// Advances the iterator by one, returning the first chunk in it (if any).
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ ///
+ /// # Returns
+ ///
+ /// The leading chunk in the iterator, if any.
+ fn next(&mut self) -> Option<Self::Item> {
+ use core::cmp::min;
+ if self.inner.is_empty() {
+ return None;
+ }
+ let size = min(self.inner.len(), self.width);
+ // Detach `inner` from `self` so the yielded chunk carries the full
+ // `'a` lifetime.
+ let tmp = mem::replace(&mut self.inner, BitSlice::empty_mut());
+ let (head, tail) = tmp.split_at_mut(size);
+ self.inner = tail;
+ Some(head)
+ }
+
+ /// Hints at the number of chunks remaining in the iterator.
+ ///
+ /// Because the exact size is always known, this always produces
+ /// `(len, Some(len))`.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// - `usize`: The minimum chunks remaining.
+ /// - `Option<usize>`: The maximum chunks remaining.
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ if self.inner.is_empty() {
+ return (0, Some(0));
+ }
+ let len = self.inner.len();
+ // Count the full chunks, plus one for a trailing partial chunk.
+ let (n, r) = (len / self.width, len % self.width);
+ let len = n + (r > 0) as usize;
+ (len, Some(len))
+ }
+
+ /// Counts how many chunks are live in the iterator, consuming it.
+ ///
+ /// # Parameters
+ ///
+ /// - `self`
+ ///
+ /// # Returns
+ ///
+ /// The number of chunks remaining in the iterator.
+ fn count(self) -> usize {
+ self.len()
+ }
+
+ /// Advances the iterator by `n` chunks, starting from zero.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ /// - `n`: The number of chunks to skip, before producing the next bit after
+ /// skips. If this overshoots the iterator’s remaining length, then the
+ /// iterator is marked empty before returning `None`.
+ ///
+ /// # Returns
+ ///
+ /// If `n` does not overshoot the iterator’s bounds, this produces the `n`th
+ /// bit after advancing the iterator to it, discarding the intermediate
+ /// chunks.
+ ///
+ /// If `n` does overshoot the iterator’s bounds, this empties the iterator
+ /// and returns `None`.
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ use core::cmp::min;
+ // Guard against `n * width` overflowing `usize`.
+ let (start, ovf) = n.overflowing_mul(self.width);
+ let len = self.inner.len();
+ if start >= len || ovf {
+ self.inner = BitSlice::empty_mut();
+ return None;
+ }
+ // Clamp the chunk end to the slice length for a trailing partial
+ // chunk.
+ let end = start.checked_add(self.width)
+ .map(|s| min(s, len))
+ .unwrap_or(len);
+ // Drop the skipped prefix, slice the requested chunk off the front of
+ // the remainder, and retain everything after it. This matches the
+ // immutable `Chunks::nth`. The previous version re-split the *prefix*
+ // (yielding bits from the skipped region) and left the requested
+ // chunk inside `inner`, so `nth` returned the wrong slice.
+ let tmp = mem::replace(&mut self.inner, BitSlice::empty_mut());
+ let (_, tail) = tmp.split_at_mut(start);
+ let (nth, rest) = tail.split_at_mut(end - start);
+ self.inner = rest;
+ Some(nth)
+ }
+
+ /// Consumes the iterator, returning only the final chunk.
+ ///
+ /// # Parameters
+ ///
+ /// - `self`
+ ///
+ /// # Returns
+ ///
+ /// The last chunk in the iterator slice, if any.
+ fn last(mut self) -> Option<Self::Item> {
+ self.next_back()
+ }
+}
+
+/// State keeper for exact chunked iteration over a `BitSlice`.
+///
+/// # Type Parameters
+///
+/// - `C: Cursor`: The bit-order type of the underlying `BitSlice`.
+/// - `T: 'a + Bits`: The storage type of the underlying `BitSlice`.
+///
+/// # Lifetimes
+///
+/// - `'a`: The lifetime of the underlying `BitSlice`.
+#[derive(Clone, Debug)]
+pub struct ChunksExact<'a, C, T>
+where C: Cursor, T: 'a + Bits {
+ /// The `BitSlice` being iterated.
+ inner: &'a BitSlice<C, T>,
+ /// The excess of the original `BitSlice`, which is not iterated.
+ extra: &'a BitSlice<C, T>,
+ /// The width of the chunks.
+ /// NOTE(review): iteration divides by this value; the constructor
+ /// presumably rejects `width == 0` — confirm at the call site.
+ width: usize,
+}
+
+impl<'a, C, T> ChunksExact<'a, C, T>
+where C: Cursor, T: 'a + Bits {
+ /// Produces the remainder of the original slice, which will not be included
+ /// in the iteration.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// The remaining slice that iteration will not include.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0x4B];
+ /// let bits: &BitSlice = store.into();
+ /// let chunks_exact = bits.chunks_exact(3);
+ /// assert_eq!(chunks_exact.remainder(), &bits[6 ..]);
+ /// ```
+ pub fn remainder(&self) -> &'a BitSlice<C, T> {
+ // `extra` was split off at construction; it carries the `'a` lifetime
+ // directly, so no reborrow of `self` is needed.
+ self.extra
+ }
+}
+
+impl<'a, C, T> DoubleEndedIterator for ChunksExact<'a, C, T>
+where C: Cursor, T: 'a + Bits {
+ /// Produces the next chunk from the back of the slice.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ ///
+ /// # Returns
+ ///
+ /// The last chunk in the slice, if any.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[1];
+ /// let bv: &BitSlice = store.into();
+ /// let mut chunks_exact = bv.chunks_exact(3);
+ /// assert_eq!(chunks_exact.next_back(), Some(&bv[3 .. 6]));
+ /// assert_eq!(chunks_exact.next_back(), Some(&bv[0 .. 3]));
+ /// assert!(chunks_exact.next_back().is_none());
+ /// ```
+ fn next_back(&mut self) -> Option<Self::Item> {
+ // Anything shorter than a full chunk is never yielded; end iteration.
+ if self.inner.len() < self.width {
+ self.inner = BitSlice::empty();
+ return None;
+ }
+ let (head, tail) = self.inner.split_at(self.inner.len() - self.width);
+ self.inner = head;
+ Some(tail)
+ }
+}
+
+/// Mark that the iterator has an exact size: `size_hint` is exact, so
+/// `len()` may be trusted.
+impl<'a, C, T> ExactSizeIterator for ChunksExact<'a, C, T>
+where C: Cursor, T: 'a + Bits {}
+
+/// Mark that the iterator will not resume after halting: once `next` returns
+/// `None`, it returns `None` forever.
+impl<'a, C, T> FusedIterator for ChunksExact<'a, C, T>
+where C: Cursor, T: 'a + Bits {}
+
+impl<'a, C, T> Iterator for ChunksExact<'a, C, T>
+where C: Cursor, T: 'a + Bits {
+ type Item = &'a BitSlice<C, T>;
+
+ /// Advances the iterator by one, returning the first chunk in it (if any).
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ ///
+ /// # Returns
+ ///
+ /// The leading chunk in the iterator, if any.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0x80];
+ /// let bv: &BitSlice = store.into();
+ /// let mut chunks_exact = bv.chunks_exact(3);
+ /// assert_eq!(chunks_exact.next(), Some(&bv[0 .. 3]));
+ /// assert_eq!(chunks_exact.next(), Some(&bv[3 .. 6]));
+ /// assert!(chunks_exact.next().is_none());
+ /// ```
+ fn next(&mut self) -> Option<Self::Item> {
+ // Anything shorter than a full chunk is never yielded; end iteration.
+ if self.inner.len() < self.width {
+ self.inner = BitSlice::empty();
+ return None;
+ }
+ let (head, tail) = self.inner.split_at(self.width);
+ self.inner = tail;
+ Some(head)
+ }
+
+ /// Hints at the number of chunks remaining in the iterator.
+ ///
+ /// Because the exact size is always known, this always produces
+ /// `(len, Some(len))`.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// - `usize`: The minimum chunks remaining.
+ /// - `Option<usize>`: The maximum chunks remaining.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0x4B];
+ /// let bv: &BitSlice = store.into();
+ /// let mut chunks_exact = bv.chunks_exact(3);
+ /// assert_eq!(chunks_exact.size_hint(), (2, Some(2)));
+ /// chunks_exact.next();
+ /// assert_eq!(chunks_exact.size_hint(), (1, Some(1)));
+ /// chunks_exact.next();
+ /// assert_eq!(chunks_exact.size_hint(), (0, Some(0)));
+ /// ```
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ // Only full chunks count; any trailing partial region was already
+ // split off into `extra`.
+ let len = self.inner.len() / self.width;
+ (len, Some(len))
+ }
+
+ /// Counts how many chunks are live in the iterator, consuming it.
+ ///
+ /// # Parameters
+ ///
+ /// - `self`
+ ///
+ /// # Returns
+ ///
+ /// The number of chunks remaining in the iterator.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0x4B];
+ /// let bv: &BitSlice = store.into();
+ /// assert_eq!(bv.chunks_exact(3).count(), 2);
+ /// ```
+ fn count(self) -> usize {
+ self.len()
+ }
+
+ /// Advances the iterator by `n` chunks, starting from zero.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ /// - `n`: The number of chunks to skip, before producing the next bit after
+ /// skips. If this overshoots the iterator’s remaining length, then the
+ /// iterator is marked empty before returning `None`.
+ ///
+ /// # Returns
+ ///
+ /// If `n` does not overshoot the iterator’s bounds, this produces the `n`th
+ /// bit after advancing the iterator to it, discarding the intermediate
+ /// chunks.
+ ///
+ /// If `n` does overshoot the iterator’s bounds, this empties the iterator
+ /// and returns `None`.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[2];
+ /// let bv: &BitSlice = store.into();
+ /// let mut chunks_exact = bv.chunks_exact(3);
+ /// assert_eq!(chunks_exact.nth(1), Some(&bv[3 .. 6]));
+ /// assert!(chunks_exact.nth(0).is_none());
+ /// ```
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ // Guard against `n * width` overflowing `usize`.
+ let (start, ovf) = n.overflowing_mul(self.width);
+ if start >= self.inner.len() || ovf {
+ self.inner = BitSlice::empty();
+ return None;
+ }
+ // Discard the skipped prefix, then yield via `next`.
+ let (_, tail) = self.inner.split_at(start);
+ self.inner = tail;
+ self.next()
+ }
+
+ /// Consumes the iterator, returning only the final chunk.
+ ///
+ /// # Parameters
+ ///
+ /// - `self`
+ ///
+ /// # Returns
+ ///
+ /// The last chunk in the iterator slice, if any.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0x4B];
+ /// let bv: &BitSlice = store.into();
+ /// assert_eq!(bv.chunks_exact(3).last(), Some(&bv[3 .. 6]));
+ /// ```
+ fn last(mut self) -> Option<Self::Item> {
+ self.next_back()
+ }
+}
+
+/// State keeper for mutable exact chunked iteration over a `BitSlice`.
+///
+/// # Type Parameters
+///
+/// - `C: Cursor`: The bit-order type of the underlying `BitSlice`.
+/// - `T: 'a + Bits`: The storage type of the underlying `BitSlice`.
+///
+/// # Lifetimes
+///
+/// - `'a`: The lifetime of the underlying `BitSlice`.
+#[derive(Debug)]
+pub struct ChunksExactMut<'a, C, T>
+where C: Cursor, T: 'a + Bits {
+ /// The `BitSlice` being iterated.
+ inner: &'a mut BitSlice<C, T>,
+ /// The excess of the original `BitSlice`, which is not iterated.
+ extra: &'a mut BitSlice<C, T>,
+ /// The width of the chunks.
+ /// NOTE(review): iteration compares against and splits by this value; the
+ /// constructor presumably rejects `width == 0` — confirm at the call site.
+ width: usize,
+}
+
+impl<'a, C, T> ChunksExactMut<'a, C, T>
+where C: Cursor, T: 'a + Bits {
+ /// Produces the remainder of the original slice, which will not be included
+ /// in the iteration.
+ ///
+ /// # Parameters
+ ///
+ /// - `self`: Consumes the iterator, as the remainder borrows it mutably.
+ ///
+ /// # Returns
+ ///
+ /// The remaining slice that iteration will not include.
+ pub fn into_remainder(self) -> &'a mut BitSlice<C, T> {
+ self.extra
+ }
+}
+
+impl<'a, C, T> DoubleEndedIterator for ChunksExactMut<'a, C, T>
+where C: Cursor, T: 'a + Bits {
+ /// Produces the next chunk from the back of the slice.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ ///
+ /// # Returns
+ ///
+ /// The last chunk in the slice, if any.
+ fn next_back(&mut self) -> Option<Self::Item> {
+ if self.inner.len() < self.width {
+ self.inner = BitSlice::empty_mut();
+ return None;
+ }
+ // Take `inner` out of `self` so the mutable borrow can be split.
+ let tmp = mem::replace(&mut self.inner, BitSlice::empty_mut());
+ let tlen = tmp.len();
+ // The excess lives in `extra`, so `tlen` should be a whole multiple of
+ // `width` and this split lands on a chunk boundary — mirrors `next`.
+ let (head, tail) = tmp.split_at_mut(tlen - self.width);
+ self.inner = head;
+ Some(tail)
+ }
+}
+
+/// Mark that the iterator has an exact size.
+impl<'a, C, T> ExactSizeIterator for ChunksExactMut<'a, C, T>
+where C: Cursor, T: 'a + Bits {}
+
+/// Mark that the iterator will not resume after halting.
+impl<'a, C, T> FusedIterator for ChunksExactMut<'a, C, T>
+where C: Cursor, T: 'a + Bits {}
+
+impl<'a, C, T> Iterator for ChunksExactMut<'a, C, T>
+where C: Cursor, T: 'a + Bits {
+ type Item = &'a mut BitSlice<C, T>;
+
+ /// Advances the iterator by one, returning the first chunk in it (if any).
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ ///
+ /// # Returns
+ ///
+ /// The leading chunk in the iterator, if any.
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.inner.len() < self.width {
+ self.inner = BitSlice::empty_mut();
+ return None;
+ }
+ // Take `inner` out of `self` so the mutable borrow can be split.
+ let tmp = mem::replace(&mut self.inner, BitSlice::empty_mut());
+ let (head, tail) = tmp.split_at_mut(self.width);
+ self.inner = tail;
+ Some(head)
+ }
+
+ /// Hints at the number of chunks remaining in the iterator.
+ ///
+ /// Because the exact size is always known, this always produces
+ /// `(len, Some(len))`.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// - `usize`: The minimum chunks remaining.
+ /// - `Option<usize>`: The maximum chunks remaining.
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let len = self.inner.len() / self.width;
+ (len, Some(len))
+ }
+
+ /// Counts how many chunks are live in the iterator, consuming it.
+ ///
+ /// # Parameters
+ ///
+ /// - `self`
+ ///
+ /// # Returns
+ ///
+ /// The number of chunks remaining in the iterator.
+ fn count(self) -> usize {
+ self.len()
+ }
+
+ /// Advances the iterator by `n` chunks, starting from zero.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ /// - `n`: The number of chunks to skip, before producing the next chunk
+ /// after the skips. If this overshoots the iterator’s remaining length,
+ /// then the iterator is marked empty before returning `None`.
+ ///
+ /// # Returns
+ ///
+ /// If `n` does not overshoot the iterator’s bounds, this produces the
+ /// `n`th chunk after advancing the iterator to it, discarding the
+ /// intermediate chunks.
+ ///
+ /// If `n` does overshoot the iterator’s bounds, this empties the iterator
+ /// and returns `None`.
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ // `overflowing_mul` guards against `n * width` wrapping on overflow.
+ let (start, ovf) = n.overflowing_mul(self.width);
+ if start >= self.inner.len() || ovf {
+ self.inner = BitSlice::empty_mut();
+ return None;
+ }
+ // Take `inner` out of `self` so the mutable borrow can be split.
+ let tmp = mem::replace(&mut self.inner, BitSlice::empty_mut());
+ let (_, tail) = tmp.split_at_mut(start);
+ self.inner = tail;
+ self.next()
+ }
+
+ /// Consumes the iterator, returning only the final chunk.
+ ///
+ /// # Parameters
+ ///
+ /// - `self`
+ ///
+ /// # Returns
+ ///
+ /// The last chunk in the iterator slice, if any.
+ fn last(mut self) -> Option<Self::Item> {
+ self.next_back()
+ }
+}
+
+/// State keeper for iteration over a `BitSlice`.
+///
+/// Yields each bit in the slice as a `bool`, from front to back.
+///
+/// # Type Parameters
+///
+/// - `C: Cursor`: The bit-order type of the underlying `BitSlice`.
+/// - `T: 'a + Bits`: The storage type of the underlying `BitSlice`.
+///
+/// # Lifetimes
+///
+/// - `'a`: The lifetime of the underlying `BitSlice`.
+#[derive(Clone, Debug)]
+pub struct Iter<'a, C, T>
+where C: Cursor, T: 'a + Bits {
+ /// The `BitSlice` being iterated.
+ inner: &'a BitSlice<C, T>,
+}
+
+impl<'a, C, T> Iter<'a, C, T>
+where C: Cursor, T: 'a + Bits {
+ /// Accesses the `BitPtr` representation of the slice.
+ ///
+ /// Crate-internal accessor; not part of the public API.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// The `BitPtr` representation of the remaining slice.
+ pub(crate) fn bitptr(&self) -> BitPtr<T> {
+ self.inner.bitptr()
+ }
+}
+
+impl<'a, C, T> DoubleEndedIterator for Iter<'a, C, T>
+where C: Cursor, T: 'a + Bits {
+ /// Takes the last bit off the back of the slice and yields it.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ ///
+ /// # Returns
+ ///
+ /// The bit at the back of the slice, or `None` when exhausted.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[1];
+ /// let bv: &BitSlice = store.into();
+ /// let bv = &bv[6 ..];
+ /// let mut iter = bv.iter();
+ /// assert!(iter.next_back().unwrap());
+ /// assert!(!iter.next_back().unwrap());
+ /// assert!(iter.next_back().is_none());
+ /// ```
+ fn next_back(&mut self) -> Option<Self::Item> {
+ match self.inner.len() {
+ 0 => None,
+ n => {
+ let bit = self.inner[n - 1];
+ self.inner = &self.inner[.. n - 1];
+ Some(bit)
+ },
+ }
+ }
+}
+
+/// Mark that the iterator has an exact size: its remaining length is always
+/// precisely known.
+impl<'a, C, T> ExactSizeIterator for Iter<'a, C, T>
+where C: Cursor, T: 'a + Bits {}
+
+/// Mark that the iterator will not resume after halting: once `next` returns
+/// `None`, it returns `None` forever.
+impl<'a, C, T> FusedIterator for Iter<'a, C, T>
+where C: Cursor, T: 'a + Bits {}
+
+impl<'a, C, T> Iterator for Iter<'a, C, T>
+where C: Cursor, T: 'a + Bits {
+ type Item = bool;
+
+ /// Advances the iterator by one, returning the first bit in it (if any).
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ ///
+ /// # Returns
+ ///
+ /// The leading bit in the iterator, if any.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0x80];
+ /// let bv: &BitSlice = store.into();
+ /// let bv = &bv[.. 2];
+ /// let mut iter = bv.iter();
+ /// assert!(iter.next().unwrap());
+ /// assert!(!iter.next().unwrap());
+ /// assert!(iter.next().is_none());
+ /// ```
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.inner.is_empty() {
+ return None;
+ }
+ let out = self.inner[0];
+ self.inner = &self.inner[1 ..];
+ Some(out)
+ }
+
+ /// Hints at the number of bits remaining in the iterator.
+ ///
+ /// Because the exact size is always known, this always produces
+ /// `(len, Some(len))`.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// - `usize`: The minimum bits remaining.
+ /// - `Option<usize>`: The maximum bits remaining.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0x4B];
+ /// let bv: &BitSlice = store.into();
+ /// let bv = &bv[.. 2];
+ /// let mut iter = bv.iter();
+ /// assert_eq!(iter.size_hint(), (2, Some(2)));
+ /// iter.next();
+ /// assert_eq!(iter.size_hint(), (1, Some(1)));
+ /// iter.next();
+ /// assert_eq!(iter.size_hint(), (0, Some(0)));
+ /// ```
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let len = self.inner.len();
+ (len, Some(len))
+ }
+
+ /// Counts how many bits are live in the iterator, consuming it.
+ ///
+ /// # Parameters
+ ///
+ /// - `self`
+ ///
+ /// # Returns
+ ///
+ /// The number of bits remaining in the iterator.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0x4B];
+ /// let bv: &BitSlice = store.into();
+ /// assert_eq!(bv.iter().count(), 8);
+ /// ```
+ fn count(self) -> usize {
+ self.len()
+ }
+
+ /// Advances the iterator by `n` bits, starting from zero.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ /// - `n`: The number of bits to skip, before producing the next bit after
+ /// skips. If this overshoots the iterator’s remaining length, then the
+ /// iterator is marked empty before returning `None`.
+ ///
+ /// # Returns
+ ///
+ /// If `n` does not overshoot the iterator’s bounds, this produces the `n`th
+ /// bit after advancing the iterator to it, discarding the intermediate
+ /// bits.
+ ///
+ /// If `n` does overshoot the iterator’s bounds, this empties the iterator
+ /// and returns `None`.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[2];
+ /// let bv: &BitSlice = store.into();
+ /// let mut iter = bv.iter();
+ /// assert!(iter.nth(6).unwrap());
+ /// assert!(!iter.nth(0).unwrap());
+ /// assert!(iter.nth(0).is_none());
+ /// ```
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ // `n` is checked against `len` up front, so the slicing below cannot
+ // panic. No overflow guard is needed since `n` is used directly.
+ if n >= self.len() {
+ self.inner = BitSlice::empty();
+ return None;
+ }
+ self.inner = &self.inner[n ..];
+ self.next()
+ }
+
+ /// Consumes the iterator, returning only the final bit.
+ ///
+ /// # Parameters
+ ///
+ /// - `self`
+ ///
+ /// # Returns
+ ///
+ /// The last bit in the iterator slice, if any.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0x4B];
+ /// let bv: &BitSlice = store.into();
+ /// assert!(bv.iter().last().unwrap());
+ /// ```
+ fn last(mut self) -> Option<Self::Item> {
+ self.next_back()
+ }
+}
+
+/// State keeper for reverse chunked iteration over a `BitSlice`.
+///
+/// Chunks are yielded from the back of the slice towards the front; only the
+/// front-most chunk may be narrower than `width`.
+///
+/// # Type Parameters
+///
+/// - `C: Cursor`: The bit-order type of the underlying `BitSlice`.
+/// - `T: 'a + Bits`: The storage type of the underlying `BitSlice`.
+///
+/// # Lifetimes
+///
+/// - `'a`: The lifetime of the underlying `BitSlice`.
+#[derive(Clone, Debug)]
+pub struct RChunks<'a, C, T>
+where C: Cursor, T: 'a + Bits {
+ /// The `BitSlice` being iterated.
+ inner: &'a BitSlice<C, T>,
+ /// The width of the chunks, in bits.
+ width: usize,
+}
+
+impl<'a, C, T> DoubleEndedIterator for RChunks<'a, C, T>
+where C: Cursor, T: 'a + Bits {
+ /// Produces the next chunk from the front of the slice.
+ ///
+ /// Since this is a reverse iterator, the double-ended position is the
+ /// front, where the only possibly-partial chunk lives.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ ///
+ /// # Returns
+ ///
+ /// The last chunk in the slice, if any.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[1];
+ /// let bv: &BitSlice = store.into();
+ /// let mut rchunks = bv.rchunks(5);
+ /// assert_eq!(rchunks.next_back(), Some(&bv[.. 3]));
+ /// assert_eq!(rchunks.next_back(), Some(&bv[3 ..]));
+ /// assert!(rchunks.next_back().is_none());
+ /// ```
+ fn next_back(&mut self) -> Option<Self::Item> {
+ let len = self.inner.len();
+ if len == 0 {
+ return None;
+ }
+ // The front chunk is the only one that may be narrower than `width`.
+ let rem = len % self.width;
+ let take = if rem > 0 { rem } else { self.width };
+ let (chunk, rest) = self.inner.split_at(take);
+ self.inner = rest;
+ Some(chunk)
+ }
+}
+
+/// Mark that the iterator has an exact size: its remaining length is always
+/// precisely known.
+impl<'a, C, T> ExactSizeIterator for RChunks<'a, C, T>
+where C: Cursor, T: 'a + Bits {}
+
+/// Mark that the iterator will not resume after halting: once `next` returns
+/// `None`, it returns `None` forever.
+impl<'a, C, T> FusedIterator for RChunks<'a, C, T>
+where C: Cursor, T: 'a + Bits {}
+
+impl<'a, C, T> Iterator for RChunks<'a, C, T>
+where C: Cursor, T: 'a + Bits {
+ type Item = &'a BitSlice<C, T>;
+
+ /// Advances the iterator by one, returning the first chunk in it (if any).
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ ///
+ /// # Returns
+ ///
+ /// The leading chunk in the iterator, if any.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0x80];
+ /// let bv: &BitSlice = store.into();
+ /// let mut rchunks = bv.rchunks(5);
+ /// assert_eq!(rchunks.next(), Some(&bv[3 ..]));
+ /// assert_eq!(rchunks.next(), Some(&bv[.. 3]));
+ /// assert!(rchunks.next().is_none());
+ /// ```
+ fn next(&mut self) -> Option<Self::Item> {
+ use core::cmp::min;
+ if self.inner.is_empty() {
+ return None;
+ }
+ let len = self.inner.len();
+ // `min` clamps the final (front-most) chunk to the remaining bits.
+ let size = min(len, self.width);
+ let (head, tail) = self.inner.split_at(len - size);
+ self.inner = head;
+ Some(tail)
+ }
+
+ /// Hints at the number of chunks remaining in the iterator.
+ ///
+ /// Because the exact size is always known, this always produces
+ /// `(len, Some(len))`.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// - `usize`: The minimum chunks remaining.
+ /// - `Option<usize>`: The maximum chunks remaining.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0x4B];
+ /// let bv: &BitSlice = store.into();
+ /// let mut rchunks = bv.rchunks(5);
+ /// assert_eq!(rchunks.size_hint(), (2, Some(2)));
+ /// rchunks.next();
+ /// assert_eq!(rchunks.size_hint(), (1, Some(1)));
+ /// rchunks.next();
+ /// assert_eq!(rchunks.size_hint(), (0, Some(0)));
+ /// ```
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ if self.inner.is_empty() {
+ return (0, Some(0));
+ }
+ let len = self.inner.len();
+ // A non-zero remainder contributes one extra, partial chunk.
+ let (len, rem) = (len / self.width, len % self.width);
+ let len = len + (rem > 0) as usize;
+ (len, Some(len))
+ }
+
+ /// Counts how many chunks are live in the iterator, consuming it.
+ ///
+ /// # Parameters
+ ///
+ /// - `self`
+ ///
+ /// # Returns
+ ///
+ /// The number of chunks remaining in the iterator.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0x4B];
+ /// let bv: &BitSlice = store.into();
+ /// assert_eq!(bv.rchunks(3).count(), 3);
+ /// ```
+ fn count(self) -> usize {
+ self.len()
+ }
+
+ /// Advances the iterator by `n` chunks, starting from zero.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ /// - `n`: The number of chunks to skip, before producing the next chunk
+ /// after the skips. If this overshoots the iterator’s remaining length,
+ /// then the iterator is marked empty before returning `None`.
+ ///
+ /// # Returns
+ ///
+ /// If `n` does not overshoot the iterator’s bounds, this produces the
+ /// `n`th chunk after advancing the iterator to it, discarding the
+ /// intermediate chunks.
+ ///
+ /// If `n` does overshoot the iterator’s bounds, this empties the iterator
+ /// and returns `None`.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[2];
+ /// let bv: &BitSlice = store.into();
+ /// let mut rchunks = bv.rchunks(3);
+ /// assert_eq!(rchunks.nth(2), Some(&bv[0 .. 2]));
+ /// assert!(rchunks.nth(0).is_none());
+ /// ```
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ // `overflowing_mul` guards against `n * width` wrapping on overflow.
+ let (end, ovf) = n.overflowing_mul(self.width);
+ if end >= self.inner.len() || ovf {
+ self.inner = BitSlice::empty();
+ return None;
+ }
+ // Can't underflow because of the check above
+ let end = self.inner.len() - end;
+ // `checked_sub` clamps the front-most, possibly partial, chunk at 0.
+ let start = end.checked_sub(self.width).unwrap_or(0);
+ let nth = &self.inner[start .. end];
+ self.inner = &self.inner[.. start];
+ Some(nth)
+ }
+
+ /// Consumes the iterator, returning only the final chunk.
+ ///
+ /// # Parameters
+ ///
+ /// - `self`
+ ///
+ /// # Returns
+ ///
+ /// The last chunk in the iterator slice, if any.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0x4B];
+ /// let bv: &BitSlice = store.into();
+ /// assert_eq!(bv.rchunks(3).last(), Some(&bv[.. 2]));
+ /// ```
+ fn last(mut self) -> Option<Self::Item> {
+ self.next_back()
+ }
+}
+
+/// State keeper for mutable reverse chunked iteration over a `BitSlice`.
+///
+/// Chunks are yielded from the back of the slice towards the front; only the
+/// front-most chunk may be narrower than `width`.
+///
+/// # Type Parameters
+///
+/// - `C: Cursor`: The bit-order type of the underlying `BitSlice`.
+/// - `T: 'a + Bits`: The storage type of the underlying `BitSlice`.
+///
+/// # Lifetimes
+///
+/// - `'a`: The lifetime of the underlying `BitSlice`.
+#[derive(Debug)]
+pub struct RChunksMut<'a, C, T>
+where C: Cursor, T: 'a + Bits {
+ /// The `BitSlice` being iterated.
+ inner: &'a mut BitSlice<C, T>,
+ /// The width of the chunks, in bits.
+ width: usize,
+}
+
+impl<'a, C, T> DoubleEndedIterator for RChunksMut<'a, C, T>
+where C: Cursor, T: 'a + Bits {
+ /// Produces the next chunk from the front of the slice.
+ ///
+ /// Since this is a reverse iterator, the double-ended position is the
+ /// front, where the only possibly-partial chunk lives.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ ///
+ /// # Returns
+ ///
+ /// The last chunk in the slice, if any.
+ fn next_back(&mut self) -> Option<Self::Item> {
+ if self.inner.is_empty() {
+ return None;
+ }
+ // The front chunk is the only one that may be narrower than `width`.
+ let rem = self.inner.len() % self.width;
+ let size = if rem == 0 { self.width } else { rem };
+ // Take `inner` out of `self` so the mutable borrow can be split.
+ let tmp = mem::replace(&mut self.inner, BitSlice::empty_mut());
+ let (head, tail) = tmp.split_at_mut(size);
+ self.inner = tail;
+ Some(head)
+ }
+}
+
+/// Mark that the iterator has an exact size.
+impl<'a, C, T> ExactSizeIterator for RChunksMut<'a, C, T>
+where C: Cursor, T: 'a + Bits {}
+
+/// Mark that the iterator will not resume after halting.
+impl<'a, C, T> FusedIterator for RChunksMut<'a, C, T>
+where C: Cursor, T: 'a + Bits {}
+
+impl<'a, C, T> Iterator for RChunksMut<'a, C, T>
+where C: Cursor, T: 'a + Bits {
+ type Item = &'a mut BitSlice<C, T>;
+
+ /// Advances the iterator by one, returning the first chunk in it (if any).
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ ///
+ /// # Returns
+ ///
+ /// The leading chunk in the iterator, if any.
+ fn next(&mut self) -> Option<Self::Item> {
+ use core::cmp::min;
+ if self.inner.is_empty() {
+ return None;
+ }
+ // `min` clamps the final (front-most) chunk to the remaining bits.
+ let size = min(self.inner.len(), self.width);
+ // Take `inner` out of `self` so the mutable borrow can be split.
+ let tmp = mem::replace(&mut self.inner, BitSlice::empty_mut());
+ let tlen = tmp.len();
+ let (head, tail) = tmp.split_at_mut(tlen - size);
+ self.inner = head;
+ Some(tail)
+ }
+
+ /// Hints at the number of chunks remaining in the iterator.
+ ///
+ /// Because the exact size is always known, this always produces
+ /// `(len, Some(len))`.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// - `usize`: The minimum chunks remaining.
+ /// - `Option<usize>`: The maximum chunks remaining.
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ if self.inner.is_empty() {
+ return (0, Some(0));
+ }
+ let len = self.inner.len();
+ // A non-zero remainder contributes one extra, partial chunk.
+ let (len, rem) = (len / self.width, len % self.width);
+ let len = len + (rem > 0) as usize;
+ (len, Some(len))
+ }
+
+ /// Counts how many chunks are live in the iterator, consuming it.
+ ///
+ /// # Parameters
+ ///
+ /// - `self`
+ ///
+ /// # Returns
+ ///
+ /// The number of chunks remaining in the iterator.
+ fn count(self) -> usize {
+ self.len()
+ }
+
+ /// Advances the iterator by `n` chunks, starting from zero.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ /// - `n`: The number of chunks to skip, before producing the next chunk
+ /// after the skips. If this overshoots the iterator’s remaining length,
+ /// then the iterator is marked empty before returning `None`.
+ ///
+ /// # Returns
+ ///
+ /// If `n` does not overshoot the iterator’s bounds, this produces the
+ /// `n`th chunk after advancing the iterator to it, discarding the
+ /// intermediate chunks.
+ ///
+ /// If `n` does overshoot the iterator’s bounds, this empties the iterator
+ /// and returns `None`.
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ // `overflowing_mul` guards against `n * width` wrapping on overflow.
+ let (end, ovf) = n.overflowing_mul(self.width);
+ if end >= self.inner.len() || ovf {
+ self.inner = BitSlice::empty_mut();
+ return None;
+ }
+ // Can't underflow because of the check above
+ let end = self.inner.len() - end;
+ // `checked_sub` clamps the front-most, possibly partial, chunk at 0.
+ let start = end.checked_sub(self.width).unwrap_or(0);
+ // Take `inner` out of `self` so the mutable borrow can be split.
+ let tmp = mem::replace(&mut self.inner, BitSlice::empty_mut());
+ let (head, tail) = tmp.split_at_mut(start);
+ let (nth, _) = tail.split_at_mut(end - start);
+ self.inner = head;
+ Some(nth)
+ }
+
+ /// Consumes the iterator, returning only the final chunk.
+ ///
+ /// # Parameters
+ ///
+ /// - `self`
+ ///
+ /// # Returns
+ ///
+ /// The last chunk in the iterator slice, if any.
+ fn last(mut self) -> Option<Self::Item> {
+ self.next_back()
+ }
+}
+
+/// State keeper for reverse exact iteration over a `BitSlice`.
+///
+/// Every chunk yielded is exactly `width` bits wide; the excess at the front
+/// of the original slice is split off into `extra` and never yielded.
+///
+/// # Type Parameters
+///
+/// - `C: Cursor`: The bit-order type of the underlying `BitSlice`.
+/// - `T: 'a + Bits`: The storage type of the underlying `BitSlice`.
+///
+/// # Lifetimes
+///
+/// - `'a`: The lifetime of the underlying `BitSlice`.
+#[derive(Clone, Debug)]
+pub struct RChunksExact<'a, C, T>
+where C: Cursor, T: 'a + Bits {
+ /// The `BitSlice` being iterated.
+ inner: &'a BitSlice<C, T>,
+ /// The excess of the original `BitSlice`, which is not iterated.
+ extra: &'a BitSlice<C, T>,
+ /// The width of the chunks, in bits.
+ width: usize,
+}
+
+impl<'a, C, T> RChunksExact<'a, C, T>
+where C: Cursor, T: 'a + Bits {
+ /// Produces the remainder of the original slice, which will not be included
+ /// in the iteration.
+ ///
+ /// For a reverse exact iterator, the remainder is at the *front* of the
+ /// original slice.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// The remaining slice that the iteration will not include.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0x4B];
+ /// let bits: &BitSlice = store.into();
+ /// let rchunks_exact = bits.rchunks_exact(3);
+ /// assert_eq!(rchunks_exact.remainder(), &bits[.. 2]);
+ /// ```
+ pub fn remainder(&self) -> &'a BitSlice<C, T> {
+ self.extra
+ }
+}
+
+impl<'a, C, T> DoubleEndedIterator for RChunksExact<'a, C, T>
+where C: Cursor, T: 'a + Bits {
+ /// Produces the next chunk from the front of the slice.
+ ///
+ /// Since this is a reverse iterator, the double-ended position is the
+ /// front of the slice.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ ///
+ /// # Returns
+ ///
+ /// The last chunk in the slice, if any.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[1];
+ /// let bv: &BitSlice = store.into();
+ /// let mut rchunks_exact = bv.rchunks_exact(3);
+ /// assert_eq!(rchunks_exact.next_back(), Some(&bv[2 .. 5]));
+ /// assert_eq!(rchunks_exact.next_back(), Some(&bv[5 .. 8]));
+ /// assert!(rchunks_exact.next_back().is_none());
+ /// ```
+ fn next_back(&mut self) -> Option<Self::Item> {
+ if self.width > self.inner.len() {
+ self.inner = BitSlice::empty();
+ return None;
+ }
+ let (chunk, rest) = self.inner.split_at(self.width);
+ self.inner = rest;
+ Some(chunk)
+ }
+}
+
+/// Mark that the iterator has an exact size: its remaining length is always
+/// precisely known.
+impl<'a, C, T> ExactSizeIterator for RChunksExact<'a, C, T>
+where C: Cursor, T: 'a + Bits {}
+
+/// Mark that the iterator will not resume after halting: once `next` returns
+/// `None`, it returns `None` forever.
+impl<'a, C, T> FusedIterator for RChunksExact<'a, C, T>
+where C: Cursor, T: 'a + Bits {}
+
+impl<'a, C, T> Iterator for RChunksExact<'a, C, T>
+where C: Cursor, T: 'a + Bits {
+ type Item = &'a BitSlice<C, T>;
+
+ /// Advances the iterator by one, returning the first chunk in it (if any).
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ ///
+ /// # Returns
+ ///
+ /// The leading chunk in the iterator, if any.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0x80];
+ /// let bv: &BitSlice = store.into();
+ /// let mut rchunks_exact = bv.rchunks_exact(3);
+ /// assert_eq!(rchunks_exact.next(), Some(&bv[5 .. 8]));
+ /// assert_eq!(rchunks_exact.next(), Some(&bv[2 .. 5]));
+ /// assert!(rchunks_exact.next().is_none());
+ /// ```
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.inner.len() < self.width {
+ self.inner = BitSlice::empty();
+ return None;
+ }
+ let (head, tail) = self.inner.split_at(self.inner.len() - self.width);
+ self.inner = head;
+ Some(tail)
+ }
+
+ /// Hints at the number of chunks remaining in the iterator.
+ ///
+ /// Because the exact size is always known, this always produces
+ /// `(len, Some(len))`.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// - `usize`: The minimum chunks remaining.
+ /// - `Option<usize>`: The maximum chunks remaining.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0x4B];
+ /// let bv: &BitSlice = store.into();
+ /// let mut rchunks_exact = bv.rchunks_exact(3);
+ /// assert_eq!(rchunks_exact.size_hint(), (2, Some(2)));
+ /// rchunks_exact.next();
+ /// assert_eq!(rchunks_exact.size_hint(), (1, Some(1)));
+ /// rchunks_exact.next();
+ /// assert_eq!(rchunks_exact.size_hint(), (0, Some(0)));
+ /// ```
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let n = self.inner.len() / self.width;
+ (n, Some(n))
+ }
+
+ /// Counts how many chunks are live in the iterator, consuming it.
+ ///
+ /// # Parameters
+ ///
+ /// - `self`
+ ///
+ /// # Returns
+ ///
+ /// The number of chunks remaining in the iterator.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0x4B];
+ /// let bv: &BitSlice = store.into();
+ /// assert_eq!(bv.rchunks_exact(3).count(), 2);
+ /// ```
+ fn count(self) -> usize {
+ self.len()
+ }
+
+ /// Advances the iterator by `n` chunks, starting from zero.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ /// - `n`: The number of chunks to skip, before producing the next chunk
+ /// after the skips. If this overshoots the iterator’s remaining length,
+ /// then the iterator is marked empty before returning `None`.
+ ///
+ /// # Returns
+ ///
+ /// If `n` does not overshoot the iterator’s bounds, this produces the
+ /// `n`th chunk after advancing the iterator to it, discarding the
+ /// intermediate chunks.
+ ///
+ /// If `n` does overshoot the iterator’s bounds, this empties the iterator
+ /// and returns `None`.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0x4B];
+ /// let bv: &BitSlice = store.into();
+ /// let mut rchunks_exact = bv.rchunks_exact(3);
+ /// assert_eq!(rchunks_exact.nth(1), Some(&bv[2 .. 5]));
+ /// assert!(rchunks_exact.nth(0).is_none());
+ /// ```
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ // `overflowing_mul` guards against `n * width` wrapping on overflow.
+ let (end, ovf) = n.overflowing_mul(self.width);
+ if end >= self.inner.len() || ovf {
+ self.inner = BitSlice::empty();
+ return None;
+ }
+ let (head, _) = self.inner.split_at(self.inner.len() - end);
+ self.inner = head;
+ self.next()
+ }
+
+ /// Consumes the iterator, returning only the final chunk.
+ ///
+ /// # Parameters
+ ///
+ /// - `self`
+ ///
+ /// # Returns
+ ///
+ /// The last chunk in the iterator slice, if any.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0x4B];
+ /// let bv: &BitSlice = store.into();
+ /// assert_eq!(bv.rchunks_exact(3).last(), Some(&bv[2 .. 5]));
+ /// ```
+ fn last(mut self) -> Option<Self::Item> {
+ self.next_back()
+ }
+}
+
+/// State keeper for mutable reverse exact chunked iteration over a `BitSlice`.
+///
+/// Every chunk yielded is exactly `width` bits wide; the excess at the front
+/// of the original slice is split off into `extra` and never yielded.
+///
+/// # Type Parameters
+///
+/// - `C: Cursor`: The bit-order type of the underlying `BitSlice`.
+/// - `T: 'a + Bits`: The storage type of the underlying `BitSlice`.
+///
+/// # Lifetimes
+///
+/// - `'a`: The lifetime of the underlying `BitSlice`.
+#[derive(Debug)]
+pub struct RChunksExactMut<'a, C, T>
+where C: Cursor, T: 'a + Bits {
+ /// The `BitSlice` being iterated.
+ inner: &'a mut BitSlice<C, T>,
+ /// The excess of the original `BitSlice`, which is not iterated.
+ extra: &'a mut BitSlice<C, T>,
+ /// The width of the chunks, in bits.
+ width: usize,
+}
+
+impl<'a, C, T> RChunksExactMut<'a, C, T>
+where C: Cursor, T: 'a + Bits {
+ /// Produces the remainder of the original slice, which will not be included
+ /// in the iteration.
+ ///
+ /// For a reverse exact iterator, the remainder is at the *front* of the
+ /// original slice.
+ ///
+ /// # Parameters
+ ///
+ /// - `self`: Consumes the iterator, as the remainder borrows it mutably.
+ ///
+ /// # Returns
+ ///
+ /// The remaining slice that iteration will not include.
+ pub fn into_remainder(self) -> &'a mut BitSlice<C, T> {
+ self.extra
+ }
+}
+
+impl<'a, C, T> DoubleEndedIterator for RChunksExactMut<'a, C, T>
+where C: Cursor, T: 'a + Bits {
+ /// Produces the next chunk from the front of the slice.
+ ///
+ /// Since this is a reverse iterator, the double-ended position is the
+ /// front of the slice.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ ///
+ /// # Returns
+ ///
+ /// The last chunk in the slice, if any.
+ fn next_back(&mut self) -> Option<Self::Item> {
+ if self.inner.len() < self.width {
+ self.inner = BitSlice::empty_mut();
+ return None;
+ }
+ // Take `inner` out of `self` so the mutable borrow can be split.
+ let tmp = mem::replace(&mut self.inner, BitSlice::empty_mut());
+ let (head, tail) = tmp.split_at_mut(self.width);
+ self.inner = tail;
+ Some(head)
+ }
+}
+
+/// Mark that the iterator has an exact size.
+impl<'a, C, T> ExactSizeIterator for RChunksExactMut<'a, C, T>
+where C: Cursor, T: 'a + Bits {}
+
+/// Mark that the iterator will not resume after halting.
+impl<'a, C, T> FusedIterator for RChunksExactMut<'a, C, T>
+where C: Cursor, T: 'a + Bits {}
+
+impl<'a, C, T> Iterator for RChunksExactMut<'a, C, T>
+where C: Cursor, T: 'a + Bits {
+ type Item = &'a mut BitSlice<C, T>;
+
+ /// Advances the iterator by one, returning the first chunk in it (if any).
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ ///
+ /// # Returns
+ ///
+ /// The leading chunk in the iterator, if any.
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.inner.len() < self.width {
+ self.inner = BitSlice::empty_mut();
+ return None;
+ }
+ // Take `inner` out of `self` so the mutable borrow can be split.
+ let tmp = mem::replace(&mut self.inner, BitSlice::empty_mut());
+ let tlen = tmp.len();
+ let (head, tail) = tmp.split_at_mut(tlen - self.width);
+ self.inner = head;
+ Some(tail)
+ }
+
+ /// Hints at the number of chunks remaining in the iterator.
+ ///
+ /// Because the exact size is always known, this always produces
+ /// `(len, Some(len))`.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// - `usize`: The minimum chunks remaining.
+ /// - `Option<usize>`: The maximum chunks remaining.
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let n = self.inner.len() / self.width;
+ (n, Some(n))
+ }
+
+ /// Counts how many chunks are live in the iterator, consuming it.
+ ///
+ /// # Parameters
+ ///
+ /// - `self`
+ ///
+ /// # Returns
+ ///
+ /// The number of chunks remaining in the iterator.
+ fn count(self) -> usize {
+ self.len()
+ }
+
+ /// Advances the iterator by `n` chunks, starting from zero.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ /// - `n`: The number of chunks to skip, before producing the next chunk
+ /// after the skips. If this overshoots the iterator’s remaining length,
+ /// then the iterator is marked empty before returning `None`.
+ ///
+ /// # Returns
+ ///
+ /// If `n` does not overshoot the iterator’s bounds, this produces the
+ /// `n`th chunk after advancing the iterator to it, discarding the
+ /// intermediate chunks.
+ ///
+ /// If `n` does overshoot the iterator’s bounds, this empties the iterator
+ /// and returns `None`.
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ // `overflowing_mul` guards against `n * width` wrapping on overflow.
+ let (end, ovf) = n.overflowing_mul(self.width);
+ if end >= self.inner.len() || ovf {
+ self.inner = BitSlice::empty_mut();
+ return None;
+ }
+ // Take `inner` out of `self` so the mutable borrow can be split.
+ let tmp = mem::replace(&mut self.inner, BitSlice::empty_mut());
+ let tlen = tmp.len();
+ let (head, _) = tmp.split_at_mut(tlen - end);
+ self.inner = head;
+ self.next()
+ }
+
+ /// Consumes the iterator, returning only the final chunk.
+ ///
+ /// # Parameters
+ ///
+ /// - `self`
+ ///
+ /// # Returns
+ ///
+ /// The last chunk in the iterator slice, if any.
+ fn last(mut self) -> Option<Self::Item> {
+ self.next_back()
+ }
+}
+
+/// State keeper for sliding-window iteration over a `BitSlice`.
+///
+/// Windows overlap: each step advances the front edge by one bit.
+///
+/// # Type Parameters
+///
+/// - `C: Cursor`: The bit-order type of the underlying `BitSlice`.
+/// - `T: 'a + Bits`: The storage type of the underlying `BitSlice`.
+///
+/// # Lifetimes
+///
+/// - `'a`: The lifetime of the underlying `BitSlice`.
+#[derive(Clone, Debug)]
+pub struct Windows<'a, C, T>
+where C: Cursor, T: 'a + Bits {
+ /// The `BitSlice` being iterated.
+ inner: &'a BitSlice<C, T>,
+ /// The width of the windows, in bits.
+ width: usize,
+}
+
+impl<'a, C, T> DoubleEndedIterator for Windows<'a, C, T>
+where C: Cursor, T: 'a + Bits {
+ /// Produces the next window from the back of the slice.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ ///
+ /// # Returns
+ ///
+ /// The last window in the slice, if any.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0b0010_1101];
+ /// let bv: &BitSlice = store.into();
+ /// let mut windows = bv[2 .. 7].windows(3);
+ /// assert_eq!(windows.next_back(), Some(&bv[4 .. 7]));
+ /// assert_eq!(windows.next_back(), Some(&bv[3 .. 6]));
+ /// assert_eq!(windows.next_back(), Some(&bv[2 .. 5]));
+ /// assert!(windows.next_back().is_none());
+ /// ```
+ fn next_back(&mut self) -> Option<Self::Item> {
+ let len = self.inner.len();
+ if len == 0 || len < self.width {
+ self.inner = BitSlice::empty();
+ return None;
+ }
+ // Retreat the back edge by one bit; windows overlap by `width - 1`.
+ let window = &self.inner[len - self.width ..];
+ self.inner = &self.inner[.. len - 1];
+ Some(window)
+ }
+}
+
+/// Mark that the iterator has an exact size: its remaining length is always
+/// precisely known.
+impl<'a, C, T> ExactSizeIterator for Windows<'a, C, T>
+where C: Cursor, T: 'a + Bits {}
+
+/// Mark that the iterator will not resume after halting: once `next` returns
+/// `None`, it returns `None` forever.
+impl<'a, C, T> FusedIterator for Windows<'a, C, T>
+where C: Cursor, T: 'a + Bits {}
+
+impl<'a, C, T> Iterator for Windows<'a, C, T>
+where C: Cursor, T: 'a + Bits {
+ type Item = &'a BitSlice<C, T>;
+
+ /// Advances the iterator by one, returning the first window in it (if any).
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ ///
+ /// # Returns
+ ///
+ /// The leading window in the iterator, if any.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0x80];
+ /// let bv: &BitSlice = store.into();
+ /// let bv = &bv[.. 2];
+ /// let mut iter = bv.iter();
+ /// assert!(iter.next().unwrap());
+ /// assert!(!iter.next().unwrap());
+ /// assert!(iter.next().is_none());
+ /// ```
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.width > self.inner.len() {
+ self.inner = BitSlice::empty();
+ None
+ }
+ else {
+ let out = &self.inner[.. self.width];
+ self.inner = &self.inner[1 ..];
+ Some(out)
+ }
+ }
+
+ /// Hints at the number of windows remaining in the iterator.
+ ///
+ /// Because the exact size is always known, this always produces
+ /// `(len, Some(len))`.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// - `usize`: The minimum windows remaining.
+ /// - `Option<usize>`: The maximum windows remaining.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0x4B];
+ /// let bv: &BitSlice = store.into();
+ /// let bv = &bv[.. 2];
+ /// let mut iter = bv.iter();
+ /// assert_eq!(iter.size_hint(), (2, Some(2)));
+ /// iter.next();
+ /// assert_eq!(iter.size_hint(), (1, Some(1)));
+ /// iter.next();
+ /// assert_eq!(iter.size_hint(), (0, Some(0)));
+ /// ```
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let len = self.inner.len();
+ if self.width > len {
+ (0, Some(0))
+ }
+ else {
+ let len = len - self.width + 1;
+ (len, Some(len))
+ }
+ }
+
+ /// Counts how many windows are live in the iterator, consuming it.
+ ///
+ /// # Parameters
+ ///
+ /// - `self`
+ ///
+ /// # Returns
+ ///
+ /// The number of windows remaining in the iterator.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0x4B];
+ /// let bv: &BitSlice = store.into();
+ /// assert_eq!(bv.iter().count(), 8);
+ /// ```
+ fn count(self) -> usize {
+ self.len()
+ }
+
+ /// Advances the iterator by `n` windows, starting from zero.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ /// - `n`: The number of windows to skip, before producing the next window after
+ /// skips. If this overshoots the iterator’s remaining length, then the
+ /// iterator is marked empty before returning `None`.
+ ///
+ /// # Returns
+ ///
+ /// If `n` does not overshoot the iterator’s bounds, this produces the `n`th
+ /// window after advancing the iterator to it, discarding the intermediate
+ /// windows.
+ ///
+ /// If `n` does overshoot the iterator’s bounds, this empties the iterator
+ /// and returns `None`.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[2];
+ /// let bv: &BitSlice = store.into();
+ /// let mut iter = bv.iter();
+ /// assert!(iter.nth(6).unwrap());
+ /// assert!(!iter.nth(0).unwrap());
+ /// assert!(iter.nth(0).is_none());
+ /// ```
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ let (end, ovf) = n.overflowing_add(self.width);
+ if end > self.inner.len() || ovf {
+ self.inner = BitSlice::empty();
+ return None;
+ }
+ let out = &self.inner[n .. end];
+ self.inner = &self.inner[n + 1 ..];
+ Some(out)
+ }
+
+ /// Consumes the iterator, returning only the final window.
+ ///
+ /// # Parameters
+ ///
+ /// - `self`
+ ///
+ /// # Returns
+ ///
+ /// The last window in the iterator slice, if any.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let store: &[u8] = &[0x4B];
+ /// let bv: &BitSlice = store.into();
+ /// assert_eq!(bv.windows(3).last(), Some(&bv[5 ..]));
+ /// ```
+ fn last(mut self) -> Option<Self::Item> {
+ self.next_back()
+ }
+}
diff --git a/third_party/rust/bitvec/src/vec.rs b/third_party/rust/bitvec/src/vec.rs
new file mode 100644
--- /dev/null
+++ b/third_party/rust/bitvec/src/vec.rs
@@ -0,0 +1,3197 @@
+/*! `BitVec` structure
+
+This module holds the main working type of the library. Clients can use
+`BitSlice` directly, but `BitVec` is much more useful for most work.
+
+The `BitSlice` module discusses the design decisions for the separation between
+slice and vector types.
+!*/
+
+#![cfg(feature = "alloc")]
+
+use crate::{
+ BigEndian,
+ BitBox,
+ BitIdx,
+ BitPtr,
+ BitSlice,
+ Bits,
+ Cursor,
+};
+#[cfg(all(feature = "alloc", not(feature = "std")))]
+use alloc::{
+ borrow::{
+ Borrow,
+ BorrowMut,
+ ToOwned,
+ },
+ boxed::Box,
+ vec::Vec,
+};
+use core::{
+ clone::Clone,
+ cmp::{
+ self,
+ Eq,
+ Ord,
+ Ordering,
+ PartialEq,
+ PartialOrd,
+ },
+ convert::{
+ AsMut,
+ AsRef,
+ From,
+ },
+ default::Default,
+ fmt::{
+ self,
+ Debug,
+ Display,
+ Formatter,
+ },
+ hash::{
+ Hash,
+ Hasher,
+ },
+ iter::{
+ self,
+ DoubleEndedIterator,
+ ExactSizeIterator,
+ Extend,
+ FromIterator,
+ FusedIterator,
+ Iterator,
+ IntoIterator,
+ },
+ marker::PhantomData,
+ mem,
+ ops::{
+ Add,
+ AddAssign,
+ BitAnd,
+ BitAndAssign,
+ BitOr,
+ BitOrAssign,
+ BitXor,
+ BitXorAssign,
+ Deref,
+ DerefMut,
+ Drop,
+ Index,
+ IndexMut,
+ Range,
+ RangeBounds,
+ RangeFrom,
+ RangeFull,
+ RangeInclusive,
+ RangeTo,
+ RangeToInclusive,
+ Neg,
+ Not,
+ Shl,
+ ShlAssign,
+ Shr,
+ ShrAssign,
+ Sub,
+ SubAssign,
+ },
+ ptr::{
+ self,
+ NonNull,
+ },
+};
+#[cfg(feature = "std")]
+use std::{
+ borrow::{
+ Borrow,
+ BorrowMut,
+ ToOwned,
+ },
+ boxed::Box,
+ io::{
+ self,
+ Write,
+ },
+ vec::Vec,
+};
+
+/** A compact [`Vec`] of bits, whose cursor and storage type can be customized.
+
+`BitVec` is a newtype wrapper over `Vec`, and as such is exactly three words in
+size on the stack.
+
+# Examples
+
+```rust
+use bitvec::*;
+
+let mut bv: BitVec = BitVec::new();
+bv.push(false);
+bv.push(true);
+
+assert_eq!(bv.len(), 2);
+assert_eq!(bv[0], false);
+
+assert_eq!(bv.pop(), Some(true));
+assert_eq!(bv.len(), 1);
+
+bv.set(0, true);
+assert_eq!(bv[0], true);
+
+bv.extend([0u8, 1, 0].iter().map(|n| *n != 0u8));
+for bit in &*bv {
+ println!("{}", bit);
+}
+assert_eq!(bv, bitvec![1, 0, 1, 0]);
+```
+
+The [`bitvec!`] macro is provided to make initialization more convenient.
+
+```rust
+use bitvec::*;
+
+let mut bv = bitvec![0, 1, 2, 3];
+bv.push(false);
+assert_eq!(bv, bitvec![0, 1, 1, 1, 0]);
+```
+
+It can also initialize each element of a `BitVec<_, T>` with a given value. This
+may be more efficient than performing allocation and initialization in separate
+steps, especially when initializing a vector of zeros:
+
+```rust
+use bitvec::*;
+
+let bv = bitvec![0; 15];
+assert_eq!(bv, bitvec![0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
+
+// The following is equivalent, but potentially slower:
+let mut bv1: BitVec = BitVec::with_capacity(15);
+bv1.resize(15, false);
+```
+
+Use a `BitVec<T>` as an efficient stack:
+
+```rust
+use bitvec::*;
+let mut stack: BitVec = BitVec::new();
+
+stack.push(false);
+stack.push(true);
+stack.push(true);
+
+while let Some(top) = stack.pop() {
+ // Prints true, true, false
+ println!("{}", top);
+}
+```
+
+# Indexing
+
+The `BitVec` type allows you to access values by index, because it implements
+the [`Index`] trait. An example will be more explicit:
+
+```rust
+use bitvec::*;
+
+let bv = bitvec![0, 0, 1, 1];
+println!("{}", bv[1]); // it will display 'false'
+```
+
+However, be careful: if you try to access an index which isn’t in the `BitVec`,
+your software will panic! You cannot do this:
+
+```rust,should_panic
+use bitvec::*;
+
+let bv = bitvec![0, 1, 0, 1];
+println!("{}", bv[6]); // it will panic!
+```
+
+In conclusion: always check if the index you want to get really exists before
+doing it.
+
+# Slicing
+
+A `BitVec` is growable. A [`BitSlice`], on the other hand, is fixed size. To get
+a bit slice, use `&`. Example:
+
+```rust
+use bitvec::*;
+fn read_bitslice(slice: &BitSlice) {
+ // use slice
+}
+
+let bv = bitvec![0, 1];
+read_bitslice(&bv);
+
+// … and that’s all!
+// you can also do it like this:
+let bs : &BitSlice = &bv;
+```
+
+In Rust, it’s more common to pass slices as arguments rather than vectors when
+you do not want to grow or shrink it. The same goes for [`Vec`] and [`&[]`], and
+[`String`] and [`&str`].
+
+# Capacity and Reallocation
+
+The capacity of a bit vector is the amount of space allocated for any future
+elements that will be added onto the vector. This is not to be confused with the
+*length* of a vector, which specifies the number of actual bits within the
+vector. If a vector’s length exceeds its capacity, its capacity will
+automatically be increased, but its elements will have to be reallocated.
+
+For example, a bit vector with capacity 10 and length 0 would be an allocated,
+but uninhabited, vector, with space for ten more bits. Pushing ten or fewer bits
+onto the vector will not change its capacity or cause reallocation to occur.
+However, if the vector’s length is increased to eleven, it will have to
+reallocate, which can be slow. For this reason, it is recommended to use
+[`BitVec::with_capacity`] whenever possible to specify how big the bit vector is
+expected to get.
+
+# Guarantees
+
+Due to its incredibly fundamental nature, `BitVec` makes a lot of guarantees
+about its design. This ensures that it is as low-overhead as possible in the
+general case, and can be correctly manipulated in fundamental ways by `unsafe`
+code.
+
+Most fundamentally, `BitVec` is and always will be a `([`BitPtr`], capacity)`
+doublet. No more, no less. The order of these fields is unspecified, and you
+should **only** interact with the members through the provided APIs. Note that
+`BitPtr` is ***not directly manipulable***, and must ***never*** be written or
+interpreted as anything but opaque binary data by user code.
+
+When a `BitVec` has allocated memory, then the memory to which it points is on
+the heap (as defined by the allocator Rust is configured to use by default), and
+its pointer points to [`len`] initialized bits in order of the [`Cursor`] type
+parameter, followed by `capacity - len` logically uninitialized bits.
+
+`BitVec` will never perform a “small optimization” where elements are stored in
+its handle representation, for two reasons:
+
+- It would make it more difficult for user code to correctly manipulate a
+ `BitVec`. The contents of the `BitVec` would not have a stable address if the
+ handle were moved, and it would be more difficult to determine if a `BitVec`
+ had allocated memory.
+
+- It would penalize the general, heap-allocated, case by incurring a branch on
+ every access.
+
+`BitVec` will never automatically shrink itself, even if it is emptied. This
+ensures that no unnecessary allocations or deallocations occur. Emptying a
+`BitVec` and then refilling it to the same length will incur no calls to the
+allocator. If you wish to free up unused memory, use [`shrink_to_fit`].
+
+## Erasure
+
+`BitVec` will not specifically overwrite any data that is removed from it, nor
+will it specifically preserve it. Its uninitialized memory is scratch space that
+may be used however the implementation desires, and must not be relied upon as
+stable. Do not rely on removed data to be erased for security purposes. Even if
+you drop a `BitVec`, its buffer may simply be reused for other data structures
+in your program. Even if you zero a `BitVec`’s memory first, that may not
+actually occur if the optimizer does not consider this an observable side
+effect. There is one case that will never break, however: using `unsafe` to
+construct a `[T]` slice over the `BitVec`’s capacity, and writing to the excess
+space, then increasing the length to match, is always valid.
+
+# Type Parameters
+
+- `C: Cursor`: An implementor of the [`Cursor`] trait. This type is used to
+ convert semantic indices into concrete bit positions in elements, and store or
+ retrieve bit values from the storage type.
+- `T: Bits`: An implementor of the [`Bits`] trait: `u8`, `u16`, `u32`, `u64`.
+ This is the actual type in memory the slice will use to store data.
+
+# Safety
+
+The `BitVec` handle has the same *size* as standard Rust `Vec` handles, but it
+is ***extremely binary incompatible*** with them. Attempting to treat
+`BitVec<_, T>` as `Vec<T>` in any manner except through the provided APIs is
+***catastrophically*** unsafe and unsound.
+
+[`BitSlice`]: ../struct.BitSlice.html
+[`BitVec::with_capacity`]: #method.with_capacity
+[`Bits`]: ../trait.Bits.html
+[`Cursor`]: ../trait.Cursor.html
+[`Index`]: https://doc.rust-lang.org/stable/std/ops/trait.Index.html
+[`String`]: https://doc.rust-lang.org/stable/std/string/struct.String.html
+[`Vec`]: https://doc.rust-lang.org/stable/std/vec/struct.Vec.html
+[`bitvec!`]: ../macro.bitvec.html
+[`clear_on_drop`]: https://docs.rs/clear_on_drop
+[`len`]: #method.len
+[`shrink_to_fit`]: #method.shrink_to_fit
+[`&str`]: https://doc.rust-lang.org/stable/std/primitive.str.html
+[`&[]`]: https://doc.rust-lang.org/stable/std/primitive.slice.html
+**/
+#[repr(C)]
+pub struct BitVec<C = BigEndian, T = u8>
+where C: Cursor, T: Bits {
+ _cursor: PhantomData<C>,
+ /// Slice pointer over the owned memory.
+ pointer: BitPtr<T>,
+ /// The number of *elements* this vector has allocated.
+ capacity: usize,
+}
+
+impl<C, T> BitVec<C, T>
+where C: Cursor, T: Bits {
+ /// Constructs a new, empty, `BitVec<C, T>`.
+ ///
+ /// The vector does not allocate until bits are written into it.
+ ///
+ /// # Returns
+ ///
+ /// An empty, unallocated, `BitVec` handle.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let bv: BitVec = BitVec::new();
+ /// assert!(bv.is_empty());
+ /// assert_eq!(bv.capacity(), 0);
+ /// ```
+ pub fn new() -> Self {
+ Default::default()
+ }
+
+ /// Constructs a new, empty, `BitVec<T>` with the specified capacity.
+ ///
+ /// The new vector will be able to hold at least `capacity` elements before
+ /// it reallocates. If `capacity` is `0`, it will not allocate.
+ ///
+ /// # Parameters
+ ///
+ /// - `capacity`: The minimum number of bits that the new vector will need
+ /// to be able to hold.
+ ///
+ /// # Returns
+ ///
+ /// An empty vector with at least the given capacity.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let bv: BitVec = BitVec::with_capacity(10);
+ /// assert!(bv.is_empty());
+ /// assert!(bv.capacity() >= 10);
+ /// ```
+ pub fn with_capacity(capacity: usize) -> Self {
+ let (cap, _) = BitIdx::from(0).span::<T>(capacity);
+ let (ptr, cap) = {
+ let v = Vec::with_capacity(cap);
+ let (ptr, cap) = (v.as_ptr(), v.capacity());
+ mem::forget(v);
+ (ptr, cap)
+ };
+ Self {
+ _cursor: PhantomData,
+ pointer: BitPtr::uninhabited(ptr),
+ capacity: cap,
+ }
+ }
+
+ /// Creates a new `BitVec<C, T>` directly from the raw parts of another.
+ ///
+ /// # Parameters
+ ///
+ /// - `pointer`: The `BitPtr<T>` to use.
+ /// - `capacity`: The number of `T` elements *allocated* in that slab.
+ ///
+ /// # Returns
+ ///
+ /// A `BitVec` over the given slab of memory.
+ ///
+ /// # Safety
+ ///
+ /// This is ***highly*** unsafe, due to the number of invariants that aren’t
+ /// checked:
+ ///
+ /// - `pointer` needs to have been previously allocated by some allocating
+ /// type.
+ /// - `pointer`’s `T` needs to have the same size ***and alignment*** as it
+ /// was initially allocated.
+ /// - `pointer`’s element count needs to be less than or equal to the
+ /// original allocation capacity.
+ /// - `capacity` needs to be the original allocation capacity for the
+ /// pointer.
+ ///
+ /// Violating these ***will*** cause problems, like corrupting the handle’s
+ /// concept of memory, the allocator’s internal data structures, and the
+ /// sanity of your program. It is ***absolutely*** not safe to construct a
+ /// `BitVec` whose `T` differs from the type used for the initial
+ /// allocation.
+ ///
+ /// The ownership of `pointer` is effectively transferred to the
+ /// `BitVec<C, T>` which may then deallocate, reallocate, or modify the
+ /// contents of the referent slice at will. Ensure that nothing else uses
+ /// the pointer after calling this function.
+ pub unsafe fn from_raw_parts(pointer: BitPtr<T>, capacity: usize) -> Self {
+ Self {
+ _cursor: PhantomData,
+ pointer,
+ capacity,
+ }
+ }
+
+ /// Returns the number of bits the vector can hold without reallocating.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// The number of bits that the vector can hold before reallocating.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let bv: BitVec = BitVec::with_capacity(10);
+ /// assert!(bv.is_empty());
+ /// assert!(bv.capacity() >= 10);
+ /// ```
+ pub fn capacity(&self) -> usize {
+ assert!(self.capacity < BitPtr::<T>::MAX_ELTS, "Capacity overflow");
+ self.capacity << T::BITS
+ }
+
+ /// Reserves capacity for at least `additional` more bits to be inserted.
+ ///
+ /// The collection may reserve more space to avoid frequent reallocations.
+ /// After calling `reserve`, capacity will be greater than or equal to
+ /// `self.len() + additional`. Does nothing if the capacity is already
+ /// sufficient.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ /// - `additional`: The number of extra bits to be granted space.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity would overflow the vector’s limits.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let mut bv = bitvec![1; 5];
+ /// assert!(bv.capacity() >= 5);
+ /// bv.reserve(10);
+ /// assert!(bv.capacity() >= 15);
+ /// ```
+ pub fn reserve(&mut self, additional: usize) {
+ assert!(
+ self.len() + additional < BitPtr::<T>::MAX_BITS,
+ "Capacity overflow",
+ );
+ let (e, _) = self.pointer.head().span::<T>(additional);
+ self.do_unto_vec(|v| v.reserve(e));
+ }
+
+ /// Reserves the minimum capacity for at least `additional` more bits.
+ ///
+ /// After calling `reserve_exact`, the capacity will be greater than or
+ /// equal to `self.len() + additional`. Does nothing if the capacity is
+ /// already sufficient.
+ ///
+ /// Note that the allocator may give the collection more space than it
+ /// requests. Therefore, the capacity cannot be relied upon to be precisely
+ /// minimal. Prefer `reserve` if future insertions are expected.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ /// - `additional`: The number of extra bits to be granted space.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity would overflow the vector’s limits.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let mut bv = bitvec![1; 5];
+ /// assert!(bv.capacity() >= 5);
+ /// bv.reserve_exact(10);
+ /// assert!(bv.capacity() >= 15);
+ /// ```
+ pub fn reserve_exact(&mut self, additional: usize) {
+ assert!(
+ self.len() + additional < BitPtr::<T>::MAX_BITS,
+ "Capacity overflow",
+ );
+ let (e, _) = self.pointer.head().span::<T>(additional);
+ self.do_unto_vec(|v| v.reserve_exact(e));
+ }
+
+ /// Shrinks the capacity of the vector as much as possible.
+ ///
+ /// It will drop down as close as possible to the length, but the allocator
+ /// may still inform the vector that there is space for a few more elements.
+ ///
+ /// This does not affect the memory store! It will not zero the raw memory,
+ /// nor will it deallocate.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let mut bv = bitvec![1; 100];
+ /// let cap = bv.capacity();
+ /// bv.truncate(10);
+ /// bv.shrink_to_fit();
+ /// assert!(bv.capacity() <= cap);
+ /// ```
+ pub fn shrink_to_fit(&mut self) {
+ self.do_unto_vec(Vec::shrink_to_fit);
+ }
+
+ /// Shortens the vector, keeping the first `len` bits and dropping the rest.
+ ///
+ /// If `len` is greater than the vector’s current length, this has no
+ /// effect.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ /// - `len`: The new length of the vector.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let mut bv = bitvec![1; 15];
+ /// bv.truncate(10);
+ /// assert_eq!(bv.len(), 10);
+ ///
+ /// bv.truncate(15);
+ /// assert_eq!(bv.len(), 10);
+ /// ```
+ pub fn truncate(&mut self, len: usize) {
+ if len < self.len() {
+ let (p, _, h, _) = self.pointer.raw_parts();
+ // Find the new element count and tail position
+ let (e, t) = h.span::<T>(len);
+ // And reset the pointer to use that span.
+ self.pointer = BitPtr::new(p, e, h, t);
+ }
+ }
+
+ /// Extracts a `BitSlice` containing the entire vector.
+ ///
+ /// Equivalent to `&s[..]`.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// A `BitSlice` over the vector.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let bv = bitvec![0, 1, 1, 0];
+ /// let bs = bv.as_bitslice();
+ /// ```
+ pub fn as_bitslice(&self) -> &BitSlice<C, T> {
+ self.pointer.into()
+ }
+
+ /// Extracts a mutable `BitSlice` containing the entire vector.
+ ///
+ /// Equivalent to `&mut s[..]`.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ ///
+ /// # Returns
+ ///
+ /// A mutable `BitSlice` over the vector.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let mut bv = bitvec![0, 1, 1, 0];
+ /// let bs = bv.as_mut_bitslice();
+ /// ```
+ pub fn as_mut_bitslice(&mut self) -> &mut BitSlice<C, T> {
+ self.pointer.into()
+ }
+
+ /// Sets the length of the vector.
+ ///
+ /// This unconditionally sets the size of the vector, without modifying its
+ /// contents. It is up to the caller to ensure that the vector’s buffer can
+ /// hold the new size.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ /// - `len`: The new length of the vector. This must be less than the
+ /// maximum number of bits that the vector can hold.
+ ///
+ /// # Panics
+ ///
+ /// This panics if `len` overflows the vector’s intrinsic *or allocated*
+ /// capacities.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that the new length is sound for the vector.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let mut bv: BitVec = BitVec::with_capacity(15);
+ /// assert!(bv.is_empty());
+ /// unsafe { bv.set_len(10) };
+ /// assert_eq!(bv.len(), 10);
+ /// ```
+ pub unsafe fn set_len(&mut self, len: usize) {
+ assert!(len < BitPtr::<T>::MAX_BITS, "Capacity overflow");
+ assert!(len <= self.capacity(), "Capacity overflow");
+ let (ptr, _, head, _) = self.bitptr().raw_parts();
+ let (elts, tail) = self.bitptr().head().offset::<T>(len as isize);
+ // Add one to elts because the value in elts is the *offset* from the
+ // first element.
+ self.pointer = BitPtr::new(ptr, elts as usize + 1, head, tail);
+ }
+
+ /// Removes a bit from the vector and returns it.
+ ///
+ /// The removed bit is replaced by the last bit in the vector.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ /// - `index`: The index whose bit is to be returned, and replaced by the
+ /// tail.
+ ///
+ /// # Returns
+ ///
+ /// The bit at the requested index.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the index is out of bounds.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let mut bv = bitvec![0, 0, 0, 0, 1];
+ /// assert!(!bv[2]);
+ /// assert_eq!(bv.len(), 5);
+ /// assert!(!bv.swap_remove(2));
+ /// assert!(bv[2]);
+ /// assert_eq!(bv.len(), 4);
+ /// ```
+ pub fn swap_remove(&mut self, index: usize) -> bool {
+ if index >= self.len() {
+ panic!("Index {} out of bounds: {}", index, self.len());
+ }
+ let bit = self[index];
+ let last = self.pop().unwrap();
+ self.set(index, last);
+ bit
+ }
+
+ /// Inserts an element at a position, shifting all elements after it to the
+ /// right.
+ ///
+ /// Note that this is `O(n)` runtime.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ /// - `index`: The position at which to insert. This may be any value from
+ /// `0` up to *and including* `self.len()`. At `self.len()`, it is
+ /// equivalent to calling `self.push(value)`.
+ /// - `value`: The value to be inserted.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `index` is greater than the length.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let mut bv = bitvec![0, 0, 0, 0];
+ /// bv.insert(2, true);
+ /// assert_eq!(bv, bitvec![0, 0, 1, 0, 0]);
+ /// bv.insert(5, true);
+ /// assert_eq!(bv, bitvec![0, 0, 1, 0, 0, 1]);
+ /// ```
+ pub fn insert(&mut self, index: usize, value: bool) {
+ let len = self.len();
+ self.push(false);
+ for n in (index .. len).rev() {
+ let bit = self[n];
+ self.set(n + 1, bit);
+ }
+ self.set(index, value);
+
+ }
+
+ /// Removes and returns the element at position `index`, shifting all
+ /// elements after it to the left.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ /// - `index`: The position whose bit is to be removed. This must be in the
+ /// domain `0 .. self.len()`.
+ ///
+ /// # Returns
+ ///
+ /// The bit at the requested index.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `index` is out of bounds for the vector.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let mut bv = bitvec![0, 0, 1, 0, 0];
+ /// assert!(bv.remove(2));
+ /// assert_eq!(bv, bitvec![0, 0, 0, 0]);
+ /// ```
+ pub fn remove(&mut self, index: usize) -> bool {
+ let len = self.len();
+ assert!(index < len, "Index {} is out of bounds: {}", index, len);
+ let out = self[index];
+ for n in index .. (len - 1) {
+ let bit = self[n + 1];
+ self.set(n, bit);
+ }
+ self.pop();
+ out
+ }
+
+ /// Retains only the bits that pass the predicate.
+ ///
+ /// This removes all bits `b` where `pred(b)` returns `false`. This method
+ /// operates in place and preserves the order of the retained bits. Because
+ /// it is in-place, it operates in `O(n²)` time.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ /// - `pred`: The testing predicate for each bit.
+ ///
+ /// # Type Parameters
+ ///
+ /// - `F: FnMut(bool) -> bool`: A function that can be invoked on each bit,
+ /// returning whether the bit should be kept or not.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let mut bv = bitvec![0, 1, 0, 1, 0, 1];
+ /// bv.retain(|b| b);
+ /// assert_eq!(bv, bitvec![1, 1, 1]);
+ /// ```
+ pub fn retain<F: FnMut(bool) -> bool>(&mut self, mut pred: F) {
+ for n in (0 .. self.len()).rev() {
+ if !pred(self[n]) {
+ self.remove(n);
+ }
+ }
+ }
+
+ /// Appends a bit to the back of the vector.
+ ///
+ /// If the vector is at capacity, this may cause a reallocation.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ /// - `value`: The bit value to append.
+ ///
+ /// # Panics
+ ///
+ /// This will panic if the push will cause the vector to allocate above
+ /// `BitPtr<T>` or machine capacity.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let mut bv: BitVec = BitVec::new();
+ /// assert!(bv.is_empty());
+ /// bv.push(true);
+ /// assert_eq!(bv.len(), 1);
+ /// assert!(bv[0]);
+ /// ```
+ pub fn push(&mut self, value: bool) {
+ assert!(self.len() < BitPtr::<T>::MAX_BITS, "Capacity overflow");
+ let slot = self.len();
+ // If self is empty *or* tail is at the back edge of an element, push
+ // an element onto the vector.
+ if self.is_empty() || *self.pointer.tail() == T::SIZE {
+ self.do_unto_vec(|v| v.push(0.into()));
+ }
+ // At this point, it is always safe to increment the tail, and then
+ // write to the newly live bit.
+ unsafe { self.bitptr_mut().incr_tail() };
+ self.set(slot, value);
+ }
+
+ /// Removes the last bit from the collection, if present.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ ///
+ /// # Returns
+ ///
+ /// If the vector is not empty, this returns the last bit; if it is empty,
+ /// this returns None.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let mut bv: BitVec = BitVec::new();
+ /// assert!(bv.is_empty());
+ /// bv.push(true);
+ /// assert_eq!(bv.len(), 1);
+ /// assert!(bv[0]);
+ ///
+ /// assert!(bv.pop().unwrap());
+ /// assert!(bv.is_empty());
+ /// assert!(bv.pop().is_none());
+ /// ```
+ pub fn pop(&mut self) -> Option<bool> {
+ if self.is_empty() {
+ return None;
+ }
+ let out = self[self.len() - 1];
+ unsafe { self.bitptr_mut().decr_tail() };
+ Some(out)
+ }
+
+ /// Moves all the elements of `other` into `self`, leaving `other` empty.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ /// - `other`: A `BitVec` of any order and storage type. Its bits are
+ /// appended to `self`.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the joined vector is too large.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let mut bv1 = bitvec![0; 10];
+ /// let mut bv2 = bitvec![1; 10];
+ /// bv1.append(&mut bv2);
+ /// assert_eq!(bv1.len(), 20);
+ /// assert!(bv1[10]);
+ /// assert!(bv2.is_empty());
+ /// ```
+ pub fn append<D, U>(&mut self, other: &mut BitVec<D, U>)
+ where D: Cursor, U: Bits {
+ self.extend(other.iter());
+ other.clear();
+ }
+
+ /// Creates a draining iterator that removes the specified range from the
+ /// vector and yields the removed bits.
+ ///
+ /// # Notes
+ ///
+ /// 1. The element range is removed, regardless of whether the iterator is
+ /// consumed.
+ /// 2. The amount of items removed from the vector if the draining iterator
+ /// is leaked, is left unspecified.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ /// - `range`: any range literal, which is used to define the range of the
+ /// vector that is drained.
+ ///
+ /// # Returns
+ ///
+ /// An iterator over the specified range.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the range is ill-formed, or if it is beyond the vector bounds.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let mut bv = bitvec![0, 0, 1, 1, 1, 0, 0];
+ /// assert_eq!(bv.len(), 7);
+ /// for bit in bv.drain(2 .. 5) {
+ /// assert!(bit);
+ /// }
+ /// assert!(bv.not_any());
+ /// assert_eq!(bv.len(), 4);
+ /// ```
+ pub fn drain<R: RangeBounds<usize>>(&mut self, range: R) -> Drain<C, T> {
+ use core::ops::Bound::*;
+ let len = self.len();
+ let from = match range.start_bound() {
+ Included(&n) => n,
+ Excluded(&n) => n + 1,
+ Unbounded => 0,
+ };
+ // First index beyond the end of the drain.
+ let upto = match range.end_bound() {
+ Included(&n) => n + 1,
+ Excluded(&n) => n,
+ Unbounded => len,
+ };
+ assert!(from <= upto, "The drain start must be below the drain end");
+ assert!(upto <= len, "The drain end must be within the vector bounds");
+
+ unsafe {
+ let ranging: &BitSlice<C, T> = self.as_bitslice()[from .. upto].bitptr().into();
+ self.set_len(from);
+
+ Drain {
+ bitvec: NonNull::from(self),
+ iter: ranging.iter(),
+ tail_start: upto,
+ tail_len: len - upto,
+ }
+ }
+ }
+
+ /// Clears the vector, removing all values.
+ ///
+ /// Note that this method has no effect on the allocated capacity of the
+ /// vector.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ ///
+ /// # Effects
+ ///
+ /// Becomes an uninhabited slice.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let mut bv = bitvec![1; 30];
+ /// assert_eq!(bv.len(), 30);
+ /// assert!(bv.iter().all(|b| b));
+ /// bv.clear();
+ /// assert!(bv.is_empty());
+ /// ```
+ ///
+ /// After calling `clear()`, `bv` will no longer show raw memory, so the
+ /// above test cannot show that the underlying memory is not altered. This
+ /// is also an implementation detail on which you should not rely.
+ pub fn clear(&mut self) {
+ self.pointer = BitPtr::uninhabited(self.pointer.pointer());
+ }
+
+ /// Splits the collection into two at the given index.
+ ///
+ /// Returns a newly allocated `Self`. `self` contains elements `[0, at)`,
+ /// and the returned `Self` contains elements `[at, self.len())`.
+ ///
+ /// Note that the capacity of `self` does not change.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ /// - `at`: The index at which to perform the split. This must be in the
+ /// domain `0 ..= self.len()`. When it is `self.len()`, an empty vector is
+ /// returned.
+ ///
+ /// # Returns
+ ///
+ /// A new `BitVec` containing all the elements from `at` onwards.
+ ///
+ /// # Panics
+ ///
+ /// Intended to panic if `at` is beyond `self.len()`; NOTE(review): the current body never checks `at`, so an out-of-range `at` silently yields an empty vector instead of panicking — confirm against upstream.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let mut bv1 = bitvec![0, 0, 0, 1, 1, 1];
+ /// let bv2 = bv1.split_off(3);
+ /// assert_eq!(bv1, bitvec![0, 0, 0]);
+ /// assert_eq!(bv2, bitvec![1, 1, 1]);
+ /// ```
+ pub fn split_off(&mut self, at: usize) -> Self {
+ if at == 0 {
+ let out = self.clone();
+ self.clear();
+ return out;
+ }
+ if at == self.len() {
+ return Self::default();
+ }
+ let out = (&*self).iter()
+ .skip(at)
+ .collect::<Self>();
+ self.truncate(at);
+ out
+ }
+
+ /// Resizes the `BitVec` in place so that `len` is equal to `new_len`.
+ ///
+ /// If `new_len` is greater than `len`, then the vector is extended by the
+ /// difference, and filled with the provided value. If `new_len` is less
+ /// than `len`, then the vector is just truncated.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ /// - `new_len`: The new length of the vector.
+ /// - `value`: The fill value if the vector is to be extended.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let mut bv = bitvec![0; 4];
+ /// bv.resize(8, true);
+ /// assert_eq!(bv, bitvec![0, 0, 0, 0, 1, 1, 1, 1]);
+ /// bv.resize(5, false);
+ /// assert_eq!(bv, bitvec![0, 0, 0, 0, 1]);
+ /// ```
+ pub fn resize(&mut self, new_len: usize, value: bool) {
+ let len = self.len();
+ if new_len < len {
+ self.truncate(new_len);
+ }
+ else if new_len > len {
+ self.extend(iter::repeat(value).take(new_len - len));
+ }
+ }
+
+ /// Creates a splicing iterator that exchanges the specified range for the
+ /// `replacement` iterator, yielding the removed items. The range and its
+ /// replacement do not need to be the same size.
+ pub fn splice<R, I>(
+ &mut self,
+ range: R,
+ replacement: I,
+ ) -> Splice<C, T, <I as IntoIterator>::IntoIter>
+ where R: RangeBounds<usize>, I: Iterator<Item=bool> {
+ Splice {
+ drain: self.drain(range),
+ splice: replacement.into_iter(),
+ }
+ }
+
+ /// Sets the backing storage to the provided element.
+ ///
+ /// This unconditionally sets each allocated element in the backing storage
+ /// to the provided value, without altering the `BitVec` length or capacity.
+ /// It operates on the underlying `Vec`’s memory region directly, and will
+ /// ignore the `BitVec`’s cursors.
+ ///
+ /// This has the unobservable effect of setting the allocated, but dead,
+ /// bits beyond the end of the vector’s *length*, up to its *capacity*.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ /// - `element`: The value to which each allocated element in the backing
+ /// store will be set.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let mut bv = bitvec![0; 10];
+ /// assert_eq!(bv.as_slice(), &[0, 0]);
+ /// bv.set_elements(0xA5);
+ /// assert_eq!(bv.as_slice(), &[0xA5, 0xA5]);
+ /// ```
+ pub fn set_elements(&mut self, element: T) {
+ self.do_unto_vec(|v| {
+ let (ptr, len) = (v.as_mut_ptr(), v.capacity());
+ for elt in unsafe { std::slice::from_raw_parts_mut(ptr, len) } {
+ *elt = element;
+ }
+ })
+ }
+
+ pub(crate) fn bitptr(&self) -> BitPtr<T> {
+ self.pointer
+ }
+
+ /// Gives write access to the `BitPtr` structure powering the vector.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ ///
+ /// # Returns
+ ///
+ /// A mutable reference to the interior `BitPtr`.
+ pub(crate) fn bitptr_mut(&mut self) -> &mut BitPtr<T> {
+ &mut self.pointer
+ }
+
+ /// Permits a function to modify the `Vec<T>` underneath a `BitVec<_, T>`.
+ ///
+ /// This produces a `Vec<T>` structure referring to the same data region as
+ /// the `BitVec<_, T>`, allows a function to mutably view it, and then
+ /// forgets the `Vec<T>` after the function concludes.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ /// - `func`: A function which receives a mutable borrow to the `Vec<T>`
+ /// underlying the `BitVec<_, T>`.
+ ///
+ /// # Type Parameters
+ ///
+ /// - `F: FnOnce(&mut Vec<T>) -> R`: Any callable object (function or
+ /// closure) which receives a mutable borrow of a `Vec<T>`.
+ ///
+ /// - `R`: The return value from the called function or closure.
+ fn do_unto_vec<F, R>(&mut self, func: F) -> R
+ where F: FnOnce(&mut Vec<T>) -> R {
+ let (data, elts, head, tail) = self.bitptr().raw_parts();
+ let mut v = unsafe {
+ Vec::from_raw_parts(data as *mut T, elts, self.capacity)
+ };
+ let out = func(&mut v);
+ self.pointer = BitPtr::new(v.as_ptr(), elts, head, tail);
+ self.capacity = v.capacity();
+ mem::forget(v);
+ out
+ }
+
+ /// Permits a function to view the `Vec<T>` underneath a `BitVec<_, T>`.
+ ///
+ /// This produces a `Vec<T>` structure referring to the same data region as
+ /// the `BitVec<_, T>`, allows a function to immutably view it, and then
+ /// forgets the `Vec<T>` after the function concludes.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ /// - `func`: A function which receives an immutable borrow to the `Vec<T>`
+ /// underlying the `BitVec<_, T>`.
+ ///
+ /// # Returns
+ ///
+ /// The return value of `func`.
+ ///
+ /// # Type Parameters
+ ///
+ /// - `F: FnOnce(&Vec<T>)`: Any callable object (function or closure) which
+ /// receives an immutable borrow of a `Vec<T>` and returns nothing.
+ ///
+ /// # Safety
+ ///
+ /// This produces an empty `Vec<T>` if the `BitVec<_, T>` is empty.
+ fn do_with_vec<F, R>(&self, func: F) -> R
+ where F: FnOnce(&Vec<T>) -> R {
+ let (data, elts, _, _) = self.bitptr().raw_parts();
+ let v: Vec<T> = unsafe {
+ Vec::from_raw_parts(data as *mut T, elts, self.capacity)
+ };
+ let out = func(&v);
+ mem::forget(v);
+ out
+ }
+}
+
+/// Signifies that `BitSlice` is the borrowed form of `BitVec`.
+impl<C, T> Borrow<BitSlice<C, T>> for BitVec<C, T>
+where C: Cursor, T: Bits {
+ /// Borrows the `BitVec` as a `BitSlice`.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// A borrowed `BitSlice` of the vector.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ /// use std::borrow::Borrow;
+ ///
+ /// let bv = bitvec![0; 13];
+ /// let bs: &BitSlice = bv.borrow();
+ /// assert!(!bs[10]);
+ /// ```
+ fn borrow(&self) -> &BitSlice<C, T> {
+ &*self
+ }
+}
+
+/// Signifies that `BitSlice` is the borrowed form of `BitVec`.
+impl<C, T> BorrowMut<BitSlice<C, T>> for BitVec<C, T>
+where C: Cursor, T: Bits {
+ /// Mutably borrows the `BitVec` as a `BitSlice`.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ ///
+ /// # Returns
+ ///
+ /// A mutably borrowed `BitSlice` of the vector.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ /// use std::borrow::BorrowMut;
+ ///
+ /// let mut bv = bitvec![0; 13];
+ /// let bs: &mut BitSlice = bv.borrow_mut();
+ /// assert!(!bs[10]);
+ /// bs.set(10, true);
+ /// assert!(bs[10]);
+ /// ```
+ fn borrow_mut(&mut self) -> &mut BitSlice<C, T> {
+ &mut *self
+ }
+}
+
+impl<C, T> Clone for BitVec<C, T>
+where C: Cursor, T: Bits {
+ fn clone(&self) -> Self {
+ let (_, e, h, t) = self.bitptr().raw_parts();
+ let new_vec = self.do_with_vec(Clone::clone);
+ let (ptr, cap) = (new_vec.as_ptr(), new_vec.capacity());
+ mem::forget(new_vec);
+ Self {
+ _cursor: PhantomData,
+ pointer: BitPtr::new(ptr, e, h, t),
+ capacity: cap,
+ }
+ }
+
+ fn clone_from(&mut self, other: &Self) {
+ let (_, e, h, t) = other.bitptr().raw_parts();
+ self.clear();
+ self.reserve(other.len());
+ let from = other.bitptr().pointer();
+ let to = self.bitptr().pointer() as *mut T;
+ let num = other.bitptr().elements();
+ unsafe {
+ ptr::copy_nonoverlapping(from, to, num);
+ }
+ self.pointer = BitPtr::new(to, e, h, t);
+ }
+}
+
+impl<C, T> Eq for BitVec<C, T>
+where C: Cursor, T: Bits {}
+
+impl<C, T> Ord for BitVec<C, T>
+where C: Cursor, T: Bits {
+ fn cmp(&self, rhs: &Self) -> Ordering {
+ <BitSlice<C, T> as Ord>::cmp(&self, &rhs)
+ }
+}
+
+/// Tests if two `BitVec`s are semantically — not bitwise — equal.
+///
+/// It is valid to compare two vectors of different endianness or element types.
+///
+/// The equality condition requires that they have the same number of stored
+/// bits and that each pair of bits in semantic order are identical.
+impl<A, B, C, D> PartialEq<BitVec<C, D>> for BitVec<A, B>
+where A: Cursor, B: Bits, C: Cursor, D: Bits {
+ /// Performs a comparison by `==`.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ /// - `rhs`: The other vector to compare.
+ ///
+ /// # Returns
+ ///
+ /// Whether the vectors compare equal.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let l: BitVec<LittleEndian, u16> = bitvec![LittleEndian, u16; 0, 1, 0, 1];
+ /// let r: BitVec<BigEndian, u32> = bitvec![BigEndian, u32; 0, 1, 0, 1];
+ /// assert!(l == r);
+ /// ```
+ ///
+ /// This example uses the same types to prove that raw, bitwise, values are
+ /// not used for equality comparison.
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let l: BitVec<BigEndian, u8> = bitvec![BigEndian, u8; 0, 1, 0, 1];
+ /// let r: BitVec<LittleEndian, u8> = bitvec![LittleEndian, u8; 0, 1, 0, 1];
+ ///
+ /// assert_eq!(l, r);
+ /// assert_ne!(l.as_slice(), r.as_slice());
+ /// ```
+ fn eq(&self, rhs: &BitVec<C, D>) -> bool {
+ <BitSlice<A, B> as PartialEq<BitSlice<C, D>>>::eq(&self, &rhs)
+ }
+}
+
+impl<A, B, C, D> PartialEq<BitSlice<C, D>> for BitVec<A, B>
+where A: Cursor, B: Bits, C: Cursor, D: Bits {
+ fn eq(&self, rhs: &BitSlice<C, D>) -> bool {
+ rhs == self
+ }
+}
+
+/// Compares two `BitVec`s by semantic — not bitwise — ordering.
+///
+/// The comparison sorts by testing each index for one vector to have a set bit
+/// where the other vector has an unset bit. If the vectors are different, the
+/// vector with the set bit sorts greater than the vector with the unset bit.
+///
+/// If one of the vectors is exhausted before they differ, the longer vector is
+/// greater.
+impl<A, B, C, D> PartialOrd<BitVec<C, D>> for BitVec<A, B>
+where A: Cursor, B: Bits, C: Cursor, D: Bits {
+ /// Performs a comparison by `<` or `>`.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ /// - `rhs`: The other vector to compare.
+ ///
+ /// # Returns
+ ///
+ /// The relative ordering of the two vectors.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let a = bitvec![0, 1, 0, 0];
+ /// let b = bitvec![0, 1, 0, 1];
+ /// let c = bitvec![0, 1, 0, 1, 1];
+ /// assert!(a < b);
+ /// assert!(b < c);
+ /// ```
+ fn partial_cmp(&self, rhs: &BitVec<C, D>) -> Option<Ordering> {
+ <BitSlice<A, B> as PartialOrd<BitSlice<C, D>>>::partial_cmp(&self, &rhs)
+ }
+}
+
+impl<A, B, C, D> PartialOrd<BitSlice<C, D>> for BitVec<A, B>
+where A: Cursor, B: Bits, C: Cursor, D: Bits {
+ fn partial_cmp(&self, rhs: &BitSlice<C, D>) -> Option<Ordering> {
+ // Compare as slices with `self` on the left: the previous `rhs.partial_cmp(self)` returned the reciprocal impl's orientation, inverting `Less`/`Greater`.
+ <BitSlice<A, B> as PartialOrd<BitSlice<C, D>>>::partial_cmp(&**self, rhs)
+ }
+}
+
+impl<C, T> AsMut<BitSlice<C, T>> for BitVec<C, T>
+where C: Cursor, T: Bits {
+ fn as_mut(&mut self) -> &mut BitSlice<C, T> {
+ &mut **self
+ }
+}
+
+/// Gives write access to all live elements in the underlying storage, including
+/// the partially-filled tail.
+impl<C, T> AsMut<[T]> for BitVec<C, T>
+where C: Cursor, T: Bits {
+ fn as_mut(&mut self) -> &mut [T] {
+ self.as_mut_slice()
+ }
+}
+
+impl<C, T> AsRef<BitSlice<C, T>> for BitVec<C, T>
+where C: Cursor, T: Bits {
+ fn as_ref(&self) -> &BitSlice<C, T> {
+ &**self
+ }
+}
+
+/// Gives read access to all live elements in the underlying storage, including
+/// the partially-filled tail.
+impl<C, T> AsRef<[T]> for BitVec<C, T>
+where C: Cursor, T: Bits {
+ /// Accesses the underlying store.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let bv = bitvec![0, 0, 0, 0, 0, 0, 0, 0, 1];
+ /// assert_eq!(&[0, 0b1000_0000], bv.as_slice());
+ /// ```
+ fn as_ref(&self) -> &[T] {
+ self.as_slice()
+ }
+}
+
+/// Copies a `BitSlice` into an owned `BitVec`.
+///
+/// The idiomatic `BitSlice` to `BitVec` conversion is `BitSlice::to_owned`, but
+/// just as `&[T].into()` yields a `Vec`, `&BitSlice.into()` yields a `BitVec`.
+impl<C, T> From<&BitSlice<C, T>> for BitVec<C, T>
+where C: Cursor, T: Bits {
+ fn from(src: &BitSlice<C, T>) -> Self {
+ let (_, elts, head, tail) = src.bitptr().raw_parts();
+ let v: Vec<T> = src.as_slice().to_owned();
+ let data = v.as_ptr();
+ let cap = v.capacity();
+ mem::forget(v);
+ let bp = BitPtr::new(data, elts, head, tail);
+ unsafe { Self::from_raw_parts(bp, cap) }
+ }
+}
+
+/// Builds a `BitVec` out of a slice of `bool`.
+///
+/// This is primarily for the `bitvec!` macro; it is not recommended for general
+/// use.
+impl<C, T> From<&[bool]> for BitVec<C, T>
+where C: Cursor, T: Bits {
+ fn from(src: &[bool]) -> Self {
+ let mut out = Self::with_capacity(src.len());
+ for bit in src.iter() {
+ out.push(*bit);
+ }
+ out
+ }
+}
+
+impl<C, T> From<BitBox<C, T>> for BitVec<C, T>
+where C: Cursor, T: Bits {
+ fn from(src: BitBox<C, T>) -> Self {
+ let pointer = src.bitptr();
+ mem::forget(src);
+ unsafe { Self::from_raw_parts(pointer, pointer.elements()) }
+ }
+}
+
+/// Builds a `BitVec` out of a borrowed slice of elements.
+///
+/// This copies the memory as-is from the source buffer into the new `BitVec`.
+/// The source buffer will be unchanged by this operation, so you don't need to
+/// worry about using the correct cursor type for the read.
+///
+/// This operation does a copy from the source buffer into a new allocation, as
+/// it can only borrow the source and not take ownership.
+impl<C, T> From<&[T]> for BitVec<C, T>
+where C: Cursor, T: Bits {
+ /// Builds a `BitVec<C: Cursor, T: Bits>` from a borrowed `&[T]`.
+ ///
+ /// # Parameters
+ ///
+ /// - `src`: The elements to use as the values for the new vector.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let src: &[u8] = &[5, 10];
+ /// let bv: BitVec = src.into();
+ /// assert!(bv[5]);
+ /// assert!(bv[7]);
+ /// assert!(bv[12]);
+ /// assert!(bv[14]);
+ /// ```
+ fn from(src: &[T]) -> Self {
+ <&BitSlice<C, T>>::from(src).to_owned()
+ }
+}
+
+/// Builds a `BitVec` out of an owned slice of elements.
+///
+/// This moves the memory as-is from the source buffer into the new `BitVec`.
+/// The source buffer will be unchanged by this operation, so you don't need to
+/// worry about using the correct cursor type.
+impl<C, T> From<Box<[T]>> for BitVec<C, T>
+where C: Cursor, T: Bits {
+ /// Consumes a `Box<[T: Bits]>` and creates a `BitVec<C: Cursor, T>` from
+ /// it.
+ ///
+ /// # Parameters
+ ///
+ /// - `src`: The source box whose memory will be used.
+ ///
+ /// # Returns
+ ///
+ /// A new `BitVec` using the `src` `Box`’s memory.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let src: Box<[u8]> = Box::new([3, 6, 9, 12, 15]);
+ /// let bv: BitVec = src.into();
+ /// ```
+ fn from(src: Box<[T]>) -> Self {
+ BitBox::<C, T>::from(src).into()
+ }
+}
+
+impl<C, T> Into<Box<[T]>> for BitVec<C, T>
+where C: Cursor, T: Bits {
+ fn into(self) -> Box<[T]> {
+ BitBox::<C, T>::from(self).into()
+ }
+}
+
+/// Builds a `BitVec` out of a `Vec` of elements.
+///
+/// This moves the memory as-is from the source buffer into the new `BitVec`.
+/// The source buffer will be unchanged by this operation, so you don't need to
+/// worry about using the correct cursor type.
+impl<C, T> From<Vec<T>> for BitVec<C, T>
+where C: Cursor, T: Bits {
+ /// Consumes a `Vec<T: Bits>` and creates a `BitVec<C: Cursor, T>` from it.
+ ///
+ /// # Parameters
+ ///
+ /// - `src`: The source vector whose memory will be used.
+ ///
+ /// # Returns
+ ///
+ /// A new `BitVec` using the `src` `Vec`’s memory.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the source vector would cause the `BitVec` to overflow
+ /// capacity.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let src: Vec<u8> = vec![1, 2, 4, 8];
+ /// let bv: BitVec = src.into();
+ /// assert_eq!(
+ /// "[00000001, 00000010, 00000100, 00001000]",
+ /// &format!("{}", bv),
+ /// );
+ /// ```
+ fn from(src: Vec<T>) -> Self {
+ assert!(src.len() < BitPtr::<T>::MAX_ELTS, "Vector overflow");
+ let (ptr, len, cap) = (src.as_ptr(), src.len(), src.capacity());
+ mem::forget(src);
+ Self {
+ _cursor: PhantomData,
+ pointer: BitPtr::new(ptr, len, 0, T::SIZE),
+ capacity: cap,
+ }
+ }
+}
+
+impl<C, T> Into<Vec<T>> for BitVec<C, T>
+where C: Cursor, T: Bits {
+ fn into(self) -> Vec<T> {
+ let (pointer, capacity) = (self.pointer, self.capacity);
+ mem::forget(self);
+ let (ptr, len, _, _) = pointer.raw_parts();
+ unsafe { Vec::from_raw_parts(ptr as *mut T, len, capacity) }
+ }
+}
+
+impl<C, T> Default for BitVec<C, T>
+where C: Cursor, T: Bits {
+ fn default() -> Self {
+ Self {
+ _cursor: PhantomData,
+ pointer: BitPtr::default(),
+ capacity: 0,
+ }
+ }
+}
+
+/// Prints the `BitVec` for debugging.
+///
+/// The output is of the form `BitVec<C, T> [ELT, *]`, where `<C, T>` is the
+/// endianness and element type, with square brackets on each end of the bits
+/// and all the live elements in the vector printed in binary. The printout is
+/// always in semantic order, and may not reflect the underlying store. To see
+/// the underlying store, use `format!("{:?}", self.as_slice());` instead.
+///
+/// The alternate character `{:#?}` prints each element on its own line, rather
+/// than separated by a space.
+impl<C, T> Debug for BitVec<C, T>
+where C: Cursor, T: Bits {
+ /// Renders the `BitVec` type header and contents for debug.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let bv = bitvec![LittleEndian, u16;
+ /// 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1
+ /// ];
+ /// assert_eq!(
+ /// "BitVec<LittleEndian, u16> [0101000011110101]",
+ /// &format!("{:?}", bv)
+ /// );
+ /// ```
+ fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+ f.write_str("BitVec<")?;
+ f.write_str(C::TYPENAME)?;
+ f.write_str(", ")?;
+ f.write_str(T::TYPENAME)?;
+ f.write_str("> ")?;
+ Display::fmt(&**self, f)
+ }
+}
+
+/// Prints the `BitVec` for displaying.
+///
+/// This prints each element in turn, formatted in binary in semantic order (so
+/// the first bit seen is printed first and the last bit seen printed last).
+/// Each element of storage is separated by a space for ease of reading.
+///
+/// The alternate character `{:#}` prints each element on its own line.
+///
+/// To see the in-memory representation, use `AsRef` to get access to the raw
+/// elements and print that slice instead.
+impl<C, T> Display for BitVec<C, T>
+where C: Cursor, T: Bits {
+ /// Renders the `BitVec` contents for display.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let bv = bitvec![BigEndian, u8; 0, 1, 0, 0, 1, 0, 1, 1, 0, 1];
+ /// assert_eq!("[01001011, 01]", &format!("{}", bv));
+ /// ```
+ fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+ Display::fmt(&**self, f)
+ }
+}
+
+/// Writes the contents of the `BitVec`, in semantic bit order, into a hasher.
+impl<C, T> Hash for BitVec<C, T>
+where C: Cursor, T: Bits {
+ /// Writes each bit of the `BitVec`, as a full `bool`, into the hasher.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ /// - `hasher`: The hashing pool into which the vector is written.
+ fn hash<H: Hasher>(&self, hasher: &mut H) {
+ <BitSlice<C, T> as Hash>::hash(self, hasher)
+ }
+}
+
+#[cfg(feature = "std")]
+impl<C, T> Write for BitVec<C, T>
+where C: Cursor, T: Bits {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ let amt = cmp::min(buf.len(), BitPtr::<T>::MAX_BITS - self.len()); // NOTE(review): mixes units — `buf.len()` is bytes, `MAX_BITS - self.len()` is bits; confirm intended
+ self.extend(<&BitSlice<C, u8>>::from(buf)); // NOTE(review): appends all of `buf` even when `amt < buf.len()`, so `Ok(amt)` may misreport bytes consumed — confirm upstream
+ Ok(amt)
+ }
+
+ fn flush(&mut self) -> io::Result<()> { Ok(()) }
+}
+
+/// Extends a `BitVec` with the contents of another bitstream.
+///
+/// At present, this just calls `.push()` in a loop. When specialization becomes
+/// available, it will be able to more intelligently perform bulk moves from the
+/// source into `self` when the source is `BitSlice`-compatible.
+impl<C, T> Extend<bool> for BitVec<C, T>
+where C: Cursor, T: Bits {
+ /// Extends a `BitVec` from another bitstream.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ /// - `src`: A source bitstream.
+ ///
+ /// # Type Parameters
+ ///
+ /// - `I: IntoIterator<Item=bool>`: The source bitstream with which to
+ /// extend `self`.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let mut bv = bitvec![0; 4];
+ /// bv.extend(bitvec![1; 4]);
+ /// assert_eq!(0x0F, bv.as_slice()[0]);
+ /// ```
+ fn extend<I: IntoIterator<Item=bool>>(&mut self, src: I) {
+ let iter = src.into_iter();
+ match iter.size_hint() {
+ (_, Some(hi)) => self.reserve(hi),
+ (lo, None) => self.reserve(lo),
+ }
+ iter.for_each(|b| self.push(b));
+ }
+}
+
+/// Permits the construction of a `BitVec` by using `.collect()` on an iterator
+/// of `bool`.
+impl<C, T> FromIterator<bool> for BitVec<C, T>
+where C: Cursor, T: Bits {
+ /// Collects an iterator of `bool` into a vector.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// use std::iter::repeat;
+ /// let bv: BitVec = repeat(true)
+ /// .take(4)
+ /// .chain(repeat(false).take(4))
+ /// .collect();
+ /// assert_eq!(bv.as_slice()[0], 0xF0);
+ /// ```
+ fn from_iter<I: IntoIterator<Item=bool>>(src: I) -> Self {
+ let iter = src.into_iter();
+ let mut bv = match iter.size_hint() {
+ | (_, Some(len))
+ | (len, _)
+ => Self::with_capacity(len),
+ };
+ for bit in iter {
+ bv.push(bit);
+ }
+ bv
+ }
+}
+
+/// Produces an iterator over all the bits in the vector.
+///
+/// This iterator follows the ordering in the vector type, and implements
+/// `ExactSizeIterator`, since `BitVec`s always know exactly how large they are,
+/// and `DoubleEndedIterator`, since they have known ends.
+impl<C, T> IntoIterator for BitVec<C, T>
+where C: Cursor, T: Bits {
+ type Item = bool;
+ type IntoIter = IntoIter<C, T>;
+
+ /// Iterates over the vector.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let bv = bitvec![BigEndian, u8; 1, 1, 1, 1, 0, 0, 0, 0];
+ /// let mut count = 0;
+ /// for bit in bv {
+ /// if bit { count += 1; }
+ /// }
+ /// assert_eq!(count, 4);
+ /// ```
+ fn into_iter(self) -> Self::IntoIter {
+ IntoIter {
+ slab: self.pointer.pointer() as *const T,
+ inner: self,
+ }
+ }
+}
+
+impl<'a, C, T> IntoIterator for &'a BitVec<C, T>
+where C: Cursor, T: 'a + Bits {
+ type Item = bool;
+ type IntoIter = <&'a BitSlice<C, T> as IntoIterator>::IntoIter;
+
+ fn into_iter(self) -> Self::IntoIter {
+ <&'a BitSlice<C, T> as IntoIterator>::into_iter(self)
+ }
+}
+
+/// Adds two `BitVec`s together, zero-extending the shorter.
+///
+/// `BitVec` addition works just like adding numbers longhand on paper. The
+/// first bits in the `BitVec` are the highest, so addition works from right to
+/// left, and the shorter `BitVec` is assumed to be extended to the left with
+/// zero.
+///
+/// The output `BitVec` may be one bit longer than the longer input, if addition
+/// overflowed.
+///
+/// Numeric arithmetic is provided on `BitVec` as a convenience. Serious numeric
+/// computation on variable-length integers should use the `num_bigint` crate
+/// instead, which is written specifically for that use case. `BitVec`s are not
+/// intended for arithmetic, and `bitvec` makes no guarantees about sustained
+/// correctness in arithmetic at this time.
+impl<C, T> Add for BitVec<C, T>
+where C: Cursor, T: Bits {
+ type Output = Self;
+
+ /// Adds two `BitVec`s.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let a = bitvec![0, 1, 0, 1];
+ /// let b = bitvec![0, 0, 1, 1];
+ /// let s = a + b;
+ /// assert_eq!(bitvec![1, 0, 0, 0], s);
+ /// ```
+ ///
+ /// This example demonstrates the addition of differently-sized `BitVec`s,
+ /// and will overflow.
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let a = bitvec![1; 4];
+ /// let b = bitvec![1; 1];
+ /// let s = b + a;
+ /// assert_eq!(bitvec![1, 0, 0, 0, 0], s);
+ /// ```
+ fn add(mut self, addend: Self) -> Self::Output {
+ self += addend;
+ self
+ }
+}
+
+/// Adds another `BitVec` into `self`, zero-extending the shorter.
+///
+/// `BitVec` addition works just like adding numbers longhand on paper. The
+/// first bits in the `BitVec` are the highest, so addition works from right to
+/// left, and the shorter `BitVec` is assumed to be extended to the left with
+/// zero.
+///
+/// The output `BitVec` may be one bit longer than the longer input, if addition
+/// overflowed.
+///
+/// Numeric arithmetic is provided on `BitVec` as a convenience. Serious numeric
+/// computation on variable-length integers should use the `num_bigint` crate
+/// instead, which is written specifically for that use case. `BitVec`s are not
+/// intended for arithmetic, and `bitvec` makes no guarantees about sustained
+/// correctness in arithmetic at this time.
+impl<C, T> AddAssign for BitVec<C, T>
+where C: Cursor, T: Bits {
+ /// Adds another `BitVec` into `self`.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let mut a = bitvec![1, 0, 0, 1];
+ /// let b = bitvec![0, 1, 1, 1];
+ /// a += b;
+ /// assert_eq!(a, bitvec![1, 0, 0, 0, 0]);
+ /// ```
+ fn add_assign(&mut self, mut addend: Self) {
+ use core::iter::repeat;
+ // If the other vec is longer, swap them and try again.
+ if addend.len() > self.len() {
+ mem::swap(self, &mut addend);
+ return *self += addend;
+ }
+ // Now that self.len() >= addend.len(), proceed with addition.
+ //
+ // I don't, at this time, want to implement a carry-lookahead adder in
+ // software, so this is going to be a plain ripple-carry adder with
+ // O(n) runtime. Furthermore, until I think of an optimization
+ // strategy, it is going to build up another bitvec to use as a stack.
+ //
+ // Computers are fast. Whatever.
+ let mut c = false;
+ let mut stack = BitVec::<C, T>::with_capacity(self.len());
+ // Reverse self, reverse addend and zero-extend, and zip both together.
+ // This walks both vecs from rightmost to leftmost, and considers an
+ // early expiration of addend to continue with 0 bits.
+ //
+ // 100111
+ // + 0010
+ // ^^---- semantically zero
+ let addend = addend.into_iter().rev().chain(repeat(false));
+ for (a, b) in self.iter().rev().zip(addend) {
+ // Addition is a finite state machine that can be precomputed into
+ // a single jump table rather than requiring more complex
+ // branching. The table is indexed as (carry, a, b) and returns
+ // (bit, carry), encoded as the integer (bit << 1) | carry.
+ static JUMP: [u8; 8] = [
+ 0, // c0 + a0 + b0 => (bit 0, carry 0)
+ 2, // c0 + a0 + b1 => (bit 1, carry 0)
+ 2, // c0 + a1 + b0 => (bit 1, carry 0)
+ 1, // c0 + a1 + b1 => (bit 0, carry 1)
+ 2, // c1 + a0 + b0 => (bit 1, carry 0)
+ 1, // c1 + a0 + b1 => (bit 0, carry 1)
+ 1, // c1 + a1 + b0 => (bit 0, carry 1)
+ 3, // c1 + a1 + b1 => (bit 1, carry 1)
+ ];
+ let idx = ((c as u8) << 2) | ((a as u8) << 1) | (b as u8);
+ let yz = JUMP[idx as usize];
+ let (y, z) = (yz & 2 != 0, yz & 1 != 0);
+ // Note: I checked in Godbolt, and the above comes out to ten
+ // simple instructions with the JUMP baked in as immediate values.
+ // The more semantically clear match statement does not optimize
+ // nearly as well.
+ stack.push(y);
+ c = z;
+ }
+ // If the carry made it to the end, push it.
+ if c {
+ stack.push(true);
+ }
+ // Unwind the stack into `self`.
+ self.clear();
+ while let Some(bit) = stack.pop() {
+ self.push(bit);
+ }
+ }
+}
+
+/// Performs the Boolean `AND` operation between each element of a `BitVec` and
+/// anything that can provide a stream of `bool` values (such as another
+/// `BitVec`, or any `bool` generator of your choice). The `BitVec` emitted will
+/// have the length of the shorter sequence of bits -- if one is longer than the
+/// other, the extra bits will be ignored.
+impl<C, T, I> BitAnd<I> for BitVec<C, T>
+where C: Cursor, T: Bits, I: IntoIterator<Item=bool> {
+ type Output = Self;
+
+ /// `AND`s a vector and a bitstream, producing a new vector.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let lhs = bitvec![BigEndian, u8; 0, 1, 0, 1];
+ /// let rhs = bitvec![BigEndian, u8; 0, 0, 1, 1];
+ /// let and = lhs & rhs;
+ /// assert_eq!("[0001]", &format!("{}", and));
+ /// ```
+ fn bitand(mut self, rhs: I) -> Self::Output {
+ self &= rhs;
+ self
+ }
+}
+
+/// Performs the Boolean `AND` operation in place on a `BitVec`, using a stream
+/// of `bool` values as the other bit for each operation. If the other stream is
+/// shorter than `self`, `self` will be truncated when the other stream expires.
+impl<C, T, I> BitAndAssign<I> for BitVec<C, T>
+where C: Cursor, T: Bits, I: IntoIterator<Item=bool> {
+ /// `AND`s another bitstream into a vector.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let mut src = bitvec![BigEndian, u8; 0, 1, 0, 1];
+ /// src &= bitvec![BigEndian, u8; 0, 0, 1, 1];
+ /// assert_eq!("[0001]", &format!("{}", src));
+ /// ```
+ fn bitand_assign(&mut self, rhs: I) {
+ let mut len = 0;
+ for (idx, other) in (0 .. self.len()).zip(rhs.into_iter()) {
+ let val = self[idx] & other;
+ self.set(idx, val);
+ len += 1;
+ }
+ self.truncate(len);
+ }
+}
+
+/// Performs the Boolean `OR` operation between each element of a `BitVec` and
+/// anything that can provide a stream of `bool` values (such as another
+/// `BitVec`, or any `bool` generator of your choice). The `BitVec` emitted will
+/// have the length of the shorter sequence of bits -- if one is longer than the
+/// other, the extra bits will be ignored.
+impl<C, T, I> BitOr<I> for BitVec<C, T>
+where C: Cursor, T: Bits, I: IntoIterator<Item=bool> {
+ type Output = Self;
+
+ /// `OR`s a vector and a bitstream, producing a new vector.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let lhs = bitvec![0, 1, 0, 1];
+ /// let rhs = bitvec![0, 0, 1, 1];
+ /// let or = lhs | rhs;
+ /// assert_eq!("[0111]", &format!("{}", or));
+ /// ```
+ fn bitor(mut self, rhs: I) -> Self::Output {
+ self |= rhs;
+ self
+ }
+}
+
+/// Performs the Boolean `OR` operation in place on a `BitVec`, using a stream
+/// of `bool` values as the other bit for each operation. If the other stream is
+/// shorter than `self`, `self` will be truncated when the other stream expires.
+impl<C, T, I> BitOrAssign<I> for BitVec<C, T>
+where C: Cursor, T: Bits, I: IntoIterator<Item=bool> {
+ /// `OR`s another bitstream into a vector.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let mut src = bitvec![0, 1, 0, 1];
+ /// src |= bitvec![0, 0, 1, 1];
+ /// assert_eq!("[0111]", &format!("{}", src));
+ /// ```
+ fn bitor_assign(&mut self, rhs: I) {
+ let mut len = 0;
+ for (idx, other) in (0 .. self.len()).zip(rhs.into_iter()) {
+ let val = self[idx] | other;
+ self.set(idx, val);
+ len += 1;
+ }
+ self.truncate(len);
+ }
+}
+
+/// Performs the Boolean `XOR` operation between each element of a `BitVec` and
+/// anything that can provide a stream of `bool` values (such as another
+/// `BitVec`, or any `bool` generator of your choice). The `BitVec` emitted will
+/// have the length of the shorter sequence of bits -- if one is longer than the
+/// other, the extra bits will be ignored.
+impl<C, T, I> BitXor<I> for BitVec<C, T>
+where C: Cursor, T: Bits, I: IntoIterator<Item=bool> {
+ type Output = Self;
+
+ /// `XOR`s a vector and a bitstream, producing a new vector.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let lhs = bitvec![0, 1, 0, 1];
+ /// let rhs = bitvec![0, 0, 1, 1];
+ /// let xor = lhs ^ rhs;
+ /// assert_eq!("[0110]", &format!("{}", xor));
+ /// ```
+ fn bitxor(mut self, rhs: I) -> Self::Output {
+ self ^= rhs;
+ self
+ }
+}
+
+/// Performs the Boolean `XOR` operation in place on a `BitVec`, using a stream
+/// of `bool` values as the other bit for each operation. If the other stream is
+/// shorter than `self`, `self` will be truncated when the other stream expires.
+impl<C, T, I> BitXorAssign<I> for BitVec<C, T>
+where C: Cursor, T: Bits, I: IntoIterator<Item=bool> {
+ /// `XOR`s another bitstream into a vector.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let mut src = bitvec![0, 1, 0, 1];
+ /// src ^= bitvec![0, 0, 1, 1];
+ /// assert_eq!("[0110]", &format!("{}", src));
+ /// ```
+ fn bitxor_assign(&mut self, rhs: I) {
+ let mut len = 0;
+ for (idx, other) in (0 .. self.len()).zip(rhs.into_iter()) {
+ let val = self[idx] ^ other;
+ self.set(idx, val);
+ len += 1;
+ }
+ self.truncate(len);
+ }
+}
+
+/// Reborrows the `BitVec` as a `BitSlice`.
+///
+/// This mimics the separation between `Vec<T>` and `[T]`.
+impl<C, T> Deref for BitVec<C, T>
+where C: Cursor, T: Bits {
+ type Target = BitSlice<C, T>;
+
+ /// Dereferences `&BitVec` down to `&BitSlice`.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let bv: BitVec = bitvec![1; 4];
+ /// let bref: &BitSlice = &bv;
+ /// assert!(bref[2]);
+ /// ```
+ fn deref(&self) -> &Self::Target {
+ self.pointer.into()
+ }
+}
+
+/// Mutably reborrows the `BitVec` as a `BitSlice`.
+///
+/// This mimics the separation between `Vec<T>` and `[T]`.
+impl<C, T> DerefMut for BitVec<C, T>
+where C: Cursor, T: Bits {
+ /// Dereferences `&mut BitVec` down to `&mut BitSlice`.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let mut bv: BitVec = bitvec![0; 6];
+ /// let bref: &mut BitSlice = &mut bv;
+ /// assert!(!bref[5]);
+ /// bref.set(5, true);
+ /// assert!(bref[5]);
+ /// ```
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ self.pointer.into()
+ }
+}
+
+/// Readies the underlying storage for Drop.
+impl<C, T> Drop for BitVec<C, T>
+where C: Cursor, T: Bits {
+ /// Rebuild the interior `Vec` and let it run the deallocator.
+ fn drop(&mut self) {
+ let bp = mem::replace(&mut self.pointer, BitPtr::empty());
+ // Build a Vec<T> out of the elements, and run its destructor.
+ let (ptr, len) = (bp.pointer(), bp.elements());
+ let cap = self.capacity;
+ drop(unsafe { Vec::from_raw_parts(ptr as *mut T, len, cap) });
+ }
+}
+
+/// Gets the bit at a specific index. The index must be less than the length of
+/// the `BitVec`.
+impl<C, T> Index<usize> for BitVec<C, T>
+where C: Cursor, T: Bits {
+ type Output = bool;
+
+ /// Looks up a single bit by semantic count.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let bv = bitvec![BigEndian, u8; 0, 0, 0, 0, 0, 0, 0, 0, 1, 0];
+ /// assert!(!bv[7]); // ---------------------------------^ | |
+ /// assert!( bv[8]); // ------------------------------------^ |
+ /// assert!(!bv[9]); // ---------------------------------------^
+ /// ```
+ ///
+ /// If the index is greater than or equal to the length, indexing will
+ /// panic.
+ ///
+ /// The below test will panic when accessing index 1, as only index 0 is
+ /// valid.
+ ///
+ /// ```rust,should_panic
+ /// use bitvec::*;
+ ///
+ /// let mut bv: BitVec = BitVec::new();
+ /// bv.push(true);
+ /// bv[1];
+ /// ```
+ fn index(&self, cursor: usize) -> &Self::Output {
+ if self.as_bitslice()[cursor] { &true } else { &false }
+ }
+}
+
+impl<C, T> Index<Range<usize>> for BitVec<C, T>
+where C: Cursor, T: Bits {
+ type Output = BitSlice<C, T>;
+
+ fn index(&self, Range { start, end }: Range<usize>) -> &Self::Output {
+ &self.as_bitslice()[start .. end]
+ }
+}
+
+impl<C, T> IndexMut<Range<usize>> for BitVec<C, T>
+where C: Cursor, T: Bits {
+ fn index_mut(
+ &mut self,
+ Range { start, end }: Range<usize>,
+ ) -> &mut Self::Output {
+ &mut self.as_mut_bitslice()[start .. end]
+ }
+}
+
+impl<C, T> Index<RangeInclusive<usize>> for BitVec<C, T>
+where C: Cursor, T: Bits {
+ type Output = BitSlice<C, T>;
+
+ fn index(&self, index: RangeInclusive<usize>) -> &Self::Output {
+ &self[*index.start() .. *index.end() + 1]
+ }
+}
+
+impl<C, T> IndexMut<RangeInclusive<usize>> for BitVec<C, T>
+where C: Cursor, T: Bits {
+ fn index_mut(&mut self, index: RangeInclusive<usize>) -> &mut Self::Output {
+ &mut self[*index.start() .. *index.end() + 1]
+ }
+}
+
+impl<C, T> Index<RangeFrom<usize>> for BitVec<C, T>
+where C: Cursor, T: Bits {
+ type Output = BitSlice<C, T>;
+
+ fn index(&self, RangeFrom { start }: RangeFrom<usize>) -> &Self::Output {
+ &self[start .. self.len()]
+ }
+}
+
+impl<C, T> IndexMut<RangeFrom<usize>> for BitVec<C, T>
+where C: Cursor, T: Bits {
+ fn index_mut(
+ &mut self,
+ RangeFrom { start }: RangeFrom<usize>,
+ ) -> &mut Self::Output {
+ let len = self.len();
+ &mut self[start .. len]
+ }
+}
+
+impl<C, T> Index<RangeFull> for BitVec<C, T>
+where C: Cursor, T: Bits {
+ type Output = BitSlice<C, T>;
+
+ fn index(&self, _: RangeFull) -> &Self::Output {
+ self
+ }
+}
+
+impl<C, T> IndexMut<RangeFull> for BitVec<C, T>
+where C: Cursor, T: Bits {
+ fn index_mut(&mut self, _: RangeFull) -> &mut Self::Output {
+ self
+ }
+}
+
+impl<C, T> Index<RangeTo<usize>> for BitVec<C, T>
+where C: Cursor, T: Bits {
+ type Output = BitSlice<C, T>;
+
+ fn index(&self, RangeTo { end }: RangeTo<usize>) -> &Self::Output {
+ &self[0 .. end]
+ }
+}
+
+impl<C, T> IndexMut<RangeTo<usize>> for BitVec<C, T>
+where C: Cursor, T: Bits {
+ fn index_mut(
+ &mut self,
+ RangeTo { end }: RangeTo<usize>,
+ ) -> &mut Self::Output {
+ &mut self[0 .. end]
+ }
+}
+
+impl<C, T> Index<RangeToInclusive<usize>> for BitVec<C, T>
+where C: Cursor, T: Bits {
+ type Output = BitSlice<C, T>;
+
+ fn index(
+ &self,
+ RangeToInclusive { end }: RangeToInclusive<usize>,
+ ) -> &Self::Output {
+ &self[0 .. end + 1]
+ }
+}
+
+impl<C, T> IndexMut<RangeToInclusive<usize>> for BitVec<C, T>
+where C: Cursor, T: Bits {
+ fn index_mut(
+ &mut self,
+ RangeToInclusive { end }: RangeToInclusive<usize>,
+ ) -> &mut Self::Output {
+ &mut self[0 .. end + 1]
+ }
+}
+
+/// 2’s-complement negation of a `BitVec`.
+///
+/// In 2’s-complement, negation is defined as bit-inversion followed by adding
+/// one.
+///
+/// Numeric arithmetic is provided on `BitVec` as a convenience. Serious numeric
+/// computation on variable-length integers should use the `num_bigint` crate
+/// instead, which is written specifically for that use case. `BitVec`s are not
+/// intended for arithmetic, and `bitvec` makes no guarantees about sustained
+/// correctness in arithmetic at this time.
+impl<C, T> Neg for BitVec<C, T>
+where C: Cursor, T: Bits {
+ type Output = Self;
+
+ /// Numerically negates a `BitVec` using 2’s-complement arithmetic.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let bv = bitvec![0, 1, 1];
+ /// let ne = -bv;
+ /// assert_eq!(ne, bitvec![1, 0, 1]);
+ /// ```
+ fn neg(mut self) -> Self::Output {
+ // An empty vector does nothing.
+ // Negative zero is zero. Without this check, -[0+] becomes[10+1].
+ if self.is_empty() || self.not_any() {
+ return self;
+ }
+ self = !self;
+ self += BitVec::<C, T>::from(&[true] as &[bool]);
+ self
+ }
+}
+
+/// Flips all bits in the vector.
+impl<C, T> Not for BitVec<C, T>
+where C: Cursor, T: Bits {
+ type Output = Self;
+
+ /// Inverts all bits in the vector.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let bv: BitVec<BigEndian, u32> = BitVec::from(&[0u32] as &[u32]);
+ /// let flip = !bv;
+ /// assert_eq!(!0u32, flip.as_slice()[0]);
+ /// ```
+ fn not(mut self) -> Self::Output {
+ let _ = !(self.as_mut_bitslice());
+ self
+ }
+}
+
+__bitvec_shift!(u8, u16, u32, u64, i8, i16, i32, i64);
+
+/// Shifts all bits in the vector to the left – **DOWN AND TOWARDS THE FRONT**.
+///
+/// On primitives, the left-shift operator `<<` moves bits away from origin and
+/// towards the ceiling. This is because we label the bits in a primitive with
+/// the minimum on the right and the maximum on the left, which is big-endian
+/// bit order. This increases the value of the primitive being shifted.
+///
+/// **THAT IS NOT HOW `BITVEC` WORKS!**
+///
+/// `BitVec` defines its layout with the minimum on the left and the maximum on
+/// the right! Thus, left-shifting moves bits towards the **minimum**.
+///
+/// In BigEndian order, the effect in memory will be what you expect the `<<`
+/// operator to do.
+///
+/// **In LittleEndian order, the effect will be equivalent to using `>>` on**
+/// **the primitives in memory!**
+///
+/// # Notes
+///
+/// In order to preserve the effects in memory that this operator traditionally
+/// expects, the bits that are emptied by this operation are zeroed rather than
+/// left to their old value.
+///
+/// The length of the vector is decreased by the shift amount.
+///
+/// If the shift amount is greater than the length, the vector calls `clear()`
+/// and zeroes its memory. This is *not* an error.
+impl<C, T> Shl<usize> for BitVec<C, T>
+where C: Cursor, T: Bits {
+ type Output = Self;
+
+ /// Shifts a `BitVec` to the left, shortening it.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let bv = bitvec![BigEndian, u8; 0, 0, 0, 1, 1, 1];
+ /// assert_eq!("[000111]", &format!("{}", bv));
+ /// assert_eq!(0b0001_1100, bv.as_slice()[0]);
+ /// assert_eq!(bv.len(), 6);
+ /// let ls = bv << 2usize;
+ /// assert_eq!("[0111]", &format!("{}", ls));
+ /// assert_eq!(0b0111_0000, ls.as_slice()[0]);
+ /// assert_eq!(ls.len(), 4);
+ /// ```
+ fn shl(mut self, shamt: usize) -> Self::Output {
+ self <<= shamt;
+ self
+ }
+}
+
+/// Shifts all bits in the vector to the left – **DOWN AND TOWARDS THE FRONT**.
+///
+/// On primitives, the left-shift operator `<<` moves bits away from origin and
+/// towards the ceiling. This is because we label the bits in a primitive with
+/// the minimum on the right and the maximum on the left, which is big-endian
+/// bit order. This increases the value of the primitive being shifted.
+///
+/// **THAT IS NOT HOW `BITVEC` WORKS!**
+///
+/// `BitVec` defines its layout with the minimum on the left and the maximum on
+/// the right! Thus, left-shifting moves bits towards the **minimum**.
+///
+/// In BigEndian order, the effect in memory will be what you expect the `<<`
+/// operator to do.
+///
+/// **In LittleEndian order, the effect will be equivalent to using `>>` on**
+/// **the primitives in memory!**
+///
+/// # Notes
+///
+/// In order to preserve the effects in memory that this operator traditionally
+/// expects, the bits that are emptied by this operation are zeroed rather than
+/// left to their old value.
+///
+/// The length of the vector is decreased by the shift amount.
+///
+/// If the shift amount is greater than the length, the vector calls `clear()`
+/// and zeroes its memory. This is *not* an error.
+impl<C, T> ShlAssign<usize> for BitVec<C, T>
+where C: Cursor, T: Bits {
+ /// Shifts a `BitVec` to the left in place, shortening it.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let mut bv = bitvec![LittleEndian, u8; 0, 0, 0, 1, 1, 1];
+ /// assert_eq!("[000111]", &format!("{}", bv));
+ /// assert_eq!(0b0011_1000, bv.as_slice()[0]);
+ /// assert_eq!(bv.len(), 6);
+ /// bv <<= 2;
+ /// assert_eq!("[0111]", &format!("{}", bv));
+ /// assert_eq!(0b0000_1110, bv.as_slice()[0]);
+ /// assert_eq!(bv.len(), 4);
+ /// ```
+ fn shl_assign(&mut self, shamt: usize) {
+ let len = self.len();
+ if shamt >= len {
+ self.clear();
+ let buf: &mut [T] = self.as_mut();
+ let ptr = buf.as_mut_ptr();
+ let len = buf.len();
+ unsafe { core::ptr::write_bytes(ptr, 0, len); }
+ return;
+ }
+ for idx in shamt .. len {
+ let val = self[idx];
+ self.set(idx - shamt, val);
+ }
+ let trunc = len - shamt;
+ for idx in trunc .. len {
+ self.set(idx, false);
+ }
+ self.truncate(trunc);
+ }
+}
+
+/// Shifts all bits in the vector to the right – **UP AND TOWARDS THE BACK**.
+///
+/// On primitives, the right-shift operator `>>` moves bits towards the origin
+/// and away from the ceiling. This is because we label the bits in a primitive
+/// with the minimum on the right and the maximum on the left, which is
+/// big-endian bit order. This decreases the value of the primitive being
+/// shifted.
+///
+/// **THAT IS NOT HOW `BITVEC` WORKS!**
+///
+/// `BitVec` defines its layout with the minimum on the left and the maximum on
+/// the right! Thus, right-shifting moves bits towards the **maximum**.
+///
+/// In BigEndian order, the effect in memory will be what you expect the `>>`
+/// operator to do.
+///
+/// **In LittleEndian order, the effect will be equivalent to using `<<` on**
+/// **the primitives in memory!**
+///
+/// # Notes
+///
+/// In order to preserve the effects in memory that this operator traditionally
+/// expects, the bits that are emptied by this operation are zeroed rather than
+/// left to their old value.
+///
+/// The length of the vector is increased by the shift amount.
+///
+/// If the new length of the vector would overflow, a panic occurs. This *is* an
+/// error.
+impl<C, T> Shr<usize> for BitVec<C, T>
+where C: Cursor, T: Bits {
+ type Output = Self;
+
+ /// Shifts a `BitVec` to the right, lengthening it and filling the front
+ /// with 0.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let bv = bitvec![BigEndian, u8; 0, 0, 0, 1, 1, 1];
+ /// assert_eq!("[000111]", &format!("{}", bv));
+ /// assert_eq!(0b0001_1100, bv.as_slice()[0]);
+ /// assert_eq!(bv.len(), 6);
+ /// let rs = bv >> 2usize;
+ /// assert_eq!("[00000111]", &format!("{}", rs));
+ /// assert_eq!(0b0000_0111, rs.as_slice()[0]);
+ /// assert_eq!(rs.len(), 8);
+ /// ```
+ fn shr(mut self, shamt: usize) -> Self::Output {
+ self >>= shamt;
+ self
+ }
+}
+
+/// Shifts all bits in the vector to the right – **UP AND TOWARDS THE BACK**.
+///
+/// On primitives, the right-shift operator `>>` moves bits towards the origin
+/// and away from the ceiling. This is because we label the bits in a primitive
+/// with the minimum on the right and the maximum on the left, which is
+/// big-endian bit order. This decreases the value of the primitive being
+/// shifted.
+///
+/// **THAT IS NOT HOW `BITVEC` WORKS!**
+///
+/// `BitVec` defines its layout with the minimum on the left and the maximum on
+/// the right! Thus, right-shifting moves bits towards the **maximum**.
+///
+/// In BigEndian order, the effect in memory will be what you expect the `>>`
+/// operator to do.
+///
+/// **In LittleEndian order, the effect will be equivalent to using `<<` on**
+/// **the primitives in memory!**
+///
+/// # Notes
+///
+/// In order to preserve the effects in memory that this operator traditionally
+/// expects, the bits that are emptied by this operation are zeroed rather than
+/// left to their old value.
+///
+/// The length of the vector is increased by the shift amount.
+///
+/// If the new length of the vector would overflow, a panic occurs. This *is* an
+/// error.
+impl<C, T> ShrAssign<usize> for BitVec<C, T>
+where C: Cursor, T: Bits {
+ /// Shifts a `BitVec` to the right in place, lengthening it and filling the
+ /// front with 0.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let mut bv = bitvec![LittleEndian, u8; 0, 0, 0, 1, 1, 1];
+ /// assert_eq!("[000111]", &format!("{}", bv));
+ /// assert_eq!(0b0011_1000, bv.as_slice()[0]);
+ /// assert_eq!(bv.len(), 6);
+ /// bv >>= 2;
+ /// assert_eq!("[00000111]", &format!("{}", bv));
+ /// assert_eq!(0b1110_0000, bv.as_slice()[0]);
+ /// assert_eq!(bv.len(), 8);
+ /// ```
+ fn shr_assign(&mut self, shamt: usize) {
+ let old_len = self.len();
+ for _ in 0 .. shamt {
+ self.push(false);
+ }
+ for idx in (0 .. old_len).rev() {
+ let val = self[idx];
+ self.set(idx + shamt, val);
+ }
+ for idx in 0 .. shamt {
+ self.set(idx, false);
+ }
+ }
+}
+
+/// Subtracts one `BitVec` from another assuming 2’s-complement encoding.
+///
+/// Subtraction is a more complex operation than addition. The bit-level work is
+/// largely the same, but semantic distinctions must be made. Unlike addition,
+/// which is commutative and tolerant of switching the order of the addends,
+/// subtraction cannot swap the minuend (LHS) and subtrahend (RHS).
+///
+/// Because of the properties of 2’s-complement arithmetic, M - S is equivalent
+/// to M + (!S + 1). Subtraction therefore bitflips the subtrahend and adds one.
+/// This may, in a degenerate case, cause the subtrahend to increase in length.
+///
+/// Once the subtrahend is stable, the minuend zero-extends its left side in
+/// order to match the length of the subtrahend if needed (this is provided by
+/// the `>>` operator).
+///
+/// When the minuend is stable, the minuend and subtrahend are added together
+/// by the `<BitVec as Add>` implementation. The output will be encoded in
+/// 2’s-complement, so a leading one means that the output is considered
+/// negative.
+///
+/// Interpreting the contents of a `BitVec` as an integer is beyond the scope of
+/// this crate.
+///
+/// Numeric arithmetic is provided on `BitVec` as a convenience. Serious numeric
+/// computation on variable-length integers should use the `num_bigint` crate
+/// instead, which is written specifically for that use case. `BitVec`s are not
+/// intended for arithmetic, and `bitvec` makes no guarantees about sustained
+/// correctness in arithmetic at this time.
+impl<C, T> Sub for BitVec<C, T>
+where C: Cursor, T: Bits {
+ type Output = Self;
+
+ /// Subtracts one `BitVec` from another.
+ ///
+ /// # Examples
+ ///
+ /// Minuend larger than subtrahend, positive difference.
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let a = bitvec![1, 0];
+ /// let b = bitvec![ 1];
+ /// let c = a - b;
+ /// assert_eq!(bitvec![0, 1], c);
+ /// ```
+ ///
+ /// Minuend smaller than subtrahend, negative difference.
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let a = bitvec![ 1];
+ /// let b = bitvec![1, 0];
+ /// let c = a - b;
+ /// assert_eq!(bitvec![1, 1], c);
+ /// ```
+ ///
+ /// Subtraction from self is correctly handled.
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let a = bitvec![0, 1, 1, 0];
+ /// let b = a.clone();
+ /// let c = a - b;
+ /// assert!(c.not_any(), "{:?}", c);
+ /// ```
+ fn sub(mut self, subtrahend: Self) -> Self::Output {
+ self -= subtrahend;
+ self
+ }
+}
+
+/// Subtracts another `BitVec` from `self`, assuming 2’s-complement encoding.
+///
+/// The minuend is zero-extended, or the subtrahend sign-extended, as needed to
+/// ensure that the vectors are the same width before subtraction occurs.
+///
+/// The `Sub` trait has more documentation on the subtraction process.
+///
+/// Numeric arithmetic is provided on `BitVec` as a convenience. Serious numeric
+/// computation on variable-length integers should use the `num_bigint` crate
+/// instead, which is written specifically for that use case. `BitVec`s are not
+/// intended for arithmetic, and `bitvec` makes no guarantees about sustained
+/// correctness in arithmetic at this time.
+impl<C, T> SubAssign for BitVec<C, T>
+where C: Cursor, T: Bits {
+ /// Subtracts another `BitVec` from `self`.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let a = bitvec![0, 0, 0, 1];
+ /// let b = bitvec![0, 0, 0, 0];
+ /// let c = a - b;
+ /// assert_eq!(c, bitvec![0, 0, 0, 1]);
+ /// ```
+ // Note: in `a - b`, `a` is `self` and the minuend, `b` is the subtrahend
+ fn sub_assign(&mut self, mut subtrahend: Self) {
+ // Test for a zero subtrahend. Subtraction of zero is the identity
+ // function, and can exit immediately.
+ if subtrahend.not_any() {
+ return;
+ }
+ // Invert the subtrahend in preparation for addition
+ subtrahend = -subtrahend;
+ let (llen, rlen) = (self.len(), subtrahend.len());
+ // If the subtrahend is longer than the minuend, 0-extend the minuend.
+ if rlen > llen {
+ let diff = rlen - llen;
+ *self >>= diff;
+ *self += subtrahend;
+ }
+ else {
+ // If the minuend is longer than the subtrahend, 1-extend the
+ // subtrahend.
+ if llen > rlen {
+ let diff = llen - rlen;
+ let sign = subtrahend[0];
+ subtrahend >>= diff;
+ subtrahend[.. diff].set_all(sign);
+ }
+ let old = self.len();
+ *self += subtrahend;
+ // If the subtraction emitted a carry, remove it.
+ if self.len() > old {
+ *self <<= 1;
+ }
+ }
+ }
+}
+
+/// State keeper for draining iteration.
+///
+/// # Type Parameters
+///
+/// - `C: Cursor`: The cursor type of the underlying vector.
+/// - `T: 'a + Bits`: The storage type of the underlying vector.
+///
+/// # Lifetimes
+///
+/// - `'a`: The lifetime of the underlying vector.
+pub struct Drain<'a, C, T>
+where C: Cursor, T: 'a + Bits {
+ /// Vector being drained.
+ bitvec: NonNull<BitVec<C, T>>,
+ /// Current remaining range to remove.
+ iter: crate::slice::Iter<'a, C, T>,
+ /// Index of the original vector tail to preserve.
+ tail_start: usize,
+ /// Length of the tail.
+ tail_len: usize,
+}
+
+impl<'a, C, T> Drain<'a, C, T>
+where C: Cursor, T: 'a + Bits {
+ /// Fills the drain span with another iterator.
+ ///
+ /// If the stream exhausts before the drain is filled, then the tail
+ /// elements move downwards; otherwise, the tail stays put and the drain is
+ /// filled.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ /// - `stream`: The source of bits to fill into the drain.
+ ///
+ /// # Returns
+ ///
+ /// - `true` if the drain was filled before the `stream` exhausted.
+ /// - `false` if the `stream` exhausted early, and the tail was moved down.
+ ///
+ /// # Type Parameters
+ ///
+ /// - `I: Iterator<Item=bool>`: A provider of bits.
+ unsafe fn fill<I: Iterator<Item=bool>>(&mut self, stream: &mut I) -> bool {
+ let bv = self.bitvec.as_mut();
+ let drain_from = bv.len();
+ let drain_upto = self.tail_start;
+
+ for n in drain_from .. drain_upto {
+ if let Some(bit) = stream.next() {
+ bv.push(bit);
+ }
+ else {
+ for (to, from) in (n .. n + self.tail_len).zip(drain_upto ..) {
+ bv.swap(from, to);
+ }
+ self.tail_start = n;
+ return false;
+ }
+ }
+ true
+ }
+
+ /// Moves the tail span farther back in the vector.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ /// - `by`: The amount by which to move the tail span.
+ unsafe fn move_tail(&mut self, by: usize) {
+ let bv = self.bitvec.as_mut();
+ bv.reserve(by);
+ let new_tail = self.tail_start + by;
+ let old_len = bv.len();
+ let new_len = self.tail_start + self.tail_len + by;
+
+ bv.set_len(new_len);
+ for n in (0 .. self.tail_len).rev() {
+ bv.swap(self.tail_start + n, new_tail + n);
+ }
+ bv.set_len(old_len);
+
+ self.tail_start = new_tail;
+ }
+}
+
+impl<'a, C, T> DoubleEndedIterator for Drain<'a, C, T>
+where C: Cursor, T: 'a + Bits {
+ fn next_back(&mut self) -> Option<Self::Item> { self.iter.next_back() }
+}
+
+impl<'a, C, T> ExactSizeIterator for Drain<'a, C, T>
+where C: Cursor, T: 'a + Bits {}
+
+impl<'a, C, T> FusedIterator for Drain<'a, C, T>
+where C: Cursor, T: 'a + Bits {}
+
+impl<'a, C, T> Iterator for Drain<'a, C, T>
+where C: Cursor, T: 'a + Bits {
+ type Item = bool;
+ fn next(&mut self) -> Option<Self::Item> { self.iter.next() }
+ fn size_hint(&self) -> (usize, Option<usize>) { self.iter.size_hint() }
+ fn count(self) -> usize { self.len() }
+ fn nth(&mut self, n: usize) -> Option<Self::Item> { self.iter.nth(n) }
+ fn last(mut self) -> Option<Self::Item> { self.iter.next_back() }
+}
+
+impl<'a, C, T> Drop for Drain<'a, C, T>
+where C: Cursor, T: 'a + Bits {
+ fn drop(&mut self) { unsafe {
+ let bv: &mut BitVec<C, T> = self.bitvec.as_mut();
+ // Get the start of the drained span.
+ let start = bv.len();
+ // Get the start of the remnant span.
+ let tail = self.tail_start;
+ let tail_len = self.tail_len;
+ // Get the full length of the vector,
+ let full_len = tail + tail_len;
+ // And the length of the vector after the drain.
+ let end_len = start + tail_len;
+ // Inflate the vector to include the remnant span,
+ bv.set_len(full_len);
+ // Swap the remnant span down into the drained span,
+ for (from, to) in (tail .. full_len).zip(start .. end_len) {
+ bv.swap(from, to);
+ }
+ // And deflate the vector to fit.
+ bv.set_len(end_len);
+ } }
+}
+
+#[repr(C)]
+pub struct IntoIter<C, T>
+where C: Cursor, T: Bits {
+ /// Mostly-owning pointer to the bit slice.
+ ///
+ /// The destructor for this can never be run.
+ inner: BitVec<C, T>,
+ /// Pointer to the original allocation. This cannot be forgotten.
+ slab: *const T,
+}
+
+impl<C, T> DoubleEndedIterator for IntoIter<C, T>
+where C: Cursor, T: Bits {
+ fn next_back(&mut self) -> Option<Self::Item> {
+ let mut slice_iter = (*self.inner).into_iter();
+ let out = slice_iter.next_back();
+ self.inner.pointer = slice_iter.bitptr();
+ out
+ }
+}
+
+impl<C, T> ExactSizeIterator for IntoIter<C, T>
+where C: Cursor, T: Bits {}
+
+impl<C, T> FusedIterator for IntoIter<C, T>
+where C: Cursor, T: Bits {}
+
+impl<C, T> Iterator for IntoIter<C, T>
+where C: Cursor, T: Bits {
+ type Item = bool;
+
+ /// Advances the iterator by one, returning the first bit in it (if any).
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ ///
+ /// # Returns
+ ///
+ /// The leading bit in the iterator, if any.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let bv = bitvec![1, 0];
+ /// let mut iter = bv.iter();
+ /// assert!(iter.next().unwrap());
+ /// assert!(!iter.next().unwrap());
+ /// assert!(iter.next().is_none());
+ /// ```
+ fn next(&mut self) -> Option<Self::Item> {
+ let mut slice_iter = (*self.inner).into_iter();
+ let out = slice_iter.next();
+ self.inner.pointer = slice_iter.bitptr();
+ out
+ }
+
+ /// Hints at the number of bits remaining in the iterator.
+ ///
+ /// Because the exact size is always known, this always produces
+ /// `(len, Some(len))`.
+ ///
+ /// # Parameters
+ ///
+ /// - `&self`
+ ///
+ /// # Returns
+ ///
+ /// - `usize`: The minimum bits remaining.
+ /// - `Option<usize>`: The maximum bits remaining.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ ///
+ /// let bv = bitvec![0, 1];
+ /// let mut iter = bv.iter();
+ /// assert_eq!(iter.size_hint(), (2, Some(2)));
+ /// iter.next();
+ /// assert_eq!(iter.size_hint(), (1, Some(1)));
+ /// iter.next();
+ /// assert_eq!(iter.size_hint(), (0, Some(0)));
+ /// ```
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let rem = self.inner.len();
+ (rem, Some(rem))
+ }
+
+ /// Counts how many bits are live in the iterator, consuming it.
+ ///
+ /// You are probably looking to use this on a borrowed iterator rather than
+ /// an owning iterator. See [`BitSlice`].
+ ///
+ /// # Parameters
+ ///
+ /// - `self`
+ ///
+ /// # Returns
+ ///
+ /// The number of bits in the iterator.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ /// let bv = bitvec![BigEndian, u8; 0, 1, 0, 1, 0];
+ /// assert_eq!(bv.into_iter().count(), 5);
+ /// ```
+ ///
+ /// [`BitSlice`]: ../struct.BitSlice.html#method.iter
+ fn count(self) -> usize {
+ self.len()
+ }
+
+ /// Advances the iterator by `n` bits, starting from zero.
+ ///
+ /// # Parameters
+ ///
+ /// - `&mut self`
+ /// - `n`: The number of bits to skip, before producing the next bit after
+ /// skips. If this overshoots the iterator’s remaining length, then the
+ /// iterator is marked empty before returning `None`.
+ ///
+ /// # Returns
+ ///
+ /// If `n` does not overshoot the iterator’s bounds, this produces the `n`th
+ /// bit after advancing the iterator to it, discarding the intermediate
+ /// bits.
+ ///
+ /// If `n` does overshoot the iterator’s bounds, this empties the iterator
+ /// and returns `None`.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ /// let bv = bitvec![BigEndian, u8; 0, 0, 0, 1];
+ /// let mut iter = bv.into_iter();
+ /// assert_eq!(iter.len(), 4);
+ /// assert!(iter.nth(3).unwrap());
+ /// assert!(iter.nth(0).is_none());
+ /// ```
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ let mut slice_iter = (*self.inner).into_iter();
+ let out = slice_iter.nth(n);
+ self.inner.pointer = slice_iter.bitptr();
+ out
+ }
+
+ /// Consumes the iterator, returning only the last bit.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ /// let bv = bitvec![BigEndian, u8; 0, 0, 0, 1];
+ /// assert!(bv.into_iter().last().unwrap());
+ /// ```
+ ///
+ /// Empty iterators return `None`
+ ///
+ /// ```rust
+ /// use bitvec::*;
+ /// assert!(bitvec![].into_iter().last().is_none());
+ /// ```
+ fn last(mut self) -> Option<Self::Item> {
+ self.next_back()
+ }
+}
+
+impl<C, T> Drop for IntoIter<C, T>
+where C: Cursor, T: Bits {
+ fn drop(&mut self) {
+ let cap = self.inner.capacity;
+ // Yank the interior BitVec, and *forget it*. This is important because
+ // the interior *does not have the correct pointer anymore*, and cannot
+ // be allowed to run the destructor.
+ mem::forget(mem::replace(&mut self.inner, BitVec::new()));
+ // Build a Vec from the slab pointer and the capacity, and allow that
+ // to drop.
+ unsafe { Vec::from_raw_parts(self.slab as *mut T, 0, cap) };
+ }
+}
+
+pub struct Splice<'a, C, T, I>
+where C: Cursor, T: 'a + Bits, I: Iterator<Item=bool> {
+ drain: Drain<'a, C, T>,
+ splice: I,
+}
+
+impl<'a, C, T, I> DoubleEndedIterator for Splice<'a, C, T, I>
+where C: Cursor, T: 'a + Bits, I: Iterator<Item=bool> {
+ fn next_back(&mut self) -> Option<Self::Item> { self.drain.next_back() }
+}
+
+impl<'a, C, T, I> ExactSizeIterator for Splice<'a, C, T, I>
+where C: Cursor, T: 'a + Bits, I: Iterator<Item=bool> {}
+
+impl<'a, C, T, I> FusedIterator for Splice<'a, C, T, I>
+where C: Cursor, T: 'a + Bits, I: Iterator<Item=bool> {}
+
+impl<'a, C, T, I> Iterator for Splice<'a, C, T, I>
+where C: Cursor, T: 'a + Bits, I: Iterator<Item=bool> {
+ type Item = bool;
+ fn next(&mut self) -> Option<Self::Item> { self.drain.next() }
+ fn size_hint(&self) -> (usize, Option<usize>) { self.drain.size_hint() }
+ fn count(self) -> usize { self.len() }
+ fn nth(&mut self, n: usize) -> Option<Self::Item> { self.drain.nth(n) }
+ fn last(mut self) -> Option<Self::Item> { self.next_back() }
+}
+
+impl<'a, C, T, I> Drop for Splice<'a, C, T, I>
+where C: Cursor, T: 'a + Bits, I: Iterator<Item=bool> {
+ fn drop(&mut self) { unsafe {
+ if self.drain.tail_len == 0 {
+ self.drain.bitvec.as_mut().extend(self.splice.by_ref());
+ return;
+ }
+
+ // Fill the drain span from the splice. If this exhausts the splice,
+ // exit.
+ if !self.drain.fill(&mut self.splice) {
+ return;
+ }
+
+ let (lower, _) = self.splice.size_hint();
+
+ // If the splice still has data, move the tail to make room for it and
+ // fill.
+ if lower > 0 {
+ self.drain.move_tail(lower);
+ if !self.drain.fill(&mut self.splice) {
+ return;
+ }
+ }
+
+ let mut remnant = self.splice.by_ref().collect::<Vec<_>>().into_iter();
+ if remnant.len() > 0 {
+ self.drain.move_tail(remnant.len());
+ self.drain.fill(&mut remnant);
+ }
+ // Drain::drop does the rest
+ } }
+}
diff --git a/third_party/rust/byteorder/.cargo-checksum.json b/third_party/rust/byteorder/.cargo-checksum.json
--- a/third_party/rust/byteorder/.cargo-checksum.json
+++ b/third_party/rust/byteorder/.cargo-checksum.json
@@ -1,1 +1,1 @@
-{"files":{"CHANGELOG.md":"dc0a497d6932b874a32c1a457e344bdcd27a1767a174d163f19d66b837477a08","COPYING":"01c266bced4a434da0051174d6bee16a4c82cf634e2679b6155d40d75012390f","Cargo.toml":"9f119d940ff1131bb71fb7c2b10c36ae91b45dbcf010975ccc555268a5b79165","LICENSE-MIT":"0f96a83840e146e43c0ec96a22ec1f392e0680e6c1226e6f3ba87e0740af850f","README.md":"97c01a66dbff4615acd49a8c3a85d137bf29cc113fa514910195bb11aef445bc","UNLICENSE":"7e12e5df4bae12cb21581ba157ced20e1986a0508dd10d0e8a4ab9a4cf94e85c","benches/bench.rs":"5dbfb724aa137efdff2cf0faeb728a3d302eda097ba5189de2d82314ce500654","src/io.rs":"d9759a25c625aa341b23bdb489144b1e9554db11bc2fa799a2a6ecfbc442ea5e","src/lib.rs":"fd18e7969a80314537ba41b3c460284975c0d4df4b72f27527506cb5bd889ae8"},"package":"94f88df23a25417badc922ab0f5716cc1330e87f71ddd9203b3a3ccd9cedf75d"}
\ No newline at end of file
+{"files":{"CHANGELOG.md":"51f0eb3b6139fc1a908d41a7b3cba7d58d684700986b3518f82e5af254c39e8e","COPYING":"01c266bced4a434da0051174d6bee16a4c82cf634e2679b6155d40d75012390f","Cargo.toml":"362a05ecca14c0934f211fba28eeeca2a72e4665e4c8303bc44321aedfa1ab1c","LICENSE-MIT":"0f96a83840e146e43c0ec96a22ec1f392e0680e6c1226e6f3ba87e0740af850f","README.md":"97c01a66dbff4615acd49a8c3a85d137bf29cc113fa514910195bb11aef445bc","UNLICENSE":"7e12e5df4bae12cb21581ba157ced20e1986a0508dd10d0e8a4ab9a4cf94e85c","benches/bench.rs":"80e43e07f34bc6d3ebe4f37cea79ba02cafce6cf30b737a1db65de55f2c38ca5","build.rs":"088c35c11be1e443c4462c7fe2863bcf885805e7b0f3dc7d83e6091ff197e779","src/io.rs":"dce98946ebc14cc37e8f5632a26979494cdd995adceeb9c22c591aef9010c7ad","src/lib.rs":"23fd0baba674e534dee99f9f082db2f7a51b075b1ed7c3510bc49ebd011033bc"},"package":"a019b10a2a7cdeb292db131fc8113e57ea2a908f6e7894b0c3c671893b65dbeb"}
\ No newline at end of file
diff --git a/third_party/rust/byteorder/CHANGELOG.md b/third_party/rust/byteorder/CHANGELOG.md
--- a/third_party/rust/byteorder/CHANGELOG.md
+++ b/third_party/rust/byteorder/CHANGELOG.md
@@ -1,8 +1,51 @@
+1.3.0
+=====
+This new minor release now enables `i128` support automatically on Rust
+compilers that support 128-bit integers. The `i128` feature is now a no-op, but
+continues to exist for backward compatibility purposes. The crate continues to
+maintain compatibility with Rust 1.12.0.
+
+This release also deprecates the `ByteOrder` trait methods
+`read_f32_into_unchecked` and `read_f64_into_unchecked` in favor of
+`read_f32_into` and `read_f64_into`. This was an oversight from the 1.2 release
+where the corresponding methods on `ReadBytesExt` were deprecated.
+
+`quickcheck` and `rand` were bumped to `0.8` and `0.6`, respectively.
+
+A few small documentation related bugs have been fixed.
+
+
+1.2.7
+=====
+This patch release excludes some CI files from the crate release and updates
+the license field to use `OR` instead of `/`.
+
+
+1.2.6
+=====
+This patch release fixes some test compilation errors introduced by an
+over-eager release of 1.2.5.
+
+
+1.2.5
+=====
+This patch release fixes some typos in the docs, adds doc tests to methods on
+`WriteByteExt` and bumps the quickcheck dependency to `0.7`.
+
+
+1.2.4
+=====
+This patch release adds support for 48-bit integers by adding the following
+methods to the `ByteOrder` trait: `read_u48`, `read_i48`, `write_u48` and
+`write_i48`. Corresponding methods have been added to the `ReadBytesExt` and
+`WriteBytesExt` traits as well.
+
+
1.2.3
=====
This patch release removes the use of `feature(i128_type)` from byteorder,
since it has been stabilized. We leave byteorder's `i128` feature in place
in order to continue supporting compilation on older versions of Rust.
1.2.2
diff --git a/third_party/rust/byteorder/Cargo.toml b/third_party/rust/byteorder/Cargo.toml
--- a/third_party/rust/byteorder/Cargo.toml
+++ b/third_party/rust/byteorder/Cargo.toml
@@ -7,38 +7,39 @@
#
# If you believe there's an error in this file please file an
# issue against the rust-lang/cargo repository. If you're
# editing this file be aware that the upstream Cargo.toml
# will likely look very different (and much more reasonable)
[package]
name = "byteorder"
-version = "1.2.7"
+version = "1.3.1"
authors = ["Andrew Gallant <jamslam@gmail.com>"]
+build = "build.rs"
exclude = ["/ci/*"]
description = "Library for reading/writing numbers in big-endian and little-endian."
homepage = "https://github.com/BurntSushi/byteorder"
documentation = "https://docs.rs/byteorder"
readme = "README.md"
keywords = ["byte", "endian", "big-endian", "little-endian", "binary"]
categories = ["encoding", "parsing"]
license = "Unlicense OR MIT"
repository = "https://github.com/BurntSushi/byteorder"
[profile.bench]
opt-level = 3
[lib]
name = "byteorder"
bench = false
[dev-dependencies.quickcheck]
-version = "0.7"
+version = "0.8"
default-features = false
[dev-dependencies.rand]
-version = "0.5"
+version = "0.6"
[features]
default = ["std"]
i128 = []
std = []
[badges.travis-ci]
repository = "BurntSushi/byteorder"
diff --git a/third_party/rust/byteorder/benches/bench.rs b/third_party/rust/byteorder/benches/bench.rs
--- a/third_party/rust/byteorder/benches/bench.rs
+++ b/third_party/rust/byteorder/benches/bench.rs
@@ -143,118 +143,118 @@ bench_num!(int_1, read_int, 1, [1]);
bench_num!(int_2, read_int, 2, [1, 2]);
bench_num!(int_3, read_int, 3, [1, 2, 3]);
bench_num!(int_4, read_int, 4, [1, 2, 3, 4]);
bench_num!(int_5, read_int, 5, [1, 2, 3, 4, 5]);
bench_num!(int_6, read_int, 6, [1, 2, 3, 4, 5, 6]);
bench_num!(int_7, read_int, 7, [1, 2, 3, 4, 5, 6, 7]);
bench_num!(int_8, read_int, 8, [1, 2, 3, 4, 5, 6, 7, 8]);
-#[cfg(feature = "i128")]
+#[cfg(byteorder_i128)]
bench_num!(u128, MAX, read_u128, write_u128,
16, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
-#[cfg(feature = "i128")]
+#[cfg(byteorder_i128)]
bench_num!(i128, MAX, read_i128, write_i128,
16, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
-#[cfg(feature = "i128")]
+#[cfg(byteorder_i128)]
bench_num!(uint128_1, read_uint128,
1, [1]);
-#[cfg(feature = "i128")]
+#[cfg(byteorder_i128)]
bench_num!(uint128_2, read_uint128,
2, [1, 2]);
-#[cfg(feature = "i128")]
+#[cfg(byteorder_i128)]
bench_num!(uint128_3, read_uint128,
3, [1, 2, 3]);
-#[cfg(feature = "i128")]
+#[cfg(byteorder_i128)]
bench_num!(uint128_4, read_uint128,
4, [1, 2, 3, 4]);
-#[cfg(feature = "i128")]
+#[cfg(byteorder_i128)]
bench_num!(uint128_5, read_uint128,
5, [1, 2, 3, 4, 5]);
-#[cfg(feature = "i128")]
+#[cfg(byteorder_i128)]
bench_num!(uint128_6, read_uint128,
6, [1, 2, 3, 4, 5, 6]);
-#[cfg(feature = "i128")]
+#[cfg(byteorder_i128)]
bench_num!(uint128_7, read_uint128,
7, [1, 2, 3, 4, 5, 6, 7]);
-#[cfg(feature = "i128")]
+#[cfg(byteorder_i128)]
bench_num!(uint128_8, read_uint128,
8, [1, 2, 3, 4, 5, 6, 7, 8]);
-#[cfg(feature = "i128")]
+#[cfg(byteorder_i128)]
bench_num!(uint128_9, read_uint128,
9, [1, 2, 3, 4, 5, 6, 7, 8, 9]);
-#[cfg(feature = "i128")]
+#[cfg(byteorder_i128)]
bench_num!(uint128_10, read_uint128,
10, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
-#[cfg(feature = "i128")]
+#[cfg(byteorder_i128)]
bench_num!(uint128_11, read_uint128,
11, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]);
-#[cfg(feature = "i128")]
+#[cfg(byteorder_i128)]
bench_num!(uint128_12, read_uint128,
12, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]);
-#[cfg(feature = "i128")]
+#[cfg(byteorder_i128)]
bench_num!(uint128_13, read_uint128,
13, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]);
-#[cfg(feature = "i128")]
+#[cfg(byteorder_i128)]
bench_num!(uint128_14, read_uint128,
14, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]);
-#[cfg(feature = "i128")]
+#[cfg(byteorder_i128)]
bench_num!(uint128_15, read_uint128,
15, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
-#[cfg(feature = "i128")]
+#[cfg(byteorder_i128)]
bench_num!(uint128_16, read_uint128,
16, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
-#[cfg(feature = "i128")]
+#[cfg(byteorder_i128)]
bench_num!(int128_1, read_int128,
1, [1]);
-#[cfg(feature = "i128")]
+#[cfg(byteorder_i128)]
bench_num!(int128_2, read_int128,
2, [1, 2]);
-#[cfg(feature = "i128")]
+#[cfg(byteorder_i128)]
bench_num!(int128_3, read_int128,
3, [1, 2, 3]);
-#[cfg(feature = "i128")]
+#[cfg(byteorder_i128)]
bench_num!(int128_4, read_int128,
4, [1, 2, 3, 4]);
-#[cfg(feature = "i128")]
+#[cfg(byteorder_i128)]
bench_num!(int128_5, read_int128,
5, [1, 2, 3, 4, 5]);
-#[cfg(feature = "i128")]
+#[cfg(byteorder_i128)]
bench_num!(int128_6, read_int128,
6, [1, 2, 3, 4, 5, 6]);
-#[cfg(feature = "i128")]
+#[cfg(byteorder_i128)]
bench_num!(int128_7, read_int128,
7, [1, 2, 3, 4, 5, 6, 7]);
-#[cfg(feature = "i128")]
+#[cfg(byteorder_i128)]
bench_num!(int128_8, read_int128,
8, [1, 2, 3, 4, 5, 6, 7, 8]);
-#[cfg(feature = "i128")]
+#[cfg(byteorder_i128)]
bench_num!(int128_9, read_int128,
9, [1, 2, 3, 4, 5, 6, 7, 8, 9]);
-#[cfg(feature = "i128")]
+#[cfg(byteorder_i128)]
bench_num!(int128_10, read_int128,
10, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
-#[cfg(feature = "i128")]
+#[cfg(byteorder_i128)]
bench_num!(int128_11, read_int128,
11, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]);
-#[cfg(feature = "i128")]
+#[cfg(byteorder_i128)]
bench_num!(int128_12, read_int128,
12, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]);
-#[cfg(feature = "i128")]
+#[cfg(byteorder_i128)]
bench_num!(int128_13, read_int128,
13, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]);
-#[cfg(feature = "i128")]
+#[cfg(byteorder_i128)]
bench_num!(int128_14, read_int128,
14, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]);
-#[cfg(feature = "i128")]
+#[cfg(byteorder_i128)]
bench_num!(int128_15, read_int128,
15, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
-#[cfg(feature = "i128")]
+#[cfg(byteorder_i128)]
bench_num!(int128_16, read_int128,
16, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
macro_rules! bench_slice {
($name:ident, $numty:ty, $read:ident, $write:ident) => {
mod $name {
use std::mem::size_of;
diff --git a/third_party/rust/byteorder/build.rs b/third_party/rust/byteorder/build.rs
new file mode 100644
--- /dev/null
+++ b/third_party/rust/byteorder/build.rs
@@ -0,0 +1,87 @@
+use std::env;
+use std::ffi::OsString;
+use std::io::{self, Write};
+use std::process::Command;
+
+fn main() {
+ let version = match Version::read() {
+ Ok(version) => version,
+ Err(err) => {
+ writeln!(
+ &mut io::stderr(),
+ "failed to parse `rustc --version`: {}",
+ err
+ ).unwrap();
+ return;
+ }
+ };
+ enable_i128(version);
+}
+
+fn enable_i128(version: Version) {
+ if version < (Version { major: 1, minor: 26, patch: 0 }) {
+ return;
+ }
+
+ println!("cargo:rustc-cfg=byteorder_i128");
+}
+
+#[derive(Clone, Copy, Debug, Eq, PartialEq, PartialOrd, Ord)]
+struct Version {
+ major: u32,
+ minor: u32,
+ patch: u32,
+}
+
+impl Version {
+ fn read() -> Result<Version, String> {
+ let rustc = env::var_os("RUSTC").unwrap_or(OsString::from("rustc"));
+ let output = Command::new(&rustc)
+ .arg("--version")
+ .output()
+ .unwrap()
+ .stdout;
+ Version::parse(&String::from_utf8(output).unwrap())
+ }
+
+ fn parse(mut s: &str) -> Result<Version, String> {
+ if !s.starts_with("rustc ") {
+ return Err(format!("unrecognized version string: {}", s));
+ }
+ s = &s["rustc ".len()..];
+
+ let parts: Vec<&str> = s.split(".").collect();
+ if parts.len() < 3 {
+ return Err(format!("not enough version parts: {:?}", parts));
+ }
+
+ let mut num = String::new();
+ for c in parts[0].chars() {
+ if !c.is_digit(10) {
+ break;
+ }
+ num.push(c);
+ }
+ let major = try!(num.parse::<u32>().map_err(|e| e.to_string()));
+
+ num.clear();
+ for c in parts[1].chars() {
+ if !c.is_digit(10) {
+ break;
+ }
+ num.push(c);
+ }
+ let minor = try!(num.parse::<u32>().map_err(|e| e.to_string()));
+
+ num.clear();
+ for c in parts[2].chars() {
+ if !c.is_digit(10) {
+ break;
+ }
+ num.push(c);
+ }
+ let patch = try!(num.parse::<u32>().map_err(|e| e.to_string()));
+
+ Ok(Version { major: major, minor: minor, patch: patch })
+ }
+}
diff --git a/third_party/rust/byteorder/src/io.rs b/third_party/rust/byteorder/src/io.rs
--- a/third_party/rust/byteorder/src/io.rs
+++ b/third_party/rust/byteorder/src/io.rs
@@ -365,17 +365,17 @@ pub trait ReadBytesExt: io::Read {
/// use byteorder::{BigEndian, ReadBytesExt};
///
/// let mut rdr = Cursor::new(vec![
/// 0x00, 0x03, 0x43, 0x95, 0x4d, 0x60, 0x86, 0x83,
/// 0x00, 0x03, 0x43, 0x95, 0x4d, 0x60, 0x86, 0x83
/// ]);
/// assert_eq!(16947640962301618749969007319746179, rdr.read_u128::<BigEndian>().unwrap());
/// ```
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
#[inline]
fn read_u128<T: ByteOrder>(&mut self) -> Result<u128> {
let mut buf = [0; 16];
try!(self.read_exact(&mut buf));
Ok(T::read_u128(&buf))
}
/// Reads a signed 128 bit integer from the underlying reader.
@@ -386,24 +386,23 @@ pub trait ReadBytesExt: io::Read {
///
/// [`Read::read_exact`]: https://doc.rust-lang.org/std/io/trait.Read.html#method.read_exact
///
/// # Examples
///
/// Read a signed 128 bit big-endian integer from a `Read`:
///
/// ```rust
- /// #![feature(i128_type)]
/// use std::io::Cursor;
/// use byteorder::{BigEndian, ReadBytesExt};
///
/// let mut rdr = Cursor::new(vec![0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
/// assert_eq!(i128::min_value(), rdr.read_i128::<BigEndian>().unwrap());
/// ```
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
#[inline]
fn read_i128<T: ByteOrder>(&mut self) -> Result<i128> {
let mut buf = [0; 16];
try!(self.read_exact(&mut buf));
Ok(T::read_i128(&buf))
}
/// Reads an unsigned n-bytes integer from the underlying reader.
@@ -452,26 +451,26 @@ pub trait ReadBytesExt: io::Read {
#[inline]
fn read_int<T: ByteOrder>(&mut self, nbytes: usize) -> Result<i64> {
let mut buf = [0; 8];
try!(self.read_exact(&mut buf[..nbytes]));
Ok(T::read_int(&buf[..nbytes], nbytes))
}
/// Reads an unsigned n-bytes integer from the underlying reader.
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
#[inline]
fn read_uint128<T: ByteOrder>(&mut self, nbytes: usize) -> Result<u128> {
let mut buf = [0; 16];
try!(self.read_exact(&mut buf[..nbytes]));
Ok(T::read_uint128(&buf[..nbytes], nbytes))
}
/// Reads a signed n-bytes integer from the underlying reader.
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
#[inline]
fn read_int128<T: ByteOrder>(&mut self, nbytes: usize) -> Result<i128> {
let mut buf = [0; 16];
try!(self.read_exact(&mut buf[..nbytes]));
Ok(T::read_int128(&buf[..nbytes], nbytes))
}
/// Reads a IEEE754 single-precision (4 bytes) floating point number from
@@ -667,17 +666,17 @@ pub trait ReadBytesExt: io::Read {
/// let mut rdr = Cursor::new(vec![
/// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 5,
/// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0,
/// ]);
/// let mut dst = [0; 2];
/// rdr.read_u128_into::<BigEndian>(&mut dst).unwrap();
/// assert_eq!([517, 768], dst);
/// ```
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
#[inline]
fn read_u128_into<T: ByteOrder>(
&mut self,
dst: &mut [u128],
) -> Result<()> {
{
let buf = unsafe { slice_to_u8_mut(dst) };
try!(self.read_exact(buf));
@@ -817,17 +816,17 @@ pub trait ReadBytesExt: io::Read {
/// let mut rdr = Cursor::new(vec![
/// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 5,
/// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0,
/// ]);
/// let mut dst = [0; 2];
/// rdr.read_i128_into::<BigEndian>(&mut dst).unwrap();
/// assert_eq!([517, 768], dst);
/// ```
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
#[inline]
fn read_i128_into<T: ByteOrder>(
&mut self,
dst: &mut [i128],
) -> Result<()> {
{
let buf = unsafe { slice_to_u8_mut(dst) };
try!(self.read_exact(buf));
@@ -1368,26 +1367,26 @@ pub trait WriteBytesExt: io::Write {
#[inline]
fn write_i64<T: ByteOrder>(&mut self, n: i64) -> Result<()> {
let mut buf = [0; 8];
T::write_i64(&mut buf, n);
self.write_all(&buf)
}
/// Writes an unsigned 128 bit integer to the underlying writer.
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
#[inline]
fn write_u128<T: ByteOrder>(&mut self, n: u128) -> Result<()> {
let mut buf = [0; 16];
T::write_u128(&mut buf, n);
self.write_all(&buf)
}
/// Writes a signed 128 bit integer to the underlying writer.
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
#[inline]
fn write_i128<T: ByteOrder>(&mut self, n: i128) -> Result<()> {
let mut buf = [0; 16];
T::write_i128(&mut buf, n);
self.write_all(&buf)
}
/// Writes an unsigned n-bytes integer to the underlying writer.
@@ -1461,33 +1460,33 @@ pub trait WriteBytesExt: io::Write {
T::write_int(&mut buf, n, nbytes);
self.write_all(&buf[0..nbytes])
}
/// Writes an unsigned n-bytes integer to the underlying writer.
///
/// If the given integer is not representable in the given number of bytes,
/// this method panics. If `nbytes > 16`, this method panics.
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
#[inline]
fn write_uint128<T: ByteOrder>(
&mut self,
n: u128,
nbytes: usize,
) -> Result<()> {
let mut buf = [0; 16];
T::write_uint128(&mut buf, n, nbytes);
self.write_all(&buf[0..nbytes])
}
/// Writes a signed n-bytes integer to the underlying writer.
///
/// If the given integer is not representable in the given number of bytes,
/// this method panics. If `nbytes > 16`, this method panics.
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
#[inline]
fn write_int128<T: ByteOrder>(
&mut self,
n: i128,
nbytes: usize,
) -> Result<()> {
let mut buf = [0; 16];
T::write_int128(&mut buf, n, nbytes);
diff --git a/third_party/rust/byteorder/src/lib.rs b/third_party/rust/byteorder/src/lib.rs
--- a/third_party/rust/byteorder/src/lib.rs
+++ b/third_party/rust/byteorder/src/lib.rs
@@ -79,30 +79,30 @@ pub use io::{ReadBytesExt, WriteBytesExt
mod io;
#[inline]
fn extend_sign(val: u64, nbytes: usize) -> i64 {
let shift = (8 - nbytes) * 8;
(val << shift) as i64 >> shift
}
-#[cfg(feature = "i128")]
+#[cfg(byteorder_i128)]
#[inline]
fn extend_sign128(val: u128, nbytes: usize) -> i128 {
let shift = (16 - nbytes) * 8;
(val << shift) as i128 >> shift
}
#[inline]
fn unextend_sign(val: i64, nbytes: usize) -> u64 {
let shift = (8 - nbytes) * 8;
(val << shift) as u64 >> shift
}
-#[cfg(feature = "i128")]
+#[cfg(byteorder_i128)]
#[inline]
fn unextend_sign128(val: i128, nbytes: usize) -> u128 {
let shift = (16 - nbytes) * 8;
(val << shift) as u128 >> shift
}
#[inline]
fn pack_size(n: u64) -> usize {
@@ -120,17 +120,17 @@ fn pack_size(n: u64) -> usize {
6
} else if n < 1 << 56 {
7
} else {
8
}
}
-#[cfg(feature = "i128")]
+#[cfg(byteorder_i128)]
#[inline]
fn pack_size128(n: u128) -> usize {
if n < 1 << 8 {
1
} else if n < 1 << 16 {
2
} else if n < 1 << 24 {
3
@@ -309,17 +309,17 @@ pub trait ByteOrder
///
/// ```rust
/// use byteorder::{ByteOrder, LittleEndian};
///
/// let mut buf = [0; 16];
/// LittleEndian::write_u128(&mut buf, 1_000_000);
/// assert_eq!(1_000_000, LittleEndian::read_u128(&buf));
/// ```
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
fn read_u128(buf: &[u8]) -> u128;
/// Reads an unsigned n-bytes integer from `buf`.
///
/// # Panics
///
/// Panics when `nbytes < 1` or `nbytes > 8` or
/// `buf.len() < nbytes`
@@ -350,17 +350,17 @@ pub trait ByteOrder
///
/// ```rust
/// use byteorder::{ByteOrder, LittleEndian};
///
/// let mut buf = [0; 3];
/// LittleEndian::write_uint128(&mut buf, 1_000_000, 3);
/// assert_eq!(1_000_000, LittleEndian::read_uint128(&buf, 3));
/// ```
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
fn read_uint128(buf: &[u8], nbytes: usize) -> u128;
/// Writes an unsigned 16 bit integer `n` to `buf`.
///
/// # Panics
///
/// Panics when `buf.len() < 2`.
///
@@ -469,17 +469,17 @@ pub trait ByteOrder
///
/// ```rust
/// use byteorder::{ByteOrder, LittleEndian};
///
/// let mut buf = [0; 16];
/// LittleEndian::write_u128(&mut buf, 1_000_000);
/// assert_eq!(1_000_000, LittleEndian::read_u128(&buf));
/// ```
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
fn write_u128(buf: &mut [u8], n: u128);
/// Writes an unsigned integer `n` to `buf` using only `nbytes`.
///
/// # Panics
///
/// If `n` is not representable in `nbytes`, or if `nbytes` is `> 8`, then
/// this method panics.
@@ -510,17 +510,17 @@ pub trait ByteOrder
///
/// ```rust
/// use byteorder::{ByteOrder, LittleEndian};
///
/// let mut buf = [0; 3];
/// LittleEndian::write_uint128(&mut buf, 1_000_000, 3);
/// assert_eq!(1_000_000, LittleEndian::read_uint128(&buf, 3));
/// ```
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
fn write_uint128(buf: &mut [u8], n: u128, nbytes: usize);
/// Reads a signed 16 bit integer from `buf`.
///
/// # Panics
///
/// Panics when `buf.len() < 2`.
///
@@ -640,17 +640,17 @@ pub trait ByteOrder
///
/// ```rust
/// use byteorder::{ByteOrder, LittleEndian};
///
/// let mut buf = [0; 16];
/// LittleEndian::write_i128(&mut buf, -1_000_000_000);
/// assert_eq!(-1_000_000_000, LittleEndian::read_i128(&buf));
/// ```
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
#[inline]
fn read_i128(buf: &[u8]) -> i128 {
Self::read_u128(buf) as i128
}
/// Reads a signed n-bytes integer from `buf`.
///
/// # Panics
@@ -687,17 +687,17 @@ pub trait ByteOrder
///
/// ```rust
/// use byteorder::{ByteOrder, LittleEndian};
///
/// let mut buf = [0; 3];
/// LittleEndian::write_int128(&mut buf, -1_000, 3);
/// assert_eq!(-1_000, LittleEndian::read_int128(&buf, 3));
/// ```
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
#[inline]
fn read_int128(buf: &[u8], nbytes: usize) -> i128 {
extend_sign128(Self::read_uint128(buf, nbytes), nbytes)
}
/// Reads a IEEE754 single-precision (4 bytes) floating point number.
///
/// # Panics
@@ -866,17 +866,17 @@ pub trait ByteOrder
///
/// ```rust
/// use byteorder::{ByteOrder, LittleEndian};
///
/// let mut buf = [0; 16];
/// LittleEndian::write_i128(&mut buf, -1_000_000_000);
/// assert_eq!(-1_000_000_000, LittleEndian::read_i128(&buf));
/// ```
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
#[inline]
fn write_i128(buf: &mut [u8], n: i128) {
Self::write_u128(buf, n as u128)
}
/// Writes a signed integer `n` to `buf` using only `nbytes`.
///
/// # Panics
@@ -913,17 +913,17 @@ pub trait ByteOrder
///
/// ```rust
/// use byteorder::{ByteOrder, LittleEndian};
///
/// let mut buf = [0; 3];
/// LittleEndian::write_int128(&mut buf, -1_000, 3);
/// assert_eq!(-1_000, LittleEndian::read_int128(&buf, 3));
/// ```
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
#[inline]
fn write_int128(buf: &mut [u8], n: i128, nbytes: usize) {
Self::write_uint128(buf, unextend_sign128(n, nbytes), nbytes)
}
/// Writes a IEEE754 single-precision (4 bytes) floating point number.
///
/// # Panics
@@ -1057,17 +1057,17 @@ pub trait ByteOrder
/// let mut bytes = [0; 64];
/// let numbers_given = [1, 2, 0xf00f, 0xffee];
/// LittleEndian::write_u128_into(&numbers_given, &mut bytes);
///
/// let mut numbers_got = [0; 4];
/// LittleEndian::read_u128_into(&bytes, &mut numbers_got);
/// assert_eq!(numbers_given, numbers_got);
/// ```
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
fn read_u128_into(src: &[u8], dst: &mut [u128]);
/// Reads signed 16 bit integers from `src` to `dst`.
///
/// # Panics
///
/// Panics when `buf.len() != 2*dst.len()`.
///
@@ -1168,17 +1168,17 @@ pub trait ByteOrder
/// let mut bytes = [0; 64];
/// let numbers_given = [1, 2, 0xf00f, 0xffee];
/// LittleEndian::write_i128_into(&numbers_given, &mut bytes);
///
/// let mut numbers_got = [0; 4];
/// LittleEndian::read_i128_into(&bytes, &mut numbers_got);
/// assert_eq!(numbers_given, numbers_got);
/// ```
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
#[inline]
fn read_i128_into(src: &[u8], dst: &mut [i128]) {
let dst = unsafe {
slice::from_raw_parts_mut(dst.as_mut_ptr() as *mut u128, dst.len())
};
Self::read_u128_into(src, dst);
}
@@ -1192,29 +1192,60 @@ pub trait ByteOrder
/// # Examples
///
/// Write and read `f32` numbers in little endian order:
///
/// ```rust
/// use byteorder::{ByteOrder, LittleEndian};
///
/// let mut bytes = [0; 16];
- /// let numbers_given = [1.0, 2.0, 31.312e311, -11.32e91];
+ /// let numbers_given = [1.0, 2.0, 31.312e31, -11.32e19];
+ /// LittleEndian::write_f32_into(&numbers_given, &mut bytes);
+ ///
+ /// let mut numbers_got = [0.0; 4];
+ /// LittleEndian::read_f32_into(&bytes, &mut numbers_got);
+ /// assert_eq!(numbers_given, numbers_got);
+ /// ```
+ #[inline]
+ fn read_f32_into(src: &[u8], dst: &mut [f32]) {
+ let dst = unsafe {
+ slice::from_raw_parts_mut(dst.as_mut_ptr() as *mut u32, dst.len())
+ };
+ Self::read_u32_into(src, dst);
+ }
+
+ /// **DEPRECATED**.
+ ///
+ /// This method is deprecated. Use `read_f32_into` instead.
+ /// Reads IEEE754 single-precision (4 bytes) floating point numbers from
+ /// `src` into `dst`.
+ ///
+ /// # Panics
+ ///
+ /// Panics when `src.len() != 4*dst.len()`.
+ ///
+ /// # Examples
+ ///
+ /// Write and read `f32` numbers in little endian order:
+ ///
+ /// ```rust
+ /// use byteorder::{ByteOrder, LittleEndian};
+ ///
+ /// let mut bytes = [0; 16];
+ /// let numbers_given = [1.0, 2.0, 31.312e31, -11.32e19];
/// LittleEndian::write_f32_into(&numbers_given, &mut bytes);
///
/// let mut numbers_got = [0.0; 4];
/// LittleEndian::read_f32_into_unchecked(&bytes, &mut numbers_got);
/// assert_eq!(numbers_given, numbers_got);
/// ```
#[inline]
+ #[deprecated(since="1.3.0", note="please use `read_f32_into` instead")]
fn read_f32_into_unchecked(src: &[u8], dst: &mut [f32]) {
- let dst = unsafe {
- slice::from_raw_parts_mut(dst.as_mut_ptr() as *mut u32, dst.len())
- };
- Self::read_u32_into(src, dst);
+ Self::read_f32_into(src, dst);
}
/// Reads IEEE754 single-precision (4 bytes) floating point numbers from
/// `src` into `dst`.
///
/// # Panics
///
/// Panics when `src.len() != 8*dst.len()`.
@@ -1226,25 +1257,57 @@ pub trait ByteOrder
/// ```rust
/// use byteorder::{ByteOrder, LittleEndian};
///
/// let mut bytes = [0; 32];
/// let numbers_given = [1.0, 2.0, 31.312e311, -11.32e91];
/// LittleEndian::write_f64_into(&numbers_given, &mut bytes);
///
/// let mut numbers_got = [0.0; 4];
+ /// LittleEndian::read_f64_into(&bytes, &mut numbers_got);
+ /// assert_eq!(numbers_given, numbers_got);
+ /// ```
+ #[inline]
+ fn read_f64_into(src: &[u8], dst: &mut [f64]) {
+ let dst = unsafe {
+ slice::from_raw_parts_mut(dst.as_mut_ptr() as *mut u64, dst.len())
+ };
+ Self::read_u64_into(src, dst);
+ }
+
+ /// **DEPRECATED**.
+ ///
+ /// This method is deprecated. Use `read_f64_into` instead.
+ ///
+ /// Reads IEEE754 single-precision (4 bytes) floating point numbers from
+ /// `src` into `dst`.
+ ///
+ /// # Panics
+ ///
+ /// Panics when `src.len() != 8*dst.len()`.
+ ///
+ /// # Examples
+ ///
+ /// Write and read `f64` numbers in little endian order:
+ ///
+ /// ```rust
+ /// use byteorder::{ByteOrder, LittleEndian};
+ ///
+ /// let mut bytes = [0; 32];
+ /// let numbers_given = [1.0, 2.0, 31.312e311, -11.32e91];
+ /// LittleEndian::write_f64_into(&numbers_given, &mut bytes);
+ ///
+ /// let mut numbers_got = [0.0; 4];
/// LittleEndian::read_f64_into_unchecked(&bytes, &mut numbers_got);
/// assert_eq!(numbers_given, numbers_got);
/// ```
#[inline]
+ #[deprecated(since="1.3.0", note="please use `read_f64_into` instead")]
fn read_f64_into_unchecked(src: &[u8], dst: &mut [f64]) {
- let dst = unsafe {
- slice::from_raw_parts_mut(dst.as_mut_ptr() as *mut u64, dst.len())
- };
- Self::read_u64_into(src, dst);
+ Self::read_f64_into(src, dst);
}
/// Writes unsigned 16 bit integers from `src` into `dst`.
///
/// # Panics
///
/// Panics when `dst.len() != 2*src.len()`.
///
@@ -1327,17 +1390,17 @@ pub trait ByteOrder
/// let mut bytes = [0; 64];
/// let numbers_given = [1, 2, 0xf00f, 0xffee];
/// LittleEndian::write_u128_into(&numbers_given, &mut bytes);
///
/// let mut numbers_got = [0; 4];
/// LittleEndian::read_u128_into(&bytes, &mut numbers_got);
/// assert_eq!(numbers_given, numbers_got);
/// ```
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
fn write_u128_into(src: &[u128], dst: &mut [u8]);
/// Writes signed 16 bit integers from `src` into `dst`.
///
/// # Panics
///
/// Panics when `buf.len() != 2*src.len()`.
///
@@ -1435,17 +1498,17 @@ pub trait ByteOrder
/// let mut bytes = [0; 64];
/// let numbers_given = [1, 2, 0xf00f, 0xffee];
/// LittleEndian::write_i128_into(&numbers_given, &mut bytes);
///
/// let mut numbers_got = [0; 4];
/// LittleEndian::read_i128_into(&bytes, &mut numbers_got);
/// assert_eq!(numbers_given, numbers_got);
/// ```
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
fn write_i128_into(src: &[i128], dst: &mut [u8]) {
let src = unsafe {
slice::from_raw_parts(src.as_ptr() as *const u128, src.len())
};
Self::write_u128_into(src, dst);
}
/// Writes IEEE754 single-precision (4 bytes) floating point numbers from
@@ -1458,22 +1521,22 @@ pub trait ByteOrder
/// # Examples
///
/// Write and read `f32` numbers in little endian order:
///
/// ```rust
/// use byteorder::{ByteOrder, LittleEndian};
///
/// let mut bytes = [0; 16];
- /// let numbers_given = [1.0, 2.0, 31.312e311, -11.32e91];
+ /// let numbers_given = [1.0, 2.0, 31.312e31, -11.32e19];
/// LittleEndian::write_f32_into(&numbers_given, &mut bytes);
///
/// let mut numbers_got = [0.0; 4];
/// unsafe {
- /// LittleEndian::read_f32_into_unchecked(&bytes, &mut numbers_got);
+ /// LittleEndian::read_f32_into(&bytes, &mut numbers_got);
/// }
/// assert_eq!(numbers_given, numbers_got);
/// ```
fn write_f32_into(src: &[f32], dst: &mut [u8]) {
let src = unsafe {
slice::from_raw_parts(src.as_ptr() as *const u32, src.len())
};
Self::write_u32_into(src, dst);
@@ -1494,17 +1557,17 @@ pub trait ByteOrder
/// use byteorder::{ByteOrder, LittleEndian};
///
/// let mut bytes = [0; 32];
/// let numbers_given = [1.0, 2.0, 31.312e311, -11.32e91];
/// LittleEndian::write_f64_into(&numbers_given, &mut bytes);
///
/// let mut numbers_got = [0.0; 4];
/// unsafe {
- /// LittleEndian::read_f64_into_unchecked(&bytes, &mut numbers_got);
+ /// LittleEndian::read_f64_into(&bytes, &mut numbers_got);
/// }
/// assert_eq!(numbers_given, numbers_got);
/// ```
fn write_f64_into(src: &[f64], dst: &mut [u8]) {
let src = unsafe {
slice::from_raw_parts(src.as_ptr() as *const u64, src.len())
};
Self::write_u64_into(src, dst);
@@ -1579,17 +1642,17 @@ pub trait ByteOrder
///
/// ```rust
/// use byteorder::{ByteOrder, BigEndian};
///
/// let mut numbers = [5, 65000];
/// BigEndian::from_slice_u128(&mut numbers);
/// assert_eq!(numbers, [5u128.to_be(), 65000u128.to_be()]);
/// ```
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
fn from_slice_u128(numbers: &mut [u128]);
/// Converts the given slice of signed 16 bit integers to a particular
/// endianness.
///
/// If the endianness matches the endianness of the host platform, then
/// this is a no-op.
///
@@ -1674,17 +1737,17 @@ pub trait ByteOrder
///
/// ```rust
/// use byteorder::{ByteOrder, BigEndian};
///
/// let mut numbers = [5, 65000];
/// BigEndian::from_slice_i128(&mut numbers);
/// assert_eq!(numbers, [5i128.to_be(), 65000i128.to_be()]);
/// ```
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
#[inline]
fn from_slice_i128(src: &mut [i128]) {
let src = unsafe {
slice::from_raw_parts_mut(src.as_ptr() as *mut u128, src.len())
};
Self::from_slice_u128(src);
}
@@ -1889,17 +1952,17 @@ impl ByteOrder for BigEndian {
read_num_bytes!(u32, 4, buf, to_be)
}
#[inline]
fn read_u64(buf: &[u8]) -> u64 {
read_num_bytes!(u64, 8, buf, to_be)
}
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
#[inline]
fn read_u128(buf: &[u8]) -> u128 {
read_num_bytes!(u128, 16, buf, to_be)
}
#[inline]
fn read_uint(buf: &[u8], nbytes: usize) -> u64 {
assert!(1 <= nbytes && nbytes <= 8 && nbytes <= buf.len());
@@ -1907,17 +1970,17 @@ impl ByteOrder for BigEndian {
let ptr_out = out.as_mut_ptr();
unsafe {
copy_nonoverlapping(
buf.as_ptr(), ptr_out.offset((8 - nbytes) as isize), nbytes);
(*(ptr_out as *const u64)).to_be()
}
}
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
#[inline]
fn read_uint128(buf: &[u8], nbytes: usize) -> u128 {
assert!(1 <= nbytes && nbytes <= 16 && nbytes <= buf.len());
let mut out = [0u8; 16];
let ptr_out = out.as_mut_ptr();
unsafe {
copy_nonoverlapping(
buf.as_ptr(), ptr_out.offset((16 - nbytes) as isize), nbytes);
@@ -1935,17 +1998,17 @@ impl ByteOrder for BigEndian {
write_num_bytes!(u32, 4, n, buf, to_be);
}
#[inline]
fn write_u64(buf: &mut [u8], n: u64) {
write_num_bytes!(u64, 8, n, buf, to_be);
}
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
#[inline]
fn write_u128(buf: &mut [u8], n: u128) {
write_num_bytes!(u128, 16, n, buf, to_be);
}
#[inline]
fn write_uint(buf: &mut [u8], n: u64, nbytes: usize) {
assert!(pack_size(n) <= nbytes && nbytes <= 8);
@@ -1954,17 +2017,17 @@ impl ByteOrder for BigEndian {
let bytes = *(&n.to_be() as *const u64 as *const [u8; 8]);
copy_nonoverlapping(
bytes.as_ptr().offset((8 - nbytes) as isize),
buf.as_mut_ptr(),
nbytes);
}
}
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
#[inline]
fn write_uint128(buf: &mut [u8], n: u128, nbytes: usize) {
assert!(pack_size128(n) <= nbytes && nbytes <= 16);
assert!(nbytes <= buf.len());
unsafe {
let bytes = *(&n.to_be() as *const u128 as *const [u8; 16]);
copy_nonoverlapping(
bytes.as_ptr().offset((16 - nbytes) as isize),
@@ -1983,17 +2046,17 @@ impl ByteOrder for BigEndian {
read_slice!(src, dst, 4, to_be);
}
#[inline]
fn read_u64_into(src: &[u8], dst: &mut [u64]) {
read_slice!(src, dst, 8, to_be);
}
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
#[inline]
fn read_u128_into(src: &[u8], dst: &mut [u128]) {
read_slice!(src, dst, 16, to_be);
}
#[inline]
fn write_u16_into(src: &[u16], dst: &mut [u8]) {
if cfg!(target_endian = "big") {
@@ -2016,17 +2079,17 @@ impl ByteOrder for BigEndian {
fn write_u64_into(src: &[u64], dst: &mut [u8]) {
if cfg!(target_endian = "big") {
write_slice_native!(src, dst, u64, 8);
} else {
write_slice!(src, dst, u64, 8, Self::write_u64);
}
}
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
#[inline]
fn write_u128_into(src: &[u128], dst: &mut [u8]) {
if cfg!(target_endian = "big") {
write_slice_native!(src, dst, u128, 16);
} else {
write_slice!(src, dst, u128, 16, Self::write_u128);
}
}
@@ -2053,17 +2116,17 @@ impl ByteOrder for BigEndian {
fn from_slice_u64(numbers: &mut [u64]) {
if cfg!(target_endian = "little") {
for n in numbers {
*n = n.to_be();
}
}
}
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
#[inline]
fn from_slice_u128(numbers: &mut [u128]) {
if cfg!(target_endian = "little") {
for n in numbers {
*n = n.to_be();
}
}
}
@@ -2104,34 +2167,34 @@ impl ByteOrder for LittleEndian {
read_num_bytes!(u32, 4, buf, to_le)
}
#[inline]
fn read_u64(buf: &[u8]) -> u64 {
read_num_bytes!(u64, 8, buf, to_le)
}
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
#[inline]
fn read_u128(buf: &[u8]) -> u128 {
read_num_bytes!(u128, 16, buf, to_le)
}
#[inline]
fn read_uint(buf: &[u8], nbytes: usize) -> u64 {
assert!(1 <= nbytes && nbytes <= 8 && nbytes <= buf.len());
let mut out = [0u8; 8];
let ptr_out = out.as_mut_ptr();
unsafe {
copy_nonoverlapping(buf.as_ptr(), ptr_out, nbytes);
(*(ptr_out as *const u64)).to_le()
}
}
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
#[inline]
fn read_uint128(buf: &[u8], nbytes: usize) -> u128 {
assert!(1 <= nbytes && nbytes <= 16 && nbytes <= buf.len());
let mut out = [0u8; 16];
let ptr_out = out.as_mut_ptr();
unsafe {
copy_nonoverlapping(buf.as_ptr(), ptr_out, nbytes);
(*(ptr_out as *const u128)).to_le()
@@ -2148,33 +2211,33 @@ impl ByteOrder for LittleEndian {
write_num_bytes!(u32, 4, n, buf, to_le);
}
#[inline]
fn write_u64(buf: &mut [u8], n: u64) {
write_num_bytes!(u64, 8, n, buf, to_le);
}
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
#[inline]
fn write_u128(buf: &mut [u8], n: u128) {
write_num_bytes!(u128, 16, n, buf, to_le);
}
#[inline]
fn write_uint(buf: &mut [u8], n: u64, nbytes: usize) {
assert!(pack_size(n as u64) <= nbytes && nbytes <= 8);
assert!(nbytes <= buf.len());
unsafe {
let bytes = *(&n.to_le() as *const u64 as *const [u8; 8]);
copy_nonoverlapping(bytes.as_ptr(), buf.as_mut_ptr(), nbytes);
}
}
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
#[inline]
fn write_uint128(buf: &mut [u8], n: u128, nbytes: usize) {
assert!(pack_size128(n as u128) <= nbytes && nbytes <= 16);
assert!(nbytes <= buf.len());
unsafe {
let bytes = *(&n.to_le() as *const u128 as *const [u8; 16]);
copy_nonoverlapping(bytes.as_ptr(), buf.as_mut_ptr(), nbytes);
}
@@ -2190,17 +2253,17 @@ impl ByteOrder for LittleEndian {
read_slice!(src, dst, 4, to_le);
}
#[inline]
fn read_u64_into(src: &[u8], dst: &mut [u64]) {
read_slice!(src, dst, 8, to_le);
}
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
#[inline]
fn read_u128_into(src: &[u8], dst: &mut [u128]) {
read_slice!(src, dst, 16, to_le);
}
#[inline]
fn write_u16_into(src: &[u16], dst: &mut [u8]) {
if cfg!(target_endian = "little") {
@@ -2223,17 +2286,17 @@ impl ByteOrder for LittleEndian {
fn write_u64_into(src: &[u64], dst: &mut [u8]) {
if cfg!(target_endian = "little") {
write_slice_native!(src, dst, u64, 8);
} else {
write_slice!(src, dst, u64, 8, Self::write_u64);
}
}
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
#[inline]
fn write_u128_into(src: &[u128], dst: &mut [u8]) {
if cfg!(target_endian = "little") {
write_slice_native!(src, dst, u128, 16);
} else {
write_slice!(src, dst, u128, 16, Self::write_u128);
}
}
@@ -2260,17 +2323,17 @@ impl ByteOrder for LittleEndian {
fn from_slice_u64(numbers: &mut [u64]) {
if cfg!(target_endian = "big") {
for n in numbers {
*n = n.to_le();
}
}
}
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
#[inline]
fn from_slice_u128(numbers: &mut [u128]) {
if cfg!(target_endian = "big") {
for n in numbers {
*n = n.to_le();
}
}
}
@@ -2301,18 +2364,21 @@ impl ByteOrder for LittleEndian {
}
#[cfg(test)]
mod test {
extern crate quickcheck;
extern crate rand;
use self::quickcheck::{QuickCheck, StdGen, Testable};
- use self::rand::{Rng, thread_rng};
- #[cfg(feature = "i128")] use self::quickcheck::{Arbitrary, Gen};
+ use self::rand::thread_rng;
+ #[cfg(byteorder_i128)]
+ use self::rand::Rng;
+ #[cfg(byteorder_i128)]
+ use self::quickcheck::{Arbitrary, Gen};
pub const U24_MAX: u32 = 16_777_215;
pub const I24_MAX: i32 = 8_388_607;
pub const U48_MAX: u64 = 281_474_976_710_655;
pub const I48_MAX: i64 = 140_737_488_355_327;
pub const U64_MAX: u64 = ::core::u64::MAX;
pub const I64_MAX: u64 = ::core::i64::MAX as u64;
@@ -2322,41 +2388,41 @@ mod test {
($max:expr, $bytes:expr, $maxbytes:expr) => {
($max - 1) >> (8 * ($maxbytes - $bytes))
};
}
#[derive(Clone, Debug)]
pub struct Wi128<T>(pub T);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
impl<T: Clone> Wi128<T> {
pub fn clone(&self) -> T {
self.0.clone()
}
}
impl<T: PartialEq> PartialEq<T> for Wi128<T> {
fn eq(&self, other: &T) -> bool {
self.0.eq(other)
}
}
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
impl Arbitrary for Wi128<u128> {
fn arbitrary<G: Gen>(gen: &mut G) -> Wi128<u128> {
let max = calc_max!(::core::u128::MAX, gen.size(), 16);
let output =
(gen.gen::<u64>() as u128) |
((gen.gen::<u64>() as u128) << 64);
Wi128(output & (max - 1))
}
}
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
impl Arbitrary for Wi128<i128> {
fn arbitrary<G: Gen>(gen: &mut G) -> Wi128<i128> {
let max = calc_max!(::core::i128::MAX, gen.size(), 16);
let output =
(gen.gen::<i64>() as i128) |
((gen.gen::<i64>() as i128) << 64);
Wi128(output & (max - 1))
}
@@ -2459,19 +2525,19 @@ mod test {
qc_byte_order!(prop_i32, i32, ::core::i32::MAX as u64, read_i32, write_i32);
qc_byte_order!(prop_u48, u64, ::test::U48_MAX as u64, read_u48, write_u48);
qc_byte_order!(prop_i48, i64, ::test::I48_MAX as u64, read_i48, write_i48);
qc_byte_order!(prop_u64, u64, ::core::u64::MAX as u64, read_u64, write_u64);
qc_byte_order!(prop_i64, i64, ::core::i64::MAX as u64, read_i64, write_i64);
qc_byte_order!(prop_f32, f32, ::core::u64::MAX as u64, read_f32, write_f32);
qc_byte_order!(prop_f64, f64, ::core::i64::MAX as u64, read_f64, write_f64);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_byte_order!(prop_u128, Wi128<u128>, 16 + 1, read_u128, write_u128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_byte_order!(prop_i128, Wi128<i128>, 16 + 1, read_i128, write_i128);
qc_byte_order!(prop_uint_1,
u64, calc_max!(super::U64_MAX, 1), 1, read_uint, write_uint);
qc_byte_order!(prop_uint_2,
u64, calc_max!(super::U64_MAX, 2), 2, read_uint, write_uint);
qc_byte_order!(prop_uint_3,
u64, calc_max!(super::U64_MAX, 3), 3, read_uint, write_uint);
@@ -2481,62 +2547,62 @@ mod test {
u64, calc_max!(super::U64_MAX, 5), 5, read_uint, write_uint);
qc_byte_order!(prop_uint_6,
u64, calc_max!(super::U64_MAX, 6), 6, read_uint, write_uint);
qc_byte_order!(prop_uint_7,
u64, calc_max!(super::U64_MAX, 7), 7, read_uint, write_uint);
qc_byte_order!(prop_uint_8,
u64, calc_max!(super::U64_MAX, 8), 8, read_uint, write_uint);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_byte_order!(prop_uint128_1,
Wi128<u128>, 1, 1, read_uint128, write_uint128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_byte_order!(prop_uint128_2,
Wi128<u128>, 2, 2, read_uint128, write_uint128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_byte_order!(prop_uint128_3,
Wi128<u128>, 3, 3, read_uint128, write_uint128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_byte_order!(prop_uint128_4,
Wi128<u128>, 4, 4, read_uint128, write_uint128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_byte_order!(prop_uint128_5,
Wi128<u128>, 5, 5, read_uint128, write_uint128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_byte_order!(prop_uint128_6,
Wi128<u128>, 6, 6, read_uint128, write_uint128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_byte_order!(prop_uint128_7,
Wi128<u128>, 7, 7, read_uint128, write_uint128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_byte_order!(prop_uint128_8,
Wi128<u128>, 8, 8, read_uint128, write_uint128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_byte_order!(prop_uint128_9,
Wi128<u128>, 9, 9, read_uint128, write_uint128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_byte_order!(prop_uint128_10,
Wi128<u128>, 10, 10, read_uint128, write_uint128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_byte_order!(prop_uint128_11,
Wi128<u128>, 11, 11, read_uint128, write_uint128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_byte_order!(prop_uint128_12,
Wi128<u128>, 12, 12, read_uint128, write_uint128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_byte_order!(prop_uint128_13,
Wi128<u128>, 13, 13, read_uint128, write_uint128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_byte_order!(prop_uint128_14,
Wi128<u128>, 14, 14, read_uint128, write_uint128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_byte_order!(prop_uint128_15,
Wi128<u128>, 15, 15, read_uint128, write_uint128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_byte_order!(prop_uint128_16,
Wi128<u128>, 16, 16, read_uint128, write_uint128);
qc_byte_order!(prop_int_1,
i64, calc_max!(super::I64_MAX, 1), 1, read_int, write_int);
qc_byte_order!(prop_int_2,
i64, calc_max!(super::I64_MAX, 2), 2, read_int, write_int);
qc_byte_order!(prop_int_3,
@@ -2547,62 +2613,62 @@ mod test {
i64, calc_max!(super::I64_MAX, 5), 5, read_int, write_int);
qc_byte_order!(prop_int_6,
i64, calc_max!(super::I64_MAX, 6), 6, read_int, write_int);
qc_byte_order!(prop_int_7,
i64, calc_max!(super::I64_MAX, 7), 7, read_int, write_int);
qc_byte_order!(prop_int_8,
i64, calc_max!(super::I64_MAX, 8), 8, read_int, write_int);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_byte_order!(prop_int128_1,
Wi128<i128>, 1, 1, read_int128, write_int128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_byte_order!(prop_int128_2,
Wi128<i128>, 2, 2, read_int128, write_int128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_byte_order!(prop_int128_3,
Wi128<i128>, 3, 3, read_int128, write_int128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_byte_order!(prop_int128_4,
Wi128<i128>, 4, 4, read_int128, write_int128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_byte_order!(prop_int128_5,
Wi128<i128>, 5, 5, read_int128, write_int128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_byte_order!(prop_int128_6,
Wi128<i128>, 6, 6, read_int128, write_int128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_byte_order!(prop_int128_7,
Wi128<i128>, 7, 7, read_int128, write_int128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_byte_order!(prop_int128_8,
Wi128<i128>, 8, 8, read_int128, write_int128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_byte_order!(prop_int128_9,
Wi128<i128>, 9, 9, read_int128, write_int128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_byte_order!(prop_int128_10,
Wi128<i128>, 10, 10, read_int128, write_int128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_byte_order!(prop_int128_11,
Wi128<i128>, 11, 11, read_int128, write_int128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_byte_order!(prop_int128_12,
Wi128<i128>, 12, 12, read_int128, write_int128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_byte_order!(prop_int128_13,
Wi128<i128>, 13, 13, read_int128, write_int128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_byte_order!(prop_int128_14,
Wi128<i128>, 14, 14, read_int128, write_int128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_byte_order!(prop_int128_15,
Wi128<i128>, 15, 15, read_int128, write_int128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_byte_order!(prop_int128_16,
Wi128<i128>, 16, 16, read_int128, write_int128);
// Test that all of the byte conversion functions panic when given a
// buffer that is too small.
//
// These tests are critical to ensure safety, otherwise we might end up
@@ -2687,97 +2753,97 @@ mod test {
too_small!(small_u16, 1, 0, read_u16, write_u16);
too_small!(small_i16, 1, 0, read_i16, write_i16);
too_small!(small_u32, 3, 0, read_u32, write_u32);
too_small!(small_i32, 3, 0, read_i32, write_i32);
too_small!(small_u64, 7, 0, read_u64, write_u64);
too_small!(small_i64, 7, 0, read_i64, write_i64);
too_small!(small_f32, 3, 0.0, read_f32, write_f32);
too_small!(small_f64, 7, 0.0, read_f64, write_f64);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
too_small!(small_u128, 15, 0, read_u128, write_u128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
too_small!(small_i128, 15, 0, read_i128, write_i128);
too_small!(small_uint_1, 1, read_uint);
too_small!(small_uint_2, 2, read_uint);
too_small!(small_uint_3, 3, read_uint);
too_small!(small_uint_4, 4, read_uint);
too_small!(small_uint_5, 5, read_uint);
too_small!(small_uint_6, 6, read_uint);
too_small!(small_uint_7, 7, read_uint);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
too_small!(small_uint128_1, 1, read_uint128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
too_small!(small_uint128_2, 2, read_uint128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
too_small!(small_uint128_3, 3, read_uint128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
too_small!(small_uint128_4, 4, read_uint128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
too_small!(small_uint128_5, 5, read_uint128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
too_small!(small_uint128_6, 6, read_uint128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
too_small!(small_uint128_7, 7, read_uint128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
too_small!(small_uint128_8, 8, read_uint128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
too_small!(small_uint128_9, 9, read_uint128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
too_small!(small_uint128_10, 10, read_uint128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
too_small!(small_uint128_11, 11, read_uint128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
too_small!(small_uint128_12, 12, read_uint128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
too_small!(small_uint128_13, 13, read_uint128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
too_small!(small_uint128_14, 14, read_uint128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
too_small!(small_uint128_15, 15, read_uint128);
too_small!(small_int_1, 1, read_int);
too_small!(small_int_2, 2, read_int);
too_small!(small_int_3, 3, read_int);
too_small!(small_int_4, 4, read_int);
too_small!(small_int_5, 5, read_int);
too_small!(small_int_6, 6, read_int);
too_small!(small_int_7, 7, read_int);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
too_small!(small_int128_1, 1, read_int128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
too_small!(small_int128_2, 2, read_int128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
too_small!(small_int128_3, 3, read_int128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
too_small!(small_int128_4, 4, read_int128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
too_small!(small_int128_5, 5, read_int128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
too_small!(small_int128_6, 6, read_int128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
too_small!(small_int128_7, 7, read_int128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
too_small!(small_int128_8, 8, read_int128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
too_small!(small_int128_9, 9, read_int128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
too_small!(small_int128_10, 10, read_int128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
too_small!(small_int128_11, 11, read_int128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
too_small!(small_int128_12, 12, read_int128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
too_small!(small_int128_13, 13, read_int128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
too_small!(small_int128_14, 14, read_int128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
too_small!(small_int128_15, 15, read_int128);
// Test that reading/writing slices enforces the correct lengths.
macro_rules! slice_lengths {
($name:ident, $read:ident, $write:ident,
$num_bytes:expr, $numbers:expr) => {
mod $name {
use {ByteOrder, BigEndian, NativeEndian, LittleEndian};
@@ -2855,26 +2921,26 @@ mod test {
slice_len_too_small_u64, read_u64_into, write_u64_into, 15, [0, 0]);
slice_lengths!(
slice_len_too_big_u64, read_u64_into, write_u64_into, 17, [0, 0]);
slice_lengths!(
slice_len_too_small_i64, read_i64_into, write_i64_into, 15, [0, 0]);
slice_lengths!(
slice_len_too_big_i64, read_i64_into, write_i64_into, 17, [0, 0]);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
slice_lengths!(
slice_len_too_small_u128, read_u128_into, write_u128_into, 31, [0, 0]);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
slice_lengths!(
slice_len_too_big_u128, read_u128_into, write_u128_into, 33, [0, 0]);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
slice_lengths!(
slice_len_too_small_i128, read_i128_into, write_i128_into, 31, [0, 0]);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
slice_lengths!(
slice_len_too_big_i128, read_i128_into, write_i128_into, 33, [0, 0]);
#[test]
fn uint_bigger_buffer() {
use {ByteOrder, LittleEndian};
let n = LittleEndian::read_uint(&[1, 2, 3, 4, 5, 6, 7, 8], 5);
assert_eq!(n, 0x0504030201);
@@ -3011,19 +3077,19 @@ mod stdtests {
u64, ::std::u64::MAX as u64, read_u64, write_u64);
qc_bytes_ext!(prop_ext_i64,
i64, ::std::i64::MAX as u64, read_i64, write_i64);
qc_bytes_ext!(prop_ext_f32,
f32, ::std::u64::MAX as u64, read_f32, write_f32);
qc_bytes_ext!(prop_ext_f64,
f64, ::std::i64::MAX as u64, read_f64, write_f64);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_bytes_ext!(prop_ext_u128, Wi128<u128>, 16 + 1, read_u128, write_u128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_bytes_ext!(prop_ext_i128, Wi128<i128>, 16 + 1, read_i128, write_i128);
qc_bytes_ext!(prop_ext_uint_1,
u64, calc_max!(::test::U64_MAX, 1), 1, read_uint, write_u64);
qc_bytes_ext!(prop_ext_uint_2,
u64, calc_max!(::test::U64_MAX, 2), 2, read_uint, write_u64);
qc_bytes_ext!(prop_ext_uint_3,
u64, calc_max!(::test::U64_MAX, 3), 3, read_uint, write_u64);
@@ -3033,62 +3099,62 @@ mod stdtests {
u64, calc_max!(::test::U64_MAX, 5), 5, read_uint, write_u64);
qc_bytes_ext!(prop_ext_uint_6,
u64, calc_max!(::test::U64_MAX, 6), 6, read_uint, write_u64);
qc_bytes_ext!(prop_ext_uint_7,
u64, calc_max!(::test::U64_MAX, 7), 7, read_uint, write_u64);
qc_bytes_ext!(prop_ext_uint_8,
u64, calc_max!(::test::U64_MAX, 8), 8, read_uint, write_u64);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_bytes_ext!(prop_ext_uint128_1,
Wi128<u128>, 1, 1, read_uint128, write_u128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_bytes_ext!(prop_ext_uint128_2,
Wi128<u128>, 2, 2, read_uint128, write_u128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_bytes_ext!(prop_ext_uint128_3,
Wi128<u128>, 3, 3, read_uint128, write_u128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_bytes_ext!(prop_ext_uint128_4,
Wi128<u128>, 4, 4, read_uint128, write_u128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_bytes_ext!(prop_ext_uint128_5,
Wi128<u128>, 5, 5, read_uint128, write_u128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_bytes_ext!(prop_ext_uint128_6,
Wi128<u128>, 6, 6, read_uint128, write_u128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_bytes_ext!(prop_ext_uint128_7,
Wi128<u128>, 7, 7, read_uint128, write_u128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_bytes_ext!(prop_ext_uint128_8,
Wi128<u128>, 8, 8, read_uint128, write_u128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_bytes_ext!(prop_ext_uint128_9,
Wi128<u128>, 9, 9, read_uint128, write_u128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_bytes_ext!(prop_ext_uint128_10,
Wi128<u128>, 10, 10, read_uint128, write_u128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_bytes_ext!(prop_ext_uint128_11,
Wi128<u128>, 11, 11, read_uint128, write_u128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_bytes_ext!(prop_ext_uint128_12,
Wi128<u128>, 12, 12, read_uint128, write_u128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_bytes_ext!(prop_ext_uint128_13,
Wi128<u128>, 13, 13, read_uint128, write_u128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_bytes_ext!(prop_ext_uint128_14,
Wi128<u128>, 14, 14, read_uint128, write_u128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_bytes_ext!(prop_ext_uint128_15,
Wi128<u128>, 15, 15, read_uint128, write_u128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_bytes_ext!(prop_ext_uint128_16,
Wi128<u128>, 16, 16, read_uint128, write_u128);
qc_bytes_ext!(prop_ext_int_1,
i64, calc_max!(::test::I64_MAX, 1), 1, read_int, write_i64);
qc_bytes_ext!(prop_ext_int_2,
i64, calc_max!(::test::I64_MAX, 2), 2, read_int, write_i64);
qc_bytes_ext!(prop_ext_int_3,
@@ -3099,62 +3165,62 @@ mod stdtests {
i64, calc_max!(::test::I64_MAX, 5), 5, read_int, write_i64);
qc_bytes_ext!(prop_ext_int_6,
i64, calc_max!(::test::I64_MAX, 6), 6, read_int, write_i64);
qc_bytes_ext!(prop_ext_int_7,
i64, calc_max!(::test::I64_MAX, 1), 7, read_int, write_i64);
qc_bytes_ext!(prop_ext_int_8,
i64, calc_max!(::test::I64_MAX, 8), 8, read_int, write_i64);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_bytes_ext!(prop_ext_int128_1,
Wi128<i128>, 1, 1, read_int128, write_i128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_bytes_ext!(prop_ext_int128_2,
Wi128<i128>, 2, 2, read_int128, write_i128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_bytes_ext!(prop_ext_int128_3,
Wi128<i128>, 3, 3, read_int128, write_i128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_bytes_ext!(prop_ext_int128_4,
Wi128<i128>, 4, 4, read_int128, write_i128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_bytes_ext!(prop_ext_int128_5,
Wi128<i128>, 5, 5, read_int128, write_i128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_bytes_ext!(prop_ext_int128_6,
Wi128<i128>, 6, 6, read_int128, write_i128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_bytes_ext!(prop_ext_int128_7,
Wi128<i128>, 7, 7, read_int128, write_i128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_bytes_ext!(prop_ext_int128_8,
Wi128<i128>, 8, 8, read_int128, write_i128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_bytes_ext!(prop_ext_int128_9,
Wi128<i128>, 9, 9, read_int128, write_i128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_bytes_ext!(prop_ext_int128_10,
Wi128<i128>, 10, 10, read_int128, write_i128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_bytes_ext!(prop_ext_int128_11,
Wi128<i128>, 11, 11, read_int128, write_i128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_bytes_ext!(prop_ext_int128_12,
Wi128<i128>, 12, 12, read_int128, write_i128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_bytes_ext!(prop_ext_int128_13,
Wi128<i128>, 13, 13, read_int128, write_i128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_bytes_ext!(prop_ext_int128_14,
Wi128<i128>, 14, 14, read_int128, write_i128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_bytes_ext!(prop_ext_int128_15,
Wi128<i128>, 15, 15, read_int128, write_i128);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_bytes_ext!(prop_ext_int128_16,
Wi128<i128>, 16, 16, read_int128, write_i128);
// Test slice serialization/deserialization.
macro_rules! qc_slice {
($name:ident, $ty_int:ty, $read:ident, $write:ident, $zero:expr) => {
mod $name {
use core::mem::size_of;
@@ -3230,20 +3296,20 @@ mod stdtests {
}
qc_slice!(prop_slice_u16, u16, read_u16_into, write_u16_into, 0);
qc_slice!(prop_slice_i16, i16, read_i16_into, write_i16_into, 0);
qc_slice!(prop_slice_u32, u32, read_u32_into, write_u32_into, 0);
qc_slice!(prop_slice_i32, i32, read_i32_into, write_i32_into, 0);
qc_slice!(prop_slice_u64, u64, read_u64_into, write_u64_into, 0);
qc_slice!(prop_slice_i64, i64, read_i64_into, write_i64_into, 0);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_slice!(
prop_slice_u128, Wi128<u128>, read_u128_into, write_u128_into, 0);
- #[cfg(feature = "i128")]
+ #[cfg(byteorder_i128)]
qc_slice!(
prop_slice_i128, Wi128<i128>, read_i128_into, write_i128_into, 0);
qc_slice!(
- prop_slice_f32, f32, read_f32_into_unchecked, write_f32_into, 0.0);
+ prop_slice_f32, f32, read_f32_into, write_f32_into, 0.0);
qc_slice!(
- prop_slice_f64, f64, read_f64_into_unchecked, write_f64_into, 0.0);
+ prop_slice_f64, f64, read_f64_into, write_f64_into, 0.0);
}
diff --git a/third_party/rust/murmurhash3/.cargo-checksum.json b/third_party/rust/murmurhash3/.cargo-checksum.json
new file mode 100644
--- /dev/null
+++ b/third_party/rust/murmurhash3/.cargo-checksum.json
@@ -0,0 +1,1 @@
+{"files":{"CHANGELOG.rst":"11fdd5b156fc2ef5fb7ed980ba91c2a32bdabb79fa386926cbd70673ca6086a5","Cargo.toml":"f61656d89dfd6de3f420e021a55672979d02c4732154b3e91122582af084b8b5","LICENSE":"bb5492d70d4de524e3e29507fb9d87165a49acbc3a5b0e946aaed7e8cfbbd01b","README.rst":"9abdacc75d4886d6201d22b4406353beafb0f3012180109d47fca78e3b8ee5a2","src/hasher.rs":"0022eaa0525dc48b1d8e1dae3fdf5b86b8ae036cb6f87d68f3f5e3b31819f90f","src/lib.rs":"5fc49f47993193b09f170de2747679dc090ff7ec3a62858d62e8cb5213c67392","src/mmh3_128.rs":"0003106e26c34bd9b98155a19953bba946ae4c7899427b160dd74060afa96805","src/mmh3_32.rs":"67fef38bb5f6f4109b401d4b4aaa6bdd3fd4b83f89caeac6666b5f2173a8340e"},"package":"a2983372caf4480544083767bf2d27defafe32af49ab4df3a0b7fc90793a3664"}
\ No newline at end of file
diff --git a/third_party/rust/murmurhash3/CHANGELOG.rst b/third_party/rust/murmurhash3/CHANGELOG.rst
new file mode 100644
--- /dev/null
+++ b/third_party/rust/murmurhash3/CHANGELOG.rst
@@ -0,0 +1,24 @@
+Change Log
+==========
+
+Unreleased_
+----------
+
+0.0.4_ — 2014-04-04
+----------
+
+* Enable ``HashState`` implementation
+
+
+0.0.3_ — 2014-03-29
+------------------
+
+* PR1_: Fixes to keep Rust Nightly compatibility, thanks polyfractal_
+
+
+.. _Unreleased: https://github.com/mhallin/murmurhash3-rs/compare/v0.0.4...HEAD
+.. _0.0.4: https://github.com/mhallin/murmurhash3-rs/compare/v0.0.3...v0.0.4
+.. _0.0.3: https://github.com/mhallin/murmurhash3-rs/compare/v0.0.2...v0.0.3
+
+.. _PR1: https://github.com/mhallin/murmurhash3-rs/pull/1
+.. _polyfractal: https://github.com/polyfractal
diff --git a/third_party/rust/murmurhash3/Cargo.toml b/third_party/rust/murmurhash3/Cargo.toml
new file mode 100644
--- /dev/null
+++ b/third_party/rust/murmurhash3/Cargo.toml
@@ -0,0 +1,19 @@
+[package]
+
+name = "murmurhash3"
+version = "0.0.5"
+authors = ["mhallin <mhallin@gmail.com>"]
+description = "MurmurHash3 implementation"
+license = "MIT"
+readme = "README.rst"
+homepage = "https://github.com/mhallin/murmurhash3-rs"
+
+[lib]
+name = "murmurhash3"
+path = "src/lib.rs"
+
+[dev-dependencies]
+rand = "*"
+
+[features]
+nightly = []
diff --git a/third_party/rust/murmurhash3/LICENSE b/third_party/rust/murmurhash3/LICENSE
new file mode 100644
--- /dev/null
+++ b/third_party/rust/murmurhash3/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Magnus Hallin
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/third_party/rust/murmurhash3/README.rst b/third_party/rust/murmurhash3/README.rst
new file mode 100644
--- /dev/null
+++ b/third_party/rust/murmurhash3/README.rst
@@ -0,0 +1,50 @@
+**************
+MurmurHash3.rs
+**************
+
+.. image:: https://travis-ci.org/mhallin/murmurhash3-rs.svg?branch=master
+ :target: https://travis-ci.org/mhallin/murmurhash3-rs
+
+A rust implementation of the MurmurHash3_. Both 32 bit and 128 bit versions are included. The 128
+bit version is implemented with 64 bit datatypes, making it most suitable for x86_64 or other 64 bit
+architectures.
+
+----
+
+Usage
+=====
+
+In your ``Cargo.toml``:
+
+.. code:: toml
+
+ [dependencies]
+ murmurhash3 = "*"
+
+Then you can start to use either ``murmurhash3_x86_32`` or ``murmurhash3_x64_128``:
+
+.. code:: rust
+
+ use murmurhash3::murmurhash3_x64_128;
+
+ fn hash_value() {
+ let data = "test data";
+ let seed = 48221234;
+
+ let hash = murmurhash3_x64_128(data.as_bytes(), seed);
+ }
+
+Unfortunately, there is a bug in the ``HashState`` library implementation which prevents
+implementation of new ``Hasher`` implementations for use in for example ``HashMap``. Additionally,
+only the 32 bit hasher can be used there since ``HashMap`` uses a 64 bit hash internally.
+
+Tests
+=====
+
+.. code::
+
+ cargo test
+
+Runs all tests with optimization level 3 in order to weed out potential problems with the optimizer.
+
+.. _MurmurHash3: https://code.google.com/p/smhasher/wiki/MurmurHash3
diff --git a/third_party/rust/murmurhash3/src/hasher.rs b/third_party/rust/murmurhash3/src/hasher.rs
new file mode 100644
--- /dev/null
+++ b/third_party/rust/murmurhash3/src/hasher.rs
@@ -0,0 +1,61 @@
+use std::hash::Hasher;
+use std::collections::hash_state::HashState;
+
+use mmh3_32::murmurhash3_x86_32;
+
+pub struct Murmur3Hasher {
+ seed: u32,
+ bytes: Vec<u8>,
+}
+
+#[derive(Clone, Copy)]
+pub struct Murmur3HashState {
+ seed: u32,
+}
+
+impl Murmur3HashState {
+ pub fn new() -> Murmur3HashState {
+ return Murmur3HashState { seed: 0 };
+ }
+
+ pub fn with_seed(seed: u32) -> Murmur3HashState {
+ return Murmur3HashState { seed: seed };
+ }
+}
+
+
+impl Hasher for Murmur3Hasher {
+ fn finish(&self) -> u64 {
+ return murmurhash3_x86_32(&self.bytes, self.seed) as u64;
+ }
+
+ fn write(&mut self, bytes: &[u8]) {
+ self.bytes.push_all(bytes);
+ }
+}
+
+impl HashState for Murmur3HashState {
+ type Hasher = Murmur3Hasher;
+
+ fn hasher(&self) -> Murmur3Hasher {
+ return Murmur3Hasher { seed: self.seed, bytes: vec![] };
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use super::Murmur3HashState;
+ use std::collections::hash_map::HashMap;
+
+ #[test]
+ fn use_in_hashmap() {
+ let mut hashmap = HashMap::with_hash_state(Murmur3HashState::new());
+ hashmap.insert("one", 1);
+ hashmap.insert("two", 2);
+
+ assert!(hashmap.len() == 2);
+
+ assert!(*hashmap.get("one").unwrap() == 1);
+ assert!(*hashmap.get("two").unwrap() == 2);
+ }
+}
diff --git a/third_party/rust/murmurhash3/src/lib.rs b/third_party/rust/murmurhash3/src/lib.rs
new file mode 100644
--- /dev/null
+++ b/third_party/rust/murmurhash3/src/lib.rs
@@ -0,0 +1,15 @@
+#![cfg_attr(feature = "nightly", feature(hashmap_hasher))]
+#![cfg_attr(feature = "nightly", feature(test))]
+#![cfg_attr(feature = "nightly", feature(vec_push_all))]
+
+mod mmh3_128;
+mod mmh3_32;
+
+#[cfg(feature="nightly")]
+mod hasher;
+
+pub use mmh3_128::murmurhash3_x64_128;
+pub use mmh3_32::murmurhash3_x86_32;
+
+#[cfg(feature="nightly")]
+pub use hasher::Murmur3HashState;
diff --git a/third_party/rust/murmurhash3/src/mmh3_128.rs b/third_party/rust/murmurhash3/src/mmh3_128.rs
new file mode 100644
--- /dev/null
+++ b/third_party/rust/murmurhash3/src/mmh3_128.rs
@@ -0,0 +1,181 @@
+use std::mem;
+
+fn fmix64(mut k: u64) -> u64 {
+ k ^= k >> 33;
+ k = k.wrapping_mul(0xff51afd7ed558ccdu64);
+ k ^= k >> 33;
+ k = k.wrapping_mul(0xc4ceb9fe1a85ec53u64);
+ k ^= k >> 33;
+
+ return k;
+}
+
+fn get_128_block(bytes: &[u8], index: usize) -> (u64, u64) {
+ let b64: &[u64] = unsafe { mem::transmute(bytes) };
+
+ return (b64[index], b64[index + 1]);
+}
+
+pub fn murmurhash3_x64_128(bytes: &[u8], seed: u64) -> (u64, u64) {
+ let c1 = 0x87c37b91114253d5u64;
+ let c2 = 0x4cf5ad432745937fu64;
+ let read_size = 16;
+ let len = bytes.len() as u64;
+ let block_count = len / read_size;
+
+ let (mut h1, mut h2) = (seed, seed);
+
+
+ for i in 0..block_count as usize {
+ let (mut k1, mut k2) = get_128_block(bytes, i * 2);
+
+ k1 = k1.wrapping_mul(c1);
+ k1 = k1.rotate_left(31);
+ k1 = k1.wrapping_mul(c2);
+ h1 ^= k1;
+
+ h1 = h1.rotate_left(27);
+ h1 = h1.wrapping_add(h2);
+ h1 = h1.wrapping_mul(5);
+ h1 = h1.wrapping_add(0x52dce729);
+
+ k2 = k2.wrapping_mul(c2);
+ k2 = k2.rotate_left(33);
+ k2 = k2.wrapping_mul(c1);
+ h2 ^= k2;
+
+ h2 = h2.rotate_left(31);
+ h2 = h2.wrapping_add(h1);
+ h2 = h2.wrapping_mul(5);
+ h2 = h2.wrapping_add(0x38495ab5);
+ }
+
+
+ let (mut k1, mut k2) = (0u64, 0u64);
+
+ if len & 15 == 15 { k2 ^= (bytes[(block_count * read_size) as usize + 14] as u64) << 48; }
+ if len & 15 >= 14 { k2 ^= (bytes[(block_count * read_size) as usize + 13] as u64) << 40; }
+ if len & 15 >= 13 { k2 ^= (bytes[(block_count * read_size) as usize + 12] as u64) << 32; }
+ if len & 15 >= 12 { k2 ^= (bytes[(block_count * read_size) as usize + 11] as u64) << 24; }
+ if len & 15 >= 11 { k2 ^= (bytes[(block_count * read_size) as usize + 10] as u64) << 16; }
+ if len & 15 >= 10 { k2 ^= (bytes[(block_count * read_size) as usize + 9] as u64) << 8; }
+ if len & 15 >= 9 { k2 ^= bytes[(block_count * read_size) as usize + 8] as u64;
+ k2 = k2.wrapping_mul(c2);
+ k2 = k2.rotate_left(33);
+ k2 = k2.wrapping_mul(c1);
+ h2 ^= k2;
+ }
+
+ if len & 15 >= 8 { k1 ^= (bytes[(block_count * read_size) as usize + 7] as u64) << 56; }
+ if len & 15 >= 7 { k1 ^= (bytes[(block_count * read_size) as usize + 6] as u64) << 48; }
+ if len & 15 >= 6 { k1 ^= (bytes[(block_count * read_size) as usize + 5] as u64) << 40; }
+ if len & 15 >= 5 { k1 ^= (bytes[(block_count * read_size) as usize + 4] as u64) << 32; }
+ if len & 15 >= 4 { k1 ^= (bytes[(block_count * read_size) as usize + 3] as u64) << 24; }
+ if len & 15 >= 3 { k1 ^= (bytes[(block_count * read_size) as usize + 2] as u64) << 16; }
+ if len & 15 >= 2 { k1 ^= (bytes[(block_count * read_size) as usize + 1] as u64) << 8; }
+ if len & 15 >= 1 { k1 ^= bytes[(block_count * read_size) as usize + 0] as u64;
+ k1 = k1.wrapping_mul(c1);
+ k1 = k1.rotate_left(31);
+ k1 = k1.wrapping_mul(c2);
+ h1 ^= k1;
+ }
+
+ h1 ^= bytes.len() as u64;
+ h2 ^= bytes.len() as u64;
+
+ h1 = h1.wrapping_add(h2);
+ h2 = h2.wrapping_add(h1);
+
+ h1 = fmix64(h1);
+ h2 = fmix64(h2);
+
+ h1 = h1.wrapping_add(h2);
+ h2 = h2.wrapping_add(h1);
+
+ return (h1, h2);
+}
+
+#[cfg(test)]
+mod test {
+ use super::murmurhash3_x64_128;
+
+ #[test]
+ fn test_empty_string() {
+ assert!(murmurhash3_x64_128("".as_bytes(), 0) == (0, 0));
+ }
+
+ #[test]
+ fn test_tail_lengths() {
+ assert!(murmurhash3_x64_128("1".as_bytes(), 0)
+ == (8213365047359667313, 10676604921780958775));
+ assert!(murmurhash3_x64_128("12".as_bytes(), 0)
+ == (5355690773644049813, 9855895140584599837));
+ assert!(murmurhash3_x64_128("123".as_bytes(), 0)
+ == (10978418110857903978, 4791445053355511657));
+ assert!(murmurhash3_x64_128("1234".as_bytes(), 0)
+ == (619023178690193332, 3755592904005385637));
+ assert!(murmurhash3_x64_128("12345".as_bytes(), 0)
+ == (2375712675693977547, 17382870096830835188));
+ assert!(murmurhash3_x64_128("123456".as_bytes(), 0)
+ == (16435832985690558678, 5882968373513761278));
+ assert!(murmurhash3_x64_128("1234567".as_bytes(), 0)
+ == (3232113351312417698, 4025181827808483669));
+ assert!(murmurhash3_x64_128("12345678".as_bytes(), 0)
+ == (4272337174398058908, 10464973996478965079));
+ assert!(murmurhash3_x64_128("123456789".as_bytes(), 0)
+ == (4360720697772133540, 11094893415607738629));
+ assert!(murmurhash3_x64_128("123456789a".as_bytes(), 0)
+ == (12594836289594257748, 2662019112679848245));
+ assert!(murmurhash3_x64_128("123456789ab".as_bytes(), 0)
+ == (6978636991469537545, 12243090730442643750));
+ assert!(murmurhash3_x64_128("123456789abc".as_bytes(), 0)
+ == (211890993682310078, 16480638721813329343));
+ assert!(murmurhash3_x64_128("123456789abcd".as_bytes(), 0)
+ == (12459781455342427559, 3193214493011213179));
+ assert!(murmurhash3_x64_128("123456789abcde".as_bytes(), 0)
+ == (12538342858731408721, 9820739847336455216));
+ assert!(murmurhash3_x64_128("123456789abcdef".as_bytes(), 0)
+ == (9165946068217512774, 2451472574052603025));
+ assert!(murmurhash3_x64_128("123456789abcdef1".as_bytes(), 0)
+ == (9259082041050667785, 12459473952842597282));
+ }
+
+ #[test]
+ fn test_large_data() {
+ assert!(murmurhash3_x64_128("Lorem ipsum dolor sit amet, consectetur adipiscing elit. Etiam at consequat massa. Cras eleifend pellentesque ex, at dignissim libero maximus ut. Sed eget nulla felis".as_bytes(), 0)
+ == (9455322759164802692, 17863277201603478371));
+ }
+
+ #[cfg(feature="nightly")]
+ mod bench {
+ extern crate rand;
+ extern crate test;
+
+ use std::iter::FromIterator;
+ use self::rand::Rng;
+ use self::test::{Bencher, black_box};
+
+ use super::super::murmurhash3_x64_128;
+
+ fn run_bench(b: &mut Bencher, size: u64) {
+ let mut data: Vec<u8> = FromIterator::from_iter((0..size).map(|_| 0u8));
+ rand::thread_rng().fill_bytes(&mut data);
+
+ b.bytes = size;
+ b.iter(|| {
+ black_box(murmurhash3_x64_128(&data, 0));
+ });
+ }
+
+ #[bench]
+ fn bench_random_256k(b: &mut Bencher) {
+ run_bench(b, 256 * 1024);
+ }
+
+ #[bench]
+ fn bench_random_16b(b: &mut Bencher) {
+ run_bench(b, 16);
+ }
+
+ }
+}
diff --git a/third_party/rust/murmurhash3/src/mmh3_32.rs b/third_party/rust/murmurhash3/src/mmh3_32.rs
new file mode 100644
--- /dev/null
+++ b/third_party/rust/murmurhash3/src/mmh3_32.rs
@@ -0,0 +1,115 @@
+use std::mem;
+
+fn fmix32(mut h: u32) -> u32 {
+ h ^= h >> 16;
+ h = h.wrapping_mul(0x85ebca6b);
+ h ^= h >> 13;
+ h = h.wrapping_mul(0xc2b2ae35);
+ h ^= h >> 16;
+
+ return h;
+}
+
+fn get_32_block(bytes: &[u8], index: usize) -> u32 {
+ let b32: &[u32] = unsafe { mem::transmute(bytes) };
+
+ return b32[index];
+}
+
+pub fn murmurhash3_x86_32(bytes: &[u8], seed: u32) -> u32 {
+ let c1 = 0xcc9e2d51u32;
+ let c2 = 0x1b873593u32;
+ let read_size = 4;
+ let len = bytes.len() as u32;
+ let block_count = len / read_size;
+
+ let mut h1 = seed;
+
+ for i in 0..block_count as usize {
+ let mut k1 = get_32_block(bytes, i);
+
+ k1 = k1.wrapping_mul(c1);
+ k1 = k1.rotate_left(15);
+ k1 = k1.wrapping_mul(c2);
+
+ h1 ^= k1;
+ h1 = h1.rotate_left(13);
+ h1 = h1.wrapping_mul(5);
+ h1 = h1.wrapping_add(0xe6546b64)
+ }
+ let mut k1 = 0u32;
+
+ if len & 3 == 3 { k1 ^= (bytes[(block_count * read_size) as usize + 2] as u32) << 16; }
+ if len & 3 >= 2 { k1 ^= (bytes[(block_count * read_size) as usize + 1] as u32) << 8; }
+ if len & 3 >= 1 { k1 ^= bytes[(block_count * read_size) as usize + 0] as u32;
+ k1 = k1.wrapping_mul(c1);
+ k1 = k1.rotate_left(15);
+ k1 = k1.wrapping_mul(c2);
+ h1 ^= k1;
+ }
+
+ h1 ^= bytes.len() as u32;
+ h1 = fmix32(h1);
+
+ return h1;
+}
+
+#[cfg(test)]
+mod test {
+ use super::murmurhash3_x86_32;
+
+ #[test]
+ fn test_empty_string() {
+ assert!(murmurhash3_x86_32("".as_bytes(), 0) == 0);
+ }
+
+ #[test]
+ fn test_tail_lengths() {
+ assert!(murmurhash3_x86_32("1".as_bytes(), 0)
+ == 2484513939);
+ assert!(murmurhash3_x86_32("12".as_bytes(), 0)
+ == 4191350549);
+ assert!(murmurhash3_x86_32("123".as_bytes(), 0)
+ == 2662625771);
+ assert!(murmurhash3_x86_32("1234".as_bytes(), 0)
+ == 1914461635);
+ }
+
+ #[test]
+ fn test_large_data() {
+ assert!(murmurhash3_x86_32("Lorem ipsum dolor sit amet, consectetur adipiscing elit. Etiam at consequat massa. Cras eleifend pellentesque ex, at dignissim libero maximus ut. Sed eget nulla felis".as_bytes(), 0)
+ == 1004899618);
+ }
+
+ #[cfg(feature="nightly")]
+ mod bench {
+ extern crate rand;
+ extern crate test;
+
+ use std::iter::FromIterator;
+ use self::rand::Rng;
+ use self::test::{Bencher, black_box};
+
+ use super::super::murmurhash3_x86_32;
+
+ fn run_bench(b: &mut Bencher, size: u64) {
+ let mut data: Vec<u8> = FromIterator::from_iter((0..size).map(|_| 0u8));
+ rand::thread_rng().fill_bytes(&mut data);
+
+ b.bytes = size;
+ b.iter(|| {
+ black_box(murmurhash3_x86_32(&data, 0));
+ });
+ }
+
+ #[bench]
+ fn bench_random_256k(b: &mut Bencher) {
+ run_bench(b, 256 * 1024);
+ }
+
+ #[bench]
+ fn bench_random_16b(b: &mut Bencher) {
+ run_bench(b, 16);
+ }
+ }
+}
diff --git a/third_party/rust/rust_cascade/.cargo-checksum.json b/third_party/rust/rust_cascade/.cargo-checksum.json
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rust_cascade/.cargo-checksum.json
@@ -0,0 +1,1 @@
+{"files":{"Cargo.toml":"25e7818c7b5cad1020f6234b0ba140422354cc0177bf812bdff3407541131afd","README.md":"2813bf88a5c407c6883f173fac430072f0007f730615366983f59e6d5e6950de","license.txt":"c76f740d1521b9bed9ca7a04ad526c310493c62621b1341d623b431736533b30","src/lib.rs":"b32d7dab31f614cea5a2514a8665ae0d571232628a30a5e6d96ee4d2fdfb1737","test_data/test_bf":"afa0b9e6b9244522302aa8d153e92e25d7fd16388f34c06497487fc0332a4d34","test_data/test_mlbf":"2c6a5f13ad459f1bcf88d91c2eaab94eda84a60fc123b9914203dee725d70ba0","test_data/test_short_mlbf":"55f0d1361acb4f4ec25dba6bfa92a777d49dcc2e71f623a9ec2a70c6ff5f61eb"},"package":null}
\ No newline at end of file
diff --git a/third_party/rust/rust_cascade/Cargo.toml b/third_party/rust/rust_cascade/Cargo.toml
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rust_cascade/Cargo.toml
@@ -0,0 +1,13 @@
+[package]
+name = "rust_cascade"
+description = "A simple mmh3 based bloom filter cascade implementation in Rust."
+license = "MPL-2.0"
+version = "0.3.2"
+authors = ["Mark Goodwin <mgoodwin@mozilla.com>"]
+
+[dependencies]
+murmurhash3="0.0.5"
+bitvec = { git = "https://github.com/mozmark/bitvec", branch = "20190429-bitvec-vendor-issues" }
+byteorder="1.3.1"
+rand="0.*"
+digest="0.8.0"
diff --git a/third_party/rust/rust_cascade/README.md b/third_party/rust/rust_cascade/README.md
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rust_cascade/README.md
@@ -0,0 +1,4 @@
+# rust-cascade
+A filter cascade implementation in rust
+
+See tests in src/lib.rs to get an idea of usage.
\ No newline at end of file
diff --git a/third_party/rust/rust_cascade/license.txt b/third_party/rust/rust_cascade/license.txt
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rust_cascade/license.txt
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
\ No newline at end of file
diff --git a/third_party/rust/rust_cascade/src/lib.rs b/third_party/rust/rust_cascade/src/lib.rs
new file mode 100644
--- /dev/null
+++ b/third_party/rust/rust_cascade/src/lib.rs
@@ -0,0 +1,326 @@
+extern crate bitvec;
+extern crate byteorder;
+extern crate digest;
+extern crate murmurhash3;
+extern crate rand;
+
+use bitvec::{bitvec, BitVec, LittleEndian};
+use byteorder::ReadBytesExt;
+use murmurhash3::murmurhash3_x86_32;
+
+use std::io::{Error, ErrorKind, Read};
+
/// A single Bloom filter layer of a multi-level cascade.
#[derive(Debug)]
pub struct Bloom {
    /// Depth of this filter in the cascade; mixed into the hash seed so each
    /// layer hashes the same key differently (see `Bloom::hash`).
    level: u32,
    /// Number of hash functions applied per key.
    n_hash_funcs: u32,
    /// Size of the filter in bits.
    size: usize,
    /// The underlying bit array, one bit per slot.
    bitvec: BitVec<bitvec::LittleEndian>,
}
+
/// Returns the number of hash functions k = ceil(ln(1/p) / ln 2) for a Bloom
/// filter targeting false-positive rate `p` (= `error_rate`).
pub fn calculate_n_hash_funcs(error_rate: f32) -> u32 {
    let inverse_rate = 1.0 / error_rate;
    let n_funcs = inverse_rate.ln() / (2.0_f32).ln();
    n_funcs.ceil() as u32
}
+
+pub fn calculate_size(elements: usize, error_rate: f32) -> usize {
+ let n_hash_funcs = calculate_n_hash_funcs(error_rate);
+ let hashes = n_hash_funcs as f32;
+ (1.0_f32
+ - (hashes * (elements as f32 + 0.5) / (1.0_f32 - error_rate.powf(1.0 / hashes)).ln()))
+ .ceil() as usize
+}
+
+impl Bloom {
+ pub fn new(size: usize, n_hash_funcs: u32, level: u32) -> Bloom {
+ let bitvec: BitVec<LittleEndian> = bitvec![LittleEndian; 0; size];
+
+ Bloom {
+ level: level,
+ n_hash_funcs: n_hash_funcs,
+ size: size,
+ bitvec: bitvec,
+ }
+ }
+
+ pub fn from_bytes(cursor: &mut &[u8]) -> Result<Bloom, Error> {
+ // Load the layer metadata. bloomer.py writes size, nHashFuncs and level as little-endian
+ // unsigned ints.
+ let size = cursor.read_u32::<byteorder::LittleEndian>()? as usize;
+ let n_hash_funcs = cursor.read_u32::<byteorder::LittleEndian>()?;
+ let level = cursor.read_u32::<byteorder::LittleEndian>()?;
+
+ let shifted_size = size.wrapping_shr(3);
+ let byte_count = if size % 8 != 0 {
+ shifted_size + 1
+ } else {
+ shifted_size
+ };
+
+ let mut bitvec_buf = vec![0u8; byte_count];
+ cursor.read_exact(&mut bitvec_buf)?;
+
+ Ok(Bloom {
+ level,
+ n_hash_funcs,
+ size,
+ bitvec: bitvec_buf.into(),
+ })
+ }
+
+ fn hash(&self, n_fn: u32, key: &[u8]) -> usize {
+ let hash_seed = (n_fn << 16) + self.level;
+ let h = murmurhash3_x86_32(key, hash_seed) as usize % self.size;
+ h
+ }
+
+ pub fn put(&mut self, item: &[u8]) {
+ for i in 0..self.n_hash_funcs {
+ let index = self.hash(i, item);
+ self.bitvec.set(index, true);
+ }
+ }
+
+ pub fn has(&self, item: &[u8]) -> bool {
+ for i in 0..self.n_hash_funcs {
+ match self.bitvec.get(self.hash(i, item)) {
+ Some(false) => return false,
+ Some(true) => (),
+ None => panic!(
+ "access outside the bloom filter bit vector (this is almost certainly a bug)"
+ ),
+ }
+ }
+
+ true
+ }
+
+ pub fn clear(&mut self) {
+ self.bitvec.clear()
+ }
+}
+
/// A multi-level cascade of Bloom filters. Each child layer stores the false
/// positives of the layer above it (see `Cascade::initialize`).
#[derive(Debug)]
pub struct Cascade {
    /// The Bloom filter for this layer.
    filter: Bloom,
    /// The next layer down; present only when this layer produced false
    /// positives during initialization.
    child_layer: Option<Box<Cascade>>,
}
+
+impl Cascade {
+ pub fn new(size: usize, n_hash_funcs: u32) -> Cascade {
+ return Cascade::new_layer(size, n_hash_funcs, 1);
+ }
+
+ pub fn from_bytes(bytes: &[u8]) -> Result<Option<Box<Cascade>>, Error> {
+ if bytes.len() == 0 {
+ return Ok(None);
+ }
+ let mut cursor = bytes;
+ let version = cursor.read_u16::<byteorder::LittleEndian>()?;
+ if version != 1 {
+ return Err(Error::new(ErrorKind::InvalidInput, "Invalid version"));
+ }
+ Ok(Some(Box::new(Cascade {
+ filter: Bloom::from_bytes(&mut cursor)?,
+ child_layer: Cascade::from_bytes(cursor)?,
+ })))
+ }
+
+ fn new_layer(size: usize, n_hash_funcs: u32, layer: u32) -> Cascade {
+ Cascade {
+ filter: Bloom::new(size, n_hash_funcs, layer),
+ child_layer: Option::None,
+ }
+ }
+
+ pub fn initialize(&mut self, entries: Vec<Vec<u8>>, exclusions: Vec<Vec<u8>>) {
+ let mut false_positives = Vec::new();
+ for entry in &entries {
+ self.filter.put(entry);
+ }
+
+ for entry in exclusions {
+ if self.filter.has(&entry) {
+ false_positives.push(entry);
+ }
+ }
+
+ if false_positives.len() > 0 {
+ let n_hash_funcs = calculate_n_hash_funcs(0.5);
+ let size = calculate_size(false_positives.len(), 0.5);
+ let mut child = Box::new(Cascade::new_layer(
+ size,
+ n_hash_funcs,
+ self.filter.level + 1,
+ ));
+ child.initialize(false_positives, entries);
+ self.child_layer = Some(child);
+ }
+ }
+
+ pub fn has(&self, entry: &[u8]) -> bool {
+ if self.filter.has(&entry) {
+ match self.child_layer {
+ Some(ref child) => {
+ let child_value = !child.has(entry);
+ return child_value;
+ }
+ None => {
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ pub fn check(&self, entries: Vec<Vec<u8>>, exclusions: Vec<Vec<u8>>) -> bool {
+ for entry in entries {
+ if !self.has(&entry) {
+ return false;
+ }
+ }
+
+ for entry in exclusions {
+ if self.has(&entry) {
+ return false;
+ }
+ }
+
+ true
+ }
+}
+
#[cfg(test)]
mod tests {
    use calculate_n_hash_funcs;
    use calculate_size;
    use rand::Rng;
    use Bloom;
    use Cascade;

    #[test]
    fn bloom_test_bloom_size() {
        let error_rate = 0.01;
        let elements = 1024;
        let n_hash_funcs = calculate_n_hash_funcs(error_rate);
        let size = calculate_size(elements, error_rate);

        let bloom = Bloom::new(size, n_hash_funcs, 0);
        // Expected bit count for 1024 elements at a 1% error rate.
        assert_eq!(bloom.bitvec.len(), 9829);
    }

    #[test]
    fn bloom_test_put() {
        let error_rate = 0.01;
        let elements = 1024;
        let n_hash_funcs = calculate_n_hash_funcs(error_rate);
        let size = calculate_size(elements, error_rate);

        let mut bloom = Bloom::new(size, n_hash_funcs, 0);
        let key: &[u8] = b"foo";

        // Inserting must not panic.
        bloom.put(key);
    }

    #[test]
    fn bloom_test_has() {
        let error_rate = 0.01;
        let elements = 1024;
        let n_hash_funcs = calculate_n_hash_funcs(error_rate);
        let size = calculate_size(elements, error_rate);

        let mut bloom = Bloom::new(size, n_hash_funcs, 0);
        let key: &[u8] = b"foo";

        bloom.put(key);
        assert!(bloom.has(key));
        assert!(!bloom.has(b"bar"));
    }

    #[test]
    fn bloom_test_from_bytes() {
        // Header: size = 9 bits, nHashFuncs = 1, level = 1; then 2 data bytes.
        let src: Vec<u8> = vec![
            0x09, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x41, 0x00,
        ];

        match Bloom::from_bytes(&mut &src[..]) {
            Ok(mut bloom) => {
                assert!(bloom.has(b"this"));
                assert!(bloom.has(b"that"));
                assert!(!bloom.has(b"other"));

                bloom.put(b"other");
                assert!(bloom.has(b"other"));
            }
            Err(_) => {
                panic!("Parsing failed");
            }
        };

        // Same header, but one byte short of the 2 data bytes it implies.
        let short: Vec<u8> = vec![
            0x09, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x41,
        ];
        match Bloom::from_bytes(&mut &short[..]) {
            Ok(_) => {
                panic!("Parsing should fail; data is truncated");
            }
            Err(_) => {}
        };
    }

    #[test]
    fn bloom_test_from_file() {
        let v = include_bytes!("../test_data/test_bf");
        let bloom = Bloom::from_bytes(&mut &v[..]).expect("parsing Bloom should succeed");
        assert!(bloom.has(b"this"));
        assert!(bloom.has(b"that"));
        assert!(!bloom.has(b"yet another test"));
    }

    #[test]
    fn cascade_test() {
        // thread_rng is often the most convenient source of randomness:
        let mut rng = rand::thread_rng();

        // Create 500 entries, then move 100 random ones into the exclusions.
        let mut foo: Vec<Vec<u8>> = Vec::new();
        let mut bar: Vec<Vec<u8>> = Vec::new();

        for i in 0..500 {
            let s = format!("{}", i);
            foo.push(s.into_bytes());
        }

        for _ in 0..100 {
            let idx = rng.gen_range(0, foo.len());
            bar.push(foo.swap_remove(idx));
        }

        let error_rate = 0.5;
        let elements = 500;
        let n_hash_funcs = calculate_n_hash_funcs(error_rate);
        let size = calculate_size(elements, error_rate);

        let mut cascade = Cascade::new(size, n_hash_funcs);
        cascade.initialize(foo.clone(), bar.clone());

        assert!(cascade.check(foo.clone(), bar.clone()));
    }

    #[test]
    fn cascade_from_file_bytes_test() {
        let v = include_bytes!("../test_data/test_mlbf");
        let cascade = Cascade::from_bytes(v)
            .expect("parsing Cascade should succeed")
            .expect("Cascade should be Some");
        assert!(cascade.has(b"test"));
        assert!(cascade.has(b"another test"));
        assert!(cascade.has(b"yet another test"));
        assert!(!cascade.has(b"blah"));
        assert!(!cascade.has(b"blah blah"));
        assert!(!cascade.has(b"blah blah blah"));

        let v = include_bytes!("../test_data/test_short_mlbf");
        Cascade::from_bytes(v).expect_err("parsing truncated Cascade should fail");
    }
}
diff --git a/third_party/rust/rust_cascade/test_data/test_bf b/third_party/rust/rust_cascade/test_data/test_bf
new file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..c63e3eb585974a0c4a46f99bec6d47fb9a72aa52
GIT binary patch
literal 14
Pc%0*8U|?W`Vn+r50UiKN
diff --git a/third_party/rust/rust_cascade/test_data/test_mlbf b/third_party/rust/rust_cascade/test_data/test_mlbf
new file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..6d264b2d286f3451fb894b8635ab7e6cd86e5597
GIT binary patch
literal 76
sc${No;ACK6V1(izAjuA8GXb#>BLgdx%M8REj0`MLE{JAAa6zhA03k^LsQ>@~
diff --git a/third_party/rust/rust_cascade/test_data/test_short_mlbf b/third_party/rust/rust_cascade/test_data/test_short_mlbf
new file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..7dd7410ba24c00b06795d67f988156b5c175b140
GIT binary patch
literal 65
lc%0*8U|?W`;vfcgD2oY*g;=2+W+3KZfpS1J6O02=0RS7%0H6Q>