Fix some lints and other housecleaning
nkconnor committed Oct 16, 2024
1 parent 5408234 commit 5babe72
Showing 2 changed files with 80 additions and 154 deletions.
30 changes: 3 additions & 27 deletions README.md
@@ -24,50 +24,26 @@ For further reading on the strategy, see a [write up on C++'s `parallel-hashmap`

#### See Also

- **[countrie](https://crates.io/crates/contrie)** - A concurrent hash-trie map & set.
- **[contrie](https://crates.io/crates/contrie)** - A concurrent hash-trie map & set.
- **[dashmap](https://github.com/xacrimon/dashmap)** - Blazing fast concurrent HashMap for Rust.
- **[flurry](https://github.com/jonhoo/flurry)** - A port of Java's `java.util.concurrent.ConcurrentHashMap` to Rust. (Also part of a live stream series)

### Quick Start

```toml
[dependencies]
# Optionally use `parking_lot`, `ahash`, `fxhash`, `seahash`, and `xxhash`
# by specifying the feature by the same name e.g.
sharded = { version = "0.2", features = ["fxhash", "parking_lot"] }
sharded = "0.3"
```

#### Examples

**Insert a key value pair**

```rust
let users = Map::new();
let users = ConcurrentHashMap::new();
users.insert(32, "Henry");
```
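
A value can be read back with `get`, which returns a guard that dereferences to the stored value. A minimal sketch, mirroring the `test_insert_values` test added to `src/lib.rs` in this commit (the exact guard type returned by `get` is an assumption here):

```rust
use sharded::ConcurrentHashMap;

let users = ConcurrentHashMap::new();
users.insert(32, "Henry");

// `get` returns a guard; dereference it to compare against the stored value.
assert_eq!(*users.get(&32).unwrap(), "Henry");
```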

**Access a storage shard**

`Map` provides `read` and `write`, which give access to the underlying
storage (built with `hashbrown::raw`). Both methods return a tuple of `(Key, Guard<Shard>)`.

```rust
let (key, shard) = users.read(&32);
assert_eq!(shard.get(key), Some(&"Henry"));
```
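
`write` works the same way but yields a write guard on the owning shard. The sketch below follows the commented-out tests further down in `src/lib.rs`, so the exact signature (key taken by value) is an assumption rather than documented API:

```rust
let users = Map::with_capacity(1);

// `write` hashes the key once and returns it together with a write guard
// on the shard that owns it; inserting reuses the precomputed key.
let (key, mut shard) = users.write(32);
shard.insert(key, "Henry");
```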

**Determine if a storage shard is locked**

`try_read` and `try_write` are available to avoid blocking, or for situations that could
otherwise deadlock.

```rust
match users.try_read(&32) {
Some((key, mut shard)) => Ok(shard.get(key)),
None => Err(WouldBlock)
};
```

### Performance Comparison

These measurements were generated using [`jonhoo/bustle`](https://github.com/jonhoo/bustle). To reproduce the charts,
204 changes: 77 additions & 127 deletions src/lib.rs
@@ -9,15 +9,15 @@
//! ## Features
//!
//! * **Zero unsafe code.** This library uses `#![deny(unsafe_code)]` and was motivated by
//! the complexity and amount of memory errors present in many alternatives.
//! the complexity and amount of memory errors present in many alternatives.
//!
//! * **Intuitive API.** Uses similar or same methods as `std` when possible.
//!
//! * **Small footprint.** The core logic is <100 lines of code. The only dependencies are
//! `hashbrown` (which `std` uses) and `parking_lot`.
//! `hashbrown` (which `std` uses) and `parking_lot`.
//!
//! * **Really fast.** This implementation may be a more performant choice than some
//! of the most popular concurrent hashmaps out there. Try it on your workload and let us know.
//! of the most popular concurrent hashmaps out there. Try it on your workload and let us know.
//!
//! ### See Also
//!
@@ -97,19 +97,14 @@
//! dual licensed as above, without any additional terms or conditions.
#![deny(unsafe_code)]

//use hashbrown::hash_map::Iter;
use hashbrown::raw::{RawIntoIter, RawIter, RawTable};
use parking_lot::{
MappedRwLockReadGuard, MappedRwLockWriteGuard, RwLock, RwLockReadGuard, RwLockWriteGuard,
};
use hashbrown::raw::{RawIntoIter, RawTable};
use parking_lot::{MappedRwLockReadGuard, RwLock, RwLockReadGuard};
use std::borrow::Borrow;
use std::convert::TryInto;
use std::hash::{BuildHasher, Hash, Hasher};
use std::ops::Deref;
use std::hash::{BuildHasher, Hash};
use std::{fmt, fmt::Debug};

use std::collections::hash_map::RandomState;
use std::collections::HashMap;

/// Number of shards
const DEFAULT_SHARD_COUNT: usize = 128;
@@ -132,9 +127,7 @@ where
K: Hash,
S: BuildHasher,
{
let mut state = hash_builder.build_hasher();
val.hash(&mut state);
state.finish()
hash_builder.hash_one(val)
}
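
For context on the lint fix just above: the manual `build_hasher` / `hash` / `finish` sequence and `BuildHasher::hash_one` compute the same value; `hash_one` is simply the one-call spelling that `std` now offers. A standalone sketch against plain `std` (not this crate's code):

```rust
use std::collections::hash_map::RandomState;
use std::hash::{BuildHasher, Hash, Hasher};

fn main() {
    let hash_builder = RandomState::new();

    // The pre-fix spelling: build a Hasher, feed the value, call finish().
    let mut state = hash_builder.build_hasher();
    "key".hash(&mut state);
    let old_style = state.finish();

    // The post-fix spelling: BuildHasher::hash_one does the same in one call.
    let new_style = hash_builder.hash_one("key");

    // Same builder, same input, same 64-bit hash.
    assert_eq!(old_style, new_style);
}
```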

// From hashbrown
@@ -200,7 +193,7 @@ impl<K, V> ConcurrentHashMap<K, V, RandomState, DEFAULT_SHARD_COUNT> {
///
/// ```
/// use sharded::ConcurrentHashMap;
/// let mut map: HashMap<&str, i32> = ConcurrentHashMap::with_capacity(100_000);
/// let mut map: ConcurrentHashMap<&str, i32> = ConcurrentHashMap::with_capacity(100_000);
/// ```
#[inline]
#[must_use]
@@ -273,15 +266,14 @@ impl<K, V, S: BuildHasher, const N: usize> ConcurrentHashMap<K, V, S, N> {
capacity: usize,
hash_builder: S,
) -> ConcurrentHashMap<K, V, S, N>
// TODO..
where
S: Clone,
{
// per shard capacity
let capacity = (capacity + DEFAULT_SHARD_COUNT - 1) / DEFAULT_SHARD_COUNT;

let shards: Vec<RwLock<Shard<K, V, S>>> =
std::iter::repeat(|| RawTable::with_capacity(capacity as usize))
std::iter::repeat(|| RawTable::with_capacity(capacity))
.map(|f| f())
.take(DEFAULT_SHARD_COUNT)
.map(|inner| {
@@ -298,6 +290,7 @@ impl<K, V, S: BuildHasher, const N: usize> ConcurrentHashMap<K, V, S, N> {
shards,
},
// .unwrap() requires Debug
// this never panics because the iter takes exactly DEFAULT_SHARD_COUNT
Err(_) => panic!("unable to build inner"),
}
}
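
A side note on the sizing in the constructor above: the requested capacity is divided across the shards with a ceiling division, so the shards together never hold less than what was asked for. A small standalone check of that arithmetic, assuming the default of 128 shards as the code above uses:

```rust
const DEFAULT_SHARD_COUNT: usize = 128;

fn per_shard_capacity(capacity: usize) -> usize {
    // Ceiling division so that all shards combined hold at least `capacity` entries.
    (capacity + DEFAULT_SHARD_COUNT - 1) / DEFAULT_SHARD_COUNT
}

fn main() {
    assert_eq!(per_shard_capacity(100_000), 782);
    assert_eq!(per_shard_capacity(1), 1);
    assert_eq!(per_shard_capacity(0), 0);
    // 782 * 128 = 100_096 >= 100_000, so the total never falls short.
    assert!(per_shard_capacity(100_000) * DEFAULT_SHARD_COUNT >= 100_000);
}
```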
@@ -356,6 +349,7 @@ impl<K, V, S: BuildHasher, const N: usize> ConcurrentHashMap<K, V, S, N> {
// iter: self.into_iter(),
// }
// }

/// Returns a guarded reference for the value corresponding to the
/// provided key.
///
@@ -463,7 +457,7 @@ impl<K, V, S: BuildHasher, const N: usize> ConcurrentHashMap<K, V, S, N> {

let i = hash as usize % N;

let mut shard = match self.shards.get(i as usize) {
let mut shard = match self.shards.get(i) {
Some(lock) => lock.write(),
None => panic!("index out of bounds"),
};
@@ -578,23 +572,24 @@ where
// }
//}

///// An iterator over the keys of a `ConcurrentHashMap`.
/////
///// This `struct` is created by the [`keys`] method on [`ConcurrentHashMap`]. See its
///// documentation for more.
/////
///// [`keys`]: ConcurrentHashMap::keys
/////
///// # Example
/////
///// ```
///// use sharded::ConcurrentHashMap;
/////
///// let map = ConcurrentHashMap::from([
///// ("a", 1),
///// ]);
///// let iter_keys = map.keys();
///// ```
/**
// An iterator over the keys of a `ConcurrentHashMap`.
//
// This `struct` is created by the ~~ConcurrentHashMap::keys~~ method on [`ConcurrentHashMap`]. See its
// documentation for more.
//
// [`keys`]: ConcurrentHashMap::keys
//
// # Example
//
// ```
// use sharded::ConcurrentHashMap;
//
// let map = ConcurrentHashMap::from([
// ("a", 1),
// ]);
// let iter_keys = map.keys();
// ```
//pub struct Keys<'a, K: 'a, V: 'a, S: 'a> {
// iter: Iter<'a, K, V, S>,
//}
@@ -612,6 +607,7 @@
// self.iter.size_hint()
// }
//}
**/

/// An owning iterator over the entries of a `ConcurrentHashMap`.
///
@@ -678,41 +674,41 @@ impl<K, V> Iterator for IntoValues<K, V> {
}

//
////scratch work
////use std::iter::Extend;
////
////impl<K, V> Extend<(K, V)> for Map<K, V>
////where
//// K: Hash + Eq + Send + Sync + 'static,
//// V: Send + Sync + 'static,
////{
//// fn extend<T>(&mut self, iter: T)
//// where
//// T: IntoIterator<Item = (K, V)>,
//// {
//// let iter = iter.into_iter();
//// // iter.size_hint()
////
//// let t_handles = Vec::with_capacity(DEFAULT_SHARD_COUNT as usize);
//// let txs = Vec::with_capacity(DEFAULT_SHARD_COUNT as usize);
////
//// for i in 0..DEFAULT_SHARD_COUNT {
//// let shard = self.shards[i as usize].write().unwrap();
//// let shard = std::sync::Arc::new(shard);
//// // ^ need crossbeam probably
//// let (tx, rx) = std::sync::mpsc::channel();
//// txs.push(tx);
////
//// std::thread::spawn(move || {
//// for (key, value) in rx {
//// shard.insert(key, value);
//// }
//// });
//// }
////
//// let (rx, tx) = std::sync::mpsc::channel();
//// }
////}

//use std::iter::Extend;
//
//impl<K, V> Extend<(K, V)> for Map<K, V>
//where
// K: Hash + Eq + Send + Sync + 'static,
// V: Send + Sync + 'static,
//{
// fn extend<T>(&mut self, iter: T)
// where
// T: IntoIterator<Item = (K, V)>,
// {
// let iter = iter.into_iter();
// // iter.size_hint()
//
// let t_handles = Vec::with_capacity(DEFAULT_SHARD_COUNT as usize);
// let txs = Vec::with_capacity(DEFAULT_SHARD_COUNT as usize);
//
// for i in 0..DEFAULT_SHARD_COUNT {
// let shard = self.shards[i as usize].write().unwrap();
// let shard = std::sync::Arc::new(shard);
// // ^ need crossbeam probably
// let (tx, rx) = std::sync::mpsc::channel();
// txs.push(tx);
//
// std::thread::spawn(move || {
// for (key, value) in rx {
// shard.insert(key, value);
// }
// });
// }
//
// let (rx, tx) = std::sync::mpsc::channel();
// }
//}

/// A single shard in the map
#[derive(Clone)]
@@ -731,6 +727,7 @@
}
}

#[allow(dead_code)]
impl<K, V, S> Shard<K, V, S>
where
S: BuildHasher,
@@ -794,7 +791,7 @@
K: Hash + Eq,
{
match self.inner.get(hash, equivalent_key(key)) {
Some(&(_, ref v)) => Some(v),
Some((_, v)) => Some(v),
None => None,
}
}
@@ -806,6 +803,15 @@
use std::sync::Arc;
use std::time::Duration;

#[test]
fn test_insert_values() {
let map = ConcurrentHashMap::new();
{
map.insert("k", "v");
}
assert_eq!(*map.get(&"k").unwrap(), "v");
}

#[test]
fn test_other_deadlock() {
let map_1 = Arc::new(ConcurrentHashMap::<i32, String>::default());
@@ -834,59 +840,3 @@
std::thread::sleep(Duration::from_secs(10));
}
}

//
//#[cfg(test)]
//mod tests {
// use super::*;
//
// #[test]
// fn write_guard_holds_the_lock_and_read_guard_blocks() {
// let map = Map::with_capacity(1);
// let (key, mut guard) = map.write("");
// guard.insert(key, "value");
//
// // since the guard is still held, this should block
// assert!(map.try_read(&"").is_none())
// }
//
// #[test]
// fn read_and_write_with_lock_held() {
// let map = Map::with_capacity(1);
// let (key, mut guard) = map.write("");
// guard.insert(key.clone(), "value");
//
// assert_eq!(guard.get(key), Some(&"value"))
// }
//
// #[test]
// fn into_iter_yields_one_expected_value() {
// let map = Map::with_capacity(1);
// map.insert("k1", "v1");
// assert_eq!(
// map.into_iter().collect::<Vec<_>>().pop().unwrap(),
// ("k1", "v1")
// );
//
// let map = Map::with_capacity(1);
// map.insert("k1", "v1");
// assert_eq!(map.into_values().collect::<Vec<_>>().pop().unwrap(), "v1");
// }
//
// #[test]
// fn into_iter_has_4_iters() {
// let map = Map::with_capacity(4);
// map.insert("k1", "v1");
// map.insert("k2", "v2");
// map.insert("k3", "v3");
// map.insert("k4", "v4");
// assert_eq!(map.into_iter().map(|_| 1).sum::<u32>(), 4);
//
// let map = Map::with_capacity(4);
// map.insert("k1", "v1");
// map.insert("k2", "v2");
// map.insert("k3", "v3");
// map.insert("k4", "v4");
// assert_eq!(map.into_values().map(|_| 1).sum::<u32>(), 4);
// }
//}
