
Commit 3a06bad

Merge pull request #275 from striezel-stash/fix-typos
fix some typos
2 parents 6967402 + 6b4c765, commit 3a06bad

File tree: 3 files changed (+7 -7 lines changed)


src/raw/bitmask.rs (1 addition & 1 deletion)

@@ -106,7 +106,7 @@ impl IntoIterator for BitMask {
     }
 }
 
-/// Iterator over the contents of a `BitMask`, returning the indicies of set
+/// Iterator over the contents of a `BitMask`, returning the indices of set
 /// bits.
 pub struct BitMaskIter(BitMask);

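For context, the doc comment fixed here describes an iterator that yields the indices of set bits, lowest first. A minimal standalone sketch of that behavior, using a plain `u16` instead of hashbrown's `BitMask` type (`set_bit_indices` is a hypothetical name):

```rust
// Yield the indices of set bits, lowest first, the behavior the
// `BitMaskIter` doc comment describes.
fn set_bit_indices(mut mask: u16) -> Vec<u32> {
    let mut indices = Vec::new();
    while mask != 0 {
        indices.push(mask.trailing_zeros()); // index of the lowest set bit
        mask &= mask - 1;                    // clear the lowest set bit
    }
    indices
}

fn main() {
    // Bits 0, 3 and 5 are set, so the walk yields [0, 3, 5].
    assert_eq!(set_bit_indices(0b10_1001), vec![0, 3, 5]);
}
```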
src/raw/generic.rs (1 addition & 1 deletion)

@@ -39,7 +39,7 @@ fn repeat(byte: u8) -> GroupWord {
 #[derive(Copy, Clone)]
 pub struct Group(GroupWord);
 
-// We perform all operations in the native endianess, and convert to
+// We perform all operations in the native endianness, and convert to
 // little-endian just before creating a BitMask. The can potentially
 // enable the compiler to eliminate unnecessary byte swaps if we are
 // only checking whether a BitMask is empty.

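The comment being fixed concerns the portable (non-SIMD) group code: byte-wise SWAR comparisons run in native byte order, and conversion to little-endian happens only when bit positions must be turned into byte indices. A hedged sketch of that idea with assumed names (hashbrown's actual `Group`/`BitMask` code differs in detail):

```rust
fn repeat(byte: u8) -> u64 {
    u64::from_ne_bytes([byte; 8])
}

// Classic SWAR "which bytes equal `byte`" mask: matching bytes get their high
// bit set. Runs entirely in native endianness. (The trick can false-positive
// on a neighboring byte; the real code documents why that is tolerable.)
fn match_byte(group: u64, byte: u8) -> u64 {
    let cmp = group ^ repeat(byte);
    cmp.wrapping_sub(repeat(0x01)) & !cmp & repeat(0x80)
}

fn main() {
    const EMPTY: u8 = 0xFF;
    let group = u64::from_ne_bytes([0x11, EMPTY, 0x11, 0x11, EMPTY, 0x11, 0x11, 0x11]);
    let mask = match_byte(group, EMPTY);
    // "Is any slot empty?" is just a zero test, which is byte-order
    // independent: no byte swap needed, as the comment says.
    assert!(mask != 0);
    // Only when extracting an index do we normalize to little-endian so that
    // bit position / 8 equals the byte's index within the group.
    assert_eq!(mask.to_le().trailing_zeros() / 8, 1);
}
```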
src/raw/mod.rs (5 additions & 5 deletions)

@@ -211,7 +211,7 @@ fn capacity_to_buckets(cap: usize) -> Option<usize> {
 
     // Any overflows will have been caught by the checked_mul. Also, any
     // rounding errors from the division above will be cleaned up by
-    // next_power_of_two (which can't overflow because of the previous divison).
+    // next_power_of_two (which can't overflow because of the previous division).
     Some(adjusted_cap.next_power_of_two())
 }

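For context, `capacity_to_buckets` rounds a requested capacity up to a power-of-two bucket count under hashbrown's 7/8 maximum load factor. A simplified sketch of the arithmetic the comment describes (the real function also special-cases capacities below 8):

```rust
fn capacity_to_buckets_sketch(cap: usize) -> Option<usize> {
    // Scale by 8/7 (max load factor 7/8); checked_mul catches overflow first.
    let adjusted_cap = cap.checked_mul(8)? / 7;
    // The division may round down, but next_power_of_two rounds back up, and
    // it cannot overflow: after a successful checked_mul, adjusted_cap is at
    // most usize::MAX / 7, far below the largest power of two.
    Some(adjusted_cap.next_power_of_two())
}

fn main() {
    assert_eq!(capacity_to_buckets_sketch(28), Some(32)); // 28 * 8/7 = 32 exactly
    assert_eq!(capacity_to_buckets_sketch(29), Some(64)); // 232/7 = 33, rounds up to 64
}
```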
@@ -1236,7 +1236,7 @@ impl<A: Allocator + Clone> RawTableInner<A> {
12361236
// EMPTY entries. These will unfortunately trigger a
12371237
// match, but once masked may point to a full bucket that
12381238
// is already occupied. We detect this situation here and
1239-
// perform a second scan starting at the begining of the
1239+
// perform a second scan starting at the beginning of the
12401240
// table. This second scan is guaranteed to find an empty
12411241
// slot (due to the load factor) before hitting the trailing
12421242
// control bytes (containing EMPTY).
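
A toy illustration of the situation this comment describes, with hypothetical values (a 2-bucket table and a pretend group width of 4), not hashbrown's real layout:

```rust
const EMPTY: u8 = 0xFF;

fn main() {
    let bucket_mask = 1usize; // a 2-bucket table, so indices are masked with 1
    // Two real control bytes (both buckets full, tagged 0x2A and 0x3B),
    // padded with EMPTY out to the pretend group width of 4.
    let ctrl = [0x2A, 0x3B, EMPTY, EMPTY];
    // A group load over ctrl[0..4] reports an EMPTY match at bit index 2,
    let matched_index = 2usize;
    // but masking that index into the table wraps it onto bucket 0:
    let insert_index = matched_index & bucket_mask;
    assert_eq!(insert_index, 0);
    assert_ne!(ctrl[insert_index], EMPTY); // occupied, hence the second scan
}
```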
@@ -1469,7 +1469,7 @@ impl<A: Allocator + Clone> RawTableInner<A> {
14691469
//
14701470
// Note that in this context `leading_zeros` refers to the bytes at the
14711471
// end of a group, while `trailing_zeros` refers to the bytes at the
1472-
// begining of a group.
1472+
// beginning of a group.
14731473
let ctrl = if empty_before.leading_zeros() + empty_after.trailing_zeros() >= Group::WIDTH {
14741474
DELETED
14751475
} else {
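
A minimal sketch of the erase rule this hunk belongs to, using plain `u8` masks as stand-ins for `BitMask` and a hypothetical helper name: if the full slots immediately before and after the erased slot together span a whole group, some probe window was completely full, so a tombstone (DELETED) is required instead of EMPTY.

```rust
// Bit i of `empty_before` marks an EMPTY slot among the group_width slots
// before the erased one (bit 7 = adjacent); `empty_after` mirrors that on
// the other side (bit 0 = adjacent). `erased_ctrl` is a made-up helper.
fn erased_ctrl(empty_before: u8, empty_after: u8, group_width: u32) -> &'static str {
    if empty_before.leading_zeros() + empty_after.trailing_zeros() >= group_width {
        "DELETED" // some group spanning this slot was completely full
    } else {
        "EMPTY" // every probe window had an empty slot, safe to fully free
    }
}

fn main() {
    // All 8 neighbors on each side are full: must leave a tombstone.
    assert_eq!(erased_ctrl(0b0000_0000, 0b0000_0000, 8), "DELETED");
    // An empty slot right next to the erased one on both sides: reuse freely.
    assert_eq!(erased_ctrl(0b1000_0000, 0b0000_0001, 8), "EMPTY");
}
```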
@@ -1886,7 +1886,7 @@ impl<T> RawIter<T> {
18861886
/// For the iterator to remain valid, this method must be called once
18871887
/// for each insert before `next` is called again.
18881888
///
1889-
/// This method does not guarantee that an insertion of a bucket witha greater
1889+
/// This method does not guarantee that an insertion of a bucket with a greater
18901890
/// index than the last one yielded will be reflected in the iterator.
18911891
///
18921892
/// This method should be called _after_ the given insert is made.
@@ -1938,7 +1938,7 @@ impl<T> RawIter<T> {
19381938
// If it did, we're done.
19391939
// - Otherwise, update the iterator cached group so that it won't
19401940
// yield a to-be-removed bucket, or _will_ yield a to-be-added bucket.
1941-
// We'll also need ot update the item count accordingly.
1941+
// We'll also need to update the item count accordingly.
19421942
if let Some(index) = self.iter.current_group.lowest_set_bit() {
19431943
let next_bucket = self.iter.data.next_n(index);
19441944
if b.as_ptr() > next_bucket.as_ptr() {

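The two `RawIter` hunks above concern bookkeeping for inserts made while iterating. A loose sketch of that idea with a hypothetical struct; the real `RawIter` tracks its position a group at a time, which is why the doc comment above only gives a weak guarantee for buckets ahead of the cursor:

```rust
// `CountingIter` is a made-up stand-in, not hashbrown's RawIter: it yields
// slots in index order and caches how many items remain.
struct CountingIter {
    next_index: usize, // slots below this index have already been yielded
    items_left: usize, // cached count of items still to be yielded
}

impl CountingIter {
    // Analogue of "reflect an insert": call once per insert, after the insert
    // is made and before the next call to `next`.
    fn reflect_insert(&mut self, inserted_at: usize) {
        if inserted_at >= self.next_index {
            self.items_left += 1; // still ahead of the cursor: we'll yield it
        }
        // Inserts behind the cursor will never be yielded, so the cached
        // count is left alone.
    }
}

fn main() {
    let mut it = CountingIter { next_index: 3, items_left: 5 };
    it.reflect_insert(7); // ahead of the cursor
    assert_eq!(it.items_left, 6);
    it.reflect_insert(1); // behind the cursor
    assert_eq!(it.items_left, 6);
}
```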