// Source: dfir_pipes/pull/half_join_state/set.rs

1use std::borrow::Cow;
2use std::collections::VecDeque;
3use std::collections::hash_map::Entry;
4
5use rustc_hash::FxHashMap;
6use smallvec::{SmallVec, smallvec};
7
8use super::HalfJoinState;
9
/// [`HalfJoinState`] with set semantics.
///
/// Duplicate key-value pairs are not stored; only unique pairs are kept.
#[derive(Debug)]
pub struct HalfSetJoinState<Key, ValBuild, ValProbe> {
    // Here a smallvec with inline storage of 1 is chosen.
    // The rationale for this decision is that, I speculate, that joins possibly have a bimodal distribution with regards to how much key contention they have.
    // That is, I think that there are many joins that have 1 value per key on LHS/RHS, and there are also a large category of joins that have multiple values per key.
    // For the category of joins that have multiple values per key, it's not clear why they would only have 2, 3, 4, or N specific number of values per key. So there's no good number to set the smallvec storage to.
    // Instead we can just focus on the first group of joins that have 1 value per key and get benefit there without hurting the other group too much with excessive memory usage.
    /// Table to probe, vec val contains all matches.
    table: FxHashMap<Key, SmallVec<[ValBuild; 1]>>,
    /// Matches found by `probe` but not yet emitted; drained by `pop_match`.
    current_matches: VecDeque<(Key, ValProbe, ValBuild)>,
    /// Total number of (key, value) pairs stored in `table`.
    /// Maintained incrementally so `len()` is O(1).
    len: usize,
}
26
27impl<Key, ValBuild, ValProbe> Default for HalfSetJoinState<Key, ValBuild, ValProbe> {
28    fn default() -> Self {
29        Self {
30            table: FxHashMap::default(),
31            current_matches: VecDeque::default(),
32            len: 0,
33        }
34    }
35}
36
37impl<Key, ValBuild, ValProbe> HalfJoinState<Key, ValBuild, ValProbe>
38    for HalfSetJoinState<Key, ValBuild, ValProbe>
39where
40    Key: Clone + Eq + std::hash::Hash,
41    ValBuild: Clone + Eq,
42    ValProbe: Clone,
43{
44    fn build(&mut self, k: Key, v: Cow<'_, ValBuild>) -> bool {
45        let entry = self.table.entry(k);
46
47        match entry {
48            Entry::Occupied(mut e) => {
49                let vec = e.get_mut();
50                if !vec.contains(v.as_ref()) {
51                    vec.push(v.into_owned());
52                    self.len += 1;
53                    return true;
54                }
55            }
56            Entry::Vacant(e) => {
57                e.insert(smallvec![v.into_owned()]);
58                self.len += 1;
59                return true;
60            }
61        };
62
63        false
64    }
65
66    fn probe(&mut self, k: &Key, v: &ValProbe) -> Option<(Key, ValProbe, ValBuild)> {
67        // TODO: We currently don't free/shrink the self.current_matches vecdeque to save time.
68        // This mean it will grow to eventually become the largest number of matches in a single probe call.
69        // Maybe we should clear this memory at the beginning of every tick/periodically?
70        let mut iter = self
71            .table
72            .get(k)?
73            .iter()
74            .map(|valbuild| (k.clone(), v.clone(), valbuild.clone()));
75
76        let first = iter.next();
77        self.current_matches.extend(iter);
78        first
79    }
80
81    fn full_probe(&self, k: &Key) -> std::slice::Iter<'_, ValBuild> {
82        self.table.get(k).map(|sv| sv.iter()).unwrap_or_default()
83    }
84
85    fn pop_match(&mut self) -> Option<(Key, ValProbe, ValBuild)> {
86        self.current_matches.pop_front()
87    }
88
89    fn len(&self) -> usize {
90        self.len
91    }
92
93    fn iter(&self) -> std::collections::hash_map::Iter<'_, Key, SmallVec<[ValBuild; 1]>> {
94        #[expect(clippy::disallowed_methods, reason = "FxHasher is deterministic")]
95        self.table.iter()
96    }
97
98    fn clear(&mut self) {
99        self.table.clear();
100        self.current_matches.clear();
101        self.len = 0;
102    }
103}