// Copyright 2017, CZ.NIC z.s.p.o. (http://www.nic.cz/)
//
// This file is part of the pakon system.
//
// Pakon is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Pakon is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Pakon. If not, see <http://www.gnu.org/licenses/>.

//! Incremental updates to a flow
//!
//! When something (a source of data) wants to update a representation of a flow, it might
//! describe the change using the data structures in this module.

use std::fmt::{Debug, Formatter, Result as FmtResult};
use std::net::IpAddr;
use std::rc::Rc;

use futures::{Async, Future, Poll};
use futures::unsync::mpsc::{Sender, UnboundedSender};
use futures::unsync::oneshot::{self, Receiver as OneReceiver, Sender as OneSender};
use void::Void;

use libdata::column::{FlowTags, Tags, Value};
use libdata::stats::Sizes as SizeStats;

/// A key is some bit of data that describes which flow is spoken of.
#[derive(Clone, Debug, Eq, Ord, PartialEq, PartialOrd)]
pub enum Key {
    /// The flow can be recognized by a single tag value.
    ///
    /// Usually, this is some kind of ID internal to the source of data.
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate libdata;
    /// # extern crate libflow;
    /// # extern crate serde;
    /// # #[macro_use]
    /// # extern crate serde_derive;
    /// use libdata::column::Type;
    /// use libflow::update::Key;
    ///
    /// #[derive(Clone, Debug, Deserialize, Eq, Ord, PartialEq, PartialOrd, Serialize)]
    /// struct PrivateId(pub String);
    ///
    /// impl Type for PrivateId {
    ///     fn name() -> String {
    ///         "my-private-source-id".to_owned()
    ///     }
    /// }
    ///
    /// # fn main() {
    /// let key = Key::Simple(PrivateId("some-unique-id".to_owned()).into());
    /// # }
    /// ```
    Simple(Value),
    /// The flow can be recognized by the usual tuple ‒ IP addresses, the protocol used and a
    /// pair of ports.
    ///
    /// This is mostly a convenience so we don't have to build multi-value keys out of `Value`s
    /// and care about their order, etc.
    ///
    /// # TODO
    ///
    /// What about protocols like ICMP? Don't they have something else in their tuple?
    ///
    /// # Examples
    ///
    /// ```
    /// # use libflow::update::Key;
    /// let tcp_flow = Key::FlowTuple {
    ///     ip_proto_raw: 6,
    ///     loc_ip: "192.0.2.1".parse().unwrap(),
    ///     rem_ip: "192.0.2.2".parse().unwrap(),
    ///     loc_port: Some(7384),
    ///     rem_port: Some(80),
    /// };
    /// let icmpv6_flow = Key::FlowTuple {
    ///     ip_proto_raw: 58,
    ///     loc_ip: "2001:0DB8::1".parse().unwrap(),
    ///     rem_ip: "2001:0DB8::2".parse().unwrap(),
    ///     loc_port: None,
    ///     rem_port: None,
    /// };
    /// ```
    FlowTuple {
        /// The protocol (e.g. UDP/TCP) as the raw protocol number. This is not an enum because
        /// we don't really care what the protocol means and the raw value provides full
        /// flexibility.
        ip_proto_raw: u8,
        /// The IP address of the local endpoint (e.g. on the LAN).
        loc_ip: IpAddr,
        /// The IP address of the remote endpoint.
        rem_ip: IpAddr,
        /// The port of the local endpoint, if it makes sense for the protocol (in host byte
        /// order).
        loc_port: Option<u16>,
        /// The port of the remote endpoint, if it makes sense for the protocol (in host byte
        /// order).
        rem_port: Option<u16>,
    },
    /// The internal address of the flow, used as a key.
    ///
    /// This one can't be used by data sources, since they never know the internal address.
    /// However, it can be used by other parts of the processing to generate an update to an
    /// already existing flow.
    ///
    /// Unlike other keys, if an update references a flow through an unknown `InternalHandle`
    /// key, no new flow is set up; the update is ignored instead, on the assumption that this is
    /// a race condition and the update is for a flow that has already ended. While this should
    /// not usually happen (as there's a grace period before the flow is actually deleted), it
    /// can't be completely ruled out.
    ///
    /// An update that has an `InternalHandle` key must have only that key and no other,
    /// otherwise something somewhere might stop working or panic.
    InternalHandle(FlowTags),
}
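
// A minimal sketch (not part of the original module) of the invariant documented on
// `Key::InternalHandle`: when such a key is present, it must be the only key of an update.
// The helper name is hypothetical and only illustrates the rule.
#[allow(dead_code)]
fn internal_handle_is_exclusive(keys: &[Key]) -> bool {
    let has_internal = keys.iter().any(|key| match *key {
        Key::InternalHandle(_) => true,
        _ => false,
    });
    // Either there is no internal handle at all, or it is the single key.
    !has_internal || keys.len() == 1
}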
/// The status of the flow.
#[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub enum Status {
    /// The flow just started.
    Start,
    /// The update is somewhere in the middle of the flow's lifetime.
    Ongoing,
    /// The flow ended.
    ///
    /// It is still possible some source might contain further updates or information about the
    /// flow. Therefore, the recipient is expected not to delete the flow until some time later
    /// on.
    End,
}

/// An update to one flow.
///
/// This is just a structure holding the data together (e.g. it doesn't provide any kind of
/// encapsulation).
///
/// # Examples
///
/// ```
/// # extern crate serde;
/// # #[macro_use]
/// # extern crate serde_derive;
/// # extern crate libdata;
/// # extern crate libflow;
/// # use libdata::column::*;
/// # use libdata::flow::*;
/// # use libflow::update::*;
/// #[derive(Clone, Debug, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)]
/// pub struct Id(pub String);
/// impl Type for Id { fn name() -> String { "test-flow-id".to_owned() } }
///
/// # fn main() {
/// // We just discovered the flow is TCP
/// let mut tags = Tags::new();
/// tags.insert(IpProto::Tcp);
///
/// let update = Update {
///     keys: vec![
///         // Unfortunately, we know only our internal ID as the flow key
///         Key::Simple(Value::from(Id("The Id".to_owned()))),
///         // If we knew more keys, they would go here
///     ],
///     status: Status::Ongoing,
///     tags,
///     // We know nothing about how much data went through.
///     stats: None,
/// };
/// # }
/// ```
#[derive(Debug, Eq, PartialEq)]
pub struct Update {
    /// The keys of the flow this update applies to.
    ///
    /// In general, the source should provide as many keys as it can. If this is the first update
    /// from the given source, it should contain the `Key::FlowTuple` variant so it can be paired
    /// with any other source.
    pub keys: Vec<Key>,
    /// The (new) status of the flow.
    ///
    /// This influences what happens to the flow during processing.
    pub status: Status,
    /// New tags to add to the flow.
    ///
    /// These tags are added to the flow, or replace the existing ones if they are already
    /// present. It is a logical error to change any value that is part of any key.
    pub tags: Tags,
    /// The newest snapshot of statistics.
    ///
    /// This is expected to be cumulative (e.g. from the start of the flow); the recipient is
    /// expected to keep track of when additions happened.
    pub stats: Option<SizeStats>,
}
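
// A minimal sketch (not part of the original module) of how a data source might announce a
// newly observed TCP flow. It uses only items defined above; the helper itself and the concrete
// addresses and ports are made up for illustration.
#[allow(dead_code)]
fn example_start_update() -> Update {
    Update {
        keys: vec![
            // The flow tuple allows pairing this flow with reports from other sources.
            Key::FlowTuple {
                ip_proto_raw: 6, // TCP
                loc_ip: "192.0.2.1".parse().unwrap(),
                rem_ip: "192.0.2.2".parse().unwrap(),
                loc_port: Some(54321),
                rem_port: Some(443),
            },
        ],
        status: Status::Start,
        // No tags are known yet; later updates may add them.
        tags: Tags::new(),
        // No traffic has been accounted for yet.
        stats: None,
    }
}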
/// The part of a cork that is stuffed into the update channel.
///
/// The receiver end of the channel simply drops the cork, which signals the corresponding
/// [`CorkHandle`](struct.CorkHandle.html).
///
/// The `Cork` can be cloned. The other end is signalled once all the copies are dropped.
#[derive(Clone)]
pub struct Cork(Rc<OneSender<()>>);

impl Debug for Cork {
    fn fmt(&self, f: &mut Formatter) -> FmtResult {
        write!(f, "Cork()")
    }
}

/// The part of a cork that is kept by the caller, so it can check when the cork has been
/// dropped.
pub struct CorkHandle(OneReceiver<()>);

impl Future for CorkHandle {
    type Item = ();
    type Error = Void;
    fn poll(&mut self) -> Poll<(), Void> {
        match self.0.poll() {
            Ok(polled) => Ok(polled),
            // Dropping the sender signals successful handling as well
            Err(_) => Ok(Async::Ready(())),
        }
    }
}

/// Creates a new cork.
pub fn cork() -> (Cork, CorkHandle) {
    let (sender, receiver) = oneshot::channel();
    (Cork(Rc::new(sender)), CorkHandle(receiver))
}

/// Either an update or a cork marking the end of a batch.
///
/// The receiver shall simply drop the cork, as the drop signals the completion.
pub enum CorkedUpdate {
    /// A real update.
    Update(Update),
    /// A cork, to flush data.
    Cork(Cork),
}

/// The channel for sending updates.
pub type UpdateSender = Sender<CorkedUpdate>;

/// Like [`UpdateSender`](type.UpdateSender.html), but unbounded.
///
/// This is used by the internal computations, since they don't need the backpressure.
pub type UpdateSenderUnbounded = UnboundedSender<CorkedUpdate>;

#[cfg(test)]
mod tests {
    use std::time::Duration;

    use futures::future::Either;
    use tokio_core::reactor::{Core, Timeout};

    use super::*;

    /// Dropping the cork activates the handle.
    #[test]
    fn cork_drop() {
        let (cork, handle) = cork();
        drop(cork);
        handle.wait().unwrap();
    }

    /// The handle is not ready while the cork is still alive.
    #[test]
    fn cork_alive() {
        let (_cork, handle) = cork();
        let mut core = Core::new().unwrap();
        let timeout = Timeout::new(Duration::from_millis(100), &core.handle()).unwrap();
        let all = timeout.select2(handle);
        match core.run(all) {
            Ok(Either::A(_)) => (),
            _ => panic!(),
        }
    }
}
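
// A minimal sketch (not part of the original test suite) checking the documented cloning
// behaviour of `Cork`: the handle resolves only after all copies of the cork are dropped.
// The module and test names are arbitrary.
#[cfg(test)]
mod cork_clone_example {
    use std::time::Duration;

    use futures::future::Either;
    use tokio_core::reactor::{Core, Timeout};

    use super::*;

    #[test]
    fn cork_clone_keeps_handle_pending() {
        let (cork, handle) = cork();
        let copy = cork.clone();
        // One copy is gone, but another one still exists, so the handle must stay pending.
        drop(cork);
        let mut core = Core::new().unwrap();
        let timeout = Timeout::new(Duration::from_millis(100), &core.handle()).unwrap();
        match core.run(timeout.select2(handle)) {
            // The timeout fired first, so the handle was still pending. Dropping the last copy
            // lets it resolve.
            Ok(Either::A((_, handle))) => {
                drop(copy);
                handle.wait().unwrap();
            }
            _ => panic!("handle resolved while a cork copy was still alive"),
        }
    }
}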