#![feature(test)]

extern crate crossbeam;
extern crate ipc_channel;
extern crate test;

use ipc_channel::platform;

use std::sync::{mpsc, Mutex};

/// Allows doing multiple inner iterations per bench.iter() run.
///
/// This is mostly to amortise the overhead of spawning a thread in the benchmark
/// when sending larger messages (that might be fragmented).
///
/// Note that you need to compensate the displayed results
/// for the proportionally longer runs yourself,
/// as the benchmark framework doesn't know about the inner iterations...
const ITERATIONS: usize = 1;

fn bench_size(b: &mut test::Bencher, size: usize) {
    let data: Vec<u8> = (0..size).map(|i| (i % 251) as u8).collect();
    let (tx, rx) = platform::channel().unwrap();

    let (wait_tx, wait_rx) = mpsc::channel();
    let wait_rx = Mutex::new(wait_rx);

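    // Messages larger than the platform's maximum fragment size get split into
    // several fragments, and the send may block until the receiver starts
    // draining them (the exact blocking behaviour is platform-dependent) --
    // which is the assumption behind running the receive side on a separate
    // thread in the first branch below.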
    if size > platform::OsIpcSender::get_max_fragment_size() {
        b.iter(|| {
            crossbeam::scope(|scope| {
                scope.spawn(|| {
                    let wait_rx = wait_rx.lock().unwrap();
                    for _ in 0..ITERATIONS {
                        tx.send(&data, vec![], vec![]).unwrap();
                        if ITERATIONS > 1 {
                            // Prevent beginning of the next send
                            // from overlapping with receive of last fragment,
                            // as otherwise results of runs with a large tail fragment
                            // are significantly skewed.
                            wait_rx.recv().unwrap();
                        }
                    }
                });
                for _ in 0..ITERATIONS {
                    rx.recv().unwrap();
                    if ITERATIONS > 1 {
                        wait_tx.send(()).unwrap();
                    }
                }
                // For reasons mysterious to me,
                // not returning a value *from every branch*
                // adds some 100 ns or so of overhead to all results --
                // which is quite significant for very short tests...
                0
            })
        });
    } else {
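        // Messages that fit in a single fragment can be sent and received
        // back-to-back on the benchmark thread itself.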
        b.iter(|| {
            for _ in 0..ITERATIONS {
                tx.send(&data, vec![], vec![]).unwrap();
                rx.recv().unwrap();
            }
            0
        });
    }
}

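// Note: the #[bench] functions below use the unstable `test` crate
// (see #![feature(test)] above), so running them requires a nightly
// toolchain -- e.g. `cargo +nightly bench`.
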
#[bench]
fn size_00_1(b: &mut test::Bencher) {
    bench_size(b, 1);
}
#[bench]
fn size_01_2(b: &mut test::Bencher) {
    bench_size(b, 2);
}
#[bench]
fn size_02_4(b: &mut test::Bencher) {
    bench_size(b, 4);
}
#[bench]
fn size_03_8(b: &mut test::Bencher) {
    bench_size(b, 8);
}
#[bench]
fn size_04_16(b: &mut test::Bencher) {
    bench_size(b, 16);
}
#[bench]
fn size_05_32(b: &mut test::Bencher) {
    bench_size(b, 32);
}
#[bench]
fn size_06_64(b: &mut test::Bencher) {
    bench_size(b, 64);
}
#[bench]
fn size_07_128(b: &mut test::Bencher) {
    bench_size(b, 128);
}
#[bench]
fn size_08_256(b: &mut test::Bencher) {
    bench_size(b, 256);
}
#[bench]
fn size_09_512(b: &mut test::Bencher) {
    bench_size(b, 512);
}
#[bench]
fn size_10_1k(b: &mut test::Bencher) {
    bench_size(b, 1 * 1024);
}
#[bench]
fn size_11_2k(b: &mut test::Bencher) {
    bench_size(b, 2 * 1024);
}
#[bench]
fn size_12_4k(b: &mut test::Bencher) {
    bench_size(b, 4 * 1024);
}
#[bench]
fn size_13_8k(b: &mut test::Bencher) {
    bench_size(b, 8 * 1024);
}
#[bench]
fn size_14_16k(b: &mut test::Bencher) {
    bench_size(b, 16 * 1024);
}
#[bench]
fn size_15_32k(b: &mut test::Bencher) {
    bench_size(b, 32 * 1024);
}
#[bench]
fn size_16_64k(b: &mut test::Bencher) {
    bench_size(b, 64 * 1024);
}
#[bench]
fn size_17_128k(b: &mut test::Bencher) {
    bench_size(b, 128 * 1024);
}
#[bench]
fn size_18_256k(b: &mut test::Bencher) {
    bench_size(b, 256 * 1024);
}
#[bench]
fn size_19_512k(b: &mut test::Bencher) {
    bench_size(b, 512 * 1024);
}
#[bench]
fn size_20_1m(b: &mut test::Bencher) {
    bench_size(b, 1 * 1024 * 1024);
}
#[bench]
fn size_21_2m(b: &mut test::Bencher) {
    bench_size(b, 2 * 1024 * 1024);
}
#[bench]
fn size_22_4m(b: &mut test::Bencher) {
    bench_size(b, 4 * 1024 * 1024);
}
#[bench]
fn size_23_8m(b: &mut test::Bencher) {
    bench_size(b, 8 * 1024 * 1024);
}