1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
//! Use blocking io_uring-based TX/RX loop.
//!
//! This example pins the TX/RX loop to core 0, and two other 100us tasks to cores 1 and 2.
//!
//! You may need to increase `INTERVAL` as 100us can be challenging for some PCs. That said, a
//! Raspberry Pi 4 with a realtime kernel and some tweaking can run 2x 100us tasks _ok_.
//!
//! This example requires a Linux with `io_uring` support and a realtime kernel (e.g. `PREEMPT_RT`).
// Fallback entry point for non-Linux targets: io_uring (and timerfd) are
// Linux-only, so on other platforms this example just prints a notice and exits.
#[cfg(not(target_os = "linux"))]
fn main() {
    eprintln!("This example is only supported on Linux systems");
}
// Linux entry point: spawns a blocking io_uring TX/RX loop pinned to core 0 and
// two timerfd-paced cyclic output tasks pinned to cores 1 and 2, all on
// SCHED_FIFO realtime priorities.
//
// Requires the network interface name as the first CLI argument, root (or
// CAP_NET_RAW + rtprio limits), a PREEMPT_RT-capable kernel, and the EtherCAT
// hardware described in the module docs.
#[cfg(target_os = "linux")]
fn main() -> Result<(), ethercrab::error::Error> {
    use env_logger::{Env, TimestampPrecision};
    use ethercrab::{
        MainDevice, MainDeviceConfig, PduStorage, SubDeviceGroup, Timeouts,
        error::Error,
        std::{ethercat_now, tx_rx_task_io_uring},
    };
    use std::{
        sync::Arc,
        thread,
        time::{Duration, Instant},
    };
    use thread_priority::{
        RealtimeThreadSchedulePolicy, ThreadPriority, ThreadPriorityValue, ThreadSchedulePolicy,
    };
    use timerfd::{SetTimeFlags, TimerFd, TimerState};

    /// Maximum number of SubDevices that can be stored. This must be a power of 2 greater than 1.
    const MAX_SUBDEVICES: usize = 16;

    /// Maximum PDU data payload size - set this to the max PDI size or higher.
    const MAX_PDU_DATA: usize = PduStorage::element_size(1100);

    /// Maximum number of EtherCAT frames that can be in flight at any one time.
    const MAX_FRAMES: usize = 16;

    /// Interval in microseconds.
    const INTERVAL: u64 = 100;

    // Statically-allocated PDU frame storage shared between the TX/RX loop and
    // the MainDevice; split exactly once below.
    static PDU_STORAGE: PduStorage<MAX_FRAMES, MAX_PDU_DATA> = PduStorage::new();

    // Two process data groups so the "slow" and "fast" devices can be cycled
    // independently from their own threads.
    #[derive(Default)]
    struct Groups {
        /// EL2889 and EK1100/EK1501. For EK1100, 2 items, 2 bytes of PDI for 16 output bits. The EK1501
        /// has 2 bytes of its own PDI so we'll use an upper bound of 4.
        ///
        /// We'll keep the EK1100/EK1501 in here as it has no useful PDI but still needs to live
        /// somewhere.
        slow_outputs: SubDeviceGroup<2, 4>,

        /// EL2828. 1 item, 1 byte of PDI for 8 output bits.
        fast_outputs: SubDeviceGroup<1, 1>,
    }

    env_logger::Builder::from_env(Env::default().default_filter_or("info"))
        .format_timestamp(Some(TimestampPrecision::Nanos))
        .init();

    let interface = std::env::args()
        .nth(1)
        .expect("Provide network interface as first argument.");

    log::info!("Starting multiple groups demo...");
    log::info!(
        "Ensure an EK1100 or EK1501 is the first SubDevice, with an EL2828 and EL2889 following it"
    );
    log::info!("Run with RUST_LOG=ethercrab=debug or =trace for debug information");

    let (tx, rx, pdu_loop) = PDU_STORAGE.try_split().expect("can only split once");

    // Pin TX/RX to the first core, and the two cyclic tasks to the next two.
    // This example panics on machines with fewer than 3 cores.
    let core_ids = core_affinity::get_core_ids().expect("Couldn't get core IDs");

    let tx_rx_core = core_ids
        .first()
        .copied()
        .expect("At least one core is required. Are you running on a potato?");

    let slow_core = core_ids
        .get(1)
        .copied()
        .expect("At least 2 cores are required.");

    let fast_core = core_ids
        .get(2)
        .copied()
        .expect("At least 3 cores are required.");

    // The TX/RX loop gets the highest priority of the three threads (49 vs 48)
    // so network servicing preempts the cyclic tasks.
    thread_priority::ThreadBuilder::default()
        .name("tx-rx-thread")
        // Might need to set `<user> hard rtprio 99` and `<user> soft rtprio 99` in `/etc/security/limits.conf`
        // Check limits with `ulimit -Hr` or `ulimit -Sr`
        .priority(ThreadPriority::Crossplatform(
            ThreadPriorityValue::try_from(49u8).unwrap(),
        ))
        // NOTE: Requires a realtime kernel
        .policy(ThreadSchedulePolicy::Realtime(
            RealtimeThreadSchedulePolicy::Fifo,
        ))
        .spawn(move |_| {
            core_affinity::set_for_current(tx_rx_core)
                .then_some(())
                .expect("Set TX/RX thread core");

            // Blocking io_uring loop; this call only returns on error, so the
            // thread is effectively dedicated to network I/O.
            tx_rx_task_io_uring(&interface, tx, rx).expect("TX/RX task");
        })
        .unwrap();

    // Wait for TX/RX loop to start
    // NOTE(review): a fixed 200ms sleep is a heuristic rather than a
    // synchronisation point — on a heavily loaded system the loop may not be
    // ready yet; confirm against ethercrab's recommended startup handshake.
    thread::sleep(Duration::from_millis(200));

    let maindevice = MainDevice::new(pdu_loop, Timeouts::default(), MainDeviceConfig::default());

    // Arc so the MainDevice can be shared between the slow and fast threads.
    let maindevice = Arc::new(maindevice);

    // Read configurations from SubDevice EEPROMs and configure devices.
    // The closure sorts each discovered SubDevice into a group by its name;
    // anything unrecognised aborts init with `UnknownSubDevice`.
    let Groups {
        slow_outputs,
        fast_outputs,
    } = futures_lite::future::block_on(maindevice.init::<MAX_SUBDEVICES, _>(
        ethercat_now,
        Groups::default(),
        |groups: &Groups, subdevice| match subdevice.name() {
            "EL2889" | "EK1100" | "EK1501" => Ok(&groups.slow_outputs),
            "EL2828" => Ok(&groups.fast_outputs),
            _ => Err(Error::UnknownSubDevice),
        },
    ))
    .expect("Init");

    // Clone moved into the slow thread; the original `maindevice` Arc is moved
    // into the fast thread below.
    let maindevice_slow = maindevice.clone();

    let slow = thread_priority::ThreadBuilder::default()
        .name("slow-task")
        // Might need to set `<user> hard rtprio 99` and `<user> soft rtprio 99` in `/etc/security/limits.conf`
        // Check limits with `ulimit -Hr` or `ulimit -Sr`
        .priority(ThreadPriority::Crossplatform(
            ThreadPriorityValue::try_from(48u8).unwrap(),
        ))
        // NOTE: Requires a realtime kernel
        .policy(ThreadSchedulePolicy::Realtime(
            RealtimeThreadSchedulePolicy::Fifo,
        ))
        .spawn(move |_| {
            core_affinity::set_for_current(slow_core)
                .then_some(())
                .expect("Set slow thread core");

            futures_lite::future::block_on::<Result<(), Error>>(async {
                // Transition this group's SubDevices into OP state before cycling.
                let slow_outputs = slow_outputs
                    .into_op(&maindevice_slow)
                    .await
                    .expect("PRE-OP -> OP");

                let slow_cycle_time = Duration::from_micros(INTERVAL);

                // timerfd paces the cycle: each `tfd.read()` below blocks until
                // the next periodic expiry.
                let mut tfd = TimerFd::new().unwrap();

                tfd.set_state(
                    TimerState::Periodic {
                        current: slow_cycle_time,
                        interval: slow_cycle_time,
                    },
                    SetTimeFlags::Default,
                );

                let slow_duration = Duration::from_millis(250);

                // Only update "slow" outputs every 250ms using this instant
                let mut tick = Instant::now();

                // EK1100 is first SubDevice, EL2889 is second
                let el2889 = slow_outputs
                    .subdevice(&maindevice_slow, 1)
                    .expect("EL2889 not present!");

                // Set initial output state
                el2889.outputs_raw_mut()[0] = 0x01;
                el2889.outputs_raw_mut()[1] = 0x80;

                loop {
                    // Exchange process data every cycle even though the output
                    // pattern only changes every 250ms.
                    slow_outputs.tx_rx(&maindevice_slow).await.expect("TX/RX");

                    // Increment every output byte for every SubDevice by one
                    if tick.elapsed() > slow_duration {
                        tick = Instant::now();

                        let el2889 = slow_outputs
                            .subdevice(&maindevice_slow, 1)
                            .expect("EL2889 not present!");

                        let mut o = el2889.outputs_raw_mut();

                        // Make a nice pattern on EL2889 LEDs
                        o[0] = o[0].rotate_left(1);
                        o[1] = o[1].rotate_right(1);
                    }

                    // Block until the next timer tick.
                    // NOTE(review): the expiration count returned by `read()` is
                    // discarded, so missed ticks (overruns) go undetected.
                    tfd.read();
                }
            })
            .unwrap();
        })
        .unwrap();

    let fast = thread_priority::ThreadBuilder::default()
        .name("fast-task")
        // Might need to set `<user> hard rtprio 99` and `<user> soft rtprio 99` in `/etc/security/limits.conf`
        // Check limits with `ulimit -Hr` or `ulimit -Sr`
        .priority(ThreadPriority::Crossplatform(
            ThreadPriorityValue::try_from(48u8).unwrap(),
        ))
        // NOTE: Requires a realtime kernel
        .policy(ThreadSchedulePolicy::Realtime(
            RealtimeThreadSchedulePolicy::Fifo,
        ))
        .spawn(move |_| {
            core_affinity::set_for_current(fast_core)
                .then_some(())
                .expect("Set fast thread core");

            futures_lite::future::block_on::<Result<(), Error>>(async {
                // Transition this group's SubDevices into OP state before cycling.
                let fast_outputs = fast_outputs
                    .into_op(&maindevice)
                    .await
                    .expect("PRE-OP -> OP");

                let fast_cycle_time = Duration::from_micros(INTERVAL);

                // Same timerfd pacing scheme as the slow task, but the outputs
                // change every cycle here.
                let mut tfd = TimerFd::new().unwrap();

                tfd.set_state(
                    TimerState::Periodic {
                        current: fast_cycle_time,
                        interval: fast_cycle_time,
                    },
                    SetTimeFlags::Default,
                );

                loop {
                    fast_outputs.tx_rx(&maindevice).await.expect("TX/RX");

                    // Increment every output byte for every SubDevice by one
                    for subdevice in fast_outputs.iter(&maindevice) {
                        let mut o = subdevice.outputs_raw_mut();

                        for byte in o.iter_mut() {
                            *byte = byte.wrapping_add(1);
                        }
                    }

                    // Block until the next timer tick (expiration count ignored,
                    // as in the slow task).
                    tfd.read();
                }
            })
            .unwrap();
        })
        .unwrap();

    // Both cyclic loops run forever, so these joins never return in normal
    // operation — they only unblock if a task panics, at which point `expect`
    // propagates the failure.
    slow.join().expect("slow task failed");
    fast.join().expect("fast task failed");

    Ok(())
}