//! CoAP server/client demo: runs a CoAP server bound to [::]:5683 and, in parallel,
//! exercises the client side of embedded_nal_coap against a demo server on [::1]:1234.
use log::info;
/// Serves CoAP on port 5683 while concurrently running [`run_client_operations`].
///
/// This function works on *any* UdpFullStack, including embedded ones -- only main() is what makes
/// this use POSIX sockets. (It does make use of a std based RNG, but that could be passed in just
/// as well for no_std operation).
async fn run<S>(stack: &mut S)
where
    S: embedded_nal_async::UdpStack,
{
    // Bind to the unspecified address so the server answers on every local interface.
    let any_addr = embedded_nal_async::SocketAddr::new("::".parse().unwrap(), 5683);
    let mut socket = stack
        .bind_multiple(any_addr)
        .await
        .expect("Can't create a socket");

    let logger = Some(coap_message_demos::log::Log::start_once());
    let mut tree = coap_message_demos::full_application_tree(logger);

    info!("Server is ready.");

    // Up to 3 concurrent client requests share this state.
    let shared = embedded_nal_coap::CoAPShared::<3>::new();
    let (client, server) = shared.split();

    let serve = async {
        use rand::SeedableRng;
        let mut rng = rand::rngs::StdRng::from_entropy();
        server
            .run(&mut socket, &mut tree, &mut rng)
            .await
            .expect("UDP error")
    };

    // going with an embassy_futures join instead of an async_std::task::spawn b/c CoAPShared is not
    // Sync, and async_std expects to work in multiple threads
    embassy_futures::join::join(serve, run_client_operations(client)).await;
}
#[async_std::main]
async fn main() {
    // The POSIX-socket stack is the only std-specific piece; `run` itself is stack-agnostic.
    run(&mut std_embedded_nal_async::Stack::default()).await;
}
/// In parallel to server operation, this function performs some operations as a client.
///
/// This doubles as an experimentation ground for the client side of embedded_nal_coap and
/// coap-request in general.
///
/// NOTE(review): statement order and the intermediate bindings further down are deliberate
/// lifetime workarounds (see inline comments); restructure with care.
async fn run_client_operations<const N: usize>(client: embedded_nal_coap::CoAPRuntimeClient<'_, N>) {
    // Assumes a demo CoAP server is listening on localhost port 1234.
    let demoserver = "[::1]:1234".parse().unwrap();

    use coap_request::Stack;

    // Experiment 1: a single GET whose response payload is printed as it arrives.
    println!("Sending GET to {}...", demoserver);
    let response = client
        .to(demoserver)
        .request(
            coap_request_implementations::Code::get()
                .with_path("/other/separate")
                .processing_response_payload_through(|p| {
                    println!("Got payload {:?}", p);
                }),
        )
        .await;
    println!("Response {:?}", response);

    // Experiment 2: This demonstrates that we don't leak requests, and (later) that we don't lock
    // up when there are too many concurrent requests, and still adhere to protocol.
    //
    // Well, except for rate limiting...
    println!(
        "Sending 10 (>> 3) requests in short succession, forgetting them after a moment"
    );
    for _i in 0..10 {
        embassy_futures::select::select(
            client.to(demoserver).request(
                coap_request_implementations::Code::get()
                    .with_path("/other/separate")
                    .processing_response_payload_through(|p| {
                        println!("Got payload {:?}", p);
                    }),
            ),
            // Knowing that /other/separate takes some time, this is definitely faster
            async_std::task::sleep(std::time::Duration::from_millis(300)),
        )
        // The other future is dropped.
        .await;
    }

    // Experiment 3: saturate the request slots with long-lived concurrent requests.
    println!(
        "Sending 10 (>> 3) requests in parallel, keeping all of them around"
    );
    // It's not NSTART that's limiting us here (although it should) -- NOTE(review): the
    // original comment trails off here; presumably the N request slots of the CoAP runtime
    // are the actual limiter. TODO confirm and complete.
    let build_request = || {
        // The async block allows us to keep the temporary client.to() that'd be otherwise limit
        // the request's lifetime inside the Future.
        let block = async {
            client.to(demoserver).request(
                coap_request_implementations::Code::get()
                    .with_path("/other/separate")
                    .processing_response_payload_through(|p| {
                        println!("Got payload {:?} (truncated)", p.get(..5).unwrap_or(p));
                    }),
            ).await
        };
        block
    };
    // That's not even that easy without TAIT and other trickery...
    // (Nested two-way joins stand in for an N-way join of ten identical anonymous futures.)
    use embassy_futures::join::join;
    join(build_request(),
        join(build_request(),
            join(build_request(),
                join(build_request(),
                    join(build_request(),
                        join(build_request(),
                            join(build_request(),
                                join(build_request(),
                                    join(build_request(),
                                        build_request()
    // Hello LISP my old friend
    )))))))))
    .await;
    println!("All through");

    // Experiment 4: POST with a request payload.
    //
    // What follows are experiments with the request payload setting functions of
    // coap-request-implementations that have so far not been used successfully without
    // force over lifetimes.

    // Which of these two signatures I take makes the difference in whether this works
    // (upper) or errs like the closure, no matter whether we go through paywriter_f or
    // not.
    fn paywriter<S: Stack>(m: &mut S::RequestMessage<'_>) {
        // fn paywriter<'a, 'b>(m: &'a mut coap_message_utils::inmemory_write::Message<'b>) {
        use coap_message::MinimalWritableMessage;
        m.set_payload(b"Set time to 1955-11-05").unwrap();
    }
    // let paywriter_f: &mut _ = &mut paywriter;

    // FIXME: This is needed for the with_request_callback variant that takes a function,
    // and I don't yet understand why. (Clearly it's unacceptable as a consequence of the
    // interface; question is, is it a consequence or did I just use it wrong).
    // let client: &embedded_nal_coap::CoAPRuntimeClient<'_, 3> =
    //     unsafe { core::mem::transmute(&client) };
    // let mut paywriter_direct = paywriter;

    // let paywriter_cl = |m: &mut <embedded_nal_coap::RequestingCoAPClient<3> as Stack>::RequestMessage<'_>| {
    //     // or let paywriter_cl = |m: &mut coap_message_utils::inmemory_write::Message<'_>| {
    //     use coap_message::MinimalWritableMessage;
    //     m.set_payload(b"Set time to 1955-11-05")
    // };

    let req =
        coap_request_implementations::Code::post()
            .with_path("/uppercase")
            // We can build everything up to this point outside, but pulling more of req
            // starts failing. Is that a hint as to where the lifetime trouble comes from?
            ;
    println!("Sending POST...");
    // Bound separately from .request() on purpose: part of the lifetime experiments above.
    let mut response = client
        .to(demoserver)
        ;
    let response = response
        .request(
            req
                // This works (but needs the unjustifiable lifetime extension above)
                // .with_request_callback(&mut paywriter_direct)
                // Does this work?
                // .with_request_callback(paywriter_f)
                // This fails with type mismatches
                // .with_request_callback(&mut paywriter_cl)
                // But this works because it is simple
                .with_request_payload_slice(b"Set time to 1955-11-05")
                .processing_response_payload_through(|p| {
                    println!("Uppercase is {}", core::str::from_utf8(p).unwrap())
                })
            ,
        )
        ;
    let response = response
        .await;
    println!("Response {:?}", response);
}