mio_named_pipes/lib.rs
//! Windows named pipes bindings for mio.
//!
//! This crate implements bindings for named pipes for the mio crate. The
//! crate compiles on all platforms but only provides functionality on
//! Windows. Currently this crate requires mio 0.6.2.
//!
//! On Windows, mio is implemented with an IOCP object at the heart of its
//! `Poll` implementation. For named pipes, this means that all I/O is done in
//! an overlapped fashion and the named pipes themselves are registered with
//! mio's internal IOCP object. Essentially, this crate is using IOCP for
//! bindings with named pipes.
//!
//! Note, though, that IOCP is a *completion* based model whereas mio expects a
//! *readiness* based model. As a result this crate, like the TCP objects in
//! mio, has internal buffering to translate the completion model to a readiness
//! model. This means that this crate is not a zero-cost binding over named
//! pipes on Windows, but rather approximates the performance of mio's TCP
//! implementation on Windows.
//!
//! # Trait implementations
//!
//! The `Read` and `Write` traits are implemented for `NamedPipe` and for
//! `&NamedPipe`, meaning that a named pipe can be read from and written to
//! concurrently through shared references. Typically, however, a named pipe
//! needs to be connected to a client before it can be read or written.
//!
//! Note that for I/O operations on a named pipe to succeed, the named pipe
//! needs to be associated with an event loop. Until this happens, all I/O
//! operations will return a "would block" error.
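//!
//! For example, before a pipe has been registered, any read simply reports
//! "would block". A minimal sketch, assuming the crate is imported as
//! `mio_named_pipes` (the pipe name here is illustrative):
//!
//! ```no_run
//! use std::io::Read;
//!
//! use mio_named_pipes::NamedPipe;
//!
//! let mut pipe = NamedPipe::new(r"\\.\pipe\mio-example").unwrap();
//! let mut buf = [0; 16];
//!
//! // Not registered with a `Poll` yet, so all I/O reports "would block".
//! let err = pipe.read(&mut buf).unwrap_err();
//! assert_eq!(err.kind(), std::io::ErrorKind::WouldBlock);
//! ```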
//!
//! # Managing connections
//!
//! The `NamedPipe` type supports a `connect` method to connect to a client and
//! a `disconnect` method to disconnect from that client. These two methods only
//! work once a named pipe is associated with an event loop.
//!
//! The `connect` method completes asynchronously; its completion can be
//! detected once the object receives a writable notification.
//!
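//! A minimal sketch of accepting a client on the server side, assuming the
//! crate is imported as `mio_named_pipes` (the pipe name and token are
//! illustrative):
//!
//! ```no_run
//! use mio::{Events, Poll, PollOpt, Ready, Token};
//!
//! use mio_named_pipes::NamedPipe;
//!
//! let pipe = NamedPipe::new(r"\\.\pipe\mio-example").unwrap();
//!
//! // `connect`, like all other operations, requires the pipe to be
//! // registered with an event loop first.
//! let poll = Poll::new().unwrap();
//! poll.register(&pipe, Token(0), Ready::writable(), PollOpt::edge()).unwrap();
//!
//! match pipe.connect() {
//!     // A client was already waiting, so the connection is complete.
//!     Ok(()) => {}
//!
//!     // The connection attempt is in flight; its completion is signaled as
//!     // a writable notification.
//!     Err(ref e) if e.kind() == std::io::ErrorKind::WouldBlock => {
//!         let mut events = Events::with_capacity(16);
//!         poll.poll(&mut events, None).unwrap();
//!
//!         // After the writable event, `take_error` reports whether the
//!         // connect actually succeeded.
//!         if let Some(err) = pipe.take_error().unwrap() {
//!             panic!("connect failed: {}", err);
//!         }
//!     }
//!
//!     Err(e) => panic!("connect failed: {}", e),
//! }
//! ```
//!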
//! # Named pipe clients
//!
//! Currently, to create a client of a named pipe server, you can use the
//! `OpenOptions` type in the standard library to create a `File` that connects
//! to a named pipe. Afterwards you can use the `into_raw_handle` method coupled
//! with `NamedPipe::from_raw_handle` to convert that `File` into a named pipe
//! that can operate asynchronously. Don't forget to pass the
//! `FILE_FLAG_OVERLAPPED` flag when opening the `File`.
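//!
//! A minimal sketch of such a client, assuming a server is already listening
//! at the illustrative path `\\.\pipe\mio-example`:
//!
//! ```no_run
//! use std::fs::OpenOptions;
//! use std::os::windows::fs::OpenOptionsExt;
//! use std::os::windows::io::{FromRawHandle, IntoRawHandle};
//!
//! use mio_named_pipes::NamedPipe;
//! use winapi::um::winbase::FILE_FLAG_OVERLAPPED;
//!
//! // Open the pipe as a regular file, but in overlapped mode so that the
//! // resulting handle can be driven asynchronously.
//! let file = OpenOptions::new()
//!     .read(true)
//!     .write(true)
//!     .custom_flags(FILE_FLAG_OVERLAPPED)
//!     .open(r"\\.\pipe\mio-example")
//!     .unwrap();
//!
//! // Transfer ownership of the raw handle into a `NamedPipe`, which can then
//! // be registered with a `Poll` just like the server end.
//! let client = unsafe { NamedPipe::from_raw_handle(file.into_raw_handle()) };
//! ```
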
#![cfg(windows)]
#![deny(missing_docs)]

#[macro_use]
extern crate log;
extern crate mio;
extern crate miow;
extern crate winapi;

use std::ffi::OsStr;
use std::fmt;
use std::io;
use std::io::prelude::*;
use std::mem;
use std::os::windows::io::*;
use std::slice;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering::SeqCst;
use std::sync::Mutex;

use mio::windows;
use mio::{Evented, Poll, PollOpt, Ready, Registration, SetReadiness, Token};
use miow::iocp::CompletionStatus;
use miow::pipe;
use winapi::shared::winerror::*;
use winapi::um::ioapiset::*;
use winapi::um::minwinbase::*;

mod from_raw_arc;
use crate::from_raw_arc::FromRawArc;

// Computes the byte offset of a (possibly nested) field within a type. This is
// used to recover a pointer to an `Inner` from a pointer to one of the
// `OVERLAPPED` instances embedded within it.
macro_rules! offset_of {
    ($t:ty, $($field:ident).+) => (
        &(*(0 as *const $t)).$($field).+ as *const _ as usize
    )
}

// Converts a raw `OVERLAPPED` pointer handed back by a completion status into
// a `FromRawArc<Inner>`, reclaiming the reference that was `mem::forget`ten
// when the operation was scheduled.
macro_rules! overlapped2arc {
    ($e:expr, $t:ty, $($field:ident).+) => ({
        let offset = offset_of!($t, $($field).+);
        debug_assert!(offset < mem::size_of::<$t>());
        FromRawArc::from_raw(($e as usize - offset) as *mut $t)
    })
}

fn would_block() -> io::Error {
    io::ErrorKind::WouldBlock.into()
}

/// Representation of a named pipe on Windows.
///
/// This structure internally contains a `HANDLE` which represents the named
/// pipe, and also maintains state associated with the mio event loop and active
/// I/O operations that have been scheduled to translate IOCP to a readiness
/// model.
pub struct NamedPipe {
    registered: AtomicBool,
    ready_registration: Registration,
    poll_registration: windows::Binding,
    inner: FromRawArc<Inner>,
}

struct Inner {
    handle: pipe::NamedPipe,
    readiness: SetReadiness,

    connect: windows::Overlapped,
    connecting: AtomicBool,

    read: windows::Overlapped,
    write: windows::Overlapped,

    io: Mutex<Io>,

    pool: Mutex<BufferPool>,
}

struct Io {
    read: State,
    write: State,
    connect_error: Option<io::Error>,
}

enum State {
    None,
    Pending(Vec<u8>, usize),
    Ok(Vec<u8>, usize),
    Err(io::Error),
}

fn _assert_kinds() {
    fn _assert_send<T: Send>() {}
    fn _assert_sync<T: Sync>() {}
    _assert_send::<NamedPipe>();
    _assert_sync::<NamedPipe>();
}

impl NamedPipe {
    /// Creates a new named pipe at the specified `addr` given a "reasonable
    /// set" of initial configuration options.
    ///
    /// Currently the configuration options are the [same as miow]. To change
    /// these options, you can create a custom named pipe yourself and then use
    /// the `FromRawHandle` constructor to convert that type to an instance of a
    /// `NamedPipe` in this crate.
    ///
    /// [same as miow]: https://docs.rs/miow/0.1.4/x86_64-pc-windows-msvc/miow/pipe/struct.NamedPipe.html#method.new
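    ///
    /// A rough sketch of that conversion, assuming miow's `NamedPipeBuilder`
    /// API for the custom configuration (the builder calls and pipe name shown
    /// here are illustrative):
    ///
    /// ```no_run
    /// use std::os::windows::io::{FromRawHandle, IntoRawHandle};
    ///
    /// use mio_named_pipes::NamedPipe;
    /// use miow::pipe::NamedPipeBuilder;
    ///
    /// // Configure the pipe with non-default options through miow.
    /// let raw = NamedPipeBuilder::new(r"\\.\pipe\mio-example")
    ///     .first(true)
    ///     .max_instances(4)
    ///     .create()
    ///     .unwrap();
    ///
    /// // Hand ownership of the raw handle to this crate's `NamedPipe`.
    /// let pipe = unsafe { NamedPipe::from_raw_handle(raw.into_raw_handle()) };
    /// ```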
    pub fn new<A: AsRef<OsStr>>(addr: A) -> io::Result<NamedPipe> {
        NamedPipe::_new(addr.as_ref())
    }

    fn _new(addr: &OsStr) -> io::Result<NamedPipe> {
        let pipe = pipe::NamedPipe::new(addr)?;
        unsafe { Ok(NamedPipe::from_raw_handle(pipe.into_raw_handle())) }
    }

    /// Attempts to call `ConnectNamedPipe`, if possible.
    ///
    /// This function will attempt to connect this pipe to a client in an
    /// asynchronous fashion. If the function immediately establishes a
    /// connection to a client, then `Ok(())` is returned. Otherwise, if a
    /// connection attempt was issued and is now in progress, a "would block"
    /// error is returned.
    ///
    /// When the connection is finished, this object will be flagged as being
    /// ready for a write, or otherwise in the writable state.
    ///
    /// # Errors
    ///
    /// This function will return a "would block" error if the pipe has not yet
    /// been registered with an event loop, if the connection operation has
    /// previously been issued but has not yet completed, or if the connect
    /// itself was issued and didn't finish immediately.
    ///
    /// Normal I/O errors from the call to `ConnectNamedPipe` are returned
    /// immediately.
    pub fn connect(&self) -> io::Result<()> {
        // Make sure we're associated with an IOCP object
        if !self.registered() {
            return Err(would_block());
        }

        // "Acquire the connecting lock" or otherwise just make sure we're the
        // only operation that's using the `connect` overlapped instance.
        if self.inner.connecting.swap(true, SeqCst) {
            return Err(would_block());
        }

        // Now that we've flagged ourselves in the connecting state, issue the
        // connection attempt. Afterwards interpret the return value and set
        // internal state accordingly.
        let res = unsafe {
            let overlapped = self.inner.connect.as_mut_ptr() as *mut _;
            self.inner.handle.connect_overlapped(overlapped)
        };

        match res {
            // The connection operation finished immediately, so let's schedule
            // reads/writes and such.
            Ok(true) => {
                trace!("connect done immediately");
                self.inner.connecting.store(false, SeqCst);
                Inner::post_register(&self.inner);
                Ok(())
            }

            // If the overlapped operation was successful and didn't finish
            // immediately then we forget a copy of the arc we hold
            // internally. This ensures that when the completion status comes
            // in for the I/O operation finishing it'll have a reference
            // associated with it and our data will still be valid. The
            // `connect_done` function will "reify" this forgotten pointer to
            // drop the refcount on the other side.
            Ok(false) => {
                trace!("connect in progress");
                mem::forget(self.inner.clone());
                Err(would_block())
            }

            // TODO: are we sure no IOCP notification comes in here?
            Err(e) => {
                trace!("connect error: {}", e);
                self.inner.connecting.store(false, SeqCst);
                Err(e)
            }
        }
    }

    /// Takes any internal error that has happened after the last I/O operation
    /// which hasn't been retrieved yet.
    ///
    /// This is particularly useful for detecting failed attempts to `connect`.
    /// After a completed `connect` flags this pipe as writable, callers must
    /// invoke this method to determine whether the connection actually
    /// succeeded. If this function returns `None` then a client is connected,
    /// otherwise it returns the error that occurred and a client is not
    /// connected.
    pub fn take_error(&self) -> io::Result<Option<io::Error>> {
        Ok(self.inner.io.lock().unwrap().connect_error.take())
    }

    /// Disconnects this named pipe from a connected client.
    ///
    /// This function will disconnect the pipe from a connected client, if any,
    /// by calling the `DisconnectNamedPipe` function. If the disconnection is
    /// successful then this object will no longer be readable or writable.
    ///
    /// After a `disconnect` is issued, `connect` may be called again to
    /// connect to another client.
    pub fn disconnect(&self) -> io::Result<()> {
        self.inner.handle.disconnect()?;
        self.inner
            .readiness
            .set_readiness(Ready::empty())
            .expect("event loop seems gone");
        Ok(())
    }

    fn registered(&self) -> bool {
        self.registered.load(SeqCst)
    }
}

impl Read for NamedPipe {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        <&NamedPipe as Read>::read(&mut &*self, buf)
    }
}

impl Write for NamedPipe {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        <&NamedPipe as Write>::write(&mut &*self, buf)
    }

    fn flush(&mut self) -> io::Result<()> {
        <&NamedPipe as Write>::flush(&mut &*self)
    }
}

impl<'a> Read for &'a NamedPipe {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        // Make sure we're registered
        if !self.registered() {
            return Err(would_block());
        }

        let mut state = self.inner.io.lock().unwrap();
        match mem::replace(&mut state.read, State::None) {
            // In theory not possible with `ready_registration` checked above,
            // but return would block for now.
            State::None => Err(would_block()),

            // A read is in flight, still waiting for it to finish
            State::Pending(buf, amt) => {
                state.read = State::Pending(buf, amt);
                Err(would_block())
            }

            // We previously read something into `data`, try to copy out some
            // data. If we copy out all the data, schedule a new read; otherwise
            // store the buffer to be read from later.
            State::Ok(data, cur) => {
                let n = {
                    let mut remaining = &data[cur..];
                    remaining.read(buf)?
                };
                let next = cur + n;
                if next != data.len() {
                    state.read = State::Ok(data, next);
                } else {
                    self.inner.put_buffer(data);
                    Inner::schedule_read(&self.inner, &mut state);
                }
                Ok(n)
            }

            // Looks like an in-flight read hit an error, return that here while
            // we schedule a new one.
            State::Err(e) => {
                Inner::schedule_read(&self.inner, &mut state);
                if e.raw_os_error() == Some(ERROR_BROKEN_PIPE as i32) {
                    Ok(0)
                } else {
                    Err(e)
                }
            }
        }
    }
}

impl<'a> Write for &'a NamedPipe {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        // Make sure we're registered
        if !self.registered() {
            return Err(would_block());
        }

        // Make sure there are no writes pending
        let mut io = self.inner.io.lock().unwrap();
        match io.write {
            State::None => {}
            _ => return Err(would_block()),
        }

        // Move `buf` onto the heap and fire off the write
        let mut owned_buf = self.inner.get_buffer();
        owned_buf.extend(buf);
        Inner::schedule_write(&self.inner, owned_buf, 0, &mut io);
        Ok(buf.len())
    }

    fn flush(&mut self) -> io::Result<()> {
        // TODO: `FlushFileBuffers` somehow?
        Ok(())
    }
}

impl Evented for NamedPipe {
    fn register(
        &self,
        poll: &Poll,
        token: Token,
        interest: Ready,
        opts: PollOpt,
    ) -> io::Result<()> {
        // First, register the handle with the event loop
        unsafe {
            self.poll_registration
                .register_handle(&self.inner.handle, token, poll)?;
        }
        poll.register(&self.ready_registration, token, interest, opts)?;
        self.registered.store(true, SeqCst);
        Inner::post_register(&self.inner);
        Ok(())
    }

    fn reregister(
        &self,
        poll: &Poll,
        token: Token,
        interest: Ready,
        opts: PollOpt,
    ) -> io::Result<()> {
        // Validate `Poll` and that we were previously registered
        unsafe {
            self.poll_registration
                .reregister_handle(&self.inner.handle, token, poll)?;
        }

        // At this point we should for sure have `ready_registration` unless
        // we're racing with `register` above, so just return a bland error if
        // the borrow fails.
        poll.reregister(&self.ready_registration, token, interest, opts)?;

        Inner::post_register(&self.inner);

        Ok(())
    }

    fn deregister(&self, poll: &Poll) -> io::Result<()> {
        // Validate `Poll` and deregister ourselves
        unsafe {
            self.poll_registration
                .deregister_handle(&self.inner.handle, poll)?;
        }
        poll.deregister(&self.ready_registration)
    }
}

impl AsRawHandle for NamedPipe {
    fn as_raw_handle(&self) -> RawHandle {
        self.inner.handle.as_raw_handle()
    }
}

impl FromRawHandle for NamedPipe {
    unsafe fn from_raw_handle(handle: RawHandle) -> NamedPipe {
        let (r, s) = Registration::new2();
        NamedPipe {
            registered: AtomicBool::new(false),
            ready_registration: r,
            poll_registration: windows::Binding::new(),
            inner: FromRawArc::new(Inner {
                handle: pipe::NamedPipe::from_raw_handle(handle),
                readiness: s,
                connecting: AtomicBool::new(false),
                // transmutes to straddle winapi versions (mio 0.6 is on an
                // older winapi)
                connect: windows::Overlapped::new(mem::transmute(connect_done as fn(_))),
                read: windows::Overlapped::new(mem::transmute(read_done as fn(_))),
                write: windows::Overlapped::new(mem::transmute(write_done as fn(_))),
                io: Mutex::new(Io {
                    read: State::None,
                    write: State::None,
                    connect_error: None,
                }),
                pool: Mutex::new(BufferPool::with_capacity(2)),
            }),
        }
    }
}

impl fmt::Debug for NamedPipe {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.inner.handle.fmt(f)
    }
}

impl Drop for NamedPipe {
    fn drop(&mut self) {
        // Cancel pending reads/connects, but don't cancel writes to ensure that
        // everything is flushed out.
        unsafe {
            if self.inner.connecting.load(SeqCst) {
                drop(cancel(&self.inner.handle, &self.inner.connect));
            }
            let io = self.inner.io.lock().unwrap();
            match io.read {
                State::Pending(..) => {
                    drop(cancel(&self.inner.handle, &self.inner.read));
                }
                _ => {}
            }
        }
    }
}

impl Inner {
    /// Schedules a read to happen in the background, executing an overlapped
    /// operation.
    ///
    /// This function returns `true` if a normal error happens or if the read
    /// is scheduled in the background. If the pipe is no longer connected
    /// (ERROR_PIPE_LISTENING) then `false` is returned and no read is
    /// scheduled.
    fn schedule_read(me: &FromRawArc<Inner>, io: &mut Io) -> bool {
        // Check to see if a read is already scheduled/completed
        match io.read {
            State::None => {}
            _ => return true,
        }

        // Turn off our read readiness
        let ready = me.readiness.readiness();
        me.readiness
            .set_readiness(ready & !Ready::readable())
            .expect("event loop seems gone");

        // Allocate a buffer and schedule the read.
        let mut buf = me.get_buffer();
        let e = unsafe {
            let overlapped = me.read.as_mut_ptr() as *mut _;
            let slice = slice::from_raw_parts_mut(buf.as_mut_ptr(), buf.capacity());
            me.handle.read_overlapped(slice, overlapped)
        };

        match e {
            // See `connect` above for the rationale behind `forget`
            Ok(e) => {
                trace!("schedule read success: {:?}", e);
                io.read = State::Pending(buf, 0); // 0 is ignored on read side
                mem::forget(me.clone());
                true
            }

            // If ERROR_PIPE_LISTENING happens then it's not a real read error,
            // we just need to wait for a connect.
            Err(ref e) if e.raw_os_error() == Some(ERROR_PIPE_LISTENING as i32) => false,

            // If some other error happened, though, we're now readable to give
            // out the error.
            Err(e) => {
                trace!("schedule read error: {}", e);
                io.read = State::Err(e);
                me.readiness
                    .set_readiness(ready | Ready::readable())
                    .expect("event loop still seems gone");
                true
            }
        }
    }

    fn schedule_write(me: &FromRawArc<Inner>, buf: Vec<u8>, pos: usize, io: &mut Io) {
        // Very similar to `schedule_read` above, just done for the write half.
        let ready = me.readiness.readiness();
        me.readiness
            .set_readiness(ready & !Ready::writable())
            .expect("event loop seems gone");

        let e = unsafe {
            let overlapped = me.write.as_mut_ptr() as *mut _;
            me.handle.write_overlapped(&buf[pos..], overlapped)
        };

        match e {
            // See `connect` above for the rationale behind `forget`
            Ok(e) => {
                trace!("schedule write success: {:?}", e);
                io.write = State::Pending(buf, pos);
                mem::forget(me.clone())
            }
            Err(e) => {
                trace!("schedule write error: {}", e);
                io.write = State::Err(e);
                me.add_readiness(Ready::writable());
            }
        }
    }

    fn add_readiness(&self, ready: Ready) {
        self.readiness
            .set_readiness(ready | self.readiness.readiness())
            .expect("event loop still seems gone");
    }

    fn post_register(me: &FromRawArc<Inner>) {
        let mut io = me.io.lock().unwrap();
        if Inner::schedule_read(&me, &mut io) {
            if let State::None = io.write {
                me.add_readiness(Ready::writable());
            }
        }
    }

    fn get_buffer(&self) -> Vec<u8> {
        self.pool.lock().unwrap().get(8 * 1024)
    }

    fn put_buffer(&self, buf: Vec<u8>) {
        self.pool.lock().unwrap().put(buf)
    }
}

unsafe fn cancel<T: AsRawHandle>(handle: &T, overlapped: &windows::Overlapped) -> io::Result<()> {
    let ret = CancelIoEx(handle.as_raw_handle(), overlapped.as_mut_ptr() as *mut _);
    if ret == 0 {
        Err(io::Error::last_os_error())
    } else {
        Ok(())
    }
}

fn connect_done(status: &OVERLAPPED_ENTRY) {
    let status = CompletionStatus::from_entry(status);
    trace!("connect done");

    // Acquire the `FromRawArc<Inner>`. Note that we should be guaranteed that
    // the refcount is available to us due to the `mem::forget` in
    // `connect` above.
    let me = unsafe { overlapped2arc!(status.overlapped(), Inner, connect) };

    // Flag ourselves as no longer using the `connect` overlapped instance.
    let prev = me.connecting.swap(false, SeqCst);
    assert!(prev, "wasn't previously connecting");

    // Stash away our connect error if one happened
    debug_assert_eq!(status.bytes_transferred(), 0);
    unsafe {
        match me.handle.result(status.overlapped()) {
            Ok(n) => debug_assert_eq!(n, 0),
            Err(e) => me.io.lock().unwrap().connect_error = Some(e),
        }
    }

    // We essentially just finished a registration, so kick off a
    // read and register write readiness.
    Inner::post_register(&me);
}

fn read_done(status: &OVERLAPPED_ENTRY) {
    let status = CompletionStatus::from_entry(status);
    trace!("read finished, bytes={}", status.bytes_transferred());

    // Acquire the `FromRawArc<Inner>`. Note that we should be guaranteed that
    // the refcount is available to us due to the `mem::forget` in
    // `schedule_read` above.
    let me = unsafe { overlapped2arc!(status.overlapped(), Inner, read) };

    // Move from the `Pending` to `Ok` state.
    let mut io = me.io.lock().unwrap();
    let mut buf = match mem::replace(&mut io.read, State::None) {
        State::Pending(buf, _) => buf,
        _ => unreachable!(),
    };
    unsafe {
        match me.handle.result(status.overlapped()) {
            Ok(n) => {
                debug_assert_eq!(status.bytes_transferred() as usize, n);
                buf.set_len(status.bytes_transferred() as usize);
                io.read = State::Ok(buf, 0);
            }
            Err(e) => {
                debug_assert_eq!(status.bytes_transferred(), 0);
                io.read = State::Err(e);
            }
        }
    }

    // Flag our readiness that we've got data.
    me.add_readiness(Ready::readable());
}

fn write_done(status: &OVERLAPPED_ENTRY) {
    let status = CompletionStatus::from_entry(status);
    trace!("write finished, bytes={}", status.bytes_transferred());

    // Acquire the `FromRawArc<Inner>`. Note that we should be guaranteed that
    // the refcount is available to us due to the `mem::forget` in
    // `schedule_write` above.
    let me = unsafe { overlapped2arc!(status.overlapped(), Inner, write) };

    // Make the state change out of `Pending`. If we wrote the entire buffer
    // then we're writable again and otherwise we schedule another write.
    let mut io = me.io.lock().unwrap();
    let (buf, pos) = match mem::replace(&mut io.write, State::None) {
        State::Pending(buf, pos) => (buf, pos),
        _ => unreachable!(),
    };

    unsafe {
        match me.handle.result(status.overlapped()) {
            Ok(n) => {
                debug_assert_eq!(status.bytes_transferred() as usize, n);
                let new_pos = pos + (status.bytes_transferred() as usize);
                if new_pos == buf.len() {
                    me.put_buffer(buf);
                    me.add_readiness(Ready::writable());
                } else {
                    Inner::schedule_write(&me, buf, new_pos, &mut io);
                }
            }
            Err(e) => {
                debug_assert_eq!(status.bytes_transferred(), 0);
                io.write = State::Err(e);
                me.add_readiness(Ready::writable());
            }
        }
    }
}

// Based on https://github.com/tokio-rs/mio/blob/13d5fc9/src/sys/windows/buffer_pool.rs
struct BufferPool {
    pool: Vec<Vec<u8>>,
}

impl BufferPool {
    fn with_capacity(cap: usize) -> BufferPool {
        BufferPool {
            pool: Vec::with_capacity(cap),
        }
    }

    fn get(&mut self, default_cap: usize) -> Vec<u8> {
        self.pool
            .pop()
            .unwrap_or_else(|| Vec::with_capacity(default_cap))
    }

    fn put(&mut self, mut buf: Vec<u8>) {
        if self.pool.len() < self.pool.capacity() {
            unsafe {
                buf.set_len(0);
            }
            self.pool.push(buf);
        }
    }
}