void exit_polling(void)
{
	int i;

	while (InterlockedExchange((LONG *)&compat_spinlock, 1) == 1) {
		SleepEx(0, TRUE);
	}
	if (is_polling_set) {
		is_polling_set = FALSE;

		for (i=0; i<MAX_FDS; i++) {
			// Cancel any async I/O (handle can be invalid)
			cancel_io(i);
			// If anything was pending on that I/O, it should be
			// terminating, and we should be able to access the fd
			// mutex lock before too long
			EnterCriticalSection(&_poll_fd[i].mutex);
			if ( (poll_fd[i].fd > 0) && (poll_fd[i].handle != INVALID_HANDLE_VALUE) && (poll_fd[i].handle != 0)
			  && (GetFileType(poll_fd[i].handle) == FILE_TYPE_UNKNOWN) ) {
				_close(poll_fd[i].fd);
			}
			free_overlapped(poll_fd[i].overlapped);
			if (!CancelIoEx_Available) {
				// Close duplicate handle
				if (_poll_fd[i].original_handle != INVALID_HANDLE_VALUE) {
					CloseHandle(poll_fd[i].handle);
				}
			}
			poll_fd[i] = INVALID_WINFD;
			LeaveCriticalSection(&_poll_fd[i].mutex);
			DeleteCriticalSection(&_poll_fd[i].mutex);
		}
	}
	compat_spinlock = 0;
}
void exit_polling(void)
{
	int i;

	while (InterlockedExchange((LONG *)&compat_spinlock, 1) == 1) {
		SleepEx(0, TRUE);
	}
	if (is_polling_set) {
		is_polling_set = FALSE;

		for (i=0; i<MAX_FDS; i++) {
			cancel_io(i);
			EnterCriticalSection(&_poll_fd[i].mutex);
			if ( (poll_fd[i].fd > 0) && (poll_fd[i].handle != INVALID_HANDLE_VALUE) && (poll_fd[i].handle != 0)
			  && (GetFileType(poll_fd[i].handle) == FILE_TYPE_UNKNOWN) ) {
				_close(poll_fd[i].fd);
			}
			free_overlapped(poll_fd[i].overlapped);
			if (!CancelIoEx_Available) {
				if (_poll_fd[i].original_handle != INVALID_HANDLE_VALUE) {
					CloseHandle(poll_fd[i].handle);
				}
			}
			poll_fd[i] = INVALID_WINFD;
			LeaveCriticalSection(&_poll_fd[i].mutex);
			DeleteCriticalSection(&_poll_fd[i].mutex);
		}
	}
	compat_spinlock = 0;
}
static void _free_index(int _index)
{
	struct pipe_data *item;

	// Cancel any async IO (Don't care about the validity of our handles for this)
	cancel_io(_index);
	// close the duplicate handle (if we have an actual duplicate)
	if (Use_Duplicate_Handles) {
		if (_poll_fd[_index].original_handle != INVALID_HANDLE_VALUE) {
			CloseHandle(poll_fd[_index].handle);
		}
		_poll_fd[_index].original_handle = INVALID_HANDLE_VALUE;
		_poll_fd[_index].thread_id = 0;
	}
	// Free any events still queued on this fd, whether or not duplicate
	// handles are in use
	if (!list_empty(&_poll_fd[_index].list))
		usbi_warn(NULL, "There are some pending events in the queue");
	list_for_each_entry(item, &_poll_fd[_index].list, list, struct pipe_data) {
		if (!item) {
			usbi_dbg("no item");
			continue;
		}
		if (item->data)
			free(item->data);
		free(item);
	}
	free_overlapped(poll_fd[_index].overlapped);
	poll_fd[_index] = INVALID_WINFD;
}
void exit_polling(void)
{
	int i;

	while (InterlockedExchange((LONG *)&compat_spinlock, 1) == 1) {
		usbi_sleep(0);
	}
	if (is_polling_set) {
		is_polling_set = FALSE;

		for (i=0; i<MAX_FDS; i++) {
			// Cancel any async I/O (handle can be invalid)
			cancel_io(i);
			// If anything was pending on that I/O, it should be
			// terminating, and we should be able to access the fd
			// mutex lock before too long
			EnterCriticalSection(&_poll_fd[i].mutex);
			free_overlapped(poll_fd[i].overlapped);
			if (Use_Duplicate_Handles) {
				// Close duplicate handle
				if (_poll_fd[i].original_handle != INVALID_HANDLE_VALUE) {
					CloseHandle(poll_fd[i].handle);
				}
			}
			poll_fd[i] = INVALID_WINFD;
			LeaveCriticalSection(&_poll_fd[i].mutex);
			DeleteCriticalSection(&_poll_fd[i].mutex);
		}
	}
	InterlockedExchange((LONG *)&compat_spinlock, 0);
}
static void _free_index(int _index)
{
	// Cancel any async IO (Don't care about the validity of our handles for this)
	cancel_io(_index);
	// close the duplicate handle (if we have an actual duplicate)
	if (Use_Duplicate_Handles) {
		if (_poll_fd[_index].original_handle != INVALID_HANDLE_VALUE) {
			CloseHandle(poll_fd[_index].handle);
		}
		_poll_fd[_index].original_handle = INVALID_HANDLE_VALUE;
		_poll_fd[_index].thread_id = 0;
	}
	free_overlapped(poll_fd[_index].overlapped);
	poll_fd[_index] = INVALID_WINFD;
}
/*
 * close a fake pipe fd
 */
int usbi_close(int fd)
{
	int _index;
	int r = -1;

	CHECK_INIT_POLLING;

	_index = _fd_to_index_and_lock(fd);

	if (_index < 0) {
		errno = EBADF;
	} else {
		free_overlapped(poll_fd[_index].overlapped);
		poll_fd[_index] = INVALID_WINFD;
		LeaveCriticalSection(&_poll_fd[_index].mutex);
		r = 0;
	}
	return r;
}
/*
 * Create a fake pipe.
 * As libusb only uses pipes for signaling, all we need from a pipe is an
 * event. To that extent, we create a single wfd and overlapped as a means
 * to access that event.
 */
int usbi_pipe(int filedes[2])
{
	int i;
	OVERLAPPED* overlapped;

	CHECK_INIT_POLLING;

	overlapped = create_overlapped();
	if (overlapped == NULL) {
		return -1;
	}
	// The overlapped must have status pending for signaling to work in poll
	overlapped->Internal = STATUS_PENDING;
	overlapped->InternalHigh = 0;

	for (i=0; i<MAX_FDS; i++) {
		if (poll_fd[i].fd < 0) {
			EnterCriticalSection(&_poll_fd[i].mutex);
			// fd might have been allocated before we got to critical
			if (poll_fd[i].fd >= 0) {
				LeaveCriticalSection(&_poll_fd[i].mutex);
				continue;
			}
			// Use index as the unique fd number
			poll_fd[i].fd = i;
			// Read end of the "pipe"
			filedes[0] = poll_fd[i].fd;
			// We can use the same handle for both ends
			filedes[1] = filedes[0];

			poll_fd[i].handle = DUMMY_HANDLE;
			poll_fd[i].overlapped = overlapped;
			// There's no polling on the write end, so we just use READ for our needs
			poll_fd[i].rw = RW_READ;
			_poll_fd[i].original_handle = INVALID_HANDLE_VALUE;
			LeaveCriticalSection(&_poll_fd[i].mutex);
			return 0;
		}
	}
	free_overlapped(overlapped);
	return -1;
}
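/*
 * A minimal, standalone sketch (not part of the original source) of the idea
 * behind the fake pipe above: since libusb only uses its pipe for signaling,
 * a manual-reset event carried in an OVERLAPPED is enough. One side sets the
 * event (the "write" end), the other checks it (the "poll"/"read" end), and
 * ResetEvent consumes the signal. All names below are illustrative.
 */
#include <windows.h>
#include <stdio.h>

int main(void)
{
	OVERLAPPED ov = { 0 };

	// Manual-reset, initially non-signaled, as create_overlapped() is assumed to do
	ov.hEvent = CreateEvent(NULL, TRUE, FALSE, NULL);
	if (ov.hEvent == NULL)
		return 1;

	SetEvent(ov.hEvent);	// "write" end: signal that an event is pending

	// "poll" end: a zero timeout only checks whether the event is signaled
	if (WaitForSingleObject(ov.hEvent, 0) == WAIT_OBJECT_0)
		printf("pipe is readable\n");

	ResetEvent(ov.hEvent);	// "read" end: consume the signal
	CloseHandle(ov.hEvent);
	return 0;
}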
void exit_polling(void)
{
	struct pipe_data* item;
	int i;

	while (InterlockedExchange((LONG *)&compat_spinlock, 1) == 1) {
		SleepEx(0, TRUE);
	}
	if (is_polling_set) {
		is_polling_set = FALSE;

		for (i=0; i<MAX_FDS; i++) {
			// Cancel any async I/O (handle can be invalid)
			cancel_io(i);
			// If anything was pending on that I/O, it should be
			// terminating, and we should be able to access the fd
			// mutex lock before too long
			EnterCriticalSection(&_poll_fd[i].mutex);
			free_overlapped(poll_fd[i].overlapped);
			if (Use_Duplicate_Handles) {
				// Close duplicate handle
				if (_poll_fd[i].original_handle != INVALID_HANDLE_VALUE) {
					CloseHandle(poll_fd[i].handle);
				}
			}
			if (!list_empty(&_poll_fd[i].list))
				usbi_warn(NULL, "There are some pending events in the queue");
			list_for_each_entry(item, &_poll_fd[i].list, list, struct pipe_data) {
				if (!item) {
					usbi_err(NULL, "no item to free");
					continue;
				}
				if (item->data)
					free(item->data);
				free(item);
			}
			poll_fd[i] = INVALID_WINFD;
			LeaveCriticalSection(&_poll_fd[i].mutex);
			DeleteCriticalSection(&_poll_fd[i].mutex);
		}
	}
	InterlockedExchange((LONG *)&compat_spinlock, 0);
}
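/*
 * A minimal, standalone sketch (not part of the original source) of the
 * InterlockedExchange spin-lock pattern the exit_polling variants above rely
 * on: InterlockedExchange atomically stores 1 and returns the previous value,
 * so a return value of 1 means another thread already holds the lock and we
 * yield with SleepEx(0, TRUE) until it is released. Storing 0 back (ideally
 * also through InterlockedExchange, as the later revisions do) releases the
 * lock. All names below are illustrative.
 */
#include <windows.h>

static volatile LONG demo_spinlock = 0;	// hypothetical lock variable

static void demo_lock(void)
{
	// Spin until we are the thread that flips the lock from 0 to 1
	while (InterlockedExchange(&demo_spinlock, 1) == 1) {
		SleepEx(0, TRUE);	// yield, staying alertable for APCs
	}
}

static void demo_unlock(void)
{
	// Atomically publish the release of the lock
	InterlockedExchange(&demo_spinlock, 0);
}

int main(void)
{
	demo_lock();
	// ... critical section ...
	demo_unlock();
	return 0;
}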
void _free_index(int _index)
{
	cancel_io(_index);
	if ( (poll_fd[_index].handle != INVALID_HANDLE_VALUE) && (poll_fd[_index].handle != 0)
	  && (GetFileType(poll_fd[_index].handle) == FILE_TYPE_UNKNOWN) ) {
		_close(poll_fd[_index].fd);
	}
	if (!CancelIoEx_Available) {
		if (_poll_fd[_index].original_handle != INVALID_HANDLE_VALUE) {
			CloseHandle(poll_fd[_index].handle);
		}
		_poll_fd[_index].original_handle = INVALID_HANDLE_VALUE;
		_poll_fd[_index].thread_id = 0;
	}
	free_overlapped(poll_fd[_index].overlapped);
	poll_fd[_index] = INVALID_WINFD;
}
void _free_index(int _index)
{
	// Cancel any async IO (Don't care about the validity of our handles for this)
	cancel_io(_index);
	// close fake handle for devices
	if ( (poll_fd[_index].handle != INVALID_HANDLE_VALUE) && (poll_fd[_index].handle != 0)
	  && (GetFileType(poll_fd[_index].handle) == FILE_TYPE_UNKNOWN) ) {
		_close(poll_fd[_index].fd);
	}
	// close the duplicate handle (if we have an actual duplicate)
	if (!CancelIoEx_Available) {
		if (_poll_fd[_index].original_handle != INVALID_HANDLE_VALUE) {
			CloseHandle(poll_fd[_index].handle);
		}
		_poll_fd[_index].original_handle = INVALID_HANDLE_VALUE;
		_poll_fd[_index].thread_id = 0;
	}
	free_overlapped(poll_fd[_index].overlapped);
	poll_fd[_index] = INVALID_WINFD;
}
struct winfd usbi_create_fd(HANDLE handle, int access_mode)
{
	int i, fd;
	struct winfd wfd = INVALID_WINFD;
	OVERLAPPED* overlapped = NULL;

	CHECK_INIT_POLLING;

	if ((handle == 0) || (handle == INVALID_HANDLE_VALUE)) {
		return INVALID_WINFD;
	}

	if ((access_mode != _O_RDONLY) && (access_mode != _O_WRONLY)) {
		usbi_warn(NULL, "only one of _O_RDONLY or _O_WRONLY are supported.\n"
			"If you want to poll for R/W simultaneously, create multiple fds from the same handle.");
		return INVALID_WINFD;
	}
	if (access_mode == _O_RDONLY) {
		wfd.rw = RW_READ;
	} else {
		wfd.rw = RW_WRITE;
	}

	fd = _open(NUL_DEVICE, _O_WRONLY);
	if (fd < 0) {
		return INVALID_WINFD;
	}

	overlapped = create_overlapped();
	if (overlapped == NULL) {
		_close(fd);
		return INVALID_WINFD;
	}

	for (i=0; i<MAX_FDS; i++) {
		if (poll_fd[i].fd < 0) {
			EnterCriticalSection(&_poll_fd[i].mutex);
			if (poll_fd[i].fd >= 0) {
				LeaveCriticalSection(&_poll_fd[i].mutex);
				continue;
			}
			wfd.fd = fd;
			if (!CancelIoEx_Available) {
				_poll_fd[i].thread_id = GetCurrentThreadId();
				if (!DuplicateHandle(GetCurrentProcess(), handle, GetCurrentProcess(),
					&wfd.handle, 0, TRUE, DUPLICATE_SAME_ACCESS)) {
					usbi_dbg("could not duplicate handle for CancelIo - using original one");
					wfd.handle = handle;
					_poll_fd[i].original_handle = INVALID_HANDLE_VALUE;
				} else {
					_poll_fd[i].original_handle = handle;
				}
			} else {
				wfd.handle = handle;
			}
			wfd.overlapped = overlapped;
			memcpy(&poll_fd[i], &wfd, sizeof(struct winfd));
			LeaveCriticalSection(&_poll_fd[i].mutex);
			return wfd;
		}
	}
	free_overlapped(overlapped);
	_close(fd);
	return INVALID_WINFD;
}
/*
 * Create both an fd and an OVERLAPPED from an open Windows handle, so that
 * it can be used with our polling function
 * The handle MUST support overlapped transfers (usually requires CreateFile
 * with FILE_FLAG_OVERLAPPED)
 * Return a pollable file descriptor struct, or INVALID_WINFD on error
 *
 * Note that the fd returned by this function is a per-transfer fd, rather
 * than a per-session fd and cannot be used for anything else but our
 * custom functions (the fd itself points to the NUL: device)
 * if you plan to do R/W on the same handle, you MUST create 2 fds: one for
 * read and one for write. Using a single R/W fd is unsupported and will
 * produce unexpected results
 */
struct winfd usbi_create_fd(HANDLE handle, int access_mode)
{
	int i, fd;
	struct winfd wfd = INVALID_WINFD;
	OVERLAPPED* overlapped = NULL;

	CHECK_INIT_POLLING;

	if ((handle == 0) || (handle == INVALID_HANDLE_VALUE)) {
		return INVALID_WINFD;
	}

	if ((access_mode != _O_RDONLY) && (access_mode != _O_WRONLY)) {
		usbi_warn(NULL, "only one of _O_RDONLY or _O_WRONLY are supported.\n"
			"If you want to poll for R/W simultaneously, create multiple fds from the same handle.");
		return INVALID_WINFD;
	}
	if (access_mode == _O_RDONLY) {
		wfd.rw = RW_READ;
	} else {
		wfd.rw = RW_WRITE;
	}

	// Ensure that we get a non system conflicting unique fd, using
	// the same fd attribution system as the pipe ends
	fd = _open(NUL_DEVICE, _O_WRONLY);
	if (fd < 0) {
		return INVALID_WINFD;
	}

	overlapped = create_overlapped();
	if (overlapped == NULL) {
		_close(fd);
		return INVALID_WINFD;
	}

	for (i=0; i<MAX_FDS; i++) {
		if (poll_fd[i].fd < 0) {
			EnterCriticalSection(&_poll_fd[i].mutex);
			// fd might have been removed before we got to critical
			if (poll_fd[i].fd >= 0) {
				LeaveCriticalSection(&_poll_fd[i].mutex);
				continue;
			}
			wfd.fd = fd;
			// Attempt to emulate some of the CancelIoEx behaviour on platforms
			// that don't have it
			if (!CancelIoEx_Available) {
				_poll_fd[i].thread_id = GetCurrentThreadId();
				if (!DuplicateHandle(GetCurrentProcess(), handle, GetCurrentProcess(),
					&wfd.handle, 0, TRUE, DUPLICATE_SAME_ACCESS)) {
					usbi_dbg("could not duplicate handle for CancelIo - using original one");
					wfd.handle = handle;
					// Make sure we won't close the original handle on fd deletion then
					_poll_fd[i].original_handle = INVALID_HANDLE_VALUE;
				} else {
					_poll_fd[i].original_handle = handle;
				}
			} else {
				wfd.handle = handle;
			}
			wfd.overlapped = overlapped;
			memcpy(&poll_fd[i], &wfd, sizeof(struct winfd));
			LeaveCriticalSection(&_poll_fd[i].mutex);
			return wfd;
		}
	}
	free_overlapped(overlapped);
	_close(fd);
	return INVALID_WINFD;
}
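/*
 * A minimal, standalone sketch (not part of the original source) of the
 * fd-reservation trick used by the two usbi_create_fd variants above: opening
 * the NUL device through the CRT returns a real, process-unique file
 * descriptor, so the fake fd can never collide with descriptors handed out by
 * the C runtime elsewhere, even though it is never read or written.
 * NUL_DEVICE is assumed to expand to "NUL".
 */
#include <io.h>
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
	// Reserve a CRT descriptor that points at the NUL device
	int fd = _open("NUL", _O_WRONLY);

	if (fd < 0) {
		perror("_open");
		return 1;
	}
	printf("reserved fd %d\n", fd);
	_close(fd);	// release the placeholder descriptor
	return 0;
}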
/*
 * Create both an fd and an OVERLAPPED from an open Windows handle, so that
 * it can be used with our polling function
 * The handle MUST support overlapped transfers (usually requires CreateFile
 * with FILE_FLAG_OVERLAPPED)
 * Return a pollable file descriptor struct, or INVALID_WINFD on error
 *
 * Note that the fd returned by this function is a per-transfer fd, rather
 * than a per-session fd and cannot be used for anything else but our
 * custom functions
 * if you plan to do R/W on the same handle, you MUST create 2 fds: one for
 * read and one for write. Using a single R/W fd is unsupported and will
 * produce unexpected results
 */
struct winfd usbi_create_fd(HANDLE handle, int access_mode, struct usbi_transfer *itransfer, cancel_transfer *cancel_fn)
{
	int i;
	struct winfd wfd = INVALID_WINFD;
	OVERLAPPED* overlapped = NULL;

	CHECK_INIT_POLLING;

	if ((handle == 0) || (handle == INVALID_HANDLE_VALUE)) {
		return INVALID_WINFD;
	}

	wfd.itransfer = itransfer;
	wfd.cancel_fn = cancel_fn;

	if ((access_mode != RW_READ) && (access_mode != RW_WRITE)) {
		usbi_warn(NULL, "only one of RW_READ or RW_WRITE are supported.\n"
			"If you want to poll for R/W simultaneously, create multiple fds from the same handle.");
		return INVALID_WINFD;
	}
	if (access_mode == RW_READ) {
		wfd.rw = RW_READ;
	} else {
		wfd.rw = RW_WRITE;
	}

	overlapped = create_overlapped();
	if (overlapped == NULL) {
		return INVALID_WINFD;
	}

	for (i=0; i<MAX_FDS; i++) {
		if (poll_fd[i].fd < 0) {
			EnterCriticalSection(&_poll_fd[i].mutex);
			// fd might have been removed before we got to critical
			if (poll_fd[i].fd >= 0) {
				LeaveCriticalSection(&_poll_fd[i].mutex);
				continue;
			}
			// Use index as the unique fd number
			wfd.fd = i;
			// Attempt to emulate some of the CancelIoEx behaviour on platforms
			// that don't have it
			if (Use_Duplicate_Handles) {
				_poll_fd[i].thread_id = GetCurrentThreadId();
				if (!DuplicateHandle(GetCurrentProcess(), handle, GetCurrentProcess(),
					&wfd.handle, 0, TRUE, DUPLICATE_SAME_ACCESS)) {
					usbi_dbg("could not duplicate handle for CancelIo - using original one");
					wfd.handle = handle;
					// Make sure we won't close the original handle on fd deletion then
					_poll_fd[i].original_handle = INVALID_HANDLE_VALUE;
				} else {
					_poll_fd[i].original_handle = handle;
				}
			} else {
				wfd.handle = handle;
			}
			wfd.overlapped = overlapped;
			memcpy(&poll_fd[i], &wfd, sizeof(struct winfd));
			LeaveCriticalSection(&_poll_fd[i].mutex);
			return wfd;
		}
	}
	free_overlapped(overlapped);
	return INVALID_WINFD;
}
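/*
 * A minimal, standalone sketch (not part of the original source) of the
 * DuplicateHandle call used in the !CancelIoEx_Available / Use_Duplicate_Handles
 * paths above: the caller's handle is duplicated within the same process with
 * identical access rights, giving the polling layer a handle it owns and can
 * later close (or target with CancelIo) without invalidating the caller's
 * original handle. The file name below is illustrative.
 */
#include <windows.h>

int main(void)
{
	HANDLE original, duplicate = INVALID_HANDLE_VALUE;

	original = CreateFileA("demo.bin", GENERIC_READ | GENERIC_WRITE,
		0, NULL, CREATE_ALWAYS, FILE_FLAG_OVERLAPPED, NULL);
	if (original == INVALID_HANDLE_VALUE)
		return 1;

	if (!DuplicateHandle(GetCurrentProcess(), original,
			GetCurrentProcess(), &duplicate,
			0, TRUE, DUPLICATE_SAME_ACCESS)) {
		// Fall back to the original handle, as usbi_create_fd does
		duplicate = original;
	}

	// ... overlapped I/O would be submitted against 'duplicate' here ...

	if (duplicate != original)
		CloseHandle(duplicate);	// closing the duplicate leaves 'original' valid
	CloseHandle(original);
	return 0;
}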