auto merge of #17673 : aturon/rust/remove-uv, r=alexcrichton

This PR begins the process of [runtime removal](https://github.com/rust-lang/rfcs/pull/230) by dismantling the `librustuv` crate and associated event loop.

As a result, `libgreen` can still be used for task scheduling, but green-threaded I/O is no longer feasible.

Removing the libuv-based event loop eases the transition away from the runtime system, which will be done incrementally.

In terms of visible API changes, this PR:

* Removes `std::io::signal`, which was never implemented on the native threading model.

* Removes the `iotest!` macro, which was previously used to run I/O tests on both green and native threading models.

* Removes the `green_start!` macro for starting an application with a `librustuv` event loop.

* Removes the `librustuv` crate itself.

It also removes the `libuv` and `gyp` submodules and adjusts the build system and copyright notices accordingly.

If you wish to continue using `librustuv` and green-threaded I/O, consider using [green-rs](https://github.com/alexcrichton/green-rs/), which provides its own I/O stack.
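
For reference, this is the boot pattern being removed, as shown in the `libgreen` documentation deleted below; code like this will no longer compile once `librustuv` is gone:

```rust
// The now-removed librustuv boot pattern: a pool of green schedulers with
// libuv-backed I/O. Kept here only for reference.
extern crate green;
extern crate rustuv;

#[start]
fn start(argc: int, argv: *const *const u8) -> int {
    // Hand control to libgreen, with librustuv supplying the I/O event loop.
    green::start(argc, argv, rustuv::event_loop, main)
}

fn main() {
    // Application code runs here, in a pool of schedulers powered by libuv.
}
```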
Committed by bors on 2014-10-01 21:27:19 +00:00 in commit a70a0374e2.
70 changed files with 652 additions and 7747 deletions

.gitmodules

@ -2,13 +2,6 @@
path = src/llvm
url = https://github.com/rust-lang/llvm.git
branch = master
[submodule "src/libuv"]
path = src/libuv
url = https://github.com/rust-lang/libuv.git
branch = master
[submodule "src/gyp"]
path = src/gyp
url = https://github.com/rust-lang/gyp.git
[submodule "src/compiler-rt"]
path = src/compiler-rt
url = https://github.com/rust-lang/compiler-rt.git


@ -213,41 +213,6 @@ their own copyright notices and license terms:
as the Rust compiler or runtime libraries themselves).
* The libuv asynchronous I/O library. Code for this package
is found in the src/libuv directory, within this
distribution. This package is redistributed under the
following terms, as noted in its source:
Copyright Joyent, Inc. and other Node contributors. All
rights reserved. Permission is hereby granted, free of
charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to
deal in the Software without restriction, including
without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom
the Software is furnished to do so, subject to the
following conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
* Additional libraries included in libuv carry separate
BSD-compatible licenses. See src/libuv/LICENSE for
details.
* The src/rt/miniz.c file, carrying an implementation of
RFC1950/RFC1951 DEFLATE, by Rich Geldreich
<richgel99@gmail.com>. All uses of this file are

configure

@ -438,7 +438,6 @@ valopt prefix "/usr/local" "set installation prefix"
valopt local-rust-root "/usr/local" "set prefix for local rust binary"
valopt llvm-root "" "set LLVM root"
valopt jemalloc-root "" "set directory where libjemalloc_pic.a is located"
valopt libuv-root "" "set directory where libuv.a is located"
valopt android-cross-path "/opt/ndk_standalone" "Android NDK standalone path"
valopt mingw32-cross-path "" "MinGW32 cross compiler path"
@ -869,10 +868,6 @@ do
do
make_dir $t/rt/stage$s
make_dir $t/rt/jemalloc
make_dir $t/rt/libuv
make_dir $t/rt/libuv/src/ares
make_dir $t/rt/libuv/src/eio
make_dir $t/rt/libuv/src/ev
for i in \
isaac sync test \
arch/i386 arch/x86_64 arch/arm arch/mips \
@ -952,10 +947,6 @@ then
msg "git: submodule deinit src/jemalloc"
"${CFG_GIT}" submodule deinit src/jemalloc
fi
if [ ! -z "${CFG_LIBUV_ROOT}" ]; then
msg "git: submodule deinit src/libuv"
"${CFG_GIT}" submodule deinit src/libuv
fi
msg "git: submodule update"
"${CFG_GIT}" submodule update
@ -1216,7 +1207,6 @@ putvar CFG_MINGW32_CROSS_PATH
putvar CFG_MANDIR
putvar CFG_DISABLE_INJECT_STD_VERSION
putvar CFG_JEMALLOC_ROOT
putvar CFG_LIBUV_ROOT
putvar CFG_DISABLE_JEMALLOC
# Avoid spurious warnings from clang by feeding it original source on


@ -49,7 +49,7 @@
# automatically generated for all stage/host/target combinations.
################################################################################
TARGET_CRATES := libc std green rustuv native flate arena glob term semver \
TARGET_CRATES := libc std green native flate arena glob term semver \
uuid serialize sync getopts collections num test time rand \
url log regex graphviz core rbml rlibc alloc debug rustrt \
unicode
@ -69,7 +69,6 @@ DEPS_std := core libc rand alloc collections rustrt sync unicode \
native:rust_builtin native:backtrace
DEPS_graphviz := std
DEPS_green := std native:context_switch
DEPS_rustuv := std native:uv native:uv_support
DEPS_native := std
DEPS_syntax := std term serialize log fmt_macros debug arena libc
DEPS_rustc := syntax flate arena serialize getopts rbml \
@ -102,7 +101,7 @@ DEPS_regex := std
DEPS_regex_macros = rustc syntax std regex
DEPS_fmt_macros = std
TOOL_DEPS_compiletest := test green rustuv getopts
TOOL_DEPS_compiletest := test getopts
TOOL_DEPS_rustdoc := rustdoc native
TOOL_DEPS_rustc := rustc native
TOOL_SOURCE_compiletest := $(S)src/compiletest/compiletest.rs


@ -16,13 +16,11 @@
.PHONY: TAGS.emacs TAGS.vi
# This is using a blacklist approach, probably more durable than a whitelist.
# We exclude: external dependencies (llvm, libuv, gyp, rt/{msvc,sundown,vg}),
# We exclude: external dependencies (llvm, rt/{msvc,sundown,vg}),
# tests (compiletest, test) and a couple of other things (rt/arch, etc)
CTAGS_LOCATIONS=$(patsubst ${CFG_SRC_DIR}src/llvm,, \
$(patsubst ${CFG_SRC_DIR}src/libuv,, \
$(patsubst ${CFG_SRC_DIR}src/compiletest,, \
$(patsubst ${CFG_SRC_DIR}src/test,, \
$(patsubst ${CFG_SRC_DIR}src/gyp,, \
$(patsubst ${CFG_SRC_DIR}src/etc,, \
$(patsubst ${CFG_SRC_DIR}src/rt,, \
$(patsubst ${CFG_SRC_DIR}src/rt/arch,, \


@ -35,7 +35,7 @@ LICENSE.txt: $(S)COPYRIGHT $(S)LICENSE-APACHE $(S)LICENSE-MIT
PKG_TAR = dist/$(PKG_NAME).tar.gz
PKG_GITMODULES := $(S)src/libuv $(S)src/llvm $(S)src/gyp $(S)src/compiler-rt \
PKG_GITMODULES := $(S)src/llvm $(S)src/compiler-rt \
$(S)src/rt/hoedown $(S)src/jemalloc
PKG_FILES := \
$(S)COPYRIGHT \


@ -118,7 +118,6 @@ CFG_GCCISH_POST_LIB_FLAGS_x86_64-unknown-linux-gnu := -Wl,-no-whole-archive
CFG_DEF_SUFFIX_x86_64-unknown-linux-gnu := .linux.def
CFG_LLC_FLAGS_x86_64-unknown-linux-gnu :=
CFG_INSTALL_NAME_x86_64-unknown-linux-gnu =
CFG_LIBUV_LINK_FLAGS_x86_64-unknown-linux-gnu =
CFG_EXE_SUFFIX_x86_64-unknown-linux-gnu =
CFG_WINDOWSY_x86_64-unknown-linux-gnu :=
CFG_UNIXY_x86_64-unknown-linux-gnu := 1
@ -146,7 +145,6 @@ CFG_GCCISH_POST_LIB_FLAGS_i686-unknown-linux-gnu := -Wl,-no-whole-archive
CFG_DEF_SUFFIX_i686-unknown-linux-gnu := .linux.def
CFG_LLC_FLAGS_i686-unknown-linux-gnu :=
CFG_INSTALL_NAME_i686-unknown-linux-gnu =
CFG_LIBUV_LINK_FLAGS_i686-unknown-linux-gnu =
CFG_EXE_SUFFIX_i686-unknown-linux-gnu =
CFG_WINDOWSY_i686-unknown-linux-gnu :=
CFG_UNIXY_i686-unknown-linux-gnu := 1
@ -180,7 +178,6 @@ CFG_GCCISH_POST_LIB_FLAGS_arm-apple-ios :=
CFG_DEF_SUFFIX_arm-apple-ios := .darwin.def
CFG_LLC_FLAGS_arm-apple-ios := -mattr=+vfp3,+v7,+thumb2,+neon -march=arm
CFG_INSTALL_NAME_arm-apple-ios = -Wl,-install_name,@rpath/$(1)
CFG_LIBUV_LINK_FLAGS_arm-apple-ios =
CFG_EXE_SUFFIX_arm-apple-ios :=
CFG_WINDOWSY_arm-apple-ios :=
CFG_UNIXY_arm-apple-ios := 1
@ -216,7 +213,6 @@ CFG_GCCISH_POST_LIB_FLAGS_i386-apple-ios =
CFG_DEF_SUFFIX_i386-apple-ios = .darwin.def
CFG_LLC_FLAGS_i386-apple-ios =
CFG_INSTALL_NAME_i386-apple-ios = -Wl,-install_name,@rpath/$(1)
CFG_LIBUV_LINK_FLAGS_i386-apple-ios =
CFG_EXE_SUFFIX_i386-apple-ios =
CFG_WINDOWSY_i386-apple-ios =
CFG_UNIXY_i386-apple-ios = 1
@ -245,7 +241,6 @@ CFG_GCCISH_POST_LIB_FLAGS_x86_64-apple-darwin :=
CFG_DEF_SUFFIX_x86_64-apple-darwin := .darwin.def
CFG_LLC_FLAGS_x86_64-apple-darwin :=
CFG_INSTALL_NAME_x86_64-apple-darwin = -Wl,-install_name,@rpath/$(1)
CFG_LIBUV_LINK_FLAGS_x86_64-apple-darwin =
CFG_EXE_SUFFIX_x86_64-apple-darwin :=
CFG_WINDOWSY_x86_64-apple-darwin :=
CFG_UNIXY_x86_64-apple-darwin := 1
@ -273,7 +268,6 @@ CFG_GCCISH_POST_LIB_FLAGS_i686-apple-darwin :=
CFG_DEF_SUFFIX_i686-apple-darwin := .darwin.def
CFG_LLC_FLAGS_i686-apple-darwin :=
CFG_INSTALL_NAME_i686-apple-darwin = -Wl,-install_name,@rpath/$(1)
CFG_LIBUV_LINK_FLAGS_i686-apple-darwin =
CFG_EXE_SUFFIX_i686-apple-darwin :=
CFG_WINDOWSY_i686-apple-darwin :=
CFG_UNIXY_i686-apple-darwin := 1
@ -301,7 +295,6 @@ CFG_GCCISH_POST_LIB_FLAGS_arm-linux-androideabi := -Wl,-no-whole-archive
CFG_DEF_SUFFIX_arm-linux-androideabi := .android.def
CFG_LLC_FLAGS_arm-linux-androideabi :=
CFG_INSTALL_NAME_arm-linux-androideabi =
CFG_LIBUV_LINK_FLAGS_arm-linux-androideabi =
CFG_EXE_SUFFIX_arm-linux-androideabi :=
CFG_WINDOWSY_arm-linux-androideabi :=
CFG_UNIXY_arm-linux-androideabi := 1
@ -332,7 +325,6 @@ CFG_GCCISH_POST_LIB_FLAGS_arm-unknown-linux-gnueabihf := -Wl,-no-whole-archive
CFG_DEF_SUFFIX_arm-unknown-linux-gnueabihf := .linux.def
CFG_LLC_FLAGS_arm-unknown-linux-gnueabihf :=
CFG_INSTALL_NAME_ar,-unknown-linux-gnueabihf =
CFG_LIBUV_LINK_FLAGS_arm-unknown-linux-gnueabihf =
CFG_EXE_SUFFIX_arm-unknown-linux-gnueabihf :=
CFG_WINDOWSY_arm-unknown-linux-gnueabihf :=
CFG_UNIXY_arm-unknown-linux-gnueabihf := 1
@ -363,7 +355,6 @@ CFG_GCCISH_POST_LIB_FLAGS_arm-unknown-linux-gnueabi := -Wl,-no-whole-archive
CFG_DEF_SUFFIX_arm-unknown-linux-gnueabi := .linux.def
CFG_LLC_FLAGS_arm-unknown-linux-gnueabi :=
CFG_INSTALL_NAME_arm-unknown-linux-gnueabi =
CFG_LIBUV_LINK_FLAGS_arm-unknown-linux-gnueabi =
CFG_EXE_SUFFIX_arm-unknown-linux-gnueabi :=
CFG_WINDOWSY_arm-unknown-linux-gnueabi :=
CFG_UNIXY_arm-unknown-linux-gnueabi := 1
@ -393,7 +384,6 @@ CFG_GCCISH_POST_LIB_FLAGS_mipsel-linux := -Wl,-no-whole-archive
CFG_DEF_SUFFIX_mipsel-linux := .linux.def
CFG_LLC_FLAGS_mipsel-linux :=
CFG_INSTALL_NAME_mipsel-linux =
CFG_LIBUV_LINK_FLAGS_mipsel-linux =
CFG_EXE_SUFFIX_mipsel-linux :=
CFG_WINDOWSY_mipsel-linux :=
CFG_UNIXY_mipsel-linux := 1
@ -423,7 +413,6 @@ CFG_GCCISH_POST_LIB_FLAGS_mips-unknown-linux-gnu := -Wl,-no-whole-archive
CFG_DEF_SUFFIX_mips-unknown-linux-gnu := .linux.def
CFG_LLC_FLAGS_mips-unknown-linux-gnu :=
CFG_INSTALL_NAME_mips-unknown-linux-gnu =
CFG_LIBUV_LINK_FLAGS_mips-unknown-linux-gnu =
CFG_EXE_SUFFIX_mips-unknown-linux-gnu :=
CFG_WINDOWSY_mips-unknown-linux-gnu :=
CFG_UNIXY_mips-unknown-linux-gnu := 1
@ -452,7 +441,6 @@ CFG_GCCISH_POST_LIB_FLAGS_i586-mingw32msvc :=
CFG_DEF_SUFFIX_i586-mingw32msvc := .mingw32.def
CFG_LLC_FLAGS_i586-mingw32msvc :=
CFG_INSTALL_NAME_i586-mingw32msvc =
CFG_LIBUV_LINK_FLAGS_i586-mingw32msvc := -L$(CFG_MINGW32_CROSS_PATH)/i586-mingw32msvc/lib -lws2_32 -lpsapi -liphlpapi
CFG_EXE_SUFFIX_i586-mingw32msvc := .exe
CFG_WINDOWSY_i586-mingw32msvc := 1
CFG_UNIXY_i586-mingw32msvc :=
@ -483,7 +471,6 @@ CFG_GCCISH_POST_LIB_FLAGS_i686-w64-mingw32 :=
CFG_DEF_SUFFIX_i686-w64-mingw32 := .mingw32.def
CFG_LLC_FLAGS_i686-w64-mingw32 :=
CFG_INSTALL_NAME_i686-w64-mingw32 =
CFG_LIBUV_LINK_FLAGS_i686-w64-mingw32 := -lws2_32 -lpsapi -liphlpapi
CFG_EXE_SUFFIX_i686-w64-mingw32 := .exe
CFG_WINDOWSY_i686-w64-mingw32 := 1
CFG_UNIXY_i686-w64-mingw32 :=
@ -515,7 +502,6 @@ CFG_GCCISH_POST_LIB_FLAGS_x86_64-w64-mingw32 :=
CFG_DEF_SUFFIX_x86_64-w64-mingw32 := .mingw32.def
CFG_LLC_FLAGS_x86_64-w64-mingw32 :=
CFG_INSTALL_NAME_x86_64-w64-mingw32 =
CFG_LIBUV_LINK_FLAGS_x86_64-w64-mingw32 := -lws2_32 -lpsapi -liphlpapi
CFG_EXE_SUFFIX_x86_64-w64-mingw32 := .exe
CFG_WINDOWSY_x86_64-w64-mingw32 := 1
CFG_UNIXY_x86_64-w64-mingw32 :=
@ -543,7 +529,6 @@ CFG_GCCISH_POST_LIB_FLAGS_x86_64-unknown-freebsd := -Wl,-no-whole-archive
CFG_DEF_SUFFIX_x86_64-unknown-freebsd := .bsd.def
CFG_LLC_FLAGS_x86_64-unknown-freebsd :=
CFG_INSTALL_NAME_x86_64-unknown-freebsd =
CFG_LIBUV_LINK_FLAGS_x86_64-unknown-freebsd := -pthread -lkvm
CFG_EXE_SUFFIX_x86_64-unknown-freebsd :=
CFG_WINDOWSY_x86_64-unknown-freebsd :=
CFG_UNIXY_x86_64-unknown-freebsd := 1
@ -570,7 +555,6 @@ CFG_GCCISH_POST_LIB_FLAGS_x86_64-unknown-dragonfly := -Wl,-no-whole-archive
CFG_DEF_SUFFIX_x86_64-unknown-dragonfly := .bsd.def
CFG_LLC_FLAGS_x86_64-unknown-dragonfly :=
CFG_INSTALL_NAME_x86_64-unknown-dragonfly =
CFG_LIBUV_LINK_FLAGS_x86_64-unknown-dragonfly := -pthread -lkvm
CFG_EXE_SUFFIX_x86_64-unknown-dragonfly :=
CFG_WINDOWSY_x86_64-unknown-dragonfly :=
CFG_UNIXY_x86_64-unknown-dragonfly := 1


@ -16,7 +16,7 @@ rwildcard=$(foreach d,$(wildcard $1*),$(call rwildcard,$d/,$2) \
ifndef CFG_DISABLE_MANAGE_SUBMODULES
# This is a pretty expensive operation but I don't see any way to avoid it
# NB: This only looks for '+' status (wrong commit checked out), not '-' status
# (nothing checked out at all). `./configure --{llvm,jemalloc,libuv}-root`
# (nothing checked out at all). `./configure --{llvm,jemalloc}-root`
# will explicitly deinitialize the corresponding submodules, and we don't
# want to force constant rebuilds in that case.
NEED_GIT_RECONFIG=$(shell cd "$(CFG_SRC_DIR)" && "$(CFG_GIT)" submodule status | grep -c '^+')

mk/rt.mk

@ -83,7 +83,7 @@ $$(RT_OUTPUT_DIR_$(1))/%.o: $(S)src/rt/%.c $$(MKFILE_DEPS)
@$$(call E, compile: $$@)
$$(Q)$$(call CFG_COMPILE_C_$(1), $$@, \
-I $$(S)src/rt/hoedown/src \
-I $$(S)src/libuv/include -I $$(S)src/rt \
-I $$(S)src/rt \
$$(RUNTIME_CFLAGS_$(1))) $$<
$$(RT_OUTPUT_DIR_$(1))/%.o: $(S)src/rt/%.S $$(MKFILE_DEPS) \
@ -129,155 +129,20 @@ $(foreach lib,$(NATIVE_LIBS), \
# in the correct location.
################################################################################
################################################################################
# libuv
################################################################################
define DEF_LIBUV_ARCH_VAR
LIBUV_ARCH_$(1) = $$(subst i386,ia32,$$(subst x86_64,x64,$$(HOST_$(1))))
endef
$(foreach t,$(CFG_TARGET),$(eval $(call DEF_LIBUV_ARCH_VAR,$(t))))
ifdef CFG_ENABLE_FAST_MAKE
LIBUV_DEPS := $(S)/.gitmodules
else
LIBUV_DEPS := $(wildcard \
$(S)src/libuv/* \
$(S)src/libuv/*/* \
$(S)src/libuv/*/*/* \
$(S)src/libuv/*/*/*/*)
endif
LIBUV_NO_LOAD = run-benchmarks.target.mk run-tests.target.mk \
uv_dtrace_header.target.mk uv_dtrace_provider.target.mk
export PYTHONPATH := $(PYTHONPATH):$(S)src/gyp/pylib
define DEF_THIRD_PARTY_TARGETS
# $(1) is the target triple
ifeq ($$(CFG_WINDOWSY_$(1)), 1)
LIBUV_OSTYPE_$(1) := win
# This isn't necessarily a desired option, but it's harmless and works around
# what appears to be a mingw-w64 bug.
#
# https://sourceforge.net/p/mingw-w64/bugs/395/
JEMALLOC_ARGS_$(1) := --enable-lazy-lock
else ifeq ($(OSTYPE_$(1)), apple-darwin)
LIBUV_OSTYPE_$(1) := mac
else ifeq ($(OSTYPE_$(1)), apple-ios)
LIBUV_OSTYPE_$(1) := ios
JEMALLOC_ARGS_$(1) := --disable-tls
else ifeq ($(OSTYPE_$(1)), unknown-freebsd)
LIBUV_OSTYPE_$(1) := freebsd
else ifeq ($(OSTYPE_$(1)), unknown-dragonfly)
LIBUV_OSTYPE_$(1) := freebsd
# required on DragonFly, otherwise gyp fails with a Python exception
LIBUV_GYP_ARGS_$(1) := --no-parallel
else ifeq ($(OSTYPE_$(1)), linux-androideabi)
LIBUV_OSTYPE_$(1) := android
LIBUV_ARGS_$(1) := PLATFORM=android host=android OS=linux
JEMALLOC_ARGS_$(1) := --disable-tls
else
LIBUV_OSTYPE_$(1) := linux
endif
LIBUV_NAME_$(1) := $$(call CFG_STATIC_LIB_NAME_$(1),uv)
LIBUV_DIR_$(1) := $$(RT_OUTPUT_DIR_$(1))/libuv
LIBUV_LIB_$(1) := $$(RT_OUTPUT_DIR_$(1))/$$(LIBUV_NAME_$(1))
LIBUV_MAKEFILE_$(1) := $$(CFG_BUILD_DIR)$$(RT_OUTPUT_DIR_$(1))/libuv/Makefile
LIBUV_BUILD_DIR_$(1) := $$(CFG_BUILD_DIR)$$(RT_OUTPUT_DIR_$(1))/libuv
LIBUV_XCODEPROJ_$(1) := $$(LIBUV_BUILD_DIR_$(1))/uv.xcodeproj
LIBUV_STAMP_$(1) = $$(LIBUV_DIR_$(1))/libuv-auto-clean-stamp
$$(LIBUV_STAMP_$(1)): $(S)src/rt/libuv-auto-clean-trigger
$$(Q)rm -rf $$(LIBUV_DIR_$(1))
$$(Q)mkdir -p $$(@D)
touch $$@
# libuv triggers a few warnings on some platforms
LIBUV_CFLAGS_$(1) := $(subst -Werror,,$(CFG_GCCISH_CFLAGS_$(1)))
$$(LIBUV_MAKEFILE_$(1)): $$(LIBUV_DEPS) $$(MKFILE_DEPS) $$(LIBUV_STAMP_$(1))
(cd $(S)src/libuv/ && \
CC="$$(CC_$(1))" \
CXX="$$(CXX_$(1))" \
AR="$$(AR_$(1))" \
$$(CFG_PYTHON) ./gyp_uv.py -f make -Dtarget_arch=$$(LIBUV_ARCH_$(1)) \
-D ninja \
-DOS=$$(LIBUV_OSTYPE_$(1)) \
-Goutput_dir=$$(@D) $$(LIBUV_GYP_ARGS_$(1)) --generator-output $$(@D))
touch $$@
# Windows has a completely different build system for libuv because of mingw. In
# theory when we support msvc then we should be using gyp's msvc output instead
# of mingw's makefile for windows
ifdef CFG_WINDOWSY_$(1)
LIBUV_LOCAL_$(1) := $$(S)src/libuv/libuv.a
$$(LIBUV_LOCAL_$(1)): $$(LIBUV_DEPS) $$(MKFILE_DEPS)
$$(Q)$$(MAKE) -C $$(S)src/libuv -f Makefile.mingw \
LDFLAGS="$$(CFG_GCCISH_LINK_FLAGS_$(1))" \
CC="$$(CC_$(1)) $$(LIBUV_CFLAGS_$(1)) $$(SNAP_DEFINES)" \
CXX="$$(CXX_$(1))" \
AR="$$(AR_$(1))" \
V=$$(VERBOSE)
else ifeq ($(OSTYPE_$(1)), apple-ios) # iOS
$$(LIBUV_XCODEPROJ_$(1)): $$(LIBUV_DEPS) $$(MKFILE_DEPS) $$(LIBUV_STAMP_$(1))
cp -rf $(S)src/libuv/ $$(LIBUV_BUILD_DIR_$(1))
(cd $$(LIBUV_BUILD_DIR_$(1)) && \
CC="$$(CC_$(1))" \
CXX="$$(CXX_$(1))" \
AR="$$(AR_$(1))" \
$$(CFG_PYTHON) ./gyp_uv.py -f xcode \
-D ninja \
-R libuv)
touch $$@
LIBUV_XCODE_OUT_LIB_$(1) := $$(LIBUV_BUILD_DIR_$(1))/build/Release-$$(CFG_SDK_NAME_$(1))/libuv.a
$$(LIBUV_LIB_$(1)): $$(LIBUV_XCODE_OUT_LIB_$(1)) $$(MKFILE_DEPS)
$$(Q)cp $$< $$@
$$(LIBUV_XCODE_OUT_LIB_$(1)): $$(LIBUV_DEPS) $$(LIBUV_XCODEPROJ_$(1)) \
$$(MKFILE_DEPS)
$$(Q)xcodebuild -project $$(LIBUV_BUILD_DIR_$(1))/uv.xcodeproj \
CFLAGS="$$(LIBUV_CFLAGS_$(1)) $$(SNAP_DEFINES)" \
LDFLAGS="$$(CFG_GCCISH_LINK_FLAGS_$(1))" \
$$(LIBUV_ARGS_$(1)) \
V=$$(VERBOSE) \
-configuration Release \
-sdk "$$(CFG_SDK_NAME_$(1))" \
ARCHS="$$(CFG_SDK_ARCHS_$(1))"
$$(Q)touch $$@
else
LIBUV_LOCAL_$(1) := $$(LIBUV_DIR_$(1))/Release/libuv.a
$$(LIBUV_LOCAL_$(1)): $$(LIBUV_DEPS) $$(LIBUV_MAKEFILE_$(1)) $$(MKFILE_DEPS)
$$(Q)$$(MAKE) -C $$(LIBUV_DIR_$(1)) \
CFLAGS="$$(LIBUV_CFLAGS_$(1)) $$(SNAP_DEFINES)" \
LDFLAGS="$$(CFG_GCCISH_LINK_FLAGS_$(1))" \
CC="$$(CC_$(1))" \
CXX="$$(CXX_$(1))" \
AR="$$(AR_$(1))" \
$$(LIBUV_ARGS_$(1)) \
BUILDTYPE=Release \
NO_LOAD="$$(LIBUV_NO_LOAD)" \
V=$$(VERBOSE)
$$(Q)touch $$@
endif
ifeq ($(1),$$(CFG_BUILD))
ifneq ($$(CFG_LIBUV_ROOT),)
$$(LIBUV_LIB_$(1)): $$(CFG_LIBUV_ROOT)/libuv.a
$$(Q)cp $$< $$@
else
$$(LIBUV_LIB_$(1)): $$(LIBUV_LOCAL_$(1))
$$(Q)cp $$< $$@
endif
else
$$(LIBUV_LIB_$(1)): $$(LIBUV_LOCAL_$(1))
$$(Q)cp $$< $$@
endif
################################################################################


@ -57,8 +57,6 @@ Source layout:
| `test/auxiliary` | - Dependencies of tests |
| ------------------- | --------------------------------------------------------- |
| `librustdoc/` | The Rust API documentation tool |
| `libuv/` | The libuv submodule |
| `librustuv/` | Rust libuv support code |
| ------------------- | --------------------------------------------------------- |
| `llvm/` | The LLVM submodule |
| `rustllvm/` | LLVM support code |


@ -11,15 +11,10 @@
#![crate_type = "bin"]
#![feature(phase)]
// we use our own (green) start below; do not link in libnative; issue #13247.
#![no_start]
#![deny(warnings)]
extern crate test;
extern crate getopts;
extern crate green;
extern crate rustuv;
#[phase(plugin, link)] extern crate log;
extern crate regex;
@ -41,11 +36,6 @@ pub mod runtest;
pub mod common;
pub mod errors;
#[start]
fn start(argc: int, argv: *const *const u8) -> int {
green::start(argc, argv, rustuv::event_loop, main)
}
pub fn main() {
let args = os::args();
let config = parse_config(args);


@ -240,7 +240,7 @@ To create a pool of green tasks which have no I/O support, you may shed the
`rustuv::event_loop`. All tasks will have no I/O support, but they will still be
able to deschedule/reschedule (use channels, locks, etc).
~~~{.rust}
~~~{.ignore}
extern crate green;
extern crate rustuv;

@ -1 +0,0 @@
Subproject commit 1e46da1000bc29679ab4cebf3c1034cb7d6f4487


@ -128,35 +128,6 @@
//! > **Note**: This `main` function in this example does *not* have I/O
//! > support. The basic event loop does not provide any support
//!
//! # Starting with I/O support in libgreen
//!
//! ```rust
//! extern crate green;
//! extern crate rustuv;
//!
//! #[start]
//! fn start(argc: int, argv: *const *const u8) -> int {
//! green::start(argc, argv, rustuv::event_loop, main)
//! }
//!
//! fn main() {
//! // this code is running in a pool of schedulers all powered by libuv
//! }
//! ```
//!
//! The above code can also be shortened with a macro from libgreen.
//!
//! ```
//! #![feature(phase)]
//! #[phase(plugin)] extern crate green;
//!
//! green_start!(main)
//!
//! fn main() {
//! // run inside of a green pool
//! }
//! ```
//!
//! # Using a scheduler pool
//!
//! This library adds a `GreenTaskBuilder` trait that extends the methods
@ -165,7 +136,6 @@
//!
//! ```rust
//! extern crate green;
//! extern crate rustuv;
//!
//! # fn main() {
//! use std::task::TaskBuilder;
@ -173,9 +143,6 @@
//!
//! let mut config = PoolConfig::new();
//!
//! // Optional: Set the event loop to be rustuv's to allow I/O to work
//! config.event_loop_factory = rustuv::event_loop;
//!
//! let mut pool = SchedPool::new(config);
//!
//! // Spawn tasks into the pool of schedulers
@ -221,7 +188,6 @@
#![allow(deprecated)]
#[cfg(test)] #[phase(plugin, link)] extern crate log;
#[cfg(test)] extern crate rustuv;
extern crate libc;
extern crate alloc;
@ -253,33 +219,6 @@ pub mod sleeper_list;
pub mod stack;
pub mod task;
/// A helper macro for booting a program with libgreen
///
/// # Example
///
/// ```
/// #![feature(phase)]
/// #[phase(plugin)] extern crate green;
///
/// green_start!(main)
///
/// fn main() {
/// // running with libgreen
/// }
/// ```
#[macro_export]
macro_rules! green_start( ($f:ident) => (
mod __start {
extern crate green;
extern crate rustuv;
#[start]
fn start(argc: int, argv: *const *const u8) -> int {
green::start(argc, argv, rustuv::event_loop, super::$f)
}
}
) )
/// Set up a default runtime configuration, given compiler-supplied arguments.
///
/// This function will block until the entire pool of M:N schedulers have


@ -1024,8 +1024,6 @@ fn new_sched_rng() -> XorShiftRng {
#[cfg(test)]
mod test {
use rustuv;
use std::rt::task::TaskOpts;
use std::rt::task::Task;
use std::rt::local::Local;
@ -1277,28 +1275,6 @@ mod test {
// }
//}
#[test]
fn test_io_callback() {
use std::io::timer;
let mut pool = SchedPool::new(PoolConfig {
threads: 2,
event_loop_factory: rustuv::event_loop,
});
// This is a regression test: when there are no schedulable tasks in the
// work queue but we are performing I/O, then once something is put in the
// work queue again the scheduler picks it up and doesn't exit before
// emptying the work queue.
pool.spawn(TaskOpts::new(), proc() {
spawn(proc() {
timer::sleep(Duration::milliseconds(10));
});
});
pool.shutdown();
}
#[test]
fn wakeup_across_scheds() {
let (tx1, rx1) = channel();


@ -506,7 +506,7 @@ mod tests {
fn spawn_opts(opts: TaskOpts, f: proc():Send) {
let mut pool = SchedPool::new(PoolConfig {
threads: 1,
event_loop_factory: ::rustuv::event_loop,
event_loop_factory: super::super::basic::event_loop,
});
pool.spawn(opts, f);
pool.shutdown();


@ -1,173 +0,0 @@
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/// An exclusive access primitive
///
/// This primitive is used to gain exclusive access to read() and write() in uv.
/// It is assumed that all invocations of this struct happen on the same thread
/// (the uv event loop).
use alloc::arc::Arc;
use std::mem;
use std::rt::local::Local;
use std::rt::task::{BlockedTask, Task};
use std::cell::UnsafeCell;
use homing::HomingMissile;
pub struct Access<T> {
inner: Arc<UnsafeCell<Inner<T>>>,
}
pub struct Guard<'a, T:'static> {
access: &'a mut Access<T>,
missile: Option<HomingMissile>,
}
struct Inner<T> {
queue: Vec<(BlockedTask, uint)>,
held: bool,
closed: bool,
data: T,
}
impl<T: Send> Access<T> {
pub fn new(data: T) -> Access<T> {
Access {
inner: Arc::new(UnsafeCell::new(Inner {
queue: vec![],
held: false,
closed: false,
data: data,
}))
}
}
pub fn grant<'a>(&'a mut self, token: uint,
missile: HomingMissile) -> Guard<'a, T> {
// This unsafety is actually OK because the homing missile argument
// guarantees that we're on the same event loop as all the other objects
// attempting to get access granted.
let inner = unsafe { &mut *self.inner.get() };
if inner.held {
let t: Box<Task> = Local::take();
t.deschedule(1, |task| {
inner.queue.push((task, token));
Ok(())
});
assert!(inner.held);
} else {
inner.held = true;
}
Guard { access: self, missile: Some(missile) }
}
pub fn unsafe_get(&self) -> *mut T {
unsafe { &mut (*self.inner.get()).data as *mut _ }
}
// Safe version which requires proof that you are on the home scheduler.
pub fn get_mut<'a>(&'a mut self, _missile: &HomingMissile) -> &'a mut T {
unsafe { &mut *self.unsafe_get() }
}
pub fn close(&self, _missile: &HomingMissile) {
// This unsafety is OK because with a homing missile we're guaranteed to
// be the only task looking at the `closed` flag (and are therefore
// allowed to modify it). Additionally, no atomics are necessary because
// everyone's running on the same thread and has already done the
// necessary synchronization to be running on this thread.
unsafe { (*self.inner.get()).closed = true; }
}
// Dequeue a blocked task with a specified token. This is unsafe because it
// is only safe to invoke while on the home event loop, and there is no
// guarantee that this is being invoked on the home event loop.
pub unsafe fn dequeue(&mut self, token: uint) -> Option<BlockedTask> {
let inner = &mut *self.inner.get();
match inner.queue.iter().position(|&(_, t)| t == token) {
Some(i) => Some(inner.queue.remove(i).unwrap().val0()),
None => None,
}
}
/// Test whether this access is closed, using a homing missile to prove
/// that it's safe
pub fn is_closed(&self, _missile: &HomingMissile) -> bool {
unsafe { (*self.inner.get()).closed }
}
}
impl<T: Send> Clone for Access<T> {
fn clone(&self) -> Access<T> {
Access { inner: self.inner.clone() }
}
}
impl<'a, T: Send> Guard<'a, T> {
pub fn is_closed(&self) -> bool {
// See above for why this unsafety is ok, it just applies to the read
// instead of the write.
unsafe { (*self.access.inner.get()).closed }
}
}
impl<'a, T: Send> Deref<T> for Guard<'a, T> {
fn deref<'a>(&'a self) -> &'a T {
// A guard represents exclusive access to a piece of data, so it's safe
// to hand out shared and mutable references
unsafe { &(*self.access.inner.get()).data }
}
}
impl<'a, T: Send> DerefMut<T> for Guard<'a, T> {
fn deref_mut<'a>(&'a mut self) -> &'a mut T {
unsafe { &mut (*self.access.inner.get()).data }
}
}
#[unsafe_destructor]
impl<'a, T:Send> Drop for Guard<'a, T> {
fn drop(&mut self) {
// This guard's homing missile is still armed, so we're guaranteed to be
// on the same I/O event loop, so this unsafety should be ok.
assert!(self.missile.is_some());
let inner: &mut Inner<T> = unsafe {
mem::transmute(self.access.inner.get())
};
match inner.queue.remove(0) {
// Here we have found a task that was waiting for access, and we
// currently have the "access lock" we need to relinquish access to
// this sleeping task.
//
// To do so, we first drop our homing missile and we then reawaken
// the task. In reawakening the task, it will be immediately
// scheduled on this scheduler. Because we might be woken up on some
// other scheduler, we drop our homing missile before we reawaken
// the task.
Some((task, _)) => {
drop(self.missile.take());
task.reawaken();
}
None => { inner.held = false; }
}
}
}
#[unsafe_destructor]
impl<T> Drop for Inner<T> {
fn drop(&mut self) {
assert!(!self.held);
assert_eq!(self.queue.len(), 0);
}
}


@ -1,143 +0,0 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use libc::c_int;
use libc;
use std::mem;
use std::ptr::{null, null_mut};
use std::rt::task::BlockedTask;
use std::rt::rtio;
use net;
use super::{Loop, UvError, Request, wait_until_woken_after, wakeup};
use uvll;
pub struct Addrinfo {
handle: *const libc::addrinfo,
}
struct Ctx {
slot: Option<BlockedTask>,
status: c_int,
addrinfo: Option<Addrinfo>,
}
pub struct GetAddrInfoRequest;
impl GetAddrInfoRequest {
pub fn run(loop_: &Loop, node: Option<&str>, service: Option<&str>,
hints: Option<rtio::AddrinfoHint>)
-> Result<Vec<rtio::AddrinfoInfo>, UvError>
{
assert!(node.is_some() || service.is_some());
let (_c_node, c_node_ptr) = match node {
Some(n) => {
let c_node = n.to_c_str();
let c_node_ptr = c_node.as_ptr();
(Some(c_node), c_node_ptr)
}
None => (None, null())
};
let (_c_service, c_service_ptr) = match service {
Some(s) => {
let c_service = s.to_c_str();
let c_service_ptr = c_service.as_ptr();
(Some(c_service), c_service_ptr)
}
None => (None, null())
};
let hint = hints.map(|hint| {
libc::addrinfo {
ai_flags: 0,
ai_family: hint.family as c_int,
ai_socktype: 0,
ai_protocol: 0,
ai_addrlen: 0,
ai_canonname: null_mut(),
ai_addr: null_mut(),
ai_next: null_mut(),
}
});
let hint_ptr = hint.as_ref().map_or(null(), |x| {
x as *const libc::addrinfo
});
let mut req = Request::new(uvll::UV_GETADDRINFO);
return match unsafe {
uvll::uv_getaddrinfo(loop_.handle, req.handle,
getaddrinfo_cb, c_node_ptr, c_service_ptr,
hint_ptr)
} {
0 => {
req.defuse(); // uv callback now owns this request
let mut cx = Ctx { slot: None, status: 0, addrinfo: None };
wait_until_woken_after(&mut cx.slot, loop_, || {
req.set_data(&mut cx);
});
match cx.status {
0 => Ok(accum_addrinfo(cx.addrinfo.as_ref().unwrap())),
n => Err(UvError(n))
}
}
n => Err(UvError(n))
};
extern fn getaddrinfo_cb(req: *mut uvll::uv_getaddrinfo_t,
status: c_int,
res: *const libc::addrinfo) {
let req = Request::wrap(req);
assert!(status != uvll::ECANCELED);
let cx: &mut Ctx = unsafe { req.get_data() };
cx.status = status;
cx.addrinfo = Some(Addrinfo { handle: res });
wakeup(&mut cx.slot);
}
}
}
impl Drop for Addrinfo {
fn drop(&mut self) {
unsafe { uvll::uv_freeaddrinfo(self.handle as *mut _) }
}
}
// Traverse the addrinfo linked list, producing a vector of Rust socket addresses
pub fn accum_addrinfo(addr: &Addrinfo) -> Vec<rtio::AddrinfoInfo> {
unsafe {
let mut addr = addr.handle;
let mut addrs = Vec::new();
loop {
let rustaddr = net::sockaddr_to_addr(mem::transmute((*addr).ai_addr),
(*addr).ai_addrlen as uint);
addrs.push(rtio::AddrinfoInfo {
address: rustaddr,
family: (*addr).ai_family as uint,
socktype: 0,
protocol: 0,
flags: 0,
});
if (*addr).ai_next.is_not_null() {
addr = (*addr).ai_next as *const _;
} else {
break;
}
}
addrs
}
}


@ -1,156 +0,0 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use alloc::arc::Arc;
use std::mem;
use std::rt::exclusive::Exclusive;
use std::rt::rtio::{Callback, RemoteCallback};
use uvll;
use super::{Loop, UvHandle};
// The entire point of async is to call into a loop from other threads so it
// does not need to home.
pub struct AsyncWatcher {
handle: *mut uvll::uv_async_t,
// A flag to tell the callback to exit, set from the dtor. This is
// almost never contested - only in rare races with the dtor.
exit_flag: Arc<Exclusive<bool>>,
}
struct Payload {
callback: Box<Callback + Send>,
exit_flag: Arc<Exclusive<bool>>,
}
impl AsyncWatcher {
pub fn new(loop_: &mut Loop, cb: Box<Callback + Send>) -> AsyncWatcher {
let handle = UvHandle::alloc(None::<AsyncWatcher>, uvll::UV_ASYNC);
assert_eq!(unsafe {
uvll::uv_async_init(loop_.handle, handle, async_cb)
}, 0);
let flag = Arc::new(Exclusive::new(false));
let payload = box Payload { callback: cb, exit_flag: flag.clone() };
unsafe {
let payload: *mut u8 = mem::transmute(payload);
uvll::set_data_for_uv_handle(handle, payload);
}
return AsyncWatcher { handle: handle, exit_flag: flag, };
}
}
impl UvHandle<uvll::uv_async_t> for AsyncWatcher {
fn uv_handle(&self) -> *mut uvll::uv_async_t { self.handle }
unsafe fn from_uv_handle<'a>(_: &'a *mut uvll::uv_async_t) -> &'a mut AsyncWatcher {
fail!("async watchers can't be built from their handles");
}
}
extern fn async_cb(handle: *mut uvll::uv_async_t) {
let payload: &mut Payload = unsafe {
mem::transmute(uvll::get_data_for_uv_handle(handle))
};
// The synchronization logic here is subtle. To review,
// the uv async handle type promises that, after it is
// triggered the remote callback is definitely called at
// least once. UvRemoteCallback needs to maintain those
// semantics while also shutting down cleanly from the
// dtor. In our case that means that, when the
// UvRemoteCallback dtor calls `async.send()`, here `f` is
// always called later.
// In the dtor both the exit flag is set and the async
// callback fired under a lock. Here, before calling `f`,
// we take the lock and check the flag. Because we are
// checking the flag before calling `f`, and the flag is
// set under the same lock as the send, then if the flag
// is set then we're guaranteed to call `f` after the
// final send.
// If the check was done after `f()` then there would be a
// period between that call and the check where the dtor
// could be called in the other thread, missing the final
// callback while still destroying the handle.
let should_exit = unsafe { *payload.exit_flag.lock() };
payload.callback.call();
if should_exit {
unsafe { uvll::uv_close(handle, close_cb) }
}
}
extern fn close_cb(handle: *mut uvll::uv_handle_t) {
// drop the payload
let _payload: Box<Payload> = unsafe {
mem::transmute(uvll::get_data_for_uv_handle(handle))
};
// and then free the handle
unsafe { uvll::free_handle(handle) }
}
impl RemoteCallback for AsyncWatcher {
fn fire(&mut self) {
unsafe { uvll::uv_async_send(self.handle) }
}
}
impl Drop for AsyncWatcher {
fn drop(&mut self) {
let mut should_exit = unsafe { self.exit_flag.lock() };
// NB: These two things need to happen atomically. Otherwise
// the event handler could wake up due to a *previous*
// signal and see the exit flag, destroying the handle
// before the final send.
*should_exit = true;
unsafe { uvll::uv_async_send(self.handle) }
}
}
#[cfg(test)]
mod test_remote {
use std::rt::rtio::{Callback, RemoteCallback};
use std::rt::thread::Thread;
use super::AsyncWatcher;
use super::super::local_loop;
// Make sure that we can fire watchers in remote threads and that they
// actually trigger what they say they will.
#[test]
fn smoke_test() {
struct MyCallback(Option<Sender<int>>);
impl Callback for MyCallback {
fn call(&mut self) {
// this can get called more than once, but we only want to send
// once
let MyCallback(ref mut s) = *self;
if s.is_some() {
s.take().unwrap().send(1);
}
}
}
let (tx, rx) = channel();
let cb = box MyCallback(Some(tx));
let watcher = AsyncWatcher::new(&mut local_loop().loop_, cb);
let thread = Thread::start(proc() {
let mut watcher = watcher;
watcher.fire();
});
assert_eq!(rx.recv(), 1);
thread.join();
}
}


@ -1,581 +0,0 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use libc::{c_int, c_char, c_void, ssize_t};
use libc;
use std::c_str::CString;
use std::c_str;
use std::mem;
use std::os;
use std::rt::rtio::{IoResult, IoError};
use std::rt::rtio;
use std::rt::task::BlockedTask;
use homing::{HomingIO, HomeHandle};
use super::{Loop, UvError, uv_error_to_io_error, wait_until_woken_after, wakeup};
use uvio::UvIoFactory;
use uvll;
pub struct FsRequest {
req: *mut uvll::uv_fs_t,
fired: bool,
}
pub struct FileWatcher {
loop_: Loop,
fd: c_int,
close: rtio::CloseBehavior,
home: HomeHandle,
}
impl FsRequest {
pub fn open(io: &mut UvIoFactory, path: &CString, flags: int, mode: int)
-> Result<FileWatcher, UvError>
{
execute(|req, cb| unsafe {
uvll::uv_fs_open(io.uv_loop(),
req, path.as_ptr(), flags as c_int,
mode as c_int, cb)
}).map(|req|
FileWatcher::new(io, req.get_result() as c_int,
rtio::CloseSynchronously)
)
}
pub fn unlink(loop_: &Loop, path: &CString) -> Result<(), UvError> {
execute_nop(|req, cb| unsafe {
uvll::uv_fs_unlink(loop_.handle, req, path.as_ptr(),
cb)
})
}
pub fn lstat(loop_: &Loop, path: &CString)
-> Result<rtio::FileStat, UvError>
{
execute(|req, cb| unsafe {
uvll::uv_fs_lstat(loop_.handle, req, path.as_ptr(),
cb)
}).map(|req| req.mkstat())
}
pub fn stat(loop_: &Loop, path: &CString) -> Result<rtio::FileStat, UvError> {
execute(|req, cb| unsafe {
uvll::uv_fs_stat(loop_.handle, req, path.as_ptr(),
cb)
}).map(|req| req.mkstat())
}
pub fn fstat(loop_: &Loop, fd: c_int) -> Result<rtio::FileStat, UvError> {
execute(|req, cb| unsafe {
uvll::uv_fs_fstat(loop_.handle, req, fd, cb)
}).map(|req| req.mkstat())
}
pub fn write(loop_: &Loop, fd: c_int, buf: &[u8], offset: i64)
-> Result<(), UvError>
{
// In libuv, uv_fs_write is basically just shelling out to a write()
// syscall at some point, with very little fluff around it. This means
// that write() could actually be a short write, so we need to be sure
// to call it continuously if we get a short write back. This method is
// expected to write the full data if it returns success.
let mut written = 0;
while written < buf.len() {
let offset = if offset == -1 {
offset
} else {
offset + written as i64
};
let uvbuf = uvll::uv_buf_t {
base: buf.slice_from(written as uint).as_ptr() as *mut _,
len: (buf.len() - written) as uvll::uv_buf_len_t,
};
match execute(|req, cb| unsafe {
uvll::uv_fs_write(loop_.handle, req, fd, &uvbuf, 1, offset, cb)
}).map(|req| req.get_result()) {
Err(e) => return Err(e),
Ok(n) => { written += n as uint; }
}
}
Ok(())
}
pub fn read(loop_: &Loop, fd: c_int, buf: &mut [u8], offset: i64)
-> Result<int, UvError>
{
execute(|req, cb| unsafe {
let mut uvbuf = uvll::uv_buf_t {
base: buf.as_mut_ptr(),
len: buf.len() as uvll::uv_buf_len_t,
};
uvll::uv_fs_read(loop_.handle, req, fd, &mut uvbuf, 1, offset, cb)
}).map(|req| {
req.get_result() as int
})
}
pub fn mkdir(loop_: &Loop, path: &CString, mode: c_int)
-> Result<(), UvError>
{
execute_nop(|req, cb| unsafe {
uvll::uv_fs_mkdir(loop_.handle, req, path.as_ptr(),
mode, cb)
})
}
pub fn rmdir(loop_: &Loop, path: &CString) -> Result<(), UvError> {
execute_nop(|req, cb| unsafe {
uvll::uv_fs_rmdir(loop_.handle, req, path.as_ptr(),
cb)
})
}
pub fn rename(loop_: &Loop, path: &CString, to: &CString)
-> Result<(), UvError>
{
execute_nop(|req, cb| unsafe {
uvll::uv_fs_rename(loop_.handle,
req,
path.as_ptr(),
to.as_ptr(),
cb)
})
}
pub fn chmod(loop_: &Loop, path: &CString, mode: c_int)
-> Result<(), UvError>
{
execute_nop(|req, cb| unsafe {
uvll::uv_fs_chmod(loop_.handle, req, path.as_ptr(),
mode, cb)
})
}
pub fn readdir(loop_: &Loop, path: &CString, flags: c_int)
-> Result<Vec<CString>, UvError>
{
execute(|req, cb| unsafe {
uvll::uv_fs_readdir(loop_.handle,
req, path.as_ptr(), flags, cb)
}).map(|req| unsafe {
let mut paths = vec!();
let path = CString::new(path.as_ptr(), false);
let parent = Path::new(path);
let _ = c_str::from_c_multistring(req.get_ptr() as *const libc::c_char,
Some(req.get_result() as uint),
|rel| {
let p = rel.as_bytes();
paths.push(parent.join(p.slice_to(rel.len())).to_c_str());
});
paths
})
}
pub fn readlink(loop_: &Loop, path: &CString) -> Result<CString, UvError> {
execute(|req, cb| unsafe {
uvll::uv_fs_readlink(loop_.handle, req,
path.as_ptr(), cb)
}).map(|req| {
// Be sure to clone the cstring so we get an independently owned
// allocation to work with and return.
unsafe {
CString::new(req.get_ptr() as *const libc::c_char, false).clone()
}
})
}
pub fn chown(loop_: &Loop, path: &CString, uid: int, gid: int)
-> Result<(), UvError>
{
execute_nop(|req, cb| unsafe {
uvll::uv_fs_chown(loop_.handle,
req, path.as_ptr(),
uid as uvll::uv_uid_t,
gid as uvll::uv_gid_t,
cb)
})
}
pub fn truncate(loop_: &Loop, file: c_int, offset: i64)
-> Result<(), UvError>
{
execute_nop(|req, cb| unsafe {
uvll::uv_fs_ftruncate(loop_.handle, req, file, offset, cb)
})
}
pub fn link(loop_: &Loop, src: &CString, dst: &CString)
-> Result<(), UvError>
{
execute_nop(|req, cb| unsafe {
uvll::uv_fs_link(loop_.handle, req,
src.as_ptr(),
dst.as_ptr(),
cb)
})
}
pub fn symlink(loop_: &Loop, src: &CString, dst: &CString)
-> Result<(), UvError>
{
execute_nop(|req, cb| unsafe {
uvll::uv_fs_symlink(loop_.handle, req,
src.as_ptr(),
dst.as_ptr(),
0, cb)
})
}
pub fn fsync(loop_: &Loop, fd: c_int) -> Result<(), UvError> {
execute_nop(|req, cb| unsafe {
uvll::uv_fs_fsync(loop_.handle, req, fd, cb)
})
}
pub fn datasync(loop_: &Loop, fd: c_int) -> Result<(), UvError> {
execute_nop(|req, cb| unsafe {
uvll::uv_fs_fdatasync(loop_.handle, req, fd, cb)
})
}
pub fn utime(loop_: &Loop, path: &CString, atime: u64, mtime: u64)
-> Result<(), UvError>
{
// libuv takes seconds
let atime = atime as libc::c_double / 1000.0;
let mtime = mtime as libc::c_double / 1000.0;
execute_nop(|req, cb| unsafe {
uvll::uv_fs_utime(loop_.handle, req, path.as_ptr(),
atime, mtime, cb)
})
}
pub fn get_result(&self) -> ssize_t {
unsafe { uvll::get_result_from_fs_req(self.req) }
}
pub fn get_stat(&self) -> uvll::uv_stat_t {
let mut stat = uvll::uv_stat_t::new();
unsafe { uvll::populate_stat(self.req, &mut stat); }
stat
}
pub fn get_ptr(&self) -> *mut libc::c_void {
unsafe { uvll::get_ptr_from_fs_req(self.req) }
}
pub fn mkstat(&self) -> rtio::FileStat {
let stat = self.get_stat();
fn to_msec(stat: uvll::uv_timespec_t) -> u64 {
// Be sure to cast to u64 first to prevent overflowing if the tv_sec
// field is a 32-bit integer.
(stat.tv_sec as u64) * 1000 + (stat.tv_nsec as u64) / 1000000
}
rtio::FileStat {
size: stat.st_size as u64,
kind: stat.st_mode as u64,
perm: stat.st_mode as u64,
created: to_msec(stat.st_birthtim),
modified: to_msec(stat.st_mtim),
accessed: to_msec(stat.st_atim),
device: stat.st_dev as u64,
inode: stat.st_ino as u64,
rdev: stat.st_rdev as u64,
nlink: stat.st_nlink as u64,
uid: stat.st_uid as u64,
gid: stat.st_gid as u64,
blksize: stat.st_blksize as u64,
blocks: stat.st_blocks as u64,
flags: stat.st_flags as u64,
gen: stat.st_gen as u64,
}
}
}
impl Drop for FsRequest {
fn drop(&mut self) {
unsafe {
if self.fired {
uvll::uv_fs_req_cleanup(self.req);
}
uvll::free_req(self.req);
}
}
}
fn execute(f: |*mut uvll::uv_fs_t, uvll::uv_fs_cb| -> c_int)
-> Result<FsRequest, UvError>
{
let mut req = FsRequest {
fired: false,
req: unsafe { uvll::malloc_req(uvll::UV_FS) }
};
return match f(req.req, fs_cb) {
0 => {
req.fired = true;
let mut slot = None;
let loop_ = unsafe { uvll::get_loop_from_fs_req(req.req) };
wait_until_woken_after(&mut slot, &Loop::wrap(loop_), || {
unsafe { uvll::set_data_for_req(req.req, &mut slot) }
});
match req.get_result() {
n if n < 0 => Err(UvError(n as i32)),
_ => Ok(req),
}
}
n => Err(UvError(n))
};
extern fn fs_cb(req: *mut uvll::uv_fs_t) {
let slot: &mut Option<BlockedTask> = unsafe {
mem::transmute(uvll::get_data_for_req(req))
};
wakeup(slot);
}
}
fn execute_nop(f: |*mut uvll::uv_fs_t, uvll::uv_fs_cb| -> c_int)
-> Result<(), UvError> {
execute(f).map(|_| {})
}
impl HomingIO for FileWatcher {
fn home<'r>(&'r mut self) -> &'r mut HomeHandle { &mut self.home }
}
impl FileWatcher {
pub fn new(io: &mut UvIoFactory, fd: c_int,
close: rtio::CloseBehavior) -> FileWatcher {
FileWatcher {
loop_: Loop::wrap(io.uv_loop()),
fd: fd,
close: close,
home: io.make_handle(),
}
}
fn base_read(&mut self, buf: &mut [u8], offset: i64) -> IoResult<int> {
let _m = self.fire_homing_missile();
let r = FsRequest::read(&self.loop_, self.fd, buf, offset);
r.map_err(uv_error_to_io_error)
}
fn base_write(&mut self, buf: &[u8], offset: i64) -> IoResult<()> {
let _m = self.fire_homing_missile();
let r = FsRequest::write(&self.loop_, self.fd, buf, offset);
r.map_err(uv_error_to_io_error)
}
fn seek_common(&self, pos: i64, whence: c_int) -> IoResult<u64>{
match unsafe { libc::lseek(self.fd, pos as libc::off_t, whence) } {
-1 => {
Err(IoError {
code: os::errno() as uint,
extra: 0,
detail: None,
})
},
n => Ok(n as u64)
}
}
}
impl Drop for FileWatcher {
fn drop(&mut self) {
let _m = self.fire_homing_missile();
match self.close {
rtio::DontClose => {}
rtio::CloseAsynchronously => {
unsafe {
let req = uvll::malloc_req(uvll::UV_FS);
assert_eq!(uvll::uv_fs_close(self.loop_.handle, req,
self.fd, close_cb), 0);
}
extern fn close_cb(req: *mut uvll::uv_fs_t) {
unsafe {
uvll::uv_fs_req_cleanup(req);
uvll::free_req(req);
}
}
}
rtio::CloseSynchronously => {
let _ = execute_nop(|req, cb| unsafe {
uvll::uv_fs_close(self.loop_.handle, req, self.fd, cb)
});
}
}
}
}
impl rtio::RtioFileStream for FileWatcher {
fn read(&mut self, buf: &mut [u8]) -> IoResult<int> {
self.base_read(buf, -1)
}
fn write(&mut self, buf: &[u8]) -> IoResult<()> {
self.base_write(buf, -1)
}
fn pread(&mut self, buf: &mut [u8], offset: u64) -> IoResult<int> {
self.base_read(buf, offset as i64)
}
fn pwrite(&mut self, buf: &[u8], offset: u64) -> IoResult<()> {
self.base_write(buf, offset as i64)
}
fn seek(&mut self, pos: i64, whence: rtio::SeekStyle) -> IoResult<u64> {
use libc::{SEEK_SET, SEEK_CUR, SEEK_END};
let whence = match whence {
rtio::SeekSet => SEEK_SET,
rtio::SeekCur => SEEK_CUR,
rtio::SeekEnd => SEEK_END
};
self.seek_common(pos, whence)
}
fn tell(&self) -> IoResult<u64> {
use libc::SEEK_CUR;
self.seek_common(0, SEEK_CUR)
}
fn fsync(&mut self) -> IoResult<()> {
let _m = self.fire_homing_missile();
FsRequest::fsync(&self.loop_, self.fd).map_err(uv_error_to_io_error)
}
fn datasync(&mut self) -> IoResult<()> {
let _m = self.fire_homing_missile();
FsRequest::datasync(&self.loop_, self.fd).map_err(uv_error_to_io_error)
}
fn truncate(&mut self, offset: i64) -> IoResult<()> {
let _m = self.fire_homing_missile();
let r = FsRequest::truncate(&self.loop_, self.fd, offset);
r.map_err(uv_error_to_io_error)
}
fn fstat(&mut self) -> IoResult<rtio::FileStat> {
let _m = self.fire_homing_missile();
FsRequest::fstat(&self.loop_, self.fd).map_err(uv_error_to_io_error)
}
}
#[cfg(test)]
mod test {
use libc::c_int;
use libc::{O_CREAT, O_RDWR, O_RDONLY, S_IWUSR, S_IRUSR};
use std::str;
use super::FsRequest;
use super::super::Loop;
use super::super::local_loop;
fn l() -> &'static mut Loop { &mut local_loop().loop_ }
#[test]
fn file_test_full_simple_sync() {
let create_flags = O_RDWR | O_CREAT;
let read_flags = O_RDONLY;
let mode = S_IWUSR | S_IRUSR;
let path_str = "./tmp/file_full_simple_sync.txt";
{
// open/create
let result = FsRequest::open(local_loop(), &path_str.to_c_str(),
create_flags as int, mode as int);
assert!(result.is_ok());
let result = result.unwrap();
let fd = result.fd;
// write
let result = FsRequest::write(l(), fd, "hello".as_bytes(), -1);
assert!(result.is_ok());
}
{
// re-open
let result = FsRequest::open(local_loop(), &path_str.to_c_str(),
read_flags as int, 0);
assert!(result.is_ok());
let result = result.unwrap();
let fd = result.fd;
// read
let mut read_mem = Vec::from_elem(1000, 0u8);
let result = FsRequest::read(l(), fd, read_mem.as_mut_slice(), 0);
assert!(result.is_ok());
let nread = result.unwrap();
assert!(nread > 0);
let read_str = str::from_utf8(read_mem.slice_to(nread as uint)).unwrap();
assert_eq!(read_str, "hello");
}
// unlink
let result = FsRequest::unlink(l(), &path_str.to_c_str());
assert!(result.is_ok());
}
#[test]
fn file_test_stat() {
let path = &"./tmp/file_test_stat_simple".to_c_str();
let create_flags = (O_RDWR | O_CREAT) as int;
let mode = (S_IWUSR | S_IRUSR) as int;
let result = FsRequest::open(local_loop(), path, create_flags, mode);
assert!(result.is_ok());
let file = result.unwrap();
let result = FsRequest::write(l(), file.fd, "hello".as_bytes(), 0);
assert!(result.is_ok());
let result = FsRequest::stat(l(), path);
assert!(result.is_ok());
assert_eq!(result.unwrap().size, 5);
let result = FsRequest::fstat(l(), file.fd);
assert!(result.is_ok());
assert_eq!(result.unwrap().size, 5);
fn free<T>(_: T) {}
free(file);
let result = FsRequest::unlink(l(), path);
assert!(result.is_ok());
}
#[test]
fn file_test_mk_rm_dir() {
let path = &"./tmp/mk_rm_dir".to_c_str();
let mode = S_IWUSR | S_IRUSR;
let result = FsRequest::mkdir(l(), path, mode as c_int);
assert!(result.is_ok());
let result = FsRequest::rmdir(l(), path);
assert!(result.is_ok());
let result = FsRequest::stat(l(), path);
assert!(result.is_err());
}
#[test]
fn file_test_mkdir_chokes_on_double_create() {
let path = &"./tmp/double_create_dir".to_c_str();
let mode = S_IWUSR | S_IRUSR;
let result = FsRequest::stat(l(), path);
assert!(result.is_err(), "{:?}", result);
let result = FsRequest::mkdir(l(), path, mode as c_int);
assert!(result.is_ok(), "{:?}", result);
let result = FsRequest::mkdir(l(), path, mode as c_int);
assert!(result.is_err(), "{:?}", result);
let result = FsRequest::rmdir(l(), path);
assert!(result.is_ok(), "{:?}", result);
}
#[test]
fn file_test_rmdir_chokes_on_nonexistant_path() {
let path = &"./tmp/never_existed_dir".to_c_str();
let result = FsRequest::rmdir(l(), path);
assert!(result.is_err());
}
}


@ -1,214 +0,0 @@
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Homing I/O implementation
//!
//! In libuv, whenever a handle is created on an I/O loop it is illegal to use
//! that handle outside of that I/O loop. We use libuv I/O with our green
//! scheduler, and each green scheduler corresponds to a different I/O loop on a
//! different OS thread. Green tasks are also free to roam among schedulers,
//! which implies that it is possible to create an I/O handle on one event loop
//! and then attempt to use it on another.
//!
//! In order to solve this problem, this module implements the notion of a
//! "homing operation" which will transplant a task from its currently running
//! scheduler back onto the original I/O loop. This is accomplished entirely at
//! the librustuv layer with very little cooperation from the scheduler (which
//! we don't even know exists technically).
//!
//! These homing operations are completed by first realizing that we're on the
//! wrong I/O loop, then descheduling ourselves, sending ourselves to the
//! correct I/O loop, and then waking up the I/O loop in order to process its
//! local queue of tasks which need to run.
//!
//! This enqueueing is done with a concurrent queue from libstd, and the
//! signalling is achieved with an async handle.
#![allow(dead_code)]
use std::mem;
use std::rt::local::Local;
use std::rt::rtio::LocalIo;
use std::rt::task::{Task, BlockedTask};
use ForbidUnwind;
use queue::{Queue, QueuePool};
/// A handle to a remote libuv event loop. This handle will keep the event loop
/// alive while active in order to ensure that a homing operation can always be
/// completed.
///
/// Handles are clone-able in order to derive new handles from existing handles
/// (very useful for when accepting a socket from a server).
pub struct HomeHandle {
queue: Queue,
id: uint,
}
impl HomeHandle {
pub fn new(id: uint, pool: &mut QueuePool) -> HomeHandle {
HomeHandle { queue: pool.queue(), id: id }
}
fn send(&mut self, task: BlockedTask) {
self.queue.push(task);
}
}
impl Clone for HomeHandle {
fn clone(&self) -> HomeHandle {
HomeHandle {
queue: self.queue.clone(),
id: self.id,
}
}
}
pub fn local_id() -> uint {
use std::raw::TraitObject;
let mut io = match LocalIo::borrow() {
Some(io) => io, None => return 0,
};
let io = io.get();
unsafe {
let obj: TraitObject = mem::transmute(io);
return mem::transmute(obj.data);
}
}
#[doc(hidden)]
pub trait HomingIO {
fn home<'r>(&'r mut self) -> &'r mut HomeHandle;
/// This function will move tasks to run on their home I/O scheduler. Note
/// that this function does *not* pin the task to the I/O scheduler, but
/// rather it simply moves it to running on the I/O scheduler.
fn go_to_io_home(&mut self) -> uint {
let _f = ForbidUnwind::new("going home");
let cur_loop_id = local_id();
let destination = self.home().id;
// Try at all costs to avoid the homing operation because it is quite
// expensive. Hence, we only deschedule/send if we're not on the correct
// event loop. If we're already on the home event loop, then we're good
// to go (remember we have no preemption, so we're guaranteed to stay on
// this event loop as long as we avoid the scheduler).
if cur_loop_id != destination {
let cur_task: Box<Task> = Local::take();
cur_task.deschedule(1, |task| {
self.home().send(task);
Ok(())
});
// Once we wake up, assert that we're in the right location
assert_eq!(local_id(), destination);
}
return destination;
}
/// Fires a single homing missile, returning another missile targeted back
/// at the original home of this task. In other words, this function will
/// move the local task to its I/O scheduler and then return an RAII wrapper
/// which will return the task home.
fn fire_homing_missile(&mut self) -> HomingMissile {
HomingMissile { io_home: self.go_to_io_home() }
}
}
/// After a homing operation has been completed, this will return the current
/// task back to its appropriate home (if applicable). The field is used to
/// assert that we are where we think we are.
pub struct HomingMissile {
io_home: uint,
}
impl HomingMissile {
/// Check at runtime that the task has *not* transplanted itself to a
/// different I/O loop while executing.
pub fn check(&self, msg: &'static str) {
assert!(local_id() == self.io_home, "{}", msg);
}
}
impl Drop for HomingMissile {
fn drop(&mut self) {
let _f = ForbidUnwind::new("leaving home");
// It would truly be a sad day if we had moved off the home I/O
// scheduler while we were doing I/O.
self.check("task moved away from the home scheduler");
}
}
#[cfg(test)]
mod test {
use green::sched;
use green::{SchedPool, PoolConfig};
use std::rt::rtio::RtioUdpSocket;
use std::rt::task::TaskOpts;
use net::UdpWatcher;
use super::super::local_loop;
// On one thread, create a udp socket. Then send that socket to another
// thread and destroy the socket on the remote thread. This should make sure
// that homing kicks in for the socket to go back home to the original
// thread, close itself, and then come back to the last thread.
#[test]
fn test_homing_closes_correctly() {
let (tx, rx) = channel();
let mut pool = SchedPool::new(PoolConfig {
threads: 1,
event_loop_factory: ::event_loop,
});
pool.spawn(TaskOpts::new(), proc() {
let listener = UdpWatcher::bind(local_loop(), ::next_test_ip4());
tx.send(listener.unwrap());
});
let task = pool.task(TaskOpts::new(), proc() {
drop(rx.recv());
});
pool.spawn_sched().send(sched::TaskFromFriend(task));
pool.shutdown();
}
#[test]
fn test_homing_read() {
let (tx, rx) = channel();
let mut pool = SchedPool::new(PoolConfig {
threads: 1,
event_loop_factory: ::event_loop,
});
pool.spawn(TaskOpts::new(), proc() {
let addr1 = ::next_test_ip4();
let addr2 = ::next_test_ip4();
let listener = UdpWatcher::bind(local_loop(), addr2);
tx.send((listener.unwrap(), addr1));
let mut listener = UdpWatcher::bind(local_loop(), addr1).unwrap();
listener.send_to([1, 2, 3, 4], addr2).ok().unwrap();
});
let task = pool.task(TaskOpts::new(), proc() {
let (mut watcher, addr) = rx.recv();
let mut buf = [0, ..10];
assert!(watcher.recv_from(buf).ok().unwrap() == (4, addr));
});
pool.spawn_sched().send(sched::TaskFromFriend(task));
pool.shutdown();
}
}

View File

@ -1,206 +0,0 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use libc::c_void;
use std::mem;
use uvll;
use super::{Loop, UvHandle};
use std::rt::rtio::{Callback, PausableIdleCallback};
pub struct IdleWatcher {
handle: *mut uvll::uv_idle_t,
idle_flag: bool,
callback: Box<Callback + Send>,
}
impl IdleWatcher {
pub fn new(loop_: &mut Loop, cb: Box<Callback + Send>) -> Box<IdleWatcher> {
let handle = UvHandle::alloc(None::<IdleWatcher>, uvll::UV_IDLE);
assert_eq!(unsafe {
uvll::uv_idle_init(loop_.handle, handle)
}, 0);
let me = box IdleWatcher {
handle: handle,
idle_flag: false,
callback: cb,
};
return me.install();
}
pub fn onetime(loop_: &mut Loop, f: proc()) {
let handle = UvHandle::alloc(None::<IdleWatcher>, uvll::UV_IDLE);
unsafe {
assert_eq!(uvll::uv_idle_init(loop_.handle, handle), 0);
let data: *mut c_void = mem::transmute(box f);
uvll::set_data_for_uv_handle(handle, data);
assert_eq!(uvll::uv_idle_start(handle, onetime_cb), 0)
}
extern fn onetime_cb(handle: *mut uvll::uv_idle_t) {
unsafe {
let data = uvll::get_data_for_uv_handle(handle);
let f: Box<proc()> = mem::transmute(data);
(*f)();
assert_eq!(uvll::uv_idle_stop(handle), 0);
uvll::uv_close(handle, close_cb);
}
}
extern fn close_cb(handle: *mut uvll::uv_handle_t) {
unsafe { uvll::free_handle(handle) }
}
}
}
impl PausableIdleCallback for IdleWatcher {
fn pause(&mut self) {
if self.idle_flag == true {
assert_eq!(unsafe {uvll::uv_idle_stop(self.handle) }, 0);
self.idle_flag = false;
}
}
fn resume(&mut self) {
if self.idle_flag == false {
assert_eq!(unsafe { uvll::uv_idle_start(self.handle, idle_cb) }, 0);
self.idle_flag = true;
}
}
}
impl UvHandle<uvll::uv_idle_t> for IdleWatcher {
fn uv_handle(&self) -> *mut uvll::uv_idle_t { self.handle }
}
extern fn idle_cb(handle: *mut uvll::uv_idle_t) {
let idle: &mut IdleWatcher = unsafe { UvHandle::from_uv_handle(&handle) };
idle.callback.call();
}
impl Drop for IdleWatcher {
fn drop(&mut self) {
self.pause();
self.close_async_();
}
}
#[cfg(test)]
mod test {
use std::mem;
use std::cell::RefCell;
use std::rc::Rc;
use std::rt::rtio::{Callback, PausableIdleCallback};
use std::rt::task::{BlockedTask, Task};
use std::rt::local::Local;
use super::IdleWatcher;
use super::super::local_loop;
type Chan = Rc<RefCell<(Option<BlockedTask>, uint)>>;
struct MyCallback(Rc<RefCell<(Option<BlockedTask>, uint)>>, uint);
impl Callback for MyCallback {
fn call(&mut self) {
let task = match *self {
MyCallback(ref rc, n) => {
match *rc.borrow_mut().deref_mut() {
(ref mut task, ref mut val) => {
*val = n;
match task.take() {
Some(t) => t,
None => return
}
}
}
}
};
let _ = task.wake().map(|t| t.reawaken());
}
}
fn mk(v: uint) -> (Box<IdleWatcher>, Chan) {
let rc = Rc::new(RefCell::new((None, 0)));
let cb = box MyCallback(rc.clone(), v);
let cb = cb as Box<Callback>;
let cb = unsafe { mem::transmute(cb) };
(IdleWatcher::new(&mut local_loop().loop_, cb), rc)
}
fn sleep(chan: &Chan) -> uint {
let task: Box<Task> = Local::take();
task.deschedule(1, |task| {
match *chan.borrow_mut().deref_mut() {
(ref mut slot, _) => {
assert!(slot.is_none());
*slot = Some(task);
}
}
Ok(())
});
match *chan.borrow() { (_, n) => n }
}
#[test]
fn not_used() {
let (_idle, _chan) = mk(1);
}
#[test]
fn smoke_test() {
let (mut idle, chan) = mk(1);
idle.resume();
assert_eq!(sleep(&chan), 1);
}
#[test] #[should_fail]
fn smoke_fail() {
// By default, the test harness is capturing our stderr output through a
// channel. This means that when we start failing and "print" our error
// message, we could be switched to running on another test. The
// IdleWatcher assumes that we're already running on the same task, so
// it can cause serious problems and internal race conditions.
//
// To fix this bug, we just set our stderr to a null writer which will
// never reschedule us, so we're guaranteed to stay on the same
// task/event loop.
use std::io;
drop(io::stdio::set_stderr(box io::util::NullWriter));
let (mut idle, _chan) = mk(1);
idle.resume();
fail!();
}
#[test]
fn fun_combinations_of_methods() {
let (mut idle, chan) = mk(1);
idle.resume();
assert_eq!(sleep(&chan), 1);
idle.pause();
idle.resume();
idle.resume();
assert_eq!(sleep(&chan), 1);
idle.pause();
idle.pause();
idle.resume();
assert_eq!(sleep(&chan), 1);
}
#[test]
fn pause_pauses() {
let (mut idle1, chan1) = mk(1);
let (mut idle2, chan2) = mk(2);
idle2.resume();
assert_eq!(sleep(&chan2), 2);
idle2.pause();
idle1.resume();
assert_eq!(sleep(&chan1), 1);
}
}

View File

@ -1,536 +0,0 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
Bindings to libuv, along with the default implementation of `std::rt::rtio`.
UV types consist of the event loop (Loop), Watchers, Requests and
Callbacks.
Watchers and Requests encapsulate pointers to uv *handles*, which have
subtyping relationships with each other. This subtyping is reflected
in the bindings with explicit or implicit coercions. For example, an
upcast from TcpWatcher to StreamWatcher is done with
`tcp_watcher.as_stream()`. In other cases a callback on a specific
type of watcher will be passed a watcher of a supertype.
Currently all uses of Request types (connect/write requests) are
encapsulated in the bindings and don't need to be dealt with by the
caller.
# Safety note
Due to the complex lifecycle of uv handles, as well as compiler bugs,
this module is not memory safe and requires explicit memory management,
via `close` and `delete` methods.
*/
#![crate_name = "rustuv"]
#![experimental]
#![license = "MIT/ASL2"]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/master/",
html_playground_url = "http://play.rust-lang.org/")]
#![feature(macro_rules, unsafe_destructor)]
#![deny(unused_result, unused_must_use)]
#![reexport_test_harness_main = "test_main"]
#[cfg(test)] extern crate green;
#[cfg(test)] extern crate debug;
#[cfg(test)] extern crate "rustuv" as realrustuv;
extern crate libc;
extern crate alloc;
use libc::{c_int, c_void};
use std::fmt;
use std::mem;
use std::ptr;
use std::string;
use std::rt::local::Local;
use std::rt::rtio;
use std::rt::rtio::{IoResult, IoError};
use std::rt::task::{BlockedTask, Task};
use std::task;
pub use self::async::AsyncWatcher;
pub use self::file::{FsRequest, FileWatcher};
pub use self::idle::IdleWatcher;
pub use self::net::{TcpWatcher, TcpListener, TcpAcceptor, UdpWatcher};
pub use self::pipe::{PipeWatcher, PipeListener, PipeAcceptor};
pub use self::process::Process;
pub use self::signal::SignalWatcher;
pub use self::timer::TimerWatcher;
pub use self::tty::TtyWatcher;
// Run tests with libgreen instead of libnative.
#[cfg(test)] #[start]
fn start(argc: int, argv: *const *const u8) -> int {
green::start(argc, argv, event_loop, test_main)
}
mod macros;
mod access;
mod timeout;
mod homing;
mod queue;
mod rc;
pub mod uvio;
pub mod uvll;
pub mod file;
pub mod net;
pub mod idle;
pub mod timer;
pub mod async;
pub mod addrinfo;
pub mod process;
pub mod pipe;
pub mod tty;
pub mod signal;
pub mod stream;
/// Creates a new event loop which is powered by libuv
///
/// This function is used in tandem with libgreen's `PoolConfig` type as a value
/// for the `event_loop_factory` field. Using this function as the event loop
/// factory will power programs with libuv and enable green threading.
///
/// # Example
///
/// ```
/// extern crate rustuv;
/// extern crate green;
///
/// #[start]
/// fn start(argc: int, argv: *const *const u8) -> int {
/// green::start(argc, argv, rustuv::event_loop, main)
/// }
///
/// fn main() {
/// // this code is running inside of a green task powered by libuv
/// }
/// ```
pub fn event_loop() -> Box<rtio::EventLoop + Send> {
box uvio::UvEventLoop::new() as Box<rtio::EventLoop + Send>
}
/// A type that wraps a uv handle
pub trait UvHandle<T> {
fn uv_handle(&self) -> *mut T;
fn uv_loop(&self) -> Loop {
Loop::wrap(unsafe { uvll::get_loop_for_uv_handle(self.uv_handle()) })
}
// FIXME(#8888) dummy self
fn alloc(_: Option<Self>, ty: uvll::uv_handle_type) -> *mut T {
unsafe {
let handle = uvll::malloc_handle(ty);
assert!(!handle.is_null());
handle as *mut T
}
}
unsafe fn from_uv_handle<'a>(h: &'a *mut T) -> &'a mut Self {
mem::transmute(uvll::get_data_for_uv_handle(*h))
}
fn install(self: Box<Self>) -> Box<Self> {
unsafe {
let myptr = mem::transmute::<&Box<Self>, &*mut u8>(&self);
uvll::set_data_for_uv_handle(self.uv_handle(), *myptr);
}
self
}
fn close_async_(&mut self) {
// we used malloc to allocate all handles, so we must always have at
// least a callback to free all the handles we allocated.
extern fn close_cb(handle: *mut uvll::uv_handle_t) {
unsafe { uvll::free_handle(handle) }
}
unsafe {
uvll::set_data_for_uv_handle(self.uv_handle(), ptr::null_mut::<()>());
uvll::uv_close(self.uv_handle() as *mut uvll::uv_handle_t, close_cb)
}
}
fn close(&mut self) {
let mut slot = None;
unsafe {
uvll::uv_close(self.uv_handle() as *mut uvll::uv_handle_t, close_cb);
uvll::set_data_for_uv_handle(self.uv_handle(),
ptr::null_mut::<()>());
wait_until_woken_after(&mut slot, &self.uv_loop(), || {
uvll::set_data_for_uv_handle(self.uv_handle(), &mut slot);
})
}
extern fn close_cb(handle: *mut uvll::uv_handle_t) {
unsafe {
let data = uvll::get_data_for_uv_handle(handle);
uvll::free_handle(handle);
if data == ptr::null_mut() { return }
let slot: &mut Option<BlockedTask> = mem::transmute(data);
wakeup(slot);
}
}
}
}
pub struct ForbidSwitch {
msg: &'static str,
io: uint,
}
impl ForbidSwitch {
fn new(s: &'static str) -> ForbidSwitch {
ForbidSwitch {
msg: s,
io: homing::local_id(),
}
}
}
impl Drop for ForbidSwitch {
fn drop(&mut self) {
assert!(self.io == homing::local_id(),
"didn't want a scheduler switch: {}",
self.msg);
}
}
pub struct ForbidUnwind {
msg: &'static str,
failing_before: bool,
}
impl ForbidUnwind {
fn new(s: &'static str) -> ForbidUnwind {
ForbidUnwind {
msg: s, failing_before: task::failing(),
}
}
}
impl Drop for ForbidUnwind {
fn drop(&mut self) {
assert!(self.failing_before == task::failing(),
"didn't want an unwind during: {}", self.msg);
}
}
fn wait_until_woken_after(slot: *mut Option<BlockedTask>,
loop_: &Loop,
f: ||) {
let _f = ForbidUnwind::new("wait_until_woken_after");
unsafe {
assert!((*slot).is_none());
let task: Box<Task> = Local::take();
loop_.modify_blockers(1);
task.deschedule(1, |task| {
*slot = Some(task);
f();
Ok(())
});
loop_.modify_blockers(-1);
}
}
fn wakeup(slot: &mut Option<BlockedTask>) {
assert!(slot.is_some());
let _ = slot.take().unwrap().wake().map(|t| t.reawaken());
}
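
These two helpers form the crate's basic blocking idiom: a task parks itself in a slot that a later uv callback can find and wake. A minimal sketch of the pattern (with the uv-callback half elided; this helper is illustrative and not part of the original file) might look like:

```
fn block_on_some_uv_event(loop_: &Loop) {
    let mut slot: Option<BlockedTask> = None;
    wait_until_woken_after(&mut slot, loop_, || {
        // While the current task is being descheduled, hand `&mut slot` to
        // libuv (e.g. via uvll::set_data_for_uv_handle) so that the eventual
        // uv callback can find it and call `wakeup(&mut slot)`.
    });
    // Execution resumes here once a callback has called `wakeup` on the slot.
}
```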
pub struct Request {
pub handle: *mut uvll::uv_req_t,
defused: bool,
}
impl Request {
pub fn new(ty: uvll::uv_req_type) -> Request {
unsafe {
let handle = uvll::malloc_req(ty);
uvll::set_data_for_req(handle, ptr::null_mut::<()>());
Request::wrap(handle)
}
}
pub fn wrap(handle: *mut uvll::uv_req_t) -> Request {
Request { handle: handle, defused: false }
}
pub fn set_data<T>(&self, t: *mut T) {
unsafe { uvll::set_data_for_req(self.handle, t) }
}
pub unsafe fn get_data<T>(&self) -> &'static mut T {
let data = uvll::get_data_for_req(self.handle);
assert!(data != ptr::null_mut());
mem::transmute(data)
}
// This function should be used when the request handle has been given to an
// underlying uv function, and the uv function has succeeded. This means
// that uv will at some point invoke the callback, and in the meantime we
// can't deallocate the handle because libuv could be using it.
//
// This is still a problem in blocking situations due to linked failure. In
// the connection callback the handle should be re-wrapped with the `wrap`
// function to ensure its destruction.
pub fn defuse(&mut self) {
self.defused = true;
}
}
impl Drop for Request {
fn drop(&mut self) {
if !self.defused {
unsafe { uvll::free_req(self.handle) }
}
}
}
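
A condensed sketch of this defuse protocol, with the actual uv_* submission call abstracted behind a closure argument (this helper is illustrative only and does not exist in the crate):

```
fn start_request(ty: uvll::uv_req_type,
                 submit: |*mut uvll::uv_req_t| -> c_int) -> Result<(), UvError> {
    let mut req = Request::new(ty);
    match submit(req.handle) {
        // On success libuv owns the handle; the completion callback is
        // expected to re-wrap it so the memory is eventually freed.
        0 => { req.defuse(); Ok(()) }
        n => Err(UvError(n)),   // `req` is dropped here and its memory freed
    }
}
```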
/// FIXME: Loop(*handle) is buggy with destructors. Normal structs
/// with dtors may not be destructured, but tuple structs can be,
/// although the results are not correct.
pub struct Loop {
handle: *mut uvll::uv_loop_t
}
impl Loop {
pub fn new() -> Loop {
let handle = unsafe { uvll::loop_new() };
assert!(handle.is_not_null());
unsafe { uvll::set_data_for_uv_loop(handle, 0 as *mut c_void) }
Loop::wrap(handle)
}
pub fn wrap(handle: *mut uvll::uv_loop_t) -> Loop { Loop { handle: handle } }
pub fn run(&mut self) {
assert_eq!(unsafe { uvll::uv_run(self.handle, uvll::RUN_DEFAULT) }, 0);
}
pub fn close(&mut self) {
unsafe { uvll::uv_loop_delete(self.handle) };
}
// The 'data' field of the uv_loop_t is used to count the number of tasks
// that are currently blocked waiting for I/O to complete.
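// (Decrements are performed by passing `-1`, which wraps around as a `uint`;
// the wrapping addition below then effectively subtracts one.)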
fn modify_blockers(&self, amt: uint) {
unsafe {
let cur = uvll::get_data_for_uv_loop(self.handle) as uint;
uvll::set_data_for_uv_loop(self.handle, (cur + amt) as *mut c_void)
}
}
fn get_blockers(&self) -> uint {
unsafe { uvll::get_data_for_uv_loop(self.handle) as uint }
}
}
// FIXME: Need to define the error constants like EOF so they can be
// compared to the UvError type
pub struct UvError(c_int);
impl UvError {
pub fn name(&self) -> String {
unsafe {
let inner = match self { &UvError(a) => a };
let name_str = uvll::uv_err_name(inner);
assert!(name_str.is_not_null());
string::raw::from_buf(name_str as *const u8)
}
}
pub fn desc(&self) -> String {
unsafe {
let inner = match self { &UvError(a) => a };
let desc_str = uvll::uv_strerror(inner);
assert!(desc_str.is_not_null());
string::raw::from_buf(desc_str as *const u8)
}
}
pub fn is_eof(&self) -> bool {
let UvError(handle) = *self;
handle == uvll::EOF
}
}
impl fmt::Show for UvError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}: {}", self.name(), self.desc())
}
}
#[test]
fn error_smoke_test() {
let err: UvError = UvError(uvll::EOF);
assert_eq!(err.to_string(), "EOF: end of file".to_string());
}
#[cfg(unix)]
pub fn uv_error_to_io_error(uverr: UvError) -> IoError {
let UvError(errcode) = uverr;
IoError {
code: if errcode == uvll::EOF {libc::EOF as uint} else {-errcode as uint},
extra: 0,
detail: Some(uverr.desc()),
}
}
#[cfg(windows)]
pub fn uv_error_to_io_error(uverr: UvError) -> IoError {
let UvError(errcode) = uverr;
IoError {
code: match errcode {
uvll::EOF => libc::EOF,
uvll::EACCES => libc::ERROR_ACCESS_DENIED,
uvll::ECONNREFUSED => libc::WSAECONNREFUSED,
uvll::ECONNRESET => libc::WSAECONNRESET,
uvll::ENOTCONN => libc::WSAENOTCONN,
uvll::ENOENT => libc::ERROR_FILE_NOT_FOUND,
uvll::EPIPE => libc::ERROR_NO_DATA,
uvll::ECONNABORTED => libc::WSAECONNABORTED,
uvll::EADDRNOTAVAIL => libc::WSAEADDRNOTAVAIL,
uvll::ECANCELED => libc::ERROR_OPERATION_ABORTED,
uvll::EADDRINUSE => libc::WSAEADDRINUSE,
uvll::EPERM => libc::ERROR_ACCESS_DENIED,
err => {
uvdebug!("uverr.code {}", err as int);
// FIXME: Need to map remaining uv error types
-1
}
} as uint,
extra: 0,
detail: Some(uverr.desc()),
}
}
/// Converts a uv callback status code into a `UvError` if it indicates an error
pub fn status_to_maybe_uv_error(status: c_int) -> Option<UvError> {
if status >= 0 {
None
} else {
Some(UvError(status))
}
}
pub fn status_to_io_result(status: c_int) -> IoResult<()> {
if status >= 0 {Ok(())} else {Err(uv_error_to_io_error(UvError(status)))}
}
/// The uv buffer type
pub type Buf = uvll::uv_buf_t;
pub fn empty_buf() -> Buf {
uvll::uv_buf_t {
base: ptr::null_mut(),
len: 0,
}
}
/// Borrow a slice to a Buf
pub fn slice_to_uv_buf(v: &[u8]) -> Buf {
let data = v.as_ptr();
uvll::uv_buf_t { base: data as *mut u8, len: v.len() as uvll::uv_buf_len_t }
}
// This function is full of lies!
#[cfg(test)]
fn local_loop() -> &'static mut uvio::UvIoFactory {
use std::raw::TraitObject;
unsafe {
mem::transmute({
let mut task = Local::borrow(None::<Task>);
let mut io = task.local_io().unwrap();
let obj: TraitObject =
mem::transmute(io.get());
obj.data
})
}
}
#[cfg(test)]
fn next_test_ip4() -> std::rt::rtio::SocketAddr {
use std::io;
use std::rt::rtio;
let io::net::ip::SocketAddr { ip, port } = io::test::next_test_ip4();
let ip = match ip {
io::net::ip::Ipv4Addr(a, b, c, d) => rtio::Ipv4Addr(a, b, c, d),
_ => unreachable!(),
};
rtio::SocketAddr { ip: ip, port: port }
}
#[cfg(test)]
fn next_test_ip6() -> std::rt::rtio::SocketAddr {
use std::io;
use std::rt::rtio;
let io::net::ip::SocketAddr { ip, port } = io::test::next_test_ip6();
let ip = match ip {
io::net::ip::Ipv6Addr(a, b, c, d, e, f, g, h) =>
rtio::Ipv6Addr(a, b, c, d, e, f, g, h),
_ => unreachable!(),
};
rtio::SocketAddr { ip: ip, port: port }
}
#[cfg(test)]
mod test {
use std::mem::transmute;
use std::rt::thread::Thread;
use super::{slice_to_uv_buf, Loop};
#[test]
fn test_slice_to_uv_buf() {
let slice = [0, .. 20];
let buf = slice_to_uv_buf(slice);
assert_eq!(buf.len, 20);
unsafe {
let base = transmute::<*mut u8, *mut u8>(buf.base);
(*base) = 1;
(*base.offset(1)) = 2;
}
assert!(slice[0] == 1);
assert!(slice[1] == 2);
}
#[test]
fn loop_smoke_test() {
Thread::start(proc() {
let mut loop_ = Loop::new();
loop_.run();
loop_.close();
}).join();
}
}

View File

@ -1,34 +0,0 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![macro_escape]
use std::fmt;
macro_rules! uverrln (
($($arg:tt)*) => ( {
format_args!(::macros::dumb_println, $($arg)*)
} )
)
// Some basic logging. Enabled by passing `--cfg uvdebug` to the libstd build.
macro_rules! uvdebug (
($($arg:tt)*) => ( {
if cfg!(uvdebug) {
uverrln!($($arg)*)
}
})
)
pub fn dumb_println(args: &fmt::Arguments) {
use std::rt;
let mut w = rt::Stderr;
let _ = writeln!(&mut w, "{}", args);
}

File diff suppressed because it is too large

View File

@ -1,436 +0,0 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use libc;
use std::c_str::CString;
use std::mem;
use std::rt::rtio;
use std::rt::rtio::IoResult;
use std::rt::task::BlockedTask;
use homing::{HomingIO, HomeHandle};
use net;
use rc::Refcount;
use stream::StreamWatcher;
use super::{Loop, UvError, UvHandle, uv_error_to_io_error};
use timeout::{AcceptTimeout, ConnectCtx, AccessTimeout};
use uvio::UvIoFactory;
use uvll;
pub struct PipeWatcher {
stream: StreamWatcher,
home: HomeHandle,
defused: bool,
refcount: Refcount,
// see comments in TcpWatcher for why these exist
write_access: AccessTimeout<()>,
read_access: AccessTimeout<()>,
}
pub struct PipeListener {
home: HomeHandle,
pipe: *mut uvll::uv_pipe_t,
}
pub struct PipeAcceptor {
home: HomeHandle,
handle: *mut uvll::uv_pipe_t,
access: AcceptTimeout<Box<rtio::RtioPipe + Send>>,
refcount: Refcount,
}
// PipeWatcher implementation and traits
impl PipeWatcher {
// Creates an uninitialized pipe watcher. The underlying uv pipe is ready to
// get bound to some other source (this is normally a helper method paired
// with another call).
pub fn new(io: &mut UvIoFactory, ipc: bool) -> PipeWatcher {
let home = io.make_handle();
PipeWatcher::new_home(&io.loop_, home, ipc)
}
pub fn new_home(loop_: &Loop, home: HomeHandle, ipc: bool) -> PipeWatcher {
let handle = unsafe {
let handle = uvll::malloc_handle(uvll::UV_NAMED_PIPE);
assert!(!handle.is_null());
let ipc = ipc as libc::c_int;
assert_eq!(uvll::uv_pipe_init(loop_.handle, handle, ipc), 0);
handle
};
PipeWatcher {
stream: StreamWatcher::new(handle, true),
home: home,
defused: false,
refcount: Refcount::new(),
read_access: AccessTimeout::new(()),
write_access: AccessTimeout::new(()),
}
}
pub fn open(io: &mut UvIoFactory, file: libc::c_int)
-> Result<PipeWatcher, UvError>
{
let pipe = PipeWatcher::new(io, false);
match unsafe { uvll::uv_pipe_open(pipe.handle(), file) } {
0 => Ok(pipe),
n => Err(UvError(n))
}
}
pub fn connect(io: &mut UvIoFactory, name: &CString, timeout: Option<u64>)
-> Result<PipeWatcher, UvError>
{
let pipe = PipeWatcher::new(io, false);
let cx = ConnectCtx { status: -1, task: None, timer: None };
cx.connect(pipe, timeout, io, |req, pipe, cb| {
unsafe {
uvll::uv_pipe_connect(req.handle, pipe.handle(),
name.as_ptr(), cb)
}
0
})
}
pub fn handle(&self) -> *mut uvll::uv_pipe_t { self.stream.handle }
// Unwraps the underlying uv pipe. This cancels destruction of the pipe and
// allows the pipe to get moved elsewhere
fn unwrap(mut self) -> *mut uvll::uv_pipe_t {
self.defused = true;
return self.stream.handle;
}
}
impl rtio::RtioPipe for PipeWatcher {
fn read(&mut self, buf: &mut [u8]) -> IoResult<uint> {
let m = self.fire_homing_missile();
let guard = try!(self.read_access.grant(m));
// see comments in close_read about this check
if guard.access.is_closed() {
return Err(uv_error_to_io_error(UvError(uvll::EOF)))
}
self.stream.read(buf).map_err(uv_error_to_io_error)
}
fn write(&mut self, buf: &[u8]) -> IoResult<()> {
let m = self.fire_homing_missile();
let guard = try!(self.write_access.grant(m));
self.stream.write(buf, guard.can_timeout).map_err(uv_error_to_io_error)
}
fn clone(&self) -> Box<rtio::RtioPipe + Send> {
box PipeWatcher {
stream: StreamWatcher::new(self.stream.handle, false),
defused: false,
home: self.home.clone(),
refcount: self.refcount.clone(),
read_access: self.read_access.clone(),
write_access: self.write_access.clone(),
} as Box<rtio::RtioPipe + Send>
}
fn close_read(&mut self) -> IoResult<()> {
// The current uv_shutdown method only shuts the writing half of the
// connection, and no method is provided to shut down the reading half
// of the connection. With a lack of method, we emulate shutting down
// the reading half of the connection by manually returning early from
// all future calls to `read`.
//
// Note that we must be careful to ensure that *all* cloned handles see
// the closing of the read half, so we stored the "is closed" bit in the
// Access struct, not in our own personal watcher. Additionally, the
// homing missile is used as a locking mechanism to ensure there is no
// contention over this bit.
//
// To shutdown the read half, we must first flag the access as being
// closed, and then afterwards we cease any pending read. Note that this
// ordering is crucial because we could in theory be rescheduled during
// the uv_read_stop which means that another read invocation could leak
// in before we set the flag.
let task = {
let m = self.fire_homing_missile();
self.read_access.access.close(&m);
self.stream.cancel_read(uvll::EOF as libc::ssize_t)
};
let _ = task.map(|t| t.reawaken());
Ok(())
}
fn close_write(&mut self) -> IoResult<()> {
let _m = self.fire_homing_missile();
net::shutdown(self.stream.handle, &self.uv_loop())
}
fn set_timeout(&mut self, timeout: Option<u64>) {
self.set_read_timeout(timeout);
self.set_write_timeout(timeout);
}
fn set_read_timeout(&mut self, ms: Option<u64>) {
let _m = self.fire_homing_missile();
let loop_ = self.uv_loop();
self.read_access.set_timeout(ms, &self.home, &loop_, cancel_read,
&self.stream as *const _ as uint);
fn cancel_read(stream: uint) -> Option<BlockedTask> {
let stream: &mut StreamWatcher = unsafe { mem::transmute(stream) };
stream.cancel_read(uvll::ECANCELED as libc::ssize_t)
}
}
fn set_write_timeout(&mut self, ms: Option<u64>) {
let _m = self.fire_homing_missile();
let loop_ = self.uv_loop();
self.write_access.set_timeout(ms, &self.home, &loop_, cancel_write,
&self.stream as *const _ as uint);
fn cancel_write(stream: uint) -> Option<BlockedTask> {
let stream: &mut StreamWatcher = unsafe { mem::transmute(stream) };
stream.cancel_write()
}
}
}
impl HomingIO for PipeWatcher {
fn home<'a>(&'a mut self) -> &'a mut HomeHandle { &mut self.home }
}
impl UvHandle<uvll::uv_pipe_t> for PipeWatcher {
fn uv_handle(&self) -> *mut uvll::uv_pipe_t { self.stream.handle }
}
impl Drop for PipeWatcher {
fn drop(&mut self) {
let _m = self.fire_homing_missile();
if !self.defused && self.refcount.decrement() {
self.close();
}
}
}
// PipeListener implementation and traits
impl PipeListener {
pub fn bind(io: &mut UvIoFactory, name: &CString)
-> Result<Box<PipeListener>, UvError>
{
let pipe = PipeWatcher::new(io, false);
match unsafe {
uvll::uv_pipe_bind(pipe.handle(), name.as_ptr())
} {
0 => {
// If successful, unwrap the PipeWatcher because we close this pipe
// differently and can't rely on StreamWatcher's default close method.
let p = box PipeListener {
home: io.make_handle(),
pipe: pipe.unwrap(),
};
Ok(p.install())
}
n => Err(UvError(n))
}
}
}
impl rtio::RtioUnixListener for PipeListener {
fn listen(mut self: Box<PipeListener>)
-> IoResult<Box<rtio::RtioUnixAcceptor + Send>> {
let _m = self.fire_homing_missile();
// create the acceptor object from ourselves
let acceptor = (box PipeAcceptor {
handle: self.pipe,
home: self.home.clone(),
access: AcceptTimeout::new(),
refcount: Refcount::new(),
}).install();
self.pipe = 0 as *mut _;
// FIXME: the 128 backlog should be configurable
match unsafe { uvll::uv_listen(acceptor.handle, 128, listen_cb) } {
0 => Ok(acceptor as Box<rtio::RtioUnixAcceptor + Send>),
n => Err(uv_error_to_io_error(UvError(n))),
}
}
}
impl HomingIO for PipeListener {
fn home<'r>(&'r mut self) -> &'r mut HomeHandle { &mut self.home }
}
impl UvHandle<uvll::uv_pipe_t> for PipeListener {
fn uv_handle(&self) -> *mut uvll::uv_pipe_t { self.pipe }
}
extern fn listen_cb(server: *mut uvll::uv_stream_t, status: libc::c_int) {
assert!(status != uvll::ECANCELED);
let pipe: &mut PipeAcceptor = unsafe { UvHandle::from_uv_handle(&server) };
let msg = match status {
0 => {
let loop_ = Loop::wrap(unsafe {
uvll::get_loop_for_uv_handle(server)
});
let client = PipeWatcher::new_home(&loop_, pipe.home().clone(), false);
assert_eq!(unsafe { uvll::uv_accept(server, client.handle()) }, 0);
Ok(box client as Box<rtio::RtioPipe + Send>)
}
n => Err(uv_error_to_io_error(UvError(n)))
};
// If we're running then we have exclusive access, so the unsafe_get() is ok
unsafe { pipe.access.push(msg); }
}
impl Drop for PipeListener {
fn drop(&mut self) {
if self.pipe.is_null() { return }
let _m = self.fire_homing_missile();
self.close();
}
}
// PipeAcceptor implementation and traits
impl rtio::RtioUnixAcceptor for PipeAcceptor {
fn accept(&mut self) -> IoResult<Box<rtio::RtioPipe + Send>> {
let m = self.fire_homing_missile();
let loop_ = self.uv_loop();
self.access.accept(m, &loop_)
}
fn set_timeout(&mut self, ms: Option<u64>) {
let _m = self.fire_homing_missile();
let loop_ = self.uv_loop();
self.access.set_timeout(ms, &loop_, &self.home);
}
fn clone(&self) -> Box<rtio::RtioUnixAcceptor + Send> {
box PipeAcceptor {
refcount: self.refcount.clone(),
home: self.home.clone(),
handle: self.handle,
access: self.access.clone(),
} as Box<rtio::RtioUnixAcceptor + Send>
}
fn close_accept(&mut self) -> IoResult<()> {
let m = self.fire_homing_missile();
self.access.close(m);
Ok(())
}
}
impl HomingIO for PipeAcceptor {
fn home<'r>(&'r mut self) -> &'r mut HomeHandle { &mut self.home }
}
impl UvHandle<uvll::uv_pipe_t> for PipeAcceptor {
fn uv_handle(&self) -> *mut uvll::uv_pipe_t { self.handle }
}
impl Drop for PipeAcceptor {
fn drop(&mut self) {
let _m = self.fire_homing_missile();
if self.refcount.decrement() {
self.close();
}
}
}
#[cfg(test)]
mod tests {
use std::rt::rtio::{RtioUnixListener, RtioUnixAcceptor, RtioPipe};
use std::io::test::next_test_unix;
use super::{PipeWatcher, PipeListener};
use super::super::local_loop;
#[test]
fn connect_err() {
match PipeWatcher::connect(local_loop(), &"path/to/nowhere".to_c_str(),
None) {
Ok(..) => fail!(),
Err(..) => {}
}
}
#[test]
fn bind_err() {
match PipeListener::bind(local_loop(), &"path/to/nowhere".to_c_str()) {
Ok(..) => fail!(),
Err(e) => assert_eq!(e.name(), "EACCES".to_string()),
}
}
#[test]
fn bind() {
let p = next_test_unix().to_c_str();
match PipeListener::bind(local_loop(), &p) {
Ok(..) => {}
Err(..) => fail!(),
}
}
#[test] #[should_fail]
fn bind_fail() {
let p = next_test_unix().to_c_str();
let _w = PipeListener::bind(local_loop(), &p).unwrap();
fail!();
}
#[test]
fn connect() {
let path = next_test_unix();
let path2 = path.clone();
let (tx, rx) = channel();
spawn(proc() {
let p = PipeListener::bind(local_loop(), &path2.to_c_str()).unwrap();
let mut p = p.listen().ok().unwrap();
tx.send(());
let mut client = p.accept().ok().unwrap();
let mut buf = [0];
assert!(client.read(buf).ok().unwrap() == 1);
assert_eq!(buf[0], 1);
assert!(client.write([2]).is_ok());
});
rx.recv();
let mut c = PipeWatcher::connect(local_loop(), &path.to_c_str(), None).unwrap();
assert!(c.write([1]).is_ok());
let mut buf = [0];
assert!(c.read(buf).ok().unwrap() == 1);
assert_eq!(buf[0], 2);
}
#[test] #[should_fail]
fn connect_fail() {
let path = next_test_unix();
let path2 = path.clone();
let (tx, rx) = channel();
spawn(proc() {
let p = PipeListener::bind(local_loop(), &path2.to_c_str()).unwrap();
let mut p = p.listen().ok().unwrap();
tx.send(());
drop(p.accept().ok().unwrap());
});
rx.recv();
let _c = PipeWatcher::connect(local_loop(), &path.to_c_str(), None).unwrap();
fail!()
}
}

View File

@ -1,324 +0,0 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use libc::c_int;
use libc;
use std::ptr;
use std::c_str::CString;
use std::rt::rtio;
use std::rt::rtio::IoResult;
use std::rt::task::BlockedTask;
use homing::{HomingIO, HomeHandle};
use pipe::PipeWatcher;
use super::{UvHandle, UvError, uv_error_to_io_error,
wait_until_woken_after, wakeup, Loop};
use timer::TimerWatcher;
use uvio::UvIoFactory;
use uvll;
pub struct Process {
handle: *mut uvll::uv_process_t,
home: HomeHandle,
/// Task to wake up (may be null) for when the process exits
to_wake: Option<BlockedTask>,
/// Collected from the exit_cb
exit_status: Option<rtio::ProcessExit>,
/// Lazily initialized timeout timer
timer: Option<Box<TimerWatcher>>,
timeout_state: TimeoutState,
}
enum TimeoutState {
NoTimeout,
TimeoutPending,
TimeoutElapsed,
}
impl Process {
/// Spawn a new process inside the specified event loop.
///
/// Returns either the corresponding process object or an error which
/// occurred.
pub fn spawn(io_loop: &mut UvIoFactory, cfg: rtio::ProcessConfig)
-> Result<(Box<Process>, Vec<Option<PipeWatcher>>), UvError> {
let mut io = vec![cfg.stdin, cfg.stdout, cfg.stderr];
for slot in cfg.extra_io.iter() {
io.push(*slot);
}
let mut stdio = Vec::<uvll::uv_stdio_container_t>::with_capacity(io.len());
let mut ret_io = Vec::with_capacity(io.len());
unsafe {
stdio.set_len(io.len());
for (slot, other) in stdio.iter_mut().zip(io.iter()) {
let io = set_stdio(slot as *mut uvll::uv_stdio_container_t, other,
io_loop);
ret_io.push(io);
}
}
let ret = with_argv(cfg.program, cfg.args, |argv| {
with_env(cfg.env, |envp| {
let mut flags = 0;
if cfg.uid.is_some() {
flags |= uvll::PROCESS_SETUID;
}
if cfg.gid.is_some() {
flags |= uvll::PROCESS_SETGID;
}
if cfg.detach {
flags |= uvll::PROCESS_DETACHED;
}
let mut options = uvll::uv_process_options_t {
exit_cb: on_exit,
file: unsafe { *argv },
args: argv,
env: envp,
cwd: match cfg.cwd {
Some(cwd) => cwd.as_ptr(),
None => ptr::null(),
},
flags: flags as libc::c_uint,
stdio_count: stdio.len() as libc::c_int,
stdio: stdio.as_mut_ptr(),
uid: cfg.uid.unwrap_or(0) as uvll::uv_uid_t,
gid: cfg.gid.unwrap_or(0) as uvll::uv_gid_t,
};
let handle = UvHandle::alloc(None::<Process>, uvll::UV_PROCESS);
let process = box Process {
handle: handle,
home: io_loop.make_handle(),
to_wake: None,
exit_status: None,
timer: None,
timeout_state: NoTimeout,
};
match unsafe {
uvll::uv_spawn(io_loop.uv_loop(), handle, &mut options)
} {
0 => Ok(process.install()),
err => Err(UvError(err)),
}
})
});
match ret {
Ok(p) => Ok((p, ret_io)),
Err(e) => Err(e),
}
}
pub fn kill(pid: libc::pid_t, signum: int) -> Result<(), UvError> {
match unsafe {
uvll::uv_kill(pid as libc::c_int, signum as libc::c_int)
} {
0 => Ok(()),
n => Err(UvError(n))
}
}
}
extern fn on_exit(handle: *mut uvll::uv_process_t,
exit_status: i64,
term_signal: libc::c_int) {
let p: &mut Process = unsafe { UvHandle::from_uv_handle(&handle) };
assert!(p.exit_status.is_none());
p.exit_status = Some(match term_signal {
0 => rtio::ExitStatus(exit_status as int),
n => rtio::ExitSignal(n as int),
});
if p.to_wake.is_none() { return }
wakeup(&mut p.to_wake);
}
unsafe fn set_stdio(dst: *mut uvll::uv_stdio_container_t,
io: &rtio::StdioContainer,
io_loop: &mut UvIoFactory) -> Option<PipeWatcher> {
match *io {
rtio::Ignored => {
uvll::set_stdio_container_flags(dst, uvll::STDIO_IGNORE);
None
}
rtio::InheritFd(fd) => {
uvll::set_stdio_container_flags(dst, uvll::STDIO_INHERIT_FD);
uvll::set_stdio_container_fd(dst, fd);
None
}
rtio::CreatePipe(readable, writable) => {
let mut flags = uvll::STDIO_CREATE_PIPE as libc::c_int;
if readable {
flags |= uvll::STDIO_READABLE_PIPE as libc::c_int;
}
if writable {
flags |= uvll::STDIO_WRITABLE_PIPE as libc::c_int;
}
let pipe = PipeWatcher::new(io_loop, false);
uvll::set_stdio_container_flags(dst, flags);
uvll::set_stdio_container_stream(dst, pipe.handle());
Some(pipe)
}
}
}
/// Converts the program and arguments to the argv array expected by libuv.
fn with_argv<T>(prog: &CString, args: &[CString],
cb: |*const *const libc::c_char| -> T) -> T {
let mut ptrs: Vec<*const libc::c_char> = Vec::with_capacity(args.len()+1);
// Convert the CStrings into an array of pointers. Note: the
// lifetime of the various CStrings involved is guaranteed to be
// larger than the lifetime of our invocation of cb, but this is
// technically unsafe as the callback could leak these pointers
// out of our scope.
ptrs.push(prog.as_ptr());
ptrs.extend(args.iter().map(|tmp| tmp.as_ptr()));
// Add a terminating null pointer (required by libc).
ptrs.push(ptr::null());
cb(ptrs.as_ptr())
}
/// Converts the environment to the env array expected by libuv
fn with_env<T>(env: Option<&[(&CString, &CString)]>,
cb: |*const *const libc::c_char| -> T) -> T {
// We can pass a char** for envp, which is a null-terminated array
// of "k=v\0" strings. Since we must create these strings locally,
// yet expose a raw pointer to them, we create a temporary vector
// to own the CStrings that outlives the call to cb.
match env {
Some(env) => {
let mut tmps = Vec::with_capacity(env.len());
for pair in env.iter() {
let mut kv = Vec::new();
kv.push_all(pair.ref0().as_bytes_no_nul());
kv.push('=' as u8);
kv.push_all(pair.ref1().as_bytes()); // includes terminal \0
tmps.push(kv);
}
// As with `with_argv`, this is unsafe, since cb could leak the pointers.
let mut ptrs: Vec<*const libc::c_char> =
tmps.iter()
.map(|tmp| tmp.as_ptr() as *const libc::c_char)
.collect();
ptrs.push(ptr::null());
cb(ptrs.as_ptr())
}
_ => cb(ptr::null())
}
}
impl HomingIO for Process {
fn home<'r>(&'r mut self) -> &'r mut HomeHandle { &mut self.home }
}
impl UvHandle<uvll::uv_process_t> for Process {
fn uv_handle(&self) -> *mut uvll::uv_process_t { self.handle }
}
impl rtio::RtioProcess for Process {
fn id(&self) -> libc::pid_t {
unsafe { uvll::process_pid(self.handle) as libc::pid_t }
}
fn kill(&mut self, signal: int) -> IoResult<()> {
let _m = self.fire_homing_missile();
match unsafe {
uvll::uv_process_kill(self.handle, signal as libc::c_int)
} {
0 => Ok(()),
err => Err(uv_error_to_io_error(UvError(err)))
}
}
fn wait(&mut self) -> IoResult<rtio::ProcessExit> {
// Make sure (on the home scheduler) that we have an exit status listed
let _m = self.fire_homing_missile();
match self.exit_status {
Some(status) => return Ok(status),
None => {}
}
// If there's no exit code previously listed, then the process's exit
// callback has yet to be invoked. We just need to deschedule ourselves
// and wait to be reawoken.
match self.timeout_state {
NoTimeout | TimeoutPending => {
wait_until_woken_after(&mut self.to_wake, &self.uv_loop(), || {});
}
TimeoutElapsed => {}
}
// If there's still no exit status listed, then we timed out, and we
// need to return.
match self.exit_status {
Some(status) => Ok(status),
None => Err(uv_error_to_io_error(UvError(uvll::ECANCELED)))
}
}
fn set_timeout(&mut self, timeout: Option<u64>) {
let _m = self.fire_homing_missile();
self.timeout_state = NoTimeout;
let ms = match timeout {
Some(ms) => ms,
None => {
match self.timer {
Some(ref mut timer) => timer.stop(),
None => {}
}
return
}
};
if self.timer.is_none() {
let loop_ = Loop::wrap(unsafe {
uvll::get_loop_for_uv_handle(self.uv_handle())
});
let mut timer = box TimerWatcher::new_home(&loop_, self.home().clone());
unsafe {
timer.set_data(self as *mut _);
}
self.timer = Some(timer);
}
let timer = self.timer.as_mut().unwrap();
timer.stop();
timer.start(timer_cb, ms, 0);
self.timeout_state = TimeoutPending;
extern fn timer_cb(timer: *mut uvll::uv_timer_t) {
let p: &mut Process = unsafe {
&mut *(uvll::get_data_for_uv_handle(timer) as *mut Process)
};
p.timeout_state = TimeoutElapsed;
match p.to_wake.take() {
Some(task) => { let _t = task.wake().map(|t| t.reawaken()); }
None => {}
}
}
}
}
impl Drop for Process {
fn drop(&mut self) {
let _m = self.fire_homing_missile();
assert!(self.to_wake.is_none());
self.close();
}
}

View File

@ -1,185 +0,0 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A concurrent queue used to signal remote event loops
//!
//! This queue implementation is used to send tasks among event loops. This is
//! backed by a multi-producer/single-consumer queue from libstd and uv_async_t
//! handles (to wake up a remote event loop).
//!
//! The uv_async_t is stored next to the event loop, so to avoid keeping the
//! event loop alive unnecessarily we use uv_ref and uv_unref to control
//! whether the async handle is active.
#![allow(dead_code)]
use alloc::arc::Arc;
use libc::c_void;
use std::mem;
use std::rt::mutex::NativeMutex;
use std::rt::task::BlockedTask;
use std::sync::mpsc_queue as mpsc;
use async::AsyncWatcher;
use super::{Loop, UvHandle};
use uvll;
enum Message {
Task(BlockedTask),
Increment,
Decrement,
}
struct State {
handle: *mut uvll::uv_async_t,
lock: NativeMutex, // see comments in async_cb for why this is needed
queue: mpsc::Queue<Message>,
}
/// This structure is intended to be stored next to the event loop, and it is
/// used to create new `Queue` structures.
pub struct QueuePool {
queue: Arc<State>,
refcnt: uint,
}
/// This type is used to send messages back to the original event loop.
pub struct Queue {
queue: Arc<State>,
}
extern fn async_cb(handle: *mut uvll::uv_async_t) {
let pool: &mut QueuePool = unsafe {
mem::transmute(uvll::get_data_for_uv_handle(handle))
};
let state: &State = &*pool.queue;
// Remember that there is no guarantee about how many times an async
// callback is called with relation to the number of sends, so process the
// entire queue in a loop.
loop {
match state.queue.pop() {
mpsc::Data(Task(task)) => {
let _ = task.wake().map(|t| t.reawaken());
}
mpsc::Data(Increment) => unsafe {
if pool.refcnt == 0 {
uvll::uv_ref(state.handle);
}
pool.refcnt += 1;
},
mpsc::Data(Decrement) => unsafe {
pool.refcnt -= 1;
if pool.refcnt == 0 {
uvll::uv_unref(state.handle);
}
},
mpsc::Empty | mpsc::Inconsistent => break
};
}
// If the refcount is now zero after processing the queue, then there is no
// longer a reference on the async handle and it is possible that this event
// loop can exit. What we're not guaranteed, however, is that a producer in
// the middle of dropping itself is actually done with the handle yet. It is
// possible that we saw their Decrement message but they have yet to signal
// on the async handle. If we were to return immediately, the entire uv loop
// could be destroyed meaning the call to uv_async_send would abort()
//
// In order to fix this, an OS mutex is used to wait for the other end to
// finish before we continue. The drop block on a handle will acquire a
// mutex and then drop it after both the push and send have been completed.
// If we acquire the mutex here, then we are guaranteed that there are no
// longer any senders which are holding on to their handles, so we can
// safely allow the event loop to exit.
if pool.refcnt == 0 {
unsafe {
let _l = state.lock.lock();
}
}
}
impl QueuePool {
pub fn new(loop_: &mut Loop) -> Box<QueuePool> {
let handle = UvHandle::alloc(None::<AsyncWatcher>, uvll::UV_ASYNC);
let state = Arc::new(State {
handle: handle,
lock: unsafe {NativeMutex::new()},
queue: mpsc::Queue::new(),
});
let mut q = box QueuePool {
refcnt: 0,
queue: state,
};
unsafe {
assert_eq!(uvll::uv_async_init(loop_.handle, handle, async_cb), 0);
uvll::uv_unref(handle);
let data = &mut *q as *mut QueuePool as *mut c_void;
uvll::set_data_for_uv_handle(handle, data);
}
return q;
}
pub fn queue(&mut self) -> Queue {
unsafe {
if self.refcnt == 0 {
uvll::uv_ref(self.queue.handle);
}
self.refcnt += 1;
}
Queue { queue: self.queue.clone() }
}
pub fn handle(&self) -> *mut uvll::uv_async_t { self.queue.handle }
}
impl Queue {
pub fn push(&mut self, task: BlockedTask) {
self.queue.queue.push(Task(task));
unsafe { uvll::uv_async_send(self.queue.handle); }
}
}
impl Clone for Queue {
fn clone(&self) -> Queue {
// Push a request to increment on the queue, but there's no need to
// signal the event loop to process it at this time. We're guaranteed
// that the count is at least one (because we have a queue right here),
// and if the queue is dropped later on, the event loop will process this
// Increment before the corresponding Decrement anyway.
self.queue.queue.push(Increment);
Queue { queue: self.queue.clone() }
}
}
impl Drop for Queue {
fn drop(&mut self) {
// See the comments in the async_cb function for why there is a lock
// that is acquired only on a drop.
unsafe {
let _l = self.queue.lock.lock();
self.queue.queue.push(Decrement);
uvll::uv_async_send(self.queue.handle);
}
}
}
impl Drop for State {
fn drop(&mut self) {
unsafe {
uvll::uv_close(self.handle, mem::transmute(0u));
// Note that this does *not* free the handle, that is the
// responsibility of the caller because the uv loop must be closed
// before we deallocate this uv handle.
}
}
}
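
A minimal sketch of the intended flow, assuming the caller already has a `Loop` and a `BlockedTask` in hand (this helper is illustrative, not part of the original file):

```
fn queue_example(loop_: &mut Loop, task: BlockedTask) {
    let mut pool = QueuePool::new(loop_);   // lives alongside the event loop
    let mut remote = pool.queue();          // sendable handle for other threads
    remote.push(task);                      // uv_async_send wakes `loop_` back up
}
```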

View File

@ -1,50 +0,0 @@
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/// Simple refcount structure for cloning handles
///
/// This is meant to be an unintrusive solution to cloning handles in rustuv.
/// The handles themselves shouldn't be sharing memory because there are bits of
/// state in the rust objects which shouldn't be shared across multiple users of
/// the same underlying uv object, hence Rc is not used and this simple counter
/// should suffice.
use alloc::arc::Arc;
use std::cell::UnsafeCell;
pub struct Refcount {
rc: Arc<UnsafeCell<uint>>,
}
impl Refcount {
/// Creates a new refcount of 1
pub fn new() -> Refcount {
Refcount { rc: Arc::new(UnsafeCell::new(1)) }
}
fn increment(&self) {
unsafe { *self.rc.get() += 1; }
}
/// Returns whether the refcount just hit 0 or not
pub fn decrement(&self) -> bool {
unsafe {
*self.rc.get() -= 1;
*self.rc.get() == 0
}
}
}
impl Clone for Refcount {
fn clone(&self) -> Refcount {
self.increment();
Refcount { rc: self.rc.clone() }
}
}
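
A small illustrative sketch (not part of the original file) of the clone/decrement protocol this counter implements:

```
fn refcount_example() {
    let rc = Refcount::new();   // count == 1
    let rc2 = rc.clone();       // count == 2
    assert!(!rc.decrement());   // count == 1: not the last user yet
    assert!(rc2.decrement());   // count == 0: last user performs the cleanup
}
```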

View File

@ -1,68 +0,0 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use libc::c_int;
use std::rt::rtio::{RtioSignal, Callback};
use homing::{HomingIO, HomeHandle};
use super::{UvError, UvHandle};
use uvll;
use uvio::UvIoFactory;
pub struct SignalWatcher {
handle: *mut uvll::uv_signal_t,
home: HomeHandle,
cb: Box<Callback + Send>,
}
impl SignalWatcher {
pub fn new(io: &mut UvIoFactory, signum: int, cb: Box<Callback + Send>)
-> Result<Box<SignalWatcher>, UvError> {
let s = box SignalWatcher {
handle: UvHandle::alloc(None::<SignalWatcher>, uvll::UV_SIGNAL),
home: io.make_handle(),
cb: cb,
};
assert_eq!(unsafe {
uvll::uv_signal_init(io.uv_loop(), s.handle)
}, 0);
match unsafe {
uvll::uv_signal_start(s.handle, signal_cb, signum as c_int)
} {
0 => Ok(s.install()),
n => Err(UvError(n)),
}
}
}
extern fn signal_cb(handle: *mut uvll::uv_signal_t, _signum: c_int) {
let s: &mut SignalWatcher = unsafe { UvHandle::from_uv_handle(&handle) };
let _ = s.cb.call();
}
impl HomingIO for SignalWatcher {
fn home<'r>(&'r mut self) -> &'r mut HomeHandle { &mut self.home }
}
impl UvHandle<uvll::uv_signal_t> for SignalWatcher {
fn uv_handle(&self) -> *mut uvll::uv_signal_t { self.handle }
}
impl RtioSignal for SignalWatcher {}
impl Drop for SignalWatcher {
fn drop(&mut self) {
let _m = self.fire_homing_missile();
self.close();
}
}

View File

@ -1,281 +0,0 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use libc::{c_int, size_t, ssize_t};
use std::mem;
use std::ptr;
use std::rt::task::BlockedTask;
use Loop;
use super::{UvError, Buf, slice_to_uv_buf, Request, wait_until_woken_after,
ForbidUnwind, wakeup};
use uvll;
// This is a helper structure which is intended to get embedded into other
// Watcher structures. This structure will retain a handle to the underlying
// uv_stream_t instance, and all I/O operations assume that it's already located
// on the appropriate scheduler.
pub struct StreamWatcher {
pub handle: *mut uvll::uv_stream_t,
// Cache the last used uv_write_t so we don't have to allocate a new one on
// every call to uv_write(). Ideally this would be a stack-allocated
// structure, but currently we don't have mappings for all the structures
// defined in libuv, so we're forced to malloc this.
last_write_req: Option<Request>,
blocked_writer: Option<BlockedTask>,
}
struct ReadContext {
buf: Option<Buf>,
result: ssize_t,
task: Option<BlockedTask>,
}
struct WriteContext {
result: c_int,
stream: *mut StreamWatcher,
data: Option<Vec<u8>>,
}
impl StreamWatcher {
// Creates a new helper structure which should be then embedded into another
// watcher. This provides the generic read/write methods on streams.
//
// This structure will *not* close the stream when it is dropped. It is up
// to the enclosing structure to be sure to call the close method (which
// will block the task). Note that this is also required to prevent memory
// leaks.
//
// It should also be noted that the `data` field of the underlying uv handle
// will be manipulated on each of the methods called on this watcher.
// Wrappers should ensure to always reset the field to an appropriate value
// if they rely on the field to perform an action.
pub fn new(stream: *mut uvll::uv_stream_t,
init: bool) -> StreamWatcher {
if init {
unsafe { uvll::set_data_for_uv_handle(stream, 0 as *mut int) }
}
StreamWatcher {
handle: stream,
last_write_req: None,
blocked_writer: None,
}
}
pub fn read(&mut self, buf: &mut [u8]) -> Result<uint, UvError> {
// This read operation needs to get canceled on an unwind via libuv's
// uv_read_stop function
let _f = ForbidUnwind::new("stream read");
let mut rcx = ReadContext {
buf: Some(slice_to_uv_buf(buf)),
// if the read is canceled, we'll see eof, otherwise this will get
// overwritten
result: 0,
task: None,
};
// When reading a TTY stream on windows, libuv will invoke alloc_cb
// immediately as part of the call to uv_read_start. What this means is that
// we must be ready for this to happen (by setting the data in the uv
// handle). In theory this otherwise doesn't need to happen until after
// the read is successfully started.
unsafe { uvll::set_data_for_uv_handle(self.handle, &mut rcx) }
// Send off the read request, but don't block until we're sure that the
// read request is queued.
let ret = match unsafe {
uvll::uv_read_start(self.handle, alloc_cb, read_cb)
} {
0 => {
let loop_ = unsafe { uvll::get_loop_for_uv_handle(self.handle) };
wait_until_woken_after(&mut rcx.task, &Loop::wrap(loop_), || {});
match rcx.result {
n if n < 0 => Err(UvError(n as c_int)),
n => Ok(n as uint),
}
}
n => Err(UvError(n))
};
// Make sure a read cancellation sees that there's no pending read
unsafe { uvll::set_data_for_uv_handle(self.handle, 0 as *mut int) }
return ret;
}
pub fn cancel_read(&mut self, reason: ssize_t) -> Option<BlockedTask> {
// When we invoke uv_read_stop, it cancels the read and alloc
// callbacks. We need to manually wake up a pending task (if one was
// present).
assert_eq!(unsafe { uvll::uv_read_stop(self.handle) }, 0);
let data = unsafe {
let data = uvll::get_data_for_uv_handle(self.handle);
if data.is_null() { return None }
uvll::set_data_for_uv_handle(self.handle, 0 as *mut int);
&mut *(data as *mut ReadContext)
};
data.result = reason;
data.task.take()
}
pub fn write(&mut self, buf: &[u8], may_timeout: bool) -> Result<(), UvError> {
// The ownership of the write request is dubious if this function
// unwinds. I believe that if the write_cb fails to re-schedule the task
// then the write request will be leaked.
let _f = ForbidUnwind::new("stream write");
// Prepare the write request, either using a cached one or allocating a
// new one
let mut req = match self.last_write_req.take() {
Some(req) => req, None => Request::new(uvll::UV_WRITE),
};
req.set_data(ptr::null_mut::<()>());
// And here's where timeouts get a little interesting. Currently, libuv
// does not support canceling an in-flight write request. Consequently,
// when a write timeout expires, there's not much we can do other than
// detach the sleeping task from the write request itself. Semantically,
// this means that the write request will complete asynchronously, but
// the calling task will return an error (because the write timed out).
//
// There is special wording in the documentation of set_write_timeout()
// indicating that this is a plausible failure scenario, and this
// function is why that wording exists.
//
// Implementation-wise, we must be careful when passing a buffer down to
// libuv. Most of this implementation avoids allocations because of the
// blocking guarantee (all stack local variables are valid for the
// entire read/write request). If our write request can be timed out,
// however, we must heap allocate the data and pass that to the libuv
// functions instead. The reason for this is that if we time out and
// return, there's no guarantee that `buf` is a valid buffer any more.
//
// To do this, the write context has an optionally owned vector of
// bytes.
let data = if may_timeout {Some(buf.to_vec())} else {None};
let uv_buf = if may_timeout {
slice_to_uv_buf(data.as_ref().unwrap().as_slice())
} else {
slice_to_uv_buf(buf)
};
// Send off the request, but be careful to not block until we're sure
// that the write request is queued. If the request couldn't be queued,
// then we should return immediately with an error.
match unsafe {
uvll::uv_write(req.handle, self.handle, [uv_buf], write_cb)
} {
0 => {
let mut wcx = WriteContext {
result: uvll::ECANCELED,
stream: self as *mut _,
data: data,
};
req.defuse(); // uv callback now owns this request
let loop_ = unsafe { uvll::get_loop_for_uv_handle(self.handle) };
wait_until_woken_after(&mut self.blocked_writer,
&Loop::wrap(loop_), || {
req.set_data(&mut wcx);
});
if wcx.result != uvll::ECANCELED {
self.last_write_req = Some(Request::wrap(req.handle));
return match wcx.result {
0 => Ok(()),
n => Err(UvError(n)),
}
}
// This is the second case where canceling an in-flight write
// gets interesting. If we've been canceled (no one reset our
// result), then someone still needs to free the request, and
// someone still needs to free the allocated buffer.
//
// To take care of this, we swap out the stack-allocated write
// context for a heap-allocated context, transferring ownership
// of everything to the write_cb. Libuv guarantees that this
// callback will be invoked at some point, and the callback will
// be responsible for deallocating these resources.
//
// Note that we don't cache this write request back in the
// stream watcher because we no longer have ownership of it, and
// we never will.
let mut new_wcx = box WriteContext {
result: 0,
stream: 0 as *mut StreamWatcher,
data: wcx.data.take(),
};
unsafe {
req.set_data(&mut *new_wcx);
mem::forget(new_wcx);
}
Err(UvError(wcx.result))
}
n => Err(UvError(n)),
}
}
pub fn cancel_write(&mut self) -> Option<BlockedTask> {
self.blocked_writer.take()
}
}
// This allocation callback expects to be invoked once and only once. It will
// unwrap the buffer in the ReadContext stored in the stream and return it. This
// will fail if it is called more than once.
extern fn alloc_cb(stream: *mut uvll::uv_stream_t, _hint: size_t, buf: *mut Buf) {
uvdebug!("alloc_cb");
unsafe {
let rcx: &mut ReadContext =
mem::transmute(uvll::get_data_for_uv_handle(stream));
*buf = rcx.buf.take().expect("stream alloc_cb called more than once");
}
}
// When a stream has read some data, we will always forcibly stop reading and
// return all the data read (even if it didn't fill the whole buffer).
extern fn read_cb(handle: *mut uvll::uv_stream_t, nread: ssize_t,
_buf: *const Buf) {
uvdebug!("read_cb {}", nread);
assert!(nread != uvll::ECANCELED as ssize_t);
let rcx: &mut ReadContext = unsafe {
mem::transmute(uvll::get_data_for_uv_handle(handle))
};
// Stop reading so that no read callbacks are
// triggered before the user calls `read` again.
// FIXME: Is there a performance impact to calling
// stop here?
unsafe { assert_eq!(uvll::uv_read_stop(handle), 0); }
rcx.result = nread;
wakeup(&mut rcx.task);
}
// Unlike reading, the WriteContext is stored in the uv_write_t request. Like
// reading, however, all this does is wake up the blocked task after squirreling
// away the error code as a result.
extern fn write_cb(req: *mut uvll::uv_write_t, status: c_int) {
let mut req = Request::wrap(req);
// Remember to not free the request because it is re-used between writes on
// the same stream.
let wcx: &mut WriteContext = unsafe { req.get_data() };
wcx.result = status;
// If the stream is present, we haven't timed out, otherwise we acquire
// ownership of everything and then deallocate it all at once.
if wcx.stream as uint != 0 {
req.defuse();
let stream: &mut StreamWatcher = unsafe { &mut *wcx.stream };
wakeup(&mut stream.blocked_writer);
} else {
let _wcx: Box<WriteContext> = unsafe { mem::transmute(wcx) };
}
}
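The comment block above hands ownership of the write context over to the libuv callback so that the callback can free both the request and the buffer. As a rough, self-contained sketch of that hand-off pattern in present-day Rust (the types and the callback here are invented for illustration, and no libuv calls are made):

use std::os::raw::c_void;

struct WriteData {
    payload: Vec<u8>,
}

extern "C" fn done_cb(data: *mut c_void) {
    // Reconstruct the Box and let it drop, freeing the allocation. This is
    // the callback's half of the ownership hand-off.
    let boxed: Box<WriteData> = unsafe { Box::from_raw(data as *mut WriteData) };
    println!("callback freed a {}-byte payload", boxed.payload.len());
}

fn hand_off(payload: Vec<u8>, cb: extern "C" fn(*mut c_void)) {
    // Move the context to the heap and give up ownership on the Rust side;
    // whoever runs `cb` is now responsible for freeing it.
    let raw = Box::into_raw(Box::new(WriteData { payload })) as *mut c_void;
    cb(raw);
}

fn main() {
    hand_off(vec![1, 2, 3], done_cb);
}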

View File

@ -1,411 +0,0 @@
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use libc::c_int;
use std::mem;
use std::rt::task::BlockedTask;
use std::rt::rtio::IoResult;
use access;
use homing::{HomeHandle, HomingMissile};
use timer::TimerWatcher;
use uvll;
use uvio::UvIoFactory;
use {Loop, UvError, uv_error_to_io_error, Request, wakeup};
use {UvHandle, wait_until_woken_after};
/// Management of a timeout when gaining access to a portion of a duplex stream.
pub struct AccessTimeout<T> {
state: TimeoutState,
timer: Option<Box<TimerWatcher>>,
pub access: access::Access<T>,
}
pub struct Guard<'a, T:'static> {
state: &'a mut TimeoutState,
pub access: access::Guard<'a, T>,
pub can_timeout: bool,
}
#[deriving(PartialEq)]
enum TimeoutState {
NoTimeout,
TimeoutPending(ClientState),
TimedOut,
}
#[deriving(PartialEq)]
enum ClientState {
NoWaiter,
AccessPending,
RequestPending,
}
struct TimerContext {
timeout: *mut AccessTimeout<()>,
callback: fn(*mut AccessTimeout<()>, &TimerContext),
user_unblock: fn(uint) -> Option<BlockedTask>,
user_payload: uint,
}
impl<T: Send> AccessTimeout<T> {
pub fn new(data: T) -> AccessTimeout<T> {
AccessTimeout {
state: NoTimeout,
timer: None,
access: access::Access::new(data),
}
}
/// Grants access to half of a duplex stream, timing out if necessary.
///
/// On success, Ok(Guard) is returned and access has been granted to the
/// stream. If a timeout occurs, then Err is returned with an appropriate
/// error.
pub fn grant<'a>(&'a mut self, m: HomingMissile) -> IoResult<Guard<'a, T>> {
// First, flag that we're attempting to acquire access. This will allow
// us to cancel the pending grant if we time out while waiting for a
// grant.
match self.state {
NoTimeout => {},
TimeoutPending(ref mut client) => *client = AccessPending,
TimedOut => return Err(uv_error_to_io_error(UvError(uvll::ECANCELED)))
}
let access = self.access.grant(self as *mut _ as uint, m);
// After acquiring the grant, we need to flag ourselves as having a
// pending request so the timeout knows to cancel the request.
let can_timeout = match self.state {
NoTimeout => false,
TimeoutPending(ref mut client) => { *client = RequestPending; true }
TimedOut => return Err(uv_error_to_io_error(UvError(uvll::ECANCELED)))
};
Ok(Guard {
access: access,
state: &mut self.state,
can_timeout: can_timeout
})
}
pub fn timed_out(&self) -> bool {
match self.state {
TimedOut => true,
_ => false,
}
}
/// Sets the pending timeout to the value specified.
///
/// The home/loop variables are used to construct a timer if one has not
/// been previously constructed.
///
/// The callback will be invoked if the timeout elapses, and the data of
/// the timer will be set to `data`.
pub fn set_timeout(&mut self, ms: Option<u64>,
home: &HomeHandle,
loop_: &Loop,
cb: fn(uint) -> Option<BlockedTask>,
data: uint) {
self.state = NoTimeout;
let ms = match ms {
Some(ms) => ms,
None => return match self.timer {
Some(ref mut t) => t.stop(),
None => {}
}
};
// If we have a timeout, lazily initialize the timer which will be used
// to fire when the timeout runs out.
if self.timer.is_none() {
let mut timer = box TimerWatcher::new_home(loop_, home.clone());
let mut cx = box TimerContext {
timeout: self as *mut _ as *mut AccessTimeout<()>,
callback: real_cb::<T>,
user_unblock: cb,
user_payload: data,
};
unsafe {
timer.set_data(&mut *cx);
mem::forget(cx);
}
self.timer = Some(timer);
}
let timer = self.timer.as_mut().unwrap();
unsafe {
let cx = uvll::get_data_for_uv_handle(timer.handle);
let cx = cx as *mut TimerContext;
(*cx).user_unblock = cb;
(*cx).user_payload = data;
}
timer.stop();
timer.start(timer_cb, ms, 0);
self.state = TimeoutPending(NoWaiter);
extern fn timer_cb(timer: *mut uvll::uv_timer_t) {
let cx: &TimerContext = unsafe {
&*(uvll::get_data_for_uv_handle(timer) as *const TimerContext)
};
(cx.callback)(cx.timeout, cx);
}
fn real_cb<T: Send>(timeout: *mut AccessTimeout<()>, cx: &TimerContext) {
let timeout = timeout as *mut AccessTimeout<T>;
let me = unsafe { &mut *timeout };
match mem::replace(&mut me.state, TimedOut) {
TimedOut | NoTimeout => unreachable!(),
TimeoutPending(NoWaiter) => {}
TimeoutPending(AccessPending) => {
match unsafe { me.access.dequeue(me as *mut _ as uint) } {
Some(task) => task.reawaken(),
None => unreachable!(),
}
}
TimeoutPending(RequestPending) => {
match (cx.user_unblock)(cx.user_payload) {
Some(task) => task.reawaken(),
None => unreachable!(),
}
}
}
}
}
}
impl<T: Send> Clone for AccessTimeout<T> {
fn clone(&self) -> AccessTimeout<T> {
AccessTimeout {
access: self.access.clone(),
state: NoTimeout,
timer: None,
}
}
}
#[unsafe_destructor]
impl<'a, T> Drop for Guard<'a, T> {
fn drop(&mut self) {
match *self.state {
TimeoutPending(NoWaiter) | TimeoutPending(AccessPending) =>
unreachable!(),
NoTimeout | TimedOut => {}
TimeoutPending(RequestPending) => {
*self.state = TimeoutPending(NoWaiter);
}
}
}
}
#[unsafe_destructor]
impl<T> Drop for AccessTimeout<T> {
fn drop(&mut self) {
match self.timer {
Some(ref timer) => unsafe {
let data = uvll::get_data_for_uv_handle(timer.handle);
let _data: Box<TimerContext> = mem::transmute(data);
},
None => {}
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Connect timeouts
////////////////////////////////////////////////////////////////////////////////
pub struct ConnectCtx {
pub status: c_int,
pub task: Option<BlockedTask>,
pub timer: Option<Box<TimerWatcher>>,
}
impl ConnectCtx {
pub fn connect<T>(
mut self, obj: T, timeout: Option<u64>, io: &mut UvIoFactory,
f: |&Request, &T, uvll::uv_connect_cb| -> c_int
) -> Result<T, UvError> {
let mut req = Request::new(uvll::UV_CONNECT);
let r = f(&req, &obj, connect_cb);
return match r {
0 => {
req.defuse(); // uv callback now owns this request
match timeout {
Some(t) => {
let mut timer = TimerWatcher::new(io);
timer.start(timer_cb, t, 0);
self.timer = Some(timer);
}
None => {}
}
wait_until_woken_after(&mut self.task, &io.loop_, || {
let data = &self as *const _ as *mut ConnectCtx;
match self.timer {
Some(ref mut timer) => unsafe { timer.set_data(data) },
None => {}
}
req.set_data(data);
});
// Make sure an erroneously fired callback doesn't have access
// to the context any more.
req.set_data(0 as *mut int);
// If we failed because of a timeout, drop the TcpWatcher as
// soon as possible because its data is now set to null and we
// want to cancel the callback ASAP.
match self.status {
0 => Ok(obj),
n => { drop(obj); Err(UvError(n)) }
}
}
n => Err(UvError(n))
};
extern fn timer_cb(handle: *mut uvll::uv_timer_t) {
// Don't close the corresponding tcp request, just wake up the task
// and let RAII take care of the pending watcher.
let cx: &mut ConnectCtx = unsafe {
&mut *(uvll::get_data_for_uv_handle(handle) as *mut ConnectCtx)
};
cx.status = uvll::ECANCELED;
wakeup(&mut cx.task);
}
extern fn connect_cb(req: *mut uvll::uv_connect_t, status: c_int) {
// This callback can be invoked with ECANCELED if the watcher is
// closed by the timeout callback. In that case we just want to free
// the request and be along our merry way.
let req = Request::wrap(req);
if status == uvll::ECANCELED { return }
// Apparently on windows when the handle is closed this callback may
// not be invoked with ECANCELED but rather another error code.
// Either way, if the data is null, then our timeout has expired
// and there's nothing we can do.
let data = unsafe { uvll::get_data_for_req(req.handle) };
if data.is_null() { return }
let cx: &mut ConnectCtx = unsafe { &mut *(data as *mut ConnectCtx) };
cx.status = status;
match cx.timer {
Some(ref mut t) => t.stop(),
None => {}
}
// Note that the timer callback doesn't cancel the connect request
// (that's the job of uv_close()), so it's possible for this
// callback to get triggered after the timeout callback fires, but
// before the task wakes up. In that case, we did indeed
// successfully connect, but we don't need to wake someone up. We
// updated the status above (correctly so), and the task will pick
// up on this when it wakes up.
if cx.task.is_some() {
wakeup(&mut cx.task);
}
}
}
}
pub struct AcceptTimeout<T> {
access: AccessTimeout<AcceptorState<T>>,
}
struct AcceptorState<T> {
blocked_acceptor: Option<BlockedTask>,
pending: Vec<IoResult<T>>,
}
impl<T: Send> AcceptTimeout<T> {
pub fn new() -> AcceptTimeout<T> {
AcceptTimeout {
access: AccessTimeout::new(AcceptorState {
blocked_acceptor: None,
pending: Vec::new(),
})
}
}
pub fn accept(&mut self,
missile: HomingMissile,
loop_: &Loop) -> IoResult<T> {
// If we've timed out but we're not closed yet, poll the state of the
// queue to see if we can peel off a connection.
if self.access.timed_out() && !self.access.access.is_closed(&missile) {
let tmp = self.access.access.get_mut(&missile);
return match tmp.pending.remove(0) {
Some(msg) => msg,
None => Err(uv_error_to_io_error(UvError(uvll::ECANCELED)))
}
}
// Now that we're not polling, attempt to gain access and then peel off
// a connection. If we have no pending connections, then we need to go
// to sleep and wait for one.
//
// Note that if we're woken up for a pending connection then we're
// guaranteed that the check above will not steal our connection due to
// the single-threaded nature of the event loop.
let mut guard = try!(self.access.grant(missile));
if guard.access.is_closed() {
return Err(uv_error_to_io_error(UvError(uvll::EOF)))
}
match guard.access.pending.remove(0) {
Some(msg) => return msg,
None => {}
}
wait_until_woken_after(&mut guard.access.blocked_acceptor, loop_, || {});
match guard.access.pending.remove(0) {
_ if guard.access.is_closed() => {
Err(uv_error_to_io_error(UvError(uvll::EOF)))
}
Some(msg) => msg,
None => Err(uv_error_to_io_error(UvError(uvll::ECANCELED)))
}
}
pub unsafe fn push(&mut self, t: IoResult<T>) {
let state = self.access.access.unsafe_get();
(*state).pending.push(t);
let _ = (*state).blocked_acceptor.take().map(|t| t.reawaken());
}
pub fn set_timeout(&mut self,
ms: Option<u64>,
loop_: &Loop,
home: &HomeHandle) {
self.access.set_timeout(ms, home, loop_, cancel_accept::<T>,
self as *mut _ as uint);
fn cancel_accept<T: Send>(me: uint) -> Option<BlockedTask> {
unsafe {
let me: &mut AcceptTimeout<T> = mem::transmute(me);
(*me.access.access.unsafe_get()).blocked_acceptor.take()
}
}
}
pub fn close(&mut self, m: HomingMissile) {
self.access.access.close(&m);
let task = self.access.access.get_mut(&m).blocked_acceptor.take();
drop(m);
let _ = task.map(|t| t.reawaken());
}
}
impl<T: Send> Clone for AcceptTimeout<T> {
fn clone(&self) -> AcceptTimeout<T> {
AcceptTimeout { access: self.access.clone() }
}
}
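AcceptTimeout above interleaves a queue of pending connections with a timer so that accept() either returns a queued connection or fails with ECANCELED. Outside the old rtio stack, a similar accept-or-time-out behaviour can be approximated with a channel; a minimal sketch, with hypothetical names and no libuv involvement:

use std::sync::mpsc::{channel, RecvTimeoutError};
use std::thread;
use std::time::Duration;

fn main() {
    let (tx, rx) = channel::<u32>();

    // Stand-in for the event loop pushing an accepted connection onto the
    // pending queue a little while after we start waiting.
    thread::spawn(move || {
        thread::sleep(Duration::from_millis(10));
        let _ = tx.send(42);
    });

    // Accept with a deadline: either a pending "connection" shows up in time
    // or we report a timeout, mirroring the behaviour accept() above provides.
    match rx.recv_timeout(Duration::from_millis(200)) {
        Ok(conn) => println!("accepted connection {}", conn),
        Err(RecvTimeoutError::Timeout) => println!("accept timed out"),
        Err(RecvTimeoutError::Disconnected) => println!("listener closed"),
    }
}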

View File

@ -1,173 +0,0 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::mem;
use std::rt::rtio::{RtioTimer, Callback};
use std::rt::task::BlockedTask;
use homing::{HomeHandle, HomingIO};
use super::{UvHandle, ForbidUnwind, ForbidSwitch, wait_until_woken_after, Loop};
use uvio::UvIoFactory;
use uvll;
pub struct TimerWatcher {
pub handle: *mut uvll::uv_timer_t,
home: HomeHandle,
action: Option<NextAction>,
blocker: Option<BlockedTask>,
id: uint, // see comments in timer_cb
}
pub enum NextAction {
WakeTask,
CallOnce(Box<Callback + Send>),
CallMany(Box<Callback + Send>, uint),
}
impl TimerWatcher {
pub fn new(io: &mut UvIoFactory) -> Box<TimerWatcher> {
let handle = io.make_handle();
let me = box TimerWatcher::new_home(&io.loop_, handle);
me.install()
}
pub fn new_home(loop_: &Loop, home: HomeHandle) -> TimerWatcher {
let handle = UvHandle::alloc(None::<TimerWatcher>, uvll::UV_TIMER);
assert_eq!(unsafe { uvll::uv_timer_init(loop_.handle, handle) }, 0);
TimerWatcher {
handle: handle,
action: None,
blocker: None,
home: home,
id: 0,
}
}
pub fn start(&mut self, f: uvll::uv_timer_cb, msecs: u64, period: u64) {
assert_eq!(unsafe {
uvll::uv_timer_start(self.handle, f, msecs, period)
}, 0)
}
pub fn stop(&mut self) {
assert_eq!(unsafe { uvll::uv_timer_stop(self.handle) }, 0)
}
pub unsafe fn set_data<T>(&mut self, data: *mut T) {
uvll::set_data_for_uv_handle(self.handle, data);
}
}
impl HomingIO for TimerWatcher {
fn home<'r>(&'r mut self) -> &'r mut HomeHandle { &mut self.home }
}
impl UvHandle<uvll::uv_timer_t> for TimerWatcher {
fn uv_handle(&self) -> *mut uvll::uv_timer_t { self.handle }
}
impl RtioTimer for TimerWatcher {
fn sleep(&mut self, msecs: u64) {
// As with all of the below functions, we must be extra careful when
// destroying the previous action. If the previous action was a channel,
// destroying it could invoke a context switch. For these situations,
// we must temporarily un-home ourselves, then destroy the action, and
// then re-home again.
let missile = self.fire_homing_missile();
self.id += 1;
self.stop();
let _missile = match mem::replace(&mut self.action, None) {
None => missile, // no need to do a homing dance
Some(action) => {
drop(missile); // un-home ourself
drop(action); // destroy the previous action
self.fire_homing_missile() // re-home ourself
}
};
// If the descheduling operation unwinds after the timer has been
// started, then we need to call stop on the timer.
let _f = ForbidUnwind::new("timer");
self.action = Some(WakeTask);
wait_until_woken_after(&mut self.blocker, &self.uv_loop(), || {
self.start(timer_cb, msecs, 0);
});
self.stop();
}
fn oneshot(&mut self, msecs: u64, cb: Box<Callback + Send>) {
// similarly to the destructor, we must drop the previous action outside
// of the homing missile
let _prev_action = {
let _m = self.fire_homing_missile();
self.id += 1;
self.stop();
self.start(timer_cb, msecs, 0);
mem::replace(&mut self.action, Some(CallOnce(cb)))
};
}
fn period(&mut self, msecs: u64, cb: Box<Callback + Send>) {
// similarly to the destructor, we must drop the previous action outside
// of the homing missile
let _prev_action = {
let _m = self.fire_homing_missile();
self.id += 1;
self.stop();
self.start(timer_cb, msecs, msecs);
mem::replace(&mut self.action, Some(CallMany(cb, self.id)))
};
}
}
extern fn timer_cb(handle: *mut uvll::uv_timer_t) {
let _f = ForbidSwitch::new("timer callback can't switch");
let timer: &mut TimerWatcher = unsafe { UvHandle::from_uv_handle(&handle) };
match timer.action.take().unwrap() {
WakeTask => {
let task = timer.blocker.take().unwrap();
let _ = task.wake().map(|t| t.reawaken());
}
CallOnce(mut cb) => { cb.call() }
CallMany(mut cb, id) => {
cb.call();
// Note that the above operation could have performed some form of
// scheduling. This means that the timer may have decided to insert
// some other action to happen. This 'id' keeps track of the updates
// to the timer, so we only reset the action back to sending on this
// channel if the id has remained the same. This is essentially a
// bug in that we have mutably aliasable memory, but that's libuv
// for you. We're guaranteed to all be running on the same thread,
// so there's no need for any synchronization here.
if timer.id == id {
timer.action = Some(CallMany(cb, id));
}
}
}
}
impl Drop for TimerWatcher {
fn drop(&mut self) {
// note that this drop is a little subtle. Dropping a channel which is
// held internally may invoke some scheduling operations. We can't take
// the channel unless we're on the home scheduler, but once we're on the
// home scheduler we should never move. Hence, we take the timer's
// action item and then move it outside of the homing block.
let _action = {
let _m = self.fire_homing_missile();
self.stop();
self.close();
self.action.take()
};
}
}
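The id check in timer_cb above guards against a callback that was rescheduled while it ran. The same generation-counter idea can be sketched on its own, independent of libuv (illustrative names only):

struct Periodic {
    id: u64,                            // bumped on every reschedule
    action: Option<Box<dyn FnMut()>>,   // the callback to run on each tick
}

impl Periodic {
    fn reschedule(&mut self, action: Box<dyn FnMut()>) {
        self.id += 1; // invalidates any tick that is already in flight
        self.action = Some(action);
    }

    // `armed_id` is the value of `id` captured when the tick was scheduled.
    fn tick(&mut self, armed_id: u64) {
        if let Some(mut action) = self.action.take() {
            action();
            // Re-arm only if nothing rescheduled the timer while we ran.
            if self.id == armed_id {
                self.action = Some(action);
            }
        }
    }
}

fn main() {
    let mut timer = Periodic { id: 0, action: None };
    timer.reschedule(Box::new(|| println!("tick")));
    let armed = timer.id;
    timer.tick(armed); // runs and stays armed
    timer.tick(armed); // still armed, runs again
}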

View File

@ -1,136 +0,0 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use libc;
use std::ptr;
use std::rt::rtio::{RtioTTY, IoResult};
use homing::{HomingIO, HomeHandle};
use stream::StreamWatcher;
use super::{UvError, UvHandle, uv_error_to_io_error};
use uvio::UvIoFactory;
use uvll;
pub struct TtyWatcher{
tty: *mut uvll::uv_tty_t,
stream: StreamWatcher,
home: HomeHandle,
fd: libc::c_int,
}
impl TtyWatcher {
pub fn new(io: &mut UvIoFactory, fd: libc::c_int, readable: bool)
-> Result<TtyWatcher, UvError>
{
// libuv may succeed in giving us a handle (via uv_tty_init), but if the
// handle isn't actually connected to a terminal there are frequently
// many problems in using it with libuv. To get around this, always
// return a failure if the specified file descriptor isn't actually a
// TTY.
//
// Related:
// - https://github.com/joyent/libuv/issues/982
// - https://github.com/joyent/libuv/issues/988
let guess = unsafe { uvll::guess_handle(fd) };
if guess != uvll::UV_TTY as libc::c_int {
return Err(UvError(uvll::EBADF));
}
// libuv was recently changed to not close the stdio file descriptors,
// but it did not change the behavior for windows. Until this issue is
// fixed, we need to dup the stdio file descriptors because otherwise
// uv_close will close them
let fd = if cfg!(windows) && fd <= libc::STDERR_FILENO {
unsafe { libc::dup(fd) }
} else { fd };
// If this file descriptor is indeed guessed to be a tty, then go ahead
// with attempting to open it as a tty.
let handle = UvHandle::alloc(None::<TtyWatcher>, uvll::UV_TTY);
let mut watcher = TtyWatcher {
tty: handle,
stream: StreamWatcher::new(handle, true),
home: io.make_handle(),
fd: fd,
};
match unsafe {
uvll::uv_tty_init(io.uv_loop(), handle, fd as libc::c_int,
readable as libc::c_int)
} {
0 => Ok(watcher),
n => {
// On windows, libuv returns errors before initializing the
// handle, so our only cleanup is to free the handle itself
if cfg!(windows) {
unsafe { uvll::free_handle(handle); }
watcher.tty = ptr::null_mut();
}
Err(UvError(n))
}
}
}
}
impl RtioTTY for TtyWatcher {
fn read(&mut self, buf: &mut [u8]) -> IoResult<uint> {
let _m = self.fire_homing_missile();
self.stream.read(buf).map_err(uv_error_to_io_error)
}
fn write(&mut self, buf: &[u8]) -> IoResult<()> {
let _m = self.fire_homing_missile();
self.stream.write(buf, false).map_err(uv_error_to_io_error)
}
fn set_raw(&mut self, raw: bool) -> IoResult<()> {
let raw = raw as libc::c_int;
let _m = self.fire_homing_missile();
match unsafe { uvll::uv_tty_set_mode(self.tty, raw) } {
0 => Ok(()),
n => Err(uv_error_to_io_error(UvError(n)))
}
}
#[allow(unused_mut)]
fn get_winsize(&mut self) -> IoResult<(int, int)> {
let mut width: libc::c_int = 0;
let mut height: libc::c_int = 0;
let widthptr: *mut libc::c_int = &mut width;
let heightptr: *mut libc::c_int = &mut height;
let _m = self.fire_homing_missile();
match unsafe { uvll::uv_tty_get_winsize(self.tty,
widthptr, heightptr) } {
0 => Ok((width as int, height as int)),
n => Err(uv_error_to_io_error(UvError(n)))
}
}
fn isatty(&self) -> bool {
unsafe { uvll::guess_handle(self.fd) == uvll::UV_TTY as libc::c_int }
}
}
impl UvHandle<uvll::uv_tty_t> for TtyWatcher {
fn uv_handle(&self) -> *mut uvll::uv_tty_t { self.tty }
}
impl HomingIO for TtyWatcher {
fn home<'a>(&'a mut self) -> &'a mut HomeHandle { &mut self.home }
}
impl Drop for TtyWatcher {
fn drop(&mut self) {
if !self.tty.is_null() {
let _m = self.fire_homing_missile();
self.close_async_();
}
}
}
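TtyWatcher::new above refuses to wrap a file descriptor unless libuv guesses that it is a TTY. A comparable guard can be written directly against libc; a small sketch, assuming the libc crate as a dependency:

fn fd_is_tty(fd: std::os::raw::c_int) -> bool {
    // libc::isatty returns 1 for a terminal, 0 otherwise.
    unsafe { libc::isatty(fd) == 1 }
}

fn main() {
    println!("stdin is a tty: {}", fd_is_tty(0));
}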

View File

@ -1,326 +0,0 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The implementation of `rtio` for libuv
use std::c_str::CString;
use std::mem;
use libc::c_int;
use libc::{O_CREAT, O_APPEND, O_TRUNC, O_RDWR, O_RDONLY, O_WRONLY, S_IRUSR,
S_IWUSR};
use libc;
use std::rt::rtio;
use std::rt::rtio::{ProcessConfig, IoFactory, EventLoop, IoResult};
#[cfg(test)] use std::rt::thread::Thread;
use super::{uv_error_to_io_error, Loop};
use addrinfo::GetAddrInfoRequest;
use async::AsyncWatcher;
use file::{FsRequest, FileWatcher};
use queue::QueuePool;
use homing::HomeHandle;
use idle::IdleWatcher;
use net::{TcpWatcher, TcpListener, UdpWatcher};
use pipe::{PipeWatcher, PipeListener};
use process::Process;
use signal::SignalWatcher;
use timer::TimerWatcher;
use tty::TtyWatcher;
use uvll;
// Obviously an Event Loop is always home.
pub struct UvEventLoop {
uvio: UvIoFactory
}
impl UvEventLoop {
pub fn new() -> UvEventLoop {
let mut loop_ = Loop::new();
let handle_pool = QueuePool::new(&mut loop_);
UvEventLoop {
uvio: UvIoFactory {
loop_: loop_,
handle_pool: Some(handle_pool),
}
}
}
}
impl Drop for UvEventLoop {
fn drop(&mut self) {
// Must first destroy the pool of handles before we destroy the loop
// because otherwise the contained async handle will be destroyed after
// the loop is free'd (use-after-free). We also must free the uv handle
// after the loop has been closed because the handle is apparently still
// needed while the loop is closing.
//
// Lastly, after we've closed the pool of handles we pump the event loop
// one last time to run any closing callbacks to make sure the loop
// shuts down cleanly.
let handle = self.uvio.handle_pool.as_ref().unwrap().handle();
drop(self.uvio.handle_pool.take());
self.run();
self.uvio.loop_.close();
unsafe { uvll::free_handle(handle) }
}
}
impl EventLoop for UvEventLoop {
fn run(&mut self) {
self.uvio.loop_.run();
}
fn callback(&mut self, f: proc()) {
IdleWatcher::onetime(&mut self.uvio.loop_, f);
}
fn pausable_idle_callback(&mut self, cb: Box<rtio::Callback + Send>)
-> Box<rtio::PausableIdleCallback + Send> {
IdleWatcher::new(&mut self.uvio.loop_, cb)
as Box<rtio::PausableIdleCallback + Send>
}
fn remote_callback(&mut self, f: Box<rtio::Callback + Send>)
-> Box<rtio::RemoteCallback + Send> {
box AsyncWatcher::new(&mut self.uvio.loop_, f) as
Box<rtio::RemoteCallback + Send>
}
fn io<'a>(&'a mut self) -> Option<&'a mut rtio::IoFactory> {
let factory = &mut self.uvio as &mut rtio::IoFactory;
Some(factory)
}
fn has_active_io(&self) -> bool {
self.uvio.loop_.get_blockers() > 0
}
}
#[test]
fn test_callback_run_once() {
Thread::start(proc() {
let mut event_loop = UvEventLoop::new();
let mut count = 0;
let count_ptr: *mut int = &mut count;
event_loop.callback(proc() {
unsafe { *count_ptr += 1 }
});
event_loop.run();
assert_eq!(count, 1);
}).join();
}
pub struct UvIoFactory {
pub loop_: Loop,
handle_pool: Option<Box<QueuePool>>,
}
impl UvIoFactory {
pub fn uv_loop<'a>(&mut self) -> *mut uvll::uv_loop_t { self.loop_.handle }
pub fn make_handle(&mut self) -> HomeHandle {
// It's understood by the homing code that the "local id" is just the
// pointer of the local I/O factory cast to a uint.
let id: uint = unsafe { mem::transmute_copy(&self) };
HomeHandle::new(id, &mut **self.handle_pool.as_mut().unwrap())
}
}
impl IoFactory for UvIoFactory {
// Connect to an address and return a new stream
// NB: This blocks the task waiting on the connection.
// It would probably be better to return a future
fn tcp_connect(&mut self, addr: rtio::SocketAddr, timeout: Option<u64>)
-> IoResult<Box<rtio::RtioTcpStream + Send>> {
match TcpWatcher::connect(self, addr, timeout) {
Ok(t) => Ok(box t as Box<rtio::RtioTcpStream + Send>),
Err(e) => Err(uv_error_to_io_error(e)),
}
}
fn tcp_bind(&mut self, addr: rtio::SocketAddr)
-> IoResult<Box<rtio::RtioTcpListener + Send>> {
match TcpListener::bind(self, addr) {
Ok(t) => Ok(t as Box<rtio::RtioTcpListener + Send>),
Err(e) => Err(uv_error_to_io_error(e)),
}
}
fn udp_bind(&mut self, addr: rtio::SocketAddr)
-> IoResult<Box<rtio::RtioUdpSocket + Send>> {
match UdpWatcher::bind(self, addr) {
Ok(u) => Ok(box u as Box<rtio::RtioUdpSocket + Send>),
Err(e) => Err(uv_error_to_io_error(e)),
}
}
fn timer_init(&mut self) -> IoResult<Box<rtio::RtioTimer + Send>> {
Ok(TimerWatcher::new(self) as Box<rtio::RtioTimer + Send>)
}
fn get_host_addresses(&mut self, host: Option<&str>, servname: Option<&str>,
hint: Option<rtio::AddrinfoHint>)
-> IoResult<Vec<rtio::AddrinfoInfo>>
{
let r = GetAddrInfoRequest::run(&self.loop_, host, servname, hint);
r.map_err(uv_error_to_io_error)
}
fn fs_from_raw_fd(&mut self, fd: c_int, close: rtio::CloseBehavior)
-> Box<rtio::RtioFileStream + Send> {
box FileWatcher::new(self, fd, close) as
Box<rtio::RtioFileStream + Send>
}
fn fs_open(&mut self, path: &CString, fm: rtio::FileMode,
fa: rtio::FileAccess)
-> IoResult<Box<rtio::RtioFileStream + Send>>
{
let flags = match fm {
rtio::Open => 0,
rtio::Append => libc::O_APPEND,
rtio::Truncate => libc::O_TRUNC,
};
// Opening with a write permission must silently create the file.
let (flags, mode) = match fa {
rtio::Read => (flags | libc::O_RDONLY, 0),
rtio::Write => (flags | libc::O_WRONLY | libc::O_CREAT,
libc::S_IRUSR | libc::S_IWUSR),
rtio::ReadWrite => (flags | libc::O_RDWR | libc::O_CREAT,
libc::S_IRUSR | libc::S_IWUSR),
};
match FsRequest::open(self, path, flags as int, mode as int) {
Ok(fs) => Ok(box fs as Box<rtio::RtioFileStream + Send>),
Err(e) => Err(uv_error_to_io_error(e))
}
}
fn fs_unlink(&mut self, path: &CString) -> IoResult<()> {
let r = FsRequest::unlink(&self.loop_, path);
r.map_err(uv_error_to_io_error)
}
fn fs_lstat(&mut self, path: &CString) -> IoResult<rtio::FileStat> {
let r = FsRequest::lstat(&self.loop_, path);
r.map_err(uv_error_to_io_error)
}
fn fs_stat(&mut self, path: &CString) -> IoResult<rtio::FileStat> {
let r = FsRequest::stat(&self.loop_, path);
r.map_err(uv_error_to_io_error)
}
fn fs_mkdir(&mut self, path: &CString, perm: uint) -> IoResult<()> {
let r = FsRequest::mkdir(&self.loop_, path, perm as c_int);
r.map_err(uv_error_to_io_error)
}
fn fs_rmdir(&mut self, path: &CString) -> IoResult<()> {
let r = FsRequest::rmdir(&self.loop_, path);
r.map_err(uv_error_to_io_error)
}
fn fs_rename(&mut self, path: &CString, to: &CString) -> IoResult<()> {
let r = FsRequest::rename(&self.loop_, path, to);
r.map_err(uv_error_to_io_error)
}
fn fs_chmod(&mut self, path: &CString, perm: uint) -> IoResult<()> {
let r = FsRequest::chmod(&self.loop_, path, perm as c_int);
r.map_err(uv_error_to_io_error)
}
fn fs_readdir(&mut self, path: &CString, flags: c_int)
-> IoResult<Vec<CString>>
{
let r = FsRequest::readdir(&self.loop_, path, flags);
r.map_err(uv_error_to_io_error)
}
fn fs_link(&mut self, src: &CString, dst: &CString) -> IoResult<()> {
let r = FsRequest::link(&self.loop_, src, dst);
r.map_err(uv_error_to_io_error)
}
fn fs_symlink(&mut self, src: &CString, dst: &CString) -> IoResult<()> {
let r = FsRequest::symlink(&self.loop_, src, dst);
r.map_err(uv_error_to_io_error)
}
fn fs_chown(&mut self, path: &CString, uid: int, gid: int) -> IoResult<()> {
let r = FsRequest::chown(&self.loop_, path, uid, gid);
r.map_err(uv_error_to_io_error)
}
fn fs_readlink(&mut self, path: &CString) -> IoResult<CString> {
let r = FsRequest::readlink(&self.loop_, path);
r.map_err(uv_error_to_io_error)
}
fn fs_utime(&mut self, path: &CString, atime: u64, mtime: u64)
-> IoResult<()>
{
let r = FsRequest::utime(&self.loop_, path, atime, mtime);
r.map_err(uv_error_to_io_error)
}
fn spawn(&mut self, cfg: ProcessConfig)
-> IoResult<(Box<rtio::RtioProcess + Send>,
Vec<Option<Box<rtio::RtioPipe + Send>>>)>
{
match Process::spawn(self, cfg) {
Ok((p, io)) => {
Ok((p as Box<rtio::RtioProcess + Send>,
io.into_iter().map(|i| i.map(|p| {
box p as Box<rtio::RtioPipe + Send>
})).collect()))
}
Err(e) => Err(uv_error_to_io_error(e)),
}
}
fn kill(&mut self, pid: libc::pid_t, signum: int) -> IoResult<()> {
Process::kill(pid, signum).map_err(uv_error_to_io_error)
}
fn unix_bind(&mut self, path: &CString)
-> IoResult<Box<rtio::RtioUnixListener + Send>> {
match PipeListener::bind(self, path) {
Ok(p) => Ok(p as Box<rtio::RtioUnixListener + Send>),
Err(e) => Err(uv_error_to_io_error(e)),
}
}
fn unix_connect(&mut self, path: &CString, timeout: Option<u64>)
-> IoResult<Box<rtio::RtioPipe + Send>> {
match PipeWatcher::connect(self, path, timeout) {
Ok(p) => Ok(box p as Box<rtio::RtioPipe + Send>),
Err(e) => Err(uv_error_to_io_error(e)),
}
}
fn tty_open(&mut self, fd: c_int, readable: bool)
-> IoResult<Box<rtio::RtioTTY + Send>> {
match TtyWatcher::new(self, fd, readable) {
Ok(tty) => Ok(box tty as Box<rtio::RtioTTY + Send>),
Err(e) => Err(uv_error_to_io_error(e))
}
}
fn pipe_open(&mut self, fd: c_int)
-> IoResult<Box<rtio::RtioPipe + Send>>
{
match PipeWatcher::open(self, fd) {
Ok(s) => Ok(box s as Box<rtio::RtioPipe + Send>),
Err(e) => Err(uv_error_to_io_error(e))
}
}
fn signal(&mut self, signum: int, cb: Box<rtio::Callback + Send>)
-> IoResult<Box<rtio::RtioSignal + Send>>
{
match SignalWatcher::new(self, signum, cb) {
Ok(s) => Ok(s as Box<rtio::RtioSignal + Send>),
Err(e) => Err(uv_error_to_io_error(e)),
}
}
}
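fs_open above maps rtio's FileMode and FileAccess onto raw O_* flags, creating the file whenever it is opened for writing. For comparison, the same combinations are expressed today with std::fs::OpenOptions; a brief sketch (the paths and helper names are made up):

use std::fs::{File, OpenOptions};
use std::io;

// Append + Write: create the file if it is missing and append on every write.
fn open_append(path: &str) -> io::Result<File> {
    OpenOptions::new().append(true).create(true).open(path)
}

// Truncate + ReadWrite: create if missing and drop any existing contents.
fn open_truncate_rw(path: &str) -> io::Result<File> {
    OpenOptions::new()
        .read(true)
        .write(true)
        .create(true)
        .truncate(true)
        .open(path)
}

fn main() -> io::Result<()> {
    let _log = open_append("example.log")?;
    let _scratch = open_truncate_rw("scratch.bin")?;
    Ok(())
}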

View File

@ -1,742 +0,0 @@
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
* Low-level bindings to the libuv library.
*
* This module contains a set of direct, 'bare-metal' wrappers around
* the libuv C-API.
*
* We're not bothering yet to redefine uv's structs as Rust structs
* because they are quite large and change often between versions.
* The maintenance burden is just too high. Instead we use uv's
* `uv_handle_size` and `uv_req_size` to find the correct size of the
* structs and allocate them on the heap. This can be revisited later.
*
* There are also a collection of helper functions to ease interacting
* with the low-level API.
*
* As new functionality present in uv.h is added to the Rust stdlib,
* the mappings should be added in this module.
*/
#![allow(non_camel_case_types)] // C types
use libc::{size_t, c_int, c_uint, c_void, c_char, c_double};
use libc::{ssize_t, sockaddr, free, addrinfo};
use libc;
use std::rt::libc_heap::malloc_raw;
#[cfg(test)]
use libc::uintptr_t;
pub use self::errors::{EACCES, ECONNREFUSED, ECONNRESET, EPIPE, ECONNABORTED,
ECANCELED, EBADF, ENOTCONN, ENOENT, EADDRNOTAVAIL,
EADDRINUSE, EPERM};
pub static OK: c_int = 0;
pub static EOF: c_int = -4095;
pub static UNKNOWN: c_int = -4094;
// uv-errno.h redefines error codes for windows, but not for unix...
// https://github.com/joyent/libuv/blob/master/include/uv-errno.h
#[cfg(windows)]
pub mod errors {
use libc::c_int;
pub static EACCES: c_int = -4092;
pub static ECONNREFUSED: c_int = -4078;
pub static ECONNRESET: c_int = -4077;
pub static ENOENT: c_int = -4058;
pub static ENOTCONN: c_int = -4053;
pub static EPIPE: c_int = -4047;
pub static ECONNABORTED: c_int = -4079;
pub static ECANCELED: c_int = -4081;
pub static EBADF: c_int = -4083;
pub static EADDRNOTAVAIL: c_int = -4090;
pub static EADDRINUSE: c_int = -4091;
pub static EPERM: c_int = -4048;
}
#[cfg(not(windows))]
pub mod errors {
use libc;
use libc::c_int;
pub static EACCES: c_int = -libc::EACCES;
pub static ECONNREFUSED: c_int = -libc::ECONNREFUSED;
pub static ECONNRESET: c_int = -libc::ECONNRESET;
pub static ENOENT: c_int = -libc::ENOENT;
pub static ENOTCONN: c_int = -libc::ENOTCONN;
pub static EPIPE: c_int = -libc::EPIPE;
pub static ECONNABORTED: c_int = -libc::ECONNABORTED;
pub static ECANCELED : c_int = -libc::ECANCELED;
pub static EBADF : c_int = -libc::EBADF;
pub static EADDRNOTAVAIL : c_int = -libc::EADDRNOTAVAIL;
pub static EADDRINUSE : c_int = -libc::EADDRINUSE;
pub static EPERM: c_int = -libc::EPERM;
}
pub static PROCESS_SETUID: c_int = 1 << 0;
pub static PROCESS_SETGID: c_int = 1 << 1;
pub static PROCESS_WINDOWS_VERBATIM_ARGUMENTS: c_int = 1 << 2;
pub static PROCESS_DETACHED: c_int = 1 << 3;
pub static PROCESS_WINDOWS_HIDE: c_int = 1 << 4;
pub static STDIO_IGNORE: c_int = 0x00;
pub static STDIO_CREATE_PIPE: c_int = 0x01;
pub static STDIO_INHERIT_FD: c_int = 0x02;
pub static STDIO_INHERIT_STREAM: c_int = 0x04;
pub static STDIO_READABLE_PIPE: c_int = 0x10;
pub static STDIO_WRITABLE_PIPE: c_int = 0x20;
#[cfg(unix)]
pub type uv_buf_len_t = libc::size_t;
#[cfg(windows)]
pub type uv_buf_len_t = libc::c_ulong;
// see libuv/include/uv-unix.h
#[repr(C)]
#[cfg(unix)]
pub struct uv_buf_t {
pub base: *mut u8,
pub len: uv_buf_len_t,
}
#[cfg(unix)]
pub type uv_os_socket_t = c_int;
// see libuv/include/uv-win.h
#[cfg(windows)]
#[repr(C)]
pub struct uv_buf_t {
pub len: uv_buf_len_t,
pub base: *mut u8,
}
#[cfg(windows)]
pub type uv_os_socket_t = libc::SOCKET;
#[repr(C)]
pub enum uv_run_mode {
RUN_DEFAULT = 0,
RUN_ONCE,
RUN_NOWAIT,
}
#[repr(C)]
pub enum uv_poll_event {
UV_READABLE = 1,
UV_WRITABLE = 2,
}
#[repr(C)]
pub struct uv_process_options_t {
pub exit_cb: uv_exit_cb,
pub file: *const libc::c_char,
pub args: *const *const libc::c_char,
pub env: *const *const libc::c_char,
pub cwd: *const libc::c_char,
pub flags: libc::c_uint,
pub stdio_count: libc::c_int,
pub stdio: *mut uv_stdio_container_t,
pub uid: uv_uid_t,
pub gid: uv_gid_t,
}
// These fields are private because they must be interfaced with through the
// functions below.
#[repr(C)]
pub struct uv_stdio_container_t {
flags: libc::c_int,
stream: *mut uv_stream_t,
}
pub type uv_handle_t = c_void;
pub type uv_req_t = c_void;
pub type uv_loop_t = c_void;
pub type uv_idle_t = c_void;
pub type uv_tcp_t = c_void;
pub type uv_udp_t = c_void;
pub type uv_poll_t = c_void;
pub type uv_connect_t = c_void;
pub type uv_connection_t = c_void;
pub type uv_write_t = c_void;
pub type uv_async_t = c_void;
pub type uv_timer_t = c_void;
pub type uv_stream_t = c_void;
pub type uv_fs_t = c_void;
pub type uv_udp_send_t = c_void;
pub type uv_getaddrinfo_t = c_void;
pub type uv_process_t = c_void;
pub type uv_pipe_t = c_void;
pub type uv_tty_t = c_void;
pub type uv_signal_t = c_void;
pub type uv_shutdown_t = c_void;
#[repr(C)]
pub struct uv_timespec_t {
pub tv_sec: libc::c_long,
pub tv_nsec: libc::c_long
}
#[repr(C)]
pub struct uv_stat_t {
pub st_dev: libc::uint64_t,
pub st_mode: libc::uint64_t,
pub st_nlink: libc::uint64_t,
pub st_uid: libc::uint64_t,
pub st_gid: libc::uint64_t,
pub st_rdev: libc::uint64_t,
pub st_ino: libc::uint64_t,
pub st_size: libc::uint64_t,
pub st_blksize: libc::uint64_t,
pub st_blocks: libc::uint64_t,
pub st_flags: libc::uint64_t,
pub st_gen: libc::uint64_t,
pub st_atim: uv_timespec_t,
pub st_mtim: uv_timespec_t,
pub st_ctim: uv_timespec_t,
pub st_birthtim: uv_timespec_t
}
impl uv_stat_t {
pub fn new() -> uv_stat_t {
uv_stat_t {
st_dev: 0,
st_mode: 0,
st_nlink: 0,
st_uid: 0,
st_gid: 0,
st_rdev: 0,
st_ino: 0,
st_size: 0,
st_blksize: 0,
st_blocks: 0,
st_flags: 0,
st_gen: 0,
st_atim: uv_timespec_t { tv_sec: 0, tv_nsec: 0 },
st_mtim: uv_timespec_t { tv_sec: 0, tv_nsec: 0 },
st_ctim: uv_timespec_t { tv_sec: 0, tv_nsec: 0 },
st_birthtim: uv_timespec_t { tv_sec: 0, tv_nsec: 0 }
}
}
pub fn is_file(&self) -> bool {
((self.st_mode) & libc::S_IFMT as libc::uint64_t) == libc::S_IFREG as libc::uint64_t
}
pub fn is_dir(&self) -> bool {
((self.st_mode) & libc::S_IFMT as libc::uint64_t) == libc::S_IFDIR as libc::uint64_t
}
}
pub type uv_idle_cb = extern "C" fn(handle: *mut uv_idle_t);
pub type uv_alloc_cb = extern "C" fn(stream: *mut uv_stream_t,
suggested_size: size_t,
buf: *mut uv_buf_t);
pub type uv_read_cb = extern "C" fn(stream: *mut uv_stream_t,
nread: ssize_t,
buf: *const uv_buf_t);
pub type uv_udp_send_cb = extern "C" fn(req: *mut uv_udp_send_t,
status: c_int);
pub type uv_udp_recv_cb = extern "C" fn(handle: *mut uv_udp_t,
nread: ssize_t,
buf: *const uv_buf_t,
addr: *const sockaddr,
flags: c_uint);
pub type uv_close_cb = extern "C" fn(handle: *mut uv_handle_t);
pub type uv_poll_cb = extern "C" fn(handle: *mut uv_poll_t,
status: c_int,
events: c_int);
pub type uv_walk_cb = extern "C" fn(handle: *mut uv_handle_t,
arg: *mut c_void);
pub type uv_async_cb = extern "C" fn(handle: *mut uv_async_t);
pub type uv_connect_cb = extern "C" fn(handle: *mut uv_connect_t,
status: c_int);
pub type uv_connection_cb = extern "C" fn(handle: *mut uv_connection_t,
status: c_int);
pub type uv_timer_cb = extern "C" fn(handle: *mut uv_timer_t);
pub type uv_write_cb = extern "C" fn(handle: *mut uv_write_t,
status: c_int);
pub type uv_getaddrinfo_cb = extern "C" fn(req: *mut uv_getaddrinfo_t,
status: c_int,
res: *const addrinfo);
pub type uv_exit_cb = extern "C" fn(handle: *mut uv_process_t,
exit_status: i64,
term_signal: c_int);
pub type uv_signal_cb = extern "C" fn(handle: *mut uv_signal_t,
signum: c_int);
pub type uv_fs_cb = extern "C" fn(req: *mut uv_fs_t);
pub type uv_shutdown_cb = extern "C" fn(req: *mut uv_shutdown_t, status: c_int);
#[cfg(unix)] pub type uv_uid_t = libc::types::os::arch::posix88::uid_t;
#[cfg(unix)] pub type uv_gid_t = libc::types::os::arch::posix88::gid_t;
#[cfg(windows)] pub type uv_uid_t = libc::c_uchar;
#[cfg(windows)] pub type uv_gid_t = libc::c_uchar;
#[repr(C)]
#[deriving(PartialEq)]
pub enum uv_handle_type {
UV_UNKNOWN_HANDLE,
UV_ASYNC,
UV_CHECK,
UV_FS_EVENT,
UV_FS_POLL,
UV_HANDLE,
UV_IDLE,
UV_NAMED_PIPE,
UV_POLL,
UV_PREPARE,
UV_PROCESS,
UV_STREAM,
UV_TCP,
UV_TIMER,
UV_TTY,
UV_UDP,
UV_SIGNAL,
UV_FILE,
UV_HANDLE_TYPE_MAX
}
#[repr(C)]
#[cfg(unix)]
#[deriving(PartialEq)]
pub enum uv_req_type {
UV_UNKNOWN_REQ,
UV_REQ,
UV_CONNECT,
UV_WRITE,
UV_SHUTDOWN,
UV_UDP_SEND,
UV_FS,
UV_WORK,
UV_GETADDRINFO,
UV_GETNAMEINFO,
UV_REQ_TYPE_MAX
}
// uv_req_type may have additional fields defined by UV_REQ_TYPE_PRIVATE.
// See UV_REQ_TYPE_PRIVATE at libuv/include/uv-win.h
#[repr(C)]
#[cfg(windows)]
#[deriving(PartialEq)]
pub enum uv_req_type {
UV_UNKNOWN_REQ,
UV_REQ,
UV_CONNECT,
UV_WRITE,
UV_SHUTDOWN,
UV_UDP_SEND,
UV_FS,
UV_WORK,
UV_GETNAMEINFO,
UV_GETADDRINFO,
UV_ACCEPT,
UV_FS_EVENT_REQ,
UV_POLL_REQ,
UV_PROCESS_EXIT,
UV_READ,
UV_UDP_RECV,
UV_WAKEUP,
UV_SIGNAL_REQ,
UV_REQ_TYPE_MAX
}
#[repr(C)]
#[deriving(PartialEq)]
pub enum uv_membership {
UV_LEAVE_GROUP,
UV_JOIN_GROUP
}
pub unsafe fn malloc_handle(handle: uv_handle_type) -> *mut c_void {
assert!(handle != UV_UNKNOWN_HANDLE && handle != UV_HANDLE_TYPE_MAX);
let size = uv_handle_size(handle);
malloc_raw(size as uint) as *mut c_void
}
pub unsafe fn free_handle(v: *mut c_void) {
free(v as *mut c_void)
}
pub unsafe fn malloc_req(req: uv_req_type) -> *mut c_void {
assert!(req != UV_UNKNOWN_REQ && req != UV_REQ_TYPE_MAX);
let size = uv_req_size(req);
malloc_raw(size as uint) as *mut c_void
}
pub unsafe fn free_req(v: *mut c_void) {
free(v as *mut c_void)
}
#[test]
fn handle_sanity_check() {
unsafe {
assert_eq!(UV_HANDLE_TYPE_MAX as libc::uintptr_t, rust_uv_handle_type_max());
}
}
#[test]
fn request_sanity_check() {
unsafe {
assert_eq!(UV_REQ_TYPE_MAX as libc::uintptr_t, rust_uv_req_type_max());
}
}
// FIXME Event loops ignore SIGPIPE by default.
pub unsafe fn loop_new() -> *mut c_void {
return rust_uv_loop_new();
}
pub unsafe fn uv_write(req: *mut uv_write_t,
stream: *mut uv_stream_t,
buf_in: &[uv_buf_t],
cb: uv_write_cb) -> c_int {
extern {
fn uv_write(req: *mut uv_write_t, stream: *mut uv_stream_t,
buf_in: *const uv_buf_t, buf_cnt: c_int,
cb: uv_write_cb) -> c_int;
}
let buf_ptr = buf_in.as_ptr();
let buf_cnt = buf_in.len() as i32;
return uv_write(req, stream, buf_ptr, buf_cnt, cb);
}
pub unsafe fn uv_udp_send(req: *mut uv_udp_send_t,
handle: *mut uv_udp_t,
buf_in: &[uv_buf_t],
addr: *const sockaddr,
cb: uv_udp_send_cb) -> c_int {
extern {
fn uv_udp_send(req: *mut uv_write_t, stream: *mut uv_stream_t,
buf_in: *const uv_buf_t, buf_cnt: c_int,
addr: *const sockaddr,
cb: uv_udp_send_cb) -> c_int;
}
let buf_ptr = buf_in.as_ptr();
let buf_cnt = buf_in.len() as i32;
return uv_udp_send(req, handle, buf_ptr, buf_cnt, addr, cb);
}
pub unsafe fn get_udp_handle_from_send_req(send_req: *mut uv_udp_send_t) -> *mut uv_udp_t {
return rust_uv_get_udp_handle_from_send_req(send_req);
}
pub unsafe fn process_pid(p: *mut uv_process_t) -> c_int {
return rust_uv_process_pid(p);
}
pub unsafe fn set_stdio_container_flags(c: *mut uv_stdio_container_t,
flags: libc::c_int) {
rust_set_stdio_container_flags(c, flags);
}
pub unsafe fn set_stdio_container_fd(c: *mut uv_stdio_container_t,
fd: libc::c_int) {
rust_set_stdio_container_fd(c, fd);
}
pub unsafe fn set_stdio_container_stream(c: *mut uv_stdio_container_t,
stream: *mut uv_stream_t) {
rust_set_stdio_container_stream(c, stream);
}
// data access helpers
pub unsafe fn get_result_from_fs_req(req: *mut uv_fs_t) -> ssize_t {
rust_uv_get_result_from_fs_req(req)
}
pub unsafe fn get_ptr_from_fs_req(req: *mut uv_fs_t) -> *mut libc::c_void {
rust_uv_get_ptr_from_fs_req(req)
}
pub unsafe fn get_path_from_fs_req(req: *mut uv_fs_t) -> *mut c_char {
rust_uv_get_path_from_fs_req(req)
}
pub unsafe fn get_loop_from_fs_req(req: *mut uv_fs_t) -> *mut uv_loop_t {
rust_uv_get_loop_from_fs_req(req)
}
pub unsafe fn get_loop_from_getaddrinfo_req(req: *mut uv_getaddrinfo_t) -> *mut uv_loop_t {
rust_uv_get_loop_from_getaddrinfo_req(req)
}
pub unsafe fn get_loop_for_uv_handle<T>(handle: *mut T) -> *mut c_void {
return rust_uv_get_loop_for_uv_handle(handle as *mut c_void);
}
pub unsafe fn get_stream_handle_from_connect_req(connect: *mut uv_connect_t) -> *mut uv_stream_t {
return rust_uv_get_stream_handle_from_connect_req(connect);
}
pub unsafe fn get_stream_handle_from_write_req(write_req: *mut uv_write_t) -> *mut uv_stream_t {
return rust_uv_get_stream_handle_from_write_req(write_req);
}
pub unsafe fn get_data_for_uv_loop(loop_ptr: *mut c_void) -> *mut c_void {
rust_uv_get_data_for_uv_loop(loop_ptr)
}
pub unsafe fn set_data_for_uv_loop(loop_ptr: *mut c_void, data: *mut c_void) {
rust_uv_set_data_for_uv_loop(loop_ptr, data);
}
pub unsafe fn get_data_for_uv_handle<T>(handle: *mut T) -> *mut c_void {
return rust_uv_get_data_for_uv_handle(handle as *mut c_void);
}
pub unsafe fn set_data_for_uv_handle<T, U>(handle: *mut T, data: *mut U) {
rust_uv_set_data_for_uv_handle(handle as *mut c_void, data as *mut c_void);
}
pub unsafe fn get_data_for_req<T>(req: *mut T) -> *mut c_void {
return rust_uv_get_data_for_req(req as *mut c_void);
}
pub unsafe fn set_data_for_req<T, U>(req: *mut T, data: *mut U) {
rust_uv_set_data_for_req(req as *mut c_void, data as *mut c_void);
}
pub unsafe fn populate_stat(req_in: *mut uv_fs_t, stat_out: *mut uv_stat_t) {
rust_uv_populate_uv_stat(req_in, stat_out)
}
pub unsafe fn guess_handle(handle: c_int) -> c_int {
rust_uv_guess_handle(handle)
}
// uv_support is the result of compiling rust_uv.cpp
//
// Note that this is in a cfg'd block so it doesn't get linked during testing.
// There's a bit of a conundrum when testing in that we're actually assuming
// that the tests are running in a uv loop, but they were created from the
// uv that was statically linked into the original rustuv crate. When we create the test
// executable, on some platforms if we re-link against uv, it actually creates
// second copies of everything. We obviously don't want this, so instead of
// dying horribly during testing, we allow all of the test rustuv's references
// to get resolved to the original rustuv crate.
#[cfg(not(test))]
#[link(name = "uv_support", kind = "static")]
#[link(name = "uv", kind = "static")]
extern {}
extern {
fn rust_uv_loop_new() -> *mut c_void;
#[cfg(test)]
fn rust_uv_handle_type_max() -> uintptr_t;
#[cfg(test)]
fn rust_uv_req_type_max() -> uintptr_t;
fn rust_uv_get_udp_handle_from_send_req(req: *mut uv_udp_send_t) -> *mut uv_udp_t;
fn rust_uv_populate_uv_stat(req_in: *mut uv_fs_t, stat_out: *mut uv_stat_t);
fn rust_uv_get_result_from_fs_req(req: *mut uv_fs_t) -> ssize_t;
fn rust_uv_get_ptr_from_fs_req(req: *mut uv_fs_t) -> *mut libc::c_void;
fn rust_uv_get_path_from_fs_req(req: *mut uv_fs_t) -> *mut c_char;
fn rust_uv_get_loop_from_fs_req(req: *mut uv_fs_t) -> *mut uv_loop_t;
fn rust_uv_get_loop_from_getaddrinfo_req(req: *mut uv_fs_t) -> *mut uv_loop_t;
fn rust_uv_get_stream_handle_from_connect_req(req: *mut uv_connect_t) -> *mut uv_stream_t;
fn rust_uv_get_stream_handle_from_write_req(req: *mut uv_write_t) -> *mut uv_stream_t;
fn rust_uv_get_loop_for_uv_handle(handle: *mut c_void) -> *mut c_void;
fn rust_uv_get_data_for_uv_loop(loop_ptr: *mut c_void) -> *mut c_void;
fn rust_uv_set_data_for_uv_loop(loop_ptr: *mut c_void, data: *mut c_void);
fn rust_uv_get_data_for_uv_handle(handle: *mut c_void) -> *mut c_void;
fn rust_uv_set_data_for_uv_handle(handle: *mut c_void, data: *mut c_void);
fn rust_uv_get_data_for_req(req: *mut c_void) -> *mut c_void;
fn rust_uv_set_data_for_req(req: *mut c_void, data: *mut c_void);
fn rust_set_stdio_container_flags(c: *mut uv_stdio_container_t, flags: c_int);
fn rust_set_stdio_container_fd(c: *mut uv_stdio_container_t, fd: c_int);
fn rust_set_stdio_container_stream(c: *mut uv_stdio_container_t,
stream: *mut uv_stream_t);
fn rust_uv_process_pid(p: *mut uv_process_t) -> c_int;
fn rust_uv_guess_handle(fd: c_int) -> c_int;
// generic uv functions
pub fn uv_loop_delete(l: *mut uv_loop_t);
pub fn uv_ref(t: *mut uv_handle_t);
pub fn uv_unref(t: *mut uv_handle_t);
pub fn uv_handle_size(ty: uv_handle_type) -> size_t;
pub fn uv_req_size(ty: uv_req_type) -> size_t;
pub fn uv_run(l: *mut uv_loop_t, mode: uv_run_mode) -> c_int;
pub fn uv_close(h: *mut uv_handle_t, cb: uv_close_cb);
pub fn uv_walk(l: *mut uv_loop_t, cb: uv_walk_cb, arg: *mut c_void);
pub fn uv_buf_init(base: *mut c_char, len: c_uint) -> uv_buf_t;
pub fn uv_strerror(err: c_int) -> *const c_char;
pub fn uv_err_name(err: c_int) -> *const c_char;
pub fn uv_listen(s: *mut uv_stream_t, backlog: c_int,
cb: uv_connection_cb) -> c_int;
pub fn uv_accept(server: *mut uv_stream_t, client: *mut uv_stream_t) -> c_int;
pub fn uv_read_start(stream: *mut uv_stream_t,
on_alloc: uv_alloc_cb,
on_read: uv_read_cb) -> c_int;
pub fn uv_read_stop(stream: *mut uv_stream_t) -> c_int;
pub fn uv_shutdown(req: *mut uv_shutdown_t, handle: *mut uv_stream_t,
cb: uv_shutdown_cb) -> c_int;
// idle bindings
pub fn uv_idle_init(l: *mut uv_loop_t, i: *mut uv_idle_t) -> c_int;
pub fn uv_idle_start(i: *mut uv_idle_t, cb: uv_idle_cb) -> c_int;
pub fn uv_idle_stop(i: *mut uv_idle_t) -> c_int;
// async bindings
pub fn uv_async_init(l: *mut uv_loop_t, a: *mut uv_async_t,
cb: uv_async_cb) -> c_int;
pub fn uv_async_send(a: *mut uv_async_t);
// tcp bindings
pub fn uv_tcp_init(l: *mut uv_loop_t, h: *mut uv_tcp_t) -> c_int;
pub fn uv_tcp_connect(c: *mut uv_connect_t, h: *mut uv_tcp_t,
addr: *const sockaddr, cb: uv_connect_cb) -> c_int;
pub fn uv_tcp_bind(t: *mut uv_tcp_t,
addr: *const sockaddr,
flags: c_uint) -> c_int;
pub fn uv_tcp_nodelay(h: *mut uv_tcp_t, enable: c_int) -> c_int;
pub fn uv_tcp_keepalive(h: *mut uv_tcp_t, enable: c_int,
delay: c_uint) -> c_int;
pub fn uv_tcp_simultaneous_accepts(h: *mut uv_tcp_t, enable: c_int) -> c_int;
pub fn uv_tcp_getsockname(h: *const uv_tcp_t, name: *mut sockaddr,
len: *mut c_int) -> c_int;
pub fn uv_tcp_getpeername(h: *const uv_tcp_t, name: *mut sockaddr,
len: *mut c_int) -> c_int;
// udp bindings
pub fn uv_udp_init(l: *mut uv_loop_t, h: *mut uv_udp_t) -> c_int;
pub fn uv_udp_bind(h: *mut uv_udp_t, addr: *const sockaddr,
flags: c_uint) -> c_int;
pub fn uv_udp_recv_start(server: *mut uv_udp_t,
on_alloc: uv_alloc_cb,
on_recv: uv_udp_recv_cb) -> c_int;
pub fn uv_udp_set_membership(handle: *mut uv_udp_t,
multicast_addr: *const c_char,
interface_addr: *const c_char,
membership: uv_membership) -> c_int;
pub fn uv_udp_recv_stop(server: *mut uv_udp_t) -> c_int;
pub fn uv_udp_set_multicast_loop(handle: *mut uv_udp_t, on: c_int) -> c_int;
pub fn uv_udp_set_multicast_ttl(handle: *mut uv_udp_t, ttl: c_int) -> c_int;
pub fn uv_udp_set_ttl(handle: *mut uv_udp_t, ttl: c_int) -> c_int;
pub fn uv_udp_set_broadcast(handle: *mut uv_udp_t, on: c_int) -> c_int;
pub fn uv_udp_getsockname(h: *const uv_udp_t, name: *mut sockaddr,
len: *mut c_int) -> c_int;
// timer bindings
pub fn uv_timer_init(l: *mut uv_loop_t, t: *mut uv_timer_t) -> c_int;
pub fn uv_timer_start(t: *mut uv_timer_t, cb: uv_timer_cb,
timeout: libc::uint64_t,
repeat: libc::uint64_t) -> c_int;
pub fn uv_timer_stop(handle: *mut uv_timer_t) -> c_int;
// fs operations
pub fn uv_fs_open(loop_ptr: *mut uv_loop_t, req: *mut uv_fs_t,
path: *const c_char, flags: c_int, mode: c_int,
cb: uv_fs_cb) -> c_int;
pub fn uv_fs_unlink(loop_ptr: *mut uv_loop_t, req: *mut uv_fs_t,
path: *const c_char, cb: uv_fs_cb) -> c_int;
pub fn uv_fs_write(l: *mut uv_loop_t, req: *mut uv_fs_t, fd: c_int,
bufs: *const uv_buf_t, nbufs: c_uint,
offset: i64, cb: uv_fs_cb) -> c_int;
pub fn uv_fs_read(l: *mut uv_loop_t, req: *mut uv_fs_t, fd: c_int,
bufs: *mut uv_buf_t, nbufs: c_uint,
offset: i64, cb: uv_fs_cb) -> c_int;
pub fn uv_fs_close(l: *mut uv_loop_t, req: *mut uv_fs_t, fd: c_int,
cb: uv_fs_cb) -> c_int;
pub fn uv_fs_stat(l: *mut uv_loop_t, req: *mut uv_fs_t, path: *const c_char,
cb: uv_fs_cb) -> c_int;
pub fn uv_fs_fstat(l: *mut uv_loop_t, req: *mut uv_fs_t, fd: c_int,
cb: uv_fs_cb) -> c_int;
pub fn uv_fs_mkdir(l: *mut uv_loop_t, req: *mut uv_fs_t, path: *const c_char,
mode: c_int, cb: uv_fs_cb) -> c_int;
pub fn uv_fs_rmdir(l: *mut uv_loop_t, req: *mut uv_fs_t, path: *const c_char,
cb: uv_fs_cb) -> c_int;
pub fn uv_fs_readdir(l: *mut uv_loop_t, req: *mut uv_fs_t,
path: *const c_char, flags: c_int,
cb: uv_fs_cb) -> c_int;
pub fn uv_fs_req_cleanup(req: *mut uv_fs_t);
pub fn uv_fs_fsync(handle: *mut uv_loop_t, req: *mut uv_fs_t, file: c_int,
cb: uv_fs_cb) -> c_int;
pub fn uv_fs_fdatasync(handle: *mut uv_loop_t, req: *mut uv_fs_t, file: c_int,
cb: uv_fs_cb) -> c_int;
pub fn uv_fs_ftruncate(handle: *mut uv_loop_t, req: *mut uv_fs_t, file: c_int,
offset: i64, cb: uv_fs_cb) -> c_int;
pub fn uv_fs_readlink(handle: *mut uv_loop_t, req: *mut uv_fs_t,
file: *const c_char, cb: uv_fs_cb) -> c_int;
pub fn uv_fs_symlink(handle: *mut uv_loop_t, req: *mut uv_fs_t,
src: *const c_char, dst: *const c_char, flags: c_int,
cb: uv_fs_cb) -> c_int;
pub fn uv_fs_rename(handle: *mut uv_loop_t, req: *mut uv_fs_t,
src: *const c_char, dst: *const c_char,
cb: uv_fs_cb) -> c_int;
pub fn uv_fs_utime(handle: *mut uv_loop_t, req: *mut uv_fs_t,
path: *const c_char, atime: c_double, mtime: c_double,
cb: uv_fs_cb) -> c_int;
pub fn uv_fs_link(handle: *mut uv_loop_t, req: *mut uv_fs_t,
src: *const c_char, dst: *const c_char,
cb: uv_fs_cb) -> c_int;
pub fn uv_fs_chown(handle: *mut uv_loop_t, req: *mut uv_fs_t, src: *const c_char,
uid: uv_uid_t, gid: uv_gid_t, cb: uv_fs_cb) -> c_int;
pub fn uv_fs_chmod(handle: *mut uv_loop_t, req: *mut uv_fs_t,
path: *const c_char, mode: c_int, cb: uv_fs_cb) -> c_int;
pub fn uv_fs_lstat(handle: *mut uv_loop_t, req: *mut uv_fs_t,
file: *const c_char, cb: uv_fs_cb) -> c_int;
// poll bindings
pub fn uv_poll_init_socket(l: *mut uv_loop_t, h: *mut uv_poll_t, s: uv_os_socket_t) -> c_int;
pub fn uv_poll_start(h: *mut uv_poll_t, events: c_int, cb: uv_poll_cb) -> c_int;
pub fn uv_poll_stop(h: *mut uv_poll_t) -> c_int;
// getaddrinfo
pub fn uv_getaddrinfo(loop_: *mut uv_loop_t, req: *mut uv_getaddrinfo_t,
getaddrinfo_cb: uv_getaddrinfo_cb,
node: *const c_char, service: *const c_char,
hints: *const addrinfo) -> c_int;
pub fn uv_freeaddrinfo(ai: *mut addrinfo);
// process spawning
pub fn uv_spawn(loop_ptr: *mut uv_loop_t, outptr: *mut uv_process_t,
options: *mut uv_process_options_t) -> c_int;
pub fn uv_process_kill(p: *mut uv_process_t, signum: c_int) -> c_int;
pub fn uv_kill(pid: c_int, signum: c_int) -> c_int;
// pipes
pub fn uv_pipe_init(l: *mut uv_loop_t, p: *mut uv_pipe_t,
ipc: c_int) -> c_int;
pub fn uv_pipe_open(pipe: *mut uv_pipe_t, file: c_int) -> c_int;
pub fn uv_pipe_bind(pipe: *mut uv_pipe_t, name: *const c_char) -> c_int;
pub fn uv_pipe_connect(req: *mut uv_connect_t, handle: *mut uv_pipe_t,
name: *const c_char, cb: uv_connect_cb);
// tty
pub fn uv_tty_init(l: *mut uv_loop_t, tty: *mut uv_tty_t, fd: c_int,
readable: c_int) -> c_int;
pub fn uv_tty_set_mode(tty: *mut uv_tty_t, mode: c_int) -> c_int;
pub fn uv_tty_get_winsize(tty: *mut uv_tty_t,
width: *mut c_int,
height: *mut c_int) -> c_int;
// signals
pub fn uv_signal_init(loop_: *mut uv_loop_t,
handle: *mut uv_signal_t) -> c_int;
pub fn uv_signal_start(h: *mut uv_signal_t, cb: uv_signal_cb,
signum: c_int) -> c_int;
pub fn uv_signal_stop(handle: *mut uv_signal_t) -> c_int;
}
// libuv requires other native libraries on various platforms. These are all
// listed here (for each platform)
// libuv doesn't use pthread on windows
// android libc (bionic) provides pthread, so no additional link is required
#[cfg(not(any(windows, target_os = "android")))]
#[link(name = "pthread")]
extern {}
#[cfg(any(target_os = "linux", target_os = "dragonfly"))]
#[link(name = "rt")]
extern {}
#[cfg(target_os = "windows")]
#[link(name = "ws2_32")]
#[link(name = "psapi")]
#[link(name = "iphlpapi")]
extern {}
#[cfg(any(target_os = "freebsd", target_os = "dragonfly"))]
#[link(name = "kvm")]
extern {}
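The uvll module docs above explain that handle structs are never mirrored in Rust; uv_handle_size and uv_req_size report their sizes and the memory is treated as an opaque blob. A standalone sketch of that allocation pattern using std::alloc, with the size hard-coded in place of a libuv call:

use std::alloc::{alloc_zeroed, dealloc, Layout};
use std::mem::align_of;

// Allocate `size` zeroed bytes as an opaque blob, the way malloc_handle above
// reserves space for a handle whose layout Rust never sees.
unsafe fn alloc_opaque(size: usize) -> (*mut u8, Layout) {
    let layout = Layout::from_size_align(size, align_of::<usize>()).unwrap();
    (alloc_zeroed(layout), layout)
}

fn main() {
    // 256 is a stand-in for what uv_handle_size(UV_TCP) would report at runtime.
    unsafe {
        let (handle, layout) = alloc_opaque(256);
        assert!(!handle.is_null());
        // ... hand `handle` to the C library here ...
        dealloc(handle, layout);
    }
}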

View File

@ -948,9 +948,7 @@ mod test {
use io::{SeekSet, SeekCur, SeekEnd, Read, Open, ReadWrite};
use io;
use str;
use io::fs::{File, rmdir, mkdir, readdir, rmdir_recursive,
mkdir_recursive, copy, unlink, stat, symlink, link,
readlink, chmod, lstat, change_file_times};
use io::fs::*;
use path::Path;
use io;
use ops::Drop;
@ -1002,7 +1000,8 @@ mod test {
TempDir(ret)
}
iotest!(fn file_test_io_smoke_test() {
#[test]
fn file_test_io_smoke_test() {
let message = "it's alright. have a good time";
let tmpdir = tmpdir();
let filename = &tmpdir.join("file_rt_io_file_test.txt");
@ -1020,9 +1019,10 @@ mod test {
assert_eq!(read_str.as_slice(), message);
}
check!(unlink(filename));
})
}
iotest!(fn invalid_path_raises() {
#[test]
fn invalid_path_raises() {
let tmpdir = tmpdir();
let filename = &tmpdir.join("file_that_does_not_exist.txt");
let result = File::open_mode(filename, Open, Read);
@ -1032,9 +1032,10 @@ mod test {
error!(result, "no such file or directory");
}
error!(result, format!("path={}; mode=open; access=read", filename.display()));
})
}
iotest!(fn file_test_iounlinking_invalid_path_should_raise_condition() {
#[test]
fn file_test_iounlinking_invalid_path_should_raise_condition() {
let tmpdir = tmpdir();
let filename = &tmpdir.join("file_another_file_that_does_not_exist.txt");
@ -1045,9 +1046,10 @@ mod test {
error!(result, "no such file or directory");
}
error!(result, format!("path={}", filename.display()));
})
}
iotest!(fn file_test_io_non_positional_read() {
#[test]
fn file_test_io_non_positional_read() {
let message: &str = "ten-four";
let mut read_mem = [0, .. 8];
let tmpdir = tmpdir();
@ -1070,9 +1072,10 @@ mod test {
check!(unlink(filename));
let read_str = str::from_utf8(read_mem).unwrap();
assert_eq!(read_str, message);
})
}
iotest!(fn file_test_io_seek_and_tell_smoke_test() {
#[test]
fn file_test_io_seek_and_tell_smoke_test() {
let message = "ten-four";
let mut read_mem = [0, .. 4];
let set_cursor = 4 as u64;
@ -1096,9 +1099,10 @@ mod test {
assert_eq!(read_str, message.slice(4, 8));
assert_eq!(tell_pos_pre_read, set_cursor);
assert_eq!(tell_pos_post_read, message.len() as u64);
})
}
iotest!(fn file_test_io_seek_and_write() {
#[test]
fn file_test_io_seek_and_write() {
let initial_msg = "food-is-yummy";
let overwrite_msg = "-the-bar!!";
let final_msg = "foo-the-bar!!";
@ -1119,9 +1123,10 @@ mod test {
check!(unlink(filename));
let read_str = str::from_utf8(read_mem).unwrap();
assert!(read_str.as_slice() == final_msg.as_slice());
})
}
iotest!(fn file_test_io_seek_shakedown() {
#[test]
fn file_test_io_seek_shakedown() {
use str; // 01234567890123
let initial_msg = "qwer-asdf-zxcv";
let chunk_one: &str = "qwer";
@ -1150,9 +1155,10 @@ mod test {
assert_eq!(str::from_utf8(read_mem).unwrap(), chunk_one);
}
check!(unlink(filename));
})
}
iotest!(fn file_test_stat_is_correct_on_is_file() {
#[test]
fn file_test_stat_is_correct_on_is_file() {
let tmpdir = tmpdir();
let filename = &tmpdir.join("file_stat_correct_on_is_file.txt");
{
@ -1168,9 +1174,10 @@ mod test {
let stat_res_meth = check!(filename.stat());
assert_eq!(stat_res_meth.kind, io::TypeFile);
check!(unlink(filename));
})
}
iotest!(fn file_test_stat_is_correct_on_is_dir() {
#[test]
fn file_test_stat_is_correct_on_is_dir() {
let tmpdir = tmpdir();
let filename = &tmpdir.join("file_stat_correct_on_is_dir");
check!(mkdir(filename, io::UserRWX));
@ -1179,26 +1186,29 @@ mod test {
let stat_res_meth = check!(filename.stat());
assert!(stat_res_meth.kind == io::TypeDirectory);
check!(rmdir(filename));
})
}
iotest!(fn file_test_fileinfo_false_when_checking_is_file_on_a_directory() {
#[test]
fn file_test_fileinfo_false_when_checking_is_file_on_a_directory() {
let tmpdir = tmpdir();
let dir = &tmpdir.join("fileinfo_false_on_dir");
check!(mkdir(dir, io::UserRWX));
assert!(dir.is_file() == false);
check!(rmdir(dir));
})
}
iotest!(fn file_test_fileinfo_check_exists_before_and_after_file_creation() {
#[test]
fn file_test_fileinfo_check_exists_before_and_after_file_creation() {
let tmpdir = tmpdir();
let file = &tmpdir.join("fileinfo_check_exists_b_and_a.txt");
check!(File::create(file).write(b"foo"));
assert!(file.exists());
check!(unlink(file));
assert!(!file.exists());
})
}
iotest!(fn file_test_directoryinfo_check_exists_before_and_after_mkdir() {
#[test]
fn file_test_directoryinfo_check_exists_before_and_after_mkdir() {
let tmpdir = tmpdir();
let dir = &tmpdir.join("before_and_after_dir");
assert!(!dir.exists());
@ -1207,9 +1217,10 @@ mod test {
assert!(dir.is_dir());
check!(rmdir(dir));
assert!(!dir.exists());
})
}
iotest!(fn file_test_directoryinfo_readdir() {
#[test]
fn file_test_directoryinfo_readdir() {
use str;
let tmpdir = tmpdir();
let dir = &tmpdir.join("di_readdir");
@ -1238,9 +1249,10 @@ mod test {
check!(unlink(f));
}
check!(rmdir(dir));
})
}
iotest!(fn file_test_walk_dir() {
#[test]
fn file_test_walk_dir() {
let tmpdir = tmpdir();
let dir = &tmpdir.join("walk_dir");
check!(mkdir(dir, io::UserRWX));
@ -1264,16 +1276,18 @@ mod test {
}
check!(rmdir_recursive(dir));
})
}
iotest!(fn recursive_mkdir() {
#[test]
fn recursive_mkdir() {
let tmpdir = tmpdir();
let dir = tmpdir.join("d1/d2");
check!(mkdir_recursive(&dir, io::UserRWX));
assert!(dir.is_dir())
})
}
iotest!(fn recursive_mkdir_failure() {
#[test]
fn recursive_mkdir_failure() {
let tmpdir = tmpdir();
let dir = tmpdir.join("d1");
let file = dir.join("f1");
@ -1287,15 +1301,17 @@ mod test {
error!(result, "couldn't create directory");
error!(result, "mode=0700");
error!(result, format!("path={}", file.display()));
})
}
iotest!(fn recursive_mkdir_slash() {
#[test]
fn recursive_mkdir_slash() {
check!(mkdir_recursive(&Path::new("/"), io::UserRWX));
})
}
// FIXME(#12795) depends on lstat to work on windows
#[cfg(not(windows))]
iotest!(fn recursive_rmdir() {
#[test]
fn recursive_rmdir() {
let tmpdir = tmpdir();
let d1 = tmpdir.join("d1");
let dt = d1.join("t");
@ -1310,9 +1326,10 @@ mod test {
assert!(!d1.is_dir());
assert!(canary.exists());
})
}
iotest!(fn unicode_path_is_dir() {
#[test]
fn unicode_path_is_dir() {
assert!(Path::new(".").is_dir());
assert!(!Path::new("test/stdtest/fs.rs").is_dir());
@ -1328,9 +1345,10 @@ mod test {
check!(File::create(&filepath)); // ignore return; touch only
assert!(!filepath.is_dir());
assert!(filepath.exists());
})
}
iotest!(fn unicode_path_exists() {
#[test]
fn unicode_path_exists() {
assert!(Path::new(".").exists());
assert!(!Path::new("test/nonexistent-bogus-path").exists());
@ -1340,9 +1358,10 @@ mod test {
check!(mkdir(&unicode, io::UserRWX));
assert!(unicode.exists());
assert!(!Path::new("test/unicode-bogus-path-각丁ー再见").exists());
})
}
iotest!(fn copy_file_does_not_exist() {
#[test]
fn copy_file_does_not_exist() {
let from = Path::new("test/nonexistent-bogus-path");
let to = Path::new("test/other-bogus-path");
@ -1358,9 +1377,10 @@ mod test {
assert!(!to.exists());
}
}
})
}
iotest!(fn copy_file_ok() {
#[test]
fn copy_file_ok() {
let tmpdir = tmpdir();
let input = tmpdir.join("in.txt");
let out = tmpdir.join("out.txt");
@ -1371,9 +1391,10 @@ mod test {
assert_eq!(contents.as_slice(), b"hello");
assert_eq!(check!(input.stat()).perm, check!(out.stat()).perm);
})
}
iotest!(fn copy_file_dst_dir() {
#[test]
fn copy_file_dst_dir() {
let tmpdir = tmpdir();
let out = tmpdir.join("out");
@ -1381,9 +1402,10 @@ mod test {
match copy(&out, tmpdir.path()) {
Ok(..) => fail!(), Err(..) => {}
}
})
}
iotest!(fn copy_file_dst_exists() {
#[test]
fn copy_file_dst_exists() {
let tmpdir = tmpdir();
let input = tmpdir.join("in");
let output = tmpdir.join("out");
@ -1394,9 +1416,10 @@ mod test {
assert_eq!(check!(File::open(&output).read_to_end()),
(Vec::from_slice(b"foo")));
})
}
iotest!(fn copy_file_src_dir() {
#[test]
fn copy_file_src_dir() {
let tmpdir = tmpdir();
let out = tmpdir.join("out");
@ -1404,9 +1427,10 @@ mod test {
Ok(..) => fail!(), Err(..) => {}
}
assert!(!out.exists());
})
}
iotest!(fn copy_file_preserves_perm_bits() {
#[test]
fn copy_file_preserves_perm_bits() {
let tmpdir = tmpdir();
let input = tmpdir.join("in.txt");
let out = tmpdir.join("out.txt");
@ -1418,10 +1442,11 @@ mod test {
check!(chmod(&input, io::UserFile));
check!(chmod(&out, io::UserFile));
})
}
#[cfg(not(windows))] // FIXME(#10264) operation not permitted?
iotest!(fn symlinks_work() {
#[test]
fn symlinks_work() {
let tmpdir = tmpdir();
let input = tmpdir.join("in.txt");
let out = tmpdir.join("out.txt");
@ -1435,25 +1460,28 @@ mod test {
assert_eq!(check!(stat(&out)).size, check!(stat(&input)).size);
assert_eq!(check!(File::open(&out).read_to_end()),
(Vec::from_slice(b"foobar")));
})
}
#[cfg(not(windows))] // apparently windows doesn't like symlinks
iotest!(fn symlink_noexist() {
#[test]
fn symlink_noexist() {
let tmpdir = tmpdir();
// symlinks can point to things that don't exist
check!(symlink(&tmpdir.join("foo"), &tmpdir.join("bar")));
assert!(check!(readlink(&tmpdir.join("bar"))) == tmpdir.join("foo"));
})
}
iotest!(fn readlink_not_symlink() {
#[test]
fn readlink_not_symlink() {
let tmpdir = tmpdir();
match readlink(tmpdir.path()) {
Ok(..) => fail!("wanted a failure"),
Err(..) => {}
}
})
}
iotest!(fn links_work() {
#[test]
fn links_work() {
let tmpdir = tmpdir();
let input = tmpdir.join("in.txt");
let out = tmpdir.join("out.txt");
@ -1481,9 +1509,10 @@ mod test {
Ok(..) => fail!("wanted a failure"),
Err(..) => {}
}
})
}
iotest!(fn chmod_works() {
#[test]
fn chmod_works() {
let tmpdir = tmpdir();
let file = tmpdir.join("in.txt");
@ -1498,9 +1527,10 @@ mod test {
}
check!(chmod(&file, io::UserFile));
})
}
iotest!(fn sync_doesnt_kill_anything() {
#[test]
fn sync_doesnt_kill_anything() {
let tmpdir = tmpdir();
let path = tmpdir.join("in.txt");
@ -1511,9 +1541,10 @@ mod test {
check!(file.fsync());
check!(file.datasync());
drop(file);
})
}
iotest!(fn truncate_works() {
#[test]
fn truncate_works() {
let tmpdir = tmpdir();
let path = tmpdir.join("in.txt");
@ -1542,9 +1573,10 @@ mod test {
assert_eq!(check!(File::open(&path).read_to_end()),
(Vec::from_slice(b"fo\0\0\0\0wut")));
drop(file);
})
}
iotest!(fn open_flavors() {
#[test]
fn open_flavors() {
let tmpdir = tmpdir();
match File::open_mode(&tmpdir.join("a"), io::Open, io::Read) {
@ -1602,9 +1634,10 @@ mod test {
}
assert!(check!(stat(&tmpdir.join("h"))).size == 3,
"truncate didn't truncate");
})
}
iotest!(fn utime() {
#[test]
fn utime() {
let tmpdir = tmpdir();
let path = tmpdir.join("a");
check!(File::create(&path));
@ -1613,18 +1646,20 @@ mod test {
check!(change_file_times(&path, 100000, 200000));
assert_eq!(check!(path.stat()).accessed, 100000);
assert_eq!(check!(path.stat()).modified, 200000);
})
}
iotest!(fn utime_noexist() {
#[test]
fn utime_noexist() {
let tmpdir = tmpdir();
match change_file_times(&tmpdir.join("a"), 100, 200) {
Ok(..) => fail!(),
Err(..) => {}
}
})
}
iotest!(fn binary_file() {
#[test]
fn binary_file() {
use rand::{StdRng, Rng};
let mut bytes = [0, ..1024];
@ -1635,13 +1670,14 @@ mod test {
check!(File::create(&tmpdir.join("test")).write(bytes));
let actual = check!(File::open(&tmpdir.join("test")).read_to_end());
assert!(actual.as_slice() == bytes);
})
}
iotest!(fn unlink_readonly() {
#[test]
fn unlink_readonly() {
let tmpdir = tmpdir();
let path = tmpdir.join("file");
check!(File::create(&path));
check!(chmod(&path, io::UserRead));
check!(unlink(&path));
})
}
}

View File

@ -21,7 +21,7 @@
`std::io` provides Rust's basic I/O types,
for reading and writing to files, TCP, UDP,
and other types of sockets and pipes,
manipulating the file system, spawning processes and signal handling.
manipulating the file system, and spawning processes.
# Examples
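To make the revised summary above concrete, here is a rough sketch of file and TCP usage in the pre-1.0 `std::io` API of this era, assembled only from calls that appear elsewhere in this diff; the file name, address, and port are illustrative:

```rust
use std::io::fs::File;
use std::io::net::tcp::TcpStream;
use std::path::Path;

fn main() {
    // File I/O: write a few bytes, then read them back.
    let path = Path::new("scratch.txt");
    File::create(&path).write(b"hello").unwrap();
    let bytes = File::open(&path).read_to_end().unwrap();
    assert_eq!(bytes.as_slice(), b"hello");

    // TCP: connect by IP string and port; nothing may be listening here,
    // so handle the error instead of unwrapping.
    match TcpStream::connect("127.0.0.1", 9600) {
        Ok(mut stream) => { stream.write([99]).unwrap(); }
        Err(e) => println!("connect failed: {}", e),
    }
}
```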
@ -265,9 +265,6 @@ pub use self::buffered::{BufferedReader, BufferedWriter, BufferedStream,
LineBufferedWriter};
pub use self::comm_adapters::{ChanReader, ChanWriter};
// this comes first to get the iotest! macro
pub mod test;
mod buffered;
mod comm_adapters;
mod mem;
@ -278,8 +275,8 @@ pub mod fs;
pub mod net;
pub mod pipe;
pub mod process;
pub mod signal;
pub mod stdio;
pub mod test;
pub mod timer;
pub mod util;

View File

@ -125,7 +125,13 @@ fn lookup(hostname: Option<&str>, servname: Option<&str>, hint: Option<Hint>)
// permission without help of apk
#[cfg(all(test, not(target_os = "android")))]
mod test {
iotest!(fn dns_smoke_test() {
use super::*;
use io::net::tcp::*;
use io::net::ip::*;
use io::net::udp::*;
#[test]
fn dns_smoke_test() {
let ipaddrs = get_host_addresses("localhost").unwrap();
let mut found_local = false;
let local_addr = &Ipv4Addr(127, 0, 0, 1);
@ -133,11 +139,13 @@ mod test {
found_local = found_local || addr == local_addr;
}
assert!(found_local);
})
}
iotest!(fn issue_10663() {
#[ignore]
#[test]
fn issue_10663() {
// Something should happen here, but this certainly shouldn't cause
// everything to die. The actual outcome we don't care too much about.
get_host_addresses("example.com").unwrap();
} #[ignore])
}
}

View File

@ -257,6 +257,8 @@ mod tests {
use super::*;
use io::*;
use io::test::*;
use io::fs::PathExtensions;
use time::Duration;
pub fn smalltest(server: proc(UnixStream):Send, client: proc(UnixStream):Send) {
let path1 = next_test_unix();
@ -277,7 +279,8 @@ mod tests {
}
}
iotest!(fn bind_error() {
#[test]
fn bind_error() {
let path = "path/to/nowhere";
match UnixListener::bind(&path) {
Ok(..) => fail!(),
@ -286,9 +289,10 @@ mod tests {
e.kind == InvalidInput);
}
}
})
}
iotest!(fn connect_error() {
#[test]
fn connect_error() {
let path = if cfg!(windows) {
r"\\.\pipe\this_should_not_exist_ever"
} else {
@ -300,9 +304,10 @@ mod tests {
assert!(e.kind == FileNotFound || e.kind == OtherIoError);
}
}
})
}
iotest!(fn smoke() {
#[test]
fn smoke() {
smalltest(proc(mut server) {
let mut buf = [0];
server.read(buf).unwrap();
@ -310,9 +315,11 @@ mod tests {
}, proc(mut client) {
client.write([99]).unwrap();
})
})
}
iotest!(fn read_eof() {
#[cfg_attr(windows, ignore)] // FIXME(#12516)
#[test]
fn read_eof() {
smalltest(proc(mut server) {
let mut buf = [0];
assert!(server.read(buf).is_err());
@ -320,9 +327,10 @@ mod tests {
}, proc(_client) {
// drop the client
})
} #[cfg_attr(windows, ignore)]) // FIXME(#12516)
}
iotest!(fn write_begone() {
#[test]
fn write_begone() {
smalltest(proc(mut server) {
let buf = [0];
loop {
@ -340,9 +348,10 @@ mod tests {
}, proc(_client) {
// drop the client
})
})
}
iotest!(fn accept_lots() {
#[test]
fn accept_lots() {
let times = 10;
let path1 = next_test_unix();
let path2 = path1.clone();
@ -371,16 +380,18 @@ mod tests {
}
assert_eq!(buf[0], 100);
}
})
}
#[cfg(unix)]
iotest!(fn path_exists() {
#[test]
fn path_exists() {
let path = next_test_unix();
let _acceptor = UnixListener::bind(&path).listen();
assert!(path.exists());
})
}
iotest!(fn unix_clone_smoke() {
#[test]
fn unix_clone_smoke() {
let addr = next_test_unix();
let mut acceptor = UnixListener::bind(&addr).listen();
@ -414,9 +425,10 @@ mod tests {
assert_eq!(s1.read(buf), Ok(1));
debug!("reader done");
rx2.recv();
})
}
iotest!(fn unix_clone_two_read() {
#[test]
fn unix_clone_two_read() {
let addr = next_test_unix();
let mut acceptor = UnixListener::bind(&addr).listen();
let (tx1, rx) = channel();
@ -446,9 +458,10 @@ mod tests {
tx1.send(());
rx.recv();
})
}
iotest!(fn unix_clone_two_write() {
#[test]
fn unix_clone_two_write() {
let addr = next_test_unix();
let mut acceptor = UnixListener::bind(&addr).listen();
@ -471,25 +484,30 @@ mod tests {
s1.write([2]).unwrap();
rx.recv();
})
}
iotest!(fn drop_removes_listener_path() {
#[cfg(not(windows))]
#[test]
fn drop_removes_listener_path() {
let path = next_test_unix();
let l = UnixListener::bind(&path).unwrap();
assert!(path.exists());
drop(l);
assert!(!path.exists());
} #[cfg(not(windows))])
}
iotest!(fn drop_removes_acceptor_path() {
#[cfg(not(windows))]
#[test]
fn drop_removes_acceptor_path() {
let path = next_test_unix();
let l = UnixListener::bind(&path).unwrap();
assert!(path.exists());
drop(l.listen().unwrap());
assert!(!path.exists());
} #[cfg(not(windows))])
}
iotest!(fn accept_timeout() {
#[test]
fn accept_timeout() {
let addr = next_test_unix();
let mut a = UnixListener::bind(&addr).unwrap().listen().unwrap();
@ -527,32 +545,37 @@ mod tests {
drop(UnixStream::connect(&addr2).unwrap());
});
a.accept().unwrap();
})
}
iotest!(fn connect_timeout_error() {
#[test]
fn connect_timeout_error() {
let addr = next_test_unix();
assert!(UnixStream::connect_timeout(&addr, Duration::milliseconds(100)).is_err());
})
}
iotest!(fn connect_timeout_success() {
#[test]
fn connect_timeout_success() {
let addr = next_test_unix();
let _a = UnixListener::bind(&addr).unwrap().listen().unwrap();
assert!(UnixStream::connect_timeout(&addr, Duration::milliseconds(100)).is_ok());
})
}
iotest!(fn connect_timeout_zero() {
#[test]
fn connect_timeout_zero() {
let addr = next_test_unix();
let _a = UnixListener::bind(&addr).unwrap().listen().unwrap();
assert!(UnixStream::connect_timeout(&addr, Duration::milliseconds(0)).is_err());
})
}
iotest!(fn connect_timeout_negative() {
#[test]
fn connect_timeout_negative() {
let addr = next_test_unix();
let _a = UnixListener::bind(&addr).unwrap().listen().unwrap();
assert!(UnixStream::connect_timeout(&addr, Duration::milliseconds(-1)).is_err());
})
}
iotest!(fn close_readwrite_smoke() {
#[test]
fn close_readwrite_smoke() {
let addr = next_test_unix();
let a = UnixListener::bind(&addr).listen().unwrap();
let (_tx, rx) = channel::<()>();
@ -586,9 +609,10 @@ mod tests {
let _ = s2.close_write();
let _ = s3.close_read();
let _ = s3.close_write();
})
}
iotest!(fn close_read_wakes_up() {
#[test]
fn close_read_wakes_up() {
let addr = next_test_unix();
let a = UnixListener::bind(&addr).listen().unwrap();
let (_tx, rx) = channel::<()>();
@ -611,9 +635,10 @@ mod tests {
// this test will never finish if the child doesn't wake up
rx.recv();
})
}
iotest!(fn readwrite_timeouts() {
#[test]
fn readwrite_timeouts() {
let addr = next_test_unix();
let mut a = UnixListener::bind(&addr).listen().unwrap();
let (tx, rx) = channel::<()>();
@ -648,9 +673,10 @@ mod tests {
tx.send(());
s.set_timeout(None);
assert_eq!(s.read([0, 0]), Ok(1));
})
}
iotest!(fn read_timeouts() {
#[test]
fn read_timeouts() {
let addr = next_test_unix();
let mut a = UnixListener::bind(&addr).listen().unwrap();
let (tx, rx) = channel::<()>();
@ -676,9 +702,10 @@ mod tests {
for _ in range(0u, 100) {
assert!(s.write([0, ..128 * 1024]).is_ok());
}
})
}
iotest!(fn write_timeouts() {
#[test]
fn write_timeouts() {
let addr = next_test_unix();
let mut a = UnixListener::bind(&addr).listen().unwrap();
let (tx, rx) = channel::<()>();
@ -702,9 +729,10 @@ mod tests {
tx.send(());
assert!(s.read([0]).is_ok());
})
}
iotest!(fn timeout_concurrent_read() {
#[test]
fn timeout_concurrent_read() {
let addr = next_test_unix();
let mut a = UnixListener::bind(&addr).listen().unwrap();
let (tx, rx) = channel::<()>();
@ -729,10 +757,11 @@ mod tests {
tx.send(());
rx2.recv();
})
}
#[cfg(not(windows))]
iotest!(fn clone_accept_smoke() {
#[test]
fn clone_accept_smoke() {
let addr = next_test_unix();
let l = UnixListener::bind(&addr);
let mut a = l.listen().unwrap();
@ -749,10 +778,11 @@ mod tests {
assert!(a.accept().is_ok());
drop(a);
assert!(a2.accept().is_ok());
})
}
#[cfg(not(windows))] // FIXME #17553
iotest!(fn clone_accept_concurrent() {
#[test]
fn clone_accept_concurrent() {
let addr = next_test_unix();
let l = UnixListener::bind(&addr);
let a = l.listen().unwrap();
@ -774,18 +804,20 @@ mod tests {
assert!(rx.recv().is_ok());
assert!(rx.recv().is_ok());
})
}
iotest!(fn close_accept_smoke() {
#[test]
fn close_accept_smoke() {
let addr = next_test_unix();
let l = UnixListener::bind(&addr);
let mut a = l.listen().unwrap();
a.close_accept().unwrap();
assert_eq!(a.accept().err().unwrap().kind, EndOfFile);
})
}
iotest!(fn close_accept_concurrent() {
#[test]
fn close_accept_concurrent() {
let addr = next_test_unix();
let l = UnixListener::bind(&addr);
let a = l.listen().unwrap();
@ -799,5 +831,5 @@ mod tests {
a2.close_accept().unwrap();
assert_eq!(rx.recv().err().unwrap().kind, EndOfFile);
})
}
}

View File

@ -523,26 +523,33 @@ impl Clone for TcpAcceptor {
#[allow(experimental)]
mod test {
use super::*;
use io::net::ip::SocketAddr;
use io::net::tcp::*;
use io::net::ip::*;
use io::net::udp::*;
use io::*;
use io::test::*;
use prelude::*;
// FIXME #11530 this fails on android because tests are run as root
iotest!(fn bind_error() {
#[cfg_attr(any(windows, target_os = "android"), ignore)]
#[test]
fn bind_error() {
match TcpListener::bind("0.0.0.0", 1) {
Ok(..) => fail!(),
Err(e) => assert_eq!(e.kind, PermissionDenied),
}
} #[cfg_attr(any(windows, target_os = "android"), ignore)])
}
iotest!(fn connect_error() {
#[test]
fn connect_error() {
match TcpStream::connect("0.0.0.0", 1) {
Ok(..) => fail!(),
Err(e) => assert_eq!(e.kind, ConnectionRefused),
}
})
}
iotest!(fn listen_ip4_localhost() {
#[test]
fn listen_ip4_localhost() {
let socket_addr = next_test_ip4();
let ip_str = socket_addr.ip.to_string();
let port = socket_addr.port;
@ -558,9 +565,10 @@ mod test {
let mut buf = [0];
stream.read(buf).unwrap();
assert!(buf[0] == 144);
})
}
iotest!(fn connect_localhost() {
#[test]
fn connect_localhost() {
let addr = next_test_ip4();
let ip_str = addr.ip.to_string();
let port = addr.port;
@ -575,9 +583,10 @@ mod test {
let mut buf = [0];
stream.read(buf).unwrap();
assert!(buf[0] == 64);
})
}
iotest!(fn connect_ip4_loopback() {
#[test]
fn connect_ip4_loopback() {
let addr = next_test_ip4();
let ip_str = addr.ip.to_string();
let port = addr.port;
@ -592,9 +601,10 @@ mod test {
let mut buf = [0];
stream.read(buf).unwrap();
assert!(buf[0] == 44);
})
}
iotest!(fn connect_ip6_loopback() {
#[test]
fn connect_ip6_loopback() {
let addr = next_test_ip6();
let ip_str = addr.ip.to_string();
let port = addr.port;
@ -609,9 +619,10 @@ mod test {
let mut buf = [0];
stream.read(buf).unwrap();
assert!(buf[0] == 66);
})
}
iotest!(fn smoke_test_ip4() {
#[test]
fn smoke_test_ip4() {
let addr = next_test_ip4();
let ip_str = addr.ip.to_string();
let port = addr.port;
@ -626,9 +637,10 @@ mod test {
let mut buf = [0];
stream.read(buf).unwrap();
assert!(buf[0] == 99);
})
}
iotest!(fn smoke_test_ip6() {
#[test]
fn smoke_test_ip6() {
let addr = next_test_ip6();
let ip_str = addr.ip.to_string();
let port = addr.port;
@ -643,9 +655,10 @@ mod test {
let mut buf = [0];
stream.read(buf).unwrap();
assert!(buf[0] == 99);
})
}
iotest!(fn read_eof_ip4() {
#[test]
fn read_eof_ip4() {
let addr = next_test_ip4();
let ip_str = addr.ip.to_string();
let port = addr.port;
@ -660,9 +673,10 @@ mod test {
let mut buf = [0];
let nread = stream.read(buf);
assert!(nread.is_err());
})
}
iotest!(fn read_eof_ip6() {
#[test]
fn read_eof_ip6() {
let addr = next_test_ip6();
let ip_str = addr.ip.to_string();
let port = addr.port;
@ -677,9 +691,10 @@ mod test {
let mut buf = [0];
let nread = stream.read(buf);
assert!(nread.is_err());
})
}
iotest!(fn read_eof_twice_ip4() {
#[test]
fn read_eof_twice_ip4() {
let addr = next_test_ip4();
let ip_str = addr.ip.to_string();
let port = addr.port;
@ -702,9 +717,10 @@ mod test {
"unknown kind: {}", e.kind);
}
}
})
}
iotest!(fn read_eof_twice_ip6() {
#[test]
fn read_eof_twice_ip6() {
let addr = next_test_ip6();
let ip_str = addr.ip.to_string();
let port = addr.port;
@ -727,9 +743,10 @@ mod test {
"unknown kind: {}", e.kind);
}
}
})
}
iotest!(fn write_close_ip4() {
#[test]
fn write_close_ip4() {
let addr = next_test_ip4();
let ip_str = addr.ip.to_string();
let port = addr.port;
@ -754,9 +771,10 @@ mod test {
}
}
}
})
}
iotest!(fn write_close_ip6() {
#[test]
fn write_close_ip6() {
let addr = next_test_ip6();
let ip_str = addr.ip.to_string();
let port = addr.port;
@ -781,9 +799,10 @@ mod test {
}
}
}
})
}
iotest!(fn multiple_connect_serial_ip4() {
#[test]
fn multiple_connect_serial_ip4() {
let addr = next_test_ip4();
let ip_str = addr.ip.to_string();
let port = addr.port;
@ -802,9 +821,10 @@ mod test {
stream.read(buf).unwrap();
assert_eq!(buf[0], 99);
}
})
}
iotest!(fn multiple_connect_serial_ip6() {
#[test]
fn multiple_connect_serial_ip6() {
let addr = next_test_ip6();
let ip_str = addr.ip.to_string();
let port = addr.port;
@ -823,9 +843,10 @@ mod test {
stream.read(buf).unwrap();
assert_eq!(buf[0], 99);
}
})
}
iotest!(fn multiple_connect_interleaved_greedy_schedule_ip4() {
#[test]
fn multiple_connect_interleaved_greedy_schedule_ip4() {
let addr = next_test_ip4();
let ip_str = addr.ip.to_string();
let port = addr.port;
@ -862,9 +883,10 @@ mod test {
stream.write([i as u8]).unwrap();
});
}
})
}
iotest!(fn multiple_connect_interleaved_greedy_schedule_ip6() {
#[test]
fn multiple_connect_interleaved_greedy_schedule_ip6() {
let addr = next_test_ip6();
let ip_str = addr.ip.to_string();
let port = addr.port;
@ -901,9 +923,10 @@ mod test {
stream.write([i as u8]).unwrap();
});
}
})
}
iotest!(fn multiple_connect_interleaved_lazy_schedule_ip4() {
#[test]
fn multiple_connect_interleaved_lazy_schedule_ip4() {
static MAX: int = 10;
let addr = next_test_ip4();
let ip_str = addr.ip.to_string();
@ -940,9 +963,10 @@ mod test {
stream.write([99]).unwrap();
});
}
})
}
iotest!(fn multiple_connect_interleaved_lazy_schedule_ip6() {
#[test]
fn multiple_connect_interleaved_lazy_schedule_ip6() {
static MAX: int = 10;
let addr = next_test_ip6();
let ip_str = addr.ip.to_string();
@ -979,7 +1003,7 @@ mod test {
stream.write([99]).unwrap();
});
}
})
}
pub fn socket_name(addr: SocketAddr) {
let ip_str = addr.ip.to_string();
@ -1015,18 +1039,21 @@ mod test {
assert_eq!(addr, peer_name.unwrap());
}
iotest!(fn socket_and_peer_name_ip4() {
#[test]
fn socket_and_peer_name_ip4() {
peer_name(next_test_ip4());
socket_name(next_test_ip4());
})
}
iotest!(fn socket_and_peer_name_ip6() {
#[test]
fn socket_and_peer_name_ip6() {
// FIXME: peer name is not consistent
//peer_name(next_test_ip6());
socket_name(next_test_ip6());
})
}
iotest!(fn partial_read() {
#[test]
fn partial_read() {
let addr = next_test_ip4();
let port = addr.port;
let (tx, rx) = channel();
@ -1048,9 +1075,10 @@ mod test {
assert_eq!(c.read(b), Ok(1));
c.write([1]).unwrap();
rx.recv();
})
}
iotest!(fn double_bind() {
#[test]
fn double_bind() {
let addr = next_test_ip4();
let ip_str = addr.ip.to_string();
let port = addr.port;
@ -1063,9 +1091,10 @@ mod test {
"unknown error: {} {}", e, e.kind);
}
}
})
}
iotest!(fn fast_rebind() {
#[test]
fn fast_rebind() {
let addr = next_test_ip4();
let port = addr.port;
let (tx, rx) = channel();
@ -1090,9 +1119,10 @@ mod test {
// Close listener
}
let _listener = TcpListener::bind(addr.ip.to_string().as_slice(), port);
})
}
iotest!(fn tcp_clone_smoke() {
#[test]
fn tcp_clone_smoke() {
let addr = next_test_ip4();
let ip_str = addr.ip.to_string();
let port = addr.port;
@ -1121,9 +1151,10 @@ mod test {
let mut buf = [0, 0];
assert_eq!(s1.read(buf), Ok(1));
rx2.recv();
})
}
iotest!(fn tcp_clone_two_read() {
#[test]
fn tcp_clone_two_read() {
let addr = next_test_ip6();
let ip_str = addr.ip.to_string();
let port = addr.port;
@ -1155,9 +1186,10 @@ mod test {
tx1.send(());
rx.recv();
})
}
iotest!(fn tcp_clone_two_write() {
#[test]
fn tcp_clone_two_write() {
let addr = next_test_ip4();
let ip_str = addr.ip.to_string();
let port = addr.port;
@ -1182,9 +1214,10 @@ mod test {
s1.write([2]).unwrap();
rx.recv();
})
}
iotest!(fn shutdown_smoke() {
#[test]
fn shutdown_smoke() {
use rt::rtio::RtioTcpStream;
let addr = next_test_ip4();
@ -1202,9 +1235,10 @@ mod test {
assert!(s.obj.close_write().is_ok());
assert!(s.write([1]).is_err());
assert_eq!(s.read_to_end(), Ok(vec!(1)));
})
}
iotest!(fn accept_timeout() {
#[test]
fn accept_timeout() {
let addr = next_test_ip4();
let ip_str = addr.ip.to_string();
let port = addr.port;
@ -1249,9 +1283,10 @@ mod test {
port).unwrap());
});
a.accept().unwrap();
})
}
iotest!(fn close_readwrite_smoke() {
#[test]
fn close_readwrite_smoke() {
let addr = next_test_ip4();
let ip_str = addr.ip.to_string();
let port = addr.port;
@ -1287,9 +1322,10 @@ mod test {
let _ = s2.close_write();
let _ = s3.close_read();
let _ = s3.close_write();
})
}
iotest!(fn close_read_wakes_up() {
#[test]
fn close_read_wakes_up() {
let addr = next_test_ip4();
let ip_str = addr.ip.to_string();
let port = addr.port;
@ -1314,9 +1350,10 @@ mod test {
// this test will never finish if the child doesn't wake up
rx.recv();
})
}
iotest!(fn readwrite_timeouts() {
#[test]
fn readwrite_timeouts() {
let addr = next_test_ip6();
let ip_str = addr.ip.to_string();
let port = addr.port;
@ -1348,9 +1385,10 @@ mod test {
tx.send(());
s.set_timeout(None);
assert_eq!(s.read([0, 0]), Ok(1));
})
}
iotest!(fn read_timeouts() {
#[test]
fn read_timeouts() {
let addr = next_test_ip6();
let ip_str = addr.ip.to_string();
let port = addr.port;
@ -1378,9 +1416,10 @@ mod test {
for _ in range(0i, 100) {
assert!(s.write([0, ..128 * 1024]).is_ok());
}
})
}
iotest!(fn write_timeouts() {
#[test]
fn write_timeouts() {
let addr = next_test_ip6();
let ip_str = addr.ip.to_string();
let port = addr.port;
@ -1407,9 +1446,10 @@ mod test {
tx.send(());
assert!(s.read([0]).is_ok());
})
}
iotest!(fn timeout_concurrent_read() {
#[test]
fn timeout_concurrent_read() {
let addr = next_test_ip6();
let ip_str = addr.ip.to_string();
let port = addr.port;
@ -1436,9 +1476,10 @@ mod test {
tx.send(());
rx2.recv();
})
}
iotest!(fn clone_while_reading() {
#[test]
fn clone_while_reading() {
let addr = next_test_ip6();
let listen = TcpListener::bind(addr.ip.to_string().as_slice(), addr.port);
let mut accept = listen.listen().unwrap();
@ -1476,9 +1517,10 @@ mod test {
tx.send(());
rxdone.recv();
rxdone.recv();
})
}
iotest!(fn clone_accept_smoke() {
#[test]
fn clone_accept_smoke() {
let addr = next_test_ip4();
let l = TcpListener::bind(addr.ip.to_string().as_slice(), addr.port);
let mut a = l.listen().unwrap();
@ -1493,9 +1535,10 @@ mod test {
assert!(a.accept().is_ok());
assert!(a2.accept().is_ok());
})
}
iotest!(fn clone_accept_concurrent() {
#[test]
fn clone_accept_concurrent() {
let addr = next_test_ip4();
let l = TcpListener::bind(addr.ip.to_string().as_slice(), addr.port);
let a = l.listen().unwrap();
@ -1516,18 +1559,20 @@ mod test {
assert!(rx.recv().is_ok());
assert!(rx.recv().is_ok());
})
}
iotest!(fn close_accept_smoke() {
#[test]
fn close_accept_smoke() {
let addr = next_test_ip4();
let l = TcpListener::bind(addr.ip.to_string().as_slice(), addr.port);
let mut a = l.listen().unwrap();
a.close_accept().unwrap();
assert_eq!(a.accept().err().unwrap().kind, EndOfFile);
})
}
iotest!(fn close_accept_concurrent() {
#[test]
fn close_accept_concurrent() {
let addr = next_test_ip4();
let l = TcpListener::bind(addr.ip.to_string().as_slice(), addr.port);
let a = l.listen().unwrap();
@ -1541,5 +1586,5 @@ mod test {
a2.close_accept().unwrap();
assert_eq!(rx.recv().err().unwrap().kind, EndOfFile);
})
}
}

View File

@ -264,18 +264,24 @@ impl Writer for UdpStream {
#[allow(experimental)]
mod test {
use super::*;
use io::net::ip::{SocketAddr};
use prelude::*;
use io::*;
use io::net::ip::*;
use io::test::*;
// FIXME #11530 this fails on android because tests are run as root
iotest!(fn bind_error() {
#[cfg_attr(any(windows, target_os = "android"), ignore)]
#[test]
fn bind_error() {
let addr = SocketAddr { ip: Ipv4Addr(0, 0, 0, 0), port: 1 };
match UdpSocket::bind(addr) {
Ok(..) => fail!(),
Err(e) => assert_eq!(e.kind, PermissionDenied),
}
} #[cfg_attr(any(windows, target_os = "android"), ignore)])
}
iotest!(fn socket_smoke_test_ip4() {
#[test]
fn socket_smoke_test_ip4() {
let server_ip = next_test_ip4();
let client_ip = next_test_ip4();
let (tx1, rx1) = channel();
@ -308,9 +314,10 @@ mod test {
Err(..) => fail!()
}
rx2.recv();
})
}
iotest!(fn socket_smoke_test_ip6() {
#[test]
fn socket_smoke_test_ip6() {
let server_ip = next_test_ip6();
let client_ip = next_test_ip6();
let (tx, rx) = channel::<()>();
@ -340,9 +347,10 @@ mod test {
}
Err(..) => fail!()
}
})
}
iotest!(fn stream_smoke_test_ip4() {
#[test]
fn stream_smoke_test_ip4() {
let server_ip = next_test_ip4();
let client_ip = next_test_ip4();
let (tx1, rx1) = channel();
@ -378,9 +386,10 @@ mod test {
Err(..) => fail!()
}
rx2.recv();
})
}
iotest!(fn stream_smoke_test_ip6() {
#[test]
fn stream_smoke_test_ip6() {
let server_ip = next_test_ip6();
let client_ip = next_test_ip6();
let (tx1, rx1) = channel();
@ -416,7 +425,7 @@ mod test {
Err(..) => fail!()
}
rx2.recv();
})
}
pub fn socket_name(addr: SocketAddr) {
let server = UdpSocket::bind(addr);
@ -431,15 +440,18 @@ mod test {
assert_eq!(addr, so_name.unwrap());
}
iotest!(fn socket_name_ip4() {
#[test]
fn socket_name_ip4() {
socket_name(next_test_ip4());
})
}
iotest!(fn socket_name_ip6() {
#[test]
fn socket_name_ip6() {
socket_name(next_test_ip6());
})
}
iotest!(fn udp_clone_smoke() {
#[test]
fn udp_clone_smoke() {
let addr1 = next_test_ip4();
let addr2 = next_test_ip4();
let mut sock1 = UdpSocket::bind(addr1).unwrap();
@ -467,9 +479,10 @@ mod test {
let mut buf = [0, 0];
assert_eq!(sock1.recv_from(buf), Ok((1, addr2)));
rx2.recv();
})
}
iotest!(fn udp_clone_two_read() {
#[test]
fn udp_clone_two_read() {
let addr1 = next_test_ip4();
let addr2 = next_test_ip4();
let mut sock1 = UdpSocket::bind(addr1).unwrap();
@ -500,9 +513,10 @@ mod test {
tx1.send(());
rx.recv();
})
}
iotest!(fn udp_clone_two_write() {
#[test]
fn udp_clone_two_write() {
let addr1 = next_test_ip4();
let addr2 = next_test_ip4();
let mut sock1 = UdpSocket::bind(addr1).unwrap();
@ -543,10 +557,11 @@ mod test {
rx.recv();
serv_rx.recv();
})
}
#[cfg(not(windows))] // FIXME #17553
iotest!(fn recv_from_timeout() {
#[test]
fn recv_from_timeout() {
let addr1 = next_test_ip4();
let addr2 = next_test_ip4();
let mut a = UdpSocket::bind(addr1).unwrap();
@ -580,9 +595,10 @@ mod test {
// Make sure the child didn't die
rx2.recv();
})
}
iotest!(fn send_to_timeout() {
#[test]
fn send_to_timeout() {
let addr1 = next_test_ip4();
let addr2 = next_test_ip4();
let mut a = UdpSocket::bind(addr1).unwrap();
@ -596,5 +612,5 @@ mod test {
Err(e) => fail!("other error: {}", e),
}
}
})
}
}

View File

@ -118,7 +118,11 @@ impl Writer for PipeStream {
#[cfg(test)]
mod test {
iotest!(fn partial_read() {
use super::*;
use prelude::*;
#[test]
fn partial_read() {
use os;
use io::pipe::PipeStream;
@ -135,5 +139,5 @@ mod test {
let mut buf = [0, ..10];
input.read(buf).unwrap();
tx.send(());
})
}
}

View File

@ -662,39 +662,52 @@ impl Drop for Process {
#[cfg(test)]
mod tests {
#![allow(unused_imports)]
extern crate native;
use io::process::{Command, Process};
use super::*;
use prelude::*;
use io::timer::*;
use io::*;
use io::fs::PathExtensions;
use time::Duration;
use str;
use rt::running_on_valgrind;
// FIXME(#10380) these tests should not all be ignored on android.
#[cfg(not(target_os="android"))]
iotest!(fn smoke() {
#[test]
fn smoke() {
let p = Command::new("true").spawn();
assert!(p.is_ok());
let mut p = p.unwrap();
assert!(p.wait().unwrap().success());
})
}
#[cfg(not(target_os="android"))]
iotest!(fn smoke_failure() {
#[test]
fn smoke_failure() {
match Command::new("if-this-is-a-binary-then-the-world-has-ended").spawn() {
Ok(..) => fail!(),
Err(..) => {}
}
})
}
#[cfg(not(target_os="android"))]
iotest!(fn exit_reported_right() {
#[test]
fn exit_reported_right() {
let p = Command::new("false").spawn();
assert!(p.is_ok());
let mut p = p.unwrap();
assert!(p.wait().unwrap().matches_exit_status(1));
drop(p.wait().clone());
})
}
#[cfg(all(unix, not(target_os="android")))]
iotest!(fn signal_reported_right() {
#[test]
fn signal_reported_right() {
let p = Command::new("/bin/sh").arg("-c").arg("kill -1 $$").spawn();
assert!(p.is_ok());
let mut p = p.unwrap();
@ -702,7 +715,7 @@ mod tests {
process::ExitSignal(1) => {},
result => fail!("not terminated by signal 1 (instead, {})", result),
}
})
}
pub fn read_all(input: &mut Reader) -> String {
input.read_to_string().unwrap()
@ -719,23 +732,26 @@ mod tests {
}
#[cfg(not(target_os="android"))]
iotest!(fn stdout_works() {
#[test]
fn stdout_works() {
let mut cmd = Command::new("echo");
cmd.arg("foobar").stdout(CreatePipe(false, true));
assert_eq!(run_output(cmd), "foobar\n".to_string());
})
}
#[cfg(all(unix, not(target_os="android")))]
iotest!(fn set_cwd_works() {
#[test]
fn set_cwd_works() {
let mut cmd = Command::new("/bin/sh");
cmd.arg("-c").arg("pwd")
.cwd(&Path::new("/"))
.stdout(CreatePipe(false, true));
assert_eq!(run_output(cmd), "/\n".to_string());
})
}
#[cfg(all(unix, not(target_os="android")))]
iotest!(fn stdin_works() {
#[test]
fn stdin_works() {
let mut p = Command::new("/bin/sh")
.arg("-c").arg("read line; echo $line")
.stdin(CreatePipe(true, false))
@ -746,21 +762,24 @@ mod tests {
let out = read_all(p.stdout.get_mut_ref() as &mut Reader);
assert!(p.wait().unwrap().success());
assert_eq!(out, "foobar\n".to_string());
})
}
#[cfg(not(target_os="android"))]
iotest!(fn detach_works() {
#[test]
fn detach_works() {
let mut p = Command::new("true").detached().spawn().unwrap();
assert!(p.wait().unwrap().success());
})
}
#[cfg(windows)]
iotest!(fn uid_fails_on_windows() {
#[test]
fn uid_fails_on_windows() {
assert!(Command::new("test").uid(10).spawn().is_err());
})
}
#[cfg(all(unix, not(target_os="android")))]
iotest!(fn uid_works() {
#[test]
fn uid_works() {
use libc;
let mut p = Command::new("/bin/sh")
.arg("-c").arg("true")
@ -768,36 +787,40 @@ mod tests {
.gid(unsafe { libc::getgid() as uint })
.spawn().unwrap();
assert!(p.wait().unwrap().success());
})
}
#[cfg(all(unix, not(target_os="android")))]
iotest!(fn uid_to_root_fails() {
#[test]
fn uid_to_root_fails() {
use libc;
// if we're already root, this isn't a valid test. Most of the bots run
// as non-root though (android is an exception).
if unsafe { libc::getuid() == 0 } { return }
assert!(Command::new("/bin/ls").uid(0).gid(0).spawn().is_err());
})
}
#[cfg(not(target_os="android"))]
iotest!(fn test_process_status() {
#[test]
fn test_process_status() {
let mut status = Command::new("false").status().unwrap();
assert!(status.matches_exit_status(1));
status = Command::new("true").status().unwrap();
assert!(status.success());
})
}
iotest!(fn test_process_output_fail_to_start() {
#[test]
fn test_process_output_fail_to_start() {
match Command::new("/no-binary-by-this-name-should-exist").output() {
Err(e) => assert_eq!(e.kind, FileNotFound),
Ok(..) => fail!()
}
})
}
#[cfg(not(target_os="android"))]
iotest!(fn test_process_output_output() {
#[test]
fn test_process_output_output() {
let ProcessOutput {status, output, error}
= Command::new("echo").arg("hello").output().unwrap();
let output_str = str::from_utf8(output.as_slice()).unwrap();
@ -808,33 +831,37 @@ mod tests {
if !running_on_valgrind() {
assert_eq!(error, Vec::new());
}
})
}
#[cfg(not(target_os="android"))]
iotest!(fn test_process_output_error() {
#[test]
fn test_process_output_error() {
let ProcessOutput {status, output, error}
= Command::new("mkdir").arg(".").output().unwrap();
assert!(status.matches_exit_status(1));
assert_eq!(output, Vec::new());
assert!(!error.is_empty());
})
}
#[cfg(not(target_os="android"))]
iotest!(fn test_finish_once() {
#[test]
fn test_finish_once() {
let mut prog = Command::new("false").spawn().unwrap();
assert!(prog.wait().unwrap().matches_exit_status(1));
})
}
#[cfg(not(target_os="android"))]
iotest!(fn test_finish_twice() {
#[test]
fn test_finish_twice() {
let mut prog = Command::new("false").spawn().unwrap();
assert!(prog.wait().unwrap().matches_exit_status(1));
assert!(prog.wait().unwrap().matches_exit_status(1));
})
}
#[cfg(not(target_os="android"))]
iotest!(fn test_wait_with_output_once() {
#[test]
fn test_wait_with_output_once() {
let prog = Command::new("echo").arg("hello").spawn().unwrap();
let ProcessOutput {status, output, error} = prog.wait_with_output().unwrap();
let output_str = str::from_utf8(output.as_slice()).unwrap();
@ -845,7 +872,7 @@ mod tests {
if !running_on_valgrind() {
assert_eq!(error, Vec::new());
}
})
}
#[cfg(all(unix, not(target_os="android")))]
pub fn pwd_cmd() -> Command {
@ -865,7 +892,8 @@ mod tests {
cmd
}
iotest!(fn test_keep_current_working_dir() {
#[test]
fn test_keep_current_working_dir() {
use os;
let prog = pwd_cmd().spawn().unwrap();
@ -878,9 +906,10 @@ mod tests {
assert_eq!(parent_stat.unstable.device, child_stat.unstable.device);
assert_eq!(parent_stat.unstable.inode, child_stat.unstable.inode);
})
}
iotest!(fn test_change_working_directory() {
#[test]
fn test_change_working_directory() {
use os;
// test changing to the parent of os::getcwd() because we know
// the path exists (and os::getcwd() is not expected to be root)
@ -895,7 +924,7 @@ mod tests {
assert_eq!(parent_stat.unstable.device, child_stat.unstable.device);
assert_eq!(parent_stat.unstable.inode, child_stat.unstable.inode);
})
}
#[cfg(all(unix, not(target_os="android")))]
pub fn env_cmd() -> Command {
@ -916,7 +945,8 @@ mod tests {
}
#[cfg(not(target_os="android"))]
iotest!(fn test_inherit_env() {
#[test]
fn test_inherit_env() {
use os;
if running_on_valgrind() { return; }
@ -930,9 +960,10 @@ mod tests {
output.as_slice()
.contains(format!("{}={}", *k, *v).as_slice()));
}
})
}
#[cfg(target_os="android")]
iotest!(fn test_inherit_env() {
#[test]
fn test_inherit_env() {
use os;
if running_on_valgrind() { return; }
@ -953,9 +984,10 @@ mod tests {
*v).as_slice()));
}
}
})
}
iotest!(fn test_override_env() {
#[test]
fn test_override_env() {
use os;
let mut new_env = vec![("RUN_TEST_NEW_ENV", "123")];
@ -978,18 +1010,20 @@ mod tests {
assert!(output.as_slice().contains("RUN_TEST_NEW_ENV=123"),
"didn't find RUN_TEST_NEW_ENV inside of:\n\n{}", output);
})
}
iotest!(fn test_add_to_env() {
#[test]
fn test_add_to_env() {
let prog = env_cmd().env("RUN_TEST_NEW_ENV", "123").spawn().unwrap();
let result = prog.wait_with_output().unwrap();
let output = str::from_utf8_lossy(result.output.as_slice()).into_string();
assert!(output.as_slice().contains("RUN_TEST_NEW_ENV=123"),
"didn't find RUN_TEST_NEW_ENV inside of:\n\n{}", output);
})
}
iotest!(fn test_remove_from_env() {
#[test]
fn test_remove_from_env() {
use os;
// save original environment
@ -1012,7 +1046,7 @@ mod tests {
assert!(!output.as_slice().contains("RUN_TEST_NEW_ENV"),
"found RUN_TEST_NEW_ENV inside of:\n\n{}", output);
})
}
#[cfg(unix)]
pub fn sleeper() -> Process {
@ -1026,20 +1060,23 @@ mod tests {
Command::new("ping").arg("127.0.0.1").arg("-n").arg("1000").spawn().unwrap()
}
iotest!(fn test_kill() {
#[test]
fn test_kill() {
let mut p = sleeper();
Process::kill(p.id(), PleaseExitSignal).unwrap();
assert!(!p.wait().unwrap().success());
})
}
iotest!(fn test_exists() {
#[test]
fn test_exists() {
let mut p = sleeper();
assert!(Process::kill(p.id(), 0).is_ok());
p.signal_kill().unwrap();
assert!(!p.wait().unwrap().success());
})
}
iotest!(fn test_zero() {
#[test]
fn test_zero() {
let mut p = sleeper();
p.signal_kill().unwrap();
for _ in range(0i, 20) {
@ -1050,9 +1087,10 @@ mod tests {
timer::sleep(Duration::milliseconds(100));
}
fail!("never saw the child go away");
})
}
iotest!(fn wait_timeout() {
#[test]
fn wait_timeout() {
let mut p = sleeper();
p.set_timeout(Some(10));
assert_eq!(p.wait().err().unwrap().kind, TimedOut);
@ -1060,9 +1098,10 @@ mod tests {
p.signal_kill().unwrap();
p.set_timeout(None);
assert!(p.wait().is_ok());
})
}
iotest!(fn wait_timeout2() {
#[test]
fn wait_timeout2() {
let (tx, rx) = channel();
let tx2 = tx.clone();
spawn(proc() {
@ -1081,19 +1120,21 @@ mod tests {
});
rx.recv();
rx.recv();
})
}
iotest!(fn forget() {
#[test]
fn forget() {
let p = sleeper();
let id = p.id();
p.forget();
assert!(Process::kill(id, 0).is_ok());
assert!(Process::kill(id, PleaseExitSignal).is_ok());
})
}
iotest!(fn dont_close_fd_on_command_spawn() {
#[test]
fn dont_close_fd_on_command_spawn() {
use std::rt::rtio::{Truncate, Write};
use native::io::file;
use self::native::io::file;
let path = if cfg!(windows) {
Path::new("NUL")
@ -1110,7 +1151,7 @@ mod tests {
let _ = cmd.stdout(InheritFd(fdes.fd()));
assert!(cmd.status().unwrap().success());
assert!(fdes.inner_write("extra write\n".as_bytes()).is_ok());
})
}
#[test]
#[cfg(windows)]

View File

@ -1,236 +0,0 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
Signal handling
This module provides bindings to receive signals safely, built on top of the
local I/O factory. There are a number of defined signals which can be caught,
but not all signals will work across all platforms (windows doesn't have
definitions for a number of signals).
*/
use clone::Clone;
use collections::MutableSeq;
use comm::{Sender, Receiver, channel};
use io;
use iter::Iterator;
use kinds::Send;
use mem::drop;
use option::{Some, None};
use boxed::Box;
use result::{Ok, Err};
use rt::rtio::{IoFactory, LocalIo, RtioSignal, Callback};
use slice::ImmutableSlice;
use vec::Vec;
/// Signals that can be sent and received
#[repr(int)]
#[deriving(PartialEq, Hash, Show)]
pub enum Signum {
/// Equivalent to SIGBREAK, delivered when the user presses Ctrl-Break.
Break = 21i,
/// Equivalent to SIGHUP, delivered when the user closes the terminal
/// window. On delivery of HangUp, the program is given approximately
/// 10 seconds to perform any cleanup. After that, Windows will
/// unconditionally terminate it.
HangUp = 1i,
/// Equivalent to SIGINT, delivered when the user presses Ctrl-c.
Interrupt = 2i,
/// Equivalent to SIGQUIT, delivered when the user presses Ctrl-\.
Quit = 3i,
/// Equivalent to SIGTSTP, delivered when the user presses Ctrl-z.
StopTemporarily = 20i,
/// Equivalent to SIGUSR1.
User1 = 10i,
/// Equivalent to SIGUSR2.
User2 = 12i,
/// Equivalent to SIGWINCH, delivered when the console has been resized.
/// WindowSizeChange may not be delivered in a timely manner; size change
/// will only be detected when the cursor is being moved.
WindowSizeChange = 28i,
}
/// Listener provides a receiver to listen for registered signals.
///
/// Listener automatically unregisters its handles once it is out of scope.
/// However, clients can still unregister signums manually.
///
/// # Example
///
/// ```rust,no_run
/// # #![allow(unused_must_use)]
/// use std::io::signal::{Listener, Interrupt};
///
/// let mut listener = Listener::new();
/// listener.register(Interrupt);
///
/// loop {
/// match listener.rx.recv() {
/// Interrupt => println!("Got Interrupt'ed"),
/// _ => (),
/// }
/// }
/// ```
pub struct Listener {
/// A map from signums to handles to keep the handles in memory
handles: Vec<(Signum, Box<RtioSignal + Send>)>,
/// This is where all the handles send signums, which are received by
/// the clients from the receiver.
tx: Sender<Signum>,
/// Clients of Listener can `recv()` on this receiver. This is exposed to
/// allow selection over it as well as manipulation of the receiver
/// directly.
pub rx: Receiver<Signum>,
}
impl Listener {
/// Creates a new listener for signals. Once created, signals are bound via
/// the `register` method (otherwise nothing will ever be received)
pub fn new() -> Listener {
let (tx, rx) = channel();
Listener {
tx: tx,
rx: rx,
handles: vec!(),
}
}
/// Listen for a signal, returning true when successfully registered for
/// signum. Signals can be received using `recv()`.
///
/// Once a signal is registered, this listener will continue to receive
/// notifications of signals until it is unregistered. This occurs
/// regardless of the number of other listeners registered in other tasks
/// (or on this task).
///
/// Signals are still received if there is no task actively waiting for
/// a signal, and a later call to `recv` will return the signal that was
/// received while no task was waiting on it.
///
/// # Error
///
/// If this function fails to register a signal handler, then an error will
/// be returned.
pub fn register(&mut self, signum: Signum) -> io::IoResult<()> {
struct SignalCallback {
signum: Signum,
tx: Sender<Signum>,
}
impl Callback for SignalCallback {
fn call(&mut self) { self.tx.send(self.signum) }
}
if self.handles.iter().any(|&(sig, _)| sig == signum) {
return Ok(()); // self is already listening to signum, so succeed
}
match LocalIo::maybe_raise(|io| {
io.signal(signum as int, box SignalCallback {
signum: signum,
tx: self.tx.clone(),
})
}) {
Ok(handle) => {
self.handles.push((signum, handle));
Ok(())
}
Err(e) => Err(io::IoError::from_rtio_error(e))
}
}
/// Unregisters a signal. If this listener currently had a handler
/// registered for the signal, then it will stop receiving any more
/// notification about the signal. If the signal has already been received,
/// it may still be returned by `recv`.
pub fn unregister(&mut self, signum: Signum) {
match self.handles.iter().position(|&(i, _)| i == signum) {
Some(i) => drop(self.handles.remove(i)),
None => {}
}
}
}
#[cfg(all(test, unix))]
mod test_unix {
use prelude::*;
use libc;
use comm::Empty;
use io::timer;
use super::{Listener, Interrupt};
use time::Duration;
fn sigint() {
unsafe {
libc::funcs::posix88::signal::kill(libc::getpid(), libc::SIGINT);
}
}
#[test] #[cfg(not(target_os="android"))] // FIXME(#10378)
fn test_io_signal_smoketest() {
let mut signal = Listener::new();
signal.register(Interrupt).unwrap();
sigint();
timer::sleep(Duration::milliseconds(10));
match signal.rx.recv() {
Interrupt => (),
s => fail!("Expected Interrupt, got {:?}", s),
}
}
#[test] #[cfg(not(target_os="android"))] // FIXME(#10378)
fn test_io_signal_two_signal_one_signum() {
let mut s1 = Listener::new();
let mut s2 = Listener::new();
s1.register(Interrupt).unwrap();
s2.register(Interrupt).unwrap();
sigint();
timer::sleep(Duration::milliseconds(10));
match s1.rx.recv() {
Interrupt => (),
s => fail!("Expected Interrupt, got {:?}", s),
}
match s2.rx.recv() {
Interrupt => (),
s => fail!("Expected Interrupt, got {:?}", s),
}
}
#[test] #[cfg(not(target_os="android"))] // FIXME(#10378)
fn test_io_signal_unregister() {
let mut s1 = Listener::new();
let mut s2 = Listener::new();
s1.register(Interrupt).unwrap();
s2.register(Interrupt).unwrap();
s2.unregister(Interrupt);
sigint();
timer::sleep(Duration::milliseconds(10));
assert_eq!(s2.rx.try_recv(), Err(Empty));
}
}
#[cfg(all(test, windows))]
mod test_windows {
use super::{User1, Listener};
use result::{Ok, Err};
#[test]
fn test_io_signal_invalid_signum() {
let mut s = Listener::new();
match s.register(User1) {
Ok(..) => {
fail!("Unexpected successful registry of signum {:?}", User1);
}
Err(..) => {}
}
}
}
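The `register`/`unregister` documentation in the deleted module above describes a matched lifecycle: a registration keeps delivering signals (even while no task is actively waiting) until it is explicitly unregistered or the `Listener` is dropped. A minimal sketch of that lifecycle, using only names from the removed API and serving purely as a record of what is going away:

```rust
use std::io::signal::{Listener, Interrupt};

fn main() {
    let mut listener = Listener::new();
    // Start receiving SIGINT notifications on listener.rx.
    listener.register(Interrupt).unwrap();

    // ... elsewhere, a task would block on listener.rx.recv() ...

    // Stop listening; signals already delivered may still be queued on rx.
    listener.unregister(Interrupt);
}
```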

View File

@ -382,14 +382,17 @@ impl Writer for StdWriter {
#[cfg(test)]
mod tests {
iotest!(fn smoke() {
use super::*;
use prelude::*;
fn smoke() {
// Just make sure we can acquire handles
stdin();
stdout();
stderr();
})
}
iotest!(fn capture_stdout() {
fn capture_stdout() {
use io::{ChanReader, ChanWriter};
let (tx, rx) = channel();
@ -399,9 +402,9 @@ mod tests {
println!("hello!");
});
assert_eq!(r.read_to_string().unwrap(), "hello!\n".to_string());
})
}
iotest!(fn capture_stderr() {
fn capture_stderr() {
use realstd::comm::channel;
use realstd::io::{Writer, ChanReader, ChanWriter, Reader};
@ -413,5 +416,5 @@ mod tests {
});
let s = r.read_to_string().unwrap();
assert!(s.as_slice().contains("my special message"));
})
}
}

View File

@ -18,42 +18,6 @@ use prelude::*;
use std::io::net::ip::*;
use sync::atomic::{AtomicUint, INIT_ATOMIC_UINT, Relaxed};
macro_rules! iotest (
{ fn $name:ident() $b:block $(#[$a:meta])* } => (
mod $name {
#![allow(unused_imports)]
use super::super::*;
use super::*;
use io;
use prelude::*;
use io::*;
use io::fs::*;
use io::test::*;
use io::net::tcp::*;
use io::net::ip::*;
use io::net::udp::*;
#[cfg(unix)]
use io::net::pipe::*;
use io::timer::*;
use io::process::*;
use rt::running_on_valgrind;
use str;
use time::Duration;
fn f() $b
$(#[$a])* #[test] fn green() { f() }
$(#[$a])* #[test] fn native() {
use native;
let (tx, rx) = channel();
native::task::spawn(proc() { tx.send(f()) });
rx.recv();
}
}
)
)
/// Get a port number, starting at 9600, for use in tests
pub fn next_test_port() -> u16 {
static mut next_offset: AtomicUint = INIT_ATOMIC_UINT;
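For readers skimming the long run of test conversions above, this removed macro is the key to the pattern: it wrapped each test body in a module, defined the body as a local `fn f()`, and emitted two `#[test]` functions — `green`, calling `f()` directly under the libgreen-based test runner, and `native`, running `f()` on a freshly spawned native task — with any trailing attributes applied to both. With the macro gone, each call site collapses into a single plain `#[test]` function, and trailing attributes such as `#[should_fail]`, `#[ignore]`, or `#[cfg_attr(...)]` move above the function. A representative before/after, taken from the timer tests later in this diff:

```rust
// Before: one body, two generated tests (green + native), attribute trailing.
iotest!(fn oneshot_fail() {
    let mut timer = Timer::new().unwrap();
    let _rx = timer.oneshot(Duration::milliseconds(1));
    fail!();
} #[should_fail])

// After: a single test; the attribute is hoisted above the function.
#[test]
#[should_fail]
fn oneshot_fail() {
    let mut timer = Timer::new().unwrap();
    let _rx = timer.oneshot(Duration::milliseconds(1));
    fail!();
}
```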

View File

@ -232,55 +232,70 @@ fn in_ms_u64(d: Duration) -> u64 {
#[cfg(test)]
mod test {
iotest!(fn test_io_timer_sleep_simple() {
use super::*;
use time::Duration;
use task::spawn;
use io::*;
use prelude::*;
#[test]
fn test_io_timer_sleep_simple() {
let mut timer = Timer::new().unwrap();
timer.sleep(Duration::milliseconds(1));
})
}
iotest!(fn test_io_timer_sleep_oneshot() {
#[test]
fn test_io_timer_sleep_oneshot() {
let mut timer = Timer::new().unwrap();
timer.oneshot(Duration::milliseconds(1)).recv();
})
}
iotest!(fn test_io_timer_sleep_oneshot_forget() {
#[test]
fn test_io_timer_sleep_oneshot_forget() {
let mut timer = Timer::new().unwrap();
timer.oneshot(Duration::milliseconds(100000000));
})
}
iotest!(fn oneshot_twice() {
#[test]
fn oneshot_twice() {
let mut timer = Timer::new().unwrap();
let rx1 = timer.oneshot(Duration::milliseconds(10000));
let rx = timer.oneshot(Duration::milliseconds(1));
rx.recv();
assert_eq!(rx1.recv_opt(), Err(()));
})
}
iotest!(fn test_io_timer_oneshot_then_sleep() {
#[test]
fn test_io_timer_oneshot_then_sleep() {
let mut timer = Timer::new().unwrap();
let rx = timer.oneshot(Duration::milliseconds(100000000));
timer.sleep(Duration::milliseconds(1)); // this should invalidate rx
assert_eq!(rx.recv_opt(), Err(()));
})
}
iotest!(fn test_io_timer_sleep_periodic() {
#[test]
fn test_io_timer_sleep_periodic() {
let mut timer = Timer::new().unwrap();
let rx = timer.periodic(Duration::milliseconds(1));
rx.recv();
rx.recv();
rx.recv();
})
}
iotest!(fn test_io_timer_sleep_periodic_forget() {
#[test]
fn test_io_timer_sleep_periodic_forget() {
let mut timer = Timer::new().unwrap();
timer.periodic(Duration::milliseconds(100000000));
})
}
iotest!(fn test_io_timer_sleep_standalone() {
sleep(Duration::milliseconds(1))
})
#[test]
fn test_io_timer_sleep_standalone() {
super::sleep(Duration::milliseconds(1))
}
iotest!(fn oneshot() {
#[test]
fn oneshot() {
let mut timer = Timer::new().unwrap();
let rx = timer.oneshot(Duration::milliseconds(1));
@ -290,9 +305,10 @@ mod test {
let rx = timer.oneshot(Duration::milliseconds(1));
rx.recv();
assert!(rx.recv_opt().is_err());
})
}
iotest!(fn override() {
#[test]
fn override() {
let mut timer = Timer::new().unwrap();
let orx = timer.oneshot(Duration::milliseconds(100));
let prx = timer.periodic(Duration::milliseconds(100));
@ -300,9 +316,10 @@ mod test {
assert_eq!(orx.recv_opt(), Err(()));
assert_eq!(prx.recv_opt(), Err(()));
timer.oneshot(Duration::milliseconds(1)).recv();
})
}
iotest!(fn period() {
#[test]
fn period() {
let mut timer = Timer::new().unwrap();
let rx = timer.periodic(Duration::milliseconds(1));
rx.recv();
@ -310,32 +327,40 @@ mod test {
let rx2 = timer.periodic(Duration::milliseconds(1));
rx2.recv();
rx2.recv();
})
}
iotest!(fn sleep() {
#[test]
fn sleep() {
let mut timer = Timer::new().unwrap();
timer.sleep(Duration::milliseconds(1));
timer.sleep(Duration::milliseconds(1));
})
}
iotest!(fn oneshot_fail() {
#[test]
#[should_fail]
fn oneshot_fail() {
let mut timer = Timer::new().unwrap();
let _rx = timer.oneshot(Duration::milliseconds(1));
fail!();
} #[should_fail])
}
iotest!(fn period_fail() {
#[test]
#[should_fail]
fn period_fail() {
let mut timer = Timer::new().unwrap();
let _rx = timer.periodic(Duration::milliseconds(1));
fail!();
} #[should_fail])
}
iotest!(fn normal_fail() {
#[test]
#[should_fail]
fn normal_fail() {
let _timer = Timer::new().unwrap();
fail!();
} #[should_fail])
}
iotest!(fn closing_channel_during_drop_doesnt_kill_everything() {
#[test]
fn closing_channel_during_drop_doesnt_kill_everything() {
// see issue #10375
let mut timer = Timer::new().unwrap();
let timer_rx = timer.periodic(Duration::milliseconds(1000));
@ -346,9 +371,10 @@ mod test {
// when we drop the TimerWatcher we're going to destroy the channel,
// which must wake up the task on the other end
})
}
iotest!(fn reset_doesnt_switch_tasks() {
#[test]
fn reset_doesnt_switch_tasks() {
// similar test to the one above.
let mut timer = Timer::new().unwrap();
let timer_rx = timer.periodic(Duration::milliseconds(1000));
@ -358,9 +384,10 @@ mod test {
});
timer.oneshot(Duration::milliseconds(1));
})
}
iotest!(fn reset_doesnt_switch_tasks2() {
#[test]
fn reset_doesnt_switch_tasks2() {
// similar test to the one above.
let mut timer = Timer::new().unwrap();
let timer_rx = timer.periodic(Duration::milliseconds(1000));
@ -370,80 +397,90 @@ mod test {
});
timer.sleep(Duration::milliseconds(1));
})
}
iotest!(fn sender_goes_away_oneshot() {
#[test]
fn sender_goes_away_oneshot() {
let rx = {
let mut timer = Timer::new().unwrap();
timer.oneshot(Duration::milliseconds(1000))
};
assert_eq!(rx.recv_opt(), Err(()));
})
}
iotest!(fn sender_goes_away_period() {
#[test]
fn sender_goes_away_period() {
let rx = {
let mut timer = Timer::new().unwrap();
timer.periodic(Duration::milliseconds(1000))
};
assert_eq!(rx.recv_opt(), Err(()));
})
}
iotest!(fn receiver_goes_away_oneshot() {
#[test]
fn receiver_goes_away_oneshot() {
let mut timer1 = Timer::new().unwrap();
timer1.oneshot(Duration::milliseconds(1));
let mut timer2 = Timer::new().unwrap();
// while sleeping, the previous timer should fire and not have its
// callback do something terrible.
timer2.sleep(Duration::milliseconds(2));
})
}
iotest!(fn receiver_goes_away_period() {
#[test]
fn receiver_goes_away_period() {
let mut timer1 = Timer::new().unwrap();
timer1.periodic(Duration::milliseconds(1));
let mut timer2 = Timer::new().unwrap();
// while sleeping, the previous timer should fire and not have its
// callback do something terrible.
timer2.sleep(Duration::milliseconds(2));
})
}
iotest!(fn sleep_zero() {
#[test]
fn sleep_zero() {
let mut timer = Timer::new().unwrap();
timer.sleep(Duration::milliseconds(0));
})
}
iotest!(fn sleep_negative() {
#[test]
fn sleep_negative() {
let mut timer = Timer::new().unwrap();
timer.sleep(Duration::milliseconds(-1000000));
})
}
iotest!(fn oneshot_zero() {
#[test]
fn oneshot_zero() {
let mut timer = Timer::new().unwrap();
let rx = timer.oneshot(Duration::milliseconds(0));
rx.recv();
})
}
iotest!(fn oneshot_negative() {
#[test]
fn oneshot_negative() {
let mut timer = Timer::new().unwrap();
let rx = timer.oneshot(Duration::milliseconds(-1000000));
rx.recv();
})
}
iotest!(fn periodic_zero() {
#[test]
fn periodic_zero() {
let mut timer = Timer::new().unwrap();
let rx = timer.periodic(Duration::milliseconds(0));
rx.recv();
rx.recv();
rx.recv();
rx.recv();
})
}
iotest!(fn periodic_negative() {
#[test]
fn periodic_negative() {
let mut timer = Timer::new().unwrap();
let rx = timer.periodic(Duration::milliseconds(-1000000));
rx.recv();
rx.recv();
rx.recv();
rx.recv();
})
}
}

View File

@ -116,11 +116,6 @@
#![reexport_test_harness_main = "test_main"]
// When testing libstd, bring in libuv as the I/O backend so tests can print
// things and all of the std::io tests have an I/O interface to run on top
// of
#[cfg(test)] extern crate rustuv;
#[cfg(test)] extern crate native;
#[cfg(test)] extern crate green;
#[cfg(test)] extern crate debug;
#[cfg(test)] #[phase(plugin, link)] extern crate log;
@ -187,12 +182,6 @@ pub use unicode::char;
pub use core_sync::comm;
// Run tests with libgreen instead of libnative.
#[cfg(test)] #[start]
fn start(argc: int, argv: *const *const u8) -> int {
green::start(argc, argv, rustuv::event_loop, test_main)
}
/* Exported macros */
pub mod macros;

@ -1 +0,0 @@
Subproject commit dec0561d198d86a274b1067b53b64fea3c659202

View File

@ -11,7 +11,6 @@
#![no_start]
extern crate green;
extern crate rustuv;
use std::task::spawn;
use std::os;
@ -22,7 +21,7 @@ use std::uint;
#[start]
fn start(argc: int, argv: *const *const u8) -> int {
green::start(argc, argv, rustuv::event_loop, main)
green::start(argc, argv, green::basic::event_loop, main)
}
fn main() {

View File

@ -40,14 +40,9 @@
// no-pretty-expanded
#![feature(phase)]
#[phase(plugin)] extern crate green;
use std::string::String;
use std::fmt;
green_start!(main)
fn print_complements() {
let all = [Blue, Red, Yellow];
for aa in all.iter() {

View File

@ -40,13 +40,8 @@
// no-pretty-expanded FIXME #15189
#![feature(phase)]
#[phase(plugin)] extern crate green;
use std::sync::Arc;
green_start!(main)
//
// Utilities.
//

View File

@ -40,9 +40,7 @@
// no-pretty-expanded FIXME #15189
#![feature(phase)]
#![allow(non_snake_case)]
#[phase(plugin)] extern crate green;
use std::from_str::FromStr;
use std::iter::count;
@ -50,8 +48,6 @@ use std::cmp::min;
use std::os;
use std::sync::{Arc, RWLock};
green_start!(main)
fn A(i: uint, j: uint) -> f64 {
((i + j) * (i + j + 1) / 2 + i + 1) as f64
}

View File

@ -38,10 +38,6 @@
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
#![feature(phase)]
#[phase(plugin)] extern crate green;
green_start!(main)
fn start(n_tasks: int, token: int) {
let (tx, mut rx) = channel();
tx.send(token);

View File

@ -9,16 +9,13 @@
// except according to those terms.
// This is (hopefully) a quick test to get a good idea about spawning
// performance in libgreen. Note that this uses the rustuv event loop rather
// than the basic event loop in order to get a better real world idea about the
// performance of a task spawn.
// performance in libgreen.
extern crate green;
extern crate rustuv;
#[start]
fn start(argc: int, argv: *const *const u8) -> int {
green::start(argc, argv, rustuv::event_loop, main)
green::start(argc, argv, green::basic::event_loop, main)
}
fn main() {

View File

@ -1,12 +0,0 @@
-include ../tools.mk
HOST_LIB_DIR=$(TMPDIR)/../../../stage$(RUST_BUILD_STAGE)/lib
# This overrides the LD_LIBRARY_PATH for RUN
TARGET_RPATH_DIR:=$(TARGET_RPATH_DIR):$(TMPDIR)
all:
$(RUSTC) lib.rs
$(CC) main.c -o $(call RUN_BINFILE,main) $(call RPATH_LINK_SEARCH,$(HOST_LIB_DIR)) -lboot
$(call RUN,main)
$(call REMOVE_DYLIBS,boot)
$(call FAIL,main)

View File

@ -1,24 +0,0 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![crate_name="boot"]
#![crate_type="dylib"]
extern crate rustuv;
extern crate green;
#[no_mangle] // this needs to get called from C
pub extern "C" fn foo(argc: int, argv: *const *const u8) -> int {
green::start(argc, argv, rustuv::event_loop, proc() {
spawn(proc() {
println!("hello");
});
})
}

View File

@ -1,16 +0,0 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// this is the rust entry point that we're going to call.
int foo(int argc, char *argv[]);
int main(int argc, char *argv[]) {
return foo(argc, argv);
}

View File

@ -20,54 +20,21 @@
extern crate libc;
extern crate native;
extern crate green;
extern crate rustuv;
use std::io::{Process, Command};
use std::io::{Process, Command, timer};
use std::time::Duration;
use std::str;
macro_rules! succeed( ($e:expr) => (
match $e { Ok(..) => {}, Err(e) => fail!("failure: {}", e) }
) )
macro_rules! iotest (
{ fn $name:ident() $b:block $($a:attr)* } => (
mod $name {
#![allow(unused_imports)]
use std::io::timer;
use libc;
use std::str;
use std::io::process::Command;
use native;
use super::{sleeper, test_destroy_actually_kills};
fn f() $b
$($a)* #[test] fn green() { f() }
$($a)* #[test] fn native() {
use native;
let (tx, rx) = channel();
native::task::spawn(proc() { tx.send(f()) });
rx.recv();
}
}
)
)
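For reference, the iotest! macro being deleted here wrapped each test body in a module and emitted two #[test] functions: one run on a green task and one on a freshly spawned native task. A rough illustration of the expansion for a hypothetical iotest!(fn smoke() { ... }), following the macro body above (the module name and the use super::* import are placeholders):

    mod smoke {
        #![allow(unused_imports)]
        use super::*;              // stands in for the specific imports listed above

        fn f() { /* original test body */ }

        #[test] fn green() { f() }     // runs on the libgreen scheduler
        #[test] fn native() {          // runs the same body on a native task
            use native;
            let (tx, rx) = channel();
            native::task::spawn(proc() { tx.send(f()) });
            rx.recv();
        }
    }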
#[cfg(test)] #[start]
fn start(argc: int, argv: *const *const u8) -> int {
green::start(argc, argv, rustuv::event_loop, test_main)
}
iotest!(fn test_destroy_once() {
fn test_destroy_once() {
let mut p = sleeper();
match p.signal_exit() {
Ok(()) => {}
Err(e) => fail!("error: {}", e),
}
})
}
#[cfg(unix)]
pub fn sleeper() -> Process {
@ -81,11 +48,11 @@ pub fn sleeper() -> Process {
Command::new("ping").arg("127.0.0.1").arg("-n").arg("1000").spawn().unwrap()
}
iotest!(fn test_destroy_twice() {
fn test_destroy_twice() {
let mut p = sleeper();
succeed!(p.signal_exit()); // this shouldn't crash...
let _ = p.signal_exit(); // ...nor should this (nor should the destructor)
})
}
pub fn test_destroy_actually_kills(force: bool) {
use std::io::process::{Command, ProcessOutput, ExitStatus, ExitSignal};
@ -129,10 +96,10 @@ pub fn test_destroy_actually_kills(force: bool) {
}
}
iotest!(fn test_unforced_destroy_actually_kills() {
fn test_unforced_destroy_actually_kills() {
test_destroy_actually_kills(false);
})
}
iotest!(fn test_forced_destroy_actually_kills() {
fn test_forced_destroy_actually_kills() {
test_destroy_actually_kills(true);
})
}

View File

@ -8,20 +8,10 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern crate native;
extern crate green;
extern crate rustuv;
use std::time::Duration;
#[start]
fn start(argc: int, argv: *const *const u8) -> int {
green::start(argc, argv, rustuv::event_loop, main)
}
fn main() {
native::task::spawn(proc() customtask());
std::task::spawn(proc() customtask());
}
fn customtask() {

View File

@ -10,48 +10,25 @@
// ignore-fast
extern crate green;
extern crate rustuv;
extern crate native;
use std::os;
use std::io;
use std::str;
#[start]
fn start(argc: int, argv: *const *const u8) -> int {
green::start(argc, argv, rustuv::event_loop, main)
}
fn main() {
let args = os::args();
let args = args.as_slice();
if args.len() > 1 && args[1].as_slice() == "child" {
if args[2].as_slice() == "green" {
child();
} else {
let (tx, rx) = channel();
native::task::spawn(proc() { tx.send(child()); });
rx.recv();
}
child();
} else {
parent("green".to_string());
parent("native".to_string());
let (tx, rx) = channel();
native::task::spawn(proc() {
parent("green".to_string());
parent("native".to_string());
tx.send(());
});
rx.recv();
parent();
}
}
fn parent(flavor: String) {
fn parent() {
let args = os::args();
let args = args.as_slice();
let mut p = io::process::Command::new(args[0].as_slice())
.arg("child").arg(flavor).spawn().unwrap();
.arg("child").spawn().unwrap();
p.stdin.get_mut_ref().write_str("test1\ntest2\ntest3").unwrap();
let out = p.wait_with_output().unwrap();
assert!(out.status.success());
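The surviving test keeps the same parent/child shape: the parent re-runs its own binary with a "child" argument, writes a few lines to the child's stdin, and checks that the child exits cleanly. A minimal sketch of that round trip using only the calls visible above (the exe parameter is hypothetical):

    use std::io::process::Command;

    fn run_child(exe: &str) {
        let mut p = Command::new(exe).arg("child").spawn().unwrap();
        // feed the child some input on its stdin pipe...
        p.stdin.get_mut_ref().write_str("test1\ntest2\ntest3").unwrap();
        // ...then wait for it and make sure it exited successfully
        let out = p.wait_with_output().unwrap();
        assert!(out.status.success());
    }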

View File

@ -11,22 +11,13 @@
// This test may not always fail, but it can be flaky if the race it used to
// expose is still present.
extern crate green;
extern crate rustuv;
extern crate native;
#[start]
fn start(argc: int, argv: *const *const u8) -> int {
green::start(argc, argv, rustuv::event_loop, main)
}
fn helper(rx: Receiver<Sender<()>>) {
for tx in rx.iter() {
let _ = tx.send_opt(());
}
}
fn test() {
fn main() {
let (tx, rx) = channel();
spawn(proc() { helper(rx) });
let (snd, rcv) = channel::<int>();
@ -40,17 +31,3 @@ fn test() {
}
}
}
fn main() {
let (tx, rx) = channel();
spawn(proc() {
tx.send(test());
});
rx.recv();
let (tx, rx) = channel();
native::task::spawn(proc() {
tx.send(test());
});
rx.recv();
}

View File

@ -8,19 +8,12 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(phase)]
#[phase(plugin, link)]
extern crate green;
extern crate native;
use std::io::process;
use std::io::Command;
use std::io;
use std::os;
green_start!(main)
fn main() {
let args = os::args();
if args.len() > 1 && args.get(1).as_slice() == "child" {
@ -29,12 +22,6 @@ fn main() {
test();
let (tx, rx) = channel();
native::task::spawn(proc() {
tx.send(test());
});
rx.recv();
}
fn child() {
@ -52,4 +39,3 @@ fn test() {
.spawn().unwrap();
assert!(p.wait().unwrap().success());
}

View File

@ -8,18 +8,10 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(phase)]
extern crate native;
#[phase(plugin)]
extern crate green;
use native::NativeTaskBuilder;
use std::io::{TempDir, Command, fs};
use std::os;
use std::task::TaskBuilder;
green_start!(main)
fn main() {
// If we're the child, make sure we were invoked correctly
let args = os::args();
@ -28,11 +20,6 @@ fn main() {
}
test();
let (tx, rx) = channel();
TaskBuilder::new().native().spawn(proc() {
tx.send(test());
});
rx.recv();
}
fn test() {

View File

@ -8,28 +8,14 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(phase)]
#[phase(plugin)]
extern crate green;
extern crate native;
use native::NativeTaskBuilder;
use std::io::{process, Command};
use std::os;
use std::task::TaskBuilder;
green_start!(main)
fn main() {
let len = os::args().len();
if len == 1 {
test();
let (tx, rx) = channel();
TaskBuilder::new().native().spawn(proc() {
tx.send(test());
});
rx.recv();
} else {
assert_eq!(len, 3);
}

View File

@ -8,8 +8,9 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-windows
// ignore-android
// ignore-test
// FIXME: this test is being ignored until signals are implemented
// This test ensures that the 'detach' field on processes does the right thing.
// By detaching the child process, they should be put into a separate process
@ -19,19 +20,12 @@
// Note that the first thing we do is put ourselves in our own process group so
// we don't interfere with other running tests.
extern crate green;
extern crate rustuv;
extern crate libc;
use std::io::process;
use std::io::process::Command;
use std::io::signal::{Listener, Interrupt};
#[start]
fn start(argc: int, argv: *const *const u8) -> int {
green::start(argc, argv, rustuv::event_loop, main)
}
fn main() {
unsafe { libc::setsid(); }

View File

@ -13,30 +13,14 @@
// quite quickly and it takes a few seconds for the sockets to get
// recycled.
#![feature(phase)]
#[phase(plugin)]
extern crate green;
extern crate native;
use std::io::{TcpListener, Listener, Acceptor, EndOfFile, TcpStream};
use std::sync::{atomic, Arc};
use std::task::TaskBuilder;
use native::NativeTaskBuilder;
static N: uint = 8;
static M: uint = 20;
green_start!(main)
fn main() {
test();
let (tx, rx) = channel();
TaskBuilder::new().native().spawn(proc() {
tx.send(test());
});
rx.recv();
}
fn test() {
@ -98,4 +82,3 @@ fn test() {
// Everything should have been accepted.
assert_eq!(cnt.load(atomic::SeqCst), N * M);
}
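The assertion above is the whole point of the stress test: every one of the N * M connections must have bumped a shared atomic counter. The TCP plumbing sits outside this hunk, so the following is only a sketch of the counting and synchronisation pattern the assertion relies on, with the networking replaced by plain spawned tasks:

    use std::sync::{atomic, Arc};

    fn main() {
        static N: uint = 8;
        static M: uint = 20;
        let cnt = Arc::new(atomic::AtomicUint::new(0));
        let (tx, rx) = channel();
        for _ in range(0u, N) {
            let cnt = cnt.clone();
            let tx = tx.clone();
            spawn(proc() {
                // stand-in for M accepted connections per task
                for _ in range(0u, M) {
                    cnt.fetch_add(1, atomic::SeqCst);
                }
                tx.send(());
            });
        }
        for _ in range(0u, N) { rx.recv(); }
        // everything should have been counted
        assert_eq!(cnt.load(atomic::SeqCst), N * M);
    }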

View File

@ -20,51 +20,23 @@
#![allow(experimental)]
#![reexport_test_harness_main = "test_main"]
extern crate native;
extern crate green;
extern crate rustuv;
#![allow(unused_imports)]
#[cfg(test)] #[start]
fn start(argc: int, argv: *const *const u8) -> int {
green::start(argc, argv, rustuv::event_loop, test_main)
}
use std::io::*;
use std::io::net::tcp::*;
use std::io::test::*;
use std::io;
use std::time::Duration;
macro_rules! iotest (
{ fn $name:ident() $b:block $(#[$a:meta])* } => (
mod $name {
#![allow(unused_imports)]
use std::io::*;
use std::io::net::tcp::*;
use std::io::test::*;
use std::io;
use std::time::Duration;
fn f() $b
$(#[$a])* #[test] fn green() { f() }
$(#[$a])* #[test] fn native() {
use native;
let (tx, rx) = channel();
native::task::spawn(proc() { tx.send(f()) });
rx.recv();
}
}
)
)
iotest!(fn eventual_timeout() {
use native;
#[cfg_attr(target_os = "freebsd", ignore)]
fn eventual_timeout() {
let addr = next_test_ip4();
let host = addr.ip.to_string();
let port = addr.port;
// Use a native task to receive connections because it turns out libuv is
// really good at accepting connections and will likely run out of file
// descriptors before timing out.
let (tx1, rx1) = channel();
let (_tx2, rx2) = channel::<()>();
native::task::spawn(proc() {
std::task::spawn(proc() {
let _l = TcpListener::bind(host.as_slice(), port).unwrap().listen();
tx1.send(());
let _ = rx2.recv_opt();
@ -80,30 +52,29 @@ iotest!(fn eventual_timeout() {
}
}
fail!("never timed out!");
} #[cfg_attr(target_os = "freebsd", ignore)])
}
iotest!(fn timeout_success() {
fn timeout_success() {
let addr = next_test_ip4();
let host = addr.ip.to_string();
let port = addr.port;
let _l = TcpListener::bind(host.as_slice(), port).unwrap().listen();
assert!(TcpStream::connect_timeout(addr, Duration::milliseconds(1000)).is_ok());
})
}
iotest!(fn timeout_error() {
fn timeout_error() {
let addr = next_test_ip4();
assert!(TcpStream::connect_timeout(addr, Duration::milliseconds(1000)).is_err());
})
}
iotest!(fn connect_timeout_zero() {
let addr = next_test_ip4();
assert!(TcpStream::connect_timeout(addr, Duration::milliseconds(0)).is_err());
})
iotest!(fn connect_timeout_negative() {
let addr = next_test_ip4();
assert!(TcpStream::connect_timeout(addr, Duration::milliseconds(-1)).is_err());
})
fn connect_timeout_zero() {
let addr = next_test_ip4();
assert!(TcpStream::connect_timeout(addr, Duration::milliseconds(0)).is_err());
}
fn connect_timeout_negative() {
let addr = next_test_ip4();
assert!(TcpStream::connect_timeout(addr, Duration::milliseconds(-1)).is_err());
}
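Taken together, the cases above check both sides of TcpStream::connect_timeout. A condensed sketch that exercises the error path (nothing listening yet) and the success path (listener bound first), reusing the helpers imported at the top of this file (the test name is hypothetical):

    #[test]
    fn connect_timeout_smoke() {
        let addr = next_test_ip4();
        // nothing is listening on this port, so a bounded connect should fail fast
        assert!(TcpStream::connect_timeout(addr, Duration::milliseconds(1000)).is_err());

        let _l = TcpListener::bind(addr.ip.to_string().as_slice(), addr.port)
            .unwrap().listen();
        // with a listener up, the same bounded connect should succeed
        assert!(TcpStream::connect_timeout(addr, Duration::milliseconds(1000)).is_ok());
    }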

View File

@ -16,8 +16,6 @@
#[phase(plugin, link)]
extern crate log;
extern crate libc;
extern crate green;
extern crate rustuv;
extern crate debug;
use std::io::net::tcp::{TcpListener, TcpStream};
@ -25,11 +23,6 @@ use std::io::{Acceptor, Listener};
use std::task::TaskBuilder;
use std::time::Duration;
#[start]
fn start(argc: int, argv: *const *const u8) -> int {
green::start(argc, argv, rustuv::event_loop, main)
}
fn main() {
// This test has a chance to time out; try not to let it time out
spawn(proc() {