auto merge of #10965 : alexcrichton/rust/libgreen, r=brson

This pull request extracts all scheduling functionality from libstd, moving it into separate crates. The new libnative and libgreen are now where 1:1 and M:N scheduling, respectively, are implemented. The standard library still requires an interface to the runtime, however (think of things like `std::comm` and `io::println`); that interface is now defined by the `Runtime` trait inside of `std::rt`.
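
To give a rough idea of this interface's shape, here is a minimal sketch of a `Runtime` implementor, modeled on the `SimpleTask` implementation that appears later in this diff. Only the descheduling hook is shown (so this won't compile as-is); the authoritative method set lives on the `Runtime` trait in `std::rt`:

```rust
use std::rt::Runtime;
use std::rt::task::{Task, BlockedTask};

struct MyRuntime;

impl Runtime for MyRuntime {
    // Block the current task `times` times, handing a BlockedTask to `f`,
    // which may hand the task back if it should be rescheduled immediately.
    fn deschedule(mut ~self, times: uint, cur_task: ~Task,
                  f: |BlockedTask| -> Result<(), BlockedTask>) {
        // scheduler-specific blocking logic goes here
    }

    // ... the trait's remaining methods are elided from this sketch ...
}
```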

The boot process is now simply that libgreen defines the start lang item, and that's it. I want to extend this soon so that libnative also has a start lang item, and also to allow libgreen and libnative to be linked together in the same process. For now, though, only libgreen can be used to start a program (unless you define the start lang item yourself). Again, I want to change this soon; I just figured that this pull request is large enough as-is.
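
For the curious, defining the start lang item yourself looks roughly like the sketch below, which mirrors the `lang_start` function that libgreen itself provides (visible later in this diff):

```rust
extern mod green;

#[lang = "start"]
fn start(main: *u8, argc: int, argv: **u8) -> int {
    // Boot the M:N scheduler pool ourselves; green::start blocks until
    // the entire pool of schedulers has shut down.
    do green::start(argc, argv) {
        let main: extern "Rust" fn() = unsafe { std::cast::transmute(main) };
        main();
    }
}
```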

This certainly wasn't a smooth transition: some functionality has no equivalent in this new separation, while other functionality is now better enabled through this new system. I did my best to separate all of the commits by topic and keep things fairly bite-sized, although some are indeed larger than others.

As a note, this is currently rebased on top of my `std::comm` rewrite (or at least an old copy of it), but none of those commits need reviewing (that will all happen in another pull request).
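
As a taste of the new API, explicitly creating a pool of green schedulers and spawning a task into it looks roughly like this (a sketch pieced together from the `SchedPool` code added in this pull request):

```rust
extern mod green;

use green::{PoolConfig, SchedPool};
use std::task::TaskOpts;

fn main() {
    // Two schedulers, each on its own OS thread. PoolConfig::new() would
    // instead read the default thread count from the environment.
    let mut pool = SchedPool::new(PoolConfig {
        threads: 2,
        event_loop_factory: None,
    });

    let (port, chan) = Chan::new();
    do pool.spawn(TaskOpts::new()) {
        chan.send(());
    }
    port.recv();

    // A pool must be explicitly shut down; dropping one with live
    // schedulers fails.
    pool.shutdown();
}
```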
bors 2013-12-26 01:01:54 -08:00
commit 9477c49a7b
120 changed files with 5828 additions and 6370 deletions

View File

@ -235,6 +235,8 @@ CFG_LIBSYNTAX_$(1) :=$(call CFG_LIB_NAME_$(1),syntax)
CFG_LIBRUSTPKG_$(1) :=$(call CFG_LIB_NAME_$(1),rustpkg)
CFG_LIBRUSTDOC_$(1) :=$(call CFG_LIB_NAME_$(1),rustdoc)
CFG_LIBRUSTUV_$(1) :=$(call CFG_LIB_NAME_$(1),rustuv)
CFG_LIBGREEN_$(1) :=$(call CFG_LIB_NAME_$(1),green)
CFG_LIBNATIVE_$(1) :=$(call CFG_LIB_NAME_$(1),native)
EXTRALIB_GLOB_$(1) :=$(call CFG_LIB_GLOB_$(1),extra)
STDLIB_GLOB_$(1) :=$(call CFG_LIB_GLOB_$(1),std)
@ -243,6 +245,8 @@ LIBSYNTAX_GLOB_$(1) :=$(call CFG_LIB_GLOB_$(1),syntax)
LIBRUSTPKG_GLOB_$(1) :=$(call CFG_LIB_GLOB_$(1),rustpkg)
LIBRUSTDOC_GLOB_$(1) :=$(call CFG_LIB_GLOB_$(1),rustdoc)
LIBRUSTUV_GLOB_$(1) :=$(call CFG_LIB_GLOB_$(1),rustuv)
LIBGREEN_GLOB_$(1) :=$(call CFG_LIB_GLOB_$(1),green)
LIBNATIVE_GLOB_$(1) :=$(call CFG_LIB_GLOB_$(1),native)
EXTRALIB_DSYM_GLOB_$(1) :=$(call CFG_LIB_DSYM_GLOB_$(1),extra)
STDLIB_DSYM_GLOB_$(1) :=$(call CFG_LIB_DSYM_GLOB_$(1),std)
LIBRUSTC_DSYM_GLOB_$(1) :=$(call CFG_LIB_DSYM_GLOB_$(1),rustc)
@ -250,12 +254,16 @@ LIBSYNTAX_DSYM_GLOB_$(1) :=$(call CFG_LIB_DSYM_GLOB_$(1),syntax)
LIBRUSTPKG_DSYM_GLOB_$(1) :=$(call CFG_LIB_DSYM_GLOB_$(1),rustpkg)
LIBRUSTDOC_DSYM_GLOB_$(1) :=$(call CFG_LIB_DSYM_GLOB_$(1),rustdoc)
LIBRUSTUV_DSYM_GLOB_$(1) :=$(call CFG_LIB_DSYM_GLOB_$(1),rustuv)
LIBGREEN_DSYM_GLOB_$(1) :=$(call CFG_LIB_DSYM_GLOB_$(1),green)
LIBNATIVE_DSYM_GLOB_$(1) :=$(call CFG_LIB_DSYM_GLOB_$(1),native)
EXTRALIB_RGLOB_$(1) :=$(call CFG_RLIB_GLOB,extra)
STDLIB_RGLOB_$(1) :=$(call CFG_RLIB_GLOB,std)
LIBRUSTUV_RGLOB_$(1) :=$(call CFG_RLIB_GLOB,rustuv)
LIBSYNTAX_RGLOB_$(1) :=$(call CFG_RLIB_GLOB,syntax)
LIBRUSTC_RGLOB_$(1) :=$(call CFG_RLIB_GLOB,rustc)
LIBNATIVE_RGLOB_$(1) :=$(call CFG_RLIB_GLOB,native)
LIBGREEN_RGLOB_$(1) :=$(call CFG_RLIB_GLOB,green)
endef
@ -272,9 +280,15 @@ define CHECK_FOR_OLD_GLOB_MATCHES_EXCEPT
endef
# Same interface as above, but deletes rather than just listing the files.
ifdef VERBOSE
define REMOVE_ALL_OLD_GLOB_MATCHES_EXCEPT
$(Q)MATCHES="$(filter-out %$(3),$(wildcard $(1)/$(2)))"; if [ -n "$$MATCHES" ] ; then echo "warning: removing previous" \'$(2)\' "libraries:" $$MATCHES; rm $$MATCHES ; fi
endef
else
define REMOVE_ALL_OLD_GLOB_MATCHES_EXCEPT
$(Q)MATCHES="$(filter-out %$(3),$(wildcard $(1)/$(2)))"; if [ -n "$$MATCHES" ] ; then rm $$MATCHES ; fi
endef
endif
# We use a different strategy for LIST_ALL_OLD_GLOB_MATCHES_EXCEPT
# than in the macros above because it needs the result of running the
@ -319,6 +333,22 @@ LIBRUSTUV_CRATE := $(S)src/librustuv/lib.rs
LIBRUSTUV_INPUTS := $(wildcard $(addprefix $(S)src/librustuv/, \
*.rs */*.rs))
######################################################################
# Green threading library variables
######################################################################
LIBGREEN_CRATE := $(S)src/libgreen/lib.rs
LIBGREEN_INPUTS := $(wildcard $(addprefix $(S)src/libgreen/, \
*.rs */*.rs))
######################################################################
# Native threading library variables
######################################################################
LIBNATIVE_CRATE := $(S)src/libnative/lib.rs
LIBNATIVE_INPUTS := $(wildcard $(addprefix $(S)src/libnative/, \
*.rs */*.rs))
######################################################################
# rustc crate variables
######################################################################
@ -430,6 +460,16 @@ HLIBRUSTUV_DEFAULT$(1)_H_$(3) = \
TLIBRUSTUV_DEFAULT$(1)_T_$(2)_H_$(3) = \
$$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBRUSTUV_$(2))
HLIBGREEN_DEFAULT$(1)_H_$(3) = \
$$(HLIB$(1)_H_$(3))/$(CFG_LIBGREEN_$(3))
TLIBGREEN_DEFAULT$(1)_T_$(2)_H_$(3) = \
$$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBGREEN_$(2))
HLIBNATIVE_DEFAULT$(1)_H_$(3) = \
$$(HLIB$(1)_H_$(3))/$(CFG_LIBNATIVE_$(3))
TLIBNATIVE_DEFAULT$(1)_T_$(2)_H_$(3) = \
$$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBNATIVE_$(2))
# Prerequisites for using the stageN compiler
ifeq ($(1),0)
HSREQ$(1)_H_$(3) = $$(HBIN$(1)_H_$(3))/rustc$$(X_$(3))
@ -441,6 +481,8 @@ HSREQ$(1)_H_$(3) = \
$$(HLIBSYNTAX_DEFAULT$(1)_H_$(3)) \
$$(HLIBRUSTC_DEFAULT$(1)_H_$(3)) \
$$(HLIBRUSTUV_DEFAULT$(1)_H_$(3)) \
$$(HLIBGREEN_DEFAULT$(1)_H_$(3)) \
$$(HLIBNATIVE_DEFAULT$(1)_H_$(3)) \
$$(MKFILE_DEPS)
endif
@ -455,7 +497,9 @@ SREQ$(1)_T_$(2)_H_$(3) = \
$$(TSREQ$(1)_T_$(2)_H_$(3)) \
$$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_STDLIB_$(2)) \
$$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_EXTRALIB_$(2)) \
$$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBRUSTUV_$(2))
$$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBRUSTUV_$(2)) \
$$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBGREEN_$(2)) \
$$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBNATIVE_$(2))
# Prerequisites for a working stageN compiler and libraries, for a specific target
CSREQ$(1)_T_$(2)_H_$(3) = \
@ -470,7 +514,9 @@ CSREQ$(1)_T_$(2)_H_$(3) = \
$$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBRUSTC_$(2)) \
$$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBRUSTPKG_$(2)) \
$$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBRUSTDOC_$(2)) \
$$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBRUSTUV_$(2))
$$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBRUSTUV_$(2)) \
$$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBGREEN_$(2)) \
$$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBNATIVE_$(2))
ifeq ($(1),0)
# Don't run the stage0 compiler under valgrind - that ship has sailed

View File

@ -90,6 +90,8 @@ clean$(1)_H_$(2):
$(Q)rm -f $$(HLIB$(1)_H_$(2))/$(CFG_STDLIB_$(2))
$(Q)rm -f $$(HLIB$(1)_H_$(2))/$(CFG_EXTRALIB_$(2))
$(Q)rm -f $$(HLIB$(1)_H_$(2))/$(CFG_LIBRUSTUV_$(2))
$(Q)rm -f $$(HLIB$(1)_H_$(2))/$(CFG_LIBNATIVE_$(2))
$(Q)rm -f $$(HLIB$(1)_H_$(2))/$(CFG_LIBGREEN_$(2))
$(Q)rm -f $$(HLIB$(1)_H_$(2))/$(CFG_LIBRUSTC_$(2))
$(Q)rm -f $$(HLIB$(1)_H_$(2))/$(CFG_LIBSYNTAX_$(2))
$(Q)rm -f $$(HLIB$(1)_H_$(2))/$(STDLIB_GLOB_$(2))
@ -98,6 +100,10 @@ clean$(1)_H_$(2):
$(Q)rm -f $$(HLIB$(1)_H_$(2))/$(EXTRALIB_RGLOB_$(2))
$(Q)rm -f $$(HLIB$(1)_H_$(2))/$(LIBRUSTUV_GLOB_$(2))
$(Q)rm -f $$(HLIB$(1)_H_$(2))/$(LIBRUSTUV_RGLOB_$(2))
$(Q)rm -f $$(HLIB$(1)_H_$(2))/$(LIBNATIVE_GLOB_$(2))
$(Q)rm -f $$(HLIB$(1)_H_$(2))/$(LIBNATIVE_RGLOB_$(2))
$(Q)rm -f $$(HLIB$(1)_H_$(2))/$(LIBGREEN_GLOB_$(2))
$(Q)rm -f $$(HLIB$(1)_H_$(2))/$(LIBGREEN_RGLOB_$(2))
$(Q)rm -f $$(HLIB$(1)_H_$(2))/$(LIBRUSTC_GLOB_$(2))
$(Q)rm -f $$(HLIB$(1)_H_$(2))/$(LIBSYNTAX_GLOB_$(2))
$(Q)rm -f $$(HLIB$(1)_H_$(2))/$(LIBRUSTPKG_GLOB_$(2))
@ -124,6 +130,8 @@ clean$(1)_T_$(2)_H_$(3):
$(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_STDLIB_$(2))
$(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_EXTRALIB_$(2))
$(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBRUSTUV_$(2))
$(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBGREEN_$(2))
$(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBNATIVE_$(2))
$(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBRUSTC_$(2))
$(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBSYNTAX_$(2))
$(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(STDLIB_GLOB_$(2))
@ -132,6 +140,10 @@ clean$(1)_T_$(2)_H_$(3):
$(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(EXTRALIB_RGLOB_$(2))
$(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(LIBRUSTUV_GLOB_$(2))
$(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(LIBRUSTUV_RGLOB_$(2))
$(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(LIBNATIVE_GLOB_$(2))
$(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(LIBNATIVE_RGLOB_$(2))
$(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(LIBGREEN_GLOB_$(2))
$(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(LIBGREEN_RGLOB_$(2))
$(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(LIBRUSTC_GLOB_$(2))
$(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(LIBRUSTC_RGLOB_$(2))
$(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/$(LIBSYNTAX_GLOB_$(2))

View File

@ -35,6 +35,9 @@ PKG_FILES := \
libextra \
libstd \
libsyntax \
librustuv \
libgreen \
libnative \
rt \
librustdoc \
rustllvm \

View File

@ -25,13 +25,7 @@ define CP_HOST_STAGE_N
$$(HBIN$(2)_H_$(4))/rustc$$(X_$(4)): \
$$(TBIN$(1)_T_$(4)_H_$(3))/rustc$$(X_$(4)) \
$$(HLIB$(2)_H_$(4))/$(CFG_RUNTIME_$(4)) \
$$(HLIB$(2)_H_$(4))/$(CFG_LIBRUSTC_$(4)) \
$$(HSTDLIB_DEFAULT$(2)_H_$(4)) \
$$(HEXTRALIB_DEFAULT$(2)_H_$(4)) \
$$(HLIBRUSTUV_DEFAULT$(2)_H_$(4)) \
$$(HLIBRUSTC_DEFAULT$(2)_H_$(4)) \
$$(HLIBSYNTAX_DEFAULT$(2)_H_$(4)) \
| $$(HBIN$(2)_H_$(4))/
@$$(call E, cp: $$@)
$$(Q)cp $$< $$@
@ -39,10 +33,6 @@ $$(HBIN$(2)_H_$(4))/rustc$$(X_$(4)): \
$$(HLIB$(2)_H_$(4))/$(CFG_LIBRUSTC_$(4)): \
$$(TLIB$(1)_T_$(4)_H_$(3))/$(CFG_LIBRUSTC_$(4)) \
$$(HLIB$(2)_H_$(4))/$(CFG_LIBSYNTAX_$(4)) \
$$(HLIB$(2)_H_$(4))/$(CFG_RUNTIME_$(4)) \
$$(HSTDLIB_DEFAULT$(2)_H_$(4)) \
$$(HEXTRALIB_DEFAULT$(2)_H_$(4)) \
$$(HLIBRUSTUV_DEFAULT$(2)_H_$(4)) \
| $$(HLIB$(2)_H_$(4))/
@$$(call E, cp: $$@)
@ -55,10 +45,11 @@ $$(HLIB$(2)_H_$(4))/$(CFG_LIBRUSTC_$(4)): \
$$(HLIB$(2)_H_$(4))/$(CFG_LIBSYNTAX_$(4)): \
$$(TLIB$(1)_T_$(4)_H_$(3))/$(CFG_LIBSYNTAX_$(4)) \
$$(HLIB$(2)_H_$(4))/$(CFG_RUNTIME_$(4)) \
$$(HSTDLIB_DEFAULT$(2)_H_$(4)) \
$$(HEXTRALIB_DEFAULT$(2)_H_$(4)) \
$$(HLIBRUSTUV_DEFAULT$(2)_H_$(4)) \
$$(HLIBGREEN_DEFAULT$(2)_H_$(4)) \
$$(HLIBNATIVE_DEFAULT$(2)_H_$(4)) \
| $$(HLIB$(2)_H_$(4))/
@$$(call E, cp: $$@)
$$(call REMOVE_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBSYNTAX_GLOB_$(4)),$$(notdir $$@))
@ -76,7 +67,6 @@ $$(HLIB$(2)_H_$(4))/$(CFG_RUNTIME_$(4)): \
$$(HLIB$(2)_H_$(4))/$(CFG_STDLIB_$(4)): \
$$(TLIB$(1)_T_$(4)_H_$(3))/$(CFG_STDLIB_$(4)) \
$$(HLIB$(2)_H_$(4))/$(CFG_RUNTIME_$(4)) \
| $$(HLIB$(2)_H_$(4))/
@$$(call E, cp: $$@)
$$(call REMOVE_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(STDLIB_GLOB_$(4)),$$(notdir $$@))
@ -98,8 +88,7 @@ $$(HLIB$(2)_H_$(4))/$(CFG_STDLIB_$(4)): \
$$(HLIB$(2)_H_$(4))/$(CFG_EXTRALIB_$(4)): \
$$(TLIB$(1)_T_$(4)_H_$(3))/$(CFG_EXTRALIB_$(4)) \
$$(HLIB$(2)_H_$(4))/$(CFG_STDLIB_$(4)) \
$$(HLIB$(2)_H_$(4))/$(CFG_RUNTIME_$(4)) \
$$(HSTDLIB_DEFAULT$(2)_H_$(4)) \
| $$(HLIB$(2)_H_$(4))/
@$$(call E, cp: $$@)
$$(call REMOVE_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(EXTRALIB_GLOB_$(4)),$$(notdir $$@))
@ -115,7 +104,6 @@ $$(HLIB$(2)_H_$(4))/$(CFG_EXTRALIB_$(4)): \
$$(HLIB$(2)_H_$(4))/$(CFG_LIBRUSTUV_$(4)): \
$$(TLIB$(1)_T_$(4)_H_$(3))/$(CFG_LIBRUSTUV_$(4)) \
$$(HLIB$(2)_H_$(4))/$(CFG_STDLIB_$(4)) \
$$(HLIB$(2)_H_$(4))/$(CFG_RUNTIME_$(4)) \
| $$(HLIB$(2)_H_$(4))/
@$$(call E, cp: $$@)
$$(call REMOVE_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBRUSTUV_GLOB_$(4)),$$(notdir $$@))
@ -128,6 +116,36 @@ $$(HLIB$(2)_H_$(4))/$(CFG_LIBRUSTUV_$(4)): \
$$(call LIST_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBRUSTUV_GLOB_$(4)),$$(notdir $$@))
$$(call LIST_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBRUSTUV_RGLOB_$(4)),$$(notdir $$@))
$$(HLIB$(2)_H_$(4))/$(CFG_LIBGREEN_$(4)): \
$$(TLIB$(1)_T_$(4)_H_$(3))/$(CFG_LIBGREEN_$(4)) \
$$(HLIB$(2)_H_$(4))/$(CFG_STDLIB_$(4)) \
| $$(HLIB$(2)_H_$(4))/
@$$(call E, cp: $$@)
$$(call REMOVE_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBGREEN_GLOB_$(4)),$$(notdir $$@))
$$(call REMOVE_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBGREEN_RGLOB_$(4)),$$(notdir $$@))
$$(Q)cp $$< $$@
$$(Q)cp -R $$(TLIB$(1)_T_$(4)_H_$(3))/$(LIBGREEN_GLOB_$(4)) \
$$(wildcard $$(TLIB$(1)_T_$(4)_H_$(3))/$(LIBGREEN_RGLOB_$(4))) \
$$(wildcard $$(TLIB$(1)_T_$(4)_H_$(3))/$(LIBGREEN_DSYM_GLOB_$(4))) \
$$(HLIB$(2)_H_$(4))
$$(call LIST_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBGREEN_GLOB_$(4)),$$(notdir $$@))
$$(call LIST_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBGREEN_RGLOB_$(4)),$$(notdir $$@))
$$(HLIB$(2)_H_$(4))/$(CFG_LIBNATIVE_$(4)): \
$$(TLIB$(1)_T_$(4)_H_$(3))/$(CFG_LIBNATIVE_$(4)) \
$$(HLIB$(2)_H_$(4))/$(CFG_STDLIB_$(4)) \
| $$(HLIB$(2)_H_$(4))/
@$$(call E, cp: $$@)
$$(call REMOVE_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBNATIVE_GLOB_$(4)),$$(notdir $$@))
$$(call REMOVE_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBNATIVE_RGLOB_$(4)),$$(notdir $$@))
$$(Q)cp $$< $$@
$$(Q)cp -R $$(TLIB$(1)_T_$(4)_H_$(3))/$(LIBNATIVE_GLOB_$(4)) \
$$(wildcard $$(TLIB$(1)_T_$(4)_H_$(3))/$(LIBNATIVE_RGLOB_$(4))) \
$$(wildcard $$(TLIB$(1)_T_$(4)_H_$(3))/$(LIBNATIVE_DSYM_GLOB_$(4))) \
$$(HLIB$(2)_H_$(4))
$$(call LIST_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBNATIVE_GLOB_$(4)),$$(notdir $$@))
$$(call LIST_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBNATIVE_RGLOB_$(4)),$$(notdir $$@))
$$(HBIN$(2)_H_$(4))/:
mkdir -p $$@

View File

@ -94,6 +94,10 @@ install-target-$(1)-host-$(2): $$(TSREQ$$(ISTAGE)_T_$(1)_H_$(2)) $$(SREQ$$(ISTAG
$$(Q)$$(call INSTALL_LIB,$$(EXTRALIB_RGLOB_$(1)))
$$(Q)$$(call INSTALL_LIB,$$(LIBRUSTUV_GLOB_$(1)))
$$(Q)$$(call INSTALL_LIB,$$(LIBRUSTUV_RGLOB_$(1)))
$$(Q)$$(call INSTALL_LIB,$$(LIBGREEN_GLOB_$(1)))
$$(Q)$$(call INSTALL_LIB,$$(LIBGREEN_RGLOB_$(1)))
$$(Q)$$(call INSTALL_LIB,$$(LIBNATIVE_GLOB_$(1)))
$$(Q)$$(call INSTALL_LIB,$$(LIBNATIVE_RGLOB_$(1)))
$$(Q)$$(call INSTALL_LIB,libmorestack.a)
endef
@ -109,6 +113,10 @@ install-target-$(1)-host-$(2): $$(CSREQ$$(ISTAGE)_T_$(1)_H_$(2))
$$(Q)$$(call INSTALL_LIB,$$(EXTRALIB_RGLOB_$(1)))
$$(Q)$$(call INSTALL_LIB,$$(LIBRUSTUV_GLOB_$(1)))
$$(Q)$$(call INSTALL_LIB,$$(LIBRUSTUV_RGLOB_$(1)))
$$(Q)$$(call INSTALL_LIB,$$(LIBGREEN_GLOB_$(1)))
$$(Q)$$(call INSTALL_LIB,$$(LIBGREEN_RGLOB_$(1)))
$$(Q)$$(call INSTALL_LIB,$$(LIBNATIVE_GLOB_$(1)))
$$(Q)$$(call INSTALL_LIB,$$(LIBNATIVE_RGLOB_$(1)))
$$(Q)$$(call INSTALL_LIB,$$(LIBRUSTC_GLOB_$(1)))
$$(Q)$$(call INSTALL_LIB,$$(LIBSYNTAX_GLOB_$(1)))
$$(Q)$$(call INSTALL_LIB,$$(LIBRUSTPKG_GLOB_$(1)))
@ -149,6 +157,7 @@ install-host: $(CSREQ$(ISTAGE)_T_$(CFG_BUILD_)_H_$(CFG_BUILD_))
$(Q)$(call INSTALL_LIB,$(STDLIB_GLOB_$(CFG_BUILD)))
$(Q)$(call INSTALL_LIB,$(EXTRALIB_GLOB_$(CFG_BUILD)))
$(Q)$(call INSTALL_LIB,$(LIBRUSTUV_GLOB_$(CFG_BUILD)))
$(Q)$(call INSTALL_LIB,$(LIBGREEN_GLOB_$(CFG_BUILD)))
$(Q)$(call INSTALL_LIB,$(LIBRUSTC_GLOB_$(CFG_BUILD)))
$(Q)$(call INSTALL_LIB,$(LIBSYNTAX_GLOB_$(CFG_BUILD)))
$(Q)$(call INSTALL_LIB,$(LIBRUSTPKG_GLOB_$(CFG_BUILD)))
@ -174,6 +183,10 @@ uninstall:
$(call HOST_LIB_FROM_HL_GLOB,$(EXTRALIB_RGLOB_$(CFG_BUILD))) \
$(call HOST_LIB_FROM_HL_GLOB,$(LIBRUSTUV_GLOB_$(CFG_BUILD))) \
$(call HOST_LIB_FROM_HL_GLOB,$(LIBRUSTUV_RGLOB_$(CFG_BUILD))) \
$(call HOST_LIB_FROM_HL_GLOB,$(LIBGREEN_GLOB_$(CFG_BUILD))) \
$(call HOST_LIB_FROM_HL_GLOB,$(LIBGREEN_RGLOB_$(CFG_BUILD))) \
$(call HOST_LIB_FROM_HL_GLOB,$(LIBNATIVE_GLOB_$(CFG_BUILD))) \
$(call HOST_LIB_FROM_HL_GLOB,$(LIBNATIVE_RGLOB_$(CFG_BUILD))) \
$(call HOST_LIB_FROM_HL_GLOB,$(LIBRUSTC_GLOB_$(CFG_BUILD))) \
$(call HOST_LIB_FROM_HL_GLOB,$(LIBSYNTAX_GLOB_$(CFG_BUILD))) \
$(call HOST_LIB_FROM_HL_GLOB,$(LIBRUSTPKG_GLOB_$(CFG_BUILD))) \
@ -237,6 +250,7 @@ install-runtime-target-$(1)-host-$(2): $$(TSREQ$$(ISTAGE)_T_$(1)_H_$(2)) $$(SREQ
$(Q)$(call ADB_PUSH,$$(TL$(1)$(2))/$$(STDLIB_GLOB_$(1)),$(CFG_RUNTIME_PUSH_DIR))
$(Q)$(call ADB_PUSH,$$(TL$(1)$(2))/$$(EXTRALIB_GLOB_$(1)),$(CFG_RUNTIME_PUSH_DIR))
$(Q)$(call ADB_PUSH,$$(TL$(1)$(2))/$$(LIBRUSTUV_GLOB_$(1)),$(CFG_RUNTIME_PUSH_DIR))
$(Q)$(call ADB_PUSH,$$(TL$(1)$(2))/$$(LIBGREEN_GLOB_$(1)),$(CFG_RUNTIME_PUSH_DIR))
endef
define INSTALL_RUNTIME_TARGET_CLEANUP_N
@ -245,6 +259,7 @@ install-runtime-target-$(1)-cleanup:
$(Q)$(call ADB_SHELL,rm,$(CFG_RUNTIME_PUSH_DIR)/$(STDLIB_GLOB_$(1)))
$(Q)$(call ADB_SHELL,rm,$(CFG_RUNTIME_PUSH_DIR)/$(EXTRALIB_GLOB_$(1)))
$(Q)$(call ADB_SHELL,rm,$(CFG_RUNTIME_PUSH_DIR)/$(LIBRUSTUV_GLOB_$(1)))
$(Q)$(call ADB_SHELL,rm,$(CFG_RUNTIME_PUSH_DIR)/$(LIBGREEN_GLOB_$(1)))
endef
$(eval $(call INSTALL_RUNTIME_TARGET_N,arm-linux-androideabi,$(CFG_BUILD)))

View File

@ -94,12 +94,37 @@ $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBRUSTUV_$(2)): \
$$(call LIST_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBRUSTUV_GLOB_$(2)),$$(notdir $$@))
$$(call LIST_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBRUSTUV_RGLOB_$(2)),$$(notdir $$@))
$$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBGREEN_$(2)): \
$$(LIBGREEN_CRATE) $$(LIBGREEN_INPUTS) \
$$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_STDLIB_$(2)) \
$$(TSREQ$(1)_T_$(2)_H_$(3)) \
| $$(TLIB$(1)_T_$(2)_H_$(3))/
@$$(call E, compile_and_link: $$@)
$$(call REMOVE_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBGREEN_GLOB_$(2)),$$(notdir $$@))
$$(call REMOVE_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBGREEN_RGLOB_$(2)),$$(notdir $$@))
$$(STAGE$(1)_T_$(2)_H_$(3)) $$(WFLAGS_ST$(1)) \
--out-dir $$(@D) $$< && touch $$@
$$(call LIST_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBGREEN_GLOB_$(2)),$$(notdir $$@))
$$(call LIST_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBGREEN_RGLOB_$(2)),$$(notdir $$@))
$$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBNATIVE_$(2)): \
$$(LIBNATIVE_CRATE) $$(LIBNATIVE_INPUTS) \
$$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_STDLIB_$(2)) \
$$(TSREQ$(1)_T_$(2)_H_$(3)) \
| $$(TLIB$(1)_T_$(2)_H_$(3))/
@$$(call E, compile_and_link: $$@)
$$(call REMOVE_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBNATIVE_GLOB_$(2)),$$(notdir $$@))
$$(call REMOVE_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBNATIVE_RGLOB_$(2)),$$(notdir $$@))
$$(STAGE$(1)_T_$(2)_H_$(3)) $$(WFLAGS_ST$(1)) \
--out-dir $$(@D) $$< && touch $$@
$$(call LIST_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBNATIVE_GLOB_$(2)),$$(notdir $$@))
$$(call LIST_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBNATIVE_RGLOB_$(2)),$$(notdir $$@))
$$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBSYNTAX_$(3)): \
$$(LIBSYNTAX_CRATE) $$(LIBSYNTAX_INPUTS) \
$$(TSREQ$(1)_T_$(2)_H_$(3)) \
$$(TSTDLIB_DEFAULT$(1)_T_$(2)_H_$(3)) \
$$(TEXTRALIB_DEFAULT$(1)_T_$(2)_H_$(3)) \
$$(TLIBRUSTUV_DEFAULT$(1)_T_$(2)_H_$(3)) \
| $$(TLIB$(1)_T_$(2)_H_$(3))/
@$$(call E, compile_and_link: $$@)
$$(call REMOVE_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBSYNTAX_GLOB_$(2)),$$(notdir $$@))
@ -135,16 +160,13 @@ $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBRUSTC_$(3)): \
$$(call LIST_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBRUSTC_GLOB_$(2)),$$(notdir $$@))
$$(call LIST_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBRUSTC_RGLOB_$(2)),$$(notdir $$@))
# NOTE: after the next snapshot remove these '-L' flags
$$(TBIN$(1)_T_$(2)_H_$(3))/rustc$$(X_$(3)): \
$$(DRIVER_CRATE) \
$$(TSREQ$(1)_T_$(2)_H_$(3)) \
$$(SREQ$(1)_T_$(2)_H_$(3)) \
$$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBRUSTC_$(3)) \
| $$(TBIN$(1)_T_$(2)_H_$(3))/
@$$(call E, compile_and_link: $$@)
$$(STAGE$(1)_T_$(2)_H_$(3)) --cfg rustc -o $$@ $$< \
-L $$(UV_SUPPORT_DIR_$(2)) \
-L $$(dir $$(LIBUV_LIB_$(2)))
$$(STAGE$(1)_T_$(2)_H_$(3)) --cfg rustc -o $$@ $$<
ifdef CFG_ENABLE_PAX_FLAGS
@$$(call E, apply PaX flags: $$@)
@"$(CFG_PAXCTL)" -cm "$$@"

View File

@ -14,7 +14,7 @@
######################################################################
# The names of crates that must be tested
TEST_TARGET_CRATES = std extra rustuv
TEST_TARGET_CRATES = std extra rustuv green native
TEST_DOC_CRATES = std extra
TEST_HOST_CRATES = rustpkg rustc rustdoc syntax
TEST_CRATES = $(TEST_TARGET_CRATES) $(TEST_HOST_CRATES)
@ -162,6 +162,8 @@ $(info check: android device test dir $(CFG_ADB_TEST_DIR) ready \
$(CFG_ADB_TEST_DIR)) \
$(shell adb push $(TLIB2_T_arm-linux-androideabi_H_$(CFG_BUILD))/$(LIBRUSTUV_GLOB_arm-linux-androideabi) \
$(CFG_ADB_TEST_DIR)) \
$(shell adb push $(TLIB2_T_arm-linux-androideabi_H_$(CFG_BUILD))/$(LIBGREEN_GLOB_arm-linux-androideabi) \
$(CFG_ADB_TEST_DIR)) \
)
else
CFG_ADB_TEST_DIR=
@ -187,7 +189,7 @@ check-test: cleantestlibs cleantmptestlogs all check-stage2-rfail
check-lite: cleantestlibs cleantmptestlogs \
check-stage2-std check-stage2-extra check-stage2-rpass \
check-stage2-rustuv \
check-stage2-rustuv check-stage2-native check-stage2-green \
check-stage2-rustpkg \
check-stage2-rfail check-stage2-cfail check-stage2-rmake
$(Q)$(CFG_PYTHON) $(S)src/etc/check-summary.py tmp/*.log
@ -339,19 +341,20 @@ define TEST_RUNNER
ifeq ($(NO_REBUILD),)
STDTESTDEP_$(1)_$(2)_$(3) = $$(SREQ$(1)_T_$(2)_H_$(3)) \
$$(TLIB$(1)_T_$(2)_H_$(3))/$$(CFG_EXTRALIB_$(2)) \
$$(TLIB$(1)_T_$(2)_H_$(3))/$$(CFG_LIBRUSTUV_$(2))
$$(TLIB$(1)_T_$(2)_H_$(3))/$$(CFG_LIBRUSTUV_$(2)) \
$$(TLIB$(1)_T_$(2)_H_$(3))/$$(CFG_LIBGREEN_$(2))
else
STDTESTDEP_$(1)_$(2)_$(3) =
endif
$(3)/stage$(1)/test/stdtest-$(2)$$(X_$(2)): \
$$(STDLIB_CRATE) $$(STDLIB_INPUTS) \
$$(STDLIB_CRATE) $$(STDLIB_INPUTS) \
$$(STDTESTDEP_$(1)_$(2)_$(3))
@$$(call E, compile_and_link: $$@)
$$(STAGE$(1)_T_$(2)_H_$(3)) -o $$@ $$< --test
$(3)/stage$(1)/test/extratest-$(2)$$(X_$(2)): \
$$(EXTRALIB_CRATE) $$(EXTRALIB_INPUTS) \
$$(EXTRALIB_CRATE) $$(EXTRALIB_INPUTS) \
$$(STDTESTDEP_$(1)_$(2)_$(3))
@$$(call E, compile_and_link: $$@)
$$(STAGE$(1)_T_$(2)_H_$(3)) -o $$@ $$< --test
@ -364,6 +367,18 @@ $(3)/stage$(1)/test/rustuvtest-$(2)$$(X_$(2)): \
-L $$(UV_SUPPORT_DIR_$(2)) \
-L $$(dir $$(LIBUV_LIB_$(2)))
$(3)/stage$(1)/test/nativetest-$(2)$$(X_$(2)): \
$$(LIBNATIVE_CRATE) $$(LIBNATIVE_INPUTS) \
$$(STDTESTDEP_$(1)_$(2)_$(3))
@$$(call E, compile_and_link: $$@)
$$(STAGE$(1)_T_$(2)_H_$(3)) -o $$@ $$< --test
$(3)/stage$(1)/test/greentest-$(2)$$(X_$(2)): \
$$(LIBGREEN_CRATE) $$(LIBGREEN_INPUTS) \
$$(STDTESTDEP_$(1)_$(2)_$(3))
@$$(call E, compile_and_link: $$@)
$$(STAGE$(1)_T_$(2)_H_$(3)) -o $$@ $$< --test
$(3)/stage$(1)/test/syntaxtest-$(2)$$(X_$(2)): \
$$(LIBSYNTAX_CRATE) $$(LIBSYNTAX_INPUTS) \
$$(STDTESTDEP_$(1)_$(2)_$(3))
@ -375,7 +390,7 @@ $(3)/stage$(1)/test/rustctest-$(2)$$(X_$(2)): \
$$(COMPILER_CRATE) $$(COMPILER_INPUTS) \
$$(SREQ$(1)_T_$(2)_H_$(3)) \
$$(TLIB$(1)_T_$(2)_H_$(3))/$$(CFG_RUSTLLVM_$(2)) \
$$(TLIB$(1)_T_$(2)_H_$(3))/$$(CFG_LIBSYNTAX_$(2))
$$(TLIB$(1)_T_$(2)_H_$(3))/$$(CFG_LIBSYNTAX_$(2))
@$$(call E, compile_and_link: $$@)
$$(STAGE$(1)_T_$(2)_H_$(3)) -o $$@ $$< --test \
-L "$$(LLVM_LIBDIR_$(2))"
@ -416,10 +431,10 @@ check-stage$(1)-T-$(2)-H-$(3)-$(4)-exec: $$(call TEST_OK_FILE,$(1),$(2),$(3),$(4
$$(call TEST_OK_FILE,$(1),$(2),$(3),$(4)): \
$(3)/stage$(1)/test/$(4)test-$(2)$$(X_$(2))
@$$(call E, run: $$<)
$$(Q)$$(call CFG_RUN_TEST_$(2),$$<,$(2),$(3)) $$(TESTARGS) \
--logfile $$(call TEST_LOG_FILE,$(1),$(2),$(3),$(4)) \
$$(call CRATE_TEST_EXTRA_ARGS,$(1),$(2),$(3),$(4)) \
&& touch $$@
$$(Q)$$(call CFG_RUN_TEST_$(2),$$<,$(2),$(3)) $$(TESTARGS) \
--logfile $$(call TEST_LOG_FILE,$(1),$(2),$(3),$(4)) \
$$(call CRATE_TEST_EXTRA_ARGS,$(1),$(2),$(3),$(4)) \
&& touch $$@
endef
define DEF_TEST_CRATE_RULES_arm-linux-androideabi

View File

@ -13,10 +13,11 @@
#[allow(non_camel_case_types)];
#[deny(warnings)];
#[cfg(stage0)] extern mod green;
extern mod extra;
use std::os;
use std::rt;
use std::io;
use std::io::fs;
use extra::getopts;
@ -234,7 +235,7 @@ pub fn run_tests(config: &config) {
// sadly osx needs some file descriptor limits raised for running tests in
// parallel (especially when we have lots and lots of child processes).
// For context, see #8904
rt::test::prepare_for_lots_of_tests();
io::test::raise_fd_limit();
let res = test::run_tests_console(&opts, tests);
if !res { fail!("Some tests failed"); }
}

View File

@ -757,8 +757,8 @@ fn make_lib_name(config: &config, auxfile: &Path, testfile: &Path) -> Path {
fn make_exe_name(config: &config, testfile: &Path) -> Path {
let mut f = output_base_name(config, testfile);
if !os::EXE_SUFFIX.is_empty() {
match f.filename().map(|s| s + os::EXE_SUFFIX.as_bytes()) {
if !os::consts::EXE_SUFFIX.is_empty() {
match f.filename().map(|s| s + os::consts::EXE_SUFFIX.as_bytes()) {
Some(v) => f.set_filename(v),
None => ()
}

View File

@ -8,6 +8,8 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[cfg(stage0)] extern mod green;
#[cfg(rustpkg)]
extern mod this = "rustpkg";

View File

@ -76,9 +76,9 @@ exceptions = [
"rt/isaac/randport.cpp", # public domain
"rt/isaac/rand.h", # public domain
"rt/isaac/standard.h", # public domain
"libstd/rt/mpsc_queue.rs", # BSD
"libstd/rt/spsc_queue.rs", # BSD
"libstd/rt/mpmc_bounded_queue.rs", # BSD
"libstd/sync/mpsc_queue.rs", # BSD
"libstd/sync/spsc_queue.rs", # BSD
"libstd/sync/mpmc_bounded_queue.rs", # BSD
]
def check_license(name, contents):

View File

@ -45,7 +45,7 @@ use sync;
use sync::{Mutex, RWLock};
use std::cast;
use std::unstable::sync::UnsafeArc;
use std::sync::arc::UnsafeArc;
use std::task;
use std::borrow;
@ -127,20 +127,6 @@ impl<T:Freeze+Send> Arc<T> {
pub fn get<'a>(&'a self) -> &'a T {
unsafe { &*self.x.get_immut() }
}
/**
* Retrieve the data back out of the Arc. This function blocks until the
* reference given to it is the last existing one, and then unwrap the data
* instead of destroying it.
*
* If multiple tasks call unwrap, all but the first will fail. Do not call
* unwrap from a task that holds another reference to the same Arc; it is
* guaranteed to deadlock.
*/
pub fn unwrap(self) -> T {
let Arc { x: x } = self;
x.unwrap()
}
}
impl<T:Freeze + Send> Clone for Arc<T> {
@ -247,22 +233,6 @@ impl<T:Send> MutexArc<T> {
cond: cond })
})
}
/**
* Retrieves the data, blocking until all other references are dropped,
* exactly as arc::unwrap.
*
* Will additionally fail if another task has failed while accessing the arc.
*/
pub fn unwrap(self) -> T {
let MutexArc { x: x } = self;
let inner = x.unwrap();
let MutexArcInner { failed: failed, data: data, .. } = inner;
if failed {
fail!("Can't unwrap poisoned MutexArc - another task failed inside!");
}
data
}
}
impl<T:Freeze + Send> MutexArc<T> {
@ -503,23 +473,6 @@ impl<T:Freeze + Send> RWArc<T> {
}
}
}
/**
* Retrieves the data, blocking until all other references are dropped,
* exactly as arc::unwrap.
*
* Will additionally fail if another task has failed while accessing the arc
* in write mode.
*/
pub fn unwrap(self) -> T {
let RWArc { x: x, .. } = self;
let inner = x.unwrap();
let RWArcInner { failed: failed, data: data, .. } = inner;
if failed {
fail!("Can't unwrap poisoned RWArc - another task failed inside!")
}
data
}
}
// Borrowck rightly complains about immutably aliasing the rwlock in order to
@ -689,22 +642,6 @@ mod tests {
})
}
#[test] #[should_fail]
pub fn test_mutex_arc_unwrap_poison() {
let arc = MutexArc::new(1);
let arc2 = ~(&arc).clone();
let (p, c) = Chan::new();
do task::spawn {
arc2.access(|one| {
c.send(());
assert!(*one == 2);
})
}
let _ = p.recv();
let one = arc.unwrap();
assert!(one == 1);
}
#[test]
fn test_unsafe_mutex_arc_nested() {
unsafe {

View File

@ -96,7 +96,6 @@ pub fn rendezvous<T: Send>() -> (SyncPort<T>, SyncChan<T>) {
#[cfg(test)]
mod test {
use comm::{DuplexStream, rendezvous};
use std::rt::test::run_in_uv_task;
#[test]
@ -124,13 +123,11 @@ mod test {
#[test]
fn recv_a_lot() {
// Rendezvous streams should be able to handle any number of messages being sent
do run_in_uv_task {
let (port, chan) = rendezvous();
do spawn {
1000000.times(|| { chan.send(()) })
}
1000000.times(|| { port.recv() })
let (port, chan) = rendezvous();
do spawn {
1000000.times(|| { chan.send(()) })
}
1000000.times(|| { port.recv() })
}
#[test]

View File

@ -19,8 +19,9 @@
use std::borrow;
use std::unstable::sync::{Exclusive, UnsafeArc};
use std::unstable::atomics;
use std::unstable::sync::Exclusive;
use std::sync::arc::UnsafeArc;
use std::sync::atomics;
use std::unstable::finally::Finally;
use std::util;
use std::util::NonCopyable;
@ -78,7 +79,7 @@ impl WaitQueue {
fn wait_end(&self) -> WaitEnd {
let (wait_end, signal_end) = Chan::new();
self.tail.send_deferred(signal_end);
assert!(self.tail.try_send_deferred(signal_end));
wait_end
}
}
@ -760,23 +761,21 @@ mod tests {
fn test_sem_runtime_friendly_blocking() {
// Force the runtime to schedule two threads on the same sched_loop.
// When one blocks, it should schedule the other one.
do task::spawn_sched(task::SingleThreaded) {
let s = Semaphore::new(1);
let s2 = s.clone();
let (p, c) = Chan::new();
let mut child_data = Some((s2, c));
s.access(|| {
let (s2, c) = child_data.take_unwrap();
do task::spawn {
c.send(());
s2.access(|| { });
c.send(());
}
let _ = p.recv(); // wait for child to come alive
5.times(|| { task::deschedule(); }); // let the child contend
});
let _ = p.recv(); // wait for child to be done
}
let s = Semaphore::new(1);
let s2 = s.clone();
let (p, c) = Chan::new();
let mut child_data = Some((s2, c));
s.access(|| {
let (s2, c) = child_data.take_unwrap();
do task::spawn {
c.send(());
s2.access(|| { });
c.send(());
}
let _ = p.recv(); // wait for child to come alive
5.times(|| { task::deschedule(); }); // let the child contend
});
let _ = p.recv(); // wait for child to be done
}
/************************************************************************
* Mutex tests

View File

@ -14,12 +14,9 @@
/// parallelism.
use std::task::SchedMode;
use std::task;
use std::vec;
#[cfg(test)] use std::task::SingleThreaded;
enum Msg<T> {
Execute(proc(&T)),
Quit
@ -46,7 +43,6 @@ impl<T> TaskPool<T> {
/// returns a function which, given the index of the task, should return
/// local data to be kept around in that task.
pub fn new(n_tasks: uint,
opt_sched_mode: Option<SchedMode>,
init_fn_factory: || -> proc(uint) -> T)
-> TaskPool<T> {
assert!(n_tasks >= 1);
@ -65,18 +61,8 @@ impl<T> TaskPool<T> {
}
};
// Start the task.
match opt_sched_mode {
None => {
// Run on this scheduler.
task::spawn(task_body);
}
Some(sched_mode) => {
let mut task = task::task();
task.sched_mode(sched_mode);
task.spawn(task_body);
}
}
// Run on this scheduler.
task::spawn(task_body);
chan
});
@ -99,7 +85,7 @@ fn test_task_pool() {
let g: proc(uint) -> uint = proc(i) i;
g
};
let mut pool = TaskPool::new(4, Some(SingleThreaded), f);
let mut pool = TaskPool::new(4, f);
8.times(|| {
pool.execute(proc(i) println!("Hello from thread {}!", *i));
})

View File

@ -11,15 +11,15 @@
//! This is a basic event loop implementation not meant for any "real purposes"
//! other than testing the scheduler and proving that it's possible to have a
//! pluggable event loop.
//!
//! This implementation is also used as the fallback implementation of an event
//! loop if no other one is provided (and M:N scheduling is desired).
use prelude::*;
use cast;
use rt::rtio::{EventLoop, IoFactory, RemoteCallback, PausableIdleCallback,
Callback};
use unstable::sync::Exclusive;
use io::native;
use util;
use std::cast;
use std::rt::rtio::{EventLoop, IoFactory, RemoteCallback, PausableIdleCallback,
Callback};
use std::unstable::sync::Exclusive;
use std::util;
/// This is the only exported function from this module.
pub fn event_loop() -> ~EventLoop {
@ -32,7 +32,6 @@ struct BasicLoop {
remotes: ~[(uint, ~Callback)],
next_remote: uint,
messages: Exclusive<~[Message]>,
io: ~IoFactory,
}
enum Message { RunRemote(uint), RemoveRemote(uint) }
@ -45,7 +44,6 @@ impl BasicLoop {
next_remote: 0,
remotes: ~[],
messages: Exclusive::new(~[]),
io: ~native::IoFactory as ~IoFactory,
}
}
@ -159,10 +157,7 @@ impl EventLoop for BasicLoop {
~BasicRemote::new(self.messages.clone(), id) as ~RemoteCallback
}
fn io<'a>(&'a mut self) -> Option<&'a mut IoFactory> {
let factory: &mut IoFactory = self.io;
Some(factory)
}
fn io<'a>(&'a mut self) -> Option<&'a mut IoFactory> { None }
}
struct BasicRemote {
@ -228,3 +223,61 @@ impl Drop for BasicPausable {
}
}
}
#[cfg(test)]
mod test {
use std::task::TaskOpts;
use basic;
use PoolConfig;
use SchedPool;
fn pool() -> SchedPool {
SchedPool::new(PoolConfig {
threads: 1,
event_loop_factory: Some(basic::event_loop),
})
}
fn run(f: proc()) {
let mut pool = pool();
pool.spawn(TaskOpts::new(), f);
pool.shutdown();
}
#[test]
fn smoke() {
do run {}
}
#[test]
fn some_channels() {
do run {
let (p, c) = Chan::new();
do spawn {
c.send(());
}
p.recv();
}
}
#[test]
fn multi_thread() {
let mut pool = SchedPool::new(PoolConfig {
threads: 2,
event_loop_factory: Some(basic::event_loop),
});
for _ in range(0, 20) {
do pool.spawn(TaskOpts::new()) {
let (p, c) = Chan::new();
do spawn {
c.send(());
}
p.recv();
}
}
pool.shutdown();
}
}

View File

@ -8,14 +8,13 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use option::*;
use super::stack::StackSegment;
use libc::c_void;
use uint;
use cast::{transmute, transmute_mut_unsafe,
transmute_region, transmute_mut_region};
use std::libc::c_void;
use std::uint;
use std::cast::{transmute, transmute_mut_unsafe,
transmute_region, transmute_mut_region};
use std::unstable::stack;
pub static RED_ZONE: uint = 20 * 1024;
use stack::StackSegment;
// FIXME #7761: Registers is boxed so that it is 16-byte aligned, for storing
// SSE regs. It would be marginally better not to do this. In C++ we
@ -43,32 +42,47 @@ impl Context {
/// Create a new context that will resume execution by running proc()
pub fn new(start: proc(), stack: &mut StackSegment) -> Context {
// FIXME #7767: Putting main into a ~ so it's a thin pointer and can
// be passed to the spawn function. Another unfortunate
// allocation
let start = ~start;
// The C-ABI function that is the task entry point
//
// Note that this function is a little sketchy. We're taking a
// procedure, transmuting it to a stack-closure, and then calling that
// closure. This leverages the fact that the representation of these two
// types is the same.
//
// The reason that we're doing this is that this procedure is expected
// to never return. The codegen which frees the environment of the
// procedure occurs *after* the procedure has completed, and this means
// that we'll never actually free the procedure.
//
// To solve this, we use this transmute (to not trigger the procedure
// deallocation here), and then store a copy of the procedure in the
// `Context` structure returned. When the `Context` is deallocated, then
// the entire procedure box will be deallocated as well.
extern fn task_start_wrapper(f: &proc()) {
// XXX(pcwalton): This may be sketchy.
unsafe {
let f: &|| = transmute(f);
(*f)()
}
}
let fp: *c_void = task_start_wrapper as *c_void;
let argp: *c_void = unsafe { transmute::<&proc(), *c_void>(&*start) };
let sp: *uint = stack.end();
let sp: *mut uint = unsafe { transmute_mut_unsafe(sp) };
// Save and then immediately load the current context,
// which we will then modify to call the given function when restored
let mut regs = new_regs();
unsafe {
rust_swap_registers(transmute_mut_region(&mut *regs), transmute_region(&*regs));
rust_swap_registers(transmute_mut_region(&mut *regs),
transmute_region(&*regs));
};
initialize_call_frame(&mut *regs, fp, argp, sp);
// FIXME #7767: Putting main into a ~ so it's a thin pointer and can
// be passed to the spawn function. Another unfortunate
// allocation
let start = ~start;
initialize_call_frame(&mut *regs,
task_start_wrapper as *c_void,
unsafe { transmute(&*start) },
sp);
// Scheduler tasks don't have a stack in the "we allocated it" sense,
// but rather they run on pthreads stacks. We have complete control over
@ -113,17 +127,18 @@ impl Context {
// invalid for the current task. Lucky for us `rust_swap_registers`
// is a C function so we don't have to worry about that!
match in_context.stack_bounds {
Some((lo, hi)) => record_stack_bounds(lo, hi),
Some((lo, hi)) => stack::record_stack_bounds(lo, hi),
// If we're going back to one of the original contexts or
// something that's possibly not a "normal task", then reset
// the stack limit to 0 to make morestack never fail
None => record_stack_bounds(0, uint::max_value),
None => stack::record_stack_bounds(0, uint::max_value),
}
rust_swap_registers(out_regs, in_regs)
}
}
}
#[link(name = "rustrt", kind = "static")]
extern {
fn rust_swap_registers(out_regs: *mut Registers, in_regs: *Registers);
}
@ -282,182 +297,6 @@ fn align_down(sp: *mut uint) -> *mut uint {
// ptr::mut_offset is positive ints only
#[inline]
pub fn mut_offset<T>(ptr: *mut T, count: int) -> *mut T {
use mem::size_of;
use std::mem::size_of;
(ptr as int + count * (size_of::<T>() as int)) as *mut T
}
#[inline(always)]
pub unsafe fn record_stack_bounds(stack_lo: uint, stack_hi: uint) {
// When the old runtime had segmented stacks, it used a calculation that was
// "limit + RED_ZONE + FUDGE". The red zone was for things like dynamic
// symbol resolution, llvm function calls, etc. In theory this red zone
// value is 0, but it matters far less when we have gigantic stacks because
// we don't need to be so exact about our stack budget. The "fudge factor"
// was because LLVM doesn't emit a stack check for functions < 256 bytes in
// size. Again though, we have giant stacks, so we round all these
// calculations up to the nice round number of 20k.
record_sp_limit(stack_lo + RED_ZONE);
return target_record_stack_bounds(stack_lo, stack_hi);
#[cfg(not(windows))] #[cfg(not(target_arch = "x86_64"))] #[inline(always)]
unsafe fn target_record_stack_bounds(_stack_lo: uint, _stack_hi: uint) {}
#[cfg(windows, target_arch = "x86_64")] #[inline(always)]
unsafe fn target_record_stack_bounds(stack_lo: uint, stack_hi: uint) {
// Windows compiles C functions which may check the stack bounds. This
// means that if we want to perform valid FFI on windows, then we need
// to ensure that the stack bounds are what they truly are for this
// task. More info can be found at:
// https://github.com/mozilla/rust/issues/3445#issuecomment-26114839
//
// stack range is at TIB: %gs:0x08 (top) and %gs:0x10 (bottom)
asm!("mov $0, %gs:0x08" :: "r"(stack_hi) :: "volatile");
asm!("mov $0, %gs:0x10" :: "r"(stack_lo) :: "volatile");
}
}
/// Records the current limit of the stack as specified by `end`.
///
/// This is stored in an OS-dependent location, likely inside of the thread
/// local storage. The location that the limit is stored is a pre-ordained
/// location because it's where LLVM has emitted code to check.
///
/// Note that this cannot be called under normal circumstances. This function is
/// changing the stack limit, so upon returning any further function calls will
/// possibly be triggering the morestack logic if you're not careful.
///
/// Also note that this and all of the inside functions are all flagged as
/// "inline(always)" because they're messing around with the stack limits. This
/// would be unfortunate for the functions themselves to trigger a morestack
/// invocation (if they were an actual function call).
#[inline(always)]
pub unsafe fn record_sp_limit(limit: uint) {
return target_record_sp_limit(limit);
// x86-64
#[cfg(target_arch = "x86_64", target_os = "macos")] #[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movq $$0x60+90*8, %rsi
movq $0, %gs:(%rsi)" :: "r"(limit) : "rsi" : "volatile")
}
#[cfg(target_arch = "x86_64", target_os = "linux")] #[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movq $0, %fs:112" :: "r"(limit) :: "volatile")
}
#[cfg(target_arch = "x86_64", target_os = "win32")] #[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
// see: http://en.wikipedia.org/wiki/Win32_Thread_Information_Block
// store this inside of the "arbitrary data slot", but double the size
// because this is 64 bit instead of 32 bit
asm!("movq $0, %gs:0x28" :: "r"(limit) :: "volatile")
}
#[cfg(target_arch = "x86_64", target_os = "freebsd")] #[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movq $0, %fs:24" :: "r"(limit) :: "volatile")
}
// x86
#[cfg(target_arch = "x86", target_os = "macos")] #[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movl $$0x48+90*4, %eax
movl $0, %gs:(%eax)" :: "r"(limit) : "eax" : "volatile")
}
#[cfg(target_arch = "x86", target_os = "linux")]
#[cfg(target_arch = "x86", target_os = "freebsd")] #[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movl $0, %gs:48" :: "r"(limit) :: "volatile")
}
#[cfg(target_arch = "x86", target_os = "win32")] #[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
// see: http://en.wikipedia.org/wiki/Win32_Thread_Information_Block
// store this inside of the "arbitrary data slot"
asm!("movl $0, %fs:0x14" :: "r"(limit) :: "volatile")
}
// mips, arm - Some brave soul can port these to inline asm, but it's over
// my head personally
#[cfg(target_arch = "mips")]
#[cfg(target_arch = "arm")] #[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
return record_sp_limit(limit as *c_void);
extern {
fn record_sp_limit(limit: *c_void);
}
}
}
/// The counterpart of the function above, this function will fetch the current
/// stack limit stored in TLS.
///
/// Note that all of these functions are meant to be exact counterparts of their
/// brethren above, except that the operands are reversed.
///
/// As with the setter, this function does not have a __morestack header and can
/// therefore be called in a "we're out of stack" situation.
#[inline(always)]
// currently only called by `rust_stack_exhausted`, which doesn't
// exist in a test build.
#[cfg(not(test))]
pub unsafe fn get_sp_limit() -> uint {
return target_get_sp_limit();
// x86-64
#[cfg(target_arch = "x86_64", target_os = "macos")] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movq $$0x60+90*8, %rsi
movq %gs:(%rsi), $0" : "=r"(limit) :: "rsi" : "volatile");
return limit;
}
#[cfg(target_arch = "x86_64", target_os = "linux")] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movq %fs:112, $0" : "=r"(limit) ::: "volatile");
return limit;
}
#[cfg(target_arch = "x86_64", target_os = "win32")] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movq %gs:0x28, $0" : "=r"(limit) ::: "volatile");
return limit;
}
#[cfg(target_arch = "x86_64", target_os = "freebsd")] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movq %fs:24, $0" : "=r"(limit) ::: "volatile");
return limit;
}
// x86
#[cfg(target_arch = "x86", target_os = "macos")] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movl $$0x48+90*4, %eax
movl %gs:(%eax), $0" : "=r"(limit) :: "eax" : "volatile");
return limit;
}
#[cfg(target_arch = "x86", target_os = "linux")]
#[cfg(target_arch = "x86", target_os = "freebsd")] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movl %gs:48, $0" : "=r"(limit) ::: "volatile");
return limit;
}
#[cfg(target_arch = "x86", target_os = "win32")] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movl %fs:0x14, $0" : "=r"(limit) ::: "volatile");
return limit;
}
// mips, arm - Some brave soul can port these to inline asm, but it's over
// my head personally
#[cfg(target_arch = "mips")]
#[cfg(target_arch = "arm")] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
return get_sp_limit() as uint;
extern {
fn get_sp_limit() -> *c_void;
}
}
}

62
src/libgreen/coroutine.rs Normal file
View File

@ -0,0 +1,62 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Coroutines represent nothing more than a context and a stack
// segment.
use std::rt::env;
use context::Context;
use stack::{StackPool, StackSegment};
/// A coroutine is nothing more than a (register context, stack) pair.
pub struct Coroutine {
/// The segment of stack on which the task is currently running or
/// if the task is blocked, on which the task will resume
/// execution.
///
/// Servo needs this to be public in order to tell SpiderMonkey
/// about the stack bounds.
current_stack_segment: StackSegment,
/// Always valid if the task is alive and not running.
saved_context: Context
}
impl Coroutine {
pub fn new(stack_pool: &mut StackPool,
stack_size: Option<uint>,
start: proc())
-> Coroutine {
let stack_size = match stack_size {
Some(size) => size,
None => env::min_stack()
};
let mut stack = stack_pool.take_segment(stack_size);
let initial_context = Context::new(start, &mut stack);
Coroutine {
current_stack_segment: stack,
saved_context: initial_context
}
}
pub fn empty() -> Coroutine {
Coroutine {
current_stack_segment: StackSegment::new(0),
saved_context: Context::empty()
}
}
/// Destroy the coroutine and try to reuse its stack segment.
pub fn recycle(self, stack_pool: &mut StackPool) {
let Coroutine { current_stack_segment, .. } = self;
stack_pool.give_segment(current_stack_segment);
}
}

320
src/libgreen/lib.rs Normal file
View File

@ -0,0 +1,320 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The "green scheduling" library
//!
//! This library provides M:N threading for rust programs. Internally this has
//! the implementation of a green scheduler along with context switching and a
//! stack-allocation strategy.
//!
//! This can be optionally linked in to rust programs in order to provide M:N
//! functionality inside of 1:1 programs.
#[pkgid = "green#0.9-pre"];
#[crate_id = "green#0.9-pre"];
#[license = "MIT/ASL2"];
#[crate_type = "rlib"];
#[crate_type = "dylib"];
// NB this does *not* include globs, please keep it that way.
#[feature(macro_rules)];
use std::os;
use std::rt::crate_map;
use std::rt::local::Local;
use std::rt::rtio;
use std::rt::task::Task;
use std::rt::thread::Thread;
use std::rt;
use std::sync::atomics::{SeqCst, AtomicUint, INIT_ATOMIC_UINT};
use std::sync::deque;
use std::task::TaskOpts;
use std::util;
use std::vec;
use sched::{Shutdown, Scheduler, SchedHandle, TaskFromFriend, NewNeighbor};
use sleeper_list::SleeperList;
use stack::StackPool;
use task::GreenTask;
mod macros;
mod simple;
pub mod basic;
pub mod context;
pub mod coroutine;
pub mod sched;
pub mod sleeper_list;
pub mod stack;
pub mod task;
#[lang = "start"]
#[cfg(not(test))]
pub fn lang_start(main: *u8, argc: int, argv: **u8) -> int {
use std::cast;
do start(argc, argv) {
let main: extern "Rust" fn() = unsafe { cast::transmute(main) };
main();
}
}
/// Set up a default runtime configuration, given compiler-supplied arguments.
///
/// This function will block until the entire pool of M:N schedulers have
/// exited. This function also requires a local task to be available.
///
/// # Arguments
///
/// * `argc` & `argv` - The argument vector. On Unix this information is used
/// by os::args.
/// * `main` - The initial procedure to run inside of the M:N scheduling pool.
/// Once this procedure exits, the scheduling pool will begin to shut
/// down. The entire pool (and this function) will only return once
/// all child tasks have finished executing.
///
/// # Return value
///
/// The return value is used as the process return code. 0 on success, 101 on
/// error.
pub fn start(argc: int, argv: **u8, main: proc()) -> int {
rt::init(argc, argv);
let mut main = Some(main);
let mut ret = None;
simple::task().run(|| {
ret = Some(run(main.take_unwrap()));
});
// unsafe is ok b/c we're sure that the runtime is gone
unsafe { rt::cleanup() }
ret.unwrap()
}
/// Execute the main function in a pool of M:N schedulers.
///
/// Configures the runtime according to the environment, by default using a task
/// scheduler with the same number of threads as cores. Returns a process exit
/// code.
///
/// This function will not return until all schedulers in the associated pool
/// have returned.
pub fn run(main: proc()) -> int {
// Create a scheduler pool and spawn the main task into this pool. We will
// get notified over a channel when the main task exits.
let mut pool = SchedPool::new(PoolConfig::new());
let (port, chan) = Chan::new();
let mut opts = TaskOpts::new();
opts.notify_chan = Some(chan);
opts.name = Some(SendStrStatic("<main>"));
pool.spawn(opts, main);
// Wait for the main task to return, and set the process error code
// appropriately.
if port.recv().is_err() {
os::set_exit_status(rt::DEFAULT_ERROR_CODE);
}
// Once the main task has exited and we've set our exit code, wait for all
// spawned sub-tasks to finish running. This is done to allow all schedulers
// to remain active while there are still tasks possibly running.
unsafe {
let mut task = Local::borrow(None::<Task>);
task.get().wait_for_other_tasks();
}
// Now that we're sure all tasks are dead, shut down the pool of schedulers,
// waiting for them all to return.
pool.shutdown();
os::get_exit_status()
}
/// Configuration of how an M:N pool of schedulers is spawned.
pub struct PoolConfig {
/// The number of schedulers (OS threads) to spawn into this M:N pool.
threads: uint,
/// A factory function used to create new event loops. If this is not
/// specified then the default event loop factory is used.
event_loop_factory: Option<fn() -> ~rtio::EventLoop>,
}
impl PoolConfig {
/// Returns the default configuration, as determined by the environment
/// variables of this process.
pub fn new() -> PoolConfig {
PoolConfig {
threads: rt::default_sched_threads(),
event_loop_factory: None,
}
}
}
/// A structure representing a handle to a pool of schedulers. This handle is
/// used to keep the pool alive and also reap the status from the pool.
pub struct SchedPool {
priv id: uint,
priv threads: ~[Thread<()>],
priv handles: ~[SchedHandle],
priv stealers: ~[deque::Stealer<~task::GreenTask>],
priv next_friend: uint,
priv stack_pool: StackPool,
priv deque_pool: deque::BufferPool<~task::GreenTask>,
priv sleepers: SleeperList,
priv factory: fn() -> ~rtio::EventLoop,
}
impl SchedPool {
/// Execute the main function in a pool of M:N schedulers.
///
/// This will configure the pool according to the `config` parameter, and
/// initially run `main` inside the pool of schedulers.
pub fn new(config: PoolConfig) -> SchedPool {
static mut POOL_ID: AtomicUint = INIT_ATOMIC_UINT;
let PoolConfig {
threads: nscheds,
event_loop_factory: factory
} = config;
let factory = factory.unwrap_or(default_event_loop_factory());
assert!(nscheds > 0);
// The pool of schedulers that will be returned from this function
let mut pool = SchedPool {
threads: ~[],
handles: ~[],
stealers: ~[],
id: unsafe { POOL_ID.fetch_add(1, SeqCst) },
sleepers: SleeperList::new(),
stack_pool: StackPool::new(),
deque_pool: deque::BufferPool::new(),
next_friend: 0,
factory: factory,
};
// Create a work queue for each scheduler, ntimes. Create an extra
// for the main thread if that flag is set. We won't steal from it.
let arr = vec::from_fn(nscheds, |_| pool.deque_pool.deque());
let (workers, stealers) = vec::unzip(arr.move_iter());
pool.stealers = stealers;
// Now that we've got all our work queues, create one scheduler per
// queue, spawn the scheduler into a thread, and be sure to keep a
// handle to the scheduler and the thread to keep them alive.
for worker in workers.move_iter() {
rtdebug!("inserting a regular scheduler");
let mut sched = ~Scheduler::new(pool.id,
(pool.factory)(),
worker,
pool.stealers.clone(),
pool.sleepers.clone());
pool.handles.push(sched.make_handle());
let sched = sched;
pool.threads.push(do Thread::start {
let mut sched = sched;
let task = do GreenTask::new(&mut sched.stack_pool, None) {
rtdebug!("boostraping a non-primary scheduler");
};
sched.bootstrap(task);
});
}
return pool;
}
pub fn task(&mut self, opts: TaskOpts, f: proc()) -> ~GreenTask {
GreenTask::configure(&mut self.stack_pool, opts, f)
}
pub fn spawn(&mut self, opts: TaskOpts, f: proc()) {
let task = self.task(opts, f);
// Figure out someone to send this task to
let idx = self.next_friend;
self.next_friend += 1;
if self.next_friend >= self.handles.len() {
self.next_friend = 0;
}
// Jettison the task away!
self.handles[idx].send(TaskFromFriend(task));
}
/// Spawns a new scheduler into this M:N pool. A handle is returned to the
/// scheduler for use. The scheduler will not exit as long as this handle is
/// active.
///
/// The scheduler spawned will participate in work stealing with all of the
/// other schedulers currently in the scheduler pool.
pub fn spawn_sched(&mut self) -> SchedHandle {
let (worker, stealer) = self.deque_pool.deque();
self.stealers.push(stealer.clone());
// Tell all existing schedulers about this new scheduler so they can all
// steal work from it
for handle in self.handles.mut_iter() {
handle.send(NewNeighbor(stealer.clone()));
}
// Create the new scheduler, using the same sleeper list as all the
// other schedulers as well as having a stealer handle to all other
// schedulers.
let mut sched = ~Scheduler::new(self.id,
(self.factory)(),
worker,
self.stealers.clone(),
self.sleepers.clone());
let ret = sched.make_handle();
self.handles.push(sched.make_handle());
let sched = sched;
self.threads.push(do Thread::start {
let mut sched = sched;
let task = do GreenTask::new(&mut sched.stack_pool, None) {
rtdebug!("boostraping a non-primary scheduler");
};
sched.bootstrap(task);
});
return ret;
}
pub fn shutdown(mut self) {
self.stealers = ~[];
for mut handle in util::replace(&mut self.handles, ~[]).move_iter() {
handle.send(Shutdown);
}
for thread in util::replace(&mut self.threads, ~[]).move_iter() {
thread.join();
}
}
}
impl Drop for SchedPool {
fn drop(&mut self) {
if self.threads.len() > 0 {
fail!("dropping a M:N scheduler pool that wasn't shut down");
}
}
}
fn default_event_loop_factory() -> fn() -> ~rtio::EventLoop {
match crate_map::get_crate_map() {
None => {}
Some(map) => {
match map.event_loop_factory {
None => {}
Some(factory) => return factory
}
}
}
// If the crate map didn't specify a factory to create an event loop, then
// instead just use a basic event loop missing all I/O services to at least
// get the scheduler running.
return basic::event_loop;
}

129
src/libgreen/macros.rs Normal file
View File

@ -0,0 +1,129 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// XXX: this file probably shouldn't exist
#[macro_escape];
use std::fmt;
use std::libc;
// Indicates whether we should perform expensive sanity checks, including rtassert!
// XXX: Once the runtime matures remove the `true` below to turn off rtassert, etc.
pub static ENFORCE_SANITY: bool = true || !cfg!(rtopt) || cfg!(rtdebug) || cfg!(rtassert);
macro_rules! rterrln (
($($arg:tt)*) => ( {
format_args!(::macros::dumb_println, $($arg)*)
} )
)
// Some basic logging. Enabled by passing `--cfg rtdebug` to the libstd build.
macro_rules! rtdebug (
($($arg:tt)*) => ( {
if cfg!(rtdebug) {
rterrln!($($arg)*)
}
})
)
macro_rules! rtassert (
( $arg:expr ) => ( {
if ::macros::ENFORCE_SANITY {
if !$arg {
rtabort!(" assertion failed: {}", stringify!($arg));
}
}
} )
)
macro_rules! rtabort (
($($arg:tt)*) => ( {
::macros::abort(format!($($arg)*));
} )
)
pub fn dumb_println(args: &fmt::Arguments) {
use std::io;
use std::libc;
struct Stderr;
impl io::Writer for Stderr {
fn write(&mut self, data: &[u8]) {
unsafe {
libc::write(libc::STDERR_FILENO,
data.as_ptr() as *libc::c_void,
data.len() as libc::size_t);
}
}
}
let mut w = Stderr;
fmt::writeln(&mut w as &mut io::Writer, args);
}
pub fn abort(msg: &str) -> ! {
let msg = if !msg.is_empty() { msg } else { "aborted" };
let hash = msg.chars().fold(0, |accum, val| accum + (val as uint) );
let quote = match hash % 10 {
0 => "
It was from the artists and poets that the pertinent answers came, and I
know that panic would have broken loose had they been able to compare notes.
As it was, lacking their original letters, I half suspected the compiler of
having asked leading questions, or of having edited the correspondence in
corroboration of what he had latently resolved to see.",
1 => "
There are not many persons who know what wonders are opened to them in the
stories and visions of their youth; for when as children we listen and dream,
we think but half-formed thoughts, and when as men we try to remember, we are
dulled and prosaic with the poison of life. But some of us awake in the night
with strange phantasms of enchanted hills and gardens, of fountains that sing
in the sun, of golden cliffs overhanging murmuring seas, of plains that stretch
down to sleeping cities of bronze and stone, and of shadowy companies of heroes
that ride caparisoned white horses along the edges of thick forests; and then
we know that we have looked back through the ivory gates into that world of
wonder which was ours before we were wise and unhappy.",
2 => "
Instead of the poems I had hoped for, there came only a shuddering blackness
and ineffable loneliness; and I saw at last a fearful truth which no one had
ever dared to breathe before — the unwhisperable secret of secrets — The fact
that this city of stone and stridor is not a sentient perpetuation of Old New
York as London is of Old London and Paris of Old Paris, but that it is in fact
quite dead, its sprawling body imperfectly embalmed and infested with queer
animate things which have nothing to do with it as it was in life.",
3 => "
The ocean ate the last of the land and poured into the smoking gulf, thereby
giving up all it had ever conquered. From the new-flooded lands it flowed
again, uncovering death and decay; and from its ancient and immemorial bed it
trickled loathsomely, uncovering nighted secrets of the years when Time was
young and the gods unborn. Above the waves rose weedy remembered spires. The
moon laid pale lilies of light on dead London, and Paris stood up from its damp
grave to be sanctified with star-dust. Then rose spires and monoliths that were
weedy but not remembered; terrible spires and monoliths of lands that men never
knew were lands...",
4 => "
There was a night when winds from unknown spaces whirled us irresistibly into
limitless vacuum beyond all thought and entity. Perceptions of the most
maddeningly untransmissible sort thronged upon us; perceptions of infinity
which at the time convulsed us with joy, yet which are now partly lost to my
memory and partly incapable of presentation to others.",
_ => "You've met with a terrible fate, haven't you?"
};
rterrln!("{}", "");
rterrln!("{}", quote);
rterrln!("{}", "");
rterrln!("fatal runtime error: {}", msg);
abort();
fn abort() -> ! {
unsafe { libc::abort() }
}
}
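
For orientation, a hedged sketch of how these macros are meant to be used inside libgreen (the macros are defined above; the surrounding function is hypothetical):

    fn check_queue(len: uint) {
        rtdebug!("queue length: {}", len); // no-op unless built with --cfg rtdebug
        rtassert!(len < 1_000_000);        // checked while ENFORCE_SANITY is true
        if len == 0 {
            rtabort!("queue unexpectedly empty"); // prints a quote, then aborts
        }
    }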

(File diff suppressed because it is too large.)

src/libgreen/simple.rs (new file, 88 lines)
@ -0,0 +1,88 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A small module implementing a simple "runtime" used for bootstrapping a rust
//! scheduler pool and then interacting with it.
use std::cast;
use std::rt::Runtime;
use std::rt::local::Local;
use std::rt::rtio;
use std::rt::task::{Task, BlockedTask};
use std::task::TaskOpts;
use std::unstable::sync::LittleLock;
struct SimpleTask {
lock: LittleLock,
awoken: bool,
}
impl Runtime for SimpleTask {
// Implement the simple operations of descheduling and rescheduling, but
// only in a small number of simple cases.
fn deschedule(mut ~self, times: uint, mut cur_task: ~Task,
f: |BlockedTask| -> Result<(), BlockedTask>) {
assert!(times == 1);
let me = &mut *self as *mut SimpleTask;
let cur_dupe = &*cur_task as *Task;
cur_task.put_runtime(self as ~Runtime);
let task = BlockedTask::block(cur_task);
// See libnative/task.rs for what's going on here with the `awoken`
// field and the while loop around wait()
unsafe {
let mut guard = (*me).lock.lock();
(*me).awoken = false;
match f(task) {
Ok(()) => {
while !(*me).awoken {
guard.wait();
}
}
Err(task) => { cast::forget(task.wake()); }
}
drop(guard);
cur_task = cast::transmute(cur_dupe);
}
Local::put(cur_task);
}
fn reawaken(mut ~self, mut to_wake: ~Task, _can_resched: bool) {
let me = &mut *self as *mut SimpleTask;
to_wake.put_runtime(self as ~Runtime);
unsafe {
cast::forget(to_wake);
let _l = (*me).lock.lock();
(*me).awoken = true;
(*me).lock.signal();
}
}
// These functions are all unimplemented and fail as a result. This is on
// purpose. A "simple task" is just that, a very simple task that can't
// really do a whole lot. The only purpose of the task is to get us off our
// feet and running.
fn yield_now(~self, _cur_task: ~Task) { fail!() }
fn maybe_yield(~self, _cur_task: ~Task) { fail!() }
fn spawn_sibling(~self, _cur_task: ~Task, _opts: TaskOpts, _f: proc()) {
fail!()
}
fn local_io<'a>(&'a mut self) -> Option<rtio::LocalIo<'a>> { None }
fn wrap(~self) -> ~Any { fail!() }
}
pub fn task() -> ~Task {
let mut task = ~Task::new();
task.put_runtime(~SimpleTask {
lock: LittleLock::new(),
awoken: false,
} as ~Runtime);
return task;
}
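
A hedged sketch of the intended use (the real bootstrapping path lives in the pool code; this only illustrates the shape): the returned task can run code that blocks once at a time and is woken from another thread, which is all that pool setup requires.

    let bootstrap_task = task();
    bootstrap_task.run(|| {
        // Code here may deschedule(1, ..) and be reawoken, but any attempt
        // to spawn, yield, or perform I/O will fail!() by design.
    });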


@ -11,10 +11,9 @@
//! Maintains a shared list of sleeping schedulers. Schedulers
//! use this to wake each other up.
use rt::sched::SchedHandle;
use rt::mpmc_bounded_queue::Queue;
use option::*;
use clone::Clone;
use std::sync::mpmc_bounded_queue::Queue;
use sched::SchedHandle;
pub struct SleeperList {
priv q: Queue<SchedHandle>,


@ -8,11 +8,8 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use container::Container;
use ptr::RawPtr;
use vec;
use ops::Drop;
use libc::{c_uint, uintptr_t};
use std::vec;
use std::libc::{c_uint, uintptr_t};
pub struct StackSegment {
priv buf: ~[u8],

src/libgreen/task.rs (new file, 536 lines)
@ -0,0 +1,536 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The Green Task implementation
//!
//! This module contains the glue to the libstd runtime necessary to integrate
//! M:N scheduling. This GreenTask structure is hidden as a trait object in all
//! rust tasks and virtual calls are made in order to interface with it.
//!
//! Each green task contains a scheduler if it is currently running, and it also
//! contains the rust task itself in order to juggle around ownership of the
//! values.
use std::cast;
use std::rt::Runtime;
use std::rt::rtio;
use std::rt::local::Local;
use std::rt::task::{Task, BlockedTask};
use std::task::TaskOpts;
use std::unstable::mutex::Mutex;
use coroutine::Coroutine;
use sched::{Scheduler, SchedHandle, RunOnce};
use stack::StackPool;
/// The fields needed to keep track of a green task (as opposed to a 1:1
/// task).
pub struct GreenTask {
coroutine: Option<Coroutine>,
handle: Option<SchedHandle>,
sched: Option<~Scheduler>,
task: Option<~Task>,
task_type: TaskType,
pool_id: uint,
// See the comments in the scheduler about why this is necessary
nasty_deschedule_lock: Mutex,
}
pub enum TaskType {
TypeGreen(Option<Home>),
TypeSched,
}
pub enum Home {
AnySched,
HomeSched(SchedHandle),
}
impl GreenTask {
/// Creates a new green task which is not homed to any particular scheduler
/// and will not have any contained Task structure.
pub fn new(stack_pool: &mut StackPool,
stack_size: Option<uint>,
start: proc()) -> ~GreenTask {
GreenTask::new_homed(stack_pool, stack_size, AnySched, start)
}
/// Creates a new task (like `new`), but specifies the home for the new task.
pub fn new_homed(stack_pool: &mut StackPool,
stack_size: Option<uint>,
home: Home,
start: proc()) -> ~GreenTask {
let mut ops = GreenTask::new_typed(None, TypeGreen(Some(home)));
let start = GreenTask::build_start_wrapper(start, ops.as_uint());
ops.coroutine = Some(Coroutine::new(stack_pool, stack_size, start));
return ops;
}
/// Creates a new green task with the specified coroutine and type, this is
/// useful when creating scheduler tasks.
pub fn new_typed(coroutine: Option<Coroutine>,
task_type: TaskType) -> ~GreenTask {
~GreenTask {
pool_id: 0,
coroutine: coroutine,
task_type: task_type,
sched: None,
handle: None,
nasty_deschedule_lock: unsafe { Mutex::new() },
task: Some(~Task::new()),
}
}
/// Creates a new green task with the given configuration options for the
/// contained Task object. The given stack pool is also used to allocate a
/// new stack for this task.
pub fn configure(pool: &mut StackPool,
opts: TaskOpts,
f: proc()) -> ~GreenTask {
let TaskOpts {
watched: _watched,
notify_chan, name, stack_size
} = opts;
let mut green = GreenTask::new(pool, stack_size, f);
{
let task = green.task.get_mut_ref();
task.name = name;
match notify_chan {
Some(chan) => {
let on_exit = proc(task_result) { chan.send(task_result) };
task.death.on_exit = Some(on_exit);
}
None => {}
}
}
return green;
}
/// Just like the `maybe_take_runtime` function, this function should *not*
/// exist. Usage of this function is _strongly_ discouraged. This is an
/// absolute last resort necessary for converting a libstd task to a green
/// task.
///
/// This function will assert that the task is indeed a green task before
/// returning (and will kill the entire process if this is wrong).
pub fn convert(mut task: ~Task) -> ~GreenTask {
match task.maybe_take_runtime::<GreenTask>() {
Some(mut green) => {
green.put_task(task);
green
}
None => rtabort!("not a green task any more?"),
}
}
/// Builds a function which is the actual starting execution point for a
/// rust task. This function is the glue necessary to execute the libstd
/// task and then clean up the green thread after it exits.
///
/// The second argument to this function is actually a transmuted copy of
/// the `GreenTask` pointer. Context switches in the scheduler silently
/// transfer ownership of the `GreenTask` to the other end of the context
/// switch, so because this is the first code that is running in this task,
/// it must first re-acquire ownership of the green task.
pub fn build_start_wrapper(start: proc(), ops: uint) -> proc() {
proc() {
// First code after swap to this new context. Run our
// cleanup job after we have re-acquired ownership of the green
// task.
let mut task: ~GreenTask = unsafe { GreenTask::from_uint(ops) };
task.sched.get_mut_ref().run_cleanup_job();
// Convert our green task to a libstd task and then execute the code
// requested. This is the "try/catch" block for this green task and
// is the wrapper for *all* code run in the task.
let mut start = Some(start);
let task = task.swap().run(|| start.take_unwrap()());
// Once the function has exited, it's time to run the termination
// routine. This means we need to context switch one more time but
// clean ourselves up on the other end. Since we have no way of
// preserving a handle to the GreenTask down to this point, this
// unfortunately must call `GreenTask::convert`. In order to avoid
// this we could add a `terminate` function to the `Runtime` trait
// in libstd, but that seems less appropriate since the conversion
// method exists.
GreenTask::convert(task).terminate();
}
}
pub fn give_home(&mut self, new_home: Home) {
match self.task_type {
TypeGreen(ref mut home) => { *home = Some(new_home); }
TypeSched => rtabort!("type error: used SchedTask as GreenTask"),
}
}
pub fn take_unwrap_home(&mut self) -> Home {
match self.task_type {
TypeGreen(ref mut home) => home.take_unwrap(),
TypeSched => rtabort!("type error: used SchedTask as GreenTask"),
}
}
// New utility functions for homes.
pub fn is_home_no_tls(&self, sched: &Scheduler) -> bool {
match self.task_type {
TypeGreen(Some(AnySched)) => { false }
TypeGreen(Some(HomeSched(SchedHandle { sched_id: ref id, .. }))) => {
*id == sched.sched_id()
}
TypeGreen(None) => { rtabort!("task without home"); }
TypeSched => {
// Awe yea
rtabort!("type error: expected: TypeGreen, found: TypeSched");
}
}
}
pub fn homed(&self) -> bool {
match self.task_type {
TypeGreen(Some(AnySched)) => { false }
TypeGreen(Some(HomeSched(SchedHandle { .. }))) => { true }
TypeGreen(None) => {
rtabort!("task without home");
}
TypeSched => {
rtabort!("type error: expected: TypeGreen, found: TypeSched");
}
}
}
pub fn is_sched(&self) -> bool {
match self.task_type {
TypeGreen(..) => false, TypeSched => true,
}
}
// Unsafe functions for transferring ownership of this GreenTask across
// context switches
pub fn as_uint(&self) -> uint {
unsafe { cast::transmute(self) }
}
pub unsafe fn from_uint(val: uint) -> ~GreenTask { cast::transmute(val) }
// Runtime glue functions and helpers
pub fn put_with_sched(mut ~self, sched: ~Scheduler) {
assert!(self.sched.is_none());
self.sched = Some(sched);
self.put();
}
pub fn put_task(&mut self, task: ~Task) {
assert!(self.task.is_none());
self.task = Some(task);
}
pub fn swap(mut ~self) -> ~Task {
let mut task = self.task.take_unwrap();
task.put_runtime(self as ~Runtime);
return task;
}
pub fn put(~self) {
assert!(self.sched.is_some());
Local::put(self.swap());
}
fn terminate(mut ~self) {
let sched = self.sched.take_unwrap();
sched.terminate_current_task(self);
}
// This function is used to remotely wakeup this green task back on to its
// original pool of schedulers. In order to do so, each task arranges a
// SchedHandle upon descheduling to be available for sending itself back to
// the original pool.
//
// Note that there is an interesting transfer of ownership going on here. We
// must relinquish ownership of the green task, but then also send the task
// over the handle back to the original scheduler. In order to safely do
// this, we leverage the already-present "nasty descheduling lock". The
// reason for doing this is that each task will bounce on this lock after
// resuming after a context switch. By holding the lock over the enqueueing
// of the task, we're guaranteed that the SchedHandle's memory will be valid
// for this entire function.
//
// An alternative would be to make handles incredibly cheap to clone, but
// right now a SchedHandle is something like 6 allocations, so cloning one
// is *not* a cheap operation. Until the day comes that we
// need to optimize this, a lock should do just fine (it's completely
// uncontended except for when the task is rescheduled).
fn reawaken_remotely(mut ~self) {
unsafe {
let mtx = &mut self.nasty_deschedule_lock as *mut Mutex;
let handle = self.handle.get_mut_ref() as *mut SchedHandle;
(*mtx).lock();
(*handle).send(RunOnce(self));
(*mtx).unlock();
}
}
}
impl Runtime for GreenTask {
fn yield_now(mut ~self, cur_task: ~Task) {
self.put_task(cur_task);
let sched = self.sched.take_unwrap();
sched.yield_now(self);
}
fn maybe_yield(mut ~self, cur_task: ~Task) {
self.put_task(cur_task);
let sched = self.sched.take_unwrap();
sched.maybe_yield(self);
}
fn deschedule(mut ~self, times: uint, cur_task: ~Task,
f: |BlockedTask| -> Result<(), BlockedTask>) {
self.put_task(cur_task);
let mut sched = self.sched.take_unwrap();
// In order for this task to be reawoken in all possible contexts, we
// may need a handle back into the current scheduler. When we're woken
// up in anything other than the local scheduler pool, this handle is
// used to send this task back into the scheduler pool.
if self.handle.is_none() {
self.handle = Some(sched.make_handle());
self.pool_id = sched.pool_id;
}
// This code is pretty standard, except for the usage of
// `GreenTask::convert`. Right now if we use `reawaken` directly it will
// expect for there to be a task in local TLS, but that is not true for
// this deschedule block (because the scheduler must retain ownership of
// the task while the cleanup job is running). In order to get around
// this for now, we invoke the scheduler directly with the converted
// Task => GreenTask structure.
if times == 1 {
sched.deschedule_running_task_and_then(self, |sched, task| {
match f(task) {
Ok(()) => {}
Err(t) => {
t.wake().map(|t| {
sched.enqueue_task(GreenTask::convert(t))
});
}
}
});
} else {
sched.deschedule_running_task_and_then(self, |sched, task| {
for task in task.make_selectable(times) {
match f(task) {
Ok(()) => {},
Err(task) => {
task.wake().map(|t| {
sched.enqueue_task(GreenTask::convert(t))
});
break
}
}
}
});
}
}
fn reawaken(mut ~self, to_wake: ~Task, can_resched: bool) {
self.put_task(to_wake);
assert!(self.sched.is_none());
// Waking up a green thread is a bit of a tricky situation. We have no
// guarantee about where the current task is running. The options we
// have for where this current task is running are:
//
// 1. Our original scheduler pool
// 2. Some other scheduler pool
// 3. Something that isn't a scheduler pool
//
// In order to figure out what case we're in, this is the reason that
// the `maybe_take_runtime` function exists. Using this function we can
// dynamically check to see which of these cases is the current
// situation and then dispatch accordingly.
//
// In case 1, we just use the local scheduler to resume ourselves
// immediately (if a rescheduling is possible).
//
// In case 2 and 3, we need to remotely reawaken ourself in order to be
// transplanted back to the correct scheduler pool.
let mut running_task: ~Task = Local::take();
match running_task.maybe_take_runtime::<GreenTask>() {
Some(mut running_green_task) => {
running_green_task.put_task(running_task);
let mut sched = running_green_task.sched.take_unwrap();
if sched.pool_id == self.pool_id {
if can_resched {
sched.run_task(running_green_task, self);
} else {
sched.enqueue_task(self);
running_green_task.put_with_sched(sched);
}
} else {
self.reawaken_remotely();
// put that thing back where it came from!
running_green_task.put_with_sched(sched);
}
}
None => {
self.reawaken_remotely();
Local::put(running_task);
}
}
}
fn spawn_sibling(mut ~self, cur_task: ~Task, opts: TaskOpts, f: proc()) {
self.put_task(cur_task);
// Spawns a task into the current scheduler. We allocate the new task's
// stack from the scheduler's stack pool, and then configure it
// accordingly to `opts`. Afterwards we bootstrap it immediately by
// switching to it.
//
// Upon returning, our task is back in TLS and we're good to return.
let mut sched = self.sched.take_unwrap();
let sibling = GreenTask::configure(&mut sched.stack_pool, opts, f);
sched.run_task(self, sibling)
}
// Local I/O is provided by the scheduler's event loop
fn local_io<'a>(&'a mut self) -> Option<rtio::LocalIo<'a>> {
match self.sched.get_mut_ref().event_loop.io() {
Some(io) => Some(rtio::LocalIo::new(io)),
None => None,
}
}
fn wrap(~self) -> ~Any { self as ~Any }
}
impl Drop for GreenTask {
fn drop(&mut self) {
unsafe { self.nasty_deschedule_lock.destroy(); }
}
}
#[cfg(test)]
mod tests {
use std::rt::Runtime;
use std::rt::local::Local;
use std::rt::task::Task;
use std::task;
use std::task::TaskOpts;
use super::super::{PoolConfig, SchedPool};
use super::GreenTask;
fn spawn_opts(opts: TaskOpts, f: proc()) {
let mut pool = SchedPool::new(PoolConfig {
threads: 1,
event_loop_factory: None,
});
pool.spawn(opts, f);
pool.shutdown();
}
#[test]
fn smoke() {
let (p, c) = Chan::new();
do spawn_opts(TaskOpts::new()) {
c.send(());
}
p.recv();
}
#[test]
fn smoke_fail() {
let (p, c) = Chan::<()>::new();
do spawn_opts(TaskOpts::new()) {
let _c = c;
fail!()
}
assert_eq!(p.recv_opt(), None);
}
#[test]
fn smoke_opts() {
let mut opts = TaskOpts::new();
opts.name = Some(SendStrStatic("test"));
opts.stack_size = Some(20 * 4096);
let (p, c) = Chan::new();
opts.notify_chan = Some(c);
spawn_opts(opts, proc() {});
assert!(p.recv().is_ok());
}
#[test]
fn smoke_opts_fail() {
let mut opts = TaskOpts::new();
let (p, c) = Chan::new();
opts.notify_chan = Some(c);
spawn_opts(opts, proc() { fail!() });
assert!(p.recv().is_err());
}
#[test]
fn yield_test() {
let (p, c) = Chan::new();
do spawn_opts(TaskOpts::new()) {
10.times(task::deschedule);
c.send(());
}
p.recv();
}
#[test]
fn spawn_children() {
let (p, c) = Chan::new();
do spawn_opts(TaskOpts::new()) {
let (p, c2) = Chan::new();
do spawn {
let (p, c3) = Chan::new();
do spawn {
c3.send(());
}
p.recv();
c2.send(());
}
p.recv();
c.send(());
}
p.recv();
}
#[test]
fn spawn_inherits() {
let (p, c) = Chan::new();
do spawn_opts(TaskOpts::new()) {
let c = c;
do spawn {
let mut task: ~Task = Local::take();
match task.maybe_take_runtime::<GreenTask>() {
Some(ops) => {
task.put_runtime(ops as ~Runtime);
}
None => fail!(),
}
Local::put(task);
c.send(());
}
}
p.recv();
}
}
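
One more hedged sketch tying this file back to the pool: the `event_loop_factory` field used as `None` in these tests can also be supplied explicitly, e.g. with the I/O-less `basic::event_loop` factory mentioned earlier, when no libuv-backed factory is linked in:

    let mut pool = SchedPool::new(PoolConfig {
        threads: 4,
        event_loop_factory: Some(basic::event_loop),
    });
    pool.spawn(TaskOpts::new(), proc() { /* green work, no I/O available */ });
    pool.shutdown();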


@ -10,28 +10,21 @@
//! Blocking posix-based file I/O
#[allow(non_camel_case_types)];
use std::c_str::CString;
use std::io::IoError;
use std::io;
use std::libc::c_int;
use std::libc;
use std::os;
use std::rt::rtio;
use std::unstable::intrinsics;
use std::vec;
use c_str::CString;
use io::IoError;
use io;
use libc::c_int;
use libc;
use ops::Drop;
use option::{Some, None, Option};
use os;
use path::{Path, GenericPath};
use ptr::RawPtr;
use result::{Result, Ok, Err};
use rt::rtio;
use super::IoResult;
use unstable::intrinsics;
use vec::ImmutableVector;
use vec;
#[cfg(windows)] use os::win32::{as_utf16_p, fill_utf16_buf_and_decode};
#[cfg(windows)] use ptr;
#[cfg(windows)] use str;
#[cfg(windows)] use std::os::win32::{as_utf16_p, fill_utf16_buf_and_decode};
#[cfg(windows)] use std::ptr;
#[cfg(windows)] use std::str;
fn keep_going(data: &[u8], f: |*u8, uint| -> i64) -> i64 {
#[cfg(windows)] static eintr: int = 0; // doesn't matter
@ -490,8 +483,8 @@ pub fn readdir(p: &CString) -> IoResult<~[Path]> {
unsafe {
#[cfg(not(windows))]
unsafe fn get_list(p: &CString) -> IoResult<~[Path]> {
use libc::{dirent_t};
use libc::{opendir, readdir, closedir};
use std::libc::{dirent_t};
use std::libc::{opendir, readdir, closedir};
extern {
fn rust_list_dir_val(ptr: *dirent_t) -> *libc::c_char;
}
@ -517,14 +510,14 @@ pub fn readdir(p: &CString) -> IoResult<~[Path]> {
#[cfg(windows)]
unsafe fn get_list(p: &CString) -> IoResult<~[Path]> {
use libc::consts::os::extra::INVALID_HANDLE_VALUE;
use libc::{wcslen, free};
use libc::funcs::extra::kernel32::{
use std::libc::consts::os::extra::INVALID_HANDLE_VALUE;
use std::libc::{wcslen, free};
use std::libc::funcs::extra::kernel32::{
FindFirstFileW,
FindNextFileW,
FindClose,
};
use libc::types::os::arch::extra::HANDLE;
use std::libc::types::os::arch::extra::HANDLE;
use os::win32::{
as_utf16_p
};
@ -906,12 +899,11 @@ pub fn utime(p: &CString, atime: u64, mtime: u64) -> IoResult<()> {
#[cfg(test)]
mod tests {
use io::native::file::{CFile, FileDesc};
use io;
use libc;
use os;
use result::Ok;
use rt::rtio::RtioFileStream;
use super::{CFile, FileDesc};
use std::io;
use std::libc;
use std::os;
use std::rt::rtio::RtioFileStream;
#[ignore(cfg(target_os = "freebsd"))] // hmm, maybe pipes have a tiny buffer
#[test]


@ -21,24 +21,21 @@
//! play. The only dependencies of these modules are the normal system libraries
//! that you would find on the respective platform.
use c_str::CString;
use comm::SharedChan;
use libc::c_int;
use libc;
use option::{Option, None, Some};
use os;
use path::Path;
use result::{Result, Ok, Err};
use rt::rtio;
use rt::rtio::{RtioTcpStream, RtioTcpListener, RtioUdpSocket, RtioUnixListener,
RtioPipe, RtioFileStream, RtioProcess, RtioSignal, RtioTTY,
CloseBehavior, RtioTimer};
use io;
use io::IoError;
use io::net::ip::SocketAddr;
use io::process::ProcessConfig;
use io::signal::Signum;
use ai = io::net::addrinfo;
use std::c_str::CString;
use std::comm::SharedChan;
use std::libc::c_int;
use std::libc;
use std::os;
use std::rt::rtio;
use std::rt::rtio::{RtioTcpStream, RtioTcpListener, RtioUdpSocket,
RtioUnixListener, RtioPipe, RtioFileStream, RtioProcess,
RtioSignal, RtioTTY, CloseBehavior, RtioTimer};
use std::io;
use std::io::IoError;
use std::io::net::ip::SocketAddr;
use std::io::process::ProcessConfig;
use std::io::signal::Signum;
use ai = std::io::net::addrinfo;
// Local re-exports
pub use self::file::FileDesc;
@ -223,6 +220,3 @@ impl rtio::IoFactory for IoFactory {
Err(unimpl())
}
}
pub static mut NATIVE_IO_FACTORY: IoFactory = IoFactory;


@ -8,18 +8,17 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use io;
use libc::{pid_t, c_void, c_int};
use libc;
use os;
use prelude::*;
use ptr;
use rt::rtio;
use super::file;
#[cfg(windows)]
use cast;
use std::io;
use std::libc::{pid_t, c_void, c_int};
use std::libc;
use std::os;
use std::ptr;
use std::rt::rtio;
use p = std::io::process;
use p = io::process;
#[cfg(windows)] use std::cast;
use super::file;
/**
* A value representing a child process.
@ -179,22 +178,22 @@ fn spawn_process_os(prog: &str, args: &[~str],
env: Option<~[(~str, ~str)]>,
dir: Option<&Path>,
in_fd: c_int, out_fd: c_int, err_fd: c_int) -> SpawnProcessResult {
use libc::types::os::arch::extra::{DWORD, HANDLE, STARTUPINFO};
use libc::consts::os::extra::{
use std::libc::types::os::arch::extra::{DWORD, HANDLE, STARTUPINFO};
use std::libc::consts::os::extra::{
TRUE, FALSE,
STARTF_USESTDHANDLES,
INVALID_HANDLE_VALUE,
DUPLICATE_SAME_ACCESS
};
use libc::funcs::extra::kernel32::{
use std::libc::funcs::extra::kernel32::{
GetCurrentProcess,
DuplicateHandle,
CloseHandle,
CreateProcessA
};
use libc::funcs::extra::msvcrt::get_osfhandle;
use std::libc::funcs::extra::msvcrt::get_osfhandle;
use mem;
use std::mem;
unsafe {
@ -256,10 +255,10 @@ fn spawn_process_os(prog: &str, args: &[~str],
fail!("failure in CreateProcess: {}", *msg);
}
// We close the thread handle because we don't care about keeping the
// We close the thread handle because we don't care about keeping the
// thread id valid, and we aren't keeping the thread handle around to be
// able to close it later. We don't close the process handle however
// because we want the process id to stay valid at least until the
// because we want the process id to stay valid at least until the
// calling code closes the process handle.
CloseHandle(pi.hThread);
@ -362,8 +361,8 @@ fn spawn_process_os(prog: &str, args: &[~str],
env: Option<~[(~str, ~str)]>,
dir: Option<&Path>,
in_fd: c_int, out_fd: c_int, err_fd: c_int) -> SpawnProcessResult {
use libc::funcs::posix88::unistd::{fork, dup2, close, chdir, execvp};
use libc::funcs::bsd44::getdtablesize;
use std::libc::funcs::posix88::unistd::{fork, dup2, close, chdir, execvp};
use std::libc::funcs::bsd44::getdtablesize;
mod rustrt {
extern {
@ -433,7 +432,7 @@ fn spawn_process_os(prog: &str, args: &[~str],
#[cfg(unix)]
fn with_argv<T>(prog: &str, args: &[~str], cb: |**libc::c_char| -> T) -> T {
use vec;
use std::vec;
// We can't directly convert `str`s into `*char`s, as someone needs to hold
// a reference to the intermediary byte buffers. So first build an array to
@ -459,7 +458,7 @@ fn with_argv<T>(prog: &str, args: &[~str], cb: |**libc::c_char| -> T) -> T {
#[cfg(unix)]
fn with_envp<T>(env: Option<~[(~str, ~str)]>, cb: |*c_void| -> T) -> T {
use vec;
use std::vec;
// On posixy systems we can pass a char** for envp, which is a
// null-terminated array of "k=v\n" strings. Like `with_argv`, we have to
@ -540,8 +539,8 @@ fn waitpid(pid: pid_t) -> int {
#[cfg(windows)]
fn waitpid_os(pid: pid_t) -> int {
use libc::types::os::arch::extra::DWORD;
use libc::consts::os::extra::{
use std::libc::types::os::arch::extra::DWORD;
use std::libc::consts::os::extra::{
SYNCHRONIZE,
PROCESS_QUERY_INFORMATION,
FALSE,
@ -549,7 +548,7 @@ fn waitpid(pid: pid_t) -> int {
INFINITE,
WAIT_FAILED
};
use libc::funcs::extra::kernel32::{
use std::libc::funcs::extra::kernel32::{
OpenProcess,
GetExitCodeProcess,
CloseHandle,
@ -585,7 +584,7 @@ fn waitpid(pid: pid_t) -> int {
#[cfg(unix)]
fn waitpid_os(pid: pid_t) -> int {
use libc::funcs::posix01::wait::*;
use std::libc::funcs::posix01::wait;
#[cfg(target_os = "linux")]
#[cfg(target_os = "android")]
@ -612,7 +611,7 @@ fn waitpid(pid: pid_t) -> int {
}
let mut status = 0 as c_int;
if unsafe { waitpid(pid, &mut status, 0) } == -1 {
if unsafe { wait::waitpid(pid, &mut status, 0) } == -1 {
fail!("failure in waitpid: {}", os::last_os_error());
}

src/libnative/lib.rs (new file, 94 lines)
@ -0,0 +1,94 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The native runtime crate
//!
//! This crate contains an implementation of 1:1 scheduling for a "native"
//! runtime. In addition, all I/O provided by this crate is the thread blocking
//! version of I/O.
#[pkgid = "native#0.9-pre"];
#[crate_id = "native#0.9-pre"];
#[license = "MIT/ASL2"];
#[crate_type = "rlib"];
#[crate_type = "dylib"];
// Allow check-stage0-native for now
#[cfg(stage0, test)] extern mod green;
// NB this crate explicitly does *not* allow glob imports; please seriously
// consider whether they're needed before adding that feature here (the
// answer is that you don't need them)
use std::os;
use std::rt::local::Local;
use std::rt::task::Task;
use std::rt;
pub mod io;
pub mod task;
// XXX: this should not exist here
#[cfg(stage0)]
#[lang = "start"]
pub fn lang_start(main: *u8, argc: int, argv: **u8) -> int {
use std::cast;
use std::task;
do start(argc, argv) {
// Instead of invoking main directly on this thread, invoke it on
// another spawned thread whose stack size we do know. We currently have
// no way of figuring out the size of the main thread's stack, so for
// stack overflow detection to work, main must be spawned in a subtask
// with a known stack size.
let main: extern "Rust" fn() = unsafe { cast::transmute(main) };
let mut task = task::task();
task.name("<main>");
match do task.try { main() } {
Ok(()) => { os::set_exit_status(0); }
Err(..) => { os::set_exit_status(rt::DEFAULT_ERROR_CODE); }
}
}
}
/// Executes the given procedure after initializing the runtime with the given
/// argc/argv.
///
/// This procedure is guaranteed to run on the thread calling this function, but
/// the stack bounds for this rust task will *not* be set. Care must be taken
/// for this function to not overflow its stack.
///
/// This function will only return once *all* native threads in the system have
/// exited.
pub fn start(argc: int, argv: **u8, main: proc()) -> int {
rt::init(argc, argv);
let mut exit_code = None;
let mut main = Some(main);
task::new().run(|| {
exit_code = Some(run(main.take_unwrap()));
});
unsafe { rt::cleanup(); }
return exit_code.unwrap();
}
/// Executes a procedure on the current thread in a Rust task context.
///
/// This function has all of the same details as `start` except for a different
/// number of arguments.
pub fn run(main: proc()) -> int {
// Run the main procedure and then wait for everything to finish
main();
unsafe {
let mut task = Local::borrow(None::<Task>);
task.get().wait_for_other_tasks();
}
os::get_exit_status()
}
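
Since libgreen currently provides the default start lang item, a program that wants 1:1 scheduling must wire up booting itself. A hedged sketch using the `start` function above (the `#[start]` attribute and the `main` body belong to the user program, not this crate):

    #[start]
    fn start(argc: int, argv: **u8) -> int {
        native::start(argc, argv, proc() {
            main()
        })
    }

    fn main() {
        // runs as a 1:1 native task
    }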

src/libnative/task.rs (new file, 330 lines)
@ -0,0 +1,330 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Tasks implemented on top of OS threads
//!
//! This module contains the implementation of the 1:1 threading module required
//! by rust tasks. This implements the necessary API traits laid out by std::rt
//! in order to spawn new tasks and deschedule the current task.
use std::cast;
use std::rt::env;
use std::rt::local::Local;
use std::rt::rtio;
use std::rt::task::{Task, BlockedTask};
use std::rt::thread::Thread;
use std::rt;
use std::task::TaskOpts;
use std::unstable::mutex::Mutex;
use std::unstable::stack;
use io;
use task;
/// Creates a new Task which is ready to execute as a 1:1 task.
pub fn new() -> ~Task {
let mut task = ~Task::new();
task.put_runtime(~Ops {
lock: unsafe { Mutex::new() },
awoken: false,
} as ~rt::Runtime);
return task;
}
/// Spawns a function with the default configuration
pub fn spawn(f: proc()) {
spawn_opts(TaskOpts::new(), f)
}
/// Spawns a new task given the configuration options and a procedure to run
/// inside the task.
pub fn spawn_opts(opts: TaskOpts, f: proc()) {
let TaskOpts {
watched: _watched,
notify_chan, name, stack_size
} = opts;
let mut task = new();
task.name = name;
match notify_chan {
Some(chan) => {
let on_exit = proc(task_result) { chan.send(task_result) };
task.death.on_exit = Some(on_exit);
}
None => {}
}
let stack = stack_size.unwrap_or(env::min_stack());
let task = task;
// Spawning a new OS thread guarantees that __morestack will never get
// triggered, but we must manually set up the actual stack bounds once this
// function starts executing. This raises the lower limit by a bit because
// by the time that this function is executing we've already consumed at
// least a little bit of stack (we don't know the exact byte address at
// which our stack started).
Thread::spawn_stack(stack, proc() {
let something_around_the_top_of_the_stack = 1;
let addr = &something_around_the_top_of_the_stack as *int;
unsafe {
let my_stack = addr as uint;
stack::record_stack_bounds(my_stack - stack + 1024, my_stack);
}
let mut f = Some(f);
task.run(|| { f.take_unwrap()() });
})
}
// This structure is the glue between channels and the 1:1 scheduling mode. This
// structure is allocated once per task.
struct Ops {
lock: Mutex, // native synchronization
awoken: bool, // used to prevent spurious wakeups
}
impl rt::Runtime for Ops {
fn yield_now(~self, mut cur_task: ~Task) {
// put the task back in TLS and then invoke the OS thread yield
cur_task.put_runtime(self as ~rt::Runtime);
Local::put(cur_task);
Thread::yield_now();
}
fn maybe_yield(~self, mut cur_task: ~Task) {
// just put the task back in TLS, on OS threads we never need to
// opportunistically yield b/c the OS will do that for us (preemption)
cur_task.put_runtime(self as ~rt::Runtime);
Local::put(cur_task);
}
fn wrap(~self) -> ~Any {
self as ~Any
}
// This function gets a little interesting. There are a few safety and
// ownership violations going on here, but this is all done in the name of
// shared state. Additionally, all of the violations are protected with a
// mutex, so in theory there are no races.
//
// The first thing we need to do is to get a pointer to the task's internal
// mutex. This address will not be changing (because the task is allocated
// on the heap). We must have this handle separately because the task will
// have its ownership transferred to the given closure. We're guaranteed,
// however, that this memory will remain valid because *this* is the current
// task's execution thread.
//
// The next weird part is where ownership of the task actually goes. We
// relinquish it to the `f` blocking function, but upon returning this
// function needs to replace the task back in TLS. There is no communication
// from the wakeup thread back to this thread about the task pointer, and
// there's really no need to. In order to get around this, we cast the task
// to a `uint` which is then used at the end of this function to cast back
// to a `~Task` object. Naturally, this looks like it violates ownership
// semantics in that there may be two `~Task` objects.
//
// The fun part is that the wakeup half of this implementation knows to
// "forget" the task on the other end. This means that the awakening half of
// things silently relinquishes ownership back to this thread, but not in a
// way that the compiler can understand. The task's memory is always valid
// for both tasks because these operations are all done inside of a mutex.
//
// You'll also find that if blocking fails (the `f` function hands the
// BlockedTask back to us), we will `cast::forget` the handles. The
// reasoning for this is the same logic as above in that the task silently
// transfers ownership via the `uint`, not through normal compiler
// semantics.
//
// On a mildly unrelated note, it should also be pointed out that OS
// condition variables are susceptible to spurious wakeups, which we need to
// be ready for. In order to accommodate this fact, we have an extra
// `awoken` field which indicates whether we were actually woken up via some
// invocation of `reawaken`. This flag is only ever accessed inside the
// lock, so there's no need to make it atomic.
fn deschedule(mut ~self, times: uint, mut cur_task: ~Task,
f: |BlockedTask| -> Result<(), BlockedTask>) {
let me = &mut *self as *mut Ops;
cur_task.put_runtime(self as ~rt::Runtime);
unsafe {
let cur_task_dupe = *cast::transmute::<&~Task, &uint>(&cur_task);
let task = BlockedTask::block(cur_task);
if times == 1 {
(*me).lock.lock();
(*me).awoken = false;
match f(task) {
Ok(()) => {
while !(*me).awoken {
(*me).lock.wait();
}
}
Err(task) => { cast::forget(task.wake()); }
}
(*me).lock.unlock();
} else {
let mut iter = task.make_selectable(times);
(*me).lock.lock();
(*me).awoken = false;
let success = iter.all(|task| {
match f(task) {
Ok(()) => true,
Err(task) => {
cast::forget(task.wake());
false
}
}
});
while success && !(*me).awoken {
(*me).lock.wait();
}
(*me).lock.unlock();
}
// re-acquire ownership of the task
cur_task = cast::transmute::<uint, ~Task>(cur_task_dupe);
}
// put the task back in TLS, and everything is as it once was.
Local::put(cur_task);
}
// See the comments on `deschedule` for why the task is forgotten here, and
// why it's valid to do so.
fn reawaken(mut ~self, mut to_wake: ~Task, _can_resched: bool) {
unsafe {
let me = &mut *self as *mut Ops;
to_wake.put_runtime(self as ~rt::Runtime);
cast::forget(to_wake);
(*me).lock.lock();
(*me).awoken = true;
(*me).lock.signal();
(*me).lock.unlock();
}
}
fn spawn_sibling(~self, mut cur_task: ~Task, opts: TaskOpts, f: proc()) {
cur_task.put_runtime(self as ~rt::Runtime);
Local::put(cur_task);
task::spawn_opts(opts, f);
}
fn local_io<'a>(&'a mut self) -> Option<rtio::LocalIo<'a>> {
static mut io: io::IoFactory = io::IoFactory;
// Unsafety is from accessing `io`, which is guaranteed to be safe
// because you can't do anything usable with this statically initialized
// unit struct.
Some(unsafe { rtio::LocalIo::new(&mut io as &mut rtio::IoFactory) })
}
}
impl Drop for Ops {
fn drop(&mut self) {
unsafe { self.lock.destroy() }
}
}
#[cfg(test)]
mod tests {
use std::rt::Runtime;
use std::rt::local::Local;
use std::rt::task::Task;
use std::task;
use std::task::TaskOpts;
use super::{spawn, spawn_opts, Ops};
#[test]
fn smoke() {
let (p, c) = Chan::new();
do spawn {
c.send(());
}
p.recv();
}
#[test]
fn smoke_fail() {
let (p, c) = Chan::<()>::new();
do spawn {
let _c = c;
fail!()
}
assert_eq!(p.recv_opt(), None);
}
#[test]
fn smoke_opts() {
let mut opts = TaskOpts::new();
opts.name = Some(SendStrStatic("test"));
opts.stack_size = Some(20 * 4096);
let (p, c) = Chan::new();
opts.notify_chan = Some(c);
spawn_opts(opts, proc() {});
assert!(p.recv().is_ok());
}
#[test]
fn smoke_opts_fail() {
let mut opts = TaskOpts::new();
let (p, c) = Chan::new();
opts.notify_chan = Some(c);
spawn_opts(opts, proc() { fail!() });
assert!(p.recv().is_err());
}
#[test]
fn yield_test() {
let (p, c) = Chan::new();
do spawn {
10.times(task::deschedule);
c.send(());
}
p.recv();
}
#[test]
fn spawn_children() {
let (p, c) = Chan::new();
do spawn {
let (p, c2) = Chan::new();
do spawn {
let (p, c3) = Chan::new();
do spawn {
c3.send(());
}
p.recv();
c2.send(());
}
p.recv();
c.send(());
}
p.recv();
}
#[test]
fn spawn_inherits() {
let (p, c) = Chan::new();
do spawn {
let c = c;
do spawn {
let mut task: ~Task = Local::take();
match task.maybe_take_runtime::<Ops>() {
Some(ops) => {
task.put_runtime(ops as ~Runtime);
}
None => fail!(),
}
Local::put(task);
c.send(());
}
}
p.recv();
}
}
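
As with the green tests above, a hedged usage sketch for the public entry points in this file (the channel plumbing mirrors `smoke_opts`):

    let mut opts = TaskOpts::new();
    let (port, chan) = Chan::new();
    opts.notify_chan = Some(chan);
    spawn_opts(opts, proc() {
        // runs on a fresh OS thread with its stack bounds recorded
    });
    assert!(port.recv().is_ok()); // the task exited cleanly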


@ -333,6 +333,10 @@ pub mod write {
}
unsafe fn configure_llvm(sess: Session) {
use std::unstable::mutex::{MUTEX_INIT, Mutex};
static mut LOCK: Mutex = MUTEX_INIT;
static mut CONFIGURED: bool = false;
// Copy what clang does by turning on loop vectorization at O2 and
// slp vectorization at O3
let vectorize_loop = !sess.no_vectorize_loops() &&
@ -360,7 +364,13 @@ pub mod write {
add(*arg);
}
llvm::LLVMRustSetLLVMOptions(llvm_args.len() as c_int, llvm_args.as_ptr());
LOCK.lock();
if !CONFIGURED {
llvm::LLVMRustSetLLVMOptions(llvm_args.len() as c_int,
llvm_args.as_ptr());
CONFIGURED = true;
}
LOCK.unlock();
}
unsafe fn populate_llvm_passes(fpm: lib::llvm::PassManagerRef,


@ -70,6 +70,15 @@ impl fold::ast_fold for StandardLibraryInjector {
}];
if use_uv(&crate) && !*self.sess.building_library {
vis.push(ast::view_item {
node: ast::view_item_extern_mod(self.sess.ident_of("green"),
None,
~[vers_item],
ast::DUMMY_NODE_ID),
attrs: ~[],
vis: ast::private,
span: dummy_sp()
});
vis.push(ast::view_item {
node: ast::view_item_extern_mod(self.sess.ident_of("rustuv"),
None,


@ -146,7 +146,7 @@ use std::hashmap::HashMap;
use std::hashmap::HashSet;
use std::libc::{c_uint, c_ulonglong, c_longlong};
use std::ptr;
use std::unstable::atomics;
use std::sync::atomics;
use std::vec;
use syntax::codemap::{Span, Pos};
use syntax::{ast, codemap, ast_util, ast_map, opt_vec};


@ -390,7 +390,7 @@ pub fn mk_output_path(what: OutputType, where: Target,
Bench => "bench",
_ => ""
},
os::EXE_SUFFIX))
os::consts::EXE_SUFFIX))
};
if !output_path.is_absolute() {
output_path = os::getcwd().join(&output_path);


@ -487,8 +487,9 @@ fn lib_output_file_name(workspace: &Path, short_name: &str) -> Path {
}
fn output_file_name(workspace: &Path, short_name: ~str) -> Path {
target_build_dir(workspace).join(short_name.as_slice()).join(format!("{}{}", short_name,
os::EXE_SUFFIX))
target_build_dir(workspace).join(short_name.as_slice())
.join(format!("{}{}", short_name,
os::consts::EXE_SUFFIX))
}
#[cfg(target_os = "linux")]
@ -1353,7 +1354,7 @@ fn test_import_rustpkg() {
command_line_test([~"build", ~"foo"], workspace);
debug!("workspace = {}", workspace.display());
assert!(target_build_dir(workspace).join("foo").join(format!("pkg{}",
os::EXE_SUFFIX)).exists());
os::consts::EXE_SUFFIX)).exists());
}
#[test]
@ -1366,7 +1367,7 @@ fn test_macro_pkg_script() {
command_line_test([~"build", ~"foo"], workspace);
debug!("workspace = {}", workspace.display());
assert!(target_build_dir(workspace).join("foo").join(format!("pkg{}",
os::EXE_SUFFIX)).exists());
os::consts::EXE_SUFFIX)).exists());
}
#[test]


@ -11,12 +11,10 @@
use ai = std::io::net::addrinfo;
use std::libc::c_int;
use std::ptr::null;
use std::rt::BlockedTask;
use std::rt::local::Local;
use std::rt::sched::Scheduler;
use std::rt::task::BlockedTask;
use net;
use super::{Loop, UvError, Request, wait_until_woken_after};
use super::{Loop, UvError, Request, wait_until_woken_after, wakeup};
use uvll;
struct Addrinfo {
@ -108,8 +106,7 @@ impl GetAddrInfoRequest {
cx.status = status;
cx.addrinfo = Some(Addrinfo { handle: res });
let sched: ~Scheduler = Local::take();
sched.resume_blocked_task_immediately(cx.slot.take_unwrap());
wakeup(&mut cx.slot);
}
}
}
@ -188,12 +185,13 @@ pub fn accum_addrinfo(addr: &Addrinfo) -> ~[ai::Info] {
#[cfg(test, not(target_os="android"))]
mod test {
use std::io::net::ip::{SocketAddr, Ipv4Addr};
use super::*;
use super::super::local_loop;
use super::GetAddrInfoRequest;
#[test]
fn getaddrinfo_test() {
match GetAddrInfoRequest::run(local_loop(), Some("localhost"), None, None) {
let loop_ = &mut local_loop().loop_;
match GetAddrInfoRequest::run(loop_, Some("localhost"), None, None) {
Ok(infos) => {
let mut found_local = false;
let local_addr = &SocketAddr {
@ -211,9 +209,10 @@ mod test {
#[test]
fn issue_10663() {
let loop_ = &mut local_loop().loop_;
// Something should happen here, but this certainly shouldn't cause
// everything to die. The actual outcome we don't care too much about.
GetAddrInfoRequest::run(local_loop(), Some("irc.n0v4.com"), None,
GetAddrInfoRequest::run(loop_, Some("irc.n0v4.com"), None,
None);
}
}


@ -127,16 +127,15 @@ impl Drop for AsyncWatcher {
mod test_remote {
use std::rt::rtio::Callback;
use std::rt::thread::Thread;
use std::rt::tube::Tube;
use super::*;
use super::AsyncWatcher;
use super::super::local_loop;
// Make sure that we can fire watchers in remote threads and that they
// actually trigger what they say they will.
#[test]
fn smoke_test() {
struct MyCallback(Option<Tube<int>>);
struct MyCallback(Option<Chan<int>>);
impl Callback for MyCallback {
fn call(&mut self) {
// this can get called more than once, but we only want to send
@ -147,16 +146,17 @@ mod test_remote {
}
}
let mut tube = Tube::new();
let cb = ~MyCallback(Some(tube.clone()));
let watcher = AsyncWatcher::new(local_loop(), cb as ~Callback);
let (port, chan) = Chan::new();
let cb = ~MyCallback(Some(chan));
let watcher = AsyncWatcher::new(&mut local_loop().loop_,
cb as ~Callback);
let thread = do Thread::start {
let mut watcher = watcher;
watcher.fire();
};
assert_eq!(tube.recv(), 1);
assert_eq!(port.recv(), 1);
thread.join();
}
}


@ -14,15 +14,14 @@ use std::cast::transmute;
use std::cast;
use std::libc::{c_int, c_char, c_void, size_t};
use std::libc;
use std::rt::BlockedTask;
use std::rt::task::BlockedTask;
use std::io::{FileStat, IoError};
use std::io;
use std::rt::local::Local;
use std::rt::rtio;
use std::rt::sched::{Scheduler, SchedHandle};
use super::{Loop, UvError, uv_error_to_io_error, wait_until_woken_after};
use uvio::HomingIO;
use homing::{HomingIO, HomeHandle};
use super::{Loop, UvError, uv_error_to_io_error, wait_until_woken_after, wakeup};
use uvio::UvIoFactory;
use uvll;
pub struct FsRequest {
@ -34,19 +33,19 @@ pub struct FileWatcher {
priv loop_: Loop,
priv fd: c_int,
priv close: rtio::CloseBehavior,
priv home: SchedHandle,
priv home: HomeHandle,
}
impl FsRequest {
pub fn open(loop_: &Loop, path: &CString, flags: int, mode: int)
pub fn open(io: &mut UvIoFactory, path: &CString, flags: int, mode: int)
-> Result<FileWatcher, UvError>
{
execute(|req, cb| unsafe {
uvll::uv_fs_open(loop_.handle,
uvll::uv_fs_open(io.uv_loop(),
req, path.with_ref(|p| p), flags as c_int,
mode as c_int, cb)
}).map(|req|
FileWatcher::new(*loop_, req.get_result() as c_int,
FileWatcher::new(io, req.get_result() as c_int,
rtio::CloseSynchronously)
)
}
@ -320,8 +319,7 @@ fn execute(f: |*uvll::uv_fs_t, uvll::uv_fs_cb| -> c_int)
let slot: &mut Option<BlockedTask> = unsafe {
cast::transmute(uvll::get_data_for_req(req))
};
let sched: ~Scheduler = Local::take();
sched.resume_blocked_task_immediately(slot.take_unwrap());
wakeup(slot);
}
}
@ -331,16 +329,17 @@ fn execute_nop(f: |*uvll::uv_fs_t, uvll::uv_fs_cb| -> c_int)
}
impl HomingIO for FileWatcher {
fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home }
fn home<'r>(&'r mut self) -> &'r mut HomeHandle { &mut self.home }
}
impl FileWatcher {
pub fn new(loop_: Loop, fd: c_int, close: rtio::CloseBehavior) -> FileWatcher {
pub fn new(io: &mut UvIoFactory, fd: c_int,
close: rtio::CloseBehavior) -> FileWatcher {
FileWatcher {
loop_: loop_,
loop_: Loop::wrap(io.uv_loop()),
fd: fd,
close: close,
home: get_handle_to_current_scheduler!()
home: io.make_handle(),
}
}
@ -448,8 +447,11 @@ mod test {
use std::io;
use std::str;
use std::vec;
use super::*;
use l = super::super::local_loop;
use super::FsRequest;
use super::super::Loop;
use super::super::local_loop;
fn l() -> &mut Loop { &mut local_loop().loop_ }
#[test]
fn file_test_full_simple_sync() {
@ -460,7 +462,7 @@ mod test {
{
// open/create
let result = FsRequest::open(l(), &path_str.to_c_str(),
let result = FsRequest::open(local_loop(), &path_str.to_c_str(),
create_flags as int, mode as int);
assert!(result.is_ok());
let result = result.unwrap();
@ -473,7 +475,7 @@ mod test {
{
// re-open
let result = FsRequest::open(l(), &path_str.to_c_str(),
let result = FsRequest::open(local_loop(), &path_str.to_c_str(),
read_flags as int, 0);
assert!(result.is_ok());
let result = result.unwrap();
@ -500,7 +502,7 @@ mod test {
let create_flags = (O_RDWR | O_CREAT) as int;
let mode = (S_IWUSR | S_IRUSR) as int;
let result = FsRequest::open(l(), path, create_flags, mode);
let result = FsRequest::open(local_loop(), path, create_flags, mode);
assert!(result.is_ok());
let file = result.unwrap();

src/librustuv/homing.rs (new file, 212 lines)
@ -0,0 +1,212 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Homing I/O implementation
//!
//! In libuv, whenever a handle is created on an I/O loop it is illegal to use
//! that handle outside of that I/O loop. We use libuv I/O with our green
//! scheduler, and each green scheduler corresponds to a different I/O loop on a
//! different OS thread. Green tasks are also free to roam among schedulers,
//! which implies that it is possible to create an I/O handle on one event loop
//! and then attempt to use it on another.
//!
//! In order to solve this problem, this module implements the notion of a
//! "homing operation" which will transplant a task from its currently running
//! scheduler back onto the original I/O loop. This is accomplished entirely at
//! the librustuv layer with very little cooperation from the scheduler (which,
//! technically, we don't even know exists).
//!
//! These homing operations are completed by first realizing that we're on the
//! wrong I/O loop, then descheduling ourselves, sending ourselves to the
//! correct I/O loop, and then waking up the I/O loop in order to process its
//! local queue of tasks which need to run.
//!
//! This enqueueing is done with a concurrent queue from libstd, and the
//! signalling is achieved with an async handle.
#[allow(dead_code)];
use std::cast;
use std::rt::local::Local;
use std::rt::rtio::LocalIo;
use std::rt::task::{Task, BlockedTask};
use ForbidUnwind;
use queue::{Queue, QueuePool};
/// A handle to a remote libuv event loop. This handle will keep the event loop
/// alive while active in order to ensure that a homing operation can always be
/// completed.
///
/// Handles are clone-able in order to derive new handles from existing handles
/// (very useful for when accepting a socket from a server).
pub struct HomeHandle {
priv queue: Queue,
priv id: uint,
}
impl HomeHandle {
pub fn new(id: uint, pool: &mut QueuePool) -> HomeHandle {
HomeHandle { queue: pool.queue(), id: id }
}
fn send(&mut self, task: BlockedTask) {
self.queue.push(task);
}
}
impl Clone for HomeHandle {
fn clone(&self) -> HomeHandle {
HomeHandle {
queue: self.queue.clone(),
id: self.id,
}
}
}
pub fn local_id() -> uint {
let mut io = match LocalIo::borrow() {
Some(io) => io, None => return 0,
};
let io = io.get();
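// A trait object is a (vtable, data) pair, so the data pointer of the local
// IoFactory uniquely identifies this event loop and doubles as its id.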
unsafe {
let (_vtable, ptr): (uint, uint) = cast::transmute(io);
return ptr;
}
}
pub trait HomingIO {
fn home<'r>(&'r mut self) -> &'r mut HomeHandle;
/// This function will move tasks to run on their home I/O scheduler. Note
/// that this function does *not* pin the task to the I/O scheduler, but
/// rather it simply moves it to running on the I/O scheduler.
fn go_to_IO_home(&mut self) -> uint {
let _f = ForbidUnwind::new("going home");
let cur_loop_id = local_id();
let destination = self.home().id;
// Try at all costs to avoid the homing operation because it is quite
// expensive. Hence, we only deschedule/send if we're not on the correct
// event loop. If we're already on the home event loop, then we're good
// to go (remember we have no preemption, so we're guaranteed to stay on
// this event loop as long as we avoid the scheduler).
if cur_loop_id != destination {
let cur_task: ~Task = Local::take();
cur_task.deschedule(1, |task| {
self.home().send(task);
Ok(())
});
// Once we wake up, assert that we're in the right location
assert_eq!(local_id(), destination);
}
return destination;
}
/// Fires a single homing missile, returning another missile targeted back
/// at the original home of this task. In other words, this function will
/// move the local task to its I/O scheduler and then return an RAII wrapper
/// which will return the task home.
fn fire_homing_missile(&mut self) -> HomingMissile {
HomingMissile { io_home: self.go_to_IO_home() }
}
}
/// After a homing operation has been completed, this will return the current
/// task back to its appropriate home (if applicable). The field is used to
/// assert that we are where we think we are.
struct HomingMissile {
priv io_home: uint,
}
impl HomingMissile {
/// Check at runtime that the task has *not* transplanted itself to a
/// different I/O loop while executing.
pub fn check(&self, msg: &'static str) {
assert!(local_id() == self.io_home, "{}", msg);
}
}
impl Drop for HomingMissile {
fn drop(&mut self) {
let _f = ForbidUnwind::new("leaving home");
// It would truly be a sad day if we had moved off the home I/O
// scheduler while we were doing I/O.
self.check("task moved away from the home scheduler");
}
}
#[cfg(test)]
mod test {
use green::sched;
use green::{SchedPool, PoolConfig};
use std::rt::rtio::RtioUdpSocket;
use std::io::test::next_test_ip4;
use std::task::TaskOpts;
use net::UdpWatcher;
use super::super::local_loop;
// On one thread, create a udp socket. Then send that socket to another
// thread and destroy the socket on the remote thread. This should make sure
// that homing kicks in for the socket to go back home to the original
// thread, close itself, and then come back to the last thread.
#[test]
fn test_homing_closes_correctly() {
let (port, chan) = Chan::new();
let mut pool = SchedPool::new(PoolConfig {
threads: 1,
event_loop_factory: None,
});
do pool.spawn(TaskOpts::new()) {
let listener = UdpWatcher::bind(local_loop(), next_test_ip4());
chan.send(listener.unwrap());
}
let task = do pool.task(TaskOpts::new()) {
port.recv();
};
pool.spawn_sched().send(sched::TaskFromFriend(task));
pool.shutdown();
}
#[test]
fn test_homing_read() {
let (port, chan) = Chan::new();
let mut pool = SchedPool::new(PoolConfig {
threads: 1,
event_loop_factory: None,
});
do pool.spawn(TaskOpts::new()) {
let addr1 = next_test_ip4();
let addr2 = next_test_ip4();
let listener = UdpWatcher::bind(local_loop(), addr2);
chan.send((listener.unwrap(), addr1));
let mut listener = UdpWatcher::bind(local_loop(), addr1).unwrap();
listener.sendto([1, 2, 3, 4], addr2);
}
let task = do pool.task(TaskOpts::new()) {
let (mut watcher, addr) = port.recv();
let mut buf = [0, ..10];
assert_eq!(watcher.recvfrom(buf).unwrap(), (4, addr));
};
pool.spawn_sched().send(sched::TaskFromFriend(task));
pool.shutdown();
}
}
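
A hedged sketch of how the rest of librustuv consumes this module (the watcher type is hypothetical; `HomeHandle`, `HomingIO`, and `fire_homing_missile` are defined above):

    struct SomeWatcher {
        home: HomeHandle,
        // ... the underlying uv handle ...
    }

    impl HomingIO for SomeWatcher {
        fn home<'r>(&'r mut self) -> &'r mut HomeHandle { &mut self.home }
    }

    impl SomeWatcher {
        fn do_io(&mut self) {
            // Hop over to the loop this handle was created on...
            let missile = self.fire_homing_missile();
            // ...perform libuv calls; with no preemption we stay on this loop...
            drop(missile); // Drop asserts that we never migrated mid-I/O
        }
    }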


@ -97,72 +97,102 @@ impl Drop for IdleWatcher {
#[cfg(test)]
mod test {
use super::*;
use std::rt::tube::Tube;
use std::cast;
use std::cell::RefCell;
use std::rc::Rc;
use std::rt::rtio::{Callback, PausableIdleCallback};
use std::rt::task::{BlockedTask, Task};
use std::rt::local::Local;
use super::IdleWatcher;
use super::super::local_loop;
struct MyCallback(Tube<int>, int);
type Chan = Rc<RefCell<(Option<BlockedTask>, uint)>>;
struct MyCallback(Rc<RefCell<(Option<BlockedTask>, uint)>>, uint);
impl Callback for MyCallback {
fn call(&mut self) {
match *self {
MyCallback(ref mut tube, val) => tube.send(val)
}
let task = match *self {
MyCallback(ref rc, n) => {
let mut slot = rc.borrow().borrow_mut();
match *slot.get() {
(ref mut task, ref mut val) => {
*val = n;
task.take_unwrap()
}
}
}
};
task.wake().map(|t| t.reawaken(true));
}
}
fn mk(v: uint) -> (~IdleWatcher, Chan) {
let rc = Rc::from_send(RefCell::new((None, 0)));
let cb = ~MyCallback(rc.clone(), v);
let cb = cb as ~Callback:;
let cb = unsafe { cast::transmute(cb) };
(IdleWatcher::new(&mut local_loop().loop_, cb), rc)
}
fn sleep(chan: &Chan) -> uint {
let task: ~Task = Local::take();
task.deschedule(1, |task| {
let mut slot = chan.borrow().borrow_mut();
match *slot.get() {
(ref mut slot, _) => {
assert!(slot.is_none());
*slot = Some(task);
}
}
Ok(())
});
let slot = chan.borrow().borrow();
match *slot.get() { (_, n) => n }
}
#[test]
fn not_used() {
let cb = ~MyCallback(Tube::new(), 1);
let _idle = IdleWatcher::new(local_loop(), cb as ~Callback);
let (_idle, _chan) = mk(1);
}
#[test]
fn smoke_test() {
let mut tube = Tube::new();
let cb = ~MyCallback(tube.clone(), 1);
let mut idle = IdleWatcher::new(local_loop(), cb as ~Callback);
let (mut idle, chan) = mk(1);
idle.resume();
tube.recv();
assert_eq!(sleep(&chan), 1);
}
#[test] #[should_fail]
fn smoke_fail() {
let tube = Tube::new();
let cb = ~MyCallback(tube.clone(), 1);
let mut idle = IdleWatcher::new(local_loop(), cb as ~Callback);
let (mut idle, _chan) = mk(1);
idle.resume();
fail!();
}
#[test]
fn fun_combinations_of_methods() {
let mut tube = Tube::new();
let cb = ~MyCallback(tube.clone(), 1);
let mut idle = IdleWatcher::new(local_loop(), cb as ~Callback);
let (mut idle, chan) = mk(1);
idle.resume();
tube.recv();
assert_eq!(sleep(&chan), 1);
idle.pause();
idle.resume();
idle.resume();
tube.recv();
assert_eq!(sleep(&chan), 1);
idle.pause();
idle.pause();
idle.resume();
tube.recv();
assert_eq!(sleep(&chan), 1);
}
#[test]
fn pause_pauses() {
let mut tube = Tube::new();
let cb = ~MyCallback(tube.clone(), 1);
let mut idle1 = IdleWatcher::new(local_loop(), cb as ~Callback);
let cb = ~MyCallback(tube.clone(), 2);
let mut idle2 = IdleWatcher::new(local_loop(), cb as ~Callback);
let (mut idle1, chan1) = mk(1);
let (mut idle2, chan2) = mk(2);
idle2.resume();
assert_eq!(tube.recv(), 2);
assert_eq!(sleep(&chan2), 2);
idle2.pause();
idle1.resume();
assert_eq!(tube.recv(), 1);
assert_eq!(sleep(&chan1), 1);
}
}

View File

@ -41,23 +41,23 @@ via `close` and `delete` methods.
#[crate_type = "rlib"];
#[crate_type = "dylib"];
#[feature(macro_rules, globs)];
#[feature(macro_rules)];
#[cfg(test)] extern mod green;
use std::cast::transmute;
use std::cast;
use std::io;
use std::io::IoError;
use std::libc::{c_int, malloc};
use std::ptr::null;
use std::ptr;
use std::rt::BlockedTask;
use std::rt::local::Local;
use std::rt::sched::Scheduler;
use std::rt::task::{BlockedTask, Task};
use std::str::raw::from_c_str;
use std::str;
use std::task;
use std::unstable::finally::Finally;
use std::io::IoError;
pub use self::async::AsyncWatcher;
pub use self::file::{FsRequest, FileWatcher};
pub use self::idle::IdleWatcher;
@ -70,6 +70,9 @@ pub use self::tty::TtyWatcher;
mod macros;
mod queue;
mod homing;
/// The implementation of `rtio` for libuv
pub mod uvio;
@ -144,32 +147,29 @@ pub trait UvHandle<T> {
uvll::free_handle(handle);
if data == ptr::null() { return }
let slot: &mut Option<BlockedTask> = cast::transmute(data);
let sched: ~Scheduler = Local::take();
sched.resume_blocked_task_immediately(slot.take_unwrap());
wakeup(slot);
}
}
}
}
pub struct ForbidSwitch {
msg: &'static str,
sched: uint,
priv msg: &'static str,
priv io: uint,
}
impl ForbidSwitch {
fn new(s: &'static str) -> ForbidSwitch {
let mut sched = Local::borrow(None::<Scheduler>);
ForbidSwitch {
msg: s,
sched: sched.get().sched_id(),
io: homing::local_id(),
}
}
}
impl Drop for ForbidSwitch {
fn drop(&mut self) {
let mut sched = Local::borrow(None::<Scheduler>);
assert!(self.sched == sched.get().sched_id(),
assert!(self.io == homing::local_id(),
"didnt want a scheduler switch: {}",
self.msg);
}
@ -199,14 +199,20 @@ fn wait_until_woken_after(slot: *mut Option<BlockedTask>, f: ||) {
let _f = ForbidUnwind::new("wait_until_woken_after");
unsafe {
assert!((*slot).is_none());
let sched: ~Scheduler = Local::take();
sched.deschedule_running_task_and_then(|_, task| {
f();
let task: ~Task = Local::take();
task.deschedule(1, |task| {
*slot = Some(task);
})
f();
Ok(())
});
}
}
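// Wakes the task previously parked in `slot` and hands it back to the
// scheduler to be rescheduled.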
fn wakeup(slot: &mut Option<BlockedTask>) {
assert!(slot.is_some());
slot.take_unwrap().wake().map(|t| t.reawaken(true));
}
pub struct Request {
handle: *uvll::uv_req_t,
priv defused: bool,
@ -325,28 +331,26 @@ fn error_smoke_test() {
pub fn uv_error_to_io_error(uverr: UvError) -> IoError {
unsafe {
// Importing error constants
use uvll::*;
use std::io::*;
// uv error descriptions are static
let c_desc = uvll::uv_strerror(*uverr);
let desc = str::raw::c_str_to_static_slice(c_desc);
let kind = match *uverr {
UNKNOWN => OtherIoError,
OK => OtherIoError,
EOF => EndOfFile,
EACCES => PermissionDenied,
ECONNREFUSED => ConnectionRefused,
ECONNRESET => ConnectionReset,
ENOENT => FileNotFound,
ENOTCONN => NotConnected,
EPIPE => BrokenPipe,
ECONNABORTED => ConnectionAborted,
uvll::UNKNOWN => io::OtherIoError,
uvll::OK => io::OtherIoError,
uvll::EOF => io::EndOfFile,
uvll::EACCES => io::PermissionDenied,
uvll::ECONNREFUSED => io::ConnectionRefused,
uvll::ECONNRESET => io::ConnectionReset,
uvll::ENOTCONN => io::NotConnected,
uvll::ENOENT => io::FileNotFound,
uvll::EPIPE => io::BrokenPipe,
uvll::ECONNABORTED => io::ConnectionAborted,
err => {
uvdebug!("uverr.code {}", err as int);
// XXX: Need to map remaining uv error types
OtherIoError
io::OtherIoError
}
};
@ -387,15 +391,17 @@ pub fn slice_to_uv_buf(v: &[u8]) -> Buf {
uvll::uv_buf_t { base: data, len: v.len() as uvll::uv_buf_len_t }
}
// This function is full of lies!
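// (It launders a borrow of the task-local I/O factory into a 'static mutable
// reference, which is only tolerable because this is test-only code.)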
#[cfg(test)]
fn local_loop() -> &'static mut Loop {
fn local_loop() -> &'static mut uvio::UvIoFactory {
unsafe {
cast::transmute({
let mut sched = Local::borrow(None::<Scheduler>);
let mut task = Local::borrow(None::<Task>);
let mut io = task.get().local_io().unwrap();
let (_vtable, uvio): (uint, &'static mut uvio::UvIoFactory) =
cast::transmute(sched.get().event_loop.io().unwrap());
cast::transmute(io.get());
uvio
}.uv_loop())
})
}
}

View File

@ -27,18 +27,20 @@ macro_rules! uvdebug (
})
)
// get a handle for the current scheduler
macro_rules! get_handle_to_current_scheduler(
() => ({
let mut sched = Local::borrow(None::<Scheduler>);
sched.get().make_handle()
})
)
pub fn dumb_println(args: &fmt::Arguments) {
use std::io::native::file::FileDesc;
use std::io;
use std::libc;
let mut out = FileDesc::new(libc::STDERR_FILENO, false);
fmt::writeln(&mut out as &mut io::Writer, args);
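// Minimal writer that hands bytes straight to stderr via a raw libc::write,
// bypassing the runtime's I/O machinery (short writes are ignored).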
struct Stderr;
impl io::Writer for Stderr {
fn write(&mut self, data: &[u8]) {
unsafe {
libc::write(libc::STDERR_FILENO,
data.as_ptr() as *libc::c_void,
data.len() as libc::size_t);
}
}
}
let mut w = Stderr;
fmt::writeln(&mut w as &mut io::Writer, args);
}

View File

@ -9,24 +9,22 @@
// except according to those terms.
use std::cast;
use std::libc;
use std::libc::{size_t, ssize_t, c_int, c_void, c_uint, c_char};
use std::ptr;
use std::rt::BlockedTask;
use std::io::IoError;
use std::io::net::ip::{Ipv4Addr, Ipv6Addr, SocketAddr, IpAddr};
use std::rt::local::Local;
use std::libc::{size_t, ssize_t, c_int, c_void, c_uint, c_char};
use std::libc;
use std::ptr;
use std::rt::rtio;
use std::rt::sched::{Scheduler, SchedHandle};
use std::rt::tube::Tube;
use std::rt::task::BlockedTask;
use std::str;
use std::vec;
use homing::{HomingIO, HomeHandle};
use stream::StreamWatcher;
use super::{Loop, Request, UvError, Buf, status_to_io_result,
uv_error_to_io_error, UvHandle, slice_to_uv_buf,
wait_until_woken_after};
use uvio::HomingIO;
wait_until_woken_after, wakeup};
use uvio::UvIoFactory;
use uvll;
use uvll::sockaddr;
@ -88,21 +86,19 @@ pub fn sockaddr_to_socket_addr(addr: *sockaddr) -> SocketAddr {
}
}
#[cfg(test)]
#[test]
fn test_ip4_conversion() {
use std::rt;
let ip4 = rt::test::next_test_ip4();
use std::io::net::ip::{SocketAddr, Ipv4Addr};
let ip4 = SocketAddr { ip: Ipv4Addr(127, 0, 0, 1), port: 4824 };
socket_addr_as_sockaddr(ip4, |addr| {
assert_eq!(ip4, sockaddr_to_socket_addr(addr));
})
}
#[cfg(test)]
#[test]
fn test_ip6_conversion() {
use std::rt;
let ip6 = rt::test::next_test_ip6();
use std::io::net::ip::{SocketAddr, Ipv6Addr};
let ip6 = SocketAddr { ip: Ipv6Addr(0, 0, 0, 0, 0, 0, 0, 1), port: 4824 };
socket_addr_as_sockaddr(ip6, |addr| {
assert_eq!(ip6, sockaddr_to_socket_addr(addr));
})
@ -145,42 +141,47 @@ fn socket_name(sk: SocketNameKind, handle: *c_void) -> Result<SocketAddr, IoErro
pub struct TcpWatcher {
handle: *uvll::uv_tcp_t,
stream: StreamWatcher,
home: SchedHandle,
home: HomeHandle,
}
pub struct TcpListener {
home: SchedHandle,
home: HomeHandle,
handle: *uvll::uv_pipe_t,
priv closing_task: Option<BlockedTask>,
priv outgoing: Tube<Result<~rtio::RtioTcpStream, IoError>>,
priv outgoing: Chan<Result<~rtio::RtioTcpStream, IoError>>,
priv incoming: Port<Result<~rtio::RtioTcpStream, IoError>>,
}
pub struct TcpAcceptor {
listener: ~TcpListener,
priv incoming: Tube<Result<~rtio::RtioTcpStream, IoError>>,
}
// TCP watchers (clients/streams)
impl TcpWatcher {
pub fn new(loop_: &Loop) -> TcpWatcher {
pub fn new(io: &mut UvIoFactory) -> TcpWatcher {
let handle = io.make_handle();
TcpWatcher::new_home(&io.loop_, handle)
}
fn new_home(loop_: &Loop, home: HomeHandle) -> TcpWatcher {
let handle = unsafe { uvll::malloc_handle(uvll::UV_TCP) };
assert_eq!(unsafe {
uvll::uv_tcp_init(loop_.handle, handle)
}, 0);
TcpWatcher {
home: get_handle_to_current_scheduler!(),
home: home,
handle: handle,
stream: StreamWatcher::new(handle),
}
}
pub fn connect(loop_: &mut Loop, address: SocketAddr)
pub fn connect(io: &mut UvIoFactory, address: SocketAddr)
-> Result<TcpWatcher, UvError>
{
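// Shared between this task and the connect callback: the libuv status
// code and the task to wake once the connection attempt finishes.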
struct Ctx { status: c_int, task: Option<BlockedTask> }
let tcp = TcpWatcher::new(loop_);
let tcp = TcpWatcher::new(io);
let ret = socket_addr_as_sockaddr(address, |addr| {
let mut req = Request::new(uvll::UV_CONNECT);
let result = unsafe {
@ -213,14 +214,13 @@ impl TcpWatcher {
assert!(status != uvll::ECANCELED);
let cx: &mut Ctx = unsafe { req.get_data() };
cx.status = status;
let scheduler: ~Scheduler = Local::take();
scheduler.resume_blocked_task_immediately(cx.task.take_unwrap());
wakeup(&mut cx.task);
}
}
}
impl HomingIO for TcpWatcher {
fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home }
fn home<'r>(&'r mut self) -> &'r mut HomeHandle { &mut self.home }
}
impl rtio::RtioSocket for TcpWatcher {
@ -290,17 +290,19 @@ impl Drop for TcpWatcher {
// TCP listeners (unbound servers)
impl TcpListener {
pub fn bind(loop_: &mut Loop, address: SocketAddr)
pub fn bind(io: &mut UvIoFactory, address: SocketAddr)
-> Result<~TcpListener, UvError> {
let handle = unsafe { uvll::malloc_handle(uvll::UV_TCP) };
assert_eq!(unsafe {
uvll::uv_tcp_init(loop_.handle, handle)
uvll::uv_tcp_init(io.uv_loop(), handle)
}, 0);
let (port, chan) = Chan::new();
let l = ~TcpListener {
home: get_handle_to_current_scheduler!(),
home: io.make_handle(),
handle: handle,
closing_task: None,
outgoing: Tube::new(),
outgoing: chan,
incoming: port,
};
let res = socket_addr_as_sockaddr(address, |addr| unsafe {
uvll::uv_tcp_bind(l.handle, addr)
@ -313,7 +315,7 @@ impl TcpListener {
}
impl HomingIO for TcpListener {
fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home }
fn home<'r>(&'r mut self) -> &'r mut HomeHandle { &mut self.home }
}
impl UvHandle<uvll::uv_tcp_t> for TcpListener {
@ -330,11 +332,7 @@ impl rtio::RtioSocket for TcpListener {
impl rtio::RtioTcpListener for TcpListener {
fn listen(mut ~self) -> Result<~rtio::RtioTcpAcceptor, IoError> {
// create the acceptor object from ourselves
let incoming = self.outgoing.clone();
let mut acceptor = ~TcpAcceptor {
listener: self,
incoming: incoming,
};
let mut acceptor = ~TcpAcceptor { listener: self };
let _m = acceptor.fire_homing_missile();
// XXX: the 128 backlog should be configurable
@ -347,19 +345,18 @@ impl rtio::RtioTcpListener for TcpListener {
extern fn listen_cb(server: *uvll::uv_stream_t, status: c_int) {
assert!(status != uvll::ECANCELED);
let tcp: &mut TcpListener = unsafe { UvHandle::from_uv_handle(&server) };
let msg = match status {
0 => {
let loop_ = Loop::wrap(unsafe {
uvll::get_loop_for_uv_handle(server)
});
let client = TcpWatcher::new(&loop_);
let client = TcpWatcher::new_home(&loop_, tcp.home().clone());
assert_eq!(unsafe { uvll::uv_accept(server, client.handle) }, 0);
Ok(~client as ~rtio::RtioTcpStream)
}
n => Err(uv_error_to_io_error(UvError(n)))
};
let tcp: &mut TcpListener = unsafe { UvHandle::from_uv_handle(&server) };
tcp.outgoing.send(msg);
}
@ -373,7 +370,7 @@ impl Drop for TcpListener {
// TCP acceptors (bound servers)
impl HomingIO for TcpAcceptor {
fn home<'r>(&'r mut self) -> &'r mut SchedHandle { self.listener.home() }
fn home<'r>(&'r mut self) -> &'r mut HomeHandle { self.listener.home() }
}
impl rtio::RtioSocket for TcpAcceptor {
@ -385,8 +382,7 @@ impl rtio::RtioSocket for TcpAcceptor {
impl rtio::RtioTcpAcceptor for TcpAcceptor {
fn accept(&mut self) -> Result<~rtio::RtioTcpStream, IoError> {
let _m = self.fire_homing_missile();
self.incoming.recv()
self.listener.incoming.recv()
}
fn accept_simultaneously(&mut self) -> Result<(), IoError> {
@ -410,18 +406,18 @@ impl rtio::RtioTcpAcceptor for TcpAcceptor {
pub struct UdpWatcher {
handle: *uvll::uv_udp_t,
home: SchedHandle,
home: HomeHandle,
}
impl UdpWatcher {
pub fn bind(loop_: &Loop, address: SocketAddr)
pub fn bind(io: &mut UvIoFactory, address: SocketAddr)
-> Result<UdpWatcher, UvError> {
let udp = UdpWatcher {
handle: unsafe { uvll::malloc_handle(uvll::UV_UDP) },
home: get_handle_to_current_scheduler!(),
home: io.make_handle(),
};
assert_eq!(unsafe {
uvll::uv_udp_init(loop_.handle, udp.handle)
uvll::uv_udp_init(io.uv_loop(), udp.handle)
}, 0);
let result = socket_addr_as_sockaddr(address, |addr| unsafe {
uvll::uv_udp_bind(udp.handle, addr, 0u32)
@ -438,7 +434,7 @@ impl UvHandle<uvll::uv_udp_t> for UdpWatcher {
}
impl HomingIO for UdpWatcher {
fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home }
fn home<'r>(&'r mut self) -> &'r mut HomeHandle { &mut self.home }
}
impl rtio::RtioSocket for UdpWatcher {
@ -519,9 +515,7 @@ impl rtio::RtioUdpSocket for UdpWatcher {
Some(sockaddr_to_socket_addr(addr))
};
cx.result = Some((nread, addr));
let sched: ~Scheduler = Local::take();
sched.resume_blocked_task_immediately(cx.task.take_unwrap());
wakeup(&mut cx.task);
}
}
@ -556,9 +550,7 @@ impl rtio::RtioUdpSocket for UdpWatcher {
assert!(status != uvll::ECANCELED);
let cx: &mut Ctx = unsafe { req.get_data() };
cx.result = status;
let sched: ~Scheduler = Local::take();
sched.resume_blocked_task_immediately(cx.task.take_unwrap());
wakeup(&mut cx.task);
}
}
@ -640,18 +632,13 @@ impl Drop for UdpWatcher {
}
}
////////////////////////////////////////////////////////////////////////////////
/// UV request support
////////////////////////////////////////////////////////////////////////////////
#[cfg(test)]
mod test {
use std::rt::test::*;
use std::rt::rtio::{RtioTcpStream, RtioTcpListener, RtioTcpAcceptor,
RtioUdpSocket};
use std::task;
use std::io::test::{next_test_ip4, next_test_ip6};
use super::*;
use super::{UdpWatcher, TcpWatcher, TcpListener};
use super::super::local_loop;
#[test]
@ -824,7 +811,6 @@ mod test {
#[test]
fn test_read_read_read() {
use std::rt::rtio::*;
let addr = next_test_ip4();
static MAX: uint = 5000;
let (port, chan) = Chan::new();
@ -843,20 +829,18 @@ mod test {
}
}
do spawn {
port.recv();
let mut stream = TcpWatcher::connect(local_loop(), addr).unwrap();
let mut buf = [0, .. 2048];
let mut total_bytes_read = 0;
while total_bytes_read < MAX {
let nread = stream.read(buf).unwrap();
total_bytes_read += nread;
for i in range(0u, nread) {
assert_eq!(buf[i], 1);
}
port.recv();
let mut stream = TcpWatcher::connect(local_loop(), addr).unwrap();
let mut buf = [0, .. 2048];
let mut total_bytes_read = 0;
while total_bytes_read < MAX {
let nread = stream.read(buf).unwrap();
total_bytes_read += nread;
for i in range(0u, nread) {
assert_eq!(buf[i], 1);
}
uvdebug!("read {} bytes total", total_bytes_read);
}
uvdebug!("read {} bytes total", total_bytes_read);
}
#[test]
@ -922,65 +906,35 @@ mod test {
assert!(total_bytes_sent >= MAX);
}
do spawn {
let l = local_loop();
let mut client_out = UdpWatcher::bind(l, client_out_addr).unwrap();
let mut client_in = UdpWatcher::bind(l, client_in_addr).unwrap();
let (port, chan) = (p2, c1);
port.recv();
chan.send(());
let mut total_bytes_recv = 0;
let mut buf = [0, .. 2048];
while total_bytes_recv < MAX {
// ask for more
assert!(client_out.sendto([1], server_in_addr).is_ok());
// wait for data
let res = client_in.recvfrom(buf);
assert!(res.is_ok());
let (nread, src) = res.unwrap();
assert_eq!(src, server_out_addr);
total_bytes_recv += nread;
for i in range(0u, nread) {
assert_eq!(buf[i], 1);
}
let l = local_loop();
let mut client_out = UdpWatcher::bind(l, client_out_addr).unwrap();
let mut client_in = UdpWatcher::bind(l, client_in_addr).unwrap();
let (port, chan) = (p2, c1);
port.recv();
chan.send(());
let mut total_bytes_recv = 0;
let mut buf = [0, .. 2048];
while total_bytes_recv < MAX {
// ask for more
assert!(client_out.sendto([1], server_in_addr).is_ok());
// wait for data
let res = client_in.recvfrom(buf);
assert!(res.is_ok());
let (nread, src) = res.unwrap();
assert_eq!(src, server_out_addr);
total_bytes_recv += nread;
for i in range(0u, nread) {
assert_eq!(buf[i], 1);
}
// tell the server we're done
assert!(client_out.sendto([0], server_in_addr).is_ok());
}
// tell the server we're done
assert!(client_out.sendto([0], server_in_addr).is_ok());
}
#[test]
fn test_read_and_block() {
let addr = next_test_ip4();
let (port, chan) = Chan::new();
do spawn {
let listener = TcpListener::bind(local_loop(), addr).unwrap();
let mut acceptor = listener.listen().unwrap();
let (port2, chan2) = Chan::new();
chan.send(port2);
let mut stream = acceptor.accept().unwrap();
let mut buf = [0, .. 2048];
let expected = 32;
let mut current = 0;
let mut reads = 0;
while current < expected {
let nread = stream.read(buf).unwrap();
for i in range(0u, nread) {
let val = buf[i] as uint;
assert_eq!(val, current % 8);
current += 1;
}
reads += 1;
chan2.send(());
}
// Make sure we had multiple reads
assert!(reads > 1);
}
let (port, chan) = Chan::<Port<()>>::new();
do spawn {
let port2 = port.recv();
@ -992,13 +946,39 @@ mod test {
stream.write([0, 1, 2, 3, 4, 5, 6, 7]);
port2.recv();
}
let listener = TcpListener::bind(local_loop(), addr).unwrap();
let mut acceptor = listener.listen().unwrap();
let (port2, chan2) = Chan::new();
chan.send(port2);
let mut stream = acceptor.accept().unwrap();
let mut buf = [0, .. 2048];
let expected = 32;
let mut current = 0;
let mut reads = 0;
while current < expected {
let nread = stream.read(buf).unwrap();
for i in range(0u, nread) {
let val = buf[i] as uint;
assert_eq!(val, current % 8);
current += 1;
}
reads += 1;
chan2.try_send(());
}
// Make sure we had multiple reads
assert!(reads > 1);
}
#[test]
fn test_simple_tcp_server_and_client_on_diff_threads() {
let addr = next_test_ip4();
do task::spawn_sched(task::SingleThreaded) {
do spawn {
let listener = TcpListener::bind(local_loop(), addr).unwrap();
let mut acceptor = listener.listen().unwrap();
let mut stream = acceptor.accept().unwrap();
@ -1010,131 +990,11 @@ mod test {
}
}
do task::spawn_sched(task::SingleThreaded) {
let mut stream = TcpWatcher::connect(local_loop(), addr);
while stream.is_err() {
stream = TcpWatcher::connect(local_loop(), addr);
}
stream.unwrap().write([0, 1, 2, 3, 4, 5, 6, 7]);
}
}
// On one thread, create a udp socket. Then send that socket to another
// thread and destroy the socket on the remote thread. This should ensure
// that homing kicks in: the socket goes back home to the original thread to
// close itself, and the task then comes back to the last thread.
#[test]
fn test_homing_closes_correctly() {
let (port, chan) = Chan::new();
do task::spawn_sched(task::SingleThreaded) {
let listener = UdpWatcher::bind(local_loop(), next_test_ip4()).unwrap();
chan.send(listener);
}
do task::spawn_sched(task::SingleThreaded) {
port.recv();
}
}
// This is a bit of a crufty old test, but it has its uses.
#[test]
fn test_simple_homed_udp_io_bind_then_move_task_then_home_and_close() {
use std::cast;
use std::rt::local::Local;
use std::rt::rtio::{EventLoop, IoFactory};
use std::rt::sched::Scheduler;
use std::rt::sched::{Shutdown, TaskFromFriend};
use std::rt::sleeper_list::SleeperList;
use std::rt::task::Task;
use std::rt::thread::Thread;
use std::rt::deque::BufferPool;
use std::task::TaskResult;
use std::unstable::run_in_bare_thread;
use uvio::UvEventLoop;
do run_in_bare_thread {
let sleepers = SleeperList::new();
let mut pool = BufferPool::new();
let (worker1, stealer1) = pool.deque();
let (worker2, stealer2) = pool.deque();
let queues = ~[stealer1, stealer2];
let loop1 = ~UvEventLoop::new() as ~EventLoop;
let mut sched1 = ~Scheduler::new(loop1, worker1, queues.clone(),
sleepers.clone());
let loop2 = ~UvEventLoop::new() as ~EventLoop;
let mut sched2 = ~Scheduler::new(loop2, worker2, queues.clone(),
sleepers.clone());
let handle1 = sched1.make_handle();
let handle2 = sched2.make_handle();
let tasksFriendHandle = sched2.make_handle();
let on_exit: proc(TaskResult) = proc(exit_status) {
let mut handle1 = handle1;
let mut handle2 = handle2;
handle1.send(Shutdown);
handle2.send(Shutdown);
assert!(exit_status.is_ok());
};
unsafe fn local_io() -> &'static mut IoFactory {
let mut sched = Local::borrow(None::<Scheduler>);
let io = sched.get().event_loop.io();
cast::transmute(io.unwrap())
}
let test_function: proc() = proc() {
let io = unsafe { local_io() };
let addr = next_test_ip4();
let maybe_socket = io.udp_bind(addr);
// this socket is bound to this event loop
assert!(maybe_socket.is_ok());
// block self on sched1
let scheduler: ~Scheduler = Local::take();
let mut tasksFriendHandle = Some(tasksFriendHandle);
scheduler.deschedule_running_task_and_then(|_, task| {
// unblock task
task.wake().map(|task| {
// send self to sched2
tasksFriendHandle.take_unwrap()
.send(TaskFromFriend(task));
});
// sched1 should now sleep since it has nothing else to do
})
// sched2 will wake up and get the task. As we do nothing else, the
// function ends and the socket goes out of scope; sched2 will start to
// run the destructor. The destructor will first block the task, set its
// home as sched1, then enqueue it. sched2 will dequeue the task, see
// that it has a home, and send it to sched1. sched1 will wake up, exec
// the close function on the correct loop, and then we're done.
};
let mut main_task = ~Task::new_root(&mut sched1.stack_pool, None,
test_function);
main_task.death.on_exit = Some(on_exit);
let null_task = ~do Task::new_root(&mut sched2.stack_pool, None) {
// nothing
};
let main_task = main_task;
let sched1 = sched1;
let thread1 = do Thread::start {
sched1.bootstrap(main_task);
};
let sched2 = sched2;
let thread2 = do Thread::start {
sched2.bootstrap(null_task);
};
thread1.join();
thread2.join();
let mut stream = TcpWatcher::connect(local_loop(), addr);
while stream.is_err() {
stream = TcpWatcher::connect(local_loop(), addr);
}
stream.unwrap().write([0, 1, 2, 3, 4, 5, 6, 7]);
}
#[should_fail] #[test]
@ -1176,7 +1036,7 @@ mod test {
// force the handle to be created on a different scheduler, failure in
// the original task will force a homing operation back to this
// scheduler.
do task::spawn_sched(task::SingleThreaded) {
do spawn {
let w = UdpWatcher::bind(local_loop(), addr).unwrap();
chan.send(w);
}
@ -1184,67 +1044,4 @@ mod test {
let _w = port.recv();
fail!();
}
#[should_fail]
#[test]
#[ignore(reason = "linked failure")]
fn linked_failure1() {
let (port, chan) = Chan::new();
let addr = next_test_ip4();
do spawn {
let w = TcpListener::bind(local_loop(), addr).unwrap();
let mut w = w.listen().unwrap();
chan.send(());
w.accept();
}
port.recv();
fail!();
}
#[should_fail]
#[test]
#[ignore(reason = "linked failure")]
fn linked_failure2() {
let (port, chan) = Chan::new();
let addr = next_test_ip4();
do spawn {
let w = TcpListener::bind(local_loop(), addr).unwrap();
let mut w = w.listen().unwrap();
chan.send(());
let mut buf = [0];
w.accept().unwrap().read(buf);
}
port.recv();
let _w = TcpWatcher::connect(local_loop(), addr).unwrap();
fail!();
}
#[should_fail]
#[test]
#[ignore(reason = "linked failure")]
fn linked_failure3() {
let (port, chan) = Chan::new();
let addr = next_test_ip4();
do spawn {
let chan = chan;
let w = TcpListener::bind(local_loop(), addr).unwrap();
let mut w = w.listen().unwrap();
chan.send(());
let mut conn = w.accept().unwrap();
chan.send(());
let buf = [0, ..65536];
conn.write(buf);
}
port.recv();
let _w = TcpWatcher::connect(local_loop(), addr).unwrap();
port.recv();
fail!();
}
}

View File

@ -9,35 +9,33 @@
// except according to those terms.
use std::c_str::CString;
use std::libc;
use std::rt::BlockedTask;
use std::io::IoError;
use std::rt::local::Local;
use std::libc;
use std::rt::rtio::{RtioPipe, RtioUnixListener, RtioUnixAcceptor};
use std::rt::sched::{Scheduler, SchedHandle};
use std::rt::tube::Tube;
use std::rt::task::BlockedTask;
use homing::{HomingIO, HomeHandle};
use stream::StreamWatcher;
use super::{Loop, UvError, UvHandle, Request, uv_error_to_io_error,
wait_until_woken_after};
use uvio::HomingIO;
wait_until_woken_after, wakeup};
use uvio::UvIoFactory;
use uvll;
pub struct PipeWatcher {
stream: StreamWatcher,
home: SchedHandle,
home: HomeHandle,
priv defused: bool,
}
pub struct PipeListener {
home: SchedHandle,
home: HomeHandle,
pipe: *uvll::uv_pipe_t,
priv outgoing: Tube<Result<~RtioPipe, IoError>>,
priv outgoing: Chan<Result<~RtioPipe, IoError>>,
priv incoming: Port<Result<~RtioPipe, IoError>>,
}
pub struct PipeAcceptor {
listener: ~PipeListener,
priv incoming: Tube<Result<~RtioPipe, IoError>>,
}
// PipeWatcher implementation and traits
@ -46,7 +44,12 @@ impl PipeWatcher {
// Creates an uninitialized pipe watcher. The underlying uv pipe is ready to
// get bound to some other source (this is normally a helper method paired
// with another call).
pub fn new(loop_: &Loop, ipc: bool) -> PipeWatcher {
pub fn new(io: &mut UvIoFactory, ipc: bool) -> PipeWatcher {
let home = io.make_handle();
PipeWatcher::new_home(&io.loop_, home, ipc)
}
pub fn new_home(loop_: &Loop, home: HomeHandle, ipc: bool) -> PipeWatcher {
let handle = unsafe {
let handle = uvll::malloc_handle(uvll::UV_NAMED_PIPE);
assert!(!handle.is_null());
@ -56,26 +59,28 @@ impl PipeWatcher {
};
PipeWatcher {
stream: StreamWatcher::new(handle),
home: get_handle_to_current_scheduler!(),
home: home,
defused: false,
}
}
pub fn open(loop_: &Loop, file: libc::c_int) -> Result<PipeWatcher, UvError>
pub fn open(io: &mut UvIoFactory, file: libc::c_int)
-> Result<PipeWatcher, UvError>
{
let pipe = PipeWatcher::new(loop_, false);
let pipe = PipeWatcher::new(io, false);
match unsafe { uvll::uv_pipe_open(pipe.handle(), file) } {
0 => Ok(pipe),
n => Err(UvError(n))
}
}
pub fn connect(loop_: &Loop, name: &CString) -> Result<PipeWatcher, UvError>
pub fn connect(io: &mut UvIoFactory, name: &CString)
-> Result<PipeWatcher, UvError>
{
struct Ctx { task: Option<BlockedTask>, result: libc::c_int, }
let mut cx = Ctx { task: None, result: 0 };
let mut req = Request::new(uvll::UV_CONNECT);
let pipe = PipeWatcher::new(loop_, false);
let pipe = PipeWatcher::new(io, false);
wait_until_woken_after(&mut cx.task, || {
unsafe {
@ -97,8 +102,7 @@ impl PipeWatcher {
assert!(status != uvll::ECANCELED);
let cx: &mut Ctx = unsafe { req.get_data() };
cx.result = status;
let sched: ~Scheduler = Local::take();
sched.resume_blocked_task_immediately(cx.task.take_unwrap());
wakeup(&mut cx.task);
}
}
@ -125,7 +129,7 @@ impl RtioPipe for PipeWatcher {
}
impl HomingIO for PipeWatcher {
fn home<'a>(&'a mut self) -> &'a mut SchedHandle { &mut self.home }
fn home<'a>(&'a mut self) -> &'a mut HomeHandle { &mut self.home }
}
impl UvHandle<uvll::uv_pipe_t> for PipeWatcher {
@ -144,8 +148,10 @@ impl Drop for PipeWatcher {
// PipeListener implementation and traits
impl PipeListener {
pub fn bind(loop_: &Loop, name: &CString) -> Result<~PipeListener, UvError> {
let pipe = PipeWatcher::new(loop_, false);
pub fn bind(io: &mut UvIoFactory, name: &CString)
-> Result<~PipeListener, UvError>
{
let pipe = PipeWatcher::new(io, false);
match unsafe {
uvll::uv_pipe_bind(pipe.handle(), name.with_ref(|p| p))
} {
@ -153,10 +159,12 @@ impl PipeListener {
// If successful, unwrap the PipeWatcher because we close the pipe
// differently and can't rely on StreamWatcher's default close method.
let (port, chan) = Chan::new();
let p = ~PipeListener {
home: get_handle_to_current_scheduler!(),
home: io.make_handle(),
pipe: pipe.unwrap(),
outgoing: Tube::new(),
incoming: port,
outgoing: chan,
};
Ok(p.install())
}
@ -168,11 +176,7 @@ impl PipeListener {
impl RtioUnixListener for PipeListener {
fn listen(mut ~self) -> Result<~RtioUnixAcceptor, IoError> {
// create the acceptor object from ourselves
let incoming = self.outgoing.clone();
let mut acceptor = ~PipeAcceptor {
listener: self,
incoming: incoming,
};
let mut acceptor = ~PipeAcceptor { listener: self };
let _m = acceptor.fire_homing_missile();
// XXX: the 128 backlog should be configurable
@ -184,7 +188,7 @@ impl RtioUnixListener for PipeListener {
}
impl HomingIO for PipeListener {
fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home }
fn home<'r>(&'r mut self) -> &'r mut HomeHandle { &mut self.home }
}
impl UvHandle<uvll::uv_pipe_t> for PipeListener {
@ -193,19 +197,19 @@ impl UvHandle<uvll::uv_pipe_t> for PipeListener {
extern fn listen_cb(server: *uvll::uv_stream_t, status: libc::c_int) {
assert!(status != uvll::ECANCELED);
let pipe: &mut PipeListener = unsafe { UvHandle::from_uv_handle(&server) };
let msg = match status {
0 => {
let loop_ = Loop::wrap(unsafe {
uvll::get_loop_for_uv_handle(server)
});
let client = PipeWatcher::new(&loop_, false);
let client = PipeWatcher::new_home(&loop_, pipe.home().clone(), false);
assert_eq!(unsafe { uvll::uv_accept(server, client.handle()) }, 0);
Ok(~client as ~RtioPipe)
}
n => Err(uv_error_to_io_error(UvError(n)))
};
let pipe: &mut PipeListener = unsafe { UvHandle::from_uv_handle(&server) };
pipe.outgoing.send(msg);
}
@ -220,21 +224,20 @@ impl Drop for PipeListener {
impl RtioUnixAcceptor for PipeAcceptor {
fn accept(&mut self) -> Result<~RtioPipe, IoError> {
let _m = self.fire_homing_missile();
self.incoming.recv()
self.listener.incoming.recv()
}
}
impl HomingIO for PipeAcceptor {
fn home<'r>(&'r mut self) -> &'r mut SchedHandle { self.listener.home() }
fn home<'r>(&'r mut self) -> &'r mut HomeHandle { &mut self.listener.home }
}
#[cfg(test)]
mod tests {
use std::rt::rtio::{RtioUnixListener, RtioUnixAcceptor, RtioPipe};
use std::rt::test::next_test_unix;
use std::io::test::next_test_unix;
use super::*;
use super::{PipeWatcher, PipeListener};
use super::super::local_loop;
#[test]

View File

@ -8,32 +8,31 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::io::IoError;
use std::io::process;
use std::libc::c_int;
use std::libc;
use std::ptr;
use std::rt::BlockedTask;
use std::io::IoError;
use std::io::process::*;
use std::rt::local::Local;
use std::rt::rtio::RtioProcess;
use std::rt::sched::{Scheduler, SchedHandle};
use std::rt::task::BlockedTask;
use std::vec;
use super::{Loop, UvHandle, UvError, uv_error_to_io_error,
wait_until_woken_after};
use uvio::HomingIO;
use uvll;
use homing::{HomingIO, HomeHandle};
use pipe::PipeWatcher;
use super::{UvHandle, UvError, uv_error_to_io_error,
wait_until_woken_after, wakeup};
use uvio::UvIoFactory;
use uvll;
pub struct Process {
handle: *uvll::uv_process_t,
home: SchedHandle,
home: HomeHandle,
/// Task to wake up (may be null) for when the process exits
to_wake: Option<BlockedTask>,
/// Collected from the exit_cb
exit_status: Option<ProcessExit>,
exit_status: Option<process::ProcessExit>,
}
impl Process {
@ -41,7 +40,7 @@ impl Process {
///
/// Returns either the corresponding process object or an error which
/// occurred.
pub fn spawn(loop_: &Loop, config: ProcessConfig)
pub fn spawn(io_loop: &mut UvIoFactory, config: process::ProcessConfig)
-> Result<(~Process, ~[Option<PipeWatcher>]), UvError>
{
let cwd = config.cwd.map(|s| s.to_c_str());
@ -52,7 +51,7 @@ impl Process {
stdio.set_len(io.len());
for (slot, other) in stdio.iter().zip(io.iter()) {
let io = set_stdio(slot as *uvll::uv_stdio_container_t, other,
loop_);
io_loop);
ret_io.push(io);
}
}
@ -78,12 +77,12 @@ impl Process {
let handle = UvHandle::alloc(None::<Process>, uvll::UV_PROCESS);
let process = ~Process {
handle: handle,
home: get_handle_to_current_scheduler!(),
home: io_loop.make_handle(),
to_wake: None,
exit_status: None,
};
match unsafe {
uvll::uv_spawn(loop_.handle, handle, &options)
uvll::uv_spawn(io_loop.uv_loop(), handle, &options)
} {
0 => Ok(process.install()),
err => Err(UvError(err)),
@ -105,33 +104,28 @@ extern fn on_exit(handle: *uvll::uv_process_t,
assert!(p.exit_status.is_none());
p.exit_status = Some(match term_signal {
0 => ExitStatus(exit_status as int),
n => ExitSignal(n as int),
0 => process::ExitStatus(exit_status as int),
n => process::ExitSignal(n as int),
});
match p.to_wake.take() {
Some(task) => {
let scheduler: ~Scheduler = Local::take();
scheduler.resume_blocked_task_immediately(task);
}
None => {}
}
if p.to_wake.is_none() { return }
wakeup(&mut p.to_wake);
}
unsafe fn set_stdio(dst: *uvll::uv_stdio_container_t,
io: &StdioContainer,
loop_: &Loop) -> Option<PipeWatcher> {
io: &process::StdioContainer,
io_loop: &mut UvIoFactory) -> Option<PipeWatcher> {
match *io {
Ignored => {
process::Ignored => {
uvll::set_stdio_container_flags(dst, uvll::STDIO_IGNORE);
None
}
InheritFd(fd) => {
process::InheritFd(fd) => {
uvll::set_stdio_container_flags(dst, uvll::STDIO_INHERIT_FD);
uvll::set_stdio_container_fd(dst, fd);
None
}
CreatePipe(readable, writable) => {
process::CreatePipe(readable, writable) => {
let mut flags = uvll::STDIO_CREATE_PIPE as libc::c_int;
if readable {
flags |= uvll::STDIO_READABLE_PIPE as libc::c_int;
@ -139,7 +133,7 @@ unsafe fn set_stdio(dst: *uvll::uv_stdio_container_t,
if writable {
flags |= uvll::STDIO_WRITABLE_PIPE as libc::c_int;
}
let pipe = PipeWatcher::new(loop_, false);
let pipe = PipeWatcher::new(io_loop, false);
uvll::set_stdio_container_flags(dst, flags);
uvll::set_stdio_container_stream(dst, pipe.handle());
Some(pipe)
@ -186,7 +180,7 @@ fn with_env<T>(env: Option<&[(~str, ~str)]>, f: |**libc::c_char| -> T) -> T {
}
impl HomingIO for Process {
fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home }
fn home<'r>(&'r mut self) -> &'r mut HomeHandle { &mut self.home }
}
impl UvHandle<uvll::uv_process_t> for Process {
@ -208,7 +202,7 @@ impl RtioProcess for Process {
}
}
fn wait(&mut self) -> ProcessExit {
fn wait(&mut self) -> process::ProcessExit {
// Make sure (on the home scheduler) that we have an exit status listed
let _m = self.fire_homing_missile();
match self.exit_status {

192
src/librustuv/queue.rs Normal file
View File

@ -0,0 +1,192 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A concurrent queue used to signal remote event loops
//!
//! This queue implementation is used to send tasks among event loops. This is
//! backed by a multi-producer/single-consumer queue from libstd and uv_async_t
//! handles (to wake up a remote event loop).
//!
//! The uv_async_t is stored next to the event loop, so to avoid keeping the
//! event loop alive we use uv_ref and uv_unref to control when the async
//! handle is active.
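//!
//! A rough usage sketch (hypothetical, for illustration only):
//!
//!     let mut pool = QueuePool::new(&mut loop_); // lives next to the event loop
//!     let queue = pool.queue(); // cloneable handle, sendable to other threads
//!     queue.push(task);         // enqueue a BlockedTask, then uv_async_send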
#[allow(dead_code)];
use std::cast;
use std::libc::{c_void, c_int};
use std::rt::task::BlockedTask;
use std::unstable::sync::LittleLock;
use mpsc = std::sync::mpsc_queue;
use async::AsyncWatcher;
use super::{Loop, UvHandle};
use uvll;
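// Messages sent across the MPSC queue: a task for the consumer to wake, or a
// reference-count adjustment for the async handle.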
enum Message {
Task(BlockedTask),
Increment,
Decrement,
}
struct State {
handle: *uvll::uv_async_t,
lock: LittleLock, // see comments in async_cb for why this is needed
}
/// This structure is intended to be stored next to the event loop, and it is
/// used to create new `Queue` structures.
pub struct QueuePool {
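// Both ends of the shared MPSC queue; `refcnt` counts the outstanding
// `Queue` handles that keep the async handle uv_ref'd.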
priv producer: mpsc::Producer<Message, State>,
priv consumer: mpsc::Consumer<Message, State>,
priv refcnt: uint,
}
/// This type is used to send messages back to the original event loop.
pub struct Queue {
priv queue: mpsc::Producer<Message, State>,
}
extern fn async_cb(handle: *uvll::uv_async_t, status: c_int) {
assert_eq!(status, 0);
let state: &mut QueuePool = unsafe {
cast::transmute(uvll::get_data_for_uv_handle(handle))
};
let packet = unsafe { state.consumer.packet() };
// Remember that there is no guarantee about how many times an async
// callback is called in relation to the number of sends, so process the
// entire queue in a loop.
loop {
match state.consumer.pop() {
mpsc::Data(Task(task)) => {
task.wake().map(|t| t.reawaken(true));
}
mpsc::Data(Increment) => unsafe {
if state.refcnt == 0 {
uvll::uv_ref((*packet).handle);
}
state.refcnt += 1;
},
mpsc::Data(Decrement) => unsafe {
state.refcnt -= 1;
if state.refcnt == 0 {
uvll::uv_unref((*packet).handle);
}
},
mpsc::Empty | mpsc::Inconsistent => break
};
}
// If the refcount is now zero after processing the queue, then there is no
// longer a reference on the async handle and it is possible that this event
// loop can exit. What we're not guaranteed, however, is that a producer in
// the middle of dropping itself is done with the handle yet. It is possible
// that we saw their Decrement message but they have yet to signal on the
// async handle. If we were to return immediately, the entire uv loop could
// be destroyed, meaning the call to uv_async_send would abort().
//
// In order to fix this, an OS mutex is used to wait for the other end to
// finish before we continue. The drop block on a handle will acquire a
// mutex and then drop it after both the push and send have been completed.
// If we acquire the mutex here, then we are guaranteed that there are no
// longer any senders which are holding on to their handles, so we can
// safely allow the event loop to exit.
if state.refcnt == 0 {
unsafe {
let _l = (*packet).lock.lock();
}
}
}
impl QueuePool {
pub fn new(loop_: &mut Loop) -> ~QueuePool {
let handle = UvHandle::alloc(None::<AsyncWatcher>, uvll::UV_ASYNC);
let (c, p) = mpsc::queue(State {
handle: handle,
lock: LittleLock::new(),
});
let q = ~QueuePool {
producer: p,
consumer: c,
refcnt: 0,
};
unsafe {
assert_eq!(uvll::uv_async_init(loop_.handle, handle, async_cb), 0);
uvll::uv_unref(handle);
let data: *c_void = *cast::transmute::<&~QueuePool, &*c_void>(&q);
uvll::set_data_for_uv_handle(handle, data);
}
return q;
}
pub fn queue(&mut self) -> Queue {
unsafe {
if self.refcnt == 0 {
uvll::uv_ref((*self.producer.packet()).handle);
}
self.refcnt += 1;
}
Queue { queue: self.producer.clone() }
}
pub fn handle(&self) -> *uvll::uv_async_t {
unsafe { (*self.producer.packet()).handle }
}
}
impl Queue {
pub fn push(&mut self, task: BlockedTask) {
self.queue.push(Task(task));
unsafe {
uvll::uv_async_send((*self.queue.packet()).handle);
}
}
}
impl Clone for Queue {
fn clone(&self) -> Queue {
// Push a request to increment on the queue, but there's no need to
// signal the event loop to process it at this time. We're guaranteed
// that the count is at least one (because we have a queue right here),
// and if this queue is dropped later on, the consumer will see the
// Increment before the corresponding Decrement anyway.
unsafe {
cast::transmute_mut(self).queue.push(Increment);
}
Queue { queue: self.queue.clone() }
}
}
impl Drop for Queue {
fn drop(&mut self) {
// See the comments in the async_cb function for why there is a lock
// that is acquired only on a drop.
unsafe {
let state = self.queue.packet();
let _l = (*state).lock.lock();
self.queue.push(Decrement);
uvll::uv_async_send((*state).handle);
}
}
}
impl Drop for State {
fn drop(&mut self) {
unsafe {
uvll::uv_close(self.handle, cast::transmute(0));
// Note that this does *not* free the handle, that is the
// responsibility of the caller because the uv loop must be closed
// before we deallocate this uv handle.
}
}
}

View File

@ -10,34 +10,33 @@
use std::libc::c_int;
use std::io::signal::Signum;
use std::rt::sched::{SchedHandle, Scheduler};
use std::comm::SharedChan;
use std::rt::local::Local;
use std::rt::rtio::RtioSignal;
use super::{Loop, UvError, UvHandle};
use homing::{HomingIO, HomeHandle};
use super::{UvError, UvHandle};
use uvll;
use uvio::HomingIO;
use uvio::UvIoFactory;
pub struct SignalWatcher {
handle: *uvll::uv_signal_t,
home: SchedHandle,
home: HomeHandle,
channel: SharedChan<Signum>,
signal: Signum,
}
impl SignalWatcher {
pub fn new(loop_: &mut Loop, signum: Signum,
pub fn new(io: &mut UvIoFactory, signum: Signum,
channel: SharedChan<Signum>) -> Result<~SignalWatcher, UvError> {
let s = ~SignalWatcher {
handle: UvHandle::alloc(None::<SignalWatcher>, uvll::UV_SIGNAL),
home: get_handle_to_current_scheduler!(),
home: io.make_handle(),
channel: channel,
signal: signum,
};
assert_eq!(unsafe {
uvll::uv_signal_init(loop_.handle, s.handle)
uvll::uv_signal_init(io.uv_loop(), s.handle)
}, 0);
match unsafe {
@ -53,11 +52,11 @@ impl SignalWatcher {
extern fn signal_cb(handle: *uvll::uv_signal_t, signum: c_int) {
let s: &mut SignalWatcher = unsafe { UvHandle::from_uv_handle(&handle) };
assert_eq!(signum as int, s.signal as int);
s.channel.send_deferred(s.signal);
s.channel.try_send(s.signal);
}
impl HomingIO for SignalWatcher {
fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home }
fn home<'r>(&'r mut self) -> &'r mut HomeHandle { &mut self.home }
}
impl UvHandle<uvll::uv_signal_t> for SignalWatcher {
@ -69,15 +68,15 @@ impl RtioSignal for SignalWatcher {}
impl Drop for SignalWatcher {
fn drop(&mut self) {
let _m = self.fire_homing_missile();
self.close_async_();
self.close();
}
}
#[cfg(test)]
mod test {
use super::*;
use super::super::local_loop;
use std::io::signal;
use super::SignalWatcher;
#[test]
fn closing_channel_during_drop_doesnt_kill_everything() {

View File

@ -11,12 +11,10 @@
use std::cast;
use std::libc::{c_int, size_t, ssize_t};
use std::ptr;
use std::rt::BlockedTask;
use std::rt::local::Local;
use std::rt::sched::Scheduler;
use std::rt::task::BlockedTask;
use super::{UvError, Buf, slice_to_uv_buf, Request, wait_until_woken_after,
ForbidUnwind};
ForbidUnwind, wakeup};
use uvll;
// This is a helper structure which is intended to get embedded into other
@ -164,8 +162,7 @@ extern fn read_cb(handle: *uvll::uv_stream_t, nread: ssize_t, _buf: *Buf) {
unsafe { assert_eq!(uvll::uv_read_stop(handle), 0); }
rcx.result = nread;
let scheduler: ~Scheduler = Local::take();
scheduler.resume_blocked_task_immediately(rcx.task.take_unwrap());
wakeup(&mut rcx.task);
}
// Unlike reading, the WriteContext is stored in the uv_write_t request. Like
@ -180,6 +177,5 @@ extern fn write_cb(req: *uvll::uv_write_t, status: c_int) {
wcx.result = status;
req.defuse();
let sched: ~Scheduler = Local::take();
sched.resume_blocked_task_immediately(wcx.task.take_unwrap());
wakeup(&mut wcx.task);
}

View File

@ -9,19 +9,19 @@
// except according to those terms.
use std::libc::c_int;
use std::rt::BlockedTask;
use std::rt::local::Local;
use std::rt::rtio::RtioTimer;
use std::rt::sched::{Scheduler, SchedHandle};
use std::rt::task::{BlockedTask, Task};
use std::util;
use homing::{HomeHandle, HomingIO};
use super::{UvHandle, ForbidUnwind, ForbidSwitch};
use uvio::UvIoFactory;
use uvll;
use super::{Loop, UvHandle, ForbidUnwind, ForbidSwitch};
use uvio::HomingIO;
pub struct TimerWatcher {
handle: *uvll::uv_timer_t,
home: SchedHandle,
home: HomeHandle,
action: Option<NextAction>,
id: uint, // see comments in timer_cb
}
@ -33,15 +33,15 @@ pub enum NextAction {
}
impl TimerWatcher {
pub fn new(loop_: &mut Loop) -> ~TimerWatcher {
pub fn new(io: &mut UvIoFactory) -> ~TimerWatcher {
let handle = UvHandle::alloc(None::<TimerWatcher>, uvll::UV_TIMER);
assert_eq!(unsafe {
uvll::uv_timer_init(loop_.handle, handle)
uvll::uv_timer_init(io.uv_loop(), handle)
}, 0);
let me = ~TimerWatcher {
handle: handle,
action: None,
home: get_handle_to_current_scheduler!(),
home: io.make_handle(),
id: 0,
};
return me.install();
@ -59,7 +59,7 @@ impl TimerWatcher {
}
impl HomingIO for TimerWatcher {
fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home }
fn home<'r>(&'r mut self) -> &'r mut HomeHandle { &mut self.home }
}
impl UvHandle<uvll::uv_timer_t> for TimerWatcher {
@ -89,10 +89,11 @@ impl RtioTimer for TimerWatcher {
// started, then we need to call stop on the timer.
let _f = ForbidUnwind::new("timer");
let sched: ~Scheduler = Local::take();
sched.deschedule_running_task_and_then(|_sched, task| {
let task: ~Task = Local::take();
task.deschedule(1, |task| {
self.action = Some(WakeTask(task));
self.start(msecs, 0);
Ok(())
});
self.stop();
}
@ -137,12 +138,11 @@ extern fn timer_cb(handle: *uvll::uv_timer_t, status: c_int) {
match timer.action.take_unwrap() {
WakeTask(task) => {
let sched: ~Scheduler = Local::take();
sched.resume_blocked_task_immediately(task);
task.wake().map(|t| t.reawaken(true));
}
SendOnce(chan) => { chan.try_send_deferred(()); }
SendOnce(chan) => { chan.try_send(()); }
SendMany(chan, id) => {
chan.try_send_deferred(());
chan.try_send(());
// Note that the above operation could have performed some form of
// scheduling. This means that the timer may have decided to insert
@ -169,7 +169,7 @@ impl Drop for TimerWatcher {
let _action = {
let _m = self.fire_homing_missile();
self.stop();
self.close_async_();
self.close();
self.action.take()
};
}
@ -177,9 +177,9 @@ impl Drop for TimerWatcher {
#[cfg(test)]
mod test {
use super::*;
use std::rt::rtio::RtioTimer;
use super::super::local_loop;
use super::TimerWatcher;
#[test]
fn oneshot() {
@ -207,9 +207,9 @@ mod test {
let port = timer.period(1);
port.recv();
port.recv();
let port = timer.period(1);
port.recv();
port.recv();
let port2 = timer.period(1);
port2.recv();
port2.recv();
}
#[test]

View File

@ -10,24 +10,23 @@
use std::libc;
use std::io::IoError;
use std::rt::local::Local;
use std::rt::rtio::RtioTTY;
use std::rt::sched::{Scheduler, SchedHandle};
use homing::{HomingIO, HomeHandle};
use stream::StreamWatcher;
use super::{Loop, UvError, UvHandle, uv_error_to_io_error};
use uvio::HomingIO;
use super::{UvError, UvHandle, uv_error_to_io_error};
use uvio::UvIoFactory;
use uvll;
pub struct TtyWatcher{
tty: *uvll::uv_tty_t,
stream: StreamWatcher,
home: SchedHandle,
home: HomeHandle,
fd: libc::c_int,
}
impl TtyWatcher {
pub fn new(loop_: &Loop, fd: libc::c_int, readable: bool)
pub fn new(io: &mut UvIoFactory, fd: libc::c_int, readable: bool)
-> Result<TtyWatcher, UvError>
{
// libuv may succeed in giving us a handle (via uv_tty_init), but if the
@ -56,14 +55,14 @@ impl TtyWatcher {
// with attempting to open it as a tty.
let handle = UvHandle::alloc(None::<TtyWatcher>, uvll::UV_TTY);
match unsafe {
uvll::uv_tty_init(loop_.handle, handle, fd as libc::c_int,
uvll::uv_tty_init(io.uv_loop(), handle, fd as libc::c_int,
readable as libc::c_int)
} {
0 => {
Ok(TtyWatcher {
tty: handle,
stream: StreamWatcher::new(handle),
home: get_handle_to_current_scheduler!(),
home: io.make_handle(),
fd: fd,
})
}
@ -120,7 +119,7 @@ impl UvHandle<uvll::uv_tty_t> for TtyWatcher {
}
impl HomingIO for TtyWatcher {
fn home<'a>(&'a mut self) -> &'a mut SchedHandle { &mut self.home }
fn home<'a>(&'a mut self) -> &'a mut HomeHandle { &mut self.home }
}
impl Drop for TtyWatcher {

View File

@ -9,121 +9,41 @@
// except according to those terms.
use std::c_str::CString;
use std::cast;
use std::comm::SharedChan;
use std::libc::c_int;
use std::libc;
use std::path::Path;
use std::io::IoError;
use std::io::net::ip::SocketAddr;
use std::io::process::ProcessConfig;
use std::io;
use std::rt::local::Local;
use std::rt::rtio::*;
use std::rt::sched::{Scheduler, SchedHandle};
use std::rt::task::Task;
use std::libc::{O_CREAT, O_APPEND, O_TRUNC, O_RDWR, O_RDONLY, O_WRONLY,
S_IRUSR, S_IWUSR};
use std::io::{FileMode, FileAccess, Open, Append, Truncate, Read, Write,
ReadWrite, FileStat};
use std::io::signal::Signum;
use std::io::{FileMode, FileAccess, Open, Append, Truncate, Read, Write,
ReadWrite, FileStat};
use std::io;
use std::libc::c_int;
use std::libc::{O_CREAT, O_APPEND, O_TRUNC, O_RDWR, O_RDONLY, O_WRONLY, S_IRUSR,
S_IWUSR};
use std::libc;
use std::path::Path;
use std::rt::rtio;
use std::rt::rtio::IoFactory;
use ai = std::io::net::addrinfo;
#[cfg(test)] use std::unstable::run_in_bare_thread;
use super::*;
use super::{uv_error_to_io_error, Loop};
use addrinfo::GetAddrInfoRequest;
pub trait HomingIO {
fn home<'r>(&'r mut self) -> &'r mut SchedHandle;
/// This function will move tasks to run on their home I/O scheduler. Note
/// that this function does *not* pin the task to the I/O scheduler, but
/// rather it simply moves it to running on the I/O scheduler.
fn go_to_IO_home(&mut self) -> uint {
use std::rt::sched::RunOnce;
let _f = ForbidUnwind::new("going home");
let current_sched_id = {
let mut sched = Local::borrow(None::<Scheduler>);
sched.get().sched_id()
};
// Only need to invoke a context switch if we're not on the right
// scheduler.
if current_sched_id != self.home().sched_id {
let scheduler: ~Scheduler = Local::take();
scheduler.deschedule_running_task_and_then(|_, task| {
task.wake().map(|task| {
self.home().send(RunOnce(task));
});
})
}
let current_sched_id = {
let mut sched = Local::borrow(None::<Scheduler>);
sched.get().sched_id()
};
assert!(current_sched_id == self.home().sched_id);
self.home().sched_id
}
/// Fires a single homing missile, returning another missile targeted back
/// at the original home of this task. In other words, this function will
/// move the local task to its I/O scheduler and then return an RAII wrapper
/// which will return the task home.
fn fire_homing_missile(&mut self) -> HomingMissile {
HomingMissile { io_home: self.go_to_IO_home() }
}
/// Same as `fire_homing_missile`, but returns the local I/O scheduler as
/// well (the one that was homed to).
fn fire_homing_missile_sched(&mut self) -> (HomingMissile, ~Scheduler) {
// First, transplant ourselves to the home I/O scheduler
let missile = self.fire_homing_missile();
// Next (must happen next), grab the local I/O scheduler
let io_sched: ~Scheduler = Local::take();
(missile, io_sched)
}
}
/// After a homing operation has been completed, this will return the current
/// task back to its appropriate home (if applicable). The field is used to
/// assert that we are where we think we are.
struct HomingMissile {
priv io_home: uint,
}
impl HomingMissile {
pub fn check(&self, msg: &'static str) {
let mut sched = Local::borrow(None::<Scheduler>);
let local_id = sched.get().sched_id();
assert!(local_id == self.io_home, "{}", msg);
}
}
impl Drop for HomingMissile {
fn drop(&mut self) {
let _f = ForbidUnwind::new("leaving home");
// It would truly be a sad day if we had moved off the home I/O
// scheduler while we were doing I/O.
self.check("task moved away from the home scheduler");
// If we were a homed task, then we must send ourselves back to the
// original scheduler. Otherwise, we can just return and keep running
if !Task::on_appropriate_sched() {
let scheduler: ~Scheduler = Local::take();
scheduler.deschedule_running_task_and_then(|_, task| {
task.wake().map(|task| {
Scheduler::run_task(task);
});
})
}
}
}
use async::AsyncWatcher;
use file::{FsRequest, FileWatcher};
use queue::QueuePool;
use homing::HomeHandle;
use idle::IdleWatcher;
use net::{TcpWatcher, TcpListener, UdpWatcher};
use pipe::{PipeWatcher, PipeListener};
use process::Process;
use signal::SignalWatcher;
use timer::TimerWatcher;
use tty::TtyWatcher;
use uvll;
// Obviously an Event Loop is always home.
pub struct UvEventLoop {
@ -132,49 +52,65 @@ pub struct UvEventLoop {
impl UvEventLoop {
pub fn new() -> UvEventLoop {
let mut loop_ = Loop::new();
let handle_pool = QueuePool::new(&mut loop_);
UvEventLoop {
uvio: UvIoFactory(Loop::new())
uvio: UvIoFactory {
loop_: loop_,
handle_pool: Some(handle_pool),
}
}
}
}
impl Drop for UvEventLoop {
fn drop(&mut self) {
self.uvio.uv_loop().close();
// Must first destroy the pool of handles before we destroy the loop
// because otherwise the contained async handle will be destroyed after
// the loop is free'd (use-after-free). We must also free the uv handle
// only after the loop has been closed, because the handle is apparently
// still used while the loop is closing.
let handle = self.uvio.handle_pool.get_ref().handle();
self.uvio.handle_pool.take();
self.uvio.loop_.close();
unsafe { uvll::free_handle(handle) }
}
}
impl EventLoop for UvEventLoop {
impl rtio::EventLoop for UvEventLoop {
fn run(&mut self) {
self.uvio.uv_loop().run();
self.uvio.loop_.run();
}
fn callback(&mut self, f: proc()) {
IdleWatcher::onetime(self.uvio.uv_loop(), f);
IdleWatcher::onetime(&mut self.uvio.loop_, f);
}
fn pausable_idle_callback(&mut self, cb: ~Callback) -> ~PausableIdleCallback {
IdleWatcher::new(self.uvio.uv_loop(), cb) as ~PausableIdleCallback
fn pausable_idle_callback(&mut self, cb: ~rtio::Callback)
-> ~rtio::PausableIdleCallback
{
IdleWatcher::new(&mut self.uvio.loop_, cb) as ~rtio::PausableIdleCallback
}
fn remote_callback(&mut self, f: ~Callback) -> ~RemoteCallback {
~AsyncWatcher::new(self.uvio.uv_loop(), f) as ~RemoteCallback
fn remote_callback(&mut self, f: ~rtio::Callback) -> ~rtio::RemoteCallback {
~AsyncWatcher::new(&mut self.uvio.loop_, f) as ~rtio::RemoteCallback
}
fn io<'a>(&'a mut self) -> Option<&'a mut IoFactory> {
let factory = &mut self.uvio as &mut IoFactory;
fn io<'a>(&'a mut self) -> Option<&'a mut rtio::IoFactory> {
let factory = &mut self.uvio as &mut rtio::IoFactory;
Some(factory)
}
}
#[cfg(not(test))]
#[lang = "event_loop_factory"]
pub extern "C" fn new_loop() -> ~EventLoop {
~UvEventLoop::new() as ~EventLoop
pub fn new_loop() -> ~rtio::EventLoop {
~UvEventLoop::new() as ~rtio::EventLoop
}
#[test]
fn test_callback_run_once() {
use std::rt::rtio::EventLoop;
do run_in_bare_thread {
let mut event_loop = UvEventLoop::new();
let mut count = 0;
@ -187,11 +123,19 @@ fn test_callback_run_once() {
}
}
pub struct UvIoFactory(Loop);
pub struct UvIoFactory {
loop_: Loop,
priv handle_pool: Option<~QueuePool>,
}
impl UvIoFactory {
pub fn uv_loop<'a>(&'a mut self) -> &'a mut Loop {
match self { &UvIoFactory(ref mut ptr) => ptr }
pub fn uv_loop<'a>(&mut self) -> *uvll::uv_loop_t { self.loop_.handle }
pub fn make_handle(&mut self) -> HomeHandle {
// It's understood by the homing code that the "local id" is just the
// pointer of the local I/O factory cast to a uint.
let id: uint = unsafe { cast::transmute_copy(&self) };
HomeHandle::new(id, &mut **self.handle_pool.get_mut_ref())
}
}
@ -200,46 +144,45 @@ impl IoFactory for UvIoFactory {
// NB: This blocks the task waiting on the connection.
// It would probably be better to return a future
fn tcp_connect(&mut self, addr: SocketAddr)
-> Result<~RtioTcpStream, IoError>
-> Result<~rtio::RtioTcpStream, IoError>
{
match TcpWatcher::connect(self.uv_loop(), addr) {
Ok(t) => Ok(~t as ~RtioTcpStream),
match TcpWatcher::connect(self, addr) {
Ok(t) => Ok(~t as ~rtio::RtioTcpStream),
Err(e) => Err(uv_error_to_io_error(e)),
}
}
fn tcp_bind(&mut self, addr: SocketAddr) -> Result<~RtioTcpListener, IoError> {
match TcpListener::bind(self.uv_loop(), addr) {
Ok(t) => Ok(t as ~RtioTcpListener),
fn tcp_bind(&mut self, addr: SocketAddr) -> Result<~rtio::RtioTcpListener, IoError> {
match TcpListener::bind(self, addr) {
Ok(t) => Ok(t as ~rtio::RtioTcpListener),
Err(e) => Err(uv_error_to_io_error(e)),
}
}
fn udp_bind(&mut self, addr: SocketAddr) -> Result<~RtioUdpSocket, IoError> {
match UdpWatcher::bind(self.uv_loop(), addr) {
Ok(u) => Ok(~u as ~RtioUdpSocket),
fn udp_bind(&mut self, addr: SocketAddr) -> Result<~rtio::RtioUdpSocket, IoError> {
match UdpWatcher::bind(self, addr) {
Ok(u) => Ok(~u as ~rtio::RtioUdpSocket),
Err(e) => Err(uv_error_to_io_error(e)),
}
}
fn timer_init(&mut self) -> Result<~RtioTimer, IoError> {
Ok(TimerWatcher::new(self.uv_loop()) as ~RtioTimer)
fn timer_init(&mut self) -> Result<~rtio::RtioTimer, IoError> {
Ok(TimerWatcher::new(self) as ~rtio::RtioTimer)
}
fn get_host_addresses(&mut self, host: Option<&str>, servname: Option<&str>,
hint: Option<ai::Hint>) -> Result<~[ai::Info], IoError> {
let r = GetAddrInfoRequest::run(self.uv_loop(), host, servname, hint);
let r = GetAddrInfoRequest::run(&self.loop_, host, servname, hint);
r.map_err(uv_error_to_io_error)
}
fn fs_from_raw_fd(&mut self, fd: c_int,
close: CloseBehavior) -> ~RtioFileStream {
let loop_ = Loop::wrap(self.uv_loop().handle);
~FileWatcher::new(loop_, fd, close) as ~RtioFileStream
close: rtio::CloseBehavior) -> ~rtio::RtioFileStream {
~FileWatcher::new(self, fd, close) as ~rtio::RtioFileStream
}
fn fs_open(&mut self, path: &CString, fm: FileMode, fa: FileAccess)
-> Result<~RtioFileStream, IoError> {
-> Result<~rtio::RtioFileStream, IoError> {
let flags = match fm {
io::Open => 0,
io::Append => libc::O_APPEND,
@ -254,117 +197,117 @@ impl IoFactory for UvIoFactory {
libc::S_IRUSR | libc::S_IWUSR),
};
match FsRequest::open(self.uv_loop(), path, flags as int, mode as int) {
Ok(fs) => Ok(~fs as ~RtioFileStream),
match FsRequest::open(self, path, flags as int, mode as int) {
Ok(fs) => Ok(~fs as ~rtio::RtioFileStream),
Err(e) => Err(uv_error_to_io_error(e))
}
}
fn fs_unlink(&mut self, path: &CString) -> Result<(), IoError> {
let r = FsRequest::unlink(self.uv_loop(), path);
let r = FsRequest::unlink(&self.loop_, path);
r.map_err(uv_error_to_io_error)
}
fn fs_lstat(&mut self, path: &CString) -> Result<FileStat, IoError> {
let r = FsRequest::lstat(self.uv_loop(), path);
let r = FsRequest::lstat(&self.loop_, path);
r.map_err(uv_error_to_io_error)
}
fn fs_stat(&mut self, path: &CString) -> Result<FileStat, IoError> {
let r = FsRequest::stat(self.uv_loop(), path);
let r = FsRequest::stat(&self.loop_, path);
r.map_err(uv_error_to_io_error)
}
fn fs_mkdir(&mut self, path: &CString,
perm: io::FilePermission) -> Result<(), IoError> {
let r = FsRequest::mkdir(self.uv_loop(), path, perm as c_int);
let r = FsRequest::mkdir(&self.loop_, path, perm as c_int);
r.map_err(uv_error_to_io_error)
}
fn fs_rmdir(&mut self, path: &CString) -> Result<(), IoError> {
let r = FsRequest::rmdir(self.uv_loop(), path);
let r = FsRequest::rmdir(&self.loop_, path);
r.map_err(uv_error_to_io_error)
}
fn fs_rename(&mut self, path: &CString, to: &CString) -> Result<(), IoError> {
let r = FsRequest::rename(self.uv_loop(), path, to);
let r = FsRequest::rename(&self.loop_, path, to);
r.map_err(uv_error_to_io_error)
}
fn fs_chmod(&mut self, path: &CString,
perm: io::FilePermission) -> Result<(), IoError> {
let r = FsRequest::chmod(self.uv_loop(), path, perm as c_int);
let r = FsRequest::chmod(&self.loop_, path, perm as c_int);
r.map_err(uv_error_to_io_error)
}
fn fs_readdir(&mut self, path: &CString, flags: c_int)
-> Result<~[Path], IoError>
{
let r = FsRequest::readdir(self.uv_loop(), path, flags);
let r = FsRequest::readdir(&self.loop_, path, flags);
r.map_err(uv_error_to_io_error)
}
fn fs_link(&mut self, src: &CString, dst: &CString) -> Result<(), IoError> {
let r = FsRequest::link(self.uv_loop(), src, dst);
let r = FsRequest::link(&self.loop_, src, dst);
r.map_err(uv_error_to_io_error)
}
fn fs_symlink(&mut self, src: &CString, dst: &CString) -> Result<(), IoError> {
let r = FsRequest::symlink(self.uv_loop(), src, dst);
let r = FsRequest::symlink(&self.loop_, src, dst);
r.map_err(uv_error_to_io_error)
}
fn fs_chown(&mut self, path: &CString, uid: int, gid: int) -> Result<(), IoError> {
let r = FsRequest::chown(self.uv_loop(), path, uid, gid);
let r = FsRequest::chown(&self.loop_, path, uid, gid);
r.map_err(uv_error_to_io_error)
}
fn fs_readlink(&mut self, path: &CString) -> Result<Path, IoError> {
let r = FsRequest::readlink(self.uv_loop(), path);
let r = FsRequest::readlink(&self.loop_, path);
r.map_err(uv_error_to_io_error)
}
fn fs_utime(&mut self, path: &CString, atime: u64, mtime: u64)
-> Result<(), IoError>
{
let r = FsRequest::utime(self.uv_loop(), path, atime, mtime);
let r = FsRequest::utime(&self.loop_, path, atime, mtime);
r.map_err(uv_error_to_io_error)
}
fn spawn(&mut self, config: ProcessConfig)
-> Result<(~RtioProcess, ~[Option<~RtioPipe>]), IoError>
-> Result<(~rtio::RtioProcess, ~[Option<~rtio::RtioPipe>]), IoError>
{
match Process::spawn(self.uv_loop(), config) {
match Process::spawn(self, config) {
Ok((p, io)) => {
Ok((p as ~RtioProcess,
io.move_iter().map(|i| i.map(|p| ~p as ~RtioPipe)).collect()))
Ok((p as ~rtio::RtioProcess,
io.move_iter().map(|i| i.map(|p| ~p as ~rtio::RtioPipe)).collect()))
}
Err(e) => Err(uv_error_to_io_error(e)),
}
}
fn unix_bind(&mut self, path: &CString) -> Result<~RtioUnixListener, IoError>
fn unix_bind(&mut self, path: &CString) -> Result<~rtio::RtioUnixListener, IoError>
{
match PipeListener::bind(self.uv_loop(), path) {
Ok(p) => Ok(p as ~RtioUnixListener),
match PipeListener::bind(self, path) {
Ok(p) => Ok(p as ~rtio::RtioUnixListener),
Err(e) => Err(uv_error_to_io_error(e)),
}
}
fn unix_connect(&mut self, path: &CString) -> Result<~RtioPipe, IoError> {
match PipeWatcher::connect(self.uv_loop(), path) {
Ok(p) => Ok(~p as ~RtioPipe),
fn unix_connect(&mut self, path: &CString) -> Result<~rtio::RtioPipe, IoError> {
match PipeWatcher::connect(self, path) {
Ok(p) => Ok(~p as ~rtio::RtioPipe),
Err(e) => Err(uv_error_to_io_error(e)),
}
}
fn tty_open(&mut self, fd: c_int, readable: bool)
-> Result<~RtioTTY, IoError> {
match TtyWatcher::new(self.uv_loop(), fd, readable) {
Ok(tty) => Ok(~tty as ~RtioTTY),
-> Result<~rtio::RtioTTY, IoError> {
match TtyWatcher::new(self, fd, readable) {
Ok(tty) => Ok(~tty as ~rtio::RtioTTY),
Err(e) => Err(uv_error_to_io_error(e))
}
}
fn pipe_open(&mut self, fd: c_int) -> Result<~RtioPipe, IoError> {
match PipeWatcher::open(self.uv_loop(), fd) {
Ok(s) => Ok(~s as ~RtioPipe),
fn pipe_open(&mut self, fd: c_int) -> Result<~rtio::RtioPipe, IoError> {
match PipeWatcher::open(self, fd) {
Ok(s) => Ok(~s as ~rtio::RtioPipe),
Err(e) => Err(uv_error_to_io_error(e))
}
}
fn signal(&mut self, signum: Signum, channel: SharedChan<Signum>)
-> Result<~RtioSignal, IoError> {
match SignalWatcher::new(self.uv_loop(), signum, channel) {
Ok(s) => Ok(s as ~RtioSignal),
-> Result<~rtio::RtioSignal, IoError> {
match SignalWatcher::new(self, signum, channel) {
Ok(s) => Ok(s as ~rtio::RtioSignal),
Err(e) => Err(uv_error_to_io_error(e)),
}
}

View File

@ -37,7 +37,8 @@ use std::libc;
#[cfg(test)]
use std::libc::uintptr_t;
pub use self::errors::*;
pub use self::errors::{EACCES, ECONNREFUSED, ECONNRESET, EPIPE, ECONNABORTED,
ECANCELED, EBADF, ENOTCONN, ENOENT};
pub static OK: c_int = 0;
pub static EOF: c_int = -4095;
@ -576,6 +577,8 @@ extern {
// generic uv functions
pub fn uv_loop_delete(l: *uv_loop_t);
pub fn uv_ref(t: *uv_handle_t);
pub fn uv_unref(t: *uv_handle_t);
pub fn uv_handle_size(ty: uv_handle_type) -> size_t;
pub fn uv_req_size(ty: uv_req_type) -> size_t;
pub fn uv_run(l: *uv_loop_t, mode: uv_run_mode) -> c_int;

View File

@ -20,10 +20,11 @@
use cast::transmute;
use option::{Option, Some, None};
use result::{Result, Ok, Err};
use to_str::ToStr;
use unstable::intrinsics::TypeId;
use unstable::intrinsics;
use util::Void;
use unstable::intrinsics::TypeId;
///////////////////////////////////////////////////////////////////////////////
// Any trait
@ -118,13 +119,13 @@ impl<'a> AnyMutRefExt<'a> for &'a mut Any {
/// Extension methods for an owning `Any` trait object
pub trait AnyOwnExt {
/// Returns the boxed value if it is of type `T`, or
/// `None` if it isn't.
fn move<T: 'static>(self) -> Option<~T>;
/// `Err(Self)` if it isn't.
fn move<T: 'static>(self) -> Result<~T, Self>;
}
impl AnyOwnExt for ~Any {
#[inline]
fn move<T: 'static>(self) -> Option<~T> {
fn move<T: 'static>(self) -> Result<~T, ~Any> {
if self.is::<T>() {
unsafe {
// Extract the pointer to the boxed value, temporary alias with self
@ -133,10 +134,10 @@ impl AnyOwnExt for ~Any {
// Prevent destructor on self being run
intrinsics::forget(self);
Some(ptr)
Ok(ptr)
}
} else {
None
Err(self)
}
}
}
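// A hypothetical sketch (this helper is not part of the commit):
// returning `Err(self)` instead of `None` hands the trait object back
// on a failed downcast, so attempts can be chained without cloning.
fn describe(any: ~Any) -> ~str {
    match any.move::<uint>() {
        Ok(n) => format!("a uint: {}", *n),
        Err(any) => match any.move::<char>() {
            Ok(c) => format!("a char: {}", *c),
            Err(_) => ~"something else",
        }
    }
}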
@ -155,9 +156,8 @@ impl<'a> ToStr for &'a Any {
#[cfg(test)]
mod tests {
use prelude::*;
use super::*;
use super::AnyRefExt;
use option::{Some, None};
#[deriving(Eq)]
struct Test;
@ -384,13 +384,19 @@ mod tests {
let a = ~8u as ~Any;
let b = ~Test as ~Any;
assert_eq!(a.move(), Some(~8u));
assert_eq!(b.move(), Some(~Test));
match a.move::<uint>() {
Ok(a) => { assert_eq!(a, ~8u); }
Err(..) => fail!()
}
match b.move::<Test>() {
Ok(a) => { assert_eq!(a, ~Test); }
Err(..) => fail!()
}
let a = ~8u as ~Any;
let b = ~Test as ~Any;
assert_eq!(a.move(), None::<~Test>);
assert_eq!(b.move(), None::<~uint>);
assert!(a.move::<~Test>().is_err());
assert!(b.move::<~uint>().is_err());
}
}

View File

@ -1,337 +0,0 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! One of the major goals behind this channel implementation is to work
//! seamlessly on and off the runtime. This also means that the code isn't
//! littered with "if is_green() { ... } else { ... }". Right now, the rest of
//! the runtime isn't quite ready for this abstraction to be done very
//! nicely, so the conditional "if green" blocks are all contained in this inner
//! module.
//!
//! The goal of this module is to mirror what the runtime "should be", not the
//! state that it is currently in today. You'll notice that there is no mention
//! of schedulers or is_green inside any of the channel code; it is currently
//! entirely contained in this one module.
//!
//! In the ideal world, nothing in this module exists and it is all implemented
//! elsewhere in the runtime (in the proper location). All of this code is
//! structured in order to easily refactor this to the correct location whenever
//! we have the trait objects in place to serve as the boundary of the
//! abstraction.
use iter::{range, Iterator};
use ops::Drop;
use option::{Some, None, Option};
use rt::local::Local;
use rt::sched::{SchedHandle, Scheduler, TaskFromFriend};
use rt::thread::Thread;
use rt;
use unstable::mutex::Mutex;
use unstable::sync::UnsafeArc;
// A task handle is a method of waking up a blocked task. The handle itself
// is completely opaque and only has a wake() method defined on it. This
// method will wake the task regardless of the context of the thread which
// is currently calling wake().
//
// This abstraction should be able to be created when putting a task to
// sleep. This should basically be a method on whatever the local Task is,
// consuming the local Task.
pub struct TaskHandle {
priv inner: TaskRepr
}
enum TaskRepr {
Green(rt::BlockedTask, *mut SchedHandle),
Native(NativeWakeupStyle),
}
enum NativeWakeupStyle {
ArcWakeup(UnsafeArc<Mutex>), // shared mutex to synchronize on
LocalWakeup(*mut Mutex), // synchronize on the task-local mutex
}
impl TaskHandle {
// Signal that this handle should be woken up. The `can_resched`
// argument indicates whether the current task could possibly be
// rescheduled or not. This does not have a lot of meaning for the
// native case, but for an M:N case it indicates whether a context
// switch can happen or not.
pub fn wake(self, can_resched: bool) {
match self.inner {
Green(task, handle) => {
// If we have a local scheduler, then use that to run the
// blocked task, otherwise we can use the handle to send the
// task back to its home.
if rt::in_green_task_context() {
if can_resched {
task.wake().map(Scheduler::run_task);
} else {
let mut s: ~Scheduler = Local::take();
s.enqueue_blocked_task(task);
Local::put(s);
}
} else {
let task = match task.wake() {
Some(task) => task, None => return
};
// XXX: this is not an easy section of code to refactor.
// If this handle is owned by the Task (which it
// should be), then this would be a use-after-free
// because once the task is pushed onto the message
// queue, the handle is gone.
//
// Currently the handle is instead owned by the
// Port/Chan pair, which means that because a
// channel is invoking this method the handle will
// continue to stay alive for the entire duration
// of this method. This will require thought when
// moving the handle into the task.
unsafe { (*handle).send(TaskFromFriend(task)) }
}
}
// Note that there are no use-after-free races in this code. In
// the arc-case, we own the lock, and in the local case, we're
// using a lock so it's guaranteed that they aren't running while
// we hold the lock.
Native(ArcWakeup(lock)) => {
unsafe {
let lock = lock.get();
(*lock).lock();
(*lock).signal();
(*lock).unlock();
}
}
Native(LocalWakeup(lock)) => {
unsafe {
(*lock).lock();
(*lock).signal();
(*lock).unlock();
}
}
}
}
// Trashes the handle to this task. This ensures that necessary memory is
// deallocated, and there may be some extra assertions as well.
pub fn trash(self) {
match self.inner {
Green(task, _) => task.assert_already_awake(),
Native(..) => {}
}
}
}
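// A hypothetical usage sketch (not from this commit): a channel parks a
// TaskHandle in its `to_wake` slot, and a sender consumes it with
// `wake`, which is safe to call from any thread.
fn notify(slot: &mut Option<TaskHandle>, can_resched: bool) {
    match slot.take() {
        Some(handle) => handle.wake(can_resched),
        None => {} // no task was blocked
    }
}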
// This structure is an abstraction of what should be stored in the local
// task itself. This data is currently stored inside of each channel, but
// this should rather be stored in each task (and channels will still
// continue to lazily initialize this data).
pub struct TaskData {
priv handle: Option<SchedHandle>,
priv lock: Mutex,
}
impl TaskData {
pub fn new() -> TaskData {
TaskData {
handle: None,
lock: unsafe { Mutex::empty() },
}
}
}
impl Drop for TaskData {
fn drop(&mut self) {
unsafe { self.lock.destroy() }
}
}
// Now this is the really fun part. This is where all the M:N/1:1-agnostic
// along with recv/select-agnostic blocking information goes. A "blocking
// context" is really just a stack-allocated structure (which is probably
// fine to be a stack-trait-object).
//
// This has some particularly strange interfaces, but the reason for all
// this is to support selection/recv/1:1/M:N all in one bundle.
pub struct BlockingContext<'a> {
priv inner: BlockingRepr<'a>
}
enum BlockingRepr<'a> {
GreenBlock(rt::BlockedTask, &'a mut Scheduler),
NativeBlock(Option<UnsafeArc<Mutex>>),
}
impl<'a> BlockingContext<'a> {
// Creates one blocking context. The data provided should in theory be
// acquired from the local task, but currently it is acquired from the
// channel.
//
// This function will call `f` with a blocking context, plus the data
// that it is given. This function will then return whether this task
// should actually go to sleep or not. If `true` is returned, then this
// function does not return until someone calls `wake()` on the task.
// If `false` is returned, then this function immediately returns.
//
// # Safety note
//
// Note that this stack closure may not be run on the same stack as when
// this function was called. This means that the environment of this
// stack closure could be unsafely aliased. This is currently prevented
// through the guarantee that this function will never return before `f`
// finishes executing.
pub fn one(data: &mut TaskData,
f: |BlockingContext, &mut TaskData| -> bool) {
if rt::in_green_task_context() {
let sched: ~Scheduler = Local::take();
sched.deschedule_running_task_and_then(|sched, task| {
let ctx = BlockingContext { inner: GreenBlock(task, sched) };
// no need to do something on success/failure other than
// returning because the `block` function for a BlockingContext
// takes care of reawakening itself if the blocking procedure
// fails. If this function is successful, then we're already
// blocked, and if it fails, the task will already be
// rescheduled.
f(ctx, data);
});
} else {
unsafe { data.lock.lock(); }
let ctx = BlockingContext { inner: NativeBlock(None) };
if f(ctx, data) {
unsafe { data.lock.wait(); }
}
unsafe { data.lock.unlock(); }
}
}
// Creates many blocking contexts. The intended use case for this
// function is selection over a number of ports. This will create `amt`
// blocking contexts, yielding them to `f` in turn. If `f` returns
// false, then this function aborts and returns immediately. If `f`
// repeatedly returns `true` `amt` times, then this function will block.
pub fn many(amt: uint, f: |BlockingContext| -> bool) {
if rt::in_green_task_context() {
let sched: ~Scheduler = Local::take();
sched.deschedule_running_task_and_then(|sched, task| {
for handle in task.make_selectable(amt) {
let ctx = BlockingContext {
inner: GreenBlock(handle, sched)
};
// see comment above in `one` for why no further action is
// necessary here
if !f(ctx) { break }
}
});
} else {
// In the native case, our decision to block must be shared
// amongst all of the channels. It may be possible to
// stack-allocate this mutex (instead of putting it in an
// UnsafeArc box), but for now in order to prevent
// use-after-free trivially we place this into a box and then
// pass that around.
unsafe {
let mtx = UnsafeArc::new(Mutex::new());
(*mtx.get()).lock();
let success = range(0, amt).all(|_| {
f(BlockingContext {
inner: NativeBlock(Some(mtx.clone()))
})
});
if success {
(*mtx.get()).wait();
}
(*mtx.get()).unlock();
}
}
}
// This function will consume this BlockingContext, and optionally block
// according to the atomic `decision` function. The semantics of this
// function are:
//
// * `slot` is required to be a `None`-slot (which is owned by the
// channel)
// * The `slot` will be filled in with a blocked version of the current
// task (with `wake`-ability if this function is successful).
// * If the `decision` function returns true, then this function
// immediately returns having relinquished ownership of the task.
// * If the `decision` function returns false, then the `slot` is reset
// to `None` and the task is re-scheduled if necessary (remember that
// the task will not resume executing before the outer `one` or
// `many` function has returned). This function is expected to have a
// release memory fence in order for the modifications of `to_wake` to be
// visible to other tasks. Code which attempts to read `to_wake` should
// have an acquiring memory fence to guarantee that this write is
// visible.
//
// This function will return whether the blocking occurred or not.
pub fn block(self,
data: &mut TaskData,
slot: &mut Option<TaskHandle>,
decision: || -> bool) -> bool {
assert!(slot.is_none());
match self.inner {
GreenBlock(task, sched) => {
if data.handle.is_none() {
data.handle = Some(sched.make_handle());
}
let handle = data.handle.get_mut_ref() as *mut SchedHandle;
*slot = Some(TaskHandle { inner: Green(task, handle) });
if !decision() {
match slot.take_unwrap().inner {
Green(task, _) => sched.enqueue_blocked_task(task),
Native(..) => unreachable!()
}
false
} else {
true
}
}
NativeBlock(shared) => {
*slot = Some(TaskHandle {
inner: Native(match shared {
Some(arc) => ArcWakeup(arc),
None => LocalWakeup(&mut data.lock as *mut Mutex),
})
});
if !decision() {
*slot = None;
false
} else {
true
}
}
}
}
}
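// How `one` and `block` compose in practice, sketched from the
// Port::recv usage that this commit later deletes (`Packet` stands for
// the channel state defined in ../mod.rs): deschedule, publish the task
// into `to_wake`, and let the atomic `decrement` decide whether the
// block actually happens.
unsafe fn block_for_data(packet: *mut Packet) {
    BlockingContext::one(&mut (*packet).data, |ctx, data| {
        ctx.block(data, &mut (*packet).to_wake, || (*packet).decrement())
    })
}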
// Agnostic method of forcing a yield of the current task
pub fn yield_now() {
if rt::in_green_task_context() {
let sched: ~Scheduler = Local::take();
sched.yield_now();
} else {
Thread::yield_now();
}
}
// Agnostic method of "maybe yielding" in order to provide fairness
pub fn maybe_yield() {
if rt::in_green_task_context() {
let sched: ~Scheduler = Local::take();
sched.maybe_yield();
} else {
// the OS decides fairness, nothing for us to do.
}
}

View File

@ -233,14 +233,16 @@ use iter::Iterator;
use kinds::Send;
use ops::Drop;
use option::{Option, Some, None};
use result::{Ok, Err};
use rt::local::Local;
use rt::task::{Task, BlockedTask};
use rt::thread::Thread;
use unstable::atomics::{AtomicInt, AtomicBool, SeqCst, Relaxed};
use sync::atomics::{AtomicInt, AtomicBool, SeqCst, Relaxed};
use vec::{ImmutableVector, OwnedVector};
use spsc = rt::spsc_queue;
use mpsc = rt::mpsc_queue;
use spsc = sync::spsc_queue;
use mpsc = sync::mpsc_queue;
use self::imp::{TaskHandle, TaskData, BlockingContext};
pub use self::select::Select;
macro_rules! test (
@ -248,24 +250,26 @@ macro_rules! test (
mod $name {
#[allow(unused_imports)];
use util;
use super::super::*;
use native;
use prelude::*;
use super::*;
use super::super::*;
use task;
use util;
fn f() $b
$($a)* #[test] fn uv() { f() }
$($a)* #[test]
#[ignore(cfg(windows))] // FIXME(#11003)
fn native() {
use unstable::run_in_bare_thread;
run_in_bare_thread(f);
$($a)* #[test] fn native() {
use native;
let (p, c) = Chan::new();
do native::task::spawn { c.send(f()) }
p.recv();
}
}
)
)
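// For reference, `test!(fn smoke() { ... })` now roughly expands to a
// module that runs the body twice: once under the default (green) test
// runtime and once on a freshly spawned 1:1 native task:
//
// mod smoke {
//     fn f() { /* body */ }
//     #[test] fn uv() { f() }
//     #[test] fn native() {
//         let (p, c) = Chan::new();
//         do native::task::spawn { c.send(f()) }
//         p.recv();
//     }
// }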
mod imp;
mod select;
///////////////////////////////////////////////////////////////////////////////
@ -326,9 +330,7 @@ pub struct SharedChan<T> {
struct Packet {
cnt: AtomicInt, // How many items are on this channel
steals: int, // How many times has a port received without blocking?
to_wake: Option<TaskHandle>, // Task to wake up
data: TaskData,
to_wake: Option<BlockedTask>, // Task to wake up
// This lock is used to wake up native threads blocked in select. The
// `lock` field is not used because the thread blocking in select must
@ -343,6 +345,7 @@ struct Packet {
selection_id: uint,
select_next: *mut Packet,
select_prev: *mut Packet,
recv_cnt: int,
}
///////////////////////////////////////////////////////////////////////////////
@ -358,13 +361,13 @@ impl Packet {
cnt: AtomicInt::new(0),
steals: 0,
to_wake: None,
data: TaskData::new(),
channels: AtomicInt::new(1),
selecting: AtomicBool::new(false),
selection_id: 0,
select_next: 0 as *mut Packet,
select_prev: 0 as *mut Packet,
recv_cnt: 0,
}
}
@ -418,7 +421,10 @@ impl Packet {
// This function must have had at least an acquire fence before it to be
// properly called.
fn wakeup(&mut self, can_resched: bool) {
self.to_wake.take_unwrap().wake(can_resched);
match self.to_wake.take_unwrap().wake() {
Some(task) => task.reawaken(can_resched),
None => {}
}
self.selecting.store(false, Relaxed);
}
@ -490,7 +496,7 @@ impl Packet {
match self.channels.fetch_sub(1, SeqCst) {
1 => {
match self.cnt.swap(DISCONNECTED, SeqCst) {
-1 => { self.wakeup(false); }
-1 => { self.wakeup(true); }
DISCONNECTED => {}
n => { assert!(n >= 0); }
}
@ -531,9 +537,6 @@ impl<T: Send> Chan<T> {
/// port.
///
/// Rust channels are infinitely buffered so this method will never block.
/// This method may trigger a rescheduling, however, in order to wake up a
/// blocked receiver (if one is present). If no scheduling is desired, then
/// `send_deferred` guarantees that there will be no reschedulings.
///
/// # Failure
///
@ -555,15 +558,6 @@ impl<T: Send> Chan<T> {
}
}
/// This function is equivalent in the semantics of `send`, but it
/// guarantees that a rescheduling will never occur when this method is
/// called.
pub fn send_deferred(&self, t: T) {
if !self.try_send_deferred(t) {
fail!("sending on a closed channel");
}
}
/// Attempts to send a value on this channel, returning whether it was
/// successfully sent.
///
@ -579,9 +573,8 @@ impl<T: Send> Chan<T> {
/// be tolerated, then this method should be used instead.
pub fn try_send(&self, t: T) -> bool { self.try(t, true) }
/// This function is equivalent in the semantics of `try_send`, but it
/// guarantees that a rescheduling will never occur when this method is
/// called.
/// This function will not stick around for very long. The purpose of this
/// function is to guarantee that no rescheduling is performed.
pub fn try_send_deferred(&self, t: T) -> bool { self.try(t, false) }
fn try(&self, t: T, can_resched: bool) -> bool {
@ -606,8 +599,9 @@ impl<T: Send> Chan<T> {
// the TLS overhead can be a bit much.
n => {
assert!(n >= 0);
if can_resched && n > 0 && n % RESCHED_FREQ == 0 {
imp::maybe_yield();
if n > 0 && n % RESCHED_FREQ == 0 {
let task: ~Task = Local::take();
task.maybe_yield();
}
true
}
@ -642,25 +636,9 @@ impl<T: Send> SharedChan<T> {
}
}
/// This function is equivalent in the semantics of `send`, but it
/// guarantees that a rescheduling will never occur when this method is
/// called.
pub fn send_deferred(&self, t: T) {
if !self.try_send_deferred(t) {
fail!("sending on a closed channel");
}
}
/// Equivalent method to `try_send` on the `Chan` type (using the same
/// semantics)
pub fn try_send(&self, t: T) -> bool { self.try(t, true) }
/// This function is equivalent in the semantics of `try_send`, but it
/// guarantees that a rescheduling will never occur when this method is
/// called.
pub fn try_send_deferred(&self, t: T) -> bool { self.try(t, false) }
fn try(&self, t: T, can_resched: bool) -> bool {
pub fn try_send(&self, t: T) -> bool {
unsafe {
// Note that the multiple sender case is a little trickier
// semantically than the single sender case. The logic for
@ -697,10 +675,11 @@ impl<T: Send> SharedChan<T> {
match (*packet).increment() {
DISCONNECTED => {} // oh well, we tried
-1 => { (*packet).wakeup(can_resched); }
-1 => { (*packet).wakeup(true); }
n => {
if can_resched && n > 0 && n % RESCHED_FREQ == 0 {
imp::maybe_yield();
if n > 0 && n % RESCHED_FREQ == 0 {
let task: ~Task = Local::take();
task.maybe_yield();
}
}
}
@ -768,6 +747,18 @@ impl<T: Send> Port<T> {
// This is a "best effort" situation, so if a queue is inconsistent just
// don't worry about it.
let this = unsafe { cast::transmute_mut(self) };
// See the comment about yielding on sends, but the same applies here.
// If a thread is spinning in try_recv, we should try to yield every so often.
unsafe {
let packet = this.queue.packet();
(*packet).recv_cnt += 1;
if (*packet).recv_cnt % RESCHED_FREQ == 0 {
let task: ~Task = Local::take();
task.maybe_yield();
}
}
let ret = match this.queue {
SPSC(ref mut queue) => queue.pop(),
MPSC(ref mut queue) => match queue.pop() {
@ -840,15 +831,22 @@ impl<T: Send> Port<T> {
unsafe {
this = cast::transmute_mut(self);
packet = this.queue.packet();
BlockingContext::one(&mut (*packet).data, |ctx, data| {
ctx.block(data, &mut (*packet).to_wake, || (*packet).decrement())
let task: ~Task = Local::take();
task.deschedule(1, |task| {
assert!((*packet).to_wake.is_none());
(*packet).to_wake = Some(task);
if (*packet).decrement() {
Ok(())
} else {
Err((*packet).to_wake.take_unwrap())
}
});
}
let data = self.try_recv_inc(false);
if data.is_none() &&
unsafe { (*packet).cnt.load(SeqCst) } != DISCONNECTED {
fail!("bug: woke up too soon");
fail!("bug: woke up too soon {}", unsafe { (*packet).cnt.load(SeqCst) });
}
return data;
}
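// The new blocking protocol above (also used by select, below), as a
// condensed sketch of the code in this hunk: the deschedule closure
// publishes the BlockedTask and returns Ok(()) to commit to sleeping,
// or Err(task) to reclaim the task and abort the block.
unsafe fn sleep_unless_data(packet: *mut Packet) {
    let task: ~Task = Local::take();
    task.deschedule(1, |task| {
        assert!((*packet).to_wake.is_none());
        (*packet).to_wake = Some(task);
        if (*packet).decrement() {
            Ok(())                               // committed: we sleep
        } else {
            Err((*packet).to_wake.take_unwrap()) // data arrived: abort
        }
    });
}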
@ -880,10 +878,16 @@ impl<T: Send> Drop for Port<T> {
mod test {
use prelude::*;
use task;
use rt::thread::Thread;
use native;
use os;
use super::*;
use rt::test::*;
pub fn stress_factor() -> uint {
match os::getenv("RUST_TEST_STRESS") {
Some(val) => from_str::<uint>(val).unwrap(),
None => 1,
}
}
test!(fn smoke() {
let (p, c) = Chan::new();
@ -910,99 +914,88 @@ mod test {
assert_eq!(p.recv(), 1);
})
#[test]
fn smoke_threads() {
test!(fn smoke_threads() {
let (p, c) = Chan::new();
do task::spawn_sched(task::SingleThreaded) {
do spawn {
c.send(1);
}
assert_eq!(p.recv(), 1);
}
})
#[test] #[should_fail]
fn smoke_port_gone() {
test!(fn smoke_port_gone() {
let (p, c) = Chan::new();
drop(p);
c.send(1);
}
} #[should_fail])
#[test] #[should_fail]
fn smoke_shared_port_gone() {
test!(fn smoke_shared_port_gone() {
let (p, c) = SharedChan::new();
drop(p);
c.send(1);
}
} #[should_fail])
#[test] #[should_fail]
fn smoke_shared_port_gone2() {
test!(fn smoke_shared_port_gone2() {
let (p, c) = SharedChan::new();
drop(p);
let c2 = c.clone();
drop(c);
c2.send(1);
}
} #[should_fail])
#[test] #[should_fail]
fn port_gone_concurrent() {
test!(fn port_gone_concurrent() {
let (p, c) = Chan::new();
do task::spawn_sched(task::SingleThreaded) {
do spawn {
p.recv();
}
loop { c.send(1) }
}
} #[should_fail])
#[test] #[should_fail]
fn port_gone_concurrent_shared() {
test!(fn port_gone_concurrent_shared() {
let (p, c) = SharedChan::new();
let c1 = c.clone();
do task::spawn_sched(task::SingleThreaded) {
do spawn {
p.recv();
}
loop {
c.send(1);
c1.send(1);
}
}
} #[should_fail])
#[test] #[should_fail]
fn smoke_chan_gone() {
test!(fn smoke_chan_gone() {
let (p, c) = Chan::<int>::new();
drop(c);
p.recv();
}
} #[should_fail])
#[test] #[should_fail]
fn smoke_chan_gone_shared() {
test!(fn smoke_chan_gone_shared() {
let (p, c) = SharedChan::<()>::new();
let c2 = c.clone();
drop(c);
drop(c2);
p.recv();
}
} #[should_fail])
#[test] #[should_fail]
fn chan_gone_concurrent() {
test!(fn chan_gone_concurrent() {
let (p, c) = Chan::new();
do task::spawn_sched(task::SingleThreaded) {
do spawn {
c.send(1);
c.send(1);
}
loop { p.recv(); }
}
} #[should_fail])
#[test]
fn stress() {
test!(fn stress() {
let (p, c) = Chan::new();
do task::spawn_sched(task::SingleThreaded) {
do spawn {
for _ in range(0, 10000) { c.send(1); }
}
for _ in range(0, 10000) {
assert_eq!(p.recv(), 1);
}
}
})
#[test]
fn stress_shared() {
test!(fn stress_shared() {
static AMT: uint = 10000;
static NTHREADS: uint = 8;
let (p, c) = SharedChan::<int>::new();
@ -1018,47 +1011,53 @@ mod test {
for _ in range(0, NTHREADS) {
let c = c.clone();
do task::spawn_sched(task::SingleThreaded) {
do spawn {
for _ in range(0, AMT) { c.send(1); }
}
}
p1.recv();
}
})
#[test]
#[ignore(cfg(windows))] // FIXME(#11003)
fn send_from_outside_runtime() {
let (p, c) = Chan::<int>::new();
let (p1, c1) = Chan::new();
let (port, chan) = SharedChan::new();
let chan2 = chan.clone();
do spawn {
c1.send(());
for _ in range(0, 40) {
assert_eq!(p.recv(), 1);
}
chan2.send(());
}
p1.recv();
let t = do Thread::start {
do native::task::spawn {
for _ in range(0, 40) {
c.send(1);
}
};
t.join();
chan.send(());
}
port.recv();
port.recv();
}
#[test]
#[ignore(cfg(windows))] // FIXME(#11003)
fn recv_from_outside_runtime() {
let (p, c) = Chan::<int>::new();
let t = do Thread::start {
let (dp, dc) = Chan::new();
do native::task::spawn {
for _ in range(0, 40) {
assert_eq!(p.recv(), 1);
}
dc.send(());
};
for _ in range(0, 40) {
c.send(1);
}
t.join();
dp.recv();
}
#[test]
@ -1066,173 +1065,132 @@ mod test {
fn no_runtime() {
let (p1, c1) = Chan::<int>::new();
let (p2, c2) = Chan::<int>::new();
let t1 = do Thread::start {
let (port, chan) = SharedChan::new();
let chan2 = chan.clone();
do native::task::spawn {
assert_eq!(p1.recv(), 1);
c2.send(2);
};
let t2 = do Thread::start {
chan2.send(());
}
do native::task::spawn {
c1.send(1);
assert_eq!(p2.recv(), 2);
};
t1.join();
t2.join();
}
#[test]
fn oneshot_single_thread_close_port_first() {
// Simple test of closing without sending
do run_in_newsched_task {
let (port, _chan) = Chan::<int>::new();
{ let _p = port; }
chan.send(());
}
port.recv();
port.recv();
}
#[test]
fn oneshot_single_thread_close_chan_first() {
test!(fn oneshot_single_thread_close_port_first() {
// Simple test of closing without sending
do run_in_newsched_task {
let (_port, chan) = Chan::<int>::new();
{ let _c = chan; }
}
}
let (port, _chan) = Chan::<int>::new();
{ let _p = port; }
})
#[test] #[should_fail]
fn oneshot_single_thread_send_port_close() {
test!(fn oneshot_single_thread_close_chan_first() {
// Simple test of closing without sending
let (_port, chan) = Chan::<int>::new();
{ let _c = chan; }
})
test!(fn oneshot_single_thread_send_port_close() {
// Testing that the sender cleans up the payload if receiver is closed
let (port, chan) = Chan::<~int>::new();
{ let _p = port; }
chan.send(~0);
}
} #[should_fail])
#[test]
fn oneshot_single_thread_recv_chan_close() {
test!(fn oneshot_single_thread_recv_chan_close() {
// Receiving on a closed chan will fail
do run_in_newsched_task {
let res = do spawntask_try {
let (port, chan) = Chan::<~int>::new();
{ let _c = chan; }
port.recv();
};
// What is our res?
assert!(res.is_err());
}
}
#[test]
fn oneshot_single_thread_send_then_recv() {
do run_in_newsched_task {
let res = do task::try {
let (port, chan) = Chan::<~int>::new();
chan.send(~10);
{ let _c = chan; }
port.recv();
};
// What is our res?
assert!(res.is_err());
})
test!(fn oneshot_single_thread_send_then_recv() {
let (port, chan) = Chan::<~int>::new();
chan.send(~10);
assert!(port.recv() == ~10);
})
test!(fn oneshot_single_thread_try_send_open() {
let (port, chan) = Chan::<int>::new();
assert!(chan.try_send(10));
assert!(port.recv() == 10);
})
test!(fn oneshot_single_thread_try_send_closed() {
let (port, chan) = Chan::<int>::new();
{ let _p = port; }
assert!(!chan.try_send(10));
})
test!(fn oneshot_single_thread_try_recv_open() {
let (port, chan) = Chan::<int>::new();
chan.send(10);
assert!(port.try_recv() == Some(10));
})
test!(fn oneshot_single_thread_try_recv_closed() {
let (port, chan) = Chan::<int>::new();
{ let _c = chan; }
assert!(port.recv_opt() == None);
})
test!(fn oneshot_single_thread_peek_data() {
let (port, chan) = Chan::<int>::new();
assert!(port.try_recv().is_none());
chan.send(10);
assert!(port.try_recv().is_some());
})
test!(fn oneshot_single_thread_peek_close() {
let (port, chan) = Chan::<int>::new();
{ let _c = chan; }
assert!(port.try_recv().is_none());
assert!(port.try_recv().is_none());
})
test!(fn oneshot_single_thread_peek_open() {
let (port, _) = Chan::<int>::new();
assert!(port.try_recv().is_none());
})
test!(fn oneshot_multi_task_recv_then_send() {
let (port, chan) = Chan::<~int>::new();
do spawn {
assert!(port.recv() == ~10);
}
}
#[test]
fn oneshot_single_thread_try_send_open() {
do run_in_newsched_task {
let (port, chan) = Chan::<int>::new();
assert!(chan.try_send(10));
assert!(port.recv() == 10);
chan.send(~10);
})
test!(fn oneshot_multi_task_recv_then_close() {
let (port, chan) = Chan::<~int>::new();
do spawn {
let _chan = chan;
}
}
let res = do task::try {
assert!(port.recv() == ~10);
};
assert!(res.is_err());
})
#[test]
fn oneshot_single_thread_try_send_closed() {
do run_in_newsched_task {
let (port, chan) = Chan::<int>::new();
{ let _p = port; }
assert!(!chan.try_send(10));
}
}
#[test]
fn oneshot_single_thread_try_recv_open() {
do run_in_newsched_task {
let (port, chan) = Chan::<int>::new();
chan.send(10);
assert!(port.try_recv() == Some(10));
}
}
#[test]
fn oneshot_single_thread_try_recv_closed() {
do run_in_newsched_task {
let (port, chan) = Chan::<int>::new();
{ let _c = chan; }
assert!(port.recv_opt() == None);
}
}
#[test]
fn oneshot_single_thread_peek_data() {
do run_in_newsched_task {
let (port, chan) = Chan::<int>::new();
assert!(port.try_recv().is_none());
chan.send(10);
assert!(port.try_recv().is_some());
}
}
#[test]
fn oneshot_single_thread_peek_close() {
do run_in_newsched_task {
let (port, chan) = Chan::<int>::new();
{ let _c = chan; }
assert!(port.try_recv().is_none());
assert!(port.try_recv().is_none());
}
}
#[test]
fn oneshot_single_thread_peek_open() {
do run_in_newsched_task {
let (port, _) = Chan::<int>::new();
assert!(port.try_recv().is_none());
}
}
#[test]
fn oneshot_multi_task_recv_then_send() {
do run_in_newsched_task {
let (port, chan) = Chan::<~int>::new();
do spawntask {
assert!(port.recv() == ~10);
}
chan.send(~10);
}
}
#[test]
fn oneshot_multi_task_recv_then_close() {
do run_in_newsched_task {
let (port, chan) = Chan::<~int>::new();
do spawntask_later {
let _chan = chan;
}
let res = do spawntask_try {
assert!(port.recv() == ~10);
};
assert!(res.is_err());
}
}
#[test]
fn oneshot_multi_thread_close_stress() {
test!(fn oneshot_multi_thread_close_stress() {
stress_factor().times(|| {
do run_in_newsched_task {
let (port, chan) = Chan::<int>::new();
let thread = do spawntask_thread {
let _p = port;
};
let _chan = chan;
thread.join();
let (port, chan) = Chan::<int>::new();
do spawn {
let _p = port;
}
let _chan = chan;
})
}
})
#[test]
fn oneshot_multi_thread_send_close_stress() {
test!(fn oneshot_multi_thread_send_close_stress() {
stress_factor().times(|| {
let (port, chan) = Chan::<int>::new();
do spawn {
@ -1242,10 +1200,9 @@ mod test {
chan.send(1);
};
})
}
})
#[test]
fn oneshot_multi_thread_recv_close_stress() {
test!(fn oneshot_multi_thread_recv_close_stress() {
stress_factor().times(|| {
let (port, chan) = Chan::<int>::new();
do spawn {
@ -1262,10 +1219,9 @@ mod test {
}
};
})
}
})
#[test]
fn oneshot_multi_thread_send_recv_stress() {
test!(fn oneshot_multi_thread_send_recv_stress() {
stress_factor().times(|| {
let (port, chan) = Chan::<~int>::new();
do spawn {
@ -1275,10 +1231,9 @@ mod test {
assert!(port.recv() == ~10);
}
})
}
})
#[test]
fn stream_send_recv_stress() {
test!(fn stream_send_recv_stress() {
stress_factor().times(|| {
let (port, chan) = Chan::<~int>::new();
@ -1288,7 +1243,7 @@ mod test {
fn send(chan: Chan<~int>, i: int) {
if i == 10 { return }
do spawntask_random {
do spawn {
chan.send(~i);
send(chan, i + 1);
}
@ -1297,44 +1252,37 @@ mod test {
fn recv(port: Port<~int>, i: int) {
if i == 10 { return }
do spawntask_random {
do spawn {
assert!(port.recv() == ~i);
recv(port, i + 1);
};
}
})
}
})
#[test]
fn recv_a_lot() {
test!(fn recv_a_lot() {
// Regression test that we don't run out of stack in scheduler context
do run_in_newsched_task {
let (port, chan) = Chan::new();
10000.times(|| { chan.send(()) });
10000.times(|| { port.recv() });
}
}
let (port, chan) = Chan::new();
10000.times(|| { chan.send(()) });
10000.times(|| { port.recv() });
})
#[test]
fn shared_chan_stress() {
do run_in_mt_newsched_task {
let (port, chan) = SharedChan::new();
let total = stress_factor() + 100;
total.times(|| {
let chan_clone = chan.clone();
do spawntask_random {
chan_clone.send(());
}
});
test!(fn shared_chan_stress() {
let (port, chan) = SharedChan::new();
let total = stress_factor() + 100;
total.times(|| {
let chan_clone = chan.clone();
do spawn {
chan_clone.send(());
}
});
total.times(|| {
port.recv();
});
}
}
total.times(|| {
port.recv();
});
})
#[test]
fn test_nested_recv_iter() {
test!(fn test_nested_recv_iter() {
let (port, chan) = Chan::<int>::new();
let (total_port, total_chan) = Chan::<int>::new();
@ -1351,10 +1299,9 @@ mod test {
chan.send(2);
drop(chan);
assert_eq!(total_port.recv(), 6);
}
})
#[test]
fn test_recv_iter_break() {
test!(fn test_recv_iter_break() {
let (port, chan) = Chan::<int>::new();
let (count_port, count_chan) = Chan::<int>::new();
@ -1376,5 +1323,5 @@ mod test {
chan.try_send(2);
drop(chan);
assert_eq!(count_port.recv(), 4);
}
})
}

View File

@ -50,10 +50,13 @@ use kinds::Send;
use ops::Drop;
use option::{Some, None, Option};
use ptr::RawPtr;
use super::imp::BlockingContext;
use super::{Packet, Port, imp};
use result::{Ok, Err};
use rt::local::Local;
use rt::task::Task;
use super::{Packet, Port};
use sync::atomics::{Relaxed, SeqCst};
use task;
use uint;
use unstable::atomics::{Relaxed, SeqCst};
macro_rules! select {
(
@ -184,19 +187,22 @@ impl Select {
// Acquire a number of blocking contexts, and block on each one
// sequentially until one fails. If one fails, then abort
// immediately so we can go unblock on all the other ports.
BlockingContext::many(amt, |ctx| {
let task: ~Task = Local::take();
task.deschedule(amt, |task| {
// Prepare for the block
let (i, packet) = iter.next().unwrap();
assert!((*packet).to_wake.is_none());
(*packet).to_wake = Some(task);
(*packet).selecting.store(true, SeqCst);
if !ctx.block(&mut (*packet).data,
&mut (*packet).to_wake,
|| (*packet).decrement()) {
if (*packet).decrement() {
Ok(())
} else {
(*packet).abort_selection(false);
(*packet).selecting.store(false, SeqCst);
ready_index = i;
ready_id = (*packet).selection_id;
false
} else {
true
Err((*packet).to_wake.take_unwrap())
}
});
@ -225,7 +231,7 @@ impl Select {
if (*packet).abort_selection(true) {
ready_id = (*packet).selection_id;
while (*packet).selecting.load(Relaxed) {
imp::yield_now();
task::deschedule();
}
}
}
@ -304,6 +310,7 @@ impl Iterator<*mut Packet> for PacketIterator {
}
#[cfg(test)]
#[allow(unused_imports)]
mod test {
use super::super::*;
use prelude::*;
@ -359,19 +366,16 @@ mod test {
)
})
#[test]
fn unblocks() {
use std::io::timer;
test!(fn unblocks() {
let (mut p1, c1) = Chan::<int>::new();
let (mut p2, _c2) = Chan::<int>::new();
let (p3, c3) = Chan::<int>::new();
do spawn {
timer::sleep(3);
20.times(task::deschedule);
c1.send(1);
p3.recv();
timer::sleep(3);
20.times(task::deschedule);
}
select! (
@ -383,18 +387,15 @@ mod test {
a = p1.recv_opt() => { assert_eq!(a, None); },
_b = p2.recv() => { fail!() }
)
}
#[test]
fn both_ready() {
use std::io::timer;
})
test!(fn both_ready() {
let (mut p1, c1) = Chan::<int>::new();
let (mut p2, c2) = Chan::<int>::new();
let (p3, c3) = Chan::<()>::new();
do spawn {
timer::sleep(3);
20.times(task::deschedule);
c1.send(1);
c2.send(2);
p3.recv();
@ -408,11 +409,12 @@ mod test {
a = p1.recv() => { assert_eq!(a, 1); },
a = p2.recv() => { assert_eq!(a, 2); }
)
assert_eq!(p1.try_recv(), None);
assert_eq!(p2.try_recv(), None);
c3.send(());
}
})
#[test]
fn stress() {
test!(fn stress() {
static AMT: int = 10000;
let (mut p1, c1) = Chan::<int>::new();
let (mut p2, c2) = Chan::<int>::new();
@ -436,69 +438,5 @@ mod test {
)
c3.send(());
}
}
#[test]
#[ignore(cfg(windows))] // FIXME(#11003)
fn stress_native() {
use std::rt::thread::Thread;
use std::unstable::run_in_bare_thread;
static AMT: int = 10000;
do run_in_bare_thread {
let (mut p1, c1) = Chan::<int>::new();
let (mut p2, c2) = Chan::<int>::new();
let (p3, c3) = Chan::<()>::new();
let t = do Thread::start {
for i in range(0, AMT) {
if i % 2 == 0 {
c1.send(i);
} else {
c2.send(i);
}
p3.recv();
}
};
for i in range(0, AMT) {
select! (
i1 = p1.recv() => { assert!(i % 2 == 0 && i == i1); },
i2 = p2.recv() => { assert!(i % 2 == 1 && i == i2); }
)
c3.send(());
}
t.join();
}
}
#[test]
#[ignore(cfg(windows))] // FIXME(#11003)
fn native_both_ready() {
use std::rt::thread::Thread;
use std::unstable::run_in_bare_thread;
do run_in_bare_thread {
let (mut p1, c1) = Chan::<int>::new();
let (mut p2, c2) = Chan::<int>::new();
let (p3, c3) = Chan::<()>::new();
let t = do Thread::start {
c1.send(1);
c2.send(2);
p3.recv();
};
select! (
a = p1.recv() => { assert_eq!(a, 1); },
b = p2.recv() => { assert_eq!(b, 2); }
)
select! (
a = p1.recv() => { assert_eq!(a, 1); },
b = p2.recv() => { assert_eq!(b, 2); }
)
c3.send(());
t.join();
}
}
})
}

View File

@ -54,7 +54,7 @@ use super::{SeekStyle, Read, Write, Open, IoError, Truncate,
use rt::rtio::{RtioFileStream, IoFactory, LocalIo};
use io;
use option::{Some, None, Option};
use result::{Ok, Err, Result};
use result::{Ok, Err};
use path;
use path::{Path, GenericPath};
use vec::{OwnedVector, ImmutableVector};
@ -75,17 +75,6 @@ pub struct File {
priv last_nread: int,
}
fn io_raise<T>(f: |io: &mut IoFactory| -> Result<T, IoError>) -> Option<T> {
let mut io = LocalIo::borrow();
match f(io.get()) {
Ok(t) => Some(t),
Err(ioerr) => {
io_error::cond.raise(ioerr);
None
}
}
}
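// LocalIo::maybe_raise, used throughout the rest of this file, is not
// shown in this diff; assuming it hoists the deleted io_raise pattern
// above into rt::rtio, it would look roughly like:
pub fn maybe_raise<T>(f: |io: &mut IoFactory| -> Result<T, IoError>) -> Option<T> {
    let mut io = LocalIo::borrow();
    match f(io.get()) {
        Ok(t) => Some(t),
        Err(ioerr) => {
            io_error::cond.raise(ioerr);
            None
        }
    }
}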
impl File {
/// Open a file at `path` in the mode specified by the `mode` and `access`
/// arguments
@ -131,18 +120,15 @@ impl File {
pub fn open_mode(path: &Path,
mode: FileMode,
access: FileAccess) -> Option<File> {
let mut io = LocalIo::borrow();
match io.get().fs_open(&path.to_c_str(), mode, access) {
Ok(fd) => Some(File {
path: path.clone(),
fd: fd,
last_nread: -1
}),
Err(ioerr) => {
io_error::cond.raise(ioerr);
None
}
}
LocalIo::maybe_raise(|io| {
io.fs_open(&path.to_c_str(), mode, access).map(|fd| {
File {
path: path.clone(),
fd: fd,
last_nread: -1
}
})
})
}
/// Attempts to open a file in read-only mode. This function is equivalent to
@ -242,7 +228,7 @@ impl File {
/// directory, the user lacks permissions to remove the file, or if some
/// other filesystem-level error occurs.
pub fn unlink(path: &Path) {
io_raise(|io| io.fs_unlink(&path.to_c_str()));
LocalIo::maybe_raise(|io| io.fs_unlink(&path.to_c_str()));
}
/// Given a path, query the file system to get information about a file,
@ -270,7 +256,9 @@ pub fn unlink(path: &Path) {
/// requisite permissions to perform a `stat` call on the given path or if
/// there is no entry in the filesystem at the provided path.
pub fn stat(path: &Path) -> FileStat {
io_raise(|io| io.fs_stat(&path.to_c_str())).unwrap_or_else(dummystat)
LocalIo::maybe_raise(|io| {
io.fs_stat(&path.to_c_str())
}).unwrap_or_else(dummystat)
}
fn dummystat() -> FileStat {
@ -306,7 +294,9 @@ fn dummystat() -> FileStat {
///
/// See `stat`
pub fn lstat(path: &Path) -> FileStat {
io_raise(|io| io.fs_lstat(&path.to_c_str())).unwrap_or_else(dummystat)
LocalIo::maybe_raise(|io| {
io.fs_lstat(&path.to_c_str())
}).unwrap_or_else(dummystat)
}
/// Rename a file or directory to a new name.
@ -324,7 +314,7 @@ pub fn lstat(path: &Path) -> FileStat {
/// the process lacks permissions to view the contents, or if some other
/// intermittent I/O error occurs.
pub fn rename(from: &Path, to: &Path) {
io_raise(|io| io.fs_rename(&from.to_c_str(), &to.to_c_str()));
LocalIo::maybe_raise(|io| io.fs_rename(&from.to_c_str(), &to.to_c_str()));
}
/// Copies the contents of one file to another. This function will also
@ -395,7 +385,7 @@ pub fn copy(from: &Path, to: &Path) {
/// condition. Some possible error situations are not having the permission to
/// change the attributes of a file or the file not existing.
pub fn chmod(path: &Path, mode: io::FilePermission) {
io_raise(|io| io.fs_chmod(&path.to_c_str(), mode));
LocalIo::maybe_raise(|io| io.fs_chmod(&path.to_c_str(), mode));
}
/// Change the user and group owners of a file at the specified path.
@ -404,7 +394,7 @@ pub fn chmod(path: &Path, mode: io::FilePermission) {
///
/// This function will raise on the `io_error` condition on failure.
pub fn chown(path: &Path, uid: int, gid: int) {
io_raise(|io| io.fs_chown(&path.to_c_str(), uid, gid));
LocalIo::maybe_raise(|io| io.fs_chown(&path.to_c_str(), uid, gid));
}
/// Creates a new hard link on the filesystem. The `dst` path will be a
@ -415,7 +405,7 @@ pub fn chown(path: &Path, uid: int, gid: int) {
///
/// This function will raise on the `io_error` condition on failure.
pub fn link(src: &Path, dst: &Path) {
io_raise(|io| io.fs_link(&src.to_c_str(), &dst.to_c_str()));
LocalIo::maybe_raise(|io| io.fs_link(&src.to_c_str(), &dst.to_c_str()));
}
/// Creates a new symbolic link on the filesystem. The `dst` path will be a
@ -425,7 +415,7 @@ pub fn link(src: &Path, dst: &Path) {
///
/// This function will raise on the `io_error` condition on failure.
pub fn symlink(src: &Path, dst: &Path) {
io_raise(|io| io.fs_symlink(&src.to_c_str(), &dst.to_c_str()));
LocalIo::maybe_raise(|io| io.fs_symlink(&src.to_c_str(), &dst.to_c_str()));
}
/// Reads a symlink, returning the file that the symlink points to.
@ -436,7 +426,7 @@ pub fn symlink(src: &Path, dst: &Path) {
/// conditions include reading a file that does not exist or reading a file
/// which is not a symlink.
pub fn readlink(path: &Path) -> Option<Path> {
io_raise(|io| io.fs_readlink(&path.to_c_str()))
LocalIo::maybe_raise(|io| io.fs_readlink(&path.to_c_str()))
}
/// Create a new, empty directory at the provided path
@ -456,7 +446,7 @@ pub fn readlink(path: &Path) -> Option<Path> {
/// to make a new directory at the provided path, or if the directory already
/// exists.
pub fn mkdir(path: &Path, mode: FilePermission) {
io_raise(|io| io.fs_mkdir(&path.to_c_str(), mode));
LocalIo::maybe_raise(|io| io.fs_mkdir(&path.to_c_str(), mode));
}
/// Remove an existing, empty directory
@ -475,7 +465,7 @@ pub fn mkdir(path: &Path, mode: FilePermission) {
/// to remove the directory at the provided path, or if the directory isn't
/// empty.
pub fn rmdir(path: &Path) {
io_raise(|io| io.fs_rmdir(&path.to_c_str()));
LocalIo::maybe_raise(|io| io.fs_rmdir(&path.to_c_str()));
}
/// Retrieve a vector containing all entries within a provided directory
@ -502,7 +492,9 @@ pub fn rmdir(path: &Path) {
/// the process lacks permissions to view the contents or if the `path` points
/// at a non-directory file
pub fn readdir(path: &Path) -> ~[Path] {
io_raise(|io| io.fs_readdir(&path.to_c_str(), 0)).unwrap_or_else(|| ~[])
LocalIo::maybe_raise(|io| {
io.fs_readdir(&path.to_c_str(), 0)
}).unwrap_or_else(|| ~[])
}
/// Returns an iterator which will recursively walk the directory structure
@ -583,7 +575,7 @@ pub fn rmdir_recursive(path: &Path) {
/// happens.
// FIXME(#10301) these arguments should not be u64
pub fn change_file_times(path: &Path, atime: u64, mtime: u64) {
io_raise(|io| io.fs_utime(&path.to_c_str(), atime, mtime));
LocalIo::maybe_raise(|io| io.fs_utime(&path.to_c_str(), atime, mtime));
}
impl Reader for File {
@ -722,7 +714,7 @@ mod test {
}
}
fn tmpdir() -> TempDir {
pub fn tmpdir() -> TempDir {
use os;
use rand;
let ret = os::tmpdir().join(format!("rust-{}", rand::random::<u32>()));
@ -730,32 +722,7 @@ mod test {
TempDir(ret)
}
macro_rules! test (
{ fn $name:ident() $b:block } => (
mod $name {
use prelude::*;
use io::{SeekSet, SeekCur, SeekEnd, io_error, Read, Open,
ReadWrite};
use io;
use str;
use io::fs::{File, rmdir, mkdir, readdir, rmdir_recursive,
mkdir_recursive, copy, unlink, stat, symlink, link,
readlink, chmod, lstat, change_file_times};
use io::fs::test::tmpdir;
use util;
fn f() $b
#[test] fn uv() { f() }
#[test] fn native() {
use rt::test::run_in_newsched_task;
run_in_newsched_task(f);
}
}
)
)
test!(fn file_test_io_smoke_test() {
iotest!(fn file_test_io_smoke_test() {
let message = "it's alright. have a good time";
let tmpdir = tmpdir();
let filename = &tmpdir.join("file_rt_io_file_test.txt");
@ -775,7 +742,7 @@ mod test {
unlink(filename);
})
test!(fn invalid_path_raises() {
iotest!(fn invalid_path_raises() {
let tmpdir = tmpdir();
let filename = &tmpdir.join("file_that_does_not_exist.txt");
let mut called = false;
@ -788,7 +755,7 @@ mod test {
assert!(called);
})
test!(fn file_test_iounlinking_invalid_path_should_raise_condition() {
iotest!(fn file_test_iounlinking_invalid_path_should_raise_condition() {
let tmpdir = tmpdir();
let filename = &tmpdir.join("file_another_file_that_does_not_exist.txt");
let mut called = false;
@ -798,7 +765,7 @@ mod test {
assert!(called);
})
test!(fn file_test_io_non_positional_read() {
iotest!(fn file_test_io_non_positional_read() {
let message: &str = "ten-four";
let mut read_mem = [0, .. 8];
let tmpdir = tmpdir();
@ -823,7 +790,7 @@ mod test {
assert_eq!(read_str, message);
})
test!(fn file_test_io_seek_and_tell_smoke_test() {
iotest!(fn file_test_io_seek_and_tell_smoke_test() {
let message = "ten-four";
let mut read_mem = [0, .. 4];
let set_cursor = 4 as u64;
@ -849,7 +816,7 @@ mod test {
assert_eq!(tell_pos_post_read, message.len() as u64);
})
test!(fn file_test_io_seek_and_write() {
iotest!(fn file_test_io_seek_and_write() {
let initial_msg = "food-is-yummy";
let overwrite_msg = "-the-bar!!";
let final_msg = "foo-the-bar!!";
@ -872,7 +839,7 @@ mod test {
assert!(read_str == final_msg.to_owned());
})
test!(fn file_test_io_seek_shakedown() {
iotest!(fn file_test_io_seek_shakedown() {
use std::str; // 01234567890123
let initial_msg = "qwer-asdf-zxcv";
let chunk_one: &str = "qwer";
@ -903,7 +870,7 @@ mod test {
unlink(filename);
})
test!(fn file_test_stat_is_correct_on_is_file() {
iotest!(fn file_test_stat_is_correct_on_is_file() {
let tmpdir = tmpdir();
let filename = &tmpdir.join("file_stat_correct_on_is_file.txt");
{
@ -916,7 +883,7 @@ mod test {
unlink(filename);
})
test!(fn file_test_stat_is_correct_on_is_dir() {
iotest!(fn file_test_stat_is_correct_on_is_dir() {
let tmpdir = tmpdir();
let filename = &tmpdir.join("file_stat_correct_on_is_dir");
mkdir(filename, io::UserRWX);
@ -925,7 +892,7 @@ mod test {
rmdir(filename);
})
test!(fn file_test_fileinfo_false_when_checking_is_file_on_a_directory() {
iotest!(fn file_test_fileinfo_false_when_checking_is_file_on_a_directory() {
let tmpdir = tmpdir();
let dir = &tmpdir.join("fileinfo_false_on_dir");
mkdir(dir, io::UserRWX);
@ -933,7 +900,7 @@ mod test {
rmdir(dir);
})
test!(fn file_test_fileinfo_check_exists_before_and_after_file_creation() {
iotest!(fn file_test_fileinfo_check_exists_before_and_after_file_creation() {
let tmpdir = tmpdir();
let file = &tmpdir.join("fileinfo_check_exists_b_and_a.txt");
File::create(file).write(bytes!("foo"));
@ -942,7 +909,7 @@ mod test {
assert!(!file.exists());
})
test!(fn file_test_directoryinfo_check_exists_before_and_after_mkdir() {
iotest!(fn file_test_directoryinfo_check_exists_before_and_after_mkdir() {
let tmpdir = tmpdir();
let dir = &tmpdir.join("before_and_after_dir");
assert!(!dir.exists());
@ -953,7 +920,7 @@ mod test {
assert!(!dir.exists());
})
test!(fn file_test_directoryinfo_readdir() {
iotest!(fn file_test_directoryinfo_readdir() {
use std::str;
let tmpdir = tmpdir();
let dir = &tmpdir.join("di_readdir");
@ -984,11 +951,11 @@ mod test {
rmdir(dir);
})
test!(fn recursive_mkdir_slash() {
iotest!(fn recursive_mkdir_slash() {
mkdir_recursive(&Path::new("/"), io::UserRWX);
})
test!(fn unicode_path_is_dir() {
iotest!(fn unicode_path_is_dir() {
assert!(Path::new(".").is_dir());
assert!(!Path::new("test/stdtest/fs.rs").is_dir());
@ -1006,7 +973,7 @@ mod test {
assert!(filepath.exists());
})
test!(fn unicode_path_exists() {
iotest!(fn unicode_path_exists() {
assert!(Path::new(".").exists());
assert!(!Path::new("test/nonexistent-bogus-path").exists());
@ -1018,7 +985,7 @@ mod test {
assert!(!Path::new("test/unicode-bogus-path-각丁ー再见").exists());
})
test!(fn copy_file_does_not_exist() {
iotest!(fn copy_file_does_not_exist() {
let from = Path::new("test/nonexistent-bogus-path");
let to = Path::new("test/other-bogus-path");
match io::result(|| copy(&from, &to)) {
@ -1030,7 +997,7 @@ mod test {
}
})
test!(fn copy_file_ok() {
iotest!(fn copy_file_ok() {
let tmpdir = tmpdir();
let input = tmpdir.join("in.txt");
let out = tmpdir.join("out.txt");
@ -1043,7 +1010,7 @@ mod test {
assert_eq!(input.stat().perm, out.stat().perm);
})
test!(fn copy_file_dst_dir() {
iotest!(fn copy_file_dst_dir() {
let tmpdir = tmpdir();
let out = tmpdir.join("out");
@ -1053,7 +1020,7 @@ mod test {
}
})
test!(fn copy_file_dst_exists() {
iotest!(fn copy_file_dst_exists() {
let tmpdir = tmpdir();
let input = tmpdir.join("in");
let output = tmpdir.join("out");
@ -1066,7 +1033,7 @@ mod test {
(bytes!("foo")).to_owned());
})
test!(fn copy_file_src_dir() {
iotest!(fn copy_file_src_dir() {
let tmpdir = tmpdir();
let out = tmpdir.join("out");
@ -1076,7 +1043,7 @@ mod test {
assert!(!out.exists());
})
test!(fn copy_file_preserves_perm_bits() {
iotest!(fn copy_file_preserves_perm_bits() {
let tmpdir = tmpdir();
let input = tmpdir.join("in.txt");
let out = tmpdir.join("out.txt");
@ -1091,7 +1058,7 @@ mod test {
})
#[cfg(not(windows))] // FIXME(#10264) operation not permitted?
test!(fn symlinks_work() {
iotest!(fn symlinks_work() {
let tmpdir = tmpdir();
let input = tmpdir.join("in.txt");
let out = tmpdir.join("out.txt");
@ -1106,14 +1073,14 @@ mod test {
})
#[cfg(not(windows))] // apparently windows doesn't like symlinks
test!(fn symlink_noexist() {
iotest!(fn symlink_noexist() {
let tmpdir = tmpdir();
// symlinks can point to things that don't exist
symlink(&tmpdir.join("foo"), &tmpdir.join("bar"));
assert!(readlink(&tmpdir.join("bar")).unwrap() == tmpdir.join("foo"));
})
test!(fn readlink_not_symlink() {
iotest!(fn readlink_not_symlink() {
let tmpdir = tmpdir();
match io::result(|| readlink(&*tmpdir)) {
Ok(..) => fail!("wanted a failure"),
@ -1121,7 +1088,7 @@ mod test {
}
})
test!(fn links_work() {
iotest!(fn links_work() {
let tmpdir = tmpdir();
let input = tmpdir.join("in.txt");
let out = tmpdir.join("out.txt");
@ -1147,7 +1114,7 @@ mod test {
}
})
test!(fn chmod_works() {
iotest!(fn chmod_works() {
let tmpdir = tmpdir();
let file = tmpdir.join("in.txt");
@ -1164,7 +1131,7 @@ mod test {
chmod(&file, io::UserFile);
})
test!(fn sync_doesnt_kill_anything() {
iotest!(fn sync_doesnt_kill_anything() {
let tmpdir = tmpdir();
let path = tmpdir.join("in.txt");
@ -1177,7 +1144,7 @@ mod test {
drop(file);
})
test!(fn truncate_works() {
iotest!(fn truncate_works() {
let tmpdir = tmpdir();
let path = tmpdir.join("in.txt");
@ -1208,7 +1175,7 @@ mod test {
drop(file);
})
test!(fn open_flavors() {
iotest!(fn open_flavors() {
let tmpdir = tmpdir();
match io::result(|| File::open_mode(&tmpdir.join("a"), io::Open,

View File

@ -164,9 +164,6 @@ requests are implemented by descheduling the running task and
performing an asynchronous request; the task is only resumed once the
asynchronous request completes.
For blocking (but possibly more efficient) implementations, look
in the `io::native` module.
# Error Handling
I/O is an area where nearly every operation can result in unexpected
@ -316,6 +313,9 @@ pub use self::net::udp::UdpStream;
pub use self::pipe::PipeStream;
pub use self::process::Process;
/// Various utility functions useful for writing I/O tests
pub mod test;
/// Synchronous, non-blocking filesystem operations.
pub mod fs;
@ -349,8 +349,6 @@ pub mod timer;
/// Buffered I/O wrappers
pub mod buffered;
pub mod native;
/// Signal handling
pub mod signal;

View File

@ -18,8 +18,6 @@ getaddrinfo()
*/
use option::{Option, Some, None};
use result::{Ok, Err};
use io::{io_error};
use io::net::ip::{SocketAddr, IpAddr};
use rt::rtio::{IoFactory, LocalIo};
use vec::ImmutableVector;
@ -97,14 +95,7 @@ pub fn get_host_addresses(host: &str) -> Option<~[IpAddr]> {
/// consumption just yet.
fn lookup(hostname: Option<&str>, servname: Option<&str>, hint: Option<Hint>)
-> Option<~[Info]> {
let mut io = LocalIo::borrow();
match io.get().get_host_addresses(hostname, servname, hint) {
Ok(i) => Some(i),
Err(ioerr) => {
io_error::cond.raise(ioerr);
None
}
}
LocalIo::maybe_raise(|io| io.get_host_addresses(hostname, servname, hint))
}
#[cfg(test)]

View File

@ -26,17 +26,9 @@ impl TcpStream {
}
pub fn connect(addr: SocketAddr) -> Option<TcpStream> {
let result = {
let mut io = LocalIo::borrow();
io.get().tcp_connect(addr)
};
match result {
Ok(s) => Some(TcpStream::new(s)),
Err(ioerr) => {
io_error::cond.raise(ioerr);
None
}
}
LocalIo::maybe_raise(|io| {
io.tcp_connect(addr).map(TcpStream::new)
})
}
pub fn peer_name(&mut self) -> Option<SocketAddr> {
@ -94,14 +86,9 @@ pub struct TcpListener {
impl TcpListener {
pub fn bind(addr: SocketAddr) -> Option<TcpListener> {
let mut io = LocalIo::borrow();
match io.get().tcp_bind(addr) {
Ok(l) => Some(TcpListener { obj: l }),
Err(ioerr) => {
io_error::cond.raise(ioerr);
None
}
}
LocalIo::maybe_raise(|io| {
io.tcp_bind(addr).map(|l| TcpListener { obj: l })
})
}
pub fn socket_name(&mut self) -> Option<SocketAddr> {
@ -147,513 +134,473 @@ impl Acceptor<TcpStream> for TcpAcceptor {
#[cfg(test)]
mod test {
use super::*;
use rt::test::*;
use io::net::ip::{Ipv4Addr, SocketAddr};
use io::*;
use io::test::{next_test_ip4, next_test_ip6};
use prelude::*;
#[test] #[ignore]
fn bind_error() {
do run_in_mt_newsched_task {
let mut called = false;
io_error::cond.trap(|e| {
assert!(e.kind == PermissionDenied);
called = true;
}).inside(|| {
let addr = SocketAddr { ip: Ipv4Addr(0, 0, 0, 0), port: 1 };
let listener = TcpListener::bind(addr);
assert!(listener.is_none());
});
assert!(called);
}
let mut called = false;
io_error::cond.trap(|e| {
assert!(e.kind == PermissionDenied);
called = true;
}).inside(|| {
let addr = SocketAddr { ip: Ipv4Addr(0, 0, 0, 0), port: 1 };
let listener = TcpListener::bind(addr);
assert!(listener.is_none());
});
assert!(called);
}
#[test]
fn connect_error() {
do run_in_mt_newsched_task {
let mut called = false;
io_error::cond.trap(|e| {
let expected_error = if cfg!(unix) {
ConnectionRefused
} else {
// On Win32, opening port 1 gives WSAEADDRNOTAVAIL error.
OtherIoError
};
assert_eq!(e.kind, expected_error);
called = true;
}).inside(|| {
let addr = SocketAddr { ip: Ipv4Addr(0, 0, 0, 0), port: 1 };
let stream = TcpStream::connect(addr);
assert!(stream.is_none());
});
assert!(called);
}
let mut called = false;
io_error::cond.trap(|e| {
let expected_error = if cfg!(unix) {
ConnectionRefused
} else {
// On Win32, opening port 1 gives WSAEADDRNOTAVAIL error.
OtherIoError
};
assert_eq!(e.kind, expected_error);
called = true;
}).inside(|| {
let addr = SocketAddr { ip: Ipv4Addr(0, 0, 0, 0), port: 1 };
let stream = TcpStream::connect(addr);
assert!(stream.is_none());
});
assert!(called);
}
#[test]
fn smoke_test_ip4() {
do run_in_mt_newsched_task {
let addr = next_test_ip4();
let (port, chan) = Chan::new();
do spawntask {
let mut acceptor = TcpListener::bind(addr).listen();
chan.send(());
let mut stream = acceptor.accept();
let mut buf = [0];
stream.read(buf);
assert!(buf[0] == 99);
}
let addr = next_test_ip4();
let (port, chan) = Chan::new();
do spawn {
port.recv();
let mut stream = TcpStream::connect(addr);
stream.write([99]);
}
let mut acceptor = TcpListener::bind(addr).listen();
chan.send(());
let mut stream = acceptor.accept();
let mut buf = [0];
stream.read(buf);
assert!(buf[0] == 99);
}
#[test]
fn smoke_test_ip6() {
do run_in_mt_newsched_task {
let addr = next_test_ip6();
let (port, chan) = Chan::new();
do spawntask {
let mut acceptor = TcpListener::bind(addr).listen();
chan.send(());
let mut stream = acceptor.accept();
let mut buf = [0];
stream.read(buf);
assert!(buf[0] == 99);
}
let addr = next_test_ip6();
let (port, chan) = Chan::new();
do spawn {
port.recv();
let mut stream = TcpStream::connect(addr);
stream.write([99]);
}
let mut acceptor = TcpListener::bind(addr).listen();
chan.send(());
let mut stream = acceptor.accept();
let mut buf = [0];
stream.read(buf);
assert!(buf[0] == 99);
}
#[test]
fn read_eof_ip4() {
do run_in_mt_newsched_task {
let addr = next_test_ip4();
let (port, chan) = Chan::new();
do spawntask {
let mut acceptor = TcpListener::bind(addr).listen();
chan.send(());
let mut stream = acceptor.accept();
let mut buf = [0];
let nread = stream.read(buf);
assert!(nread.is_none());
}
let addr = next_test_ip4();
let (port, chan) = Chan::new();
do spawn {
port.recv();
let _stream = TcpStream::connect(addr);
// Close
}
let mut acceptor = TcpListener::bind(addr).listen();
chan.send(());
let mut stream = acceptor.accept();
let mut buf = [0];
let nread = stream.read(buf);
assert!(nread.is_none());
}
#[test]
fn read_eof_ip6() {
do run_in_mt_newsched_task {
let addr = next_test_ip6();
let (port, chan) = Chan::new();
do spawntask {
let mut acceptor = TcpListener::bind(addr).listen();
chan.send(());
let mut stream = acceptor.accept();
let mut buf = [0];
let nread = stream.read(buf);
assert!(nread.is_none());
}
let addr = next_test_ip6();
let (port, chan) = Chan::new();
do spawn {
port.recv();
let _stream = TcpStream::connect(addr);
// Close
}
let mut acceptor = TcpListener::bind(addr).listen();
chan.send(());
let mut stream = acceptor.accept();
let mut buf = [0];
let nread = stream.read(buf);
assert!(nread.is_none());
}
#[test]
fn read_eof_twice_ip4() {
do run_in_mt_newsched_task {
let addr = next_test_ip4();
let (port, chan) = Chan::new();
do spawntask {
let mut acceptor = TcpListener::bind(addr).listen();
chan.send(());
let mut stream = acceptor.accept();
let mut buf = [0];
let nread = stream.read(buf);
assert!(nread.is_none());
io_error::cond.trap(|e| {
if cfg!(windows) {
assert_eq!(e.kind, NotConnected);
} else {
fail!();
}
}).inside(|| {
let nread = stream.read(buf);
assert!(nread.is_none());
})
}
let addr = next_test_ip4();
let (port, chan) = Chan::new();
do spawn {
port.recv();
let _stream = TcpStream::connect(addr);
// Close
}
let mut acceptor = TcpListener::bind(addr).listen();
chan.send(());
let mut stream = acceptor.accept();
let mut buf = [0];
let nread = stream.read(buf);
assert!(nread.is_none());
io_error::cond.trap(|e| {
if cfg!(windows) {
assert_eq!(e.kind, NotConnected);
} else {
fail!();
}
}).inside(|| {
let nread = stream.read(buf);
assert!(nread.is_none());
})
}
#[test]
fn read_eof_twice_ip6() {
do run_in_mt_newsched_task {
let addr = next_test_ip6();
let (port, chan) = Chan::new();
do spawntask {
let mut acceptor = TcpListener::bind(addr).listen();
chan.send(());
let mut stream = acceptor.accept();
let mut buf = [0];
let nread = stream.read(buf);
assert!(nread.is_none());
io_error::cond.trap(|e| {
if cfg!(windows) {
assert_eq!(e.kind, NotConnected);
} else {
fail!();
}
}).inside(|| {
let nread = stream.read(buf);
assert!(nread.is_none());
})
}
let addr = next_test_ip6();
let (port, chan) = Chan::new();
do spawn {
port.recv();
let _stream = TcpStream::connect(addr);
// Close
}
let mut acceptor = TcpListener::bind(addr).listen();
chan.send(());
let mut stream = acceptor.accept();
let mut buf = [0];
let nread = stream.read(buf);
assert!(nread.is_none());
io_error::cond.trap(|e| {
if cfg!(windows) {
assert_eq!(e.kind, NotConnected);
} else {
fail!();
}
}).inside(|| {
let nread = stream.read(buf);
assert!(nread.is_none());
})
}
#[test]
fn write_close_ip4() {
do run_in_mt_newsched_task {
let addr = next_test_ip4();
let (port, chan) = Chan::new();
do spawntask {
let mut acceptor = TcpListener::bind(addr).listen();
chan.send(());
let mut stream = acceptor.accept();
let buf = [0];
loop {
let mut stop = false;
io_error::cond.trap(|e| {
// NB: ECONNRESET on linux, EPIPE on mac, ECONNABORTED
// on windows
assert!(e.kind == ConnectionReset ||
e.kind == BrokenPipe ||
e.kind == ConnectionAborted,
"unknown error: {:?}", e);
stop = true;
}).inside(|| {
stream.write(buf);
});
if stop { break }
}
}
let addr = next_test_ip4();
let (port, chan) = Chan::new();
do spawn {
port.recv();
let _stream = TcpStream::connect(addr);
// Close
}
let mut acceptor = TcpListener::bind(addr).listen();
chan.send(());
let mut stream = acceptor.accept();
let buf = [0];
loop {
let mut stop = false;
io_error::cond.trap(|e| {
// NB: ECONNRESET on linux, EPIPE on mac, ECONNABORTED
// on windows
assert!(e.kind == ConnectionReset ||
e.kind == BrokenPipe ||
e.kind == ConnectionAborted,
"unknown error: {:?}", e);
stop = true;
}).inside(|| {
stream.write(buf);
});
if stop { break }
}
}
#[test]
fn write_close_ip6() {
do run_in_mt_newsched_task {
let addr = next_test_ip6();
let (port, chan) = Chan::new();
do spawntask {
let mut acceptor = TcpListener::bind(addr).listen();
chan.send(());
let mut stream = acceptor.accept();
let buf = [0];
loop {
let mut stop = false;
io_error::cond.trap(|e| {
// NB: ECONNRESET on linux, EPIPE on mac, ECONNABORTED
// on windows
assert!(e.kind == ConnectionReset ||
e.kind == BrokenPipe ||
e.kind == ConnectionAborted,
"unknown error: {:?}", e);
stop = true;
}).inside(|| {
stream.write(buf);
});
if stop { break }
}
}
let addr = next_test_ip6();
let (port, chan) = Chan::new();
do spawn {
port.recv();
let _stream = TcpStream::connect(addr);
// Close
}
let mut acceptor = TcpListener::bind(addr).listen();
chan.send(());
let mut stream = acceptor.accept();
let buf = [0];
loop {
let mut stop = false;
io_error::cond.trap(|e| {
// NB: ECONNRESET on linux, EPIPE on mac, ECONNABORTED
// on windows
assert!(e.kind == ConnectionReset ||
e.kind == BrokenPipe ||
e.kind == ConnectionAborted,
"unknown error: {:?}", e);
stop = true;
}).inside(|| {
stream.write(buf);
});
if stop { break }
}
}
#[test]
fn multiple_connect_serial_ip4() {
do run_in_mt_newsched_task {
let addr = next_test_ip4();
let max = 10;
let (port, chan) = Chan::new();
do spawntask {
let mut acceptor = TcpListener::bind(addr).listen();
chan.send(());
for ref mut stream in acceptor.incoming().take(max) {
let mut buf = [0];
stream.read(buf);
assert_eq!(buf[0], 99);
}
}
let addr = next_test_ip4();
let max = 10;
let (port, chan) = Chan::new();
do spawn {
port.recv();
max.times(|| {
let mut stream = TcpStream::connect(addr);
stream.write([99]);
});
}
let mut acceptor = TcpListener::bind(addr).listen();
chan.send(());
for ref mut stream in acceptor.incoming().take(max) {
let mut buf = [0];
stream.read(buf);
assert_eq!(buf[0], 99);
}
}
#[test]
fn multiple_connect_serial_ip6() {
do run_in_mt_newsched_task {
let addr = next_test_ip6();
let max = 10;
let (port, chan) = Chan::new();
do spawntask {
let mut acceptor = TcpListener::bind(addr).listen();
chan.send(());
for ref mut stream in acceptor.incoming().take(max) {
let mut buf = [0];
stream.read(buf);
assert_eq!(buf[0], 99);
}
}
let addr = next_test_ip6();
let max = 10;
let (port, chan) = Chan::new();
do spawn {
port.recv();
max.times(|| {
let mut stream = TcpStream::connect(addr);
stream.write([99]);
});
}
let mut acceptor = TcpListener::bind(addr).listen();
chan.send(());
for ref mut stream in acceptor.incoming().take(max) {
let mut buf = [0];
stream.read(buf);
assert_eq!(buf[0], 99);
}
}
#[test]
fn multiple_connect_interleaved_greedy_schedule_ip4() {
do run_in_mt_newsched_task {
let addr = next_test_ip4();
static MAX: int = 10;
let (port, chan) = Chan::new();
let addr = next_test_ip4();
static MAX: int = 10;
let (port, chan) = Chan::new();
do spawntask {
let mut acceptor = TcpListener::bind(addr).listen();
chan.send(());
for (i, stream) in acceptor.incoming().enumerate().take(MAX as uint) {
// Start another task to handle the connection
do spawntask {
let mut stream = stream;
let mut buf = [0];
stream.read(buf);
assert!(buf[0] == i as u8);
debug!("read");
}
do spawn {
let mut acceptor = TcpListener::bind(addr).listen();
chan.send(());
for (i, stream) in acceptor.incoming().enumerate().take(MAX as uint) {
// Start another task to handle the connection
do spawn {
let mut stream = stream;
let mut buf = [0];
stream.read(buf);
assert!(buf[0] == i as u8);
debug!("read");
}
}
}
port.recv();
connect(0, addr);
port.recv();
connect(0, addr);
fn connect(i: int, addr: SocketAddr) {
if i == MAX { return }
fn connect(i: int, addr: SocketAddr) {
if i == MAX { return }
do spawntask {
debug!("connecting");
let mut stream = TcpStream::connect(addr);
// Connect again before writing
connect(i + 1, addr);
debug!("writing");
stream.write([i as u8]);
}
do spawn {
debug!("connecting");
let mut stream = TcpStream::connect(addr);
// Connect again before writing
connect(i + 1, addr);
debug!("writing");
stream.write([i as u8]);
}
}
}
#[test]
fn multiple_connect_interleaved_greedy_schedule_ip6() {
do run_in_mt_newsched_task {
let addr = next_test_ip6();
static MAX: int = 10;
let (port, chan) = Chan::new();
let addr = next_test_ip6();
static MAX: int = 10;
let (port, chan) = Chan::<()>::new();
do spawntask {
let mut acceptor = TcpListener::bind(addr).listen();
chan.send(());
for (i, stream) in acceptor.incoming().enumerate().take(MAX as uint) {
// Start another task to handle the connection
do spawntask {
let mut stream = stream;
let mut buf = [0];
stream.read(buf);
assert!(buf[0] == i as u8);
debug!("read");
}
do spawn {
let mut acceptor = TcpListener::bind(addr).listen();
chan.send(());
for (i, stream) in acceptor.incoming().enumerate().take(MAX as uint) {
// Start another task to handle the connection
do spawn {
let mut stream = stream;
let mut buf = [0];
stream.read(buf);
assert!(buf[0] == i as u8);
debug!("read");
}
}
}
port.recv();
connect(0, addr);
port.recv();
connect(0, addr);
fn connect(i: int, addr: SocketAddr) {
if i == MAX { return }
fn connect(i: int, addr: SocketAddr) {
if i == MAX { return }
do spawntask {
debug!("connecting");
let mut stream = TcpStream::connect(addr);
// Connect again before writing
connect(i + 1, addr);
debug!("writing");
stream.write([i as u8]);
}
do spawn {
debug!("connecting");
let mut stream = TcpStream::connect(addr);
// Connect again before writing
connect(i + 1, addr);
debug!("writing");
stream.write([i as u8]);
}
}
}
#[test]
fn multiple_connect_interleaved_lazy_schedule_ip4() {
do run_in_mt_newsched_task {
let addr = next_test_ip4();
static MAX: int = 10;
let (port, chan) = Chan::new();
let addr = next_test_ip4();
static MAX: int = 10;
let (port, chan) = Chan::new();
do spawntask {
let mut acceptor = TcpListener::bind(addr).listen();
chan.send(());
for stream in acceptor.incoming().take(MAX as uint) {
// Start another task to handle the connection
do spawntask_later {
let mut stream = stream;
let mut buf = [0];
stream.read(buf);
assert!(buf[0] == 99);
debug!("read");
}
do spawn {
let mut acceptor = TcpListener::bind(addr).listen();
chan.send(());
for stream in acceptor.incoming().take(MAX as uint) {
// Start another task to handle the connection
do spawn {
let mut stream = stream;
let mut buf = [0];
stream.read(buf);
assert!(buf[0] == 99);
debug!("read");
}
}
}
port.recv();
connect(0, addr);
port.recv();
connect(0, addr);
fn connect(i: int, addr: SocketAddr) {
if i == MAX { return }
fn connect(i: int, addr: SocketAddr) {
if i == MAX { return }
do spawntask_later {
debug!("connecting");
let mut stream = TcpStream::connect(addr);
// Connect again before writing
connect(i + 1, addr);
debug!("writing");
stream.write([99]);
}
do spawn {
debug!("connecting");
let mut stream = TcpStream::connect(addr);
// Connect again before writing
connect(i + 1, addr);
debug!("writing");
stream.write([99]);
}
}
}
#[test]
fn multiple_connect_interleaved_lazy_schedule_ip6() {
do run_in_mt_newsched_task {
let addr = next_test_ip6();
static MAX: int = 10;
let (port, chan) = Chan::new();
let addr = next_test_ip6();
static MAX: int = 10;
let (port, chan) = Chan::new();
do spawntask {
let mut acceptor = TcpListener::bind(addr).listen();
chan.send(());
for stream in acceptor.incoming().take(MAX as uint) {
// Start another task to handle the connection
do spawntask_later {
let mut stream = stream;
let mut buf = [0];
stream.read(buf);
assert!(buf[0] == 99);
debug!("read");
}
do spawn {
let mut acceptor = TcpListener::bind(addr).listen();
chan.send(());
for stream in acceptor.incoming().take(MAX as uint) {
// Start another task to handle the connection
do spawn {
let mut stream = stream;
let mut buf = [0];
stream.read(buf);
assert!(buf[0] == 99);
debug!("read");
}
}
}
port.recv();
connect(0, addr);
port.recv();
connect(0, addr);
fn connect(i: int, addr: SocketAddr) {
if i == MAX { return }
fn connect(i: int, addr: SocketAddr) {
if i == MAX { return }
do spawntask_later {
debug!("connecting");
let mut stream = TcpStream::connect(addr);
// Connect again before writing
connect(i + 1, addr);
debug!("writing");
stream.write([99]);
}
do spawn {
debug!("connecting");
let mut stream = TcpStream::connect(addr);
// Connect again before writing
connect(i + 1, addr);
debug!("writing");
stream.write([99]);
}
}
}
#[cfg(test)]
fn socket_name(addr: SocketAddr) {
do run_in_mt_newsched_task {
do spawntask {
let mut listener = TcpListener::bind(addr).unwrap();
let mut listener = TcpListener::bind(addr).unwrap();
// Make sure socket_name gives
// us the socket we bound to.
let so_name = listener.socket_name();
assert!(so_name.is_some());
assert_eq!(addr, so_name.unwrap());
}
}
// Make sure socket_name gives
// us the socket we bound to.
let so_name = listener.socket_name();
assert!(so_name.is_some());
assert_eq!(addr, so_name.unwrap());
}
#[cfg(test)]
fn peer_name(addr: SocketAddr) {
do run_in_mt_newsched_task {
let (port, chan) = Chan::new();
let (port, chan) = Chan::new();
do spawntask {
let mut acceptor = TcpListener::bind(addr).listen();
chan.send(());
acceptor.accept();
}
port.recv();
let stream = TcpStream::connect(addr);
assert!(stream.is_some());
let mut stream = stream.unwrap();
// Make sure peer_name gives us the
// address/port of the peer we've
// connected to.
let peer_name = stream.peer_name();
assert!(peer_name.is_some());
assert_eq!(addr, peer_name.unwrap());
do spawn {
let mut acceptor = TcpListener::bind(addr).listen();
chan.send(());
acceptor.accept();
}
port.recv();
let stream = TcpStream::connect(addr);
assert!(stream.is_some());
let mut stream = stream.unwrap();
// Make sure peer_name gives us the
// address/port of the peer we've
// connected to.
let peer_name = stream.peer_name();
assert!(peer_name.is_some());
assert_eq!(addr, peer_name.unwrap());
}
#[test]
@ -668,5 +615,4 @@ mod test {
//peer_name(next_test_ip6());
socket_name(next_test_ip6());
}
}

View File

@ -21,14 +21,9 @@ pub struct UdpSocket {
impl UdpSocket {
pub fn bind(addr: SocketAddr) -> Option<UdpSocket> {
let mut io = LocalIo::borrow();
match io.get().udp_bind(addr) {
Ok(s) => Some(UdpSocket { obj: s }),
Err(ioerr) => {
io_error::cond.raise(ioerr);
None
}
}
LocalIo::maybe_raise(|io| {
io.udp_bind(addr).map(|s| UdpSocket { obj: s })
})
}
pub fn recvfrom(&mut self, buf: &mut [u8]) -> Option<(uint, SocketAddr)> {
@ -104,52 +99,32 @@ impl Writer for UdpStream {
#[cfg(test)]
mod test {
use super::*;
use rt::test::*;
use io::net::ip::{Ipv4Addr, SocketAddr};
use io::*;
use io::test::*;
use prelude::*;
#[test] #[ignore]
fn bind_error() {
do run_in_mt_newsched_task {
let mut called = false;
io_error::cond.trap(|e| {
assert!(e.kind == PermissionDenied);
called = true;
}).inside(|| {
let addr = SocketAddr { ip: Ipv4Addr(0, 0, 0, 0), port: 1 };
let socket = UdpSocket::bind(addr);
assert!(socket.is_none());
});
assert!(called);
}
let mut called = false;
io_error::cond.trap(|e| {
assert!(e.kind == PermissionDenied);
called = true;
}).inside(|| {
let addr = SocketAddr { ip: Ipv4Addr(0, 0, 0, 0), port: 1 };
let socket = UdpSocket::bind(addr);
assert!(socket.is_none());
});
assert!(called);
}
#[test]
fn socket_smoke_test_ip4() {
do run_in_mt_newsched_task {
let server_ip = next_test_ip4();
let client_ip = next_test_ip4();
let (port, chan) = Chan::new();
do spawntask {
match UdpSocket::bind(server_ip) {
Some(ref mut server) => {
chan.send(());
let mut buf = [0];
match server.recvfrom(buf) {
Some((nread, src)) => {
assert_eq!(nread, 1);
assert_eq!(buf[0], 99);
assert_eq!(src, client_ip);
}
None => fail!()
}
}
None => fail!()
}
}
let server_ip = next_test_ip4();
let client_ip = next_test_ip4();
let (port, chan) = Chan::new();
do spawn {
match UdpSocket::bind(client_ip) {
Some(ref mut client) => {
port.recv();
@ -158,33 +133,31 @@ mod test {
None => fail!()
}
}
match UdpSocket::bind(server_ip) {
Some(ref mut server) => {
chan.send(());
let mut buf = [0];
match server.recvfrom(buf) {
Some((nread, src)) => {
assert_eq!(nread, 1);
assert_eq!(buf[0], 99);
assert_eq!(src, client_ip);
}
None => fail!()
}
}
None => fail!()
}
}
#[test]
fn socket_smoke_test_ip6() {
do run_in_mt_newsched_task {
let server_ip = next_test_ip6();
let client_ip = next_test_ip6();
let (port, chan) = Chan::new();
do spawntask {
match UdpSocket::bind(server_ip) {
Some(ref mut server) => {
chan.send(());
let mut buf = [0];
match server.recvfrom(buf) {
Some((nread, src)) => {
assert_eq!(nread, 1);
assert_eq!(buf[0], 99);
assert_eq!(src, client_ip);
}
None => fail!()
}
}
None => fail!()
}
}
let server_ip = next_test_ip6();
let client_ip = next_test_ip6();
let (port, chan) = Chan::<()>::new();
do spawn {
match UdpSocket::bind(client_ip) {
Some(ref mut client) => {
port.recv();
@ -193,34 +166,31 @@ mod test {
None => fail!()
}
}
}
#[test]
fn stream_smoke_test_ip4() {
do run_in_mt_newsched_task {
let server_ip = next_test_ip4();
let client_ip = next_test_ip4();
let (port, chan) = Chan::new();
do spawntask {
match UdpSocket::bind(server_ip) {
Some(server) => {
let server = ~server;
let mut stream = server.connect(client_ip);
chan.send(());
let mut buf = [0];
match stream.read(buf) {
Some(nread) => {
assert_eq!(nread, 1);
assert_eq!(buf[0], 99);
}
None => fail!()
}
match UdpSocket::bind(server_ip) {
Some(ref mut server) => {
chan.send(());
let mut buf = [0];
match server.recvfrom(buf) {
Some((nread, src)) => {
assert_eq!(nread, 1);
assert_eq!(buf[0], 99);
assert_eq!(src, client_ip);
}
None => fail!()
}
}
None => fail!()
}
}
#[test]
fn stream_smoke_test_ip4() {
let server_ip = next_test_ip4();
let client_ip = next_test_ip4();
let (port, chan) = Chan::new();
do spawn {
match UdpSocket::bind(client_ip) {
Some(client) => {
let client = ~client;
@ -231,34 +201,32 @@ mod test {
None => fail!()
}
}
match UdpSocket::bind(server_ip) {
Some(server) => {
let server = ~server;
let mut stream = server.connect(client_ip);
chan.send(());
let mut buf = [0];
match stream.read(buf) {
Some(nread) => {
assert_eq!(nread, 1);
assert_eq!(buf[0], 99);
}
None => fail!()
}
}
None => fail!()
}
}
#[test]
fn stream_smoke_test_ip6() {
do run_in_mt_newsched_task {
let server_ip = next_test_ip6();
let client_ip = next_test_ip6();
let (port, chan) = Chan::new();
do spawntask {
match UdpSocket::bind(server_ip) {
Some(server) => {
let server = ~server;
let mut stream = server.connect(client_ip);
chan.send(());
let mut buf = [0];
match stream.read(buf) {
Some(nread) => {
assert_eq!(nread, 1);
assert_eq!(buf[0], 99);
}
None => fail!()
}
}
None => fail!()
}
}
let server_ip = next_test_ip6();
let client_ip = next_test_ip6();
let (port, chan) = Chan::new();
do spawn {
match UdpSocket::bind(client_ip) {
Some(client) => {
let client = ~client;
@ -269,25 +237,36 @@ mod test {
None => fail!()
}
}
match UdpSocket::bind(server_ip) {
Some(server) => {
let server = ~server;
let mut stream = server.connect(client_ip);
chan.send(());
let mut buf = [0];
match stream.read(buf) {
Some(nread) => {
assert_eq!(nread, 1);
assert_eq!(buf[0], 99);
}
None => fail!()
}
}
None => fail!()
}
}
#[cfg(test)]
fn socket_name(addr: SocketAddr) {
do run_in_mt_newsched_task {
do spawntask {
let server = UdpSocket::bind(addr);
let server = UdpSocket::bind(addr);
assert!(server.is_some());
let mut server = server.unwrap();
assert!(server.is_some());
let mut server = server.unwrap();
// Make sure socket_name gives
// us the socket we bound to.
let so_name = server.socket_name();
assert!(so_name.is_some());
assert_eq!(addr, so_name.unwrap());
}
}
// Make sure socket_name gives
// us the socket we bound to.
let so_name = server.socket_name();
assert!(so_name.is_some());
assert_eq!(addr, so_name.unwrap());
}
#[test]

View File

@ -59,14 +59,9 @@ impl UnixStream {
/// stream.write([1, 2, 3]);
///
pub fn connect<P: ToCStr>(path: &P) -> Option<UnixStream> {
let mut io = LocalIo::borrow();
match io.get().unix_connect(&path.to_c_str()) {
Ok(s) => Some(UnixStream::new(s)),
Err(ioerr) => {
io_error::cond.raise(ioerr);
None
}
}
LocalIo::maybe_raise(|io| {
io.unix_connect(&path.to_c_str()).map(UnixStream::new)
})
}
}
@ -107,14 +102,9 @@ impl UnixListener {
/// }
///
pub fn bind<P: ToCStr>(path: &P) -> Option<UnixListener> {
let mut io = LocalIo::borrow();
match io.get().unix_bind(&path.to_c_str()) {
Ok(s) => Some(UnixListener{ obj: s }),
Err(ioerr) => {
io_error::cond.raise(ioerr);
None
}
}
LocalIo::maybe_raise(|io| {
io.unix_bind(&path.to_c_str()).map(|s| UnixListener { obj: s })
})
}
}
@ -150,55 +140,49 @@ impl Acceptor<UnixStream> for UnixAcceptor {
mod tests {
use prelude::*;
use super::*;
use rt::test::*;
use io::*;
use io::test::*;
fn smalltest(server: proc(UnixStream), client: proc(UnixStream)) {
do run_in_mt_newsched_task {
let path1 = next_test_unix();
let path2 = path1.clone();
let (client, server) = (client, server);
let (port, chan) = Chan::new();
do spawntask {
let mut acceptor = UnixListener::bind(&path1).listen();
chan.send(());
server(acceptor.accept().unwrap());
}
let path1 = next_test_unix();
let path2 = path1.clone();
let (port, chan) = Chan::new();
do spawn {
port.recv();
client(UnixStream::connect(&path2).unwrap());
}
let mut acceptor = UnixListener::bind(&path1).listen();
chan.send(());
server(acceptor.accept().unwrap());
}
#[test]
fn bind_error() {
do run_in_mt_newsched_task {
let mut called = false;
io_error::cond.trap(|e| {
assert!(e.kind == PermissionDenied);
called = true;
}).inside(|| {
let listener = UnixListener::bind(&("path/to/nowhere"));
assert!(listener.is_none());
});
assert!(called);
}
let mut called = false;
io_error::cond.trap(|e| {
assert!(e.kind == PermissionDenied);
called = true;
}).inside(|| {
let listener = UnixListener::bind(&("path/to/nowhere"));
assert!(listener.is_none());
});
assert!(called);
}
#[test]
fn connect_error() {
do run_in_mt_newsched_task {
let mut called = false;
io_error::cond.trap(|e| {
assert_eq!(e.kind, FileNotFound);
called = true;
}).inside(|| {
let stream = UnixStream::connect(&("path/to/nowhere"));
assert!(stream.is_none());
});
assert!(called);
}
let mut called = false;
io_error::cond.trap(|e| {
assert_eq!(e.kind,
if cfg!(windows) {OtherIoError} else {FileNotFound});
called = true;
}).inside(|| {
let stream = UnixStream::connect(&("path/to/nowhere"));
assert!(stream.is_none());
});
assert!(called);
}
#[test]
@ -244,37 +228,33 @@ mod tests {
#[test]
fn accept_lots() {
do run_in_mt_newsched_task {
let times = 10;
let path1 = next_test_unix();
let path2 = path1.clone();
let (port, chan) = Chan::new();
do spawntask {
let mut acceptor = UnixListener::bind(&path1).listen();
chan.send(());
times.times(|| {
let mut client = acceptor.accept();
let mut buf = [0];
client.read(buf);
assert_eq!(buf[0], 100);
})
}
let times = 10;
let path1 = next_test_unix();
let path2 = path1.clone();
let (port, chan) = Chan::new();
do spawn {
port.recv();
times.times(|| {
let mut stream = UnixStream::connect(&path2);
stream.write([100]);
})
}
let mut acceptor = UnixListener::bind(&path1).listen();
chan.send(());
times.times(|| {
let mut client = acceptor.accept();
let mut buf = [0];
client.read(buf);
assert_eq!(buf[0], 100);
})
}
#[test]
fn path_exists() {
do run_in_mt_newsched_task {
let path = next_test_unix();
let _acceptor = UnixListener::bind(&path).listen();
assert!(path.exists());
}
let path = next_test_unix();
let _acceptor = UnixListener::bind(&path).listen();
assert!(path.exists());
}
}

View File

@ -106,53 +106,46 @@ impl<T, A: Acceptor<T>> Acceptor<T> for Option<A> {
mod test {
use option::*;
use super::super::mem::*;
use rt::test::*;
use super::super::{PreviousIoError, io_error};
#[test]
fn test_option_writer() {
do run_in_mt_newsched_task {
let mut writer: Option<MemWriter> = Some(MemWriter::new());
writer.write([0, 1, 2]);
writer.flush();
assert_eq!(writer.unwrap().inner(), ~[0, 1, 2]);
}
let mut writer: Option<MemWriter> = Some(MemWriter::new());
writer.write([0, 1, 2]);
writer.flush();
assert_eq!(writer.unwrap().inner(), ~[0, 1, 2]);
}
#[test]
fn test_option_writer_error() {
do run_in_mt_newsched_task {
let mut writer: Option<MemWriter> = None;
let mut writer: Option<MemWriter> = None;
let mut called = false;
io_error::cond.trap(|err| {
assert_eq!(err.kind, PreviousIoError);
called = true;
}).inside(|| {
writer.write([0, 0, 0]);
});
assert!(called);
let mut called = false;
io_error::cond.trap(|err| {
assert_eq!(err.kind, PreviousIoError);
called = true;
}).inside(|| {
writer.write([0, 0, 0]);
});
assert!(called);
let mut called = false;
io_error::cond.trap(|err| {
assert_eq!(err.kind, PreviousIoError);
called = true;
}).inside(|| {
writer.flush();
});
assert!(called);
}
let mut called = false;
io_error::cond.trap(|err| {
assert_eq!(err.kind, PreviousIoError);
called = true;
}).inside(|| {
writer.flush();
});
assert!(called);
}
#[test]
fn test_option_reader() {
do run_in_mt_newsched_task {
let mut reader: Option<MemReader> = Some(MemReader::new(~[0, 1, 2, 3]));
let mut buf = [0, 0];
reader.read(buf);
assert_eq!(buf, [0, 1]);
assert!(!reader.eof());
}
let mut reader: Option<MemReader> = Some(MemReader::new(~[0, 1, 2, 3]));
let mut buf = [0, 0];
reader.read(buf);
assert_eq!(buf, [0, 1]);
assert!(!reader.eof());
}
#[test]

View File

@ -14,10 +14,9 @@
//! enough so that pipes can be created to child processes.
use prelude::*;
use super::{Reader, Writer};
use io::{io_error, EndOfFile};
use io::native::file;
use rt::rtio::{LocalIo, RtioPipe};
use libc;
use rt::rtio::{RtioPipe, LocalIo};
pub struct PipeStream {
priv obj: ~RtioPipe,
@ -43,15 +42,10 @@ impl PipeStream {
///
/// If the pipe cannot be created, an error will be raised on the
/// `io_error` condition.
pub fn open(fd: file::fd_t) -> Option<PipeStream> {
let mut io = LocalIo::borrow();
match io.get().pipe_open(fd) {
Ok(obj) => Some(PipeStream { obj: obj }),
Err(e) => {
io_error::cond.raise(e);
None
}
}
pub fn open(fd: libc::c_int) -> Option<PipeStream> {
LocalIo::maybe_raise(|io| {
io.pipe_open(fd).map(|obj| PipeStream { obj: obj })
})
}
pub fn new(inner: ~RtioPipe) -> PipeStream {

View File

@ -119,19 +119,17 @@ impl Process {
/// Creates a new process spawned according to the given configuration,
/// raising on the `io_error` condition if the spawn fails
pub fn new(config: ProcessConfig) -> Option<Process> {
let mut io = LocalIo::borrow();
match io.get().spawn(config) {
Ok((p, io)) => Some(Process{
handle: p,
io: io.move_iter().map(|p|
p.map(|p| io::PipeStream::new(p))
).collect()
}),
Err(ioerr) => {
io_error::cond.raise(ioerr);
None
}
}
let mut config = Some(config);
LocalIo::maybe_raise(|io| {
io.spawn(config.take_unwrap()).map(|(p, io)| {
Process {
handle: p,
io: io.move_iter().map(|p| {
p.map(|p| io::PipeStream::new(p))
}).collect()
}
})
})
}
/// Returns the process id of this child process

View File

@ -23,8 +23,7 @@ use clone::Clone;
use comm::{Port, SharedChan};
use container::{Map, MutableMap};
use hashmap;
use io::io_error;
use result::{Err, Ok};
use option::{Some, None};
use rt::rtio::{IoFactory, LocalIo, RtioSignal};
#[repr(int)]
@ -122,16 +121,14 @@ impl Listener {
if self.handles.contains_key(&signum) {
return true; // self is already listening to signum, so succeed
}
let mut io = LocalIo::borrow();
match io.get().signal(signum, self.chan.clone()) {
Ok(w) => {
self.handles.insert(signum, w);
match LocalIo::maybe_raise(|io| {
io.signal(signum, self.chan.clone())
}) {
Some(handle) => {
self.handles.insert(signum, handle);
true
},
Err(ioerr) => {
io_error::cond.raise(ioerr);
false
}
None => false
}
}

View File

@ -27,13 +27,13 @@ out.write(bytes!("Hello, world!"));
*/
use fmt;
use io::buffered::LineBufferedWriter;
use io::{Reader, Writer, io_error, IoError, OtherIoError,
standard_error, EndOfFile};
use libc;
use option::{Option, Some, None};
use result::{Ok, Err};
use io::buffered::LineBufferedWriter;
use rt::rtio::{DontClose, IoFactory, LocalIo, RtioFileStream, RtioTTY};
use super::{Reader, Writer, io_error, IoError, OtherIoError,
standard_error, EndOfFile};
// And so begins the tale of acquiring a uv handle to a stdio stream on all
// platforms in all situations. Our story begins by splitting the world into two
@ -69,19 +69,12 @@ enum StdSource {
}
fn src<T>(fd: libc::c_int, readable: bool, f: |StdSource| -> T) -> T {
let mut io = LocalIo::borrow();
match io.get().tty_open(fd, readable) {
Ok(tty) => f(TTY(tty)),
Err(_) => {
// It's not really that desirable if these handles are closed
// synchronously, and because they're squirreled away in a task
// structure the destructors will be run when the task is
// attempted to get destroyed. This means that if we run a
// synchronous destructor we'll attempt to do some scheduling
// operations which will just result in sadness.
f(File(io.get().fs_from_raw_fd(fd, DontClose)))
}
}
LocalIo::maybe_raise(|io| {
Ok(match io.tty_open(fd, readable) {
Ok(tty) => f(TTY(tty)),
Err(_) => f(File(io.fs_from_raw_fd(fd, DontClose))),
})
}).unwrap()
}
/// Creates a new non-blocking handle to the stdin of the current process.
@ -138,7 +131,17 @@ fn with_task_stdout(f: |&mut Writer|) {
}
None => {
let mut io = stdout();
struct Stdout;
impl Writer for Stdout {
fn write(&mut self, data: &[u8]) {
unsafe {
libc::write(libc::STDOUT_FILENO,
data.as_ptr() as *libc::c_void,
data.len() as libc::size_t);
}
}
}
let mut io = Stdout;
f(&mut io as &mut Writer);
}
}
@ -304,23 +307,10 @@ impl Writer for StdWriter {
#[cfg(test)]
mod tests {
use super::*;
use rt::test::run_in_newsched_task;
#[test]
fn smoke_uv() {
iotest!(fn smoke() {
// Just make sure we can acquire handles
stdin();
stdout();
stderr();
}
#[test]
fn smoke_native() {
do run_in_newsched_task {
stdin();
stdout();
stderr();
}
}
})
}

src/libstd/io/test.rs (new file, 195 lines)
View File

@ -0,0 +1,195 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[macro_escape];
use os;
use prelude::*;
use rand;
use rand::Rng;
use std::io::net::ip::*;
use sync::atomics::{AtomicUint, INIT_ATOMIC_UINT, Relaxed};
macro_rules! iotest (
{ fn $name:ident() $b:block } => (
mod $name {
#[allow(unused_imports)];
use super::super::*;
use super::*;
use io;
use prelude::*;
use io::*;
use io::fs::*;
use io::net::tcp::*;
use io::net::ip::*;
use io::net::udp::*;
#[cfg(unix)]
use io::net::unix::*;
use str;
use util;
fn f() $b
#[test] fn green() { f() }
#[test] fn native() {
use native;
let (p, c) = Chan::new();
do native::task::spawn { c.send(f()) }
p.recv();
}
}
)
)
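
Usage mirrors a plain `#[test]` function; a hypothetical invocation (name and body are illustrative):

```
iotest!(fn smoke() {
    // Expands to a `smoke` module holding two #[test] fns: `green` runs
    // this body directly on the scheduler the test binary booted with,
    // while `native` re-runs it inside a freshly spawned 1:1 native task.
    let addr = next_test_ip4();
    assert!(TcpListener::bind(addr).is_some());
})
```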
/// Get a port number, starting at 9600, for use in tests
pub fn next_test_port() -> u16 {
static mut next_offset: AtomicUint = INIT_ATOMIC_UINT;
unsafe {
base_port() + next_offset.fetch_add(1, Relaxed) as u16
}
}
/// Get a temporary path which could be the location of a unix socket
pub fn next_test_unix() -> Path {
if cfg!(unix) {
os::tmpdir().join(rand::task_rng().gen_ascii_str(20))
} else {
Path::new(r"\\.\pipe\" + rand::task_rng().gen_ascii_str(20))
}
}
/// Get a unique IPv4 localhost:port pair starting at 9600
pub fn next_test_ip4() -> SocketAddr {
SocketAddr { ip: Ipv4Addr(127, 0, 0, 1), port: next_test_port() }
}
/// Get a unique IPv6 localhost:port pair starting at 9600
pub fn next_test_ip6() -> SocketAddr {
SocketAddr { ip: Ipv6Addr(0, 0, 0, 0, 0, 0, 0, 1), port: next_test_port() }
}
/*
XXX: Welcome to MegaHack City.
The bots run multiple builds at the same time, and these builds
all want to use ports. This function figures out which workspace
it is running in and assigns a port range based on it.
*/
fn base_port() -> u16 {
let base = 9600u16;
let range = 1000u16;
let bases = [
("32-opt", base + range * 1),
("32-noopt", base + range * 2),
("64-opt", base + range * 3),
("64-noopt", base + range * 4),
("64-opt-vg", base + range * 5),
("all-opt", base + range * 6),
("snap3", base + range * 7),
("dist", base + range * 8)
];
// FIXME (#9639): This needs to handle non-utf8 paths
let path = os::getcwd();
let path_s = path.as_str().unwrap();
let mut final_base = base;
for &(dir, base) in bases.iter() {
if path_s.contains(dir) {
final_base = base;
break;
}
}
return final_base;
}
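For concreteness: a bot whose working directory contains `64-opt` gets `base_port() == 9600 + 1000 * 3 == 12600`, so its `next_test_port()` calls hand out 12600, 12601, and so on, while a `32-noopt` build stays inside 11600-12599; concurrent builders therefore never contend for the same ports.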
pub fn raise_fd_limit() {
unsafe { darwin_fd_limit::raise_fd_limit() }
}
#[cfg(target_os="macos")]
#[allow(non_camel_case_types)]
mod darwin_fd_limit {
/*!
* darwin_fd_limit exists to work around an issue where launchctl on Mac OS X defaults the
* rlimit maxfiles to 256/unlimited. The default soft limit of 256 ends up being far too low
* for our multithreaded scheduler testing, depending on the number of cores available.
*
* This fixes issue #7772.
*/
use libc;
type rlim_t = libc::uint64_t;
struct rlimit {
rlim_cur: rlim_t,
rlim_max: rlim_t
}
#[nolink]
extern {
// name probably doesn't need to be mut, but the C function doesn't specify const
fn sysctl(name: *mut libc::c_int, namelen: libc::c_uint,
oldp: *mut libc::c_void, oldlenp: *mut libc::size_t,
newp: *mut libc::c_void, newlen: libc::size_t) -> libc::c_int;
fn getrlimit(resource: libc::c_int, rlp: *mut rlimit) -> libc::c_int;
fn setrlimit(resource: libc::c_int, rlp: *rlimit) -> libc::c_int;
}
static CTL_KERN: libc::c_int = 1;
static KERN_MAXFILESPERPROC: libc::c_int = 29;
static RLIMIT_NOFILE: libc::c_int = 8;
pub unsafe fn raise_fd_limit() {
// The strategy here is to fetch the current resource limits, read the kern.maxfilesperproc
// sysctl value, and bump the soft resource limit for maxfiles up to the sysctl value.
use ptr::{to_unsafe_ptr, to_mut_unsafe_ptr, mut_null};
use mem::size_of_val;
use os::last_os_error;
// Fetch the kern.maxfilesperproc value
let mut mib: [libc::c_int, ..2] = [CTL_KERN, KERN_MAXFILESPERPROC];
let mut maxfiles: libc::c_int = 0;
let mut size: libc::size_t = size_of_val(&maxfiles) as libc::size_t;
if sysctl(to_mut_unsafe_ptr(&mut mib[0]), 2,
to_mut_unsafe_ptr(&mut maxfiles) as *mut libc::c_void,
to_mut_unsafe_ptr(&mut size),
mut_null(), 0) != 0 {
let err = last_os_error();
error!("raise_fd_limit: error calling sysctl: {}", err);
return;
}
// Fetch the current resource limits
let mut rlim = rlimit{rlim_cur: 0, rlim_max: 0};
if getrlimit(RLIMIT_NOFILE, to_mut_unsafe_ptr(&mut rlim)) != 0 {
let err = last_os_error();
error!("raise_fd_limit: error calling getrlimit: {}", err);
return;
}
// Bump the soft limit to the smaller of kern.maxfilesperproc and the hard limit
rlim.rlim_cur = ::cmp::min(maxfiles as rlim_t, rlim.rlim_max);
// Set our newly-increased resource limit
if setrlimit(RLIMIT_NOFILE, to_unsafe_ptr(&rlim)) != 0 {
let err = last_os_error();
error!("raise_fd_limit: error calling setrlimit: {}", err);
return;
}
}
}
#[cfg(not(target_os="macos"))]
mod darwin_fd_limit {
pub unsafe fn raise_fd_limit() {}
}

View File

@ -39,9 +39,7 @@ loop {
*/
use comm::Port;
use option::{Option, Some, None};
use result::{Ok, Err};
use io::io_error;
use option::Option;
use rt::rtio::{IoFactory, LocalIo, RtioTimer};
pub struct Timer {
@ -60,15 +58,7 @@ impl Timer {
/// for a number of milliseconds, or to possibly create channels which will
/// get notified after an amount of time has passed.
pub fn new() -> Option<Timer> {
let mut io = LocalIo::borrow();
match io.get().timer_init() {
Ok(t) => Some(Timer { obj: t }),
Err(ioerr) => {
debug!("Timer::init: failed to init: {:?}", ioerr);
io_error::cond.raise(ioerr);
None
}
}
LocalIo::maybe_raise(|io| io.timer_init().map(|t| Timer { obj: t }))
}
/// Blocks the current task for `msecs` milliseconds.
@ -108,77 +98,60 @@ impl Timer {
mod test {
use prelude::*;
use super::*;
use rt::test::*;
#[test]
fn test_io_timer_sleep_simple() {
do run_in_mt_newsched_task {
let mut timer = Timer::new().unwrap();
timer.sleep(1);
}
let mut timer = Timer::new().unwrap();
timer.sleep(1);
}
#[test]
fn test_io_timer_sleep_oneshot() {
do run_in_mt_newsched_task {
let mut timer = Timer::new().unwrap();
timer.oneshot(1).recv();
}
let mut timer = Timer::new().unwrap();
timer.oneshot(1).recv();
}
#[test]
fn test_io_timer_sleep_oneshot_forget() {
do run_in_mt_newsched_task {
let mut timer = Timer::new().unwrap();
timer.oneshot(100000000000);
}
let mut timer = Timer::new().unwrap();
timer.oneshot(100000000000);
}
#[test]
fn oneshot_twice() {
do run_in_mt_newsched_task {
let mut timer = Timer::new().unwrap();
let port1 = timer.oneshot(10000);
let port = timer.oneshot(1);
port.recv();
assert_eq!(port1.try_recv(), None);
}
let mut timer = Timer::new().unwrap();
let port1 = timer.oneshot(10000);
let port = timer.oneshot(1);
port.recv();
assert_eq!(port1.try_recv(), None);
}
#[test]
fn test_io_timer_oneshot_then_sleep() {
do run_in_mt_newsched_task {
let mut timer = Timer::new().unwrap();
let port = timer.oneshot(100000000000);
timer.sleep(1); // this should invalidate the port
let mut timer = Timer::new().unwrap();
let port = timer.oneshot(100000000000);
timer.sleep(1); // this should invalidate the port
assert_eq!(port.try_recv(), None);
}
assert_eq!(port.try_recv(), None);
}
#[test]
fn test_io_timer_sleep_periodic() {
do run_in_mt_newsched_task {
let mut timer = Timer::new().unwrap();
let port = timer.periodic(1);
port.recv();
port.recv();
port.recv();
}
let mut timer = Timer::new().unwrap();
let port = timer.periodic(1);
port.recv();
port.recv();
port.recv();
}
#[test]
fn test_io_timer_sleep_periodic_forget() {
do run_in_mt_newsched_task {
let mut timer = Timer::new().unwrap();
timer.periodic(100000000000);
}
let mut timer = Timer::new().unwrap();
timer.periodic(100000000000);
}
#[test]
fn test_io_timer_sleep_standalone() {
do run_in_mt_newsched_task {
sleep(1)
}
sleep(1)
}
}

View File

@ -65,13 +65,15 @@
// When testing libstd, bring in libuv as the I/O backend so tests can print
// things and all of the std::io tests have an I/O interface to run on top
// of
#[cfg(test)] extern mod rustuv = "rustuv#0.9-pre";
#[cfg(test)] extern mod rustuv = "rustuv";
#[cfg(test)] extern mod native = "native";
#[cfg(test)] extern mod green = "green";
// Make extra accessible for benchmarking
#[cfg(test)] extern mod extra = "extra#0.9-pre";
#[cfg(test)] extern mod extra = "extra";
// Make std testable by not duplicating lang items. See #2912
#[cfg(test)] extern mod realstd = "std#0.9-pre";
#[cfg(test)] extern mod realstd = "std";
#[cfg(test)] pub use kinds = realstd::kinds;
#[cfg(test)] pub use ops = realstd::ops;
#[cfg(test)] pub use cmp = realstd::cmp;
@ -159,6 +161,7 @@ pub mod trie;
pub mod task;
pub mod comm;
pub mod local_data;
pub mod sync;
/* Runtime and platform support */

View File

@ -432,6 +432,7 @@ mod tests {
}
#[test]
#[allow(dead_code)]
fn test_tls_overwrite_multiple_types() {
static str_key: Key<~str> = &Key;
static box_key: Key<@()> = &Key;

View File

@ -118,26 +118,16 @@ pub static ERROR: u32 = 1;
/// It is not recommended to call this function directly; rather, it should be
/// invoked through the logging family of macros.
pub fn log(_level: u32, args: &fmt::Arguments) {
unsafe {
let optional_task: Option<*mut Task> = Local::try_unsafe_borrow();
match optional_task {
Some(local) => {
// Lazily initialize the local task's logger
match (*local).logger {
// Use the available logger if we have one
Some(ref mut logger) => { logger.log(args); }
None => {
let mut logger = StdErrLogger::new();
logger.log(args);
(*local).logger = Some(logger);
}
}
}
// If there's no local task, then always log to stderr
None => {
let mut logger = StdErrLogger::new();
logger.log(args);
}
}
let mut logger = {
let mut task = Local::borrow(None::<Task>);
task.get().logger.take()
};
if logger.is_none() {
logger = Some(StdErrLogger::new());
}
logger.get_mut_ref().log(args);
let mut task = Local::borrow(None::<Task>);
task.get().logger = logger;
}
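
Call sites normally go through the macro family rather than `log` itself; an illustrative pair:

```
// Each of the logging macros checks its level and formats its arguments
// before delegating to log(); error! corresponds to the ERROR static above.
error!("failed to open {}: {}", path.display(), err);
debug!("read {} bytes", nread);
```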

View File

@ -28,8 +28,6 @@
#[allow(missing_doc)];
#[cfg(unix)]
use c_str::CString;
use clone::Clone;
use container::Container;
#[cfg(target_os = "macos")]
@ -43,8 +41,7 @@ use ptr;
use str;
use to_str;
use unstable::finally::Finally;
pub use os::consts::*;
use sync::atomics::{AtomicInt, INIT_ATOMIC_INT, SeqCst};
/// Delegates to the libc close() function, returning the same return value.
pub fn close(fd: c_int) -> c_int {
@ -58,6 +55,8 @@ static BUF_BYTES : uint = 2048u;
#[cfg(unix)]
pub fn getcwd() -> Path {
use c_str::CString;
let mut buf = [0 as libc::c_char, ..BUF_BYTES];
unsafe {
if libc::getcwd(buf.as_mut_ptr(), buf.len() as size_t).is_null() {
@ -333,7 +332,7 @@ pub fn pipe() -> Pipe {
/// Returns the proper dll filename for the given basename of a file.
pub fn dll_filename(base: &str) -> ~str {
format!("{}{}{}", DLL_PREFIX, base, DLL_SUFFIX)
format!("{}{}{}", consts::DLL_PREFIX, base, consts::DLL_SUFFIX)
}
/// Optionally returns the filesystem path to the current executable which is
@ -675,17 +674,26 @@ pub fn last_os_error() -> ~str {
strerror()
}
static mut EXIT_STATUS: AtomicInt = INIT_ATOMIC_INT;
/**
* Sets the process exit code
*
* Sets the exit code returned by the process if all supervised tasks
* terminate successfully (without failing). If the current root task fails
* and is supervised by the scheduler then any user-specified exit status is
* ignored and the process exits with the default failure status
* ignored and the process exits with the default failure status.
*
* Note that this is not synchronized against modifications made by other threads.
*/
pub fn set_exit_status(code: int) {
use rt;
rt::set_exit_status(code);
unsafe { EXIT_STATUS.store(code, SeqCst) }
}
/// Fetches the process's current exit code. This defaults to 0 and can be
/// changed by calling `set_exit_status`.
pub fn get_exit_status() -> int {
unsafe { EXIT_STATUS.load(SeqCst) }
}
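
A short usage sketch of the new atomic pair:

```
// Illustrative: record a nonzero status for the runtime to report at exit.
os::set_exit_status(101);
assert_eq!(os::get_exit_status(), 101); // SeqCst store/load; last write wins
```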
#[cfg(target_os = "macos")]

View File

@ -12,9 +12,8 @@ use c_str::{ToCStr, CString};
use libc::{c_char, size_t};
use option::{Option, None, Some};
use ptr::RawPtr;
use rt::env;
use rt;
use rt::local::Local;
use rt::task;
use rt::task::Task;
use str::OwnedStr;
use str;
@ -62,7 +61,7 @@ unsafe fn fail_borrowed(alloc: *mut raw::Box<()>, file: *c_char, line: size_t)
match try_take_task_borrow_list() {
None => { // not recording borrows
let msg = "borrowed";
msg.with_c_str(|msg_p| task::begin_unwind_raw(msg_p, file, line))
msg.with_c_str(|msg_p| rt::begin_unwind_raw(msg_p, file, line))
}
Some(borrow_list) => { // recording borrows
let mut msg = ~"borrowed";
@ -76,7 +75,7 @@ unsafe fn fail_borrowed(alloc: *mut raw::Box<()>, file: *c_char, line: size_t)
sep = " and at ";
}
}
msg.with_c_str(|msg_p| task::begin_unwind_raw(msg_p, file, line))
msg.with_c_str(|msg_p| rt::begin_unwind_raw(msg_p, file, line))
}
}
}
@ -95,7 +94,7 @@ unsafe fn debug_borrow<T,P:RawPtr<T>>(tag: &'static str,
//! A useful debugging function that prints a pointer + tag + newline
//! without allocating memory.
if ENABLE_DEBUG && env::debug_borrow() {
if ENABLE_DEBUG && rt::env::debug_borrow() {
debug_borrow_slow(tag, p, old_bits, new_bits, filename, line);
}
@ -180,7 +179,7 @@ pub unsafe fn unrecord_borrow(a: *u8,
if br.alloc != a || br.file != file || br.line != line {
let err = format!("wrong borrow found, br={:?}", br);
err.with_c_str(|msg_p| {
task::begin_unwind_raw(msg_p, file, line)
rt::begin_unwind_raw(msg_p, file, line)
})
}
borrow_list

View File

@ -30,7 +30,7 @@ pub struct CrateMap<'a> {
version: i32,
entries: &'a [ModEntry<'a>],
children: &'a [&'a CrateMap<'a>],
event_loop_factory: Option<extern "C" fn() -> ~EventLoop>,
event_loop_factory: Option<fn() -> ~EventLoop>,
}
#[cfg(not(windows))]

View File

@ -17,7 +17,7 @@ use os;
// Note that these are all accessed without any synchronization.
// They are expected to be initialized once then left alone.
static mut MIN_STACK: uint = 2000000;
static mut MIN_STACK: uint = 2 * 1024 * 1024;
static mut DEBUG_BORROW: bool = false;
static mut POISON_ON_FREE: bool = false;

View File

@ -1,318 +0,0 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
Task death: asynchronous killing, linked failure, exit code propagation.
This file implements two orthogonal building-blocks for communicating failure
between tasks. One is 'linked failure' or 'task killing', that is, a failing
task causing other tasks to fail promptly (even those that are blocked on
pipes or I/O). The other is 'exit code propagation', which affects the result
observed by the parent of a task::try task that itself spawns child tasks
(such as any #[test] function). In both cases the data structures live in
KillHandle.
I. Task killing.
The model for killing involves two atomic flags, the "kill flag" and the
"unkillable flag". Operations on the kill flag include:
- In the taskgroup code (task/spawn.rs), tasks store a clone of their
KillHandle in their shared taskgroup. Another task in the group that fails
will use that handle to call kill().
- When a task blocks, it turns its ~Task into a BlockedTask by storing
the transmuted ~Task pointer inside the KillHandle's kill flag. A task
trying to block and a task trying to kill it can simultaneously access the
kill flag, after which the task will get scheduled and fail (no matter who
wins the race). Likewise, a task trying to wake a blocked task normally and
a task trying to kill it can simultaneously access the flag; only one will
get the task to reschedule it.
Operations on the unkillable flag include:
- When a task becomes unkillable, it swaps on the flag to forbid any killer
from waking it up while it's blocked inside the unkillable section. If a
kill was already pending, the task fails instead of becoming unkillable.
- When a task is done being unkillable, it restores the flag to the normal
running state. If a kill was received-but-blocked during the unkillable
section, the task fails at this later point.
- When a task tries to kill another task, before swapping on the kill flag, it
first swaps on the unkillable flag, to see if it's "allowed" to wake up the
task. If it isn't, the killed task will receive the signal when it becomes
killable again. (Of course, a task trying to wake the task normally (e.g.
sending on a channel) does not access the unkillable flag at all.)
Why do we not need acquire/release barriers on any of the kill flag swaps?
This is because barriers establish orderings between accesses on different
memory locations, but each kill-related operation is only a swap on a single
location, so atomicity is all that matters. The exception is kill(), which
does a swap on both flags in sequence. kill() needs no barriers because it
does not matter if its two accesses are seen reordered on another CPU: if a
killer does perform both writes, it means it saw a KILL_RUNNING in the
unkillable flag, which means an unkillable task will see KILL_KILLED and fail
immediately (rendering the subsequent write to the kill flag unnecessary).
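
A minimal sketch of the blocking-side race just described, assuming the kill flag is an `AtomicUint` holding either a small sentinel or a transmuted `~Task` pointer (sentinel names and values here are hypothetical stand-ins for the definitions later in this file):

```
// Hypothetical sentinels; any other value is a parked, transmuted ~Task.
static KILL_RUNNING: uint = 0; // running normally, no kill pending
static KILL_KILLED: uint = 1;  // a kill has already landed

// Blocking is one swap: publish our task pointer, and if the old value says
// a kill already landed, fail now instead of sleeping. The atomicity of the
// single swap is all the ordering required, per the discussion above.
unsafe fn try_block(kill_flag: &mut AtomicUint, task_ptr: uint) -> bool {
    match kill_flag.swap(task_ptr, SeqCst) {
        KILL_KILLED => false, // killer won the race: fail instead of sleep
        _ => true,            // parked; a waker or killer swaps us back out
    }
}
```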
II. Exit code propagation.
The basic model for exit code propagation, which is used with the "watched"
spawn mode (on by default for linked spawns, off for supervised and unlinked
spawns), is that a parent will wait for all its watched children to exit
before reporting whether it succeeded or failed. A watching parent will only
report success if it succeeded and all its children also reported success;
otherwise, it will report failure. This is most useful for writing test cases:
```
#[test]
fn test_something_in_another_task() {
do spawn {
assert!(collatz_conjecture_is_false());
}
}
```
Here, as the child task will certainly outlive the parent task, we might miss
the failure of the child when deciding whether or not the test case passed.
The watched spawn mode avoids this problem.
In order to propagate exit codes from children to their parents, any
'watching' parent must wait for all of its children to exit before it can
report its final exit status. We achieve this by using an UnsafeArc, using the
reference counting to track how many children are still alive, and using the
unwrap() operation in the parent's exit path to wait for all children to exit.
The UnsafeArc referred to here is actually the KillHandle itself.
This also works transitively, as if a "middle" watched child task is itself
watching a grandchild task, the "middle" task will do unwrap() on its own
KillHandle (thereby waiting for the grandchild to exit) before dropping its
reference to its watching parent (which will alert the parent).
While UnsafeArc::unwrap() accomplishes the synchronization, there remains the
matter of reporting the exit codes themselves. This is easiest when an exiting
watched task has no watched children of its own:
- If the task with no watched children exits successfully, it need do nothing.
- If the task with no watched children has failed, it sets a flag in the
parent's KillHandle ("any_child_failed") to true. It then stays true forever.
However, if a "middle" watched task with watched children of its own exits
before its child exits, we need to ensure that the grandparent task may still
see a failure from the grandchild task. While we could achieve this by having
each intermediate task block on its handle, doing so would keep alive the other
resources the task was using. To be more efficient, this is accomplished via "tombstones".
A tombstone is a closure, proc() -> bool, which will perform any waiting necessary
to collect the exit code of descendant tasks. In its environment is captured
the KillHandle of whichever task created the tombstone, and perhaps also any
tombstones that that task itself had, and finally also another tombstone,
effectively creating a lazy-list of heap closures.
When a child wishes to exit early and leave tombstones behind for its parent,
it must use a LittleLock (pthread mutex) to synchronize with any possible
sibling tasks which are trying to do the same thing with the same parent.
However, on the other side, when the parent is ready to pull on the tombstones,
it need not use this lock, because the unwrap() serves as a barrier that ensures
no children will remain with references to the handle.
The main logic for creating and assigning tombstones can be found in the
function reparent_children_to() in the impl for KillHandle.
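
A sketch of the tombstone lazy-list (names are illustrative, not the actual `KillHandle` internals): each tombstone captures its predecessor, so forcing the newest closure performs every descendant's wait and folds the verdicts together:

```
// "Did everything this subtree was watching succeed?"
type Tombstone = proc() -> bool;

// Prepend a new entry onto the parent's lazy list of tombstones.
fn add_tombstone(prev: Option<Tombstone>, wait_on_my_children: Tombstone)
                 -> Tombstone {
    proc() {
        // Force the rest of the list first, then do our own waiting; note
        // that we always wait, even if an earlier subtree already failed.
        let earlier_ok = match prev { Some(f) => f(), None => true };
        let mine_ok = wait_on_my_children();
        earlier_ok && mine_ok
    }
}
```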
IIA. Issues with exit code propagation.
There are two known issues with the current scheme for exit code propagation.
- As documented in issue #8136, the structure mandates the possibility of stack
overflow when collecting tombstones that are very deeply nested. This cannot
be avoided with the closure representation, as tombstones end up structured in
a sort of tree. However, notably, the tombstones do not actually need to be
collected in any particular order, and so a doubly-linked list may be used.
However we do not do this yet because DList is in libextra.
- A discussion with Graydon made me realize that if we decoupled the exit code
propagation from the parents-waiting action, this could result in a simpler
implementation as the exit codes themselves would not have to be propagated,
and could instead be propagated implicitly through the taskgroup mechanism
that we already have. The tombstoning scheme would still be required. I have
not implemented this because currently we can't receive a linked failure kill
signal during the task cleanup activity, as that is currently "unkillable",
and occurs outside the task's unwinder's "try" block, so would require some
restructuring.
*/
use cast;
use option::{Option, Some, None};
use prelude::*;
use iter;
use task::TaskResult;
use rt::task::Task;
use unstable::atomics::{AtomicUint, SeqCst};
use unstable::sync::UnsafeArc;
/// A handle to a blocked task. Usually this means having the ~Task pointer by
/// ownership, but if the task is killable, a killer can steal it at any time.
pub enum BlockedTask {
Owned(~Task),
Shared(UnsafeArc<AtomicUint>),
}
/// Per-task state related to task death, killing, failure, etc.
pub struct Death {
// Action to be done with the exit code. If set, also makes the task wait
// until all its watched children exit before collecting the status.
on_exit: Option<proc(TaskResult)>,
// nesting level counter for unstable::atomically calls (0 == can deschedule).
priv wont_sleep: int,
}
pub struct BlockedTaskIterator {
priv inner: UnsafeArc<AtomicUint>,
}
impl Iterator<BlockedTask> for BlockedTaskIterator {
fn next(&mut self) -> Option<BlockedTask> {
Some(Shared(self.inner.clone()))
}
}
impl BlockedTask {
/// Returns Some if the task was successfully woken; None if already killed.
pub fn wake(self) -> Option<~Task> {
match self {
Owned(task) => Some(task),
Shared(arc) => unsafe {
match (*arc.get()).swap(0, SeqCst) {
0 => None,
n => cast::transmute(n),
}
}
}
}
/// Create a blocked task, unless the task was already killed.
pub fn block(task: ~Task) -> BlockedTask {
Owned(task)
}
/// Converts one blocked task handle to a list of many handles to the same.
pub fn make_selectable(self, num_handles: uint)
-> iter::Take<BlockedTaskIterator>
{
let arc = match self {
Owned(task) => {
let flag = unsafe { AtomicUint::new(cast::transmute(task)) };
UnsafeArc::new(flag)
}
Shared(arc) => arc.clone(),
};
BlockedTaskIterator{ inner: arc }.take(num_handles)
}
// This assertion has two flavours because the wake involves an atomic op.
// In the faster version, destructors will fail dramatically instead.
#[inline] #[cfg(not(test))]
pub fn assert_already_awake(self) { }
#[inline] #[cfg(test)]
pub fn assert_already_awake(self) { assert!(self.wake().is_none()); }
/// Convert to an unsafe uint value. Useful for storing in a pipe's state flag.
#[inline]
pub unsafe fn cast_to_uint(self) -> uint {
match self {
Owned(task) => {
let blocked_task_ptr: uint = cast::transmute(task);
rtassert!(blocked_task_ptr & 0x1 == 0);
blocked_task_ptr
}
Shared(arc) => {
let blocked_task_ptr: uint = cast::transmute(~arc);
rtassert!(blocked_task_ptr & 0x1 == 0);
blocked_task_ptr | 0x1
}
}
}
/// Convert from an unsafe uint value. Useful for retrieving a pipe's state flag.
#[inline]
pub unsafe fn cast_from_uint(blocked_task_ptr: uint) -> BlockedTask {
if blocked_task_ptr & 0x1 == 0 {
Owned(cast::transmute(blocked_task_ptr))
} else {
let ptr: ~UnsafeArc<AtomicUint> = cast::transmute(blocked_task_ptr & !1);
Shared(*ptr)
}
}
}
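// An illustrative sketch, not part of this patch: a hypothetical pipe could
// park and unpark a task through a single word using the tag-bit encoding
// above, which relies on ~-allocations being at least 2-byte aligned so that
// bit 0 is free to distinguish the two variants.
pub unsafe fn park_in_flag(flag: &mut uint, task: ~Task) {
// encodes Owned as an even pointer value, Shared with bit 0 set
*flag = BlockedTask::block(task).cast_to_uint();
}
pub unsafe fn unpark_from_flag(flag: &mut uint) -> Option<~Task> {
let handle = BlockedTask::cast_from_uint(*flag);
*flag = 0;
handle.wake() // None if a killer already claimed the task
}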
impl Death {
pub fn new() -> Death {
Death {
on_exit: None,
wont_sleep: 0,
}
}
/// Collect failure exit codes from children and propagate them to a parent.
pub fn collect_failure(&mut self, result: TaskResult) {
match self.on_exit.take() {
Some(f) => f(result),
None => {}
}
}
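// An illustrative sketch, not part of this patch: runtimes wire up exit
// notification by installing the proc before the task runs, e.g.:
//
//     let (port, chan) = Chan::new();
//     task.death.on_exit = Some(proc(result) { chan.send(result.is_ok()) });
//     // ... run the task; port.recv() then reports its success flag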
/// Enter a possibly-nested "atomic" section of code. Just for assertions.
/// All calls must be paired with a subsequent call to allow_deschedule.
#[inline]
pub fn inhibit_deschedule(&mut self) {
self.wont_sleep += 1;
}
/// Exit a possibly-nested "atomic" section of code. Just for assertions.
/// All calls must be paired with a preceding call to inhibit_deschedule.
#[inline]
pub fn allow_deschedule(&mut self) {
rtassert!(self.wont_sleep != 0);
self.wont_sleep -= 1;
}
/// Ensure that the task is allowed to become descheduled.
#[inline]
pub fn assert_may_sleep(&self) {
if self.wont_sleep != 0 {
rtabort!("illegal atomic-sleep: attempt to reschedule while \
using an Exclusive or LittleLock");
}
}
}
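// An illustrative sketch, not part of this patch: the two calls above are
// meant to bracket a section of code that must not deschedule.
fn with_descheduling_inhibited(death: &mut Death, f: ||) {
death.inhibit_deschedule();
// anything in `f` that reaches assert_may_sleep() will rtabort! here
f();
death.allow_deschedule();
}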
impl Drop for Death {
fn drop(&mut self) {
// Mustn't be in an atomic or unkillable section at task death.
rtassert!(self.wont_sleep == 0);
}
}
#[cfg(test)]
mod test {
use rt::test::*;
use super::*;
// Task blocking tests
#[test]
fn block_and_wake() {
do with_test_task |task| {
BlockedTask::block(task).wake().unwrap()
}
}
}

View File

@ -8,8 +8,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use option::{Option, Some, None};
use rt::sched::Scheduler;
use option::Option;
use rt::task::Task;
use rt::local_ptr;
@ -46,87 +45,10 @@ impl Local<local_ptr::Borrowed<Task>> for Task {
}
}
/// Encapsulates a temporarily-borrowed scheduler.
pub struct BorrowedScheduler {
priv task: local_ptr::Borrowed<Task>,
}
impl BorrowedScheduler {
fn new(mut task: local_ptr::Borrowed<Task>) -> BorrowedScheduler {
if task.get().sched.is_none() {
rtabort!("no scheduler")
} else {
BorrowedScheduler {
task: task,
}
}
}
#[inline]
pub fn get<'a>(&'a mut self) -> &'a mut ~Scheduler {
match self.task.get().sched {
None => rtabort!("no scheduler"),
Some(ref mut sched) => sched,
}
}
}
impl Local<BorrowedScheduler> for Scheduler {
fn put(value: ~Scheduler) {
let mut task = Local::borrow(None::<Task>);
task.get().sched = Some(value);
}
#[inline]
fn take() -> ~Scheduler {
unsafe {
// XXX: Unsafe for speed
let task: *mut Task = Local::unsafe_borrow();
(*task).sched.take_unwrap()
}
}
fn exists(_: Option<Scheduler>) -> bool {
let mut task = Local::borrow(None::<Task>);
task.get().sched.is_some()
}
#[inline]
fn borrow(_: Option<Scheduler>) -> BorrowedScheduler {
BorrowedScheduler::new(Local::borrow(None::<Task>))
}
unsafe fn unsafe_take() -> ~Scheduler { rtabort!("unimpl") }
unsafe fn unsafe_borrow() -> *mut Scheduler {
let task: *mut Task = Local::unsafe_borrow();
match (*task).sched {
Some(~ref mut sched) => {
let s: *mut Scheduler = &mut *sched;
return s;
}
None => {
rtabort!("no scheduler")
}
}
}
unsafe fn try_unsafe_borrow() -> Option<*mut Scheduler> {
let task_opt: Option<*mut Task> = Local::try_unsafe_borrow();
match task_opt {
Some(task) => {
match (*task).sched {
Some(~ref mut sched) => {
let s: *mut Scheduler = &mut *sched;
Some(s)
}
None => None
}
}
None => None
}
}
}
#[cfg(test)]
mod test {
use option::None;
use unstable::run_in_bare_thread;
use rt::test::*;
use super::*;
use rt::task::Task;
use rt::local_ptr;
@ -135,8 +57,7 @@ mod test {
fn thread_local_task_smoke_test() {
do run_in_bare_thread {
local_ptr::init();
let mut sched = ~new_test_uv_sched();
let task = ~Task::new_root(&mut sched.stack_pool, None, proc(){});
let task = ~Task::new();
Local::put(task);
let task: ~Task = Local::take();
cleanup_task(task);
@ -147,12 +68,11 @@ mod test {
fn thread_local_task_two_instances() {
do run_in_bare_thread {
local_ptr::init();
let mut sched = ~new_test_uv_sched();
let task = ~Task::new_root(&mut sched.stack_pool, None, proc(){});
let task = ~Task::new();
Local::put(task);
let task: ~Task = Local::take();
cleanup_task(task);
let task = ~Task::new_root(&mut sched.stack_pool, None, proc(){});
let task = ~Task::new();
Local::put(task);
let task: ~Task = Local::take();
cleanup_task(task);
@ -164,8 +84,7 @@ mod test {
fn borrow_smoke_test() {
do run_in_bare_thread {
local_ptr::init();
let mut sched = ~new_test_uv_sched();
let task = ~Task::new_root(&mut sched.stack_pool, None, proc(){});
let task = ~Task::new();
Local::put(task);
unsafe {
@ -180,8 +99,7 @@ mod test {
fn borrow_with_return() {
do run_in_bare_thread {
local_ptr::init();
let mut sched = ~new_test_uv_sched();
let task = ~Task::new_root(&mut sched.stack_pool, None, proc(){});
let task = ~Task::new();
Local::put(task);
{
@ -193,5 +111,9 @@ mod test {
}
}
fn cleanup_task(mut t: ~Task) {
t.destroyed = true;
}
}

View File

@ -42,7 +42,7 @@ impl<T> Drop for Borrowed<T> {
}
let val: ~T = cast::transmute(self.val);
put::<T>(val);
assert!(exists());
rtassert!(exists());
}
}
}
@ -109,7 +109,9 @@ pub mod compiled {
/// Does not validate the pointer type.
#[inline]
pub unsafe fn take<T>() -> ~T {
let ptr: ~T = cast::transmute(RT_TLS_PTR);
let ptr = RT_TLS_PTR;
rtassert!(!ptr.is_null());
let ptr: ~T = cast::transmute(ptr);
// can't use `as`, due to type not matching with `cfg(test)`
RT_TLS_PTR = cast::transmute(0);
ptr
@ -178,7 +180,7 @@ pub mod native {
}
pub unsafe fn cleanup() {
assert!(INITIALIZED);
rtassert!(INITIALIZED);
tls::destroy(RT_TLS_KEY);
LOCK.destroy();
INITIALIZED = false;

View File

@ -57,27 +57,17 @@ Several modules in `core` are clients of `rt`:
// XXX: this should not be here.
#[allow(missing_doc)];
use any::Any;
use clone::Clone;
use container::Container;
use iter::Iterator;
use option::{Option, None, Some};
use option::Option;
use ptr::RawPtr;
use rt::local::Local;
use rt::sched::{Scheduler, Shutdown};
use rt::sleeper_list::SleeperList;
use task::TaskResult;
use rt::task::{Task, SchedTask, GreenTask, Sched};
use send_str::SendStrStatic;
use unstable::atomics::{AtomicInt, AtomicBool, SeqCst};
use unstable::sync::UnsafeArc;
use result::Result;
use task::TaskOpts;
use vec::{OwnedVector, MutableVector, ImmutableVector};
use vec;
use self::thread::Thread;
// the os module needs to reach into this helper, so allow general access
// through this reexport.
pub use self::util::set_exit_status;
use self::task::{Task, BlockedTask};
// this is somewhat useful when a program wants to spawn a "reasonable" number
// of workers based on the constraints of the system that it's running on.
@ -85,8 +75,8 @@ pub use self::util::set_exit_status;
// method...
pub use self::util::default_sched_threads;
// Re-export of the functionality in the kill module
pub use self::kill::BlockedTask;
// Export unwinding facilities used by the failure macros
pub use self::unwind::{begin_unwind, begin_unwind_raw};
// XXX: these probably shouldn't be public...
#[doc(hidden)]
@ -99,21 +89,12 @@ pub mod shouldnt_be_public {
// Internal macros used by the runtime.
mod macros;
/// Basic implementation of an EventLoop, provides no I/O interfaces
mod basic;
/// The global (exchange) heap.
pub mod global_heap;
/// Implementations of language-critical runtime features like @.
pub mod task;
/// Facilities related to task failure, killing, and death.
mod kill;
/// The coroutine task scheduler, built on the `io` event loop.
pub mod sched;
/// The EventLoop and internal synchronous I/O interface.
pub mod rtio;
@ -121,27 +102,6 @@ pub mod rtio;
/// or task-local storage.
pub mod local;
/// A mostly lock-free multi-producer, single consumer queue.
pub mod mpsc_queue;
/// A lock-free single-producer, single consumer queue.
pub mod spsc_queue;
/// A lock-free multi-producer, multi-consumer bounded queue.
mod mpmc_bounded_queue;
/// A parallel work-stealing deque
pub mod deque;
/// A parallel data structure for tracking sleeping schedulers.
pub mod sleeper_list;
/// Stack segments and caching.
pub mod stack;
/// CPU context swapping.
mod context;
/// Bindings to system threading libraries.
pub mod thread;
@ -157,16 +117,6 @@ pub mod logging;
/// Crate map
pub mod crate_map;
/// Tools for testing the runtime
pub mod test;
/// Reference counting
pub mod rc;
/// A simple single-threaded channel type for passing buffered data between
/// scheduler and task context
pub mod tube;
/// The runtime needs to be able to put a pointer into thread-local storage.
mod local_ptr;
@ -185,41 +135,33 @@ pub mod args;
// Support for dynamic borrowck
pub mod borrowck;
/// Set up a default runtime configuration, given compiler-supplied arguments.
///
/// This is invoked by the `start` _language item_ (unstable::lang) to
/// run a Rust executable.
///
/// # Arguments
///
/// * `argc` & `argv` - The argument vector. On Unix this information is used
/// by os::args.
///
/// # Return value
///
/// The return value is used as the process return code. 0 on success, 101 on error.
pub fn start(argc: int, argv: **u8, main: proc()) -> int {
/// The default error code of the rust runtime if the main task fails instead
/// of exiting cleanly.
pub static DEFAULT_ERROR_CODE: int = 101;
init(argc, argv);
let exit_code = run(main);
// unsafe is ok b/c we're sure that the runtime is gone
unsafe { cleanup(); }
return exit_code;
}
/// Like `start` but creates an additional scheduler on the current thread,
/// which in most cases will be the 'main' thread, and pins the main task to it.
/// The interface to the current runtime.
///
/// This is appropriate for running code that must execute on the main thread,
/// such as the platform event loop and GUI.
pub fn start_on_main_thread(argc: int, argv: **u8, main: proc()) -> int {
init(argc, argv);
let exit_code = run_on_main_thread(main);
// unsafe is ok b/c we're sure that the runtime is gone
unsafe { cleanup(); }
/// This trait is used as the abstraction between 1:1 and M:N scheduling. The
/// two independent crates, libnative and libgreen, both have objects which
/// implement this trait. The goal of this trait is to encompass all the
/// fundamental differences in functionality between the 1:1 and M:N runtime
/// modes.
pub trait Runtime {
// Necessary scheduling functions, used for channels and blocking I/O
// (sometimes).
fn yield_now(~self, cur_task: ~Task);
fn maybe_yield(~self, cur_task: ~Task);
fn deschedule(~self, times: uint, cur_task: ~Task,
f: |BlockedTask| -> Result<(), BlockedTask>);
fn reawaken(~self, to_wake: ~Task, can_resched: bool);
return exit_code;
// Miscellaneous calls which are very different depending on what context
// you're in.
fn spawn_sibling(~self, cur_task: ~Task, opts: TaskOpts, f: proc());
fn local_io<'a>(&'a mut self) -> Option<rtio::LocalIo<'a>>;
// XXX: This is a serious code smell and this should not exist at all.
fn wrap(~self) -> ~Any;
}
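// An illustrative sketch, not part of this patch: the smallest conceivable
// implementor of the trait, mainly to show its shape. `NoopRuntime` is a
// hypothetical name; the real implementations live in libnative and
// libgreen, and this sketch assumes `Local` is in scope.
pub struct NoopRuntime;
impl Runtime for NoopRuntime {
fn yield_now(~self, cur_task: ~Task) {
// nothing to yield to: reinstall ourselves and let the task keep going
let mut cur_task = cur_task;
cur_task.put_runtime(self as ~Runtime);
Local::put(cur_task);
}
fn maybe_yield(~self, cur_task: ~Task) { self.yield_now(cur_task) }
fn deschedule(~self, _times: uint, _cur_task: ~Task,
_f: |BlockedTask| -> Result<(), BlockedTask>) {
rtabort!("sketch: blocking is runtime-specific")
}
fn reawaken(~self, _to_wake: ~Task, _can_resched: bool) {
rtabort!("sketch: waking is runtime-specific")
}
fn spawn_sibling(~self, _cur_task: ~Task, _opts: TaskOpts, _f: proc()) {
rtabort!("sketch: spawning is runtime-specific")
}
fn local_io<'a>(&'a mut self) -> Option<rtio::LocalIo<'a>> { None }
fn wrap(~self) -> ~Any { self as ~Any }
}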
/// One-time runtime initialization.
@ -234,6 +176,7 @@ pub fn init(argc: int, argv: **u8) {
args::init(argc, argv);
env::init();
logging::init();
local_ptr::init();
}
}
@ -250,239 +193,3 @@ pub unsafe fn cleanup() {
args::cleanup();
local_ptr::cleanup();
}
/// Execute the main function in a scheduler.
///
/// Configures the runtime according to the environment, by default
/// using a task scheduler with the same number of threads as cores.
/// Returns a process exit code.
pub fn run(main: proc()) -> int {
run_(main, false)
}
pub fn run_on_main_thread(main: proc()) -> int {
run_(main, true)
}
fn run_(main: proc(), use_main_sched: bool) -> int {
static DEFAULT_ERROR_CODE: int = 101;
let nscheds = util::default_sched_threads();
let mut main = Some(main);
// The shared list of sleeping schedulers.
let sleepers = SleeperList::new();
// Create a work queue for each scheduler, ntimes. Create an extra
// for the main thread if that flag is set. We won't steal from it.
let mut pool = deque::BufferPool::new();
let arr = vec::from_fn(nscheds, |_| pool.deque());
let (workers, stealers) = vec::unzip(arr.move_iter());
// The schedulers.
let mut scheds = ~[];
// Handles to the schedulers. When the main task ends these will be
// sent the Shutdown message to terminate the schedulers.
let mut handles = ~[];
for worker in workers.move_iter() {
rtdebug!("inserting a regular scheduler");
// Every scheduler is driven by an I/O event loop.
let loop_ = new_event_loop();
let mut sched = ~Scheduler::new(loop_,
worker,
stealers.clone(),
sleepers.clone());
let handle = sched.make_handle();
scheds.push(sched);
handles.push(handle);
}
// If we need a main-thread task then create a main thread scheduler
// that will reject any task that isn't pinned to it
let main_sched = if use_main_sched {
// Create a friend handle.
let mut friend_sched = scheds.pop();
let friend_handle = friend_sched.make_handle();
scheds.push(friend_sched);
// This scheduler needs a queue that isn't part of the stealee
// set.
let (worker, _) = pool.deque();
let main_loop = new_event_loop();
let mut main_sched = ~Scheduler::new_special(main_loop,
worker,
stealers.clone(),
sleepers.clone(),
false,
Some(friend_handle));
let mut main_handle = main_sched.make_handle();
// Allow the scheduler to exit when the main task exits.
// Note: sending the shutdown message also prevents the scheduler
// from pushing itself to the sleeper list, which is used for
// waking up schedulers for work stealing; since this is a
// non-work-stealing scheduler it should not be adding itself
// to the list.
main_handle.send(Shutdown);
Some(main_sched)
} else {
None
};
// Create a shared cell for transmitting the process exit
// code from the main task to this function.
let exit_code = UnsafeArc::new(AtomicInt::new(0));
let exit_code_clone = exit_code.clone();
// Used to sanity check that the runtime only exits once
let exited_already = UnsafeArc::new(AtomicBool::new(false));
// When the main task exits, after all the tasks in the main
// task tree, shut down the schedulers and set the exit code.
let handles = handles;
let on_exit: proc(TaskResult) = proc(exit_success) {
unsafe {
assert!(!(*exited_already.get()).swap(true, SeqCst),
"the runtime already exited");
}
let mut handles = handles;
for handle in handles.mut_iter() {
handle.send(Shutdown);
}
unsafe {
let exit_code = if exit_success.is_ok() {
use rt::util;
// If we're exiting successfully, then return the global
// exit status, which can be set programmatically.
util::get_exit_status()
} else {
DEFAULT_ERROR_CODE
};
(*exit_code_clone.get()).store(exit_code, SeqCst);
}
};
let mut threads = ~[];
let mut on_exit = Some(on_exit);
if !use_main_sched {
// In the case where we do not use a main_thread scheduler we
// run the main task in one of our threads.
let mut main_task = ~Task::new_root(&mut scheds[0].stack_pool,
None,
::util::replace(&mut main,
None).unwrap());
main_task.name = Some(SendStrStatic("<main>"));
main_task.death.on_exit = ::util::replace(&mut on_exit, None);
let sched = scheds.pop();
let main_task = main_task;
let thread = do Thread::start {
sched.bootstrap(main_task);
};
threads.push(thread);
}
// Run each remaining scheduler in a thread.
for sched in scheds.move_rev_iter() {
rtdebug!("creating regular schedulers");
let thread = do Thread::start {
let mut sched = sched;
let bootstrap_task = ~do Task::new_root(&mut sched.stack_pool, None) || {
rtdebug!("boostraping a non-primary scheduler");
};
sched.bootstrap(bootstrap_task);
};
threads.push(thread);
}
// If we do have a main thread scheduler, run it now.
if use_main_sched {
rtdebug!("about to create the main scheduler task");
let mut main_sched = main_sched.unwrap();
let home = Sched(main_sched.make_handle());
let mut main_task = ~Task::new_root_homed(&mut main_sched.stack_pool,
None,
home,
::util::replace(&mut main,
None).
unwrap());
main_task.name = Some(SendStrStatic("<main>"));
main_task.death.on_exit = ::util::replace(&mut on_exit, None);
rtdebug!("bootstrapping main_task");
main_sched.bootstrap(main_task);
}
rtdebug!("waiting for threads");
// Wait for schedulers
for thread in threads.move_iter() {
thread.join();
}
// Return the exit code
unsafe {
(*exit_code.get()).load(SeqCst)
}
}
pub fn in_sched_context() -> bool {
unsafe {
let task_ptr: Option<*mut Task> = Local::try_unsafe_borrow();
match task_ptr {
Some(task) => {
match (*task).task_type {
SchedTask => true,
_ => false
}
}
None => false
}
}
}
pub fn in_green_task_context() -> bool {
unsafe {
let task: Option<*mut Task> = Local::try_unsafe_borrow();
match task {
Some(task) => {
match (*task).task_type {
GreenTask(_) => true,
_ => false
}
}
None => false
}
}
}
pub fn new_event_loop() -> ~rtio::EventLoop {
match crate_map::get_crate_map() {
None => {}
Some(map) => {
match map.event_loop_factory {
None => {}
Some(factory) => return factory()
}
}
}
// If the crate map didn't specify a factory to create an event loop, then
// instead just use a basic event loop missing all I/O services to at least
// get the scheduler running.
return basic::event_loop();
}

View File

@ -1,139 +0,0 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! An owned, task-local, reference counted type
//!
//! # Safety note
//!
//! XXX There is currently no type-system mechanism for enforcing that
//! reference counted types are both allocated on the exchange heap
//! and also non-sendable
//!
//! This doesn't prevent borrowing multiple aliasable mutable pointers
use ops::Drop;
use clone::Clone;
use libc::c_void;
use cast;
pub struct RC<T> {
priv p: *c_void // ~(uint, T)
}
impl<T> RC<T> {
pub fn new(val: T) -> RC<T> {
unsafe {
let v = ~(1, val);
let p: *c_void = cast::transmute(v);
RC { p: p }
}
}
fn get_mut_state(&mut self) -> *mut (uint, T) {
unsafe {
let p: &mut ~(uint, T) = cast::transmute(&mut self.p);
let p: *mut (uint, T) = &mut **p;
return p;
}
}
fn get_state(&self) -> *(uint, T) {
unsafe {
let p: &~(uint, T) = cast::transmute(&self.p);
let p: *(uint, T) = &**p;
return p;
}
}
pub fn unsafe_borrow_mut(&mut self) -> *mut T {
unsafe {
match *self.get_mut_state() {
(_, ref mut p) => {
let p: *mut T = p;
return p;
}
}
}
}
pub fn refcount(&self) -> uint {
unsafe {
match *self.get_state() {
(count, _) => count
}
}
}
}
#[unsafe_destructor]
impl<T> Drop for RC<T> {
fn drop(&mut self) {
assert!(self.refcount() > 0);
unsafe {
match *self.get_mut_state() {
(ref mut count, _) => {
*count = *count - 1
}
}
if self.refcount() == 0 {
let _: ~(uint, T) = cast::transmute(self.p);
}
}
}
}
impl<T> Clone for RC<T> {
fn clone(&self) -> RC<T> {
unsafe {
// XXX: Mutable clone
let this: &mut RC<T> = cast::transmute_mut(self);
match *this.get_mut_state() {
(ref mut count, _) => {
*count = *count + 1;
}
}
}
RC { p: self.p }
}
}
#[cfg(test)]
mod test {
use super::RC;
#[test]
fn smoke_test() {
unsafe {
let mut v1 = RC::new(100);
assert!(*v1.unsafe_borrow_mut() == 100);
assert!(v1.refcount() == 1);
let mut v2 = v1.clone();
assert!(*v2.unsafe_borrow_mut() == 100);
assert!(v2.refcount() == 2);
*v2.unsafe_borrow_mut() = 200;
assert!(*v2.unsafe_borrow_mut() == 200);
assert!(*v1.unsafe_borrow_mut() == 200);
let v3 = v2.clone();
assert!(v3.refcount() == 3);
{
let _v1 = v1;
let _v2 = v2;
}
assert!(v3.refcount() == 1);
}
}
}

View File

@ -14,14 +14,15 @@ use comm::{SharedChan, Port};
use libc::c_int;
use libc;
use ops::Drop;
use option::*;
use option::{Option, Some, None};
use path::Path;
use result::*;
use result::{Result, Ok, Err};
use rt::task::Task;
use rt::local::Local;
use ai = io::net::addrinfo;
use io;
use io::IoError;
use io::native::NATIVE_IO_FACTORY;
use io::native;
use io::net::ip::{IpAddr, SocketAddr};
use io::process::{ProcessConfig, ProcessExit};
use io::signal::Signum;
@ -93,36 +94,52 @@ impl<'a> Drop for LocalIo<'a> {
impl<'a> LocalIo<'a> {
/// Returns the local I/O: either the local scheduler's I/O services or
/// the native I/O services.
pub fn borrow() -> LocalIo {
use rt::sched::Scheduler;
use rt::local::Local;
pub fn borrow() -> Option<LocalIo> {
// FIXME(#11053): bad
//
// This is currently very unsafely implemented. We don't actually
// *take* the local I/O so there's a very real possibility that we
// can have two borrows at once. Currently there is not a clear way
// to actually borrow the local I/O factory safely because even if
// ownership were transferred down to the functions that the I/O
// factory implements it's just too much of a pain to know when to
// relinquish ownership back into the local task (but that would be
// the safe way of implementing this function).
//
// In order to get around this, we just transmute a copy out of the task
// in order to have what is likely a static lifetime (bad).
let mut t: ~Task = Local::take();
let ret = t.local_io().map(|t| {
unsafe { cast::transmute_copy(&t) }
});
Local::put(t);
return ret;
}
unsafe {
// First, attempt to use the local scheduler's I/O services
let sched: Option<*mut Scheduler> = Local::try_unsafe_borrow();
match sched {
Some(sched) => {
match (*sched).event_loop.io() {
Some(factory) => {
return LocalIo {
factory: factory,
}
}
None => {}
pub fn maybe_raise<T>(f: |io: &mut IoFactory| -> Result<T, IoError>)
-> Option<T>
{
match LocalIo::borrow() {
None => {
io::io_error::cond.raise(io::standard_error(io::IoUnavailable));
None
}
Some(mut io) => {
match f(io.get()) {
Ok(t) => Some(t),
Err(ioerr) => {
io::io_error::cond.raise(ioerr);
None
}
}
None => {}
}
// If we don't have a scheduler or the scheduler doesn't have I/O
// services, then fall back to the native I/O services.
let native_io: &'static mut native::IoFactory =
&mut NATIVE_IO_FACTORY;
LocalIo {
factory: native_io as &mut IoFactory:'static
}
}
}
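// An illustrative sketch, not part of this patch: blocking I/O entry points
// in libstd are expected to funnel through maybe_raise; hypothetically:
//
//     pub fn unlink(path: &Path) -> Option<()> {
//         LocalIo::maybe_raise(|io| io.fs_unlink(path))
//     }
//
// On a runtime without I/O support this raises io_error with IoUnavailable
// rather than failing outright.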
pub fn new<'a>(io: &'a mut IoFactory) -> LocalIo<'a> {
LocalIo { factory: io }
}
/// Returns the underlying I/O factory as a trait reference.
#[inline]
pub fn get<'a>(&'a mut self) -> &'a mut IoFactory {

View File

@ -13,29 +13,41 @@
//! local storage, and logging. Even a 'freestanding' Rust would likely want
//! to implement this.
use super::local_heap::LocalHeap;
use prelude::*;
use any::AnyOwnExt;
use borrow;
use cast;
use cleanup;
use io::Writer;
use libc::{c_char, size_t};
use iter::{Iterator, Take};
use local_data;
use ops::Drop;
use option::{Option, Some, None};
use prelude::drop;
use result::{Result, Ok, Err};
use rt::Runtime;
use rt::borrowck::BorrowRecord;
use rt::borrowck;
use rt::context::Context;
use rt::env;
use rt::kill::Death;
use rt::local::Local;
use rt::local_heap::LocalHeap;
use rt::logging::StdErrLogger;
use rt::sched::{Scheduler, SchedHandle};
use rt::stack::{StackSegment, StackPool};
use rt::rtio::LocalIo;
use rt::unwind::Unwinder;
use send_str::SendStr;
use sync::arc::UnsafeArc;
use sync::atomics::{AtomicUint, SeqCst, INIT_ATOMIC_UINT};
use task::{TaskResult, TaskOpts};
use unstable::finally::Finally;
use unstable::mutex::Mutex;
use unstable::mutex::{Mutex, MUTEX_INIT};
#[cfg(stage0)]
pub use rt::unwind::begin_unwind;
// These two statics are used as bookkeeping to keep track of the rust runtime's
// count of threads. In 1:1 contexts, this is used to know when to return from
// the main function, and in M:N contexts this is used to know when to shut down
// the pool of schedulers.
static mut TASK_COUNT: AtomicUint = INIT_ATOMIC_UINT;
static mut TASK_LOCK: Mutex = MUTEX_INIT;
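// An illustrative sketch, not part of this patch, of the protocol on the two
// statics above (helper names are hypothetical): every task increments the
// count when it starts running and decrements it when it finishes, and the
// last one out signals whoever is parked in wait_for_other_tasks() below.
unsafe fn bump_task_count() { TASK_COUNT.fetch_add(1, SeqCst); }
unsafe fn drop_task_count() {
if TASK_COUNT.fetch_sub(1, SeqCst) == 1 {
// last task out: wake the thread blocked in wait_for_other_tasks()
TASK_LOCK.lock();
TASK_LOCK.signal();
TASK_LOCK.unlock();
}
}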
// The Task struct represents all state associated with a rust
// task. There are at this point two primary "subtypes" of task,
@ -45,201 +57,90 @@ use unstable::mutex::Mutex;
pub struct Task {
heap: LocalHeap,
priv gc: GarbageCollector,
gc: GarbageCollector,
storage: LocalStorage,
logger: Option<StdErrLogger>,
unwinder: Unwinder,
death: Death,
destroyed: bool,
name: Option<SendStr>,
coroutine: Option<Coroutine>,
sched: Option<~Scheduler>,
task_type: TaskType,
// Dynamic borrowck debugging info
borrow_list: Option<~[BorrowRecord]>,
logger: Option<StdErrLogger>,
stdout_handle: Option<~Writer>,
// See the comments in the scheduler about why this is necessary
nasty_deschedule_lock: Mutex,
}
pub enum TaskType {
GreenTask(Option<SchedHome>),
SchedTask
}
/// A coroutine is nothing more than a (register context, stack) pair.
pub struct Coroutine {
/// The segment of stack on which the task is currently running or
/// if the task is blocked, on which the task will resume
/// execution.
///
/// Servo needs this to be public in order to tell SpiderMonkey
/// about the stack bounds.
current_stack_segment: StackSegment,
/// Always valid if the task is alive and not running.
saved_context: Context
}
/// Some tasks have a dedicated home scheduler that they must run on.
pub enum SchedHome {
AnySched,
Sched(SchedHandle)
priv imp: Option<~Runtime>,
}
pub struct GarbageCollector;
pub struct LocalStorage(Option<local_data::Map>);
/// A handle to a blocked task. Usually this means owning the ~Task pointer
/// outright, but if the task is killable, a killer can steal it at any time.
pub enum BlockedTask {
Owned(~Task),
Shared(UnsafeArc<AtomicUint>),
}
/// Per-task state related to task death, killing, failure, etc.
pub struct Death {
// Action to be done with the exit code. If set, also makes the task wait
// until all its watched children exit before collecting the status.
on_exit: Option<proc(TaskResult)>,
}
pub struct BlockedTaskIterator {
priv inner: UnsafeArc<AtomicUint>,
}
impl Task {
// A helper to build a new task using the dynamically found
// scheduler and task. Only works in GreenTask context.
pub fn build_homed_child(stack_size: Option<uint>,
f: proc(),
home: SchedHome)
-> ~Task {
let mut running_task = Local::borrow(None::<Task>);
let mut sched = running_task.get().sched.take_unwrap();
let new_task = ~running_task.get()
.new_child_homed(&mut sched.stack_pool,
stack_size,
home,
f);
running_task.get().sched = Some(sched);
new_task
}
pub fn build_child(stack_size: Option<uint>, f: proc()) -> ~Task {
Task::build_homed_child(stack_size, f, AnySched)
}
pub fn build_homed_root(stack_size: Option<uint>,
f: proc(),
home: SchedHome)
-> ~Task {
let mut running_task = Local::borrow(None::<Task>);
let mut sched = running_task.get().sched.take_unwrap();
let new_task = ~Task::new_root_homed(&mut sched.stack_pool,
stack_size,
home,
f);
running_task.get().sched = Some(sched);
new_task
}
pub fn build_root(stack_size: Option<uint>, f: proc()) -> ~Task {
Task::build_homed_root(stack_size, f, AnySched)
}
pub fn new_sched_task() -> Task {
pub fn new() -> Task {
Task {
heap: LocalHeap::new(),
gc: GarbageCollector,
storage: LocalStorage(None),
logger: None,
unwinder: Unwinder { unwinding: false, cause: None },
death: Death::new(),
destroyed: false,
coroutine: Some(Coroutine::empty()),
name: None,
sched: None,
task_type: SchedTask,
borrow_list: None,
stdout_handle: None,
nasty_deschedule_lock: unsafe { Mutex::new() },
}
}
pub fn new_root(stack_pool: &mut StackPool,
stack_size: Option<uint>,
start: proc()) -> Task {
Task::new_root_homed(stack_pool, stack_size, AnySched, start)
}
pub fn new_child(&mut self,
stack_pool: &mut StackPool,
stack_size: Option<uint>,
start: proc()) -> Task {
self.new_child_homed(stack_pool, stack_size, AnySched, start)
}
pub fn new_root_homed(stack_pool: &mut StackPool,
stack_size: Option<uint>,
home: SchedHome,
start: proc()) -> Task {
Task {
heap: LocalHeap::new(),
gc: GarbageCollector,
storage: LocalStorage(None),
logger: None,
unwinder: Unwinder { unwinding: false, cause: None },
unwinder: Unwinder::new(),
death: Death::new(),
destroyed: false,
name: None,
coroutine: Some(Coroutine::new(stack_pool, stack_size, start)),
sched: None,
task_type: GreenTask(Some(home)),
borrow_list: None,
stdout_handle: None,
nasty_deschedule_lock: unsafe { Mutex::new() },
}
}
pub fn new_child_homed(&mut self,
stack_pool: &mut StackPool,
stack_size: Option<uint>,
home: SchedHome,
start: proc()) -> Task {
Task {
heap: LocalHeap::new(),
gc: GarbageCollector,
storage: LocalStorage(None),
logger: None,
unwinder: Unwinder { unwinding: false, cause: None },
death: Death::new(),
destroyed: false,
name: None,
coroutine: Some(Coroutine::new(stack_pool, stack_size, start)),
sched: None,
task_type: GreenTask(Some(home)),
borrow_list: None,
stdout_handle: None,
nasty_deschedule_lock: unsafe { Mutex::new() },
imp: None,
}
}
pub fn give_home(&mut self, new_home: SchedHome) {
match self.task_type {
GreenTask(ref mut home) => {
*home = Some(new_home);
}
SchedTask => {
rtabort!("type error: used SchedTask as GreenTask");
}
}
}
pub fn take_unwrap_home(&mut self) -> SchedHome {
match self.task_type {
GreenTask(ref mut home) => {
let out = home.take_unwrap();
return out;
}
SchedTask => {
rtabort!("type error: used SchedTask as GreenTask");
}
}
}
pub fn run(&mut self, f: ||) {
rtdebug!("run called on task: {}", borrow::to_uint(self));
/// Executes the given closure as if it's running inside this task. The task
/// is consumed upon entry, and the destroyed task is returned from this
/// function in order for the caller to free. This function is guaranteed
/// not to unwind because the specified closure is run inside of a `rust_try`
/// block. (This is the only try/catch block in the world.)
///
/// This function is *not* meant to be abused as a "try/catch" block. This
/// is meant to be used at the absolute boundaries of a task's lifetime, and
/// only for that purpose.
pub fn run(~self, f: ||) -> ~Task {
// Need to put ourselves into TLS, but also need access to the unwinder.
// Unsafely get a handle to the task so we can continue to use it after
// putting it in tls (so we can invoke the unwinder).
let handle: *mut Task = unsafe {
*cast::transmute::<&~Task, &*mut Task>(&self)
};
Local::put(self);
unsafe { TASK_COUNT.fetch_add(1, SeqCst); }
// The only try/catch block in the world. Attempt to run the task's
// client-specified code and catch any failures.
self.unwinder.try(|| {
let try_block = || {
// Run the task main function, then do some cleanup.
f.finally(|| {
fn flush(w: Option<~Writer>) {
match w {
Some(mut w) => { w.flush(); }
None => {}
}
}
// First, destroy task-local storage. This may run user dtors.
//
@ -260,7 +161,10 @@ impl Task {
// TLS, or possibly some destructors for those objects being
// annihilated invoke TLS. Sadly these two operations seem to
// be intertwined, and miraculously work for now...
self.storage.take();
let mut task = Local::borrow(None::<Task>);
let storage = task.get().storage.take();
drop(task);
drop(storage);
// Destroy remaining boxes. Also may run user dtors.
unsafe { cleanup::annihilate(); }
@ -268,77 +172,141 @@ impl Task {
// Finally flush and destroy any output handles which the task
// owns. There are no boxes here, and no user destructors should
// run after this any more.
match self.stdout_handle.take() {
Some(handle) => {
let mut handle = handle;
handle.flush();
}
None => {}
}
self.logger.take();
let mut task = Local::borrow(None::<Task>);
let stdout = task.get().stdout_handle.take();
let logger = task.get().logger.take();
drop(task);
flush(stdout);
drop(logger);
})
});
};
unsafe { (*handle).unwinder.try(try_block); }
// Cleanup the dynamic borrowck debugging info
borrowck::clear_task_borrow_list();
self.death.collect_failure(self.unwinder.result());
self.destroyed = true;
// Here we must unsafely borrow the task in order to not remove it from
// TLS. When collecting failure, we may attempt to send on a channel (or
// just run arbitrary code), so we must be sure to still have a local
// task in TLS.
unsafe {
let me: *mut Task = Local::unsafe_borrow();
(*me).death.collect_failure((*me).unwinder.result());
// see comments on these statics for why they're used
if TASK_COUNT.fetch_sub(1, SeqCst) == 1 {
TASK_LOCK.lock();
TASK_LOCK.signal();
TASK_LOCK.unlock();
}
}
let mut me: ~Task = Local::take();
me.destroyed = true;
return me;
}
// New utility functions for homes.
/// Inserts a runtime object into this task, transferring ownership to the
/// task. It is illegal to replace a previous runtime object in this task
/// with this argument.
pub fn put_runtime(&mut self, ops: ~Runtime) {
assert!(self.imp.is_none());
self.imp = Some(ops);
}
pub fn is_home_no_tls(&self, sched: &~Scheduler) -> bool {
match self.task_type {
GreenTask(Some(AnySched)) => { false }
GreenTask(Some(Sched(SchedHandle { sched_id: ref id, .. }))) => {
*id == sched.sched_id()
}
GreenTask(None) => {
rtabort!("task without home");
}
SchedTask => {
// Awe yea
rtabort!("type error: expected: GreenTask, found: SchedTask");
/// Attempts to extract the runtime as a specific type. If the runtime does
/// not have the provided type, then the runtime is not removed. If the
/// runtime does have the specified type, then it is removed and returned
/// (transfer of ownership).
///
/// It is recommended to only use this method when *absolutely necessary*.
/// This function may not be available in the future.
pub fn maybe_take_runtime<T: 'static>(&mut self) -> Option<~T> {
// This is a terrible, terrible function. The general idea here is to
// take the runtime, cast it to ~Any, check if it has the right type,
// and then re-cast it back if necessary. The method of doing this is
// pretty sketchy and involves shuffling vtables of trait objects
// around, but it gets the job done.
//
// XXX: This function is a serious code smell and should be avoided at
// all costs. I have yet to think of a method to avoid this
// function, and I would be saddened if more usage of the function
// crops up.
unsafe {
let imp = self.imp.take_unwrap();
let &(vtable, _): &(uint, uint) = cast::transmute(&imp);
match imp.wrap().move::<T>() {
Ok(t) => Some(t),
Err(t) => {
let (_, obj): (uint, uint) = cast::transmute(t);
let obj: ~Runtime = cast::transmute((vtable, obj));
self.put_runtime(obj);
None
}
}
}
}
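// An illustrative sketch, not part of this patch: a runtime crate can use
// this to recover its concrete type from a task it is handed (`GreenTask`
// below is a hypothetical type name):
//
//     let mut task: ~Task = Local::take();
//     match task.maybe_take_runtime::<GreenTask>() {
//         Some(ops) => { /* fast path: this is one of our tasks */ }
//         None => { /* another runtime is installed; it was left in place */ }
//     }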
pub fn homed(&self) -> bool {
match self.task_type {
GreenTask(Some(AnySched)) => { false }
GreenTask(Some(Sched(SchedHandle { .. }))) => { true }
GreenTask(None) => {
rtabort!("task without home");
}
SchedTask => {
rtabort!("type error: expected: GreenTask, found: SchedTask");
}
}
/// Spawns a sibling to this task. The newly spawned task is configured with
/// the `opts` structure and will run `f` as the body of its code.
pub fn spawn_sibling(mut ~self, opts: TaskOpts, f: proc()) {
let ops = self.imp.take_unwrap();
ops.spawn_sibling(self, opts, f)
}
// Grab both the scheduler and the task from TLS and check if the
// task is executing on an appropriate scheduler.
pub fn on_appropriate_sched() -> bool {
let mut task = Local::borrow(None::<Task>);
let sched_id = task.get().sched.get_ref().sched_id();
let sched_run_anything = task.get().sched.get_ref().run_anything;
match task.get().task_type {
GreenTask(Some(AnySched)) => {
rtdebug!("anysched task in sched check ****");
sched_run_anything
}
GreenTask(Some(Sched(SchedHandle { sched_id: ref id, ..}))) => {
rtdebug!("homed task in sched check ****");
*id == sched_id
}
GreenTask(None) => {
rtabort!("task without home");
}
SchedTask => {
rtabort!("type error: expected: GreenTask, found: SchedTask");
}
/// Deschedules the current task, invoking `f` `amt` times. It is not
/// recommended to use this function directly, but rather communication
/// primitives in `std::comm` should be used.
pub fn deschedule(mut ~self, amt: uint,
f: |BlockedTask| -> Result<(), BlockedTask>) {
let ops = self.imp.take_unwrap();
ops.deschedule(amt, self, f)
}
/// Wakes up a previously blocked task, optionally specifying whether the
/// current task can accept a change in scheduling. This function can only
/// be called on tasks that were previously blocked in `deschedule`.
pub fn reawaken(mut ~self, can_resched: bool) {
let ops = self.imp.take_unwrap();
ops.reawaken(self, can_resched);
}
/// Yields control of this task to another task. This function will
/// eventually return, but possibly not immediately. This is used as an
/// opportunity to allow other tasks a chance to run.
pub fn yield_now(mut ~self) {
let ops = self.imp.take_unwrap();
ops.yield_now(self);
}
/// Similar to `yield_now`, except that this function may immediately return
/// without yielding (depending on what the runtime decides to do).
pub fn maybe_yield(mut ~self) {
let ops = self.imp.take_unwrap();
ops.maybe_yield(self);
}
/// Acquires a handle to the I/O factory that this task contains, normally
/// stored in the task's runtime. This factory may not always be available,
/// which is why the return type is `Option`.
pub fn local_io<'a>(&'a mut self) -> Option<LocalIo<'a>> {
self.imp.get_mut_ref().local_io()
}
/// The main function of all rust executables will by default use this
/// function. This function will *block* the OS thread (hence the `unsafe`)
/// waiting for all known tasks to complete. Once this function has
/// returned, it is guaranteed that no more user-defined code is still
/// running.
pub unsafe fn wait_for_other_tasks(&mut self) {
TASK_COUNT.fetch_sub(1, SeqCst); // don't count ourselves
TASK_LOCK.lock();
while TASK_COUNT.load(SeqCst) > 0 {
TASK_LOCK.wait();
}
TASK_LOCK.unlock();
TASK_COUNT.fetch_add(1, SeqCst); // add ourselves back in
}
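// An illustrative sketch, not part of this patch: a runtime's entry point
// would let its main task block here before tearing everything down:
//
//     let mut task: ~Task = Local::take();
//     unsafe { task.wait_for_other_tasks(); }
//     // no more user code is running; it is now safe to clean up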
}
@ -346,348 +314,192 @@ impl Drop for Task {
fn drop(&mut self) {
rtdebug!("called drop for a task: {}", borrow::to_uint(self));
rtassert!(self.destroyed);
unsafe { self.nasty_deschedule_lock.destroy(); }
}
}
// Coroutines represent nothing more than a context and a stack
// segment.
impl Coroutine {
pub fn new(stack_pool: &mut StackPool,
stack_size: Option<uint>,
start: proc())
-> Coroutine {
let stack_size = match stack_size {
Some(size) => size,
None => env::min_stack()
};
let start = Coroutine::build_start_wrapper(start);
let mut stack = stack_pool.take_segment(stack_size);
let initial_context = Context::new(start, &mut stack);
Coroutine {
current_stack_segment: stack,
saved_context: initial_context
}
impl Iterator<BlockedTask> for BlockedTaskIterator {
fn next(&mut self) -> Option<BlockedTask> {
Some(Shared(self.inner.clone()))
}
}
pub fn empty() -> Coroutine {
Coroutine {
current_stack_segment: StackSegment::new(0),
saved_context: Context::empty()
}
}
fn build_start_wrapper(start: proc()) -> proc() {
let wrapper: proc() = proc() {
// First code after swap to this new context. Run our
// cleanup job.
unsafe {
// Again - might work while safe, or it might not.
{
let mut sched = Local::borrow(None::<Scheduler>);
sched.get().run_cleanup_job();
}
// To call the run method on a task we need a direct
// reference to it. The task is in TLS, so we can
// simply unsafe_borrow it to get this reference. We
// need to still have the task in TLS though, so we
// need to unsafe_borrow.
let task: *mut Task = Local::unsafe_borrow();
let mut start_cell = Some(start);
(*task).run(|| {
// N.B. Removing `start` from the start wrapper
// closure by emptying a cell is critical for
// correctness. The ~Task pointer, and in turn the
// closure used to initialize the first call
// frame, is destroyed in the scheduler context,
// not task context. So any captured closures must
// not contain user-definable dtors that expect to
// be in task context. By moving `start` out of
// the closure, all the user code goes out of
// scope while the task is still running.
let start = start_cell.take_unwrap();
start();
});
}
// We remove the sched from the Task in TLS right now.
let sched: ~Scheduler = Local::take();
// ... allowing us to give it away when performing a
// scheduling operation.
sched.terminate_current_task()
};
return wrapper;
}
/// Destroy coroutine and try to reuse stack segment.
pub fn recycle(self, stack_pool: &mut StackPool) {
impl BlockedTask {
/// Returns Some if the task was successfully woken; None if already killed.
pub fn wake(self) -> Option<~Task> {
match self {
Coroutine { current_stack_segment, .. } => {
stack_pool.give_segment(current_stack_segment);
}
}
}
}
/// This function is invoked from rust's current __morestack function. Segmented
/// stacks are not currently enabled; each task instead runs on one giant stack
/// segment. This means that whenever we run out of stack, we want to treat it
/// as a true stack overflow rather than allocating a new stack.
#[no_mangle] // - this is called from C code
#[no_split_stack] // - it would be sad for this function to trigger __morestack
#[doc(hidden)] // - Function must be `pub` to get exported, but it's
// irrelevant for documentation purposes.
#[cfg(not(test))] // in testing, use the original libstd's version
pub extern "C" fn rust_stack_exhausted() {
use rt::context;
use rt::in_green_task_context;
use rt::task::Task;
use rt::local::Local;
use unstable::intrinsics;
unsafe {
// We're calling this function because the stack just ran out. We need
// to call some other rust functions, but if we invoke the functions
// right now it'll just trigger this handler being called again. In
// order to alleviate this, we move the stack limit to be inside of the
// red zone that was allocated for exactly this reason.
let limit = context::get_sp_limit();
context::record_sp_limit(limit - context::RED_ZONE / 2);
// This probably isn't the best course of action. Ideally one would want
// to unwind the stack here instead of just aborting the entire process.
// This is a tricky problem, however. There's a few things which need to
// be considered:
//
// 1. We're here because of a stack overflow, yet unwinding will run
// destructors and hence arbitrary code. What if that code overflows
// the stack? One possibility is to use the above allocation of an
// extra 10k to hope that we don't hit the limit, and if we do then
// abort the whole program. Not the best, but kind of hard to deal
// with unless we want to switch stacks.
//
// 2. LLVM will optimize functions based on whether they can unwind or
// not. It will flag functions with 'nounwind' if it believes that
// the function cannot trigger unwinding, but if we do unwind on
// stack overflow then it means that we could unwind in any function
// anywhere. We would have to make sure that LLVM only places the
// nounwind flag on functions which don't call any other functions.
//
// 3. The function that overflowed may have owned arguments. These
// arguments need to have their destructors run, but we haven't even
// begun executing the function yet, so unwinding will not run the
// any landing pads for these functions. If this is ignored, then
// the arguments will just be leaked.
//
// Exactly what to do here is a very delicate topic, and is
// still up in the air. Some relevant issues:
//
// #3555 - out-of-stack failure leaks arguments
// #3695 - should there be a stack limit?
// #9855 - possible strategies which could be taken
// #9854 - unwinding on windows through __morestack has never worked
// #2361 - possible implementation of not using landing pads
if in_green_task_context() {
let mut task = Local::borrow(None::<Task>);
let n = task.get()
.name
.as_ref()
.map(|n| n.as_slice())
.unwrap_or("<unnamed>");
// See the message below for why this is not emitted to the
// task's logger. This has the additional conundrum that the
// logger may not be initialized just yet, meaning that an FFI
// call would happen to initialize it (calling out to libuv),
// and the FFI call needs 2MB of stack when we just ran out.
rterrln!("task '{}' has overflowed its stack", n);
} else {
rterrln!("stack overflow in non-task context");
}
intrinsics::abort();
}
}
/// This is the entry point of unwinding for things like lang items and such.
/// The arguments are normally generated by the compiler, and need to
/// have static lifetimes.
pub fn begin_unwind_raw(msg: *c_char, file: *c_char, line: size_t) -> ! {
use c_str::CString;
use cast::transmute;
#[inline]
fn static_char_ptr(p: *c_char) -> &'static str {
let s = unsafe { CString::new(p, false) };
match s.as_str() {
Some(s) => unsafe { transmute::<&str, &'static str>(s) },
None => rtabort!("message wasn't utf8?")
}
}
let msg = static_char_ptr(msg);
let file = static_char_ptr(file);
begin_unwind(msg, file, line as uint)
}
/// This is the entry point of unwinding for fail!() and assert!().
pub fn begin_unwind<M: Any + Send>(msg: M, file: &'static str, line: uint) -> ! {
use any::AnyRefExt;
use rt::in_green_task_context;
use rt::local::Local;
use rt::task::Task;
use str::Str;
use unstable::intrinsics;
unsafe {
let task: *mut Task;
// Note that this should be the only allocation performed in this block.
// Currently this means that fail!() on OOM will invoke this code path,
// but then again we're not really ready for failing on OOM anyway. If
// we do start doing this, then we should propagate this allocation to
// be performed in the parent of this task instead of the task that's
// failing.
let msg = ~msg as ~Any;
{
//let msg: &Any = msg;
let msg_s = match msg.as_ref::<&'static str>() {
Some(s) => *s,
None => match msg.as_ref::<~str>() {
Some(s) => s.as_slice(),
None => "~Any",
Owned(task) => Some(task),
Shared(arc) => unsafe {
match (*arc.get()).swap(0, SeqCst) {
0 => None,
n => Some(cast::transmute(n)),
}
};
if !in_green_task_context() {
rterrln!("failed in non-task context at '{}', {}:{}",
msg_s, file, line);
intrinsics::abort();
}
task = Local::unsafe_borrow();
let n = (*task).name.as_ref().map(|n| n.as_slice()).unwrap_or("<unnamed>");
// XXX: this should not get forcibly printed to the console; this should
// either be sent to the parent task (ideally), or get printed to
// the task's logger. Right now the logger is actually a uvio
// instance, which uses unkillable blocks internally for various
// reasons. This will cause serious trouble if the task is failing
// due to mismanagement of its own kill flag, so calling our own
// logger in its current state is a bit of a problem.
rterrln!("task '{}' failed at '{}', {}:{}", n, msg_s, file, line);
if (*task).unwinder.unwinding {
rtabort!("unwinding again");
}
}
}
(*task).unwinder.begin_unwind(msg);
// This assertion has two flavours because the wake involves an atomic op.
// In the faster version, destructors will fail dramatically instead.
#[cfg(not(test))] pub fn trash(self) { }
#[cfg(test)] pub fn trash(self) { assert!(self.wake().is_none()); }
/// Create a blocked task, unless the task was already killed.
pub fn block(task: ~Task) -> BlockedTask {
Owned(task)
}
/// Converts one blocked task handle to a list of many handles to the same.
pub fn make_selectable(self, num_handles: uint) -> Take<BlockedTaskIterator>
{
let arc = match self {
Owned(task) => {
let flag = unsafe { AtomicUint::new(cast::transmute(task)) };
UnsafeArc::new(flag)
}
Shared(arc) => arc.clone(),
};
BlockedTaskIterator{ inner: arc }.take(num_handles)
}
/// Convert to an unsafe uint value. Useful for storing in a pipe's state
/// flag.
#[inline]
pub unsafe fn cast_to_uint(self) -> uint {
match self {
Owned(task) => {
let blocked_task_ptr: uint = cast::transmute(task);
rtassert!(blocked_task_ptr & 0x1 == 0);
blocked_task_ptr
}
Shared(arc) => {
let blocked_task_ptr: uint = cast::transmute(~arc);
rtassert!(blocked_task_ptr & 0x1 == 0);
blocked_task_ptr | 0x1
}
}
}
/// Convert from an unsafe uint value. Useful for retrieving a pipe's state
/// flag.
#[inline]
pub unsafe fn cast_from_uint(blocked_task_ptr: uint) -> BlockedTask {
if blocked_task_ptr & 0x1 == 0 {
Owned(cast::transmute(blocked_task_ptr))
} else {
let ptr: ~UnsafeArc<AtomicUint> =
cast::transmute(blocked_task_ptr & !1);
Shared(*ptr)
}
}
}
impl Death {
pub fn new() -> Death {
Death { on_exit: None, }
}
/// Collect failure exit codes from children and propagate them to a parent.
pub fn collect_failure(&mut self, result: TaskResult) {
match self.on_exit.take() {
Some(f) => f(result),
None => {}
}
}
}
impl Drop for Death {
fn drop(&mut self) {
// make this type noncopyable
}
}
#[cfg(test)]
mod test {
use super::*;
use rt::test::*;
use prelude::*;
use task;
#[test]
fn local_heap() {
do run_in_newsched_task() {
let a = @5;
let b = a;
assert!(*a == 5);
assert!(*b == 5);
}
let a = @5;
let b = a;
assert!(*a == 5);
assert!(*b == 5);
}
#[test]
fn tls() {
use local_data;
do run_in_newsched_task() {
local_data_key!(key: @~str)
local_data::set(key, @~"data");
assert!(*local_data::get(key, |k| k.map(|k| *k)).unwrap() == ~"data");
local_data_key!(key2: @~str)
local_data::set(key2, @~"data");
assert!(*local_data::get(key2, |k| k.map(|k| *k)).unwrap() == ~"data");
}
local_data_key!(key: @~str)
local_data::set(key, @~"data");
assert!(*local_data::get(key, |k| k.map(|k| *k)).unwrap() == ~"data");
local_data_key!(key2: @~str)
local_data::set(key2, @~"data");
assert!(*local_data::get(key2, |k| k.map(|k| *k)).unwrap() == ~"data");
}
#[test]
fn unwind() {
do run_in_newsched_task() {
let result = spawntask_try(proc()());
rtdebug!("trying first assert");
assert!(result.is_ok());
let result = spawntask_try(proc() fail!());
rtdebug!("trying second assert");
assert!(result.is_err());
}
let result = task::try(proc()());
rtdebug!("trying first assert");
assert!(result.is_ok());
let result = task::try::<()>(proc() fail!());
rtdebug!("trying second assert");
assert!(result.is_err());
}
#[test]
fn rng() {
do run_in_uv_task() {
use rand::{rng, Rng};
let mut r = rng();
let _ = r.next_u32();
}
use rand::{rng, Rng};
let mut r = rng();
let _ = r.next_u32();
}
#[test]
fn logging() {
do run_in_uv_task() {
info!("here i am. logging in a newsched task");
}
info!("here i am. logging in a newsched task");
}
#[test]
fn comm_stream() {
do run_in_newsched_task() {
let (port, chan) = Chan::new();
chan.send(10);
assert!(port.recv() == 10);
}
let (port, chan) = Chan::new();
chan.send(10);
assert!(port.recv() == 10);
}
#[test]
fn comm_shared_chan() {
do run_in_newsched_task() {
let (port, chan) = SharedChan::new();
chan.send(10);
assert!(port.recv() == 10);
}
let (port, chan) = SharedChan::new();
chan.send(10);
assert!(port.recv() == 10);
}
#[test]
fn heap_cycles() {
use option::{Option, Some, None};
do run_in_newsched_task {
struct List {
next: Option<@mut List>,
}
let a = @mut List { next: None };
let b = @mut List { next: Some(a) };
a.next = Some(b);
struct List {
next: Option<@mut List>,
}
let a = @mut List { next: None };
let b = @mut List { next: Some(a) };
a.next = Some(b);
}
#[test]
#[should_fail]
fn test_begin_unwind() { begin_unwind("cause", file!(), line!()) }
fn test_begin_unwind() {
use rt::unwind::begin_unwind;
begin_unwind("cause", file!(), line!())
}
// Task blocking tests
#[test]
fn block_and_wake() {
let task = ~Task::new();
let mut task = BlockedTask::block(task).wake().unwrap();
task.destroyed = true;
}
}

View File

@ -1,440 +0,0 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use io::net::ip::{SocketAddr, Ipv4Addr, Ipv6Addr};
use clone::Clone;
use container::Container;
use iter::{Iterator, range};
use option::{Some, None};
use os;
use path::GenericPath;
use path::Path;
use rand::Rng;
use rand;
use result::{Result, Ok, Err};
use rt::basic;
use rt::deque::BufferPool;
use comm::Chan;
use rt::new_event_loop;
use rt::sched::Scheduler;
use rt::sleeper_list::SleeperList;
use rt::task::Task;
use rt::thread::Thread;
use task::TaskResult;
use unstable::{run_in_bare_thread};
use vec;
use vec::{OwnedVector, MutableVector, ImmutableVector};
pub fn new_test_uv_sched() -> Scheduler {
let mut pool = BufferPool::new();
let (worker, stealer) = pool.deque();
let mut sched = Scheduler::new(new_event_loop(),
worker,
~[stealer],
SleeperList::new());
// Don't wait for the Shutdown message
sched.no_sleep = true;
return sched;
}
pub fn new_test_sched() -> Scheduler {
let mut pool = BufferPool::new();
let (worker, stealer) = pool.deque();
let mut sched = Scheduler::new(basic::event_loop(),
worker,
~[stealer],
SleeperList::new());
// Don't wait for the Shutdown message
sched.no_sleep = true;
return sched;
}
pub fn run_in_uv_task(f: proc()) {
do run_in_bare_thread {
run_in_uv_task_core(f);
}
}
pub fn run_in_newsched_task(f: proc()) {
do run_in_bare_thread {
run_in_newsched_task_core(f);
}
}
pub fn run_in_uv_task_core(f: proc()) {
use rt::sched::Shutdown;
let mut sched = ~new_test_uv_sched();
let exit_handle = sched.make_handle();
let on_exit: proc(TaskResult) = proc(exit_status: TaskResult) {
let mut exit_handle = exit_handle;
exit_handle.send(Shutdown);
rtassert!(exit_status.is_ok());
};
let mut task = ~Task::new_root(&mut sched.stack_pool, None, f);
task.death.on_exit = Some(on_exit);
sched.bootstrap(task);
}
pub fn run_in_newsched_task_core(f: proc()) {
use rt::sched::Shutdown;
let mut sched = ~new_test_sched();
let exit_handle = sched.make_handle();
let on_exit: proc(TaskResult) = proc(exit_status: TaskResult) {
let mut exit_handle = exit_handle;
exit_handle.send(Shutdown);
rtassert!(exit_status.is_ok());
};
let mut task = ~Task::new_root(&mut sched.stack_pool, None, f);
task.death.on_exit = Some(on_exit);
sched.bootstrap(task);
}
#[cfg(target_os="macos")]
#[allow(non_camel_case_types)]
mod darwin_fd_limit {
/*!
* darwin_fd_limit exists to work around an issue where launchctl on Mac OS X defaults the
* rlimit maxfiles to 256/unlimited. The default soft limit of 256 ends up being far too low
* for our multithreaded scheduler testing, depending on the number of cores available.
*
* This fixes issue #7772.
*/
use libc;
type rlim_t = libc::uint64_t;
struct rlimit {
rlim_cur: rlim_t,
rlim_max: rlim_t
}
#[nolink]
extern {
// name probably doesn't need to be mut, but the C function doesn't specify const
fn sysctl(name: *mut libc::c_int, namelen: libc::c_uint,
oldp: *mut libc::c_void, oldlenp: *mut libc::size_t,
newp: *mut libc::c_void, newlen: libc::size_t) -> libc::c_int;
fn getrlimit(resource: libc::c_int, rlp: *mut rlimit) -> libc::c_int;
fn setrlimit(resource: libc::c_int, rlp: *rlimit) -> libc::c_int;
}
static CTL_KERN: libc::c_int = 1;
static KERN_MAXFILESPERPROC: libc::c_int = 29;
static RLIMIT_NOFILE: libc::c_int = 8;
pub unsafe fn raise_fd_limit() {
// The strategy here is to fetch the current resource limits, read the kern.maxfilesperproc
// sysctl value, and bump the soft resource limit for maxfiles up to the sysctl value.
use ptr::{to_unsafe_ptr, to_mut_unsafe_ptr, mut_null};
use mem::size_of_val;
use os::last_os_error;
// Fetch the kern.maxfilesperproc value
let mut mib: [libc::c_int, ..2] = [CTL_KERN, KERN_MAXFILESPERPROC];
let mut maxfiles: libc::c_int = 0;
let mut size: libc::size_t = size_of_val(&maxfiles) as libc::size_t;
if sysctl(to_mut_unsafe_ptr(&mut mib[0]), 2,
to_mut_unsafe_ptr(&mut maxfiles) as *mut libc::c_void,
to_mut_unsafe_ptr(&mut size),
mut_null(), 0) != 0 {
let err = last_os_error();
error!("raise_fd_limit: error calling sysctl: {}", err);
return;
}
// Fetch the current resource limits
let mut rlim = rlimit{rlim_cur: 0, rlim_max: 0};
if getrlimit(RLIMIT_NOFILE, to_mut_unsafe_ptr(&mut rlim)) != 0 {
let err = last_os_error();
error!("raise_fd_limit: error calling getrlimit: {}", err);
return;
}
// Bump the soft limit to the smaller of kern.maxfilesperproc and the hard limit
rlim.rlim_cur = ::cmp::min(maxfiles as rlim_t, rlim.rlim_max);
// Set our newly-increased resource limit
if setrlimit(RLIMIT_NOFILE, to_unsafe_ptr(&rlim)) != 0 {
let err = last_os_error();
error!("raise_fd_limit: error calling setrlimit: {}", err);
return;
}
}
}
#[cfg(not(target_os="macos"))]
mod darwin_fd_limit {
pub unsafe fn raise_fd_limit() {}
}
#[doc(hidden)]
pub fn prepare_for_lots_of_tests() {
// Bump the fd limit on OS X. See darwin_fd_limit for an explanation.
unsafe { darwin_fd_limit::raise_fd_limit() }
}
/// Create more than one scheduler and run a function in a task
/// in one of the schedulers. The schedulers will stay alive
/// until the function `f` returns.
pub fn run_in_mt_newsched_task(f: proc()) {
use os;
use from_str::FromStr;
use rt::sched::Shutdown;
use rt::util;
// see comment in other function (raising fd limits)
prepare_for_lots_of_tests();
do run_in_bare_thread {
let nthreads = match os::getenv("RUST_RT_TEST_THREADS") {
Some(nstr) => FromStr::from_str(nstr).unwrap(),
None => {
if util::limit_thread_creation_due_to_osx_and_valgrind() {
1
} else {
// Using more threads than cores in test code
// to force the OS to preempt them frequently.
// Assuming that this helps stress-test concurrent types.
util::num_cpus() * 2
}
}
};
let sleepers = SleeperList::new();
let mut handles = ~[];
let mut scheds = ~[];
let mut pool = BufferPool::<~Task>::new();
let workers = range(0, nthreads).map(|_| pool.deque());
let (workers, stealers) = vec::unzip(workers);
for worker in workers.move_iter() {
let loop_ = new_event_loop();
let mut sched = ~Scheduler::new(loop_,
worker,
stealers.clone(),
sleepers.clone());
let handle = sched.make_handle();
handles.push(handle);
scheds.push(sched);
}
let handles = handles; // Work around not being able to capture mut
let on_exit: proc(TaskResult) = proc(exit_status: TaskResult) {
// Tell schedulers to exit
let mut handles = handles;
for handle in handles.mut_iter() {
handle.send(Shutdown);
}
rtassert!(exit_status.is_ok());
};
let mut main_task = ~Task::new_root(&mut scheds[0].stack_pool,
None,
f);
main_task.death.on_exit = Some(on_exit);
let mut threads = ~[];
let main_thread = {
let sched = scheds.pop();
let main_task = main_task;
do Thread::start {
sched.bootstrap(main_task);
}
};
threads.push(main_thread);
while !scheds.is_empty() {
let mut sched = scheds.pop();
let bootstrap_task = ~do Task::new_root(&mut sched.stack_pool, None) || {
rtdebug!("bootstrapping non-primary scheduler");
};
let sched = sched;
let thread = do Thread::start {
sched.bootstrap(bootstrap_task);
};
threads.push(thread);
}
// Wait for schedulers
for thread in threads.move_iter() {
thread.join();
}
}
}
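A minimal sketch of a caller, assuming `spawn` inside the proc schedules onto the freshly created pool:
run_in_mt_newsched_task(proc() {
    // Runs as the main task on the first scheduler; the whole pool shuts
    // down once this proc returns.
    spawn(proc() { assert!(1 + 1 == 2); });
});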
/// Test tasks will abort on failure instead of unwinding
pub fn spawntask(f: proc()) {
Scheduler::run_task(Task::build_child(None, f));
}
/// Create a new task and run it right now. Aborts on failure
pub fn spawntask_later(f: proc()) {
Scheduler::run_task_later(Task::build_child(None, f));
}
pub fn spawntask_random(f: proc()) {
use rand::{Rand, rng};
let mut rng = rng();
let run_now: bool = Rand::rand(&mut rng);
if run_now {
spawntask(f)
} else {
spawntask_later(f)
}
}
pub fn spawntask_try(f: proc()) -> Result<(),()> {
let (port, chan) = Chan::new();
let on_exit: proc(TaskResult) = proc(exit_status) {
chan.send(exit_status)
};
let mut new_task = Task::build_root(None, f);
new_task.death.on_exit = Some(on_exit);
Scheduler::run_task(new_task);
let exit_status = port.recv();
if exit_status.is_ok() { Ok(()) } else { Err(()) }
}
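A usage sketch: the exit status travels over the channel, so a failing body surfaces as an `Err`:
// fail!() unwinds the task; spawntask_try maps that to Err(()).
let result = spawntask_try(proc() { fail!("expected failure"); });
assert!(result.is_err());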
/// Spawn a new task in a new scheduler and return a thread handle.
pub fn spawntask_thread(f: proc()) -> Thread<()> {
let thread = do Thread::start {
run_in_newsched_task_core(f);
};
return thread;
}
/// Get a ~Task for testing purposes other than actually scheduling it.
pub fn with_test_task(blk: proc(~Task) -> ~Task) {
do run_in_bare_thread {
let mut sched = ~new_test_sched();
let task = blk(~Task::new_root(&mut sched.stack_pool,
None,
proc() {}));
cleanup_task(task);
}
}
/// Used to clean up tasks created for testing but never actually run.
pub fn cleanup_task(mut task: ~Task) {
task.destroyed = true;
}
/// Get a port number, starting at 9600, for use in tests
pub fn next_test_port() -> u16 {
use unstable::mutex::{Mutex, MUTEX_INIT};
static mut lock: Mutex = MUTEX_INIT;
static mut next_offset: u16 = 0;
unsafe {
let base = base_port();
lock.lock();
let ret = base + next_offset;
next_offset += 1;
lock.unlock();
return ret;
}
}
/// Get a temporary path which could be the location of a unix socket
pub fn next_test_unix() -> Path {
if cfg!(unix) {
os::tmpdir().join(rand::task_rng().gen_ascii_str(20))
} else {
Path::new(r"\\.\pipe\" + rand::task_rng().gen_ascii_str(20))
}
}
/// Get a unique IPv4 localhost:port pair starting at 9600
pub fn next_test_ip4() -> SocketAddr {
SocketAddr { ip: Ipv4Addr(127, 0, 0, 1), port: next_test_port() }
}
/// Get a unique IPv6 localhost:port pair starting at 9600
pub fn next_test_ip6() -> SocketAddr {
SocketAddr { ip: Ipv6Addr(0, 0, 0, 0, 0, 0, 0, 1), port: next_test_port() }
}
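A hedged sketch of how a networking test consumes these helpers; `TcpListener` and the shape of `bind` are assumptions about the io API, not part of this diff:
// Each call hands out a distinct port, so parallel builds don't collide.
let addr = next_test_ip4();
let listener = TcpListener::bind(addr); // error handling via io_error elided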
/*
XXX: Welcome to MegaHack City.
The bots run multiple builds at the same time, and these builds
all want to use ports. This function figures out which workspace
it is running in and assigns a port range based on it.
*/
fn base_port() -> u16 {
use os;
use str::StrSlice;
use vec::ImmutableVector;
let base = 9600u16;
let range = 1000u16;
let bases = [
("32-opt", base + range * 1),
("32-noopt", base + range * 2),
("64-opt", base + range * 3),
("64-noopt", base + range * 4),
("64-opt-vg", base + range * 5),
("all-opt", base + range * 6),
("snap3", base + range * 7),
("dist", base + range * 8)
];
// FIXME (#9639): This needs to handle non-utf8 paths
let path = os::getcwd();
let path_s = path.as_str().unwrap();
let mut final_base = base;
for &(dir, base) in bases.iter() {
if path_s.contains(dir) {
final_base = base;
break;
}
}
return final_base;
}
/// Get a constant that represents the number of times to repeat
/// stress tests. Default 1.
pub fn stress_factor() -> uint {
use os::getenv;
use from_str::from_str;
match getenv("RUST_RT_STRESS") {
Some(val) => from_str::<uint>(val).unwrap(),
None => 1
}
}
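Callers scale their iteration counts by this factor; a minimal sketch:
// With RUST_RT_STRESS=10 this loop runs 1000 times instead of 100.
for _ in range(0, 100 * stress_factor()) {
    // ... exercise the concurrent type under test ...
}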
View File
@ -33,7 +33,7 @@ pub struct Thread<T> {
priv packet: ~Option<T>,
}
static DEFAULT_STACK_SIZE: libc::size_t = 1024 * 1024;
static DEFAULT_STACK_SIZE: uint = 1024 * 1024;
// This is the starting point of rust os threads. The first thing we do
// is make sure that we don't trigger __morestack (also why this has a
@ -41,9 +41,9 @@ static DEFAULT_STACK_SIZE: libc::size_t = 1024 * 1024;
// and invoke it.
#[no_split_stack]
extern fn thread_start(main: *libc::c_void) -> imp::rust_thread_return {
use rt::context;
use unstable::stack;
unsafe {
context::record_stack_bounds(0, uint::max_value);
stack::record_stack_bounds(0, uint::max_value);
let f: ~proc() = cast::transmute(main);
(*f)();
cast::transmute(0 as imp::rust_thread_return)
@ -69,6 +69,12 @@ impl Thread<()> {
/// called, when the `Thread` falls out of scope its destructor will block
/// waiting for the OS thread.
pub fn start<T: Send>(main: proc() -> T) -> Thread<T> {
Thread::start_stack(DEFAULT_STACK_SIZE, main)
}
/// Performs the same functionality as `start`, but specifies an explicit
/// stack size for the new thread.
pub fn start_stack<T: Send>(stack: uint, main: proc() -> T) -> Thread<T> {
// We need the address of the packet to fill in to be stable so when
// `main` fills it in it's still valid, so allocate an extra ~ box to do
@ -78,7 +84,7 @@ impl Thread<()> {
*cast::transmute::<&~Option<T>, **mut Option<T>>(&packet)
};
let main: proc() = proc() unsafe { *packet2 = Some(main()); };
let native = unsafe { imp::create(~main) };
let native = unsafe { imp::create(stack, ~main) };
Thread {
native: native,
@ -94,8 +100,14 @@ impl Thread<()> {
/// systems. Note that platforms may not keep the main program alive even if
/// there are detached thread still running around.
pub fn spawn(main: proc()) {
Thread::spawn_stack(DEFAULT_STACK_SIZE, main)
}
/// Performs the same functionality as `spawn`, but explicitly specifies a
/// stack size for the new thread.
pub fn spawn_stack(stack: uint, main: proc()) {
unsafe {
let handle = imp::create(~main);
let handle = imp::create(stack, ~main);
imp::detach(handle);
}
}
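A short sketch of the two new explicit-stack entry points (the stack sizes here are illustrative only):
// A joinable thread with a small 64 KB stack ...
let t = Thread::start_stack(64 * 1024, proc() { 1 + 1 });
assert_eq!(t.join(), 2);
// ... and a detached worker with a roomy 4 MB stack.
Thread::spawn_stack(4 * 1024 * 1024, proc() { /* background work */ });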
@ -132,8 +144,6 @@ impl<T: Send> Drop for Thread<T> {
#[cfg(windows)]
mod imp {
use super::DEFAULT_STACK_SIZE;
use cast;
use libc;
use libc::types::os::arch::extra::{LPSECURITY_ATTRIBUTES, SIZE_T, BOOL,
@ -143,9 +153,9 @@ mod imp {
pub type rust_thread = HANDLE;
pub type rust_thread_return = DWORD;
pub unsafe fn create(p: ~proc()) -> rust_thread {
pub unsafe fn create(stack: uint, p: ~proc()) -> rust_thread {
let arg: *mut libc::c_void = cast::transmute(p);
CreateThread(ptr::mut_null(), DEFAULT_STACK_SIZE, super::thread_start,
CreateThread(ptr::mut_null(), stack as libc::size_t, super::thread_start,
arg, 0, ptr::mut_null())
}
@ -183,17 +193,17 @@ mod imp {
use libc::consts::os::posix01::PTHREAD_CREATE_JOINABLE;
use libc;
use ptr;
use super::DEFAULT_STACK_SIZE;
use unstable::intrinsics;
pub type rust_thread = libc::pthread_t;
pub type rust_thread_return = *libc::c_void;
pub unsafe fn create(p: ~proc()) -> rust_thread {
pub unsafe fn create(stack: uint, p: ~proc()) -> rust_thread {
let mut native: libc::pthread_t = intrinsics::uninit();
let mut attr: libc::pthread_attr_t = intrinsics::uninit();
assert_eq!(pthread_attr_init(&mut attr), 0);
assert_eq!(pthread_attr_setstacksize(&mut attr, DEFAULT_STACK_SIZE), 0);
assert_eq!(pthread_attr_setstacksize(&mut attr,
stack as libc::size_t), 0);
assert_eq!(pthread_attr_setdetachstate(&mut attr,
PTHREAD_CREATE_JOINABLE), 0);
View File
@ -1,170 +0,0 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A very simple unsynchronized channel type for sending buffered data from
//! scheduler context to task context.
//!
//! XXX: This would be safer to use if split into two types like Port/Chan
use option::*;
use clone::Clone;
use super::rc::RC;
use rt::sched::Scheduler;
use rt::kill::BlockedTask;
use rt::local::Local;
use vec::OwnedVector;
use container::Container;
struct TubeState<T> {
blocked_task: Option<BlockedTask>,
buf: ~[T]
}
pub struct Tube<T> {
priv p: RC<TubeState<T>>
}
impl<T> Tube<T> {
pub fn new() -> Tube<T> {
Tube {
p: RC::new(TubeState {
blocked_task: None,
buf: ~[]
})
}
}
pub fn send(&mut self, val: T) {
rtdebug!("tube send");
unsafe {
let state = self.p.unsafe_borrow_mut();
(*state).buf.push(val);
if (*state).blocked_task.is_some() {
// There's a waiting task. Wake it up
rtdebug!("waking blocked tube");
let task = (*state).blocked_task.take_unwrap();
let sched: ~Scheduler = Local::take();
sched.resume_blocked_task_immediately(task);
}
}
}
pub fn recv(&mut self) -> T {
unsafe {
let state = self.p.unsafe_borrow_mut();
if !(*state).buf.is_empty() {
return (*state).buf.shift();
} else {
// Block and wait for the next message
rtdebug!("blocking on tube recv");
assert!(self.p.refcount() > 1); // There better be somebody to wake us up
assert!((*state).blocked_task.is_none());
let sched: ~Scheduler = Local::take();
sched.deschedule_running_task_and_then(|_, task| {
(*state).blocked_task = Some(task);
});
rtdebug!("waking after tube recv");
let buf = &mut (*state).buf;
assert!(!buf.is_empty());
return buf.shift();
}
}
}
}
impl<T> Clone for Tube<T> {
fn clone(&self) -> Tube<T> {
Tube { p: self.p.clone() }
}
}
#[cfg(test)]
mod test {
use rt::test::*;
use rt::rtio::EventLoop;
use rt::sched::Scheduler;
use rt::local::Local;
use super::*;
use prelude::*;
#[test]
fn simple_test() {
do run_in_newsched_task {
let mut tube: Tube<int> = Tube::new();
let mut tube_clone = Some(tube.clone());
let sched: ~Scheduler = Local::take();
sched.deschedule_running_task_and_then(|sched, task| {
let mut tube_clone = tube_clone.take_unwrap();
tube_clone.send(1);
sched.enqueue_blocked_task(task);
});
assert!(tube.recv() == 1);
}
}
#[test]
fn blocking_test() {
do run_in_newsched_task {
let mut tube: Tube<int> = Tube::new();
let mut tube_clone = Some(tube.clone());
let sched: ~Scheduler = Local::take();
sched.deschedule_running_task_and_then(|sched, task| {
let tube_clone = tube_clone.take_unwrap();
do sched.event_loop.callback {
let mut tube_clone = tube_clone;
// The task should be blocked on this now and
// sending will wake it up.
tube_clone.send(1);
}
sched.enqueue_blocked_task(task);
});
assert!(tube.recv() == 1);
}
}
#[test]
fn many_blocking_test() {
static MAX: int = 100;
do run_in_newsched_task {
let mut tube: Tube<int> = Tube::new();
let mut tube_clone = Some(tube.clone());
let sched: ~Scheduler = Local::take();
sched.deschedule_running_task_and_then(|sched, task| {
callback_send(tube_clone.take_unwrap(), 0);
fn callback_send(tube: Tube<int>, i: int) {
if i == 100 {
return
}
let mut sched = Local::borrow(None::<Scheduler>);
do sched.get().event_loop.callback {
let mut tube = tube;
// The task should be blocked on this now and
// sending will wake it up.
tube.send(i);
callback_send(tube, i + 1);
}
}
sched.enqueue_blocked_task(task);
});
for i in range(0, MAX) {
let j = tube.recv();
assert!(j == i);
}
}
}
}
View File
@ -8,11 +8,11 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Implementation of Rust stack unwinding
//
// For background on exception handling and stack unwinding please see "Exception Handling in LLVM"
// (llvm.org/docs/ExceptionHandling.html) and documents linked from it.
// For background on exception handling and stack unwinding please see
// "Exception Handling in LLVM" (llvm.org/docs/ExceptionHandling.html) and
// documents linked from it.
// These are also good reads:
// http://theofilos.cs.columbia.edu/blog/2013/09/22/base_abi/
// http://monoinfinito.wordpress.com/series/exception-handling-in-c/
@ -21,41 +21,55 @@
// ~~~ A brief summary ~~~
// Exception handling happens in two phases: a search phase and a cleanup phase.
//
// In both phases the unwinder walks stack frames from top to bottom using information from
// the stack frame unwind sections of the current process's modules ("module" here refers to
// an OS module, i.e. an executable or a dynamic library).
// In both phases the unwinder walks stack frames from top to bottom using
// information from the stack frame unwind sections of the current process's
// modules ("module" here refers to an OS module, i.e. an executable or a
// dynamic library).
//
// For each stack frame, it invokes the associated "personality routine", whose address is also
// stored in the unwind info section.
// For each stack frame, it invokes the associated "personality routine", whose
// address is also stored in the unwind info section.
//
// In the search phase, the job of a personality routine is to examine the exception object
// being thrown, and to decide whether it should be caught at that stack frame. Once the
// handler frame has been identified, the cleanup phase begins.
// In the search phase, the job of a personality routine is to examine the
// exception object being thrown, and to decide whether it should be caught at
// that stack frame. Once the handler frame has been identified, the cleanup
// phase begins.
//
// In the cleanup phase, personality routines invoke cleanup code associated with their
// stack frames (i.e. destructors). Once the stack has been unwound down to the handler
// frame level, unwinding stops and the last personality routine transfers control to
// its catch block.
// In the cleanup phase, personality routines invoke cleanup code associated
// with their stack frames (i.e. destructors). Once the stack has been unwound
// down to the handler frame level, unwinding stops and the last personality
// routine transfers control to its catch block.
//
// ~~~ Frame unwind info registration ~~~
// Each module has its own frame unwind info section (usually ".eh_frame"), and the
// unwinder needs to know about all of them in order for unwinding to be able to cross
// module boundaries.
// Each module has its own frame unwind info section (usually ".eh_frame"), and
// the unwinder needs to know about all of them in order for unwinding to be
// able to cross module boundaries.
//
// On some platforms, like Linux, this is achieved by dynamically enumerating currently loaded
// modules via the dl_iterate_phdr() API and finding all .eh_frame sections.
// On some platforms, like Linux, this is achieved by dynamically enumerating
// currently loaded modules via the dl_iterate_phdr() API and finding all
// .eh_frame sections.
//
// Others, like Windows, require modules to actively register their unwind info sections by calling
// __register_frame_info() API at startup.
// In the latter case it is essential that there is only one copy of the unwinder runtime
// in the process. This is usually achieved by linking to the dynamic version of the unwind
// runtime.
// Others, like Windows, require modules to actively register their unwind info
// sections by calling __register_frame_info() API at startup. In the latter
// case it is essential that there is only one copy of the unwinder runtime in
// the process. This is usually achieved by linking to the dynamic version of
// the unwind runtime.
//
// Currently Rust uses the unwind runtime provided by libgcc.
use prelude::*;
use cast::transmute;
use task::TaskResult;
use any::{Any, AnyRefExt};
use c_str::CString;
use cast;
use kinds::Send;
use libc::{c_char, size_t};
use libc::{c_void, c_int};
use self::libunwind::*;
use option::{Some, None, Option};
use result::{Err, Ok};
use rt::local::Local;
use rt::task::Task;
use str::Str;
use task::TaskResult;
use unstable::intrinsics;
use uw = self::libunwind;
mod libunwind {
//! Unwind library interface
@ -110,34 +124,41 @@ mod libunwind {
}
pub struct Unwinder {
unwinding: bool,
cause: Option<~Any>
priv unwinding: bool,
priv cause: Option<~Any>
}
impl Unwinder {
pub fn new() -> Unwinder {
Unwinder {
unwinding: false,
cause: None,
}
}
pub fn unwinding(&self) -> bool {
self.unwinding
}
pub fn try(&mut self, f: ||) {
use unstable::raw::Closure;
unsafe {
let closure: Closure = transmute(f);
let code = transmute(closure.code);
let env = transmute(closure.env);
let ep = rust_try(try_fn, code, env);
let closure: Closure = cast::transmute(f);
let ep = rust_try(try_fn, closure.code as *c_void,
closure.env as *c_void);
if !ep.is_null() {
rtdebug!("Caught {}", (*ep).exception_class);
_Unwind_DeleteException(ep);
uw::_Unwind_DeleteException(ep);
}
}
extern fn try_fn(code: *c_void, env: *c_void) {
unsafe {
let closure: Closure = Closure {
code: transmute(code),
env: transmute(env),
};
let closure: || = transmute(closure);
let closure: || = cast::transmute(Closure {
code: code as *(),
env: env as *(),
});
closure();
}
}
@ -145,10 +166,11 @@ impl Unwinder {
extern {
// Rust's try-catch
// When f(...) returns normally, the return value is null.
// When f(...) throws, the return value is a pointer to the caught exception object.
// When f(...) throws, the return value is a pointer to the caught
// exception object.
fn rust_try(f: extern "C" fn(*c_void, *c_void),
code: *c_void,
data: *c_void) -> *_Unwind_Exception;
data: *c_void) -> *uw::_Unwind_Exception;
}
}
@ -159,21 +181,21 @@ impl Unwinder {
self.cause = Some(cause);
unsafe {
let exception = ~_Unwind_Exception {
let exception = ~uw::_Unwind_Exception {
exception_class: rust_exception_class(),
exception_cleanup: exception_cleanup,
private_1: 0,
private_2: 0
};
let error = _Unwind_RaiseException(transmute(exception));
let error = uw::_Unwind_RaiseException(cast::transmute(exception));
rtabort!("Could not unwind stack, error = {}", error as int)
}
extern "C" fn exception_cleanup(_unwind_code: _Unwind_Reason_Code,
exception: *_Unwind_Exception) {
extern "C" fn exception_cleanup(_unwind_code: uw::_Unwind_Reason_Code,
exception: *uw::_Unwind_Exception) {
rtdebug!("exception_cleanup()");
unsafe {
let _: ~_Unwind_Exception = transmute(exception);
let _: ~uw::_Unwind_Exception = cast::transmute(exception);
}
}
}
@ -189,68 +211,146 @@ impl Unwinder {
// Rust's exception class identifier. This is used by personality routines to
// determine whether the exception was thrown by their own runtime.
fn rust_exception_class() -> _Unwind_Exception_Class {
let bytes = bytes!("MOZ\0RUST"); // vendor, language
unsafe {
let ptr: *_Unwind_Exception_Class = transmute(bytes.as_ptr());
*ptr
}
fn rust_exception_class() -> uw::_Unwind_Exception_Class {
// M O Z \0 R U S T -- vendor, language
0x4d4f5a_00_52555354
}
// We could implement our personality routine in pure Rust; however, exception info
// decoding is tedious. More importantly, personality routines have to handle various
// platform quirks, which are not fun to maintain. For this reason, we attempt to reuse
// the personality routine of the C language: __gcc_personality_v0.
// We could implement our personality routine in pure Rust; however, exception
// info decoding is tedious. More importantly, personality routines have to
// handle various platform quirks, which are not fun to maintain. For this
// reason, we attempt to reuse the personality routine of the C language:
// __gcc_personality_v0.
//
// Since C does not support exception catching, __gcc_personality_v0 simply always
// returns _URC_CONTINUE_UNWIND in search phase, and always returns _URC_INSTALL_CONTEXT
// (i.e. "invoke cleanup code") in cleanup phase.
// Since C does not support exception catching, __gcc_personality_v0 simply
// always returns _URC_CONTINUE_UNWIND in search phase, and always returns
// _URC_INSTALL_CONTEXT (i.e. "invoke cleanup code") in cleanup phase.
//
// This is pretty close to Rust's exception handling approach, except that Rust does have
// a single "catch-all" handler at the bottom of each task's stack.
// This is pretty close to Rust's exception handling approach, except that Rust
// does have a single "catch-all" handler at the bottom of each task's stack.
// So we have two versions:
// - rust_eh_personality, used by all cleanup landing pads, which never catches, so
// the behavior of __gcc_personality_v0 is perfectly adequate there, and
// - rust_eh_personality_catch, used only by rust_try(), which always catches. This is
// achieved by overriding the return value in search phase to always say "catch!".
// - rust_eh_personality, used by all cleanup landing pads, which never catches,
// so the behavior of __gcc_personality_v0 is perfectly adequate there, and
// - rust_eh_personality_catch, used only by rust_try(), which always catches.
// This is achieved by overriding the return value in search phase to always
// say "catch!".
extern "C" {
fn __gcc_personality_v0(version: c_int,
actions: _Unwind_Action,
exception_class: _Unwind_Exception_Class,
ue_header: *_Unwind_Exception,
context: *_Unwind_Context) -> _Unwind_Reason_Code;
actions: uw::_Unwind_Action,
exception_class: uw::_Unwind_Exception_Class,
ue_header: *uw::_Unwind_Exception,
context: *uw::_Unwind_Context)
-> uw::_Unwind_Reason_Code;
}
#[lang="eh_personality"]
#[no_mangle] // so we can reference it by name from middle/trans/base.rs
#[doc(hidden)]
#[cfg(not(test))]
pub extern "C" fn rust_eh_personality(version: c_int,
actions: _Unwind_Action,
exception_class: _Unwind_Exception_Class,
ue_header: *_Unwind_Exception,
context: *_Unwind_Context) -> _Unwind_Reason_Code {
pub extern "C" fn rust_eh_personality(
version: c_int,
actions: uw::_Unwind_Action,
exception_class: uw::_Unwind_Exception_Class,
ue_header: *uw::_Unwind_Exception,
context: *uw::_Unwind_Context
) -> uw::_Unwind_Reason_Code
{
unsafe {
__gcc_personality_v0(version, actions, exception_class, ue_header, context)
__gcc_personality_v0(version, actions, exception_class, ue_header,
context)
}
}
#[no_mangle] // referenced from rust_try.ll
#[doc(hidden)]
#[cfg(not(test))]
pub extern "C" fn rust_eh_personality_catch(version: c_int,
actions: _Unwind_Action,
exception_class: _Unwind_Exception_Class,
ue_header: *_Unwind_Exception,
context: *_Unwind_Context) -> _Unwind_Reason_Code {
if (actions as c_int & _UA_SEARCH_PHASE as c_int) != 0 { // search phase
_URC_HANDLER_FOUND // catch!
pub extern "C" fn rust_eh_personality_catch(
version: c_int,
actions: uw::_Unwind_Action,
exception_class: uw::_Unwind_Exception_Class,
ue_header: *uw::_Unwind_Exception,
context: *uw::_Unwind_Context
) -> uw::_Unwind_Reason_Code
{
if (actions as c_int & uw::_UA_SEARCH_PHASE as c_int) != 0 { // search phase
uw::_URC_HANDLER_FOUND // catch!
}
else { // cleanup phase
unsafe {
__gcc_personality_v0(version, actions, exception_class, ue_header, context)
__gcc_personality_v0(version, actions, exception_class, ue_header,
context)
}
}
}
/// This is the entry point of unwinding for things like lang items and such.
/// The arguments are normally generated by the compiler, and need to
/// have static lifetimes.
pub fn begin_unwind_raw(msg: *c_char, file: *c_char, line: size_t) -> ! {
#[inline]
fn static_char_ptr(p: *c_char) -> &'static str {
let s = unsafe { CString::new(p, false) };
match s.as_str() {
Some(s) => unsafe { cast::transmute::<&str, &'static str>(s) },
None => rtabort!("message wasn't utf8?")
}
}
let msg = static_char_ptr(msg);
let file = static_char_ptr(file);
begin_unwind(msg, file, line as uint)
}
/// This is the entry point of unwinding for fail!() and assert!().
pub fn begin_unwind<M: Any + Send>(msg: M, file: &'static str, line: uint) -> ! {
unsafe {
let task: *mut Task;
// Note that this should be the only allocation performed in this block.
// Currently this means that fail!() on OOM will invoke this code path,
// but then again we're not really ready for failing on OOM anyway. If
// we do start doing this, then we should propagate this allocation to
// be performed in the parent of this task instead of the task that's
// failing.
let msg = ~msg as ~Any;
{
let msg_s = match msg.as_ref::<&'static str>() {
Some(s) => *s,
None => match msg.as_ref::<~str>() {
Some(s) => s.as_slice(),
None => "~Any",
}
};
// It is assumed that all reasonable rust code will have a local
// task at all times. This means that this `try_unsafe_borrow` will
// succeed almost all of the time. There are border cases, however,
// when the runtime has *almost* set up the local task, but hasn't
// quite gotten there yet. In order to get some better diagnostics,
// we print on failure and immediately abort the whole process if
// there is no local task available.
match Local::try_unsafe_borrow() {
Some(t) => {
task = t;
let n = (*task).name.as_ref()
.map(|n| n.as_slice()).unwrap_or("<unnamed>");
rterrln!("task '{}' failed at '{}', {}:{}", n, msg_s,
file, line);
}
None => {
rterrln!("failed at '{}', {}:{}", msg_s, file, line);
intrinsics::abort();
}
}
if (*task).unwinder.unwinding {
rtabort!("unwinding again");
}
}
(*task).unwinder.begin_unwind(msg);
}
}
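For orientation, a hedged sketch of what a `fail!` invocation lowers to; the exact expansion is an assumption:
// fail!("boom") becomes, approximately:
begin_unwind("boom", file!(), line!());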
View File
@ -15,7 +15,6 @@ use libc;
use option::{Some, None, Option};
use os;
use str::StrSlice;
use unstable::atomics::{AtomicInt, INIT_ATOMIC_INT, SeqCst};
use unstable::running_on_valgrind;
// Indicates whether we should perform expensive sanity checks, including rtassert!
@ -68,11 +67,21 @@ pub fn default_sched_threads() -> uint {
}
pub fn dumb_println(args: &fmt::Arguments) {
use io::native::file::FileDesc;
use io;
use libc;
let mut out = FileDesc::new(libc::STDERR_FILENO, false);
fmt::writeln(&mut out as &mut io::Writer, args);
struct Stderr;
impl io::Writer for Stderr {
fn write(&mut self, data: &[u8]) {
unsafe {
libc::write(libc::STDERR_FILENO,
data.as_ptr() as *libc::c_void,
data.len() as libc::size_t);
}
}
}
let mut w = Stderr;
fmt::writeln(&mut w as &mut io::Writer, args);
}
pub fn abort(msg: &str) -> ! {
@ -133,13 +142,3 @@ memory and partly incapable of presentation to others.",
unsafe { libc::abort() }
}
}
static mut EXIT_STATUS: AtomicInt = INIT_ATOMIC_INT;
pub fn set_exit_status(code: int) {
unsafe { EXIT_STATUS.store(code, SeqCst) }
}
pub fn get_exit_status() -> int {
unsafe { EXIT_STATUS.load(SeqCst) }
}
View File
@ -338,8 +338,8 @@ mod tests {
use str;
use task::spawn;
use unstable::running_on_valgrind;
use io::native::file;
use io::{FileNotFound, Reader, Writer, io_error};
use io::pipe::PipeStream;
use io::{Writer, Reader, io_error, FileNotFound, OtherIoError};
#[test]
#[cfg(not(target_os="android"))] // FIXME(#10380)
@ -426,13 +426,13 @@ mod tests {
}
fn writeclose(fd: c_int, s: &str) {
let mut writer = file::FileDesc::new(fd, true);
let mut writer = PipeStream::open(fd);
writer.write(s.as_bytes());
}
fn readclose(fd: c_int) -> ~str {
let mut res = ~[];
let mut reader = file::FileDesc::new(fd, true);
let mut reader = PipeStream::open(fd);
let mut buf = [0, ..1024];
loop {
match reader.read(buf) {
src/libstd/sync/arc.rs Normal file
View File
@ -0,0 +1,152 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Atomically reference counted data
//!
//! This module contains the implementation of an atomically reference counted
//! pointer for the purpose of sharing data between tasks. This is obviously a
//! very unsafe primitive to use, but it has its use cases when implementing
//! concurrent data structures and similar tasks.
//!
//! Great care must be taken to ensure that data races do not arise through the
//! usage of `UnsafeArc`, and this often requires some form of external
//! synchronization. The only guarantee provided to you by this class is that
//! the underlying data will remain valid (not free'd) so long as the reference
//! count is greater than zero.
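A hedged usage sketch (illustration only, not part of the patch): the atomic provides the external synchronization, while the `UnsafeArc` merely keeps the allocation alive; `spawn` capturing by move is assumed:
let (a, b) = UnsafeArc::new2(AtomicUint::new(0));
do spawn {
    // `b` is moved into the child task; both handles share one ArcData.
    unsafe { (*b.get()).fetch_add(1, SeqCst); }
}
unsafe { (*a.get()).fetch_add(1, SeqCst); }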
use cast;
use clone::Clone;
use kinds::Send;
use ops::Drop;
use ptr::RawPtr;
use sync::atomics::{AtomicUint, SeqCst, Relaxed, Acquire};
use vec;
/// An atomically reference counted pointer.
///
/// Enforces no shared-memory safety.
#[unsafe_no_drop_flag]
pub struct UnsafeArc<T> {
priv data: *mut ArcData<T>,
}
struct ArcData<T> {
count: AtomicUint,
data: T,
}
unsafe fn new_inner<T: Send>(data: T, refcount: uint) -> *mut ArcData<T> {
let data = ~ArcData { count: AtomicUint::new(refcount), data: data };
cast::transmute(data)
}
impl<T: Send> UnsafeArc<T> {
/// Creates a new `UnsafeArc` which wraps the given data.
pub fn new(data: T) -> UnsafeArc<T> {
unsafe { UnsafeArc { data: new_inner(data, 1) } }
}
/// As new(), but returns an extra pre-cloned handle.
pub fn new2(data: T) -> (UnsafeArc<T>, UnsafeArc<T>) {
unsafe {
let ptr = new_inner(data, 2);
(UnsafeArc { data: ptr }, UnsafeArc { data: ptr })
}
}
/// As new(), but returns a vector of as many pre-cloned handles as
/// requested.
pub fn newN(data: T, num_handles: uint) -> ~[UnsafeArc<T>] {
unsafe {
if num_handles == 0 {
~[] // need to free data here
} else {
let ptr = new_inner(data, num_handles);
vec::from_fn(num_handles, |_| UnsafeArc { data: ptr })
}
}
}
/// Gets a pointer to the inner shared data. Note that care must be taken to
/// ensure that the outer `UnsafeArc` does not fall out of scope while this
/// pointer is in use, otherwise dereferencing it may become a use-after-free.
#[inline]
pub fn get(&self) -> *mut T {
unsafe {
assert!((*self.data).count.load(Relaxed) > 0);
return &mut (*self.data).data as *mut T;
}
}
/// Gets an immutable pointer to the inner shared data. This has the same
/// caveats as the `get` method.
#[inline]
pub fn get_immut(&self) -> *T {
unsafe {
assert!((*self.data).count.load(Relaxed) > 0);
return &(*self.data).data as *T;
}
}
}
impl<T: Send> Clone for UnsafeArc<T> {
fn clone(&self) -> UnsafeArc<T> {
unsafe {
// This barrier might be unnecessary, but I'm not sure...
let old_count = (*self.data).count.fetch_add(1, Acquire);
assert!(old_count >= 1);
return UnsafeArc { data: self.data };
}
}
}
#[unsafe_destructor]
impl<T> Drop for UnsafeArc<T>{
fn drop(&mut self) {
unsafe {
// Happens when destructing an unwrapper's handle and from
// `#[unsafe_no_drop_flag]`
if self.data.is_null() {
return
}
// Must be acquire+release, not just release, to make sure this
// doesn't get reordered to after the unwrapper pointer load.
let old_count = (*self.data).count.fetch_sub(1, SeqCst);
assert!(old_count >= 1);
if old_count == 1 {
let _: ~ArcData<T> = cast::transmute(self.data);
}
}
}
}
#[cfg(test)]
mod tests {
use prelude::*;
use super::UnsafeArc;
use mem::size_of;
#[test]
fn test_size() {
assert_eq!(size_of::<UnsafeArc<[int, ..10]>>(), size_of::<*[int, ..10]>());
}
#[test]
fn arclike_newN() {
// Tests that the many-refcounts-at-once constructors don't leak.
let _ = UnsafeArc::new2(~~"hello");
let x = UnsafeArc::newN(~~"hello", 0);
assert_eq!(x.len(), 0);
let x = UnsafeArc::newN(~~"hello", 1);
assert_eq!(x.len(), 1);
let x = UnsafeArc::newN(~~"hello", 10);
assert_eq!(x.len(), 10);
}
}
View File
@ -11,13 +11,16 @@
/*!
* Atomic types
*
* Basic atomic types supporting atomic operations. Each method takes an `Ordering` which
* represents the strength of the memory barrier for that operation. These orderings are the same
* as C++11 atomic orderings [http://gcc.gnu.org/wiki/Atomic/GCCMM/AtomicSync]
* Basic atomic types supporting atomic operations. Each method takes an
* `Ordering` which represents the strength of the memory barrier for that
* operation. These orderings are the same as C++11 atomic orderings
* [http://gcc.gnu.org/wiki/Atomic/GCCMM/AtomicSync]
*
* All atomic types are a single word in size.
*/
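A minimal sketch of the API shape described above, using method names that appear at call sites elsewhere in this patch:
// A sequentially consistent counter.
let mut a = AtomicUint::new(0);
a.fetch_add(1, SeqCst);        // atomic increment with a full barrier
assert_eq!(a.load(SeqCst), 1);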
#[allow(missing_doc)];
use unstable::intrinsics;
use cast;
use option::{Option,Some,None};
View File
@ -50,15 +50,18 @@
use cast;
use clone::Clone;
use iter::range;
use iter::{range, Iterator};
use kinds::Send;
use libc;
use mem;
use ops::Drop;
use option::{Option, Some, None};
use ptr;
use unstable::atomics::{AtomicInt, AtomicPtr, SeqCst};
use unstable::sync::{UnsafeArc, Exclusive};
use ptr::RawPtr;
use sync::arc::UnsafeArc;
use sync::atomics::{AtomicInt, AtomicPtr, SeqCst};
use unstable::sync::Exclusive;
use vec::{OwnedVector, ImmutableVector};
// Once the queue is less than 1/K full, then it will be downsized. Note that
// the deque requires that this number be less than 2.
@ -399,8 +402,8 @@ mod tests {
use rt::thread::Thread;
use rand;
use rand::Rng;
use unstable::atomics::{AtomicBool, INIT_ATOMIC_BOOL, SeqCst,
AtomicUint, INIT_ATOMIC_UINT};
use sync::atomics::{AtomicBool, INIT_ATOMIC_BOOL, SeqCst,
AtomicUint, INIT_ATOMIC_UINT};
use vec;
#[test]
View File
@ -8,19 +8,16 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-fast make-check does not like `#[start]`
//! Useful synchronization primitives
//!
//! This module contains useful safe and unsafe synchronization primitives.
//! Most of the primitives in this module do not provide any sort of locking
//! and/or blocking at all, but rather provide the necessary tools to build
//! other types of concurrent primitives.
use std::rt;
#[start]
fn start(argc: int, argv: **u8) -> int {
do rt::start(argc, argv) {
println("First invocation");
};
do rt::start(argc, argv) {
println("Second invocation");
};
0
}
pub mod arc;
pub mod atomics;
pub mod deque;
pub mod mpmc_bounded_queue;
pub mod mpsc_queue;
pub mod spsc_queue;
View File
@ -25,15 +25,17 @@
* policies, either expressed or implied, of Dmitry Vyukov.
*/
#[allow(missing_doc, dead_code)];
// http://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue
use unstable::sync::UnsafeArc;
use unstable::atomics::{AtomicUint,Relaxed,Release,Acquire};
use option::*;
use vec;
use clone::Clone;
use kinds::Send;
use num::{Exponential,Algebraic,Round};
use option::{Option, Some, None};
use sync::arc::UnsafeArc;
use sync::atomics::{AtomicUint,Relaxed,Release,Acquire};
use vec;
struct Node<T> {
sequence: AtomicUint,
@ -161,8 +163,8 @@ impl<T: Send> Clone for Queue<T> {
mod tests {
use prelude::*;
use option::*;
use task;
use super::Queue;
use native;
#[test]
fn test() {
@ -170,14 +172,17 @@ mod tests {
let nmsgs = 1000u;
let mut q = Queue::with_capacity(nthreads*nmsgs);
assert_eq!(None, q.pop());
let (port, chan) = SharedChan::new();
for _ in range(0, nthreads) {
let q = q.clone();
do task::spawn_sched(task::SingleThreaded) {
let chan = chan.clone();
do native::task::spawn {
let mut q = q;
for i in range(0, nmsgs) {
assert!(q.push(i));
}
chan.send(());
}
}
@ -186,7 +191,7 @@ mod tests {
let (completion_port, completion_chan) = Chan::new();
completion_ports.push(completion_port);
let q = q.clone();
do task::spawn_sched(task::SingleThreaded) {
do native::task::spawn {
let mut q = q;
let mut i = 0u;
loop {
@ -205,5 +210,8 @@ mod tests {
for completion_port in completion_ports.mut_iter() {
assert_eq!(nmsgs, completion_port.recv());
}
for _ in range(0, nthreads) {
port.recv();
}
}
}
View File
@ -26,6 +26,14 @@
*/
//! A mostly lock-free multi-producer, single-consumer queue.
//!
//! This module contains an implementation of a concurrent MPSC queue. This
//! queue can be used to share data between tasks, and is also used as the
//! building block of channels in rust.
//!
//! Note that the current implementation of this queue has a caveat on the
//! `pop` method; see that method's documentation for more information. Due to
//! this caveat, this queue may not be appropriate for all use-cases.
// http://www.1024cores.net/home/lock-free-algorithms
// /queues/non-intrusive-mpsc-node-based-queue
@ -35,9 +43,11 @@ use clone::Clone;
use kinds::Send;
use ops::Drop;
use option::{Option, None, Some};
use unstable::atomics::{AtomicPtr, Release, Acquire, AcqRel, Relaxed};
use unstable::sync::UnsafeArc;
use ptr::RawPtr;
use sync::arc::UnsafeArc;
use sync::atomics::{AtomicPtr, Release, Acquire, AcqRel, Relaxed};
/// A result of the `pop` function.
pub enum PopResult<T> {
/// Some data has been popped
Data(T),
@ -61,10 +71,14 @@ struct State<T, P> {
packet: P,
}
/// The consumer half of this concurrent queue. This half is used to receive
/// data from the producers.
pub struct Consumer<T, P> {
priv state: UnsafeArc<State<T, P>>,
}
/// The production half of the concurrent queue. This handle may be cloned in
/// order to make handles for new producers.
pub struct Producer<T, P> {
priv state: UnsafeArc<State<T, P>>,
}
@ -75,6 +89,11 @@ impl<T: Send, P: Send> Clone for Producer<T, P> {
}
}
/// Creates a new MPSC queue. The given argument `p` is a user-defined "packet"
/// of information which will be shared by the consumer and the producer which
/// can be re-acquired via the `packet` function. This is helpful when extra
/// state is shared between the producer and consumer, but note that there is no
/// synchronization performed on this data.
pub fn queue<T: Send, P: Send>(p: P) -> (Consumer<T, P>, Producer<T, P>) {
unsafe {
let (a, b) = UnsafeArc::new2(State::new(p));
@ -92,7 +111,7 @@ impl<T> Node<T> {
}
impl<T: Send, P: Send> State<T, P> {
pub unsafe fn new(p: P) -> State<T, P> {
unsafe fn new(p: P) -> State<T, P> {
let stub = Node::new(None);
State {
head: AtomicPtr::new(stub),
@ -122,10 +141,6 @@ impl<T: Send, P: Send> State<T, P> {
if self.head.load(Acquire) == tail {Empty} else {Inconsistent}
}
unsafe fn is_empty(&mut self) -> bool {
return (*self.tail).next.load(Acquire).is_null();
}
}
#[unsafe_destructor]
@ -143,27 +158,42 @@ impl<T: Send, P: Send> Drop for State<T, P> {
}
impl<T: Send, P: Send> Producer<T, P> {
/// Pushes a new value onto this queue.
pub fn push(&mut self, value: T) {
unsafe { (*self.state.get()).push(value) }
}
pub fn is_empty(&self) -> bool {
unsafe{ (*self.state.get()).is_empty() }
}
/// Gets an unsafe pointer to the user-defined packet shared by the
/// producers and the consumer. Note that care must be taken to ensure that
/// the lifetime of the queue outlives the usage of the returned pointer.
pub unsafe fn packet(&self) -> *mut P {
&mut (*self.state.get()).packet as *mut P
}
}
impl<T: Send, P: Send> Consumer<T, P> {
/// Pops some data from this queue.
///
/// Note that the current implementation means that this function cannot
/// return `Option<T>`. It is possible for this queue to be in an
/// inconsistent state where many pushes have succeeded and completely
/// finished, but pops cannot return `Some(t)`. This inconsistent state
/// happens when a pusher is pre-empted at an inopportune moment.
///
/// This inconsistent state means that this queue does indeed have data, but
/// the consumer cannot gain access to it at this time.
pub fn pop(&mut self) -> PopResult<T> {
unsafe { (*self.state.get()).pop() }
}
/// Attempts to pop data from this queue, but doesn't attempt too hard. This
/// will canonicalize inconsistent states to a `None` value.
pub fn casual_pop(&mut self) -> Option<T> {
match self.pop() {
Data(t) => Some(t),
Empty | Inconsistent => None,
}
}
/// Gets an unsafe pointer to the underlying user-defined packet. See
/// `Producer.packet` for more information.
pub unsafe fn packet(&self) -> *mut P {
&mut (*self.state.get()).packet as *mut P
}
@ -173,8 +203,8 @@ impl<T: Send, P: Send> Consumer<T, P> {
mod tests {
use prelude::*;
use task;
use super::{queue, Data, Empty, Inconsistent};
use native;
#[test]
fn test_full() {
@ -192,14 +222,17 @@ mod tests {
Empty => {}
Inconsistent | Data(..) => fail!()
}
let (port, chan) = SharedChan::new();
for _ in range(0, nthreads) {
let q = p.clone();
do task::spawn_sched(task::SingleThreaded) {
let chan = chan.clone();
do native::task::spawn {
let mut q = q;
for i in range(0, nmsgs) {
q.push(i);
}
chan.send(());
}
}
@ -210,6 +243,9 @@ mod tests {
Data(_) => { i += 1 }
}
}
for _ in range(0, nthreads) {
port.recv();
}
}
}
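A consumer-side sketch of how the `Inconsistent` state is typically handled (single-threaded here purely for illustration):
let (mut cons, mut prod) = queue(());
prod.push(1);
loop {
    match cons.pop() {
        Data(i) => { assert_eq!(i, 1); break }
        Empty => break,
        // A pusher was pre-empted mid-push: the data exists but is not
        // reachable yet, so retry.
        Inconsistent => (),
    }
}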
View File
@ -26,12 +26,20 @@
*/
// http://www.1024cores.net/home/lock-free-algorithms/queues/unbounded-spsc-queue
//! A single-producer single-consumer concurrent queue
//!
//! This module contains the implementation of an SPSC queue which can be used
//! concurrently between two tasks. This data structure is safe to use and
//! enforces the semantics that there is one pusher and one popper.
use cast;
use kinds::Send;
use ops::Drop;
use option::{Some, None, Option};
use unstable::atomics::{AtomicPtr, Relaxed, AtomicUint, Acquire, Release};
use unstable::sync::UnsafeArc;
use ptr::RawPtr;
use sync::arc::UnsafeArc;
use sync::atomics::{AtomicPtr, Relaxed, AtomicUint, Acquire, Release};
// Node within the linked list queue of messages to send
struct Node<T> {
@ -64,14 +72,34 @@ struct State<T, P> {
packet: P,
}
/// Producer half of this queue. This handle is used to push data to the
/// consumer.
pub struct Producer<T, P> {
priv state: UnsafeArc<State<T, P>>,
}
/// Consumer half of this queue. This handle is used to receive data from the
/// producer.
pub struct Consumer<T, P> {
priv state: UnsafeArc<State<T, P>>,
}
/// Creates a new queue. The producer returned is connected to the consumer to
/// push all data to the consumer.
///
/// # Arguments
///
/// * `bound` - This queue implementation is implemented with a linked list,
/// and this means that a push is always a malloc. In order to
/// amortize this cost, an internal cache of nodes is maintained
/// to prevent a malloc from always being necessary. This bound is
/// the limit on the size of the cache (if desired). If the value
/// is 0, then the cache has no bound. Otherwise, the cache will
/// never grow larger than `bound` (although the queue itself
/// could be much larger).
///
/// * `p` - This is the user-defined packet of data which will also be shared
/// between the producer and consumer.
pub fn queue<T: Send, P: Send>(bound: uint,
p: P) -> (Consumer<T, P>, Producer<T, P>)
{
@ -105,21 +133,31 @@ impl<T: Send> Node<T> {
}
impl<T: Send, P: Send> Producer<T, P> {
/// Pushes data onto the queue
pub fn push(&mut self, t: T) {
unsafe { (*self.state.get()).push(t) }
}
/// Tests whether the queue is empty. Note that a return value of `false` is
/// significant, while a return value of `true` carries almost no meaning,
/// since the queue's state may change immediately afterwards.
pub fn is_empty(&self) -> bool {
unsafe { (*self.state.get()).is_empty() }
}
/// Acquires an unsafe pointer to the underlying user-defined packet. Note
/// that care must be taken to ensure that the queue outlives the usage of
/// the packet (because it is an unsafe pointer).
pub unsafe fn packet(&self) -> *mut P {
&mut (*self.state.get()).packet as *mut P
}
}
impl<T: Send, P: Send> Consumer<T, P> {
/// Pops some data from this queue, returning `None` when the queue is
/// empty.
pub fn pop(&mut self) -> Option<T> {
unsafe { (*self.state.get()).pop() }
}
/// Same function as the producer's `packet` method.
pub unsafe fn packet(&self) -> *mut P {
&mut (*self.state.get()).packet as *mut P
}
@ -230,7 +268,7 @@ impl<T: Send, P: Send> Drop for State<T, P> {
mod test {
use prelude::*;
use super::queue;
use task;
use native;
#[test]
fn smoke() {
@ -276,7 +314,8 @@ mod test {
fn stress_bound(bound: uint) {
let (c, mut p) = queue(bound, ());
do task::spawn_sched(task::SingleThreaded) {
let (port, chan) = Chan::new();
do native::task::spawn {
let mut c = c;
for _ in range(0, 100000) {
loop {
@ -287,10 +326,12 @@ mod test {
}
}
}
chan.send(());
}
for _ in range(0, 100000) {
p.push(1);
}
port.recv();
}
}
}
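A hedged end-to-end sketch mirroring the test above: one native producer task, with the calling task as the consumer:
let (mut cons, prod) = queue(16, ()); // cache at most 16 spare nodes
do native::task::spawn {
    let mut prod = prod;
    prod.push(~"hello");
}
loop {
    match cons.pop() {
        Some(s) => { assert_eq!(s, ~"hello"); break }
        None => (), // producer hasn't pushed yet; spin
    }
}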
View File
@ -53,22 +53,22 @@
#[allow(missing_doc)];
use prelude::*;
use any::Any;
use comm::{Chan, Port};
use kinds::Send;
use option::{None, Some, Option};
use result::{Result, Ok, Err};
use rt::in_green_task_context;
use rt::local::Local;
use rt::task::Task;
use send_str::{SendStr, IntoSendStr};
use str::Str;
use util;
#[cfg(test)] use any::Any;
#[cfg(test)] use any::{AnyOwnExt, AnyRefExt};
#[cfg(test)] use comm::SharedChan;
#[cfg(test)] use ptr;
#[cfg(test)] use result;
pub mod spawn;
/// Indicates the manner in which a task exited.
///
/// A task that completes without failing is considered to exit successfully.
@ -80,27 +80,6 @@ pub mod spawn;
/// children tasks complete, recommend using a result future.
pub type TaskResult = Result<(), ~Any>;
/// Scheduler modes
#[deriving(Eq)]
pub enum SchedMode {
/// Run task on the default scheduler
DefaultScheduler,
/// All tasks run in the same OS thread
SingleThreaded,
}
/**
* Scheduler configuration options
*
* # Fields
*
* * sched_mode - The operating mode of the scheduler
*
*/
pub struct SchedOpts {
priv mode: SchedMode,
}
/**
* Task configuration options
*
@ -121,10 +100,9 @@ pub struct SchedOpts {
* scheduler other tasks will be impeded or even blocked indefinitely.
*/
pub struct TaskOpts {
priv watched: bool,
priv notify_chan: Option<Chan<TaskResult>>,
watched: bool,
notify_chan: Option<Chan<TaskResult>>,
name: Option<SendStr>,
sched: SchedOpts,
stack_size: Option<uint>
}
@ -153,7 +131,7 @@ pub struct TaskBuilder {
*/
pub fn task() -> TaskBuilder {
TaskBuilder {
opts: default_task_opts(),
opts: TaskOpts::new(),
gen_body: None,
can_not_copy: None,
}
@ -169,7 +147,6 @@ impl TaskBuilder {
watched: self.opts.watched,
notify_chan: notify_chan,
name: name,
sched: self.opts.sched,
stack_size: self.opts.stack_size
},
gen_body: gen_body,
@ -229,11 +206,6 @@ impl TaskBuilder {
self.opts.name = Some(name.into_send_str());
}
/// Configure a custom scheduler mode for the task.
pub fn sched_mode(&mut self, mode: SchedMode) {
self.opts.sched.mode = mode;
}
/**
* Add a wrapper to the body of the spawned task.
*
@ -285,7 +257,6 @@ impl TaskBuilder {
watched: x.opts.watched,
notify_chan: notify_chan,
name: name,
sched: x.opts.sched,
stack_size: x.opts.stack_size
};
let f = match gen_body {
@ -296,7 +267,9 @@ impl TaskBuilder {
f
}
};
spawn::spawn_raw(opts, f);
let t: ~Task = Local::take();
t.spawn_sibling(opts, f);
}
/**
@ -328,25 +301,23 @@ impl TaskBuilder {
}
}
/* Task construction */
pub fn default_task_opts() -> TaskOpts {
/*!
* The default task options
*
* By default all tasks are supervised by their parent, are spawned
* into the same scheduler, and do not post lifecycle notifications.
*/
impl TaskOpts {
pub fn new() -> TaskOpts {
/*!
* The default task options
*
* By default all tasks are supervised by their parent, are spawned
* into the same scheduler, and do not post lifecycle notifications.
*/
TaskOpts {
watched: true,
notify_chan: None,
name: None,
sched: SchedOpts {
mode: DefaultScheduler,
},
stack_size: None
TaskOpts {
watched: true,
notify_chan: None,
name: None,
stack_size: None
}
}
}
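With `default_task_opts` gone, configuration now starts from `TaskOpts::new` and uses functional record update; a sketch:
let opts = TaskOpts {
    watched: false,                // not supervised by its parent
    stack_size: Some(1024 * 1024), // 1 MB stack
    .. TaskOpts::new()
};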
@ -363,24 +334,6 @@ pub fn spawn(f: proc()) {
task.spawn(f)
}
pub fn spawn_sched(mode: SchedMode, f: proc()) {
/*!
* Creates a new task on a new or existing scheduler.
*
* When there are no more tasks to execute the
* scheduler terminates.
*
* # Failure
*
* In manual threads mode the number of threads requested must be
* greater than zero.
*/
let mut task = task();
task.sched_mode(mode);
task.spawn(f)
}
pub fn try<T:Send>(f: proc() -> T) -> Result<T, ~Any> {
/*!
* Execute a function in another task and return either the return value
@ -400,14 +353,10 @@ pub fn try<T:Send>(f: proc() -> T) -> Result<T, ~Any> {
pub fn with_task_name<U>(blk: |Option<&str>| -> U) -> U {
use rt::task::Task;
if in_green_task_context() {
let mut task = Local::borrow(None::<Task>);
match task.get().name {
Some(ref name) => blk(Some(name.as_slice())),
None => blk(None)
}
} else {
fail!("no task name exists in non-green task context")
let mut task = Local::borrow(None::<Task>);
match task.get().name {
Some(ref name) => blk(Some(name.as_slice())),
None => blk(None)
}
}
@ -415,11 +364,10 @@ pub fn deschedule() {
//! Yield control to the task scheduler
use rt::local::Local;
use rt::sched::Scheduler;
// FIXME(#7544): Optimize this, since we know we won't block.
let sched: ~Scheduler = Local::take();
sched.yield_now();
let task: ~Task = Local::take();
task.yield_now();
}
pub fn failing() -> bool {
@ -428,7 +376,7 @@ pub fn failing() -> bool {
use rt::task::Task;
let mut local = Local::borrow(None::<Task>);
local.get().unwinder.unwinding
local.get().unwinder.unwinding()
}
// The following 8 tests test the following 2^3 combinations:
@ -439,59 +387,43 @@ pub fn failing() -> bool {
#[test]
fn test_unnamed_task() {
use rt::test::run_in_uv_task;
do run_in_uv_task {
do spawn {
with_task_name(|name| {
assert!(name.is_none());
})
}
do spawn {
with_task_name(|name| {
assert!(name.is_none());
})
}
}
#[test]
fn test_owned_named_task() {
use rt::test::run_in_uv_task;
do run_in_uv_task {
let mut t = task();
t.name(~"ada lovelace");
do t.spawn {
with_task_name(|name| {
assert!(name.unwrap() == "ada lovelace");
})
}
let mut t = task();
t.name(~"ada lovelace");
do t.spawn {
with_task_name(|name| {
assert!(name.unwrap() == "ada lovelace");
})
}
}
#[test]
fn test_static_named_task() {
use rt::test::run_in_uv_task;
do run_in_uv_task {
let mut t = task();
t.name("ada lovelace");
do t.spawn {
with_task_name(|name| {
assert!(name.unwrap() == "ada lovelace");
})
}
let mut t = task();
t.name("ada lovelace");
do t.spawn {
with_task_name(|name| {
assert!(name.unwrap() == "ada lovelace");
})
}
}
#[test]
fn test_send_named_task() {
use rt::test::run_in_uv_task;
do run_in_uv_task {
let mut t = task();
t.name("ada lovelace".into_send_str());
do t.spawn {
with_task_name(|name| {
assert!(name.unwrap() == "ada lovelace");
})
}
let mut t = task();
t.name("ada lovelace".into_send_str());
do t.spawn {
with_task_name(|name| {
assert!(name.unwrap() == "ada lovelace");
})
}
}
@ -562,28 +494,19 @@ fn test_try_fail() {
}
}
#[cfg(test)]
fn get_sched_id() -> int {
use rt::sched::Scheduler;
let mut sched = Local::borrow(None::<Scheduler>);
sched.get().sched_id() as int
}
#[test]
fn test_spawn_sched() {
use clone::Clone;
let (po, ch) = SharedChan::new();
fn f(i: int, ch: SharedChan<()>) {
let parent_sched_id = get_sched_id();
do spawn_sched(SingleThreaded) {
let child_sched_id = get_sched_id();
assert!(parent_sched_id != child_sched_id);
let ch = ch.clone();
do spawn {
if (i == 0) {
ch.send(());
} else {
f(i - 1, ch.clone());
f(i - 1, ch);
}
};
@ -596,16 +519,9 @@ fn test_spawn_sched() {
fn test_spawn_sched_childs_on_default_sched() {
let (po, ch) = Chan::new();
// Assuming tests run on the default scheduler
let default_id = get_sched_id();
do spawn_sched(SingleThreaded) {
do spawn {
let ch = ch;
let parent_sched_id = get_sched_id();
do spawn {
let child_sched_id = get_sched_id();
assert!(parent_sched_id != child_sched_id);
assert_eq!(child_sched_id, default_id);
ch.send(());
};
};
@ -613,65 +529,6 @@ fn test_spawn_sched_childs_on_default_sched() {
po.recv();
}
#[test]
fn test_spawn_sched_blocking() {
use unstable::mutex::Mutex;
unsafe {
// Testing that a task in one scheduler can block in foreign code
// without affecting other schedulers
20u.times(|| {
let (start_po, start_ch) = Chan::new();
let (fin_po, fin_ch) = Chan::new();
let mut lock = Mutex::new();
let lock2 = lock.clone();
do spawn_sched(SingleThreaded) {
let mut lock = lock2;
lock.lock();
start_ch.send(());
// Block the scheduler thread
lock.wait();
lock.unlock();
fin_ch.send(());
};
// Wait until the other task has its lock
start_po.recv();
fn pingpong(po: &Port<int>, ch: &Chan<int>) {
let mut val = 20;
while val > 0 {
val = po.recv();
ch.try_send(val - 1);
}
}
let (setup_po, setup_ch) = Chan::new();
let (parent_po, parent_ch) = Chan::new();
do spawn {
let (child_po, child_ch) = Chan::new();
setup_ch.send(child_ch);
pingpong(&child_po, &parent_ch);
};
let child_ch = setup_po.recv();
child_ch.send(20);
pingpong(&parent_po, &child_ch);
lock.lock();
lock.signal();
lock.unlock();
fin_po.recv();
lock.destroy();
})
}
}
#[cfg(test)]
fn avoid_copying_the_body(spawnfn: |v: proc()|) {
let (p, ch) = Chan::<uint>::new();
@ -735,11 +592,7 @@ fn test_child_doesnt_ref_parent() {
#[test]
fn test_simple_newsched_spawn() {
use rt::test::run_in_uv_task;
do run_in_uv_task {
spawn(proc()())
}
spawn(proc()())
}
#[test]
View File
@ -1,233 +0,0 @@
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!**************************************************************************
*
* WARNING: linked failure has been removed since this doc comment was written,
* but it was so pretty that I didn't want to remove it.
*
* Spawning & linked failure
*
* Several data structures are involved in task management to allow properly
* propagating failure across linked/supervised tasks.
*
* (1) The "taskgroup_arc" is an unsafe::exclusive which contains a hashset of
* all tasks that are part of the group. Some tasks are 'members', which
* means if they fail, they will kill everybody else in the taskgroup.
* Other tasks are 'descendants', which means they will not kill tasks
* from this group, but can be killed by failing members.
*
* A new one of these is created each spawn_linked or spawn_supervised.
*
* (2) The "taskgroup" is a per-task control structure that tracks a task's
* spawn configuration. It contains a reference to its taskgroup_arc, a
* reference to its node in the ancestor list (below), and an optionally
* configured notification port. These are stored in TLS.
*
* (3) The "ancestor_list" is a cons-style list of unsafe::exclusives which
* tracks 'generations' of taskgroups -- a group's ancestors are groups
* which (directly or transitively) spawn_supervised-ed them. Each task
* is recorded in the 'descendants' of each of its ancestor groups.
*
* Spawning a supervised task is O(n) in the number of generations still
* alive, and exiting (by success or failure) that task is also O(n).
*
* This diagram depicts the references between these data structures:
*
* linked_________________________________
* ___/ _________ \___
* / \ | group X | / \
* ( A ) - - - - - - - > | {A,B} {}|< - - -( B )
* \___/ |_________| \___/
* unlinked
* | __ (nil)
* | //| The following code causes this:
* |__ // /\ _________
* / \ // || | group Y | fn taskA() {
* ( C )- - - ||- - - > |{C} {D,E}| spawn(taskB);
* \___/ / \=====> |_________| spawn_unlinked(taskC);
* supervise /gen \ ...
* | __ \ 00 / }
* | //| \__/ fn taskB() { ... }
* |__ // /\ _________ fn taskC() {
* / \/ || | group Z | spawn_supervised(taskD);
* ( D )- - - ||- - - > | {D} {E} | ...
* \___/ / \=====> |_________| }
* supervise /gen \ fn taskD() {
* | __ \ 01 / spawn_supervised(taskE);
* | //| \__/ ...
* |__ // _________ }
* / \/ | group W | fn taskE() { ... }
* ( E )- - - - - - - > | {E} {} |
* \___/ |_________|
*
* "tcb" "taskgroup_arc"
* "ancestor_list"
*
****************************************************************************/
#[doc(hidden)];
use prelude::*;
use comm::Chan;
use rt::local::Local;
use rt::sched::{Scheduler, Shutdown, TaskFromFriend};
use rt::task::{Task, Sched};
use rt::thread::Thread;
use rt::{in_green_task_context, new_event_loop};
use task::{SingleThreaded, TaskOpts, TaskResult};
#[cfg(test)] use task::default_task_opts;
#[cfg(test)] use task;

pub fn spawn_raw(mut opts: TaskOpts, f: proc()) {
    assert!(in_green_task_context());

    let mut task = if opts.sched.mode != SingleThreaded {
        if opts.watched {
            Task::build_child(opts.stack_size, f)
        } else {
            Task::build_root(opts.stack_size, f)
        }
    } else {
        unsafe {
            // Creating a 1:1 task:thread ...
            let sched: *mut Scheduler = Local::unsafe_borrow();
            let sched_handle = (*sched).make_handle();

            // Since this is a 1:1 scheduler we create a queue not in
            // the stealee set. The run_anything flag is set to false,
            // which disables stealing.
            let (worker, _stealer) = (*sched).work_queue.pool().deque();

            // Create a new scheduler to hold the new task
            let mut new_sched = ~Scheduler::new_special(new_event_loop(),
                                                        worker,
                                                        (*sched).work_queues.clone(),
                                                        (*sched).sleeper_list.clone(),
                                                        false,
                                                        Some(sched_handle));
            let mut new_sched_handle = new_sched.make_handle();

            // Allow the scheduler to exit when the pinned task exits
            new_sched_handle.send(Shutdown);

            // Pin the new task to the new scheduler
            let new_task = if opts.watched {
                Task::build_homed_child(opts.stack_size, f, Sched(new_sched_handle))
            } else {
                Task::build_homed_root(opts.stack_size, f, Sched(new_sched_handle))
            };

            // Create a task that will later be used to join with the new scheduler
            // thread when it is ready to terminate
            let (thread_port, thread_chan) = Chan::new();
            let join_task = do Task::build_child(None) {
                debug!("running join task");
                let thread: Thread<()> = thread_port.recv();
                thread.join();
            };

            // Put the scheduler into another thread
            let orig_sched_handle = (*sched).make_handle();
            let new_sched = new_sched;
            let thread = do Thread::start {
                let mut new_sched = new_sched;
                let mut orig_sched_handle = orig_sched_handle;

                let bootstrap_task = ~do Task::new_root(&mut new_sched.stack_pool, None) || {
                    debug!("bootstrapping a 1:1 scheduler");
                };
                new_sched.bootstrap(bootstrap_task);

                // Now tell the original scheduler to join with this thread
                // by scheduling a thread-joining task on the original scheduler
                orig_sched_handle.send(TaskFromFriend(join_task));

                // NB: We can't simply send a message from here to another task
                // because this code isn't running in a task, and message passing
                // doesn't work outside of tasks. Hence we send the scheduler a
                // message to execute the new task directly.
            };

            // Give the thread handle to the join task
            thread_chan.send(thread);

            // When this task is enqueued on the current scheduler it will then get
            // forwarded to the scheduler to which it is pinned
            new_task
        }
    };

    if opts.notify_chan.is_some() {
        let notify_chan = opts.notify_chan.take_unwrap();
        let on_exit: proc(TaskResult) = proc(task_result) {
            notify_chan.try_send(task_result);
        };
        task.death.on_exit = Some(on_exit);
    }

    task.name = opts.name.take();
    debug!("spawn calling run_task");
    Scheduler::run_task(task);
}
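// A hedged usage sketch of the 1:1 path above (an illustration, not part of
// the module; it assumes a `sched: SchedOpts { mode }` field shape, as
// suggested by the read of `opts.sched.mode` in spawn_raw):
//
//     let opts = TaskOpts {
//         sched: SchedOpts { mode: SingleThreaded },  // other fields, if any, elided
//         .. default_task_opts()
//     };
//     do spawn_raw(opts) {
//         // runs pinned to its own scheduler on a dedicated thread
//     }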
#[test]
fn test_spawn_raw_simple() {
    let (po, ch) = Chan::new();
    do spawn_raw(default_task_opts()) {
        ch.send(());
    }
    po.recv();
}

#[test]
fn test_spawn_raw_unsupervise() {
    let opts = task::TaskOpts {
        watched: false,
        notify_chan: None,
        .. default_task_opts()
    };
    do spawn_raw(opts) {
        fail!();
    }
}

#[test]
fn test_spawn_raw_notify_success() {
    let (notify_po, notify_ch) = Chan::new();

    let opts = task::TaskOpts {
        notify_chan: Some(notify_ch),
        .. default_task_opts()
    };
    do spawn_raw(opts) {
    }
    assert!(notify_po.recv().is_ok());
}

#[test]
fn test_spawn_raw_notify_failure() {
    // New bindings for these
    let (notify_po, notify_ch) = Chan::new();

    let opts = task::TaskOpts {
        watched: false,
        notify_chan: Some(notify_ch),
        .. default_task_opts()
    };
    do spawn_raw(opts) {
        fail!();
    }
    assert!(notify_po.recv().is_err());
}

View File

@ -140,7 +140,6 @@ pub mod dl {
use path;
use ptr;
use str;
use unstable::sync::atomic;
use result::*;
pub unsafe fn open_external(filename: &path::Path) -> *libc::c_void {
@ -158,11 +157,7 @@ pub mod dl {
static mut lock: Mutex = MUTEX_INIT;
unsafe {
// dlerror isn't thread safe, so we need to lock around this entire
// sequence. `atomic` asserts that we don't do anything that
// would cause this task to be descheduled, which could deadlock
// the scheduler if it happens while the lock is held.
// FIXME #9105 use a Rust mutex instead of C++ mutexes.
let _guard = atomic();
// sequence
lock.lock();
let _old_error = dlerror();
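A condensed sketch of the locking discipline this hunk leaves behind
(illustrative only, not lines taken from the diff):

    lock.lock();                     // dlerror() is not thread safe
    let _old_error = dlerror();      // flush any stale error state
    let result = f();                // make the dl* call
    let error = dlerror();           // read the error it may have set
    lock.unlock();

The C mutex alone now guards the sequence; the removed `atomic` guard had only
asserted that the task would not be descheduled while the lock was held.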
@ -208,7 +203,6 @@ pub mod dl {
use libc;
use path;
use ptr;
use unstable::sync::atomic;
use result::*;
pub unsafe fn open_external(filename: &path::Path) -> *libc::c_void {
@ -225,7 +219,6 @@ pub mod dl {
pub fn check_for_errors_in<T>(f: || -> T) -> Result<T, ~str> {
unsafe {
let _guard = atomic();
SetLastError(0);
let result = f();

View File

@ -11,15 +11,13 @@
//! Runtime calls emitted by the compiler.
use c_str::ToCStr;
use cast::transmute;
use libc::{c_char, size_t, uintptr_t};
use rt::task;
use rt::borrowck;
#[cold]
#[lang="fail_"]
pub fn fail_(expr: *c_char, file: *c_char, line: size_t) -> ! {
task::begin_unwind_raw(expr, file, line);
::rt::begin_unwind_raw(expr, file, line);
}
#[cold]
@ -81,15 +79,3 @@ pub unsafe fn check_not_borrowed(a: *u8,
line: size_t) {
borrowck::check_not_borrowed(a, file, line)
}
#[lang="start"]
pub fn start(main: *u8, argc: int, argv: **c_char) -> int {
use rt;
unsafe {
return do rt::start(argc, argv as **u8) {
let main: extern "Rust" fn() = transmute(main);
main();
};
}
}
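The start lang item no longer lives in this file. A crate providing its own
entry point could mirror the shape of the definition deleted above; a minimal
sketch, with the body elided because the runtime it would hand control to is
not part of this diff:

    #[lang="start"]
    pub fn start(main: *u8, argc: int, argv: **c_char) -> int {
        // transmute `main` back to an extern "Rust" fn() and hand it,
        // together with argc/argv, to whichever runtime boots the program
        ...
    }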

Some files were not shown because too many files have changed in this diff.