auto merge of #13958 : pcwalton/rust/detilde, r=pcwalton

This removes `~EXPR` and `~TYPE` from the language, except for `~str`/`~[]`.

Note that `~self` still remains, since I forgot to add support for
`Box<self>` before the snapshot.

r? @brson or @alexcrichton or whoever
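
For readers skimming the diff: the change is purely syntactic. Heap allocations that were written with the `~` sigil are now written with the `box` operator, and the corresponding pointer type is spelled `Box<T>`. A minimal before/after sketch (illustrative only, not taken from the diff):

```rust
fn main() {
    // old syntax removed by this commit: let x: ~int = ~5;
    let x: Box<int> = box 5;   // heap-allocate with the `box` operator
    println!("{}", *x);        // dereferencing works exactly as before
}
```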
bors 2014-05-07 05:16:48 -07:00
commit ef6daf9935
495 changed files with 2252 additions and 1897 deletions


@ -285,7 +285,7 @@ extern {
fn main() {
// Create the object that will be referenced in the callback
let mut rust_object = ~RustObject{ a: 5 };
let mut rust_object = box RustObject { a: 5 };
unsafe {
register_callback(&mut *rust_object, callback);


@ -41,9 +41,9 @@ point, but allocated in a different place:
~~~
# struct Point {x: f64, y: f64}
let on_the_stack : Point = Point {x: 3.0, y: 4.0};
let managed_box : @Point = @Point {x: 5.0, y: 1.0};
let owned_box : ~Point = ~Point {x: 7.0, y: 9.0};
let on_the_stack : Point = Point {x: 3.0, y: 4.0};
let managed_box : @Point = @Point {x: 5.0, y: 1.0};
let owned_box : Box<Point> = box Point {x: 7.0, y: 9.0};
~~~
Suppose we wanted to write a procedure that computed the distance between any
@ -72,9 +72,9 @@ Now we can call `compute_distance()` in various ways:
~~~
# struct Point {x: f64, y: f64}
# let on_the_stack : Point = Point{x: 3.0, y: 4.0};
# let managed_box : @Point = @Point{x: 5.0, y: 1.0};
# let owned_box : ~Point = ~Point{x: 7.0, y: 9.0};
# let on_the_stack : Point = Point{x: 3.0, y: 4.0};
# let managed_box : @Point = @Point{x: 5.0, y: 1.0};
# let owned_box : Box<Point> = box Point{x: 7.0, y: 9.0};
# fn compute_distance(p1: &Point, p2: &Point) -> f64 { 0.0 }
compute_distance(&on_the_stack, managed_box);
compute_distance(managed_box, owned_box);
@ -151,12 +151,12 @@ Now, as before, we can define rectangles in a few different ways:
# struct Point {x: f64, y: f64}
# struct Size {w: f64, h: f64} // as before
# struct Rectangle {origin: Point, size: Size}
let rect_stack = &Rectangle {origin: Point {x: 1.0, y: 2.0},
size: Size {w: 3.0, h: 4.0}};
let rect_managed = @Rectangle {origin: Point {x: 3.0, y: 4.0},
size: Size {w: 3.0, h: 4.0}};
let rect_owned = ~Rectangle {origin: Point {x: 5.0, y: 6.0},
size: Size {w: 3.0, h: 4.0}};
let rect_stack = &Rectangle {origin: Point {x: 1.0, y: 2.0},
size: Size {w: 3.0, h: 4.0}};
let rect_managed = @Rectangle {origin: Point {x: 3.0, y: 4.0},
size: Size {w: 3.0, h: 4.0}};
let rect_owned = box Rectangle {origin: Point {x: 5.0, y: 6.0},
size: Size {w: 3.0, h: 4.0}};
~~~
In each case, we can extract out individual subcomponents with the `&`
@ -168,7 +168,7 @@ operator. For example, I could write:
# struct Rectangle {origin: Point, size: Size}
# let rect_stack = &Rectangle {origin: Point {x: 1.0, y: 2.0}, size: Size {w: 3.0, h: 4.0}};
# let rect_managed = @Rectangle {origin: Point {x: 3.0, y: 4.0}, size: Size {w: 3.0, h: 4.0}};
# let rect_owned = ~Rectangle {origin: Point {x: 5.0, y: 6.0}, size: Size {w: 3.0, h: 4.0}};
# let rect_owned = box Rectangle {origin: Point {x: 5.0, y: 6.0}, size: Size {w: 3.0, h: 4.0}};
# fn compute_distance(p1: &Point, p2: &Point) -> f64 { 0.0 }
compute_distance(&rect_stack.origin, &rect_managed.origin);
~~~
@ -276,12 +276,12 @@ the following function is legal:
# fn some_condition() -> bool { true }
# struct Foo { f: int }
fn example3() -> int {
let mut x = ~Foo {f: 3};
let mut x = box Foo {f: 3};
if some_condition() {
let y = &x.f; // -+ L
return *y; // |
} // -+
x = ~Foo {f: 4};
x = box Foo {f: 4};
// ...
# return 0;
}
@ -301,9 +301,9 @@ rejected by the compiler):
~~~ {.ignore}
fn example3() -> int {
let mut x = ~X {f: 3};
let mut x = box X {f: 3};
let y = &x.f;
x = ~X {f: 4}; // Error reported here.
x = box X {f: 4}; // Error reported here.
*y
}
~~~
@ -314,13 +314,13 @@ memory immediately before the re-assignment of `x`:
~~~ {.notrust}
Stack Exchange Heap
x +----------+
| ~{f:int} | ----+
y +----------+ |
| &int | ----+
+----------+ | +---------+
+--> | f: 3 |
+---------+
x +-------------+
| box {f:int} | ----+
y +-------------+ |
| &int | ----+
+-------------+ | +---------+
+--> | f: 3 |
+---------+
~~~
Once the reassignment occurs, the memory will look like this:
@ -328,13 +328,13 @@ Once the reassignment occurs, the memory will look like this:
~~~ {.notrust}
Stack Exchange Heap
x +----------+ +---------+
| ~{f:int} | -------> | f: 4 |
y +----------+ +---------+
| &int | ----+
+----------+ | +---------+
+--> | (freed) |
+---------+
x +-------------+ +---------+
| box {f:int} | -------> | f: 4 |
y +-------------+ +---------+
| &int | ----+
+-------------+ | +---------+
+--> | (freed) |
+---------+
~~~
Here you can see that the variable `y` still points at the old box,
@ -349,12 +349,12 @@ mutations:
~~~ {.ignore}
fn example3() -> int {
struct R { g: int }
struct S { f: ~R }
struct S { f: Box<R> }
let mut x = ~S {f: ~R {g: 3}};
let mut x = box S {f: box R {g: 3}};
let y = &x.f.g;
x = ~S {f: ~R {g: 4}}; // Error reported here.
x.f = ~R {g: 5}; // Error reported here.
x = box S {f: box R {g: 4}}; // Error reported here.
x.f = box R {g: 5}; // Error reported here.
*y
}
~~~
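
One way to get code like this past the borrow checker, sketched here for illustration (the hypothetical `example4` is not part of this diff), is to copy the borrowed integer out of the structure so that no reference into `x` is live when `x` is reassigned:

~~~
struct R { g: int }
struct S { f: Box<R> }

fn example4() -> int {
    let mut x = box S { f: box R { g: 3 } };
    let y = x.f.g;                    // copy the value out; nothing borrows from `x` now
    x = box S { f: box R { g: 4 } };  // reassigning the owner is therefore fine
    y
}
~~~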


@ -182,13 +182,14 @@ trait. Therefore, unboxed traits don't make any sense, and aren't allowed.
Sometimes, you need a recursive data structure. The simplest is known as a 'cons list':
~~~rust
enum List<T> {
Nil,
Cons(T, ~List<T>),
Cons(T, Box<List<T>>),
}
fn main() {
let list: List<int> = Cons(1, ~Cons(2, ~Cons(3, ~Nil)));
let list: List<int> = Cons(1, box Cons(2, box Cons(3, box Nil)));
println!("{:?}", list);
}
~~~
@ -196,7 +197,7 @@ fn main() {
This prints:
~~~ {.notrust}
Cons(1, ~Cons(2, ~Cons(3, ~Nil)))
Cons(1, box Cons(2, box Cons(3, box Nil)))
~~~
The inner lists _must_ be owned pointers, because we can't know how many
@ -237,7 +238,7 @@ struct Point {
}
fn main() {
let a = ~Point { x: 10, y: 20 };
let a = box Point { x: 10, y: 20 };
spawn(proc() {
println!("{}", a.x);
});
@ -268,7 +269,7 @@ struct Point {
}
fn main() {
let a = ~Point { x: 10, y: 20 };
let a = box Point { x: 10, y: 20 };
let b = a;
println!("{}", b.x);
println!("{}", a.x);
@ -285,7 +286,7 @@ note: in expansion of format_args!
<std-macros>:158:27: 158:81 note: expansion site
<std-macros>:157:5: 159:6 note: in expansion of println!
test.rs:10:5: 10:25 note: expansion site
test.rs:8:9: 8:10 note: `a` moved here because it has type `~Point`, which is moved by default (use `ref` to override)
test.rs:8:9: 8:10 note: `a` moved here because it has type `Box<Point>`, which is moved by default (use `ref` to override)
test.rs:8 let b = a;
^
~~~
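
If both bindings really are needed, one fix (a sketch only, and it assumes `Point` derives `Clone`, which the snippet above does not show) is to clone the box instead of moving it:

~~~rust
#[deriving(Clone)]
struct Point { x: int, y: int }

fn main() {
    let a = box Point { x: 10, y: 20 };
    let b = a.clone();     // allocates a second box holding a copy of the data
    println!("{}", b.x);
    println!("{}", a.x);   // `a` was not moved, so this still compiles
}
~~~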
@ -345,8 +346,8 @@ fn compute_distance(p1: &Point, p2: &Point) -> f32 {
}
fn main() {
let origin = @Point { x: 0.0, y: 0.0 };
let p1 = ~Point { x: 5.0, y: 3.0 };
let origin = @Point { x: 0.0, y: 0.0 };
let p1 = box Point { x: 5.0, y: 3.0 };
println!("{:?}", compute_distance(origin, p1));
}
@ -381,7 +382,7 @@ duration a 'lifetime'. Let's try a more complex example:
~~~rust
fn main() {
let mut x = ~5;
let mut x = box 5;
if *x < 10 {
let y = &x;
println!("Oh no: {:?}", y);
@ -398,7 +399,7 @@ mutated, and therefore, lets us pass. This wouldn't work:
~~~rust{.ignore}
fn main() {
let mut x = ~5;
let mut x = box 5;
if *x < 10 {
let y = &x;
*x -= 1;
@ -437,12 +438,12 @@ is best.
What does that mean? Don't do this:
~~~rust
fn foo(x: ~int) -> ~int {
return ~*x;
fn foo(x: Box<int>) -> Box<int> {
return box *x;
}
fn main() {
let x = ~5;
let x = box 5;
let y = foo(x);
}
~~~
@ -450,13 +451,13 @@ fn main() {
Do this:
~~~rust
fn foo(x: ~int) -> int {
fn foo(x: Box<int>) -> int {
return *x;
}
fn main() {
let x = ~5;
let y = ~foo(x);
let x = box 5;
let y = box foo(x);
}
~~~
@ -464,12 +465,12 @@ This gives you flexibility, without sacrificing performance. For example, this w
also work:
~~~rust
fn foo(x: ~int) -> int {
fn foo(x: Box<int>) -> int {
return *x;
}
fn main() {
let x = ~5;
let x = box 5;
let y = @foo(x);
}
~~~


@ -258,10 +258,10 @@ impl<T: Send> Drop for Unique<T> {
}
}
// A comparison between the built-in ~ and this reimplementation
// A comparison between the built-in `Box` and this reimplementation
fn main() {
{
let mut x = ~5;
let mut x = box 5;
*x = 10;
} // `x` is freed here


@ -127,12 +127,13 @@ That's a great example for stack memory,
but what about heap memory?
Rust has a second kind of pointer,
an 'owned box',
that you can create with a `~`.
that you can create with the `box` operator.
Check it out:
```
fn dangling() -> ~int {
let i = ~1234;
fn dangling() -> Box<int> {
let i = box 1234;
return i;
}
@ -143,7 +144,7 @@ fn add_one() -> int {
```
Now instead of a stack allocated `1234`,
we have a heap allocated `~1234`.
we have a heap allocated `box 1234`.
Whereas `&` borrows a pointer to existing memory,
creating an owned box allocates memory on the heap and places a value in it,
giving you the sole pointer to that memory.
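
A small sketch of that contrast, reusing the `1234` value from above:

```
fn main() {
    let on_stack = 1234;
    let borrowed: &int = &on_stack;    // `&` borrows memory that already exists
    let owned: Box<int> = box 1234;    // `box` allocates new memory on the heap
    println!("{} {}", *borrowed, *owned);
}
```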
@ -151,7 +152,7 @@ You can roughly compare these two lines:
```
// Rust
let i = ~1234;
let i = box 1234;
```
```notrust


@ -1400,7 +1400,7 @@ to pointers to the trait name, used as a type.
# trait Shape { }
# impl Shape for int { }
# let mycircle = 0;
let myshape: ~Shape = ~mycircle as ~Shape;
let myshape: Box<Shape> = box mycircle as Box<Shape>;
~~~~
The resulting value is a managed box containing the value that was cast,
@ -3048,19 +3048,19 @@ stands for a *single* data field, whereas a wildcard `..` stands for *all* the
fields of a particular variant. For example:
~~~~
enum List<X> { Nil, Cons(X, ~List<X>) }
enum List<X> { Nil, Cons(X, Box<List<X>>) }
let x: List<int> = Cons(10, ~Cons(11, ~Nil));
let x: List<int> = Cons(10, box Cons(11, box Nil));
match x {
Cons(_, ~Nil) => fail!("singleton list"),
Cons(..) => return,
Nil => fail!("empty list")
Cons(_, box Nil) => fail!("singleton list"),
Cons(..) => return,
Nil => fail!("empty list")
}
~~~~
The first pattern matches lists constructed by applying `Cons` to any head
value, and a tail value of `~Nil`. The second pattern matches _any_ list
value, and a tail value of `box Nil`. The second pattern matches _any_ list
constructed with `Cons`, ignoring the values of its arguments. The difference
between `_` and `..` is that the pattern `C(_)` is only type-correct if `C` has
exactly one argument, while the pattern `C(..)` is type-correct for any enum
@ -3110,12 +3110,12 @@ An example of a `match` expression:
# fn process_pair(a: int, b: int) { }
# fn process_ten() { }
enum List<X> { Nil, Cons(X, ~List<X>) }
enum List<X> { Nil, Cons(X, Box<List<X>>) }
let x: List<int> = Cons(10, ~Cons(11, ~Nil));
let x: List<int> = Cons(10, box Cons(11, box Nil));
match x {
Cons(a, ~Cons(b, _)) => {
Cons(a, box Cons(b, _)) => {
process_pair(a,b);
}
Cons(10, _) => {
@ -3142,17 +3142,17 @@ Subpatterns can also be bound to variables by the use of the syntax
For example:
~~~~
enum List { Nil, Cons(uint, ~List) }
enum List { Nil, Cons(uint, Box<List>) }
fn is_sorted(list: &List) -> bool {
match *list {
Nil | Cons(_, ~Nil) => true,
Cons(x, ref r @ ~Cons(y, _)) => (x <= y) && is_sorted(*r)
Nil | Cons(_, box Nil) => true,
Cons(x, ref r @ box Cons(y, _)) => (x <= y) && is_sorted(*r)
}
}
fn main() {
let a = Cons(6, ~Cons(7, ~Cons(42, ~Nil)));
let a = Cons(6, box Cons(7, box Cons(42, box Nil)));
assert!(is_sorted(&a));
}
@ -3429,10 +3429,10 @@ An example of a *recursive* type and its use:
~~~~
enum List<T> {
Nil,
Cons(T, ~List<T>)
Cons(T, Box<List<T>>)
}
let a: List<int> = Cons(7, ~Cons(13, ~Nil));
let a: List<int> = Cons(7, box Cons(13, box Nil));
~~~~
### Pointer types
@ -3581,12 +3581,12 @@ impl Printable for int {
fn to_string(&self) -> ~str { self.to_str() }
}
fn print(a: ~Printable) {
fn print(a: Box<Printable>) {
println!("{}", a.to_string());
}
fn main() {
print(~10 as ~Printable);
print(box 10 as Box<Printable>);
}
~~~~
@ -3773,7 +3773,7 @@ mutable slot by prefixing them with `mut` (similar to regular arguments):
~~~
trait Changer {
fn change(mut self) -> Self;
fn modify(mut ~self) -> ~Self;
fn modify(mut ~self) -> Box<Self>;
}
~~~
@ -3786,12 +3786,14 @@ initialized; this is enforced by the compiler.
### Owned boxes
An _owned box_ is a reference to a heap allocation holding another value, which is constructed
by the prefix *tilde* sigil `~`.
by the prefix operator `box`. When the standard library is in use, the type of an owned box is
`std::owned::Box<T>`.
An example of an owned box type and value:
~~~~
let x: ~int = ~10;
let x: Box<int> = box 10;
~~~~
Owned box values exist in 1:1 correspondence with their heap allocation
@ -3799,7 +3801,7 @@ copying an owned box value makes a shallow copy of the pointer
Rust will consider a shallow copy of an owned box to move ownership of the value. After a value has been moved, the source location cannot be used unless it is reinitialized.
~~~~
let x: ~int = ~10;
let x: Box<int> = box 10;
let y = x;
// attempting to use `x` will result in an error here
~~~~
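
Reinitializing the moved-from location makes it usable again; a short sketch (not part of the diff):

~~~~
let mut x: Box<int> = box 10;
let y = x;        // ownership of the allocation moves to `y`
x = box 20;       // `x` is reinitialized and may be used again
assert!(*x + *y == 30);
~~~~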


@ -911,12 +911,12 @@ Objects are never accessible after their destructor has been called, so no
dynamic failures are possible from accessing freed resources. When a task
fails, destructors of all objects in the task are called.
The `~` sigil represents a unique handle for a memory allocation on the heap:
The `box` operator performs memory allocation on the heap:
~~~~
{
// an integer allocated on the heap
let y = ~10;
let y = box 10;
}
// the destructor frees the heap memory as soon as `y` goes out of scope
~~~~
@ -938,17 +938,17 @@ and destroy the contained object when they go out of scope.
~~~~
// the struct owns the objects contained in the `x` and `y` fields
struct Foo { x: int, y: ~int }
struct Foo { x: int, y: Box<int> }
{
// `a` is the owner of the struct, and thus the owner of the struct's fields
let a = Foo { x: 5, y: ~10 };
let a = Foo { x: 5, y: box 10 };
}
// when `a` goes out of scope, the destructor for the `~int` in the struct's
// field is called
// `b` is mutable, and the mutability is inherited by the objects it owns
let mut b = Foo { x: 5, y: ~10 };
let mut b = Foo { x: 5, y: box 10 };
b.x = 10;
~~~~
@ -1021,13 +1021,15 @@ Our previous attempt at defining the `List` type included an `u32` and a `List`
directly inside `Cons`, making it at least as big as the sum of both types. The
type was invalid because the size was infinite!
An *owned box* (`~`) uses a dynamic memory allocation to provide the invariant
of always being the size of a pointer, regardless of the contained type. This
can be leveraged to create a valid `List` definition:
An *owned box* (`Box`, located in the `std::owned` module) uses a dynamic memory
allocation to provide the invariant of always being the size of a pointer,
regardless of the contained type. This can be leveraged to create a valid `List`
definition:
~~~
enum List {
Cons(u32, ~List),
Cons(u32, Box<List>),
Nil
}
~~~
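
For contrast, here is a sketch of the unboxed definition that the compiler rejects: each `Cons` would have to contain a complete `List` inline, giving the type infinite size (marked as non-compiling):

~~~ {.ignore}
enum List {
    Cons(u32, List), // error: illegal recursive enum type
    Nil
}
~~~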
@ -1040,10 +1042,10 @@ Consider an instance of our `List` type:
~~~
# enum List {
# Cons(u32, ~List),
# Cons(u32, Box<List>),
# Nil
# }
let list = Cons(1, ~Cons(2, ~Cons(3, ~Nil)));
let list = Cons(1, box Cons(2, box Cons(3, box Nil)));
~~~
It represents an owned tree of values, inheriting mutability down the tree and
@ -1054,7 +1056,7 @@ box, while the owner holds onto a pointer to it:
~~~ {.notrust}
List box List box List box List box
+--------------+ +--------------+ +--------------+ +----------+
list -> | Cons | 1 | ~ | -> | Cons | 2 | ~ | -> | Cons | 3 | ~ | -> | Nil |
list -> | Cons | 1 | | -> | Cons | 2 | | -> | Cons | 3 | | -> | Nil |
+--------------+ +--------------+ +--------------+ +----------+
~~~
@ -1074,10 +1076,10 @@ the box rather than doing an implicit heap allocation.
~~~
# enum List {
# Cons(u32, ~List),
# Cons(u32, Box<List>),
# Nil
# }
let xs = Cons(1, ~Cons(2, ~Cons(3, ~Nil)));
let xs = Cons(1, box Cons(2, box Cons(3, box Nil)));
let ys = xs; // copies `Cons(u32, pointer)` shallowly
~~~
@ -1087,7 +1089,7 @@ location cannot be used unless it is reinitialized.
~~~
# enum List {
# Cons(u32, ~List),
# Cons(u32, Box<List>),
# Nil
# }
let mut xs = Nil;
@ -1107,7 +1109,7 @@ as it is only called a single time.
Avoiding a move can be done with the library-defined `clone` method:
~~~~
let x = ~5;
let x = box 5;
let y = x.clone(); // `y` is a newly allocated box
let z = x; // no new memory allocated, `x` can no longer be used
~~~~
@ -1118,11 +1120,11 @@ our `List` type. Traits will be explained in detail [later](#traits).
~~~{.ignore}
#[deriving(Clone)]
enum List {
Cons(u32, ~List),
Cons(u32, Box<List>),
Nil
}
let x = Cons(5, ~Nil);
let x = Cons(5, box Nil);
let y = x.clone();
// `x` can still be used!
@ -1135,7 +1137,7 @@ let z = x;
The mutability of a value may be changed by moving it to a new owner:
~~~~
let r = ~13;
let r = box 13;
let mut s = r; // box becomes mutable
*s += 1;
let t = s; // box becomes immutable
@ -1146,12 +1148,12 @@ advantage of moves:
~~~
enum List {
Cons(u32, ~List),
Cons(u32, Box<List>),
Nil
}
fn prepend(xs: List, value: u32) -> List {
Cons(value, ~xs)
Cons(value, box xs)
}
let mut xs = Nil;
@ -1186,7 +1188,7 @@ by-value. A recursive definition of equality using references is as follows:
~~~
# enum List {
# Cons(u32, ~List),
# Cons(u32, Box<List>),
# Nil
# }
fn eq(xs: &List, ys: &List) -> bool {
@ -1195,15 +1197,15 @@ fn eq(xs: &List, ys: &List) -> bool {
// If we have reached the end of both lists, they are equal.
(&Nil, &Nil) => true,
// If the current elements of both lists are equal, keep going.
(&Cons(x, ~ref next_xs), &Cons(y, ~ref next_ys))
(&Cons(x, box ref next_xs), &Cons(y, box ref next_ys))
if x == y => eq(next_xs, next_ys),
// If the current elements are not equal, the lists are not equal.
_ => false
}
}
let xs = Cons(5, ~Cons(10, ~Nil));
let ys = Cons(5, ~Cons(10, ~Nil));
let xs = Cons(5, box Cons(10, box Nil));
let ys = Cons(5, box Cons(10, box Nil));
assert!(eq(&xs, &ys));
~~~
@ -1223,7 +1225,7 @@ The `u32` in the previous definition can be substituted with a type parameter:
~~~
enum List<T> {
Cons(T, ~List<T>),
Cons(T, Box<List<T>>),
Nil
}
~~~
@ -1233,11 +1235,11 @@ definition has to be updated too:
~~~
# enum List<T> {
# Cons(T, ~List<T>),
# Cons(T, Box<List<T>>),
# Nil
# }
fn prepend<T>(xs: List<T>, value: T) -> List<T> {
Cons(value, ~xs)
Cons(value, box xs)
}
~~~
@ -1248,11 +1250,11 @@ Using the generic `List<T>` works much like before, thanks to type inference:
~~~
# enum List<T> {
# Cons(T, ~List<T>),
# Cons(T, Box<List<T>>),
# Nil
# }
# fn prepend<T>(xs: List<T>, value: T) -> List<T> {
# Cons(value, ~xs)
# Cons(value, box xs)
# }
let mut xs = Nil; // Unknown type! This is a `List<T>`, but `T` can be anything.
xs = prepend(xs, 10); // Here the compiler infers `xs`'s type as `List<int>`.
@ -1265,11 +1267,11 @@ equivalent to the following type-annotated code:
~~~
# enum List<T> {
# Cons(T, ~List<T>),
# Cons(T, Box<List<T>>),
# Nil
# }
# fn prepend<T>(xs: List<T>, value: T) -> List<T> {
# Cons(value, ~xs)
# Cons(value, box xs)
# }
let mut xs: List<int> = Nil::<int>;
xs = prepend::<int>(xs, 10);
@ -1293,7 +1295,7 @@ Two more `ref` annotations need to be added to avoid attempting to move out the
~~~
# enum List<T> {
# Cons(T, ~List<T>),
# Cons(T, Box<List<T>>),
# Nil
# }
fn eq<T: Eq>(xs: &List<T>, ys: &List<T>) -> bool {
@ -1302,15 +1304,15 @@ fn eq<T: Eq>(xs: &List<T>, ys: &List<T>) -> bool {
// If we have reached the end of both lists, they are equal.
(&Nil, &Nil) => true,
// If the current elements of both lists are equal, keep going.
(&Cons(ref x, ~ref next_xs), &Cons(ref y, ~ref next_ys))
(&Cons(ref x, box ref next_xs), &Cons(ref y, box ref next_ys))
if x == y => eq(next_xs, next_ys),
// If the current elements are not equal, the lists are not equal.
_ => false
}
}
let xs = Cons('c', ~Cons('a', ~Cons('t', ~Nil)));
let ys = Cons('c', ~Cons('a', ~Cons('t', ~Nil)));
let xs = Cons('c', box Cons('a', box Cons('t', box Nil)));
let ys = Cons('c', box Cons('a', box Cons('t', box Nil)));
assert!(eq(&xs, &ys));
~~~
@ -1321,7 +1323,7 @@ on.
~~~
# enum List<T> {
# Cons(T, ~List<T>),
# Cons(T, Box<List<T>>),
# Nil
# }
impl<T: Eq> Eq for List<T> {
@ -1331,7 +1333,7 @@ impl<T: Eq> Eq for List<T> {
// If we have reached the end of both lists, they are equal.
(&Nil, &Nil) => true,
// If the current elements of both lists are equal, keep going.
(&Cons(ref x, ~ref next_xs), &Cons(ref y, ~ref next_ys))
(&Cons(ref x, box ref next_xs), &Cons(ref y, box ref next_ys))
if x == y => next_xs == next_ys,
// If the current elements are not equal, the lists are not equal.
_ => false
@ -1339,8 +1341,8 @@ impl<T: Eq> Eq for List<T> {
}
}
let xs = Cons(5, ~Cons(10, ~Nil));
let ys = Cons(5, ~Cons(10, ~Nil));
let xs = Cons(5, box Cons(10, box Nil));
let ys = Cons(5, box Cons(10, box Nil));
// The methods below are part of the Eq trait,
// which we implemented on our linked list.
assert!(xs.eq(&ys));
@ -1373,7 +1375,7 @@ fn foo() -> (u64, u64, u64, u64, u64, u64) {
(5, 5, 5, 5, 5, 5)
}
let x = ~foo(); // allocates a `~` box, and writes the integers directly to it
let x = box foo(); // allocates a box, and writes the integers directly to it
~~~~
Beyond the properties granted by the size, an owned box behaves as a regular
@ -1384,8 +1386,8 @@ let x = 5; // immutable
let mut y = 5; // mutable
y += 2;
let x = ~5; // immutable
let mut y = ~5; // mutable
let x = box 5; // immutable
let mut y = box 5; // mutable
*y += 2; // the `*` operator is needed to access the contained value
~~~~
@ -1413,8 +1415,8 @@ contains a point, but allocated in a different location:
~~~
# struct Point { x: f64, y: f64 }
let on_the_stack : Point = Point { x: 3.0, y: 4.0 };
let owned_box : ~Point = ~Point { x: 7.0, y: 9.0 };
let on_the_stack : Point = Point { x: 3.0, y: 4.0 };
let owned_box : Box<Point> = box Point { x: 7.0, y: 9.0 };
~~~
Suppose we want to write a procedure that computes the distance
@ -1438,8 +1440,8 @@ Now we can call `compute_distance()` in various ways:
~~~
# struct Point{ x: f64, y: f64 };
# let on_the_stack : Point = Point { x: 3.0, y: 4.0 };
# let owned_box : ~Point = ~Point { x: 7.0, y: 9.0 };
# let on_the_stack : Point = Point { x: 3.0, y: 4.0 };
# let owned_box : Box<Point> = box Point { x: 7.0, y: 9.0 };
# fn compute_distance(p1: &Point, p2: &Point) -> f64 { 0.0 }
compute_distance(&on_the_stack, owned_box);
~~~
@ -1453,7 +1455,7 @@ route to the same data.
In the case of `owned_box`, however, no
explicit action is necessary. The compiler will automatically convert
a box `~point` to a reference like
a box `box point` to a reference like
`&point`. This is another form of borrowing; in this case, the
contents of the owned box are being lent out.
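
In other words, passing `owned_box` above is shorthand for borrowing its contents explicitly; a sketch of the equivalent explicit call:

~~~
# struct Point { x: f64, y: f64 }
# fn compute_distance(p1: &Point, p2: &Point) -> f64 { 0.0 }
let owned_box: Box<Point> = box Point { x: 7.0, y: 9.0 };
compute_distance(&*owned_box, &*owned_box);  // same as passing `owned_box` directly
~~~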
@ -1492,7 +1494,7 @@ Rust uses the unary star operator (`*`) to access the contents of a
box or pointer, similarly to C.
~~~
let owned = ~10;
let owned = box 10;
let borrowed = &20;
let sum = *owned + *borrowed;
@ -1503,7 +1505,7 @@ assignments. Such an assignment modifies the value that the pointer
points to.
~~~
let mut owned = ~10;
let mut owned = box 10;
let mut value = 20;
let borrowed = &mut value;
@ -1520,8 +1522,8 @@ can sometimes make code awkward and parenthesis-filled.
# struct Point { x: f64, y: f64 }
# enum Shape { Rectangle(Point, Point) }
# impl Shape { fn area(&self) -> int { 0 } }
let start = ~Point { x: 10.0, y: 20.0 };
let end = ~Point { x: (*start).x + 100.0, y: (*start).y + 100.0 };
let start = box Point { x: 10.0, y: 20.0 };
let end = box Point { x: (*start).x + 100.0, y: (*start).y + 100.0 };
let rect = &Rectangle(*start, *end);
let area = (*rect).area();
~~~
@ -1534,8 +1536,8 @@ dot), so in most cases, explicitly dereferencing the receiver is not necessary.
# struct Point { x: f64, y: f64 }
# enum Shape { Rectangle(Point, Point) }
# impl Shape { fn area(&self) -> int { 0 } }
let start = ~Point { x: 10.0, y: 20.0 };
let end = ~Point { x: start.x + 100.0, y: start.y + 100.0 };
let start = box Point { x: 10.0, y: 20.0 };
let end = box Point { x: start.x + 100.0, y: start.y + 100.0 };
let rect = &Rectangle(*start, *end);
let area = rect.area();
~~~
@ -1546,7 +1548,7 @@ something silly like
~~~
# struct Point { x: f64, y: f64 }
let point = &~Point { x: 10.0, y: 20.0 };
let point = &box Point { x: 10.0, y: 20.0 };
println!("{:f}", point.x);
~~~
@ -1944,7 +1946,7 @@ impl Shape {
let s = Circle(Point { x: 1.0, y: 2.0 }, 3.0);
(&s).draw_reference();
(~s).draw_owned();
(box s).draw_owned();
s.draw_value();
~~~
@ -1969,7 +1971,7 @@ to a reference.
// As with typical function arguments, owned pointers
// are automatically converted to references
(~s).draw_reference();
(box s).draw_reference();
// Unlike typical function arguments, the self value will
// automatically be referenced ...
@ -1979,7 +1981,7 @@ s.draw_reference();
(& &s).draw_reference();
// ... and dereferenced and borrowed
(&~s).draw_reference();
(&box s).draw_reference();
~~~
Implementations may also define standalone (sometimes called "static")
@ -2433,7 +2435,7 @@ an _object_.
~~~~
# trait Drawable { fn draw(&self); }
fn draw_all(shapes: &[~Drawable]) {
fn draw_all(shapes: &[Box<Drawable>]) {
for shape in shapes.iter() { shape.draw(); }
}
~~~~
@ -2448,14 +2450,14 @@ to an object:
# trait Drawable { fn draw(&self); }
# fn new_circle() -> Circle { 1 }
# fn new_rectangle() -> Rectangle { true }
# fn draw_all(shapes: &[~Drawable]) {}
# fn draw_all(shapes: &[Box<Drawable>]) {}
impl Drawable for Circle { fn draw(&self) { /* ... */ } }
impl Drawable for Rectangle { fn draw(&self) { /* ... */ } }
let c: ~Circle = ~new_circle();
let r: ~Rectangle = ~new_rectangle();
draw_all([c as ~Drawable, r as ~Drawable]);
let c: Box<Circle> = box new_circle();
let r: Box<Rectangle> = box new_rectangle();
draw_all([c as Box<Drawable>, r as Box<Drawable>]);
~~~~
We omit the code for `new_circle` and `new_rectangle`; imagine that
@ -2464,7 +2466,7 @@ that, like strings and vectors, objects have dynamic size and may
only be referred to via one of the pointer types.
Other pointer types work as well.
Casts to traits may only be done with compatible pointers so,
for example, an `&Circle` may not be cast to an `~Drawable`.
for example, an `&Circle` may not be cast to a `Box<Drawable>`.
~~~
# type Circle = int; type Rectangle = int;
@ -2473,7 +2475,7 @@ for example, an `&Circle` may not be cast to an `~Drawable`.
# fn new_circle() -> int { 1 }
# fn new_rectangle() -> int { 2 }
// An owned object
let owny: ~Drawable = ~new_circle() as ~Drawable;
let owny: Box<Drawable> = box new_circle() as Box<Drawable>;
// A borrowed object
let stacky: &Drawable = &new_circle() as &Drawable;
~~~
@ -2497,7 +2499,7 @@ valid types:
trait Foo {}
trait Bar<T> {}
fn sendable_foo(f: ~Foo:Send) { /* ... */ }
fn sendable_foo(f: Box<Foo:Send>) { /* ... */ }
fn shareable_bar<T: Share>(b: &Bar<T>: Share) { /* ... */ }
~~~


@ -29,14 +29,14 @@ extern crate collections;
use std::cast::{transmute, transmute_mut_lifetime};
use std::cast;
use std::cell::{Cell, RefCell};
use std::mem;
use std::ptr::read;
use std::cmp;
use std::num;
use std::rc::Rc;
use std::rt::global_heap;
use std::intrinsics::{TyDesc, get_tydesc};
use std::intrinsics;
use std::mem;
use std::num;
use std::ptr::read;
use std::rc::Rc;
use std::rt::global_heap;
// The way arena uses arrays is really deeply awful. The arrays are
// allocated, and have capacities reserved, but the fill for the array
@ -339,12 +339,12 @@ pub struct TypedArena<T> {
end: *T,
/// A pointer to the first arena segment.
first: Option<~TypedArenaChunk<T>>,
first: Option<Box<TypedArenaChunk<T>>>,
}
struct TypedArenaChunk<T> {
/// Pointer to the next arena segment.
next: Option<~TypedArenaChunk<T>>,
next: Option<Box<TypedArenaChunk<T>>>,
/// The number of elements that this chunk can hold.
capacity: uint,
@ -354,7 +354,8 @@ struct TypedArenaChunk<T> {
impl<T> TypedArenaChunk<T> {
#[inline]
fn new(next: Option<~TypedArenaChunk<T>>, capacity: uint) -> ~TypedArenaChunk<T> {
fn new(next: Option<Box<TypedArenaChunk<T>>>, capacity: uint)
-> Box<TypedArenaChunk<T>> {
let mut size = mem::size_of::<TypedArenaChunk<T>>();
size = round_up(size, mem::min_align_of::<T>());
let elem_size = mem::size_of::<T>();
@ -363,7 +364,7 @@ impl<T> TypedArenaChunk<T> {
let mut chunk = unsafe {
let chunk = global_heap::exchange_malloc(size);
let mut chunk: ~TypedArenaChunk<T> = cast::transmute(chunk);
let mut chunk: Box<TypedArenaChunk<T>> = cast::transmute(chunk);
mem::move_val_init(&mut chunk.next, next);
chunk
};


@ -140,7 +140,8 @@ impl<K: TotalOrd, V> Node<K, V> {
}
///Creates a new branch node given a vector of elements and a pointer to a rightmost child.
fn new_branch(vec: Vec<BranchElt<K, V>>, right: ~Node<K, V>) -> Node<K, V> {
fn new_branch(vec: Vec<BranchElt<K, V>>, right: Box<Node<K, V>>)
-> Node<K, V> {
BranchNode(Branch::new(vec, right))
}
@ -270,7 +271,7 @@ struct Leaf<K, V> {
//Vector of values with children, plus a rightmost child (greater than all)
struct Branch<K, V> {
elts: Vec<BranchElt<K,V>>,
rightmost_child: ~Node<K, V>
rightmost_child: Box<Node<K, V>>,
}
@ -434,7 +435,8 @@ impl<K: fmt::Show + TotalOrd, V: fmt::Show> fmt::Show for Leaf<K, V> {
impl<K: TotalOrd, V> Branch<K, V> {
///Creates a new Branch from a vector of BranchElts and a rightmost child (a node).
fn new(vec: Vec<BranchElt<K, V>>, right: ~Node<K, V>) -> Branch<K, V> {
fn new(vec: Vec<BranchElt<K, V>>, right: Box<Node<K, V>>)
-> Branch<K, V> {
Branch {
elts: vec,
rightmost_child: right
@ -667,7 +669,7 @@ struct LeafElt<K, V> {
//A BranchElt has a left child in addition to a key-value pair.
struct BranchElt<K, V> {
left: ~Node<K, V>,
left: Box<Node<K, V>>,
key: K,
value: V
}
@ -719,7 +721,7 @@ impl<K: fmt::Show + TotalOrd, V: fmt::Show> fmt::Show for LeafElt<K, V> {
impl<K: TotalOrd, V> BranchElt<K, V> {
///Creates a new BranchElt from a supplied key, value, and left child.
fn new(k: K, v: V, n: ~Node<K, V>) -> BranchElt<K, V> {
fn new(k: K, v: V, n: Box<Node<K, V>>) -> BranchElt<K, V> {
BranchElt {
left: n,
key: k,


@ -15,7 +15,6 @@
//! DList implements the trait Deque. It should be imported with `use
//! collections::deque::Deque`.
// DList is constructed like a singly-linked list over the field `next`.
// including the last link being None; each Node owns its `next` field.
//
@ -23,10 +22,10 @@
// the reverse direction.
use std::cast;
use std::mem::{replace, swap};
use std::ptr;
use std::iter::Rev;
use std::iter;
use std::mem::{replace, swap};
use std::ptr;
use deque::Deque;
@ -37,7 +36,7 @@ pub struct DList<T> {
list_tail: Rawlink<Node<T>>,
}
type Link<T> = Option<~Node<T>>;
type Link<T> = Option<Box<Node<T>>>;
struct Rawlink<T> { p: *mut T }
struct Node<T> {
@ -118,7 +117,8 @@ impl<T> Node<T> {
}
/// Set the .prev field on `next`, then return `Some(next)`
fn link_with_prev<T>(mut next: ~Node<T>, prev: Rawlink<Node<T>>) -> Link<T> {
fn link_with_prev<T>(mut next: Box<Node<T>>, prev: Rawlink<Node<T>>)
-> Link<T> {
next.prev = prev;
Some(next)
}
@ -150,7 +150,7 @@ impl<T> Mutable for DList<T> {
impl<T> DList<T> {
/// Add a Node first in the list
#[inline]
fn push_front_node(&mut self, mut new_head: ~Node<T>) {
fn push_front_node(&mut self, mut new_head: Box<Node<T>>) {
match self.list_head {
None => {
self.list_tail = Rawlink::some(new_head);
@ -168,7 +168,7 @@ impl<T> DList<T> {
/// Remove the first Node and return it, or None if the list is empty
#[inline]
fn pop_front_node(&mut self) -> Option<~Node<T>> {
fn pop_front_node(&mut self) -> Option<Box<Node<T>>> {
self.list_head.take().map(|mut front_node| {
self.length -= 1;
match front_node.next.take() {
@ -181,7 +181,7 @@ impl<T> DList<T> {
/// Add a Node last in the list
#[inline]
fn push_back_node(&mut self, mut new_tail: ~Node<T>) {
fn push_back_node(&mut self, mut new_tail: Box<Node<T>>) {
match self.list_tail.resolve() {
None => return self.push_front_node(new_tail),
Some(tail) => {
@ -194,7 +194,7 @@ impl<T> DList<T> {
/// Remove the last Node and return it, or None if the list is empty
#[inline]
fn pop_back_node(&mut self) -> Option<~Node<T>> {
fn pop_back_node(&mut self) -> Option<Box<Node<T>>> {
self.list_tail.resolve().map_or(None, |tail| {
self.length -= 1;
self.list_tail = tail.prev;
@ -245,7 +245,7 @@ impl<T> Deque<T> for DList<T> {
///
/// O(1)
fn pop_front(&mut self) -> Option<T> {
self.pop_front_node().map(|~Node{value, ..}| value)
self.pop_front_node().map(|box Node{value, ..}| value)
}
/// Add an element last in the list
@ -259,7 +259,7 @@ impl<T> Deque<T> for DList<T> {
///
/// O(1)
fn pop_back(&mut self) -> Option<T> {
self.pop_back_node().map(|~Node{value, ..}| value)
self.pop_back_node().map(|box Node{value, ..}| value)
}
}
@ -432,7 +432,7 @@ impl<T> Drop for DList<T> {
match tail.resolve() {
None => break,
Some(prev) => {
prev.next.take(); // release ~Node<T>
prev.next.take(); // release Box<Node<T>>
tail = prev.prev;
}
}
@ -531,7 +531,7 @@ pub trait ListInsertion<A> {
// private methods for MutItems
impl<'a, A> MutItems<'a, A> {
fn insert_next_node(&mut self, mut ins_node: ~Node<A>) {
fn insert_next_node(&mut self, mut ins_node: Box<Node<A>>) {
// Insert before `self.head` so that it is between the
// previously yielded element and self.head.
//
@ -671,24 +671,24 @@ mod tests {
#[test]
fn test_basic() {
let mut m: DList<~int> = DList::new();
let mut m: DList<Box<int>> = DList::new();
assert_eq!(m.pop_front(), None);
assert_eq!(m.pop_back(), None);
assert_eq!(m.pop_front(), None);
m.push_front(box 1);
assert_eq!(m.pop_front(), Some(~1));
assert_eq!(m.pop_front(), Some(box 1));
m.push_back(box 2);
m.push_back(box 3);
assert_eq!(m.len(), 2);
assert_eq!(m.pop_front(), Some(~2));
assert_eq!(m.pop_front(), Some(~3));
assert_eq!(m.pop_front(), Some(box 2));
assert_eq!(m.pop_front(), Some(box 3));
assert_eq!(m.len(), 0);
assert_eq!(m.pop_front(), None);
m.push_back(box 1);
m.push_back(box 3);
m.push_back(box 5);
m.push_back(box 7);
assert_eq!(m.pop_front(), Some(~1));
assert_eq!(m.pop_front(), Some(box 1));
let mut n = DList::new();
n.push_front(2);


@ -57,7 +57,7 @@ struct LruEntry<K, V> {
/// An LRU Cache.
pub struct LruCache<K, V> {
map: HashMap<KeyRef<K>, ~LruEntry<K, V>>,
map: HashMap<KeyRef<K>, Box<LruEntry<K, V>>>,
max_size: uint,
head: *mut LruEntry<K, V>,
}
@ -241,9 +241,9 @@ impl<K: Hash + TotalEq, V> Mutable for LruCache<K, V> {
impl<K, V> Drop for LruCache<K, V> {
fn drop(&mut self) {
unsafe {
let node: ~LruEntry<K, V> = cast::transmute(self.head);
let node: Box<LruEntry<K, V>> = cast::transmute(self.head);
// Prevent compiler from trying to drop the un-initialized field in the sigil node.
let ~LruEntry { key: k, value: v, .. } = node;
let box LruEntry { key: k, value: v, .. } = node;
cast::forget(k);
cast::forget(v);
}


@ -270,24 +270,24 @@ mod tests {
#[test]
fn test_push_unique() {
let mut heap = PriorityQueue::from_vec(vec!(~2, ~4, ~9));
let mut heap = PriorityQueue::from_vec(vec!(box 2, box 4, box 9));
assert_eq!(heap.len(), 3);
assert!(*heap.top() == ~9);
assert!(*heap.top() == box 9);
heap.push(box 11);
assert_eq!(heap.len(), 4);
assert!(*heap.top() == ~11);
assert!(*heap.top() == box 11);
heap.push(box 5);
assert_eq!(heap.len(), 5);
assert!(*heap.top() == ~11);
assert!(*heap.top() == box 11);
heap.push(box 27);
assert_eq!(heap.len(), 6);
assert!(*heap.top() == ~27);
assert!(*heap.top() == box 27);
heap.push(box 3);
assert_eq!(heap.len(), 7);
assert!(*heap.top() == ~27);
assert!(*heap.top() == box 27);
heap.push(box 103);
assert_eq!(heap.len(), 8);
assert!(*heap.top() == ~103);
assert!(*heap.top() == box 103);
}
#[test]


@ -465,7 +465,7 @@ mod test_map {
assert!(!called);
called = true;
assert_eq!(k, 1);
assert_eq!(v, ~2);
assert_eq!(v, box 2);
}
assert!(called);
m.insert(2, box 1);


@ -36,7 +36,7 @@ use std::ptr;
#[allow(missing_doc)]
#[deriving(Clone)]
pub struct TreeMap<K, V> {
root: Option<~TreeNode<K, V>>,
root: Option<Box<TreeNode<K, V>>>,
length: uint
}
@ -79,7 +79,7 @@ impl<K: TotalOrd, V> Mutable for TreeMap<K, V> {
impl<K: TotalOrd, V> Map<K, V> for TreeMap<K, V> {
fn find<'a>(&'a self, key: &K) -> Option<&'a V> {
let mut current: &'a Option<~TreeNode<K, V>> = &self.root;
let mut current: &'a Option<Box<TreeNode<K, V>>> = &self.root;
loop {
match *current {
Some(ref r) => {
@ -157,7 +157,7 @@ impl<K: TotalOrd, V> TreeMap<K, V> {
let TreeMap { root: root, length: length } = self;
let stk = match root {
None => vec!(),
Some(~tn) => vec!(tn)
Some(box tn) => vec!(tn)
};
MoveEntries {
stack: stk,
@ -317,7 +317,7 @@ macro_rules! define_iterator {
($name:ident,
$rev_name:ident,
// the function to go from &m Option<~TreeNode> to *m TreeNode
// the function to go from &m Option<Box<TreeNode>> to *m TreeNode
deref = $deref:ident,
// see comment on `addr!`, this is just an optional `mut`, but
@ -441,7 +441,7 @@ define_iterator! {
addr_mut = mut
}
fn deref<'a, K, V>(node: &'a Option<~TreeNode<K, V>>) -> *TreeNode<K, V> {
fn deref<'a, K, V>(node: &'a Option<Box<TreeNode<K, V>>>) -> *TreeNode<K, V> {
match *node {
Some(ref n) => {
let n: &TreeNode<K, V> = *n;
@ -451,7 +451,8 @@ fn deref<'a, K, V>(node: &'a Option<~TreeNode<K, V>>) -> *TreeNode<K, V> {
}
}
fn mut_deref<K, V>(x: &mut Option<~TreeNode<K, V>>) -> *mut TreeNode<K, V> {
fn mut_deref<K, V>(x: &mut Option<Box<TreeNode<K, V>>>)
-> *mut TreeNode<K, V> {
match *x {
Some(ref mut n) => {
let n: &mut TreeNode<K, V> = *n;
@ -482,7 +483,7 @@ impl<K, V> Iterator<(K, V)> for MoveEntries<K,V> {
} = self.stack.pop().unwrap();
match left {
Some(~left) => {
Some(box left) => {
let n = TreeNode {
key: key,
value: value,
@ -495,7 +496,7 @@ impl<K, V> Iterator<(K, V)> for MoveEntries<K,V> {
}
None => {
match right {
Some(~right) => self.stack.push(right),
Some(box right) => self.stack.push(right),
None => ()
}
self.remaining -= 1;
@ -759,8 +760,8 @@ impl<'a, T: TotalOrd> Iterator<&'a T> for UnionItems<'a, T> {
struct TreeNode<K, V> {
key: K,
value: V,
left: Option<~TreeNode<K, V>>,
right: Option<~TreeNode<K, V>>,
left: Option<Box<TreeNode<K, V>>>,
right: Option<Box<TreeNode<K, V>>>,
level: uint
}
@ -773,7 +774,7 @@ impl<K: TotalOrd, V> TreeNode<K, V> {
}
// Remove left horizontal link by rotating right
fn skew<K: TotalOrd, V>(node: &mut ~TreeNode<K, V>) {
fn skew<K: TotalOrd, V>(node: &mut Box<TreeNode<K, V>>) {
if node.left.as_ref().map_or(false, |x| x.level == node.level) {
let mut save = node.left.take_unwrap();
swap(&mut node.left, &mut save.right); // save.right now None
@ -784,7 +785,7 @@ fn skew<K: TotalOrd, V>(node: &mut ~TreeNode<K, V>) {
// Remove dual horizontal link by rotating left and increasing level of
// the parent
fn split<K: TotalOrd, V>(node: &mut ~TreeNode<K, V>) {
fn split<K: TotalOrd, V>(node: &mut Box<TreeNode<K, V>>) {
if node.right.as_ref().map_or(false,
|x| x.right.as_ref().map_or(false, |y| y.level == node.level)) {
let mut save = node.right.take_unwrap();
@ -795,7 +796,7 @@ fn split<K: TotalOrd, V>(node: &mut ~TreeNode<K, V>) {
}
}
fn find_mut<'r, K: TotalOrd, V>(node: &'r mut Option<~TreeNode<K, V>>,
fn find_mut<'r, K: TotalOrd, V>(node: &'r mut Option<Box<TreeNode<K, V>>>,
key: &K)
-> Option<&'r mut V> {
match *node {
@ -810,7 +811,7 @@ fn find_mut<'r, K: TotalOrd, V>(node: &'r mut Option<~TreeNode<K, V>>,
}
}
fn insert<K: TotalOrd, V>(node: &mut Option<~TreeNode<K, V>>,
fn insert<K: TotalOrd, V>(node: &mut Option<Box<TreeNode<K, V>>>,
key: K, value: V) -> Option<V> {
match *node {
Some(ref mut save) => {
@ -840,10 +841,10 @@ fn insert<K: TotalOrd, V>(node: &mut Option<~TreeNode<K, V>>,
}
}
fn remove<K: TotalOrd, V>(node: &mut Option<~TreeNode<K, V>>,
fn remove<K: TotalOrd, V>(node: &mut Option<Box<TreeNode<K, V>>>,
key: &K) -> Option<V> {
fn heir_swap<K: TotalOrd, V>(node: &mut ~TreeNode<K, V>,
child: &mut Option<~TreeNode<K, V>>) {
fn heir_swap<K: TotalOrd, V>(node: &mut Box<TreeNode<K, V>>,
child: &mut Option<Box<TreeNode<K, V>>>) {
// *could* be done without recursion, but it won't borrow check
for x in child.mut_iter() {
if x.right.is_some() {
@ -877,13 +878,13 @@ fn remove<K: TotalOrd, V>(node: &mut Option<~TreeNode<K, V>>,
(remove(&mut save.left, key), true)
} else {
let new = save.left.take_unwrap();
let ~TreeNode{value, ..} = replace(save, new);
let box TreeNode{value, ..} = replace(save, new);
*save = save.left.take_unwrap();
(Some(value), true)
}
} else if save.right.is_some() {
let new = save.right.take_unwrap();
let ~TreeNode{value, ..} = replace(save, new);
let box TreeNode{value, ..} = replace(save, new);
(Some(value), true)
} else {
(None, false)
@ -919,7 +920,7 @@ fn remove<K: TotalOrd, V>(node: &mut Option<~TreeNode<K, V>>,
}
}
return match node.take() {
Some(~TreeNode{value, ..}) => Some(value), None => fail!()
Some(box TreeNode{value, ..}) => Some(value), None => fail!()
};
}
@ -959,7 +960,6 @@ impl<T: TotalOrd> Extendable<T> for TreeSet<T> {
#[cfg(test)]
mod test_treemap {
use super::{TreeMap, TreeNode};
use rand::Rng;
@ -1053,8 +1053,8 @@ mod test_treemap {
}
}
fn check_left<K: TotalOrd, V>(node: &Option<~TreeNode<K, V>>,
parent: &~TreeNode<K, V>) {
fn check_left<K: TotalOrd, V>(node: &Option<Box<TreeNode<K, V>>>,
parent: &Box<TreeNode<K, V>>) {
match *node {
Some(ref r) => {
assert_eq!(r.key.cmp(&parent.key), Less);
@ -1066,8 +1066,8 @@ mod test_treemap {
}
}
fn check_right<K: TotalOrd, V>(node: &Option<~TreeNode<K, V>>,
parent: &~TreeNode<K, V>,
fn check_right<K: TotalOrd, V>(node: &Option<Box<TreeNode<K, V>>>,
parent: &Box<TreeNode<K, V>>,
parent_red: bool) {
match *node {
Some(ref r) => {


@ -10,11 +10,11 @@
//! Ordered containers with integer keys, implemented as radix tries (`TrieSet` and `TrieMap` types)
use std::mem;
use std::uint;
use std::mem::init;
use std::slice;
use std::mem;
use std::slice::{Items, MutItems};
use std::slice;
use std::uint;
// FIXME: #5244: need to manually update the TrieNode constructor
static SHIFT: uint = 4;
@ -23,7 +23,7 @@ static MASK: uint = SIZE - 1;
static NUM_CHUNKS: uint = uint::BITS / SHIFT;
enum Child<T> {
Internal(~TrieNode<T>),
Internal(Box<TrieNode<T>>),
External(uint, T),
Nothing
}


@ -66,14 +66,15 @@ use syntax::parse::token::InternedString;
#[macro_registrar]
pub fn macro_registrar(register: |Name, SyntaxExtension|) {
register(token::intern("fourcc"),
NormalTT(~BasicMacroExpander {
NormalTT(box BasicMacroExpander {
expander: expand_syntax_ext,
span: None,
},
None));
}
pub fn expand_syntax_ext(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree]) -> ~base::MacResult {
pub fn expand_syntax_ext(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
-> Box<base::MacResult> {
let (expr, endian) = parse_tts(cx, tts);
let little = match endian {


@ -18,7 +18,7 @@ use std::slice;
// Note 2: Once Dynamically Sized Types (DST) lands, it might be
// reasonable to replace this with something like `enum MaybeOwned<'a,
// Sized? U>{ Owned(~U), Borrowed(&'a U) }`; and then `U` could be
// Sized? U>{ Owned(Box<U>), Borrowed(&'a U) }`; and then `U` could be
// instantiated with `[T]` or `str`, etc. Of course, that would imply
// removing the `Growable` variant, which relates to note 1 above.
// Alternatively, we might add `MaybeOwned` for the general case but


@ -17,19 +17,19 @@
use std::cast;
use std::mem::replace;
use std::rt::rtio::{EventLoop, IoFactory, RemoteCallback, PausableIdleCallback,
Callback};
use std::rt::rtio::{EventLoop, IoFactory, RemoteCallback};
use std::rt::rtio::{PausableIdleCallback, Callback};
use std::unstable::sync::Exclusive;
/// This is the only exported function from this module.
pub fn event_loop() -> ~EventLoop:Send {
box BasicLoop::new() as ~EventLoop:Send
pub fn event_loop() -> Box<EventLoop:Send> {
box BasicLoop::new() as Box<EventLoop:Send>
}
struct BasicLoop {
work: Vec<proc():Send>, // pending work
idle: Option<*mut BasicPausable>, // only one is allowed
remotes: Vec<(uint, ~Callback:Send)>,
remotes: Vec<(uint, Box<Callback:Send>)>,
next_remote: uint,
messages: Exclusive<Vec<Message>>,
}
@ -140,23 +140,24 @@ impl EventLoop for BasicLoop {
}
// FIXME: Seems like a really weird requirement to have an event loop provide.
fn pausable_idle_callback(&mut self, cb: ~Callback:Send)
-> ~PausableIdleCallback:Send
{
fn pausable_idle_callback(&mut self, cb: Box<Callback:Send>)
-> Box<PausableIdleCallback:Send> {
let callback = box BasicPausable::new(self, cb);
rtassert!(self.idle.is_none());
unsafe {
let cb_ptr: &*mut BasicPausable = cast::transmute(&callback);
self.idle = Some(*cb_ptr);
}
callback as ~PausableIdleCallback:Send
callback as Box<PausableIdleCallback:Send>
}
fn remote_callback(&mut self, f: ~Callback:Send) -> ~RemoteCallback:Send {
fn remote_callback(&mut self, f: Box<Callback:Send>)
-> Box<RemoteCallback:Send> {
let id = self.next_remote;
self.next_remote += 1;
self.remotes.push((id, f));
box BasicRemote::new(self.messages.clone(), id) as ~RemoteCallback:Send
box BasicRemote::new(self.messages.clone(), id) as
Box<RemoteCallback:Send>
}
fn io<'a>(&'a mut self) -> Option<&'a mut IoFactory> { None }
@ -197,12 +198,12 @@ impl Drop for BasicRemote {
struct BasicPausable {
eloop: *mut BasicLoop,
work: ~Callback:Send,
work: Box<Callback:Send>,
active: bool,
}
impl BasicPausable {
fn new(eloop: &mut BasicLoop, cb: ~Callback:Send) -> BasicPausable {
fn new(eloop: &mut BasicLoop, cb: Box<Callback:Send>) -> BasicPausable {
BasicPausable {
active: false,
work: cb,


@ -8,21 +8,21 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use stack::Stack;
use std::uint;
use std::cast::{transmute, transmute_mut_unsafe};
use stack::Stack;
use std::rt::stack;
use std::raw;
// FIXME #7761: Registers is boxed so that it is 16-byte aligned, for storing
// SSE regs. It would be marginally better not to do this. In C++ we
// use an attribute on a struct.
// FIXME #7761: It would be nice to define regs as `~Option<Registers>` since
// the registers are sometimes empty, but the discriminant would
// FIXME #7761: It would be nice to define regs as `Box<Option<Registers>>`
// since the registers are sometimes empty, but the discriminant would
// then misalign the regs again.
pub struct Context {
/// Hold the registers while the task or scheduler is suspended
regs: ~Registers,
regs: Box<Registers>,
/// Lower bound and upper bound for the stack
stack_bounds: Option<(uint, uint)>,
}
@ -87,10 +87,10 @@ impl Context {
pub fn swap(out_context: &mut Context, in_context: &Context) {
rtdebug!("swapping contexts");
let out_regs: &mut Registers = match out_context {
&Context { regs: ~ref mut r, .. } => r
&Context { regs: box ref mut r, .. } => r
};
let in_regs: &Registers = match in_context {
&Context { regs: ~ref r, .. } => r
&Context { regs: box ref r, .. } => r
};
rtdebug!("noting the stack limit and doing raw swap");
@ -151,7 +151,7 @@ struct Registers {
}
#[cfg(target_arch = "x86")]
fn new_regs() -> ~Registers {
fn new_regs() -> Box<Registers> {
box Registers {
eax: 0, ebx: 0, ecx: 0, edx: 0,
ebp: 0, esi: 0, edi: 0, esp: 0,
@ -190,9 +190,9 @@ type Registers = [uint, ..34];
type Registers = [uint, ..22];
#[cfg(windows, target_arch = "x86_64")]
fn new_regs() -> ~Registers { box [0, .. 34] }
fn new_regs() -> Box<Registers> { box [0, .. 34] }
#[cfg(not(windows), target_arch = "x86_64")]
fn new_regs() -> ~Registers { box {let v = [0, .. 22]; v} }
fn new_regs() -> Box<Registers> { box {let v = [0, .. 22]; v} }
#[cfg(target_arch = "x86_64")]
fn initialize_call_frame(regs: &mut Registers, fptr: InitFn, arg: uint,
@ -241,7 +241,7 @@ fn initialize_call_frame(regs: &mut Registers, fptr: InitFn, arg: uint,
type Registers = [uint, ..32];
#[cfg(target_arch = "arm")]
fn new_regs() -> ~Registers { box {[0, .. 32]} }
fn new_regs() -> Box<Registers> { box {[0, .. 32]} }
#[cfg(target_arch = "arm")]
fn initialize_call_frame(regs: &mut Registers, fptr: InitFn, arg: uint,
@ -270,7 +270,7 @@ fn initialize_call_frame(regs: &mut Registers, fptr: InitFn, arg: uint,
type Registers = [uint, ..32];
#[cfg(target_arch = "mips")]
fn new_regs() -> ~Registers { box [0, .. 32] }
fn new_regs() -> Box<Registers> { box [0, .. 32] }
#[cfg(target_arch = "mips")]
fn initialize_call_frame(regs: &mut Registers, fptr: InitFn, arg: uint,


@ -288,7 +288,7 @@ macro_rules! green_start( ($f:ident) => (
/// The return value is used as the process return code. 0 on success, 101 on
/// error.
pub fn start(argc: int, argv: **u8,
event_loop_factory: fn() -> ~rtio::EventLoop:Send,
event_loop_factory: fn() -> Box<rtio::EventLoop:Send>,
main: proc():Send) -> int {
rt::init(argc, argv);
let mut main = Some(main);
@ -309,7 +309,7 @@ pub fn start(argc: int, argv: **u8,
///
/// This function will not return until all schedulers in the associated pool
/// have returned.
pub fn run(event_loop_factory: fn() -> ~rtio::EventLoop:Send,
pub fn run(event_loop_factory: fn() -> Box<rtio::EventLoop:Send>,
main: proc():Send) -> int {
// Create a scheduler pool and spawn the main task into this pool. We will
// get notified over a channel when the main task exits.
@ -340,7 +340,7 @@ pub struct PoolConfig {
pub threads: uint,
/// A factory function used to create new event loops. If this is not
/// specified then the default event loop factory is used.
pub event_loop_factory: fn() -> ~rtio::EventLoop:Send,
pub event_loop_factory: fn() -> Box<rtio::EventLoop:Send>,
}
impl PoolConfig {
@ -360,12 +360,12 @@ pub struct SchedPool {
id: uint,
threads: Vec<Thread<()>>,
handles: Vec<SchedHandle>,
stealers: Vec<deque::Stealer<~task::GreenTask>>,
stealers: Vec<deque::Stealer<Box<task::GreenTask>>>,
next_friend: uint,
stack_pool: StackPool,
deque_pool: deque::BufferPool<~task::GreenTask>,
deque_pool: deque::BufferPool<Box<task::GreenTask>>,
sleepers: SleeperList,
factory: fn() -> ~rtio::EventLoop:Send,
factory: fn() -> Box<rtio::EventLoop:Send>,
task_state: TaskState,
tasks_done: Receiver<()>,
}
@ -445,7 +445,7 @@ impl SchedPool {
/// This is useful to create a task which can then be sent to a specific
/// scheduler created by `spawn_sched` (and possibly pin it to that
/// scheduler).
pub fn task(&mut self, opts: TaskOpts, f: proc():Send) -> ~GreenTask {
pub fn task(&mut self, opts: TaskOpts, f: proc():Send) -> Box<GreenTask> {
GreenTask::configure(&mut self.stack_pool, opts, f)
}


@ -46,10 +46,10 @@ pub struct Scheduler {
/// inside this pool of schedulers
pub task_state: TaskState,
/// There are N work queues, one per scheduler.
work_queue: deque::Worker<~GreenTask>,
work_queue: deque::Worker<Box<GreenTask>>,
/// Work queues for the other schedulers. These are created by
/// cloning the core work queues.
work_queues: Vec<deque::Stealer<~GreenTask>>,
work_queues: Vec<deque::Stealer<Box<GreenTask>>>,
/// The queue of incoming messages from other schedulers.
/// These are enqueued by SchedHandles after which a remote callback
/// is triggered to handle the message.
@ -71,7 +71,7 @@ pub struct Scheduler {
no_sleep: bool,
/// The scheduler runs on a special task. When it is not running
/// it is stored here instead of the work queue.
sched_task: Option<~GreenTask>,
sched_task: Option<Box<GreenTask>>,
/// An action performed after a context switch on behalf of the
/// code running before the context switch
cleanup_job: Option<CleanupJob>,
@ -83,7 +83,7 @@ pub struct Scheduler {
/// A fast XorShift rng for scheduler use
rng: XorShiftRng,
/// A togglable idle callback
idle_callback: Option<~PausableIdleCallback:Send>,
idle_callback: Option<Box<PausableIdleCallback:Send>>,
/// A countdown that starts at a random value and is decremented
/// every time a yield check is performed. When it hits 0 a task
/// will yield.
@ -100,7 +100,7 @@ pub struct Scheduler {
// destroyed before it's actually destroyed.
/// The event loop used to drive the scheduler and perform I/O
pub event_loop: ~EventLoop:Send,
pub event_loop: Box<EventLoop:Send>,
}
/// An indication of how hard to work on a given operation, the difference
@ -123,9 +123,9 @@ impl Scheduler {
// * Initialization Functions
pub fn new(pool_id: uint,
event_loop: ~EventLoop:Send,
work_queue: deque::Worker<~GreenTask>,
work_queues: Vec<deque::Stealer<~GreenTask>>,
event_loop: Box<EventLoop:Send>,
work_queue: deque::Worker<Box<GreenTask>>,
work_queues: Vec<deque::Stealer<Box<GreenTask>>>,
sleeper_list: SleeperList,
state: TaskState)
-> Scheduler {
@ -136,9 +136,9 @@ impl Scheduler {
}
pub fn new_special(pool_id: uint,
event_loop: ~EventLoop:Send,
work_queue: deque::Worker<~GreenTask>,
work_queues: Vec<deque::Stealer<~GreenTask>>,
event_loop: Box<EventLoop:Send>,
work_queue: deque::Worker<Box<GreenTask>>,
work_queues: Vec<deque::Stealer<Box<GreenTask>>>,
sleeper_list: SleeperList,
run_anything: bool,
friend: Option<SchedHandle>,
@ -183,7 +183,7 @@ impl Scheduler {
pub fn bootstrap(mut ~self) {
// Build an Idle callback.
let cb = box SchedRunner as ~Callback:Send;
let cb = box SchedRunner as Box<Callback:Send>;
self.idle_callback = Some(self.event_loop.pausable_idle_callback(cb));
// Create a task for the scheduler with an empty context.
@ -224,14 +224,14 @@ impl Scheduler {
// This does not return a scheduler, as the scheduler is placed
// inside the task.
pub fn run(mut ~self, stask: ~GreenTask) -> ~GreenTask {
pub fn run(mut ~self, stask: Box<GreenTask>) -> Box<GreenTask> {
// This is unsafe because we need to place the scheduler, with
// the event_loop inside, inside our task. But we still need a
// mutable reference to the event_loop to give it the "run"
// command.
unsafe {
let event_loop: *mut ~EventLoop:Send = &mut self.event_loop;
let event_loop: *mut Box<EventLoop:Send> = &mut self.event_loop;
// Our scheduler must be in the task before the event loop
// is started.
stask.put_with_sched(self);
@ -271,7 +271,7 @@ impl Scheduler {
// If we try really hard to do some work, but no work is available to be
// done, then we fall back to epoll() to block this thread waiting for more
// work (instead of busy waiting).
fn run_sched_once(mut ~self, stask: ~GreenTask) {
fn run_sched_once(mut ~self, stask: Box<GreenTask>) {
// Make sure that we're not lying in that the `stask` argument is indeed
// the scheduler task for this scheduler.
assert!(self.sched_task.is_none());
@ -349,9 +349,9 @@ impl Scheduler {
// returns the still-available scheduler. At this point all
// message-handling will count as a turn of work, and as a result
// return None.
fn interpret_message_queue(mut ~self, stask: ~GreenTask,
fn interpret_message_queue(mut ~self, stask: Box<GreenTask>,
effort: EffortLevel)
-> (~Scheduler, ~GreenTask, bool)
-> (Box<Scheduler>, Box<GreenTask>, bool)
{
let msg = if effort == DontTryTooHard {
@ -432,8 +432,8 @@ impl Scheduler {
}
}
fn do_work(mut ~self,
stask: ~GreenTask) -> (~Scheduler, ~GreenTask, bool) {
fn do_work(mut ~self, stask: Box<GreenTask>)
-> (Box<Scheduler>, Box<GreenTask>, bool) {
rtdebug!("scheduler calling do work");
match self.find_work() {
Some(task) => {
@ -459,7 +459,7 @@ impl Scheduler {
// First step in the process is to find a task. This function does
// that by first checking the local queue, and if there is no work
// there, trying to steal from the remote work queues.
fn find_work(&mut self) -> Option<~GreenTask> {
fn find_work(&mut self) -> Option<Box<GreenTask>> {
rtdebug!("scheduler looking for work");
if !self.steal_for_yield {
match self.work_queue.pop() {
@ -497,7 +497,7 @@ impl Scheduler {
// Try stealing from all queues the scheduler knows about. This
// naive implementation can steal from our own queue or from other
// special schedulers.
fn try_steals(&mut self) -> Option<~GreenTask> {
fn try_steals(&mut self) -> Option<Box<GreenTask>> {
let work_queues = &mut self.work_queues;
let len = work_queues.len();
let start_index = self.rng.gen_range(0, len);
@ -517,9 +517,11 @@ impl Scheduler {
// * Task Routing Functions - Make sure tasks send up in the right
// place.
fn process_task(mut ~self, cur: ~GreenTask,
mut next: ~GreenTask,
schedule_fn: SchedulingFn) -> (~Scheduler, ~GreenTask) {
fn process_task(mut ~self,
cur: Box<GreenTask>,
mut next: Box<GreenTask>,
schedule_fn: SchedulingFn)
-> (Box<Scheduler>, Box<GreenTask>) {
rtdebug!("processing a task");
match next.take_unwrap_home() {
@ -549,7 +551,7 @@ impl Scheduler {
}
}
fn send_task_home(task: ~GreenTask) {
fn send_task_home(task: Box<GreenTask>) {
let mut task = task;
match task.take_unwrap_home() {
HomeSched(mut home_handle) => home_handle.send(PinnedTask(task)),
@ -559,7 +561,7 @@ impl Scheduler {
/// Take a non-homed task we aren't allowed to run here and send
/// it to the designated friend scheduler to execute.
fn send_to_friend(&mut self, task: ~GreenTask) {
fn send_to_friend(&mut self, task: Box<GreenTask>) {
rtdebug!("sending a task to friend");
match self.friend_handle {
Some(ref mut handle) => {
@ -576,7 +578,7 @@ impl Scheduler {
/// Pushes the task onto the work stealing queue and tells the
/// event loop to run it later. Always use this instead of pushing
/// to the work queue directly.
pub fn enqueue_task(&mut self, task: ~GreenTask) {
pub fn enqueue_task(&mut self, task: Box<GreenTask>) {
// We push the task onto our local queue clone.
assert!(!task.is_sched());
@ -609,9 +611,10 @@ impl Scheduler {
// old task as inputs.
pub fn change_task_context(mut ~self,
current_task: ~GreenTask,
mut next_task: ~GreenTask,
f: |&mut Scheduler, ~GreenTask|) -> ~GreenTask {
current_task: Box<GreenTask>,
mut next_task: Box<GreenTask>,
f: |&mut Scheduler, Box<GreenTask>|)
-> Box<GreenTask> {
let f_opaque = ClosureConverter::from_fn(f);
let current_task_dupe = &*current_task as *GreenTask;
@ -655,7 +658,7 @@ impl Scheduler {
// When the context swaps back to this task we immediately
// run the cleanup job, as expected by the previously called
// swap_contexts function.
let mut current_task: ~GreenTask = unsafe {
let mut current_task: Box<GreenTask> = unsafe {
cast::transmute(current_task_dupe)
};
current_task.sched.get_mut_ref().run_cleanup_job();
@ -688,8 +691,10 @@ impl Scheduler {
// * Context Swapping Helpers - Here be ugliness!
pub fn resume_task_immediately(~self, cur: ~GreenTask,
next: ~GreenTask) -> (~Scheduler, ~GreenTask) {
pub fn resume_task_immediately(~self,
cur: Box<GreenTask>,
next: Box<GreenTask>)
-> (Box<Scheduler>, Box<GreenTask>) {
assert!(cur.is_sched());
let mut cur = self.change_task_context(cur, next, |sched, stask| {
assert!(sched.sched_task.is_none());
@ -698,9 +703,10 @@ impl Scheduler {
(cur.sched.take_unwrap(), cur)
}
fn resume_task_immediately_cl(sched: ~Scheduler,
cur: ~GreenTask,
next: ~GreenTask) -> (~Scheduler, ~GreenTask) {
fn resume_task_immediately_cl(sched: Box<Scheduler>,
cur: Box<GreenTask>,
next: Box<GreenTask>)
-> (Box<Scheduler>, Box<GreenTask>) {
sched.resume_task_immediately(cur, next)
}
@ -726,7 +732,7 @@ impl Scheduler {
/// guaranteed that this function will not return before the given closure
/// has returned.
pub fn deschedule_running_task_and_then(mut ~self,
cur: ~GreenTask,
cur: Box<GreenTask>,
f: |&mut Scheduler, BlockedTask|) {
// Trickier - we need to get the scheduler task out of self
// and use it as the destination.
@ -736,8 +742,8 @@ impl Scheduler {
}
pub fn switch_running_tasks_and_then(~self,
cur: ~GreenTask,
next: ~GreenTask,
cur: Box<GreenTask>,
next: Box<GreenTask>,
f: |&mut Scheduler, BlockedTask|) {
// And here comes one of the sad moments in which a lock is used in a
// core portion of the rust runtime. As always, this is highly
@ -768,8 +774,10 @@ impl Scheduler {
cur.put();
}
fn switch_task(sched: ~Scheduler, cur: ~GreenTask,
next: ~GreenTask) -> (~Scheduler, ~GreenTask) {
fn switch_task(sched: Box<Scheduler>,
cur: Box<GreenTask>,
next: Box<GreenTask>)
-> (Box<Scheduler>, Box<GreenTask>) {
let mut cur = sched.change_task_context(cur, next, |sched, last_task| {
if last_task.is_sched() {
assert!(sched.sched_task.is_none());
@ -785,7 +793,7 @@ impl Scheduler {
/// Called by a running task to end execution, after which it will
/// be recycled by the scheduler for reuse in a new task.
pub fn terminate_current_task(mut ~self, cur: ~GreenTask) -> ! {
pub fn terminate_current_task(mut ~self, cur: Box<GreenTask>) -> ! {
// Similar to deschedule running task and then, but cannot go through
// the task-blocking path. The task is already dying.
let stask = self.sched_task.take_unwrap();
@ -797,13 +805,13 @@ impl Scheduler {
fail!("should never return!");
}
pub fn run_task(~self, cur: ~GreenTask, next: ~GreenTask) {
pub fn run_task(~self, cur: Box<GreenTask>, next: Box<GreenTask>) {
let (sched, task) =
self.process_task(cur, next, Scheduler::switch_task);
task.put_with_sched(sched);
}
pub fn run_task_later(mut cur: ~GreenTask, next: ~GreenTask) {
pub fn run_task_later(mut cur: Box<GreenTask>, next: Box<GreenTask>) {
let mut sched = cur.sched.take_unwrap();
sched.enqueue_task(next);
cur.put_with_sched(sched);
@ -813,7 +821,7 @@ impl Scheduler {
/// to introduce some amount of randomness to the scheduler. Currently the
/// randomness is a result of performing a round of work stealing (which
/// may end up stealing from the current scheduler).
pub fn yield_now(mut ~self, cur: ~GreenTask) {
pub fn yield_now(mut ~self, cur: Box<GreenTask>) {
// Async handles trigger the scheduler by calling yield_now on the local
// task, which eventually gets us to here. See comments in SchedRunner
// for more info on this.
@ -832,7 +840,7 @@ impl Scheduler {
}
}
pub fn maybe_yield(mut ~self, cur: ~GreenTask) {
pub fn maybe_yield(mut ~self, cur: Box<GreenTask>) {
// It's possible for sched tasks to call this function, and it
// just means that they're likely sending on channels (which
// occasionally call this function). Sched tasks follow different paths
@ -881,20 +889,20 @@ impl Scheduler {
// Supporting types
type SchedulingFn = fn (~Scheduler, ~GreenTask, ~GreenTask)
-> (~Scheduler, ~GreenTask);
type SchedulingFn = fn(Box<Scheduler>, Box<GreenTask>, Box<GreenTask>)
-> (Box<Scheduler>, Box<GreenTask>);
pub enum SchedMessage {
Wake,
Shutdown,
NewNeighbor(deque::Stealer<~GreenTask>),
PinnedTask(~GreenTask),
TaskFromFriend(~GreenTask),
RunOnce(~GreenTask),
NewNeighbor(deque::Stealer<Box<GreenTask>>),
PinnedTask(Box<GreenTask>),
TaskFromFriend(Box<GreenTask>),
RunOnce(Box<GreenTask>),
}
pub struct SchedHandle {
remote: ~RemoteCallback:Send,
remote: Box<RemoteCallback:Send>,
queue: msgq::Producer<SchedMessage>,
pub sched_id: uint
}
@ -920,18 +928,18 @@ impl Callback for SchedRunner {
// This function could be converted to `GreenTask::convert` if
// absolutely necessary, but for cleanliness it is much better to not
// use the conversion function.
let task: ~Task = Local::take();
let task: Box<Task> = Local::take();
task.yield_now();
}
}
struct CleanupJob {
task: ~GreenTask,
task: Box<GreenTask>,
f: UnsafeTaskReceiver
}
impl CleanupJob {
pub fn new(task: ~GreenTask, f: UnsafeTaskReceiver) -> CleanupJob {
pub fn new(task: Box<GreenTask>, f: UnsafeTaskReceiver) -> CleanupJob {
CleanupJob {
task: task,
f: f
@ -948,14 +956,14 @@ impl CleanupJob {
// complaining
type UnsafeTaskReceiver = raw::Closure;
trait ClosureConverter {
fn from_fn(|&mut Scheduler, ~GreenTask|) -> Self;
fn to_fn(self) -> |&mut Scheduler, ~GreenTask|;
fn from_fn(|&mut Scheduler, Box<GreenTask>|) -> Self;
fn to_fn(self) -> |&mut Scheduler, Box<GreenTask>|;
}
impl ClosureConverter for UnsafeTaskReceiver {
fn from_fn(f: |&mut Scheduler, ~GreenTask|) -> UnsafeTaskReceiver {
fn from_fn(f: |&mut Scheduler, Box<GreenTask>|) -> UnsafeTaskReceiver {
unsafe { cast::transmute(f) }
}
fn to_fn(self) -> |&mut Scheduler, ~GreenTask| {
fn to_fn(self) -> |&mut Scheduler, Box<GreenTask>| {
unsafe { cast::transmute(self) }
}
}
@ -1218,7 +1226,7 @@ mod test {
// Signal from the special task that we are done.
let (tx, rx) = channel::<()>();
fn run(next: ~GreenTask) {
fn run(next: Box<GreenTask>) {
let mut task = GreenTask::convert(Local::take());
let sched = task.sched.take_unwrap();
sched.run_task(task, next)


@ -28,7 +28,7 @@ struct SimpleTask {
impl Runtime for SimpleTask {
// Implement the simple tasks of descheduling and rescheduling, but only in
// a simple number of cases.
fn deschedule(mut ~self, times: uint, mut cur_task: ~Task,
fn deschedule(mut ~self, times: uint, mut cur_task: Box<Task>,
f: |BlockedTask| -> Result<(), BlockedTask>) {
assert!(times == 1);
@ -55,7 +55,7 @@ impl Runtime for SimpleTask {
}
Local::put(cur_task);
}
fn reawaken(mut ~self, mut to_wake: ~Task) {
fn reawaken(mut ~self, mut to_wake: Box<Task>) {
let me = &mut *self as *mut SimpleTask;
to_wake.put_runtime(self);
unsafe {
@ -70,18 +70,21 @@ impl Runtime for SimpleTask {
// purpose. A "simple task" is just that, a very simple task that can't
// really do a whole lot. The only purpose of the task is to get us off our
// feet and running.
fn yield_now(~self, _cur_task: ~Task) { fail!() }
fn maybe_yield(~self, _cur_task: ~Task) { fail!() }
fn spawn_sibling(~self, _cur_task: ~Task, _opts: TaskOpts, _f: proc():Send) {
fn yield_now(~self, _cur_task: Box<Task>) { fail!() }
fn maybe_yield(~self, _cur_task: Box<Task>) { fail!() }
fn spawn_sibling(~self,
_cur_task: Box<Task>,
_opts: TaskOpts,
_f: proc():Send) {
fail!()
}
fn local_io<'a>(&'a mut self) -> Option<rtio::LocalIo<'a>> { None }
fn stack_bounds(&self) -> (uint, uint) { fail!() }
fn can_block(&self) -> bool { true }
fn wrap(~self) -> ~Any { fail!() }
fn wrap(~self) -> Box<Any> { fail!() }
}
pub fn task() -> ~Task {
pub fn task() -> Box<Task> {
let mut task = box Task::new();
task.put_runtime(box SimpleTask {
lock: unsafe {NativeMutex::new()},


@ -50,12 +50,12 @@ pub struct GreenTask {
/// Slot for maintaining ownership of a scheduler. If a task is running,
/// this value will be Some(sched) where the task is running on "sched".
pub sched: Option<~Scheduler>,
pub sched: Option<Box<Scheduler>>,
/// Temporary ownership slot of a std::rt::task::Task object. This is used
/// to squirrel that libstd task away while we're performing green task
/// operations.
pub task: Option<~Task>,
pub task: Option<Box<Task>>,
/// Dictates whether this is a sched task or a normal green task
pub task_type: TaskType,
@ -85,8 +85,8 @@ pub enum Home {
/// for all green tasks. This code is actually called after the initial context
/// switch onto a green thread.
///
/// The first argument to this function is the `~GreenTask` pointer, and the
/// next two arguments are the user-provided procedure for running code.
/// The first argument to this function is the `Box<GreenTask>` pointer, and
/// the next two arguments are the user-provided procedure for running code.
///
/// The goal for having this weird-looking function is to reduce the number of
/// allocations done on a green-task startup as much as possible.
@ -96,8 +96,8 @@ extern fn bootstrap_green_task(task: uint, code: *(), env: *()) -> ! {
cast::transmute(raw::Procedure { code: code, env: env })
};
// Acquire ownership of the `~GreenTask`
let mut task: ~GreenTask = unsafe { cast::transmute(task) };
// Acquire ownership of the `Box<GreenTask>`
let mut task: Box<GreenTask> = unsafe { cast::transmute(task) };
// First code after swap to this new context. Run our cleanup job
task.pool_id = {
@ -129,7 +129,7 @@ impl GreenTask {
/// and will not have any contained Task structure.
pub fn new(stack_pool: &mut StackPool,
stack_size: Option<uint>,
start: proc():Send) -> ~GreenTask {
start: proc():Send) -> Box<GreenTask> {
GreenTask::new_homed(stack_pool, stack_size, AnySched, start)
}
@ -137,7 +137,7 @@ impl GreenTask {
pub fn new_homed(stack_pool: &mut StackPool,
stack_size: Option<uint>,
home: Home,
start: proc():Send) -> ~GreenTask {
start: proc():Send) -> Box<GreenTask> {
// Allocate ourselves a GreenTask structure
let mut ops = GreenTask::new_typed(None, TypeGreen(Some(home)));
@ -158,7 +158,7 @@ impl GreenTask {
/// Creates a new green task with the specified coroutine and type, this is
/// useful when creating scheduler tasks.
pub fn new_typed(coroutine: Option<Coroutine>,
task_type: TaskType) -> ~GreenTask {
task_type: TaskType) -> Box<GreenTask> {
box GreenTask {
pool_id: 0,
coroutine: coroutine,
@ -175,7 +175,7 @@ impl GreenTask {
/// new stack for this task.
pub fn configure(pool: &mut StackPool,
opts: TaskOpts,
f: proc():Send) -> ~GreenTask {
f: proc():Send) -> Box<GreenTask> {
let TaskOpts {
notify_chan, name, stack_size,
stderr, stdout,
@ -204,7 +204,7 @@ impl GreenTask {
///
/// This function will assert that the task is indeed a green task before
/// returning (and will kill the entire process if this is wrong).
pub fn convert(mut task: ~Task) -> ~GreenTask {
pub fn convert(mut task: Box<Task>) -> Box<GreenTask> {
match task.maybe_take_runtime::<GreenTask>() {
Some(mut green) => {
green.put_task(task);
@ -270,22 +270,24 @@ impl GreenTask {
self as *GreenTask as uint
}
pub unsafe fn from_uint(val: uint) -> ~GreenTask { cast::transmute(val) }
pub unsafe fn from_uint(val: uint) -> Box<GreenTask> {
cast::transmute(val)
}
// Runtime glue functions and helpers
pub fn put_with_sched(mut ~self, sched: ~Scheduler) {
pub fn put_with_sched(mut ~self, sched: Box<Scheduler>) {
assert!(self.sched.is_none());
self.sched = Some(sched);
self.put();
}
pub fn put_task(&mut self, task: ~Task) {
pub fn put_task(&mut self, task: Box<Task>) {
assert!(self.task.is_none());
self.task = Some(task);
}
pub fn swap(mut ~self) -> ~Task {
pub fn swap(mut ~self) -> Box<Task> {
let mut task = self.task.take_unwrap();
task.put_runtime(self);
return task;
@ -331,19 +333,19 @@ impl GreenTask {
}
impl Runtime for GreenTask {
fn yield_now(mut ~self, cur_task: ~Task) {
fn yield_now(mut ~self, cur_task: Box<Task>) {
self.put_task(cur_task);
let sched = self.sched.take_unwrap();
sched.yield_now(self);
}
fn maybe_yield(mut ~self, cur_task: ~Task) {
fn maybe_yield(mut ~self, cur_task: Box<Task>) {
self.put_task(cur_task);
let sched = self.sched.take_unwrap();
sched.maybe_yield(self);
}
fn deschedule(mut ~self, times: uint, cur_task: ~Task,
fn deschedule(mut ~self, times: uint, cur_task: Box<Task>,
f: |BlockedTask| -> Result<(), BlockedTask>) {
self.put_task(cur_task);
let mut sched = self.sched.take_unwrap();
@ -392,14 +394,14 @@ impl Runtime for GreenTask {
}
}
fn reawaken(mut ~self, to_wake: ~Task) {
fn reawaken(mut ~self, to_wake: Box<Task>) {
self.put_task(to_wake);
assert!(self.sched.is_none());
// Optimistically look for a local task, but if one's not available to
// inspect (in order to see if it's in the same sched pool as we are),
// then just use our remote wakeup routine and carry on!
let mut running_task: ~Task = match Local::try_take() {
let mut running_task: Box<Task> = match Local::try_take() {
Some(task) => task,
None => return self.reawaken_remotely()
};
@ -443,7 +445,10 @@ impl Runtime for GreenTask {
}
}
fn spawn_sibling(mut ~self, cur_task: ~Task, opts: TaskOpts, f: proc():Send) {
fn spawn_sibling(mut ~self,
cur_task: Box<Task>,
opts: TaskOpts,
f: proc():Send) {
self.put_task(cur_task);
// Spawns a task into the current scheduler. We allocate the new task's
@ -477,7 +482,7 @@ impl Runtime for GreenTask {
fn can_block(&self) -> bool { false }
fn wrap(~self) -> ~Any { self as ~Any }
fn wrap(~self) -> Box<Any> { self as Box<Any> }
}
#[cfg(test)]
@ -572,7 +577,7 @@ mod tests {
let (tx, rx) = channel();
spawn_opts(TaskOpts::new(), proc() {
spawn(proc() {
let mut task: ~Task = Local::take();
let mut task: Box<Task> = Local::take();
match task.maybe_take_runtime::<GreenTask>() {
Some(ops) => {
task.put_runtime(ops);


@ -61,7 +61,7 @@ use syntax::parse::token;
#[macro_registrar]
pub fn macro_registrar(register: |Name, SyntaxExtension|) {
register(token::intern("hexfloat"),
NormalTT(~BasicMacroExpander {
NormalTT(box BasicMacroExpander {
expander: expand_syntax_ext,
span: None,
},
@ -97,7 +97,8 @@ fn hex_float_lit_err(s: &str) -> Option<(uint, ~str)> {
}
}
pub fn expand_syntax_ext(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree]) -> ~base::MacResult {
pub fn expand_syntax_ext(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
-> Box<base::MacResult> {
let (expr, ty_lit) = parse_tts(cx, tts);
let ty = match ty_lit {


@ -158,7 +158,7 @@ pub static WARN: u32 = 2;
/// Error log level
pub static ERROR: u32 = 1;
local_data_key!(local_logger: ~Logger:Send)
local_data_key!(local_logger: Box<Logger:Send>)
/// A trait used to represent an interface to a task-local logger. Each task
/// can have its own custom logger which can respond to logging messages
@ -229,7 +229,7 @@ pub fn log(level: u32, loc: &'static LogLocation, args: &fmt::Arguments) {
// frob the slot while we're doing the logging. This will destroy any logger
// set during logging.
let mut logger = local_data::pop(local_logger).unwrap_or_else(|| {
box DefaultLogger { handle: io::stderr() } as ~Logger:Send
box DefaultLogger { handle: io::stderr() } as Box<Logger:Send>
});
logger.log(&LogRecord {
level: LogLevel(level),
@ -249,7 +249,7 @@ pub fn log_level() -> u32 { unsafe { LOG_LEVEL } }
/// Replaces the task-local logger with the specified logger, returning the old
/// logger.
pub fn set_logger(logger: ~Logger:Send) -> Option<~Logger:Send> {
pub fn set_logger(logger: Box<Logger:Send>) -> Option<Box<Logger:Send>> {
let prev = local_data::pop(local_logger);
local_data::set(local_logger, logger);
return prev;
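As a quick usage sketch of the renamed signature above (hedged: `MyLogger` is an invented type, and the `log` method signature is inferred from the call site earlier in this file), a caller now passes a `Box<Logger:Send>` rather than a `~Logger:Send`:

```rust
// Hypothetical sketch only; not part of this patch.
struct MyLogger;

impl Logger for MyLogger {
    fn log(&mut self, record: &LogRecord) {
        // forward the record somewhere interesting
        let _ = record;
    }
}

// Install the custom logger; the previous logger (if any) is handed back.
let previous = log::set_logger(box MyLogger as Box<Logger:Send>);
```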
@ -351,7 +351,7 @@ fn init() {
// Schedule the cleanup for this global for when the runtime exits.
rt::at_exit(proc() {
assert!(!DIRECTIVES.is_null());
let _directives: ~Vec<directive::LogDirective> =
let _directives: Box<Vec<directive::LogDirective>> =
cast::transmute(DIRECTIVES);
DIRECTIVES = 0 as *Vec<directive::LogDirective>;
});


@ -10,12 +10,12 @@
//! Blocking posix-based file I/O
use libc::{c_int, c_void};
use libc;
use std::sync::arc::UnsafeArc;
use std::c_str::CString;
use std::io::IoError;
use std::io;
use libc::{c_int, c_void};
use libc;
use std::mem;
use std::rt::rtio;
@ -175,8 +175,8 @@ impl rtio::RtioPipe for FileDesc {
fn write(&mut self, buf: &[u8]) -> Result<(), IoError> {
self.inner_write(buf)
}
fn clone(&self) -> ~rtio::RtioPipe:Send {
box FileDesc { inner: self.inner.clone() } as ~rtio::RtioPipe:Send
fn clone(&self) -> Box<rtio::RtioPipe:Send> {
box FileDesc { inner: self.inner.clone() } as Box<rtio::RtioPipe:Send>
}
}


@ -207,8 +207,8 @@ impl rtio::RtioPipe for FileDesc {
fn write(&mut self, buf: &[u8]) -> Result<(), IoError> {
self.inner_write(buf)
}
fn clone(&self) -> ~rtio::RtioPipe:Send {
box FileDesc { inner: self.inner.clone() } as ~rtio::RtioPipe:Send
fn clone(&self) -> Box<rtio::RtioPipe:Send> {
box FileDesc { inner: self.inner.clone() } as Box<rtio::RtioPipe:Send>
}
}


@ -21,19 +21,19 @@
//! play. The only dependencies of these modules are the normal system libraries
//! that you would find on the respective platform.
use libc::c_int;
use libc;
use std::c_str::CString;
use std::io;
use std::io::IoError;
use std::io::net::ip::SocketAddr;
use std::io::process::ProcessConfig;
use std::io::signal::Signum;
use libc::c_int;
use libc;
use std::os;
use std::rt::rtio;
use std::rt::rtio::{RtioTcpStream, RtioTcpListener, RtioUdpSocket,
RtioUnixListener, RtioPipe, RtioFileStream, RtioProcess,
RtioSignal, RtioTTY, CloseBehavior, RtioTimer};
use std::rt::rtio::{RtioTcpStream, RtioTcpListener, RtioUdpSocket};
use std::rt::rtio::{RtioUnixListener, RtioPipe, RtioFileStream, RtioProcess};
use std::rt::rtio::{RtioSignal, RtioTTY, CloseBehavior, RtioTimer};
use ai = std::io::net::addrinfo;
// Local re-exports
@ -166,21 +166,32 @@ impl IoFactory {
impl rtio::IoFactory for IoFactory {
// networking
fn tcp_connect(&mut self, addr: SocketAddr,
timeout: Option<u64>) -> IoResult<~RtioTcpStream:Send> {
net::TcpStream::connect(addr, timeout).map(|s| box s as ~RtioTcpStream:Send)
timeout: Option<u64>) -> IoResult<Box<RtioTcpStream:Send>> {
net::TcpStream::connect(addr, timeout).map(|s| {
box s as Box<RtioTcpStream:Send>
})
}
fn tcp_bind(&mut self, addr: SocketAddr) -> IoResult<~RtioTcpListener:Send> {
net::TcpListener::bind(addr).map(|s| box s as ~RtioTcpListener:Send)
fn tcp_bind(&mut self, addr: SocketAddr)
-> IoResult<Box<RtioTcpListener:Send>> {
net::TcpListener::bind(addr).map(|s| {
box s as Box<RtioTcpListener:Send>
})
}
fn udp_bind(&mut self, addr: SocketAddr) -> IoResult<~RtioUdpSocket:Send> {
net::UdpSocket::bind(addr).map(|u| box u as ~RtioUdpSocket:Send)
fn udp_bind(&mut self, addr: SocketAddr)
-> IoResult<Box<RtioUdpSocket:Send>> {
net::UdpSocket::bind(addr).map(|u| box u as Box<RtioUdpSocket:Send>)
}
fn unix_bind(&mut self, path: &CString) -> IoResult<~RtioUnixListener:Send> {
pipe::UnixListener::bind(path).map(|s| box s as ~RtioUnixListener:Send)
fn unix_bind(&mut self, path: &CString)
-> IoResult<Box<RtioUnixListener:Send>> {
pipe::UnixListener::bind(path).map(|s| {
box s as Box<RtioUnixListener:Send>
})
}
fn unix_connect(&mut self, path: &CString,
timeout: Option<u64>) -> IoResult<~RtioPipe:Send> {
pipe::UnixStream::connect(path, timeout).map(|s| box s as ~RtioPipe:Send)
timeout: Option<u64>) -> IoResult<Box<RtioPipe:Send>> {
pipe::UnixStream::connect(path, timeout).map(|s| {
box s as Box<RtioPipe:Send>
})
}
fn get_host_addresses(&mut self, host: Option<&str>, servname: Option<&str>,
hint: Option<ai::Hint>) -> IoResult<~[ai::Info]> {
@ -188,17 +199,17 @@ impl rtio::IoFactory for IoFactory {
}
// filesystem operations
fn fs_from_raw_fd(&mut self, fd: c_int,
close: CloseBehavior) -> ~RtioFileStream:Send {
fn fs_from_raw_fd(&mut self, fd: c_int, close: CloseBehavior)
-> Box<RtioFileStream:Send> {
let close = match close {
rtio::CloseSynchronously | rtio::CloseAsynchronously => true,
rtio::DontClose => false
};
box file::FileDesc::new(fd, close) as ~RtioFileStream:Send
box file::FileDesc::new(fd, close) as Box<RtioFileStream:Send>
}
fn fs_open(&mut self, path: &CString, fm: io::FileMode, fa: io::FileAccess)
-> IoResult<~RtioFileStream:Send> {
file::open(path, fm, fa).map(|fd| box fd as ~RtioFileStream:Send)
-> IoResult<Box<RtioFileStream:Send>> {
file::open(path, fm, fa).map(|fd| box fd as Box<RtioFileStream:Send>)
}
fn fs_unlink(&mut self, path: &CString) -> IoResult<()> {
file::unlink(path)
@ -244,27 +255,29 @@ impl rtio::IoFactory for IoFactory {
}
// misc
fn timer_init(&mut self) -> IoResult<~RtioTimer:Send> {
timer::Timer::new().map(|t| box t as ~RtioTimer:Send)
fn timer_init(&mut self) -> IoResult<Box<RtioTimer:Send>> {
timer::Timer::new().map(|t| box t as Box<RtioTimer:Send>)
}
fn spawn(&mut self, config: ProcessConfig)
-> IoResult<(~RtioProcess:Send, ~[Option<~RtioPipe:Send>])> {
-> IoResult<(Box<RtioProcess:Send>,
~[Option<Box<RtioPipe:Send>>])> {
process::Process::spawn(config).map(|(p, io)| {
(box p as ~RtioProcess:Send,
io.move_iter().map(|p| p.map(|p| box p as ~RtioPipe:Send)).collect())
(box p as Box<RtioProcess:Send>,
io.move_iter().map(|p| p.map(|p| {
box p as Box<RtioPipe:Send>
})).collect())
})
}
fn kill(&mut self, pid: libc::pid_t, signum: int) -> IoResult<()> {
process::Process::kill(pid, signum)
}
fn pipe_open(&mut self, fd: c_int) -> IoResult<~RtioPipe:Send> {
Ok(box file::FileDesc::new(fd, true) as ~RtioPipe:Send)
fn pipe_open(&mut self, fd: c_int) -> IoResult<Box<RtioPipe:Send>> {
Ok(box file::FileDesc::new(fd, true) as Box<RtioPipe:Send>)
}
fn tty_open(&mut self, fd: c_int, _readable: bool)
-> IoResult<~RtioTTY:Send>
{
-> IoResult<Box<RtioTTY:Send>> {
if unsafe { libc::isatty(fd) } != 0 {
Ok(box file::FileDesc::new(fd, true) as ~RtioTTY:Send)
Ok(box file::FileDesc::new(fd, true) as Box<RtioTTY:Send>)
} else {
Err(IoError {
kind: io::MismatchedFileTypeForOperation,
@ -274,7 +287,7 @@ impl rtio::IoFactory for IoFactory {
}
}
fn signal(&mut self, _signal: Signum, _channel: Sender<Signum>)
-> IoResult<~RtioSignal:Send> {
-> IoResult<Box<RtioSignal:Send>> {
Err(unimpl())
}
}


@ -351,8 +351,10 @@ impl rtio::RtioTcpStream for TcpStream {
self.set_keepalive(None)
}
fn clone(&self) -> ~rtio::RtioTcpStream:Send {
box TcpStream { inner: self.inner.clone() } as ~rtio::RtioTcpStream:Send
fn clone(&self) -> Box<rtio::RtioTcpStream:Send> {
box TcpStream {
inner: self.inner.clone(),
} as Box<rtio::RtioTcpStream:Send>
}
fn close_write(&mut self) -> IoResult<()> {
super::mkerr_libc(unsafe {
@ -418,8 +420,10 @@ impl TcpListener {
}
impl rtio::RtioTcpListener for TcpListener {
fn listen(~self) -> IoResult<~rtio::RtioTcpAcceptor:Send> {
self.native_listen(128).map(|a| box a as ~rtio::RtioTcpAcceptor:Send)
fn listen(~self) -> IoResult<Box<rtio::RtioTcpAcceptor:Send>> {
self.native_listen(128).map(|a| {
box a as Box<rtio::RtioTcpAcceptor:Send>
})
}
}
@ -465,8 +469,8 @@ impl rtio::RtioSocket for TcpAcceptor {
}
impl rtio::RtioTcpAcceptor for TcpAcceptor {
fn accept(&mut self) -> IoResult<~rtio::RtioTcpStream:Send> {
self.native_accept().map(|s| box s as ~rtio::RtioTcpStream:Send)
fn accept(&mut self) -> IoResult<Box<rtio::RtioTcpStream:Send>> {
self.native_accept().map(|s| box s as Box<rtio::RtioTcpStream:Send>)
}
fn accept_simultaneously(&mut self) -> IoResult<()> { Ok(()) }
@ -637,7 +641,9 @@ impl rtio::RtioUdpSocket for UdpSocket {
self.set_broadcast(false)
}
fn clone(&self) -> ~rtio::RtioUdpSocket:Send {
box UdpSocket { inner: self.inner.clone() } as ~rtio::RtioUdpSocket:Send
fn clone(&self) -> Box<rtio::RtioUdpSocket:Send> {
box UdpSocket {
inner: self.inner.clone(),
} as Box<rtio::RtioUdpSocket:Send>
}
}


@ -144,8 +144,10 @@ impl rtio::RtioPipe for UnixStream {
}
}
fn clone(&self) -> ~rtio::RtioPipe:Send {
box UnixStream { inner: self.inner.clone() } as ~rtio::RtioPipe:Send
fn clone(&self) -> Box<rtio::RtioPipe:Send> {
box UnixStream {
inner: self.inner.clone(),
} as Box<rtio::RtioPipe:Send>
}
}
@ -176,8 +178,10 @@ impl UnixListener {
}
impl rtio::RtioUnixListener for UnixListener {
fn listen(~self) -> IoResult<~rtio::RtioUnixAcceptor:Send> {
self.native_listen(128).map(|a| box a as ~rtio::RtioUnixAcceptor:Send)
fn listen(~self) -> IoResult<Box<rtio::RtioUnixAcceptor:Send>> {
self.native_listen(128).map(|a| {
box a as Box<rtio::RtioUnixAcceptor:Send>
})
}
}
@ -209,8 +213,8 @@ impl UnixAcceptor {
}
impl rtio::RtioUnixAcceptor for UnixAcceptor {
fn accept(&mut self) -> IoResult<~rtio::RtioPipe:Send> {
self.native_accept().map(|s| box s as ~rtio::RtioPipe:Send)
fn accept(&mut self) -> IoResult<Box<rtio::RtioPipe:Send>> {
self.native_accept().map(|s| box s as Box<rtio::RtioPipe:Send>)
}
fn set_timeout(&mut self, timeout: Option<u64>) {
self.deadline = timeout.map(|a| ::io::timer::now() + a).unwrap_or(0);


@ -353,12 +353,12 @@ impl rtio::RtioPipe for UnixStream {
Ok(())
}
fn clone(&self) -> ~rtio::RtioPipe:Send {
fn clone(&self) -> Box<rtio::RtioPipe:Send> {
box UnixStream {
inner: self.inner.clone(),
read: None,
write: None,
} as ~rtio::RtioPipe:Send
} as Box<rtio::RtioPipe:Send>
}
}
@ -402,8 +402,10 @@ impl Drop for UnixListener {
}
impl rtio::RtioUnixListener for UnixListener {
fn listen(~self) -> IoResult<~rtio::RtioUnixAcceptor:Send> {
self.native_listen().map(|a| box a as ~rtio::RtioUnixAcceptor:Send)
fn listen(~self) -> IoResult<Box<rtio::RtioUnixAcceptor:Send>> {
self.native_listen().map(|a| {
box a as Box<rtio::RtioUnixAcceptor:Send>
})
}
}
@ -526,8 +528,8 @@ impl UnixAcceptor {
}
impl rtio::RtioUnixAcceptor for UnixAcceptor {
fn accept(&mut self) -> IoResult<~rtio::RtioPipe:Send> {
self.native_accept().map(|s| box s as ~rtio::RtioPipe:Send)
fn accept(&mut self) -> IoResult<Box<rtio::RtioPipe:Send>> {
self.native_accept().map(|s| box s as Box<rtio::RtioPipe:Send>)
}
fn set_timeout(&mut self, timeout: Option<u64>) {
self.deadline = timeout.map(|i| i + ::io::timer::now()).unwrap_or(0);


@ -86,7 +86,7 @@ fn shutdown() {
// Clean up after the helper thread
unsafe {
imp::close(HELPER_SIGNAL);
let _chan: ~Sender<Req> = cast::transmute(HELPER_CHAN);
let _chan: Box<Sender<Req>> = cast::transmute(HELPER_CHAN);
HELPER_CHAN = 0 as *mut Sender<Req>;
HELPER_SIGNAL = 0 as imp::signal;
}


@ -60,7 +60,7 @@ use io::timer_helper;
pub struct Timer {
id: uint,
inner: Option<~Inner>,
inner: Option<Box<Inner>>,
}
struct Inner {
@ -74,11 +74,11 @@ struct Inner {
#[allow(visible_private_types)]
pub enum Req {
// Add a new timer to the helper thread.
NewTimer(~Inner),
NewTimer(Box<Inner>),
// Remove a timer based on its id and then send it back on the channel
// provided
RemoveTimer(uint, Sender<~Inner>),
RemoveTimer(uint, Sender<Box<Inner>>),
// Shut down the loop and then ACK this channel once it's shut down
Shutdown,
@ -102,11 +102,11 @@ fn helper(input: libc::c_int, messages: Receiver<Req>) {
// active timers are those which are able to be selected upon (and it's a
// sorted list), and dead timers are those which have expired, but ownership
// hasn't yet been transferred back to the timer itself.
let mut active: Vec<~Inner> = vec![];
let mut active: Vec<Box<Inner>> = vec![];
let mut dead = vec![];
// inserts a timer into an array of timers (sorted by firing time)
fn insert(t: ~Inner, active: &mut Vec<~Inner>) {
fn insert(t: Box<Inner>, active: &mut Vec<Box<Inner>>) {
match active.iter().position(|tm| tm.target > t.target) {
Some(pos) => { active.insert(pos, t); }
None => { active.push(t); }
@ -114,7 +114,8 @@ fn helper(input: libc::c_int, messages: Receiver<Req>) {
}
// signals the first request in the queue, possibly re-enqueueing it.
fn signal(active: &mut Vec<~Inner>, dead: &mut Vec<(uint, ~Inner)>) {
fn signal(active: &mut Vec<Box<Inner>>,
dead: &mut Vec<(uint, Box<Inner>)>) {
let mut timer = match active.shift() {
Some(timer) => timer, None => return
};
@ -229,7 +230,7 @@ impl Timer {
}
}
fn inner(&mut self) -> ~Inner {
fn inner(&mut self) -> Box<Inner> {
match self.inner.take() {
Some(i) => i,
None => {


@ -31,7 +31,7 @@ use io;
use task;
/// Creates a new Task which is ready to execute as a 1:1 task.
pub fn new(stack_bounds: (uint, uint)) -> ~Task {
pub fn new(stack_bounds: (uint, uint)) -> Box<Task> {
let mut task = box Task::new();
let mut ops = ops();
ops.stack_bounds = stack_bounds;
@ -39,7 +39,7 @@ pub fn new(stack_bounds: (uint, uint)) -> ~Task {
return task;
}
fn ops() -> ~Ops {
fn ops() -> Box<Ops> {
box Ops {
lock: unsafe { NativeMutex::new() },
awoken: false,
@ -119,22 +119,22 @@ struct Ops {
}
impl rt::Runtime for Ops {
fn yield_now(~self, mut cur_task: ~Task) {
fn yield_now(~self, mut cur_task: Box<Task>) {
// put the task back in TLS and then invoke the OS thread yield
cur_task.put_runtime(self);
Local::put(cur_task);
Thread::yield_now();
}
fn maybe_yield(~self, mut cur_task: ~Task) {
fn maybe_yield(~self, mut cur_task: Box<Task>) {
// just put the task back in TLS, on OS threads we never need to
// opportunistically yield b/c the OS will do that for us (preemption)
cur_task.put_runtime(self);
Local::put(cur_task);
}
fn wrap(~self) -> ~Any {
self as ~Any
fn wrap(~self) -> Box<Any> {
self as Box<Any>
}
fn stack_bounds(&self) -> (uint, uint) { self.stack_bounds }
@ -159,8 +159,8 @@ impl rt::Runtime for Ops {
// from the wakeup thread back to this thread about the task pointer, and
// there's really no need to. In order to get around this, we cast the task
// to a `uint` which is then used at the end of this function to cast back
// to a `~Task` object. Naturally, this looks like it violates ownership
// semantics in that there may be two `~Task` objects.
// to a `Box<Task>` object. Naturally, this looks like it violates
// ownership semantics in that there may be two `Box<Task>` objects.
//
// The fun part is that the wakeup half of this implementation knows to
// "forget" the task on the other end. This means that the awakening half of
@ -180,7 +180,7 @@ impl rt::Runtime for Ops {
// `awoken` field which indicates whether we were actually woken up via some
// invocation of `reawaken`. This flag is only ever accessed inside the
// lock, so there's no need to make it atomic.
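To make the `uint` round-trip described above concrete, here is a purely illustrative sketch (imports elided; the real logic lives inside `deschedule` and `reawaken` below, and the function names here are invented):

```rust
// Hypothetical sketch only; not part of this patch.
fn forget_task(task: Box<Task>) -> uint {
    // Ownership is "forgotten": the box travels across threads as a bare integer.
    unsafe { cast::transmute(task) }
}

unsafe fn recover_task(ptr: uint) -> Box<Task> {
    // Ownership is reconstituted exactly once, on the awakening side.
    cast::transmute(ptr)
}
```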
fn deschedule(mut ~self, times: uint, mut cur_task: ~Task,
fn deschedule(mut ~self, times: uint, mut cur_task: Box<Task>,
f: |BlockedTask| -> Result<(), BlockedTask>) {
let me = &mut *self as *mut Ops;
cur_task.put_runtime(self);
@ -238,7 +238,7 @@ impl rt::Runtime for Ops {
// See the comments on `deschedule` for why the task is forgotten here, and
// why it's valid to do so.
fn reawaken(mut ~self, mut to_wake: ~Task) {
fn reawaken(mut ~self, mut to_wake: Box<Task>) {
unsafe {
let me = &mut *self as *mut Ops;
to_wake.put_runtime(self);
@ -249,7 +249,10 @@ impl rt::Runtime for Ops {
}
}
fn spawn_sibling(~self, mut cur_task: ~Task, opts: TaskOpts, f: proc():Send) {
fn spawn_sibling(~self,
mut cur_task: Box<Task>,
opts: TaskOpts,
f: proc():Send) {
cur_task.put_runtime(self);
Local::put(cur_task);
@ -342,7 +345,7 @@ mod tests {
let (tx, rx) = channel();
spawn(proc() {
spawn(proc() {
let mut task: ~Task = Local::take();
let mut task: Box<Task> = Local::take();
match task.maybe_take_runtime::<Ops>() {
Some(ops) => {
task.put_runtime(ops);


@ -57,7 +57,7 @@ if rng.gen() { // bool
```
```rust
let tuple_ptr = rand::random::<~(f64, char)>();
let tuple_ptr = rand::random::<Box<(f64, char)>>();
println!("{:?}", tuple_ptr)
```
*/
@ -569,7 +569,7 @@ type TaskRngInner = reseeding::ReseedingRng<StdRng, TaskRngReseeder>;
/// The task-local RNG.
pub struct TaskRng {
// This points into TLS (specifically, it points to the endpoint
// of a ~ stored in TLS, to make it robust against TLS moving
// of a Box stored in TLS, to make it robust against TLS moving
// things internally) and so this struct cannot be legally
// transferred between tasks *and* it's unsafe to deallocate the
// RNG other than when a task is finished.
@ -582,7 +582,7 @@ pub struct TaskRng {
}
// used to make space in TLS for a random number generator
local_data_key!(TASK_RNG_KEY: ~TaskRngInner)
local_data_key!(TASK_RNG_KEY: Box<TaskRngInner>)
/// Retrieve the lazily-initialized task-local random number
/// generator, seeded by the system. Intended to be used in method
@ -833,7 +833,9 @@ mod test {
let _f : f32 = random();
let _o : Option<Option<i8>> = random();
let _many : ((),
(~uint, @int, ~Option<~(@u32, ~(@bool,))>),
(Box<uint>,
@int,
Box<Option<Box<(@u32, Box<(@bool,)>)>>>),
(u8, i8, u16, i16, u32, i32, u64, i64),
(f32, (f64, (f64,)))) = random();
}


@ -214,9 +214,9 @@ impl<T:Rand> Rand for Option<T> {
}
}
impl<T: Rand> Rand for ~T {
impl<T: Rand> Rand for Box<T> {
#[inline]
fn rand<R: Rng>(rng: &mut R) -> ~T { box rng.gen() }
fn rand<R: Rng>(rng: &mut R) -> Box<T> { box rng.gen() }
}
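For context, the renamed impl above is what backs the crate-level example quoted earlier in this patch; repeated here as a minimal usage sketch (imports elided):

```rust
// Sketch: the Rand impl for Box<T> lets a random value be allocated directly
// on the heap.
let boxed_pair = rand::random::<Box<(f64, char)>>();
println!("{:?}", boxed_pair)
```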
impl<T: Rand + 'static> Rand for @T {


@ -59,12 +59,12 @@ pub enum Ast {
Begin(Flags),
End(Flags),
WordBoundary(Flags),
Capture(uint, Option<~str>, ~Ast),
Capture(uint, Option<~str>, Box<Ast>),
// Represent concatenation as a flat vector to avoid blowing the
// stack in the compiler.
Cat(Vec<Ast>),
Alt(~Ast, ~Ast),
Rep(~Ast, Repeater, Greed),
Alt(Box<Ast>, Box<Ast>),
Rep(Box<Ast>, Repeater, Greed),
}
#[deriving(Show, Eq, Clone)]
@ -245,7 +245,7 @@ impl<'a> Parser<'a> {
// alternate and make it a capture.
if cap.is_some() {
let ast = try!(self.pop_ast());
self.push(Capture(cap.unwrap(), cap_name, ~ast));
self.push(Capture(cap.unwrap(), cap_name, box ast));
}
}
'|' => {
@ -331,7 +331,7 @@ impl<'a> Parser<'a> {
_ => {}
}
let greed = try!(self.get_next_greedy());
self.push(Rep(~ast, rep, greed));
self.push(Rep(box ast, rep, greed));
Ok(())
}
@ -411,13 +411,13 @@ impl<'a> Parser<'a> {
let flags = negated | (self.flags & FLAG_NOCASE);
let mut ast = Class(combine_ranges(ranges), flags);
for alt in alts.move_iter() {
ast = Alt(~alt, ~ast)
ast = Alt(box alt, box ast)
}
self.push(ast);
} else if alts.len() > 0 {
let mut ast = alts.pop().unwrap();
for alt in alts.move_iter() {
ast = Alt(~alt, ~ast)
ast = Alt(box alt, box ast)
}
self.push(ast);
}
@ -548,7 +548,7 @@ impl<'a> Parser<'a> {
for _ in iter::range(0, min) {
self.push(ast.clone())
}
self.push(Rep(~ast, ZeroMore, greed));
self.push(Rep(box ast, ZeroMore, greed));
} else {
// Require N copies of what's on the stack and then repeat it
// up to M times optionally.
@ -558,7 +558,7 @@ impl<'a> Parser<'a> {
}
if max.is_some() {
for _ in iter::range(min, max.unwrap()) {
self.push(Rep(~ast.clone(), ZeroOne, greed))
self.push(Rep(box ast.clone(), ZeroOne, greed))
}
}
// It's possible that we popped something off the stack but
@ -842,7 +842,7 @@ impl<'a> Parser<'a> {
// thrown away). But be careful with overflow---we can't count on the
// open paren to be there.
if from > 0 { from = from - 1}
let ast = try!(self.build_from(from, |l,r| Alt(~l, ~r)));
let ast = try!(self.build_from(from, |l,r| Alt(box l, box r)));
self.push(ast);
Ok(())
}


@ -49,7 +49,7 @@ use regex::native::{
#[macro_registrar]
#[doc(hidden)]
pub fn macro_registrar(register: |ast::Name, SyntaxExtension|) {
let expander = ~BasicMacroExpander { expander: native, span: None };
let expander = box BasicMacroExpander { expander: native, span: None };
register(token::intern("regex"), NormalTT(expander, None))
}
@ -76,7 +76,7 @@ pub fn macro_registrar(register: |ast::Name, SyntaxExtension|) {
/// first before trying to understand the code generator. The implementation
/// strategy is identical and vm.rs has comments and will be easier to follow.
fn native(cx: &mut ExtCtxt, sp: codemap::Span, tts: &[ast::TokenTree])
-> ~MacResult {
-> Box<MacResult> {
let regex = match parse(cx, tts) {
Some(r) => r,
// error is logged in 'parse' with cx.span_err


@ -677,11 +677,11 @@ pub fn pretty_print_input(sess: Session,
let mut rdr = MemReader::new(src);
let out = match ofile {
None => box io::stdout() as ~Writer,
None => box io::stdout() as Box<Writer>,
Some(p) => {
let r = io::File::create(&p);
match r {
Ok(w) => box w as ~Writer,
Ok(w) => box w as Box<Writer>,
Err(e) => fail!("pretty-print failed to open {} due to {}",
p.display(), e),
}


@ -229,7 +229,12 @@ fn enc_sty(w: &mut MemWriter, cx: &ctxt, st: &ty::sty) {
enc_substs(w, cx, substs);
mywrite!(w, "]");
}
ty::ty_trait(~ty::TyTrait { def_id, ref substs, store, bounds }) => {
ty::ty_trait(box ty::TyTrait {
def_id,
ref substs,
store,
bounds
}) => {
mywrite!(w, "x[{}|", (cx.ds)(def_id));
enc_substs(w, cx, substs);
enc_trait_store(w, cx, store);


@ -68,13 +68,13 @@ niceties. This means that if you have a type like:
struct S { f: uint }
```
and a variable `a: ~S`, then the rust expression `a.f` would correspond
and a variable `a: Box<S>`, then the rust expression `a.f` would correspond
to an `LV` of `(*a).f`.
Here is the formal grammar for the types we'll consider:
```notrust
TY = () | S<'LT...> | ~TY | & 'LT MQ TY | @ MQ TY
TY = () | S<'LT...> | Box<TY> | & 'LT MQ TY | @ MQ TY
MQ = mut | imm | const
```
@ -97,7 +97,7 @@ Now, imagine we had a program like this:
struct Foo { f: uint, g: uint }
...
'a: {
let mut x: ~Foo = ...;
let mut x: Box<Foo> = ...;
let y = &mut (*x).f;
x = ...;
}
@ -310,7 +310,7 @@ MUTABILITY(LV.f, MQ) // M-Field
MUTABILITY(LV, MQ)
MUTABILITY(*LV, MQ) // M-Deref-Unique
TYPE(LV) = ~Ty
TYPE(LV) = Box<Ty>
MUTABILITY(LV, MQ)
```
@ -420,7 +420,7 @@ The scope of a unique referent is the scope of the pointer, since
the pointer itself `LV` goes out of scope:
```notrust
SCOPE(*LV) = SCOPE(LV) if LV has type ~T
SCOPE(*LV) = SCOPE(LV) if LV has type Box<T>
```
The scope of a managed referent is also the scope of the pointer. This
@ -459,7 +459,7 @@ LIFETIME(LV.f, LT, MQ) // L-Field
LIFETIME(LV, LT, MQ)
LIFETIME(*LV, LT, MQ) // L-Deref-Send
TYPE(LV) = ~Ty
TYPE(LV) = Box<Ty>
LIFETIME(LV, LT, MQ)
```
@ -595,7 +595,7 @@ on `LV`:
```notrust
RESTRICTIONS(*LV, LT, ACTIONS) = RS, (*LV, ACTIONS) // R-Deref-Send-Pointer
TYPE(LV) = ~Ty
TYPE(LV) = Box<Ty>
RESTRICTIONS(LV, LT, ACTIONS|MUTATE|CLAIM) = RS
```
@ -967,8 +967,8 @@ moves/uninitializations of the variable that is being used.
Let's look at a simple example:
```
fn foo(a: ~int) {
let b: ~int; // Gen bit 0.
fn foo(a: Box<int>) {
let b: Box<int>; // Gen bit 0.
if cond { // Bits: 0
use(&*a);


@ -533,7 +533,10 @@ impl<'a> BorrowckCtxt<'a> {
fn move_suggestion(tcx: &ty::ctxt, ty: ty::t, default_msg: &'static str)
-> &'static str {
match ty::get(ty).sty {
ty::ty_closure(~ty::ClosureTy { store: ty::RegionTraitStore(..), .. }) =>
ty::ty_closure(box ty::ClosureTy {
store: ty::RegionTraitStore(..),
..
}) =>
"a non-copyable stack closure (capture it in a new closure, \
e.g. `|x| f(x)`, to override)",
_ if ty::type_moves_by_default(tcx, ty) =>


@ -321,7 +321,7 @@ impl<'a, O:DataFlowOperator+Clone+'static> DataFlowContext<'a, O> {
});
}
fn pretty_print_to(&self, wr: ~io::Writer,
fn pretty_print_to(&self, wr: Box<io::Writer>,
blk: &ast::Block) -> io::IoResult<()> {
let mut ps = pprust::rust_printer_annotated(wr, self);
try!(ps.cbox(pprust::indent_unit));


@ -51,7 +51,7 @@ fn should_explore(tcx: &ty::ctxt, def_id: ast::DefId) -> bool {
struct MarkSymbolVisitor<'a> {
worklist: Vec<ast::NodeId>,
tcx: &'a ty::ctxt,
live_symbols: ~HashSet<ast::NodeId>,
live_symbols: Box<HashSet<ast::NodeId>>,
}
impl<'a> MarkSymbolVisitor<'a> {
@ -285,7 +285,7 @@ fn find_live(tcx: &ty::ctxt,
exported_items: &privacy::ExportedItems,
reachable_symbols: &NodeSet,
krate: &ast::Crate)
-> ~HashSet<ast::NodeId> {
-> Box<HashSet<ast::NodeId>> {
let worklist = create_and_seed_worklist(tcx, exported_items,
reachable_symbols, krate);
let mut symbol_visitor = MarkSymbolVisitor::new(tcx, worklist);
@ -312,7 +312,7 @@ fn get_struct_ctor_id(item: &ast::Item) -> Option<ast::NodeId> {
struct DeadVisitor<'a> {
tcx: &'a ty::ctxt,
live_symbols: ~HashSet<ast::NodeId>,
live_symbols: Box<HashSet<ast::NodeId>>,
}
impl<'a> DeadVisitor<'a> {


@ -198,11 +198,11 @@ fn with_appropriate_checker(cx: &Context,
let fty = ty::node_id_to_type(cx.tcx, id);
match ty::get(fty).sty {
ty::ty_closure(~ty::ClosureTy {
ty::ty_closure(box ty::ClosureTy {
store: ty::UniqTraitStore, bounds, ..
}) => b(|cx, fv| check_for_uniq(cx, fv, bounds)),
ty::ty_closure(~ty::ClosureTy {
ty::ty_closure(box ty::ClosureTy {
store: ty::RegionTraitStore(region, _), bounds, ..
}) => b(|cx, fv| check_for_block(cx, fv, bounds, region)),
@ -331,7 +331,7 @@ pub fn check_expr(cx: &mut Context, e: &Expr) {
fn check_trait_cast(cx: &mut Context, source_ty: ty::t, target_ty: ty::t, span: Span) {
check_cast_for_escaping_regions(cx, source_ty, target_ty, span);
match ty::get(target_ty).sty {
ty::ty_trait(~ty::TyTrait { bounds, .. }) => {
ty::ty_trait(box ty::TyTrait { bounds, .. }) => {
check_trait_cast_bounds(cx, span, source_ty, bounds);
}
_ => {}


@ -240,14 +240,14 @@ static lint_table: &'static [(&'static str, LintSpec)] = &[
("owned_heap_memory",
LintSpec {
lint: OwnedHeapMemory,
desc: "use of owned (~ type) heap memory",
desc: "use of owned (Box type) heap memory",
default: allow
}),
("heap_memory",
LintSpec {
lint: HeapMemory,
desc: "use of any (~ type or @ type) heap memory",
desc: "use of any (Box type or @ type) heap memory",
default: allow
}),
@ -943,8 +943,13 @@ fn check_heap_type(cx: &Context, span: Span, ty: ty::t) {
n_box += 1;
}
ty::ty_uniq(_) |
ty::ty_trait(~ty::TyTrait { store: ty::UniqTraitStore, .. }) |
ty::ty_closure(~ty::ClosureTy { store: ty::UniqTraitStore, .. }) => {
ty::ty_trait(box ty::TyTrait {
store: ty::UniqTraitStore, ..
}) |
ty::ty_closure(box ty::ClosureTy {
store: ty::UniqTraitStore,
..
}) => {
n_uniq += 1;
}
@ -955,7 +960,7 @@ fn check_heap_type(cx: &Context, span: Span, ty: ty::t) {
if n_uniq > 0 && lint != ManagedHeapMemory {
let s = ty_to_str(cx.tcx, ty);
let m = format!("type uses owned (~ type) pointers: {}", s);
let m = format!("type uses owned (Box type) pointers: {}", s);
cx.span_lint(lint, span, m);
}


@ -173,8 +173,8 @@ pub enum deref_kind {
pub fn opt_deref_kind(t: ty::t) -> Option<deref_kind> {
match ty::get(t).sty {
ty::ty_uniq(_) |
ty::ty_trait(~ty::TyTrait { store: ty::UniqTraitStore, .. }) |
ty::ty_closure(~ty::ClosureTy {store: ty::UniqTraitStore, ..}) => {
ty::ty_trait(box ty::TyTrait { store: ty::UniqTraitStore, .. }) |
ty::ty_closure(box ty::ClosureTy {store: ty::UniqTraitStore, ..}) => {
Some(deref_ptr(OwnedPtr))
}
@ -182,12 +182,18 @@ pub fn opt_deref_kind(t: ty::t) -> Option<deref_kind> {
let kind = ty::BorrowKind::from_mutbl(mt.mutbl);
Some(deref_ptr(BorrowedPtr(kind, r)))
}
ty::ty_trait(~ty::TyTrait { store: ty::RegionTraitStore(r, mutbl), .. }) => {
ty::ty_trait(box ty::TyTrait {
store: ty::RegionTraitStore(r, mutbl),
..
}) => {
let kind = ty::BorrowKind::from_mutbl(mutbl);
Some(deref_ptr(BorrowedPtr(kind, r)))
}
ty::ty_closure(~ty::ClosureTy {store: ty::RegionTraitStore(r, _), ..}) => {
ty::ty_closure(box ty::ClosureTy {
store: ty::RegionTraitStore(r, _),
..
}) => {
Some(deref_ptr(BorrowedPtr(ty::ImmBorrow, r)))
}


@ -650,7 +650,6 @@ fn resolve_local(visitor: &mut RegionResolutionVisitor,
* | VariantName(..., P&, ...)
* | [ ..., P&, ... ]
* | ( ..., P&, ... )
* | ~P&
* | box P&
*/
@ -704,7 +703,7 @@ fn resolve_local(visitor: &mut RegionResolutionVisitor,
* | [ ..., E&, ... ]
* | ( ..., E&, ... )
* | {...; E&}
* | ~E&
* | box E&
* | E& as ...
* | ( E& )
*/


@ -1155,7 +1155,7 @@ impl<'a> DynamicFailureHandler<'a> {
enum FailureHandler<'a> {
Infallible,
JumpToBasicBlock(BasicBlockRef),
DynamicFailureHandlerClass(~DynamicFailureHandler<'a>),
DynamicFailureHandlerClass(Box<DynamicFailureHandler<'a>>),
}
impl<'a> FailureHandler<'a> {


@ -248,9 +248,10 @@ pub fn is_ffi_safe(tcx: &ty::ctxt, def_id: ast::DefId) -> bool {
if hint.is_ffi_safe() {
return true;
}
// Option<~T> and similar are used in FFI. Rather than try to resolve type parameters
// and recognize this case exactly, this overapproximates -- assuming that if a
// non-C-like enum is being used in FFI then the user knows what they're doing.
// Option<Box<T>> and similar are used in FFI. Rather than try to
// resolve type parameters and recognize this case exactly, this
// overapproximates -- assuming that if a non-C-like enum is being
// used in FFI then the user knows what they're doing.
if variants.iter().any(|vi| !vi.args.is_empty()) {
return true;
}
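A hedged sketch of the FFI pattern that comment refers to (the `Node` struct and `make_node` foreign function are invented for illustration):

```rust
// Hypothetical sketch only; not part of this patch.
struct Node { value: int }

extern {
    // A non-C-like enum (Option<Box<Node>>) crossing the FFI boundary:
    // None plays the role of a NULL pointer.
    fn make_node(value: int) -> Option<Box<Node>>;
}
```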


@ -26,6 +26,7 @@ use middle::ty;
use syntax::ast;
use util::ppaux::Repr;
pub struct CleanupScope<'a> {
// The id of this cleanup scope. If the id is None,
// this is a *temporary scope* that is pushed during trans to
@ -35,7 +36,7 @@ pub struct CleanupScope<'a> {
kind: CleanupScopeKind<'a>,
// Cleanups to run upon scope exit.
cleanups: Vec<~Cleanup>,
cleanups: Vec<Box<Cleanup>>,
cached_early_exits: Vec<CachedEarlyExit>,
cached_landing_pad: Option<BasicBlockRef>,
@ -248,7 +249,7 @@ impl<'a> CleanupMethods<'a> for FunctionContext<'a> {
self.ccx.tn.val_to_str(val),
ty.repr(self.ccx.tcx()));
self.schedule_clean(cleanup_scope, drop as ~Cleanup);
self.schedule_clean(cleanup_scope, drop as Box<Cleanup>);
}
fn schedule_drop_immediate(&self,
@ -272,7 +273,7 @@ impl<'a> CleanupMethods<'a> for FunctionContext<'a> {
self.ccx.tn.val_to_str(val),
ty.repr(self.ccx.tcx()));
self.schedule_clean(cleanup_scope, drop as ~Cleanup);
self.schedule_clean(cleanup_scope, drop as Box<Cleanup>);
}
fn schedule_free_value(&self,
@ -291,12 +292,12 @@ impl<'a> CleanupMethods<'a> for FunctionContext<'a> {
self.ccx.tn.val_to_str(val),
heap);
self.schedule_clean(cleanup_scope, drop as ~Cleanup);
self.schedule_clean(cleanup_scope, drop as Box<Cleanup>);
}
fn schedule_clean(&self,
cleanup_scope: ScopeId,
cleanup: ~Cleanup) {
cleanup: Box<Cleanup>) {
match cleanup_scope {
AstScope(id) => self.schedule_clean_in_ast_scope(id, cleanup),
CustomScope(id) => self.schedule_clean_in_custom_scope(id, cleanup),
@ -305,7 +306,7 @@ impl<'a> CleanupMethods<'a> for FunctionContext<'a> {
fn schedule_clean_in_ast_scope(&self,
cleanup_scope: ast::NodeId,
cleanup: ~Cleanup) {
cleanup: Box<Cleanup>) {
/*!
* Schedules a cleanup to occur upon exit from `cleanup_scope`.
* If `cleanup_scope` is not provided, then the cleanup is scheduled
@ -333,7 +334,7 @@ impl<'a> CleanupMethods<'a> for FunctionContext<'a> {
fn schedule_clean_in_custom_scope(&self,
custom_scope: CustomScopeIndex,
cleanup: ~Cleanup) {
cleanup: Box<Cleanup>) {
/*!
* Schedules a cleanup to occur in the top-most scope,
* which must be a temporary scope.
@ -909,13 +910,13 @@ pub trait CleanupMethods<'a> {
heap: Heap);
fn schedule_clean(&self,
cleanup_scope: ScopeId,
cleanup: ~Cleanup);
cleanup: Box<Cleanup>);
fn schedule_clean_in_ast_scope(&self,
cleanup_scope: ast::NodeId,
cleanup: ~Cleanup);
cleanup: Box<Cleanup>);
fn schedule_clean_in_custom_scope(&self,
custom_scope: CustomScopeIndex,
cleanup: ~Cleanup);
cleanup: Box<Cleanup>);
fn needs_invoke(&self) -> bool;
fn get_landing_pad(&'a self) -> BasicBlockRef;
}


@ -57,7 +57,7 @@ For example, the following simple type for a singly-linked list...
```
struct List {
value: int,
tail: Option<~List>,
tail: Option<Box<List>>,
}
```
@ -66,8 +66,8 @@ will generate the following callstack with a naive DFS algorithm:
```
describe(t = List)
describe(t = int)
describe(t = Option<~List>)
describe(t = ~List)
describe(t = Option<Box<List>>)
describe(t = Box<List>)
describe(t = List) // at the beginning again...
...
```
@ -211,7 +211,7 @@ pub struct FunctionDebugContext {
}
enum FunctionDebugContextRepr {
FunctionDebugContext(~FunctionDebugContextData),
FunctionDebugContext(Box<FunctionDebugContextData>),
DebugInfoDisabled,
FunctionWithoutDebugInfo,
}
@ -219,7 +219,7 @@ enum FunctionDebugContextRepr {
impl FunctionDebugContext {
fn get_ref<'a>(&'a self, cx: &CrateContext, span: Span) -> &'a FunctionDebugContextData {
match self.repr {
FunctionDebugContext(~ref data) => data,
FunctionDebugContext(box ref data) => data,
DebugInfoDisabled => {
cx.sess().span_bug(span, FunctionDebugContext::debuginfo_disabled_message());
}
@ -560,7 +560,7 @@ pub fn set_source_location(fcx: &FunctionContext,
set_debug_location(fcx.ccx, UnknownLocation);
return;
}
FunctionDebugContext(~ref function_debug_context) => {
FunctionDebugContext(box ref function_debug_context) => {
let cx = fcx.ccx;
debug!("set_source_location: {}", cx.sess().codemap().span_to_str(span));
@ -596,7 +596,7 @@ pub fn clear_source_location(fcx: &FunctionContext) {
/// translated.
pub fn start_emitting_source_locations(fcx: &FunctionContext) {
match fcx.debug_context.repr {
FunctionDebugContext(~ref data) => {
FunctionDebugContext(box ref data) => {
data.source_locations_enabled.set(true)
},
_ => { /* safe to ignore */ }
@ -2227,7 +2227,12 @@ fn type_metadata(cx: &CrateContext,
ty::ty_closure(ref closurety) => {
subroutine_type_metadata(cx, &closurety.sig, usage_site_span)
}
ty::ty_trait(~ty::TyTrait { def_id, ref substs, store, ref bounds }) => {
ty::ty_trait(box ty::TyTrait {
def_id,
ref substs,
store,
ref bounds
}) => {
trait_metadata(cx, def_id, t, substs, store, bounds)
}
ty::ty_struct(def_id, ref substs) => {


@ -31,7 +31,7 @@ expression functions depending on the kind of expression. We divide
up expressions into:
- **Datum expressions:** Those that most naturally yield values.
Examples would be `22`, `~x`, or `a + b` (when not overloaded).
Examples would be `22`, `box x`, or `a + b` (when not overloaded).
- **DPS expressions:** Those that most naturally write into a location
in memory. Examples would be `foo()` or `Point { x: 3, y: 4 }`.
- **Statement expressions:** Those that do not generate a meaningful
@ -107,7 +107,7 @@ Somewhat surprisingly, not all lvalue expressions yield lvalue datums
when trans'd. Ultimately the reason for this is to micro-optimize
the resulting LLVM. For example, consider the following code:
fn foo() -> ~int { ... }
fn foo() -> Box<int> { ... }
let x = *foo();
The expression `*foo()` is an lvalue, but if you invoke `expr::trans`,
@ -169,7 +169,7 @@ is fully initialized, then the cleanup will run and try to free or
drop uninitialized memory. If the initialization itself produces
byproducts that need to be freed, then you should use temporary custom
scopes to ensure that those byproducts will get freed on unwind. For
example, an expression like `~foo()` will first allocate a box in the
example, an expression like `box foo()` will first allocate a box in the
heap and then call `foo()` -- if `foo()` should fail, this box needs
to be *shallowly* freed.
@ -219,11 +219,11 @@ unwind, and only up until the point where execution succeeded, at
which time the complete value should be stored in an lvalue or some
other place where normal cleanup applies.
To spell it out, here is an example. Imagine an expression `~expr`.
To spell it out, here is an example. Imagine an expression `box expr`.
We would basically:
1. Push a custom cleanup scope C.
2. Allocate the `~` box.
2. Allocate the box.
3. Schedule a shallow free in the scope C.
4. Trans `expr` into the box.
5. Pop the scope C.
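A hedged pseudo-code sketch of those five steps, phrased in terms of the cleanup and malloc helpers touched elsewhere in this patch (the `push_custom_cleanup_scope`/`pop_custom_cleanup_scope` names and the exact argument order of `schedule_free_value` are assumed rather than quoted from this diff):

```rust
// Illustrative outline of translating `box expr`; not a literal excerpt.
let scope = fcx.push_custom_cleanup_scope();                  // 1. push custom scope C
let Result { bcx, val } = malloc_raw_dyn(bcx, box_ty, size);  // 2. allocate the box
fcx.schedule_free_value(CustomScope(scope), val, heap);       // 3. schedule a shallow free in C
let bcx = trans_into(bcx, contents, SaveIn(val));             // 4. trans `expr` into the box
fcx.pop_custom_cleanup_scope(scope);                          // 5. pop scope C
```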


@ -397,8 +397,8 @@ fn trans_datum_unadjusted<'a>(bcx: &'a Block<'a>,
DatumBlock(bcx, datum)
}
ast::ExprBox(_, contents) => {
// Special case for `~T`. (The other case, for GC, is handled in
// `trans_rvalue_dps_unadjusted`.)
// Special case for `box T`. (The other case, for GC, is handled
// in `trans_rvalue_dps_unadjusted`.)
let box_ty = expr_ty(bcx, expr);
let contents_ty = expr_ty(bcx, contents);
trans_uniq_expr(bcx, box_ty, contents, contents_ty)
@ -1171,11 +1171,12 @@ fn trans_uniq_expr<'a>(bcx: &'a Block<'a>,
let llty = type_of::type_of(bcx.ccx(), contents_ty);
let size = llsize_of(bcx.ccx(), llty);
// We need to make a pointer type because box_ty is ty_bot
// if content_ty is, e.g. ~fail!().
// if content_ty is, e.g. box fail!().
let real_box_ty = ty::mk_uniq(bcx.tcx(), contents_ty);
let Result { bcx, val } = malloc_raw_dyn(bcx, real_box_ty, size);
// Unique boxes do not allocate for zero-size types. The standard library may assume
// that `free` is never called on the pointer returned for `~ZeroSizeType`.
// Unique boxes do not allocate for zero-size types. The standard library
// may assume that `free` is never called on the pointer returned for
// `Box<ZeroSizeType>`.
let bcx = if llsize_of_alloc(bcx.ccx(), llty) == 0 {
trans_into(bcx, contents, SaveIn(val))
} else {
@ -1774,8 +1775,8 @@ fn deref_once<'a>(bcx: &'a Block<'a>,
* Basically, the idea is to make the deref of an rvalue
* result in an rvalue. This helps to avoid intermediate stack
* slots in the resulting LLVM. The idea here is that, if the
* `~T` pointer is an rvalue, then we can schedule a *shallow*
* free of the `~T` pointer, and then return a ByRef rvalue
* `Box<T>` pointer is an rvalue, then we can schedule a *shallow*
* free of the `Box<T>` pointer, and then return a ByRef rvalue
* into the pointer. Because the free is shallow, it is legit
* to return an rvalue, because we know that the contents are
* not yet scheduled to be freed. The language rules ensure that the


@ -89,7 +89,7 @@ fn get_drop_glue_type(ccx: &CrateContext, t: ty::t) -> ty::t {
let llty = sizing_type_of(ccx, typ);
// Unique boxes do not allocate for zero-size types. The standard
// library may assume that `free` is never called on the pointer
// returned for `~ZeroSizeType`.
// returned for `Box<ZeroSizeType>`.
if llsize_of_alloc(ccx, llty) == 0 {
ty::mk_i8()
} else {
@ -318,7 +318,7 @@ fn make_drop_glue<'a>(bcx: &'a Block<'a>, v0: ValueRef, t: ty::t) -> &'a Block<'
}
}
}
ty::ty_trait(~ty::TyTrait { store: ty::UniqTraitStore, .. }) => {
ty::ty_trait(box ty::TyTrait { store: ty::UniqTraitStore, .. }) => {
let lluniquevalue = GEPi(bcx, v0, [0, abi::trt_field_box]);
// Only drop the value when it is non-null
with_cond(bcx, IsNotNull(bcx, Load(bcx, lluniquevalue)), |bcx| {


@ -344,7 +344,7 @@ fn trans_trait_callee<'a>(bcx: &'a Block<'a>,
-> Callee<'a> {
/*!
* Create a method callee where the method is coming from a trait
* object (e.g., ~Trait type). In this case, we must pull the fn
* object (e.g., Box<Trait> type). In this case, we must pull the fn
* pointer out of the vtable that is packaged up with the object.
* Objects are represented as a pair, so we first evaluate the self
* expression and then extract the self data and vtable out of the
@ -401,7 +401,7 @@ pub fn trans_trait_callee_from_llval<'a>(bcx: &'a Block<'a>,
// Load the function from the vtable and cast it to the expected type.
debug!("(translating trait callee) loading method");
// Replace the self type (&Self or ~Self) with an opaque pointer.
// Replace the self type (&Self or Box<Self>) with an opaque pointer.
let llcallee_ty = match ty::get(callee_ty).sty {
ty::ty_bare_fn(ref f) if f.abi == Rust => {
type_of_rust_fn(ccx, true, f.sig.inputs.slice_from(1), f.sig.output)
@ -527,8 +527,8 @@ pub fn trans_trait_cast<'a>(bcx: &'a Block<'a>,
dest: expr::Dest)
-> &'a Block<'a> {
/*!
* Generates the code to convert from a pointer (`~T`, `&T`, etc)
* into an object (`~Trait`, `&Trait`, etc). This means creating a
* Generates the code to convert from a pointer (`Box<T>`, `&T`, etc)
* into an object (`Box<Trait>`, `&Trait`, etc). This means creating a
* pair where the first word is the vtable and the second word is
* the pointer.
*/
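At the source level, the conversion this function translates now reads as a `box ... as Box<Trait>` cast; a minimal illustrative sketch (the `Draw` trait and `Circle` struct are invented):

```rust
// Hypothetical sketch only; not part of this patch.
trait Draw { fn draw(&self); }
struct Circle;
impl Draw for Circle { fn draw(&self) { /* render a circle */ } }

// The cast packages the value up as a (vtable, data) pair.
let object = box Circle as Box<Draw>;
object.draw();
```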


@ -128,7 +128,7 @@ pub struct mt {
#[deriving(Clone, Eq, TotalEq, Hash, Encodable, Decodable, Show)]
pub enum TraitStore {
/// ~Trait
/// Box<Trait>
UniqTraitStore,
/// &Trait and &mut Trait
RegionTraitStore(Region, ast::Mutability),
@ -229,7 +229,7 @@ pub enum AutoRef {
/// Convert from T to *T
AutoUnsafe(ast::Mutability),
/// Convert from ~Trait/&Trait to &Trait
/// Convert from Box<Trait>/&Trait to &Trait
AutoBorrowObj(Region, ast::Mutability),
}
@ -239,7 +239,7 @@ pub enum AutoRef {
pub struct ctxt {
// Specifically use a speedy hash algorithm for this hash map, it's used
// quite often.
pub interner: RefCell<FnvHashMap<intern_key, ~t_box_>>,
pub interner: RefCell<FnvHashMap<intern_key, Box<t_box_>>>,
pub next_id: Cell<uint>,
pub sess: Session,
pub def_map: resolve::DefMap,
@ -735,8 +735,8 @@ pub enum sty {
ty_ptr(mt),
ty_rptr(Region, mt),
ty_bare_fn(BareFnTy),
ty_closure(~ClosureTy),
ty_trait(~TyTrait),
ty_closure(Box<ClosureTy>),
ty_trait(Box<TyTrait>),
ty_struct(DefId, substs),
ty_tup(Vec<t>),
@ -1195,7 +1195,7 @@ pub fn mk_t(cx: &ctxt, st: sty) -> t {
&ty_enum(_, ref substs) | &ty_struct(_, ref substs) => {
flags |= sflags(substs);
}
&ty_trait(~ty::TyTrait { ref substs, store, .. }) => {
&ty_trait(box ty::TyTrait { ref substs, store, .. }) => {
flags |= sflags(substs);
match store {
RegionTraitStore(r, _) => {
@ -1482,7 +1482,7 @@ pub fn maybe_walk_ty(ty: t, f: |t| -> bool) {
maybe_walk_ty(tm.ty, f);
}
ty_enum(_, ref substs) | ty_struct(_, ref substs) |
ty_trait(~TyTrait { ref substs, .. }) => {
ty_trait(box TyTrait { ref substs, .. }) => {
for subty in (*substs).tps.iter() { maybe_walk_ty(*subty, |x| f(x)); }
}
ty_tup(ref ts) => { for tt in ts.iter() { maybe_walk_ty(*tt, |x| f(x)); } }
@ -1951,7 +1951,7 @@ impl TypeContents {
pub fn owned_pointer(&self) -> TypeContents {
/*!
* Includes only those bits that still apply
* when indirected through a `~` pointer
* when indirected through a `Box` pointer
*/
TC::OwnsOwned | (
*self & (TC::OwnsAll | TC::ReachesAll))
@ -2050,7 +2050,7 @@ pub fn type_contents(cx: &ctxt, ty: t) -> TypeContents {
// private cache for this walk. This is needed in the case of cyclic
// types like:
//
// struct List { next: ~Option<List>, ... }
// struct List { next: Box<Option<List>>, ... }
//
// When computing the type contents of such a type, we wind up deeply
// recursing as we go. So when we encounter the recursive reference
@ -2100,7 +2100,7 @@ pub fn type_contents(cx: &ctxt, ty: t) -> TypeContents {
}
}
ty_trait(~ty::TyTrait { store, bounds, .. }) => {
ty_trait(box ty::TyTrait { store, bounds, .. }) => {
object_contents(cx, store, bounds)
}
@ -2965,7 +2965,7 @@ pub fn adjust_ty(cx: &ctxt,
fn borrow_obj(cx: &ctxt, span: Span, r: Region,
m: ast::Mutability, ty: ty::t) -> ty::t {
match get(ty).sty {
ty_trait(~ty::TyTrait {def_id, ref substs, bounds, .. }) => {
ty_trait(box ty::TyTrait {def_id, ref substs, bounds, .. }) => {
ty::mk_trait(cx, def_id, substs.clone(),
RegionTraitStore(r, m), bounds)
}
@ -3164,7 +3164,7 @@ pub fn expr_kind(tcx: &ctxt, expr: &ast::Expr) -> ExprKind {
// writing) it's not easy to distinguish casts to traits
// from other casts based on the AST. This should be
// easier in the future, when casts to traits
// would like @Foo, ~Foo, or &Foo.
// would look like @Foo, Box<Foo>, or &Foo.
RvalueDatumExpr
}
}
@ -3192,7 +3192,7 @@ pub fn expr_kind(tcx: &ctxt, expr: &ast::Expr) -> ExprKind {
}
ast::ExprBox(place, _) => {
// Special case `~T` for now:
// Special case `Box<T>` for now:
let definition = match tcx.def_map.borrow().find(&place.id) {
Some(&def) => def,
None => fail!("no def for place"),
@ -3264,7 +3264,7 @@ pub fn ty_sort_str(cx: &ctxt, t: t) -> ~str {
ty_enum(id, _) => format!("enum {}", item_path_str(cx, id)),
ty_box(_) => "@-ptr".to_owned(),
ty_uniq(_) => "~-ptr".to_owned(),
ty_uniq(_) => "box".to_owned(),
ty_vec(_, _) => "vector".to_owned(),
ty_ptr(_) => "*-ptr".to_owned(),
ty_rptr(_, _) => "&-ptr".to_owned(),
@ -3614,7 +3614,9 @@ pub fn try_add_builtin_trait(tcx: &ctxt,
pub fn ty_to_def_id(ty: t) -> Option<ast::DefId> {
match get(ty).sty {
ty_trait(~TyTrait { def_id: id, .. }) | ty_struct(id, _) | ty_enum(id, _) => Some(id),
ty_trait(box TyTrait { def_id: id, .. }) |
ty_struct(id, _) |
ty_enum(id, _) => Some(id),
_ => None
}
}
@ -4575,7 +4577,7 @@ pub fn hash_crate_independent(tcx: &ctxt, t: t, svh: &Svh) -> u64 {
}
}
}
ty_trait(~ty::TyTrait { def_id: d, store, bounds, .. }) => {
ty_trait(box ty::TyTrait { def_id: d, store, bounds, .. }) => {
byte!(17);
did(&mut state, d);
match store {

View File

@ -149,8 +149,13 @@ pub fn super_fold_sty<T:TypeFolder>(this: &mut T,
ty::ty_enum(tid, ref substs) => {
ty::ty_enum(tid, this.fold_substs(substs))
}
ty::ty_trait(~ty::TyTrait { def_id, ref substs, store, bounds }) => {
ty::ty_trait(~ty::TyTrait{
ty::ty_trait(box ty::TyTrait {
def_id,
ref substs,
store,
bounds
}) => {
ty::ty_trait(box ty::TyTrait {
def_id: def_id,
substs: this.fold_substs(substs),
store: this.fold_trait_store(store),

View File

@ -876,7 +876,8 @@ fn conv_builtin_bounds(tcx: &ty::ctxt, ast_bounds: &Option<OwnedSlice<ast::TyPar
//! legal.
//! If no bounds were specified, we choose a "default" bound based on
//! the allocation type of the fn/trait, as per issue #7264. The user can
//! override this with an empty bounds list, e.g. "~fn:()" or "~Trait:".
//! override this with an empty bounds list, e.g. "Box<fn:()>" or
//! "Box<Trait:>".
match (ast_bounds, store) {
(&Some(ref bound_vec), _) => {

View File

@ -691,7 +691,7 @@ pub fn check_pat(pcx: &pat_ctxt, pat: &ast::Pat, expected: ty::t) {
}
}
// Helper function to check @, ~ and & patterns
// Helper function to check @, box and & patterns
pub fn check_pointer_pat(pcx: &pat_ctxt,
pointer_kind: PointerKind,
inner: &ast::Pat,
@ -721,7 +721,7 @@ pub fn check_pointer_pat(pcx: &pat_ctxt,
e, actual)})},
Some(expected),
format!("{} pattern", match pointer_kind {
Send => "a `~`-box",
Send => "a box",
Borrowed => "an `&`-pointer"
}),
None);

View File

@ -30,7 +30,7 @@ itself (note that inherent impls can only be defined in the same
module as the type itself).
Inherent candidates are not always derived from impls. If you have a
trait instance, such as a value of type `~ToStr`, then the trait
trait instance, such as a value of type `Box<ToStr>`, then the trait
methods (`to_str()`, in this case) are inherently associated with it.
Another case is type parameters, in which case the methods of their
bounds are inherent.
@ -72,9 +72,9 @@ Both the inherent candidate collection and the candidate selection
proceed by progressively deref'ing the receiver type, after all. The
answer is that two phases are needed to elegantly deal with explicit
self. After all, if there is an impl for the type `Foo`, it can
define a method with the type `~self`, which means that it expects a
receiver of type `~Foo`. If we have a receiver of type `~Foo`, but we
waited to search for that impl until we have deref'd the `~` away and
define a method with the type `Box<self>`, which means that it expects a
receiver of type `Box<Foo>`. If we have a receiver of type `Box<Foo>`, but we
waited to search for that impl until we have deref'd the `Box` away and
obtained the type `Foo`, we would never match this method.
*/
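
The candidate search described above repeatedly auto-derefs the receiver before giving up; a hedged sketch of what that buys for a doubly-boxed receiver (the `Counter` type is invented):

~~~
struct Counter { n: int }

impl Counter {
    fn get(&self) -> int { self.n }
}

fn main() {
    let inner = box Counter { n: 3 };
    let outer: Box<Box<Counter>> = box inner;
    // Lookup derefs Box<Box<Counter>> -> Box<Counter> -> Counter and only
    // then finds the inherent `get` impl.
    println!("{}", outer.get());
}
~~~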
@ -243,15 +243,15 @@ fn construct_transformed_self_ty_for_object(
*
* trait Foo {
* fn r_method<'a>(&'a self);
* fn u_method(~self);
* fn u_method(Box<self>);
* }
*
* Now, assuming that `r_method` is being called, we want the
* result to be `&'a Foo`. Assuming that `u_method` is being
* called, we want the result to be `~Foo`. Of course,
* called, we want the result to be `Box<Foo>`. Of course,
* this transformation has already been done as part of
* `method_ty.fty.sig.inputs[0]`, but there the type
* is expressed in terms of `Self` (i.e., `&'a Self`, `~Self`).
* is expressed in terms of `Self` (i.e., `&'a Self`, `Box<Self>`).
* Because objects are not standalone types, we can't just substitute
* `s/Self/Foo/`, so we must instead perform this kind of hokey
* match below.
@ -328,8 +328,8 @@ struct Candidate {
/// considered to "match" a given method candidate. Typically the test
/// is whether the receiver is of a particular type. However, this
/// type is the type of the receiver *after accounting for the
/// method's self type* (e.g., if the method is an `~self` method, we
/// have *already verified* that the receiver is of some type `~T` and
/// method's self type* (e.g., if the method is a `Box<self>` method, we
/// have *already verified* that the receiver is of some type `Box<T>` and
/// now we must check that the type `T` is correct). Unfortunately,
/// because traits are not types, this is a pain to do.
#[deriving(Clone)]
@ -421,14 +421,14 @@ impl<'a> LookupContext<'a> {
* `self.inherent_candidates`. See comment at the start of
* the file. To find the inherent candidates, we repeatedly
* deref the self-ty to find the "base-type". So, for
* example, if the receiver is ~~C where `C` is a struct type,
* example, if the receiver is Box<Box<C>> where `C` is a struct type,
* we'll want to find the inherent impls for `C`.
*/
let span = self.self_expr.map_or(self.span, |e| e.span);
check::autoderef(self.fcx, span, self_ty, None, PreferMutLvalue, |self_ty, _| {
match get(self_ty).sty {
ty_trait(~TyTrait { def_id, ref substs, .. }) => {
ty_trait(box TyTrait { def_id, ref substs, .. }) => {
self.push_inherent_candidates_from_object(def_id, substs);
self.push_inherent_impl_candidates_for_type(def_id);
}
@ -767,9 +767,9 @@ impl<'a> LookupContext<'a> {
* consuming the original pointer.
*
* You might think that this would be a natural byproduct of
* the auto-deref/auto-ref process. This is true for `~T`
* but not for an `&mut T` receiver. With `~T`, we would
* begin by testing for methods with a self type `~T`,
* the auto-deref/auto-ref process. This is true for `Box<T>`
* but not for an `&mut T` receiver. With `Box<T>`, we would
* begin by testing for methods with a self type `Box<T>`,
* then autoderef to `T`, then autoref to `&mut T`. But with
* an `&mut T` receiver the process begins with `&mut T`, only
* without any autoadjustments.
@ -797,7 +797,7 @@ impl<'a> LookupContext<'a> {
autoref: Some(auto)})
}
ty::ty_trait(~ty::TyTrait {
ty::ty_trait(box ty::TyTrait {
def_id, ref substs, store: ty::RegionTraitStore(_, mutbl), bounds
}) => {
let region =
@ -902,8 +902,13 @@ impl<'a> LookupContext<'a> {
},
ty_vec(mt, Some(_)) => self.auto_slice_vec(mt, autoderefs),
ty_trait(~ty::TyTrait { def_id: trt_did, substs: trt_substs, bounds: b, .. }) => {
// Coerce ~/&Trait instances to &Trait.
ty_trait(box ty::TyTrait {
def_id: trt_did,
substs: trt_substs,
bounds: b,
..
}) => {
// Coerce Box/&Trait instances to &Trait.
self.search_for_some_kind_of_autorefd_method(
AutoBorrowObj, autoderefs, [MutImmutable, MutMutable],
@ -1361,7 +1366,7 @@ impl<'a> LookupContext<'a> {
}
}
ty::ty_trait(~ty::TyTrait {
ty::ty_trait(box ty::TyTrait {
def_id: self_did, store: RegionTraitStore(_, self_m), ..
}) => {
mutability_matches(self_m, m) &&
@ -1382,7 +1387,7 @@ impl<'a> LookupContext<'a> {
}
}
ty::ty_trait(~ty::TyTrait {
ty::ty_trait(box ty::TyTrait {
def_id: self_did, store: UniqTraitStore, ..
}) => {
rcvr_matches_object(self_did, candidate)

View File

@ -1912,7 +1912,7 @@ fn check_expr_with_unifier(fcx: &FnCtxt,
let fn_sig = match *fn_sty {
ty::ty_bare_fn(ty::BareFnTy {sig: ref sig, ..}) |
ty::ty_closure(~ty::ClosureTy {sig: ref sig, ..}) => sig,
ty::ty_closure(box ty::ClosureTy {sig: ref sig, ..}) => sig,
_ => {
fcx.type_error_message(call_expr.span, |actual| {
format!("expected function but \

View File

@ -367,7 +367,7 @@ fn constrain_bindings_in_pat(pat: &ast::Pat, rcx: &mut Rcx) {
// accessed. We must be wary of loops like this:
//
// // from src/test/compile-fail/borrowck-lend-flow.rs
// let mut v = ~3, w = ~4;
// let mut v = box 3, w = box 4;
// let mut x = &mut w;
// loop {
// **x += 1; // (2)
@ -539,7 +539,7 @@ fn visit_expr(rcx: &mut Rcx, expr: &ast::Expr) {
// explaining how it goes about doing that.
let target_ty = rcx.resolve_node_type(expr.id);
match ty::get(target_ty).sty {
ty::ty_trait(~ty::TyTrait {
ty::ty_trait(box ty::TyTrait {
store: ty::RegionTraitStore(trait_region, _), ..
}) => {
let source_ty = rcx.resolve_expr_type_adjusted(source);
@ -609,7 +609,7 @@ fn check_expr_fn_block(rcx: &mut Rcx,
let tcx = rcx.fcx.tcx();
let function_type = rcx.resolve_node_type(expr.id);
match ty::get(function_type).sty {
ty::ty_closure(~ty::ClosureTy {
ty::ty_closure(box ty::ClosureTy {
store: ty::RegionTraitStore(region, _), ..}) => {
freevars::with_freevars(tcx, expr.id, |freevars| {
if freevars.is_empty() {
@ -635,7 +635,10 @@ fn check_expr_fn_block(rcx: &mut Rcx,
rcx.set_repeating_scope(repeating_scope);
match ty::get(function_type).sty {
ty::ty_closure(~ty::ClosureTy {store: ty::RegionTraitStore(..), ..}) => {
ty::ty_closure(box ty::ClosureTy {
store: ty::RegionTraitStore(..),
..
}) => {
freevars::with_freevars(tcx, expr.id, |freevars| {
propagate_upupvar_borrow_kind(rcx, expr, freevars);
})

View File

@ -532,7 +532,7 @@ pub fn early_resolve_expr(ex: &ast::Expr, fcx: &FnCtxt, is_early: bool) {
let resolve_object_cast = |src: &ast::Expr, target_ty: ty::t| {
match ty::get(target_ty).sty {
// Bounds of type's contents are not checked here, but in kind.rs.
ty::ty_trait(~ty::TyTrait {
ty::ty_trait(box ty::TyTrait {
def_id: target_def_id, substs: ref target_substs, store, ..
}) => {
fn mutability_allowed(a_mutbl: ast::Mutability,
@ -543,7 +543,7 @@ pub fn early_resolve_expr(ex: &ast::Expr, fcx: &FnCtxt, is_early: bool) {
// Look up vtables for the type we're casting to,
// passing in the source and target type. The source
// must be a pointer type suitable to the object sigil,
// e.g.: `&x as &Trait` or `~x as ~Trait`
// e.g.: `&x as &Trait` or `box x as Box<Trait>`
let ty = structurally_resolved_type(fcx, ex.span,
fcx.expr_ty(src));
match (&ty::get(ty).sty, store) {
@ -606,8 +606,8 @@ pub fn early_resolve_expr(ex: &ast::Expr, fcx: &FnCtxt, is_early: bool) {
(_, ty::UniqTraitStore) => {
fcx.ccx.tcx.sess.span_err(
ex.span,
format!("can only cast an ~-pointer \
to a ~-object, not a {}",
format!("can only cast an boxed pointer \
to a boxed object, not a {}",
ty::ty_sort_str(fcx.tcx(), ty)));
}

View File

@ -103,7 +103,7 @@ fn type_is_defined_in_local_crate(original_type: t) -> bool {
ty::walk_ty(original_type, |t| {
match get(t).sty {
ty_enum(def_id, _) |
ty_trait(~ty::TyTrait { def_id, .. }) |
ty_trait(box ty::TyTrait { def_id, .. }) |
ty_struct(def_id, _) => {
if def_id.krate == ast::LOCAL_CRATE {
found_nominal = true;
@ -129,7 +129,7 @@ fn get_base_type_def_id(inference_context: &InferCtxt,
match get(base_type).sty {
ty_enum(def_id, _) |
ty_struct(def_id, _) |
ty_trait(~ty::TyTrait { def_id, .. }) => {
ty_trait(box ty::TyTrait { def_id, .. }) => {
return Some(def_id);
}
_ => {

View File

@ -121,7 +121,10 @@ impl<'f> Coerce<'f> {
};
}
ty::ty_closure(~ty::ClosureTy {store: ty::RegionTraitStore(..), ..}) => {
ty::ty_closure(box ty::ClosureTy {
store: ty::RegionTraitStore(..),
..
}) => {
return self.unpack_actual_value(a, |sty_a| {
self.coerce_borrowed_fn(a, sty_a, b)
});
@ -133,7 +136,7 @@ impl<'f> Coerce<'f> {
});
}
ty::ty_trait(~ty::TyTrait {
ty::ty_trait(box ty::TyTrait {
def_id, ref substs, store: ty::UniqTraitStore, bounds
}) => {
let result = self.unpack_actual_value(a, |sty_a| {
@ -152,7 +155,7 @@ impl<'f> Coerce<'f> {
}
}
ty::ty_trait(~ty::TyTrait {
ty::ty_trait(box ty::TyTrait {
def_id, ref substs, store: ty::RegionTraitStore(region, m), bounds
}) => {
let result = self.unpack_actual_value(a, |sty_a| {
@ -332,7 +335,12 @@ impl<'f> Coerce<'f> {
let r_a = self.get_ref().infcx.next_region_var(coercion);
let a_borrowed = match *sty_a {
ty::ty_trait(~ty::TyTrait { def_id, ref substs, bounds, .. }) => {
ty::ty_trait(box ty::TyTrait {
def_id,
ref substs,
bounds,
..
}) => {
ty::mk_trait(tcx, def_id, substs.clone(),
ty::RegionTraitStore(r_a, b_mutbl), bounds)
}

View File

@ -683,7 +683,7 @@ impl<'a> InferCtxt<'a> {
ty::EmptyBuiltinBounds());
let dummy1 = self.resolve_type_vars_if_possible(dummy0);
match ty::get(dummy1).sty {
ty::ty_trait(~ty::TyTrait { ref def_id, ref substs, .. }) => {
ty::ty_trait(box ty::TyTrait { ref def_id, ref substs, .. }) => {
ty::TraitRef {
def_id: *def_id,
substs: (*substs).clone(),

View File

@ -190,7 +190,7 @@ going on:
*p += 1; *p
}
fn weird() {
let mut x: ~Foo = ~Foo { ... };
let mut x: Box<Foo> = box Foo { ... };
'a: add(&mut (*x).f,
'b: inc(&mut (*x).f)) // (..)
}
@ -243,11 +243,11 @@ this similar but unsound example:
*p += v;
}
...
fn consume(x: ~Foo) -> uint {
fn consume(x: Box<Foo>) -> uint {
x.f + x.g
}
fn weird() {
let mut x: ~Foo = ~Foo { ... };
let mut x: Box<Foo> = box Foo { ... };
'a: add(&mut (*x).f, consume(x)) // (..)
}

View File

@ -741,7 +741,7 @@ impl<'a> ConstraintContext<'a> {
substs, variance);
}
ty::ty_trait(~ty::TyTrait { def_id, ref substs, .. }) => {
ty::ty_trait(box ty::TyTrait { def_id, ref substs, .. }) => {
let trait_def = ty::lookup_trait_def(self.tcx(), def_id);
self.add_constraints_from_substs(def_id, &trait_def.generics,
substs, variance);
@ -768,11 +768,15 @@ impl<'a> ConstraintContext<'a> {
}
ty::ty_bare_fn(ty::BareFnTy { ref sig, .. }) |
ty::ty_closure(~ty::ClosureTy { ref sig, store: ty::UniqTraitStore, .. }) => {
ty::ty_closure(box ty::ClosureTy {
ref sig,
store: ty::UniqTraitStore,
..
}) => {
self.add_constraints_from_sig(sig, variance);
}
ty::ty_closure(~ty::ClosureTy { ref sig,
ty::ty_closure(box ty::ClosureTy { ref sig,
store: ty::RegionTraitStore(region, _), .. }) => {
let contra = self.contravariant(variance);
self.add_constraints_from_region(region, contra);

View File

@ -203,7 +203,7 @@ pub fn mt_to_str(cx: &ctxt, m: &mt) -> ~str {
pub fn trait_store_to_str(cx: &ctxt, s: ty::TraitStore) -> ~str {
match s {
ty::UniqTraitStore => "~".to_owned(),
ty::UniqTraitStore => "Box ".to_owned(),
ty::RegionTraitStore(r, m) => {
format!("{}{}", region_ptr_to_str(cx, r), mutability_to_str(m))
}
@ -385,7 +385,7 @@ pub fn ty_to_str(cx: &ctxt, typ: t) -> ~str {
did,
false)
}
ty_trait(~ty::TyTrait {
ty_trait(box ty::TyTrait {
def_id: did, ref substs, store, ref bounds
}) => {
let base = ty::item_path_str(cx, did);
@ -500,7 +500,7 @@ impl<T:Repr> Repr for @T {
}
}
impl<T:Repr> Repr for ~T {
impl<T:Repr> Repr for Box<T> {
fn repr(&self, tcx: &ctxt) -> ~str {
(&**self).repr(tcx)
}

View File

@ -690,26 +690,26 @@ pub enum Type {
Self(ast::NodeId),
/// Primitives are just the fixed-size numeric types (plus int/uint/float), and char.
Primitive(ast::PrimTy),
Closure(~ClosureDecl, Option<Lifetime>),
Proc(~ClosureDecl),
Closure(Box<ClosureDecl>, Option<Lifetime>),
Proc(Box<ClosureDecl>),
/// extern "ABI" fn
BareFunction(~BareFunctionDecl),
BareFunction(Box<BareFunctionDecl>),
Tuple(Vec<Type>),
Vector(~Type),
FixedVector(~Type, ~str),
Vector(Box<Type>),
FixedVector(Box<Type>, ~str),
String,
Bool,
/// aka TyNil
Unit,
/// aka TyBot
Bottom,
Unique(~Type),
Managed(~Type),
RawPointer(Mutability, ~Type),
Unique(Box<Type>),
Managed(Box<Type>),
RawPointer(Mutability, Box<Type>),
BorrowedRef {
pub lifetime: Option<Lifetime>,
pub mutability: Mutability,
pub type_: ~Type,
pub type_: Box<Type>,
},
// region, raw, other boxes, mutable
}

View File

@ -83,8 +83,8 @@ fn doit(sess: &parse::ParseSess, mut lexer: lexer::StringReader, class: Option<&
let klass = match next.tok {
// If this '&' token is directly adjacent to another token, assume
// that it's the address-of operator instead of the and-operator.
// This allows us to give all pointers their own class (~ and @ are
// below).
// This allows us to give all pointers their own class (`Box` and
// `@` are below).
t::BINOP(t::AND) if lexer.peek().sp.lo == next.sp.hi => "kw-2",
t::AT | t::TILDE => "kw-2",

View File

@ -36,8 +36,11 @@ use html::markdown;
use passes;
use visit_ast::RustdocVisitor;
pub fn run(input: &str, cfgs: Vec<~str>,
libs: HashSet<Path>, mut test_args: Vec<~str>) -> int {
pub fn run(input: &str,
cfgs: Vec<~str>,
libs: HashSet<Path>,
mut test_args: Vec<~str>)
-> int {
let input_path = Path::new(input);
let input = driver::FileInput(input_path.clone());
@ -128,7 +131,7 @@ fn runtest(test: &str, cratename: &str, libs: HashSet<Path>, should_fail: bool,
let old = io::stdio::set_stderr(box w1);
spawn(proc() {
let mut p = io::ChanReader::new(rx);
let mut err = old.unwrap_or(box io::stderr() as ~Writer:Send);
let mut err = old.unwrap_or(box io::stderr() as Box<Writer:Send>);
io::util::copy(&mut p, &mut err).unwrap();
});
let emitter = diagnostic::EmitterWriter::new(box w2);

View File

@ -15,9 +15,9 @@
/// (the uv event loop).
use std::cast;
use std::sync::arc::UnsafeArc;
use std::rt::task::{BlockedTask, Task};
use std::rt::local::Local;
use std::rt::task::{BlockedTask, Task};
use std::sync::arc::UnsafeArc;
use homing::HomingMissile;
@ -52,7 +52,7 @@ impl Access {
let inner: &mut Inner = unsafe { cast::transmute(self.inner.get()) };
if inner.held {
let t: ~Task = Local::take();
let t: Box<Task> = Local::take();
t.deschedule(1, |task| {
inner.queue.push(task);
Ok(())

View File

@ -26,12 +26,12 @@ pub struct AsyncWatcher {
}
struct Payload {
callback: ~Callback:Send,
callback: Box<Callback:Send>,
exit_flag: Exclusive<bool>,
}
impl AsyncWatcher {
pub fn new(loop_: &mut Loop, cb: ~Callback:Send) -> AsyncWatcher {
pub fn new(loop_: &mut Loop, cb: Box<Callback:Send>) -> AsyncWatcher {
let handle = UvHandle::alloc(None::<AsyncWatcher>, uvll::UV_ASYNC);
assert_eq!(unsafe {
uvll::uv_async_init(loop_.handle, handle, async_cb)
@ -93,7 +93,7 @@ extern fn async_cb(handle: *uvll::uv_async_t) {
extern fn close_cb(handle: *uvll::uv_handle_t) {
// drop the payload
let _payload: ~Payload = unsafe {
let _payload: Box<Payload> = unsafe {
cast::transmute(uvll::get_data_for_uv_handle(handle))
};
// and then free the handle

View File

@ -100,7 +100,7 @@ pub trait HomingIO {
// to go (remember we have no preemption, so we're guaranteed to stay on
// this event loop as long as we avoid the scheduler).
if cur_loop_id != destination {
let cur_task: ~Task = Local::take();
let cur_task: Box<Task> = Local::take();
cur_task.deschedule(1, |task| {
self.home().send(task);
Ok(())

View File

@ -19,11 +19,11 @@ pub struct IdleWatcher {
handle: *uvll::uv_idle_t,
idle_flag: bool,
closed: bool,
callback: ~Callback:Send,
callback: Box<Callback:Send>,
}
impl IdleWatcher {
pub fn new(loop_: &mut Loop, cb: ~Callback:Send) -> ~IdleWatcher {
pub fn new(loop_: &mut Loop, cb: Box<Callback:Send>) -> Box<IdleWatcher> {
let handle = UvHandle::alloc(None::<IdleWatcher>, uvll::UV_IDLE);
assert_eq!(unsafe {
uvll::uv_idle_init(loop_.handle, handle)
@ -49,7 +49,7 @@ impl IdleWatcher {
extern fn onetime_cb(handle: *uvll::uv_idle_t) {
unsafe {
let data = uvll::get_data_for_uv_handle(handle);
let f: ~proc() = cast::transmute(data);
let f: Box<proc()> = cast::transmute(data);
(*f)();
assert_eq!(uvll::uv_idle_stop(handle), 0);
uvll::uv_close(handle, close_cb);
@ -126,16 +126,16 @@ mod test {
}
}
fn mk(v: uint) -> (~IdleWatcher, Chan) {
fn mk(v: uint) -> (Box<IdleWatcher>, Chan) {
let rc = Rc::new(RefCell::new((None, 0)));
let cb = box MyCallback(rc.clone(), v);
let cb = cb as ~Callback:;
let cb = cb as Box<Callback:>;
let cb = unsafe { cast::transmute(cb) };
(IdleWatcher::new(&mut local_loop().loop_, cb), rc)
}
fn sleep(chan: &Chan) -> uint {
let task: ~Task = Local::take();
let task: Box<Task> = Local::take();
task.deschedule(1, |task| {
match *chan.borrow_mut().deref_mut() {
(ref mut slot, _) => {

View File

@ -47,11 +47,11 @@ via `close` and `delete` methods.
#[cfg(test)] extern crate realrustuv = "rustuv";
extern crate libc;
use libc::{c_int, c_void};
use std::cast;
use std::fmt;
use std::io::IoError;
use std::io;
use libc::{c_int, c_void};
use std::ptr::null;
use std::ptr;
use std::rt::local::Local;
@ -124,8 +124,8 @@ pub mod stream;
/// // this code is running inside of a green task powered by libuv
/// }
/// ```
pub fn event_loop() -> ~rtio::EventLoop:Send {
box uvio::UvEventLoop::new() as ~rtio::EventLoop:Send
pub fn event_loop() -> Box<rtio::EventLoop:Send> {
box uvio::UvEventLoop::new() as Box<rtio::EventLoop:Send>
}
/// A type that wraps a uv handle
@ -149,9 +149,9 @@ pub trait UvHandle<T> {
cast::transmute(uvll::get_data_for_uv_handle(*h))
}
fn install(~self) -> ~Self {
fn install(~self) -> Box<Self> {
unsafe {
let myptr = cast::transmute::<&~Self, &*u8>(&self);
let myptr = cast::transmute::<&Box<Self>, &*u8>(&self);
uvll::set_data_for_uv_handle(self.uv_handle(), *myptr);
}
self
@ -242,7 +242,7 @@ fn wait_until_woken_after(slot: *mut Option<BlockedTask>,
let _f = ForbidUnwind::new("wait_until_woken_after");
unsafe {
assert!((*slot).is_none());
let task: ~Task = Local::take();
let task: Box<Task> = Local::take();
loop_.modify_blockers(1);
task.deschedule(1, |task| {
*slot = Some(task);

View File

@ -8,11 +8,11 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use libc::{size_t, ssize_t, c_int, c_void, c_uint};
use libc;
use std::cast;
use std::io::{IoError, IoResult};
use std::io::net::ip;
use libc::{size_t, ssize_t, c_int, c_void, c_uint};
use libc;
use std::mem;
use std::ptr;
use std::rt::rtio;
@ -152,7 +152,7 @@ fn socket_name(sk: SocketNameKind,
pub struct ConnectCtx {
pub status: c_int,
pub task: Option<BlockedTask>,
pub timer: Option<~TimerWatcher>,
pub timer: Option<Box<TimerWatcher>>,
}
pub struct AcceptTimeout {
@ -352,12 +352,12 @@ pub struct TcpListener {
home: HomeHandle,
handle: *uvll::uv_pipe_t,
closing_task: Option<BlockedTask>,
outgoing: Sender<Result<~rtio::RtioTcpStream:Send, IoError>>,
incoming: Receiver<Result<~rtio::RtioTcpStream:Send, IoError>>,
outgoing: Sender<Result<Box<rtio::RtioTcpStream:Send>, IoError>>,
incoming: Receiver<Result<Box<rtio::RtioTcpStream:Send>, IoError>>,
}
pub struct TcpAcceptor {
listener: ~TcpListener,
listener: Box<TcpListener>,
timeout: AcceptTimeout,
}
@ -455,7 +455,7 @@ impl rtio::RtioTcpStream for TcpWatcher {
})
}
fn clone(&self) -> ~rtio::RtioTcpStream:Send {
fn clone(&self) -> Box<rtio::RtioTcpStream:Send> {
box TcpWatcher {
handle: self.handle,
stream: StreamWatcher::new(self.handle),
@ -463,7 +463,7 @@ impl rtio::RtioTcpStream for TcpWatcher {
refcount: self.refcount.clone(),
write_access: self.write_access.clone(),
read_access: self.read_access.clone(),
} as ~rtio::RtioTcpStream:Send
} as Box<rtio::RtioTcpStream:Send>
}
fn close_write(&mut self) -> Result<(), IoError> {
@ -516,7 +516,7 @@ impl Drop for TcpWatcher {
impl TcpListener {
pub fn bind(io: &mut UvIoFactory, address: ip::SocketAddr)
-> Result<~TcpListener, UvError> {
-> Result<Box<TcpListener>, UvError> {
let handle = unsafe { uvll::malloc_handle(uvll::UV_TCP) };
assert_eq!(unsafe {
uvll::uv_tcp_init(io.uv_loop(), handle)
@ -557,7 +557,7 @@ impl rtio::RtioSocket for TcpListener {
}
impl rtio::RtioTcpListener for TcpListener {
fn listen(~self) -> Result<~rtio::RtioTcpAcceptor:Send, IoError> {
fn listen(~self) -> Result<Box<rtio::RtioTcpAcceptor:Send>, IoError> {
// create the acceptor object from ourselves
let mut acceptor = box TcpAcceptor {
listener: self,
@ -567,7 +567,7 @@ impl rtio::RtioTcpListener for TcpListener {
let _m = acceptor.fire_homing_missile();
// FIXME: the 128 backlog should be configurable
match unsafe { uvll::uv_listen(acceptor.listener.handle, 128, listen_cb) } {
0 => Ok(acceptor as ~rtio::RtioTcpAcceptor:Send),
0 => Ok(acceptor as Box<rtio::RtioTcpAcceptor:Send>),
n => Err(uv_error_to_io_error(UvError(n))),
}
}
@ -583,7 +583,7 @@ extern fn listen_cb(server: *uvll::uv_stream_t, status: c_int) {
});
let client = TcpWatcher::new_home(&loop_, tcp.home().clone());
assert_eq!(unsafe { uvll::uv_accept(server, client.handle) }, 0);
Ok(box client as ~rtio::RtioTcpStream:Send)
Ok(box client as Box<rtio::RtioTcpStream:Send>)
}
n => Err(uv_error_to_io_error(UvError(n)))
};
@ -611,7 +611,7 @@ impl rtio::RtioSocket for TcpAcceptor {
}
impl rtio::RtioTcpAcceptor for TcpAcceptor {
fn accept(&mut self) -> Result<~rtio::RtioTcpStream:Send, IoError> {
fn accept(&mut self) -> Result<Box<rtio::RtioTcpStream:Send>, IoError> {
self.timeout.accept(&self.listener.incoming)
}
@ -879,14 +879,14 @@ impl rtio::RtioUdpSocket for UdpWatcher {
})
}
fn clone(&self) -> ~rtio::RtioUdpSocket:Send {
fn clone(&self) -> Box<rtio::RtioUdpSocket:Send> {
box UdpWatcher {
handle: self.handle,
home: self.home.clone(),
refcount: self.refcount.clone(),
write_access: self.write_access.clone(),
read_access: self.read_access.clone(),
} as ~rtio::RtioUdpSocket:Send
} as Box<rtio::RtioUdpSocket:Send>
}
}

View File

@ -8,9 +8,9 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use libc;
use std::c_str::CString;
use std::io::IoError;
use libc;
use std::rt::rtio::{RtioPipe, RtioUnixListener, RtioUnixAcceptor};
use access::Access;
@ -36,12 +36,12 @@ pub struct PipeWatcher {
pub struct PipeListener {
home: HomeHandle,
pipe: *uvll::uv_pipe_t,
outgoing: Sender<Result<~RtioPipe:Send, IoError>>,
incoming: Receiver<Result<~RtioPipe:Send, IoError>>,
outgoing: Sender<Result<Box<RtioPipe:Send>, IoError>>,
incoming: Receiver<Result<Box<RtioPipe:Send>, IoError>>,
}
pub struct PipeAcceptor {
listener: ~PipeListener,
listener: Box<PipeListener>,
timeout: net::AcceptTimeout,
}
@ -121,7 +121,7 @@ impl RtioPipe for PipeWatcher {
self.stream.write(buf).map_err(uv_error_to_io_error)
}
fn clone(&self) -> ~RtioPipe:Send {
fn clone(&self) -> Box<RtioPipe:Send> {
box PipeWatcher {
stream: StreamWatcher::new(self.stream.handle),
defused: false,
@ -129,7 +129,7 @@ impl RtioPipe for PipeWatcher {
refcount: self.refcount.clone(),
read_access: self.read_access.clone(),
write_access: self.write_access.clone(),
} as ~RtioPipe:Send
} as Box<RtioPipe:Send>
}
}
@ -154,7 +154,7 @@ impl Drop for PipeWatcher {
impl PipeListener {
pub fn bind(io: &mut UvIoFactory, name: &CString)
-> Result<~PipeListener, UvError>
-> Result<Box<PipeListener>, UvError>
{
let pipe = PipeWatcher::new(io, false);
match unsafe {
@ -179,7 +179,7 @@ impl PipeListener {
}
impl RtioUnixListener for PipeListener {
fn listen(~self) -> Result<~RtioUnixAcceptor:Send, IoError> {
fn listen(~self) -> Result<Box<RtioUnixAcceptor:Send>, IoError> {
// create the acceptor object from ourselves
let mut acceptor = box PipeAcceptor {
listener: self,
@ -189,7 +189,7 @@ impl RtioUnixListener for PipeListener {
let _m = acceptor.fire_homing_missile();
// FIXME: the 128 backlog should be configurable
match unsafe { uvll::uv_listen(acceptor.listener.pipe, 128, listen_cb) } {
0 => Ok(acceptor as ~RtioUnixAcceptor:Send),
0 => Ok(acceptor as Box<RtioUnixAcceptor:Send>),
n => Err(uv_error_to_io_error(UvError(n))),
}
}
@ -214,7 +214,7 @@ extern fn listen_cb(server: *uvll::uv_stream_t, status: libc::c_int) {
});
let client = PipeWatcher::new_home(&loop_, pipe.home().clone(), false);
assert_eq!(unsafe { uvll::uv_accept(server, client.handle()) }, 0);
Ok(box client as ~RtioPipe:Send)
Ok(box client as Box<RtioPipe:Send>)
}
n => Err(uv_error_to_io_error(UvError(n)))
};
@ -231,7 +231,7 @@ impl Drop for PipeListener {
// PipeAcceptor implementation and traits
impl RtioUnixAcceptor for PipeAcceptor {
fn accept(&mut self) -> Result<~RtioPipe:Send, IoError> {
fn accept(&mut self) -> Result<Box<RtioPipe:Send>, IoError> {
self.timeout.accept(&self.listener.incoming)
}

View File

@ -8,10 +8,10 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::io::IoError;
use std::io::process;
use libc::c_int;
use libc;
use std::io::IoError;
use std::io::process;
use std::ptr;
use std::rt::rtio::RtioProcess;
use std::rt::task::BlockedTask;
@ -40,8 +40,7 @@ impl Process {
/// Returns either the corresponding process object or an error which
/// occurred.
pub fn spawn(io_loop: &mut UvIoFactory, config: process::ProcessConfig)
-> Result<(~Process, ~[Option<PipeWatcher>]), UvError>
{
-> Result<(Box<Process>, ~[Option<PipeWatcher>]), UvError> {
let cwd = config.cwd.map(|s| s.to_c_str());
let mut io = vec![config.stdin, config.stdout, config.stderr];
for slot in config.extra_io.iter() {

View File

@ -20,8 +20,8 @@
#![allow(dead_code)]
use std::cast;
use libc::c_void;
use std::cast;
use std::rt::task::BlockedTask;
use std::unstable::mutex::NativeMutex;
use std::sync::arc::UnsafeArc;
@ -107,7 +107,7 @@ extern fn async_cb(handle: *uvll::uv_async_t) {
}
impl QueuePool {
pub fn new(loop_: &mut Loop) -> ~QueuePool {
pub fn new(loop_: &mut Loop) -> Box<QueuePool> {
let handle = UvHandle::alloc(None::<AsyncWatcher>, uvll::UV_ASYNC);
let state = UnsafeArc::new(State {
handle: handle,

View File

@ -26,8 +26,8 @@ pub struct SignalWatcher {
}
impl SignalWatcher {
pub fn new(io: &mut UvIoFactory, signum: Signum,
channel: Sender<Signum>) -> Result<~SignalWatcher, UvError> {
pub fn new(io: &mut UvIoFactory, signum: Signum, channel: Sender<Signum>)
-> Result<Box<SignalWatcher>, UvError> {
let s = box SignalWatcher {
handle: UvHandle::alloc(None::<SignalWatcher>, uvll::UV_SIGNAL),
home: io.make_handle(),

View File

@ -32,7 +32,7 @@ pub enum NextAction {
}
impl TimerWatcher {
pub fn new(io: &mut UvIoFactory) -> ~TimerWatcher {
pub fn new(io: &mut UvIoFactory) -> Box<TimerWatcher> {
let handle = io.make_handle();
let me = box TimerWatcher::new_home(&io.loop_, handle);
me.install()

View File

@ -93,17 +93,16 @@ impl EventLoop for UvEventLoop {
IdleWatcher::onetime(&mut self.uvio.loop_, f);
}
fn pausable_idle_callback(&mut self, cb: ~rtio::Callback:Send)
-> ~rtio::PausableIdleCallback:Send
{
IdleWatcher::new(&mut self.uvio.loop_,
cb) as ~rtio::PausableIdleCallback:Send
fn pausable_idle_callback(&mut self, cb: Box<rtio::Callback:Send>)
-> Box<rtio::PausableIdleCallback:Send> {
IdleWatcher::new(&mut self.uvio.loop_, cb)
as Box<rtio::PausableIdleCallback:Send>
}
fn remote_callback(&mut self, f: ~rtio::Callback:Send)
-> ~rtio::RemoteCallback:Send
{
box AsyncWatcher::new(&mut self.uvio.loop_, f) as ~rtio::RemoteCallback:Send
fn remote_callback(&mut self, f: Box<rtio::Callback:Send>)
-> Box<rtio::RemoteCallback:Send> {
box AsyncWatcher::new(&mut self.uvio.loop_, f) as
Box<rtio::RemoteCallback:Send>
}
fn io<'a>(&'a mut self) -> Option<&'a mut rtio::IoFactory> {
@ -132,7 +131,7 @@ fn test_callback_run_once() {
pub struct UvIoFactory {
pub loop_: Loop,
handle_pool: Option<~QueuePool>,
handle_pool: Option<Box<QueuePool>>,
}
impl UvIoFactory {
@ -151,30 +150,31 @@ impl IoFactory for UvIoFactory {
// NB: This blocks the task waiting on the connection.
// It would probably be better to return a future
fn tcp_connect(&mut self, addr: SocketAddr, timeout: Option<u64>)
-> Result<~rtio::RtioTcpStream:Send, IoError>
{
-> Result<Box<rtio::RtioTcpStream:Send>, IoError> {
match TcpWatcher::connect(self, addr, timeout) {
Ok(t) => Ok(box t as ~rtio::RtioTcpStream:Send),
Ok(t) => Ok(box t as Box<rtio::RtioTcpStream:Send>),
Err(e) => Err(uv_error_to_io_error(e)),
}
}
fn tcp_bind(&mut self, addr: SocketAddr) -> Result<~rtio::RtioTcpListener:Send, IoError> {
fn tcp_bind(&mut self, addr: SocketAddr)
-> Result<Box<rtio::RtioTcpListener:Send>, IoError> {
match TcpListener::bind(self, addr) {
Ok(t) => Ok(t as ~rtio::RtioTcpListener:Send),
Ok(t) => Ok(t as Box<rtio::RtioTcpListener:Send>),
Err(e) => Err(uv_error_to_io_error(e)),
}
}
fn udp_bind(&mut self, addr: SocketAddr) -> Result<~rtio::RtioUdpSocket:Send, IoError> {
fn udp_bind(&mut self, addr: SocketAddr)
-> Result<Box<rtio::RtioUdpSocket:Send>, IoError> {
match UdpWatcher::bind(self, addr) {
Ok(u) => Ok(box u as ~rtio::RtioUdpSocket:Send),
Ok(u) => Ok(box u as Box<rtio::RtioUdpSocket:Send>),
Err(e) => Err(uv_error_to_io_error(e)),
}
}
fn timer_init(&mut self) -> Result<~rtio::RtioTimer:Send, IoError> {
Ok(TimerWatcher::new(self) as ~rtio::RtioTimer:Send)
fn timer_init(&mut self) -> Result<Box<rtio::RtioTimer:Send>, IoError> {
Ok(TimerWatcher::new(self) as Box<rtio::RtioTimer:Send>)
}
fn get_host_addresses(&mut self, host: Option<&str>, servname: Option<&str>,
@ -183,13 +183,14 @@ impl IoFactory for UvIoFactory {
r.map_err(uv_error_to_io_error)
}
fn fs_from_raw_fd(&mut self, fd: c_int,
close: rtio::CloseBehavior) -> ~rtio::RtioFileStream:Send {
box FileWatcher::new(self, fd, close) as ~rtio::RtioFileStream:Send
fn fs_from_raw_fd(&mut self, fd: c_int, close: rtio::CloseBehavior)
-> Box<rtio::RtioFileStream:Send> {
box FileWatcher::new(self, fd, close) as
Box<rtio::RtioFileStream:Send>
}
fn fs_open(&mut self, path: &CString, fm: FileMode, fa: FileAccess)
-> Result<~rtio::RtioFileStream:Send, IoError> {
-> Result<Box<rtio::RtioFileStream:Send>, IoError> {
let flags = match fm {
io::Open => 0,
io::Append => libc::O_APPEND,
@ -205,7 +206,7 @@ impl IoFactory for UvIoFactory {
};
match FsRequest::open(self, path, flags as int, mode as int) {
Ok(fs) => Ok(box fs as ~rtio::RtioFileStream:Send),
Ok(fs) => Ok(box fs as Box<rtio::RtioFileStream:Send>),
Err(e) => Err(uv_error_to_io_error(e))
}
}
@ -270,12 +271,16 @@ impl IoFactory for UvIoFactory {
}
fn spawn(&mut self, config: ProcessConfig)
-> Result<(~rtio::RtioProcess:Send, ~[Option<~rtio::RtioPipe:Send>]), IoError>
-> Result<(Box<rtio::RtioProcess:Send>,
~[Option<Box<rtio::RtioPipe:Send>>]),
IoError>
{
match Process::spawn(self, config) {
Ok((p, io)) => {
Ok((p as ~rtio::RtioProcess:Send,
io.move_iter().map(|i| i.map(|p| box p as ~rtio::RtioPipe:Send)).collect()))
Ok((p as Box<rtio::RtioProcess:Send>,
io.move_iter().map(|i| i.map(|p| {
box p as Box<rtio::RtioPipe:Send>
})).collect()))
}
Err(e) => Err(uv_error_to_io_error(e)),
}
@ -285,41 +290,42 @@ impl IoFactory for UvIoFactory {
Process::kill(pid, signum).map_err(uv_error_to_io_error)
}
fn unix_bind(&mut self, path: &CString) -> Result<~rtio::RtioUnixListener:Send, IoError>
{
fn unix_bind(&mut self, path: &CString)
-> Result<Box<rtio::RtioUnixListener:Send>, IoError> {
match PipeListener::bind(self, path) {
Ok(p) => Ok(p as ~rtio::RtioUnixListener:Send),
Ok(p) => Ok(p as Box<rtio::RtioUnixListener:Send>),
Err(e) => Err(uv_error_to_io_error(e)),
}
}
fn unix_connect(&mut self, path: &CString,
timeout: Option<u64>) -> Result<~rtio::RtioPipe:Send, IoError> {
fn unix_connect(&mut self, path: &CString, timeout: Option<u64>)
-> Result<Box<rtio::RtioPipe:Send>, IoError> {
match PipeWatcher::connect(self, path, timeout) {
Ok(p) => Ok(box p as ~rtio::RtioPipe:Send),
Ok(p) => Ok(box p as Box<rtio::RtioPipe:Send>),
Err(e) => Err(uv_error_to_io_error(e)),
}
}
fn tty_open(&mut self, fd: c_int, readable: bool)
-> Result<~rtio::RtioTTY:Send, IoError> {
-> Result<Box<rtio::RtioTTY:Send>, IoError> {
match TtyWatcher::new(self, fd, readable) {
Ok(tty) => Ok(box tty as ~rtio::RtioTTY:Send),
Ok(tty) => Ok(box tty as Box<rtio::RtioTTY:Send>),
Err(e) => Err(uv_error_to_io_error(e))
}
}
fn pipe_open(&mut self, fd: c_int) -> Result<~rtio::RtioPipe:Send, IoError> {
fn pipe_open(&mut self, fd: c_int)
-> Result<Box<rtio::RtioPipe:Send>, IoError> {
match PipeWatcher::open(self, fd) {
Ok(s) => Ok(box s as ~rtio::RtioPipe:Send),
Ok(s) => Ok(box s as Box<rtio::RtioPipe:Send>),
Err(e) => Err(uv_error_to_io_error(e))
}
}
fn signal(&mut self, signum: Signum, channel: Sender<Signum>)
-> Result<~rtio::RtioSignal:Send, IoError> {
-> Result<Box<rtio::RtioSignal:Send>, IoError> {
match SignalWatcher::new(self, signum, channel) {
Ok(s) => Ok(s as ~rtio::RtioSignal:Send),
Ok(s) => Ok(s as Box<rtio::RtioSignal:Send>),
Err(e) => Err(uv_error_to_io_error(e)),
}
}

View File

@ -113,7 +113,7 @@ pub struct MyStruct {
impl ToJson for MyStruct {
fn to_json( &self ) -> json::Json {
let mut d = ~TreeMap::new();
let mut d = box TreeMap::new();
d.insert("attr1".to_owned(), self.attr1.to_json());
d.insert("attr2".to_owned(), self.attr2.to_json());
json::Object(d)
@ -206,7 +206,7 @@ pub struct TestStruct1 {
impl ToJson for TestStruct1 {
fn to_json( &self ) -> json::Json {
let mut d = ~TreeMap::new();
let mut d = box TreeMap::new();
d.insert("data_int".to_owned(), self.data_int.to_json());
d.insert("data_str".to_owned(), self.data_str.to_json());
d.insert("data_vector".to_owned(), self.data_vector.to_json());
@ -232,21 +232,20 @@ fn main() {
*/
use collections::HashMap;
use std::char;
use std::f64;
use std::fmt;
use std::io::MemWriter;
use std::io;
use std::num;
use std::str;
use std::str::ScalarValue;
use std::strbuf::StrBuf;
use std::fmt;
use std::vec::Vec;
use std::mem::swap;
use std::num;
use std::str::ScalarValue;
use std::str;
use std::strbuf::StrBuf;
use std::vec::Vec;
use Encodable;
use collections::TreeMap;
use collections::{HashMap, TreeMap};
/// Represents a json value
#[deriving(Clone, Eq)]
@ -255,7 +254,7 @@ pub enum Json {
String(~str),
Boolean(bool),
List(List),
Object(~Object),
Object(Box<Object>),
Null,
}

View File

@ -391,14 +391,14 @@ impl<'a, E, S:Encoder<E>,T:Encodable<S, E>> Encodable<S, E> for &'a T {
}
}
impl<E, S:Encoder<E>,T:Encodable<S, E>> Encodable<S, E> for ~T {
impl<E, S:Encoder<E>,T:Encodable<S, E>> Encodable<S, E> for Box<T> {
fn encode(&self, s: &mut S) -> Result<(), E> {
(**self).encode(s)
}
}
impl<E, D:Decoder<E>,T:Decodable<D, E>> Decodable<D, E> for ~T {
fn decode(d: &mut D) -> Result<~T, E> {
impl<E, D:Decoder<E>,T:Decodable<D, E>> Decodable<D, E> for Box<T> {
fn decode(d: &mut D) -> Result<Box<T>, E> {
Ok(box try!(Decodable::decode(d)))
}
}

View File

@ -17,12 +17,13 @@
//! As `&Any` (a borrowed trait object), it has the `is` and `as_ref` methods, to test if the
//! contained value is of a given type, and to get a reference to the inner value as a type. As
//! `&mut Any`, there is also the `as_mut` method, for getting a mutable reference to the inner
//! value. `~Any` adds the `move` method, which will unwrap a `~T` from the object. See the
//! extension traits (`*Ext`) for the full details.
//! value. `Box<Any>` adds the `move` method, which will unwrap a `Box<T>` from the object. See
//! the extension traits (`*Ext`) for the full details.
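
A hedged usage sketch of the borrowed-`Any` methods named above, assuming `AnyRefExt` is the extension trait that provides `is` and `as_ref` at this point in the tree:

~~~
use std::any::{Any, AnyRefExt};

fn main() {
    let value = 5u;
    let any_ref = &value as &Any;
    // `is` checks the erased type; `as_ref` downcasts to Some(&uint) on success.
    if any_ref.is::<uint>() {
        println!("{}", *any_ref.as_ref::<uint>().unwrap());
    }
}
~~~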
use cast::{transmute, transmute_copy};
use fmt;
use option::{Option, Some, None};
use owned::Box;
use raw::TraitObject;
use result::{Result, Ok, Err};
use intrinsics::TypeId;
@ -121,12 +122,12 @@ impl<'a> AnyMutRefExt<'a> for &'a mut Any {
pub trait AnyOwnExt {
/// Returns the boxed value if it is of type `T`, or
/// `Err(Self)` if it isn't.
fn move<T: 'static>(self) -> Result<~T, Self>;
fn move<T: 'static>(self) -> Result<Box<T>, Self>;
}
impl AnyOwnExt for ~Any {
impl AnyOwnExt for Box<Any> {
#[inline]
fn move<T: 'static>(self) -> Result<~T, ~Any> {
fn move<T: 'static>(self) -> Result<Box<T>, Box<Any>> {
if self.is::<T>() {
unsafe {
// Get the raw representation of the trait object
@ -148,9 +149,9 @@ impl AnyOwnExt for ~Any {
// Trait implementations
///////////////////////////////////////////////////////////////////////////////
impl fmt::Show for ~Any {
impl fmt::Show for Box<Any> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.pad("~Any")
f.pad("Box<Any>")
}
}
@ -164,6 +165,7 @@ impl<'a> fmt::Show for &'a Any {
mod tests {
use prelude::*;
use super::*;
use owned::Box;
use str::StrSlice;
#[deriving(Eq, Show)]
@ -190,7 +192,7 @@ mod tests {
#[test]
fn any_owning() {
let (a, b, c) = (box 5u as ~Any, box TEST as ~Any, box Test as ~Any);
let (a, b, c) = (box 5u as Box<Any>, box TEST as Box<Any>, box Test as Box<Any>);
assert!(a.is::<uint>());
assert!(!b.is::<uint>());
@ -268,8 +270,8 @@ mod tests {
#[test]
fn any_move() {
let a = box 8u as ~Any;
let b = box Test as ~Any;
let a = box 8u as Box<Any>;
let b = box Test as Box<Any>;
match a.move::<uint>() {
Ok(a) => { assert_eq!(a, box 8u); }
@ -280,19 +282,19 @@ mod tests {
Err(..) => fail!()
}
let a = box 8u as ~Any;
let b = box Test as ~Any;
let a = box 8u as Box<Any>;
let b = box Test as Box<Any>;
assert!(a.move::<~Test>().is_err());
assert!(b.move::<~uint>().is_err());
assert!(a.move::<Box<Test>>().is_err());
assert!(b.move::<Box<uint>>().is_err());
}
#[test]
fn test_show() {
let a = box 8u as ~Any;
let b = box Test as ~Any;
assert_eq!(format!("{}", a), "~Any".to_owned());
assert_eq!(format!("{}", b), "~Any".to_owned());
let a = box 8u as Box<Any>;
let b = box Test as Box<Any>;
assert_eq!(format!("{}", a), "Box<Any>".to_owned());
assert_eq!(format!("{}", b), "Box<Any>".to_owned());
let a = &8u as &Any;
let b = &Test as &Any;

View File

@ -21,6 +21,8 @@ the `clone` method.
*/
use owned::Box;
/// A common trait for cloning an object.
pub trait Clone {
/// Returns a copy of the value. The contents of owned pointers
@ -39,14 +41,14 @@ pub trait Clone {
}
}
impl<T: Clone> Clone for ~T {
impl<T: Clone> Clone for Box<T> {
/// Return a copy of the owned box.
#[inline]
fn clone(&self) -> ~T { box {(**self).clone()} }
fn clone(&self) -> Box<T> { box {(**self).clone()} }
/// Perform copy-assignment from `source` by reusing the existing allocation.
#[inline]
fn clone_from(&mut self, source: &~T) {
fn clone_from(&mut self, source: &Box<T>) {
(**self).clone_from(&(**source));
}
}
@ -127,7 +129,7 @@ extern_fn_clone!(A, B, C, D, E, F, G, H)
#[test]
fn test_owned_clone() {
let a = box 5i;
let b: ~int = a.clone();
let b: Box<int> = a.clone();
assert_eq!(a, b);
}

View File

@ -279,6 +279,7 @@ use kinds::marker;
use mem;
use ops::Drop;
use option::{Some, None, Option};
use owned::Box;
use result::{Ok, Err, Result};
use rt::local::Local;
use rt::task::{Task, BlockedTask};
@ -297,6 +298,7 @@ macro_rules! test (
use prelude::*;
use super::*;
use super::super::*;
use owned::Box;
use task;
fn f() $b
@ -549,7 +551,7 @@ impl<T: Send> Sender<T> {
let cnt = self.sends.get() + 1;
self.sends.set(cnt);
if cnt % (RESCHED_FREQ as uint) == 0 {
let task: Option<~Task> = Local::try_take();
let task: Option<Box<Task>> = Local::try_take();
task.map(|t| t.maybe_yield());
}
@ -773,7 +775,7 @@ impl<T: Send> Receiver<T> {
let cnt = self.receives.get() + 1;
self.receives.set(cnt);
if cnt % (RESCHED_FREQ as uint) == 0 {
let task: Option<~Task> = Local::try_take();
let task: Option<Box<Task>> = Local::try_take();
task.map(|t| t.maybe_yield());
}
@ -979,6 +981,7 @@ mod test {
use native;
use os;
use owned::Box;
use super::*;
pub fn stress_factor() -> uint {
@ -1197,7 +1200,7 @@ mod test {
test!(fn oneshot_single_thread_send_port_close() {
// Testing that the sender cleans up the payload if receiver is closed
let (tx, rx) = channel::<~int>();
let (tx, rx) = channel::<Box<int>>();
drop(rx);
tx.send(box 0);
} #[should_fail])
@ -1214,7 +1217,7 @@ mod test {
})
test!(fn oneshot_single_thread_send_then_recv() {
let (tx, rx) = channel::<~int>();
let (tx, rx) = channel::<Box<int>>();
tx.send(box 10);
assert!(rx.recv() == box 10);
})
@ -1263,7 +1266,7 @@ mod test {
})
test!(fn oneshot_multi_task_recv_then_send() {
let (tx, rx) = channel::<~int>();
let (tx, rx) = channel::<Box<int>>();
spawn(proc() {
assert!(rx.recv() == box 10);
});
@ -1272,7 +1275,7 @@ mod test {
})
test!(fn oneshot_multi_task_recv_then_close() {
let (tx, rx) = channel::<~int>();
let (tx, rx) = channel::<Box<int>>();
spawn(proc() {
drop(tx);
});
@ -1340,7 +1343,7 @@ mod test {
send(tx, 0);
recv(rx, 0);
fn send(tx: Sender<~int>, i: int) {
fn send(tx: Sender<Box<int>>, i: int) {
if i == 10 { return }
spawn(proc() {
@ -1349,7 +1352,7 @@ mod test {
});
}
fn recv(rx: Receiver<~int>, i: int) {
fn recv(rx: Receiver<Box<int>>, i: int) {
if i == 10 { return }
spawn(proc() {
@ -1513,6 +1516,7 @@ mod test {
mod sync_tests {
use prelude::*;
use os;
use owned::Box;
pub fn stress_factor() -> uint {
match os::getenv("RUST_TEST_STRESS") {
@ -1657,7 +1661,7 @@ mod sync_tests {
test!(fn oneshot_single_thread_send_port_close() {
// Testing that the sender cleans up the payload if receiver is closed
let (tx, rx) = sync_channel::<~int>(0);
let (tx, rx) = sync_channel::<Box<int>>(0);
drop(rx);
tx.send(box 0);
} #[should_fail])
@ -1674,7 +1678,7 @@ mod sync_tests {
})
test!(fn oneshot_single_thread_send_then_recv() {
let (tx, rx) = sync_channel::<~int>(1);
let (tx, rx) = sync_channel::<Box<int>>(1);
tx.send(box 10);
assert!(rx.recv() == box 10);
})
@ -1728,7 +1732,7 @@ mod sync_tests {
})
test!(fn oneshot_multi_task_recv_then_send() {
let (tx, rx) = sync_channel::<~int>(0);
let (tx, rx) = sync_channel::<Box<int>>(0);
spawn(proc() {
assert!(rx.recv() == box 10);
});
@ -1737,7 +1741,7 @@ mod sync_tests {
})
test!(fn oneshot_multi_task_recv_then_close() {
let (tx, rx) = sync_channel::<~int>(0);
let (tx, rx) = sync_channel::<Box<int>>(0);
spawn(proc() {
drop(tx);
});
@ -1805,7 +1809,7 @@ mod sync_tests {
send(tx, 0);
recv(rx, 0);
fn send(tx: SyncSender<~int>, i: int) {
fn send(tx: SyncSender<Box<int>>, i: int) {
if i == 10 { return }
spawn(proc() {
@ -1814,7 +1818,7 @@ mod sync_tests {
});
}
fn recv(rx: Receiver<~int>, i: int) {
fn recv(rx: Receiver<Box<int>>, i: int) {
if i == 10 { return }
spawn(proc() {

View File

@ -37,6 +37,7 @@ use kinds::Send;
use mem;
use ops::Drop;
use option::{Some, None, Option};
use owned::Box;
use result::{Result, Ok, Err};
use rt::local::Local;
use rt::task::{Task, BlockedTask};
@ -137,7 +138,7 @@ impl<T: Send> Packet<T> {
// Attempt to not block the task (it's a little expensive). If it looks
// like we're not empty, then immediately go through to `try_recv`.
if self.state.load(atomics::SeqCst) == EMPTY {
let t: ~Task = Local::take();
let t: Box<Task> = Local::take();
t.deschedule(1, |task| {
let n = unsafe { task.cast_to_uint() };
match self.state.compare_and_swap(EMPTY, n, atomics::SeqCst) {

View File

@ -52,6 +52,7 @@ use kinds::marker;
use kinds::Send;
use ops::Drop;
use option::{Some, None, Option};
use owned::Box;
use ptr::RawPtr;
use result::{Ok, Err, Result};
use rt::local::Local;
@ -176,7 +177,7 @@ impl Select {
// Acquire a number of blocking contexts, and block on each one
// sequentially until one fails. If one fails, then abort
// immediately so we can go unblock on all the other receivers.
let task: ~Task = Local::take();
let task: Box<Task> = Local::take();
task.deschedule(amt, |task| {
// Prepare for the block
let (i, handle) = iter.next().unwrap();

View File

@ -24,6 +24,7 @@ use iter::Iterator;
use kinds::Send;
use ops::Drop;
use option::{Some, None, Option};
use owned::Box;
use result::{Ok, Err, Result};
use rt::local::Local;
use rt::task::{Task, BlockedTask};
@ -223,7 +224,7 @@ impl<T: Send> Packet<T> {
data => return data,
}
let task: ~Task = Local::take();
let task: Box<Task> = Local::take();
task.deschedule(1, |task| {
self.decrement(task)
});

View File

@ -24,6 +24,7 @@ use iter::Iterator;
use kinds::Send;
use ops::Drop;
use option::{Some, None};
use owned::Box;
use result::{Ok, Err, Result};
use rt::local::Local;
use rt::task::{Task, BlockedTask};
@ -181,7 +182,7 @@ impl<T: Send> Packet<T> {
// Welp, our channel has no data. Deschedule the current task and
// initiate the blocking protocol.
let task: ~Task = Local::take();
let task: Box<Task> = Local::take();
task.deschedule(1, |task| {
self.decrement(task)
});

View File

@ -40,6 +40,7 @@ use kinds::Send;
use mem;
use ops::Drop;
use option::{Some, None, Option};
use owned::Box;
use ptr::RawPtr;
use result::{Result, Ok, Err};
use rt::local::Local;
@ -111,7 +112,7 @@ pub enum Failure {
/// in the meantime. This re-locks the mutex upon returning.
fn wait(slot: &mut Blocker, f: fn(BlockedTask) -> Blocker,
lock: &NativeMutex) {
let me: ~Task = Local::take();
let me: Box<Task> = Local::take();
me.deschedule(1, |task| {
match mem::replace(slot, f(task)) {
NoneBlocked => {}
@ -445,7 +446,7 @@ impl<T> Buffer<T> {
impl Queue {
fn enqueue(&mut self, lock: &NativeMutex) {
let task: ~Task = Local::take();
let task: Box<Task> = Local::take();
let mut node = Node {
task: None,
next: 0 as *mut Node,

View File

@ -10,6 +10,8 @@
//! The `Default` trait for types which may have meaningful default values
use owned::Box;
/// A trait that types which have a useful default value should implement.
pub trait Default {
/// Return the "default value" for a type.
@ -20,6 +22,6 @@ impl<T: Default + 'static> Default for @T {
fn default() -> @T { @Default::default() }
}
impl<T: Default> Default for ~T {
fn default() -> ~T { box Default::default() }
impl<T: Default> Default for Box<T> {
fn default() -> Box<T> { box Default::default() }
}
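
A quick sketch of what the boxed impl above provides, assuming `int`'s default is zero as in this era's libstd:

~~~
use std::default::Default;

fn main() {
    // Box<T> forwards to T's default value and boxes the result.
    let zero: Box<int> = Default::default();
    assert_eq!(*zero, 0);
}
~~~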

View File

@ -492,6 +492,7 @@ use io;
use iter::{Iterator, range};
use num::Signed;
use option::{Option,Some,None};
use owned::Box;
use repr;
use result::{Ok, Err};
use str::StrSlice;
@ -1113,7 +1114,7 @@ pub fn argumentuint<'a>(s: &'a uint) -> Argument<'a> {
impl<T: Show> Show for @T {
fn fmt(&self, f: &mut Formatter) -> Result { secret_show(&**self, f) }
}
impl<T: Show> Show for ~T {
impl<T: Show> Show for Box<T> {
fn fmt(&self, f: &mut Formatter) -> Result { secret_show(&**self, f) }
}
impl<'a, T: Show> Show for &'a T {

View File

@ -17,6 +17,7 @@
use prelude::*;
use char;
use owned::Box;
use str;
/// A piece is a portion of the format string which represents the next part
@ -41,7 +42,7 @@ pub struct Argument<'a> {
/// How to format the argument
pub format: FormatSpec<'a>,
/// If not `None`, what method to invoke on the argument
pub method: Option<~Method<'a>>
pub method: Option<Box<Method<'a>>>
}
/// Specification for the formatting of an argument in the format string.
@ -435,7 +436,7 @@ impl<'a> Parser<'a> {
/// Parses a method to be applied to the previously specified argument and
/// its format. The two current supported methods are 'plural' and 'select'
fn method(&mut self) -> Option<~Method<'a>> {
fn method(&mut self) -> Option<Box<Method<'a>>> {
if !self.wsconsume(',') {
return None;
}
@ -461,7 +462,7 @@ impl<'a> Parser<'a> {
}
/// Parses a 'select' statement (after the initial 'select' word)
fn select(&mut self) -> ~Method<'a> {
fn select(&mut self) -> Box<Method<'a>> {
let mut other = None;
let mut arms = vec!();
// Consume arms one at a time
@ -503,7 +504,7 @@ impl<'a> Parser<'a> {
}
/// Parses a 'plural' statement (after the initial 'plural' word)
fn plural(&mut self) -> ~Method<'a> {
fn plural(&mut self) -> Box<Method<'a>> {
let mut offset = None;
let mut other = None;
let mut arms = vec!();

View File

@ -67,6 +67,7 @@ use container::Container;
use io::Writer;
use iter::Iterator;
use option::{Option, Some, None};
use owned::Box;
use rc::Rc;
use str::{Str, StrSlice};
use slice::{Vector, ImmutableVector};
@ -229,7 +230,7 @@ impl<'a, S: Writer, T: Hash<S>> Hash<S> for &'a mut T {
}
}
impl<S: Writer, T: Hash<S>> Hash<S> for ~T {
impl<S: Writer, T: Hash<S>> Hash<S> for Box<T> {
#[inline]
fn hash(&self, state: &mut S) {
(**self).hash(state);

View File

@ -55,11 +55,12 @@ use container::Container;
use iter::Iterator;
use kinds::Send;
use super::{Reader, Writer, Seek};
use super::{SeekStyle, Read, Write, Open, IoError, Truncate,
FileMode, FileAccess, FileStat, IoResult, FilePermission};
use super::{SeekStyle, Read, Write, Open, IoError, Truncate};
use super::{FileMode, FileAccess, FileStat, IoResult, FilePermission};
use rt::rtio::{RtioFileStream, IoFactory, LocalIo};
use io;
use option::{Some, None, Option};
use owned::Box;
use result::{Ok, Err};
use path;
use path::{Path, GenericPath};
@ -78,7 +79,7 @@ use vec::Vec;
/// configured at creation time, via the `FileAccess` parameter to
/// `File::open_mode()`.
pub struct File {
fd: ~RtioFileStream:Send,
fd: Box<RtioFileStream:Send>,
path: Path,
last_nread: int,
}
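
For context, a hedged sketch of the `open_mode` call the doc comment refers to; the file name is invented, and `Open`/`ReadWrite` are assumed to be the `FileMode`/`FileAccess` values re-exported from `std::io` at this point:

~~~
use std::io::{File, Open, ReadWrite};
use std::path::Path;

fn main() {
    let path = Path::new("example.txt");
    // The FileAccess chosen here (ReadWrite) is fixed for the File's lifetime.
    match File::open_mode(&path, Open, ReadWrite) {
        Ok(_file) => println!("opened"),
        Err(..) => println!("could not open"),
    }
}
~~~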

Some files were not shown because too many files have changed in this diff.