Addressing review comments

* Renamed the example
* Reworded comments
* Moved it to after worker pools
* Sleep for a second instead of random
* Mention the new sample in worker pools
This commit is contained in:
Eli Bendersky
2019-05-30 05:28:29 -07:00
parent 6ab81bdf71
commit 74ca2a7b0f
10 changed files with 48 additions and 46 deletions

View File

@@ -1,27 +1,22 @@
// To wait for multiple goroutines to finish, we can
// use a sync.WaitGroup.
// use a *wait group*.
package main
import (
"fmt"
"math/rand"
"sync"
"time"
)
// This is the function we'll run in every goroutine.
// wg is the WaitGroup it uses to notify that it's done.
// Note that a WaitGroup must be passed to functions by
// pointer.
func worker(id int, wg *sync.WaitGroup) {
fmt.Printf("Worker %d starting\n", id)
// Sleep for a random duration between 500-700 ms
// to simulate work. See the [random numbers](random-numbers)
// example for more details on *rand*.
msToSleep := time.Duration(500 + rand.Intn(200))
time.Sleep(msToSleep * time.Millisecond)
// Sleep to simulate an expensive task.
time.Sleep(time.Second)
fmt.Printf("Worker %d done\n", id)
// Notify the WaitGroup that we're done.

View File

@@ -0,0 +1,2 @@
ffc6520e73ebfa2b8c470e3ef00fee55388234e0
8cD2V9CgI0J

View File

@@ -1,4 +1,4 @@
$ go run waiting-for-goroutines-to-finish.go
$ go run waitgroup.go
Worker 5 starting
Worker 3 starting
Worker 4 starting

View File

@@ -1,2 +0,0 @@
f068072d11ed9469174c18f5b7a6a7d9d8d3dafb
koKzXfbq8kg

View File

@@ -12,37 +12,40 @@ import "time"
// results on `results`. We'll sleep a second per job to
// simulate an expensive task.
// worker drains the jobs channel, doubling each job id onto results.
// It sleeps one second per job to simulate an expensive task, and
// returns once jobs is closed and drained.
//
// Defect fixed: the scraped diff lost its +/- markers, leaving two
// identical copies of the range loop in sequence; only one belongs here.
func worker(id int, jobs <-chan int, results chan<- int) {
	for j := range jobs {
		fmt.Println("worker", id, "started job", j)
		// Simulate an expensive task.
		time.Sleep(time.Second)
		fmt.Println("worker", id, "finished job", j)
		results <- j * 2
	}
}
func main() {
// In order to use our pool of workers we need to send
// them work and collect their results. We make 2
// channels for this.
jobs := make(chan int, 100)
results := make(chan int, 100)
// In order to use our pool of workers we need to send
// them work and collect their results. We make 2
// channels for this.
jobs := make(chan int, 100)
results := make(chan int, 100)
// This starts up 3 workers, initially blocked
// because there are no jobs yet.
for w := 1; w <= 3; w++ {
go worker(w, jobs, results)
}
// This starts up 3 workers, initially blocked
// because there are no jobs yet.
for w := 1; w <= 3; w++ {
go worker(w, jobs, results)
}
// Here we send 5 `jobs` and then `close` that
// channel to indicate that's all the work we have.
for j := 1; j <= 5; j++ {
jobs <- j
}
close(jobs)
// Here we send 5 `jobs` and then `close` that
// channel to indicate that's all the work we have.
for j := 1; j <= 5; j++ {
jobs <- j
}
close(jobs)
// Finally we collect all the results of the work.
for a := 1; a <= 5; a++ {
<-results
}
// Finally we collect all the results of the work.
// This also ensures that the worker goroutines have
// finished. An alternative way to wait for multiple
// goroutines is to use a [WaitGroup](waitgroup).
for a := 1; a <= 5; a++ {
<-results
}
}

View File

@@ -1,2 +1,2 @@
1f9acf1e50be05cad73e6b085ed3294892c67d42
RTRcHA05vV
bfd2824b3840ff67fa9a0218c7be66647b4bf3d9
IQestAFcxLh