publish worker-pools

This commit is contained in:
Mark McGranaghan 2012-10-18 19:57:28 -04:00
parent dc35e6c751
commit bf0de16c9a
4 changed files with 56 additions and 48 deletions

View File

@@ -34,11 +34,10 @@ Timeouts
Non-Blocking Channel Operations
Closing Channels
Range over Channels
# Scatter Gather
# Rate Limiting
# Worker Pools
Timers
Tickers
Worker Pools
# State Goroutine
# State Mutex
Sorting

View File

@@ -1,24 +0,0 @@
package main
import "sync"
import "time"
import "math/rand"
import "fmt"
func main() {
    times := new([20]int)
    wait := new(sync.WaitGroup)
    for i := 0; i < 20; i++ {
        n := i
        wait.Add(1)
        go func() {
            opTime := time.Duration(rand.Intn(2000))
            time.Sleep(opTime * time.Millisecond)
            fmt.Println(n)
            times[n] = opTime
            wait.Done()
        }()
    }
    wait.Wait()
    fmt.Println(*times)
}
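
As an aside, the removed file above (presumably the Scatter Gather example dropped from the index) would not compile as written: `times` holds `int` values while `opTime` is a `time.Duration`. A minimal compiling sketch of the same fan-out / collect idea, not part of this commit, could look like this:

package main

import (
    "fmt"
    "math/rand"
    "sync"
    "time"
)

func main() {
    // Scatter: run 20 simulated operations concurrently,
    // each recording how long it took in milliseconds.
    times := make([]int, 20)
    var wait sync.WaitGroup
    for i := 0; i < 20; i++ {
        n := i
        wait.Add(1)
        go func() {
            defer wait.Done()
            opTime := rand.Intn(2000)
            time.Sleep(time.Duration(opTime) * time.Millisecond)
            times[n] = opTime
        }()
    }
    // Gather: wait for every goroutine to finish, then report.
    wait.Wait()
    fmt.Println(times)
}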

View File

@@ -1,29 +1,46 @@
// In this example we'll look at how to implement
// a _worker pool_ using goroutines and channels.
package main
import "time"
func main() {
    jobs := make(chan int, 100)
    acks := make(chan bool, 100)
    for w := 0; w < 10; w++ {
        go func() {
            for j := range jobs {
                println("worker", w, "processing job", j)
                time.Sleep(time.Millisecond * 150)
                acks <- true
            }
        }()
// Here's the worker, of which we'll run several
// concurrent instances. These workers will receive
// work on the `jobs` channel and send the corresponding
// results on `results`. We'll sleep a second per job to
// simulate an expensive task.
func worker(id int, jobs <-chan int, results chan<- int) {
    for j := range jobs {
        println("worker", id, "processing job", j)
        time.Sleep(time.Second)
        results <- j * 2
    }
    for j := 0; j < 100; j++ {
        jobs <- j
    }
    for a := 0; a < 100; a++ {
        <-acks
    }
    println("all done")
}
// todo: broken
func main() {
    // In order to use our pool of workers we need to send
    // them work and collect their results. We make 2
    // channels for this.
    jobs := make(chan int, 100)
    results := make(chan int, 100)

    // This starts up 3 workers, initially blocked
    // because there are no jobs yet.
    for w := 1; w <= 3; w++ {
        go worker(w, jobs, results)
    }

    // Here we send 9 `jobs` and then `close` that
    // channel to indicate that's all the work we have.
    for j := 1; j <= 9; j++ {
        jobs <- j
    }
    close(jobs)

    // Finally we collect all the results of the work.
    for a := 1; a <= 9; a++ {
        <-results
    }
}
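
The `main` above drains `results` without using the values. If the caller wants to consume the computed values by ranging over `results`, one variation (a sketch, not part of this commit) is to track the workers with a `sync.WaitGroup` and close `results` once they have all returned:

package main

import (
    "fmt"
    "sync"
    "time"
)

func worker(id int, jobs <-chan int, results chan<- int) {
    for j := range jobs {
        println("worker", id, "processing job", j)
        time.Sleep(time.Second)
        results <- j * 2
    }
}

func main() {
    jobs := make(chan int, 100)
    results := make(chan int, 100)

    // Track the workers so we know when it is safe to close results.
    var wg sync.WaitGroup
    for w := 1; w <= 3; w++ {
        wg.Add(1)
        go func(id int) {
            defer wg.Done()
            worker(id, jobs, results)
        }(w)
    }

    for j := 1; j <= 9; j++ {
        jobs <- j
    }
    close(jobs)

    // Once every worker has returned, no more sends on results
    // can happen, so it is safe to close it and range over it.
    go func() {
        wg.Wait()
        close(results)
    }()
    for r := range results {
        fmt.Println("result", r)
    }
}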

View File

@@ -0,0 +1,16 @@
# Our running program shows the 9 jobs being executed by
# various workers. The program only takes about 3 seconds
# despite doing about 9 seconds of total work because
# there are 3 workers operating concurrently.
$ time go run worker-pools.go
worker 1 processing job 1
worker 2 processing job 2
worker 3 processing job 3
worker 1 processing job 4
worker 2 processing job 5
worker 3 processing job 6
worker 1 processing job 7
worker 2 processing job 8
worker 3 processing job 9
real 0m3.149s
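
To spell out the arithmetic in the comment above: each worker handles 9 / 3 = 3 jobs in sequence at 1 second apiece, so the expected wall-clock time is about 3 × 1s = 3s; the remaining ~0.15s is presumably `go run` compilation and scheduling overhead.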