Switch to calculating statistics only when a request count is specified

Panayiotis Pastos 2018-02-21 15:52:31 +00:00 committed by Benjamin J Cane
parent a16f6402bf
commit a191eada28
5 changed files with 41 additions and 41 deletions

View File

@ -94,7 +94,7 @@ Number of 5xx responses: 0
* Testing REST endpoints with dynamically generated keys
## Caveats
* The `min`,`max`,`avg` stats are only accurate when a request count is specified. When in timed mode only the first `100,000` requests are tracked
* Statistics are only provided when a fixed number of requests is specified (rather than a duration).
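The mechanism behind this caveat is visible in the hunks below: the result now carries a `hasStats` flag that is set only when no duration is configured, and the min/max/avg lines are printed only when that flag is true. A condensed, standalone sketch of that gate (illustrative names only, not Baton's actual types):

```go
package main

import (
	"fmt"
	"time"
)

// result mirrors the fields the commit adds: the timing statistics are
// only meaningful when hasStats is true (request-count mode).
type result struct {
	hasStats    bool
	averageTime float32
}

// summarize attaches statistics only when no duration was configured,
// i.e. when a fixed number of requests was requested.
func summarize(timeSum int64, requestCount int, duration time.Duration) result {
	r := result{hasStats: duration == 0}
	if r.hasStats && requestCount > 0 {
		r.averageTime = float32(timeSum) / float32(requestCount)
	}
	return r
}

func main() {
	fmt.Println(summarize(1500, 3, 0))              // request-count mode: {true 500}
	fmt.Println(summarize(1500, 3, 30*time.Second)) // timed mode: {false 0}
}
```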
## Dependency Management
[Dep](https://github.com/golang/dep) is currently being utilized as the dependency manager for Baton.

View File

@ -157,6 +157,7 @@ func processResults(baton *Baton, preparedRunConfiguration runConfiguration) {
timeSum += result.timeSum
requestCount += result.totalSuccess
}
baton.result.hasStats = baton.configuration.duration == 0
baton.result.averageTime = float32(timeSum) / float32(requestCount)
baton.result.totalRequests = baton.result.httpResult.total()
baton.result.requestsPerSecond = int(float64(baton.result.totalRequests)/baton.result.timeTaken.Seconds() + 0.5)

View File

@ -13,7 +13,7 @@
* or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package main
import (
@ -23,11 +23,13 @@ import (
// CountWorker implements a worker which sends a fixed number of requests
type countWorker struct {
*worker
timings chan int
}
func newCountWorker(requests <-chan bool, results chan<- HTTPResult, done chan<- bool) *countWorker {
worker := newWorker(requests, results, done)
return &countWorker{worker}
timings := make(chan int, len(requests))
return &countWorker{worker, timings}
}
func (worker *countWorker) sendRequest(request preLoadedRequest) {
@ -38,9 +40,10 @@ func (worker *countWorker) sendRequest(request preLoadedRequest) {
resp := fasthttp.AcquireResponse()
for range worker.requests {
worker.performRequest(req, resp)
worker.performRequestWithStats(req, resp, worker.timings)
}
worker.collectStatistics(worker.timings)
worker.finish()
}
func (worker *countWorker) sendRequests(requests []preLoadedRequest) {
@ -48,8 +51,9 @@ func (worker *countWorker) sendRequests(requests []preLoadedRequest) {
for range worker.requests {
req, resp := buildRequest(requests, totalPremadeRequests)
worker.performRequest(req, resp)
worker.performRequestWithStats(req, resp, worker.timings)
}
worker.collectStatistics(worker.timings)
worker.finish()
}
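In isolation, the shape of the change above: the count worker owns a timings channel buffered to exactly the number of requests, records one elapsed time per request, and drains the channel once the loop finishes. A standalone sketch of that pattern (the `doRequest` stand-in and all other names are illustrative, not Baton's code):

```go
package main

import (
	"fmt"
	"time"
)

// runCounted performs a fixed number of "requests" and records each
// elapsed time in a channel buffered to exactly that count, so the
// send can never block and no timing is dropped.
func runCounted(requestCount int, doRequest func()) (min, max, avg time.Duration) {
	timings := make(chan time.Duration, requestCount)
	for i := 0; i < requestCount; i++ {
		start := time.Now()
		doRequest()
		timings <- time.Since(start)
	}
	close(timings) // the loop is done; draining below terminates

	var sum time.Duration
	n := 0
	for t := range timings {
		if n == 0 || t < min {
			min = t
		}
		if t > max {
			max = t
		}
		sum += t
		n++
	}
	if n > 0 {
		avg = sum / time.Duration(n)
	}
	return min, max, avg
}

func main() {
	min, max, avg := runCounted(5, func() { time.Sleep(time.Millisecond) })
	fmt.Println(min, max, avg)
}
```

Because the buffer matches the request count, the plain send in the loop never blocks; Baton's `collectStatistics` additionally skips the first timing to discount client set-up overhead, as the final hunk below shows.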

View File

@ -27,13 +27,14 @@ type Result struct {
totalRequests int
timeTaken time.Duration
requestsPerSecond int
hasStats bool
averageTime float32
minTime int
maxTime int
}
func newResult() *Result {
return &Result{*newHTTPResult(), 0, 0, 0, 0, 0, 0}
return &Result{*newHTTPResult(), 0, 0, 0, false, 0, 0, 0}
}
func (result *Result) printResults() {
@ -43,9 +44,11 @@ func (result *Result) printResults() {
fmt.Printf("Total requests: %10d\n", result.totalRequests)
fmt.Printf("Time taken to complete requests: %15s\n", result.timeTaken.String())
fmt.Printf("Requests per second: %10d\n", result.requestsPerSecond)
fmt.Printf("Max response time (ms): %10d\n", result.maxTime)
fmt.Printf("Min response time (ms): %10d\n", result.minTime)
fmt.Printf("Avg response time (ms): %6.2f\n", result.averageTime)
if result.hasStats {
fmt.Printf("Max response time (ms): %10d\n", result.maxTime)
fmt.Printf("Min response time (ms): %10d\n", result.minTime)
fmt.Printf("Avg response time (ms): %6.2f\n", result.averageTime)
}
fmt.Printf("===================== Breakdown =====================\n")
fmt.Printf("Number of connection errors: %10d\n", result.httpResult.connectionErrorCount)
fmt.Printf("Number of 1xx responses: %10d\n", result.httpResult.status1xxCount)

View File

@ -22,15 +22,12 @@ import (
"time"
)
const TimedBufferSize = 100000
type worker struct {
httpResult HTTPResult
client *fasthttp.Client
requests <-chan bool
httpResults chan<- HTTPResult
done chan<- bool
timings chan int
}
type workable interface {
@ -44,39 +41,20 @@ func (worker *worker) setCustomClient(client *fasthttp.Client) {
}
func newWorker(requests <-chan bool, httpResults chan<- HTTPResult, done chan<- bool) *worker {
requestCount := len(requests)
// If this is running in timed mode we cannot predict the number of requests being
// sent, so we cannot allocate a large enough channel up front. As a compromise we fill
// the buffered channel up to a fixed size and ignore the rest
if requestCount <= 1 {
requestCount = TimedBufferSize
}
timings := make(chan int, requestCount)
return &worker{*newHTTPResult(), &fasthttp.Client{}, requests, httpResults, done, timings}
return &worker{*newHTTPResult(), &fasthttp.Client{}, requests, httpResults, done}
}
func (worker *worker) performRequest(req *fasthttp.Request, resp *fasthttp.Response) bool {
timeNow := time.Now().UnixNano()
if err := worker.client.Do(req, resp); err != nil {
worker.httpResult.connectionErrorCount++
return true
}
timeAfter := time.Now().UnixNano()
i := int(timeAfter - timeNow)
// The select is needed to avoid blocking the thread
// if the channel is full
select {
case worker.timings <- i:
// Send the timing via the channel in non-blocking mode
default:
// If the channel is full (which it will be in timed mode) then
// just proceed
break
}
status := resp.StatusCode()
worker.recordCount(status)
return false
}
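For contrast with the new `performRequestWithStats` below, the removed lines above document the old compromise for timed mode: a buffer capped at `TimedBufferSize` and a non-blocking send that silently drops timings once that buffer fills. A standalone sketch of that drop-when-full pattern (illustrative names, not Baton's code):

```go
package main

import "fmt"

// recordTiming tries to store a timing without ever blocking the caller:
// once the buffer is full, further timings are simply discarded.
// In the removed code the buffer was capped at TimedBufferSize (100,000).
func recordTiming(timings chan int, t int) bool {
	select {
	case timings <- t:
		return true // stored
	default:
		return false // buffer full; timing dropped
	}
}

func main() {
	timings := make(chan int, 2) // tiny buffer to show the drop behaviour
	for i := 1; i <= 3; i++ {
		fmt.Println(i, recordTiming(timings, i)) // prints: 1 true, 2 true, 3 false
	}
}
```

The commit removes this path: statistics are now disabled in timed mode, and in request-count mode the channel is sized to the exact number of requests, so a plain blocking send suffices.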
func (worker *worker) recordCount(status int) {
if status >= 100 && status < 200 {
worker.httpResult.status1xxCount++
} else if status >= 200 && status < 300 {
@ -88,6 +66,21 @@ func (worker *worker) performRequest(req *fasthttp.Request, resp *fasthttp.Respo
} else if status >= 500 && status < 600 {
worker.httpResult.status5xxCount++
}
}
func (worker *worker) performRequestWithStats(req *fasthttp.Request, resp *fasthttp.Response, timings chan int) bool {
timeNow := time.Now().UnixNano()
if err := worker.client.Do(req, resp); err != nil {
worker.httpResult.connectionErrorCount++
return true
}
timeAfter := time.Now().UnixNano()
i := int(timeAfter - timeNow)
// Record the timing into a channel
timings <- i
status := resp.StatusCode()
worker.recordCount(status)
return false
}
@ -108,18 +101,17 @@ func buildRequest(requests []preLoadedRequest, totalPremadeRequests int) (*fasth
}
func (worker *worker) finish() {
worker.collectStatistics()
worker.httpResults <- worker.httpResult
worker.done <- true
}
func (worker *worker) collectStatistics() {
close(worker.timings)
func (worker *worker) collectStatistics(timings chan int) {
close(timings)
first := true
sum, total := int64(0), 0
for timing := range worker.timings {
for timing := range timings {
timing = timing / 1000
// The first request is associated with overhead
// in setting up the client so we ignore its result