Keep track of min/max/avg time taken per request

This works best when a request count is provided;
if not, the statistics are based on the first
`n` requests, where `n` is a predefined constant.
This commit is contained in:
Panayiotis Pastos
2018-02-20 15:57:29 +00:00
parent dd97c7ec82
commit 7f7fdb8138
4 changed files with 92 additions and 9 deletions

View File

@@ -13,7 +13,7 @@
* or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package main
import (
@@ -81,7 +81,7 @@ func main() {
*wait,
}
baton := &Baton{configuration: configuration, result: Result{}}
baton := &Baton{configuration: configuration, result: *newResult()}
baton.run()
baton.result.printResults()
@@ -134,6 +134,12 @@ func (baton *Baton) run() {
log.Println("Finished sending the requests")
log.Println("Processing the results...")
processResults(baton, preparedRunConfiguration)
}
func processResults(baton *Baton, preparedRunConfiguration runConfiguration) {
timeSum := int64(0)
requestCount := 0
for a := 1; a <= baton.configuration.concurrency; a++ {
result := <-preparedRunConfiguration.results
baton.result.httpResult.connectionErrorCount += result.connectionErrorCount
@@ -142,10 +148,17 @@ func (baton *Baton) run() {
baton.result.httpResult.status3xxCount += result.status3xxCount
baton.result.httpResult.status4xxCount += result.status4xxCount
baton.result.httpResult.status5xxCount += result.status5xxCount
if result.minTime < baton.result.httpResult.minTime {
baton.result.minTime = result.minTime
}
if result.maxTime > baton.result.httpResult.maxTime {
baton.result.maxTime = result.maxTime
}
timeSum += result.timeSum
requestCount += result.totalSuccess
}
baton.result.averageTime = float32(timeSum) / float32(requestCount)
baton.result.totalRequests = baton.result.httpResult.total()
baton.result.requestsPerSecond = int(float64(baton.result.totalRequests)/baton.result.timeTaken.Seconds() + 0.5)
}

View File

@@ -13,9 +13,11 @@
* or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package main
import "math"
// HTTPResult contains counters for the responses to the HTTP requests
type HTTPResult struct {
connectionErrorCount int
@@ -24,6 +26,14 @@ type HTTPResult struct {
status3xxCount int
status4xxCount int
status5xxCount int
maxTime int
minTime int
timeSum int64
totalSuccess int
}
// newHTTPResult returns an HTTPResult with every counter at its zero value
// and minTime primed to math.MaxInt64 so that the first observed timing
// always becomes the new minimum.
func newHTTPResult() *HTTPResult {
	result := HTTPResult{}
	result.minTime = math.MaxInt64
	return &result
}
func (httpResult HTTPResult) total() int {

View File

@@ -13,7 +13,7 @@
* or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package main
import (
@@ -27,6 +27,13 @@ type Result struct {
totalRequests int
timeTaken time.Duration
requestsPerSecond int
averageTime float32
minTime int
maxTime int
}
// newResult constructs an empty Result whose embedded httpResult is
// initialised via newHTTPResult; every other field starts at its zero value.
func newResult() *Result {
	result := Result{httpResult: *newHTTPResult()}
	return &result
}
func (result *Result) printResults() {
@@ -36,6 +43,9 @@ func (result *Result) printResults() {
fmt.Printf("Total requests: %10d\n", result.totalRequests)
fmt.Printf("Time taken to complete requests: %15s\n", result.timeTaken.String())
fmt.Printf("Requests per second: %10d\n", result.requestsPerSecond)
fmt.Printf("Max response time (ms): %10d\n", result.maxTime)
fmt.Printf("Min response time (ms): %10d\n", result.minTime)
fmt.Printf("Avg response time (ms): %6.2f\n", result.averageTime)
fmt.Printf("===================== Breakdown =====================\n")
fmt.Printf("Number of connection errors: %10d\n", result.httpResult.connectionErrorCount)
fmt.Printf("Number of 1xx responses: %10d\n", result.httpResult.status1xxCount)

View File

@@ -13,20 +13,25 @@
* or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package main
import (
"github.com/valyala/fasthttp"
"math/rand"
"time"
)
// TimedBufferSize caps the capacity of a worker's timings channel when
// running in timed mode, where the total number of requests is not known
// in advance (see newWorker).
const TimedBufferSize = 100000

// worker performs HTTP requests and accumulates per-response statistics
// in httpResult, publishing them on the httpResults channel when done.
type worker struct {
	httpResult  HTTPResult        // counters and timing stats for this worker
	client      *fasthttp.Client  // client used to perform the requests
	requests    <-chan bool       // presumably one message per request to send — confirm against the run loop
	httpResults chan<- HTTPResult // where the final httpResult is published by finish()
	done        chan<- bool       // completion signal sent after httpResult
	timings     chan int          // per-request elapsed times recorded by performRequest
	isBuffered  bool              // true in timed mode, when timings may silently drop entries
}
type workable interface {
@@ -40,15 +45,37 @@ func (worker *worker) setCustomClient(client *fasthttp.Client) {
}
func newWorker(requests <-chan bool, httpResults chan<- HTTPResult, done chan<- bool) *worker {
return &worker{HTTPResult{0, 0, 0, 0, 0, 0}, &fasthttp.Client{}, requests, httpResults, done}
}
requestCount := len(requests)
isBuffered := false
// If this is running in timed mode we cannot predict the number of requests being
// sent, to allocate a large enough channel. As a compromise we fill in the
// buffered channel up to a fixed size and ignore the rest
if requestCount <= 1 {
requestCount = TimedBufferSize
isBuffered = true
}
timings := make(chan int, requestCount)
return &worker{*newHTTPResult(), &fasthttp.Client{}, requests, httpResults, done, timings, isBuffered}
}
// performRequest issues a single HTTP request via the worker's client,
// records the elapsed time on the timings channel (best-effort), and tallies
// the response status code. Returns true when the request failed with a
// connection error.
// NOTE(review): the rest of the status-code tally is outside this view.
func (worker *worker) performRequest(req *fasthttp.Request, resp *fasthttp.Response) bool {
	timeNow := time.Now().UnixNano()
	if err := worker.client.Do(req, resp); err != nil {
		worker.httpResult.connectionErrorCount++
		return true
	}
	timeAfter := time.Now().UnixNano()
	// Nanoseconds / 1000 yields microseconds — but printResults labels these
	// values "(ms)". NOTE(review): confirm the intended unit; one of the two
	// looks wrong.
	i := int((timeAfter - timeNow) / 1000)
	select {
	case worker.timings <- i:
		// Send the timing via the channel in non-blocking mode
	default:
		// If the channel is full (which it will in case of timed mode) then
		// just proceed
		break
	}
	status := resp.StatusCode()
	if status >= 100 && status < 200 {
@@ -82,6 +109,29 @@ func buildRequest(requests []preLoadedRequest, totalPremadeRequests int) (*fasth
}
// finish drains the timings recorded by this worker, derives min/max/sum
// statistics, and publishes the worker's HTTPResult before signalling
// completion on the done channel.
func (worker *worker) finish() {
	// No more sends happen at this point, so closing lets the range below
	// drain the buffered values and then terminate.
	close(worker.timings)
	first := true
	sum, total := int64(0), 0
	for timing := range worker.timings {
		// The first request is associated with overhead
		// in setting up the client so we ignore its result
		if first {
			first = false
			continue
		}
		// Min and max must be tracked with independent ifs: a single timing
		// can be both a new minimum and a new maximum (e.g. the first counted
		// sample, since minTime starts at math.MaxInt64 and maxTime at 0).
		// The previous else-if skipped the max update whenever the min was
		// updated, so strictly decreasing timings left maxTime at 0.
		if timing < worker.httpResult.minTime {
			worker.httpResult.minTime = timing
		}
		if timing > worker.httpResult.maxTime {
			worker.httpResult.maxTime = timing
		}
		sum += int64(timing)
		total++
	}
	worker.httpResult.timeSum = sum
	worker.httpResult.totalSuccess = total
	worker.httpResults <- worker.httpResult
	worker.done <- true
}