Commit bc7f155

[testbed] fix load gen worker tick interval math to avoid div by zero (#38084)
#### Description

Previously, certain combinations of load generation options could lead to an attempt to divide by zero, resulting in a panic. Specifically, if the desired batches per second of a single worker worked out to less than one, integer division rounded it down to zero before it was used as the divisor in subsequent calculations.

#### Testing

The logic was refactored into a standalone method and is covered by unit tests added in this PR.
1 parent 2496bb6 commit bc7f155
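The failure mode is easy to reproduce in isolation. Below is a minimal standalone sketch (not part of the PR) that runs the old and new arithmetic side by side with the option values from the new unit test's "more than one second" case; the variable names here are local illustrations, not identifiers from the testbed package.

package main

import (
	"fmt"
	"time"
)

func main() {
	// Illustrative values borrowed from the new unit test: 100 data items per
	// second, 5 items per batch, 100 workers.
	dataItemsPerSecond, itemsPerBatch, numWorkers := 100, 5, 100

	// Old approach: integer division truncates 100/5/100 (= 0.2) down to 0, so the
	// expression time.Second / time.Duration(0) in generate() panicked with
	// "integer divide by zero".
	oldDivisor := dataItemsPerSecond / itemsPerBatch / numWorkers
	fmt.Println("integer per-worker batches/sec:", oldDivisor) // 0

	// New approach: float arithmetic preserves the fractional per-worker rate,
	// so the tick interval comes out as 5s instead of panicking.
	batchesPerSecondPerWorker := float64(dataItemsPerSecond) / float64(itemsPerBatch) / float64(numWorkers)
	tick := time.Duration(float64(time.Second) / batchesPerSecondPerWorker)
	fmt.Println("per-worker tick:", tick) // 5s
}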

File tree

3 files changed: +101 -1 lines changed

+27
@@ -0,0 +1,27 @@
+# Use this changelog template to create an entry for release notes.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: bug_fix
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: testbed
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Fix batch interval calculation to avoid possible division by zero
+
+# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
+issues: [38084]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext:
+
+# If your change doesn't affect end users or the exported elements of any package,
+# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
+# Default: '[user]'
+change_logs: [api]

testbed/testbed/load_generator.go

+15 -1
@@ -213,12 +213,14 @@ func (ps *ProviderSender) generate() {
 
 	var workers sync.WaitGroup
 
+	tickDuration := ps.perWorkerTickDuration(numWorkers)
+
 	for i := 0; i < numWorkers; i++ {
 		workers.Add(1)
 
 		go func() {
 			defer workers.Done()
-			t := time.NewTicker(time.Second / time.Duration(ps.options.DataItemsPerSecond/ps.options.ItemsPerBatch/numWorkers))
+			t := time.NewTicker(tickDuration)
 			defer t.Stop()
 
 			var prevErr error
@@ -338,3 +340,15 @@ func (ps *ProviderSender) generateLog() error {
 		}
 	}
 }
+
+// perWorkerTickDuration calculates the tick interval each worker must observe in order to
+// produce the desired average DataItemsPerSecond given the constraints of ItemsPerBatch and numWorkers.
+//
+// Of particular note are cases when the batchesPerSecond required of each worker is less than one due to a high
+// number of workers relative to the desired DataItemsPerSecond. If the total batchesPerSecond is less than the
+// number of workers then we are dealing with fractional batches per second per worker, so we need float arithmetic.
+func (ps *ProviderSender) perWorkerTickDuration(numWorkers int) time.Duration {
+	batchesPerSecond := float64(ps.options.DataItemsPerSecond) / float64(ps.options.ItemsPerBatch)
+	batchesPerSecondPerWorker := batchesPerSecond / float64(numWorkers)
+	return time.Duration(float64(time.Second) / batchesPerSecondPerWorker)
+}
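One design note, as far as can be read from the diff above: doing the division in float64 keeps a fractional per-worker batch rate from collapsing to zero, and the only rounding left is the final conversion to time.Duration, which truncates to a whole number of nanoseconds. That truncation is why the unit tests below can assert exact Duration values.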
+59
@@ -0,0 +1,59 @@
+package testbed
+
+import (
+	"testing"
+	"time"
+)
+
+func Test_perWorkerTickDuration(t *testing.T) {
+	for _, test := range []struct {
+		name                                          string
+		expectedTickDuration                          time.Duration
+		dataItemsPerSecond, itemsPerBatch, numWorkers int
+	}{
+		// Because of the way perWorkerTickDuration calculates the tick interval using dataItemsPerSecond,
+		// it is important to test its behavior with respect to a one-second Duration in particular because
+		// certain combinations of configuration could previously cause a divide-by-zero panic.
+		{
+			name:                 "less than one second",
+			expectedTickDuration: 100 * time.Millisecond,
+			dataItemsPerSecond:   100,
+			itemsPerBatch:        5,
+			numWorkers:           2,
+		},
+		{
+			name:                 "exactly one second",
+			expectedTickDuration: time.Second,
+			dataItemsPerSecond:   100,
+			itemsPerBatch:        5,
+			numWorkers:           20,
+		},
+		{
+			name:                 "more than one second (would previously trigger divide-by-zero panic)",
+			expectedTickDuration: 5 * time.Second,
+			dataItemsPerSecond:   100,
+			itemsPerBatch:        5,
+			numWorkers:           100,
+		},
+		{
+			name:                 "default batch size and worker count",
+			expectedTickDuration: 8103727, // ~8.1ms
+			dataItemsPerSecond:   1234,
+			itemsPerBatch:        10,
+			numWorkers:           1,
+		},
+	} {
+		t.Run(test.name, func(t *testing.T) {
+			subject := &ProviderSender{
+				options: LoadOptions{
+					DataItemsPerSecond: test.dataItemsPerSecond,
+					ItemsPerBatch:      test.itemsPerBatch,
+				},
+			}
+			actual := subject.perWorkerTickDuration(test.numWorkers)
+			if actual != test.expectedTickDuration {
+				t.Errorf("got %v; want %v", actual, test.expectedTickDuration)
+			}
+		})
+	}
+}
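For reference, the expected value in the "default batch size and worker count" case falls straight out of the new formula: 1234 items/s at 10 items per batch is 123.4 batches/s for a single worker, so the tick interval is 1e9 ns / 123.4 ≈ 8,103,727.7 ns, which the conversion to time.Duration truncates to 8103727 ns (about 8.1 ms), matching the inline comment. Assuming the test file sits next to load_generator.go in testbed/testbed, it can be run on its own with something like `go test -run Test_perWorkerTickDuration ./testbed/testbed/`.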
