@@ -1,8 +1,10 @@
 package discovery
 
 import (
+	"context"
 	"sync"
 
+	"github.com/lightningnetwork/lnd/fn/v2"
 	"github.com/lightningnetwork/lnd/lnpeer"
 	"github.com/lightningnetwork/lnd/lnwire"
 )
@@ -28,7 +30,7 @@ type reliableSenderCfg struct {
 
 	// IsMsgStale determines whether a message retrieved from the backing
	// MessageStore is seen as stale by the current graph.
-	IsMsgStale func(lnwire.Message) bool
+	IsMsgStale func(context.Context, lnwire.Message) bool
 }
 
 // peerManager contains the set of channels required for the peerHandler to
@@ -59,8 +61,9 @@ type reliableSender struct {
 	activePeers    map[[33]byte]peerManager
 	activePeersMtx sync.Mutex
 
-	wg   sync.WaitGroup
-	quit chan struct{}
+	wg     sync.WaitGroup
+	quit   chan struct{}
+	cancel fn.Option[context.CancelFunc]
 }
 
 // newReliableSender returns a new reliableSender backed by the given config.
@@ -73,10 +76,13 @@ func newReliableSender(cfg *reliableSenderCfg) *reliableSender {
 }
 
 // Start spawns message handlers for any peers with pending messages.
-func (s *reliableSender) Start() error {
+func (s *reliableSender) Start(ctx context.Context) error {
 	var err error
 	s.start.Do(func() {
-		err = s.resendPendingMsgs()
+		ctx, cancel := context.WithCancel(ctx)
+		s.cancel = fn.Some(cancel)
+
+		err = s.resendPendingMsgs(ctx)
 	})
 	return err
 }
@@ -87,6 +93,7 @@ func (s *reliableSender) Stop() {
 		log.Debugf("reliableSender is stopping")
 		defer log.Debugf("reliableSender stopped")
 
+		s.cancel.WhenSome(func(fn context.CancelFunc) { fn() })
 		close(s.quit)
 		s.wg.Wait()
 	})
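The Start/Stop pair above is a compact lifecycle pattern: Start derives a cancellable child context exactly once (guarded by sync.Once) and stashes the cancel function in an fn.Option, so Stop can fire it, if it ever existed, before closing quit and draining the WaitGroup. A minimal self-contained sketch of the same pattern, assuming only the fn/v2 Option API the diff itself uses (the lifecycle type and its goroutine are illustrative, not lnd code):

```go
package main

import (
	"context"
	"sync"

	"github.com/lightningnetwork/lnd/fn/v2"
)

// lifecycle mirrors reliableSender's shutdown plumbing: a derived
// context for new-style cancellation plus a legacy quit channel.
type lifecycle struct {
	start  sync.Once
	stop   sync.Once
	wg     sync.WaitGroup
	quit   chan struct{}
	cancel fn.Option[context.CancelFunc]
}

func (l *lifecycle) Start(ctx context.Context) error {
	l.start.Do(func() {
		// Derive a cancellable context and remember its cancel func
		// so Stop can tear down everything Start spawned.
		ctx, cancel := context.WithCancel(ctx)
		l.cancel = fn.Some(cancel)

		l.wg.Add(1)
		go func() {
			defer l.wg.Done()
			select {
			case <-ctx.Done(): // cancelled by Stop.
			case <-l.quit: // legacy shutdown path.
			}
		}()
	})
	return nil
}

func (l *lifecycle) Stop() {
	l.stop.Do(func() {
		// Fire the cancel func only if Start ever created one.
		l.cancel.WhenSome(func(fn context.CancelFunc) { fn() })
		close(l.quit)
		l.wg.Wait()
	})
}

func main() {
	l := &lifecycle{quit: make(chan struct{})}
	_ = l.Start(context.Background())
	l.Stop() // unblocks the goroutine via cancel, then waits.
}
```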
@@ -96,7 +103,9 @@ func (s *reliableSender) Stop() {
 // event that the peer is currently offline, this will only write the message to
 // disk. Once the peer reconnects, this message, along with any others pending,
 // will be sent to the peer.
-func (s *reliableSender) sendMessage(msg lnwire.Message, peerPubKey [33]byte) error {
+func (s *reliableSender) sendMessage(ctx context.Context, msg lnwire.Message,
+	peerPubKey [33]byte) error {
+
 	// We'll start by persisting the message to disk. This allows us to
 	// resend the message upon restarts and peer reconnections.
 	if err := s.cfg.MessageStore.AddMessage(msg, peerPubKey); err != nil {
@@ -106,7 +115,7 @@ func (s *reliableSender) sendMessage(msg lnwire.Message, peerPubKey [33]byte) er
 	// Then, we'll spawn a peerHandler for this peer to handle resending its
 	// pending messages while taking into account its connection lifecycle.
 spawnHandler:
-	msgHandler, ok := s.spawnPeerHandler(peerPubKey)
+	msgHandler, ok := s.spawnPeerHandler(ctx, peerPubKey)
 
 	// If the handler wasn't previously active, we can exit now as we know
 	// that the message will be sent once the peer online notification is
@@ -134,7 +143,7 @@ spawnHandler:
 // spawnPeerMsgHandler spawns a peerHandler for the given peer if there isn't
 // one already active. The boolean returned signals whether there was already
 // one active or not.
-func (s *reliableSender) spawnPeerHandler(
+func (s *reliableSender) spawnPeerHandler(ctx context.Context,
 	peerPubKey [33]byte) (peerManager, bool) {
 
 	s.activePeersMtx.Lock()
@@ -152,7 +161,7 @@ func (s *reliableSender) spawnPeerHandler(
 	// peerHandler.
 	if !ok {
 		s.wg.Add(1)
-		go s.peerHandler(msgHandler, peerPubKey)
+		go s.peerHandler(ctx, msgHandler, peerPubKey)
 	}
 
 	return msgHandler, ok
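spawnPeerHandler's shape, a mutex held across both the lookup and the insert, is what guarantees at most one handler goroutine per peer: callers either spawn a fresh handler or learn from the returned bool that one already exists. A hedged sketch of that check-then-spawn pattern with stand-in types (registry and its channel-valued handlers map are illustrative, not lnd's actual fields):

```go
package main

import (
	"context"
	"fmt"
	"sync"
)

// registry keys one long-lived handler goroutine per 33-byte pubkey.
type registry struct {
	mtx      sync.Mutex
	handlers map[[33]byte]chan struct{}
	wg       sync.WaitGroup
}

// spawn returns the peer's handler plus whether one was already active,
// mirroring spawnPeerHandler's (peerManager, bool) return shape.
func (r *registry) spawn(ctx context.Context, key [33]byte) (chan struct{}, bool) {
	r.mtx.Lock()
	defer r.mtx.Unlock()

	h, ok := r.handlers[key]
	if !ok {
		h = make(chan struct{})
		r.handlers[key] = h

		r.wg.Add(1)
		go func() {
			defer r.wg.Done()
			select {
			case <-h: // handler told to retire.
			case <-ctx.Done(): // or torn down with the parent.
			}
		}()
	}

	return h, ok
}

func main() {
	r := &registry{handlers: make(map[[33]byte]chan struct{})}
	ctx, cancel := context.WithCancel(context.Background())

	var key [33]byte
	_, existed := r.spawn(ctx, key) // spawns a fresh handler.
	_, again := r.spawn(ctx, key)   // sees it already active.
	fmt.Println(existed, again)     // false true

	cancel()
	r.wg.Wait()
}
```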
@@ -164,7 +173,9 @@ func (s *reliableSender) spawnPeerHandler(
 // offline will be queued and sent once the peer reconnects.
 //
 // NOTE: This must be run as a goroutine.
-func (s *reliableSender) peerHandler(peerMgr peerManager, peerPubKey [33]byte) {
+func (s *reliableSender) peerHandler(ctx context.Context, peerMgr peerManager,
+	peerPubKey [33]byte) {
+
 	defer s.wg.Done()
 
 	// We'll start by requesting a notification for when the peer
@@ -252,7 +263,7 @@ func (s *reliableSender) peerHandler(peerMgr peerManager, peerPubKey [33]byte) {
 		// check whether it's stale. This guarantees that
 		// AnnounceSignatures are sent at least once if we happen to
 		// already have signatures for both parties.
-		if s.cfg.IsMsgStale(msg) {
+		if s.cfg.IsMsgStale(ctx, msg) {
 			err := s.cfg.MessageStore.DeleteMessage(msg, peerPubKey)
 			if err != nil {
 				log.Errorf("Unable to remove stale %v message "+
@@ -321,7 +332,7 @@ func (s *reliableSender) peerHandler(peerMgr peerManager, peerPubKey [33]byte) {
 
 // resendPendingMsgs retrieves and sends all of the messages within the message
 // store that should be reliably sent to their respective peers.
-func (s *reliableSender) resendPendingMsgs() error {
+func (s *reliableSender) resendPendingMsgs(ctx context.Context) error {
 	// Fetch all of the peers for which we have pending messages for and
 	// spawn a peerMsgHandler for each. Once the peer is seen as online, all
 	// of the pending messages will be sent.
@@ -331,7 +342,7 @@ func (s *reliableSender) resendPendingMsgs() error {
 	}
 
 	for peer := range peers {
-		s.spawnPeerHandler(peer)
+		s.spawnPeerHandler(ctx, peer)
 	}
 
 	return nil
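Taken together, the diff threads one caller-supplied context from Start(ctx) through resendPendingMsgs and every peerHandler goroutine, and into callbacks like IsMsgStale, so shutdown can abort in-flight work instead of waiting on it. A hypothetical callback matching the new func(context.Context, lnwire.Message) bool signature might use that context like this (the graph lookup is stubbed; only the cancellation handling is the point):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/lightningnetwork/lnd/lnwire"
)

// isMsgStale is a hypothetical IsMsgStale implementation: the context
// lets a shutdown short-circuit a slow graph query instead of blocking.
func isMsgStale(ctx context.Context, msg lnwire.Message) bool {
	result := make(chan bool, 1)
	go func() {
		time.Sleep(5 * time.Millisecond) // stand-in for a graph query.
		result <- false
	}()

	select {
	case stale := <-result:
		return stale
	case <-ctx.Done():
		// An aborted lookup reports "not stale" so the message stays
		// in the store and is retried after the next restart.
		return false
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // simulate Stop() having fired the stored cancel func.
	fmt.Println(isMsgStale(ctx, nil)) // false: lookup was cut short.
}
```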