Commit
Fix pc.GracefulClose concurrency > 2
edaniels committed Aug 24, 2024
1 parent 64a837f commit f7c6f15
Showing 2 changed files with 124 additions and 32 deletions.
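Context for the diff below: the commit makes PeerConnection.Close and PeerConnection.GracefulClose safe to call concurrently, any number of times and in any mix, with each flavor of shutdown performed exactly once. The following usage sketch shows the calling pattern the new tests exercise; it is illustrative only, the import path is assumed (use whichever pion/webrtc major version provides GracefulClose), and errors are simply panicked for brevity.

```go
package main

import (
	"sync"

	"github.com/pion/webrtc/v3" // assumed module path; adjust to your version
)

func main() {
	pc, err := webrtc.NewPeerConnection(webrtc.Configuration{})
	if err != nil {
		panic(err)
	}

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// With this fix, any number of concurrent GracefulClose calls is safe:
			// one caller performs the graceful teardown and the rest wait for it.
			if err := pc.GracefulClose(); err != nil {
				panic(err)
			}
		}()
	}
	// A plain Close racing with the graceful closes is also safe; it either
	// performs the normal teardown or returns once a close is already underway.
	if err := pc.Close(); err != nil {
		panic(err)
	}
	wg.Wait()
}
```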
98 changes: 67 additions & 31 deletions peerconnection.go
@@ -56,8 +56,9 @@ type PeerConnection struct {
idpLoginURL *string

isClosed *atomicBool
isGracefulClosed *atomicBool
isGracefulClosedDone chan struct{}
isGracefullyClosingOrClosed bool
isCloseDone chan struct{}
isGracefulCloseDone chan struct{}
isNegotiationNeeded *atomicBool
updateNegotiationNeededFlagOnEmptyChain *atomicBool

@@ -119,8 +120,8 @@ func (api *API) NewPeerConnection(configuration Configuration) (*PeerConnection,
ICECandidatePoolSize: 0,
},
isClosed: &atomicBool{},
isGracefulClosed: &atomicBool{},
isGracefulClosedDone: make(chan struct{}),
isCloseDone: make(chan struct{}),
isGracefulCloseDone: make(chan struct{}),
isNegotiationNeeded: &atomicBool{},
updateNegotiationNeededFlagOnEmptyChain: &atomicBool{},
lastOffer: "",
@@ -2111,22 +2112,40 @@ func (pc *PeerConnection) GracefulClose() error {
func (pc *PeerConnection) close(shouldGracefullyClose bool) error {
// https://www.w3.org/TR/webrtc/#dom-rtcpeerconnection-close (step #1)
// https://www.w3.org/TR/webrtc/#dom-rtcpeerconnection-close (step #2)
alreadyGracefullyClosed := shouldGracefullyClose && pc.isGracefulClosed.swap(true)
if pc.isClosed.swap(true) {
if alreadyGracefullyClosed {
// similar but distinct condition where we may be waiting for some
// other graceful close to finish. Incorrectly using isClosed may
// leak a goroutine.
<-pc.isGracefulClosedDone
}
return nil

pc.mu.Lock()
// A lock in this critical section is needed because pc.isClosed and
// pc.isGracefullyClosingOrClosed are related to each other in that we
// want to make graceful and normal closure one time operations to avoid
// any double closure errors from cropping up.
isAlreadyClosingOrClosed := pc.isClosed.swap(true)
isAlreadyGracefullyClosingOrClosed := pc.isGracefullyClosingOrClosed
if shouldGracefullyClose && !isAlreadyGracefullyClosingOrClosed {
pc.isGracefullyClosingOrClosed = true
}
if shouldGracefullyClose && !alreadyGracefullyClosed {
defer close(pc.isGracefulClosedDone)
pc.mu.Unlock()

if isAlreadyClosingOrClosed {
if !shouldGracefullyClose {
return nil
}
// even if we're already closing, it may not be a graceful close:
// if another caller already owns the graceful close (and will close the
// graceful channel), just wait for that close to finish and return.
if isAlreadyGracefullyClosingOrClosed {
<-pc.isGracefulCloseDone
return nil
}
// Otherwise we need to go through the graceful flow once the current normal closer
// is done since there are extra steps to take with a graceful close.
<-pc.isCloseDone
} else {
defer close(pc.isCloseDone)
}

// https://www.w3.org/TR/webrtc/#dom-rtcpeerconnection-close (step #3)
pc.signalingState.Set(SignalingStateClosed)
if shouldGracefullyClose && !isAlreadyGracefullyClosingOrClosed {
defer close(pc.isGracefulCloseDone)
}

// Try closing everything and collect the errors
// Shutdown strategy:
@@ -2136,6 +2155,34 @@ func (pc *PeerConnection) close(shouldGracefullyClose bool) error {
// continue the chain the Mux has to be closed.
closeErrs := make([]error, 4)

doGracefulCloseOps := func() []error {
if !shouldGracefullyClose {
return nil
}

// these are all non-canon steps
var gracefulCloseErrors []error
if pc.iceTransport != nil {
gracefulCloseErrors = append(gracefulCloseErrors, pc.iceTransport.GracefulStop())
}

pc.ops.GracefulClose()

pc.sctpTransport.lock.Lock()
for _, d := range pc.sctpTransport.dataChannels {
gracefulCloseErrors = append(gracefulCloseErrors, d.GracefulClose())
}
pc.sctpTransport.lock.Unlock()
return gracefulCloseErrors
}

if isAlreadyClosingOrClosed {
return util.FlattenErrs(doGracefulCloseOps())
}

// https://www.w3.org/TR/webrtc/#dom-rtcpeerconnection-close (step #3)
pc.signalingState.Set(SignalingStateClosed)

closeErrs = append(closeErrs, pc.api.interceptor.Close())

// https://www.w3.org/TR/webrtc/#dom-rtcpeerconnection-close (step #4)
@@ -2167,27 +2214,16 @@ func (pc *PeerConnection) close(shouldGracefullyClose bool) error {

// https://www.w3.org/TR/webrtc/#dom-rtcpeerconnection-close (step #8, #9, #10)
if pc.iceTransport != nil {
if shouldGracefullyClose {
// note that it isn't canon to stop gracefully
closeErrs = append(closeErrs, pc.iceTransport.GracefulStop())
} else {
// we will stop gracefully in doGracefulCloseOps
if !shouldGracefullyClose {
closeErrs = append(closeErrs, pc.iceTransport.Stop())
}
}

// https://www.w3.org/TR/webrtc/#dom-rtcpeerconnection-close (step #11)
pc.updateConnectionState(pc.ICEConnectionState(), pc.dtlsTransport.State())

if shouldGracefullyClose {
pc.ops.GracefulClose()

// note that it isn't canon to stop gracefully
pc.sctpTransport.lock.Lock()
for _, d := range pc.sctpTransport.dataChannels {
closeErrs = append(closeErrs, d.GracefulClose())
}
pc.sctpTransport.lock.Unlock()
}
closeErrs = append(closeErrs, doGracefulCloseOps()...)

return util.FlattenErrs(closeErrs)
}
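To make the rewritten close() easier to follow, here is a minimal, self-contained sketch of the coordination pattern it introduces: one flag and one done channel per close flavor, updated under a single lock so each flavor runs at most once, with late callers either returning immediately (plain close) or waiting on the matching done channel (graceful close). Every identifier below (closer, newCloser, closeDone, gracefulDone) is illustrative and not part of the pion/webrtc API.

```go
package main

import (
	"fmt"
	"sync"
)

type closer struct {
	mu              sync.Mutex
	closing         bool          // set by the first close of either flavor
	gracefulClosing bool          // set by the first graceful close
	closeDone       chan struct{} // closed when the normal teardown finishes
	gracefulDone    chan struct{} // closed when the graceful extras finish
}

func newCloser() *closer {
	return &closer{
		closeDone:    make(chan struct{}),
		gracefulDone: make(chan struct{}),
	}
}

func (c *closer) close(graceful bool) {
	// Flag updates happen under one lock so each flavor of close runs only once.
	c.mu.Lock()
	alreadyClosing := c.closing
	c.closing = true
	alreadyGraceful := c.gracefulClosing
	if graceful && !alreadyGraceful {
		c.gracefulClosing = true
	}
	c.mu.Unlock()

	if alreadyClosing {
		if !graceful {
			return // a close is already in flight; nothing more to do
		}
		if alreadyGraceful {
			<-c.gracefulDone // another graceful close owns the work; wait for it
			return
		}
		// A normal close won the race; wait for it, then run only the graceful extras.
		<-c.closeDone
	} else {
		defer close(c.closeDone)
	}

	if graceful && !alreadyGraceful {
		defer close(c.gracefulDone)
	}

	if alreadyClosing {
		fmt.Println("running only the graceful extras")
		return
	}
	fmt.Println("running the full teardown; graceful:", graceful)
}

func main() {
	c := newCloser()
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			c.close(true) // concurrent graceful closes: exactly one does the work
		}()
	}
	c.close(false) // a plain close racing with them is also safe
	wg.Wait()
}
```

The design choice mirrored here is that a graceful close arriving after a plain close still waits for the plain close to finish and then runs only the graceful extras, which is what doGracefulCloseOps handles in the real diff.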
58 changes: 57 additions & 1 deletion peerconnection_close_test.go
@@ -180,7 +180,7 @@ func TestPeerConnection_Close_DuringICE(t *testing.T) {
}
}

func TestPeerConnection_CloseWithIncomingMessages(t *testing.T) {
func TestPeerConnection_GracefulCloseWithIncomingMessages(t *testing.T) {
// Limit runtime in case of deadlocks
lim := test.TimeOut(time.Second * 20)
defer lim.Stop()
@@ -287,3 +287,59 @@ func TestPeerConnection_GracefulCloseWhileOpening(t *testing.T) {
t.Fatal(err)
}
}

func TestPeerConnection_GracefulCloseConcurrent(t *testing.T) {
// Limit runtime in case of deadlocks
lim := test.TimeOut(time.Second * 5)
defer lim.Stop()

report := test.CheckRoutinesStrict(t)
defer report()

pc, err := NewPeerConnection(Configuration{})
if err != nil {
t.Fatal(err)
}

go func() {
if err := pc.GracefulClose(); err != nil {
t.Error(err) // t.Fatal must not be called from a non-test goroutine
}
}()
go func() {
if err := pc.GracefulClose(); err != nil {
t.Error(err)
}
}()
if err := pc.GracefulClose(); err != nil {
t.Fatal(err)
}
}

func TestPeerConnection_GracefulAndNormalCloseConcurrent(t *testing.T) {
// Limit runtime in case of deadlocks
lim := test.TimeOut(time.Second * 5)
defer lim.Stop()

report := test.CheckRoutinesStrict(t)
defer report()

pc, err := NewPeerConnection(Configuration{})
if err != nil {
t.Fatal(err)
}

go func() {
if err := pc.GracefulClose(); err != nil {
t.Error(err) // t.Fatal must not be called from a non-test goroutine
}
}()
go func() {
if err := pc.GracefulClose(); err != nil {
t.Error(err)
}
}()
if err := pc.Close(); err != nil {
t.Fatal(err)
}
}
