author    Guillaume Nault <g.nault@alphalink.fr>  2013-07-24 20:27:45 +0200
committer Dmitry Kozlov <xeb@mail.ru>             2013-07-24 22:32:08 +0400
commit    37e2a417a6cb6a981c85dfdcd3245a48f99a4165 (patch)
tree      a29eee76eae6323d29c62c86492bb674446d5f80 /accel-pppd/triton
parent    31e20f0ce4605b7d7ec416bf88a4e98c90aa6b93 (diff)
triton: Fix race upon termination
The triton_terminate() function works by setting the need_close flag of each triton context, then queuing the context for execution by a triton thread if it is not already running. But if the context is already being run by a triton thread, it may not notice that its need_close flag has been updated, because this flag is only checked at the beginning of ctx_thread(). So if no other event wakes up that context (i.e. if ctx_thread() isn't run again), it will never terminate.

This patch moves the need_close flag check to the end of ctx_thread(), so that a triton context takes the flag into account even if it is updated while the context is running. The context spinlock is also used to protect the need_close flag, as it is concurrently updated by triton_terminate().

Signed-off-by: Guillaume Nault <g.nault@alphalink.fr>
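For illustration, here is a minimal, self-contained sketch of the pattern the patch applies, written against plain POSIX spinlocks instead of triton's internal spin_lock()/spin_unlock() and struct _triton_context_t. The struct worker, worker_pass(), worker_terminate() and on_close() names are hypothetical and exist only for this example; the point is that the termination flag is re-checked under the lock at the end of a worker pass, so a flag set while the pass was running is not lost.

/* Sketch only: plain pthreads stand-ins for triton's context lock and
 * close handling; none of these names belong to the triton API. */
#include <pthread.h>
#include <stdio.h>

struct worker {
	pthread_spinlock_t lock;
	int need_close;                 /* set by the terminating thread */
	void (*close)(struct worker *); /* optional close callback */
};

/* One pass of the worker: events would be processed first, and the
 * termination flag is checked at the end, mirroring the patched
 * ctx_thread() which now tests need_close after its event loop. */
static void worker_pass(struct worker *w)
{
	/* ... event processing would happen here ... */

	pthread_spin_lock(&w->lock);
	if (w->need_close) {
		/* Drop the lock while calling out to user code. */
		pthread_spin_unlock(&w->lock);
		if (w->close)
			w->close(w);
		pthread_spin_lock(&w->lock);
		w->need_close = 0;
	}
	pthread_spin_unlock(&w->lock);
}

/* Counterpart of triton_terminate(): the flag is set under the same
 * lock, so an update racing with a running pass is never missed. */
static void worker_terminate(struct worker *w)
{
	pthread_spin_lock(&w->lock);
	w->need_close = 1;
	pthread_spin_unlock(&w->lock);
	/* A real implementation would also queue the worker for execution
	 * if it is not currently running, as triton_terminate() does. */
}

static void on_close(struct worker *w)
{
	printf("worker %p closed\n", (void *)w);
}

int main(void)
{
	struct worker w = { .need_close = 0, .close = on_close };

	pthread_spin_init(&w.lock, PTHREAD_PROCESS_PRIVATE);
	worker_terminate(&w); /* flag raised before/while the pass runs */
	worker_pass(&w);      /* flag still noticed at the end of the pass */
	pthread_spin_destroy(&w.lock);
	return 0;
}

As in the patch itself, the lock is dropped around the close callback and re-acquired before clearing the flag, so the user-supplied handler never runs with the context spinlock held.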
Diffstat (limited to 'accel-pppd/triton')
-rw-r--r--  accel-pppd/triton/triton.c | 16
1 file changed, 11 insertions, 5 deletions
diff --git a/accel-pppd/triton/triton.c b/accel-pppd/triton/triton.c
index 267cc2b..d4c21e2 100644
--- a/accel-pppd/triton/triton.c
+++ b/accel-pppd/triton/triton.c
@@ -185,11 +185,6 @@ static void ctx_thread(struct _triton_context_t *ctx)
 	uint64_t tt;
 
 	log_debug2("ctx %p %p: enter\n", ctx, ctx->thread);
-	if (ctx->need_close) {
-		if (ctx->ud->close)
-			ctx->ud->close(ctx->ud);
-		ctx->need_close = 0;
-	}
 
 	while (1) {
 		spin_lock(&ctx->lock);
@@ -234,6 +229,17 @@ static void ctx_thread(struct _triton_context_t *ctx)
 			break;
 	}
 
+	spin_lock(&ctx->lock);
+	if (ctx->need_close) {
+		spin_unlock(&ctx->lock);
+		if (ctx->ud->close) {
+			ctx->ud->close(ctx->ud);
+		}
+		spin_lock(&ctx->lock);
+		ctx->need_close = 0;
+	}
+	spin_unlock(&ctx->lock);
+
 	log_debug2("ctx %p %p: exit\n", ctx, ctx->thread);
 }