net: Remove check for k_delayed_work_cancel
k_delayed_work_cancel now only fails if the work item hasn't been submitted, which means it is not in use anyway, so it is safe to reset its data regardless of the return value. Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
This commit is contained in:
parent
26cd8cefd1
commit
9147b53d76
@ -785,9 +785,6 @@ struct http_server_ctx {
|
||||
|
||||
/** URL's length */
|
||||
u16_t url_len;
|
||||
|
||||
/** Has the request timer been cancelled. */
|
||||
u8_t timer_cancelled;
|
||||
} req;
|
||||
|
||||
#if defined(CONFIG_HTTPS)
|
||||
|
||||
@ -300,11 +300,6 @@ struct net_app_ctx {
|
||||
/** DTLS final timer. Connection is terminated if this expires.
|
||||
*/
|
||||
struct k_delayed_work fin_timer;
|
||||
|
||||
/** Timer flag telling whether the dtls timer has been
|
||||
* cancelled or not.
|
||||
*/
|
||||
bool fin_timer_cancelled;
|
||||
} dtls;
|
||||
#endif
|
||||
|
||||
|
||||
@ -222,12 +222,7 @@ struct net_ipv6_nbr_data *net_ipv6_get_nbr_by_index(u8_t idx)
|
||||
|
||||
static inline void nbr_clear_ns_pending(struct net_ipv6_nbr_data *data)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = k_delayed_work_cancel(&data->send_ns);
|
||||
if (ret < 0) {
|
||||
NET_DBG("Cannot cancel NS work (%d)", ret);
|
||||
}
|
||||
k_delayed_work_cancel(&data->send_ns);
|
||||
|
||||
if (data->pending) {
|
||||
net_pkt_unref(data->pending);
|
||||
|
||||
@ -100,7 +100,6 @@ static struct tcp_backlog_entry {
|
||||
u32_t send_seq;
|
||||
u32_t send_ack;
|
||||
struct k_delayed_work ack_timer;
|
||||
bool cancelled;
|
||||
} tcp_backlog[CONFIG_NET_TCP_BACKLOG_SIZE];
|
||||
|
||||
static void backlog_ack_timeout(struct k_work *work)
|
||||
@ -108,11 +107,9 @@ static void backlog_ack_timeout(struct k_work *work)
|
||||
struct tcp_backlog_entry *backlog =
|
||||
CONTAINER_OF(work, struct tcp_backlog_entry, ack_timer);
|
||||
|
||||
if (!backlog->cancelled) {
|
||||
NET_DBG("Did not receive ACK in %dms", ACK_TIMEOUT);
|
||||
NET_DBG("Did not receive ACK in %dms", ACK_TIMEOUT);
|
||||
|
||||
send_reset(backlog->tcp->context, &backlog->remote);
|
||||
}
|
||||
send_reset(backlog->tcp->context, &backlog->remote);
|
||||
|
||||
memset(backlog, 0, sizeof(struct tcp_backlog_entry));
|
||||
}
|
||||
@ -265,17 +262,8 @@ static int tcp_backlog_ack(struct net_pkt *pkt, struct net_context *context)
|
||||
context->tcp->send_seq = tcp_backlog[r].send_seq;
|
||||
context->tcp->send_ack = tcp_backlog[r].send_ack;
|
||||
|
||||
if (k_delayed_work_cancel(&tcp_backlog[r].ack_timer) < 0) {
|
||||
/* Too late to cancel - just set flag for worker.
|
||||
* TODO: Note that in this case, we can be preempted
|
||||
* anytime (could have been preempted even before we did
|
||||
* the check), so access to tcp_backlog should be synchronized
|
||||
* between this function and worker.
|
||||
*/
|
||||
tcp_backlog[r].cancelled = true;
|
||||
} else {
|
||||
memset(&tcp_backlog[r], 0, sizeof(struct tcp_backlog_entry));
|
||||
}
|
||||
k_delayed_work_cancel(&tcp_backlog[r].ack_timer);
|
||||
memset(&tcp_backlog[r], 0, sizeof(struct tcp_backlog_entry));
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -300,17 +288,8 @@ static int tcp_backlog_rst(struct net_pkt *pkt)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (k_delayed_work_cancel(&tcp_backlog[r].ack_timer) < 0) {
|
||||
/* Too late to cancel - just set flag for worker.
|
||||
* TODO: Note that in this case, we can be preempted
|
||||
* anytime (could have been preempted even before we did
|
||||
* the check), so access to tcp_backlog should be synchronized
|
||||
* between this function and worker.
|
||||
*/
|
||||
tcp_backlog[r].cancelled = true;
|
||||
} else {
|
||||
memset(&tcp_backlog[r], 0, sizeof(struct tcp_backlog_entry));
|
||||
}
|
||||
k_delayed_work_cancel(&tcp_backlog[r].ack_timer);
|
||||
memset(&tcp_backlog[r], 0, sizeof(struct tcp_backlog_entry));
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -320,11 +299,9 @@ static void handle_fin_timeout(struct k_work *work)
|
||||
struct net_tcp *tcp =
|
||||
CONTAINER_OF(work, struct net_tcp, fin_timer);
|
||||
|
||||
if (!tcp->fin_timer_cancelled) {
|
||||
NET_DBG("Did not receive FIN in %dms", FIN_TIMEOUT);
|
||||
NET_DBG("Did not receive FIN in %dms", FIN_TIMEOUT);
|
||||
|
||||
net_context_unref(tcp->context);
|
||||
}
|
||||
net_context_unref(tcp->context);
|
||||
}
|
||||
|
||||
static void handle_ack_timeout(struct k_work *work)
|
||||
@ -332,10 +309,6 @@ static void handle_ack_timeout(struct k_work *work)
|
||||
/* This means that we did not receive ACK response in time. */
|
||||
struct net_tcp *tcp = CONTAINER_OF(work, struct net_tcp, ack_timer);
|
||||
|
||||
if (tcp->ack_timer_cancelled) {
|
||||
return;
|
||||
}
|
||||
|
||||
NET_DBG("Did not receive ACK in %dms while in %s", ACK_TIMEOUT,
|
||||
net_tcp_state_str(net_tcp_get_state(tcp)));
|
||||
|
||||
@ -641,13 +614,8 @@ int net_context_unref(struct net_context *context)
|
||||
continue;
|
||||
}
|
||||
|
||||
if (k_delayed_work_cancel(&tcp_backlog[i].ack_timer) ==
|
||||
-EINPROGRESS) {
|
||||
tcp_backlog[i].cancelled = true;
|
||||
} else {
|
||||
memset(&tcp_backlog[i], 0,
|
||||
sizeof(struct tcp_backlog_entry));
|
||||
}
|
||||
k_delayed_work_cancel(&tcp_backlog[i].ack_timer);
|
||||
memset(&tcp_backlog[i], 0, sizeof(tcp_backlog[i]));
|
||||
}
|
||||
|
||||
net_tcp_release(context->tcp);
|
||||
@ -701,7 +669,6 @@ int net_context_put(struct net_context *context)
|
||||
"disposing yet (waiting %dms)", FIN_TIMEOUT);
|
||||
k_delayed_work_submit(&context->tcp->fin_timer,
|
||||
FIN_TIMEOUT);
|
||||
context->tcp->fin_timer_cancelled = false;
|
||||
queue_fin(context);
|
||||
return 0;
|
||||
}
|
||||
@ -1247,7 +1214,6 @@ NET_CONN_CB(tcp_established)
|
||||
* would be stuck forever.
|
||||
*/
|
||||
k_delayed_work_submit(&context->tcp->ack_timer, ACK_TIMEOUT);
|
||||
context->tcp->ack_timer_cancelled = false;
|
||||
}
|
||||
|
||||
send_ack(context, &conn->remote_addr, false);
|
||||
|
||||
@ -248,13 +248,11 @@ struct net_tcp *net_tcp_alloc(struct net_context *context)
|
||||
|
||||
static void ack_timer_cancel(struct net_tcp *tcp)
|
||||
{
|
||||
tcp->ack_timer_cancelled = true;
|
||||
k_delayed_work_cancel(&tcp->ack_timer);
|
||||
}
|
||||
|
||||
static void fin_timer_cancel(struct net_tcp *tcp)
|
||||
{
|
||||
tcp->fin_timer_cancelled = true;
|
||||
k_delayed_work_cancel(&tcp->fin_timer);
|
||||
}
|
||||
|
||||
|
||||
@ -135,15 +135,8 @@ struct net_tcp {
|
||||
u32_t fin_sent : 1;
|
||||
/* An inbound FIN packet has been received */
|
||||
u32_t fin_rcvd : 1;
|
||||
/* Tells if ack timer has been already cancelled. It might happen
|
||||
* that the timer is executed even if it is cancelled, this is because
|
||||
* of various timing issues when timer is scheduled to run.
|
||||
*/
|
||||
u32_t ack_timer_cancelled : 1;
|
||||
/* Tells if fin timer has been already cancelled. */
|
||||
u32_t fin_timer_cancelled : 1;
|
||||
/** Remaining bits in this u32_t */
|
||||
u32_t _padding : 11;
|
||||
u32_t _padding : 13;
|
||||
|
||||
/** Accept callback to be called when the connection has been
|
||||
* established.
|
||||
|
||||
@ -1041,8 +1041,6 @@ static int dtls_timing_get_delay(void *data)
|
||||
|
||||
static void dtls_cleanup(struct net_app_ctx *ctx, bool cancel_timer)
|
||||
{
|
||||
ctx->dtls.fin_timer_cancelled = true;
|
||||
|
||||
if (cancel_timer) {
|
||||
k_delayed_work_cancel(&ctx->dtls.fin_timer);
|
||||
}
|
||||
@ -1060,11 +1058,9 @@ static void dtls_timeout(struct k_work *work)
|
||||
struct net_app_ctx *ctx =
|
||||
CONTAINER_OF(work, struct net_app_ctx, dtls.fin_timer);
|
||||
|
||||
if (!ctx->dtls.fin_timer_cancelled) {
|
||||
NET_DBG("Did not receive DTLS traffic in %dms", DTLS_TIMEOUT);
|
||||
NET_DBG("Did not receive DTLS traffic in %dms", DTLS_TIMEOUT);
|
||||
|
||||
dtls_cleanup(ctx, false);
|
||||
}
|
||||
dtls_cleanup(ctx, false);
|
||||
}
|
||||
|
||||
enum net_verdict _net_app_dtls_established(struct net_conn *conn,
|
||||
@ -1117,12 +1113,10 @@ enum net_verdict _net_app_dtls_established(struct net_conn *conn,
|
||||
|
||||
k_fifo_put(&ctx->tls.mbedtls.ssl_ctx.tx_rx_fifo, (void *)rx_data);
|
||||
|
||||
ctx->dtls.fin_timer_cancelled = true;
|
||||
k_delayed_work_cancel(&ctx->dtls.fin_timer);
|
||||
|
||||
k_yield();
|
||||
|
||||
ctx->dtls.fin_timer_cancelled = false;
|
||||
k_delayed_work_submit(&ctx->dtls.fin_timer, DTLS_TIMEOUT);
|
||||
|
||||
return NET_OK;
|
||||
@ -1226,7 +1220,6 @@ static int accept_dtls(struct net_app_ctx *ctx,
|
||||
|
||||
ctx->dtls.ctx = dtls_context;
|
||||
|
||||
ctx->dtls.fin_timer_cancelled = false;
|
||||
k_delayed_work_submit(&ctx->dtls.fin_timer, DTLS_TIMEOUT);
|
||||
|
||||
return 0;
|
||||
@ -1279,17 +1272,6 @@ void _net_app_tls_received(struct net_context *context,
|
||||
net_pkt_unref(pkt);
|
||||
return;
|
||||
} else {
|
||||
if (ctx->dtls.fin_timer_cancelled) {
|
||||
if (pkt) {
|
||||
net_pkt_unref(pkt);
|
||||
pkt = NULL;
|
||||
}
|
||||
|
||||
ctx->dtls.fin_timer_cancelled = false;
|
||||
|
||||
goto dtls_disconnect;
|
||||
}
|
||||
|
||||
ret = accept_dtls(ctx, context, pkt);
|
||||
if (ret < 0) {
|
||||
NET_DBG("Cannot accept new DTLS "
|
||||
|
||||
@ -152,7 +152,6 @@ static int http_add_chunk(struct net_pkt *pkt, s32_t timeout, const char *str)
|
||||
|
||||
static void req_timer_cancel(struct http_server_ctx *ctx)
|
||||
{
|
||||
ctx->req.timer_cancelled = true;
|
||||
k_delayed_work_cancel(&ctx->req.timer);
|
||||
|
||||
NET_DBG("Context %p request timer cancelled", ctx);
|
||||
@ -164,10 +163,6 @@ static void req_timeout(struct k_work *work)
|
||||
struct http_server_ctx,
|
||||
req.timer);
|
||||
|
||||
if (ctx->req.timer_cancelled) {
|
||||
return;
|
||||
}
|
||||
|
||||
NET_DBG("Context %p request timeout", ctx);
|
||||
|
||||
net_context_put(ctx->req.net_ctx);
|
||||
@ -194,8 +189,6 @@ static void pkt_sent(struct net_context *context,
|
||||
NET_DBG("Context %p starting timer", ctx);
|
||||
|
||||
k_delayed_work_submit(&ctx->req.timer, timeout);
|
||||
|
||||
ctx->req.timer_cancelled = false;
|
||||
}
|
||||
|
||||
/* Note that if the timeout is K_FOREVER, we do not close
|
||||
|
||||
Loading…
Reference in New Issue
Block a user