diff --git a/Documentation/networking/tls-offload.rst b/Documentation/networking/tls-offload.rst
index d134d63307e7..048e5ca44824 100644
--- a/Documentation/networking/tls-offload.rst
+++ b/Documentation/networking/tls-offload.rst
@@ -206,7 +206,11 @@ TX
 
 Segments transmitted from an offloaded socket can get out of sync
 in similar ways to the receive side-retransmissions - local drops
-are possible, though network reorders are not.
+are possible, though network reorders are not. There are currently
+two mechanisms for dealing with out of order segments.
+
+Crypto state rebuilding
+~~~~~~~~~~~~~~~~~~~~~~~
 
 Whenever an out of order segment is transmitted the driver provides
 the device with enough information to perform cryptographic operations.
@@ -225,6 +229,35 @@ was just a retransmission. The former is simpler, and does not require
 retransmission detection therefore it is the recommended method
 until such time it is proven inefficient.
 
+Next record sync
+~~~~~~~~~~~~~~~~
+
+Whenever an out of order segment is detected the driver requests
+that the ``ktls`` software fallback code encrypt it. If the segment's
+sequence number is lower than expected the driver assumes retransmission
+and doesn't change device state. If the segment is in the future, it
+may imply a local drop, so the driver asks the stack to sync the device
+to the next record state and falls back to software.
+
+A resync request is indicated with:
+
+.. code-block:: c
+
+  void tls_offload_tx_resync_request(struct sock *sk)
+
+Until resync is complete the driver should not access its expected TCP
+sequence number (as it will be updated from a different context).
+The following helper should be used to test whether resync is complete:
+
+.. code-block:: c
+
+  bool tls_offload_tx_resync_pending(struct sock *sk)
+
+The next time ``ktls`` pushes a record it will first send its TCP sequence
+number and TLS record number to the driver. The stack will also make sure
+that the new record starts on a segment boundary (like it does when
+the connection is initially added).
+
 RX
 --
 
diff --git a/include/net/tls.h b/include/net/tls.h
index 9b49baecc4a8..63e473420b00 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -212,6 +212,11 @@ struct tls_offload_context_tx {
 
 enum tls_context_flags {
 	TLS_RX_SYNC_RUNNING = 0,
+	/* Unlike RX, where resync is driven entirely by the core, in TX only
+	 * the driver knows when things went out of sync, so we need the flag
+	 * to be atomic.
+	 */
+	TLS_TX_SYNC_SCHED = 1,
 };
 
 struct cipher_context {
@@ -619,6 +624,24 @@ tls_offload_rx_resync_set_type(struct sock *sk, enum tls_offload_sync_type type)
 	tls_offload_ctx_rx(tls_ctx)->resync_type = type;
 }
 
+static inline void tls_offload_tx_resync_request(struct sock *sk)
+{
+	struct tls_context *tls_ctx = tls_get_ctx(sk);
+
+	WARN_ON(test_and_set_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags));
+}
+
+/* Driver's seq tracking has to be disabled until resync succeeds */
+static inline bool tls_offload_tx_resync_pending(struct sock *sk)
+{
+	struct tls_context *tls_ctx = tls_get_ctx(sk);
+	bool ret;
+
+	ret = test_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
+	smp_mb__after_atomic();
+	return ret;
+}
+
 int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
 		      unsigned char *record_type);
 void tls_register_device(struct tls_device *device);
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index b35a3b902bfa..40076f423dcb 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -209,6 +209,29 @@ void tls_device_free_resources_tx(struct sock *sk)
 	tls_free_partial_record(sk, tls_ctx);
 }
 
+static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx,
+				 u32 seq)
+{
+	struct net_device *netdev;
+	struct sk_buff *skb;
+	u8 *rcd_sn;
+
+	skb = tcp_write_queue_tail(sk);
+	if (skb)
+		TCP_SKB_CB(skb)->eor = 1;
+
+	rcd_sn = tls_ctx->tx.rec_seq;
+
+	down_read(&device_offload_lock);
+	netdev = tls_ctx->netdev;
+	if (netdev)
+		netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn,
+						   TLS_OFFLOAD_CTX_DIR_TX);
+	up_read(&device_offload_lock);
+
+	clear_bit_unlock(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
+}
+
 static void tls_append_frag(struct tls_record_info *record,
 			    struct page_frag *pfrag,
 			    int size)
@@ -264,6 +287,10 @@ static int tls_push_record(struct sock *sk,
 	list_add_tail(&record->list, &offload_ctx->records_list);
 	spin_unlock_irq(&offload_ctx->lock);
 	offload_ctx->open_record = NULL;
+
+	if (test_bit(TLS_TX_SYNC_SCHED, &ctx->flags))
+		tls_device_resync_tx(sk, ctx, tp->write_seq);
+
 	tls_advance_record_sn(sk, prot, &ctx->tx);
 
 	for (i = 0; i < record->num_frags; i++) {
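
---

For illustration, a minimal sketch of how a driver's TX datapath might consume the two new
helpers. The ``mydrv_*`` names and the ``expected_tcp_seq`` field are hypothetical and not part
of this patch; only ``tls_offload_tx_resync_request()``, ``tls_offload_tx_resync_pending()`` and
the existing ``tls_encrypt_skb()`` software fallback are real interfaces:

.. code-block:: c

	/* Hypothetical driver-side sketch, not part of this patch: handle a TX
	 * segment that may be out of order with respect to the device state.
	 */
	#include <net/tcp.h>
	#include <net/tls.h>

	struct mydrv_tls_tx_state {
		u32 expected_tcp_seq;	/* next TCP seq the device expects */
	};

	static struct sk_buff *mydrv_tls_handle_tx(struct mydrv_tls_tx_state *state,
						    struct sk_buff *skb)
	{
		struct sock *sk = skb->sk;
		u32 seq = ntohl(tcp_hdr(skb)->seq);
		u32 datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));

		/* While a resync is pending the core may update the record state
		 * from another context - do not trust or touch expected_tcp_seq,
		 * just fall back to software encryption.
		 */
		if (tls_offload_tx_resync_pending(sk))
			return tls_encrypt_skb(skb);

		if (likely(seq == state->expected_tcp_seq)) {
			state->expected_tcp_seq += datalen;
			return skb;		/* device encrypts in line */
		}

		if (before(seq, state->expected_tcp_seq))
			/* Retransmission - device state is still valid, encrypt
			 * just this segment in software.
			 */
			return tls_encrypt_skb(skb);

		/* Segment is in the future, most likely a local drop: ask the
		 * stack to sync the device to the start of the next record and
		 * keep using the fallback until the pending flag clears.
		 */
		tls_offload_tx_resync_request(sk);
		return tls_encrypt_skb(skb);
	}

Once the core pushes the next record it invokes the driver's ``tls_dev_resync`` callback with the
new TCP sequence number and record number, at which point the driver can update its expected
sequence; the pending flag is cleared by the core right after that callback returns.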