author	Boris Pismenny <borisp@mellanox.com>	2018-07-13 14:33:43 +0300
committer	David S. Miller <davem@davemloft.net>	2018-07-16 00:13:11 -0700
commit	4799ac81e52a72a6404827bf2738337bb581a174
tree	0d75fbecd761c35507d05122d9d26855c7e6c4de /net/tls/tls_sw.c
parent	tls: Fill software context without allocation
tls: Add rx inline crypto offload
This patch completes the generic infrastructure to offload TLS crypto to a
network device. It enables the kernel to skip decryption and authentication
of skbs marked as decrypted by the NIC. In the fast path, all received
packets are decrypted by the NIC and performance is comparable to plain TCP.

This infrastructure doesn't require a TCP offload engine. Instead, the NIC
only decrypts packets that contain the expected TCP sequence number;
out-of-order TCP packets are passed up unmodified. As a result, in the worst
case a received TLS record consists of both plaintext and ciphertext
packets. Such partially decrypted records must be re-encrypted, only to be
decrypted again in software.

The notable differences between SW KTLS Rx and this offload are as follows:
1. Partial decryption - software must handle the case of a TLS record that
   was only partially decrypted by HW. This can happen due to packet
   reordering.
2. Resynchronization - tls_read_size calls the device driver to
   resynchronize HW after HW has lost track of TLS record framing in the
   TCP stream.

Signed-off-by: Boris Pismenny <borisp@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
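[Editor's sketch, for context] A minimal illustration of the driver side of
this contract, assuming the per-skb "decrypted" bit (available under
CONFIG_TLS_DEVICE) that this patch series introduces; the helper name
nic_rx_mark_decrypted and its hw_decrypted parameter are hypothetical and
not part of this patch:

#include <linux/skbuff.h>

/* Hypothetical NIC driver RX helper: record whether the hardware
 * decrypted this packet's TLS payload inline. tls_device_decrypted()
 * later inspects this per-skb state so that decrypt_skb_update() can
 * skip software crypto for fully decrypted records. Packets that miss
 * the expected TCP sequence number (e.g. out-of-order) stay unmarked
 * and reach the TLS layer as ciphertext, yielding the partially
 * decrypted records described above.
 */
static void nic_rx_mark_decrypted(struct sk_buff *skb, bool hw_decrypted)
{
#ifdef CONFIG_TLS_DEVICE
	skb->decrypted = hw_decrypted;
#endif
}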
Diffstat (limited to 'net/tls/tls_sw.c')
 net/tls/tls_sw.c | 24 +++++++++++++++++++-----
 1 file changed, 19 insertions(+), 5 deletions(-)
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 5f7d70b24be6..fe5735c57774 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -654,16 +654,25 @@ static struct sk_buff *tls_wait_data(struct sock *sk, int flags,
 }
 
 static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
-			      struct scatterlist *sgout)
+			      struct scatterlist *sgout, bool *zc)
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
 	struct strp_msg *rxm = strp_msg(skb);
 	int err = 0;
 
-	err = decrypt_skb(sk, skb, sgout);
+#ifdef CONFIG_TLS_DEVICE
+	err = tls_device_decrypted(sk, skb);
 	if (err < 0)
 		return err;
+#endif
+	if (!ctx->decrypted) {
+		err = decrypt_skb(sk, skb, sgout);
+		if (err < 0)
+			return err;
+	} else {
+		*zc = false;
+	}
 
 	rxm->offset += tls_ctx->rx.prepend_size;
 	rxm->full_len -= tls_ctx->rx.overhead_size;
@@ -820,7 +829,7 @@ int tls_sw_recvmsg(struct sock *sk,
 			if (err < 0)
 				goto fallback_to_reg_recv;
 
-			err = decrypt_skb_update(sk, skb, sgin);
+			err = decrypt_skb_update(sk, skb, sgin, &zc);
 			for (; pages > 0; pages--)
 				put_page(sg_page(&sgin[pages]));
 			if (err < 0) {
@@ -829,7 +838,7 @@ int tls_sw_recvmsg(struct sock *sk,
 			}
 		} else {
 fallback_to_reg_recv:
-			err = decrypt_skb_update(sk, skb, NULL);
+			err = decrypt_skb_update(sk, skb, NULL, &zc);
 			if (err < 0) {
 				tls_err_abort(sk, EBADMSG);
 				goto recv_end;
@@ -884,6 +893,7 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
 	int err = 0;
 	long timeo;
 	int chunk;
+	bool zc;
 
 	lock_sock(sk);
@@ -900,7 +910,7 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
 	}
 
 	if (!ctx->decrypted) {
-		err = decrypt_skb_update(sk, skb, NULL);
+		err = decrypt_skb_update(sk, skb, NULL, &zc);
 		if (err < 0) {
 			tls_err_abort(sk, EBADMSG);
@@ -989,6 +999,10 @@ static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
 		goto read_failure;
 	}
 
+#ifdef CONFIG_TLS_DEVICE
+	handle_device_resync(strp->sk, TCP_SKB_CB(skb)->seq + rxm->offset,
+			     *(u64*)tls_ctx->rx.rec_seq);
+#endif
 	return data_len + TLS_HEADER_SIZE;
 
 read_failure: