From d912bb7677f46d78a3cde8a4afd45a3fca4b34e9 Mon Sep 17 00:00:00 2001
From: Nikos Mavrogiannopoulos
Date: Tue, 1 Nov 2011 13:39:56 +0100
Subject: crypto: Add CRYPTO_ALG_KERN_DRIVER_ONLY flag

The added CRYPTO_ALG_KERN_DRIVER_ONLY flag indicates whether a cipher
is only available via a kernel driver. If the cipher implementation
might be available to userspace as well, e.g. by using an instruction
set or by porting the kernel code, then it must not be set.

Signed-off-by: Nikos Mavrogiannopoulos
Signed-off-by: Herbert Xu
---
 include/linux/crypto.h | 5 +++++
 1 file changed, 5 insertions(+)

(limited to 'include')

diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 8a94217b298e..a8fa6541b86c 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -75,6 +75,11 @@
  */
 #define CRYPTO_ALG_INSTANCE		0x00000800
 
+/* Set this bit if the algorithm provided is hardware accelerated but
+ * not available to userspace via instruction set or so.
+ */
+#define CRYPTO_ALG_KERN_DRIVER_ONLY	0x00001000
+
 /*
  * Transform masks and values (for crt_flags).
  */
--
cgit v1.2.3


From b85a088f15f2070b7180735a231012843a5ac96c Mon Sep 17 00:00:00 2001
From: Alexey Dobriyan
Date: Sat, 14 Jan 2012 21:44:49 +0300
Subject: crypto: sha512 - use standard ror64()

Use a standard ror64() instead of the hand-written rotate. There is no
standard ror64() yet, so create it. The only difference is that the
shift value is an "unsigned int" rather than a uint64_t (there is no
reason for a 64-bit shift count). With this, gcc starts to emit native
ROR instructions, which it currently does not for some reason. This
should make the code faster.

Patch survives in-tree crypto test and ping flood with hmac(sha512) on.

Signed-off-by: Alexey Dobriyan
Signed-off-by: Herbert Xu
---
 include/linux/bitops.h | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

(limited to 'include')

diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 3c1063acb2ab..94300fe46cce 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -55,6 +55,26 @@ static inline unsigned long hweight_long(unsigned long w)
 	return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
 }
 
+/**
+ * rol64 - rotate a 64-bit value left
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u64 rol64(__u64 word, unsigned int shift)
+{
+	return (word << shift) | (word >> (64 - shift));
+}
+
+/**
+ * ror64 - rotate a 64-bit value right
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u64 ror64(__u64 word, unsigned int shift)
+{
+	return (word >> shift) | (word << (64 - shift));
+}
+
 /**
  * rol32 - rotate a 32-bit value left
  * @word: value to rotate
--
cgit v1.2.3
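For context, the consumer of ror64() here is the generic SHA-512 code,
whose big-sigma functions are pure rotate-and-xor expressions. Below is a
minimal standalone userspace sketch of the same pattern; the helper is
open-coded so the snippet compiles on its own, and while e0() mirrors the
naming convention in sha512_generic.c, this is an illustration rather than
the kernel code:

    #include <stdint.h>
    #include <stdio.h>

    /* Open-coded equivalent of the kernel's ror64(); well defined for
     * 0 < shift < 64. */
    static inline uint64_t ror64(uint64_t word, unsigned int shift)
    {
            return (word >> shift) | (word << (64 - shift));
    }

    /* SHA-512 "big sigma 0": the kind of expression the sha512 patch
     * rewrites in terms of ror64() so gcc can emit native ROR. */
    static inline uint64_t e0(uint64_t x)
    {
            return ror64(x, 28) ^ ror64(x, 34) ^ ror64(x, 39);
    }

    int main(void)
    {
            printf("e0 = %016llx\n",
                   (unsigned long long)e0(0x0123456789abcdefULL));
            return 0;
    }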
From 4b004346feab6b431f3e1f89ef692e3a4186fdfd Mon Sep 17 00:00:00 2001
From: Mark Brown
Date: Tue, 17 Jan 2012 23:34:26 +0000
Subject: crypto: Add bulk algorithm registration interface

Hardware crypto engines frequently need to register a selection of
different algorithms with the core. Simplify their code slightly,
especially the error handling, by providing functions that register a
number of algorithms in a single call.

Signed-off-by: Mark Brown
Signed-off-by: Herbert Xu
---
 include/linux/crypto.h | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'include')

diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index a8fa6541b86c..48ce5479386c 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -314,6 +314,8 @@ struct crypto_alg {
  */
 int crypto_register_alg(struct crypto_alg *alg);
 int crypto_unregister_alg(struct crypto_alg *alg);
+int crypto_register_algs(struct crypto_alg *algs, int count);
+int crypto_unregister_algs(struct crypto_alg *algs, int count);
 
 /*
  * Algorithm query interface.
--
cgit v1.2.3


From 2dc9b5dbdef09840de852a4f0cc6a9c9eece7220 Mon Sep 17 00:00:00 2001
From: Steffen Klassert
Date: Fri, 9 Mar 2012 07:20:49 +0100
Subject: padata: Fix race on sequence number wrap

When padata_do_parallel() is called from multiple cpus for the same
padata instance, we can get object reordering on sequence number wrap,
because testing for the wrap and resetting the sequence number must
happen atomically but is implemented with two separate atomic
operations.

This patch fixes the race by converting the sequence number from an
atomic_t to an unsigned int and protecting access to it with a
spin_lock. As a side effect, we get rid of the sequence number wrap
handling, because an unsigned int now wraps back to zero without the
need to do anything.

Signed-off-by: Steffen Klassert
Signed-off-by: Herbert Xu
---
 include/linux/padata.h | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

(limited to 'include')

diff --git a/include/linux/padata.h b/include/linux/padata.h
index 4633b2f726b6..86292beebfe2 100644
--- a/include/linux/padata.h
+++ b/include/linux/padata.h
@@ -46,7 +46,6 @@ struct padata_priv {
 	struct list_head	list;
 	struct parallel_data	*pd;
 	int			cb_cpu;
-	int			seq_nr;
 	int			info;
 	void			(*parallel)(struct padata_priv *padata);
 	void			(*serial)(struct padata_priv *padata);
@@ -116,7 +115,6 @@ struct padata_cpumask {
  * @pinst: padata instance.
  * @pqueue: percpu padata queues used for parallelization.
  * @squeue: percpu padata queues used for serialuzation.
- * @seq_nr: The sequence number that will be attached to the next object.
  * @reorder_objects: Number of objects waiting in the reorder queues.
  * @refcnt: Number of objects holding a reference on this parallel_data.
  * @max_seq_nr: Maximal used sequence number.
@@ -129,12 +127,12 @@ struct parallel_data {
 	struct padata_instance		*pinst;
 	struct padata_parallel_queue	__percpu *pqueue;
 	struct padata_serial_queue	__percpu *squeue;
-	atomic_t			seq_nr;
 	atomic_t			reorder_objects;
 	atomic_t			refcnt;
-	unsigned int			max_seq_nr;
 	struct padata_cpumask		cpumask;
 	spinlock_t			lock ____cacheline_aligned;
+	spinlock_t			seq_lock;
+	unsigned int			seq_nr;
 	unsigned int			processed;
 	struct timer_list		timer;
 };
--
cgit v1.2.3
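Taken together, the two crypto patches suggest a usage pattern worth
spelling out. Here is a hedged sketch of a driver using the bulk
interface; the driver name, priorities, and algorithm entries are
hypothetical, and the actual cipher operations are omitted. Only
crypto_register_algs()/crypto_unregister_algs() and
CRYPTO_ALG_KERN_DRIVER_ONLY come from the patches above:

    #include <linux/kernel.h>
    #include <linux/module.h>
    #include <linux/crypto.h>

    /* Hypothetical driver: two hardware-only ciphers registered in one
     * call. A real driver must also fill in .cra_ctxsize and the cipher
     * operations (.cra_u); they are omitted here for brevity. */
    static struct crypto_alg example_algs[] = { {
            .cra_name        = "ecb(aes)",
            .cra_driver_name = "ecb-aes-example",
            .cra_priority    = 300,
            .cra_flags       = CRYPTO_ALG_TYPE_CIPHER |
                               CRYPTO_ALG_KERN_DRIVER_ONLY,
            .cra_blocksize   = 16,
            .cra_module      = THIS_MODULE,
    }, {
            .cra_name        = "cbc(aes)",
            .cra_driver_name = "cbc-aes-example",
            .cra_priority    = 300,
            .cra_flags       = CRYPTO_ALG_TYPE_CIPHER |
                               CRYPTO_ALG_KERN_DRIVER_ONLY,
            .cra_blocksize   = 16,
            .cra_module      = THIS_MODULE,
    } };

    static int __init example_init(void)
    {
            /* One call replaces a register/unwind loop per algorithm. */
            return crypto_register_algs(example_algs,
                                        ARRAY_SIZE(example_algs));
    }

    static void __exit example_exit(void)
    {
            crypto_unregister_algs(example_algs, ARRAY_SIZE(example_algs));
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");

The padata fix likewise reduces to a small pattern: perform the read,
increment, and wrap of the sequence number under one lock, and let
unsigned arithmetic handle the wrap. A paraphrased sketch follows; the
helper names are illustrative and not taken from the patch itself:

    #include <linux/spinlock.h>
    #include <linux/padata.h>

    /* Illustrative helpers: the point is that read, increment, and wrap
     * of seq_nr happen atomically under one spinlock. */
    static void example_seq_init(struct parallel_data *pd)
    {
            spin_lock_init(&pd->seq_lock);
            pd->seq_nr = 0;
    }

    static unsigned int example_next_seq_nr(struct parallel_data *pd)
    {
            unsigned int seq_nr;

            spin_lock(&pd->seq_lock);
            seq_nr = pd->seq_nr++; /* unsigned overflow wraps to zero */
            spin_unlock(&pd->seq_lock);

            return seq_nr;
    }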