mirror of
				git://git.openwrt.org/openwrt/openwrt.git
				synced 2025-10-31 05:54:26 -04:00 
			
		
		
		
	
		
			
				
	
	
		
			19466 lines
		
	
	
		
			576 KiB
		
	
	
	
		
			Diff
		
	
	
	
	
	
			
		
		
	
	
			19466 lines
		
	
	
		
			576 KiB
		
	
	
	
		
			Diff
		
	
	
	
	
	
| --- a/crypto/Kconfig
 | |
| +++ b/crypto/Kconfig
 | |
| @@ -678,3 +678,6 @@
 | |
|  source "drivers/crypto/Kconfig"
 | |
|  
 | |
|  endif	# if CRYPTO
 | |
| +
 | |
| +source "crypto/ocf/Kconfig"
 | |
| +
 | |
| --- a/crypto/Makefile
 | |
| +++ b/crypto/Makefile
 | |
| @@ -72,6 +72,8 @@
 | |
|  obj-$(CONFIG_CRYPTO_PRNG) += prng.o
 | |
|  obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
 | |
|  
 | |
| +obj-$(CONFIG_OCF_OCF) += ocf/
 | |
| +
 | |
|  #
 | |
|  # generic algorithms and the async_tx api
 | |
|  #
 | |
| --- a/drivers/char/random.c
 | |
| +++ b/drivers/char/random.c
 | |
| @@ -129,6 +129,9 @@
 | |
|   *                                unsigned int value);
 | |
|   * 	void add_interrupt_randomness(int irq);
 | |
|   *
 | |
| + *      void random_input_words(__u32 *buf, size_t wordcount, int ent_count)
 | |
| + *      int random_input_wait(void);
 | |
| + *
 | |
|   * add_input_randomness() uses the input layer interrupt timing, as well as
 | |
|   * the event type information from the hardware.
 | |
|   *
 | |
| @@ -140,6 +143,13 @@
 | |
|   * a better measure, since the timing of the disk interrupts are more
 | |
|   * unpredictable.
 | |
|   *
 | |
| + * random_input_words() just provides a raw block of entropy to the input
 | |
| + * pool, such as from a hardware entropy generator.
 | |
| + *
 | |
| + * random_input_wait() suspends the caller until such time as the
 | |
| + * entropy pool falls below the write threshold, and returns a count of how
 | |
| + * much entropy (in bits) is needed to sustain the pool.
 | |
| + *
 | |
|   * All of these routines try to estimate how many bits of randomness a
 | |
|   * particular randomness source.  They do this by keeping track of the
 | |
|   * first and second order deltas of the event timings.
 | |
| @@ -666,6 +676,61 @@
 | |
|  }
 | |
|  #endif
 | |
|  
 | |
| +/*
 | |
| + * random_input_words - add bulk entropy to pool
 | |
| + *
 | |
| + * @buf: buffer to add
 | |
| + * @wordcount: number of __u32 words to add
 | |
| + * @ent_count: total amount of entropy (in bits) to credit
 | |
| + *
 | |
| + * this provides bulk input of entropy to the input pool
 | |
| + *
 | |
| + */
 | |
| +void random_input_words(__u32 *buf, size_t wordcount, int ent_count)
 | |
| +{
 | |
| +	mix_pool_bytes(&input_pool, buf, wordcount);
 | |
| +
 | |
| +	credit_entropy_bits(&input_pool, ent_count);
 | |
| +
 | |
| +	DEBUG_ENT("crediting %d bits => %d\n",
 | |
| +		  ent_count, input_pool.entropy_count);
 | |
| +	/*
 | |
| +	 * Wake up waiting processes if we have enough
 | |
| +	 * entropy.
 | |
| +	 */
 | |
| +	if (input_pool.entropy_count >= random_read_wakeup_thresh)
 | |
| +		wake_up_interruptible(&random_read_wait);
 | |
| +}
 | |
| +EXPORT_SYMBOL(random_input_words);
 | |
| +
 | |
| +/*
 | |
| + * random_input_wait - wait until random needs entropy
 | |
| + *
 | |
| + * this function sleeps until the /dev/random subsystem actually
 | |
| + * needs more entropy, and then return the amount of entropy
 | |
| + * that it would be nice to have added to the system.
 | |
| + */
 | |
| +int random_input_wait(void)
 | |
| +{
 | |
| +	int count;
 | |
| +
 | |
| +	wait_event_interruptible(random_write_wait,
 | |
| +			 input_pool.entropy_count < random_write_wakeup_thresh);
 | |
| +
 | |
| +	count = random_write_wakeup_thresh - input_pool.entropy_count;
 | |
| +
 | |
| +        /* likely we got woken up due to a signal */
 | |
| +	if (count <= 0) count = random_read_wakeup_thresh;
 | |
| +
 | |
| +	DEBUG_ENT("requesting %d bits from input_wait()er %d<%d\n",
 | |
| +		  count,
 | |
| +		  input_pool.entropy_count, random_write_wakeup_thresh);
 | |
| +
 | |
| +	return count;
 | |
| +}
 | |
| +EXPORT_SYMBOL(random_input_wait);
 | |
| +
 | |
| +
 | |
|  #define EXTRACT_SIZE 10
 | |
|  
 | |
|  /*********************************************************************
 | |
| --- a/fs/fcntl.c
 | |
| +++ b/fs/fcntl.c
 | |
| @@ -191,6 +191,7 @@
 | |
|  		ret = dupfd(file, 0, 0);
 | |
|  	return ret;
 | |
|  }
 | |
| +EXPORT_SYMBOL(sys_dup);
 | |
|  
 | |
|  #define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | FASYNC | O_DIRECT | O_NOATIME)
 | |
|  
 | |
| --- a/include/linux/miscdevice.h
 | |
| +++ b/include/linux/miscdevice.h
 | |
| @@ -12,6 +12,7 @@
 | |
|  #define APOLLO_MOUSE_MINOR 7
 | |
|  #define PC110PAD_MINOR 9
 | |
|  /*#define ADB_MOUSE_MINOR 10	FIXME OBSOLETE */
 | |
| +#define CRYPTODEV_MINOR		70	/* /dev/crypto */
 | |
|  #define WATCHDOG_MINOR		130	/* Watchdog timer     */
 | |
|  #define TEMP_MINOR		131	/* Temperature Sensor */
 | |
|  #define RTC_MINOR 135
 | |
| --- a/include/linux/random.h
 | |
| +++ b/include/linux/random.h
 | |
| @@ -8,6 +8,7 @@
 | |
|  #define _LINUX_RANDOM_H
 | |
|  
 | |
|  #include <linux/ioctl.h>
 | |
| +#include <linux/types.h> /* for __u32 in user space */
 | |
|  
 | |
|  /* ioctl()'s for the random number generator */
 | |
|  
 | |
| @@ -32,6 +33,30 @@
 | |
|  /* Clear the entropy pool and associated counters.  (Superuser only.) */
 | |
|  #define RNDCLEARPOOL	_IO( 'R', 0x06 )
 | |
|  
 | |
| +#ifdef CONFIG_FIPS_RNG
 | |
| +
 | |
| +/* Size of seed value - equal to AES blocksize */
 | |
| +#define AES_BLOCK_SIZE_BYTES	16
 | |
| +#define SEED_SIZE_BYTES			AES_BLOCK_SIZE_BYTES
 | |
| +/* Size of AES key */
 | |
| +#define KEY_SIZE_BYTES		16
 | |
| +
 | |
| +/* ioctl() structure used by FIPS 140-2 Tests */
 | |
| +struct rand_fips_test {
 | |
| +	unsigned char key[KEY_SIZE_BYTES];			/* Input */
 | |
| +	unsigned char datetime[SEED_SIZE_BYTES];	/* Input */
 | |
| +	unsigned char seed[SEED_SIZE_BYTES];		/* Input */
 | |
| +	unsigned char result[SEED_SIZE_BYTES];		/* Output */
 | |
| +};
 | |
| +
 | |
| +/* FIPS 140-2 RNG Variable Seed Test. (Superuser only.) */
 | |
| +#define RNDFIPSVST	_IOWR('R', 0x10, struct rand_fips_test)
 | |
| +
 | |
| +/* FIPS 140-2 RNG Monte Carlo Test. (Superuser only.) */
 | |
| +#define RNDFIPSMCT	_IOWR('R', 0x11, struct rand_fips_test)
 | |
| +
 | |
| +#endif /* #ifdef CONFIG_FIPS_RNG */
 | |
| +
 | |
|  struct rand_pool_info {
 | |
|  	int	entropy_count;
 | |
|  	int	buf_size;
 | |
| @@ -48,6 +73,10 @@
 | |
|  				 unsigned int value);
 | |
|  extern void add_interrupt_randomness(int irq);
 | |
|  
 | |
| +extern void random_input_words(__u32 *buf, size_t wordcount, int ent_count);
 | |
| +extern int random_input_wait(void);
 | |
| +#define HAS_RANDOM_INPUT_WAIT 1
 | |
| +
 | |
|  extern void get_random_bytes(void *buf, int nbytes);
 | |
|  void generate_random_uuid(unsigned char uuid_out[16]);
 | |
|  
 | |
| --- /dev/null
 | |
| +++ b/crypto/ocf/hifn/Makefile
 | |
| @@ -0,0 +1,13 @@
 | |
| +# for SGlinux builds
 | |
| +-include $(ROOTDIR)/modules/.config
 | |
| +
 | |
| +obj-$(CONFIG_OCF_HIFN)     += hifn7751.o
 | |
| +obj-$(CONFIG_OCF_HIFNHIPP) += hifnHIPP.o
 | |
| +
 | |
| +obj ?= .
 | |
| +EXTRA_CFLAGS += -I$(obj)/.. -I$(obj)/
 | |
| +
 | |
| +ifdef TOPDIR
 | |
| +-include $(TOPDIR)/Rules.make
 | |
| +endif
 | |
| +
 | |
| --- /dev/null
 | |
| +++ b/crypto/ocf/safe/Makefile
 | |
| @@ -0,0 +1,12 @@
 | |
| +# for SGlinux builds
 | |
| +-include $(ROOTDIR)/modules/.config
 | |
| +
 | |
| +obj-$(CONFIG_OCF_SAFE) += safe.o
 | |
| +
 | |
| +obj ?= .
 | |
| +EXTRA_CFLAGS += -I$(obj)/.. -I$(obj)/
 | |
| +
 | |
| +ifdef TOPDIR
 | |
| +-include $(TOPDIR)/Rules.make
 | |
| +endif
 | |
| +
 | |
| --- /dev/null
 | |
| +++ b/crypto/ocf/Makefile
 | |
| @@ -0,0 +1,120 @@
 | |
| +# for SGlinux builds
 | |
| +-include $(ROOTDIR)/modules/.config
 | |
| +
 | |
| +OCF_OBJS = crypto.o criov.o
 | |
| +
 | |
| +ifdef CONFIG_OCF_RANDOMHARVEST
 | |
| +	OCF_OBJS += random.o
 | |
| +endif
 | |
| +
 | |
| +ifdef CONFIG_OCF_FIPS
 | |
| +	OCF_OBJS += rndtest.o
 | |
| +endif
 | |
| +
 | |
| +# Add in autoconf.h to get #defines for CONFIG_xxx
 | |
| +AUTOCONF_H=$(ROOTDIR)/modules/autoconf.h
 | |
| +ifeq ($(AUTOCONF_H), $(wildcard $(AUTOCONF_H)))
 | |
| +	EXTRA_CFLAGS += -include $(AUTOCONF_H)
 | |
| +	export EXTRA_CFLAGS
 | |
| +endif
 | |
| +
 | |
| +ifndef obj
 | |
| +	obj ?= .
 | |
| +	_obj = subdir
 | |
| +	mod-subdirs := safe hifn ixp4xx talitos ocfnull
 | |
| +	export-objs += crypto.o criov.o random.o
 | |
| +	list-multi += ocf.o
 | |
| +	_slash :=
 | |
| +else
 | |
| +	_obj = obj
 | |
| +	_slash := /
 | |
| +endif
 | |
| +
 | |
| +EXTRA_CFLAGS += -I$(obj)/.
 | |
| +
 | |
| +obj-$(CONFIG_OCF_OCF)         += ocf.o
 | |
| +obj-$(CONFIG_OCF_CRYPTODEV)   += cryptodev.o
 | |
| +obj-$(CONFIG_OCF_CRYPTOSOFT)  += cryptosoft.o
 | |
| +obj-$(CONFIG_OCF_BENCH)       += ocf-bench.o
 | |
| +
 | |
| +$(_obj)-$(CONFIG_OCF_SAFE)    += safe$(_slash)
 | |
| +$(_obj)-$(CONFIG_OCF_HIFN)    += hifn$(_slash)
 | |
| +$(_obj)-$(CONFIG_OCF_IXP4XX)  += ixp4xx$(_slash)
 | |
| +$(_obj)-$(CONFIG_OCF_TALITOS) += talitos$(_slash)
 | |
| +$(_obj)-$(CONFIG_OCF_PASEMI)  += pasemi$(_slash)
 | |
| +$(_obj)-$(CONFIG_OCF_OCFNULL) += ocfnull$(_slash)
 | |
| +
 | |
| +ocf-objs := $(OCF_OBJS)
 | |
| +
 | |
| +$(list-multi) dummy1: $(ocf-objs)
 | |
| +	$(LD) -r -o $@ $(ocf-objs)
 | |
| +
 | |
| +.PHONY:
 | |
| +clean:
 | |
| +	rm -f *.o *.ko .*.o.flags .*.ko.cmd .*.o.cmd .*.mod.o.cmd *.mod.c
 | |
| +	rm -f */*.o */*.ko */.*.o.cmd */.*.ko.cmd */.*.mod.o.cmd */*.mod.c */.*.o.flags
 | |
| +
 | |
| +ifdef TOPDIR
 | |
| +-include $(TOPDIR)/Rules.make
 | |
| +endif
 | |
| +
 | |
| +#
 | |
| +# release gen targets
 | |
| +#
 | |
| +
 | |
| +.PHONY: patch
 | |
| +patch:
 | |
| +	REL=`date +%Y%m%d`; \
 | |
| +		patch=ocf-linux-$$REL.patch; \
 | |
| +		patch24=ocf-linux-24-$$REL.patch; \
 | |
| +		patch26=ocf-linux-26-$$REL.patch; \
 | |
| +		( \
 | |
| +			find . -name Makefile; \
 | |
| +			find . -name Config.in; \
 | |
| +			find . -name Kconfig; \
 | |
| +			find . -name README; \
 | |
| +			find . -name '*.[ch]' | grep -v '.mod.c'; \
 | |
| +		) | while read t; do \
 | |
| +			diff -Nau /dev/null $$t | sed 's?^+++ \./?+++ linux/crypto/ocf/?'; \
 | |
| +		done > $$patch; \
 | |
| +		cat patches/linux-2.4.35-ocf.patch $$patch > $$patch24; \
 | |
| +		cat patches/linux-2.6.25-ocf.patch $$patch > $$patch26
 | |
| +
 | |
| +.PHONY: tarball
 | |
| +tarball:
 | |
| +	REL=`date +%Y%m%d`; RELDIR=/tmp/ocf-linux-$$REL; \
 | |
| +		CURDIR=`pwd`; \
 | |
| +		rm -rf /tmp/ocf-linux-$$REL*; \
 | |
| +		mkdir -p $$RELDIR/tools; \
 | |
| +		cp README* $$RELDIR; \
 | |
| +		cp patches/openss*.patch $$RELDIR; \
 | |
| +		cp patches/crypto-tools.patch $$RELDIR; \
 | |
| +		cp tools/[!C]* $$RELDIR/tools; \
 | |
| +		cd ..; \
 | |
| +		tar cvf $$RELDIR/ocf-linux.tar \
 | |
| +					--exclude=CVS \
 | |
| +					--exclude=.* \
 | |
| +					--exclude=*.o \
 | |
| +					--exclude=*.ko \
 | |
| +					--exclude=*.mod.* \
 | |
| +					--exclude=README* \
 | |
| +					--exclude=ocf-*.patch \
 | |
| +					--exclude=ocf/patches/openss*.patch \
 | |
| +					--exclude=ocf/patches/crypto-tools.patch \
 | |
| +					--exclude=ocf/tools \
 | |
| +					ocf; \
 | |
| +		gzip -9 $$RELDIR/ocf-linux.tar; \
 | |
| +		cd /tmp; \
 | |
| +		tar cvf ocf-linux-$$REL.tar ocf-linux-$$REL; \
 | |
| +		gzip -9 ocf-linux-$$REL.tar; \
 | |
| +		cd $$CURDIR/../../user; \
 | |
| +		rm -rf /tmp/crypto-tools-$$REL*; \
 | |
| +		tar cvf /tmp/crypto-tools-$$REL.tar \
 | |
| +					--exclude=CVS \
 | |
| +					--exclude=.* \
 | |
| +					--exclude=*.o \
 | |
| +					--exclude=cryptotest \
 | |
| +					--exclude=cryptokeytest \
 | |
| +					crypto-tools; \
 | |
| +		gzip -9 /tmp/crypto-tools-$$REL.tar
 | |
| +
 | |
| --- /dev/null
 | |
| +++ b/crypto/ocf/talitos/Makefile
 | |
| @@ -0,0 +1,12 @@
 | |
| +# for SGlinux builds
 | |
| +-include $(ROOTDIR)/modules/.config
 | |
| +
 | |
| +obj-$(CONFIG_OCF_TALITOS) += talitos.o
 | |
| +
 | |
| +obj ?= .
 | |
| +EXTRA_CFLAGS += -I$(obj)/.. -I$(obj)/
 | |
| +
 | |
| +ifdef TOPDIR
 | |
| +-include $(TOPDIR)/Rules.make
 | |
| +endif
 | |
| +
 | |
| --- /dev/null
 | |
| +++ b/crypto/ocf/ixp4xx/Makefile
 | |
| @@ -0,0 +1,104 @@
 | |
| +# for SGlinux builds
 | |
| +-include $(ROOTDIR)/modules/.config
 | |
| +
 | |
| +#
 | |
| +# You will need to point this at your Intel ixp425 includes,  this portion
 | |
| +# of the Makefile only really works under SGLinux with the appropriate libs
 | |
| +# installed.  They can be downloaded from http://www.snapgear.org/
 | |
| +#
 | |
| +ifeq ($(CONFIG_CPU_IXP46X),y)
 | |
| +IXPLATFORM = ixp46X
 | |
| +else
 | |
| +ifeq ($(CONFIG_CPU_IXP43X),y)
 | |
| +IXPLATFORM = ixp43X
 | |
| +else
 | |
| +IXPLATFORM = ixp42X
 | |
| +endif
 | |
| +endif
 | |
| +
 | |
| +ifdef CONFIG_IXP400_LIB_2_4
 | |
| +IX_XSCALE_SW = $(ROOTDIR)/modules/ixp425/ixp400-2.4/ixp400_xscale_sw
 | |
| +OSAL_DIR     = $(ROOTDIR)/modules/ixp425/ixp400-2.4/ixp_osal
 | |
| +endif
 | |
| +ifdef CONFIG_IXP400_LIB_2_1
 | |
| +IX_XSCALE_SW = $(ROOTDIR)/modules/ixp425/ixp400-2.1/ixp400_xscale_sw
 | |
| +OSAL_DIR     = $(ROOTDIR)/modules/ixp425/ixp400-2.1/ixp_osal
 | |
| +endif
 | |
| +ifdef CONFIG_IXP400_LIB_2_0
 | |
| +IX_XSCALE_SW = $(ROOTDIR)/modules/ixp425/ixp400-2.0/ixp400_xscale_sw
 | |
| +OSAL_DIR     = $(ROOTDIR)/modules/ixp425/ixp400-2.0/ixp_osal
 | |
| +endif
 | |
| +ifdef IX_XSCALE_SW
 | |
| +ifdef CONFIG_IXP400_LIB_2_4
 | |
| +IXP_CFLAGS = \
 | |
| +	-I$(ROOTDIR)/. \
 | |
| +	-I$(IX_XSCALE_SW)/src/include \
 | |
| +	-I$(OSAL_DIR)/common/include/ \
 | |
| +	-I$(OSAL_DIR)/common/include/modules/ \
 | |
| +	-I$(OSAL_DIR)/common/include/modules/ddk/ \
 | |
| +	-I$(OSAL_DIR)/common/include/modules/bufferMgt/ \
 | |
| +	-I$(OSAL_DIR)/common/include/modules/ioMem/ \
 | |
| +	-I$(OSAL_DIR)/common/os/linux/include/ \
 | |
| +	-I$(OSAL_DIR)/common/os/linux/include/core/  \
 | |
| +	-I$(OSAL_DIR)/common/os/linux/include/modules/ \
 | |
| +	-I$(OSAL_DIR)/common/os/linux/include/modules/ddk/ \
 | |
| +	-I$(OSAL_DIR)/common/os/linux/include/modules/bufferMgt/ \
 | |
| +	-I$(OSAL_DIR)/common/os/linux/include/modules/ioMem/ \
 | |
| +	-I$(OSAL_DIR)/platforms/$(IXPLATFORM)/include/ \
 | |
| +	-I$(OSAL_DIR)/platforms/$(IXPLATFORM)/os/linux/include/ \
 | |
| +	-DENABLE_IOMEM -DENABLE_BUFFERMGT -DENABLE_DDK \
 | |
| +	-DUSE_IXP4XX_CRYPTO
 | |
| +else
 | |
| +IXP_CFLAGS = \
 | |
| +	-I$(ROOTDIR)/. \
 | |
| +	-I$(IX_XSCALE_SW)/src/include \
 | |
| +	-I$(OSAL_DIR)/ \
 | |
| +	-I$(OSAL_DIR)/os/linux/include/ \
 | |
| +	-I$(OSAL_DIR)/os/linux/include/modules/ \
 | |
| +	-I$(OSAL_DIR)/os/linux/include/modules/ioMem/ \
 | |
| +	-I$(OSAL_DIR)/os/linux/include/modules/bufferMgt/ \
 | |
| +	-I$(OSAL_DIR)/os/linux/include/core/  \
 | |
| +	-I$(OSAL_DIR)/os/linux/include/platforms/ \
 | |
| +	-I$(OSAL_DIR)/os/linux/include/platforms/ixp400/ \
 | |
| +	-I$(OSAL_DIR)/os/linux/include/platforms/ixp400/ixp425 \
 | |
| +	-I$(OSAL_DIR)/os/linux/include/platforms/ixp400/ixp465 \
 | |
| +	-I$(OSAL_DIR)/os/linux/include/core/ \
 | |
| +	-I$(OSAL_DIR)/include/ \
 | |
| +	-I$(OSAL_DIR)/include/modules/ \
 | |
| +	-I$(OSAL_DIR)/include/modules/bufferMgt/ \
 | |
| +	-I$(OSAL_DIR)/include/modules/ioMem/ \
 | |
| +	-I$(OSAL_DIR)/include/platforms/ \
 | |
| +	-I$(OSAL_DIR)/include/platforms/ixp400/ \
 | |
| +	-DUSE_IXP4XX_CRYPTO
 | |
| +endif
 | |
| +endif
 | |
| +ifdef CONFIG_IXP400_LIB_1_4
 | |
| +IXP_CFLAGS   = \
 | |
| +	-I$(ROOTDIR)/. \
 | |
| +	-I$(ROOTDIR)/modules/ixp425/ixp400-1.4/ixp400_xscale_sw/src/include \
 | |
| +	-I$(ROOTDIR)/modules/ixp425/ixp400-1.4/ixp400_xscale_sw/src/linux \
 | |
| +	-DUSE_IXP4XX_CRYPTO
 | |
| +endif
 | |
| +ifndef IXPDIR
 | |
| +IXPDIR = ixp-version-is-not-supported
 | |
| +endif
 | |
| +
 | |
| +ifeq ($(CONFIG_CPU_IXP46X),y)
 | |
| +IXP_CFLAGS += -D__ixp46X
 | |
| +else
 | |
| +ifeq ($(CONFIG_CPU_IXP43X),y)
 | |
| +IXP_CFLAGS += -D__ixp43X
 | |
| +else
 | |
| +IXP_CFLAGS += -D__ixp42X
 | |
| +endif
 | |
| +endif
 | |
| +
 | |
| +obj-$(CONFIG_OCF_IXP4XX) += ixp4xx.o
 | |
| +
 | |
| +obj ?= .
 | |
| +EXTRA_CFLAGS += $(IXP_CFLAGS) -I$(obj)/.. -I$(obj)/.
 | |
| +
 | |
| +ifdef TOPDIR
 | |
| +-include $(TOPDIR)/Rules.make
 | |
| +endif
 | |
| +
 | |
| --- /dev/null
 | |
| +++ b/crypto/ocf/ocfnull/Makefile
 | |
| @@ -0,0 +1,12 @@
 | |
| +# for SGlinux builds
 | |
| +-include $(ROOTDIR)/modules/.config
 | |
| +
 | |
| +obj-$(CONFIG_OCF_OCFNULL) += ocfnull.o
 | |
| +
 | |
| +obj ?= .
 | |
| +EXTRA_CFLAGS += -I$(obj)/..
 | |
| +
 | |
| +ifdef TOPDIR
 | |
| +-include $(TOPDIR)/Rules.make
 | |
| +endif
 | |
| +
 | |
| --- /dev/null
 | |
| +++ b/crypto/ocf/pasemi/Makefile
 | |
| @@ -0,0 +1,12 @@
 | |
| +# for SGlinux builds
 | |
| +-include $(ROOTDIR)/modules/.config
 | |
| +
 | |
| +obj-$(CONFIG_OCF_PASEMI) += pasemi.o
 | |
| +
 | |
| +obj ?= .
 | |
| +EXTRA_CFLAGS += -I$(obj)/.. -I$(obj)/
 | |
| +
 | |
| +ifdef TOPDIR
 | |
| +-include $(TOPDIR)/Rules.make
 | |
| +endif
 | |
| +
 | |
| --- /dev/null
 | |
| +++ b/crypto/ocf/Config.in
 | |
| @@ -0,0 +1,32 @@
 | |
| +#############################################################################
 | |
| +
 | |
| +mainmenu_option next_comment
 | |
| +comment 'OCF Configuration'
 | |
| +tristate 'OCF (Open Cryptograhic Framework)' CONFIG_OCF_OCF
 | |
| +dep_mbool '  enable fips RNG checks (fips check on RNG data before use)' \
 | |
| +				CONFIG_OCF_FIPS $CONFIG_OCF_OCF
 | |
| +dep_mbool '  enable harvesting entropy for /dev/random' \
 | |
| +				CONFIG_OCF_RANDOMHARVEST $CONFIG_OCF_OCF
 | |
| +dep_tristate '  cryptodev (user space support)' \
 | |
| +				CONFIG_OCF_CRYPTODEV $CONFIG_OCF_OCF
 | |
| +dep_tristate '  cryptosoft (software crypto engine)' \
 | |
| +				CONFIG_OCF_CRYPTOSOFT $CONFIG_OCF_OCF
 | |
| +dep_tristate '  safenet (HW crypto engine)' \
 | |
| +				CONFIG_OCF_SAFE $CONFIG_OCF_OCF
 | |
| +dep_tristate '  IXP4xx (HW crypto engine)' \
 | |
| +				CONFIG_OCF_IXP4XX $CONFIG_OCF_OCF
 | |
| +dep_mbool    '  Enable IXP4xx HW to perform SHA1 and MD5 hashing (very slow)' \
 | |
| +				CONFIG_OCF_IXP4XX_SHA1_MD5 $CONFIG_OCF_IXP4XX
 | |
| +dep_tristate '  hifn (HW crypto engine)' \
 | |
| +				CONFIG_OCF_HIFN $CONFIG_OCF_OCF
 | |
| +dep_tristate '  talitos (HW crypto engine)' \
 | |
| +				CONFIG_OCF_TALITOS $CONFIG_OCF_OCF
 | |
| +dep_tristate '  pasemi (HW crypto engine)' \
 | |
| +				CONFIG_OCF_PASEMI $CONFIG_OCF_OCF
 | |
| +dep_tristate '  ocfnull (does no crypto)' \
 | |
| +				CONFIG_OCF_OCFNULL $CONFIG_OCF_OCF
 | |
| +dep_tristate '  ocf-bench (HW crypto in-kernel benchmark)' \
 | |
| +				CONFIG_OCF_BENCH $CONFIG_OCF_OCF
 | |
| +endmenu
 | |
| +
 | |
| +#############################################################################
 | |
| --- /dev/null
 | |
| +++ b/crypto/ocf/Kconfig
 | |
| @@ -0,0 +1,95 @@
 | |
| +menu "OCF Configuration"
 | |
| +
 | |
| +config OCF_OCF
 | |
| +	tristate "OCF (Open Cryptograhic Framework)"
 | |
| +	help
 | |
| +	  A linux port of the OpenBSD/FreeBSD crypto framework.
 | |
| +
 | |
| +config OCF_RANDOMHARVEST
 | |
| +	bool "crypto random --- harvest entropy for /dev/random"
 | |
| +	depends on OCF_OCF
 | |
| +	help
 | |
| +	  Includes code to harvest random numbers from devices that support it.
 | |
| +
 | |
| +config OCF_FIPS
 | |
| +	bool "enable fips RNG checks"
 | |
| +	depends on OCF_OCF && OCF_RANDOMHARVEST
 | |
| +	help
 | |
| +	  Run all RNG provided data through a fips check before
 | |
| +	  adding it /dev/random's entropy pool.
 | |
| +
 | |
| +config OCF_CRYPTODEV
 | |
| +	tristate "cryptodev (user space support)"
 | |
| +	depends on OCF_OCF
 | |
| +	help
 | |
| +	  The user space API to access crypto hardware.
 | |
| +
 | |
| +config OCF_CRYPTOSOFT
 | |
| +	tristate "cryptosoft (software crypto engine)"
 | |
| +	depends on OCF_OCF
 | |
| +	help
 | |
| +	  A software driver for the OCF framework that uses
 | |
| +	  the kernel CryptoAPI.
 | |
| +
 | |
| +config OCF_SAFE
 | |
| +	tristate "safenet (HW crypto engine)"
 | |
| +	depends on OCF_OCF
 | |
| +	help
 | |
| +	  A driver for a number of the safenet Excel crypto accelerators.
 | |
| +	  Currently tested and working on the 1141 and 1741.
 | |
| +
 | |
| +config OCF_IXP4XX
 | |
| +	tristate "IXP4xx (HW crypto engine)"
 | |
| +	depends on OCF_OCF
 | |
| +	help
 | |
| +	  XScale IXP4xx crypto accelerator driver.  Requires the
 | |
| +	  Intel Access library.
 | |
| +
 | |
| +config OCF_IXP4XX_SHA1_MD5
 | |
| +	bool "IXP4xx SHA1 and MD5 Hashing"
 | |
| +	depends on OCF_IXP4XX
 | |
| +	help
 | |
| +	  Allows the IXP4xx crypto accelerator to perform SHA1 and MD5 hashing.
 | |
| +	  Note: this is MUCH slower than using cryptosoft (software crypto engine).
 | |
| +
 | |
| +config OCF_HIFN
 | |
| +	tristate "hifn (HW crypto engine)"
 | |
| +	depends on OCF_OCF
 | |
| +	help
 | |
| +	  OCF driver for various HIFN based crypto accelerators.
 | |
| +	  (7951, 7955, 7956, 7751, 7811)
 | |
| +
 | |
| +config OCF_HIFNHIPP
 | |
| +	tristate "Hifn HIPP (HW packet crypto engine)"
 | |
| +	depends on OCF_OCF
 | |
| +	help
 | |
| +	  OCF driver for various HIFN (HIPP) based crypto accelerators
 | |
| +	  (7855)
 | |
| +
 | |
| +config OCF_TALITOS
 | |
| +	tristate "talitos (HW crypto engine)"
 | |
| +	depends on OCF_OCF
 | |
| +	help
 | |
| +	  OCF driver for Freescale's security engine (SEC/talitos).
 | |
| +
 | |
| +config OCF_PASEMI
 | |
| +        tristate "pasemi (HW crypto engine)"
 | |
| +        depends on OCF_OCF && PPC_PASEMI
 | |
| +        help
 | |
| +          OCF driver for for PA Semi PWRficient DMA Engine
 | |
| +
 | |
| +config OCF_OCFNULL
 | |
| +	tristate "ocfnull (fake crypto engine)"
 | |
| +	depends on OCF_OCF
 | |
| +	help
 | |
| +	  OCF driver for measuring ipsec overheads (does no crypto)
 | |
| +
 | |
| +config OCF_BENCH
 | |
| +	tristate "ocf-bench (HW crypto in-kernel benchmark)"
 | |
| +	depends on OCF_OCF
 | |
| +	help
 | |
| +	  A very simple encryption test for the in-kernel interface
 | |
| +	  of OCF.  Also includes code to benchmark the IXP Access library
 | |
| +	  for comparison.
 | |
| +
 | |
| +endmenu
 | |
| --- /dev/null
 | |
| +++ b/crypto/ocf/README
 | |
| @@ -0,0 +1,166 @@
 | |
| +README - ocf-linux-20071215
 | |
| +---------------------------
 | |
| +
 | |
| +This README provides instructions for getting ocf-linux compiled and
 | |
| +operating in a generic linux environment.  For other information you
 | |
| +might like to visit the home page for this project:
 | |
| +
 | |
| +    http://ocf-linux.sourceforge.net/
 | |
| +
 | |
| +Adding OCF to linux
 | |
| +-------------------
 | |
| +
 | |
| +    Not much in this file for now,  just some notes.  I usually build
 | |
| +    the ocf support as modules but it can be built into the kernel as
 | |
| +    well.  To use it:
 | |
| +
 | |
| +    * mknod /dev/crypto c 10 70
 | |
| +
 | |
| +    * to add OCF to your kernel source,  you have two options.  Apply
 | |
| +      the kernel specific patch:
 | |
| +
 | |
| +          cd linux-2.4*; gunzip < ocf-linux-24-XXXXXXXX.patch.gz | patch -p1
 | |
| +          cd linux-2.6*; gunzip < ocf-linux-26-XXXXXXXX.patch.gz | patch -p1
 | |
| +
 | |
| +      if you do one of the above,  then you can proceed to the next step,
 | |
| +      or you can do the above process by hand with using the patches against
 | |
| +      linux-2.4.35 and 2.6.23 to include the ocf code under crypto/ocf.
 | |
| +      Here's how to add it:
 | |
| +
 | |
| +      for 2.4.35 (and later)
 | |
| +
 | |
| +          cd linux-2.4.35/crypto
 | |
| +          tar xvzf ocf-linux.tar.gz
 | |
| +          cd ..
 | |
| +          patch -p1 < crypto/ocf/patches/linux-2.4.35-ocf.patch
 | |
| +
 | |
| +      for 2.6.23 (and later)
 | |
| +
 | |
| +          cd linux-2.6.23/crypto
 | |
| +          tar xvzf ocf-linux.tar.gz
 | |
| +          cd ..
 | |
| +          patch -p1 < crypto/ocf/patches/linux-2.6.23-ocf.patch
 | |
| +
 | |
| +      It should be easy to take this patch and apply it to other more
 | |
| +      recent versions of the kernels.  The same patches should also work
 | |
| +      relatively easily on kernels as old as 2.6.11 and 2.4.18.
 | |
| +
 | |
| +    * under 2.4 if you are on a non-x86 platform,  you may need to:
 | |
| +
 | |
| +        cp linux-2.X.x/include/asm-i386/kmap_types.h linux-2.X.x/include/asm-YYY
 | |
| +
 | |
| +      so that you can build the kernel crypto support needed for the cryptosoft
 | |
| +      driver.
 | |
| +
 | |
| +    * For simplicity you should enable all the crypto support in your kernel
 | |
| +      except for the test driver.  Likewise for the OCF options.  Do not
 | |
| +      enable OCF crypto drivers for HW that you do not have (for example
 | |
| +      ixp4xx will not compile on non-Xscale systems).
 | |
| +
 | |
| +    * make sure that cryptodev.h (from ocf-linux.tar.gz) is installed as
 | |
| +      crypto/cryptodev.h in an include directory that is used for building
 | |
| +      applications for your platform.  For example on a host system that
 | |
| +      might be:
 | |
| +
 | |
| +              /usr/include/crypto/cryptodev.h
 | |
| +
 | |
| +    * patch your openssl-0.9.8g code with the openssl-0.9.8g.patch.
 | |
| +      (NOTE: there is no longer a need to patch ssh). The patch is against:
 | |
| +      openssl-0_9_8e
 | |
| +
 | |
| +      If you need a patch for an older version of openssl,  you should look
 | |
| +      to older OCF releases.  This patch is unlikely to work on older
 | |
| +      openssl versions.
 | |
| +
 | |
| +      openssl-0.9.8g.patch
 | |
| +                - enables --with-cryptodev for non BSD systems
 | |
| +                - adds -cpu option to openssl speed for calculating CPU load
 | |
| +                  under linux
 | |
| +                - fixes null pointer in openssl speed multi thread output.
 | |
| +                - fixes test keys to work with linux crypto's more stringent
 | |
| +                  key checking.
 | |
| +                - adds MD5/SHA acceleration (Ronen Shitrit), only enabled
 | |
| +                  with the --with-cryptodev-digests option
 | |
| +                - fixes bug in engine code caching.
 | |
| +
 | |
| +    * build crypto-tools-XXXXXXXX.tar.gz if you want to try some of the BSD
 | |
| +      tools for testing OCF (ie., cryptotest).
 | |
| +
 | |
| +How to load the OCF drivers
 | |
| +---------------------------
 | |
| +
 | |
| +    First insert the base modules:
 | |
| +
 | |
| +        insmod ocf
 | |
| +        insmod cryptodev
 | |
| +
 | |
| +    You can then install the software OCF driver with:
 | |
| +
 | |
| +        insmod cryptosoft
 | |
| +
 | |
| +    and one or more of the OCF HW drivers with:
 | |
| +
 | |
| +        insmod safe
 | |
| +        insmod hifn7751
 | |
| +        insmod ixp4xx
 | |
| +        ...
 | |
| +
 | |
| +    all the drivers take a debug option to enable verbose debug so that
 | |
| +    you can see what is going on.  For debug you load them as:
 | |
| +
 | |
| +        insmod ocf crypto_debug=1
 | |
| +        insmod cryptodev cryptodev_debug=1
 | |
| +        insmod cryptosoft swcr_debug=1
 | |
| +
 | |
| +    You may load more than one OCF crypto driver but then there is no guarantee
 | |
| +    as to which will be used.
 | |
| +
 | |
| +    You can also enable debug at run time on 2.6 systems with the following:
 | |
| +
 | |
| +        echo 1 > /sys/module/ocf/parameters/crypto_debug
 | |
| +        echo 1 > /sys/module/cryptodev/parameters/cryptodev_debug
 | |
| +        echo 1 > /sys/module/cryptosoft/parameters/swcr_debug
 | |
| +        echo 1 > /sys/module/hifn7751/parameters/hifn_debug
 | |
| +        echo 1 > /sys/module/safe/parameters/safe_debug
 | |
| +        echo 1 > /sys/module/ixp4xx/parameters/ixp_debug
 | |
| +        ...
 | |
| +
 | |
| +Testing the OCF support
 | |
| +-----------------------
 | |
| +
 | |
| +    run "cryptotest",  it should do a short test for a couple of
 | |
| +    des packets.  If it does everything is working.
 | |
| +
 | |
| +    If this works,  then ssh will use the driver when invoked as:
 | |
| +
 | |
| +        ssh -c 3des username@host
 | |
| +
 | |
| +    to see for sure that it is operating, enable debug as defined above.
 | |
| +
 | |
| +    To get a better idea of performance run:
 | |
| +
 | |
| +        cryptotest 100 4096
 | |
| +
 | |
| +    There are more options to cryptotest,  see the help.
 | |
| +
 | |
| +    It is also possible to use openssl to test the speed of the crypto
 | |
| +    drivers.
 | |
| +
 | |
| +        openssl speed -evp des -engine cryptodev -elapsed
 | |
| +        openssl speed -evp des3 -engine cryptodev -elapsed
 | |
| +        openssl speed -evp aes128 -engine cryptodev -elapsed
 | |
| +
 | |
| +    and multiple threads (10) with:
 | |
| +
 | |
| +        openssl speed -evp des -engine cryptodev -elapsed -multi 10
 | |
| +        openssl speed -evp des3 -engine cryptodev -elapsed -multi 10
 | |
| +        openssl speed -evp aes128 -engine cryptodev -elapsed -multi 10
 | |
| +
 | |
| +    for public key testing you can try:
 | |
| +
 | |
| +        cryptokeytest
 | |
| +        openssl speed -engine cryptodev rsa -elapsed
 | |
| +        openssl speed -engine cryptodev dsa -elapsed
 | |
| +
 | |
| +David McCullough
 | |
| +david_mccullough@securecomputing.com
 | |
| --- /dev/null
 | |
| +++ b/crypto/ocf/hifn/hifn7751reg.h
 | |
| @@ -0,0 +1,540 @@
 | |
| +/* $FreeBSD: src/sys/dev/hifn/hifn7751reg.h,v 1.7 2007/03/21 03:42:49 sam Exp $ */
 | |
| +/*	$OpenBSD: hifn7751reg.h,v 1.35 2002/04/08 17:49:42 jason Exp $	*/
 | |
| +
 | |
| +/*-
 | |
| + * Invertex AEON / Hifn 7751 driver
 | |
| + * Copyright (c) 1999 Invertex Inc. All rights reserved.
 | |
| + * Copyright (c) 1999 Theo de Raadt
 | |
| + * Copyright (c) 2000-2001 Network Security Technologies, Inc.
 | |
| + *			http://www.netsec.net
 | |
| + *
 | |
| + * Please send any comments, feedback, bug-fixes, or feature requests to
 | |
| + * software@invertex.com.
 | |
| + *
 | |
| + * Redistribution and use in source and binary forms, with or without
 | |
| + * modification, are permitted provided that the following conditions
 | |
| + * are met:
 | |
| + *
 | |
| + * 1. Redistributions of source code must retain the above copyright
 | |
| + *    notice, this list of conditions and the following disclaimer.
 | |
| + * 2. Redistributions in binary form must reproduce the above copyright
 | |
| + *    notice, this list of conditions and the following disclaimer in the
 | |
| + *    documentation and/or other materials provided with the distribution.
 | |
| + * 3. The name of the author may not be used to endorse or promote products
 | |
| + *    derived from this software without specific prior written permission.
 | |
| + *
 | |
| + *
 | |
| + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 | |
| + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 | |
| + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 | |
| + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 | |
| + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 | |
| + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 | |
| + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 | |
| + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 | |
| + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 | |
| + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 | |
| + *
 | |
| + * Effort sponsored in part by the Defense Advanced Research Projects
 | |
| + * Agency (DARPA) and Air Force Research Laboratory, Air Force
 | |
| + * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 | |
| + *
 | |
| + */
 | |
| +#ifndef __HIFN_H__
 | |
| +#define	__HIFN_H__
 | |
| +
 | |
| +/*
 | |
| + * Some PCI configuration space offset defines.  The names were made
 | |
| + * identical to the names used by the Linux kernel.
 | |
| + */
 | |
| +#define	HIFN_BAR0		PCIR_BAR(0)	/* PUC register map */
 | |
| +#define	HIFN_BAR1		PCIR_BAR(1)	/* DMA register map */
 | |
| +#define	HIFN_TRDY_TIMEOUT	0x40
 | |
| +#define	HIFN_RETRY_TIMEOUT	0x41
 | |
| +
 | |
| +/*
 | |
| + * PCI vendor and device identifiers
 | |
| + * (the names are preserved from their OpenBSD source).
 | |
| + */
 | |
| +#define	PCI_VENDOR_HIFN		0x13a3		/* Hifn */
 | |
| +#define	PCI_PRODUCT_HIFN_7751	0x0005		/* 7751 */
 | |
| +#define	PCI_PRODUCT_HIFN_6500	0x0006		/* 6500 */
 | |
| +#define	PCI_PRODUCT_HIFN_7811	0x0007		/* 7811 */
 | |
| +#define	PCI_PRODUCT_HIFN_7855	0x001f		/* 7855 */
 | |
| +#define	PCI_PRODUCT_HIFN_7951	0x0012		/* 7951 */
 | |
| +#define	PCI_PRODUCT_HIFN_7955	0x0020		/* 7954/7955 */
 | |
| +#define	PCI_PRODUCT_HIFN_7956	0x001d		/* 7956 */
 | |
| +
 | |
| +#define	PCI_VENDOR_INVERTEX	0x14e1		/* Invertex */
 | |
| +#define	PCI_PRODUCT_INVERTEX_AEON 0x0005	/* AEON */
 | |
| +
 | |
| +#define	PCI_VENDOR_NETSEC	0x1660		/* NetSec */
 | |
| +#define	PCI_PRODUCT_NETSEC_7751	0x7751		/* 7751 */
 | |
| +
 | |
| +/*
 | |
| + * The values below should be a multiple of 4 -- and be large enough to handle
 | |
| + * any command the driver implements.
 | |
| + *
 | |
| + * MAX_COMMAND = base command + mac command + encrypt command +
 | |
| + *			mac-key + rc4-key
 | |
| + * MAX_RESULT  = base result + mac result + mac + encrypt result
 | |
| + *
 | |
| + *
 | |
| + */
 | |
| +#define	HIFN_MAX_COMMAND	(8 + 8 + 8 + 64 + 260)
 | |
| +#define	HIFN_MAX_RESULT		(8 + 4 + 20 + 4)
 | |
| +
 | |
| +/*
 | |
| + * hifn_desc_t
 | |
| + *
 | |
| + * Holds an individual descriptor for any of the rings.
 | |
| + */
 | |
| +typedef struct hifn_desc {
 | |
| +	volatile u_int32_t l;		/* length and status bits */
 | |
| +	volatile u_int32_t p;
 | |
| +} hifn_desc_t;
 | |
| +
 | |
| +/*
 | |
| + * Masks for the "length" field of struct hifn_desc.
 | |
| + */
 | |
| +#define	HIFN_D_LENGTH		0x0000ffff	/* length bit mask */
 | |
| +#define	HIFN_D_MASKDONEIRQ	0x02000000	/* mask the done interrupt */
 | |
| +#define	HIFN_D_DESTOVER		0x04000000	/* destination overflow */
 | |
| +#define	HIFN_D_OVER		0x08000000	/* overflow */
 | |
| +#define	HIFN_D_LAST		0x20000000	/* last descriptor in chain */
 | |
| +#define	HIFN_D_JUMP		0x40000000	/* jump descriptor */
 | |
| +#define	HIFN_D_VALID		0x80000000	/* valid bit */
 | |
| +
 | |
| +
 | |
| +/*
 | |
| + * Processing Unit Registers (offset from BASEREG0)
 | |
| + */
 | |
| +#define	HIFN_0_PUDATA		0x00	/* Processing Unit Data */
 | |
| +#define	HIFN_0_PUCTRL		0x04	/* Processing Unit Control */
 | |
| +#define	HIFN_0_PUISR		0x08	/* Processing Unit Interrupt Status */
 | |
| +#define	HIFN_0_PUCNFG		0x0c	/* Processing Unit Configuration */
 | |
| +#define	HIFN_0_PUIER		0x10	/* Processing Unit Interrupt Enable */
 | |
| +#define	HIFN_0_PUSTAT		0x14	/* Processing Unit Status/Chip ID */
 | |
| +#define	HIFN_0_FIFOSTAT		0x18	/* FIFO Status */
 | |
| +#define	HIFN_0_FIFOCNFG		0x1c	/* FIFO Configuration */
 | |
| +#define	HIFN_0_PUCTRL2		0x28	/* Processing Unit Control (2nd map) */
 | |
| +#define	HIFN_0_MUTE1		0x80
 | |
| +#define	HIFN_0_MUTE2		0x90
 | |
| +#define	HIFN_0_SPACESIZE	0x100	/* Register space size */
 | |
| +
 | |
| +/* Processing Unit Control Register (HIFN_0_PUCTRL) */
 | |
| +#define	HIFN_PUCTRL_CLRSRCFIFO	0x0010	/* clear source fifo */
 | |
| +#define	HIFN_PUCTRL_STOP	0x0008	/* stop pu */
 | |
| +#define	HIFN_PUCTRL_LOCKRAM	0x0004	/* lock ram */
 | |
| +#define	HIFN_PUCTRL_DMAENA	0x0002	/* enable dma */
 | |
| +#define	HIFN_PUCTRL_RESET	0x0001	/* Reset processing unit */
 | |
| +
 | |
| +/* Processing Unit Interrupt Status Register (HIFN_0_PUISR) */
 | |
| +#define	HIFN_PUISR_CMDINVAL	0x8000	/* Invalid command interrupt */
 | |
| +#define	HIFN_PUISR_DATAERR	0x4000	/* Data error interrupt */
 | |
| +#define	HIFN_PUISR_SRCFIFO	0x2000	/* Source FIFO ready interrupt */
 | |
| +#define	HIFN_PUISR_DSTFIFO	0x1000	/* Destination FIFO ready interrupt */
 | |
| +#define	HIFN_PUISR_DSTOVER	0x0200	/* Destination overrun interrupt */
 | |
| +#define	HIFN_PUISR_SRCCMD	0x0080	/* Source command interrupt */
 | |
| +#define	HIFN_PUISR_SRCCTX	0x0040	/* Source context interrupt */
 | |
| +#define	HIFN_PUISR_SRCDATA	0x0020	/* Source data interrupt */
 | |
| +#define	HIFN_PUISR_DSTDATA	0x0010	/* Destination data interrupt */
 | |
| +#define	HIFN_PUISR_DSTRESULT	0x0004	/* Destination result interrupt */
 | |
| +
 | |
| +/* Processing Unit Configuration Register (HIFN_0_PUCNFG) */
 | |
| +#define	HIFN_PUCNFG_DRAMMASK	0xe000	/* DRAM size mask */
 | |
| +#define	HIFN_PUCNFG_DSZ_256K	0x0000	/* 256k dram */
 | |
| +#define	HIFN_PUCNFG_DSZ_512K	0x2000	/* 512k dram */
 | |
| +#define	HIFN_PUCNFG_DSZ_1M	0x4000	/* 1m dram */
 | |
| +#define	HIFN_PUCNFG_DSZ_2M	0x6000	/* 2m dram */
 | |
| +#define	HIFN_PUCNFG_DSZ_4M	0x8000	/* 4m dram */
 | |
| +#define	HIFN_PUCNFG_DSZ_8M	0xa000	/* 8m dram */
 | |
| +#define	HIFN_PUNCFG_DSZ_16M	0xc000	/* 16m dram */
 | |
| +#define	HIFN_PUCNFG_DSZ_32M	0xe000	/* 32m dram */
 | |
| +#define	HIFN_PUCNFG_DRAMREFRESH	0x1800	/* DRAM refresh rate mask */
 | |
| +#define	HIFN_PUCNFG_DRFR_512	0x0000	/* 512 divisor of ECLK */
 | |
| +#define	HIFN_PUCNFG_DRFR_256	0x0800	/* 256 divisor of ECLK */
 | |
| +#define	HIFN_PUCNFG_DRFR_128	0x1000	/* 128 divisor of ECLK */
 | |
| +#define	HIFN_PUCNFG_TCALLPHASES	0x0200	/* your guess is as good as mine... */
 | |
| +#define	HIFN_PUCNFG_TCDRVTOTEM	0x0100	/* your guess is as good as mine... */
 | |
| +#define	HIFN_PUCNFG_BIGENDIAN	0x0080	/* DMA big endian mode */
 | |
| +#define	HIFN_PUCNFG_BUS32	0x0040	/* Bus width 32bits */
 | |
| +#define	HIFN_PUCNFG_BUS16	0x0000	/* Bus width 16 bits */
 | |
| +#define	HIFN_PUCNFG_CHIPID	0x0020	/* Allow chipid from PUSTAT */
 | |
| +#define	HIFN_PUCNFG_DRAM	0x0010	/* Context RAM is DRAM */
 | |
| +#define	HIFN_PUCNFG_SRAM	0x0000	/* Context RAM is SRAM */
 | |
| +#define	HIFN_PUCNFG_COMPSING	0x0004	/* Enable single compression context */
 | |
| +#define	HIFN_PUCNFG_ENCCNFG	0x0002	/* Encryption configuration */
 | |
| +
 | |
| +/* Processing Unit Interrupt Enable Register (HIFN_0_PUIER) */
 | |
| +#define	HIFN_PUIER_CMDINVAL	0x8000	/* Invalid command interrupt */
 | |
| +#define	HIFN_PUIER_DATAERR	0x4000	/* Data error interrupt */
 | |
| +#define	HIFN_PUIER_SRCFIFO	0x2000	/* Source FIFO ready interrupt */
 | |
| +#define	HIFN_PUIER_DSTFIFO	0x1000	/* Destination FIFO ready interrupt */
 | |
| +#define	HIFN_PUIER_DSTOVER	0x0200	/* Destination overrun interrupt */
 | |
| +#define	HIFN_PUIER_SRCCMD	0x0080	/* Source command interrupt */
 | |
| +#define	HIFN_PUIER_SRCCTX	0x0040	/* Source context interrupt */
 | |
| +#define	HIFN_PUIER_SRCDATA	0x0020	/* Source data interrupt */
 | |
| +#define	HIFN_PUIER_DSTDATA	0x0010	/* Destination data interrupt */
 | |
| +#define	HIFN_PUIER_DSTRESULT	0x0004	/* Destination result interrupt */
 | |
| +
 | |
| +/* Processing Unit Status Register/Chip ID (HIFN_0_PUSTAT) */
 | |
| +#define	HIFN_PUSTAT_CMDINVAL	0x8000	/* Invalid command interrupt */
 | |
| +#define	HIFN_PUSTAT_DATAERR	0x4000	/* Data error interrupt */
 | |
| +#define	HIFN_PUSTAT_SRCFIFO	0x2000	/* Source FIFO ready interrupt */
 | |
| +#define	HIFN_PUSTAT_DSTFIFO	0x1000	/* Destination FIFO ready interrupt */
 | |
| +#define	HIFN_PUSTAT_DSTOVER	0x0200	/* Destination overrun interrupt */
 | |
| +#define	HIFN_PUSTAT_SRCCMD	0x0080	/* Source command interrupt */
 | |
| +#define	HIFN_PUSTAT_SRCCTX	0x0040	/* Source context interrupt */
 | |
| +#define	HIFN_PUSTAT_SRCDATA	0x0020	/* Source data interrupt */
 | |
| +#define	HIFN_PUSTAT_DSTDATA	0x0010	/* Destination data interrupt */
 | |
| +#define	HIFN_PUSTAT_DSTRESULT	0x0004	/* Destination result interrupt */
 | |
| +#define	HIFN_PUSTAT_CHIPREV	0x00ff	/* Chip revision mask */
 | |
| +#define	HIFN_PUSTAT_CHIPENA	0xff00	/* Chip enabled mask */
 | |
| +#define	HIFN_PUSTAT_ENA_2	0x1100	/* Level 2 enabled */
 | |
| +#define	HIFN_PUSTAT_ENA_1	0x1000	/* Level 1 enabled */
 | |
| +#define	HIFN_PUSTAT_ENA_0	0x3000	/* Level 0 enabled */
 | |
| +#define	HIFN_PUSTAT_REV_2	0x0020	/* 7751 PT6/2 */
 | |
| +#define	HIFN_PUSTAT_REV_3	0x0030	/* 7751 PT6/3 */
 | |
| +
 | |
| +/* FIFO Status Register (HIFN_0_FIFOSTAT) */
 | |
| +#define	HIFN_FIFOSTAT_SRC	0x7f00	/* Source FIFO available */
 | |
| +#define	HIFN_FIFOSTAT_DST	0x007f	/* Destination FIFO available */
 | |
| +
 | |
| +/* FIFO Configuration Register (HIFN_0_FIFOCNFG) */
 | |
| +#define	HIFN_FIFOCNFG_THRESHOLD	0x0400	/* must be written as this value */
 | |
| +
 | |
| +/*
 | |
| + * DMA Interface Registers (offset from BASEREG1)
 | |
| + */
 | |
| +#define	HIFN_1_DMA_CRAR		0x0c	/* DMA Command Ring Address */
 | |
| +#define	HIFN_1_DMA_SRAR		0x1c	/* DMA Source Ring Address */
 | |
| +#define	HIFN_1_DMA_RRAR		0x2c	/* DMA Result Ring Address */
 | |
| +#define	HIFN_1_DMA_DRAR		0x3c	/* DMA Destination Ring Address */
 | |
| +#define	HIFN_1_DMA_CSR		0x40	/* DMA Status and Control */
 | |
| +#define	HIFN_1_DMA_IER		0x44	/* DMA Interrupt Enable */
 | |
| +#define	HIFN_1_DMA_CNFG		0x48	/* DMA Configuration */
 | |
| +#define	HIFN_1_PLL		0x4c	/* 7955/7956: PLL config */
 | |
| +#define	HIFN_1_7811_RNGENA	0x60	/* 7811: rng enable */
 | |
| +#define	HIFN_1_7811_RNGCFG	0x64	/* 7811: rng config */
 | |
| +#define	HIFN_1_7811_RNGDAT	0x68	/* 7811: rng data */
 | |
| +#define	HIFN_1_7811_RNGSTS	0x6c	/* 7811: rng status */
 | |
| +#define	HIFN_1_DMA_CNFG2	0x6c	/* 7955/7956: dma config #2 */
 | |
| +#define	HIFN_1_7811_MIPSRST	0x94	/* 7811: MIPS reset */
 | |
| +#define	HIFN_1_REVID		0x98	/* Revision ID */
 | |
| +
 | |
| +#define	HIFN_1_PUB_RESET	0x204	/* Public/RNG Reset */
 | |
| +#define	HIFN_1_PUB_BASE		0x300	/* Public Base Address */
 | |
| +#define	HIFN_1_PUB_OPLEN	0x304	/* 7951-compat Public Operand Length */
 | |
| +#define	HIFN_1_PUB_OP		0x308	/* 7951-compat Public Operand */
 | |
| +#define	HIFN_1_PUB_STATUS	0x30c	/* 7951-compat Public Status */
 | |
| +#define	HIFN_1_PUB_IEN		0x310	/* Public Interrupt enable */
 | |
| +#define	HIFN_1_RNG_CONFIG	0x314	/* RNG config */
 | |
| +#define	HIFN_1_RNG_DATA		0x318	/* RNG data */
 | |
| +#define	HIFN_1_PUB_MODE		0x320	/* PK mode */
 | |
| +#define	HIFN_1_PUB_FIFO_OPLEN	0x380	/* first element of oplen fifo */
 | |
| +#define	HIFN_1_PUB_FIFO_OP	0x384	/* first element of op fifo */
 | |
| +#define	HIFN_1_PUB_MEM		0x400	/* start of Public key memory */
 | |
| +#define	HIFN_1_PUB_MEMEND	0xbff	/* end of Public key memory */
 | |
| +
 | |
| +/* DMA Status and Control Register (HIFN_1_DMA_CSR) */
 | |
| +#define	HIFN_DMACSR_D_CTRLMASK	0xc0000000	/* Destination Ring Control */
 | |
| +#define	HIFN_DMACSR_D_CTRL_NOP	0x00000000	/* Dest. Control: no-op */
 | |
| +#define	HIFN_DMACSR_D_CTRL_DIS	0x40000000	/* Dest. Control: disable */
 | |
| +#define	HIFN_DMACSR_D_CTRL_ENA	0x80000000	/* Dest. Control: enable */
 | |
| +#define	HIFN_DMACSR_D_ABORT	0x20000000	/* Destination Ring PCIAbort */
 | |
| +#define	HIFN_DMACSR_D_DONE	0x10000000	/* Destination Ring Done */
 | |
| +#define	HIFN_DMACSR_D_LAST	0x08000000	/* Destination Ring Last */
 | |
| +#define	HIFN_DMACSR_D_WAIT	0x04000000	/* Destination Ring Waiting */
 | |
| +#define	HIFN_DMACSR_D_OVER	0x02000000	/* Destination Ring Overflow */
 | |
| +#define	HIFN_DMACSR_R_CTRL	0x00c00000	/* Result Ring Control */
 | |
| +#define	HIFN_DMACSR_R_CTRL_NOP	0x00000000	/* Result Control: no-op */
 | |
| +#define	HIFN_DMACSR_R_CTRL_DIS	0x00400000	/* Result Control: disable */
 | |
| +#define	HIFN_DMACSR_R_CTRL_ENA	0x00800000	/* Result Control: enable */
 | |
| +#define	HIFN_DMACSR_R_ABORT	0x00200000	/* Result Ring PCI Abort */
 | |
| +#define	HIFN_DMACSR_R_DONE	0x00100000	/* Result Ring Done */
 | |
| +#define	HIFN_DMACSR_R_LAST	0x00080000	/* Result Ring Last */
 | |
| +#define	HIFN_DMACSR_R_WAIT	0x00040000	/* Result Ring Waiting */
 | |
| +#define	HIFN_DMACSR_R_OVER	0x00020000	/* Result Ring Overflow */
 | |
| +#define	HIFN_DMACSR_S_CTRL	0x0000c000	/* Source Ring Control */
 | |
| +#define	HIFN_DMACSR_S_CTRL_NOP	0x00000000	/* Source Control: no-op */
 | |
| +#define	HIFN_DMACSR_S_CTRL_DIS	0x00004000	/* Source Control: disable */
 | |
| +#define	HIFN_DMACSR_S_CTRL_ENA	0x00008000	/* Source Control: enable */
 | |
| +#define	HIFN_DMACSR_S_ABORT	0x00002000	/* Source Ring PCI Abort */
 | |
| +#define	HIFN_DMACSR_S_DONE	0x00001000	/* Source Ring Done */
 | |
| +#define	HIFN_DMACSR_S_LAST	0x00000800	/* Source Ring Last */
 | |
| +#define	HIFN_DMACSR_S_WAIT	0x00000400	/* Source Ring Waiting */
 | |
| +#define	HIFN_DMACSR_ILLW	0x00000200	/* Illegal write (7811 only) */
 | |
| +#define	HIFN_DMACSR_ILLR	0x00000100	/* Illegal read (7811 only) */
 | |
| +#define	HIFN_DMACSR_C_CTRL	0x000000c0	/* Command Ring Control */
 | |
| +#define	HIFN_DMACSR_C_CTRL_NOP	0x00000000	/* Command Control: no-op */
 | |
| +#define	HIFN_DMACSR_C_CTRL_DIS	0x00000040	/* Command Control: disable */
 | |
| +#define	HIFN_DMACSR_C_CTRL_ENA	0x00000080	/* Command Control: enable */
 | |
| +#define	HIFN_DMACSR_C_ABORT	0x00000020	/* Command Ring PCI Abort */
 | |
| +#define	HIFN_DMACSR_C_DONE	0x00000010	/* Command Ring Done */
 | |
| +#define	HIFN_DMACSR_C_LAST	0x00000008	/* Command Ring Last */
 | |
| +#define	HIFN_DMACSR_C_WAIT	0x00000004	/* Command Ring Waiting */
 | |
| +#define	HIFN_DMACSR_PUBDONE	0x00000002	/* Public op done (7951 only) */
 | |
| +#define	HIFN_DMACSR_ENGINE	0x00000001	/* Command Ring Engine IRQ */
 | |
| +
 | |
| +/* DMA Interrupt Enable Register (HIFN_1_DMA_IER) */
 | |
| +#define	HIFN_DMAIER_D_ABORT	0x20000000	/* Destination Ring PCIAbort */
 | |
| +#define	HIFN_DMAIER_D_DONE	0x10000000	/* Destination Ring Done */
 | |
| +#define	HIFN_DMAIER_D_LAST	0x08000000	/* Destination Ring Last */
 | |
| +#define	HIFN_DMAIER_D_WAIT	0x04000000	/* Destination Ring Waiting */
 | |
| +#define	HIFN_DMAIER_D_OVER	0x02000000	/* Destination Ring Overflow */
 | |
| +#define	HIFN_DMAIER_R_ABORT	0x00200000	/* Result Ring PCI Abort */
 | |
| +#define	HIFN_DMAIER_R_DONE	0x00100000	/* Result Ring Done */
 | |
| +#define	HIFN_DMAIER_R_LAST	0x00080000	/* Result Ring Last */
 | |
| +#define	HIFN_DMAIER_R_WAIT	0x00040000	/* Result Ring Waiting */
 | |
| +#define	HIFN_DMAIER_R_OVER	0x00020000	/* Result Ring Overflow */
 | |
| +#define	HIFN_DMAIER_S_ABORT	0x00002000	/* Source Ring PCI Abort */
 | |
| +#define	HIFN_DMAIER_S_DONE	0x00001000	/* Source Ring Done */
 | |
| +#define	HIFN_DMAIER_S_LAST	0x00000800	/* Source Ring Last */
 | |
| +#define	HIFN_DMAIER_S_WAIT	0x00000400	/* Source Ring Waiting */
 | |
| +#define	HIFN_DMAIER_ILLW	0x00000200	/* Illegal write (7811 only) */
 | |
| +#define	HIFN_DMAIER_ILLR	0x00000100	/* Illegal read (7811 only) */
 | |
| +#define	HIFN_DMAIER_C_ABORT	0x00000020	/* Command Ring PCI Abort */
 | |
| +#define	HIFN_DMAIER_C_DONE	0x00000010	/* Command Ring Done */
 | |
| +#define	HIFN_DMAIER_C_LAST	0x00000008	/* Command Ring Last */
 | |
| +#define	HIFN_DMAIER_C_WAIT	0x00000004	/* Command Ring Waiting */
 | |
| +#define	HIFN_DMAIER_PUBDONE	0x00000002	/* public op done (7951 only) */
 | |
| +#define	HIFN_DMAIER_ENGINE	0x00000001	/* Engine IRQ */
 | |
| +
 | |
| +/* DMA Configuration Register (HIFN_1_DMA_CNFG) */
 | |
| +#define	HIFN_DMACNFG_BIGENDIAN	0x10000000	/* big endian mode */
 | |
| +#define	HIFN_DMACNFG_POLLFREQ	0x00ff0000	/* Poll frequency mask */
 | |
| +#define	HIFN_DMACNFG_UNLOCK	0x00000800
 | |
| +#define	HIFN_DMACNFG_POLLINVAL	0x00000700	/* Invalid Poll Scalar */
 | |
| +#define	HIFN_DMACNFG_LAST	0x00000010	/* Host control LAST bit */
 | |
| +#define	HIFN_DMACNFG_MODE	0x00000004	/* DMA mode */
 | |
| +#define	HIFN_DMACNFG_DMARESET	0x00000002	/* DMA Reset # */
 | |
| +#define	HIFN_DMACNFG_MSTRESET	0x00000001	/* Master Reset # */
 | |
| +
 | |
| +/* DMA Configuration Register (HIFN_1_DMA_CNFG2) */
 | |
| +#define	HIFN_DMACNFG2_PKSWAP32	(1 << 19)	/* swap the OPLEN/OP reg */
 | |
| +#define	HIFN_DMACNFG2_PKSWAP8	(1 << 18)	/* swap the bits of OPLEN/OP */
 | |
| +#define	HIFN_DMACNFG2_BAR0_SWAP32 (1<<17)	/* swap the bytes of BAR0 */
 | |
| +#define	HIFN_DMACNFG2_BAR1_SWAP8 (1<<16)	/* swap the bits  of BAR0 */
 | |
| +#define	HIFN_DMACNFG2_INIT_WRITE_BURST_SHIFT 12
 | |
| +#define	HIFN_DMACNFG2_INIT_READ_BURST_SHIFT 8
 | |
| +#define	HIFN_DMACNFG2_TGT_WRITE_BURST_SHIFT 4
 | |
| +#define	HIFN_DMACNFG2_TGT_READ_BURST_SHIFT  0
 | |
| +
 | |
| +/* 7811 RNG Enable Register (HIFN_1_7811_RNGENA) */
 | |
| +#define	HIFN_7811_RNGENA_ENA	0x00000001	/* enable RNG */
 | |
| +
 | |
| +/* 7811 RNG Config Register (HIFN_1_7811_RNGCFG) */
 | |
| +#define	HIFN_7811_RNGCFG_PRE1	0x00000f00	/* first prescalar */
 | |
| +#define	HIFN_7811_RNGCFG_OPRE	0x00000080	/* output prescalar */
 | |
| +#define	HIFN_7811_RNGCFG_DEFL	0x00000f80	/* 2 words/ 1/100 sec */
 | |
| +
 | |
| +/* 7811 RNG Status Register (HIFN_1_7811_RNGSTS) */
 | |
| +#define	HIFN_7811_RNGSTS_RDY	0x00004000	/* two numbers in FIFO */
 | |
| +#define	HIFN_7811_RNGSTS_UFL	0x00001000	/* rng underflow */
 | |
| +
 | |
| +/* 7811 MIPS Reset Register (HIFN_1_7811_MIPSRST) */
 | |
| +#define	HIFN_MIPSRST_BAR2SIZE	0xffff0000	/* sdram size */
 | |
| +#define	HIFN_MIPSRST_GPRAMINIT	0x00008000	/* gpram can be accessed */
 | |
| +#define	HIFN_MIPSRST_CRAMINIT	0x00004000	/* ctxram can be accessed */
 | |
| +#define	HIFN_MIPSRST_LED2	0x00000400	/* external LED2 */
 | |
| +#define	HIFN_MIPSRST_LED1	0x00000200	/* external LED1 */
 | |
| +#define	HIFN_MIPSRST_LED0	0x00000100	/* external LED0 */
 | |
| +#define	HIFN_MIPSRST_MIPSDIS	0x00000004	/* disable MIPS */
 | |
| +#define	HIFN_MIPSRST_MIPSRST	0x00000002	/* warm reset MIPS */
 | |
| +#define	HIFN_MIPSRST_MIPSCOLD	0x00000001	/* cold reset MIPS */
 | |
| +
 | |
| +/* Public key reset register (HIFN_1_PUB_RESET) */
 | |
| +#define	HIFN_PUBRST_RESET	0x00000001	/* reset public/rng unit */
 | |
| +
 | |
| +/* Public operation register (HIFN_1_PUB_OP) */
 | |
| +#define	HIFN_PUBOP_AOFFSET	0x0000003e	/* A offset */
 | |
| +#define	HIFN_PUBOP_BOFFSET	0x00000fc0	/* B offset */
 | |
| +#define	HIFN_PUBOP_MOFFSET	0x0003f000	/* M offset */
 | |
| +#define	HIFN_PUBOP_OP_MASK	0x003c0000	/* Opcode: */
 | |
| +#define	HIFN_PUBOP_OP_NOP	0x00000000	/*  NOP */
 | |
| +#define	HIFN_PUBOP_OP_ADD	0x00040000	/*  ADD */
 | |
| +#define	HIFN_PUBOP_OP_ADDC	0x00080000	/*  ADD w/carry */
 | |
| +#define	HIFN_PUBOP_OP_SUB	0x000c0000	/*  SUB */
 | |
| +#define	HIFN_PUBOP_OP_SUBC	0x00100000	/*  SUB w/carry */
 | |
| +#define	HIFN_PUBOP_OP_MODADD	0x00140000	/*  Modular ADD */
 | |
| +#define	HIFN_PUBOP_OP_MODSUB	0x00180000	/*  Modular SUB */
 | |
| +#define	HIFN_PUBOP_OP_INCA	0x001c0000	/*  INC A */
 | |
| +#define	HIFN_PUBOP_OP_DECA	0x00200000	/*  DEC A */
 | |
| +#define	HIFN_PUBOP_OP_MULT	0x00240000	/*  MULT */
 | |
| +#define	HIFN_PUBOP_OP_MODMULT	0x00280000	/*  Modular MULT */
 | |
| +#define	HIFN_PUBOP_OP_MODRED	0x002c0000	/*  Modular Red */
 | |
| +#define	HIFN_PUBOP_OP_MODEXP	0x00300000	/*  Modular Exp */
 | |
| +
 | |
| +/* Public operand length register (HIFN_1_PUB_OPLEN) */
 | |
| +#define	HIFN_PUBOPLEN_MODLEN	0x0000007f
 | |
| +#define	HIFN_PUBOPLEN_EXPLEN	0x0003ff80
 | |
| +#define	HIFN_PUBOPLEN_REDLEN	0x003c0000
 | |
| +
 | |
| +/* Public status register (HIFN_1_PUB_STATUS) */
 | |
| +#define	HIFN_PUBSTS_DONE	0x00000001	/* operation done */
 | |
| +#define	HIFN_PUBSTS_CARRY	0x00000002	/* carry */
 | |
| +#define	HIFN_PUBSTS_FIFO_EMPTY	0x00000100	/* fifo empty */
 | |
| +#define	HIFN_PUBSTS_FIFO_FULL	0x00000200	/* fifo full */
 | |
| +#define	HIFN_PUBSTS_FIFO_OVFL	0x00000400	/* fifo overflow */
 | |
| +#define	HIFN_PUBSTS_FIFO_WRITE	0x000f0000	/* fifo write */
 | |
| +#define	HIFN_PUBSTS_FIFO_READ	0x0f000000	/* fifo read */
 | |
| +
 | |
| +/* Public interrupt enable register (HIFN_1_PUB_IEN) */
 | |
| +#define	HIFN_PUBIEN_DONE	0x00000001	/* operation done interrupt */
 | |
| +
 | |
| +/* Random number generator config register (HIFN_1_RNG_CONFIG) */
 | |
| +#define	HIFN_RNGCFG_ENA		0x00000001	/* enable rng */
 | |
| +
 | |
| +/*
 | |
| + * Register offsets in register set 1
 | |
| + */
 | |
| +
 | |
| +#define	HIFN_UNLOCK_SECRET1	0xf4
 | |
| +#define	HIFN_UNLOCK_SECRET2	0xfc
 | |
| +
 | |
| +/*
 | |
| + * PLL config register
 | |
| + *
 | |
| + * This register is present only on 7954/7955/7956 parts. It must be
 | |
| + * programmed according to the bus interface method used by the h/w.
 | |
| + * Note that the parts require a stable clock.  Since the PCI clock
 | |
| + * may vary the reference clock must usually be used.  To avoid
 | |
| + * overclocking the core logic, setup must be done carefully, refer
 | |
| + * to the driver for details.  The exact multiplier required varies
 | |
| + * by part and system configuration; refer to the Hifn documentation.
 | |
| + */
 | |
| +#define	HIFN_PLL_REF_SEL	0x00000001	/* REF/HBI clk selection */
 | |
| +#define	HIFN_PLL_BP		0x00000002	/* bypass (used during setup) */
 | |
| +/* bit 2 reserved */
 | |
| +#define	HIFN_PLL_PK_CLK_SEL	0x00000008	/* public key clk select */
 | |
| +#define	HIFN_PLL_PE_CLK_SEL	0x00000010	/* packet engine clk select */
 | |
| +/* bits 5-9 reserved */
 | |
| +#define	HIFN_PLL_MBSET		0x00000400	/* must be set to 1 */
 | |
| +#define	HIFN_PLL_ND		0x00003800	/* Fpll_ref multiplier select */
 | |
| +#define	HIFN_PLL_ND_SHIFT	11
 | |
| +#define	HIFN_PLL_ND_2		0x00000000	/* 2x */
 | |
| +#define	HIFN_PLL_ND_4		0x00000800	/* 4x */
 | |
| +#define	HIFN_PLL_ND_6		0x00001000	/* 6x */
 | |
| +#define	HIFN_PLL_ND_8		0x00001800	/* 8x */
 | |
| +#define	HIFN_PLL_ND_10		0x00002000	/* 10x */
 | |
| +#define	HIFN_PLL_ND_12		0x00002800	/* 12x */
 | |
| +/* bits 14-15 reserved */
 | |
| +#define	HIFN_PLL_IS		0x00010000	/* charge pump current select */
 | |
| +/* bits 17-31 reserved */
 | |
| +
 | |
| +/*
 | |
| + * Board configuration specifies only these bits.
 | |
| + */
 | |
| +#define	HIFN_PLL_CONFIG		(HIFN_PLL_IS|HIFN_PLL_ND|HIFN_PLL_REF_SEL)
 | |
| +
 | |
| +/*
 | |
| + * Public Key Engine Mode Register
 | |
| + */
 | |
| +#define	HIFN_PKMODE_HOSTINVERT	(1 << 0)	/* HOST INVERT */
 | |
| +#define	HIFN_PKMODE_ENHANCED	(1 << 1)	/* Enable enhanced mode */
 | |
| +
 | |
| +
 | |
| +/*********************************************************************
 | |
| + * Structs for board commands
 | |
| + *
 | |
| + *********************************************************************/
 | |
| +
 | |
| +/*
 | |
| + * Structure to help build up the command data structure.
 | |
| + */
 | |
| +typedef struct hifn_base_command {
 | |
| +	volatile u_int16_t masks;
 | |
| +	volatile u_int16_t session_num;
 | |
| +	volatile u_int16_t total_source_count;
 | |
| +	volatile u_int16_t total_dest_count;
 | |
| +} hifn_base_command_t;
 | |
| +
 | |
| +#define	HIFN_BASE_CMD_MAC		0x0400
 | |
| +#define	HIFN_BASE_CMD_CRYPT		0x0800
 | |
| +#define	HIFN_BASE_CMD_DECODE		0x2000
 | |
| +#define	HIFN_BASE_CMD_SRCLEN_M		0xc000
 | |
| +#define	HIFN_BASE_CMD_SRCLEN_S		14
 | |
| +#define	HIFN_BASE_CMD_DSTLEN_M		0x3000
 | |
| +#define	HIFN_BASE_CMD_DSTLEN_S		12
 | |
| +#define	HIFN_BASE_CMD_LENMASK_HI	0x30000
 | |
| +#define	HIFN_BASE_CMD_LENMASK_LO	0x0ffff
 | |
| +
 | |
| +/*
 | |
| + * Structure to help build up the command data structure.
 | |
| + */
 | |
| +typedef struct hifn_crypt_command {
 | |
| +	volatile u_int16_t masks;
 | |
| +	volatile u_int16_t header_skip;
 | |
| +	volatile u_int16_t source_count;
 | |
| +	volatile u_int16_t reserved;
 | |
| +} hifn_crypt_command_t;
 | |
| +
 | |
| +#define	HIFN_CRYPT_CMD_ALG_MASK		0x0003		/* algorithm: */
 | |
| +#define	HIFN_CRYPT_CMD_ALG_DES		0x0000		/*   DES */
 | |
| +#define	HIFN_CRYPT_CMD_ALG_3DES		0x0001		/*   3DES */
 | |
| +#define	HIFN_CRYPT_CMD_ALG_RC4		0x0002		/*   RC4 */
 | |
| +#define	HIFN_CRYPT_CMD_ALG_AES		0x0003		/*   AES */
 | |
| +#define	HIFN_CRYPT_CMD_MODE_MASK	0x0018		/* Encrypt mode: */
 | |
| +#define	HIFN_CRYPT_CMD_MODE_ECB		0x0000		/*   ECB */
 | |
| +#define	HIFN_CRYPT_CMD_MODE_CBC		0x0008		/*   CBC */
 | |
| +#define	HIFN_CRYPT_CMD_MODE_CFB		0x0010		/*   CFB */
 | |
| +#define	HIFN_CRYPT_CMD_MODE_OFB		0x0018		/*   OFB */
 | |
| +#define	HIFN_CRYPT_CMD_CLR_CTX		0x0040		/* clear context */
 | |
| +#define	HIFN_CRYPT_CMD_NEW_KEY		0x0800		/* expect new key */
 | |
| +#define	HIFN_CRYPT_CMD_NEW_IV		0x1000		/* expect new iv */
 | |
| +
 | |
| +#define	HIFN_CRYPT_CMD_SRCLEN_M		0xc000
 | |
| +#define	HIFN_CRYPT_CMD_SRCLEN_S		14
 | |
| +
 | |
| +#define	HIFN_CRYPT_CMD_KSZ_MASK		0x0600		/* AES key size: */
 | |
| +#define	HIFN_CRYPT_CMD_KSZ_128		0x0000		/*   128 bit */
 | |
| +#define	HIFN_CRYPT_CMD_KSZ_192		0x0200		/*   192 bit */
 | |
| +#define	HIFN_CRYPT_CMD_KSZ_256		0x0400		/*   256 bit */
 | |
| +
 | |
| +/*
 | |
| + * Structure to help build up the command data structure.
 | |
| + */
 | |
| +typedef struct hifn_mac_command {
 | |
| +	volatile u_int16_t masks;
 | |
| +	volatile u_int16_t header_skip;
 | |
| +	volatile u_int16_t source_count;
 | |
| +	volatile u_int16_t reserved;
 | |
| +} hifn_mac_command_t;
 | |
| +
 | |
| +#define	HIFN_MAC_CMD_ALG_MASK		0x0001
 | |
| +#define	HIFN_MAC_CMD_ALG_SHA1		0x0000
 | |
| +#define	HIFN_MAC_CMD_ALG_MD5		0x0001
 | |
| +#define	HIFN_MAC_CMD_MODE_MASK		0x000c
 | |
| +#define	HIFN_MAC_CMD_MODE_HMAC		0x0000
 | |
| +#define	HIFN_MAC_CMD_MODE_SSL_MAC	0x0004
 | |
| +#define	HIFN_MAC_CMD_MODE_HASH		0x0008
 | |
| +#define	HIFN_MAC_CMD_MODE_FULL		0x0004
 | |
| +#define	HIFN_MAC_CMD_TRUNC		0x0010
 | |
| +#define	HIFN_MAC_CMD_RESULT		0x0020
 | |
| +#define	HIFN_MAC_CMD_APPEND		0x0040
 | |
| +#define	HIFN_MAC_CMD_SRCLEN_M		0xc000
 | |
| +#define	HIFN_MAC_CMD_SRCLEN_S		14
 | |
| +
 | |
| +/*
 | |
| + * MAC POS IPsec initiates authentication after encryption on encodes
 | |
| + * and before decryption on decodes.
 | |
| + */
 | |
| +#define	HIFN_MAC_CMD_POS_IPSEC		0x0200
 | |
| +#define	HIFN_MAC_CMD_NEW_KEY		0x0800
 | |
| +
 | |
| +/*
 | |
| + * The poll frequency and poll scalar defines are unshifted values used
 | |
| + * to set fields in the DMA Configuration Register.
 | |
| + */
 | |
| +#ifndef HIFN_POLL_FREQUENCY
 | |
| +#define	HIFN_POLL_FREQUENCY	0x1
 | |
| +#endif
 | |
| +
 | |
| +#ifndef HIFN_POLL_SCALAR
 | |
| +#define	HIFN_POLL_SCALAR	0x0
 | |
| +#endif
 | |
| +
 | |
| +#define	HIFN_MAX_SEGLEN 	0xffff		/* maximum dma segment len */
 | |
| +#define	HIFN_MAX_DMALEN		0x3ffff		/* maximum dma length */
 | |
| +#endif /* __HIFN_H__ */
 | |
| --- /dev/null
 | |
| +++ b/crypto/ocf/hifn/hifn7751var.h
 | |
| @@ -0,0 +1,369 @@
 | |
| +/* $FreeBSD: src/sys/dev/hifn/hifn7751var.h,v 1.9 2007/03/21 03:42:49 sam Exp $ */
 | |
| +/*	$OpenBSD: hifn7751var.h,v 1.42 2002/04/08 17:49:42 jason Exp $	*/
 | |
| +
 | |
| +/*-
 | |
| + * Invertex AEON / Hifn 7751 driver
 | |
| + * Copyright (c) 1999 Invertex Inc. All rights reserved.
 | |
| + * Copyright (c) 1999 Theo de Raadt
 | |
| + * Copyright (c) 2000-2001 Network Security Technologies, Inc.
 | |
| + *			http://www.netsec.net
 | |
| + *
 | |
| + * Please send any comments, feedback, bug-fixes, or feature requests to
 | |
| + * software@invertex.com.
 | |
| + *
 | |
| + * Redistribution and use in source and binary forms, with or without
 | |
| + * modification, are permitted provided that the following conditions
 | |
| + * are met:
 | |
| + *
 | |
| + * 1. Redistributions of source code must retain the above copyright
 | |
| + *    notice, this list of conditions and the following disclaimer.
 | |
| + * 2. Redistributions in binary form must reproduce the above copyright
 | |
| + *    notice, this list of conditions and the following disclaimer in the
 | |
| + *    documentation and/or other materials provided with the distribution.
 | |
| + * 3. The name of the author may not be used to endorse or promote products
 | |
| + *    derived from this software without specific prior written permission.
 | |
| + *
 | |
| + *
 | |
| + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 | |
| + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 | |
| + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 | |
| + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 | |
| + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 | |
| + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 | |
| + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 | |
| + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 | |
| + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 | |
| + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 | |
| + *
 | |
| + * Effort sponsored in part by the Defense Advanced Research Projects
 | |
| + * Agency (DARPA) and Air Force Research Laboratory, Air Force
 | |
| + * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 | |
| + *
 | |
| + */
 | |
| +
 | |
| +#ifndef __HIFN7751VAR_H__
 | |
| +#define __HIFN7751VAR_H__
 | |
| +
 | |
| +#ifdef __KERNEL__
 | |
| +
 | |
| +/*
 | |
| + * Some configurable values for the driver.  By default command+result
 | |
| + * descriptor rings are the same size.  The src+dst descriptor rings
 | |
| + * are sized at 3.5x the number of potential commands.  Slower parts
 | |
| + * (e.g. 7951) tend to run out of src descriptors; faster parts (7811)
 | |
| + * src+cmd/result descriptors.  It's not clear that increasing the size
 | |
| + * of the descriptor rings helps performance significantly as other
 | |
| + * factors tend to come into play (e.g. copying misaligned packets).
 | |
| + */
 | |
| +#define	HIFN_D_CMD_RSIZE	24	/* command descriptors */
 | |
| +#define	HIFN_D_SRC_RSIZE	((HIFN_D_CMD_RSIZE * 7) / 2)	/* source descriptors */
 | |
| +#define	HIFN_D_RES_RSIZE	HIFN_D_CMD_RSIZE	/* result descriptors */
 | |
| +#define	HIFN_D_DST_RSIZE	HIFN_D_SRC_RSIZE	/* destination descriptors */
 | |
| +
 | |
| +/*
 | |
| + *  Length values for cryptography
 | |
| + */
 | |
| +#define HIFN_DES_KEY_LENGTH		8
 | |
| +#define HIFN_3DES_KEY_LENGTH		24
 | |
| +#define HIFN_MAX_CRYPT_KEY_LENGTH	HIFN_3DES_KEY_LENGTH
 | |
| +#define HIFN_IV_LENGTH			8
 | |
| +#define	HIFN_AES_IV_LENGTH		16
 | |
| +#define HIFN_MAX_IV_LENGTH		HIFN_AES_IV_LENGTH
 | |
| +
 | |
| +/*
 | |
| + *  Length values for authentication
 | |
| + */
 | |
| +#define HIFN_MAC_KEY_LENGTH		64
 | |
| +#define HIFN_MD5_LENGTH			16
 | |
| +#define HIFN_SHA1_LENGTH		20
 | |
| +#define HIFN_MAC_TRUNC_LENGTH		12
 | |
| +
 | |
| +#define MAX_SCATTER 64
 | |
| +
 | |
| +/*
 | |
| + * Data structure to hold all 4 rings and any other ring related data.
 | |
| + */
 | |
| +struct hifn_dma {
 | |
| +	/*
 | |
| +	 *  Descriptor rings.  We add +1 to the size to accommodate the
 | |
| +	 *  jump descriptor.
 | |
| +	 */
 | |
| +	struct hifn_desc	cmdr[HIFN_D_CMD_RSIZE+1];
 | |
| +	struct hifn_desc	srcr[HIFN_D_SRC_RSIZE+1];
 | |
| +	struct hifn_desc	dstr[HIFN_D_DST_RSIZE+1];
 | |
| +	struct hifn_desc	resr[HIFN_D_RES_RSIZE+1];
 | |
| +
 | |
| +	struct hifn_command	*hifn_commands[HIFN_D_RES_RSIZE];
 | |
| +
 | |
| +	u_char			command_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_COMMAND];
 | |
| +	u_char			result_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_RESULT];
 | |
| +	u_int32_t		slop[HIFN_D_CMD_RSIZE];
 | |
| +
 | |
| +	u_int64_t		test_src, test_dst;
 | |
| +
 | |
| +	/*
 | |
| +	 *  Our current positions for insertion and removal from the descriptor
 | |
| +	 *  rings.
 | |
| +	 */
 | |
| +	int			cmdi, srci, dsti, resi;
 | |
| +	volatile int		cmdu, srcu, dstu, resu;
 | |
| +	int			cmdk, srck, dstk, resk;
 | |
| +};
 | |
| +
 | |
| +struct hifn_session {
 | |
| +	int hs_used;
 | |
| +	int hs_mlen;
 | |
| +	u_int8_t hs_iv[HIFN_MAX_IV_LENGTH];
 | |
| +};
 | |
| +
 | |
| +#define	HIFN_RING_SYNC(sc, r, i, f)					\
 | |
| +	/* DAVIDM bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_dmamap, (f)) */
 | |
| +
 | |
| +#define	HIFN_CMDR_SYNC(sc, i, f)	HIFN_RING_SYNC((sc), cmdr, (i), (f))
 | |
| +#define	HIFN_RESR_SYNC(sc, i, f)	HIFN_RING_SYNC((sc), resr, (i), (f))
 | |
| +#define	HIFN_SRCR_SYNC(sc, i, f)	HIFN_RING_SYNC((sc), srcr, (i), (f))
 | |
| +#define	HIFN_DSTR_SYNC(sc, i, f)	HIFN_RING_SYNC((sc), dstr, (i), (f))
 | |
| +
 | |
| +#define	HIFN_CMD_SYNC(sc, i, f)						\
 | |
| +	/* DAVIDM bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_dmamap, (f)) */
 | |
| +
 | |
| +#define	HIFN_RES_SYNC(sc, i, f)						\
 | |
| +	/* DAVIDM bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_dmamap, (f)) */
 | |
| +
 | |
| +typedef int bus_size_t;
 | |
| +
 | |
| +/*
 | |
| + * Holds data specific to a single HIFN board.
 | |
| + */
 | |
| +struct hifn_softc {
 | |
| +	softc_device_decl		 sc_dev;
 | |
| +
 | |
| +	struct pci_dev		*sc_pcidev;	/* PCI device pointer */
 | |
| +	spinlock_t		sc_mtx;		/* per-instance lock */
 | |
| +
 | |
| +	int			sc_num;		/* for multiple devs */
 | |
| +
 | |
| +	ocf_iomem_t		sc_bar0;
 | |
| +	bus_size_t		sc_bar0_lastreg;/* bar0 last reg written */
 | |
| +	ocf_iomem_t		sc_bar1;
 | |
| +	bus_size_t		sc_bar1_lastreg;/* bar1 last reg written */
 | |
| +
 | |
| +	int			sc_irq;
 | |
| +
 | |
| +	u_int32_t		sc_dmaier;
 | |
| +	u_int32_t		sc_drammodel;	/* 1=dram, 0=sram */
 | |
| +	u_int32_t		sc_pllconfig;	/* 7954/7955/7956 PLL config */
 | |
| +
 | |
| +	struct hifn_dma		*sc_dma;
 | |
| +	dma_addr_t		sc_dma_physaddr;/* physical address of sc_dma */
 | |
| +
 | |
| +	int			sc_dmansegs;
 | |
| +	int32_t			sc_cid;
 | |
| +	int			sc_maxses;
 | |
| +	int			sc_nsessions;
 | |
| +	struct hifn_session	*sc_sessions;
 | |
| +	int			sc_ramsize;
 | |
| +	int			sc_flags;
 | |
| +#define	HIFN_HAS_RNG		0x1	/* includes random number generator */
 | |
| +#define	HIFN_HAS_PUBLIC		0x2	/* includes public key support */
 | |
| +#define	HIFN_HAS_AES		0x4	/* includes AES support */
 | |
| +#define	HIFN_IS_7811		0x8	/* Hifn 7811 part */
 | |
| +#define	HIFN_IS_7956		0x10	/* Hifn 7956/7955 don't have SDRAM */
 | |
| +
 | |
| +	struct timer_list	sc_tickto;	/* for managing DMA */
 | |
| +
 | |
| +	int			sc_rngfirst;
 | |
| +	int			sc_rnghz;	/* RNG polling frequency */
 | |
| +
 | |
| +	int			sc_c_busy;	/* command ring busy */
 | |
| +	int			sc_s_busy;	/* source data ring busy */
 | |
| +	int			sc_d_busy;	/* destination data ring busy */
 | |
| +	int			sc_r_busy;	/* result ring busy */
 | |
| +	int			sc_active;	/* for initial countdown */
 | |
| +	int			sc_needwakeup;	/* ops q'd waiting on resources */
 | |
| +	int			sc_curbatch;	/* # ops submitted w/o int */
 | |
| +	int			sc_suspended;
 | |
| +#ifdef HIFN_VULCANDEV
 | |
| +	struct cdev            *sc_pkdev;
 | |
| +#endif
 | |
| +};
 | |
| +
 | |
| +#define	HIFN_LOCK(_sc)		spin_lock_irqsave(&(_sc)->sc_mtx, l_flags)
 | |
| +#define	HIFN_UNLOCK(_sc)	spin_unlock_irqrestore(&(_sc)->sc_mtx, l_flags)
 | |
| +
 | |
| +/*
 | |
| + *  hifn_command_t
 | |
| + *
 | |
| + *  This is the control structure used to pass commands to hifn_encrypt().
 | |
| + *
 | |
| + *  flags
 | |
| + *  -----
 | |
| + *  Flags is the bitwise "or" values for command configuration.  A single
 | |
| + *  encrypt direction needs to be set:
 | |
| + *
 | |
| + *	HIFN_ENCODE or HIFN_DECODE
 | |
| + *
 | |
| + *  To use cryptography, a single crypto algorithm must be included:
 | |
| + *
 | |
| + *	HIFN_CRYPT_3DES or HIFN_CRYPT_DES
 | |
| + *
 | |
| + *  To use authentication, a single MAC algorithm must be included:
 | |
| + *
 | |
| + *	HIFN_MAC_MD5 or HIFN_MAC_SHA1
 | |
| + *
 | |
| + *  By default MD5 uses a 16 byte hash and SHA-1 uses a 20 byte hash.
 | |
| + *  If the value below is set, hash values are truncated or assumed
 | |
| + *  truncated to 12 bytes:
 | |
| + *
 | |
| + *	HIFN_MAC_TRUNC
 | |
| + *
 | |
| + *  Keys for encryption and authentication can be sent as part of a command,
 | |
| + *  or the last key value used with a particular session can be retrieved
 | |
| + *  and used again if either of these flags are not specified.
 | |
| + *
 | |
| + *	HIFN_CRYPT_NEW_KEY, HIFN_MAC_NEW_KEY
 | |
| + *
 | |
| + *  session_num
 | |
| + *  -----------
 | |
| + *  A number between 0 and 2048 (for DRAM models) or a number between
 | |
| + *  0 and 768 (for SRAM models).  Those who don't want to use session
 | |
| + *  numbers should leave value at zero and send a new crypt key and/or
 | |
| + *  new MAC key on every command.  If you use session numbers and
 | |
| + *  don't send a key with a command, the last key sent for that same
 | |
| + *  session number will be used.
 | |
| + *
 | |
| + *  Warning:  Using session numbers and multiboard at the same time
 | |
| + *            is currently broken.
 | |
| + *
 | |
| + *  mbuf
 | |
| + *  ----
 | |
| + *  Either fill in the mbuf pointer and npa=0 or
 | |
| + *	 fill packp[] and packl[] and set npa to > 0
 | |
| + *
 | |
| + *  mac_header_skip
 | |
| + *  ---------------
 | |
| + *  The number of bytes of the source_buf that are skipped over before
 | |
| + *  authentication begins.  This must be a number between 0 and 2^16-1
 | |
| + *  and can be used by IPsec implementers to skip over IP headers.
 | |
| + *  *** Value ignored if authentication not used ***
 | |
| + *
 | |
| + *  crypt_header_skip
 | |
| + *  -----------------
 | |
| + *  The number of bytes of the source_buf that are skipped over before
 | |
| + *  the cryptographic operation begins.  This must be a number between 0
 | |
| + *  and 2^16-1.  For IPsec, this number will always be 8 bytes larger
 | |
| + *  than the auth_header_skip (to skip over the ESP header).
 | |
| + *  *** Value ignored if cryptography not used ***
 | |
| + *
 | |
| + */
 | |
| +struct hifn_operand {
 | |
| +	union {
 | |
| +		struct sk_buff *skb;
 | |
| +		struct uio *io;
 | |
| +		unsigned char *buf;
 | |
| +	} u;
 | |
| +	void		*map;
 | |
| +	bus_size_t	mapsize;
 | |
| +	int		nsegs;
 | |
| +	struct {
 | |
| +	    dma_addr_t  ds_addr;
 | |
| +	    int         ds_len;
 | |
| +	} segs[MAX_SCATTER];
 | |
| +};
 | |
| +
 | |
| +struct hifn_command {
 | |
| +	u_int16_t session_num;
 | |
| +	u_int16_t base_masks, cry_masks, mac_masks;
 | |
| +	u_int8_t iv[HIFN_MAX_IV_LENGTH], *ck, mac[HIFN_MAC_KEY_LENGTH];
 | |
| +	int cklen;
 | |
| +	int sloplen, slopidx;
 | |
| +
 | |
| +	struct hifn_operand src;
 | |
| +	struct hifn_operand dst;
 | |
| +
 | |
| +	struct hifn_softc *softc;
 | |
| +	struct cryptop *crp;
 | |
| +	struct cryptodesc *enccrd, *maccrd;
 | |
| +};
 | |
| +
 | |
| +#define	src_skb		src.u.skb
 | |
| +#define	src_io		src.u.io
 | |
| +#define	src_map		src.map
 | |
| +#define	src_mapsize	src.mapsize
 | |
| +#define	src_segs	src.segs
 | |
| +#define	src_nsegs	src.nsegs
 | |
| +#define	src_buf		src.u.buf
 | |
| +
 | |
| +#define	dst_skb		dst.u.skb
 | |
| +#define	dst_io		dst.u.io
 | |
| +#define	dst_map		dst.map
 | |
| +#define	dst_mapsize	dst.mapsize
 | |
| +#define	dst_segs	dst.segs
 | |
| +#define	dst_nsegs	dst.nsegs
 | |
| +#define	dst_buf		dst.u.buf
 | |
| +
 | |
| +/*
 | |
| + *  Return values for hifn_crypto()
 | |
| + */
 | |
| +#define HIFN_CRYPTO_SUCCESS	0
 | |
| +#define HIFN_CRYPTO_BAD_INPUT	(-1)
 | |
| +#define HIFN_CRYPTO_RINGS_FULL	(-2)
 | |
| +
 | |
| +/**************************************************************************
 | |
| + *
 | |
| + *  Function:  hifn_crypto
 | |
| + *
 | |
| + *  Purpose:   Called by external drivers to begin an encryption on the
 | |
| + *             HIFN board.
 | |
| + *
 | |
| + *  Blocking/Non-blocking Issues
 | |
| + *  ============================
 | |
| + *  The driver cannot block in hifn_crypto (no calls to tsleep) currently.
 | |
| + *  hifn_crypto() returns HIFN_CRYPTO_RINGS_FULL if there is not enough
 | |
| + *  room in any of the rings for the request to proceed.
 | |
| + *
 | |
| + *  Return Values
 | |
| + *  =============
 | |
| + *  0 for success, negative values on error
 | |
| + *
 | |
| + *  Defines for negative error codes are:
 | |
| + *
 | |
| + *    HIFN_CRYPTO_BAD_INPUT  :  The passed in command had invalid settings.
 | |
| + *    HIFN_CRYPTO_RINGS_FULL :  All DMA rings were full and non-blocking
 | |
| + *                              behaviour was requested.
 | |
| + *
 | |
| + *************************************************************************/
 | |
| +
 | |
| +/*
 | |
| + * Convert back and forth from 'sid' to 'card' and 'session'
 | |
| + */
 | |
| +#define HIFN_CARD(sid)		(((sid) & 0xf0000000) >> 28)
 | |
| +#define HIFN_SESSION(sid)	((sid) & 0x000007ff)
 | |
| +#define HIFN_SID(crd,ses)	(((crd) << 28) | ((ses) & 0x7ff))
 | |
| +
 | |
| +#endif /* _KERNEL */
 | |
| +
 | |
| +struct hifn_stats {
 | |
| +	u_int64_t hst_ibytes;
 | |
| +	u_int64_t hst_obytes;
 | |
| +	u_int32_t hst_ipackets;
 | |
| +	u_int32_t hst_opackets;
 | |
| +	u_int32_t hst_invalid;
 | |
| +	u_int32_t hst_nomem;		/* malloc or one of hst_nomem_* */
 | |
| +	u_int32_t hst_abort;
 | |
| +	u_int32_t hst_noirq;		/* IRQ for no reason */
 | |
| +	u_int32_t hst_totbatch;		/* ops submitted w/o interrupt */
 | |
| +	u_int32_t hst_maxbatch;		/* max ops submitted together */
 | |
| +	u_int32_t hst_unaligned;	/* unaligned src caused copy */
 | |
| +	/*
 | |
| +	 * The following divides hst_nomem into more specific buckets.
 | |
| +	 */
 | |
| +	u_int32_t hst_nomem_map;	/* bus_dmamap_create failed */
 | |
| +	u_int32_t hst_nomem_load;	/* bus_dmamap_load_* failed */
 | |
| +	u_int32_t hst_nomem_mbuf;	/* MGET* failed */
 | |
| +	u_int32_t hst_nomem_mcl;	/* MCLGET* failed */
 | |
| +	u_int32_t hst_nomem_cr;		/* out of command/result descriptor */
 | |
| +	u_int32_t hst_nomem_sd;		/* out of src/dst descriptors */
 | |
| +};
 | |
| +
 | |
| +#endif /* __HIFN7751VAR_H__ */
 | |
| --- /dev/null
 | |
| +++ b/crypto/ocf/hifn/hifn7751.c
 | |
| @@ -0,0 +1,2970 @@
 | |
| +/*	$OpenBSD: hifn7751.c,v 1.120 2002/05/17 00:33:34 deraadt Exp $	*/
 | |
| +
 | |
| +/*-
 | |
| + * Invertex AEON / Hifn 7751 driver
 | |
| + * Copyright (c) 1999 Invertex Inc. All rights reserved.
 | |
| + * Copyright (c) 1999 Theo de Raadt
 | |
| + * Copyright (c) 2000-2001 Network Security Technologies, Inc.
 | |
| + *			http://www.netsec.net
 | |
| + * Copyright (c) 2003 Hifn Inc.
 | |
| + *
 | |
| + * This driver is based on a previous driver by Invertex, for which they
 | |
| + * requested:  Please send any comments, feedback, bug-fixes, or feature
 | |
| + * requests to software@invertex.com.
 | |
| + *
 | |
| + * Redistribution and use in source and binary forms, with or without
 | |
| + * modification, are permitted provided that the following conditions
 | |
| + * are met:
 | |
| + *
 | |
| + * 1. Redistributions of source code must retain the above copyright
 | |
| + *   notice, this list of conditions and the following disclaimer.
 | |
| + * 2. Redistributions in binary form must reproduce the above copyright
 | |
| + *   notice, this list of conditions and the following disclaimer in the
 | |
| + *   documentation and/or other materials provided with the distribution.
 | |
| + * 3. The name of the author may not be used to endorse or promote products
 | |
| + *   derived from this software without specific prior written permission.
 | |
| + *
 | |
| + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 | |
| + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 | |
| + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 | |
| + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 | |
| + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 | |
| + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 | |
| + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 | |
| + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 | |
| + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 | |
| + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 | |
| + *
 | |
| + * Effort sponsored in part by the Defense Advanced Research Projects
 | |
| + * Agency (DARPA) and Air Force Research Laboratory, Air Force
 | |
| + * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 | |
| + *
 | |
| + *
 | |
| +__FBSDID("$FreeBSD: src/sys/dev/hifn/hifn7751.c,v 1.40 2007/03/21 03:42:49 sam Exp $");
 | |
| + */
 | |
| +
 | |
| +/*
 | |
| + * Driver for various Hifn encryption processors.
 | |
| + */
 | |
| +#ifndef AUTOCONF_INCLUDED
 | |
| +#include <linux/config.h>
 | |
| +#endif
 | |
| +#include <linux/module.h>
 | |
| +#include <linux/init.h>
 | |
| +#include <linux/list.h>
 | |
| +#include <linux/slab.h>
 | |
| +#include <linux/wait.h>
 | |
| +#include <linux/sched.h>
 | |
| +#include <linux/pci.h>
 | |
| +#include <linux/delay.h>
 | |
| +#include <linux/interrupt.h>
 | |
| +#include <linux/spinlock.h>
 | |
| +#include <linux/random.h>
 | |
| +#include <linux/version.h>
 | |
| +#include <linux/skbuff.h>
 | |
| +#include <asm/io.h>
 | |
| +
 | |
| +#include <cryptodev.h>
 | |
| +#include <uio.h>
 | |
| +#include <hifn/hifn7751reg.h>
 | |
| +#include <hifn/hifn7751var.h>
 | |
| +
 | |
| +#if 1
 | |
| +#define	DPRINTF(a...)	if (hifn_debug) { \
 | |
| +							printk("%s: ", sc ? \
 | |
| +								device_get_nameunit(sc->sc_dev) : "hifn"); \
 | |
| +							printk(a); \
 | |
| +						} else
 | |
| +#else
 | |
| +#define	DPRINTF(a...)
 | |
| +#endif
 | |
| +
 | |
| +static inline int
 | |
| +pci_get_revid(struct pci_dev *dev)
 | |
| +{
 | |
| +	u8 rid = 0;
 | |
| +	pci_read_config_byte(dev, PCI_REVISION_ID, &rid);
 | |
| +	return rid;
 | |
| +}
 | |
| +
 | |
| +static	struct hifn_stats hifnstats;
 | |
| +
 | |
| +#define	debug hifn_debug
 | |
| +int hifn_debug = 0;
 | |
| +module_param(hifn_debug, int, 0644);
 | |
| +MODULE_PARM_DESC(hifn_debug, "Enable debug");
 | |
| +
 | |
| +int hifn_maxbatch = 1;
 | |
| +module_param(hifn_maxbatch, int, 0644);
 | |
| +MODULE_PARM_DESC(hifn_maxbatch, "max ops to batch w/o interrupt");
 | |
| +
 | |
| +#ifdef MODULE_PARM
 | |
| +char *hifn_pllconfig = NULL;
 | |
| +MODULE_PARM(hifn_pllconfig, "s");
 | |
| +#else
 | |
| +char hifn_pllconfig[32]; /* This setting is RO after loading */
 | |
| +module_param_string(hifn_pllconfig, hifn_pllconfig, 32, 0444);
 | |
| +#endif
 | |
| +MODULE_PARM_DESC(hifn_pllconfig, "PLL config, ie., pci66, ext33, ...");
 | |
| +
 | |
| +#ifdef HIFN_VULCANDEV
 | |
| +#include <sys/conf.h>
 | |
| +#include <sys/uio.h>
 | |
| +
 | |
| +static struct cdevsw vulcanpk_cdevsw; /* forward declaration */
 | |
| +#endif
 | |
| +
 | |
| +/*
 | |
| + * Prototypes and count for the pci_device structure
 | |
| + */
 | |
| +static	int  hifn_probe(struct pci_dev *dev, const struct pci_device_id *ent);
 | |
| +static	void hifn_remove(struct pci_dev *dev);
 | |
| +
 | |
| +static	int hifn_newsession(device_t, u_int32_t *, struct cryptoini *);
 | |
| +static	int hifn_freesession(device_t, u_int64_t);
 | |
| +static	int hifn_process(device_t, struct cryptop *, int);
 | |
| +
 | |
| +static device_method_t hifn_methods = {
 | |
| +	/* crypto device methods */
 | |
| +	DEVMETHOD(cryptodev_newsession,	hifn_newsession),
 | |
| +	DEVMETHOD(cryptodev_freesession,hifn_freesession),
 | |
| +	DEVMETHOD(cryptodev_process,	hifn_process),
 | |
| +};
 | |
| +
 | |
| +static	void hifn_reset_board(struct hifn_softc *, int);
 | |
| +static	void hifn_reset_puc(struct hifn_softc *);
 | |
| +static	void hifn_puc_wait(struct hifn_softc *);
 | |
| +static	int hifn_enable_crypto(struct hifn_softc *);
 | |
| +static	void hifn_set_retry(struct hifn_softc *sc);
 | |
| +static	void hifn_init_dma(struct hifn_softc *);
 | |
| +static	void hifn_init_pci_registers(struct hifn_softc *);
 | |
| +static	int hifn_sramsize(struct hifn_softc *);
 | |
| +static	int hifn_dramsize(struct hifn_softc *);
 | |
| +static	int hifn_ramtype(struct hifn_softc *);
 | |
| +static	void hifn_sessions(struct hifn_softc *);
 | |
| +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
 | |
| +static irqreturn_t hifn_intr(int irq, void *arg);
 | |
| +#else
 | |
| +static irqreturn_t hifn_intr(int irq, void *arg, struct pt_regs *regs);
 | |
| +#endif
 | |
| +static	u_int hifn_write_command(struct hifn_command *, u_int8_t *);
 | |
| +static	u_int32_t hifn_next_signature(u_int32_t a, u_int cnt);
 | |
| +static	void hifn_callback(struct hifn_softc *, struct hifn_command *, u_int8_t *);
 | |
| +static	int hifn_crypto(struct hifn_softc *, struct hifn_command *, struct cryptop *, int);
 | |
| +static	int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *);
 | |
| +static	int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *);
 | |
| +static	int hifn_dmamap_load_src(struct hifn_softc *, struct hifn_command *);
 | |
| +static	int hifn_dmamap_load_dst(struct hifn_softc *, struct hifn_command *);
 | |
| +static	int hifn_init_pubrng(struct hifn_softc *);
 | |
| +static	void hifn_tick(unsigned long arg);
 | |
| +static	void hifn_abort(struct hifn_softc *);
 | |
| +static	void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, int *);
 | |
| +
 | |
| +static	void hifn_write_reg_0(struct hifn_softc *, bus_size_t, u_int32_t);
 | |
| +static	void hifn_write_reg_1(struct hifn_softc *, bus_size_t, u_int32_t);
 | |
| +
 | |
| +#ifdef CONFIG_OCF_RANDOMHARVEST
 | |
| +static	int hifn_read_random(void *arg, u_int32_t *buf, int len);
 | |
| +#endif
 | |
| +
 | |
| +#define HIFN_MAX_CHIPS	8
 | |
| +static struct hifn_softc *hifn_chip_idx[HIFN_MAX_CHIPS];
 | |
| +
 | |
| +static __inline u_int32_t
 | |
| +READ_REG_0(struct hifn_softc *sc, bus_size_t reg)
 | |
| +{
 | |
| +	u_int32_t v = readl(sc->sc_bar0 + reg);
 | |
| +	sc->sc_bar0_lastreg = (bus_size_t) -1;
 | |
| +	return (v);
 | |
| +}
 | |
| +#define	WRITE_REG_0(sc, reg, val)	hifn_write_reg_0(sc, reg, val)
 | |
| +
 | |
| +static __inline u_int32_t
 | |
| +READ_REG_1(struct hifn_softc *sc, bus_size_t reg)
 | |
| +{
 | |
| +	u_int32_t v = readl(sc->sc_bar1 + reg);
 | |
| +	sc->sc_bar1_lastreg = (bus_size_t) -1;
 | |
| +	return (v);
 | |
| +}
 | |
| +#define	WRITE_REG_1(sc, reg, val)	hifn_write_reg_1(sc, reg, val)
 | |
| +
 | |
| +/*
 | |
| + * map in a given buffer (great on some arches :-)
 | |
| + */
 | |
| +
 | |
| +static int
 | |
| +pci_map_uio(struct hifn_softc *sc, struct hifn_operand *buf, struct uio *uio)
 | |
| +{
 | |
| +	struct iovec *iov = uio->uio_iov;
 | |
| +
 | |
| +	DPRINTF("%s()\n", __FUNCTION__);
 | |
| +
 | |
| +	buf->mapsize = 0;
 | |
| +	for (buf->nsegs = 0; buf->nsegs < uio->uio_iovcnt; ) {
 | |
| +		buf->segs[buf->nsegs].ds_addr = pci_map_single(sc->sc_pcidev,
 | |
| +				iov->iov_base, iov->iov_len,
 | |
| +				PCI_DMA_BIDIRECTIONAL);
 | |
| +		buf->segs[buf->nsegs].ds_len = iov->iov_len;
 | |
| +		buf->mapsize += iov->iov_len;
 | |
| +		iov++;
 | |
| +		buf->nsegs++;
 | |
| +	}
 | |
| +	/* identify this buffer by the first segment */
 | |
| +	buf->map = (void *) buf->segs[0].ds_addr;
 | |
| +	return(0);
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * map in a given sk_buff
 | |
| + */
 | |
| +
 | |
| +static int
 | |
| +pci_map_skb(struct hifn_softc *sc,struct hifn_operand *buf,struct sk_buff *skb)
 | |
| +{
 | |
| +	int i;
 | |
| +
 | |
| +	DPRINTF("%s()\n", __FUNCTION__);
 | |
| +
 | |
| +	buf->mapsize = 0;
 | |
| +
 | |
| +	buf->segs[0].ds_addr = pci_map_single(sc->sc_pcidev,
 | |
| +			skb->data, skb_headlen(skb), PCI_DMA_BIDIRECTIONAL);
 | |
| +	buf->segs[0].ds_len = skb_headlen(skb);
 | |
| +	buf->mapsize += buf->segs[0].ds_len;
 | |
| +
 | |
| +	buf->nsegs = 1;
 | |
| +
 | |
| +	for (i = 0; i < skb_shinfo(skb)->nr_frags; ) {
 | |
| +		buf->segs[buf->nsegs].ds_len = skb_shinfo(skb)->frags[i].size;
 | |
| +		buf->segs[buf->nsegs].ds_addr = pci_map_single(sc->sc_pcidev,
 | |
| +				page_address(skb_shinfo(skb)->frags[i].page) +
 | |
| +					skb_shinfo(skb)->frags[i].page_offset,
 | |
| +				buf->segs[buf->nsegs].ds_len, PCI_DMA_BIDIRECTIONAL);
 | |
| +		buf->mapsize += buf->segs[buf->nsegs].ds_len;
 | |
| +		buf->nsegs++;
 | |
| +	}
 | |
| +
 | |
| +	/* identify this buffer by the first segment */
 | |
| +	buf->map = (void *) buf->segs[0].ds_addr;
 | |
| +	return(0);
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * map in a given contiguous buffer
 | |
| + */
 | |
| +
 | |
| +static int
 | |
| +pci_map_buf(struct hifn_softc *sc,struct hifn_operand *buf, void *b, int len)
 | |
| +{
 | |
| +	DPRINTF("%s()\n", __FUNCTION__);
 | |
| +
 | |
| +	buf->mapsize = 0;
 | |
| +	buf->segs[0].ds_addr = pci_map_single(sc->sc_pcidev,
 | |
| +			b, len, PCI_DMA_BIDIRECTIONAL);
 | |
| +	buf->segs[0].ds_len = len;
 | |
| +	buf->mapsize += buf->segs[0].ds_len;
 | |
| +	buf->nsegs = 1;
 | |
| +
 | |
| +	/* identify this buffer by the first segment */
 | |
| +	buf->map = (void *) buf->segs[0].ds_addr;
 | |
| +	return(0);
 | |
| +}
 | |
| +
 | |
| +#if 0 /* not needed at this time */
 | |
| +static void
 | |
| +pci_sync_iov(struct hifn_softc *sc, struct hifn_operand *buf)
 | |
| +{
 | |
| +	int i;
 | |
| +
 | |
| +	DPRINTF("%s()\n", __FUNCTION__);
 | |
| +	for (i = 0; i < buf->nsegs; i++)
 | |
| +		pci_dma_sync_single_for_cpu(sc->sc_pcidev, buf->segs[i].ds_addr,
 | |
| +				buf->segs[i].ds_len, PCI_DMA_BIDIRECTIONAL);
 | |
| +}
 | |
| +#endif
 | |
| +
 | |
| +static void
 | |
| +pci_unmap_buf(struct hifn_softc *sc, struct hifn_operand *buf)
 | |
| +{
 | |
| +	int i;
 | |
| +	DPRINTF("%s()\n", __FUNCTION__);
 | |
| +	for (i = 0; i < buf->nsegs; i++) {
 | |
| +		pci_unmap_single(sc->sc_pcidev, buf->segs[i].ds_addr,
 | |
| +				buf->segs[i].ds_len, PCI_DMA_BIDIRECTIONAL);
 | |
| +		buf->segs[i].ds_addr = 0;
 | |
| +		buf->segs[i].ds_len = 0;
 | |
| +	}
 | |
| +	buf->nsegs = 0;
 | |
| +	buf->mapsize = 0;
 | |
| +	buf->map = 0;
 | |
| +}
 | |
| +
 | |
| +static const char*
 | |
| +hifn_partname(struct hifn_softc *sc)
 | |
| +{
 | |
| +	/* XXX sprintf numbers when not decoded */
 | |
| +	switch (pci_get_vendor(sc->sc_pcidev)) {
 | |
| +	case PCI_VENDOR_HIFN:
 | |
| +		switch (pci_get_device(sc->sc_pcidev)) {
 | |
| +		case PCI_PRODUCT_HIFN_6500:	return "Hifn 6500";
 | |
| +		case PCI_PRODUCT_HIFN_7751:	return "Hifn 7751";
 | |
| +		case PCI_PRODUCT_HIFN_7811:	return "Hifn 7811";
 | |
| +		case PCI_PRODUCT_HIFN_7951:	return "Hifn 7951";
 | |
| +		case PCI_PRODUCT_HIFN_7955:	return "Hifn 7955";
 | |
| +		case PCI_PRODUCT_HIFN_7956:	return "Hifn 7956";
 | |
| +		}
 | |
| +		return "Hifn unknown-part";
 | |
| +	case PCI_VENDOR_INVERTEX:
 | |
| +		switch (pci_get_device(sc->sc_pcidev)) {
 | |
| +		case PCI_PRODUCT_INVERTEX_AEON:	return "Invertex AEON";
 | |
| +		}
 | |
| +		return "Invertex unknown-part";
 | |
| +	case PCI_VENDOR_NETSEC:
 | |
| +		switch (pci_get_device(sc->sc_pcidev)) {
 | |
| +		case PCI_PRODUCT_NETSEC_7751:	return "NetSec 7751";
 | |
| +		}
 | |
| +		return "NetSec unknown-part";
 | |
| +	}
 | |
| +	return "Unknown-vendor unknown-part";
 | |
| +}
 | |
| +
 | |
| +static u_int
 | |
| +checkmaxmin(struct pci_dev *dev, const char *what, u_int v, u_int min, u_int max)
 | |
| +{
 | |
| +	struct hifn_softc *sc = pci_get_drvdata(dev);
 | |
| +	if (v > max) {
 | |
| +		device_printf(sc->sc_dev, "Warning, %s %u out of range, "
 | |
| +			"using max %u\n", what, v, max);
 | |
| +		v = max;
 | |
| +	} else if (v < min) {
 | |
| +		device_printf(sc->sc_dev, "Warning, %s %u out of range, "
 | |
| +			"using min %u\n", what, v, min);
 | |
| +		v = min;
 | |
| +	}
 | |
| +	return v;
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Select PLL configuration for 795x parts.  This is complicated in
 | |
| + * that we cannot determine the optimal parameters without user input.
 | |
| + * The reference clock is derived from an external clock through a
 | |
| + * multiplier.  The external clock is either the host bus (i.e. PCI)
 | |
| + * or an external clock generator.  When using the PCI bus we assume
 | |
| + * the clock is either 33 or 66 MHz; for an external source we cannot
 | |
| + * tell the speed.
 | |
| + *
 | |
| + * PLL configuration is done with a string: "pci" for PCI bus, or "ext"
 | |
| + * for an external source, followed by the frequency.  We calculate
 | |
| + * the appropriate multiplier and PLL register contents accordingly.
 | |
| + * When no configuration is given we default to "pci66" since that
 | |
| + * always will allow the card to work.  If a card is using the PCI
 | |
| + * bus clock and in a 33MHz slot then it will be operating at half
 | |
| + * speed until the correct information is provided.
 | |
| + *
 | |
| + * We use a default setting of "ext66" because according to Mike Ham
 | |
| + * of HiFn, almost every board in existence has an external crystal
 | |
| + * populated at 66Mhz. Using PCI can be a problem on modern motherboards,
 | |
| + * because PCI33 can have clocks from 0 to 33Mhz, and some have
 | |
| + * non-PCI-compliant spread-spectrum clocks, which can confuse the pll.
 | |
| + */
 | |
| +static void
 | |
| +hifn_getpllconfig(struct pci_dev *dev, u_int *pll)
 | |
| +{
 | |
| +	const char *pllspec = hifn_pllconfig;
 | |
| +	u_int freq, mul, fl, fh;
 | |
| +	u_int32_t pllconfig;
 | |
| +	char *nxt;
 | |
| +
 | |
| +	if (pllspec == NULL)
 | |
| +		pllspec = "ext66";
 | |
| +	fl = 33, fh = 66;
 | |
| +	pllconfig = 0;
 | |
| +	if (strncmp(pllspec, "ext", 3) == 0) {
 | |
| +		pllspec += 3;
 | |
| +		pllconfig |= HIFN_PLL_REF_SEL;
 | |
| +		switch (pci_get_device(dev)) {
 | |
| +		case PCI_PRODUCT_HIFN_7955:
 | |
| +		case PCI_PRODUCT_HIFN_7956:
 | |
| +			fl = 20, fh = 100;
 | |
| +			break;
 | |
| +#ifdef notyet
 | |
| +		case PCI_PRODUCT_HIFN_7954:
 | |
| +			fl = 20, fh = 66;
 | |
| +			break;
 | |
| +#endif
 | |
| +		}
 | |
| +	} else if (strncmp(pllspec, "pci", 3) == 0)
 | |
| +		pllspec += 3;
 | |
| +	freq = strtoul(pllspec, &nxt, 10);
 | |
| +	if (nxt == pllspec)
 | |
| +		freq = 66;
 | |
| +	else
 | |
| +		freq = checkmaxmin(dev, "frequency", freq, fl, fh);
 | |
| +	/*
 | |
| +	 * Calculate multiplier.  We target a Fck of 266 MHz,
 | |
| +	 * allowing only even values, possibly rounded down.
 | |
| +	 * Multipliers > 8 must set the charge pump current.
 | |
| +	 */
 | |
| +	mul = checkmaxmin(dev, "PLL divisor", (266 / freq) &~ 1, 2, 12);
 | |
| +	pllconfig |= (mul / 2 - 1) << HIFN_PLL_ND_SHIFT;
 | |
| +	if (mul > 8)
 | |
| +		pllconfig |= HIFN_PLL_IS;
 | |
| +	*pll = pllconfig;
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Attach an interface that successfully probed.
 | |
| + */
 | |
| +static int
 | |
| +hifn_probe(struct pci_dev *dev, const struct pci_device_id *ent)
 | |
| +{
 | |
| +	struct hifn_softc *sc = NULL;
 | |
| +	char rbase;
 | |
| +	u_int16_t ena, rev;
 | |
| +	int rseg, rc;
 | |
| +	unsigned long mem_start, mem_len;
 | |
| +	static int num_chips = 0;
 | |
| +
 | |
| +	DPRINTF("%s()\n", __FUNCTION__);
 | |
| +
 | |
| +	if (pci_enable_device(dev) < 0)
 | |
| +		return(-ENODEV);
 | |
| +
 | |
| +	if (pci_set_mwi(dev))
 | |
| +		return(-ENODEV);
 | |
| +
 | |
| +	if (!dev->irq) {
 | |
| +		printk("hifn: found device with no IRQ assigned. check BIOS settings!");
 | |
| +		pci_disable_device(dev);
 | |
| +		return(-ENODEV);
 | |
| +	}
 | |
| +
 | |
| +	sc = (struct hifn_softc *) kmalloc(sizeof(*sc), GFP_KERNEL);
 | |
| +	if (!sc)
 | |
| +		return(-ENOMEM);
 | |
| +	memset(sc, 0, sizeof(*sc));
 | |
| +
 | |
| +	softc_device_init(sc, "hifn", num_chips, hifn_methods);
 | |
| +
 | |
| +	sc->sc_pcidev = dev;
 | |
| +	sc->sc_irq = -1;
 | |
| +	sc->sc_cid = -1;
 | |
| +	sc->sc_num = num_chips++;
 | |
| +	if (sc->sc_num < HIFN_MAX_CHIPS)
 | |
| +		hifn_chip_idx[sc->sc_num] = sc;
 | |
| +
 | |
| +	pci_set_drvdata(sc->sc_pcidev, sc);
 | |
| +
 | |
| +	spin_lock_init(&sc->sc_mtx);
 | |
| +
 | |
| +	/* XXX handle power management */
 | |
| +
 | |
| +	/*
 | |
| +	 * The 7951 and 795x have a random number generator and
 | |
| +	 * public key support; note this.
 | |
| +	 */
 | |
| +	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
 | |
| +	    (pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 ||
 | |
| +	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
 | |
| +	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7956))
 | |
| +		sc->sc_flags = HIFN_HAS_RNG | HIFN_HAS_PUBLIC;
 | |
| +	/*
 | |
| +	 * The 7811 has a random number generator and
 | |
| + * we also note its identity 'cuz of some quirks.
 | |
| +	 */
 | |
| +	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
 | |
| +	    pci_get_device(dev) == PCI_PRODUCT_HIFN_7811)
 | |
| +		sc->sc_flags |= HIFN_IS_7811 | HIFN_HAS_RNG;
 | |
| +
 | |
| +	/*
 | |
| +	 * The 795x parts support AES.
 | |
| +	 */
 | |
| +	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
 | |
| +	    (pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
 | |
| +	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7956)) {
 | |
| +		sc->sc_flags |= HIFN_IS_7956 | HIFN_HAS_AES;
 | |
| +		/*
 | |
| +		 * Select PLL configuration.  This depends on the
 | |
| +		 * bus and board design and must be manually configured
 | |
| +		 * if the default setting is unacceptable.
 | |
| +		 */
 | |
| +		hifn_getpllconfig(dev, &sc->sc_pllconfig);
 | |
| +	}
 | |
| +
 | |
| +	/*
 | |
| +	 * Setup PCI resources. Note that we record the bus
 | |
| +	 * tag and handle for each register mapping, this is
 | |
| +	 * used by the READ_REG_0, WRITE_REG_0, READ_REG_1,
 | |
| +	 * and WRITE_REG_1 macros throughout the driver.
 | |
| +	 */
 | |
| +	mem_start = pci_resource_start(sc->sc_pcidev, 0);
 | |
| +	mem_len   = pci_resource_len(sc->sc_pcidev, 0);
 | |
| +	sc->sc_bar0 = (ocf_iomem_t) ioremap(mem_start, mem_len);
 | |
| +	if (!sc->sc_bar0) {
 | |
| +		device_printf(sc->sc_dev, "cannot map bar%d register space\n", 0);
 | |
| +		goto fail;
 | |
| +	}
 | |
| +	sc->sc_bar0_lastreg = (bus_size_t) -1;
 | |
| +
 | |
| +	mem_start = pci_resource_start(sc->sc_pcidev, 1);
 | |
| +	mem_len   = pci_resource_len(sc->sc_pcidev, 1);
 | |
| +	sc->sc_bar1 = (ocf_iomem_t) ioremap(mem_start, mem_len);
 | |
| +	if (!sc->sc_bar1) {
 | |
| +		device_printf(sc->sc_dev, "cannot map bar%d register space\n", 1);
 | |
| +		goto fail;
 | |
| +	}
 | |
| +	sc->sc_bar1_lastreg = (bus_size_t) -1;
 | |
| +
 | |
| +	/* fix up the bus size */
 | |
| +	if (pci_set_dma_mask(dev, DMA_32BIT_MASK)) {
 | |
| +		device_printf(sc->sc_dev, "No usable DMA configuration, aborting.\n");
 | |
| +		goto fail;
 | |
| +	}
 | |
| +	if (pci_set_consistent_dma_mask(dev, DMA_32BIT_MASK)) {
 | |
| +		device_printf(sc->sc_dev,
 | |
| +				"No usable consistent DMA configuration, aborting.\n");
 | |
| +		goto fail;
 | |
| +	}
 | |
| +
 | |
| +	hifn_set_retry(sc);
 | |
| +
 | |
| +	/*
 | |
| +	 * Setup the area where the Hifn DMA's descriptors
 | |
| +	 * and associated data structures.
 | |
| +	 */
 | |
| +	sc->sc_dma = (struct hifn_dma *) pci_alloc_consistent(dev,
 | |
| +			sizeof(*sc->sc_dma),
 | |
| +			&sc->sc_dma_physaddr);
 | |
| +	if (!sc->sc_dma) {
 | |
| +		device_printf(sc->sc_dev, "cannot alloc sc_dma\n");
 | |
| +		goto fail;
 | |
| +	}
 | |
| +	bzero(sc->sc_dma, sizeof(*sc->sc_dma));
 | |
| +
 | |
| +	/*
 | |
| +	 * Reset the board and do the ``secret handshake''
 | |
| +	 * to enable the crypto support.  Then complete the
 | |
| +	 * initialization procedure by setting up the interrupt
 | |
| +	 * and hooking in to the system crypto support so we'll
 | |
| +	 * get used for system services like the crypto device,
 | |
| +	 * IPsec, RNG device, etc.
 | |
| +	 */
 | |
| +	hifn_reset_board(sc, 0);
 | |
| +
 | |
| +	if (hifn_enable_crypto(sc) != 0) {
 | |
| +		device_printf(sc->sc_dev, "crypto enabling failed\n");
 | |
| +		goto fail;
 | |
| +	}
 | |
| +	hifn_reset_puc(sc);
 | |
| +
 | |
| +	hifn_init_dma(sc);
 | |
| +	hifn_init_pci_registers(sc);
 | |
| +
 | |
| +	pci_set_master(sc->sc_pcidev);
 | |
| +
 | |
| +	/* XXX can't dynamically determine ram type for 795x; force dram */
 | |
| +	if (sc->sc_flags & HIFN_IS_7956)
 | |
| +		sc->sc_drammodel = 1;
 | |
| +	else if (hifn_ramtype(sc))
 | |
| +		goto fail;
 | |
| +
 | |
| +	if (sc->sc_drammodel == 0)
 | |
| +		hifn_sramsize(sc);
 | |
| +	else
 | |
| +		hifn_dramsize(sc);
 | |
| +
 | |
| +	/*
 | |
| +	 * Workaround for NetSec 7751 rev A: half ram size because two
 | |
| +	 * of the address lines were left floating
 | |
| +	 */
 | |
| +	if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
 | |
| +	    pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751 &&
 | |
| +	    pci_get_revid(dev) == 0x61)	/*XXX???*/
 | |
| +		sc->sc_ramsize >>= 1;
 | |
| +
 | |
| +	/*
 | |
| +	 * Arrange the interrupt line.
 | |
| +	 */
 | |
| +	rc = request_irq(dev->irq, hifn_intr, IRQF_SHARED, "hifn", sc);
 | |
| +	if (rc) {
 | |
| +		device_printf(sc->sc_dev, "could not map interrupt: %d\n", rc);
 | |
| +		goto fail;
 | |
| +	}
 | |
| +	sc->sc_irq = dev->irq;
 | |
| +
 | |
| +	hifn_sessions(sc);
 | |
| +
 | |
| +	/*
 | |
| +	 * NB: Keep only the low 16 bits; this masks the chip id
 | |
| +	 *     from the 7951.
 | |
| +	 */
 | |
| +	rev = READ_REG_1(sc, HIFN_1_REVID) & 0xffff;
 | |
| +
 | |
| +	rseg = sc->sc_ramsize / 1024;
 | |
| +	rbase = 'K';
 | |
| +	if (sc->sc_ramsize >= (1024 * 1024)) {
 | |
| +		rbase = 'M';
 | |
| +		rseg /= 1024;
 | |
| +	}
 | |
| +	device_printf(sc->sc_dev, "%s, rev %u, %d%cB %cram",
 | |
| +		hifn_partname(sc), rev,
 | |
| +		rseg, rbase, sc->sc_drammodel ? 'd' : 's');
 | |
| +	if (sc->sc_flags & HIFN_IS_7956)
 | |
| +		printf(", pll=0x%x<%s clk, %ux mult>",
 | |
| +			sc->sc_pllconfig,
 | |
| +			sc->sc_pllconfig & HIFN_PLL_REF_SEL ? "ext" : "pci",
 | |
| +			2 + 2*((sc->sc_pllconfig & HIFN_PLL_ND) >> 11));
 | |
| +	printf("\n");
 | |
| +
 | |
| +	sc->sc_cid = crypto_get_driverid(softc_get_device(sc),CRYPTOCAP_F_HARDWARE);
 | |
| +	if (sc->sc_cid < 0) {
 | |
| +		device_printf(sc->sc_dev, "could not get crypto driver id\n");
 | |
| +		goto fail;
 | |
| +	}
 | |
| +
 | |
| +	WRITE_REG_0(sc, HIFN_0_PUCNFG,
 | |
| +	    READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
 | |
| +	ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
 | |
| +
 | |
| +	switch (ena) {
 | |
| +	case HIFN_PUSTAT_ENA_2:
 | |
| +		crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
 | |
| +		crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0);
 | |
| +		if (sc->sc_flags & HIFN_HAS_AES)
 | |
| +			crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
 | |
| +		/*FALLTHROUGH*/
 | |
| +	case HIFN_PUSTAT_ENA_1:
 | |
| +		crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
 | |
| +		crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
 | |
| +		crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
 | |
| +		crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
 | |
| +		crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
 | |
| +		break;
 | |
| +	}
 | |
| +
 | |
| +	if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
 | |
| +		hifn_init_pubrng(sc);
 | |
| +
 | |
| +	init_timer(&sc->sc_tickto);
 | |
| +	sc->sc_tickto.function = hifn_tick;
 | |
| +	sc->sc_tickto.data = (unsigned long) sc->sc_num;
 | |
| +	mod_timer(&sc->sc_tickto, jiffies + HZ);
 | |
| +
 | |
| +	return (0);
 | |
| +
 | |
| +fail:
 | |
| +    if (sc->sc_cid >= 0)
 | |
| +        crypto_unregister_all(sc->sc_cid);
 | |
| +    if (sc->sc_irq != -1)
 | |
| +        free_irq(sc->sc_irq, sc);
 | |
| +    if (sc->sc_dma) {
 | |
| +		/* Turn off DMA polling */
 | |
| +		WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
 | |
| +			HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
 | |
| +
 | |
| +        pci_free_consistent(sc->sc_pcidev,
 | |
| +				sizeof(*sc->sc_dma),
 | |
| +                sc->sc_dma, sc->sc_dma_physaddr);
 | |
| +	}
 | |
| +    kfree(sc);
 | |
| +	return (-ENXIO);
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Detach an interface that successfully probed.
 | |
| + */
 | |
| +static void
 | |
| +hifn_remove(struct pci_dev *dev)
 | |
| +{
 | |
| +	struct hifn_softc *sc = pci_get_drvdata(dev);
 | |
| +	unsigned long l_flags;
 | |
| +
 | |
| +	DPRINTF("%s()\n", __FUNCTION__);
 | |
| +
 | |
| +	KASSERT(sc != NULL, ("hifn_detach: null software carrier!"));
 | |
| +
 | |
| +	/* disable interrupts */
 | |
| +	HIFN_LOCK(sc);
 | |
| +	WRITE_REG_1(sc, HIFN_1_DMA_IER, 0);
 | |
| +	HIFN_UNLOCK(sc);
 | |
| +
 | |
| +	/*XXX other resources */
 | |
| +	del_timer_sync(&sc->sc_tickto);
 | |
| +
 | |
| +	/* Turn off DMA polling */
 | |
| +	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
 | |
| +	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
 | |
| +
 | |
| +	crypto_unregister_all(sc->sc_cid);
 | |
| +
 | |
| +	free_irq(sc->sc_irq, sc);
 | |
| +
 | |
| +	pci_free_consistent(sc->sc_pcidev, sizeof(*sc->sc_dma),
 | |
| +                sc->sc_dma, sc->sc_dma_physaddr);
 | |
| +}
 | |
| +
 | |
| +
 | |
| +static int
 | |
| +hifn_init_pubrng(struct hifn_softc *sc)
 | |
| +{
 | |
| +	int i;
 | |
| +
 | |
| +	DPRINTF("%s()\n", __FUNCTION__);
 | |
| +
 | |
| +	if ((sc->sc_flags & HIFN_IS_7811) == 0) {
 | |
| +		/* Reset 7951 public key/rng engine */
 | |
| +		WRITE_REG_1(sc, HIFN_1_PUB_RESET,
 | |
| +		    READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);
 | |
| +
 | |
| +		for (i = 0; i < 100; i++) {
 | |
| +			DELAY(1000);
 | |
| +			if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
 | |
| +			    HIFN_PUBRST_RESET) == 0)
 | |
| +				break;
 | |
| +		}
 | |
| +
 | |
| +		if (i == 100) {
 | |
| +			device_printf(sc->sc_dev, "public key init failed\n");
 | |
| +			return (1);
 | |
| +		}
 | |
| +	}
 | |
| +
 | |
| +	/* Enable the rng, if available */
 | |
| +#ifdef CONFIG_OCF_RANDOMHARVEST
 | |
| +	if (sc->sc_flags & HIFN_HAS_RNG) {
 | |
| +		if (sc->sc_flags & HIFN_IS_7811) {
 | |
| +			u_int32_t r;
 | |
| +			r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
 | |
| +			if (r & HIFN_7811_RNGENA_ENA) {
 | |
| +				r &= ~HIFN_7811_RNGENA_ENA;
 | |
| +				WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
 | |
| +			}
 | |
| +			WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
 | |
| +			    HIFN_7811_RNGCFG_DEFL);
 | |
| +			r |= HIFN_7811_RNGENA_ENA;
 | |
| +			WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
 | |
| +		} else
 | |
| +			WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
 | |
| +			    READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
 | |
| +			    HIFN_RNGCFG_ENA);
 | |
| +
 | |
| +		sc->sc_rngfirst = 1;
 | |
| +		crypto_rregister(sc->sc_cid, hifn_read_random, sc);
 | |
| +	}
 | |
| +#endif
 | |
| +
 | |
| +	/* Enable public key engine, if available */
 | |
| +	if (sc->sc_flags & HIFN_HAS_PUBLIC) {
 | |
| +		WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
 | |
| +		sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
 | |
| +		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
 | |
| +#ifdef HIFN_VULCANDEV
 | |
| +		sc->sc_pkdev = make_dev(&vulcanpk_cdevsw, 0,
 | |
| +					UID_ROOT, GID_WHEEL, 0666,
 | |
| +					"vulcanpk");
 | |
| +		sc->sc_pkdev->si_drv1 = sc;
 | |
| +#endif
 | |
| +	}
 | |
| +
 | |
| +	return (0);
 | |
| +}
 | |
| +
 | |
| +#ifdef CONFIG_OCF_RANDOMHARVEST
 | |
| +static int
 | |
| +hifn_read_random(void *arg, u_int32_t *buf, int len)
 | |
| +{
 | |
| +	struct hifn_softc *sc = (struct hifn_softc *) arg;
 | |
| +	u_int32_t sts;
 | |
| +	int i, rc = 0;
 | |
| +
 | |
| +	if (len <= 0)
 | |
| +		return rc;
 | |
| +
 | |
| +	if (sc->sc_flags & HIFN_IS_7811) {
 | |
| +		/* ONLY VALID ON 7811!!!! */
 | |
| +		for (i = 0; i < 5; i++) {
 | |
| +			sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
 | |
| +			if (sts & HIFN_7811_RNGSTS_UFL) {
 | |
| +				device_printf(sc->sc_dev,
 | |
| +					      "RNG underflow: disabling\n");
 | |
| +				/* DAVIDM perhaps return -1 */
 | |
| +				break;
 | |
| +			}
 | |
| +			if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
 | |
| +				break;
 | |
| +
 | |
| +			/*
 | |
| +			 * There are at least two words in the RNG FIFO
 | |
| +			 * at this point.
 | |
| +			 */
 | |
| +			if (rc < len)
 | |
| +				buf[rc++] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
 | |
| +			if (rc < len)
 | |
| +				buf[rc++] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
 | |
| +		}
 | |
| +	} else
 | |
| +		buf[rc++] = READ_REG_1(sc, HIFN_1_RNG_DATA);
 | |
| +
 | |
| +	/* NB: discard first data read */
 | |
| +	if (sc->sc_rngfirst) {
 | |
| +		sc->sc_rngfirst = 0;
 | |
| +		rc = 0;
 | |
| +	}
 | |
| +
 | |
| +	return(rc);
 | |
| +}
 | |
| +#endif /* CONFIG_OCF_RANDOMHARVEST */
 | |
| +
 | |
| +static void
 | |
| +hifn_puc_wait(struct hifn_softc *sc)
 | |
| +{
 | |
| +	int i;
 | |
| +	int reg = HIFN_0_PUCTRL;
 | |
| +
 | |
| +	if (sc->sc_flags & HIFN_IS_7956) {
 | |
| +		reg = HIFN_0_PUCTRL2;
 | |
| +	}
 | |
| +
 | |
| +	for (i = 5000; i > 0; i--) {
 | |
| +		DELAY(1);
 | |
| +		if (!(READ_REG_0(sc, reg) & HIFN_PUCTRL_RESET))
 | |
| +			break;
 | |
| +	}
 | |
| +	if (!i)
 | |
| +		device_printf(sc->sc_dev, "proc unit did not reset(0x%x)\n",
 | |
| +				READ_REG_0(sc, HIFN_0_PUCTRL));
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Reset the processing unit.
 | |
| + */
 | |
| +static void
 | |
| +hifn_reset_puc(struct hifn_softc *sc)
 | |
| +{
 | |
| +	/* Reset processing unit */
 | |
| +	int reg = HIFN_0_PUCTRL;
 | |
| +
 | |
| +	if (sc->sc_flags & HIFN_IS_7956) {
 | |
| +		reg = HIFN_0_PUCTRL2;
 | |
| +	}
 | |
| +	WRITE_REG_0(sc, reg, HIFN_PUCTRL_DMAENA);
 | |
| +
 | |
| +	hifn_puc_wait(sc);
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Set the Retry and TRDY registers; note that we set them to
 | |
| + * zero because the 7811 locks up when forced to retry (section
 | |
| + * 3.6 of "Specification Update SU-0014-04".  Not clear if we
 | |
| + * should do this for all Hifn parts, but it doesn't seem to hurt.
 | |
| + */
 | |
| +static void
 | |
| +hifn_set_retry(struct hifn_softc *sc)
 | |
| +{
 | |
| +	DPRINTF("%s()\n", __FUNCTION__);
 | |
| +	/* NB: RETRY only responds to 8-bit reads/writes */
 | |
| +	pci_write_config_byte(sc->sc_pcidev, HIFN_RETRY_TIMEOUT, 0);
 | |
| +	pci_write_config_dword(sc->sc_pcidev, HIFN_TRDY_TIMEOUT, 0);
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Resets the board.  Values in the regesters are left as is
 | |
| + * from the reset (i.e. initial values are assigned elsewhere).
 | |
| + */
 | |
| +static void
 | |
| +hifn_reset_board(struct hifn_softc *sc, int full)
 | |
| +{
 | |
| +	u_int32_t reg;
 | |
| +
 | |
| +	DPRINTF("%s()\n", __FUNCTION__);
 | |
| +	/*
 | |
| +	 * Set polling in the DMA configuration register to zero.  0x7 avoids
 | |
| +	 * resetting the board and zeros out the other fields.
 | |
| +	 */
 | |
| +	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
 | |
| +	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
 | |
| +
 | |
| +	/*
 | |
| +	 * Now that polling has been disabled, we have to wait 1 ms
 | |
| +	 * before resetting the board.
 | |
| +	 */
 | |
| +	DELAY(1000);
 | |
| +
 | |
| +	/* Reset the DMA unit */
 | |
| +	if (full) {
 | |
| +		WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
 | |
| +		DELAY(1000);
 | |
| +	} else {
 | |
| +		WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
 | |
| +		    HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
 | |
| +		hifn_reset_puc(sc);
 | |
| +	}
 | |
| +
 | |
| +	KASSERT(sc->sc_dma != NULL, ("hifn_reset_board: null DMA tag!"));
 | |
| +	bzero(sc->sc_dma, sizeof(*sc->sc_dma));
 | |
| +
 | |
| +	/* Bring dma unit out of reset */
 | |
| +	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
 | |
| +	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
 | |
| +
 | |
| +	hifn_puc_wait(sc);
 | |
| +	hifn_set_retry(sc);
 | |
| +
 | |
| +	if (sc->sc_flags & HIFN_IS_7811) {
 | |
| +		for (reg = 0; reg < 1000; reg++) {
 | |
| +			if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
 | |
| +			    HIFN_MIPSRST_CRAMINIT)
 | |
| +				break;
 | |
| +			DELAY(1000);
 | |
| +		}
 | |
| +		if (reg == 1000)
 | |
| +			device_printf(sc->sc_dev, ": cram init timeout\n");
 | |
| +	} else {
 | |
| +	  /* set up DMA configuration register #2 */
 | |
| +	  /* turn off all PK and BAR0 swaps */
 | |
| +	  WRITE_REG_1(sc, HIFN_1_DMA_CNFG2,
 | |
| +		      (3 << HIFN_DMACNFG2_INIT_WRITE_BURST_SHIFT)|
 | |
| +		      (3 << HIFN_DMACNFG2_INIT_READ_BURST_SHIFT)|
 | |
| +		      (2 << HIFN_DMACNFG2_TGT_WRITE_BURST_SHIFT)|
 | |
| +		      (2 << HIFN_DMACNFG2_TGT_READ_BURST_SHIFT));
 | |
| +	}
 | |
| +}
 | |
| +
 | |
| +static u_int32_t
 | |
| +hifn_next_signature(u_int32_t a, u_int cnt)
 | |
| +{
 | |
| +	int i;
 | |
| +	u_int32_t v;
 | |
| +
 | |
| +	for (i = 0; i < cnt; i++) {
 | |
| +
 | |
| +		/* get the parity */
 | |
| +		v = a & 0x80080125;
 | |
| +		v ^= v >> 16;
 | |
| +		v ^= v >> 8;
 | |
| +		v ^= v >> 4;
 | |
| +		v ^= v >> 2;
 | |
| +		v ^= v >> 1;
 | |
| +
 | |
| +		a = (v & 1) ^ (a << 1);
 | |
| +	}
 | |
| +
 | |
| +	return a;
 | |
| +}
 | |
| +
 | |
| +
 | |
| +/*
 | |
| + * Checks to see if crypto is already enabled.  If crypto isn't enable,
 | |
| + * "hifn_enable_crypto" is called to enable it.  The check is important,
 | |
| + * as enabling crypto twice will lock the board.
 | |
| + */
 | |
| +static int
 | |
| +hifn_enable_crypto(struct hifn_softc *sc)
 | |
| +{
 | |
| +	u_int32_t dmacfg, ramcfg, encl, addr, i;
 | |
| +	char offtbl[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 | |
| +					  0x00, 0x00, 0x00, 0x00 };
 | |
| +
 | |
| +	DPRINTF("%s()\n", __FUNCTION__);
 | |
| +
 | |
| +	ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
 | |
| +	dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);
 | |
| +
 | |
| +	/*
 | |
| +	 * The RAM config register's encrypt level bit needs to be set before
 | |
| +	 * every read performed on the encryption level register.
 | |
| +	 */
 | |
| +	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
 | |
| +
 | |
| +	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
 | |
| +
 | |
| +	/*
 | |
| +	 * Make sure we don't re-unlock.  Two unlocks kills chip until the
 | |
| +	 * next reboot.
 | |
| +	 */
 | |
| +	if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
 | |
| +#ifdef HIFN_DEBUG
 | |
| +		if (hifn_debug)
 | |
| +			device_printf(sc->sc_dev,
 | |
| +			    "Strong crypto already enabled!\n");
 | |
| +#endif
 | |
| +		goto report;
 | |
| +	}
 | |
| +
 | |
| +	if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
 | |
| +#ifdef HIFN_DEBUG
 | |
| +		if (hifn_debug)
 | |
| +			device_printf(sc->sc_dev,
 | |
| +			      "Unknown encryption level 0x%x\n", encl);
 | |
| +#endif
 | |
| +		return 1;
 | |
| +	}
 | |
| +
 | |
| +	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
 | |
| +	    HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
 | |
| +	DELAY(1000);
 | |
| +	addr = READ_REG_1(sc, HIFN_UNLOCK_SECRET1);
 | |
| +	DELAY(1000);
 | |
| +	WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, 0);
 | |
| +	DELAY(1000);
 | |
| +
 | |
| +	for (i = 0; i <= 12; i++) {
 | |
| +		addr = hifn_next_signature(addr, offtbl[i] + 0x101);
 | |
| +		WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, addr);
 | |
| +
 | |
| +		DELAY(1000);
 | |
| +	}
 | |
| +
 | |
| +	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
 | |
| +	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
 | |
| +
 | |
| +#ifdef HIFN_DEBUG
 | |
| +	if (hifn_debug) {
 | |
| +		if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
 | |
| +			device_printf(sc->sc_dev, "Engine is permanently "
 | |
| +				"locked until next system reset!\n");
 | |
| +		else
 | |
| +			device_printf(sc->sc_dev, "Engine enabled "
 | |
| +				"successfully!\n");
 | |
| +	}
 | |
| +#endif
 | |
| +
 | |
| +report:
 | |
| +	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
 | |
| +	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);
 | |
| +
 | |
| +	switch (encl) {
 | |
| +	case HIFN_PUSTAT_ENA_1:
 | |
| +	case HIFN_PUSTAT_ENA_2:
 | |
| +		break;
 | |
| +	case HIFN_PUSTAT_ENA_0:
 | |
| +	default:
 | |
| +		device_printf(sc->sc_dev, "disabled\n");
 | |
| +		break;
 | |
| +	}
 | |
| +
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Give initial values to the registers listed in the "Register Space"
 | |
| + * section of the HIFN Software Development reference manual.
 | |
| + */
 | |
| +static void
 | |
| +hifn_init_pci_registers(struct hifn_softc *sc)
 | |
| +{
 | |
| +	DPRINTF("%s()\n", __FUNCTION__);
 | |
| +
 | |
| +	/* write fixed values needed by the Initialization registers */
 | |
| +	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
 | |
| +	WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
 | |
| +	WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);
 | |
| +
 | |
| +	/* write all 4 ring address registers */
 | |
| +	WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dma_physaddr +
 | |
| +	    offsetof(struct hifn_dma, cmdr[0]));
 | |
| +	WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dma_physaddr +
 | |
| +	    offsetof(struct hifn_dma, srcr[0]));
 | |
| +	WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dma_physaddr +
 | |
| +	    offsetof(struct hifn_dma, dstr[0]));
 | |
| +	WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dma_physaddr +
 | |
| +	    offsetof(struct hifn_dma, resr[0]));
 | |
| +
 | |
| +	DELAY(2000);
 | |
| +
 | |
| +	/* write status register */
 | |
| +	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
 | |
| +	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
 | |
| +	    HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
 | |
| +	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
 | |
| +	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
 | |
| +	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
 | |
| +	    HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
 | |
| +	    HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
 | |
| +	    HIFN_DMACSR_S_WAIT |
 | |
| +	    HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
 | |
| +	    HIFN_DMACSR_C_WAIT |
 | |
| +	    HIFN_DMACSR_ENGINE |
 | |
| +	    ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
 | |
| +		HIFN_DMACSR_PUBDONE : 0) |
 | |
| +	    ((sc->sc_flags & HIFN_IS_7811) ?
 | |
| +		HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));
 | |
| +
 | |
| +	sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
 | |
| +	sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
 | |
| +	    HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
 | |
| +	    HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
 | |
| +	    ((sc->sc_flags & HIFN_IS_7811) ?
 | |
| +		HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
 | |
| +	sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
 | |
| +	WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
 | |
| +
 | |
| +
 | |
| +	if (sc->sc_flags & HIFN_IS_7956) {
 | |
| +		u_int32_t pll;
 | |
| +
 | |
| +		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
 | |
| +		    HIFN_PUCNFG_TCALLPHASES |
 | |
| +		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);
 | |
| +
 | |
| +		/* turn off the clocks and insure bypass is set */
 | |
| +		pll = READ_REG_1(sc, HIFN_1_PLL);
 | |
| +		pll = (pll &~ (HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL))
 | |
| +		  | HIFN_PLL_BP | HIFN_PLL_MBSET;
 | |
| +		WRITE_REG_1(sc, HIFN_1_PLL, pll);
 | |
| +		DELAY(10*1000);		/* 10ms */
 | |
| +
 | |
| +		/* change configuration */
 | |
| +		pll = (pll &~ HIFN_PLL_CONFIG) | sc->sc_pllconfig;
 | |
| +		WRITE_REG_1(sc, HIFN_1_PLL, pll);
 | |
| +		DELAY(10*1000);		/* 10ms */
 | |
| +
 | |
| +		/* disable bypass */
 | |
| +		pll &= ~HIFN_PLL_BP;
 | |
| +		WRITE_REG_1(sc, HIFN_1_PLL, pll);
 | |
| +		/* enable clocks with new configuration */
 | |
| +		pll |= HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL;
 | |
| +		WRITE_REG_1(sc, HIFN_1_PLL, pll);
 | |
| +	} else {
 | |
| +		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
 | |
| +		    HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
 | |
| +		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
 | |
| +		    (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
 | |
| +	}
 | |
| +
 | |
| +	WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
 | |
| +	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
 | |
| +	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
 | |
| +	    ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
 | |
| +	    ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * The maximum number of sessions supported by the card
 | |
| + * is dependent on the amount of context ram, which
 | |
| + * encryption algorithms are enabled, and how compression
 | |
| + * is configured.  This should be configured before this
 | |
| + * routine is called.
 | |
| + */
 | |
| +static void
 | |
| +hifn_sessions(struct hifn_softc *sc)
 | |
| +{
 | |
| +	u_int32_t pucnfg;
 | |
| +	int ctxsize;
 | |
| +
 | |
| +	DPRINTF("%s()\n", __FUNCTION__);
 | |
| +
 | |
| +	pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);
 | |
| +
 | |
| +	if (pucnfg & HIFN_PUCNFG_COMPSING) {
 | |
| +		if (pucnfg & HIFN_PUCNFG_ENCCNFG)
 | |
| +			ctxsize = 128;
 | |
| +		else
 | |
| +			ctxsize = 512;
 | |
| +		/*
 | |
| +		 * 7955/7956 has internal context memory of 32K
 | |
| +		 */
 | |
| +		if (sc->sc_flags & HIFN_IS_7956)
 | |
| +			sc->sc_maxses = 32768 / ctxsize;
 | |
| +		else
 | |
| +			sc->sc_maxses = 1 +
 | |
| +			    ((sc->sc_ramsize - 32768) / ctxsize);
 | |
| +	} else
 | |
| +		sc->sc_maxses = sc->sc_ramsize / 16384;
 | |
| +
 | |
| +	if (sc->sc_maxses > 2048)
 | |
| +		sc->sc_maxses = 2048;
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Determine ram type (sram or dram).  Board should be just out of a reset
 | |
| + * state when this is called.
 | |
| + */
 | |
| +static int
 | |
| +hifn_ramtype(struct hifn_softc *sc)
 | |
| +{
 | |
| +	u_int8_t data[8], dataexpect[8];
 | |
| +	int i;
 | |
| +
 | |
| +	for (i = 0; i < sizeof(data); i++)
 | |
| +		data[i] = dataexpect[i] = 0x55;
 | |
| +	if (hifn_writeramaddr(sc, 0, data))
 | |
| +		return (-1);
 | |
| +	if (hifn_readramaddr(sc, 0, data))
 | |
| +		return (-1);
 | |
| +	if (bcmp(data, dataexpect, sizeof(data)) != 0) {
 | |
| +		sc->sc_drammodel = 1;
 | |
| +		return (0);
 | |
| +	}
 | |
| +
 | |
| +	for (i = 0; i < sizeof(data); i++)
 | |
| +		data[i] = dataexpect[i] = 0xaa;
 | |
| +	if (hifn_writeramaddr(sc, 0, data))
 | |
| +		return (-1);
 | |
| +	if (hifn_readramaddr(sc, 0, data))
 | |
| +		return (-1);
 | |
| +	if (bcmp(data, dataexpect, sizeof(data)) != 0) {
 | |
| +		sc->sc_drammodel = 1;
 | |
| +		return (0);
 | |
| +	}
 | |
| +
 | |
| +	return (0);
 | |
| +}
 | |
| +
 | |
| +#define	HIFN_SRAM_MAX		(32 << 20)
 | |
| +#define	HIFN_SRAM_STEP_SIZE	16384
 | |
| +#define	HIFN_SRAM_GRANULARITY	(HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)
 | |
| +
 | |
| +static int
 | |
| +hifn_sramsize(struct hifn_softc *sc)
 | |
| +{
 | |
| +	u_int32_t a;
 | |
| +	u_int8_t data[8];
 | |
| +	u_int8_t dataexpect[sizeof(data)];
 | |
| +	int32_t i;
 | |
| +
 | |
| +	for (i = 0; i < sizeof(data); i++)
 | |
| +		data[i] = dataexpect[i] = i ^ 0x5a;
 | |
| +
 | |
| +	for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
 | |
| +		a = i * HIFN_SRAM_STEP_SIZE;
 | |
| +		bcopy(&i, data, sizeof(i));
 | |
| +		hifn_writeramaddr(sc, a, data);
 | |
| +	}
 | |
| +
 | |
| +	for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
 | |
| +		a = i * HIFN_SRAM_STEP_SIZE;
 | |
| +		bcopy(&i, dataexpect, sizeof(i));
 | |
| +		if (hifn_readramaddr(sc, a, data) < 0)
 | |
| +			return (0);
 | |
| +		if (bcmp(data, dataexpect, sizeof(data)) != 0)
 | |
| +			return (0);
 | |
| +		sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
 | |
| +	}
 | |
| +
 | |
| +	return (0);
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * XXX For dram boards, one should really try all of the
 | |
| + * HIFN_PUCNFG_DSZ_*'s.  This just assumes that PUCNFG
 | |
| + * is already set up correctly.
 | |
| + */
 | |
| +static int
 | |
| +hifn_dramsize(struct hifn_softc *sc)
 | |
| +{
 | |
| +	u_int32_t cnfg;
 | |
| +
 | |
| +	if (sc->sc_flags & HIFN_IS_7956) {
 | |
| +		/*
 | |
| +		 * 7955/7956 have a fixed internal ram of only 32K.
 | |
| +		 */
 | |
| +		sc->sc_ramsize = 32768;
 | |
| +	} else {
 | |
| +		cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
 | |
| +		    HIFN_PUCNFG_DRAMMASK;
 | |
| +		sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
 | |
| +	}
 | |
| +	return (0);
 | |
| +}
 | |
| +
 | |
| +static void
 | |
| +hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp, int *resp)
 | |
| +{
 | |
| +	struct hifn_dma *dma = sc->sc_dma;
 | |
| +
 | |
| +	DPRINTF("%s()\n", __FUNCTION__);
 | |
| +
 | |
| +	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
 | |
| +		dma->cmdi = 0;
 | |
| +		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
 | |
| +		wmb();
 | |
| +		dma->cmdr[HIFN_D_CMD_RSIZE].l |= htole32(HIFN_D_VALID);
 | |
| +		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
 | |
| +		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
 | |
| +	}
 | |
| +	*cmdp = dma->cmdi++;
 | |
| +	dma->cmdk = dma->cmdi;
 | |
| +
 | |
| +	if (dma->srci == HIFN_D_SRC_RSIZE) {
 | |
| +		dma->srci = 0;
 | |
| +		dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
 | |
| +		wmb();
 | |
| +		dma->srcr[HIFN_D_SRC_RSIZE].l |= htole32(HIFN_D_VALID);
 | |
| +		HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
 | |
| +		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
 | |
| +	}
 | |
| +	*srcp = dma->srci++;
 | |
| +	dma->srck = dma->srci;
 | |
| +
 | |
| +	if (dma->dsti == HIFN_D_DST_RSIZE) {
 | |
| +		dma->dsti = 0;
 | |
| +		dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
 | |
| +		wmb();
 | |
| +		dma->dstr[HIFN_D_DST_RSIZE].l |= htole32(HIFN_D_VALID);
 | |
| +		HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
 | |
| +		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
 | |
| +	}
 | |
| +	*dstp = dma->dsti++;
 | |
| +	dma->dstk = dma->dsti;
 | |
| +
 | |
| +	if (dma->resi == HIFN_D_RES_RSIZE) {
 | |
| +		dma->resi = 0;
 | |
| +		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
 | |
| +		wmb();
 | |
| +		dma->resr[HIFN_D_RES_RSIZE].l |= htole32(HIFN_D_VALID);
 | |
| +		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
 | |
| +		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
 | |
| +	}
 | |
| +	*resp = dma->resi++;
 | |
| +	dma->resk = dma->resi;
 | |
| +}
 | |
| +
 | |
| +static int
 | |
| +hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
 | |
| +{
 | |
| +	struct hifn_dma *dma = sc->sc_dma;
 | |
| +	hifn_base_command_t wc;
 | |
| +	const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
 | |
| +	int r, cmdi, resi, srci, dsti;
 | |
| +
 | |
| +	DPRINTF("%s()\n", __FUNCTION__);
 | |
| +
 | |
| +	wc.masks = htole16(3 << 13);
 | |
| +	wc.session_num = htole16(addr >> 14);
 | |
| +	wc.total_source_count = htole16(8);
 | |
| +	wc.total_dest_count = htole16(addr & 0x3fff);
 | |
| +
 | |
| +	hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
 | |
| +
 | |
| +	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
 | |
| +	    HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
 | |
| +	    HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
 | |
| +
 | |
| +	/* build write command */
 | |
| +	bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
 | |
| +	*(hifn_base_command_t *)dma->command_bufs[cmdi] = wc;
 | |
| +	bcopy(data, &dma->test_src, sizeof(dma->test_src));
 | |
| +
 | |
| +	dma->srcr[srci].p = htole32(sc->sc_dma_physaddr
 | |
| +	    + offsetof(struct hifn_dma, test_src));
 | |
| +	dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr
 | |
| +	    + offsetof(struct hifn_dma, test_dst));
 | |
| +
 | |
| +	dma->cmdr[cmdi].l = htole32(16 | masks);
 | |
| +	dma->srcr[srci].l = htole32(8 | masks);
 | |
| +	dma->dstr[dsti].l = htole32(4 | masks);
 | |
| +	dma->resr[resi].l = htole32(4 | masks);
 | |
| +
 | |
| +	for (r = 10000; r >= 0; r--) {
 | |
| +		DELAY(10);
 | |
| +		if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
 | |
| +			break;
 | |
| +	}
 | |
| +	if (r == 0) {
 | |
| +		device_printf(sc->sc_dev, "writeramaddr -- "
 | |
| +		    "result[%d](addr %d) still valid\n", resi, addr);
 | |
| +		r = -1;
 | |
| +		return (-1);
 | |
| +	} else
 | |
| +		r = 0;
 | |
| +
 | |
| +	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
 | |
| +	    HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
 | |
| +	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
 | |
| +
 | |
| +	return (r);
 | |
| +}
 | |
| +
 | |
| +static int
 | |
| +hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
 | |
| +{
 | |
| +	struct hifn_dma *dma = sc->sc_dma;
 | |
| +	hifn_base_command_t rc;
 | |
| +	const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
 | |
| +	int r, cmdi, srci, dsti, resi;
 | |
| +
 | |
| +	DPRINTF("%s()\n", __FUNCTION__);
 | |
| +
 | |
| +	rc.masks = htole16(2 << 13);
 | |
| +	rc.session_num = htole16(addr >> 14);
 | |
| +	rc.total_source_count = htole16(addr & 0x3fff);
 | |
| +	rc.total_dest_count = htole16(8);
 | |
| +
 | |
| +	hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
 | |
| +
 | |
| +	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
 | |
| +	    HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
 | |
| +	    HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
 | |
| +
 | |
| +	bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
 | |
| +	*(hifn_base_command_t *)dma->command_bufs[cmdi] = rc;
 | |
| +
 | |
| +	dma->srcr[srci].p = htole32(sc->sc_dma_physaddr +
 | |
| +	    offsetof(struct hifn_dma, test_src));
 | |
| +	dma->test_src = 0;
 | |
| +	dma->dstr[dsti].p =  htole32(sc->sc_dma_physaddr +
 | |
| +	    offsetof(struct hifn_dma, test_dst));
 | |
| +	dma->test_dst = 0;
 | |
| +	dma->cmdr[cmdi].l = htole32(8 | masks);
 | |
| +	dma->srcr[srci].l = htole32(8 | masks);
 | |
| +	dma->dstr[dsti].l = htole32(8 | masks);
 | |
| +	dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks);
 | |
| +
 | |
| +	for (r = 10000; r >= 0; r--) {
 | |
| +		DELAY(10);
 | |
| +		if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
 | |
| +			break;
 | |
| +	}
 | |
| +	if (r == 0) {
 | |
| +		device_printf(sc->sc_dev, "readramaddr -- "
 | |
| +		    "result[%d](addr %d) still valid\n", resi, addr);
 | |
| +		r = -1;
 | |
| +	} else {
 | |
| +		r = 0;
 | |
| +		bcopy(&dma->test_dst, data, sizeof(dma->test_dst));
 | |
| +	}
 | |
| +
 | |
| +	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
 | |
| +	    HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
 | |
| +	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
 | |
| +
 | |
| +	return (r);
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Initialize the descriptor rings.
 | |
| + */
 | |
| +static void
 | |
| +hifn_init_dma(struct hifn_softc *sc)
 | |
| +{
 | |
| +	struct hifn_dma *dma = sc->sc_dma;
 | |
| +	int i;
 | |
| +
 | |
| +	DPRINTF("%s()\n", __FUNCTION__);
 | |
| +
 | |
| +	hifn_set_retry(sc);
 | |
| +
 | |
| +	/* initialize static pointer values */
 | |
| +	for (i = 0; i < HIFN_D_CMD_RSIZE; i++)
 | |
| +		dma->cmdr[i].p = htole32(sc->sc_dma_physaddr +
 | |
| +		    offsetof(struct hifn_dma, command_bufs[i][0]));
 | |
| +	for (i = 0; i < HIFN_D_RES_RSIZE; i++)
 | |
| +		dma->resr[i].p = htole32(sc->sc_dma_physaddr +
 | |
| +		    offsetof(struct hifn_dma, result_bufs[i][0]));
 | |
| +
 | |
| +	dma->cmdr[HIFN_D_CMD_RSIZE].p =
 | |
| +	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, cmdr[0]));
 | |
| +	dma->srcr[HIFN_D_SRC_RSIZE].p =
 | |
| +	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, srcr[0]));
 | |
| +	dma->dstr[HIFN_D_DST_RSIZE].p =
 | |
| +	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, dstr[0]));
 | |
| +	dma->resr[HIFN_D_RES_RSIZE].p =
 | |
| +	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, resr[0]));
 | |
| +
 | |
| +	dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
 | |
| +	dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
 | |
| +	dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Writes out the raw command buffer space.  Returns the
 | |
| + * command buffer size.
 | |
| + */
 | |
| +static u_int
 | |
| +hifn_write_command(struct hifn_command *cmd, u_int8_t *buf)
 | |
| +{
 | |
| +	struct hifn_softc *sc = NULL;
 | |
| +	u_int8_t *buf_pos;
 | |
| +	hifn_base_command_t *base_cmd;
 | |
| +	hifn_mac_command_t *mac_cmd;
 | |
| +	hifn_crypt_command_t *cry_cmd;
 | |
| +	int using_mac, using_crypt, len, ivlen;
 | |
| +	u_int32_t dlen, slen;
 | |
| +
 | |
| +	DPRINTF("%s()\n", __FUNCTION__);
 | |
| +
 | |
| +	buf_pos = buf;
 | |
| +	using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
 | |
| +	using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;
 | |
| +
 | |
| +	base_cmd = (hifn_base_command_t *)buf_pos;
 | |
| +	base_cmd->masks = htole16(cmd->base_masks);
 | |
| +	slen = cmd->src_mapsize;
 | |
| +	if (cmd->sloplen)
 | |
| +		dlen = cmd->dst_mapsize - cmd->sloplen + sizeof(u_int32_t);
 | |
| +	else
 | |
| +		dlen = cmd->dst_mapsize;
 | |
| +	base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
 | |
| +	base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
 | |
| +	dlen >>= 16;
 | |
| +	slen >>= 16;
 | |
| +	base_cmd->session_num = htole16(
 | |
| +	    ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
 | |
| +	    ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
 | |
| +	buf_pos += sizeof(hifn_base_command_t);
 | |
| +
 | |
| +	if (using_mac) {
 | |
| +		mac_cmd = (hifn_mac_command_t *)buf_pos;
 | |
| +		dlen = cmd->maccrd->crd_len;
 | |
| +		mac_cmd->source_count = htole16(dlen & 0xffff);
 | |
| +		dlen >>= 16;
 | |
| +		mac_cmd->masks = htole16(cmd->mac_masks |
 | |
| +		    ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
 | |
| +		mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
 | |
| +		mac_cmd->reserved = 0;
 | |
| +		buf_pos += sizeof(hifn_mac_command_t);
 | |
| +	}
 | |
| +
 | |
| +	if (using_crypt) {
 | |
| +		cry_cmd = (hifn_crypt_command_t *)buf_pos;
 | |
| +		dlen = cmd->enccrd->crd_len;
 | |
| +		cry_cmd->source_count = htole16(dlen & 0xffff);
 | |
| +		dlen >>= 16;
 | |
| +		cry_cmd->masks = htole16(cmd->cry_masks |
 | |
| +		    ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
 | |
| +		cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
 | |
| +		cry_cmd->reserved = 0;
 | |
| +		buf_pos += sizeof(hifn_crypt_command_t);
 | |
| +	}
 | |
| +
 | |
| +	if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
 | |
| +		bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH);
 | |
| +		buf_pos += HIFN_MAC_KEY_LENGTH;
 | |
| +	}
 | |
| +
 | |
| +	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
 | |
| +		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
 | |
| +		case HIFN_CRYPT_CMD_ALG_3DES:
 | |
| +			bcopy(cmd->ck, buf_pos, HIFN_3DES_KEY_LENGTH);
 | |
| +			buf_pos += HIFN_3DES_KEY_LENGTH;
 | |
| +			break;
 | |
| +		case HIFN_CRYPT_CMD_ALG_DES:
 | |
| +			bcopy(cmd->ck, buf_pos, HIFN_DES_KEY_LENGTH);
 | |
| +			buf_pos += HIFN_DES_KEY_LENGTH;
 | |
| +			break;
 | |
| +		case HIFN_CRYPT_CMD_ALG_RC4:
 | |
| +			len = 256;
 | |
| +			do {
 | |
| +				int clen;
 | |
| +
 | |
| +				clen = MIN(cmd->cklen, len);
 | |
| +				bcopy(cmd->ck, buf_pos, clen);
 | |
| +				len -= clen;
 | |
| +				buf_pos += clen;
 | |
| +			} while (len > 0);
 | |
| +			bzero(buf_pos, 4);
 | |
| +			buf_pos += 4;
 | |
| +			break;
 | |
| +		case HIFN_CRYPT_CMD_ALG_AES:
 | |
| +			/*
 | |
| +			 * AES keys are variable 128, 192 and
 | |
| +			 * 256 bits (16, 24 and 32 bytes).
 | |
| +			 */
 | |
| +			bcopy(cmd->ck, buf_pos, cmd->cklen);
 | |
| +			buf_pos += cmd->cklen;
 | |
| +			break;
 | |
| +		}
 | |
| +	}
 | |
| +
 | |
| +	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
 | |
| +		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
 | |
| +		case HIFN_CRYPT_CMD_ALG_AES:
 | |
| +			ivlen = HIFN_AES_IV_LENGTH;
 | |
| +			break;
 | |
| +		default:
 | |
| +			ivlen = HIFN_IV_LENGTH;
 | |
| +			break;
 | |
| +		}
 | |
| +		bcopy(cmd->iv, buf_pos, ivlen);
 | |
| +		buf_pos += ivlen;
 | |
| +	}
 | |
| +
 | |
| +	if ((cmd->base_masks & (HIFN_BASE_CMD_MAC|HIFN_BASE_CMD_CRYPT)) == 0) {
 | |
| +		bzero(buf_pos, 8);
 | |
| +		buf_pos += 8;
 | |
| +	}
 | |
| +
 | |
| +	return (buf_pos - buf);
 | |
| +}
 | |
| +
 | |
| +static int
 | |
| +hifn_dmamap_aligned(struct hifn_operand *op)
 | |
| +{
 | |
| +	struct hifn_softc *sc = NULL;
 | |
| +	int i;
 | |
| +
 | |
| +	DPRINTF("%s()\n", __FUNCTION__);
 | |
| +
 | |
| +	for (i = 0; i < op->nsegs; i++) {
 | |
| +		if (op->segs[i].ds_addr & 3)
 | |
| +			return (0);
 | |
| +		if ((i != (op->nsegs - 1)) && (op->segs[i].ds_len & 3))
 | |
| +			return (0);
 | |
| +	}
 | |
| +	return (1);
 | |
| +}
 | |
| +
 | |
| +static __inline int
 | |
| +hifn_dmamap_dstwrap(struct hifn_softc *sc, int idx)
 | |
| +{
 | |
| +	struct hifn_dma *dma = sc->sc_dma;
 | |
| +
 | |
| +	if (++idx == HIFN_D_DST_RSIZE) {
 | |
| +		dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
 | |
| +		    HIFN_D_MASKDONEIRQ);
 | |
| +		HIFN_DSTR_SYNC(sc, idx,
 | |
| +		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 | |
| +		idx = 0;
 | |
| +	}
 | |
| +	return (idx);
 | |
| +}
 | |
| +
 | |
| +static int
 | |
| +hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
 | |
| +{
 | |
| +	struct hifn_dma *dma = sc->sc_dma;
 | |
| +	struct hifn_operand *dst = &cmd->dst;
 | |
| +	u_int32_t p, l;
 | |
| +	int idx, used = 0, i;
 | |
| +
 | |
| +	DPRINTF("%s()\n", __FUNCTION__);
 | |
| +
 | |
| +	idx = dma->dsti;
 | |
| +	for (i = 0; i < dst->nsegs - 1; i++) {
 | |
| +		dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
 | |
| +		dma->dstr[idx].l = htole32(HIFN_D_MASKDONEIRQ | dst->segs[i].ds_len);
 | |
| +		wmb();
 | |
| +		dma->dstr[idx].l |= htole32(HIFN_D_VALID);
 | |
| +		HIFN_DSTR_SYNC(sc, idx,
 | |
| +		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 | |
| +		used++;
 | |
| +
 | |
| +		idx = hifn_dmamap_dstwrap(sc, idx);
 | |
| +	}
 | |
| +
 | |
| +	if (cmd->sloplen == 0) {
 | |
| +		p = dst->segs[i].ds_addr;
 | |
| +		l = HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
 | |
| +		    dst->segs[i].ds_len;
 | |
| +	} else {
 | |
| +		p = sc->sc_dma_physaddr +
 | |
| +		    offsetof(struct hifn_dma, slop[cmd->slopidx]);
 | |
| +		l = HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
 | |
| +		    sizeof(u_int32_t);
 | |
| +
 | |
| +		if ((dst->segs[i].ds_len - cmd->sloplen) != 0) {
 | |
| +			dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
 | |
| +			dma->dstr[idx].l = htole32(HIFN_D_MASKDONEIRQ |
 | |
| +			    (dst->segs[i].ds_len - cmd->sloplen));
 | |
| +			wmb();
 | |
| +			dma->dstr[idx].l |= htole32(HIFN_D_VALID);
 | |
| +			HIFN_DSTR_SYNC(sc, idx,
 | |
| +			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 | |
| +			used++;
 | |
| +
 | |
| +			idx = hifn_dmamap_dstwrap(sc, idx);
 | |
| +		}
 | |
| +	}
 | |
| +	dma->dstr[idx].p = htole32(p);
 | |
| +	dma->dstr[idx].l = htole32(l);
 | |
| +	wmb();
 | |
| +	dma->dstr[idx].l |= htole32(HIFN_D_VALID);
 | |
| +	HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 | |
| +	used++;
 | |
| +
 | |
| +	idx = hifn_dmamap_dstwrap(sc, idx);
 | |
| +
 | |
| +	dma->dsti = idx;
 | |
| +	dma->dstu += used;
 | |
| +	return (idx);
 | |
| +}
 | |
| +
 | |
| +static __inline int
 | |
| +hifn_dmamap_srcwrap(struct hifn_softc *sc, int idx)
 | |
| +{
 | |
| +	struct hifn_dma *dma = sc->sc_dma;
 | |
| +
 | |
| +	if (++idx == HIFN_D_SRC_RSIZE) {
 | |
| +		dma->srcr[idx].l = htole32(HIFN_D_VALID |
 | |
| +		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
 | |
| +		HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
 | |
| +		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
 | |
| +		idx = 0;
 | |
| +	}
 | |
| +	return (idx);
 | |
| +}
 | |
| +
 | |
| +static int
 | |
| +hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
 | |
| +{
 | |
| +	struct hifn_dma *dma = sc->sc_dma;
 | |
| +	struct hifn_operand *src = &cmd->src;
 | |
| +	int idx, i;
 | |
| +	u_int32_t last = 0;
 | |
| +
 | |
| +	DPRINTF("%s()\n", __FUNCTION__);
 | |
| +
 | |
| +	idx = dma->srci;
 | |
| +	for (i = 0; i < src->nsegs; i++) {
 | |
| +		if (i == src->nsegs - 1)
 | |
| +			last = HIFN_D_LAST;
 | |
| +
 | |
| +		dma->srcr[idx].p = htole32(src->segs[i].ds_addr);
 | |
| +		dma->srcr[idx].l = htole32(src->segs[i].ds_len |
 | |
| +		    HIFN_D_MASKDONEIRQ | last);
 | |
| +		wmb();
 | |
| +		dma->srcr[idx].l |= htole32(HIFN_D_VALID);
 | |
| +		HIFN_SRCR_SYNC(sc, idx,
 | |
| +		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
 | |
| +
 | |
| +		idx = hifn_dmamap_srcwrap(sc, idx);
 | |
| +	}
 | |
| +	dma->srci = idx;
 | |
| +	dma->srcu += src->nsegs;
 | |
| +	return (idx);
 | |
| +}
 | |
| +
 | |
| +
 | |
| +static int
 | |
| +hifn_crypto(
 | |
| +	struct hifn_softc *sc,
 | |
| +	struct hifn_command *cmd,
 | |
| +	struct cryptop *crp,
 | |
| +	int hint)
 | |
| +{
 | |
| +	struct	hifn_dma *dma = sc->sc_dma;
 | |
| +	u_int32_t cmdlen, csr;
 | |
| +	int cmdi, resi, err = 0;
 | |
| +	unsigned long l_flags;
 | |
| +
 | |
| +	DPRINTF("%s()\n", __FUNCTION__);
 | |
| +
 | |
| +	/*
 | |
| +	 * need 1 cmd, and 1 res
 | |
| +	 *
 | |
| +	 * NB: check this first since it's easy.
 | |
| +	 */
 | |
| +	HIFN_LOCK(sc);
 | |
| +	if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
 | |
| +	    (dma->resu + 1) > HIFN_D_RES_RSIZE) {
 | |
| +#ifdef HIFN_DEBUG
 | |
| +		if (hifn_debug) {
 | |
| +			device_printf(sc->sc_dev,
 | |
| +				"cmd/result exhaustion, cmdu %u resu %u\n",
 | |
| +				dma->cmdu, dma->resu);
 | |
| +		}
 | |
| +#endif
 | |
| +		hifnstats.hst_nomem_cr++;
 | |
| +		sc->sc_needwakeup |= CRYPTO_SYMQ;
 | |
| +		HIFN_UNLOCK(sc);
 | |
| +		return (ERESTART);
 | |
| +	}
 | |
| +
 | |
| +	if (crp->crp_flags & CRYPTO_F_SKBUF) {
 | |
| +		if (pci_map_skb(sc, &cmd->src, cmd->src_skb)) {
 | |
| +			hifnstats.hst_nomem_load++;
 | |
| +			err = ENOMEM;
 | |
| +			goto err_srcmap1;
 | |
| +		}
 | |
| +	} else if (crp->crp_flags & CRYPTO_F_IOV) {
 | |
| +		if (pci_map_uio(sc, &cmd->src, cmd->src_io)) {
 | |
| +			hifnstats.hst_nomem_load++;
 | |
| +			err = ENOMEM;
 | |
| +			goto err_srcmap1;
 | |
| +		}
 | |
| +	} else {
 | |
| +		if (pci_map_buf(sc, &cmd->src, cmd->src_buf, crp->crp_ilen)) {
 | |
| +			hifnstats.hst_nomem_load++;
 | |
| +			err = ENOMEM;
 | |
| +			goto err_srcmap1;
 | |
| +		}
 | |
| +	}
 | |
| +
 | |
| +	if (hifn_dmamap_aligned(&cmd->src)) {
 | |
| +		cmd->sloplen = cmd->src_mapsize & 3;
 | |
| +		cmd->dst = cmd->src;
 | |
| +	} else {
 | |
| +		if (crp->crp_flags & CRYPTO_F_IOV) {
 | |
| +			DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
 | |
| +			err = EINVAL;
 | |
| +			goto err_srcmap;
 | |
| +		} else if (crp->crp_flags & CRYPTO_F_SKBUF) {
 | |
| +#ifdef NOTYET
 | |
| +			int totlen, len;
 | |
| +			struct mbuf *m, *m0, *mlast;
 | |
| +
 | |
| +			KASSERT(cmd->dst_m == cmd->src_m,
 | |
| +				("hifn_crypto: dst_m initialized improperly"));
 | |
| +			hifnstats.hst_unaligned++;
 | |
| +			/*
 | |
| +			 * Source is not aligned on a longword boundary.
 | |
| +			 * Copy the data to insure alignment.  If we fail
 | |
| +			 * to allocate mbufs or clusters while doing this
 | |
| +			 * we return ERESTART so the operation is requeued
 | |
| +			 * at the crypto later, but only if there are
 | |
| +			 * ops already posted to the hardware; otherwise we
 | |
| +			 * have no guarantee that we'll be re-entered.
 | |
| +			 */
 | |
| +			totlen = cmd->src_mapsize;
 | |
| +			if (cmd->src_m->m_flags & M_PKTHDR) {
 | |
| +				len = MHLEN;
 | |
| +				MGETHDR(m0, M_DONTWAIT, MT_DATA);
 | |
| +				if (m0 && !m_dup_pkthdr(m0, cmd->src_m, M_DONTWAIT)) {
 | |
| +					m_free(m0);
 | |
| +					m0 = NULL;
 | |
| +				}
 | |
| +			} else {
 | |
| +				len = MLEN;
 | |
| +				MGET(m0, M_DONTWAIT, MT_DATA);
 | |
| +			}
 | |
| +			if (m0 == NULL) {
 | |
| +				hifnstats.hst_nomem_mbuf++;
 | |
| +				err = dma->cmdu ? ERESTART : ENOMEM;
 | |
| +				goto err_srcmap;
 | |
| +			}
 | |
| +			if (totlen >= MINCLSIZE) {
 | |
| +				MCLGET(m0, M_DONTWAIT);
 | |
| +				if ((m0->m_flags & M_EXT) == 0) {
 | |
| +					hifnstats.hst_nomem_mcl++;
 | |
| +					err = dma->cmdu ? ERESTART : ENOMEM;
 | |
| +					m_freem(m0);
 | |
| +					goto err_srcmap;
 | |
| +				}
 | |
| +				len = MCLBYTES;
 | |
| +			}
 | |
| +			totlen -= len;
 | |
| +			m0->m_pkthdr.len = m0->m_len = len;
 | |
| +			mlast = m0;
 | |
| +
 | |
| +			while (totlen > 0) {
 | |
| +				MGET(m, M_DONTWAIT, MT_DATA);
 | |
| +				if (m == NULL) {
 | |
| +					hifnstats.hst_nomem_mbuf++;
 | |
| +					err = dma->cmdu ? ERESTART : ENOMEM;
 | |
| +					m_freem(m0);
 | |
| +					goto err_srcmap;
 | |
| +				}
 | |
| +				len = MLEN;
 | |
| +				if (totlen >= MINCLSIZE) {
 | |
| +					MCLGET(m, M_DONTWAIT);
 | |
| +					if ((m->m_flags & M_EXT) == 0) {
 | |
| +						hifnstats.hst_nomem_mcl++;
 | |
| +						err = dma->cmdu ? ERESTART : ENOMEM;
 | |
| +						mlast->m_next = m;
 | |
| +						m_freem(m0);
 | |
| +						goto err_srcmap;
 | |
| +					}
 | |
| +					len = MCLBYTES;
 | |
| +				}
 | |
| +
 | |
| +				m->m_len = len;
 | |
| +				m0->m_pkthdr.len += len;
 | |
| +				totlen -= len;
 | |
| +
 | |
| +				mlast->m_next = m;
 | |
| +				mlast = m;
 | |
| +			}
 | |
| +			cmd->dst_m = m0;
 | |
| +#else
 | |
| +			device_printf(sc->sc_dev,
 | |
| +					"%s,%d: CRYPTO_F_SKBUF unaligned not implemented\n",
 | |
| +					__FILE__, __LINE__);
 | |
| +			err = EINVAL;
 | |
| +			goto err_srcmap;
 | |
| +#endif
 | |
| +		} else {
 | |
| +			device_printf(sc->sc_dev,
 | |
| +					"%s,%d: unaligned contig buffers not implemented\n",
 | |
| +					__FILE__, __LINE__);
 | |
| +			err = EINVAL;
 | |
| +			goto err_srcmap;
 | |
| +		}
 | |
| +	}
 | |
| +
 | |
| +	if (cmd->dst_map == NULL) {
 | |
| +		if (crp->crp_flags & CRYPTO_F_SKBUF) {
 | |
| +			if (pci_map_skb(sc, &cmd->dst, cmd->dst_skb)) {
 | |
| +				hifnstats.hst_nomem_map++;
 | |
| +				err = ENOMEM;
 | |
| +				goto err_dstmap1;
 | |
| +			}
 | |
| +		} else if (crp->crp_flags & CRYPTO_F_IOV) {
 | |
| +			if (pci_map_uio(sc, &cmd->dst, cmd->dst_io)) {
 | |
| +				hifnstats.hst_nomem_load++;
 | |
| +				err = ENOMEM;
 | |
| +				goto err_dstmap1;
 | |
| +			}
 | |
| +		} else {
 | |
| +			if (pci_map_buf(sc, &cmd->dst, cmd->dst_buf, crp->crp_ilen)) {
 | |
| +				hifnstats.hst_nomem_load++;
 | |
| +				err = ENOMEM;
 | |
| +				goto err_dstmap1;
 | |
| +			}
 | |
| +		}
 | |
| +	}
 | |
| +
 | |
| +#ifdef HIFN_DEBUG
 | |
| +	if (hifn_debug) {
 | |
| +		device_printf(sc->sc_dev,
 | |
| +		    "Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
 | |
| +		    READ_REG_1(sc, HIFN_1_DMA_CSR),
 | |
| +		    READ_REG_1(sc, HIFN_1_DMA_IER),
 | |
| +		    dma->cmdu, dma->srcu, dma->dstu, dma->resu,
 | |
| +		    cmd->src_nsegs, cmd->dst_nsegs);
 | |
| +	}
 | |
| +#endif
 | |
| +
 | |
| +#if 0
 | |
| +	if (cmd->src_map == cmd->dst_map) {
 | |
| +		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
 | |
| +		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
 | |
| +	} else {
 | |
| +		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
 | |
| +		    BUS_DMASYNC_PREWRITE);
 | |
| +		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
 | |
| +		    BUS_DMASYNC_PREREAD);
 | |
| +	}
 | |
| +#endif
 | |
| +
 | |
| +	/*
 | |
| +	 * need N src, and N dst
 | |
| +	 */
 | |
| +	if ((dma->srcu + cmd->src_nsegs) > HIFN_D_SRC_RSIZE ||
 | |
| +	    (dma->dstu + cmd->dst_nsegs + 1) > HIFN_D_DST_RSIZE) {
 | |
| +#ifdef HIFN_DEBUG
 | |
| +		if (hifn_debug) {
 | |
| +			device_printf(sc->sc_dev,
 | |
| +				"src/dst exhaustion, srcu %u+%u dstu %u+%u\n",
 | |
| +				dma->srcu, cmd->src_nsegs,
 | |
| +				dma->dstu, cmd->dst_nsegs);
 | |
| +		}
 | |
| +#endif
 | |
| +		hifnstats.hst_nomem_sd++;
 | |
| +		err = ERESTART;
 | |
| +		goto err_dstmap;
 | |
| +	}
 | |
| +
 | |
| +	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
 | |
| +		dma->cmdi = 0;
 | |
| +		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
 | |
| +		wmb();
 | |
| +		dma->cmdr[HIFN_D_CMD_RSIZE].l |= htole32(HIFN_D_VALID);
 | |
| +		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
 | |
| +		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
 | |
| +	}
 | |
| +	cmdi = dma->cmdi++;
 | |
| +	cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
 | |
| +	HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);
 | |
| +
 | |
| +	/* .p for command/result already set */
 | |
| +	dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_LAST |
 | |
| +	    HIFN_D_MASKDONEIRQ);
 | |
| +	wmb();
 | |
| +	dma->cmdr[cmdi].l |= htole32(HIFN_D_VALID);
 | |
| +	HIFN_CMDR_SYNC(sc, cmdi,
 | |
| +	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
 | |
| +	dma->cmdu++;
 | |
| +
 | |
| +	/*
 | |
| +	 * We don't worry about missing an interrupt (which a "command wait"
 | |
| +	 * interrupt salvages us from), unless there is more than one command
 | |
| +	 * in the queue.
 | |
| +	 */
 | |
| +	if (dma->cmdu > 1) {
 | |
| +		sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
 | |
| +		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
 | |
| +	}
 | |
| +
 | |
| +	hifnstats.hst_ipackets++;
 | |
| +	hifnstats.hst_ibytes += cmd->src_mapsize;
 | |
| +
 | |
| +	hifn_dmamap_load_src(sc, cmd);
 | |
| +
 | |
| +	/*
 | |
| +	 * Unlike other descriptors, we don't mask done interrupt from
 | |
| +	 * result descriptor.
 | |
| +	 */
 | |
| +#ifdef HIFN_DEBUG
 | |
| +	if (hifn_debug)
 | |
| +		device_printf(sc->sc_dev, "load res\n");
 | |
| +#endif
 | |
| +	if (dma->resi == HIFN_D_RES_RSIZE) {
 | |
| +		dma->resi = 0;
 | |
| +		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
 | |
| +		wmb();
 | |
| +		dma->resr[HIFN_D_RES_RSIZE].l |= htole32(HIFN_D_VALID);
 | |
| +		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
 | |
| +		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 | |
| +	}
 | |
| +	resi = dma->resi++;
 | |
| +	KASSERT(dma->hifn_commands[resi] == NULL,
 | |
| +		("hifn_crypto: command slot %u busy", resi));
 | |
| +	dma->hifn_commands[resi] = cmd;
 | |
| +	HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
 | |
| +	if ((hint & CRYPTO_HINT_MORE) && sc->sc_curbatch < hifn_maxbatch) {
 | |
| +		dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
 | |
| +		    HIFN_D_LAST | HIFN_D_MASKDONEIRQ);
 | |
| +		wmb();
 | |
| +		dma->resr[resi].l |= htole32(HIFN_D_VALID);
 | |
| +		sc->sc_curbatch++;
 | |
| +		if (sc->sc_curbatch > hifnstats.hst_maxbatch)
 | |
| +			hifnstats.hst_maxbatch = sc->sc_curbatch;
 | |
| +		hifnstats.hst_totbatch++;
 | |
| +	} else {
 | |
| +		dma->resr[resi].l = htole32(HIFN_MAX_RESULT | HIFN_D_LAST);
 | |
| +		wmb();
 | |
| +		dma->resr[resi].l |= htole32(HIFN_D_VALID);
 | |
| +		sc->sc_curbatch = 0;
 | |
| +	}
 | |
| +	HIFN_RESR_SYNC(sc, resi,
 | |
| +	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 | |
| +	dma->resu++;
 | |
| +
 | |
| +	if (cmd->sloplen)
 | |
| +		cmd->slopidx = resi;
 | |
| +
 | |
| +	hifn_dmamap_load_dst(sc, cmd);
 | |
| +
 | |
| +	csr = 0;
 | |
| +	if (sc->sc_c_busy == 0) {
 | |
| +		csr |= HIFN_DMACSR_C_CTRL_ENA;
 | |
| +		sc->sc_c_busy = 1;
 | |
| +	}
 | |
| +	if (sc->sc_s_busy == 0) {
 | |
| +		csr |= HIFN_DMACSR_S_CTRL_ENA;
 | |
| +		sc->sc_s_busy = 1;
 | |
| +	}
 | |
| +	if (sc->sc_r_busy == 0) {
 | |
| +		csr |= HIFN_DMACSR_R_CTRL_ENA;
 | |
| +		sc->sc_r_busy = 1;
 | |
| +	}
 | |
| +	if (sc->sc_d_busy == 0) {
 | |
| +		csr |= HIFN_DMACSR_D_CTRL_ENA;
 | |
| +		sc->sc_d_busy = 1;
 | |
| +	}
 | |
| +	if (csr)
 | |
| +		WRITE_REG_1(sc, HIFN_1_DMA_CSR, csr);
 | |
| +
 | |
| +#ifdef HIFN_DEBUG
 | |
| +	if (hifn_debug) {
 | |
| +		device_printf(sc->sc_dev, "command: stat %8x ier %8x\n",
 | |
| +		    READ_REG_1(sc, HIFN_1_DMA_CSR),
 | |
| +		    READ_REG_1(sc, HIFN_1_DMA_IER));
 | |
| +	}
 | |
| +#endif
 | |
| +
 | |
| +	sc->sc_active = 5;
 | |
| +	HIFN_UNLOCK(sc);
 | |
| +	KASSERT(err == 0, ("hifn_crypto: success with error %u", err));
 | |
| +	return (err);		/* success */
 | |
| +
 | |
| +err_dstmap:
 | |
| +	if (cmd->src_map != cmd->dst_map)
 | |
| +		pci_unmap_buf(sc, &cmd->dst);
 | |
| +err_dstmap1:
 | |
| +err_srcmap:
 | |
| +	if (crp->crp_flags & CRYPTO_F_SKBUF) {
 | |
| +		if (cmd->src_skb != cmd->dst_skb)
 | |
| +#ifdef NOTYET
 | |
| +			m_freem(cmd->dst_m);
 | |
| +#else
 | |
| +			device_printf(sc->sc_dev,
 | |
| +					"%s,%d: CRYPTO_F_SKBUF src != dst not implemented\n",
 | |
| +					__FILE__, __LINE__);
 | |
| +#endif
 | |
| +	}
 | |
| +	pci_unmap_buf(sc, &cmd->src);
 | |
| +err_srcmap1:
 | |
| +	HIFN_UNLOCK(sc);
 | |
| +	return (err);
 | |
| +}
 | |
| +
 | |
| +static void
 | |
| +hifn_tick(unsigned long arg)
 | |
| +{
 | |
| +	struct hifn_softc *sc;
 | |
| +	unsigned long l_flags;
 | |
| +
 | |
| +	if (arg >= HIFN_MAX_CHIPS)
 | |
| +		return;
 | |
| +	sc = hifn_chip_idx[arg];
 | |
| +	if (!sc)
 | |
| +		return;
 | |
| +
 | |
| +	HIFN_LOCK(sc);
 | |
| +	if (sc->sc_active == 0) {
 | |
| +		struct hifn_dma *dma = sc->sc_dma;
 | |
| +		u_int32_t r = 0;
 | |
| +
 | |
| +		if (dma->cmdu == 0 && sc->sc_c_busy) {
 | |
| +			sc->sc_c_busy = 0;
 | |
| +			r |= HIFN_DMACSR_C_CTRL_DIS;
 | |
| +		}
 | |
| +		if (dma->srcu == 0 && sc->sc_s_busy) {
 | |
| +			sc->sc_s_busy = 0;
 | |
| +			r |= HIFN_DMACSR_S_CTRL_DIS;
 | |
| +		}
 | |
| +		if (dma->dstu == 0 && sc->sc_d_busy) {
 | |
| +			sc->sc_d_busy = 0;
 | |
| +			r |= HIFN_DMACSR_D_CTRL_DIS;
 | |
| +		}
 | |
| +		if (dma->resu == 0 && sc->sc_r_busy) {
 | |
| +			sc->sc_r_busy = 0;
 | |
| +			r |= HIFN_DMACSR_R_CTRL_DIS;
 | |
| +		}
 | |
| +		if (r)
 | |
| +			WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
 | |
| +	} else
 | |
| +		sc->sc_active--;
 | |
| +	HIFN_UNLOCK(sc);
 | |
| +	mod_timer(&sc->sc_tickto, jiffies + HZ);
 | |
| +}
 | |
| +
 | |
| +static irqreturn_t
 | |
| +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
 | |
| +hifn_intr(int irq, void *arg)
 | |
| +#else
 | |
| +hifn_intr(int irq, void *arg, struct pt_regs *regs)
 | |
| +#endif
 | |
| +{
 | |
| +	struct hifn_softc *sc = arg;
 | |
| +	struct hifn_dma *dma;
 | |
| +	u_int32_t dmacsr, restart;
 | |
| +	int i, u;
 | |
| +	unsigned long l_flags;
 | |
| +
 | |
| +	dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);
 | |
| +
 | |
| +	/* Nothing in the DMA unit interrupted */
 | |
| +	if ((dmacsr & sc->sc_dmaier) == 0)
 | |
| +		return IRQ_NONE;
 | |
| +
 | |
| +	HIFN_LOCK(sc);
 | |
| +
 | |
| +	dma = sc->sc_dma;
 | |
| +
 | |
| +#ifdef HIFN_DEBUG
 | |
| +	if (hifn_debug) {
 | |
| +		device_printf(sc->sc_dev,
 | |
| +		    "irq: stat %08x ien %08x damier %08x i %d/%d/%d/%d k %d/%d/%d/%d u %d/%d/%d/%d\n",
 | |
| +		    dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER), sc->sc_dmaier,
 | |
| +		    dma->cmdi, dma->srci, dma->dsti, dma->resi,
 | |
| +		    dma->cmdk, dma->srck, dma->dstk, dma->resk,
 | |
| +		    dma->cmdu, dma->srcu, dma->dstu, dma->resu);
 | |
| +	}
 | |
| +#endif
 | |
| +
 | |
| +	WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);
 | |
| +
 | |
| +	if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
 | |
| +	    (dmacsr & HIFN_DMACSR_PUBDONE))
 | |
| +		WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
 | |
| +		    READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);
 | |
| +
 | |
| +	restart = dmacsr & (HIFN_DMACSR_D_OVER | HIFN_DMACSR_R_OVER);
 | |
| +	if (restart)
 | |
| +		device_printf(sc->sc_dev, "overrun %x\n", dmacsr);
 | |
| +
 | |
| +	if (sc->sc_flags & HIFN_IS_7811) {
 | |
| +		if (dmacsr & HIFN_DMACSR_ILLR)
 | |
| +			device_printf(sc->sc_dev, "illegal read\n");
 | |
| +		if (dmacsr & HIFN_DMACSR_ILLW)
 | |
| +			device_printf(sc->sc_dev, "illegal write\n");
 | |
| +	}
 | |
| +
 | |
| +	restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
 | |
| +	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
 | |
| +	if (restart) {
 | |
| +		device_printf(sc->sc_dev, "abort, resetting.\n");
 | |
| +		hifnstats.hst_abort++;
 | |
| +		hifn_abort(sc);
 | |
| +		HIFN_UNLOCK(sc);
 | |
| +		return IRQ_HANDLED;
 | |
| +	}
 | |
| +
 | |
| +	if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->cmdu == 0)) {
 | |
| +		/*
 | |
| +		 * If no slots to process and we receive a "waiting on
 | |
| +		 * command" interrupt, we disable the "waiting on command"
 | |
| +		 * (by clearing it).
 | |
| +		 */
 | |
| +		sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
 | |
| +		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
 | |
| +	}
 | |
| +
 | |
| +	/* clear the rings */
 | |
| +	i = dma->resk; u = dma->resu;
 | |
| +	while (u != 0) {
 | |
| +		HIFN_RESR_SYNC(sc, i,
 | |
| +		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 | |
| +		if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
 | |
| +			HIFN_RESR_SYNC(sc, i,
 | |
| +			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 | |
| +			break;
 | |
| +		}
 | |
| +
 | |
| +		if (i != HIFN_D_RES_RSIZE) {
 | |
| +			struct hifn_command *cmd;
 | |
| +			u_int8_t *macbuf = NULL;
 | |
| +
 | |
| +			HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
 | |
| +			cmd = dma->hifn_commands[i];
 | |
| +			KASSERT(cmd != NULL,
 | |
| +				("hifn_intr: null command slot %u", i));
 | |
| +			dma->hifn_commands[i] = NULL;
 | |
| +
 | |
| +			if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
 | |
| +				macbuf = dma->result_bufs[i];
 | |
| +				macbuf += 12;
 | |
| +			}
 | |
| +
 | |
| +			hifn_callback(sc, cmd, macbuf);
 | |
| +			hifnstats.hst_opackets++;
 | |
| +			u--;
 | |
| +		}
 | |
| +
 | |
| +		if (++i == (HIFN_D_RES_RSIZE + 1))
 | |
| +			i = 0;
 | |
| +	}
 | |
| +	dma->resk = i; dma->resu = u;
 | |
| +
 | |
| +	i = dma->srck; u = dma->srcu;
 | |
| +	while (u != 0) {
 | |
| +		if (i == HIFN_D_SRC_RSIZE)
 | |
| +			i = 0;
 | |
| +		HIFN_SRCR_SYNC(sc, i,
 | |
| +		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 | |
| +		if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
 | |
| +			HIFN_SRCR_SYNC(sc, i,
 | |
| +			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 | |
| +			break;
 | |
| +		}
 | |
| +		i++, u--;
 | |
| +	}
 | |
| +	dma->srck = i; dma->srcu = u;
 | |
| +
 | |
| +	i = dma->cmdk; u = dma->cmdu;
 | |
| +	while (u != 0) {
 | |
| +		HIFN_CMDR_SYNC(sc, i,
 | |
| +		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 | |
| +		if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
 | |
| +			HIFN_CMDR_SYNC(sc, i,
 | |
| +			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 | |
| +			break;
 | |
| +		}
 | |
| +		if (i != HIFN_D_CMD_RSIZE) {
 | |
| +			u--;
 | |
| +			HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
 | |
| +		}
 | |
| +		if (++i == (HIFN_D_CMD_RSIZE + 1))
 | |
| +			i = 0;
 | |
| +	}
 | |
| +	dma->cmdk = i; dma->cmdu = u;
 | |
| +
 | |
| +	HIFN_UNLOCK(sc);
 | |
| +
 | |
| +	if (sc->sc_needwakeup) {		/* XXX check high watermark */
 | |
| +		int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
 | |
| +#ifdef HIFN_DEBUG
 | |
| +		if (hifn_debug)
 | |
| +			device_printf(sc->sc_dev,
 | |
| +				"wakeup crypto (%x) u %d/%d/%d/%d\n",
 | |
| +				sc->sc_needwakeup,
 | |
| +				dma->cmdu, dma->srcu, dma->dstu, dma->resu);
 | |
| +#endif
 | |
| +		sc->sc_needwakeup &= ~wakeup;
 | |
| +		crypto_unblock(sc->sc_cid, wakeup);
 | |
| +	}
 | |
| +
 | |
| +	return IRQ_HANDLED;
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Allocate a new 'session' and return an encoded session id.  'sidp'
 | |
| + * contains our registration id, and should contain an encoded session
 | |
| + * id on successful allocation.
 | |
| + */
 | |
| +static int
 | |
| +hifn_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
 | |
| +{
 | |
| +	struct hifn_softc *sc = device_get_softc(dev);
 | |
| +	struct cryptoini *c;
 | |
| +	int mac = 0, cry = 0, sesn;
 | |
| +	struct hifn_session *ses = NULL;
 | |
| +	unsigned long l_flags;
 | |
| +
 | |
| +	DPRINTF("%s()\n", __FUNCTION__);
 | |
| +
 | |
| +	KASSERT(sc != NULL, ("hifn_newsession: null softc"));
 | |
| +	if (sidp == NULL || cri == NULL || sc == NULL) {
 | |
| +		DPRINTF("%s,%d: %s - EINVAL\n", __FILE__, __LINE__, __FUNCTION__);
 | |
| +		return (EINVAL);
 | |
| +	}
 | |
| +
 | |
| +	HIFN_LOCK(sc);
 | |
| +	if (sc->sc_sessions == NULL) {
 | |
| +		ses = sc->sc_sessions = (struct hifn_session *)kmalloc(sizeof(*ses),
 | |
| +				SLAB_ATOMIC);
 | |
| +		if (ses == NULL) {
 | |
| +			HIFN_UNLOCK(sc);
 | |
| +			return (ENOMEM);
 | |
| +		}
 | |
| +		sesn = 0;
 | |
| +		sc->sc_nsessions = 1;
 | |
| +	} else {
 | |
| +		for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
 | |
| +			if (!sc->sc_sessions[sesn].hs_used) {
 | |
| +				ses = &sc->sc_sessions[sesn];
 | |
| +				break;
 | |
| +			}
 | |
| +		}
 | |
| +
 | |
| +		if (ses == NULL) {
 | |
| +			sesn = sc->sc_nsessions;
 | |
| +			ses = (struct hifn_session *)kmalloc((sesn + 1) * sizeof(*ses),
 | |
| +					SLAB_ATOMIC);
 | |
| +			if (ses == NULL) {
 | |
| +				HIFN_UNLOCK(sc);
 | |
| +				return (ENOMEM);
 | |
| +			}
 | |
| +			bcopy(sc->sc_sessions, ses, sesn * sizeof(*ses));
 | |
| +			bzero(sc->sc_sessions, sesn * sizeof(*ses));
 | |
| +			kfree(sc->sc_sessions);
 | |
| +			sc->sc_sessions = ses;
 | |
| +			ses = &sc->sc_sessions[sesn];
 | |
| +			sc->sc_nsessions++;
 | |
| +		}
 | |
| +	}
 | |
| +	HIFN_UNLOCK(sc);
 | |
| +
 | |
| +	bzero(ses, sizeof(*ses));
 | |
| +	ses->hs_used = 1;
 | |
| +
 | |
| +	for (c = cri; c != NULL; c = c->cri_next) {
 | |
| +		switch (c->cri_alg) {
 | |
| +		case CRYPTO_MD5:
 | |
| +		case CRYPTO_SHA1:
 | |
| +		case CRYPTO_MD5_HMAC:
 | |
| +		case CRYPTO_SHA1_HMAC:
 | |
| +			if (mac) {
 | |
| +				DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
 | |
| +				return (EINVAL);
 | |
| +			}
 | |
| +			mac = 1;
 | |
| +			ses->hs_mlen = c->cri_mlen;
 | |
| +			if (ses->hs_mlen == 0) {
 | |
| +				switch (c->cri_alg) {
 | |
| +				case CRYPTO_MD5:
 | |
| +				case CRYPTO_MD5_HMAC:
 | |
| +					ses->hs_mlen = 16;
 | |
| +					break;
 | |
| +				case CRYPTO_SHA1:
 | |
| +				case CRYPTO_SHA1_HMAC:
 | |
| +					ses->hs_mlen = 20;
 | |
| +					break;
 | |
| +				}
 | |
| +			}
 | |
| +			break;
 | |
| +		case CRYPTO_DES_CBC:
 | |
| +		case CRYPTO_3DES_CBC:
 | |
| +		case CRYPTO_AES_CBC:
 | |
| +			/* XXX this may read fewer, does it matter? */
 | |
| +			read_random(ses->hs_iv,
 | |
| +				c->cri_alg == CRYPTO_AES_CBC ?
 | |
| +					HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
 | |
| +			/*FALLTHROUGH*/
 | |
| +		case CRYPTO_ARC4:
 | |
| +			if (cry) {
 | |
| +				DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
 | |
| +				return (EINVAL);
 | |
| +			}
 | |
| +			cry = 1;
 | |
| +			break;
 | |
| +		default:
 | |
| +			DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
 | |
| +			return (EINVAL);
 | |
| +		}
 | |
| +	}
 | |
| +	if (mac == 0 && cry == 0) {
 | |
| +		DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
 | |
| +		return (EINVAL);
 | |
| +	}
 | |
| +
 | |
| +	*sidp = HIFN_SID(device_get_unit(sc->sc_dev), sesn);
 | |
| +
 | |
| +	return (0);
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Deallocate a session.
 | |
| + * XXX this routine should run a zero'd mac/encrypt key into context ram.
 | |
| + * XXX to blow away any keys already stored there.
 | |
| + */
 | |
| +static int
 | |
| +hifn_freesession(device_t dev, u_int64_t tid)
 | |
| +{
 | |
| +	struct hifn_softc *sc = device_get_softc(dev);
 | |
| +	int session, error;
 | |
| +	u_int32_t sid = CRYPTO_SESID2LID(tid);
 | |
| +	unsigned long l_flags;
 | |
| +
 | |
| +	DPRINTF("%s()\n", __FUNCTION__);
 | |
| +
 | |
| +	KASSERT(sc != NULL, ("hifn_freesession: null softc"));
 | |
| +	if (sc == NULL) {
 | |
| +		DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
 | |
| +		return (EINVAL);
 | |
| +	}
 | |
| +
 | |
| +	HIFN_LOCK(sc);
 | |
| +	session = HIFN_SESSION(sid);
 | |
| +	if (session < sc->sc_nsessions) {
 | |
| +		bzero(&sc->sc_sessions[session], sizeof(struct hifn_session));
 | |
| +		error = 0;
 | |
| +	} else {
 | |
| +		DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
 | |
| +		error = EINVAL;
 | |
| +	}
 | |
| +	HIFN_UNLOCK(sc);
 | |
| +
 | |
| +	return (error);
 | |
| +}
 | |
| +
 | |
| +static int
 | |
| +hifn_process(device_t dev, struct cryptop *crp, int hint)
 | |
| +{
 | |
| +	struct hifn_softc *sc = device_get_softc(dev);
 | |
| +	struct hifn_command *cmd = NULL;
 | |
| +	int session, err, ivlen;
 | |
| +	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
 | |
| +
 | |
| +	DPRINTF("%s()\n", __FUNCTION__);
 | |
| +
 | |
| +	if (crp == NULL || crp->crp_callback == NULL) {
 | |
| +		hifnstats.hst_invalid++;
 | |
| +		DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
 | |
| +		return (EINVAL);
 | |
| +	}
 | |
| +	session = HIFN_SESSION(crp->crp_sid);
 | |
| +
 | |
| +	if (sc == NULL || session >= sc->sc_nsessions) {
 | |
| +		DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
 | |
| +		err = EINVAL;
 | |
| +		goto errout;
 | |
| +	}
 | |
| +
 | |
| +	cmd = kmalloc(sizeof(struct hifn_command), SLAB_ATOMIC);
 | |
| +	if (cmd == NULL) {
 | |
| +		hifnstats.hst_nomem++;
 | |
| +		err = ENOMEM;
 | |
| +		goto errout;
 | |
| +	}
 | |
| +	memset(cmd, 0, sizeof(*cmd));
 | |
| +
 | |
| +	if (crp->crp_flags & CRYPTO_F_SKBUF) {
 | |
| +		cmd->src_skb = (struct sk_buff *)crp->crp_buf;
 | |
| +		cmd->dst_skb = (struct sk_buff *)crp->crp_buf;
 | |
| +	} else if (crp->crp_flags & CRYPTO_F_IOV) {
 | |
| +		cmd->src_io = (struct uio *)crp->crp_buf;
 | |
| +		cmd->dst_io = (struct uio *)crp->crp_buf;
 | |
| +	} else {
 | |
| +		cmd->src_buf = crp->crp_buf;
 | |
| +		cmd->dst_buf = crp->crp_buf;
 | |
| +	}
 | |
| +
 | |
| +	crd1 = crp->crp_desc;
 | |
| +	if (crd1 == NULL) {
 | |
| +		DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
 | |
| +		err = EINVAL;
 | |
| +		goto errout;
 | |
| +	}
 | |
| +	crd2 = crd1->crd_next;
 | |
| +
 | |
| +	if (crd2 == NULL) {
 | |
| +		if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
 | |
| +		    crd1->crd_alg == CRYPTO_SHA1_HMAC ||
 | |
| +		    crd1->crd_alg == CRYPTO_SHA1 ||
 | |
| +		    crd1->crd_alg == CRYPTO_MD5) {
 | |
| +			maccrd = crd1;
 | |
| +			enccrd = NULL;
 | |
| +		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
 | |
| +		    crd1->crd_alg == CRYPTO_3DES_CBC ||
 | |
| +		    crd1->crd_alg == CRYPTO_AES_CBC ||
 | |
| +		    crd1->crd_alg == CRYPTO_ARC4) {
 | |
| +			if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0)
 | |
| +				cmd->base_masks |= HIFN_BASE_CMD_DECODE;
 | |
| +			maccrd = NULL;
 | |
| +			enccrd = crd1;
 | |
| +		} else {
 | |
| +			DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
 | |
| +			err = EINVAL;
 | |
| +			goto errout;
 | |
| +		}
 | |
| +	} else {
 | |
| +		if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
 | |
| +                     crd1->crd_alg == CRYPTO_SHA1_HMAC ||
 | |
| +                     crd1->crd_alg == CRYPTO_MD5 ||
 | |
| +                     crd1->crd_alg == CRYPTO_SHA1) &&
 | |
| +		    (crd2->crd_alg == CRYPTO_DES_CBC ||
 | |
| +		     crd2->crd_alg == CRYPTO_3DES_CBC ||
 | |
| +		     crd2->crd_alg == CRYPTO_AES_CBC ||
 | |
| +		     crd2->crd_alg == CRYPTO_ARC4) &&
 | |
| +		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
 | |
| +			cmd->base_masks = HIFN_BASE_CMD_DECODE;
 | |
| +			maccrd = crd1;
 | |
| +			enccrd = crd2;
 | |
| +		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
 | |
| +		     crd1->crd_alg == CRYPTO_ARC4 ||
 | |
| +		     crd1->crd_alg == CRYPTO_3DES_CBC ||
 | |
| +		     crd1->crd_alg == CRYPTO_AES_CBC) &&
 | |
| +		    (crd2->crd_alg == CRYPTO_MD5_HMAC ||
 | |
| +                     crd2->crd_alg == CRYPTO_SHA1_HMAC ||
 | |
| +                     crd2->crd_alg == CRYPTO_MD5 ||
 | |
| +                     crd2->crd_alg == CRYPTO_SHA1) &&
 | |
| +		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
 | |
| +			enccrd = crd1;
 | |
| +			maccrd = crd2;
 | |
| +		} else {
 | |
| +			/*
 | |
| +			 * We cannot order the 7751 as requested
 | |
| +			 */
 | |
| +			DPRINTF("%s,%d: %s %d,%d,%d - EINVAL\n",__FILE__,__LINE__,__FUNCTION__, crd1->crd_alg, crd2->crd_alg, crd1->crd_flags & CRD_F_ENCRYPT);
 | |
| +			err = EINVAL;
 | |
| +			goto errout;
 | |
| +		}
 | |
| +	}
 | |
| +
 | |
| +	if (enccrd) {
 | |
| +		cmd->enccrd = enccrd;
 | |
| +		cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
 | |
| +		switch (enccrd->crd_alg) {
 | |
| +		case CRYPTO_ARC4:
 | |
| +			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4;
 | |
| +			break;
 | |
| +		case CRYPTO_DES_CBC:
 | |
| +			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES |
 | |
| +			    HIFN_CRYPT_CMD_MODE_CBC |
 | |
| +			    HIFN_CRYPT_CMD_NEW_IV;
 | |
| +			break;
 | |
| +		case CRYPTO_3DES_CBC:
 | |
| +			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES |
 | |
| +			    HIFN_CRYPT_CMD_MODE_CBC |
 | |
| +			    HIFN_CRYPT_CMD_NEW_IV;
 | |
| +			break;
 | |
| +		case CRYPTO_AES_CBC:
 | |
| +			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES |
 | |
| +			    HIFN_CRYPT_CMD_MODE_CBC |
 | |
| +			    HIFN_CRYPT_CMD_NEW_IV;
 | |
| +			break;
 | |
| +		default:
 | |
| +			DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
 | |
| +			err = EINVAL;
 | |
| +			goto errout;
 | |
| +		}
 | |
| +		if (enccrd->crd_alg != CRYPTO_ARC4) {
 | |
| +			ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ?
 | |
| +				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
 | |
| +			if (enccrd->crd_flags & CRD_F_ENCRYPT) {
 | |
| +				if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
 | |
| +					bcopy(enccrd->crd_iv, cmd->iv, ivlen);
 | |
| +				else
 | |
| +					bcopy(sc->sc_sessions[session].hs_iv,
 | |
| +					    cmd->iv, ivlen);
 | |
| +
 | |
| +				if ((enccrd->crd_flags & CRD_F_IV_PRESENT)
 | |
| +				    == 0) {
 | |
| +					crypto_copyback(crp->crp_flags,
 | |
| +					    crp->crp_buf, enccrd->crd_inject,
 | |
| +					    ivlen, cmd->iv);
 | |
| +				}
 | |
| +			} else {
 | |
| +				if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
 | |
| +					bcopy(enccrd->crd_iv, cmd->iv, ivlen);
 | |
| +				else {
 | |
| +					crypto_copydata(crp->crp_flags,
 | |
| +					    crp->crp_buf, enccrd->crd_inject,
 | |
| +					    ivlen, cmd->iv);
 | |
| +				}
 | |
| +			}
 | |
| +		}
 | |
| +
 | |
| +		if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT)
 | |
| +			cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
 | |
| +		cmd->ck = enccrd->crd_key;
 | |
| +		cmd->cklen = enccrd->crd_klen >> 3;
 | |
| +		cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
 | |
| +
 | |
| +		/*
 | |
| +		 * Need to specify the size for the AES key in the masks.
 | |
| +		 */
 | |
| +		if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) ==
 | |
| +		    HIFN_CRYPT_CMD_ALG_AES) {
 | |
| +			switch (cmd->cklen) {
 | |
| +			case 16:
 | |
| +				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128;
 | |
| +				break;
 | |
| +			case 24:
 | |
| +				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192;
 | |
| +				break;
 | |
| +			case 32:
 | |
| +				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256;
 | |
| +				break;
 | |
| +			default:
 | |
| +				DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
 | |
| +				err = EINVAL;
 | |
| +				goto errout;
 | |
| +			}
 | |
| +		}
 | |
| +	}
 | |
| +
 | |
| +	if (maccrd) {
 | |
| +		cmd->maccrd = maccrd;
 | |
| +		cmd->base_masks |= HIFN_BASE_CMD_MAC;
 | |
| +
 | |
| +		switch (maccrd->crd_alg) {
 | |
| +		case CRYPTO_MD5:
 | |
| +			cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
 | |
| +			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
 | |
| +			    HIFN_MAC_CMD_POS_IPSEC;
 | |
| +                       break;
 | |
| +		case CRYPTO_MD5_HMAC:
 | |
| +			cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
 | |
| +			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
 | |
| +			    HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
 | |
| +			break;
 | |
| +		case CRYPTO_SHA1:
 | |
| +			cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
 | |
| +			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
 | |
| +			    HIFN_MAC_CMD_POS_IPSEC;
 | |
| +			break;
 | |
| +		case CRYPTO_SHA1_HMAC:
 | |
| +			cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
 | |
| +			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
 | |
| +			    HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
 | |
| +			break;
 | |
| +		}
 | |
| +
 | |
| +		if (maccrd->crd_alg == CRYPTO_SHA1_HMAC ||
 | |
| +		     maccrd->crd_alg == CRYPTO_MD5_HMAC) {
 | |
| +			cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
 | |
| +			bcopy(maccrd->crd_key, cmd->mac, maccrd->crd_klen >> 3);
 | |
| +			bzero(cmd->mac + (maccrd->crd_klen >> 3),
 | |
| +			    HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3));
 | |
| +		}
 | |
| +	}
 | |
| +
 | |
| +	cmd->crp = crp;
 | |
| +	cmd->session_num = session;
 | |
| +	cmd->softc = sc;
 | |
| +
 | |
| +	err = hifn_crypto(sc, cmd, crp, hint);
 | |
| +	if (!err) {
 | |
| +		return 0;
 | |
| +	} else if (err == ERESTART) {
 | |
| +		/*
 | |
| +		 * There weren't enough resources to dispatch the request
 | |
| +		 * to the part.  Notify the caller so they'll requeue this
 | |
| +		 * request and resubmit it again soon.
 | |
| +		 */
 | |
| +#ifdef HIFN_DEBUG
 | |
| +		if (hifn_debug)
 | |
| +			device_printf(sc->sc_dev, "requeue request\n");
 | |
| +#endif
 | |
| +		kfree(cmd);
 | |
| +		sc->sc_needwakeup |= CRYPTO_SYMQ;
 | |
| +		return (err);
 | |
| +	}
 | |
| +
 | |
| +errout:
 | |
| +	if (cmd != NULL)
 | |
| +		kfree(cmd);
 | |
| +	if (err == EINVAL)
 | |
| +		hifnstats.hst_invalid++;
 | |
| +	else
 | |
| +		hifnstats.hst_nomem++;
 | |
| +	crp->crp_etype = err;
 | |
| +	crypto_done(crp);
 | |
| +	return (err);
 | |
| +}
 | |
| +
 | |
| +static void
 | |
| +hifn_abort(struct hifn_softc *sc)
 | |
| +{
 | |
| +	struct hifn_dma *dma = sc->sc_dma;
 | |
| +	struct hifn_command *cmd;
 | |
| +	struct cryptop *crp;
 | |
| +	int i, u;
 | |
| +
 | |
| +	DPRINTF("%s()\n", __FUNCTION__);
 | |
| +
 | |
| +	i = dma->resk; u = dma->resu;
 | |
| +	while (u != 0) {
 | |
| +		cmd = dma->hifn_commands[i];
 | |
| +		KASSERT(cmd != NULL, ("hifn_abort: null command slot %u", i));
 | |
| +		dma->hifn_commands[i] = NULL;
 | |
| +		crp = cmd->crp;
 | |
| +
 | |
| +		if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
 | |
| +			/* Salvage what we can. */
 | |
| +			u_int8_t *macbuf;
 | |
| +
 | |
| +			if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
 | |
| +				macbuf = dma->result_bufs[i];
 | |
| +				macbuf += 12;
 | |
| +			} else
 | |
| +				macbuf = NULL;
 | |
| +			hifnstats.hst_opackets++;
 | |
| +			hifn_callback(sc, cmd, macbuf);
 | |
| +		} else {
 | |
| +#if 0
 | |
| +			if (cmd->src_map == cmd->dst_map) {
 | |
| +				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
 | |
| +				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
 | |
| +			} else {
 | |
| +				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
 | |
| +				    BUS_DMASYNC_POSTWRITE);
 | |
| +				bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
 | |
| +				    BUS_DMASYNC_POSTREAD);
 | |
| +			}
 | |
| +#endif
 | |
| +
 | |
| +			if (cmd->src_skb != cmd->dst_skb) {
 | |
| +#ifdef NOTYET
 | |
| +				m_freem(cmd->src_m);
 | |
| +				crp->crp_buf = (caddr_t)cmd->dst_m;
 | |
| +#else
 | |
| +				device_printf(sc->sc_dev,
 | |
| +						"%s,%d: CRYPTO_F_SKBUF src != dst not implemented\n",
 | |
| +						__FILE__, __LINE__);
 | |
| +#endif
 | |
| +			}
 | |
| +
 | |
| +			/* non-shared buffers cannot be restarted */
 | |
| +			if (cmd->src_map != cmd->dst_map) {
 | |
| +				/*
 | |
| +				 * XXX should be EAGAIN, delayed until
 | |
| +				 * after the reset.
 | |
| +				 */
 | |
| +				crp->crp_etype = ENOMEM;
 | |
| +				pci_unmap_buf(sc, &cmd->dst);
 | |
| +			} else
 | |
| +				crp->crp_etype = ENOMEM;
 | |
| +
 | |
| +			pci_unmap_buf(sc, &cmd->src);
 | |
| +
 | |
| +			kfree(cmd);
 | |
| +			if (crp->crp_etype != EAGAIN)
 | |
| +				crypto_done(crp);
 | |
| +		}
 | |
| +
 | |
| +		if (++i == HIFN_D_RES_RSIZE)
 | |
| +			i = 0;
 | |
| +		u--;
 | |
| +	}
 | |
| +	dma->resk = i; dma->resu = u;
 | |
| +
 | |
| +	hifn_reset_board(sc, 1);
 | |
| +	hifn_init_dma(sc);
 | |
| +	hifn_init_pci_registers(sc);
 | |
| +}
 | |
| +
 | |
| +static void
 | |
| +hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *macbuf)
 | |
| +{
 | |
| +	struct hifn_dma *dma = sc->sc_dma;
 | |
| +	struct cryptop *crp = cmd->crp;
 | |
| +	struct cryptodesc *crd;
 | |
| +	int i, u, ivlen;
 | |
| +
 | |
| +	DPRINTF("%s()\n", __FUNCTION__);
 | |
| +
 | |
| +#if 0
 | |
| +	if (cmd->src_map == cmd->dst_map) {
 | |
| +		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
 | |
| +		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
 | |
| +	} else {
 | |
| +		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
 | |
| +		    BUS_DMASYNC_POSTWRITE);
 | |
| +		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
 | |
| +		    BUS_DMASYNC_POSTREAD);
 | |
| +	}
 | |
| +#endif
 | |
| +
 | |
| +	if (crp->crp_flags & CRYPTO_F_SKBUF) {
 | |
| +		if (cmd->src_skb != cmd->dst_skb) {
 | |
| +#ifdef NOTYET
 | |
| +			crp->crp_buf = (caddr_t)cmd->dst_m;
 | |
| +			totlen = cmd->src_mapsize;
 | |
| +			for (m = cmd->dst_m; m != NULL; m = m->m_next) {
 | |
| +				if (totlen < m->m_len) {
 | |
| +					m->m_len = totlen;
 | |
| +					totlen = 0;
 | |
| +				} else
 | |
| +					totlen -= m->m_len;
 | |
| +			}
 | |
| +			cmd->dst_m->m_pkthdr.len = cmd->src_m->m_pkthdr.len;
 | |
| +			m_freem(cmd->src_m);
 | |
| +#else
 | |
| +			device_printf(sc->sc_dev,
 | |
| +					"%s,%d: CRYPTO_F_SKBUF src != dst not implemented\n",
 | |
| +					__FILE__, __LINE__);
 | |
| +#endif
 | |
| +		}
 | |
| +	}
 | |
| +
 | |
| +	if (cmd->sloplen != 0) {
 | |
| +		crypto_copyback(crp->crp_flags, crp->crp_buf,
 | |
| +		    cmd->src_mapsize - cmd->sloplen, cmd->sloplen,
 | |
| +		    (caddr_t)&dma->slop[cmd->slopidx]);
 | |
| +	}
 | |
| +
 | |
| +	i = dma->dstk; u = dma->dstu;
 | |
| +	while (u != 0) {
 | |
| +		if (i == HIFN_D_DST_RSIZE)
 | |
| +			i = 0;
 | |
| +#if 0
 | |
| +		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
 | |
| +		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 | |
| +#endif
 | |
| +		if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
 | |
| +#if 0
 | |
| +			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
 | |
| +			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 | |
| +#endif
 | |
| +			break;
 | |
| +		}
 | |
| +		i++, u--;
 | |
| +	}
 | |
| +	dma->dstk = i; dma->dstu = u;
 | |
| +
 | |
| +	hifnstats.hst_obytes += cmd->dst_mapsize;
 | |
| +
 | |
| +	if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) ==
 | |
| +	    HIFN_BASE_CMD_CRYPT) {
 | |
| +		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
 | |
| +			if (crd->crd_alg != CRYPTO_DES_CBC &&
 | |
| +			    crd->crd_alg != CRYPTO_3DES_CBC &&
 | |
| +			    crd->crd_alg != CRYPTO_AES_CBC)
 | |
| +				continue;
 | |
| +			ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ?
 | |
| +				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
 | |
| +			crypto_copydata(crp->crp_flags, crp->crp_buf,
 | |
| +			    crd->crd_skip + crd->crd_len - ivlen, ivlen,
 | |
| +			    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
 | |
| +			break;
 | |
| +		}
 | |
| +	}
 | |
| +
 | |
| +	if (macbuf != NULL) {
 | |
| +		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
 | |
| +                        int len;
 | |
| +
 | |
| +			if (crd->crd_alg != CRYPTO_MD5 &&
 | |
| +			    crd->crd_alg != CRYPTO_SHA1 &&
 | |
| +			    crd->crd_alg != CRYPTO_MD5_HMAC &&
 | |
| +			    crd->crd_alg != CRYPTO_SHA1_HMAC) {
 | |
| +				continue;
 | |
| +			}
 | |
| +			len = cmd->softc->sc_sessions[cmd->session_num].hs_mlen;
 | |
| +			crypto_copyback(crp->crp_flags, crp->crp_buf,
 | |
| +			    crd->crd_inject, len, macbuf);
 | |
| +			break;
 | |
| +		}
 | |
| +	}
 | |
| +
 | |
| +	if (cmd->src_map != cmd->dst_map)
 | |
| +		pci_unmap_buf(sc, &cmd->dst);
 | |
| +	pci_unmap_buf(sc, &cmd->src);
 | |
| +	kfree(cmd);
 | |
| +	crypto_done(crp);
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0
 | |
| + * and Group 1 registers; avoid conditions that could create
 | |
| + * burst writes by doing a read in between the writes.
 | |
| + *
 | |
| + * NB: The read we interpose is always to the same register;
 | |
| + *     we do this because reading from an arbitrary (e.g. last)
 | |
| + *     register may not always work.
 | |
| + */
 | |
| +static void
 | |
| +hifn_write_reg_0(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
 | |
| +{
 | |
| +	if (sc->sc_flags & HIFN_IS_7811) {
 | |
| +		if (sc->sc_bar0_lastreg == reg - 4)
 | |
| +			readl(sc->sc_bar0 + HIFN_0_PUCNFG);
 | |
| +		sc->sc_bar0_lastreg = reg;
 | |
| +	}
 | |
| +	writel(val, sc->sc_bar0 + reg);
 | |
| +}
 | |
| +
 | |
| +static void
 | |
| +hifn_write_reg_1(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
 | |
| +{
 | |
| +	if (sc->sc_flags & HIFN_IS_7811) {
 | |
| +		if (sc->sc_bar1_lastreg == reg - 4)
 | |
| +			readl(sc->sc_bar1 + HIFN_1_REVID);
 | |
| +		sc->sc_bar1_lastreg = reg;
 | |
| +	}
 | |
| +	writel(val, sc->sc_bar1 + reg);
 | |
| +}
 | |
| +
 | |
| +
 | |
| +static struct pci_device_id hifn_pci_tbl[] = {
 | |
| +	{ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7951,
 | |
| +	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
 | |
| +	{ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7955,
 | |
| +	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
 | |
| +	{ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7956,
 | |
| +	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
 | |
| +	{ PCI_VENDOR_NETSEC, PCI_PRODUCT_NETSEC_7751,
 | |
| +	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
 | |
| +	{ PCI_VENDOR_INVERTEX, PCI_PRODUCT_INVERTEX_AEON,
 | |
| +	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
 | |
| +	{ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7811,
 | |
| +	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
 | |
| +	/*
 | |
| +	 * Other vendors share this PCI ID as well, such as
 | |
| +	 * http://www.powercrypt.com, and obviously they also
 | |
| +	 * use the same key.
 | |
| +	 */
 | |
| +	{ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7751,
 | |
| +	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
 | |
| +	{ 0, 0, 0, 0, 0, 0, }
 | |
| +};
 | |
| +MODULE_DEVICE_TABLE(pci, hifn_pci_tbl);
 | |
| +
 | |
| +static struct pci_driver hifn_driver = {
 | |
| +	.name         = "hifn",
 | |
| +	.id_table     = hifn_pci_tbl,
 | |
| +	.probe        =	hifn_probe,
 | |
| +	.remove       = hifn_remove,
 | |
| +	/* add PM stuff here one day */
 | |
| +};
 | |
| +
 | |
| +static int __init hifn_init (void)
 | |
| +{
 | |
| +	struct hifn_softc *sc = NULL;
 | |
| +	int rc;
 | |
| +
 | |
| +	DPRINTF("%s(%p)\n", __FUNCTION__, hifn_init);
 | |
| +
 | |
| +	rc = pci_register_driver(&hifn_driver);
 | |
| +	pci_register_driver_compat(&hifn_driver, rc);
 | |
| +
 | |
| +	return rc;
 | |
| +}
 | |
| +
 | |
| +static void __exit hifn_exit (void)
 | |
| +{
 | |
| +	pci_unregister_driver(&hifn_driver);
 | |
| +}
 | |
| +
 | |
| +module_init(hifn_init);
 | |
| +module_exit(hifn_exit);
 | |
| +
 | |
| +MODULE_LICENSE("BSD");
 | |
| +MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
 | |
| +MODULE_DESCRIPTION("OCF driver for hifn PCI crypto devices");
 | |
| --- /dev/null
 | |
| +++ b/crypto/ocf/hifn/hifnHIPP.c
 | |
| @@ -0,0 +1,420 @@
 | |
| +/*-
 | |
| + * Driver for Hifn HIPP-I/II chipset
 | |
| + * Copyright (c) 2006 Michael Richardson <mcr@xelerance.com>
 | |
| + *
 | |
| + * Redistribution and use in source and binary forms, with or without
 | |
| + * modification, are permitted provided that the following conditions
 | |
| + * are met:
 | |
| + *
 | |
| + * 1. Redistributions of source code must retain the above copyright
 | |
| + *   notice, this list of conditions and the following disclaimer.
 | |
| + * 2. Redistributions in binary form must reproduce the above copyright
 | |
| + *   notice, this list of conditions and the following disclaimer in the
 | |
| + *   documentation and/or other materials provided with the distribution.
 | |
| + * 3. The name of the author may not be used to endorse or promote products
 | |
| + *   derived from this software without specific prior written permission.
 | |
| + *
 | |
| + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 | |
| + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 | |
| + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 | |
| + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 | |
| + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 | |
| + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 | |
| + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 | |
| + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 | |
| + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 | |
| + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 | |
| + *
 | |
| + * Effort sponsored by Hifn Inc.
 | |
| + *
 | |
| + */
 | |
| +
 | |
| +/*
 | |
| + * Driver for various Hifn encryption processors.
 | |
| + */
 | |
| +#ifndef AUTOCONF_INCLUDED
 | |
| +#include <linux/config.h>
 | |
| +#endif
 | |
| +#include <linux/module.h>
 | |
| +#include <linux/init.h>
 | |
| +#include <linux/list.h>
 | |
| +#include <linux/slab.h>
 | |
| +#include <linux/wait.h>
 | |
| +#include <linux/sched.h>
 | |
| +#include <linux/pci.h>
 | |
| +#include <linux/delay.h>
 | |
| +#include <linux/interrupt.h>
 | |
| +#include <linux/spinlock.h>
 | |
| +#include <linux/random.h>
 | |
| +#include <linux/version.h>
 | |
| +#include <linux/skbuff.h>
 | |
| +#include <linux/uio.h>
 | |
| +#include <linux/sysfs.h>
 | |
| +#include <linux/miscdevice.h>
 | |
| +#include <asm/io.h>
 | |
| +
 | |
| +#include <cryptodev.h>
 | |
| +
 | |
| +#include "hifnHIPPreg.h"
 | |
| +#include "hifnHIPPvar.h"
 | |
| +
 | |
| +#if 1
 | |
| +#define	DPRINTF(a...)	if (hipp_debug) { \
 | |
| +							printk("%s: ", sc ? \
 | |
| +								device_get_nameunit(sc->sc_dev) : "hifn"); \
 | |
| +							printk(a); \
 | |
| +						} else
 | |
| +#else
 | |
| +#define	DPRINTF(a...)
 | |
| +#endif
 | |
| +
 | |
| +typedef int bus_size_t;
 | |
| +
 | |
| +static inline int
 | |
| +pci_get_revid(struct pci_dev *dev)
 | |
| +{
 | |
| +	u8 rid = 0;
 | |
| +	pci_read_config_byte(dev, PCI_REVISION_ID, &rid);
 | |
| +	return rid;
 | |
| +}
 | |
| +
 | |
| +#define debug hipp_debug
 | |
| +int hipp_debug = 0;
 | |
| +module_param(hipp_debug, int, 0644);
 | |
| +MODULE_PARM_DESC(hipp_debug, "Enable debug");
 | |
| +
 | |
| +int hipp_maxbatch = 1;
 | |
| +module_param(hipp_maxbatch, int, 0644);
 | |
| +MODULE_PARM_DESC(hipp_maxbatch, "max ops to batch w/o interrupt");
 | |
| +
 | |
| +static	int  hipp_probe(struct pci_dev *dev, const struct pci_device_id *ent);
 | |
| +static	void hipp_remove(struct pci_dev *dev);
 | |
| +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
 | |
| +static irqreturn_t hipp_intr(int irq, void *arg);
 | |
| +#else
 | |
| +static irqreturn_t hipp_intr(int irq, void *arg, struct pt_regs *regs);
 | |
| +#endif
 | |
| +
 | |
| +static int hipp_num_chips = 0;
 | |
| +static struct hipp_softc *hipp_chip_idx[HIPP_MAX_CHIPS];
 | |
| +
 | |
| +static	int hipp_newsession(device_t, u_int32_t *, struct cryptoini *);
 | |
| +static	int hipp_freesession(device_t, u_int64_t);
 | |
| +static	int hipp_process(device_t, struct cryptop *, int);
 | |
| +
 | |
| +static device_method_t hipp_methods = {
 | |
| +	/* crypto device methods */
 | |
| +	DEVMETHOD(cryptodev_newsession,	hipp_newsession),
 | |
| +	DEVMETHOD(cryptodev_freesession,hipp_freesession),
 | |
| +	DEVMETHOD(cryptodev_process,	hipp_process),
 | |
| +};
 | |
| +
 | |
| +static __inline u_int32_t
 | |
| +READ_REG(struct hipp_softc *sc, unsigned int barno, bus_size_t reg)
 | |
| +{
 | |
| +	u_int32_t v = readl(sc->sc_bar[barno] + reg);
 | |
| +	//sc->sc_bar0_lastreg = (bus_size_t) -1;
 | |
| +	return (v);
 | |
| +}
 | |
| +static __inline void
 | |
| +WRITE_REG(struct hipp_softc *sc, unsigned int barno, bus_size_t reg, u_int32_t val)
 | |
| +{
 | |
| +	writel(val, sc->sc_bar[barno] + reg);
 | |
| +}
 | |
| +
 | |
| +#define READ_REG_0(sc, reg)         READ_REG(sc, 0, reg)
 | |
| +#define WRITE_REG_0(sc, reg, val)   WRITE_REG(sc,0, reg, val)
 | |
| +#define READ_REG_1(sc, reg)         READ_REG(sc, 1, reg)
 | |
| +#define WRITE_REG_1(sc, reg, val)   WRITE_REG(sc,1, reg, val)
 | |
| +
 | |
| +static int
 | |
| +hipp_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
 | |
| +{
 | |
| +	return EINVAL;
 | |
| +}
 | |
| +
 | |
| +static int
 | |
| +hipp_freesession(device_t dev, u_int64_t tid)
 | |
| +{
 | |
| +	return EINVAL;
 | |
| +}
 | |
| +
 | |
| +static int
 | |
| +hipp_process(device_t dev, struct cryptop *crp, int hint)
 | |
| +{
 | |
| +	return EINVAL;
 | |
| +}
 | |
| +
 | |
| +static const char*
 | |
| +hipp_partname(struct hipp_softc *sc, char buf[128], size_t blen)
 | |
| +{
 | |
| +	char *n = NULL;
 | |
| +
 | |
| +	switch (pci_get_vendor(sc->sc_pcidev)) {
 | |
| +	case PCI_VENDOR_HIFN:
 | |
| +		switch (pci_get_device(sc->sc_pcidev)) {
 | |
| +		case PCI_PRODUCT_HIFN_7855:	n = "Hifn 7855"; break;
 | |
| +		case PCI_PRODUCT_HIFN_8155:	n = "Hifn 8155"; break;
 | |
| +		case PCI_PRODUCT_HIFN_6500:	n = "Hifn 6500"; break;
 | |
| +		}
 | |
| +	}
 | |
| +
 | |
| +	if(n==NULL) {
 | |
| +		snprintf(buf, blen, "VID=%02x,PID=%02x",
 | |
| +			 pci_get_vendor(sc->sc_pcidev),
 | |
| +			 pci_get_device(sc->sc_pcidev));
 | |
| +	} else {
 | |
| +		buf[0]='\0';
 | |
| +		strncat(buf, n, blen - 1);
 | |
| +	}
 | |
| +	return buf;
 | |
| +}
 | |
| +
 | |
| +struct hipp_fs_entry {
 | |
| +	struct attribute attr;
 | |
| +	/* other stuff */
 | |
| +};
 | |
| +
 | |
| +
 | |
| +static ssize_t
 | |
| +cryptoid_show(struct device *dev,
 | |
| +	      struct device_attribute *attr,
 | |
| +	      char *buf)
 | |
| +{
 | |
| +	struct hipp_softc *sc;
 | |
| +
 | |
| +	sc = pci_get_drvdata(to_pci_dev (dev));
 | |
| +	return sprintf (buf, "%d\n", sc->sc_cid);
 | |
| +}
 | |
| +
 | |
| +struct device_attribute hipp_dev_cryptoid = __ATTR_RO(cryptoid);
 | |
| +
 | |
| +/*
 | |
| + * Attach an interface that successfully probed.
 | |
| + */
 | |
| +static int
 | |
| +hipp_probe(struct pci_dev *dev, const struct pci_device_id *ent)
 | |
| +{
 | |
| +	struct hipp_softc *sc = NULL;
 | |
| +	int i;
 | |
| +	//char rbase;
 | |
| +	//u_int16_t ena;
 | |
| +	int rev;
 | |
| +	//int rseg;
 | |
| +	int rc;
 | |
| +
 | |
| +	DPRINTF("%s()\n", __FUNCTION__);
 | |
| +
 | |
| +	if (pci_enable_device(dev) < 0)
 | |
| +		return(-ENODEV);
 | |
| +
 | |
| +	if (pci_set_mwi(dev))
 | |
| +		return(-ENODEV);
 | |
| +
 | |
| +	if (!dev->irq) {
 | |
| +		printk("hifn: found device with no IRQ assigned. check BIOS settings!");
 | |
| +		pci_disable_device(dev);
 | |
| +		return(-ENODEV);
 | |
| +	}
 | |
| +
 | |
| +	sc = (struct hipp_softc *) kmalloc(sizeof(*sc), GFP_KERNEL);
 | |
| +	if (!sc)
 | |
| +		return(-ENOMEM);
 | |
| +	memset(sc, 0, sizeof(*sc));
 | |
| +
 | |
| +	softc_device_init(sc, "hifn-hipp", hipp_num_chips, hipp_methods);
 | |
| +
 | |
| +	sc->sc_pcidev = dev;
 | |
| +	sc->sc_irq = -1;
 | |
| +	sc->sc_cid = -1;
 | |
| +	sc->sc_num = hipp_num_chips++;
 | |
| +
 | |
| +	if (sc->sc_num < HIPP_MAX_CHIPS)
 | |
| +		hipp_chip_idx[sc->sc_num] = sc;
 | |
| +
 | |
| +	pci_set_drvdata(sc->sc_pcidev, sc);
 | |
| +
 | |
| +	spin_lock_init(&sc->sc_mtx);
 | |
| +
 | |
| +	/*
 | |
| +	 * Setup PCI resources.
 | |
| +	 * The READ_REG_0, WRITE_REG_0, READ_REG_1,
 | |
| +	 * and WRITE_REG_1 macros throughout the driver are used
 | |
| +	 * to permit better debugging.
 | |
| +	 */
 | |
| +	for(i=0; i<4; i++) {
 | |
| +		unsigned long mem_start, mem_len;
 | |
| +		mem_start = pci_resource_start(sc->sc_pcidev, i);
 | |
| +		mem_len   = pci_resource_len(sc->sc_pcidev, i);
 | |
| +		sc->sc_barphy[i] = (caddr_t)mem_start;
 | |
| +		sc->sc_bar[i] = (ocf_iomem_t) ioremap(mem_start, mem_len);
 | |
| +		if (!sc->sc_bar[i]) {
 | |
| +			device_printf(sc->sc_dev, "cannot map bar%d register space\n", i);
 | |
| +			goto fail;
 | |
| +		}
 | |
| +	}
 | |
| +
 | |
| +	//hipp_reset_board(sc, 0);
 | |
| +	pci_set_master(sc->sc_pcidev);
 | |
| +
 | |
| +	/*
 | |
| +	 * Arrange the interrupt line.
 | |
| +	 */
 | |
| +	rc = request_irq(dev->irq, hipp_intr, IRQF_SHARED, "hifn", sc);
 | |
| +	if (rc) {
 | |
| +		device_printf(sc->sc_dev, "could not map interrupt: %d\n", rc);
 | |
| +		goto fail;
 | |
| +	}
 | |
| +	sc->sc_irq = dev->irq;
 | |
| +
 | |
| +	rev = READ_REG_1(sc, HIPP_1_REVID) & 0xffff;
 | |
| +
 | |
| +	{
 | |
| +		char b[32];
 | |
| +		device_printf(sc->sc_dev, "%s, rev %u",
 | |
| +			      hipp_partname(sc, b, sizeof(b)), rev);
 | |
| +	}
 | |
| +
 | |
| +#if 0
 | |
| +	if (sc->sc_flags & HIFN_IS_7956)
 | |
| +		printf(", pll=0x%x<%s clk, %ux mult>",
 | |
| +			sc->sc_pllconfig,
 | |
| +			sc->sc_pllconfig & HIFN_PLL_REF_SEL ? "ext" : "pci",
 | |
| +			2 + 2*((sc->sc_pllconfig & HIFN_PLL_ND) >> 11));
 | |
| +#endif
 | |
| +	printf("\n");
 | |
| +
 | |
| +	sc->sc_cid = crypto_get_driverid(softc_get_device(sc),CRYPTOCAP_F_HARDWARE);
 | |
| +	if (sc->sc_cid < 0) {
 | |
| +		device_printf(sc->sc_dev, "could not get crypto driver id\n");
 | |
| +		goto fail;
 | |
| +	}
 | |
| +
 | |
| +#if 0 /* cannot work with a non-GPL module */
 | |
| +	/* make a sysfs entry to let the world know what entry we got */
 | |
| +	sysfs_create_file(&sc->sc_pcidev->dev.kobj, &hipp_dev_cryptoid.attr);
 | |
| +#endif
 | |
| +
 | |
| +#if 0
 | |
| +	init_timer(&sc->sc_tickto);
 | |
| +	sc->sc_tickto.function = hifn_tick;
 | |
| +	sc->sc_tickto.data = (unsigned long) sc->sc_num;
 | |
| +	mod_timer(&sc->sc_tickto, jiffies + HZ);
 | |
| +#endif
 | |
| +
 | |
| +#if 0 /* no code here yet ?? */
 | |
| +	crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
 | |
| +#endif
 | |
| +
 | |
| +	return (0);
 | |
| +
 | |
| +fail:
 | |
| +	if (sc->sc_cid >= 0)
 | |
| +		crypto_unregister_all(sc->sc_cid);
 | |
| +	if (sc->sc_irq != -1)
 | |
| +		free_irq(sc->sc_irq, sc);
 | |
| +
 | |
| +#if 0
 | |
| +	if (sc->sc_dma) {
 | |
| +		/* Turn off DMA polling */
 | |
| +		WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
 | |
| +			    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
 | |
| +
 | |
| +		pci_free_consistent(sc->sc_pcidev,
 | |
| +				    sizeof(*sc->sc_dma),
 | |
| +				    sc->sc_dma, sc->sc_dma_physaddr);
 | |
| +	}
 | |
| +#endif
 | |
| +	kfree(sc);
 | |
| +	return (-ENXIO);
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Detach an interface that successfully probed.
 | |
| + */
 | |
| +static void
 | |
| +hipp_remove(struct pci_dev *dev)
 | |
| +{
 | |
| +	struct hipp_softc *sc = pci_get_drvdata(dev);
 | |
| +	unsigned long l_flags;
 | |
| +
 | |
| +	DPRINTF("%s()\n", __FUNCTION__);
 | |
| +
 | |
| +	/* disable interrupts */
 | |
| +	HIPP_LOCK(sc);
 | |
| +
 | |
| +#if 0
 | |
| +	WRITE_REG_1(sc, HIFN_1_DMA_IER, 0);
 | |
| +	HIFN_UNLOCK(sc);
 | |
| +
 | |
| +	/*XXX other resources */
 | |
| +	del_timer_sync(&sc->sc_tickto);
 | |
| +
 | |
| +	/* Turn off DMA polling */
 | |
| +	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
 | |
| +	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
 | |
| +#endif
 | |
| +
 | |
| +	crypto_unregister_all(sc->sc_cid);
 | |
| +
 | |
| +	free_irq(sc->sc_irq, sc);
 | |
| +
 | |
| +#if 0
 | |
| +	pci_free_consistent(sc->sc_pcidev, sizeof(*sc->sc_dma),
 | |
| +                sc->sc_dma, sc->sc_dma_physaddr);
 | |
| +#endif
 | |
| +}
 | |
| +
 | |
| +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
 | |
| +static irqreturn_t hipp_intr(int irq, void *arg)
 | |
| +#else
 | |
| +static irqreturn_t hipp_intr(int irq, void *arg, struct pt_regs *regs)
 | |
| +#endif
 | |
| +{
 | |
| +	struct hipp_softc *sc = arg;
 | |
| +
 | |
| +	sc = sc; /* shut up compiler */
 | |
| +
 | |
| +	return IRQ_HANDLED;
 | |
| +}
 | |
| +
 | |
| +static struct pci_device_id hipp_pci_tbl[] = {
 | |
| +	{ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7855,
 | |
| +	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
 | |
| +	{ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_8155,
 | |
| +	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
 | |
| +	{ 0, 0, 0, 0, 0, 0, } };
 | |
| +MODULE_DEVICE_TABLE(pci, hipp_pci_tbl);
 | |
| +
 | |
| +static struct pci_driver hipp_driver = {
 | |
| +	.name         = "hipp",
 | |
| +	.id_table     = hipp_pci_tbl,
 | |
| +	.probe        =	hipp_probe,
 | |
| +	.remove       = hipp_remove,
 | |
| +	/* add PM stuff here one day */
 | |
| +};
 | |
| +
 | |
| +static int __init hipp_init (void)
 | |
| +{
 | |
| +	struct hipp_softc *sc = NULL;
 | |
| +	int rc;
 | |
| +
 | |
| +	DPRINTF("%s(%p)\n", __FUNCTION__, hipp_init);
 | |
| +
 | |
| +	rc = pci_register_driver(&hipp_driver);
 | |
| +	pci_register_driver_compat(&hipp_driver, rc);
 | |
| +
 | |
| +	return rc;
 | |
| +}
 | |
| +
 | |
| +static void __exit hipp_exit (void)
 | |
| +{
 | |
| +	pci_unregister_driver(&hipp_driver);
 | |
| +}
 | |
| +
 | |
| +module_init(hipp_init);
 | |
| +module_exit(hipp_exit);
 | |
| +
 | |
| +MODULE_LICENSE("BSD");
 | |
| +MODULE_AUTHOR("Michael Richardson <mcr@xelerance.com>");
 | |
| +MODULE_DESCRIPTION("OCF driver for hifn HIPP-I/II PCI crypto devices");
 | |
| --- /dev/null
 | |
| +++ b/crypto/ocf/hifn/hifnHIPPreg.h
 | |
| @@ -0,0 +1,46 @@
 | |
| +/*-
 | |
| + * Hifn HIPP-I/HIPP-II (7855/8155) driver.
 | |
| + * Copyright (c) 2006 Michael Richardson <mcr@xelerance.com>
 | |
| + *
 | |
| + * Redistribution and use in source and binary forms, with or without
 | |
| + * modification, are permitted provided that the following conditions
 | |
| + * are met:
 | |
| + *
 | |
| + * 1. Redistributions of source code must retain the above copyright
 | |
| + *    notice, this list of conditions and the following disclaimer.
 | |
| + * 2. Redistributions in binary form must reproduce the above copyright
 | |
| + *    notice, this list of conditions and the following disclaimer in the
 | |
| + *    documentation and/or other materials provided with the distribution.
 | |
| + * 3. The name of the author may not be used to endorse or promote products
 | |
| + *    derived from this software without specific prior written permission.
 | |
| + *
 | |
| + *
 | |
| + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 | |
| + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 | |
| + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 | |
| + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 | |
| + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 | |
| + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 | |
| + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 | |
| + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 | |
| + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 | |
| + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 | |
| + *
 | |
| + * Effort sponsored by Hifn inc.
 | |
| + *
 | |
| + */
 | |
| +
 | |
| +#ifndef __HIFNHIPP_H__
 | |
| +#define	__HIFNHIPP_H__
 | |
| +
 | |
| +/*
 | |
| + * PCI vendor and device identifiers
 | |
| + */
 | |
| +#define	PCI_VENDOR_HIFN		0x13a3		/* Hifn */
 | |
| +#define	PCI_PRODUCT_HIFN_6500	0x0006		/* 6500 */
 | |
| +#define	PCI_PRODUCT_HIFN_7855	0x001f		/* 7855 */
 | |
| +#define	PCI_PRODUCT_HIFN_8155	0x999		/* XXX 8155 */
 | |
| +
 | |
| +#define HIPP_1_REVID            0x01 /* BOGUS */
 | |
| +
 | |
| +#endif /* __HIFNHIPP_H__ */
 | |
| --- /dev/null
 | |
| +++ b/crypto/ocf/hifn/hifnHIPPvar.h
 | |
| @@ -0,0 +1,93 @@
 | |
| +/*
 | |
| + * Hifn HIPP-I/HIPP-II (7855/8155) driver.
 | |
| + * Copyright (c) 2006 Michael Richardson <mcr@xelerance.com> *
 | |
| + *
 | |
| + * Redistribution and use in source and binary forms, with or without
 | |
| + * modification, are permitted provided that the following conditions
 | |
| + * are met:
 | |
| + *
 | |
| + * 1. Redistributions of source code must retain the above copyright
 | |
| + *    notice, this list of conditions and the following disclaimer.
 | |
| + * 2. Redistributions in binary form must reproduce the above copyright
 | |
| + *    notice, this list of conditions and the following disclaimer in the
 | |
| + *    documentation and/or other materials provided with the distribution.
 | |
| + * 3. The name of the author may not be used to endorse or promote products
 | |
| + *    derived from this software without specific prior written permission.
 | |
| + *
 | |
| + *
 | |
| + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 | |
| + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 | |
| + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 | |
| + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 | |
| + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 | |
| + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 | |
| + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 | |
| + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 | |
| + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 | |
| + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 | |
| + *
 | |
| + * Effort sponsored by Hifn inc.
 | |
| + *
 | |
| + */
 | |
| +
 | |
| +#ifndef __HIFNHIPPVAR_H__
 | |
| +#define __HIFNHIPPVAR_H__
 | |
| +
 | |
| +#define HIPP_MAX_CHIPS 8
 | |
| +
 | |
| +/*
 | |
| + * Holds data specific to a single Hifn HIPP-I board.
 | |
| + */
 | |
| +struct hipp_softc {
 | |
| +	softc_device_decl		 sc_dev;
 | |
| +
 | |
| +	struct pci_dev		*sc_pcidev;	/* device backpointer */
 | |
| +	ocf_iomem_t             sc_bar[5];
 | |
| +	caddr_t		        sc_barphy[5];   /* physical address */
 | |
| +	int			sc_num;		/* for multiple devs */
 | |
| +	spinlock_t		sc_mtx;		/* per-instance lock */
 | |
| +	int32_t			sc_cid;
 | |
| +	int			sc_irq;
 | |
| +
 | |
| +#if 0
 | |
| +
 | |
| +	u_int32_t		sc_dmaier;
 | |
| +	u_int32_t		sc_drammodel;	/* 1=dram, 0=sram */
 | |
| +	u_int32_t		sc_pllconfig;	/* 7954/7955/7956 PLL config */
 | |
| +
 | |
| +	struct hifn_dma		*sc_dma;
 | |
| +	dma_addr_t		sc_dma_physaddr;/* physical address of sc_dma */
 | |
| +
 | |
| +	int			sc_dmansegs;
 | |
| +	int			sc_maxses;
 | |
| +	int			sc_nsessions;
 | |
| +	struct hifn_session	*sc_sessions;
 | |
| +	int			sc_ramsize;
 | |
| +	int			sc_flags;
 | |
| +#define	HIFN_HAS_RNG		0x1	/* includes random number generator */
 | |
| +#define	HIFN_HAS_PUBLIC		0x2	/* includes public key support */
 | |
| +#define	HIFN_HAS_AES		0x4	/* includes AES support */
 | |
| +#define	HIFN_IS_7811		0x8	/* Hifn 7811 part */
 | |
| +#define	HIFN_IS_7956		0x10	/* Hifn 7956/7955 don't have SDRAM */
 | |
| +
 | |
| +	struct timer_list	sc_tickto;	/* for managing DMA */
 | |
| +
 | |
| +	int			sc_rngfirst;
 | |
| +	int			sc_rnghz;	/* RNG polling frequency */
 | |
| +
 | |
| +	int			sc_c_busy;	/* command ring busy */
 | |
| +	int			sc_s_busy;	/* source data ring busy */
 | |
| +	int			sc_d_busy;	/* destination data ring busy */
 | |
| +	int			sc_r_busy;	/* result ring busy */
 | |
| +	int			sc_active;	/* for initial countdown */
 | |
| +	int			sc_needwakeup;	/* ops q'd wating on resources */
 | |
| +	int			sc_curbatch;	/* # ops submitted w/o int */
 | |
| +	int			sc_suspended;
 | |
| +	struct miscdevice       sc_miscdev;
 | |
| +#endif
 | |
| +};
 | |
| +
 | |
| +#define	HIPP_LOCK(_sc)		spin_lock_irqsave(&(_sc)->sc_mtx, l_flags)
 | |
| +#define	HIPP_UNLOCK(_sc)	spin_unlock_irqrestore(&(_sc)->sc_mtx, l_flags)
 | |
| +
 | |
| +#endif /* __HIFNHIPPVAR_H__ */
 | |
| --- /dev/null
 | |
| +++ b/crypto/ocf/safe/md5.c
 | |
| @@ -0,0 +1,308 @@
 | |
| +/*	$KAME: md5.c,v 1.5 2000/11/08 06:13:08 itojun Exp $	*/
 | |
| +/*
 | |
| + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 | |
| + * All rights reserved.
 | |
| + *
 | |
| + * Redistribution and use in source and binary forms, with or without
 | |
| + * modification, are permitted provided that the following conditions
 | |
| + * are met:
 | |
| + * 1. Redistributions of source code must retain the above copyright
 | |
| + *    notice, this list of conditions and the following disclaimer.
 | |
| + * 2. Redistributions in binary form must reproduce the above copyright
 | |
| + *    notice, this list of conditions and the following disclaimer in the
 | |
| + *    documentation and/or other materials provided with the distribution.
 | |
| + * 3. Neither the name of the project nor the names of its contributors
 | |
| + *    may be used to endorse or promote products derived from this software
 | |
| + *    without specific prior written permission.
 | |
| + *
 | |
| + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 | |
| + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 | |
| + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 | |
| + * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 | |
| + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 | |
| + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 | |
| + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 | |
| + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 | |
| + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 | |
| + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 | |
| + * SUCH DAMAGE.
 | |
| + */
 | |
| +
 | |
| +#if 0
 | |
| +#include <sys/cdefs.h>
 | |
| +__FBSDID("$FreeBSD: src/sys/crypto/md5.c,v 1.9 2004/01/27 19:49:19 des Exp $");
 | |
| +
 | |
| +#include <sys/types.h>
 | |
| +#include <sys/cdefs.h>
 | |
| +#include <sys/time.h>
 | |
| +#include <sys/systm.h>
 | |
| +#include <crypto/md5.h>
 | |
| +#endif
 | |
| +
 | |
| +#define SHIFT(X, s) (((X) << (s)) | ((X) >> (32 - (s))))
 | |
| +
 | |
| +#define F(X, Y, Z) (((X) & (Y)) | ((~X) & (Z)))
 | |
| +#define G(X, Y, Z) (((X) & (Z)) | ((Y) & (~Z)))
 | |
| +#define H(X, Y, Z) ((X) ^ (Y) ^ (Z))
 | |
| +#define I(X, Y, Z) ((Y) ^ ((X) | (~Z)))
 | |
| +
 | |
| +#define ROUND1(a, b, c, d, k, s, i) { \
 | |
| +	(a) = (a) + F((b), (c), (d)) + X[(k)] + T[(i)]; \
 | |
| +	(a) = SHIFT((a), (s)); \
 | |
| +	(a) = (b) + (a); \
 | |
| +}
 | |
| +
 | |
| +#define ROUND2(a, b, c, d, k, s, i) { \
 | |
| +	(a) = (a) + G((b), (c), (d)) + X[(k)] + T[(i)]; \
 | |
| +	(a) = SHIFT((a), (s)); \
 | |
| +	(a) = (b) + (a); \
 | |
| +}
 | |
| +
 | |
| +#define ROUND3(a, b, c, d, k, s, i) { \
 | |
| +	(a) = (a) + H((b), (c), (d)) + X[(k)] + T[(i)]; \
 | |
| +	(a) = SHIFT((a), (s)); \
 | |
| +	(a) = (b) + (a); \
 | |
| +}
 | |
| +
 | |
| +#define ROUND4(a, b, c, d, k, s, i) { \
 | |
| +	(a) = (a) + I((b), (c), (d)) + X[(k)] + T[(i)]; \
 | |
| +	(a) = SHIFT((a), (s)); \
 | |
| +	(a) = (b) + (a); \
 | |
| +}
 | |
| +
 | |
| +#define Sa	 7
 | |
| +#define Sb	12
 | |
| +#define Sc	17
 | |
| +#define Sd	22
 | |
| +
 | |
| +#define Se	 5
 | |
| +#define Sf	 9
 | |
| +#define Sg	14
 | |
| +#define Sh	20
 | |
| +
 | |
| +#define Si	 4
 | |
| +#define Sj	11
 | |
| +#define Sk	16
 | |
| +#define Sl	23
 | |
| +
 | |
| +#define Sm	 6
 | |
| +#define Sn	10
 | |
| +#define So	15
 | |
| +#define Sp	21
 | |
| +
 | |
| +#define MD5_A0	0x67452301
 | |
| +#define MD5_B0	0xefcdab89
 | |
| +#define MD5_C0	0x98badcfe
 | |
| +#define MD5_D0	0x10325476
 | |
| +
 | |
| +/* Integer part of 4294967296 times abs(sin(i)), where i is in radians. */
 | |
| +static const u_int32_t T[65] = {
 | |
| +	0,
 | |
| +	0xd76aa478, 	0xe8c7b756,	0x242070db,	0xc1bdceee,
 | |
| +	0xf57c0faf,	0x4787c62a, 	0xa8304613,	0xfd469501,
 | |
| +	0x698098d8,	0x8b44f7af,	0xffff5bb1,	0x895cd7be,
 | |
| +	0x6b901122, 	0xfd987193, 	0xa679438e,	0x49b40821,
 | |
| +
 | |
| +	0xf61e2562,	0xc040b340, 	0x265e5a51, 	0xe9b6c7aa,
 | |
| +	0xd62f105d,	0x2441453,	0xd8a1e681,	0xe7d3fbc8,
 | |
| +	0x21e1cde6,	0xc33707d6, 	0xf4d50d87, 	0x455a14ed,
 | |
| +	0xa9e3e905,	0xfcefa3f8, 	0x676f02d9, 	0x8d2a4c8a,
 | |
| +
 | |
| +	0xfffa3942,	0x8771f681, 	0x6d9d6122, 	0xfde5380c,
 | |
| +	0xa4beea44, 	0x4bdecfa9, 	0xf6bb4b60, 	0xbebfbc70,
 | |
| +	0x289b7ec6, 	0xeaa127fa, 	0xd4ef3085,	0x4881d05,
 | |
| +	0xd9d4d039, 	0xe6db99e5, 	0x1fa27cf8, 	0xc4ac5665,
 | |
| +
 | |
| +	0xf4292244, 	0x432aff97, 	0xab9423a7, 	0xfc93a039,
 | |
| +	0x655b59c3, 	0x8f0ccc92, 	0xffeff47d, 	0x85845dd1,
 | |
| +	0x6fa87e4f, 	0xfe2ce6e0, 	0xa3014314, 	0x4e0811a1,
 | |
| +	0xf7537e82, 	0xbd3af235, 	0x2ad7d2bb, 	0xeb86d391,
 | |
| +};
 | |
| +
 | |
| +static const u_int8_t md5_paddat[MD5_BUFLEN] = {
 | |
| +	0x80,	0,	0,	0,	0,	0,	0,	0,
 | |
| +	0,	0,	0,	0,	0,	0,	0,	0,
 | |
| +	0,	0,	0,	0,	0,	0,	0,	0,
 | |
| +	0,	0,	0,	0,	0,	0,	0,	0,
 | |
| +	0,	0,	0,	0,	0,	0,	0,	0,
 | |
| +	0,	0,	0,	0,	0,	0,	0,	0,
 | |
| +	0,	0,	0,	0,	0,	0,	0,	0,
 | |
| +	0,	0,	0,	0,	0,	0,	0,	0,
 | |
| +};
 | |
| +
 | |
| +static void md5_calc(u_int8_t *, md5_ctxt *);
 | |
| +
 | |
| +void md5_init(ctxt)
 | |
| +	md5_ctxt *ctxt;
 | |
| +{
 | |
| +	ctxt->md5_n = 0;
 | |
| +	ctxt->md5_i = 0;
 | |
| +	ctxt->md5_sta = MD5_A0;
 | |
| +	ctxt->md5_stb = MD5_B0;
 | |
| +	ctxt->md5_stc = MD5_C0;
 | |
| +	ctxt->md5_std = MD5_D0;
 | |
| +	bzero(ctxt->md5_buf, sizeof(ctxt->md5_buf));
 | |
| +}
 | |
| +
 | |
| +void md5_loop(ctxt, input, len)
 | |
| +	md5_ctxt *ctxt;
 | |
| +	u_int8_t *input;
 | |
| +	u_int len; /* number of bytes */
 | |
| +{
 | |
| +	u_int gap, i;
 | |
| +
 | |
| +	ctxt->md5_n += len * 8; /* byte to bit */
 | |
| +	gap = MD5_BUFLEN - ctxt->md5_i;
 | |
| +
 | |
| +	if (len >= gap) {
 | |
| +		bcopy((void *)input, (void *)(ctxt->md5_buf + ctxt->md5_i),
 | |
| +			gap);
 | |
| +		md5_calc(ctxt->md5_buf, ctxt);
 | |
| +
 | |
| +		for (i = gap; i + MD5_BUFLEN <= len; i += MD5_BUFLEN) {
 | |
| +			md5_calc((u_int8_t *)(input + i), ctxt);
 | |
| +		}
 | |
| +
 | |
| +		ctxt->md5_i = len - i;
 | |
| +		bcopy((void *)(input + i), (void *)ctxt->md5_buf, ctxt->md5_i);
 | |
| +	} else {
 | |
| +		bcopy((void *)input, (void *)(ctxt->md5_buf + ctxt->md5_i),
 | |
| +			len);
 | |
| +		ctxt->md5_i += len;
 | |
| +	}
 | |
| +}
 | |
| +
 | |
| +void md5_pad(ctxt)
 | |
| +	md5_ctxt *ctxt;
 | |
| +{
 | |
| +	u_int gap;
 | |
| +
 | |
| +	/* Don't count up padding. Keep md5_n. */
 | |
| +	gap = MD5_BUFLEN - ctxt->md5_i;
 | |
| +	if (gap > 8) {
 | |
| +		bcopy(md5_paddat,
 | |
| +		      (void *)(ctxt->md5_buf + ctxt->md5_i),
 | |
| +		      gap - sizeof(ctxt->md5_n));
 | |
| +	} else {
 | |
| +		/* including gap == 8 */
 | |
| +		bcopy(md5_paddat, (void *)(ctxt->md5_buf + ctxt->md5_i),
 | |
| +			gap);
 | |
| +		md5_calc(ctxt->md5_buf, ctxt);
 | |
| +		bcopy((md5_paddat + gap),
 | |
| +		      (void *)ctxt->md5_buf,
 | |
| +		      MD5_BUFLEN - sizeof(ctxt->md5_n));
 | |
| +	}
 | |
| +
 | |
| +	/* 8 byte word */
 | |
| +#if BYTE_ORDER == LITTLE_ENDIAN
 | |
| +	bcopy(&ctxt->md5_n8[0], &ctxt->md5_buf[56], 8);
 | |
| +#endif
 | |
| +#if BYTE_ORDER == BIG_ENDIAN
 | |
| +	ctxt->md5_buf[56] = ctxt->md5_n8[7];
 | |
| +	ctxt->md5_buf[57] = ctxt->md5_n8[6];
 | |
| +	ctxt->md5_buf[58] = ctxt->md5_n8[5];
 | |
| +	ctxt->md5_buf[59] = ctxt->md5_n8[4];
 | |
| +	ctxt->md5_buf[60] = ctxt->md5_n8[3];
 | |
| +	ctxt->md5_buf[61] = ctxt->md5_n8[2];
 | |
| +	ctxt->md5_buf[62] = ctxt->md5_n8[1];
 | |
| +	ctxt->md5_buf[63] = ctxt->md5_n8[0];
 | |
| +#endif
 | |
| +
 | |
| +	md5_calc(ctxt->md5_buf, ctxt);
 | |
| +}
 | |
| +
 | |
| +void md5_result(digest, ctxt)
 | |
| +	u_int8_t *digest;
 | |
| +	md5_ctxt *ctxt;
 | |
| +{
 | |
| +	/* 4 byte words */
 | |
| +#if BYTE_ORDER == LITTLE_ENDIAN
 | |
| +	bcopy(&ctxt->md5_st8[0], digest, 16);
 | |
| +#endif
 | |
| +#if BYTE_ORDER == BIG_ENDIAN
 | |
| +	digest[ 0] = ctxt->md5_st8[ 3]; digest[ 1] = ctxt->md5_st8[ 2];
 | |
| +	digest[ 2] = ctxt->md5_st8[ 1]; digest[ 3] = ctxt->md5_st8[ 0];
 | |
| +	digest[ 4] = ctxt->md5_st8[ 7]; digest[ 5] = ctxt->md5_st8[ 6];
 | |
| +	digest[ 6] = ctxt->md5_st8[ 5]; digest[ 7] = ctxt->md5_st8[ 4];
 | |
| +	digest[ 8] = ctxt->md5_st8[11]; digest[ 9] = ctxt->md5_st8[10];
 | |
| +	digest[10] = ctxt->md5_st8[ 9]; digest[11] = ctxt->md5_st8[ 8];
 | |
| +	digest[12] = ctxt->md5_st8[15]; digest[13] = ctxt->md5_st8[14];
 | |
| +	digest[14] = ctxt->md5_st8[13]; digest[15] = ctxt->md5_st8[12];
 | |
| +#endif
 | |
| +}
 | |
| +
 | |
| +static void md5_calc(b64, ctxt)
 | |
| +	u_int8_t *b64;
 | |
| +	md5_ctxt *ctxt;
 | |
| +{
 | |
| +	u_int32_t A = ctxt->md5_sta;
 | |
| +	u_int32_t B = ctxt->md5_stb;
 | |
| +	u_int32_t C = ctxt->md5_stc;
 | |
| +	u_int32_t D = ctxt->md5_std;
 | |
| +#if BYTE_ORDER == LITTLE_ENDIAN
 | |
| +	u_int32_t *X = (u_int32_t *)b64;
 | |
| +#endif
 | |
| +#if BYTE_ORDER == BIG_ENDIAN
 | |
| +	/* 4 byte words */
 | |
| +	/* what a brute force but fast! */
 | |
| +	u_int32_t X[16];
 | |
| +	u_int8_t *y = (u_int8_t *)X;
 | |
| +	y[ 0] = b64[ 3]; y[ 1] = b64[ 2]; y[ 2] = b64[ 1]; y[ 3] = b64[ 0];
 | |
| +	y[ 4] = b64[ 7]; y[ 5] = b64[ 6]; y[ 6] = b64[ 5]; y[ 7] = b64[ 4];
 | |
| +	y[ 8] = b64[11]; y[ 9] = b64[10]; y[10] = b64[ 9]; y[11] = b64[ 8];
 | |
| +	y[12] = b64[15]; y[13] = b64[14]; y[14] = b64[13]; y[15] = b64[12];
 | |
| +	y[16] = b64[19]; y[17] = b64[18]; y[18] = b64[17]; y[19] = b64[16];
 | |
| +	y[20] = b64[23]; y[21] = b64[22]; y[22] = b64[21]; y[23] = b64[20];
 | |
| +	y[24] = b64[27]; y[25] = b64[26]; y[26] = b64[25]; y[27] = b64[24];
 | |
| +	y[28] = b64[31]; y[29] = b64[30]; y[30] = b64[29]; y[31] = b64[28];
 | |
| +	y[32] = b64[35]; y[33] = b64[34]; y[34] = b64[33]; y[35] = b64[32];
 | |
| +	y[36] = b64[39]; y[37] = b64[38]; y[38] = b64[37]; y[39] = b64[36];
 | |
| +	y[40] = b64[43]; y[41] = b64[42]; y[42] = b64[41]; y[43] = b64[40];
 | |
| +	y[44] = b64[47]; y[45] = b64[46]; y[46] = b64[45]; y[47] = b64[44];
 | |
| +	y[48] = b64[51]; y[49] = b64[50]; y[50] = b64[49]; y[51] = b64[48];
 | |
| +	y[52] = b64[55]; y[53] = b64[54]; y[54] = b64[53]; y[55] = b64[52];
 | |
| +	y[56] = b64[59]; y[57] = b64[58]; y[58] = b64[57]; y[59] = b64[56];
 | |
| +	y[60] = b64[63]; y[61] = b64[62]; y[62] = b64[61]; y[63] = b64[60];
 | |
| +#endif
 | |
| +
 | |
| +	ROUND1(A, B, C, D,  0, Sa,  1); ROUND1(D, A, B, C,  1, Sb,  2);
 | |
| +	ROUND1(C, D, A, B,  2, Sc,  3); ROUND1(B, C, D, A,  3, Sd,  4);
 | |
| +	ROUND1(A, B, C, D,  4, Sa,  5); ROUND1(D, A, B, C,  5, Sb,  6);
 | |
| +	ROUND1(C, D, A, B,  6, Sc,  7); ROUND1(B, C, D, A,  7, Sd,  8);
 | |
| +	ROUND1(A, B, C, D,  8, Sa,  9); ROUND1(D, A, B, C,  9, Sb, 10);
 | |
| +	ROUND1(C, D, A, B, 10, Sc, 11); ROUND1(B, C, D, A, 11, Sd, 12);
 | |
| +	ROUND1(A, B, C, D, 12, Sa, 13); ROUND1(D, A, B, C, 13, Sb, 14);
 | |
| +	ROUND1(C, D, A, B, 14, Sc, 15); ROUND1(B, C, D, A, 15, Sd, 16);
 | |
| +
 | |
| +	ROUND2(A, B, C, D,  1, Se, 17); ROUND2(D, A, B, C,  6, Sf, 18);
 | |
| +	ROUND2(C, D, A, B, 11, Sg, 19); ROUND2(B, C, D, A,  0, Sh, 20);
 | |
| +	ROUND2(A, B, C, D,  5, Se, 21); ROUND2(D, A, B, C, 10, Sf, 22);
 | |
| +	ROUND2(C, D, A, B, 15, Sg, 23); ROUND2(B, C, D, A,  4, Sh, 24);
 | |
| +	ROUND2(A, B, C, D,  9, Se, 25); ROUND2(D, A, B, C, 14, Sf, 26);
 | |
| +	ROUND2(C, D, A, B,  3, Sg, 27); ROUND2(B, C, D, A,  8, Sh, 28);
 | |
| +	ROUND2(A, B, C, D, 13, Se, 29); ROUND2(D, A, B, C,  2, Sf, 30);
 | |
| +	ROUND2(C, D, A, B,  7, Sg, 31); ROUND2(B, C, D, A, 12, Sh, 32);
 | |
| +
 | |
| +	ROUND3(A, B, C, D,  5, Si, 33); ROUND3(D, A, B, C,  8, Sj, 34);
 | |
| +	ROUND3(C, D, A, B, 11, Sk, 35); ROUND3(B, C, D, A, 14, Sl, 36);
 | |
| +	ROUND3(A, B, C, D,  1, Si, 37); ROUND3(D, A, B, C,  4, Sj, 38);
 | |
| +	ROUND3(C, D, A, B,  7, Sk, 39); ROUND3(B, C, D, A, 10, Sl, 40);
 | |
| +	ROUND3(A, B, C, D, 13, Si, 41); ROUND3(D, A, B, C,  0, Sj, 42);
 | |
| +	ROUND3(C, D, A, B,  3, Sk, 43); ROUND3(B, C, D, A,  6, Sl, 44);
 | |
| +	ROUND3(A, B, C, D,  9, Si, 45); ROUND3(D, A, B, C, 12, Sj, 46);
 | |
| +	ROUND3(C, D, A, B, 15, Sk, 47); ROUND3(B, C, D, A,  2, Sl, 48);
 | |
| +
 | |
| +	ROUND4(A, B, C, D,  0, Sm, 49); ROUND4(D, A, B, C,  7, Sn, 50);
 | |
| +	ROUND4(C, D, A, B, 14, So, 51); ROUND4(B, C, D, A,  5, Sp, 52);
 | |
| +	ROUND4(A, B, C, D, 12, Sm, 53); ROUND4(D, A, B, C,  3, Sn, 54);
 | |
| +	ROUND4(C, D, A, B, 10, So, 55); ROUND4(B, C, D, A,  1, Sp, 56);
 | |
| +	ROUND4(A, B, C, D,  8, Sm, 57); ROUND4(D, A, B, C, 15, Sn, 58);
 | |
| +	ROUND4(C, D, A, B,  6, So, 59); ROUND4(B, C, D, A, 13, Sp, 60);
 | |
| +	ROUND4(A, B, C, D,  4, Sm, 61); ROUND4(D, A, B, C, 11, Sn, 62);
 | |
| +	ROUND4(C, D, A, B,  2, So, 63); ROUND4(B, C, D, A,  9, Sp, 64);
 | |
| +
 | |
| +	ctxt->md5_sta += A;
 | |
| +	ctxt->md5_stb += B;
 | |
| +	ctxt->md5_stc += C;
 | |
| +	ctxt->md5_std += D;
 | |
| +}
 | |
| --- /dev/null
 | |
| +++ b/crypto/ocf/safe/md5.h
 | |
| @@ -0,0 +1,76 @@
 | |
| +/*	$FreeBSD: src/sys/crypto/md5.h,v 1.4 2002/03/20 05:13:50 alfred Exp $	*/
 | |
| +/*	$KAME: md5.h,v 1.4 2000/03/27 04:36:22 sumikawa Exp $	*/
 | |
| +
 | |
| +/*
 | |
| + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 | |
| + * All rights reserved.
 | |
| + *
 | |
| + * Redistribution and use in source and binary forms, with or without
 | |
| + * modification, are permitted provided that the following conditions
 | |
| + * are met:
 | |
| + * 1. Redistributions of source code must retain the above copyright
 | |
| + *    notice, this list of conditions and the following disclaimer.
 | |
| + * 2. Redistributions in binary form must reproduce the above copyright
 | |
| + *    notice, this list of conditions and the following disclaimer in the
 | |
| + *    documentation and/or other materials provided with the distribution.
 | |
| + * 3. Neither the name of the project nor the names of its contributors
 | |
| + *    may be used to endorse or promote products derived from this software
 | |
| + *    without specific prior written permission.
 | |
| + *
 | |
| + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 | |
| + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 | |
| + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 | |
| + * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 | |
| + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 | |
| + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 | |
| + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 | |
| + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 | |
| + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 | |
| + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 | |
| + * SUCH DAMAGE.
 | |
| + */
 | |
| +
 | |
| +#ifndef _NETINET6_MD5_H_
 | |
| +#define _NETINET6_MD5_H_
 | |
| +
 | |
| +#define MD5_BUFLEN	64
 | |
| +
 | |
| +typedef struct {
 | |
| +	union {
 | |
| +		u_int32_t	md5_state32[4];
 | |
| +		u_int8_t	md5_state8[16];
 | |
| +	} md5_st;
 | |
| +
 | |
| +#define md5_sta		md5_st.md5_state32[0]
 | |
| +#define md5_stb		md5_st.md5_state32[1]
 | |
| +#define md5_stc		md5_st.md5_state32[2]
 | |
| +#define md5_std		md5_st.md5_state32[3]
 | |
| +#define md5_st8		md5_st.md5_state8
 | |
| +
 | |
| +	union {
 | |
| +		u_int64_t	md5_count64;
 | |
| +		u_int8_t	md5_count8[8];
 | |
| +	} md5_count;
 | |
| +#define md5_n	md5_count.md5_count64
 | |
| +#define md5_n8	md5_count.md5_count8
 | |
| +
 | |
| +	u_int	md5_i;
 | |
| +	u_int8_t	md5_buf[MD5_BUFLEN];
 | |
| +} md5_ctxt;
 | |
| +
 | |
| +extern void md5_init(md5_ctxt *);
 | |
| +extern void md5_loop(md5_ctxt *, u_int8_t *, u_int);
 | |
| +extern void md5_pad(md5_ctxt *);
 | |
| +extern void md5_result(u_int8_t *, md5_ctxt *);
 | |
| +
 | |
| +/* compatibility */
 | |
| +#define MD5_CTX		md5_ctxt
 | |
| +#define MD5Init(x)	md5_init((x))
 | |
| +#define MD5Update(x, y, z)	md5_loop((x), (y), (z))
 | |
| +#define MD5Final(x, y) \
 | |
| +do {				\
 | |
| +	md5_pad((y));		\
 | |
| +	md5_result((x), (y));	\
 | |
| +} while (0)
 | |
| +
 | |
| +#endif /* ! _NETINET6_MD5_H_*/
 | |
| --- /dev/null
 | |
| +++ b/crypto/ocf/safe/safe.c
 | |
| @@ -0,0 +1,2288 @@
 | |
| +/*-
 | |
| + * Linux port done by David McCullough <david_mccullough@securecomputing.com>
 | |
| + * Copyright (C) 2004-2007 David McCullough
 | |
| + * The license and original author are listed below.
 | |
| + *
 | |
| + * Copyright (c) 2003 Sam Leffler, Errno Consulting
 | |
| + * Copyright (c) 2003 Global Technology Associates, Inc.
 | |
| + * All rights reserved.
 | |
| + *
 | |
| + * Redistribution and use in source and binary forms, with or without
 | |
| + * modification, are permitted provided that the following conditions
 | |
| + * are met:
 | |
| + * 1. Redistributions of source code must retain the above copyright
 | |
| + *    notice, this list of conditions and the following disclaimer.
 | |
| + * 2. Redistributions in binary form must reproduce the above copyright
 | |
| + *    notice, this list of conditions and the following disclaimer in the
 | |
| + *    documentation and/or other materials provided with the distribution.
 | |
| + *
 | |
| + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 | |
| + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 | |
| + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 | |
| + * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 | |
| + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 | |
| + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 | |
| + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 | |
| + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 | |
| + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 | |
| + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 | |
| + * SUCH DAMAGE.
 | |
| + *
 | |
| +__FBSDID("$FreeBSD: src/sys/dev/safe/safe.c,v 1.18 2007/03/21 03:42:50 sam Exp $");
 | |
| + */
 | |
| +
 | |
| +#ifndef AUTOCONF_INCLUDED
 | |
| +#include <linux/config.h>
 | |
| +#endif
 | |
| +#include <linux/module.h>
 | |
| +#include <linux/kernel.h>
 | |
| +#include <linux/init.h>
 | |
| +#include <linux/list.h>
 | |
| +#include <linux/slab.h>
 | |
| +#include <linux/wait.h>
 | |
| +#include <linux/sched.h>
 | |
| +#include <linux/pci.h>
 | |
| +#include <linux/delay.h>
 | |
| +#include <linux/interrupt.h>
 | |
| +#include <linux/spinlock.h>
 | |
| +#include <linux/random.h>
 | |
| +#include <linux/version.h>
 | |
| +#include <linux/skbuff.h>
 | |
| +#include <asm/io.h>
 | |
| +
 | |
| +/*
 | |
| + * SafeNet SafeXcel-1141 hardware crypto accelerator
 | |
| + */
 | |
| +
 | |
| +#include <cryptodev.h>
 | |
| +#include <uio.h>
 | |
| +#include <safe/safereg.h>
 | |
| +#include <safe/safevar.h>
 | |
| +
 | |
| +#if 1
 | |
| +#define	DPRINTF(a)	do { \
 | |
| +						if (debug) { \
 | |
| +							printk("%s: ", sc ? \
 | |
| +								device_get_nameunit(sc->sc_dev) : "safe"); \
 | |
| +							printk a; \
 | |
| +						} \
 | |
| +					} while (0)
 | |
| +#else
 | |
| +#define	DPRINTF(a)
 | |
| +#endif
 | |
| +
 | |
| +/*
 | |
| + * until we find a cleaner way, include the BSD md5/sha1 code
 | |
| + * here
 | |
| + */
 | |
| +#define HMAC_HACK 1
 | |
| +#ifdef HMAC_HACK
 | |
| +#define LITTLE_ENDIAN 1234
 | |
| +#define BIG_ENDIAN 4321
 | |
| +#ifdef __LITTLE_ENDIAN
 | |
| +#define BYTE_ORDER LITTLE_ENDIAN
 | |
| +#endif
 | |
| +#ifdef __BIG_ENDIAN
 | |
| +#define BYTE_ORDER BIG_ENDIAN
 | |
| +#endif
 | |
| +#include <safe/md5.h>
 | |
| +#include <safe/md5.c>
 | |
| +#include <safe/sha1.h>
 | |
| +#include <safe/sha1.c>
 | |
| +
 | |
| +u_int8_t hmac_ipad_buffer[64] = {
 | |
| +    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
 | |
| +    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
 | |
| +    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
 | |
| +    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
 | |
| +    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
 | |
| +    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
 | |
| +    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
 | |
| +    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36
 | |
| +};
 | |
| +
 | |
| +u_int8_t hmac_opad_buffer[64] = {
 | |
| +    0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
 | |
| +    0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
 | |
| +    0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
 | |
| +    0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
 | |
| +    0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
 | |
| +    0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
 | |
| +    0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
 | |
| +    0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C
 | |
| +};
 | |
| +#endif /* HMAC_HACK */
 | |
| +
 | |
| +/* add proc entry for this */
 | |
| +struct safe_stats safestats;
 | |
| +
 | |
| +#define debug safe_debug
 | |
| +int safe_debug = 0;
 | |
| +module_param(safe_debug, int, 0644);
 | |
| +MODULE_PARM_DESC(safe_debug, "Enable debug");
 | |
| +
 | |
| +static	void safe_callback(struct safe_softc *, struct safe_ringentry *);
 | |
| +static	void safe_feed(struct safe_softc *, struct safe_ringentry *);
 | |
| +#if defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG)
 | |
| +static	void safe_rng_init(struct safe_softc *);
 | |
| +int safe_rngbufsize = 8;		/* 32 bytes each read  */
 | |
| +module_param(safe_rngbufsize, int, 0644);
 | |
| +MODULE_PARM_DESC(safe_rngbufsize, "RNG polling buffer size (32-bit words)");
 | |
| +int safe_rngmaxalarm = 8;		/* max alarms before reset */
 | |
| +module_param(safe_rngmaxalarm, int, 0644);
 | |
| +MODULE_PARM_DESC(safe_rngmaxalarm, "RNG max alarms before reset");
 | |
| +#endif /* CONFIG_OCF_RANDOMHARVEST && !SAFE_NO_RNG */
 | |
| +
 | |
| +static void safe_totalreset(struct safe_softc *sc);
 | |
| +static int safe_dmamap_aligned(struct safe_softc *sc, const struct safe_operand *op);
 | |
| +static int safe_dmamap_uniform(struct safe_softc *sc, const struct safe_operand *op);
 | |
| +static int safe_free_entry(struct safe_softc *sc, struct safe_ringentry *re);
 | |
| +static int safe_kprocess(device_t dev, struct cryptkop *krp, int hint);
 | |
| +static int safe_kstart(struct safe_softc *sc);
 | |
| +static int safe_ksigbits(struct safe_softc *sc, struct crparam *cr);
 | |
| +static void safe_kfeed(struct safe_softc *sc);
 | |
| +static void safe_kpoll(unsigned long arg);
 | |
| +static void safe_kload_reg(struct safe_softc *sc, u_int32_t off,
 | |
| +								u_int32_t len, struct crparam *n);
 | |
| +
 | |
| +static	int safe_newsession(device_t, u_int32_t *, struct cryptoini *);
 | |
| +static	int safe_freesession(device_t, u_int64_t);
 | |
| +static	int safe_process(device_t, struct cryptop *, int);
 | |
| +
 | |
| +static device_method_t safe_methods = {
 | |
| +	/* crypto device methods */
 | |
| +	DEVMETHOD(cryptodev_newsession,	safe_newsession),
 | |
| +	DEVMETHOD(cryptodev_freesession,safe_freesession),
 | |
| +	DEVMETHOD(cryptodev_process,	safe_process),
 | |
| +	DEVMETHOD(cryptodev_kprocess,	safe_kprocess),
 | |
| +};
 | |
| +
 | |
| +#define	READ_REG(sc,r)			readl((sc)->sc_base_addr + (r))
 | |
| +#define WRITE_REG(sc,r,val)		writel((val), (sc)->sc_base_addr + (r))
 | |
| +
 | |
| +#define SAFE_MAX_CHIPS 8
 | |
| +static struct safe_softc *safe_chip_idx[SAFE_MAX_CHIPS];
 | |
| +
 | |
| +/*
 | |
| + * split our buffers up into safe DMAable byte fragments to avoid lockup
 | |
| + * bug in 1141 HW on rev 1.0.
 | |
| + */
 | |
| +
 | |
| +static int
 | |
| +pci_map_linear(
 | |
| +	struct safe_softc *sc,
 | |
| +	struct safe_operand *buf,
 | |
| +	void *addr,
 | |
| +	int len)
 | |
| +{
 | |
| +	dma_addr_t tmp;
 | |
| +	int chunk, tlen = len;
 | |
| +
 | |
| +	tmp = pci_map_single(sc->sc_pcidev, addr, len, PCI_DMA_BIDIRECTIONAL);
 | |
| +
 | |
| +	buf->mapsize += len;
 | |
| +	while (len > 0) {
 | |
| +		chunk = (len > sc->sc_max_dsize) ? sc->sc_max_dsize : len;
 | |
| +		buf->segs[buf->nsegs].ds_addr = tmp;
 | |
| +		buf->segs[buf->nsegs].ds_len  = chunk;
 | |
| +		buf->segs[buf->nsegs].ds_tlen = tlen;
 | |
| +		buf->nsegs++;
 | |
| +		tmp  += chunk;
 | |
| +		len  -= chunk;
 | |
| +		tlen = 0;
 | |
| +	}
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * map in a given uio buffer (great on some arches :-)
 | |
| + */
 | |
| +
 | |
| +static int
 | |
| +pci_map_uio(struct safe_softc *sc, struct safe_operand *buf, struct uio *uio)
 | |
| +{
 | |
| +	struct iovec *iov = uio->uio_iov;
 | |
| +	int n;
 | |
| +
 | |
| +	DPRINTF(("%s()\n", __FUNCTION__));
 | |
| +
 | |
| +	buf->mapsize = 0;
 | |
| +	buf->nsegs = 0;
 | |
| +
 | |
| +	for (n = 0; n < uio->uio_iovcnt; n++) {
 | |
| +		pci_map_linear(sc, buf, iov->iov_base, iov->iov_len);
 | |
| +		iov++;
 | |
| +	}
 | |
| +
 | |
| +	/* identify this buffer by the first segment */
 | |
| +	buf->map = (void *) buf->segs[0].ds_addr;
 | |
| +	return(0);
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * map in a given sk_buff
 | |
| + */
 | |
| +
 | |
| +static int
 | |
| +pci_map_skb(struct safe_softc *sc,struct safe_operand *buf,struct sk_buff *skb)
 | |
| +{
 | |
| +	int i;
 | |
| +
 | |
| +	DPRINTF(("%s()\n", __FUNCTION__));
 | |
| +
 | |
| +	buf->mapsize = 0;
 | |
| +	buf->nsegs = 0;
 | |
| +
 | |
| +	pci_map_linear(sc, buf, skb->data, skb_headlen(skb));
 | |
| +
 | |
| +	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 | |
| +		pci_map_linear(sc, buf,
 | |
| +				page_address(skb_shinfo(skb)->frags[i].page) +
 | |
| +				                        skb_shinfo(skb)->frags[i].page_offset,
 | |
| +				skb_shinfo(skb)->frags[i].size);
 | |
| +	}
 | |
| +
 | |
| +	/* identify this buffer by the first segment */
 | |
| +	buf->map = (void *) buf->segs[0].ds_addr;
 | |
| +	return(0);
 | |
| +}
 | |
| +
 | |
| +
 | |
| +#if 0 /* not needed at this time */
 | |
| +static void
 | |
| +pci_sync_operand(struct safe_softc *sc, struct safe_operand *buf)
 | |
| +{
 | |
| +	int i;
 | |
| +
 | |
| +	DPRINTF(("%s()\n", __FUNCTION__));
 | |
| +	for (i = 0; i < buf->nsegs; i++)
 | |
| +		pci_dma_sync_single_for_cpu(sc->sc_pcidev, buf->segs[i].ds_addr,
 | |
| +				buf->segs[i].ds_len, PCI_DMA_BIDIRECTIONAL);
 | |
| +}
 | |
| +#endif
 | |
| +
 | |
| +static void
 | |
| +pci_unmap_operand(struct safe_softc *sc, struct safe_operand *buf)
 | |
| +{
 | |
| +	int i;
 | |
| +	DPRINTF(("%s()\n", __FUNCTION__));
 | |
| +	for (i = 0; i < buf->nsegs; i++) {
 | |
| +		if (buf->segs[i].ds_tlen) {
 | |
| +			DPRINTF(("%s - unmap %d 0x%x %d\n", __FUNCTION__, i, buf->segs[i].ds_addr, buf->segs[i].ds_tlen));
 | |
| +			pci_unmap_single(sc->sc_pcidev, buf->segs[i].ds_addr,
 | |
| +					buf->segs[i].ds_tlen, PCI_DMA_BIDIRECTIONAL);
 | |
| +			DPRINTF(("%s - unmap %d 0x%x %d done\n", __FUNCTION__, i, buf->segs[i].ds_addr, buf->segs[i].ds_tlen));
 | |
| +		}
 | |
| +		buf->segs[i].ds_addr = 0;
 | |
| +		buf->segs[i].ds_len = 0;
 | |
| +		buf->segs[i].ds_tlen = 0;
 | |
| +	}
 | |
| +	buf->nsegs = 0;
 | |
| +	buf->mapsize = 0;
 | |
| +	buf->map = 0;
 | |
| +}
 | |
| +
 | |
| +
 | |
| +/*
 | |
| + * SafeXcel Interrupt routine
 | |
| + */
 | |
| +static irqreturn_t
 | |
| +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
 | |
| +safe_intr(int irq, void *arg)
 | |
| +#else
 | |
| +safe_intr(int irq, void *arg, struct pt_regs *regs)
 | |
| +#endif
 | |
| +{
 | |
| +	struct safe_softc *sc = arg;
 | |
| +	int stat;
 | |
| +	unsigned long flags;
 | |
| +
 | |
| +	stat = READ_REG(sc, SAFE_HM_STAT);
 | |
| +
 | |
| +	DPRINTF(("%s(stat=0x%x)\n", __FUNCTION__, stat));
 | |
| +
 | |
| +	if (stat == 0)		/* shared irq, not for us */
 | |
| +		return IRQ_NONE;
 | |
| +
 | |
| +	WRITE_REG(sc, SAFE_HI_CLR, stat);	/* IACK */
 | |
| +
 | |
| +	if ((stat & SAFE_INT_PE_DDONE)) {
 | |
| +		/*
 | |
| +		 * Descriptor(s) done; scan the ring and
 | |
| +		 * process completed operations.
 | |
| +		 */
 | |
| +		spin_lock_irqsave(&sc->sc_ringmtx, flags);
 | |
| +		while (sc->sc_back != sc->sc_front) {
 | |
| +			struct safe_ringentry *re = sc->sc_back;
 | |
| +
 | |
| +#ifdef SAFE_DEBUG
 | |
| +			if (debug) {
 | |
| +				safe_dump_ringstate(sc, __func__);
 | |
| +				safe_dump_request(sc, __func__, re);
 | |
| +			}
 | |
| +#endif
 | |
| +			/*
 | |
| +			 * safe_process marks ring entries that were allocated
 | |
| +			 * but not used with a csr of zero.  This insures the
 | |
| +			 * ring front pointer never needs to be set backwards
 | |
| +			 * in the event that an entry is allocated but not used
 | |
| +			 * because of a setup error.
 | |
| +			 */
 | |
| +			DPRINTF(("%s re->re_desc.d_csr=0x%x\n", __FUNCTION__, re->re_desc.d_csr));
 | |
| +			if (re->re_desc.d_csr != 0) {
 | |
| +				if (!SAFE_PE_CSR_IS_DONE(re->re_desc.d_csr)) {
 | |
| +					DPRINTF(("%s !CSR_IS_DONE\n", __FUNCTION__));
 | |
| +					break;
 | |
| +				}
 | |
| +				if (!SAFE_PE_LEN_IS_DONE(re->re_desc.d_len)) {
 | |
| +					DPRINTF(("%s !LEN_IS_DONE\n", __FUNCTION__));
 | |
| +					break;
 | |
| +				}
 | |
| +				sc->sc_nqchip--;
 | |
| +				safe_callback(sc, re);
 | |
| +			}
 | |
| +			if (++(sc->sc_back) == sc->sc_ringtop)
 | |
| +				sc->sc_back = sc->sc_ring;
 | |
| +		}
 | |
| +		spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
 | |
| +	}
 | |
| +
 | |
| +	/*
 | |
| +	 * Check to see if we got any DMA Error
 | |
| +	 */
 | |
| +	if (stat & SAFE_INT_PE_ERROR) {
 | |
| +		printk("%s: dmaerr dmastat %08x\n", device_get_nameunit(sc->sc_dev),
 | |
| +				(int)READ_REG(sc, SAFE_PE_DMASTAT));
 | |
| +		safestats.st_dmaerr++;
 | |
| +		safe_totalreset(sc);
 | |
| +#if 0
 | |
| +		safe_feed(sc);
 | |
| +#endif
 | |
| +	}
 | |
| +
 | |
| +	if (sc->sc_needwakeup) {		/* XXX check high watermark */
 | |
| +		int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
 | |
| +		DPRINTF(("%s: wakeup crypto %x\n", __func__,
 | |
| +			sc->sc_needwakeup));
 | |
| +		sc->sc_needwakeup &= ~wakeup;
 | |
| +		crypto_unblock(sc->sc_cid, wakeup);
 | |
| +	}
 | |
| +
 | |
| +	return IRQ_HANDLED;
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * safe_feed() - post a request to chip
 | |
| + */
 | |
| +static void
 | |
| +safe_feed(struct safe_softc *sc, struct safe_ringentry *re)
 | |
| +{
 | |
| +	DPRINTF(("%s()\n", __FUNCTION__));
 | |
| +#ifdef SAFE_DEBUG
 | |
| +	if (debug) {
 | |
| +		safe_dump_ringstate(sc, __func__);
 | |
| +		safe_dump_request(sc, __func__, re);
 | |
| +	}
 | |
| +#endif
 | |
| +	sc->sc_nqchip++;
 | |
| +	if (sc->sc_nqchip > safestats.st_maxqchip)
 | |
| +		safestats.st_maxqchip = sc->sc_nqchip;
 | |
| +	/* poke h/w to check descriptor ring, any value can be written */
 | |
| +	WRITE_REG(sc, SAFE_HI_RD_DESCR, 0);
 | |
| +}
 | |
| +
 | |
| +#define	N(a)	(sizeof(a) / sizeof (a[0]))
 | |
| +static void
 | |
| +safe_setup_enckey(struct safe_session *ses, caddr_t key)
 | |
| +{
 | |
| +	int i;
 | |
| +
 | |
| +	bcopy(key, ses->ses_key, ses->ses_klen / 8);
 | |
| +
 | |
| +	/* PE is little-endian, insure proper byte order */
 | |
| +	for (i = 0; i < N(ses->ses_key); i++)
 | |
| +		ses->ses_key[i] = htole32(ses->ses_key[i]);
 | |
| +}
 | |
| +
 | |
| +static void
 | |
| +safe_setup_mackey(struct safe_session *ses, int algo, caddr_t key, int klen)
 | |
| +{
 | |
| +#ifdef HMAC_HACK
 | |
| +	MD5_CTX md5ctx;
 | |
| +	SHA1_CTX sha1ctx;
 | |
| +	int i;
 | |
| +
 | |
| +
 | |
| +	for (i = 0; i < klen; i++)
 | |
| +		key[i] ^= HMAC_IPAD_VAL;
 | |
| +
 | |
| +	if (algo == CRYPTO_MD5_HMAC) {
 | |
| +		MD5Init(&md5ctx);
 | |
| +		MD5Update(&md5ctx, key, klen);
 | |
| +		MD5Update(&md5ctx, hmac_ipad_buffer, MD5_HMAC_BLOCK_LEN - klen);
 | |
| +		bcopy(md5ctx.md5_st8, ses->ses_hminner, sizeof(md5ctx.md5_st8));
 | |
| +	} else {
 | |
| +		SHA1Init(&sha1ctx);
 | |
| +		SHA1Update(&sha1ctx, key, klen);
 | |
| +		SHA1Update(&sha1ctx, hmac_ipad_buffer,
 | |
| +		    SHA1_HMAC_BLOCK_LEN - klen);
 | |
| +		bcopy(sha1ctx.h.b32, ses->ses_hminner, sizeof(sha1ctx.h.b32));
 | |
| +	}
 | |
| +
 | |
| +	for (i = 0; i < klen; i++)
 | |
| +		key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);
 | |
| +
 | |
| +	if (algo == CRYPTO_MD5_HMAC) {
 | |
| +		MD5Init(&md5ctx);
 | |
| +		MD5Update(&md5ctx, key, klen);
 | |
| +		MD5Update(&md5ctx, hmac_opad_buffer, MD5_HMAC_BLOCK_LEN - klen);
 | |
| +		bcopy(md5ctx.md5_st8, ses->ses_hmouter, sizeof(md5ctx.md5_st8));
 | |
| +	} else {
 | |
| +		SHA1Init(&sha1ctx);
 | |
| +		SHA1Update(&sha1ctx, key, klen);
 | |
| +		SHA1Update(&sha1ctx, hmac_opad_buffer,
 | |
| +		    SHA1_HMAC_BLOCK_LEN - klen);
 | |
| +		bcopy(sha1ctx.h.b32, ses->ses_hmouter, sizeof(sha1ctx.h.b32));
 | |
| +	}
 | |
| +
 | |
| +	for (i = 0; i < klen; i++)
 | |
| +		key[i] ^= HMAC_OPAD_VAL;
 | |
| +
 | |
| +#if 0
 | |
| +	/*
 | |
| +	 * this code prevents SHA working on a BE host,
 | |
| +	 * so it is obviously wrong.  I think the byte
 | |
| +	 * swap setup we do with the chip fixes this for us
 | |
| +	 */
 | |
| +
 | |
| +	/* PE is little-endian, insure proper byte order */
 | |
| +	for (i = 0; i < N(ses->ses_hminner); i++) {
 | |
| +		ses->ses_hminner[i] = htole32(ses->ses_hminner[i]);
 | |
| +		ses->ses_hmouter[i] = htole32(ses->ses_hmouter[i]);
 | |
| +	}
 | |
| +#endif
 | |
| +#else /* HMAC_HACK */
 | |
| +	printk("safe: md5/sha not implemented\n");
 | |
| +#endif /* HMAC_HACK */
 | |
| +}
 | |
| +#undef N
 | |
| +
 | |
| +/*
 | |
| + * Allocate a new 'session' and return an encoded session id.  'sidp'
 | |
| + * contains our registration id, and should contain an encoded session
 | |
| + * id on successful allocation.
 | |
| + */
 | |
| +static int
 | |
| +safe_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
 | |
| +{
 | |
| +	struct safe_softc *sc = device_get_softc(dev);
 | |
| +	struct cryptoini *c, *encini = NULL, *macini = NULL;
 | |
| +	struct safe_session *ses = NULL;
 | |
| +	int sesn;
 | |
| +
 | |
| +	DPRINTF(("%s()\n", __FUNCTION__));
 | |
| +
 | |
| +	if (sidp == NULL || cri == NULL || sc == NULL)
 | |
| +		return (EINVAL);
 | |
| +
 | |
| +	for (c = cri; c != NULL; c = c->cri_next) {
 | |
| +		if (c->cri_alg == CRYPTO_MD5_HMAC ||
 | |
| +		    c->cri_alg == CRYPTO_SHA1_HMAC ||
 | |
| +		    c->cri_alg == CRYPTO_NULL_HMAC) {
 | |
| +			if (macini)
 | |
| +				return (EINVAL);
 | |
| +			macini = c;
 | |
| +		} else if (c->cri_alg == CRYPTO_DES_CBC ||
 | |
| +		    c->cri_alg == CRYPTO_3DES_CBC ||
 | |
| +		    c->cri_alg == CRYPTO_AES_CBC ||
 | |
| +		    c->cri_alg == CRYPTO_NULL_CBC) {
 | |
| +			if (encini)
 | |
| +				return (EINVAL);
 | |
| +			encini = c;
 | |
| +		} else
 | |
| +			return (EINVAL);
 | |
| +	}
 | |
| +	if (encini == NULL && macini == NULL)
 | |
| +		return (EINVAL);
 | |
| +	if (encini) {			/* validate key length */
 | |
| +		switch (encini->cri_alg) {
 | |
| +		case CRYPTO_DES_CBC:
 | |
| +			if (encini->cri_klen != 64)
 | |
| +				return (EINVAL);
 | |
| +			break;
 | |
| +		case CRYPTO_3DES_CBC:
 | |
| +			if (encini->cri_klen != 192)
 | |
| +				return (EINVAL);
 | |
| +			break;
 | |
| +		case CRYPTO_AES_CBC:
 | |
| +			if (encini->cri_klen != 128 &&
 | |
| +			    encini->cri_klen != 192 &&
 | |
| +			    encini->cri_klen != 256)
 | |
| +				return (EINVAL);
 | |
| +			break;
 | |
| +		}
 | |
| +	}
 | |
| +
 | |
| +	if (sc->sc_sessions == NULL) {
 | |
| +		ses = sc->sc_sessions = (struct safe_session *)
 | |
| +			kmalloc(sizeof(struct safe_session), SLAB_ATOMIC);
 | |
| +		if (ses == NULL)
 | |
| +			return (ENOMEM);
 | |
| +		memset(ses, 0, sizeof(struct safe_session));
 | |
| +		sesn = 0;
 | |
| +		sc->sc_nsessions = 1;
 | |
| +	} else {
 | |
| +		for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
 | |
| +			if (sc->sc_sessions[sesn].ses_used == 0) {
 | |
| +				ses = &sc->sc_sessions[sesn];
 | |
| +				break;
 | |
| +			}
 | |
| +		}
 | |
| +
 | |
| +		if (ses == NULL) {
 | |
| +			sesn = sc->sc_nsessions;
 | |
| +			ses = (struct safe_session *)
 | |
| +				kmalloc((sesn + 1) * sizeof(struct safe_session), SLAB_ATOMIC);
 | |
| +			if (ses == NULL)
 | |
| +				return (ENOMEM);
 | |
| +			memset(ses, 0, (sesn + 1) * sizeof(struct safe_session));
 | |
| +			bcopy(sc->sc_sessions, ses, sesn *
 | |
| +			    sizeof(struct safe_session));
 | |
| +			bzero(sc->sc_sessions, sesn *
 | |
| +			    sizeof(struct safe_session));
 | |
| +			kfree(sc->sc_sessions);
 | |
| +			sc->sc_sessions = ses;
 | |
| +			ses = &sc->sc_sessions[sesn];
 | |
| +			sc->sc_nsessions++;
 | |
| +		}
 | |
| +	}
 | |
| +
 | |
| +	bzero(ses, sizeof(struct safe_session));
 | |
| +	ses->ses_used = 1;
 | |
| +
 | |
| +	if (encini) {
 | |
| +		/* get an IV */
 | |
| +		/* XXX may read fewer than requested */
 | |
| +		read_random(ses->ses_iv, sizeof(ses->ses_iv));
 | |
| +
 | |
| +		ses->ses_klen = encini->cri_klen;
 | |
| +		if (encini->cri_key != NULL)
 | |
| +			safe_setup_enckey(ses, encini->cri_key);
 | |
| +	}
 | |
| +
 | |
| +	if (macini) {
 | |
| +		ses->ses_mlen = macini->cri_mlen;
 | |
| +		if (ses->ses_mlen == 0) {
 | |
| +			if (macini->cri_alg == CRYPTO_MD5_HMAC)
 | |
| +				ses->ses_mlen = MD5_HASH_LEN;
 | |
| +			else
 | |
| +				ses->ses_mlen = SHA1_HASH_LEN;
 | |
| +		}
 | |
| +
 | |
| +		if (macini->cri_key != NULL) {
 | |
| +			safe_setup_mackey(ses, macini->cri_alg, macini->cri_key,
 | |
| +			    macini->cri_klen / 8);
 | |
| +		}
 | |
| +	}
 | |
| +
 | |
| +	*sidp = SAFE_SID(device_get_unit(sc->sc_dev), sesn);
 | |
| +	return (0);
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Deallocate a session.
 | |
| + */
 | |
| +static int
 | |
| +safe_freesession(device_t dev, u_int64_t tid)
 | |
| +{
 | |
| +	struct safe_softc *sc = device_get_softc(dev);
 | |
| +	int session, ret;
 | |
| +	u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
 | |
| +
 | |
| +	DPRINTF(("%s()\n", __FUNCTION__));
 | |
| +
 | |
| +	if (sc == NULL)
 | |
| +		return (EINVAL);
 | |
| +
 | |
| +	session = SAFE_SESSION(sid);
 | |
| +	if (session < sc->sc_nsessions) {
 | |
| +		bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session]));
 | |
| +		ret = 0;
 | |
| +	} else
 | |
| +		ret = EINVAL;
 | |
| +	return (ret);
 | |
| +}
 | |
| +
 | |
| +
 | |
| +static int
 | |
| +safe_process(device_t dev, struct cryptop *crp, int hint)
 | |
| +{
 | |
| +	struct safe_softc *sc = device_get_softc(dev);
 | |
| +	int err = 0, i, nicealign, uniform;
 | |
| +	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
 | |
| +	int bypass, oplen, ivsize;
 | |
| +	caddr_t iv;
 | |
| +	int16_t coffset;
 | |
| +	struct safe_session *ses;
 | |
| +	struct safe_ringentry *re;
 | |
| +	struct safe_sarec *sa;
 | |
| +	struct safe_pdesc *pd;
 | |
| +	u_int32_t cmd0, cmd1, staterec;
 | |
| +	unsigned long flags;
 | |
| +
 | |
| +	DPRINTF(("%s()\n", __FUNCTION__));
 | |
| +
 | |
| +	if (crp == NULL || crp->crp_callback == NULL || sc == NULL) {
 | |
| +		safestats.st_invalid++;
 | |
| +		return (EINVAL);
 | |
| +	}
 | |
| +	if (SAFE_SESSION(crp->crp_sid) >= sc->sc_nsessions) {
 | |
| +		safestats.st_badsession++;
 | |
| +		return (EINVAL);
 | |
| +	}
 | |
| +
 | |
| +	spin_lock_irqsave(&sc->sc_ringmtx, flags);
 | |
| +	if (sc->sc_front == sc->sc_back && sc->sc_nqchip != 0) {
 | |
| +		safestats.st_ringfull++;
 | |
| +		sc->sc_needwakeup |= CRYPTO_SYMQ;
 | |
| +		spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
 | |
| +		return (ERESTART);
 | |
| +	}
 | |
| +	re = sc->sc_front;
 | |
| +
 | |
| +	staterec = re->re_sa.sa_staterec;	/* save */
 | |
| +	/* NB: zero everything but the PE descriptor */
 | |
| +	bzero(&re->re_sa, sizeof(struct safe_ringentry) - sizeof(re->re_desc));
 | |
| +	re->re_sa.sa_staterec = staterec;	/* restore */
 | |
| +
 | |
| +	re->re_crp = crp;
 | |
| +	re->re_sesn = SAFE_SESSION(crp->crp_sid);
 | |
| +
 | |
| +	re->re_src.nsegs = 0;
 | |
| +	re->re_dst.nsegs = 0;
 | |
| +
 | |
| +	if (crp->crp_flags & CRYPTO_F_SKBUF) {
 | |
| +		re->re_src_skb = (struct sk_buff *)crp->crp_buf;
 | |
| +		re->re_dst_skb = (struct sk_buff *)crp->crp_buf;
 | |
| +	} else if (crp->crp_flags & CRYPTO_F_IOV) {
 | |
| +		re->re_src_io = (struct uio *)crp->crp_buf;
 | |
| +		re->re_dst_io = (struct uio *)crp->crp_buf;
 | |
| +	} else {
 | |
| +		safestats.st_badflags++;
 | |
| +		err = EINVAL;
 | |
| +		goto errout;	/* XXX we don't handle contiguous blocks! */
 | |
| +	}
 | |
| +
 | |
| +	sa = &re->re_sa;
 | |
| +	ses = &sc->sc_sessions[re->re_sesn];
 | |
| +
 | |
| +	crd1 = crp->crp_desc;
 | |
| +	if (crd1 == NULL) {
 | |
| +		safestats.st_nodesc++;
 | |
| +		err = EINVAL;
 | |
| +		goto errout;
 | |
| +	}
 | |
| +	crd2 = crd1->crd_next;
 | |
| +
 | |
| +	cmd0 = SAFE_SA_CMD0_BASIC;		/* basic group operation */
 | |
| +	cmd1 = 0;
 | |
| +	if (crd2 == NULL) {
 | |
| +		if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
 | |
| +		    crd1->crd_alg == CRYPTO_SHA1_HMAC ||
 | |
| +		    crd1->crd_alg == CRYPTO_NULL_HMAC) {
 | |
| +			maccrd = crd1;
 | |
| +			enccrd = NULL;
 | |
| +			cmd0 |= SAFE_SA_CMD0_OP_HASH;
 | |
| +		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
 | |
| +		    crd1->crd_alg == CRYPTO_3DES_CBC ||
 | |
| +		    crd1->crd_alg == CRYPTO_AES_CBC ||
 | |
| +		    crd1->crd_alg == CRYPTO_NULL_CBC) {
 | |
| +			maccrd = NULL;
 | |
| +			enccrd = crd1;
 | |
| +			cmd0 |= SAFE_SA_CMD0_OP_CRYPT;
 | |
| +		} else {
 | |
| +			safestats.st_badalg++;
 | |
| +			err = EINVAL;
 | |
| +			goto errout;
 | |
| +		}
 | |
| +	} else {
 | |
| +		if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
 | |
| +		    crd1->crd_alg == CRYPTO_SHA1_HMAC ||
 | |
| +		    crd1->crd_alg == CRYPTO_NULL_HMAC) &&
 | |
| +		    (crd2->crd_alg == CRYPTO_DES_CBC ||
 | |
| +			crd2->crd_alg == CRYPTO_3DES_CBC ||
 | |
| +		        crd2->crd_alg == CRYPTO_AES_CBC ||
 | |
| +		        crd2->crd_alg == CRYPTO_NULL_CBC) &&
 | |
| +		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
 | |
| +			maccrd = crd1;
 | |
| +			enccrd = crd2;
 | |
| +		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
 | |
| +		    crd1->crd_alg == CRYPTO_3DES_CBC ||
 | |
| +		    crd1->crd_alg == CRYPTO_AES_CBC ||
 | |
| +		    crd1->crd_alg == CRYPTO_NULL_CBC) &&
 | |
| +		    (crd2->crd_alg == CRYPTO_MD5_HMAC ||
 | |
| +			crd2->crd_alg == CRYPTO_SHA1_HMAC ||
 | |
| +			crd2->crd_alg == CRYPTO_NULL_HMAC) &&
 | |
| +		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
 | |
| +			enccrd = crd1;
 | |
| +			maccrd = crd2;
 | |
| +		} else {
 | |
| +			safestats.st_badalg++;
 | |
| +			err = EINVAL;
 | |
| +			goto errout;
 | |
| +		}
 | |
| +		cmd0 |= SAFE_SA_CMD0_OP_BOTH;
 | |
| +	}
 | |
| +
 | |
| +	if (enccrd) {
 | |
| +		if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT)
 | |
| +			safe_setup_enckey(ses, enccrd->crd_key);
 | |
| +
 | |
| +		if (enccrd->crd_alg == CRYPTO_DES_CBC) {
 | |
| +			cmd0 |= SAFE_SA_CMD0_DES;
 | |
| +			cmd1 |= SAFE_SA_CMD1_CBC;
 | |
| +			ivsize = 2*sizeof(u_int32_t);
 | |
| +		} else if (enccrd->crd_alg == CRYPTO_3DES_CBC) {
 | |
| +			cmd0 |= SAFE_SA_CMD0_3DES;
 | |
| +			cmd1 |= SAFE_SA_CMD1_CBC;
 | |
| +			ivsize = 2*sizeof(u_int32_t);
 | |
| +		} else if (enccrd->crd_alg == CRYPTO_AES_CBC) {
 | |
| +			cmd0 |= SAFE_SA_CMD0_AES;
 | |
| +			cmd1 |= SAFE_SA_CMD1_CBC;
 | |
| +			if (ses->ses_klen == 128)
 | |
| +			     cmd1 |=  SAFE_SA_CMD1_AES128;
 | |
| +			else if (ses->ses_klen == 192)
 | |
| +			     cmd1 |=  SAFE_SA_CMD1_AES192;
 | |
| +			else
 | |
| +			     cmd1 |=  SAFE_SA_CMD1_AES256;
 | |
| +			ivsize = 4*sizeof(u_int32_t);
 | |
| +		} else {
 | |
| +			cmd0 |= SAFE_SA_CMD0_CRYPT_NULL;
 | |
| +			ivsize = 0;
 | |
| +		}
 | |
| +
 | |
| +		/*
 | |
| +		 * Setup encrypt/decrypt state.  When using basic ops
 | |
| +		 * we can't use an inline IV because hash/crypt offset
 | |
| +		 * must be from the end of the IV to the start of the
 | |
| +		 * crypt data and this leaves out the preceding header
 | |
| +		 * from the hash calculation.  Instead we place the IV
 | |
| +		 * in the state record and set the hash/crypt offset to
 | |
| +		 * copy both the header+IV.
 | |
| +		 */
 | |
| +		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
 | |
| +			cmd0 |= SAFE_SA_CMD0_OUTBOUND;
 | |
| +
 | |
| +			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
 | |
| +				iv = enccrd->crd_iv;
 | |
| +			else
 | |
| +				iv = (caddr_t) ses->ses_iv;
 | |
| +			if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
 | |
| +				crypto_copyback(crp->crp_flags, crp->crp_buf,
 | |
| +				    enccrd->crd_inject, ivsize, iv);
 | |
| +			}
 | |
| +			bcopy(iv, re->re_sastate.sa_saved_iv, ivsize);
 | |
| +			/* make iv LE */
 | |
| +			for (i = 0; i < ivsize/sizeof(re->re_sastate.sa_saved_iv[0]); i++)
 | |
| +				re->re_sastate.sa_saved_iv[i] =
 | |
| +					cpu_to_le32(re->re_sastate.sa_saved_iv[i]);
 | |
| +			cmd0 |= SAFE_SA_CMD0_IVLD_STATE | SAFE_SA_CMD0_SAVEIV;
 | |
| +			re->re_flags |= SAFE_QFLAGS_COPYOUTIV;
 | |
| +		} else {
 | |
| +			cmd0 |= SAFE_SA_CMD0_INBOUND;
 | |
| +
 | |
| +			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) {
 | |
| +				bcopy(enccrd->crd_iv,
 | |
| +					re->re_sastate.sa_saved_iv, ivsize);
 | |
| +			} else {
 | |
| +				crypto_copydata(crp->crp_flags, crp->crp_buf,
 | |
| +				    enccrd->crd_inject, ivsize,
 | |
| +				    (caddr_t)re->re_sastate.sa_saved_iv);
 | |
| +			}
 | |
| +			/* make iv LE */
 | |
| +			for (i = 0; i < ivsize/sizeof(re->re_sastate.sa_saved_iv[0]); i++)
 | |
| +				re->re_sastate.sa_saved_iv[i] =
 | |
| +					cpu_to_le32(re->re_sastate.sa_saved_iv[i]);
 | |
| +			cmd0 |= SAFE_SA_CMD0_IVLD_STATE;
 | |
| +		}
 | |
| +		/*
 | |
| +		 * For basic encryption use the zero pad algorithm.
 | |
| +		 * This pads results to an 8-byte boundary and
 | |
| +		 * suppresses padding verification for inbound (i.e.
 | |
| +		 * decrypt) operations.
 | |
| +		 *
 | |
| +		 * NB: Not sure if the 8-byte pad boundary is a problem.
 | |
| +		 */
 | |
| +		cmd0 |= SAFE_SA_CMD0_PAD_ZERO;
 | |
| +
 | |
| +		/* XXX assert key bufs have the same size */
 | |
| +		bcopy(ses->ses_key, sa->sa_key, sizeof(sa->sa_key));
 | |
| +	}
 | |
| +
 | |
| +	if (maccrd) {
 | |
| +		if (maccrd->crd_flags & CRD_F_KEY_EXPLICIT) {
 | |
| +			safe_setup_mackey(ses, maccrd->crd_alg,
 | |
| +			    maccrd->crd_key, maccrd->crd_klen / 8);
 | |
| +		}
 | |
| +
 | |
| +		if (maccrd->crd_alg == CRYPTO_MD5_HMAC) {
 | |
| +			cmd0 |= SAFE_SA_CMD0_MD5;
 | |
| +			cmd1 |= SAFE_SA_CMD1_HMAC;	/* NB: enable HMAC */
 | |
| +		} else if (maccrd->crd_alg == CRYPTO_SHA1_HMAC) {
 | |
| +			cmd0 |= SAFE_SA_CMD0_SHA1;
 | |
| +			cmd1 |= SAFE_SA_CMD1_HMAC;	/* NB: enable HMAC */
 | |
| +		} else {
 | |
| +			cmd0 |= SAFE_SA_CMD0_HASH_NULL;
 | |
| +		}
 | |
| +		/*
 | |
| +		 * Digest data is loaded from the SA and the hash
 | |
| +		 * result is saved to the state block where we
 | |
| +		 * retrieve it for return to the caller.
 | |
| +		 */
 | |
| +		/* XXX assert digest bufs have the same size */
 | |
| +		bcopy(ses->ses_hminner, sa->sa_indigest,
 | |
| +			sizeof(sa->sa_indigest));
 | |
| +		bcopy(ses->ses_hmouter, sa->sa_outdigest,
 | |
| +			sizeof(sa->sa_outdigest));
 | |
| +
 | |
| +		cmd0 |= SAFE_SA_CMD0_HSLD_SA | SAFE_SA_CMD0_SAVEHASH;
 | |
| +		re->re_flags |= SAFE_QFLAGS_COPYOUTICV;
 | |
| +	}
 | |
| +
 | |
| +	if (enccrd && maccrd) {
 | |
| +		/*
 | |
| +		 * The offset from hash data to the start of
 | |
| +		 * crypt data is the difference in the skips.
 | |
| +		 */
 | |
| +		bypass = maccrd->crd_skip;
 | |
| +		coffset = enccrd->crd_skip - maccrd->crd_skip;
 | |
| +		if (coffset < 0) {
 | |
| +			DPRINTF(("%s: hash does not precede crypt; "
 | |
| +				"mac skip %u enc skip %u\n",
 | |
| +				__func__, maccrd->crd_skip, enccrd->crd_skip));
 | |
| +			safestats.st_skipmismatch++;
 | |
| +			err = EINVAL;
 | |
| +			goto errout;
 | |
| +		}
 | |
| +		oplen = enccrd->crd_skip + enccrd->crd_len;
 | |
| +		if (maccrd->crd_skip + maccrd->crd_len != oplen) {
 | |
| +			DPRINTF(("%s: hash amount %u != crypt amount %u\n",
 | |
| +				__func__, maccrd->crd_skip + maccrd->crd_len,
 | |
| +				oplen));
 | |
| +			safestats.st_lenmismatch++;
 | |
| +			err = EINVAL;
 | |
| +			goto errout;
 | |
| +		}
 | |
| +#ifdef SAFE_DEBUG
 | |
| +		if (debug) {
 | |
| +			printf("mac: skip %d, len %d, inject %d\n",
 | |
| +			    maccrd->crd_skip, maccrd->crd_len,
 | |
| +			    maccrd->crd_inject);
 | |
| +			printf("enc: skip %d, len %d, inject %d\n",
 | |
| +			    enccrd->crd_skip, enccrd->crd_len,
 | |
| +			    enccrd->crd_inject);
 | |
| +			printf("bypass %d coffset %d oplen %d\n",
 | |
| +				bypass, coffset, oplen);
 | |
| +		}
 | |
| +#endif
 | |
| +		if (coffset & 3) {	/* offset must be 32-bit aligned */
 | |
| +			DPRINTF(("%s: coffset %u misaligned\n",
 | |
| +				__func__, coffset));
 | |
| +			safestats.st_coffmisaligned++;
 | |
| +			err = EINVAL;
 | |
| +			goto errout;
 | |
| +		}
 | |
| +		coffset >>= 2;
 | |
| +		if (coffset > 255) {	/* offset must be <256 dwords */
 | |
| +			DPRINTF(("%s: coffset %u too big\n",
 | |
| +				__func__, coffset));
 | |
| +			safestats.st_cofftoobig++;
 | |
| +			err = EINVAL;
 | |
| +			goto errout;
 | |
| +		}
 | |
| +		/*
 | |
| +		 * Tell the hardware to copy the header to the output.
 | |
| +		 * The header is defined as the data from the end of
 | |
| +		 * the bypass to the start of data to be encrypted.
 | |
| +		 * Typically this is the inline IV.  Note that you need
 | |
| +		 * to do this even if src+dst are the same; it appears
 | |
| +		 * that w/o this bit the crypted data is written
 | |
| +		 * immediately after the bypass data.
 | |
| +		 */
 | |
| +		cmd1 |= SAFE_SA_CMD1_HDRCOPY;
 | |
| +		/*
 | |
| +		 * Disable IP header mutable bit handling.  This is
 | |
| +		 * needed to get correct HMAC calculations.
 | |
| +		 */
 | |
| +		cmd1 |= SAFE_SA_CMD1_MUTABLE;
 | |
| +	} else {
 | |
| +		if (enccrd) {
 | |
| +			bypass = enccrd->crd_skip;
 | |
| +			oplen = bypass + enccrd->crd_len;
 | |
| +		} else {
 | |
| +			bypass = maccrd->crd_skip;
 | |
| +			oplen = bypass + maccrd->crd_len;
 | |
| +		}
 | |
| +		coffset = 0;
 | |
| +	}
 | |
| +	/* XXX verify multiple of 4 when using s/g */
 | |
| +	if (bypass > 96) {		/* bypass offset must be <= 96 bytes */
 | |
| +		DPRINTF(("%s: bypass %u too big\n", __func__, bypass));
 | |
| +		safestats.st_bypasstoobig++;
 | |
| +		err = EINVAL;
 | |
| +		goto errout;
 | |
| +	}
 | |
| +
 | |
| +	if (crp->crp_flags & CRYPTO_F_SKBUF) {
 | |
| +		if (pci_map_skb(sc, &re->re_src, re->re_src_skb)) {
 | |
| +			safestats.st_noload++;
 | |
| +			err = ENOMEM;
 | |
| +			goto errout;
 | |
| +		}
 | |
| +	} else if (crp->crp_flags & CRYPTO_F_IOV) {
 | |
| +		if (pci_map_uio(sc, &re->re_src, re->re_src_io)) {
 | |
| +			safestats.st_noload++;
 | |
| +			err = ENOMEM;
 | |
| +			goto errout;
 | |
| +		}
 | |
| +	}
 | |
| +	nicealign = safe_dmamap_aligned(sc, &re->re_src);
 | |
| +	uniform = safe_dmamap_uniform(sc, &re->re_src);
 | |
| +
 | |
| +	DPRINTF(("src nicealign %u uniform %u nsegs %u\n",
 | |
| +		nicealign, uniform, re->re_src.nsegs));
 | |
| +	if (re->re_src.nsegs > 1) {
 | |
| +		re->re_desc.d_src = sc->sc_spalloc.dma_paddr +
 | |
| +			((caddr_t) sc->sc_spfree - (caddr_t) sc->sc_spring);
 | |
| +		for (i = 0; i < re->re_src_nsegs; i++) {
 | |
| +			/* NB: no need to check if there's space */
 | |
| +			pd = sc->sc_spfree;
 | |
| +			if (++(sc->sc_spfree) == sc->sc_springtop)
 | |
| +				sc->sc_spfree = sc->sc_spring;
 | |
| +
 | |
| +			KASSERT((pd->pd_flags&3) == 0 ||
 | |
| +				(pd->pd_flags&3) == SAFE_PD_DONE,
 | |
| +				("bogus source particle descriptor; flags %x",
 | |
| +				pd->pd_flags));
 | |
| +			pd->pd_addr = re->re_src_segs[i].ds_addr;
 | |
| +			pd->pd_size = re->re_src_segs[i].ds_len;
 | |
| +			pd->pd_flags = SAFE_PD_READY;
 | |
| +		}
 | |
| +		cmd0 |= SAFE_SA_CMD0_IGATHER;
 | |
| +	} else {
 | |
| +		/*
 | |
| +		 * No need for gather, reference the operand directly.
 | |
| +		 */
 | |
| +		re->re_desc.d_src = re->re_src_segs[0].ds_addr;
 | |
| +	}
 | |
| +
 | |
| +	if (enccrd == NULL && maccrd != NULL) {
 | |
| +		/*
 | |
| +		 * Hash op; no destination needed.
 | |
| +		 */
 | |
| +	} else {
 | |
| +		if (crp->crp_flags & (CRYPTO_F_IOV|CRYPTO_F_SKBUF)) {
 | |
| +			if (!nicealign) {
 | |
| +				safestats.st_iovmisaligned++;
 | |
| +				err = EINVAL;
 | |
| +				goto errout;
 | |
| +			}
 | |
| +			if (uniform != 1) {
 | |
| +				device_printf(sc->sc_dev, "!uniform source\n");
 | |
| +				if (!uniform) {
 | |
| +					/*
 | |
| +					 * There's no way to handle the DMA
 | |
| +					 * requirements with this uio.  We
 | |
| +					 * could create a separate DMA area for
 | |
| +					 * the result and then copy it back,
 | |
| +					 * but for now we just bail and return
 | |
| +					 * an error.  Note that uio requests
 | |
| +					 * > SAFE_MAX_DSIZE are handled because
 | |
| +					 * the DMA map and segment list for the
 | |
| +					 * destination wil result in a
 | |
| +					 * destination particle list that does
 | |
| +					 * the necessary scatter DMA.
 | |
| +					 */
 | |
| +					safestats.st_iovnotuniform++;
 | |
| +					err = EINVAL;
 | |
| +					goto errout;
 | |
| +				}
 | |
| +			} else
 | |
| +				re->re_dst = re->re_src;
 | |
| +		} else {
 | |
| +			safestats.st_badflags++;
 | |
| +			err = EINVAL;
 | |
| +			goto errout;
 | |
| +		}
 | |
| +
 | |
| +		if (re->re_dst.nsegs > 1) {
 | |
| +			re->re_desc.d_dst = sc->sc_dpalloc.dma_paddr +
 | |
| +			    ((caddr_t) sc->sc_dpfree - (caddr_t) sc->sc_dpring);
 | |
| +			for (i = 0; i < re->re_dst_nsegs; i++) {
 | |
| +				pd = sc->sc_dpfree;
 | |
| +				KASSERT((pd->pd_flags&3) == 0 ||
 | |
| +					(pd->pd_flags&3) == SAFE_PD_DONE,
 | |
| +					("bogus dest particle descriptor; flags %x",
 | |
| +						pd->pd_flags));
 | |
| +				if (++(sc->sc_dpfree) == sc->sc_dpringtop)
 | |
| +					sc->sc_dpfree = sc->sc_dpring;
 | |
| +				pd->pd_addr = re->re_dst_segs[i].ds_addr;
 | |
| +				pd->pd_flags = SAFE_PD_READY;
 | |
| +			}
 | |
| +			cmd0 |= SAFE_SA_CMD0_OSCATTER;
 | |
| +		} else {
 | |
| +			/*
 | |
| +			 * No need for scatter, reference the operand directly.
 | |
| +			 */
 | |
| +			re->re_desc.d_dst = re->re_dst_segs[0].ds_addr;
 | |
| +		}
 | |
| +	}
 | |
| +
 | |
| +	/*
 | |
| +	 * All done with setup; fillin the SA command words
 | |
| +	 * and the packet engine descriptor.  The operation
 | |
| +	 * is now ready for submission to the hardware.
 | |
| +	 */
 | |
| +	sa->sa_cmd0 = cmd0 | SAFE_SA_CMD0_IPCI | SAFE_SA_CMD0_OPCI;
 | |
| +	sa->sa_cmd1 = cmd1
 | |
| +		    | (coffset << SAFE_SA_CMD1_OFFSET_S)
 | |
| +		    | SAFE_SA_CMD1_SAREV1	/* Rev 1 SA data structure */
 | |
| +		    | SAFE_SA_CMD1_SRPCI
 | |
| +		    ;
 | |
| +	/*
 | |
| +	 * NB: the order of writes is important here.  In case the
 | |
| +	 * chip is scanning the ring because of an outstanding request
 | |
| +	 * it might nab this one too.  In that case we need to make
 | |
| +	 * sure the setup is complete before we write the length
 | |
| +	 * field of the descriptor as it signals the descriptor is
 | |
| +	 * ready for processing.
 | |
| +	 */
 | |
| +	re->re_desc.d_csr = SAFE_PE_CSR_READY | SAFE_PE_CSR_SAPCI;
 | |
| +	if (maccrd)
 | |
| +		re->re_desc.d_csr |= SAFE_PE_CSR_LOADSA | SAFE_PE_CSR_HASHFINAL;
 | |
| +	wmb();
 | |
| +	re->re_desc.d_len = oplen
 | |
| +			  | SAFE_PE_LEN_READY
 | |
| +			  | (bypass << SAFE_PE_LEN_BYPASS_S)
 | |
| +			  ;
 | |
| +
 | |
| +	safestats.st_ipackets++;
 | |
| +	safestats.st_ibytes += oplen;
 | |
| +
 | |
| +	if (++(sc->sc_front) == sc->sc_ringtop)
 | |
| +		sc->sc_front = sc->sc_ring;
 | |
| +
 | |
| +	/* XXX honor batching */
 | |
| +	safe_feed(sc, re);
 | |
| +	spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
 | |
| +	return (0);
 | |
| +
 | |
| +errout:
 | |
| +	if (re->re_src.map != re->re_dst.map)
 | |
| +		pci_unmap_operand(sc, &re->re_dst);
 | |
| +	if (re->re_src.map)
 | |
| +		pci_unmap_operand(sc, &re->re_src);
 | |
| +	spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
 | |
| +	if (err != ERESTART) {
 | |
| +		crp->crp_etype = err;
 | |
| +		crypto_done(crp);
 | |
| +	} else {
 | |
| +		sc->sc_needwakeup |= CRYPTO_SYMQ;
 | |
| +	}
 | |
| +	return (err);
 | |
| +}
 | |
| +
 | |
| +static void
 | |
| +safe_callback(struct safe_softc *sc, struct safe_ringentry *re)
 | |
| +{
 | |
| +	struct cryptop *crp = (struct cryptop *)re->re_crp;
 | |
| +	struct cryptodesc *crd;
 | |
| +
 | |
| +	DPRINTF(("%s()\n", __FUNCTION__));
 | |
| +
 | |
| +	safestats.st_opackets++;
 | |
| +	safestats.st_obytes += re->re_dst.mapsize;
 | |
| +
 | |
| +	if (re->re_desc.d_csr & SAFE_PE_CSR_STATUS) {
 | |
| +		device_printf(sc->sc_dev, "csr 0x%x cmd0 0x%x cmd1 0x%x\n",
 | |
| +			re->re_desc.d_csr,
 | |
| +			re->re_sa.sa_cmd0, re->re_sa.sa_cmd1);
 | |
| +		safestats.st_peoperr++;
 | |
| +		crp->crp_etype = EIO;		/* something more meaningful? */
 | |
| +	}
 | |
| +
 | |
| +	if (re->re_dst.map != NULL && re->re_dst.map != re->re_src.map)
 | |
| +		pci_unmap_operand(sc, &re->re_dst);
 | |
| +	pci_unmap_operand(sc, &re->re_src);
 | |
| +
 | |
| +	/*
 | |
| +	 * If result was written to a differet mbuf chain, swap
 | |
| +	 * it in as the return value and reclaim the original.
 | |
| +	 */
 | |
| +	if ((crp->crp_flags & CRYPTO_F_SKBUF) && re->re_src_skb != re->re_dst_skb) {
 | |
| +		device_printf(sc->sc_dev, "no CRYPTO_F_SKBUF swapping support\n");
 | |
| +		/* kfree_skb(skb) */
 | |
| +		/* crp->crp_buf = (caddr_t)re->re_dst_skb */
 | |
| +		return;
 | |
| +	}
 | |
| +
 | |
| +	if (re->re_flags & SAFE_QFLAGS_COPYOUTIV) {
 | |
| +		/* copy out IV for future use */
 | |
| +		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
 | |
| +			int i;
 | |
| +			int ivsize;
 | |
| +
 | |
| +			if (crd->crd_alg == CRYPTO_DES_CBC ||
 | |
| +			    crd->crd_alg == CRYPTO_3DES_CBC) {
 | |
| +				ivsize = 2*sizeof(u_int32_t);
 | |
| +			} else if (crd->crd_alg == CRYPTO_AES_CBC) {
 | |
| +				ivsize = 4*sizeof(u_int32_t);
 | |
| +			} else
 | |
| +				continue;
 | |
| +			crypto_copydata(crp->crp_flags, crp->crp_buf,
 | |
| +			    crd->crd_skip + crd->crd_len - ivsize, ivsize,
 | |
| +			    (caddr_t)sc->sc_sessions[re->re_sesn].ses_iv);
 | |
| +			for (i = 0;
 | |
| +					i < ivsize/sizeof(sc->sc_sessions[re->re_sesn].ses_iv[0]);
 | |
| +					i++)
 | |
| +				sc->sc_sessions[re->re_sesn].ses_iv[i] =
 | |
| +					cpu_to_le32(sc->sc_sessions[re->re_sesn].ses_iv[i]);
 | |
| +			break;
 | |
| +		}
 | |
| +	}
 | |
| +
 | |
| +	if (re->re_flags & SAFE_QFLAGS_COPYOUTICV) {
 | |
| +		/* copy out ICV result */
 | |
| +		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
 | |
| +			if (!(crd->crd_alg == CRYPTO_MD5_HMAC ||
 | |
| +			    crd->crd_alg == CRYPTO_SHA1_HMAC ||
 | |
| +			    crd->crd_alg == CRYPTO_NULL_HMAC))
 | |
| +				continue;
 | |
| +			if (crd->crd_alg == CRYPTO_SHA1_HMAC) {
 | |
| +				/*
 | |
| +				 * SHA-1 ICV's are byte-swapped; fix 'em up
 | |
| +				 * before copy them to their destination.
 | |
| +				 */
 | |
| +				re->re_sastate.sa_saved_indigest[0] =
 | |
| +					cpu_to_be32(re->re_sastate.sa_saved_indigest[0]);
 | |
| +				re->re_sastate.sa_saved_indigest[1] =
 | |
| +					cpu_to_be32(re->re_sastate.sa_saved_indigest[1]);
 | |
| +				re->re_sastate.sa_saved_indigest[2] =
 | |
| +					cpu_to_be32(re->re_sastate.sa_saved_indigest[2]);
 | |
| +			} else {
 | |
| +				re->re_sastate.sa_saved_indigest[0] =
 | |
| +					cpu_to_le32(re->re_sastate.sa_saved_indigest[0]);
 | |
| +				re->re_sastate.sa_saved_indigest[1] =
 | |
| +					cpu_to_le32(re->re_sastate.sa_saved_indigest[1]);
 | |
| +				re->re_sastate.sa_saved_indigest[2] =
 | |
| +					cpu_to_le32(re->re_sastate.sa_saved_indigest[2]);
 | |
| +			}
 | |
| +			crypto_copyback(crp->crp_flags, crp->crp_buf,
 | |
| +			    crd->crd_inject,
 | |
| +			    sc->sc_sessions[re->re_sesn].ses_mlen,
 | |
| +			    (caddr_t)re->re_sastate.sa_saved_indigest);
 | |
| +			break;
 | |
| +		}
 | |
| +	}
 | |
| +	crypto_done(crp);
 | |
| +}
 | |
| +
 | |
| +
 | |
| +#if defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG)
 | |
| +#define	SAFE_RNG_MAXWAIT	1000
 | |
| +
 | |
| +static void
 | |
| +safe_rng_init(struct safe_softc *sc)
 | |
| +{
 | |
| +	u_int32_t w, v;
 | |
| +	int i;
 | |
| +
 | |
| +	DPRINTF(("%s()\n", __FUNCTION__));
 | |
| +
 | |
| +	WRITE_REG(sc, SAFE_RNG_CTRL, 0);
 | |
| +	/* use default value according to the manual */
 | |
| +	WRITE_REG(sc, SAFE_RNG_CNFG, 0x834);	/* magic from SafeNet */
 | |
| +	WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);
 | |
| +
 | |
| +	/*
 | |
| +	 * There is a bug in rev 1.0 of the 1140 that when the RNG
 | |
| +	 * is brought out of reset the ready status flag does not
 | |
| +	 * work until the RNG has finished its internal initialization.
 | |
| +	 *
 | |
| +	 * So in order to determine the device is through its
 | |
| +	 * initialization we must read the data register, using the
 | |
| +	 * status reg in the read in case it is initialized.  Then read
 | |
| +	 * the data register until it changes from the first read.
 | |
| +	 * Once it changes read the data register until it changes
 | |
| +	 * again.  At this time the RNG is considered initialized.
 | |
| +	 * This could take between 750ms - 1000ms in time.
 | |
| +	 */
 | |
| +	i = 0;
 | |
| +	w = READ_REG(sc, SAFE_RNG_OUT);
 | |
| +	do {
 | |
| +		v = READ_REG(sc, SAFE_RNG_OUT);
 | |
| +		if (v != w) {
 | |
| +			w = v;
 | |
| +			break;
 | |
| +		}
 | |
| +		DELAY(10);
 | |
| +	} while (++i < SAFE_RNG_MAXWAIT);
 | |
| +
 | |
| +	/* Wait Until data changes again */
 | |
| +	i = 0;
 | |
| +	do {
 | |
| +		v = READ_REG(sc, SAFE_RNG_OUT);
 | |
| +		if (v != w)
 | |
| +			break;
 | |
| +		DELAY(10);
 | |
| +	} while (++i < SAFE_RNG_MAXWAIT);
 | |
| +}
 | |
| +
 | |
| +static __inline void
 | |
| +safe_rng_disable_short_cycle(struct safe_softc *sc)
 | |
| +{
 | |
| +	DPRINTF(("%s()\n", __FUNCTION__));
 | |
| +
 | |
| +	WRITE_REG(sc, SAFE_RNG_CTRL,
 | |
| +		READ_REG(sc, SAFE_RNG_CTRL) &~ SAFE_RNG_CTRL_SHORTEN);
 | |
| +}
 | |
| +
 | |
| +static __inline void
 | |
| +safe_rng_enable_short_cycle(struct safe_softc *sc)
 | |
| +{
 | |
| +	DPRINTF(("%s()\n", __FUNCTION__));
 | |
| +
 | |
| +	WRITE_REG(sc, SAFE_RNG_CTRL,
 | |
| +		READ_REG(sc, SAFE_RNG_CTRL) | SAFE_RNG_CTRL_SHORTEN);
 | |
| +}
 | |
| +
 | |
| +static __inline u_int32_t
 | |
| +safe_rng_read(struct safe_softc *sc)
 | |
| +{
 | |
| +	int i;
 | |
| +
 | |
| +	i = 0;
 | |
| +	while (READ_REG(sc, SAFE_RNG_STAT) != 0 && ++i < SAFE_RNG_MAXWAIT)
 | |
| +		;
 | |
| +	return READ_REG(sc, SAFE_RNG_OUT);
 | |
| +}
 | |
| +
 | |
| +static int
 | |
| +safe_read_random(void *arg, u_int32_t *buf, int maxwords)
 | |
| +{
 | |
| +	struct safe_softc *sc = (struct safe_softc *) arg;
 | |
| +	int i, rc;
 | |
| +
 | |
| +	DPRINTF(("%s()\n", __FUNCTION__));
 | |
| +
 | |
| +	safestats.st_rng++;
 | |
| +	/*
 | |
| +	 * Fetch the next block of data.
 | |
| +	 */
 | |
| +	if (maxwords > safe_rngbufsize)
 | |
| +		maxwords = safe_rngbufsize;
 | |
| +	if (maxwords > SAFE_RNG_MAXBUFSIZ)
 | |
| +		maxwords = SAFE_RNG_MAXBUFSIZ;
 | |
| +retry:
 | |
| +	/* read as much as we can */
 | |
| +	for (rc = 0; rc < maxwords; rc++) {
 | |
| +		if (READ_REG(sc, SAFE_RNG_STAT) != 0)
 | |
| +			break;
 | |
| +		buf[rc] = READ_REG(sc, SAFE_RNG_OUT);
 | |
| +	}
 | |
| +	if (rc == 0)
 | |
| +		return 0;
 | |
| +	/*
 | |
| +	 * Check the comparator alarm count and reset the h/w if
 | |
| +	 * it exceeds our threshold.  This guards against the
 | |
| +	 * hardware oscillators resonating with external signals.
 | |
| +	 */
 | |
| +	if (READ_REG(sc, SAFE_RNG_ALM_CNT) > safe_rngmaxalarm) {
 | |
| +		u_int32_t freq_inc, w;
 | |
| +
 | |
| +		DPRINTF(("%s: alarm count %u exceeds threshold %u\n", __func__,
 | |
| +			(unsigned)READ_REG(sc, SAFE_RNG_ALM_CNT), safe_rngmaxalarm));
 | |
| +		safestats.st_rngalarm++;
 | |
| +		safe_rng_enable_short_cycle(sc);
 | |
| +		freq_inc = 18;
 | |
| +		for (i = 0; i < 64; i++) {
 | |
| +			w = READ_REG(sc, SAFE_RNG_CNFG);
 | |
| +			freq_inc = ((w + freq_inc) & 0x3fL);
 | |
| +			w = ((w & ~0x3fL) | freq_inc);
 | |
| +			WRITE_REG(sc, SAFE_RNG_CNFG, w);
 | |
| +
 | |
| +			WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);
 | |
| +
 | |
| +			(void) safe_rng_read(sc);
 | |
| +			DELAY(25);
 | |
| +
 | |
| +			if (READ_REG(sc, SAFE_RNG_ALM_CNT) == 0) {
 | |
| +				safe_rng_disable_short_cycle(sc);
 | |
| +				goto retry;
 | |
| +			}
 | |
| +			freq_inc = 1;
 | |
| +		}
 | |
| +		safe_rng_disable_short_cycle(sc);
 | |
| +	} else
 | |
| +		WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);
 | |
| +
 | |
| +	return(rc);
 | |
| +}
 | |
| +#endif /* defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG) */
 | |
| +
 | |
| +
 | |
| +/*
 | |
| + * Resets the board.  Values in the regesters are left as is
 | |
| + * from the reset (i.e. initial values are assigned elsewhere).
 | |
| + */
 | |
| +static void
 | |
| +safe_reset_board(struct safe_softc *sc)
 | |
| +{
 | |
| +	u_int32_t v;
 | |
| +	/*
 | |
| +	 * Reset the device.  The manual says no delay
 | |
| +	 * is needed between marking and clearing reset.
 | |
| +	 */
 | |
| +	DPRINTF(("%s()\n", __FUNCTION__));
 | |
| +
 | |
| +	v = READ_REG(sc, SAFE_PE_DMACFG) &~
 | |
| +		(SAFE_PE_DMACFG_PERESET | SAFE_PE_DMACFG_PDRRESET |
 | |
| +		 SAFE_PE_DMACFG_SGRESET);
 | |
| +	WRITE_REG(sc, SAFE_PE_DMACFG, v
 | |
| +				    | SAFE_PE_DMACFG_PERESET
 | |
| +				    | SAFE_PE_DMACFG_PDRRESET
 | |
| +				    | SAFE_PE_DMACFG_SGRESET);
 | |
| +	WRITE_REG(sc, SAFE_PE_DMACFG, v);
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Initialize registers we need to touch only once.
 | |
| + */
 | |
| +static void
 | |
| +safe_init_board(struct safe_softc *sc)
 | |
| +{
 | |
| +	u_int32_t v, dwords;
 | |
| +
 | |
| +	DPRINTF(("%s()\n", __FUNCTION__));
 | |
| +
 | |
| +	v = READ_REG(sc, SAFE_PE_DMACFG);
 | |
| +	v &=~ (   SAFE_PE_DMACFG_PEMODE
 | |
| +			| SAFE_PE_DMACFG_FSENA		/* failsafe enable */
 | |
| +			| SAFE_PE_DMACFG_GPRPCI		/* gather ring on PCI */
 | |
| +			| SAFE_PE_DMACFG_SPRPCI		/* scatter ring on PCI */
 | |
| +			| SAFE_PE_DMACFG_ESDESC		/* endian-swap descriptors */
 | |
| +			| SAFE_PE_DMACFG_ESPDESC	/* endian-swap part. desc's */
 | |
| +			| SAFE_PE_DMACFG_ESSA		/* endian-swap SA's */
 | |
| +			| SAFE_PE_DMACFG_ESPACKET	/* swap the packet data */
 | |
| +		  );
 | |
| +	v |= SAFE_PE_DMACFG_FSENA		/* failsafe enable */
 | |
| +	  |  SAFE_PE_DMACFG_GPRPCI		/* gather ring on PCI */
 | |
| +	  |  SAFE_PE_DMACFG_SPRPCI		/* scatter ring on PCI */
 | |
| +	  |  SAFE_PE_DMACFG_ESDESC		/* endian-swap descriptors */
 | |
| +	  |  SAFE_PE_DMACFG_ESPDESC		/* endian-swap part. desc's */
 | |
| +	  |  SAFE_PE_DMACFG_ESSA		/* endian-swap SA's */
 | |
| +#if 0
 | |
| +	  |  SAFE_PE_DMACFG_ESPACKET    /* swap the packet data */
 | |
| +#endif
 | |
| +	  ;
 | |
| +	WRITE_REG(sc, SAFE_PE_DMACFG, v);
 | |
| +
 | |
| +#ifdef __BIG_ENDIAN
 | |
| +	/* tell the safenet that we are 4321 and not 1234 */
 | |
| +	WRITE_REG(sc, SAFE_ENDIAN, 0xe4e41b1b);
 | |
| +#endif
 | |
| +
 | |
| +	if (sc->sc_chiprev == SAFE_REV(1,0)) {
 | |
| +		/*
 | |
| +		 * Avoid large PCI DMA transfers.  Rev 1.0 has a bug where
 | |
| +		 * "target mode transfers" done while the chip is DMA'ing
 | |
| +		 * >1020 bytes cause the hardware to lockup.  To avoid this
 | |
| +		 * we reduce the max PCI transfer size and use small source
 | |
| +		 * particle descriptors (<= 256 bytes).
 | |
| +		 */
 | |
| +		WRITE_REG(sc, SAFE_DMA_CFG, 256);
 | |
| +		device_printf(sc->sc_dev,
 | |
| +			"Reduce max DMA size to %u words for rev %u.%u WAR\n",
 | |
| +			(unsigned) ((READ_REG(sc, SAFE_DMA_CFG)>>2) & 0xff),
 | |
| +			(unsigned) SAFE_REV_MAJ(sc->sc_chiprev),
 | |
| +			(unsigned) SAFE_REV_MIN(sc->sc_chiprev));
 | |
| +		sc->sc_max_dsize = 256;
 | |
| +	} else {
 | |
| +		sc->sc_max_dsize = SAFE_MAX_DSIZE;
 | |
| +	}
 | |
| +
 | |
| +	/* NB: operands+results are overlaid */
 | |
| +	WRITE_REG(sc, SAFE_PE_PDRBASE, sc->sc_ringalloc.dma_paddr);
 | |
| +	WRITE_REG(sc, SAFE_PE_RDRBASE, sc->sc_ringalloc.dma_paddr);
 | |
| +	/*
 | |
| +	 * Configure ring entry size and number of items in the ring.
 | |
| +	 */
 | |
| +	KASSERT((sizeof(struct safe_ringentry) % sizeof(u_int32_t)) == 0,
 | |
| +		("PE ring entry not 32-bit aligned!"));
 | |
| +	dwords = sizeof(struct safe_ringentry) / sizeof(u_int32_t);
 | |
| +	WRITE_REG(sc, SAFE_PE_RINGCFG,
 | |
| +		(dwords << SAFE_PE_RINGCFG_OFFSET_S) | SAFE_MAX_NQUEUE);
 | |
| +	WRITE_REG(sc, SAFE_PE_RINGPOLL, 0);	/* disable polling */
 | |
| +
 | |
| +	WRITE_REG(sc, SAFE_PE_GRNGBASE, sc->sc_spalloc.dma_paddr);
 | |
| +	WRITE_REG(sc, SAFE_PE_SRNGBASE, sc->sc_dpalloc.dma_paddr);
 | |
| +	WRITE_REG(sc, SAFE_PE_PARTSIZE,
 | |
| +		(SAFE_TOTAL_DPART<<16) | SAFE_TOTAL_SPART);
 | |
| +	/*
 | |
| +	 * NB: destination particles are fixed size.  We use
 | |
| +	 *     an mbuf cluster and require all results go to
 | |
| +	 *     clusters or smaller.
 | |
| +	 */
 | |
| +	WRITE_REG(sc, SAFE_PE_PARTCFG, sc->sc_max_dsize);
 | |
| +
 | |
| +	/* it's now safe to enable PE mode, do it */
 | |
| +	WRITE_REG(sc, SAFE_PE_DMACFG, v | SAFE_PE_DMACFG_PEMODE);
 | |
| +
 | |
| +	/*
 | |
| +	 * Configure hardware to use level-triggered interrupts and
 | |
| +	 * to interrupt after each descriptor is processed.
 | |
| +	 */
 | |
| +	WRITE_REG(sc, SAFE_HI_CFG, SAFE_HI_CFG_LEVEL);
 | |
| +	WRITE_REG(sc, SAFE_HI_CLR, 0xffffffff);
 | |
| +	WRITE_REG(sc, SAFE_HI_DESC_CNT, 1);
 | |
| +	WRITE_REG(sc, SAFE_HI_MASK, SAFE_INT_PE_DDONE | SAFE_INT_PE_ERROR);
 | |
| +}
 | |
| +
 | |
| +
 | |
| +/*
 | |
| + * Clean up after a chip crash.
 | |
| + * It is assumed that the caller in splimp()
 | |
| + */
 | |
| +static void
 | |
| +safe_cleanchip(struct safe_softc *sc)
 | |
| +{
 | |
| +	DPRINTF(("%s()\n", __FUNCTION__));
 | |
| +
 | |
| +	if (sc->sc_nqchip != 0) {
 | |
| +		struct safe_ringentry *re = sc->sc_back;
 | |
| +
 | |
| +		while (re != sc->sc_front) {
 | |
| +			if (re->re_desc.d_csr != 0)
 | |
| +				safe_free_entry(sc, re);
 | |
| +			if (++re == sc->sc_ringtop)
 | |
| +				re = sc->sc_ring;
 | |
| +		}
 | |
| +		sc->sc_back = re;
 | |
| +		sc->sc_nqchip = 0;
 | |
| +	}
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * free a safe_q
 | |
| + * It is assumed that the caller is within splimp().
 | |
| + */
 | |
| +static int
 | |
| +safe_free_entry(struct safe_softc *sc, struct safe_ringentry *re)
 | |
| +{
 | |
| +	struct cryptop *crp;
 | |
| +
 | |
| +	DPRINTF(("%s()\n", __FUNCTION__));
 | |
| +
 | |
| +	/*
 | |
| +	 * Free header MCR
 | |
| +	 */
 | |
| +	if ((re->re_dst_skb != NULL) && (re->re_src_skb != re->re_dst_skb))
 | |
| +#ifdef NOTYET
 | |
| +		m_freem(re->re_dst_m);
 | |
| +#else
 | |
| +		printk("%s,%d: SKB not supported\n", __FILE__, __LINE__);
 | |
| +#endif
 | |
| +
 | |
| +	crp = (struct cryptop *)re->re_crp;
 | |
| +
 | |
| +	re->re_desc.d_csr = 0;
 | |
| +
 | |
| +	crp->crp_etype = EFAULT;
 | |
| +	crypto_done(crp);
 | |
| +	return(0);
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Routine to reset the chip and clean up.
 | |
| + * It is assumed that the caller is in splimp()
 | |
| + */
 | |
| +static void
 | |
| +safe_totalreset(struct safe_softc *sc)
 | |
| +{
 | |
| +	DPRINTF(("%s()\n", __FUNCTION__));
 | |
| +
 | |
| +	safe_reset_board(sc);
 | |
| +	safe_init_board(sc);
 | |
| +	safe_cleanchip(sc);
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Is the operand suitable aligned for direct DMA.  Each
 | |
| + * segment must be aligned on a 32-bit boundary and all
 | |
| + * but the last segment must be a multiple of 4 bytes.
 | |
| + */
 | |
| +static int
 | |
| +safe_dmamap_aligned(struct safe_softc *sc, const struct safe_operand *op)
 | |
| +{
 | |
| +	int i;
 | |
| +
 | |
| +	DPRINTF(("%s()\n", __FUNCTION__));
 | |
| +
 | |
| +	for (i = 0; i < op->nsegs; i++) {
 | |
| +		if (op->segs[i].ds_addr & 3)
 | |
| +			return (0);
 | |
| +		if (i != (op->nsegs - 1) && (op->segs[i].ds_len & 3))
 | |
| +			return (0);
 | |
| +	}
 | |
| +	return (1);
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Is the operand suitable for direct DMA as the destination
 | |
| + * of an operation.  The hardware requires that each ``particle''
 | |
| + * but the last in an operation result have the same size.  We
 | |
| + * fix that size at SAFE_MAX_DSIZE bytes.  This routine returns
 | |
| + * 0 if some segment is not a multiple of of this size, 1 if all
 | |
| + * segments are exactly this size, or 2 if segments are at worst
 | |
| + * a multple of this size.
 | |
| + */
 | |
| +static int
 | |
| +safe_dmamap_uniform(struct safe_softc *sc, const struct safe_operand *op)
 | |
| +{
 | |
| +	int result = 1;
 | |
| +
 | |
| +	DPRINTF(("%s()\n", __FUNCTION__));
 | |
| +
 | |
| +	if (op->nsegs > 0) {
 | |
| +		int i;
 | |
| +
 | |
| +		for (i = 0; i < op->nsegs-1; i++) {
 | |
| +			if (op->segs[i].ds_len % sc->sc_max_dsize)
 | |
| +				return (0);
 | |
| +			if (op->segs[i].ds_len != sc->sc_max_dsize)
 | |
| +				result = 2;
 | |
| +		}
 | |
| +	}
 | |
| +	return (result);
 | |
| +}
 | |
| +
 | |
| +static int
 | |
| +safe_kprocess(device_t dev, struct cryptkop *krp, int hint)
 | |
| +{
 | |
| +	struct safe_softc *sc = device_get_softc(dev);
 | |
| +	struct safe_pkq *q;
 | |
| +	unsigned long flags;
 | |
| +
 | |
| +	DPRINTF(("%s()\n", __FUNCTION__));
 | |
| +
 | |
| +	if (sc == NULL) {
 | |
| +		krp->krp_status = EINVAL;
 | |
| +		goto err;
 | |
| +	}
 | |
| +
 | |
| +	if (krp->krp_op != CRK_MOD_EXP) {
 | |
| +		krp->krp_status = EOPNOTSUPP;
 | |
| +		goto err;
 | |
| +	}
 | |
| +
 | |
| +	q = (struct safe_pkq *) kmalloc(sizeof(*q), GFP_KERNEL);
 | |
| +	if (q == NULL) {
 | |
| +		krp->krp_status = ENOMEM;
 | |
| +		goto err;
 | |
| +	}
 | |
| +	memset(q, 0, sizeof(*q));
 | |
| +	q->pkq_krp = krp;
 | |
| +	INIT_LIST_HEAD(&q->pkq_list);
 | |
| +
 | |
| +	spin_lock_irqsave(&sc->sc_pkmtx, flags);
 | |
| +	list_add_tail(&q->pkq_list, &sc->sc_pkq);
 | |
| +	safe_kfeed(sc);
 | |
| +	spin_unlock_irqrestore(&sc->sc_pkmtx, flags);
 | |
| +	return (0);
 | |
| +
 | |
| +err:
 | |
| +	crypto_kdone(krp);
 | |
| +	return (0);
 | |
| +}
 | |
| +
 | |
| +#define	SAFE_CRK_PARAM_BASE	0
 | |
| +#define	SAFE_CRK_PARAM_EXP	1
 | |
| +#define	SAFE_CRK_PARAM_MOD	2
 | |
| +
 | |
| +static int
 | |
| +safe_kstart(struct safe_softc *sc)
 | |
| +{
 | |
| +	struct cryptkop *krp = sc->sc_pkq_cur->pkq_krp;
 | |
| +	int exp_bits, mod_bits, base_bits;
 | |
| +	u_int32_t op, a_off, b_off, c_off, d_off;
 | |
| +
 | |
| +	DPRINTF(("%s()\n", __FUNCTION__));
 | |
| +
 | |
| +	if (krp->krp_iparams < 3 || krp->krp_oparams != 1) {
 | |
| +		krp->krp_status = EINVAL;
 | |
| +		return (1);
 | |
| +	}
 | |
| +
 | |
| +	base_bits = safe_ksigbits(sc, &krp->krp_param[SAFE_CRK_PARAM_BASE]);
 | |
| +	if (base_bits > 2048)
 | |
| +		goto too_big;
 | |
| +	if (base_bits <= 0)		/* 5. base not zero */
 | |
| +		goto too_small;
 | |
| +
 | |
| +	exp_bits = safe_ksigbits(sc, &krp->krp_param[SAFE_CRK_PARAM_EXP]);
 | |
| +	if (exp_bits > 2048)
 | |
| +		goto too_big;
 | |
| +	if (exp_bits <= 0)		/* 1. exponent word length > 0 */
 | |
| +		goto too_small;		/* 4. exponent not zero */
 | |
| +
 | |
| +	mod_bits = safe_ksigbits(sc, &krp->krp_param[SAFE_CRK_PARAM_MOD]);
 | |
| +	if (mod_bits > 2048)
 | |
| +		goto too_big;
 | |
| +	if (mod_bits <= 32)		/* 2. modulus word length > 1 */
 | |
| +		goto too_small;		/* 8. MSW of modulus != zero */
 | |
| +	if (mod_bits < exp_bits)	/* 3 modulus len >= exponent len */
 | |
| +		goto too_small;
 | |
| +	if ((krp->krp_param[SAFE_CRK_PARAM_MOD].crp_p[0] & 1) == 0)
 | |
| +		goto bad_domain;	/* 6. modulus is odd */
 | |
| +	if (mod_bits > krp->krp_param[krp->krp_iparams].crp_nbits)
 | |
| +		goto too_small;		/* make sure result will fit */
 | |
| +
 | |
| +	/* 7. modulus > base */
 | |
| +	if (mod_bits < base_bits)
 | |
| +		goto too_small;
 | |
| +	if (mod_bits == base_bits) {
 | |
| +		u_int8_t *basep, *modp;
 | |
| +		int i;
 | |
| +
 | |
| +		basep = krp->krp_param[SAFE_CRK_PARAM_BASE].crp_p +
 | |
| +		    ((base_bits + 7) / 8) - 1;
 | |
| +		modp = krp->krp_param[SAFE_CRK_PARAM_MOD].crp_p +
 | |
| +		    ((mod_bits + 7) / 8) - 1;
 | |
| +
 | |
| +		for (i = 0; i < (mod_bits + 7) / 8; i++, basep--, modp--) {
 | |
| +			if (*modp < *basep)
 | |
| +				goto too_small;
 | |
| +			if (*modp > *basep)
 | |
| +				break;
 | |
| +		}
 | |
| +	}
 | |
| +
 | |
| +	/* And on the 9th step, he rested. */
 | |
| +
 | |
| +	WRITE_REG(sc, SAFE_PK_A_LEN, (exp_bits + 31) / 32);
 | |
| +	WRITE_REG(sc, SAFE_PK_B_LEN, (mod_bits + 31) / 32);
 | |
| +	if (mod_bits > 1024) {
 | |
| +		op = SAFE_PK_FUNC_EXP4;
 | |
| +		a_off = 0x000;
 | |
| +		b_off = 0x100;
 | |
| +		c_off = 0x200;
 | |
| +		d_off = 0x300;
 | |
| +	} else {
 | |
| +		op = SAFE_PK_FUNC_EXP16;
 | |
| +		a_off = 0x000;
 | |
| +		b_off = 0x080;
 | |
| +		c_off = 0x100;
 | |
| +		d_off = 0x180;
 | |
| +	}
 | |
| +	sc->sc_pk_reslen = b_off - a_off;
 | |
| +	sc->sc_pk_resoff = d_off;
 | |
| +
 | |
| +	/* A is exponent, B is modulus, C is base, D is result */
 | |
| +	safe_kload_reg(sc, a_off, b_off - a_off,
 | |
| +	    &krp->krp_param[SAFE_CRK_PARAM_EXP]);
 | |
| +	WRITE_REG(sc, SAFE_PK_A_ADDR, a_off >> 2);
 | |
| +	safe_kload_reg(sc, b_off, b_off - a_off,
 | |
| +	    &krp->krp_param[SAFE_CRK_PARAM_MOD]);
 | |
| +	WRITE_REG(sc, SAFE_PK_B_ADDR, b_off >> 2);
 | |
| +	safe_kload_reg(sc, c_off, b_off - a_off,
 | |
| +	    &krp->krp_param[SAFE_CRK_PARAM_BASE]);
 | |
| +	WRITE_REG(sc, SAFE_PK_C_ADDR, c_off >> 2);
 | |
| +	WRITE_REG(sc, SAFE_PK_D_ADDR, d_off >> 2);
 | |
| +
 | |
| +	WRITE_REG(sc, SAFE_PK_FUNC, op | SAFE_PK_FUNC_RUN);
 | |
| +
 | |
| +	return (0);
 | |
| +
 | |
| +too_big:
 | |
| +	krp->krp_status = E2BIG;
 | |
| +	return (1);
 | |
| +too_small:
 | |
| +	krp->krp_status = ERANGE;
 | |
| +	return (1);
 | |
| +bad_domain:
 | |
| +	krp->krp_status = EDOM;
 | |
| +	return (1);
 | |
| +}
 | |
| +
 | |
| +static int
 | |
| +safe_ksigbits(struct safe_softc *sc, struct crparam *cr)
 | |
| +{
 | |
| +	u_int plen = (cr->crp_nbits + 7) / 8;
 | |
| +	int i, sig = plen * 8;
 | |
| +	u_int8_t c, *p = cr->crp_p;
 | |
| +
 | |
| +	DPRINTF(("%s()\n", __FUNCTION__));
 | |
| +
 | |
| +	for (i = plen - 1; i >= 0; i--) {
 | |
| +		c = p[i];
 | |
| +		if (c != 0) {
 | |
| +			while ((c & 0x80) == 0) {
 | |
| +				sig--;
 | |
| +				c <<= 1;
 | |
| +			}
 | |
| +			break;
 | |
| +		}
 | |
| +		sig -= 8;
 | |
| +	}
 | |
| +	return (sig);
 | |
| +}
 | |
| +
 | |
| +static void
 | |
| +safe_kfeed(struct safe_softc *sc)
 | |
| +{
 | |
| +	struct safe_pkq *q, *tmp;
 | |
| +
 | |
| +	DPRINTF(("%s()\n", __FUNCTION__));
 | |
| +
 | |
| +	if (list_empty(&sc->sc_pkq) && sc->sc_pkq_cur == NULL)
 | |
| +		return;
 | |
| +	if (sc->sc_pkq_cur != NULL)
 | |
| +		return;
 | |
| +	list_for_each_entry_safe(q, tmp, &sc->sc_pkq, pkq_list) {
 | |
| +		sc->sc_pkq_cur = q;
 | |
| +		list_del(&q->pkq_list);
 | |
| +		if (safe_kstart(sc) != 0) {
 | |
| +			crypto_kdone(q->pkq_krp);
 | |
| +			kfree(q);
 | |
| +			sc->sc_pkq_cur = NULL;
 | |
| +		} else {
 | |
| +			/* op started, start polling */
 | |
| +			mod_timer(&sc->sc_pkto, jiffies + 1);
 | |
| +			break;
 | |
| +		}
 | |
| +	}
 | |
| +}
 | |
| +
 | |
| +static void
 | |
| +safe_kpoll(unsigned long arg)
 | |
| +{
 | |
| +	struct safe_softc *sc = NULL;
 | |
| +	struct safe_pkq *q;
 | |
| +	struct crparam *res;
 | |
| +	int i;
 | |
| +	u_int32_t buf[64];
 | |
| +	unsigned long flags;
 | |
| +
 | |
| +	DPRINTF(("%s()\n", __FUNCTION__));
 | |
| +
 | |
| +	if (arg >= SAFE_MAX_CHIPS)
 | |
| +		return;
 | |
| +	sc = safe_chip_idx[arg];
 | |
| +	if (!sc) {
 | |
| +		DPRINTF(("%s() - bad callback\n", __FUNCTION__));
 | |
| +		return;
 | |
| +	}
 | |
| +
 | |
| +	spin_lock_irqsave(&sc->sc_pkmtx, flags);
 | |
| +	if (sc->sc_pkq_cur == NULL)
 | |
| +		goto out;
 | |
| +	if (READ_REG(sc, SAFE_PK_FUNC) & SAFE_PK_FUNC_RUN) {
 | |
| +		/* still running, check back later */
 | |
| +		mod_timer(&sc->sc_pkto, jiffies + 1);
 | |
| +		goto out;
 | |
| +	}
 | |
| +
 | |
| +	q = sc->sc_pkq_cur;
 | |
| +	res = &q->pkq_krp->krp_param[q->pkq_krp->krp_iparams];
 | |
| +	bzero(buf, sizeof(buf));
 | |
| +	bzero(res->crp_p, (res->crp_nbits + 7) / 8);
 | |
| +	for (i = 0; i < sc->sc_pk_reslen >> 2; i++)
 | |
| +		buf[i] = le32_to_cpu(READ_REG(sc, SAFE_PK_RAM_START +
 | |
| +		    sc->sc_pk_resoff + (i << 2)));
 | |
| +	bcopy(buf, res->crp_p, (res->crp_nbits + 7) / 8);
 | |
| +	/*
 | |
| +	 * reduce the bits that need copying if possible
 | |
| +	 */
 | |
| +	res->crp_nbits = min(res->crp_nbits,sc->sc_pk_reslen * 8);
 | |
| +	res->crp_nbits = safe_ksigbits(sc, res);
 | |
| +
 | |
| +	for (i = SAFE_PK_RAM_START; i < SAFE_PK_RAM_END; i += 4)
 | |
| +		WRITE_REG(sc, i, 0);
 | |
| +
 | |
| +	crypto_kdone(q->pkq_krp);
 | |
| +	kfree(q);
 | |
| +	sc->sc_pkq_cur = NULL;
 | |
| +
 | |
| +	safe_kfeed(sc);
 | |
| +out:
 | |
| +	spin_unlock_irqrestore(&sc->sc_pkmtx, flags);
 | |
| +}
 | |
| +
 | |
| +static void
 | |
| +safe_kload_reg(struct safe_softc *sc, u_int32_t off, u_int32_t len,
 | |
| +    struct crparam *n)
 | |
| +{
 | |
| +	u_int32_t buf[64], i;
 | |
| +
 | |
| +	DPRINTF(("%s()\n", __FUNCTION__));
 | |
| +
 | |
| +	bzero(buf, sizeof(buf));
 | |
| +	bcopy(n->crp_p, buf, (n->crp_nbits + 7) / 8);
 | |
| +
 | |
| +	for (i = 0; i < len >> 2; i++)
 | |
| +		WRITE_REG(sc, SAFE_PK_RAM_START + off + (i << 2),
 | |
| +		    cpu_to_le32(buf[i]));
 | |
| +}
 | |
| +
 | |
| +#ifdef SAFE_DEBUG
 | |
| +static void
 | |
| +safe_dump_dmastatus(struct safe_softc *sc, const char *tag)
 | |
| +{
 | |
| +	printf("%s: ENDIAN 0x%x SRC 0x%x DST 0x%x STAT 0x%x\n"
 | |
| +		, tag
 | |
| +		, READ_REG(sc, SAFE_DMA_ENDIAN)
 | |
| +		, READ_REG(sc, SAFE_DMA_SRCADDR)
 | |
| +		, READ_REG(sc, SAFE_DMA_DSTADDR)
 | |
| +		, READ_REG(sc, SAFE_DMA_STAT)
 | |
| +	);
 | |
| +}
 | |
| +
 | |
| +static void
 | |
| +safe_dump_intrstate(struct safe_softc *sc, const char *tag)
 | |
| +{
 | |
| +	printf("%s: HI_CFG 0x%x HI_MASK 0x%x HI_DESC_CNT 0x%x HU_STAT 0x%x HM_STAT 0x%x\n"
 | |
| +		, tag
 | |
| +		, READ_REG(sc, SAFE_HI_CFG)
 | |
| +		, READ_REG(sc, SAFE_HI_MASK)
 | |
| +		, READ_REG(sc, SAFE_HI_DESC_CNT)
 | |
| +		, READ_REG(sc, SAFE_HU_STAT)
 | |
| +		, READ_REG(sc, SAFE_HM_STAT)
 | |
| +	);
 | |
| +}
 | |
| +
 | |
| +static void
 | |
| +safe_dump_ringstate(struct safe_softc *sc, const char *tag)
 | |
| +{
 | |
| +	u_int32_t estat = READ_REG(sc, SAFE_PE_ERNGSTAT);
 | |
| +
 | |
| +	/* NB: assume caller has lock on ring */
 | |
| +	printf("%s: ERNGSTAT %x (next %u) back %lu front %lu\n",
 | |
| +		tag,
 | |
| +		estat, (estat >> SAFE_PE_ERNGSTAT_NEXT_S),
 | |
| +		(unsigned long)(sc->sc_back - sc->sc_ring),
 | |
| +		(unsigned long)(sc->sc_front - sc->sc_ring));
 | |
| +}
 | |
| +
 | |
| +static void
 | |
| +safe_dump_request(struct safe_softc *sc, const char* tag, struct safe_ringentry *re)
 | |
| +{
 | |
| +	int ix, nsegs;
 | |
| +
 | |
| +	ix = re - sc->sc_ring;
 | |
| +	printf("%s: %p (%u): csr %x src %x dst %x sa %x len %x\n"
 | |
| +		, tag
 | |
| +		, re, ix
 | |
| +		, re->re_desc.d_csr
 | |
| +		, re->re_desc.d_src
 | |
| +		, re->re_desc.d_dst
 | |
| +		, re->re_desc.d_sa
 | |
| +		, re->re_desc.d_len
 | |
| +	);
 | |
| +	if (re->re_src.nsegs > 1) {
 | |
| +		ix = (re->re_desc.d_src - sc->sc_spalloc.dma_paddr) /
 | |
| +			sizeof(struct safe_pdesc);
 | |
| +		for (nsegs = re->re_src.nsegs; nsegs; nsegs--) {
 | |
| +			printf(" spd[%u] %p: %p size %u flags %x"
 | |
| +				, ix, &sc->sc_spring[ix]
 | |
| +				, (caddr_t)(uintptr_t) sc->sc_spring[ix].pd_addr
 | |
| +				, sc->sc_spring[ix].pd_size
 | |
| +				, sc->sc_spring[ix].pd_flags
 | |
| +			);
 | |
| +			if (sc->sc_spring[ix].pd_size == 0)
 | |
| +				printf(" (zero!)");
 | |
| +			printf("\n");
 | |
| +			if (++ix == SAFE_TOTAL_SPART)
 | |
| +				ix = 0;
 | |
| +		}
 | |
| +	}
 | |
| +	if (re->re_dst.nsegs > 1) {
 | |
| +		ix = (re->re_desc.d_dst - sc->sc_dpalloc.dma_paddr) /
 | |
| +			sizeof(struct safe_pdesc);
 | |
| +		for (nsegs = re->re_dst.nsegs; nsegs; nsegs--) {
 | |
| +			printf(" dpd[%u] %p: %p flags %x\n"
 | |
| +				, ix, &sc->sc_dpring[ix]
 | |
| +				, (caddr_t)(uintptr_t) sc->sc_dpring[ix].pd_addr
 | |
| +				, sc->sc_dpring[ix].pd_flags
 | |
| +			);
 | |
| +			if (++ix == SAFE_TOTAL_DPART)
 | |
| +				ix = 0;
 | |
| +		}
 | |
| +	}
 | |
| +	printf("sa: cmd0 %08x cmd1 %08x staterec %x\n",
 | |
| +		re->re_sa.sa_cmd0, re->re_sa.sa_cmd1, re->re_sa.sa_staterec);
 | |
| +	printf("sa: key %x %x %x %x %x %x %x %x\n"
 | |
| +		, re->re_sa.sa_key[0]
 | |
| +		, re->re_sa.sa_key[1]
 | |
| +		, re->re_sa.sa_key[2]
 | |
| +		, re->re_sa.sa_key[3]
 | |
| +		, re->re_sa.sa_key[4]
 | |
| +		, re->re_sa.sa_key[5]
 | |
| +		, re->re_sa.sa_key[6]
 | |
| +		, re->re_sa.sa_key[7]
 | |
| +	);
 | |
| +	printf("sa: indigest %x %x %x %x %x\n"
 | |
| +		, re->re_sa.sa_indigest[0]
 | |
| +		, re->re_sa.sa_indigest[1]
 | |
| +		, re->re_sa.sa_indigest[2]
 | |
| +		, re->re_sa.sa_indigest[3]
 | |
| +		, re->re_sa.sa_indigest[4]
 | |
| +	);
 | |
| +	printf("sa: outdigest %x %x %x %x %x\n"
 | |
| +		, re->re_sa.sa_outdigest[0]
 | |
| +		, re->re_sa.sa_outdigest[1]
 | |
| +		, re->re_sa.sa_outdigest[2]
 | |
| +		, re->re_sa.sa_outdigest[3]
 | |
| +		, re->re_sa.sa_outdigest[4]
 | |
| +	);
 | |
| +	printf("sr: iv %x %x %x %x\n"
 | |
| +		, re->re_sastate.sa_saved_iv[0]
 | |
| +		, re->re_sastate.sa_saved_iv[1]
 | |
| +		, re->re_sastate.sa_saved_iv[2]
 | |
| +		, re->re_sastate.sa_saved_iv[3]
 | |
| +	);
 | |
| +	printf("sr: hashbc %u indigest %x %x %x %x %x\n"
 | |
| +		, re->re_sastate.sa_saved_hashbc
 | |
| +		, re->re_sastate.sa_saved_indigest[0]
 | |
| +		, re->re_sastate.sa_saved_indigest[1]
 | |
| +		, re->re_sastate.sa_saved_indigest[2]
 | |
| +		, re->re_sastate.sa_saved_indigest[3]
 | |
| +		, re->re_sastate.sa_saved_indigest[4]
 | |
| +	);
 | |
| +}
 | |
| +
 | |
| +static void
 | |
| +safe_dump_ring(struct safe_softc *sc, const char *tag)
 | |
| +{
 | |
| +	unsigned long flags;
 | |
| +
 | |
| +	spin_lock_irqsave(&sc->sc_ringmtx, flags);
 | |
| +	printf("\nSafeNet Ring State:\n");
 | |
| +	safe_dump_intrstate(sc, tag);
 | |
| +	safe_dump_dmastatus(sc, tag);
 | |
| +	safe_dump_ringstate(sc, tag);
 | |
| +	if (sc->sc_nqchip) {
 | |
| +		struct safe_ringentry *re = sc->sc_back;
 | |
| +		do {
 | |
| +			safe_dump_request(sc, tag, re);
 | |
| +			if (++re == sc->sc_ringtop)
 | |
| +				re = sc->sc_ring;
 | |
| +		} while (re != sc->sc_front);
 | |
| +	}
 | |
| +	spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
 | |
| +}
 | |
| +#endif /* SAFE_DEBUG */
 | |
| +
 | |
| +
 | |
| +static int safe_probe(struct pci_dev *dev, const struct pci_device_id *ent)
 | |
| +{
 | |
| +	struct safe_softc *sc = NULL;
 | |
| +	u32 mem_start, mem_len, cmd;
 | |
| +	int i, rc, devinfo;
 | |
| +	dma_addr_t raddr;
 | |
| +	static int num_chips = 0;
 | |
| +
 | |
| +	DPRINTF(("%s()\n", __FUNCTION__));
 | |
| +
 | |
| +	if (pci_enable_device(dev) < 0)
 | |
| +		return(-ENODEV);
 | |
| +
 | |
| +	if (!dev->irq) {
 | |
| +		printk("safe: found device with no IRQ assigned. check BIOS settings!");
 | |
| +		pci_disable_device(dev);
 | |
| +		return(-ENODEV);
 | |
| +	}
 | |
| +
 | |
| +	if (pci_set_mwi(dev)) {
 | |
| +		printk("safe: pci_set_mwi failed!");
 | |
| +		return(-ENODEV);
 | |
| +	}
 | |
| +
 | |
| +	sc = (struct safe_softc *) kmalloc(sizeof(*sc), GFP_KERNEL);
 | |
| +	if (!sc)
 | |
| +		return(-ENOMEM);
 | |
| +	memset(sc, 0, sizeof(*sc));
 | |
| +
 | |
| +	softc_device_init(sc, "safe", num_chips, safe_methods);
 | |
| +
 | |
| +	sc->sc_irq = -1;
 | |
| +	sc->sc_cid = -1;
 | |
| +	sc->sc_pcidev = dev;
 | |
| +	if (num_chips < SAFE_MAX_CHIPS) {
 | |
| +		safe_chip_idx[device_get_unit(sc->sc_dev)] = sc;
 | |
| +		num_chips++;
 | |
| +	}
 | |
| +
 | |
| +	INIT_LIST_HEAD(&sc->sc_pkq);
 | |
| +	spin_lock_init(&sc->sc_pkmtx);
 | |
| +
 | |
| +	pci_set_drvdata(sc->sc_pcidev, sc);
 | |
| +
 | |
| +	/* we read its hardware registers as memory */
 | |
| +	mem_start = pci_resource_start(sc->sc_pcidev, 0);
 | |
| +	mem_len   = pci_resource_len(sc->sc_pcidev, 0);
 | |
| +
 | |
| +	sc->sc_base_addr = (ocf_iomem_t) ioremap(mem_start, mem_len);
 | |
| +	if (!sc->sc_base_addr) {
 | |
| +		device_printf(sc->sc_dev, "failed to ioremap 0x%x-0x%x\n",
 | |
| +				mem_start, mem_start + mem_len - 1);
 | |
| +		goto out;
 | |
| +	}
 | |
| +
 | |
| +	/* fix up the bus size */
 | |
| +	if (pci_set_dma_mask(sc->sc_pcidev, DMA_32BIT_MASK)) {
 | |
| +		device_printf(sc->sc_dev, "No usable DMA configuration, aborting.\n");
 | |
| +		goto out;
 | |
| +	}
 | |
| +	if (pci_set_consistent_dma_mask(sc->sc_pcidev, DMA_32BIT_MASK)) {
 | |
| +		device_printf(sc->sc_dev, "No usable consistent DMA configuration, aborting.\n");
 | |
| +		goto out;
 | |
| +	}
 | |
| +
 | |
| +	pci_set_master(sc->sc_pcidev);
 | |
| +
 | |
| +	pci_read_config_dword(sc->sc_pcidev, PCI_COMMAND, &cmd);
 | |
| +
 | |
| +	if (!(cmd & PCI_COMMAND_MEMORY)) {
 | |
| +		device_printf(sc->sc_dev, "failed to enable memory mapping\n");
 | |
| +		goto out;
 | |
| +	}
 | |
| +
 | |
| +	if (!(cmd & PCI_COMMAND_MASTER)) {
 | |
| +		device_printf(sc->sc_dev, "failed to enable bus mastering\n");
 | |
| +		goto out;
 | |
| +	}
 | |
| +
 | |
| +	rc = request_irq(dev->irq, safe_intr, IRQF_SHARED, "safe", sc);
 | |
| +	if (rc) {
 | |
| +		device_printf(sc->sc_dev, "failed to hook irq %d\n", sc->sc_irq);
 | |
| +		goto out;
 | |
| +	}
 | |
| +	sc->sc_irq = dev->irq;
 | |
| +
 | |
| +	sc->sc_chiprev = READ_REG(sc, SAFE_DEVINFO) &
 | |
| +			(SAFE_DEVINFO_REV_MAJ | SAFE_DEVINFO_REV_MIN);
 | |
| +
 | |
| +	/*
 | |
| +	 * Allocate packet engine descriptors.
 | |
| +	 */
 | |
| +	sc->sc_ringalloc.dma_vaddr = pci_alloc_consistent(sc->sc_pcidev,
 | |
| +			SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
 | |
| +			&sc->sc_ringalloc.dma_paddr);
 | |
| +	if (!sc->sc_ringalloc.dma_vaddr) {
 | |
| +		device_printf(sc->sc_dev, "cannot allocate PE descriptor ring\n");
 | |
| +		goto out;
 | |
| +	}
 | |
| +
 | |
| +	/*
 | |
| +	 * Hookup the static portion of all our data structures.
 | |
| +	 */
 | |
| +	sc->sc_ring = (struct safe_ringentry *) sc->sc_ringalloc.dma_vaddr;
 | |
| +	sc->sc_ringtop = sc->sc_ring + SAFE_MAX_NQUEUE;
 | |
| +	sc->sc_front = sc->sc_ring;
 | |
| +	sc->sc_back = sc->sc_ring;
 | |
| +	raddr = sc->sc_ringalloc.dma_paddr;
 | |
| +	bzero(sc->sc_ring, SAFE_MAX_NQUEUE * sizeof(struct safe_ringentry));
 | |
| +	for (i = 0; i < SAFE_MAX_NQUEUE; i++) {
 | |
| +		struct safe_ringentry *re = &sc->sc_ring[i];
 | |
| +
 | |
| +		re->re_desc.d_sa = raddr +
 | |
| +			offsetof(struct safe_ringentry, re_sa);
 | |
| +		re->re_sa.sa_staterec = raddr +
 | |
| +			offsetof(struct safe_ringentry, re_sastate);
 | |
| +
 | |
| +		raddr += sizeof (struct safe_ringentry);
 | |
| +	}
 | |
| +	spin_lock_init(&sc->sc_ringmtx);
 | |
| +
 | |
| +	/*
 | |
| +	 * Allocate scatter and gather particle descriptors.
 | |
| +	 */
 | |
| +	sc->sc_spalloc.dma_vaddr = pci_alloc_consistent(sc->sc_pcidev,
 | |
| +			SAFE_TOTAL_SPART * sizeof (struct safe_pdesc),
 | |
| +			&sc->sc_spalloc.dma_paddr);
 | |
| +	if (!sc->sc_spalloc.dma_vaddr) {
 | |
| +		device_printf(sc->sc_dev, "cannot allocate source particle descriptor ring\n");
 | |
| +		goto out;
 | |
| +	}
 | |
| +	sc->sc_spring = (struct safe_pdesc *) sc->sc_spalloc.dma_vaddr;
 | |
| +	sc->sc_springtop = sc->sc_spring + SAFE_TOTAL_SPART;
 | |
| +	sc->sc_spfree = sc->sc_spring;
 | |
| +	bzero(sc->sc_spring, SAFE_TOTAL_SPART * sizeof(struct safe_pdesc));
 | |
| +
 | |
| +	sc->sc_dpalloc.dma_vaddr = pci_alloc_consistent(sc->sc_pcidev,
 | |
| +			SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
 | |
| +			&sc->sc_dpalloc.dma_paddr);
 | |
| +	if (!sc->sc_dpalloc.dma_vaddr) {
 | |
| +		device_printf(sc->sc_dev, "cannot allocate destination particle descriptor ring\n");
 | |
| +		goto out;
 | |
| +	}
 | |
| +	sc->sc_dpring = (struct safe_pdesc *) sc->sc_dpalloc.dma_vaddr;
 | |
| +	sc->sc_dpringtop = sc->sc_dpring + SAFE_TOTAL_DPART;
 | |
| +	sc->sc_dpfree = sc->sc_dpring;
 | |
| +	bzero(sc->sc_dpring, SAFE_TOTAL_DPART * sizeof(struct safe_pdesc));
 | |
| +
 | |
| +	sc->sc_cid = crypto_get_driverid(softc_get_device(sc), CRYPTOCAP_F_HARDWARE);
 | |
| +	if (sc->sc_cid < 0) {
 | |
| +		device_printf(sc->sc_dev, "could not get crypto driver id\n");
 | |
| +		goto out;
 | |
| +	}
 | |
| +
 | |
| +	printf("%s:", device_get_nameunit(sc->sc_dev));
 | |
| +
 | |
| +	devinfo = READ_REG(sc, SAFE_DEVINFO);
 | |
| +	if (devinfo & SAFE_DEVINFO_RNG) {
 | |
| +		sc->sc_flags |= SAFE_FLAGS_RNG;
 | |
| +		printf(" rng");
 | |
| +	}
 | |
| +	if (devinfo & SAFE_DEVINFO_PKEY) {
 | |
| +		printf(" key");
 | |
| +		sc->sc_flags |= SAFE_FLAGS_KEY;
 | |
| +		crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0);
 | |
| +#if 0
 | |
| +		crypto_kregister(sc->sc_cid, CRK_MOD_EXP_CRT, 0);
 | |
| +#endif
 | |
| +		init_timer(&sc->sc_pkto);
 | |
| +		sc->sc_pkto.function = safe_kpoll;
 | |
| +		sc->sc_pkto.data = (unsigned long) device_get_unit(sc->sc_dev);
 | |
| +	}
 | |
| +	if (devinfo & SAFE_DEVINFO_DES) {
 | |
| +		printf(" des/3des");
 | |
| +		crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
 | |
| +		crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
 | |
| +	}
 | |
| +	if (devinfo & SAFE_DEVINFO_AES) {
 | |
| +		printf(" aes");
 | |
| +		crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
 | |
| +	}
 | |
| +	if (devinfo & SAFE_DEVINFO_MD5) {
 | |
| +		printf(" md5");
 | |
| +		crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
 | |
| +	}
 | |
| +	if (devinfo & SAFE_DEVINFO_SHA1) {
 | |
| +		printf(" sha1");
 | |
| +		crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
 | |
| +	}
 | |
| +	printf(" null");
 | |
| +	crypto_register(sc->sc_cid, CRYPTO_NULL_CBC, 0, 0);
 | |
| +	crypto_register(sc->sc_cid, CRYPTO_NULL_HMAC, 0, 0);
 | |
| +	/* XXX other supported algorithms */
 | |
| +	printf("\n");
 | |
| +
 | |
| +	safe_reset_board(sc);		/* reset h/w */
 | |
| +	safe_init_board(sc);		/* init h/w */
 | |
| +
 | |
| +#if defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG)
 | |
| +	if (sc->sc_flags & SAFE_FLAGS_RNG) {
 | |
| +		safe_rng_init(sc);
 | |
| +		crypto_rregister(sc->sc_cid, safe_read_random, sc);
 | |
| +	}
 | |
| +#endif /* SAFE_NO_RNG */
 | |
| +
 | |
| +	return (0);
 | |
| +
 | |
| +out:
 | |
| +	if (sc->sc_cid >= 0)
 | |
| +		crypto_unregister_all(sc->sc_cid);
 | |
| +	if (sc->sc_irq != -1)
 | |
| +		free_irq(sc->sc_irq, sc);
 | |
| +	if (sc->sc_ringalloc.dma_vaddr)
 | |
| +		pci_free_consistent(sc->sc_pcidev,
 | |
| +				SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
 | |
| +				sc->sc_ringalloc.dma_vaddr, sc->sc_ringalloc.dma_paddr);
 | |
| +	if (sc->sc_spalloc.dma_vaddr)
 | |
| +		pci_free_consistent(sc->sc_pcidev,
 | |
| +				SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
 | |
| +				sc->sc_spalloc.dma_vaddr, sc->sc_spalloc.dma_paddr);
 | |
| +	if (sc->sc_dpalloc.dma_vaddr)
 | |
| +		pci_free_consistent(sc->sc_pcidev,
 | |
| +				SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
 | |
| +				sc->sc_dpalloc.dma_vaddr, sc->sc_dpalloc.dma_paddr);
 | |
| +	kfree(sc);
 | |
| +	return(-ENODEV);
 | |
| +}
 | |
| +
 | |
| +static void safe_remove(struct pci_dev *dev)
 | |
| +{
 | |
| +	struct safe_softc *sc = pci_get_drvdata(dev);
 | |
| +
 | |
| +	DPRINTF(("%s()\n", __FUNCTION__));
 | |
| +
 | |
| +	/* XXX wait/abort active ops */
 | |
| +
 | |
| +	WRITE_REG(sc, SAFE_HI_MASK, 0);		/* disable interrupts */
 | |
| +
 | |
| +	del_timer_sync(&sc->sc_pkto);
 | |
| +
 | |
| +	crypto_unregister_all(sc->sc_cid);
 | |
| +
 | |
| +	safe_cleanchip(sc);
 | |
| +
 | |
| +	if (sc->sc_irq != -1)
 | |
| +		free_irq(sc->sc_irq, sc);
 | |
| +	if (sc->sc_ringalloc.dma_vaddr)
 | |
| +		pci_free_consistent(sc->sc_pcidev,
 | |
| +				SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
 | |
| +				sc->sc_ringalloc.dma_vaddr, sc->sc_ringalloc.dma_paddr);
 | |
| +	if (sc->sc_spalloc.dma_vaddr)
 | |
| +		pci_free_consistent(sc->sc_pcidev,
 | |
| +				SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
 | |
| +				sc->sc_spalloc.dma_vaddr, sc->sc_spalloc.dma_paddr);
 | |
| +	if (sc->sc_dpalloc.dma_vaddr)
 | |
| +		pci_free_consistent(sc->sc_pcidev,
 | |
| +				SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
 | |
| +				sc->sc_dpalloc.dma_vaddr, sc->sc_dpalloc.dma_paddr);
 | |
| +	sc->sc_irq = -1;
 | |
| +	sc->sc_ringalloc.dma_vaddr = NULL;
 | |
| +	sc->sc_spalloc.dma_vaddr = NULL;
 | |
| +	sc->sc_dpalloc.dma_vaddr = NULL;
 | |
| +}
 | |
| +
 | |
| +static struct pci_device_id safe_pci_tbl[] = {
 | |
| +	{ PCI_VENDOR_SAFENET, PCI_PRODUCT_SAFEXCEL,
 | |
| +	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
 | |
| +	{ },
 | |
| +};
 | |
| +MODULE_DEVICE_TABLE(pci, safe_pci_tbl);
 | |
| +
 | |
| +static struct pci_driver safe_driver = {
 | |
| +	.name         = "safe",
 | |
| +	.id_table     = safe_pci_tbl,
 | |
| +	.probe        =	safe_probe,
 | |
| +	.remove       = safe_remove,
 | |
| +	/* add PM stuff here one day */
 | |
| +};
 | |
| +
 | |
| +static int __init safe_init (void)
 | |
| +{
 | |
| +	struct safe_softc *sc = NULL;
 | |
| +	int rc;
 | |
| +
 | |
| +	DPRINTF(("%s(%p)\n", __FUNCTION__, safe_init));
 | |
| +
 | |
| +	rc = pci_register_driver(&safe_driver);
 | |
| +	pci_register_driver_compat(&safe_driver, rc);
 | |
| +
 | |
| +	return rc;
 | |
| +}
 | |
| +
 | |
| +static void __exit safe_exit (void)
 | |
| +{
 | |
| +	pci_unregister_driver(&safe_driver);
 | |
| +}
 | |
| +
 | |
| +module_init(safe_init);
 | |
| +module_exit(safe_exit);
 | |
| +
 | |
| +MODULE_LICENSE("BSD");
 | |
| +MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
 | |
| +MODULE_DESCRIPTION("OCF driver for safenet PCI crypto devices");
 | |
| --- /dev/null
 | |
| +++ b/crypto/ocf/safe/sha1.c
 | |
| @@ -0,0 +1,279 @@
 | |
| +/*	$KAME: sha1.c,v 1.5 2000/11/08 06:13:08 itojun Exp $	*/
 | |
| +/*
 | |
| + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 | |
| + * All rights reserved.
 | |
| + *
 | |
| + * Redistribution and use in source and binary forms, with or without
 | |
| + * modification, are permitted provided that the following conditions
 | |
| + * are met:
 | |
| + * 1. Redistributions of source code must retain the above copyright
 | |
| + *    notice, this list of conditions and the following disclaimer.
 | |
| + * 2. Redistributions in binary form must reproduce the above copyright
 | |
| + *    notice, this list of conditions and the following disclaimer in the
 | |
| + *    documentation and/or other materials provided with the distribution.
 | |
| + * 3. Neither the name of the project nor the names of its contributors
 | |
| + *    may be used to endorse or promote products derived from this software
 | |
| + *    without specific prior written permission.
 | |
| + *
 | |
| + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 | |
| + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 | |
| + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 | |
| + * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 | |
| + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 | |
| + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 | |
| + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 | |
| + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 | |
| + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 | |
| + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 | |
| + * SUCH DAMAGE.
 | |
| + */
 | |
| +
 | |
| +/*
 | |
| + * FIPS pub 180-1: Secure Hash Algorithm (SHA-1)
 | |
| + * based on: http://csrc.nist.gov/fips/fip180-1.txt
 | |
| + * implemented by Jun-ichiro itojun Itoh <itojun@itojun.org>
 | |
| + */
 | |
| +
 | |
| +#if 0
 | |
| +#include <sys/cdefs.h>
 | |
| +__FBSDID("$FreeBSD: src/sys/crypto/sha1.c,v 1.9 2003/06/10 21:36:57 obrien Exp $");
 | |
| +
 | |
| +#include <sys/types.h>
 | |
| +#include <sys/cdefs.h>
 | |
| +#include <sys/time.h>
 | |
| +#include <sys/systm.h>
 | |
| +
 | |
| +#include <crypto/sha1.h>
 | |
| +#endif
 | |
| +
 | |
| +/* sanity check */
 | |
| +#if BYTE_ORDER != BIG_ENDIAN
 | |
| +# if BYTE_ORDER != LITTLE_ENDIAN
 | |
| +#  define unsupported 1
 | |
| +# endif
 | |
| +#endif
 | |
| +
 | |
| +#ifndef unsupported
 | |
| +
 | |
| +/* constant table */
 | |
| +static u_int32_t _K[] = { 0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6 };
 | |
| +#define	K(t)	_K[(t) / 20]
 | |
| +
 | |
| +#define	F0(b, c, d)	(((b) & (c)) | ((~(b)) & (d)))
 | |
| +#define	F1(b, c, d)	(((b) ^ (c)) ^ (d))
 | |
| +#define	F2(b, c, d)	(((b) & (c)) | ((b) & (d)) | ((c) & (d)))
 | |
| +#define	F3(b, c, d)	(((b) ^ (c)) ^ (d))
 | |
| +
 | |
| +#define	S(n, x)		(((x) << (n)) | ((x) >> (32 - n)))
 | |
| +
 | |
| +#undef H
 | |
| +#define	H(n)	(ctxt->h.b32[(n)])
 | |
| +#define	COUNT	(ctxt->count)
 | |
| +#define	BCOUNT	(ctxt->c.b64[0] / 8)
 | |
| +#define	W(n)	(ctxt->m.b32[(n)])
 | |
| +
 | |
| +#define	PUTBYTE(x)	{ \
 | |
| +	ctxt->m.b8[(COUNT % 64)] = (x);		\
 | |
| +	COUNT++;				\
 | |
| +	COUNT %= 64;				\
 | |
| +	ctxt->c.b64[0] += 8;			\
 | |
| +	if (COUNT % 64 == 0)			\
 | |
| +		sha1_step(ctxt);		\
 | |
| +     }
 | |
| +
 | |
| +#define	PUTPAD(x)	{ \
 | |
| +	ctxt->m.b8[(COUNT % 64)] = (x);		\
 | |
| +	COUNT++;				\
 | |
| +	COUNT %= 64;				\
 | |
| +	if (COUNT % 64 == 0)			\
 | |
| +		sha1_step(ctxt);		\
 | |
| +     }
 | |
| +
 | |
| +static void sha1_step(struct sha1_ctxt *);
 | |
| +
 | |
| +static void
 | |
| +sha1_step(ctxt)
 | |
| +	struct sha1_ctxt *ctxt;
 | |
| +{
 | |
| +	u_int32_t	a, b, c, d, e;
 | |
| +	size_t t, s;
 | |
| +	u_int32_t	tmp;
 | |
| +
 | |
| +#if BYTE_ORDER == LITTLE_ENDIAN
 | |
| +	struct sha1_ctxt tctxt;
 | |
| +	bcopy(&ctxt->m.b8[0], &tctxt.m.b8[0], 64);
 | |
| +	ctxt->m.b8[0] = tctxt.m.b8[3]; ctxt->m.b8[1] = tctxt.m.b8[2];
 | |
| +	ctxt->m.b8[2] = tctxt.m.b8[1]; ctxt->m.b8[3] = tctxt.m.b8[0];
 | |
| +	ctxt->m.b8[4] = tctxt.m.b8[7]; ctxt->m.b8[5] = tctxt.m.b8[6];
 | |
| +	ctxt->m.b8[6] = tctxt.m.b8[5]; ctxt->m.b8[7] = tctxt.m.b8[4];
 | |
| +	ctxt->m.b8[8] = tctxt.m.b8[11]; ctxt->m.b8[9] = tctxt.m.b8[10];
 | |
| +	ctxt->m.b8[10] = tctxt.m.b8[9]; ctxt->m.b8[11] = tctxt.m.b8[8];
 | |
| +	ctxt->m.b8[12] = tctxt.m.b8[15]; ctxt->m.b8[13] = tctxt.m.b8[14];
 | |
| +	ctxt->m.b8[14] = tctxt.m.b8[13]; ctxt->m.b8[15] = tctxt.m.b8[12];
 | |
| +	ctxt->m.b8[16] = tctxt.m.b8[19]; ctxt->m.b8[17] = tctxt.m.b8[18];
 | |
| +	ctxt->m.b8[18] = tctxt.m.b8[17]; ctxt->m.b8[19] = tctxt.m.b8[16];
 | |
| +	ctxt->m.b8[20] = tctxt.m.b8[23]; ctxt->m.b8[21] = tctxt.m.b8[22];
 | |
| +	ctxt->m.b8[22] = tctxt.m.b8[21]; ctxt->m.b8[23] = tctxt.m.b8[20];
 | |
| +	ctxt->m.b8[24] = tctxt.m.b8[27]; ctxt->m.b8[25] = tctxt.m.b8[26];
 | |
| +	ctxt->m.b8[26] = tctxt.m.b8[25]; ctxt->m.b8[27] = tctxt.m.b8[24];
 | |
| +	ctxt->m.b8[28] = tctxt.m.b8[31]; ctxt->m.b8[29] = tctxt.m.b8[30];
 | |
| +	ctxt->m.b8[30] = tctxt.m.b8[29]; ctxt->m.b8[31] = tctxt.m.b8[28];
 | |
| +	ctxt->m.b8[32] = tctxt.m.b8[35]; ctxt->m.b8[33] = tctxt.m.b8[34];
 | |
| +	ctxt->m.b8[34] = tctxt.m.b8[33]; ctxt->m.b8[35] = tctxt.m.b8[32];
 | |
| +	ctxt->m.b8[36] = tctxt.m.b8[39]; ctxt->m.b8[37] = tctxt.m.b8[38];
 | |
| +	ctxt->m.b8[38] = tctxt.m.b8[37]; ctxt->m.b8[39] = tctxt.m.b8[36];
 | |
| +	ctxt->m.b8[40] = tctxt.m.b8[43]; ctxt->m.b8[41] = tctxt.m.b8[42];
 | |
| +	ctxt->m.b8[42] = tctxt.m.b8[41]; ctxt->m.b8[43] = tctxt.m.b8[40];
 | |
| +	ctxt->m.b8[44] = tctxt.m.b8[47]; ctxt->m.b8[45] = tctxt.m.b8[46];
 | |
| +	ctxt->m.b8[46] = tctxt.m.b8[45]; ctxt->m.b8[47] = tctxt.m.b8[44];
 | |
| +	ctxt->m.b8[48] = tctxt.m.b8[51]; ctxt->m.b8[49] = tctxt.m.b8[50];
 | |
| +	ctxt->m.b8[50] = tctxt.m.b8[49]; ctxt->m.b8[51] = tctxt.m.b8[48];
 | |
| +	ctxt->m.b8[52] = tctxt.m.b8[55]; ctxt->m.b8[53] = tctxt.m.b8[54];
 | |
| +	ctxt->m.b8[54] = tctxt.m.b8[53]; ctxt->m.b8[55] = tctxt.m.b8[52];
 | |
| +	ctxt->m.b8[56] = tctxt.m.b8[59]; ctxt->m.b8[57] = tctxt.m.b8[58];
 | |
| +	ctxt->m.b8[58] = tctxt.m.b8[57]; ctxt->m.b8[59] = tctxt.m.b8[56];
 | |
| +	ctxt->m.b8[60] = tctxt.m.b8[63]; ctxt->m.b8[61] = tctxt.m.b8[62];
 | |
| +	ctxt->m.b8[62] = tctxt.m.b8[61]; ctxt->m.b8[63] = tctxt.m.b8[60];
 | |
| +#endif
 | |
| +
 | |
| +	a = H(0); b = H(1); c = H(2); d = H(3); e = H(4);
 | |
| +
 | |
| +	for (t = 0; t < 20; t++) {
 | |
| +		s = t & 0x0f;
 | |
| +		if (t >= 16) {
 | |
| +			W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s));
 | |
| +		}
 | |
| +		tmp = S(5, a) + F0(b, c, d) + e + W(s) + K(t);
 | |
| +		e = d; d = c; c = S(30, b); b = a; a = tmp;
 | |
| +	}
 | |
| +	for (t = 20; t < 40; t++) {
 | |
| +		s = t & 0x0f;
 | |
| +		W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s));
 | |
| +		tmp = S(5, a) + F1(b, c, d) + e + W(s) + K(t);
 | |
| +		e = d; d = c; c = S(30, b); b = a; a = tmp;
 | |
| +	}
 | |
| +	for (t = 40; t < 60; t++) {
 | |
| +		s = t & 0x0f;
 | |
| +		W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s));
 | |
| +		tmp = S(5, a) + F2(b, c, d) + e + W(s) + K(t);
 | |
| +		e = d; d = c; c = S(30, b); b = a; a = tmp;
 | |
| +	}
 | |
| +	for (t = 60; t < 80; t++) {
 | |
| +		s = t & 0x0f;
 | |
| +		W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s));
 | |
| +		tmp = S(5, a) + F3(b, c, d) + e + W(s) + K(t);
 | |
| +		e = d; d = c; c = S(30, b); b = a; a = tmp;
 | |
| +	}
 | |
| +
 | |
| +	H(0) = H(0) + a;
 | |
| +	H(1) = H(1) + b;
 | |
| +	H(2) = H(2) + c;
 | |
| +	H(3) = H(3) + d;
 | |
| +	H(4) = H(4) + e;
 | |
| +
 | |
| +	bzero(&ctxt->m.b8[0], 64);
 | |
| +}
 | |
| +
 | |
| +/*------------------------------------------------------------*/
 | |
| +
 | |
| +void
 | |
| +sha1_init(ctxt)
 | |
| +	struct sha1_ctxt *ctxt;
 | |
| +{
 | |
| +	bzero(ctxt, sizeof(struct sha1_ctxt));
 | |
| +	H(0) = 0x67452301;
 | |
| +	H(1) = 0xefcdab89;
 | |
| +	H(2) = 0x98badcfe;
 | |
| +	H(3) = 0x10325476;
 | |
| +	H(4) = 0xc3d2e1f0;
 | |
| +}
 | |
| +
 | |
| +void
 | |
| +sha1_pad(ctxt)
 | |
| +	struct sha1_ctxt *ctxt;
 | |
| +{
 | |
| +	size_t padlen;		/*pad length in bytes*/
 | |
| +	size_t padstart;
 | |
| +
 | |
| +	PUTPAD(0x80);
 | |
| +
 | |
| +	padstart = COUNT % 64;
 | |
| +	padlen = 64 - padstart;
 | |
| +	if (padlen < 8) {
 | |
| +		bzero(&ctxt->m.b8[padstart], padlen);
 | |
| +		COUNT += padlen;
 | |
| +		COUNT %= 64;
 | |
| +		sha1_step(ctxt);
 | |
| +		padstart = COUNT % 64;	/* should be 0 */
 | |
| +		padlen = 64 - padstart;	/* should be 64 */
 | |
| +	}
 | |
| +	bzero(&ctxt->m.b8[padstart], padlen - 8);
 | |
| +	COUNT += (padlen - 8);
 | |
| +	COUNT %= 64;
 | |
| +#if BYTE_ORDER == BIG_ENDIAN
 | |
| +	PUTPAD(ctxt->c.b8[0]); PUTPAD(ctxt->c.b8[1]);
 | |
| +	PUTPAD(ctxt->c.b8[2]); PUTPAD(ctxt->c.b8[3]);
 | |
| +	PUTPAD(ctxt->c.b8[4]); PUTPAD(ctxt->c.b8[5]);
 | |
| +	PUTPAD(ctxt->c.b8[6]); PUTPAD(ctxt->c.b8[7]);
 | |
| +#else
 | |
| +	PUTPAD(ctxt->c.b8[7]); PUTPAD(ctxt->c.b8[6]);
 | |
| +	PUTPAD(ctxt->c.b8[5]); PUTPAD(ctxt->c.b8[4]);
 | |
| +	PUTPAD(ctxt->c.b8[3]); PUTPAD(ctxt->c.b8[2]);
 | |
| +	PUTPAD(ctxt->c.b8[1]); PUTPAD(ctxt->c.b8[0]);
 | |
| +#endif
 | |
| +}
 | |
| +
 | |
| +void
 | |
| +sha1_loop(ctxt, input, len)
 | |
| +	struct sha1_ctxt *ctxt;
 | |
| +	const u_int8_t *input;
 | |
| +	size_t len;
 | |
| +{
 | |
| +	size_t gaplen;
 | |
| +	size_t gapstart;
 | |
| +	size_t off;
 | |
| +	size_t copysiz;
 | |
| +
 | |
| +	off = 0;
 | |
| +
 | |
| +	while (off < len) {
 | |
| +		gapstart = COUNT % 64;
 | |
| +		gaplen = 64 - gapstart;
 | |
| +
 | |
| +		copysiz = (gaplen < len - off) ? gaplen : len - off;
 | |
| +		bcopy(&input[off], &ctxt->m.b8[gapstart], copysiz);
 | |
| +		COUNT += copysiz;
 | |
| +		COUNT %= 64;
 | |
| +		ctxt->c.b64[0] += copysiz * 8;
 | |
| +		if (COUNT % 64 == 0)
 | |
| +			sha1_step(ctxt);
 | |
| +		off += copysiz;
 | |
| +	}
 | |
| +}
 | |
| +
 | |
| +void
 | |
| +sha1_result(ctxt, digest0)
 | |
| +	struct sha1_ctxt *ctxt;
 | |
| +	caddr_t digest0;
 | |
| +{
 | |
| +	u_int8_t *digest;
 | |
| +
 | |
| +	digest = (u_int8_t *)digest0;
 | |
| +	sha1_pad(ctxt);
 | |
| +#if BYTE_ORDER == BIG_ENDIAN
 | |
| +	bcopy(&ctxt->h.b8[0], digest, 20);
 | |
| +#else
 | |
| +	digest[0] = ctxt->h.b8[3]; digest[1] = ctxt->h.b8[2];
 | |
| +	digest[2] = ctxt->h.b8[1]; digest[3] = ctxt->h.b8[0];
 | |
| +	digest[4] = ctxt->h.b8[7]; digest[5] = ctxt->h.b8[6];
 | |
| +	digest[6] = ctxt->h.b8[5]; digest[7] = ctxt->h.b8[4];
 | |
| +	digest[8] = ctxt->h.b8[11]; digest[9] = ctxt->h.b8[10];
 | |
| +	digest[10] = ctxt->h.b8[9]; digest[11] = ctxt->h.b8[8];
 | |
| +	digest[12] = ctxt->h.b8[15]; digest[13] = ctxt->h.b8[14];
 | |
| +	digest[14] = ctxt->h.b8[13]; digest[15] = ctxt->h.b8[12];
 | |
| +	digest[16] = ctxt->h.b8[19]; digest[17] = ctxt->h.b8[18];
 | |
| +	digest[18] = ctxt->h.b8[17]; digest[19] = ctxt->h.b8[16];
 | |
| +#endif
 | |
| +}
 | |
| +
 | |
| +#endif /*unsupported*/
 | |
| --- /dev/null
 | |
| +++ b/crypto/ocf/safe/sha1.h
 | |
| @@ -0,0 +1,72 @@
 | |
| +/*	$FreeBSD: src/sys/crypto/sha1.h,v 1.8 2002/03/20 05:13:50 alfred Exp $	*/
 | |
| +/*	$KAME: sha1.h,v 1.5 2000/03/27 04:36:23 sumikawa Exp $	*/
 | |
| +
 | |
| +/*
 | |
| + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 | |
| + * All rights reserved.
 | |
| + *
 | |
| + * Redistribution and use in source and binary forms, with or without
 | |
| + * modification, are permitted provided that the following conditions
 | |
| + * are met:
 | |
| + * 1. Redistributions of source code must retain the above copyright
 | |
| + *    notice, this list of conditions and the following disclaimer.
 | |
| + * 2. Redistributions in binary form must reproduce the above copyright
 | |
| + *    notice, this list of conditions and the following disclaimer in the
 | |
| + *    documentation and/or other materials provided with the distribution.
 | |
| + * 3. Neither the name of the project nor the names of its contributors
 | |
| + *    may be used to endorse or promote products derived from this software
 | |
| + *    without specific prior written permission.
 | |
| + *
 | |
| + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 | |
| + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 | |
| + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 | |
| + * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 | |
| + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 | |
| + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 | |
| + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 | |
| + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 | |
| + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 | |
| + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 | |
| + * SUCH DAMAGE.
 | |
| + */
 | |
| +/*
 | |
| + * FIPS pub 180-1: Secure Hash Algorithm (SHA-1)
 | |
| + * based on: http://csrc.nist.gov/fips/fip180-1.txt
 | |
| + * implemented by Jun-ichiro itojun Itoh <itojun@itojun.org>
 | |
| + */
 | |
| +
 | |
| +#ifndef _NETINET6_SHA1_H_
 | |
| +#define _NETINET6_SHA1_H_
 | |
| +
 | |
| +struct sha1_ctxt {
 | |
| +	union {
 | |
| +		u_int8_t	b8[20];
 | |
| +		u_int32_t	b32[5];
 | |
| +	} h;
 | |
| +	union {
 | |
| +		u_int8_t	b8[8];
 | |
| +		u_int64_t	b64[1];
 | |
| +	} c;
 | |
| +	union {
 | |
| +		u_int8_t	b8[64];
 | |
| +		u_int32_t	b32[16];
 | |
| +	} m;
 | |
| +	u_int8_t	count;
 | |
| +};
 | |
| +
 | |
| +#ifdef __KERNEL__
 | |
| +extern void sha1_init(struct sha1_ctxt *);
 | |
| +extern void sha1_pad(struct sha1_ctxt *);
 | |
| +extern void sha1_loop(struct sha1_ctxt *, const u_int8_t *, size_t);
 | |
| +extern void sha1_result(struct sha1_ctxt *, caddr_t);
 | |
| +
 | |
| +/* compatibilty with other SHA1 source codes */
 | |
| +typedef struct sha1_ctxt SHA1_CTX;
 | |
| +#define SHA1Init(x)		sha1_init((x))
 | |
| +#define SHA1Update(x, y, z)	sha1_loop((x), (y), (z))
 | |
| +#define SHA1Final(x, y)		sha1_result((y), (x))
 | |
| +#endif /* __KERNEL__ */
 | |
| +
 | |
| +#define	SHA1_RESULTLEN	(160/8)
 | |
| +
 | |
| +#endif /*_NETINET6_SHA1_H_*/
 | |
| --- /dev/null
 | |
| +++ b/crypto/ocf/safe/safereg.h
 | |
| @@ -0,0 +1,421 @@
 | |
| +/*-
 | |
| + * Copyright (c) 2003 Sam Leffler, Errno Consulting
 | |
| + * Copyright (c) 2003 Global Technology Associates, Inc.
 | |
| + * All rights reserved.
 | |
| + *
 | |
| + * Redistribution and use in source and binary forms, with or without
 | |
| + * modification, are permitted provided that the following conditions
 | |
| + * are met:
 | |
| + * 1. Redistributions of source code must retain the above copyright
 | |
| + *    notice, this list of conditions and the following disclaimer.
 | |
| + * 2. Redistributions in binary form must reproduce the above copyright
 | |
| + *    notice, this list of conditions and the following disclaimer in the
 | |
| + *    documentation and/or other materials provided with the distribution.
 | |
| + *
 | |
| + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 | |
| + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 | |
| + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 | |
| + * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 | |
| + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 | |
| + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 | |
| + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 | |
| + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 | |
| + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 | |
| + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 | |
| + * SUCH DAMAGE.
 | |
| + *
 | |
| + * $FreeBSD: src/sys/dev/safe/safereg.h,v 1.1 2003/07/21 21:46:07 sam Exp $
 | |
| + */
 | |
| +#ifndef _SAFE_SAFEREG_H_
 | |
| +#define	_SAFE_SAFEREG_H_
 | |
| +
 | |
| +/*
 | |
| + * Register definitions for SafeNet SafeXcel-1141 crypto device.
 | |
| + * Definitions from revision 1.3 (Nov 6 2002) of the User's Manual.
 | |
| + */
 | |
| +
 | |
| +#define BS_BAR			0x10	/* DMA base address register */
 | |
| +#define	BS_TRDY_TIMEOUT		0x40	/* TRDY timeout */
 | |
| +#define	BS_RETRY_TIMEOUT	0x41	/* DMA retry timeout */
 | |
| +
 | |
| +#define	PCI_VENDOR_SAFENET	0x16ae		/* SafeNet, Inc. */
 | |
| +
 | |
| +/* SafeNet */
 | |
| +#define	PCI_PRODUCT_SAFEXCEL	0x1141		/* 1141 */
 | |
| +
 | |
| +#define	SAFE_PE_CSR		0x0000	/* Packet Enginge Ctrl/Status */
 | |
| +#define	SAFE_PE_SRC		0x0004	/* Packet Engine Source */
 | |
| +#define	SAFE_PE_DST		0x0008	/* Packet Engine Destination */
 | |
| +#define	SAFE_PE_SA		0x000c	/* Packet Engine SA */
 | |
| +#define	SAFE_PE_LEN		0x0010	/* Packet Engine Length */
 | |
| +#define	SAFE_PE_DMACFG		0x0040	/* Packet Engine DMA Configuration */
 | |
| +#define	SAFE_PE_DMASTAT		0x0044	/* Packet Engine DMA Status */
 | |
| +#define	SAFE_PE_PDRBASE		0x0048	/* Packet Engine Descriptor Ring Base */
 | |
| +#define	SAFE_PE_RDRBASE		0x004c	/* Packet Engine Result Ring Base */
 | |
| +#define	SAFE_PE_RINGCFG		0x0050	/* Packet Engine Ring Configuration */
 | |
| +#define	SAFE_PE_RINGPOLL	0x0054	/* Packet Engine Ring Poll */
 | |
| +#define	SAFE_PE_IRNGSTAT	0x0058	/* Packet Engine Internal Ring Status */
 | |
| +#define	SAFE_PE_ERNGSTAT	0x005c	/* Packet Engine External Ring Status */
 | |
| +#define	SAFE_PE_IOTHRESH	0x0060	/* Packet Engine I/O Threshold */
 | |
| +#define	SAFE_PE_GRNGBASE	0x0064	/* Packet Engine Gather Ring Base */
 | |
| +#define	SAFE_PE_SRNGBASE	0x0068	/* Packet Engine Scatter Ring Base */
 | |
| +#define	SAFE_PE_PARTSIZE	0x006c	/* Packet Engine Particlar Ring Size */
 | |
| +#define	SAFE_PE_PARTCFG		0x0070	/* Packet Engine Particle Ring Config */
 | |
| +#define	SAFE_CRYPTO_CTRL	0x0080	/* Crypto Control */
 | |
| +#define	SAFE_DEVID		0x0084	/* Device ID */
 | |
| +#define	SAFE_DEVINFO		0x0088	/* Device Info */
 | |
| +#define	SAFE_HU_STAT		0x00a0	/* Host Unmasked Status */
 | |
| +#define	SAFE_HM_STAT		0x00a4	/* Host Masked Status (read-only) */
 | |
| +#define	SAFE_HI_CLR		0x00a4	/* Host Clear Interrupt (write-only) */
 | |
| +#define	SAFE_HI_MASK		0x00a8	/* Host Mask Control */
 | |
| +#define	SAFE_HI_CFG		0x00ac	/* Interrupt Configuration */
 | |
| +#define	SAFE_HI_RD_DESCR	0x00b4	/* Force Descriptor Read */
 | |
| +#define	SAFE_HI_DESC_CNT	0x00b8	/* Host Descriptor Done Count */
 | |
| +#define	SAFE_DMA_ENDIAN		0x00c0	/* Master Endian Status */
 | |
| +#define	SAFE_DMA_SRCADDR	0x00c4	/* DMA Source Address Status */
 | |
| +#define	SAFE_DMA_DSTADDR	0x00c8	/* DMA Destination Address Status */
 | |
| +#define	SAFE_DMA_STAT		0x00cc	/* DMA Current Status */
 | |
| +#define	SAFE_DMA_CFG		0x00d4	/* DMA Configuration/Status */
 | |
| +#define	SAFE_ENDIAN		0x00e0	/* Endian Configuration */
 | |
| +#define	SAFE_PK_A_ADDR		0x0800	/* Public Key A Address */
 | |
| +#define	SAFE_PK_B_ADDR		0x0804	/* Public Key B Address */
 | |
| +#define	SAFE_PK_C_ADDR		0x0808	/* Public Key C Address */
 | |
| +#define	SAFE_PK_D_ADDR		0x080c	/* Public Key D Address */
 | |
| +#define	SAFE_PK_A_LEN		0x0810	/* Public Key A Length */
 | |
| +#define	SAFE_PK_B_LEN		0x0814	/* Public Key B Length */
 | |
| +#define	SAFE_PK_SHIFT		0x0818	/* Public Key Shift */
 | |
| +#define	SAFE_PK_FUNC		0x081c	/* Public Key Function */
 | |
| +#define SAFE_PK_RAM_START	0x1000	/* Public Key RAM start address */
 | |
| +#define SAFE_PK_RAM_END		0x1fff	/* Public Key RAM end address */
 | |
| +
 | |
| +#define	SAFE_RNG_OUT		0x0100	/* RNG Output */
 | |
| +#define	SAFE_RNG_STAT		0x0104	/* RNG Status */
 | |
| +#define	SAFE_RNG_CTRL		0x0108	/* RNG Control */
 | |
| +#define	SAFE_RNG_A		0x010c	/* RNG A */
 | |
| +#define	SAFE_RNG_B		0x0110	/* RNG B */
 | |
| +#define	SAFE_RNG_X_LO		0x0114	/* RNG X [31:0] */
 | |
| +#define	SAFE_RNG_X_MID		0x0118	/* RNG X [63:32] */
 | |
| +#define	SAFE_RNG_X_HI		0x011c	/* RNG X [80:64] */
 | |
| +#define	SAFE_RNG_X_CNTR		0x0120	/* RNG Counter */
 | |
| +#define	SAFE_RNG_ALM_CNT	0x0124	/* RNG Alarm Count */
 | |
| +#define	SAFE_RNG_CNFG		0x0128	/* RNG Configuration */
 | |
| +#define	SAFE_RNG_LFSR1_LO	0x012c	/* RNG LFSR1 [31:0] */
 | |
| +#define	SAFE_RNG_LFSR1_HI	0x0130	/* RNG LFSR1 [47:32] */
 | |
| +#define	SAFE_RNG_LFSR2_LO	0x0134	/* RNG LFSR1 [31:0] */
 | |
| +#define	SAFE_RNG_LFSR2_HI	0x0138	/* RNG LFSR1 [47:32] */
 | |
| +
 | |
| +#define	SAFE_PE_CSR_READY	0x00000001	/* ready for processing */
 | |
| +#define	SAFE_PE_CSR_DONE	0x00000002	/* h/w completed processing */
 | |
| +#define	SAFE_PE_CSR_LOADSA	0x00000004	/* load SA digests */
 | |
| +#define	SAFE_PE_CSR_HASHFINAL	0x00000010	/* do hash pad & write result */
 | |
| +#define	SAFE_PE_CSR_SABUSID	0x000000c0	/* bus id for SA */
 | |
| +#define	SAFE_PE_CSR_SAPCI	0x00000040	/* PCI bus id for SA */
 | |
| +#define	SAFE_PE_CSR_NXTHDR	0x0000ff00	/* next hdr value for IPsec */
 | |
| +#define	SAFE_PE_CSR_FPAD	0x0000ff00	/* fixed pad for basic ops */
 | |
| +#define	SAFE_PE_CSR_STATUS	0x00ff0000	/* operation result status */
 | |
| +#define	SAFE_PE_CSR_AUTH_FAIL	0x00010000	/* ICV mismatch (inbound) */
 | |
| +#define	SAFE_PE_CSR_PAD_FAIL	0x00020000	/* pad verify fail (inbound) */
 | |
| +#define	SAFE_PE_CSR_SEQ_FAIL	0x00040000	/* sequence number (inbound) */
 | |
| +#define	SAFE_PE_CSR_XERROR	0x00080000	/* extended error follows */
 | |
| +#define	SAFE_PE_CSR_XECODE	0x00f00000	/* extended error code */
 | |
| +#define	SAFE_PE_CSR_XECODE_S	20
 | |
| +#define	SAFE_PE_CSR_XECODE_BADCMD	0	/* invalid command */
 | |
| +#define	SAFE_PE_CSR_XECODE_BADALG	1	/* invalid algorithm */
 | |
| +#define	SAFE_PE_CSR_XECODE_ALGDIS	2	/* algorithm disabled */
 | |
| +#define	SAFE_PE_CSR_XECODE_ZEROLEN	3	/* zero packet length */
 | |
| +#define	SAFE_PE_CSR_XECODE_DMAERR	4	/* bus DMA error */
 | |
| +#define	SAFE_PE_CSR_XECODE_PIPEABORT	5	/* secondary bus DMA error */
 | |
| +#define	SAFE_PE_CSR_XECODE_BADSPI	6	/* IPsec SPI mismatch */
 | |
| +#define	SAFE_PE_CSR_XECODE_TIMEOUT	10	/* failsafe timeout */
 | |
| +#define	SAFE_PE_CSR_PAD		0xff000000	/* ESP padding control/status */
 | |
| +#define	SAFE_PE_CSR_PAD_MIN	0x00000000	/* minimum IPsec padding */
 | |
| +#define	SAFE_PE_CSR_PAD_16	0x08000000	/* pad to 16-byte boundary */
 | |
| +#define	SAFE_PE_CSR_PAD_32	0x10000000	/* pad to 32-byte boundary */
 | |
| +#define	SAFE_PE_CSR_PAD_64	0x20000000	/* pad to 64-byte boundary */
 | |
| +#define	SAFE_PE_CSR_PAD_128	0x40000000	/* pad to 128-byte boundary */
 | |
| +#define	SAFE_PE_CSR_PAD_256	0x80000000	/* pad to 256-byte boundary */
 | |
| +
 | |
| +/*
 | |
| + * Check the CSR to see if the PE has returned ownership to
 | |
| + * the host.  Note that before processing a descriptor this
 | |
| + * must be done followed by a check of the SAFE_PE_LEN register
 | |
| + * status bits to avoid premature processing of a descriptor
 | |
| + * on its way back to the host.
 | |
| + */
 | |
| +#define	SAFE_PE_CSR_IS_DONE(_csr) \
 | |
| +    (((_csr) & (SAFE_PE_CSR_READY | SAFE_PE_CSR_DONE)) == SAFE_PE_CSR_DONE)
 | |
| +
 | |
| +#define	SAFE_PE_LEN_LENGTH	0x000fffff	/* total length (bytes) */
 | |
| +#define	SAFE_PE_LEN_READY	0x00400000	/* ready for processing */
 | |
| +#define	SAFE_PE_LEN_DONE	0x00800000	/* h/w completed processing */
 | |
| +#define	SAFE_PE_LEN_BYPASS	0xff000000	/* bypass offset (bytes) */
 | |
| +#define	SAFE_PE_LEN_BYPASS_S	24
 | |
| +
 | |
| +#define	SAFE_PE_LEN_IS_DONE(_len) \
 | |
| +    (((_len) & (SAFE_PE_LEN_READY | SAFE_PE_LEN_DONE)) == SAFE_PE_LEN_DONE)
 | |
| +
 | |
| +/* NB: these apply to HU_STAT, HM_STAT, HI_CLR, and HI_MASK */
 | |
| +#define	SAFE_INT_PE_CDONE	0x00000002	/* PE context done */
 | |
| +#define	SAFE_INT_PE_DDONE	0x00000008	/* PE descriptor done */
 | |
| +#define	SAFE_INT_PE_ERROR	0x00000010	/* PE error */
 | |
| +#define	SAFE_INT_PE_ODONE	0x00000020	/* PE operation done */
 | |
| +
 | |
| +#define	SAFE_HI_CFG_PULSE	0x00000001	/* use pulse interrupt */
 | |
| +#define	SAFE_HI_CFG_LEVEL	0x00000000	/* use level interrupt */
 | |
| +#define	SAFE_HI_CFG_AUTOCLR	0x00000002	/* auto-clear pulse interrupt */
 | |
| +
 | |
| +#define	SAFE_ENDIAN_PASS	0x000000e4	/* straight pass-thru */
 | |
| +#define	SAFE_ENDIAN_SWAB	0x0000001b	/* swap bytes in 32-bit word */
 | |
| +
 | |
| +#define	SAFE_PE_DMACFG_PERESET	0x00000001	/* reset packet engine */
 | |
| +#define	SAFE_PE_DMACFG_PDRRESET	0x00000002	/* reset PDR counters/ptrs */
 | |
| +#define	SAFE_PE_DMACFG_SGRESET	0x00000004	/* reset scatter/gather cache */
 | |
| +#define	SAFE_PE_DMACFG_FSENA	0x00000008	/* enable failsafe reset */
 | |
| +#define	SAFE_PE_DMACFG_PEMODE	0x00000100	/* packet engine mode */
 | |
| +#define	SAFE_PE_DMACFG_SAPREC	0x00000200	/* SA precedes packet */
 | |
| +#define	SAFE_PE_DMACFG_PKFOLL	0x00000400	/* packet follows descriptor */
 | |
| +#define	SAFE_PE_DMACFG_GPRBID	0x00003000	/* gather particle ring busid */
 | |
| +#define	SAFE_PE_DMACFG_GPRPCI	0x00001000	/* PCI gather particle ring */
 | |
| +#define	SAFE_PE_DMACFG_SPRBID	0x0000c000	/* scatter part. ring busid */
 | |
| +#define	SAFE_PE_DMACFG_SPRPCI	0x00004000	/* PCI scatter part. ring */
 | |
| +#define	SAFE_PE_DMACFG_ESDESC	0x00010000	/* endian swap descriptors */
 | |
| +#define	SAFE_PE_DMACFG_ESSA	0x00020000	/* endian swap SA data */
 | |
| +#define	SAFE_PE_DMACFG_ESPACKET	0x00040000	/* endian swap packet data */
 | |
| +#define	SAFE_PE_DMACFG_ESPDESC	0x00080000	/* endian swap particle desc. */
 | |
| +#define	SAFE_PE_DMACFG_NOPDRUP	0x00100000	/* supp. PDR ownership update */
 | |
| +#define	SAFE_PD_EDMACFG_PCIMODE	0x01000000	/* PCI target mode */
 | |
| +
 | |
| +#define	SAFE_PE_DMASTAT_PEIDONE	0x00000001	/* PE core input done */
 | |
| +#define	SAFE_PE_DMASTAT_PEODONE	0x00000002	/* PE core output done */
 | |
| +#define	SAFE_PE_DMASTAT_ENCDONE	0x00000004	/* encryption done */
 | |
| +#define	SAFE_PE_DMASTAT_IHDONE	0x00000008	/* inner hash done */
 | |
| +#define	SAFE_PE_DMASTAT_OHDONE	0x00000010	/* outer hash (HMAC) done */
 | |
| +#define	SAFE_PE_DMASTAT_PADFLT	0x00000020	/* crypto pad fault */
 | |
| +#define	SAFE_PE_DMASTAT_ICVFLT	0x00000040	/* ICV fault */
 | |
| +#define	SAFE_PE_DMASTAT_SPIMIS	0x00000080	/* SPI mismatch */
 | |
| +#define	SAFE_PE_DMASTAT_CRYPTO	0x00000100	/* crypto engine timeout */
 | |
| +#define	SAFE_PE_DMASTAT_CQACT	0x00000200	/* command queue active */
 | |
| +#define	SAFE_PE_DMASTAT_IRACT	0x00000400	/* input request active */
 | |
| +#define	SAFE_PE_DMASTAT_ORACT	0x00000800	/* output request active */
 | |
| +#define	SAFE_PE_DMASTAT_PEISIZE	0x003ff000	/* PE input size:32-bit words */
 | |
| +#define	SAFE_PE_DMASTAT_PEOSIZE	0xffc00000	/* PE out. size:32-bit words */
 | |
| +
 | |
| +#define	SAFE_PE_RINGCFG_SIZE	0x000003ff	/* ring size (descriptors) */
 | |
| +#define	SAFE_PE_RINGCFG_OFFSET	0xffff0000	/* offset btw desc's (dwords) */
 | |
| +#define	SAFE_PE_RINGCFG_OFFSET_S	16
 | |
| +
 | |
| +#define	SAFE_PE_RINGPOLL_POLL	0x00000fff	/* polling frequency/divisor */
 | |
| +#define	SAFE_PE_RINGPOLL_RETRY	0x03ff0000	/* polling frequency/divisor */
 | |
| +#define	SAFE_PE_RINGPOLL_CONT	0x80000000	/* continuously poll */
 | |
| +
 | |
| +#define	SAFE_PE_IRNGSTAT_CQAVAIL 0x00000001	/* command queue available */
 | |
| +
 | |
| +#define	SAFE_PE_ERNGSTAT_NEXT	0x03ff0000	/* index of next packet desc. */
 | |
| +#define	SAFE_PE_ERNGSTAT_NEXT_S	16
 | |
| +
 | |
| +#define	SAFE_PE_IOTHRESH_INPUT	0x000003ff	/* input threshold (dwords) */
 | |
| +#define	SAFE_PE_IOTHRESH_OUTPUT	0x03ff0000	/* output threshold (dwords) */
 | |
| +
 | |
| +#define	SAFE_PE_PARTCFG_SIZE	0x0000ffff	/* scatter particle size */
 | |
| +#define	SAFE_PE_PARTCFG_GBURST	0x00030000	/* gather particle burst */
 | |
| +#define	SAFE_PE_PARTCFG_GBURST_2	0x00000000
 | |
| +#define	SAFE_PE_PARTCFG_GBURST_4	0x00010000
 | |
| +#define	SAFE_PE_PARTCFG_GBURST_8	0x00020000
 | |
| +#define	SAFE_PE_PARTCFG_GBURST_16	0x00030000
 | |
| +#define	SAFE_PE_PARTCFG_SBURST	0x000c0000	/* scatter particle burst */
 | |
| +#define	SAFE_PE_PARTCFG_SBURST_2	0x00000000
 | |
| +#define	SAFE_PE_PARTCFG_SBURST_4	0x00040000
 | |
| +#define	SAFE_PE_PARTCFG_SBURST_8	0x00080000
 | |
| +#define	SAFE_PE_PARTCFG_SBURST_16	0x000c0000
 | |
| +
 | |
| +#define	SAFE_PE_PARTSIZE_SCAT	0xffff0000	/* scatter particle ring size */
 | |
| +#define	SAFE_PE_PARTSIZE_GATH	0x0000ffff	/* gather particle ring size */
 | |
| +
 | |
| +#define	SAFE_CRYPTO_CTRL_3DES	0x00000001	/* enable 3DES support */
 | |
| +#define	SAFE_CRYPTO_CTRL_PKEY	0x00010000	/* enable public key support */
 | |
| +#define	SAFE_CRYPTO_CTRL_RNG	0x00020000	/* enable RNG support */
 | |
| +
 | |
| +#define	SAFE_DEVINFO_REV_MIN	0x0000000f	/* minor rev for chip */
 | |
| +#define	SAFE_DEVINFO_REV_MAJ	0x000000f0	/* major rev for chip */
 | |
| +#define	SAFE_DEVINFO_REV_MAJ_S	4
 | |
| +#define	SAFE_DEVINFO_DES	0x00000100	/* DES/3DES support present */
 | |
| +#define	SAFE_DEVINFO_ARC4	0x00000200	/* ARC4 support present */
 | |
| +#define	SAFE_DEVINFO_AES	0x00000400	/* AES support present */
 | |
| +#define	SAFE_DEVINFO_MD5	0x00001000	/* MD5 support present */
 | |
| +#define	SAFE_DEVINFO_SHA1	0x00002000	/* SHA-1 support present */
 | |
| +#define	SAFE_DEVINFO_RIPEMD	0x00004000	/* RIPEMD support present */
 | |
| +#define	SAFE_DEVINFO_DEFLATE	0x00010000	/* Deflate support present */
 | |
| +#define	SAFE_DEVINFO_SARAM	0x00100000	/* on-chip SA RAM present */
 | |
| +#define	SAFE_DEVINFO_EMIBUS	0x00200000	/* EMI bus present */
 | |
| +#define	SAFE_DEVINFO_PKEY	0x00400000	/* public key support present */
 | |
| +#define	SAFE_DEVINFO_RNG	0x00800000	/* RNG present */
 | |
| +
 | |
| +#define	SAFE_REV(_maj, _min)	(((_maj) << SAFE_DEVINFO_REV_MAJ_S) | (_min))
 | |
| +#define	SAFE_REV_MAJ(_chiprev) \
 | |
| +	(((_chiprev) & SAFE_DEVINFO_REV_MAJ) >> SAFE_DEVINFO_REV_MAJ_S)
 | |
| +#define	SAFE_REV_MIN(_chiprev)	((_chiprev) & SAFE_DEVINFO_REV_MIN)
 | |
| +
 | |
| +#define	SAFE_PK_FUNC_MULT	0x00000001	/* Multiply function */
 | |
| +#define	SAFE_PK_FUNC_SQUARE	0x00000004	/* Square function */
 | |
| +#define	SAFE_PK_FUNC_ADD	0x00000010	/* Add function */
 | |
| +#define	SAFE_PK_FUNC_SUB	0x00000020	/* Subtract function */
 | |
| +#define	SAFE_PK_FUNC_LSHIFT	0x00000040	/* Left-shift function */
 | |
| +#define	SAFE_PK_FUNC_RSHIFT	0x00000080	/* Right-shift function */
 | |
| +#define	SAFE_PK_FUNC_DIV	0x00000100	/* Divide function */
 | |
| +#define	SAFE_PK_FUNC_CMP	0x00000400	/* Compare function */
 | |
| +#define	SAFE_PK_FUNC_COPY	0x00000800	/* Copy function */
 | |
| +#define	SAFE_PK_FUNC_EXP16	0x00002000	/* Exponentiate (4-bit ACT) */
 | |
| +#define	SAFE_PK_FUNC_EXP4	0x00004000	/* Exponentiate (2-bit ACT) */
 | |
| +#define	SAFE_PK_FUNC_RUN	0x00008000	/* start/status */
 | |
| +
 | |
| +#define	SAFE_RNG_STAT_BUSY	0x00000001	/* busy, data not valid */
 | |
| +
 | |
| +#define	SAFE_RNG_CTRL_PRE_LFSR	0x00000001	/* enable output pre-LFSR */
 | |
| +#define	SAFE_RNG_CTRL_TST_MODE	0x00000002	/* enable test mode */
 | |
| +#define	SAFE_RNG_CTRL_TST_RUN	0x00000004	/* start test state machine */
 | |
| +#define	SAFE_RNG_CTRL_ENA_RING1	0x00000008	/* test entropy oscillator #1 */
 | |
| +#define	SAFE_RNG_CTRL_ENA_RING2	0x00000010	/* test entropy oscillator #2 */
 | |
| +#define	SAFE_RNG_CTRL_DIS_ALARM	0x00000020	/* disable RNG alarm reports */
 | |
| +#define	SAFE_RNG_CTRL_TST_CLOCK	0x00000040	/* enable test clock */
 | |
| +#define	SAFE_RNG_CTRL_SHORTEN	0x00000080	/* shorten state timers */
 | |
| +#define	SAFE_RNG_CTRL_TST_ALARM	0x00000100	/* simulate alarm state */
 | |
| +#define	SAFE_RNG_CTRL_RST_LFSR	0x00000200	/* reset LFSR */
 | |
| +
 | |
| +/*
 | |
| + * Packet engine descriptor.  Note that d_csr is a copy of the
 | |
| + * SAFE_PE_CSR register and all definitions apply, and d_len
 | |
| + * is a copy of the SAFE_PE_LEN register and all definitions apply.
 | |
| + * d_src and d_len may point directly to contiguous data or to a
 | |
| + * list of ``particle descriptors'' when using scatter/gather i/o.
 | |
| + */
 | |
| +struct safe_desc {
 | |
| +	u_int32_t	d_csr;			/* per-packet control/status */
 | |
| +	u_int32_t	d_src;			/* source address */
 | |
| +	u_int32_t	d_dst;			/* destination address */
 | |
| +	u_int32_t	d_sa;			/* SA address */
 | |
| +	u_int32_t	d_len;			/* length, bypass, status */
 | |
| +};
 | |
| +
 | |
| +/*
 | |
| + * Scatter/Gather particle descriptor.
 | |
| + *
 | |
| + * NB: scatter descriptors do not specify a size; this is fixed
 | |
| + *     by the setting of the SAFE_PE_PARTCFG register.
 | |
| + */
 | |
| +struct safe_pdesc {
 | |
| +	u_int32_t	pd_addr;		/* particle address */
 | |
| +#ifdef __BIG_ENDIAN
 | |
| +	u_int16_t	pd_flags;		/* control word */
 | |
| +	u_int16_t	pd_size;		/* particle size (bytes) */
 | |
| +#else
 | |
| +	u_int16_t	pd_flags;		/* control word */
 | |
| +	u_int16_t	pd_size;		/* particle size (bytes) */
 | |
| +#endif
 | |
| +};
 | |
| +
 | |
| +#define	SAFE_PD_READY	0x0001			/* ready for processing */
 | |
| +#define	SAFE_PD_DONE	0x0002			/* h/w completed processing */
 | |
| +
 | |
| +/*
 | |
| + * Security Association (SA) Record (Rev 1).  One of these is
 | |
| + * required for each operation processed by the packet engine.
 | |
| + */
 | |
| +struct safe_sarec {
 | |
| +	u_int32_t	sa_cmd0;
 | |
| +	u_int32_t	sa_cmd1;
 | |
| +	u_int32_t	sa_resv0;
 | |
| +	u_int32_t	sa_resv1;
 | |
| +	u_int32_t	sa_key[8];		/* DES/3DES/AES key */
 | |
| +	u_int32_t	sa_indigest[5];		/* inner digest */
 | |
| +	u_int32_t	sa_outdigest[5];	/* outer digest */
 | |
| +	u_int32_t	sa_spi;			/* SPI */
 | |
| +	u_int32_t	sa_seqnum;		/* sequence number */
 | |
| +	u_int32_t	sa_seqmask[2];		/* sequence number mask */
 | |
| +	u_int32_t	sa_resv2;
 | |
| +	u_int32_t	sa_staterec;		/* address of state record */
 | |
| +	u_int32_t	sa_resv3[2];
 | |
| +	u_int32_t	sa_samgmt0;		/* SA management field 0 */
 | |
| +	u_int32_t	sa_samgmt1;		/* SA management field 0 */
 | |
| +};
 | |
| +
 | |
| +#define	SAFE_SA_CMD0_OP		0x00000007	/* operation code */
 | |
| +#define	SAFE_SA_CMD0_OP_CRYPT	0x00000000	/* encrypt/decrypt (basic) */
 | |
| +#define	SAFE_SA_CMD0_OP_BOTH	0x00000001	/* encrypt-hash/hash-decrypto */
 | |
| +#define	SAFE_SA_CMD0_OP_HASH	0x00000003	/* hash (outbound-only) */
 | |
| +#define	SAFE_SA_CMD0_OP_ESP	0x00000000	/* ESP in/out (proto) */
 | |
| +#define	SAFE_SA_CMD0_OP_AH	0x00000001	/* AH in/out (proto) */
 | |
| +#define	SAFE_SA_CMD0_INBOUND	0x00000008	/* inbound operation */
 | |
| +#define	SAFE_SA_CMD0_OUTBOUND	0x00000000	/* outbound operation */
 | |
| +#define	SAFE_SA_CMD0_GROUP	0x00000030	/* operation group */
 | |
| +#define	SAFE_SA_CMD0_BASIC	0x00000000	/* basic operation */
 | |
| +#define	SAFE_SA_CMD0_PROTO	0x00000010	/* protocol/packet operation */
 | |
| +#define	SAFE_SA_CMD0_BUNDLE	0x00000020	/* bundled operation (resvd) */
 | |
| +#define	SAFE_SA_CMD0_PAD	0x000000c0	/* crypto pad method */
 | |
| +#define	SAFE_SA_CMD0_PAD_IPSEC	0x00000000	/* IPsec padding */
 | |
| +#define	SAFE_SA_CMD0_PAD_PKCS7	0x00000040	/* PKCS#7 padding */
 | |
| +#define	SAFE_SA_CMD0_PAD_CONS	0x00000080	/* constant padding */
 | |
| +#define	SAFE_SA_CMD0_PAD_ZERO	0x000000c0	/* zero padding */
 | |
| +#define	SAFE_SA_CMD0_CRYPT_ALG	0x00000f00	/* symmetric crypto algorithm */
 | |
| +#define	SAFE_SA_CMD0_DES	0x00000000	/* DES crypto algorithm */
 | |
| +#define	SAFE_SA_CMD0_3DES	0x00000100	/* 3DES crypto algorithm */
 | |
| +#define	SAFE_SA_CMD0_AES	0x00000300	/* AES crypto algorithm */
 | |
| +#define	SAFE_SA_CMD0_CRYPT_NULL	0x00000f00	/* null crypto algorithm */
 | |
| +#define	SAFE_SA_CMD0_HASH_ALG	0x0000f000	/* hash algorithm */
 | |
| +#define	SAFE_SA_CMD0_MD5	0x00000000	/* MD5 hash algorithm */
 | |
| +#define	SAFE_SA_CMD0_SHA1	0x00001000	/* SHA-1 hash algorithm */
 | |
| +#define	SAFE_SA_CMD0_HASH_NULL	0x0000f000	/* null hash algorithm */
 | |
| +#define	SAFE_SA_CMD0_HDR_PROC	0x00080000	/* header processing */
 | |
| +#define	SAFE_SA_CMD0_IBUSID	0x00300000	/* input bus id */
 | |
| +#define	SAFE_SA_CMD0_IPCI	0x00100000	/* PCI input bus id */
 | |
| +#define	SAFE_SA_CMD0_OBUSID	0x00c00000	/* output bus id */
 | |
| +#define	SAFE_SA_CMD0_OPCI	0x00400000	/* PCI output bus id */
 | |
| +#define	SAFE_SA_CMD0_IVLD	0x03000000	/* IV loading */
 | |
| +#define	SAFE_SA_CMD0_IVLD_NONE	0x00000000	/* IV no load (reuse) */
 | |
| +#define	SAFE_SA_CMD0_IVLD_IBUF	0x01000000	/* IV load from input buffer */
 | |
| +#define	SAFE_SA_CMD0_IVLD_STATE	0x02000000	/* IV load from state */
 | |
| +#define	SAFE_SA_CMD0_HSLD	0x0c000000	/* hash state loading */
 | |
| +#define	SAFE_SA_CMD0_HSLD_SA	0x00000000	/* hash state load from SA */
 | |
| +#define	SAFE_SA_CMD0_HSLD_STATE	0x08000000	/* hash state load from state */
 | |
| +#define	SAFE_SA_CMD0_HSLD_NONE	0x0c000000	/* hash state no load */
 | |
| +#define	SAFE_SA_CMD0_SAVEIV	0x10000000	/* save IV */
 | |
| +#define	SAFE_SA_CMD0_SAVEHASH	0x20000000	/* save hash state */
 | |
| +#define	SAFE_SA_CMD0_IGATHER	0x40000000	/* input gather */
 | |
| +#define	SAFE_SA_CMD0_OSCATTER	0x80000000	/* output scatter */
 | |
| +
 | |
| +#define	SAFE_SA_CMD1_HDRCOPY	0x00000002	/* copy header to output */
 | |
| +#define	SAFE_SA_CMD1_PAYCOPY	0x00000004	/* copy payload to output */
 | |
| +#define	SAFE_SA_CMD1_PADCOPY	0x00000008	/* copy pad to output */
 | |
| +#define	SAFE_SA_CMD1_IPV4	0x00000000	/* IPv4 protocol */
 | |
| +#define	SAFE_SA_CMD1_IPV6	0x00000010	/* IPv6 protocol */
 | |
| +#define	SAFE_SA_CMD1_MUTABLE	0x00000020	/* mutable bit processing */
 | |
| +#define	SAFE_SA_CMD1_SRBUSID	0x000000c0	/* state record bus id */
 | |
| +#define	SAFE_SA_CMD1_SRPCI	0x00000040	/* state record from PCI */
 | |
| +#define	SAFE_SA_CMD1_CRMODE	0x00000300	/* crypto mode */
 | |
| +#define	SAFE_SA_CMD1_ECB	0x00000000	/* ECB crypto mode */
 | |
| +#define	SAFE_SA_CMD1_CBC	0x00000100	/* CBC crypto mode */
 | |
| +#define	SAFE_SA_CMD1_OFB	0x00000200	/* OFB crypto mode */
 | |
| +#define	SAFE_SA_CMD1_CFB	0x00000300	/* CFB crypto mode */
 | |
| +#define	SAFE_SA_CMD1_CRFEEDBACK	0x00000c00	/* crypto feedback mode */
 | |
| +#define	SAFE_SA_CMD1_64BIT	0x00000000	/* 64-bit crypto feedback */
 | |
| +#define	SAFE_SA_CMD1_8BIT	0x00000400	/* 8-bit crypto feedback */
 | |
| +#define	SAFE_SA_CMD1_1BIT	0x00000800	/* 1-bit crypto feedback */
 | |
| +#define	SAFE_SA_CMD1_128BIT	0x00000c00	/* 128-bit crypto feedback */
 | |
| +#define	SAFE_SA_CMD1_OPTIONS	0x00001000	/* HMAC/options mutable bit */
 | |
| +#define	SAFE_SA_CMD1_HMAC	SAFE_SA_CMD1_OPTIONS
 | |
| +#define	SAFE_SA_CMD1_SAREV1	0x00008000	/* SA Revision 1 */
 | |
| +#define	SAFE_SA_CMD1_OFFSET	0x00ff0000	/* hash/crypto offset(dwords) */
 | |
| +#define	SAFE_SA_CMD1_OFFSET_S	16
 | |
| +#define	SAFE_SA_CMD1_AESKEYLEN	0x0f000000	/* AES key length */
 | |
| +#define	SAFE_SA_CMD1_AES128	0x02000000	/* 128-bit AES key */
 | |
| +#define	SAFE_SA_CMD1_AES192	0x03000000	/* 192-bit AES key */
 | |
| +#define	SAFE_SA_CMD1_AES256	0x04000000	/* 256-bit AES key */
 | |
| +
 | |
| +/*
 | |
| + * Security Associate State Record (Rev 1).
 | |
| + */
 | |
| +struct safe_sastate {
 | |
| +	u_int32_t	sa_saved_iv[4];		/* saved IV (DES/3DES/AES) */
 | |
| +	u_int32_t	sa_saved_hashbc;	/* saved hash byte count */
 | |
| +	u_int32_t	sa_saved_indigest[5];	/* saved inner digest */
 | |
| +};
 | |
| +#endif /* _SAFE_SAFEREG_H_ */
 | |
| --- /dev/null
 | |
| +++ b/crypto/ocf/safe/safevar.h
 | |
| @@ -0,0 +1,230 @@
 | |
| +/*-
 | |
| + * The linux port of this code done by David McCullough
 | |
| + * Copyright (C) 2004-2007 David McCullough <david_mccullough@securecomputing.com>
 | |
| + * The license and original author are listed below.
 | |
| + *
 | |
| + * Copyright (c) 2003 Sam Leffler, Errno Consulting
 | |
| + * Copyright (c) 2003 Global Technology Associates, Inc.
 | |
| + * All rights reserved.
 | |
| + *
 | |
| + * Redistribution and use in source and binary forms, with or without
 | |
| + * modification, are permitted provided that the following conditions
 | |
| + * are met:
 | |
| + * 1. Redistributions of source code must retain the above copyright
 | |
| + *    notice, this list of conditions and the following disclaimer.
 | |
| + * 2. Redistributions in binary form must reproduce the above copyright
 | |
| + *    notice, this list of conditions and the following disclaimer in the
 | |
| + *    documentation and/or other materials provided with the distribution.
 | |
| + *
 | |
| + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 | |
| + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 | |
| + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 | |
| + * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 | |
| + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 | |
| + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 | |
| + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 | |
| + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 | |
| + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 | |
| + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 | |
| + * SUCH DAMAGE.
 | |
| + *
 | |
| + * $FreeBSD: src/sys/dev/safe/safevar.h,v 1.2 2006/05/17 18:34:26 pjd Exp $
 | |
| + */
 | |
| +#ifndef _SAFE_SAFEVAR_H_
 | |
| +#define	_SAFE_SAFEVAR_H_
 | |
| +
 | |
| +/* Maximum queue length */
 | |
| +#ifndef SAFE_MAX_NQUEUE
 | |
| +#define SAFE_MAX_NQUEUE	60
 | |
| +#endif
 | |
| +
 | |
| +#define	SAFE_MAX_PART		64	/* Maximum scatter/gather depth */
 | |
| +#define	SAFE_DMA_BOUNDARY	0	/* No boundary for source DMA ops */
 | |
| +#define	SAFE_MAX_DSIZE		2048 /* MCLBYTES Fixed scatter particle size */
 | |
| +#define	SAFE_MAX_SSIZE		0x0ffff	/* Maximum gather particle size */
 | |
| +#define	SAFE_MAX_DMA		0xfffff	/* Maximum PE operand size (20 bits) */
 | |
| +/* total src+dst particle descriptors */
 | |
| +#define	SAFE_TOTAL_DPART	(SAFE_MAX_NQUEUE * SAFE_MAX_PART)
 | |
| +#define	SAFE_TOTAL_SPART	(SAFE_MAX_NQUEUE * SAFE_MAX_PART)
 | |
| +
 | |
| +#define	SAFE_RNG_MAXBUFSIZ	128	/* 32-bit words */
 | |
| +
 | |
| +#define	SAFE_CARD(sid)		(((sid) & 0xf0000000) >> 28)
 | |
| +#define	SAFE_SESSION(sid)	( (sid) & 0x0fffffff)
 | |
| +#define	SAFE_SID(crd, sesn)	(((crd) << 28) | ((sesn) & 0x0fffffff))
 | |
| +
 | |
| +#define SAFE_DEF_RTY		0xff	/* PCI Retry Timeout */
 | |
| +#define SAFE_DEF_TOUT		0xff	/* PCI TRDY Timeout */
 | |
| +#define SAFE_DEF_CACHELINE	0x01	/* Cache Line setting */
 | |
| +
 | |
| +#ifdef __KERNEL__
 | |
| +/*
 | |
| + * State associated with the allocation of each chunk
 | |
| + * of memory setup for DMA.
 | |
| + */
 | |
| +struct safe_dma_alloc {
 | |
| +	dma_addr_t		dma_paddr;
 | |
| +	void			*dma_vaddr;
 | |
| +};
 | |
| +
 | |
| +/*
 | |
| + * Cryptographic operand state.  One of these exists for each
 | |
| + * source and destination operand passed in from the crypto
 | |
| + * subsystem.  When possible source and destination operands
 | |
| + * refer to the same memory.  More often they are distinct.
 | |
| + * We track the virtual address of each operand as well as
 | |
| + * where each is mapped for DMA.
 | |
| + */
 | |
| +struct safe_operand {
 | |
| +	union {
 | |
| +		struct sk_buff *skb;
 | |
| +		struct uio *io;
 | |
| +	} u;
 | |
| +	void			*map;
 | |
| +	int				mapsize;	/* total number of bytes in segs */
 | |
| +	struct {
 | |
| +		dma_addr_t	ds_addr;
 | |
| +		int			ds_len;
 | |
| +		int			ds_tlen;
 | |
| +	} segs[SAFE_MAX_PART];
 | |
| +	int				nsegs;
 | |
| +};
 | |
| +
 | |
| +/*
 | |
| + * Packet engine ring entry and cryptographic operation state.
 | |
| + * The packet engine requires a ring of descriptors that contain
 | |
| + * pointers to various cryptographic state.  However the ring
 | |
| + * configuration register allows you to specify an arbitrary size
 | |
| + * for ring entries.  We use this feature to collect most of the
 | |
| + * state for each cryptographic request into one spot.  Other than
 | |
| + * ring entries only the ``particle descriptors'' (scatter/gather
 | |
| + * lists) and the actual operand data are kept separate.  The
 | |
| + * particle descriptors must also be organized in rings.  The
 | |
| + * operand data can be located aribtrarily (modulo alignment constraints).
 | |
| + *
 | |
| + * Note that the descriptor ring is mapped onto the PCI bus so
 | |
| + * the hardware can DMA data.  This means the entire ring must be
 | |
| + * contiguous.
 | |
| + */
 | |
| +struct safe_ringentry {
 | |
| +	struct safe_desc	re_desc;	/* command descriptor */
 | |
| +	struct safe_sarec	re_sa;		/* SA record */
 | |
| +	struct safe_sastate	re_sastate;	/* SA state record */
 | |
| +
 | |
| +	struct cryptop		*re_crp;	/* crypto operation */
 | |
| +
 | |
| +	struct safe_operand	re_src;		/* source operand */
 | |
| +	struct safe_operand	re_dst;		/* destination operand */
 | |
| +
 | |
| +	int			re_sesn;	/* crypto session ID */
 | |
| +	int			re_flags;
 | |
| +#define	SAFE_QFLAGS_COPYOUTIV	0x1		/* copy back on completion */
 | |
| +#define	SAFE_QFLAGS_COPYOUTICV	0x2		/* copy back on completion */
 | |
| +};
 | |
| +
 | |
| +#define	re_src_skb	re_src.u.skb
 | |
| +#define	re_src_io	re_src.u.io
 | |
| +#define	re_src_map	re_src.map
 | |
| +#define	re_src_nsegs	re_src.nsegs
 | |
| +#define	re_src_segs	re_src.segs
 | |
| +#define	re_src_mapsize	re_src.mapsize
 | |
| +
 | |
| +#define	re_dst_skb	re_dst.u.skb
 | |
| +#define	re_dst_io	re_dst.u.io
 | |
| +#define	re_dst_map	re_dst.map
 | |
| +#define	re_dst_nsegs	re_dst.nsegs
 | |
| +#define	re_dst_segs	re_dst.segs
 | |
| +#define	re_dst_mapsize	re_dst.mapsize
 | |
| +
 | |
| +struct rndstate_test;
 | |
| +
 | |
| +struct safe_session {
 | |
| +	u_int32_t	ses_used;
 | |
| +	u_int32_t	ses_klen;		/* key length in bits */
 | |
| +	u_int32_t	ses_key[8];		/* DES/3DES/AES key */
 | |
| +	u_int32_t	ses_mlen;		/* hmac length in bytes */
 | |
| +	u_int32_t	ses_hminner[5];		/* hmac inner state */
 | |
| +	u_int32_t	ses_hmouter[5];		/* hmac outer state */
 | |
| +	u_int32_t	ses_iv[4];		/* DES/3DES/AES iv */
 | |
| +};
 | |
| +
 | |
| +struct safe_pkq {
 | |
| +	struct list_head	pkq_list;
 | |
| +	struct cryptkop		*pkq_krp;
 | |
| +};
 | |
| +
 | |
| +struct safe_softc {
 | |
| +	softc_device_decl	sc_dev;
 | |
| +	u32			sc_irq;
 | |
| +
 | |
| +	struct pci_dev		*sc_pcidev;
 | |
| +	ocf_iomem_t		sc_base_addr;
 | |
| +
 | |
| +	u_int			sc_chiprev;	/* major/minor chip revision */
 | |
| +	int			sc_flags;	/* device specific flags */
 | |
| +#define	SAFE_FLAGS_KEY		0x01		/* has key accelerator */
 | |
| +#define	SAFE_FLAGS_RNG		0x02		/* hardware rng */
 | |
| +	int			sc_suspended;
 | |
| +	int			sc_needwakeup;	/* notify crypto layer */
 | |
| +	int32_t			sc_cid;		/* crypto tag */
 | |
| +
 | |
| +	struct safe_dma_alloc	sc_ringalloc;	/* PE ring allocation state */
 | |
| +	struct safe_ringentry	*sc_ring;	/* PE ring */
 | |
| +	struct safe_ringentry	*sc_ringtop;	/* PE ring top */
 | |
| +	struct safe_ringentry	*sc_front;	/* next free entry */
 | |
| +	struct safe_ringentry	*sc_back;	/* next pending entry */
 | |
| +	int			sc_nqchip;	/* # passed to chip */
 | |
| +	spinlock_t		sc_ringmtx;	/* PE ring lock */
 | |
| +	struct safe_pdesc	*sc_spring;	/* src particle ring */
 | |
| +	struct safe_pdesc	*sc_springtop;	/* src particle ring top */
 | |
| +	struct safe_pdesc	*sc_spfree;	/* next free src particle */
 | |
| +	struct safe_dma_alloc	sc_spalloc;	/* src particle ring state */
 | |
| +	struct safe_pdesc	*sc_dpring;	/* dest particle ring */
 | |
| +	struct safe_pdesc	*sc_dpringtop;	/* dest particle ring top */
 | |
| +	struct safe_pdesc	*sc_dpfree;	/* next free dest particle */
 | |
| +	struct safe_dma_alloc	sc_dpalloc;	/* dst particle ring state */
 | |
| +	int			sc_nsessions;	/* # of sessions */
 | |
| +	struct safe_session	*sc_sessions;	/* sessions */
 | |
| +
 | |
| +	struct timer_list	sc_pkto;	/* PK polling */
 | |
| +	spinlock_t		sc_pkmtx;	/* PK lock */
 | |
| +	struct list_head	sc_pkq;		/* queue of PK requests */
 | |
| +	struct safe_pkq		*sc_pkq_cur;	/* current processing request */
 | |
| +	u_int32_t		sc_pk_reslen, sc_pk_resoff;
 | |
| +
 | |
| +	int			sc_max_dsize;	/* maximum safe DMA size */
 | |
| +};
 | |
| +#endif /* __KERNEL__ */
 | |
| +
 | |
| +struct safe_stats {
 | |
| +	u_int64_t st_ibytes;
 | |
| +	u_int64_t st_obytes;
 | |
| +	u_int32_t st_ipackets;
 | |
| +	u_int32_t st_opackets;
 | |
| +	u_int32_t st_invalid;		/* invalid argument */
 | |
| +	u_int32_t st_badsession;	/* invalid session id */
 | |
| +	u_int32_t st_badflags;		/* flags indicate !(mbuf | uio) */
 | |
| +	u_int32_t st_nodesc;		/* op submitted w/o descriptors */
 | |
| +	u_int32_t st_badalg;		/* unsupported algorithm */
 | |
| +	u_int32_t st_ringfull;		/* PE descriptor ring full */
 | |
| +	u_int32_t st_peoperr;		/* PE marked error */
 | |
| +	u_int32_t st_dmaerr;		/* PE DMA error */
 | |
| +	u_int32_t st_bypasstoobig;	/* bypass > 96 bytes */
 | |
| +	u_int32_t st_skipmismatch;	/* enc part begins before auth part */
 | |
| +	u_int32_t st_lenmismatch;	/* enc length different auth length */
 | |
| +	u_int32_t st_coffmisaligned;	/* crypto offset not 32-bit aligned */
 | |
| +	u_int32_t st_cofftoobig;	/* crypto offset > 255 words */
 | |
| +	u_int32_t st_iovmisaligned;	/* iov op not aligned */
 | |
| +	u_int32_t st_iovnotuniform;	/* iov op not suitable */
 | |
| +	u_int32_t st_unaligned;		/* unaligned src caused copy */
 | |
| +	u_int32_t st_notuniform;	/* non-uniform src caused copy */
 | |
| +	u_int32_t st_nomap;		/* bus_dmamap_create failed */
 | |
| +	u_int32_t st_noload;		/* bus_dmamap_load_* failed */
 | |
| +	u_int32_t st_nombuf;		/* MGET* failed */
 | |
| +	u_int32_t st_nomcl;		/* MCLGET* failed */
 | |
| +	u_int32_t st_maxqchip;		/* max mcr1 ops out for processing */
 | |
| +	u_int32_t st_rng;		/* RNG requests */
 | |
| +	u_int32_t st_rngalarm;		/* RNG alarm requests */
 | |
| +	u_int32_t st_noicvcopy;		/* ICV data copies suppressed */
 | |
| +};
 | |
| +#endif /* _SAFE_SAFEVAR_H_ */
 | |
| --- /dev/null
 | |
| +++ b/crypto/ocf/crypto.c
 | |
| @@ -0,0 +1,1741 @@
 | |
| +/*-
 | |
| + * Linux port done by David McCullough <david_mccullough@securecomputing.com>
 | |
| + * Copyright (C) 2006-2007 David McCullough
 | |
| + * Copyright (C) 2004-2005 Intel Corporation.
 | |
| + * The license and original author are listed below.
 | |
| + *
 | |
| + * Redistribution and use in source and binary forms, with or without
 | |
| + * Copyright (c) 2002-2006 Sam Leffler.  All rights reserved.
 | |
| + *
 | |
| + * modification, are permitted provided that the following conditions
 | |
| + * are met:
 | |
| + * 1. Redistributions of source code must retain the above copyright
 | |
| + *    notice, this list of conditions and the following disclaimer.
 | |
| + * 2. Redistributions in binary form must reproduce the above copyright
 | |
| + *    notice, this list of conditions and the following disclaimer in the
 | |
| + *    documentation and/or other materials provided with the distribution.
 | |
| + *
 | |
| + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 | |
| + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 | |
| + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 | |
| + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 | |
| + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 | |
| + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 | |
| + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 | |
| + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 | |
| + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 | |
| + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 | |
| + */
 | |
| +
 | |
| +#if 0
 | |
| +#include <sys/cdefs.h>
 | |
| +__FBSDID("$FreeBSD: src/sys/opencrypto/crypto.c,v 1.27 2007/03/21 03:42:51 sam Exp $");
 | |
| +#endif
 | |
| +
 | |
| +/*
 | |
| + * Cryptographic Subsystem.
 | |
| + *
 | |
| + * This code is derived from the Openbsd Cryptographic Framework (OCF)
 | |
| + * that has the copyright shown below.  Very little of the original
 | |
| + * code remains.
 | |
| + */
 | |
| +/*-
 | |
| + * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 | |
| + *
 | |
| + * This code was written by Angelos D. Keromytis in Athens, Greece, in
 | |
| + * February 2000. Network Security Technologies Inc. (NSTI) kindly
 | |
| + * supported the development of this code.
 | |
| + *
 | |
| + * Copyright (c) 2000, 2001 Angelos D. Keromytis
 | |
| + *
 | |
| + * Permission to use, copy, and modify this software with or without fee
 | |
| + * is hereby granted, provided that this entire notice is included in
 | |
| + * all source code copies of any software which is or includes a copy or
 | |
| + * modification of this software.
 | |
| + *
 | |
| + * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 | |
| + * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 | |
| + * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 | |
| + * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 | |
| + * PURPOSE.
 | |
| + *
 | |
| +__FBSDID("$FreeBSD: src/sys/opencrypto/crypto.c,v 1.16 2005/01/07 02:29:16 imp Exp $");
 | |
| + */
 | |
| +
 | |
| +
 | |
| +#ifndef AUTOCONF_INCLUDED
 | |
| +#include <linux/config.h>
 | |
| +#endif
 | |
| +#include <linux/module.h>
 | |
| +#include <linux/init.h>
 | |
| +#include <linux/list.h>
 | |
| +#include <linux/slab.h>
 | |
| +#include <linux/wait.h>
 | |
| +#include <linux/sched.h>
 | |
| +#include <linux/spinlock.h>
 | |
| +#include <linux/version.h>
 | |
| +#include <cryptodev.h>
 | |
| +
 | |
| +/*
 | |
| + * keep track of whether or not we have been initialised, a big
 | |
| + * issue if we are linked into the kernel and a driver gets started before
 | |
| + * us
 | |
| + */
 | |
| +static int crypto_initted = 0;
 | |
| +
 | |
| +/*
 | |
| + * Crypto drivers register themselves by allocating a slot in the
 | |
| + * crypto_drivers table with crypto_get_driverid() and then registering
 | |
| + * each algorithm they support with crypto_register() and crypto_kregister().
 | |
| + */
 | |
| +
 | |
| +/*
 | |
| + * lock on driver table
 | |
| + * we track its state as spin_is_locked does not do anything on non-SMP boxes
 | |
| + */
 | |
| +static spinlock_t	crypto_drivers_lock;
 | |
| +static int			crypto_drivers_locked;		/* for non-SMP boxes */
 | |
| +
 | |
| +#define	CRYPTO_DRIVER_LOCK() \
 | |
| +			({ \
 | |
| +				spin_lock_irqsave(&crypto_drivers_lock, d_flags); \
 | |
| +			 	crypto_drivers_locked = 1; \
 | |
| +				dprintk("%s,%d: DRIVER_LOCK()\n", __FILE__, __LINE__); \
 | |
| +			 })
 | |
| +#define	CRYPTO_DRIVER_UNLOCK() \
 | |
| +			({ \
 | |
| +			 	dprintk("%s,%d: DRIVER_UNLOCK()\n", __FILE__, __LINE__); \
 | |
| +			 	crypto_drivers_locked = 0; \
 | |
| +				spin_unlock_irqrestore(&crypto_drivers_lock, d_flags); \
 | |
| +			 })
 | |
| +#define	CRYPTO_DRIVER_ASSERT() \
 | |
| +			({ \
 | |
| +			 	if (!crypto_drivers_locked) { \
 | |
| +					dprintk("%s,%d: DRIVER_ASSERT!\n", __FILE__, __LINE__); \
 | |
| +			 	} \
 | |
| +			 })
 | |
| +
 | |
| +/*
 | |
| + * Crypto device/driver capabilities structure.
 | |
| + *
 | |
| + * Synchronization:
 | |
| + * (d) - protected by CRYPTO_DRIVER_LOCK()
 | |
| + * (q) - protected by CRYPTO_Q_LOCK()
 | |
| + * Not tagged fields are read-only.
 | |
| + */
 | |
| +struct cryptocap {
 | |
| +	device_t	cc_dev;			/* (d) device/driver */
 | |
| +	u_int32_t	cc_sessions;		/* (d) # of sessions */
 | |
| +	u_int32_t	cc_koperations;		/* (d) # os asym operations */
 | |
| +	/*
 | |
| +	 * Largest possible operator length (in bits) for each type of
 | |
| +	 * encryption algorithm. XXX not used
 | |
| +	 */
 | |
| +	u_int16_t	cc_max_op_len[CRYPTO_ALGORITHM_MAX + 1];
 | |
| +	u_int8_t	cc_alg[CRYPTO_ALGORITHM_MAX + 1];
 | |
| +	u_int8_t	cc_kalg[CRK_ALGORITHM_MAX + 1];
 | |
| +
 | |
| +	int		cc_flags;		/* (d) flags */
 | |
| +#define CRYPTOCAP_F_CLEANUP	0x80000000	/* needs resource cleanup */
 | |
| +	int		cc_qblocked;		/* (q) symmetric q blocked */
 | |
| +	int		cc_kqblocked;		/* (q) asymmetric q blocked */
 | |
| +};
 | |
| +static struct cryptocap *crypto_drivers = NULL;
 | |
| +static int crypto_drivers_num = 0;
 | |
| +
 | |
| +/*
 | |
| + * There are two queues for crypto requests; one for symmetric (e.g.
 | |
| + * cipher) operations and one for asymmetric (e.g. MOD)operations.
 | |
| + * A single mutex is used to lock access to both queues.  We could
 | |
| + * have one per-queue but having one simplifies handling of block/unblock
 | |
| + * operations.
 | |
| + */
 | |
| +static	int crp_sleep = 0;
 | |
| +static LIST_HEAD(crp_q);		/* request queues */
 | |
| +static LIST_HEAD(crp_kq);
 | |
| +
 | |
| +static spinlock_t crypto_q_lock;
 | |
| +
 | |
| +int crypto_all_qblocked = 0;  /* protect with Q_LOCK */
 | |
| +module_param(crypto_all_qblocked, int, 0444);
 | |
| +MODULE_PARM_DESC(crypto_all_qblocked, "Are all crypto queues blocked");
 | |
| +
 | |
| +int crypto_all_kqblocked = 0; /* protect with Q_LOCK */
 | |
| +module_param(crypto_all_kqblocked, int, 0444);
 | |
| +MODULE_PARM_DESC(crypto_all_kqblocked, "Are all asym crypto queues blocked");
 | |
| +
 | |
| +#define	CRYPTO_Q_LOCK() \
 | |
| +			({ \
 | |
| +				spin_lock_irqsave(&crypto_q_lock, q_flags); \
 | |
| +			 	dprintk("%s,%d: Q_LOCK()\n", __FILE__, __LINE__); \
 | |
| +			 })
 | |
| +#define	CRYPTO_Q_UNLOCK() \
 | |
| +			({ \
 | |
| +			 	dprintk("%s,%d: Q_UNLOCK()\n", __FILE__, __LINE__); \
 | |
| +				spin_unlock_irqrestore(&crypto_q_lock, q_flags); \
 | |
| +			 })
 | |
| +
 | |
| +/*
 | |
| + * There are two queues for processing completed crypto requests; one
 | |
| + * for the symmetric and one for the asymmetric ops.  We only need one
 | |
| + * but have two to avoid type futzing (cryptop vs. cryptkop).  A single
 | |
| + * mutex is used to lock access to both queues.  Note that this lock
 | |
| + * must be separate from the lock on request queues to insure driver
 | |
| + * callbacks don't generate lock order reversals.
 | |
| + */
 | |
| +static LIST_HEAD(crp_ret_q);		/* callback queues */
 | |
| +static LIST_HEAD(crp_ret_kq);
 | |
| +
 | |
| +static spinlock_t crypto_ret_q_lock;
 | |
| +#define	CRYPTO_RETQ_LOCK() \
 | |
| +			({ \
 | |
| +				spin_lock_irqsave(&crypto_ret_q_lock, r_flags); \
 | |
| +				dprintk("%s,%d: RETQ_LOCK\n", __FILE__, __LINE__); \
 | |
| +			 })
 | |
| +#define	CRYPTO_RETQ_UNLOCK() \
 | |
| +			({ \
 | |
| +			 	dprintk("%s,%d: RETQ_UNLOCK\n", __FILE__, __LINE__); \
 | |
| +				spin_unlock_irqrestore(&crypto_ret_q_lock, r_flags); \
 | |
| +			 })
 | |
| +#define	CRYPTO_RETQ_EMPTY()	(list_empty(&crp_ret_q) && list_empty(&crp_ret_kq))
 | |
| +
 | |
| +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
 | |
| +static kmem_cache_t *cryptop_zone;
 | |
| +static kmem_cache_t *cryptodesc_zone;
 | |
| +#else
 | |
| +static struct kmem_cache *cryptop_zone;
 | |
| +static struct kmem_cache *cryptodesc_zone;
 | |
| +#endif
 | |
| +
 | |
| +#define debug crypto_debug
 | |
| +int crypto_debug = 0;
 | |
| +module_param(crypto_debug, int, 0644);
 | |
| +MODULE_PARM_DESC(crypto_debug, "Enable debug");
 | |
| +EXPORT_SYMBOL(crypto_debug);
 | |
| +
 | |
| +/*
 | |
| + * Maximum number of outstanding crypto requests before we start
 | |
| + * failing requests.  We need this to prevent DOS when too many
 | |
| + * requests are arriving for us to keep up.  Otherwise we will
 | |
| + * run the system out of memory.  Since crypto is slow,  we are
 | |
| + * usually the bottleneck that needs to say, enough is enough.
 | |
| + *
 | |
| + * We cannot print errors when this condition occurs,  we are already too
 | |
| + * slow,  printing anything will just kill us
 | |
| + */
 | |
| +
 | |
| +static int crypto_q_cnt = 0;
 | |
| +module_param(crypto_q_cnt, int, 0444);
 | |
| +MODULE_PARM_DESC(crypto_q_cnt,
 | |
| +		"Current number of outstanding crypto requests");
 | |
| +
 | |
| +static int crypto_q_max = 1000;
 | |
| +module_param(crypto_q_max, int, 0644);
 | |
| +MODULE_PARM_DESC(crypto_q_max,
 | |
| +		"Maximum number of outstanding crypto requests");
 | |
| +
 | |
| +#define bootverbose crypto_verbose
 | |
| +static int crypto_verbose = 0;
 | |
| +module_param(crypto_verbose, int, 0644);
 | |
| +MODULE_PARM_DESC(crypto_verbose,
 | |
| +		"Enable verbose crypto startup");
 | |
| +
 | |
| +int	crypto_usercrypto = 1;	/* userland may do crypto reqs */
 | |
| +module_param(crypto_usercrypto, int, 0644);
 | |
| +MODULE_PARM_DESC(crypto_usercrypto,
 | |
| +	   "Enable/disable user-mode access to crypto support");
 | |
| +
 | |
| +int	crypto_userasymcrypto = 1;	/* userland may do asym crypto reqs */
 | |
| +module_param(crypto_userasymcrypto, int, 0644);
 | |
| +MODULE_PARM_DESC(crypto_userasymcrypto,
 | |
| +	   "Enable/disable user-mode access to asymmetric crypto support");
 | |
| +
 | |
| +int	crypto_devallowsoft = 0;	/* only use hardware crypto */
 | |
| +module_param(crypto_devallowsoft, int, 0644);
 | |
| +MODULE_PARM_DESC(crypto_devallowsoft,
 | |
| +	   "Enable/disable use of software crypto support");
 | |
| +
 | |
| +static pid_t	cryptoproc = (pid_t) -1;
 | |
| +static struct	completion cryptoproc_exited;
 | |
| +static DECLARE_WAIT_QUEUE_HEAD(cryptoproc_wait);
 | |
| +static pid_t	cryptoretproc = (pid_t) -1;
 | |
| +static struct	completion cryptoretproc_exited;
 | |
| +static DECLARE_WAIT_QUEUE_HEAD(cryptoretproc_wait);
 | |
| +
 | |
| +static	int crypto_proc(void *arg);
 | |
| +static	int crypto_ret_proc(void *arg);
 | |
| +static	int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint);
 | |
| +static	int crypto_kinvoke(struct cryptkop *krp, int flags);
 | |
| +static	void crypto_exit(void);
 | |
| +static  int crypto_init(void);
 | |
| +
 | |
| +static	struct cryptostats cryptostats;
 | |
| +
 | |
| +static struct cryptocap *
 | |
| +crypto_checkdriver(u_int32_t hid)
 | |
| +{
 | |
| +	if (crypto_drivers == NULL)
 | |
| +		return NULL;
 | |
| +	return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Compare a driver's list of supported algorithms against another
 | |
| + * list; return non-zero if all algorithms are supported.
 | |
| + */
 | |
| +static int
 | |
| +driver_suitable(const struct cryptocap *cap, const struct cryptoini *cri)
 | |
| +{
 | |
| +	const struct cryptoini *cr;
 | |
| +
 | |
| +	/* See if all the algorithms are supported. */
 | |
| +	for (cr = cri; cr; cr = cr->cri_next)
 | |
| +		if (cap->cc_alg[cr->cri_alg] == 0)
 | |
| +			return 0;
 | |
| +	return 1;
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Select a driver for a new session that supports the specified
 | |
| + * algorithms and, optionally, is constrained according to the flags.
 | |
| + * The algorithm we use here is pretty stupid; just use the
 | |
| + * first driver that supports all the algorithms we need. If there
 | |
| + * are multiple drivers we choose the driver with the fewest active
 | |
| + * sessions.  We prefer hardware-backed drivers to software ones.
 | |
| + *
 | |
| + * XXX We need more smarts here (in real life too, but that's
 | |
| + * XXX another story altogether).
 | |
| + */
 | |
| +static struct cryptocap *
 | |
| +crypto_select_driver(const struct cryptoini *cri, int flags)
 | |
| +{
 | |
| +	struct cryptocap *cap, *best;
 | |
| +	int match, hid;
 | |
| +
 | |
| +	CRYPTO_DRIVER_ASSERT();
 | |
| +
 | |
| +	/*
 | |
| +	 * Look first for hardware crypto devices if permitted.
 | |
| +	 */
 | |
| +	if (flags & CRYPTOCAP_F_HARDWARE)
 | |
| +		match = CRYPTOCAP_F_HARDWARE;
 | |
| +	else
 | |
| +		match = CRYPTOCAP_F_SOFTWARE;
 | |
| +	best = NULL;
 | |
| +again:
 | |
| +	for (hid = 0; hid < crypto_drivers_num; hid++) {
 | |
| +		cap = &crypto_drivers[hid];
 | |
| +		/*
 | |
| +		 * If it's not initialized, is in the process of
 | |
| +		 * going away, or is not appropriate (hardware
 | |
| +		 * or software based on match), then skip.
 | |
| +		 */
 | |
| +		if (cap->cc_dev == NULL ||
 | |
| +		    (cap->cc_flags & CRYPTOCAP_F_CLEANUP) ||
 | |
| +		    (cap->cc_flags & match) == 0)
 | |
| +			continue;
 | |
| +
 | |
| +		/* verify all the algorithms are supported. */
 | |
| +		if (driver_suitable(cap, cri)) {
 | |
| +			if (best == NULL ||
 | |
| +			    cap->cc_sessions < best->cc_sessions)
 | |
| +				best = cap;
 | |
| +		}
 | |
| +	}
 | |
| +	if (best != NULL)
 | |
| +		return best;
 | |
| +	if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) {
 | |
| +		/* sort of an Algol 68-style for loop */
 | |
| +		match = CRYPTOCAP_F_SOFTWARE;
 | |
| +		goto again;
 | |
| +	}
 | |
| +	return best;
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Create a new session.  The crid argument specifies a crypto
 | |
| + * driver to use or constraints on a driver to select (hardware
 | |
| + * only, software only, either).  Whatever driver is selected
 | |
| + * must be capable of the requested crypto algorithms.
 | |
| + */
 | |
| +int
 | |
| +crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int crid)
 | |
| +{
 | |
| +	struct cryptocap *cap;
 | |
| +	u_int32_t hid, lid;
 | |
| +	int err;
 | |
| +	unsigned long d_flags;
 | |
| +
 | |
| +	CRYPTO_DRIVER_LOCK();
 | |
| +	if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
 | |
| +		/*
 | |
| +		 * Use specified driver; verify it is capable.
 | |
| +		 */
 | |
| +		cap = crypto_checkdriver(crid);
 | |
| +		if (cap != NULL && !driver_suitable(cap, cri))
 | |
| +			cap = NULL;
 | |
| +	} else {
 | |
| +		/*
 | |
| +		 * No requested driver; select based on crid flags.
 | |
| +		 */
 | |
| +		cap = crypto_select_driver(cri, crid);
 | |
| +		/*
 | |
| +		 * if NULL then can't do everything in one session.
 | |
| +		 * XXX Fix this. We need to inject a "virtual" session
 | |
| +		 * XXX layer right about here.
 | |
| +		 */
 | |
| +	}
 | |
| +	if (cap != NULL) {
 | |
| +		/* Call the driver initialization routine. */
 | |
| +		hid = cap - crypto_drivers;
 | |
| +		lid = hid;		/* Pass the driver ID. */
 | |
| +		cap->cc_sessions++;
 | |
| +		CRYPTO_DRIVER_UNLOCK();
 | |
| +		err = CRYPTODEV_NEWSESSION(cap->cc_dev, &lid, cri);
 | |
| +		CRYPTO_DRIVER_LOCK();
 | |
| +		if (err == 0) {
 | |
| +			(*sid) = (cap->cc_flags & 0xff000000)
 | |
| +			       | (hid & 0x00ffffff);
 | |
| +			(*sid) <<= 32;
 | |
| +			(*sid) |= (lid & 0xffffffff);
 | |
| +		} else
 | |
| +			cap->cc_sessions--;
 | |
| +	} else
 | |
| +		err = EINVAL;
 | |
| +	CRYPTO_DRIVER_UNLOCK();
 | |
| +	return err;
 | |
| +}
 | |
| +
 | |
| +static void
 | |
| +crypto_remove(struct cryptocap *cap)
 | |
| +{
 | |
| +	CRYPTO_DRIVER_ASSERT();
 | |
| +	if (cap->cc_sessions == 0 && cap->cc_koperations == 0)
 | |
| +		bzero(cap, sizeof(*cap));
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Delete an existing session (or a reserved session on an unregistered
 | |
| + * driver).
 | |
| + */
 | |
| +int
 | |
| +crypto_freesession(u_int64_t sid)
 | |
| +{
 | |
| +	struct cryptocap *cap;
 | |
| +	u_int32_t hid;
 | |
| +	int err = 0;
 | |
| +	unsigned long d_flags;
 | |
| +
 | |
| +	dprintk("%s()\n", __FUNCTION__);
 | |
| +	CRYPTO_DRIVER_LOCK();
 | |
| +
 | |
| +	if (crypto_drivers == NULL) {
 | |
| +		err = EINVAL;
 | |
| +		goto done;
 | |
| +	}
 | |
| +
 | |
| +	/* Determine two IDs. */
 | |
| +	hid = CRYPTO_SESID2HID(sid);
 | |
| +
 | |
| +	if (hid >= crypto_drivers_num) {
 | |
| +		dprintk("%s - INVALID DRIVER NUM %d\n", __FUNCTION__, hid);
 | |
| +		err = ENOENT;
 | |
| +		goto done;
 | |
| +	}
 | |
| +	cap = &crypto_drivers[hid];
 | |
| +
 | |
| +	if (cap->cc_dev) {
 | |
| +		CRYPTO_DRIVER_UNLOCK();
 | |
| +		/* Call the driver cleanup routine, if available, unlocked. */
 | |
| +		err = CRYPTODEV_FREESESSION(cap->cc_dev, sid);
 | |
| +		CRYPTO_DRIVER_LOCK();
 | |
| +	}
 | |
| +
 | |
| +	if (cap->cc_sessions)
 | |
| +		cap->cc_sessions--;
 | |
| +
 | |
| +	if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
 | |
| +		crypto_remove(cap);
 | |
| +
 | |
| +done:
 | |
| +	CRYPTO_DRIVER_UNLOCK();
 | |
| +	return err;
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Return an unused driver id.  Used by drivers prior to registering
 | |
| + * support for the algorithms they handle.
 | |
| + */
 | |
| +int32_t
 | |
| +crypto_get_driverid(device_t dev, int flags)
 | |
| +{
 | |
| +	struct cryptocap *newdrv;
 | |
| +	int i;
 | |
| +	unsigned long d_flags;
 | |
| +
 | |
| +	if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
 | |
| +		printf("%s: no flags specified when registering driver\n",
 | |
| +		    device_get_nameunit(dev));
 | |
| +		return -1;
 | |
| +	}
 | |
| +
 | |
| +	CRYPTO_DRIVER_LOCK();
 | |
| +
 | |
| +	for (i = 0; i < crypto_drivers_num; i++) {
 | |
| +		if (crypto_drivers[i].cc_dev == NULL &&
 | |
| +		    (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0) {
 | |
| +			break;
 | |
| +		}
 | |
| +	}
 | |
| +
 | |
| +	/* Out of entries, allocate some more. */
 | |
| +	if (i == crypto_drivers_num) {
 | |
| +		/* Be careful about wrap-around. */
 | |
| +		if (2 * crypto_drivers_num <= crypto_drivers_num) {
 | |
| +			CRYPTO_DRIVER_UNLOCK();
 | |
| +			printk("crypto: driver count wraparound!\n");
 | |
| +			return -1;
 | |
| +		}
 | |
| +
 | |
| +		newdrv = kmalloc(2 * crypto_drivers_num * sizeof(struct cryptocap),
 | |
| +				GFP_KERNEL);
 | |
| +		if (newdrv == NULL) {
 | |
| +			CRYPTO_DRIVER_UNLOCK();
 | |
| +			printk("crypto: no space to expand driver table!\n");
 | |
| +			return -1;
 | |
| +		}
 | |
| +
 | |
| +		memcpy(newdrv, crypto_drivers,
 | |
| +				crypto_drivers_num * sizeof(struct cryptocap));
 | |
| +		memset(&newdrv[crypto_drivers_num], 0,
 | |
| +				crypto_drivers_num * sizeof(struct cryptocap));
 | |
| +
 | |
| +		crypto_drivers_num *= 2;
 | |
| +
 | |
| +		kfree(crypto_drivers);
 | |
| +		crypto_drivers = newdrv;
 | |
| +	}
 | |
| +
 | |
| +	/* NB: state is zero'd on free */
 | |
| +	crypto_drivers[i].cc_sessions = 1;	/* Mark */
 | |
| +	crypto_drivers[i].cc_dev = dev;
 | |
| +	crypto_drivers[i].cc_flags = flags;
 | |
| +	if (bootverbose)
 | |
| +		printf("crypto: assign %s driver id %u, flags %u\n",
 | |
| +		    device_get_nameunit(dev), i, flags);
 | |
| +
 | |
| +	CRYPTO_DRIVER_UNLOCK();
 | |
| +
 | |
| +	return i;
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Lookup a driver by name.  We match against the full device
 | |
| + * name and unit, and against just the name.  The latter gives
 | |
| + * us a simple widlcarding by device name.  On success return the
 | |
| + * driver/hardware identifier; otherwise return -1.
 | |
| + */
 | |
| +int
 | |
| +crypto_find_driver(const char *match)
 | |
| +{
 | |
| +	int i, len = strlen(match);
 | |
| +	unsigned long d_flags;
 | |
| +
 | |
| +	CRYPTO_DRIVER_LOCK();
 | |
| +	for (i = 0; i < crypto_drivers_num; i++) {
 | |
| +		device_t dev = crypto_drivers[i].cc_dev;
 | |
| +		if (dev == NULL ||
 | |
| +		    (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP))
 | |
| +			continue;
 | |
| +		if (strncmp(match, device_get_nameunit(dev), len) == 0 ||
 | |
| +		    strncmp(match, device_get_name(dev), len) == 0)
 | |
| +			break;
 | |
| +	}
 | |
| +	CRYPTO_DRIVER_UNLOCK();
 | |
| +	return i < crypto_drivers_num ? i : -1;
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Return the device_t for the specified driver or NULL
 | |
| + * if the driver identifier is invalid.
 | |
| + */
 | |
| +device_t
 | |
| +crypto_find_device_byhid(int hid)
 | |
| +{
 | |
| +	struct cryptocap *cap = crypto_checkdriver(hid);
 | |
| +	return cap != NULL ? cap->cc_dev : NULL;
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Return the device/driver capabilities.
 | |
| + */
 | |
| +int
 | |
| +crypto_getcaps(int hid)
 | |
| +{
 | |
| +	struct cryptocap *cap = crypto_checkdriver(hid);
 | |
| +	return cap != NULL ? cap->cc_flags : 0;
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Register support for a key-related algorithm.  This routine
 | |
| + * is called once for each algorithm supported a driver.
 | |
| + */
 | |
| +int
 | |
| +crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags)
 | |
| +{
 | |
| +	struct cryptocap *cap;
 | |
| +	int err;
 | |
| +	unsigned long d_flags;
 | |
| +
 | |
| +	dprintk("%s()\n", __FUNCTION__);
 | |
| +	CRYPTO_DRIVER_LOCK();
 | |
| +
 | |
| +	cap = crypto_checkdriver(driverid);
 | |
| +	if (cap != NULL &&
 | |
| +	    (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
 | |
| +		/*
 | |
| +		 * XXX Do some performance testing to determine placing.
 | |
| +		 * XXX We probably need an auxiliary data structure that
 | |
| +		 * XXX describes relative performances.
 | |
| +		 */
 | |
| +
 | |
| +		cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
 | |
| +		if (bootverbose)
 | |
| +			printf("crypto: %s registers key alg %u flags %u\n"
 | |
| +				, device_get_nameunit(cap->cc_dev)
 | |
| +				, kalg
 | |
| +				, flags
 | |
| +			);
 | |
| +		err = 0;
 | |
| +	} else
 | |
| +		err = EINVAL;
 | |
| +
 | |
| +	CRYPTO_DRIVER_UNLOCK();
 | |
| +	return err;
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Register support for a non-key-related algorithm.  This routine
 | |
| + * is called once for each such algorithm supported by a driver.
 | |
| + */
 | |
| +int
 | |
| +crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
 | |
| +    u_int32_t flags)
 | |
| +{
 | |
| +	struct cryptocap *cap;
 | |
| +	int err;
 | |
| +	unsigned long d_flags;
 | |
| +
 | |
| +	dprintk("%s(id=0x%x, alg=%d, maxoplen=%d, flags=0x%x)\n", __FUNCTION__,
 | |
| +			driverid, alg, maxoplen, flags);
 | |
| +
 | |
| +	CRYPTO_DRIVER_LOCK();
 | |
| +
 | |
| +	cap = crypto_checkdriver(driverid);
 | |
| +	/* NB: algorithms are in the range [1..max] */
 | |
| +	if (cap != NULL &&
 | |
| +	    (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) {
 | |
| +		/*
 | |
| +		 * XXX Do some performance testing to determine placing.
 | |
| +		 * XXX We probably need an auxiliary data structure that
 | |
| +		 * XXX describes relative performances.
 | |
| +		 */
 | |
| +
 | |
| +		cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
 | |
| +		cap->cc_max_op_len[alg] = maxoplen;
 | |
| +		if (bootverbose)
 | |
| +			printf("crypto: %s registers alg %u flags %u maxoplen %u\n"
 | |
| +				, device_get_nameunit(cap->cc_dev)
 | |
| +				, alg
 | |
| +				, flags
 | |
| +				, maxoplen
 | |
| +			);
 | |
| +		cap->cc_sessions = 0;		/* Unmark */
 | |
| +		err = 0;
 | |
| +	} else
 | |
| +		err = EINVAL;
 | |
| +
 | |
| +	CRYPTO_DRIVER_UNLOCK();
 | |
| +	return err;
 | |
| +}
 | |
| +
 | |
| +static void
 | |
| +driver_finis(struct cryptocap *cap)
 | |
| +{
 | |
| +	u_int32_t ses, kops;
 | |
| +
 | |
| +	CRYPTO_DRIVER_ASSERT();
 | |
| +
 | |
| +	ses = cap->cc_sessions;
 | |
| +	kops = cap->cc_koperations;
 | |
| +	bzero(cap, sizeof(*cap));
 | |
| +	if (ses != 0 || kops != 0) {
 | |
| +		/*
 | |
| +		 * If there are pending sessions,
 | |
| +		 * just mark as invalid.
 | |
| +		 */
 | |
| +		cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
 | |
| +		cap->cc_sessions = ses;
 | |
| +		cap->cc_koperations = kops;
 | |
| +	}
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Unregister a crypto driver. If there are pending sessions using it,
 | |
| + * leave enough information around so that subsequent calls using those
 | |
| + * sessions will correctly detect the driver has been unregistered and
 | |
| + * reroute requests.
 | |
| + */
 | |
| +int
 | |
| +crypto_unregister(u_int32_t driverid, int alg)
 | |
| +{
 | |
| +	struct cryptocap *cap;
 | |
| +	int i, err;
 | |
| +	unsigned long d_flags;
 | |
| +
 | |
| +	dprintk("%s()\n", __FUNCTION__);
 | |
| +	CRYPTO_DRIVER_LOCK();
 | |
| +
 | |
| +	cap = crypto_checkdriver(driverid);
 | |
| +	if (cap != NULL &&
 | |
| +	    (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) &&
 | |
| +	    cap->cc_alg[alg] != 0) {
 | |
| +		cap->cc_alg[alg] = 0;
 | |
| +		cap->cc_max_op_len[alg] = 0;
 | |
| +
 | |
| +		/* Was this the last algorithm ? */
 | |
| +		for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++)
 | |
| +			if (cap->cc_alg[i] != 0)
 | |
| +				break;
 | |
| +
 | |
| +		if (i == CRYPTO_ALGORITHM_MAX + 1)
 | |
| +			driver_finis(cap);
 | |
| +		err = 0;
 | |
| +	} else
 | |
| +		err = EINVAL;
 | |
| +	CRYPTO_DRIVER_UNLOCK();
 | |
| +	return err;
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Unregister all algorithms associated with a crypto driver.
 | |
| + * If there are pending sessions using it, leave enough information
 | |
| + * around so that subsequent calls using those sessions will
 | |
| + * correctly detect the driver has been unregistered and reroute
 | |
| + * requests.
 | |
| + */
 | |
| +int
 | |
| +crypto_unregister_all(u_int32_t driverid)
 | |
| +{
 | |
| +	struct cryptocap *cap;
 | |
| +	int err;
 | |
| +	unsigned long d_flags;
 | |
| +
 | |
| +	dprintk("%s()\n", __FUNCTION__);
 | |
| +	CRYPTO_DRIVER_LOCK();
 | |
| +	cap = crypto_checkdriver(driverid);
 | |
| +	if (cap != NULL) {
 | |
| +		driver_finis(cap);
 | |
| +		err = 0;
 | |
| +	} else
 | |
| +		err = EINVAL;
 | |
| +	CRYPTO_DRIVER_UNLOCK();
 | |
| +
 | |
| +	return err;
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Clear blockage on a driver.  The what parameter indicates whether
 | |
| + * the driver is now ready for cryptop's and/or cryptokop's.
 | |
| + */
 | |
| +int
 | |
| +crypto_unblock(u_int32_t driverid, int what)
 | |
| +{
 | |
| +	struct cryptocap *cap;
 | |
| +	int err;
 | |
| +	unsigned long q_flags;
 | |
| +
 | |
| +	CRYPTO_Q_LOCK();
 | |
| +	cap = crypto_checkdriver(driverid);
 | |
| +	if (cap != NULL) {
 | |
| +		if (what & CRYPTO_SYMQ) {
 | |
| +			cap->cc_qblocked = 0;
 | |
| +			crypto_all_qblocked = 0;
 | |
| +		}
 | |
| +		if (what & CRYPTO_ASYMQ) {
 | |
| +			cap->cc_kqblocked = 0;
 | |
| +			crypto_all_kqblocked = 0;
 | |
| +		}
 | |
| +		if (crp_sleep)
 | |
| +			wake_up_interruptible(&cryptoproc_wait);
 | |
| +		err = 0;
 | |
| +	} else
 | |
| +		err = EINVAL;
 | |
| +	CRYPTO_Q_UNLOCK(); //DAVIDM should this be a driver lock
 | |
| +
 | |
| +	return err;
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Add a crypto request to a queue, to be processed by the kernel thread.
 | |
| + */
 | |
| +int
 | |
| +crypto_dispatch(struct cryptop *crp)
 | |
| +{
 | |
| +	struct cryptocap *cap;
 | |
| +	int result = -1;
 | |
| +	unsigned long q_flags;
 | |
| +
 | |
| +	dprintk("%s()\n", __FUNCTION__);
 | |
| +
 | |
| +	cryptostats.cs_ops++;
 | |
| +
 | |
| +	CRYPTO_Q_LOCK();
 | |
| +	if (crypto_q_cnt >= crypto_q_max) {
 | |
| +		CRYPTO_Q_UNLOCK();
 | |
| +		cryptostats.cs_drops++;
 | |
| +		return ENOMEM;
 | |
| +	}
 | |
| +	crypto_q_cnt++;
 | |
| +
 | |
| +	/*
 | |
| +	 * Caller marked the request to be processed immediately; dispatch
 | |
| +	 * it directly to the driver unless the driver is currently blocked.
 | |
| +	 */
 | |
| +	if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) {
 | |
| +		int hid = CRYPTO_SESID2HID(crp->crp_sid);
 | |
| +		cap = crypto_checkdriver(hid);
 | |
| +		/* Driver cannot disappear when there is an active session. */
 | |
| +		KASSERT(cap != NULL, ("%s: Driver disappeared.", __func__));
 | |
| +		if (!cap->cc_qblocked) {
 | |
| +			crypto_all_qblocked = 0;
 | |
| +			crypto_drivers[hid].cc_qblocked = 1;
 | |
| +			CRYPTO_Q_UNLOCK();
 | |
| +			result = crypto_invoke(cap, crp, 0);
 | |
| +			CRYPTO_Q_LOCK();
 | |
| +			if (result != ERESTART)
 | |
| +				crypto_drivers[hid].cc_qblocked = 0;
 | |
| +		}
 | |
| +	}
 | |
| +	if (result == ERESTART) {
 | |
| +		/*
 | |
| +		 * The driver ran out of resources, mark the
 | |
| +		 * driver ``blocked'' for cryptop's and put
 | |
| +		 * the request back in the queue.  It would
 | |
| +		 * best to put the request back where we got
 | |
| +		 * it but that's hard so for now we put it
 | |
| +		 * at the front.  This should be ok; putting
 | |
| +		 * it at the end does not work.
 | |
| +		 */
 | |
| +		list_add(&crp->crp_next, &crp_q);
 | |
| +		cryptostats.cs_blocks++;
 | |
| +	} else if (result == -1) {
 | |
| +		TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
 | |
| +	}
 | |
| +	if (crp_sleep)
 | |
| +		wake_up_interruptible(&cryptoproc_wait);
 | |
| +	CRYPTO_Q_UNLOCK();
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Add an asymetric crypto request to a queue,
 | |
| + * to be processed by the kernel thread.
 | |
| + */
 | |
| +int
 | |
| +crypto_kdispatch(struct cryptkop *krp)
 | |
| +{
 | |
| +	int error;
 | |
| +	unsigned long q_flags;
 | |
| +
 | |
| +	cryptostats.cs_kops++;
 | |
| +
 | |
| +	error = crypto_kinvoke(krp, krp->krp_crid);
 | |
| +	if (error == ERESTART) {
 | |
| +		CRYPTO_Q_LOCK();
 | |
| +		TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
 | |
| +		if (crp_sleep)
 | |
| +			wake_up_interruptible(&cryptoproc_wait);
 | |
| +		CRYPTO_Q_UNLOCK();
 | |
| +		error = 0;
 | |
| +	}
 | |
| +	return error;
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Verify a driver is suitable for the specified operation.
 | |
| + */
 | |
| +static __inline int
 | |
| +kdriver_suitable(const struct cryptocap *cap, const struct cryptkop *krp)
 | |
| +{
 | |
| +	return (cap->cc_kalg[krp->krp_op] & CRYPTO_ALG_FLAG_SUPPORTED) != 0;
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Select a driver for an asym operation.  The driver must
 | |
| + * support the necessary algorithm.  The caller can constrain
 | |
| + * which device is selected with the flags parameter.  The
 | |
| + * algorithm we use here is pretty stupid; just use the first
 | |
| + * driver that supports the algorithms we need. If there are
 | |
| + * multiple suitable drivers we choose the driver with the
 | |
| + * fewest active operations.  We prefer hardware-backed
 | |
| + * drivers to software ones when either may be used.
 | |
| + */
 | |
| +static struct cryptocap *
 | |
| +crypto_select_kdriver(const struct cryptkop *krp, int flags)
 | |
| +{
 | |
| +	struct cryptocap *cap, *best, *blocked;
 | |
| +	int match, hid;
 | |
| +
 | |
| +	CRYPTO_DRIVER_ASSERT();
 | |
| +
 | |
| +	/*
 | |
| +	 * Look first for hardware crypto devices if permitted.
 | |
| +	 */
 | |
| +	if (flags & CRYPTOCAP_F_HARDWARE)
 | |
| +		match = CRYPTOCAP_F_HARDWARE;
 | |
| +	else
 | |
| +		match = CRYPTOCAP_F_SOFTWARE;
 | |
| +	best = NULL;
 | |
| +	blocked = NULL;
 | |
| +again:
 | |
| +	for (hid = 0; hid < crypto_drivers_num; hid++) {
 | |
| +		cap = &crypto_drivers[hid];
 | |
| +		/*
 | |
| +		 * If it's not initialized, is in the process of
 | |
| +		 * going away, or is not appropriate (hardware
 | |
| +		 * or software based on match), then skip.
 | |
| +		 */
 | |
| +		if (cap->cc_dev == NULL ||
 | |
| +		    (cap->cc_flags & CRYPTOCAP_F_CLEANUP) ||
 | |
| +		    (cap->cc_flags & match) == 0)
 | |
| +			continue;
 | |
| +
 | |
| +		/* verify all the algorithms are supported. */
 | |
| +		if (kdriver_suitable(cap, krp)) {
 | |
| +			if (best == NULL ||
 | |
| +			    cap->cc_koperations < best->cc_koperations)
 | |
| +				best = cap;
 | |
| +		}
 | |
| +	}
 | |
| +	if (best != NULL)
 | |
| +		return best;
 | |
| +	if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) {
 | |
| +		/* sort of an Algol 68-style for loop */
 | |
| +		match = CRYPTOCAP_F_SOFTWARE;
 | |
| +		goto again;
 | |
| +	}
 | |
| +	return best;
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Dispatch an assymetric crypto request.
 | |
| + */
 | |
| +static int
 | |
| +crypto_kinvoke(struct cryptkop *krp, int crid)
 | |
| +{
 | |
| +	struct cryptocap *cap = NULL;
 | |
| +	int error;
 | |
| +	unsigned long d_flags;
 | |
| +
 | |
| +	KASSERT(krp != NULL, ("%s: krp == NULL", __func__));
 | |
| +	KASSERT(krp->krp_callback != NULL,
 | |
| +	    ("%s: krp->crp_callback == NULL", __func__));
 | |
| +
 | |
| +	CRYPTO_DRIVER_LOCK();
 | |
| +	if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
 | |
| +		cap = crypto_checkdriver(crid);
 | |
| +		if (cap != NULL) {
 | |
| +			/*
 | |
| +			 * Driver present, it must support the necessary
 | |
| +			 * algorithm and, if s/w drivers are excluded,
 | |
| +			 * it must be registered as hardware-backed.
 | |
| +			 */
 | |
| +			if (!kdriver_suitable(cap, krp) ||
 | |
| +			    (!crypto_devallowsoft &&
 | |
| +			     (cap->cc_flags & CRYPTOCAP_F_HARDWARE) == 0))
 | |
| +				cap = NULL;
 | |
| +		}
 | |
| +	} else {
 | |
| +		/*
 | |
| +		 * No requested driver; select based on crid flags.
 | |
| +		 */
 | |
| +		if (!crypto_devallowsoft)	/* NB: disallow s/w drivers */
 | |
| +			crid &= ~CRYPTOCAP_F_SOFTWARE;
 | |
| +		cap = crypto_select_kdriver(krp, crid);
 | |
| +	}
 | |
| +	if (cap != NULL && !cap->cc_kqblocked) {
 | |
| +		krp->krp_hid = cap - crypto_drivers;
 | |
| +		cap->cc_koperations++;
 | |
| +		CRYPTO_DRIVER_UNLOCK();
 | |
| +		error = CRYPTODEV_KPROCESS(cap->cc_dev, krp, 0);
 | |
| +		CRYPTO_DRIVER_LOCK();
 | |
| +		if (error == ERESTART) {
 | |
| +			cap->cc_koperations--;
 | |
| +			CRYPTO_DRIVER_UNLOCK();
 | |
| +			return (error);
 | |
| +		}
 | |
| +		/* return the actual device used */
 | |
| +		krp->krp_crid = krp->krp_hid;
 | |
| +	} else {
 | |
| +		/*
 | |
| +		 * NB: cap is !NULL if device is blocked; in
 | |
| +		 *     that case return ERESTART so the operation
 | |
| +		 *     is resubmitted if possible.
 | |
| +		 */
 | |
| +		error = (cap == NULL) ? ENODEV : ERESTART;
 | |
| +	}
 | |
| +	CRYPTO_DRIVER_UNLOCK();
 | |
| +
 | |
| +	if (error) {
 | |
| +		krp->krp_status = error;
 | |
| +		crypto_kdone(krp);
 | |
| +	}
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +
 | |
| +/*
 | |
| + * Dispatch a crypto request to the appropriate crypto devices.
 | |
| + */
 | |
| +static int
 | |
| +crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint)
 | |
| +{
 | |
| +	KASSERT(crp != NULL, ("%s: crp == NULL", __func__));
 | |
| +	KASSERT(crp->crp_callback != NULL,
 | |
| +	    ("%s: crp->crp_callback == NULL", __func__));
 | |
| +	KASSERT(crp->crp_desc != NULL, ("%s: crp->crp_desc == NULL", __func__));
 | |
| +
 | |
| +	dprintk("%s()\n", __FUNCTION__);
 | |
| +
 | |
| +#ifdef CRYPTO_TIMING
 | |
| +	if (crypto_timing)
 | |
| +		crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
 | |
| +#endif
 | |
| +	if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
 | |
| +		struct cryptodesc *crd;
 | |
| +		u_int64_t nid;
 | |
| +
 | |
| +		/*
 | |
| +		 * Driver has unregistered; migrate the session and return
 | |
| +		 * an error to the caller so they'll resubmit the op.
 | |
| +		 *
 | |
| +		 * XXX: What if there are more already queued requests for this
 | |
| +		 *      session?
 | |
| +		 */
 | |
| +		crypto_freesession(crp->crp_sid);
 | |
| +
 | |
| +		for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
 | |
| +			crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);
 | |
| +
 | |
| +		/* XXX propagate flags from initial session? */
 | |
| +		if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI),
 | |
| +		    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) == 0)
 | |
| +			crp->crp_sid = nid;
 | |
| +
 | |
| +		crp->crp_etype = EAGAIN;
 | |
| +		crypto_done(crp);
 | |
| +		return 0;
 | |
| +	} else {
 | |
| +		/*
 | |
| +		 * Invoke the driver to process the request.
 | |
| +		 */
 | |
| +		return CRYPTODEV_PROCESS(cap->cc_dev, crp, hint);
 | |
| +	}
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Release a set of crypto descriptors.
 | |
| + */
 | |
| +void
 | |
| +crypto_freereq(struct cryptop *crp)
 | |
| +{
 | |
| +	struct cryptodesc *crd;
 | |
| +
 | |
| +	if (crp == NULL)
 | |
| +		return;
 | |
| +
 | |
| +#ifdef DIAGNOSTIC
 | |
| +	{
 | |
| +		struct cryptop *crp2;
 | |
| +		unsigned long q_flags;
 | |
| +
 | |
| +		CRYPTO_Q_LOCK();
 | |
| +		TAILQ_FOREACH(crp2, &crp_q, crp_next) {
 | |
| +			KASSERT(crp2 != crp,
 | |
| +			    ("Freeing cryptop from the crypto queue (%p).",
 | |
| +			    crp));
 | |
| +		}
 | |
| +		CRYPTO_Q_UNLOCK();
 | |
| +		CRYPTO_RETQ_LOCK();
 | |
| +		TAILQ_FOREACH(crp2, &crp_ret_q, crp_next) {
 | |
| +			KASSERT(crp2 != crp,
 | |
| +			    ("Freeing cryptop from the return queue (%p).",
 | |
| +			    crp));
 | |
| +		}
 | |
| +		CRYPTO_RETQ_UNLOCK();
 | |
| +	}
 | |
| +#endif
 | |
| +
 | |
| +	while ((crd = crp->crp_desc) != NULL) {
 | |
| +		crp->crp_desc = crd->crd_next;
 | |
| +		kmem_cache_free(cryptodesc_zone, crd);
 | |
| +	}
 | |
| +	kmem_cache_free(cryptop_zone, crp);
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Acquire a set of crypto descriptors.
 | |
| + */
 | |
| +struct cryptop *
 | |
| +crypto_getreq(int num)
 | |
| +{
 | |
| +	struct cryptodesc *crd;
 | |
| +	struct cryptop *crp;
 | |
| +
 | |
| +	crp = kmem_cache_alloc(cryptop_zone, SLAB_ATOMIC);
 | |
| +	if (crp != NULL) {
 | |
| +		memset(crp, 0, sizeof(*crp));
 | |
| +		INIT_LIST_HEAD(&crp->crp_next);
 | |
| +		init_waitqueue_head(&crp->crp_waitq);
 | |
| +		while (num--) {
 | |
| +			crd = kmem_cache_alloc(cryptodesc_zone, SLAB_ATOMIC);
 | |
| +			if (crd == NULL) {
 | |
| +				crypto_freereq(crp);
 | |
| +				return NULL;
 | |
| +			}
 | |
| +			memset(crd, 0, sizeof(*crd));
 | |
| +			crd->crd_next = crp->crp_desc;
 | |
| +			crp->crp_desc = crd;
 | |
| +		}
 | |
| +	}
 | |
| +	return crp;
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Invoke the callback on behalf of the driver.
 | |
| + */
 | |
| +void
 | |
| +crypto_done(struct cryptop *crp)
 | |
| +{
 | |
| +	unsigned long q_flags;
 | |
| +
 | |
| +	dprintk("%s()\n", __FUNCTION__);
 | |
| +	if ((crp->crp_flags & CRYPTO_F_DONE) == 0) {
 | |
| +		crp->crp_flags |= CRYPTO_F_DONE;
 | |
| +		CRYPTO_Q_LOCK();
 | |
| +		crypto_q_cnt--;
 | |
| +		CRYPTO_Q_UNLOCK();
 | |
| +	} else
 | |
| +		printk("crypto: crypto_done op already done, flags 0x%x",
 | |
| +				crp->crp_flags);
 | |
| +	if (crp->crp_etype != 0)
 | |
| +		cryptostats.cs_errs++;
 | |
| +	/*
 | |
| +	 * CBIMM means unconditionally do the callback immediately;
 | |
| +	 * CBIFSYNC means do the callback immediately only if the
 | |
| +	 * operation was done synchronously.  Both are used to avoid
 | |
| +	 * doing extraneous context switches; the latter is mostly
 | |
| +	 * used with the software crypto driver.
 | |
| +	 */
 | |
| +	if ((crp->crp_flags & CRYPTO_F_CBIMM) ||
 | |
| +	    ((crp->crp_flags & CRYPTO_F_CBIFSYNC) &&
 | |
| +	     (CRYPTO_SESID2CAPS(crp->crp_sid) & CRYPTOCAP_F_SYNC))) {
 | |
| +		/*
 | |
| +		 * Do the callback directly.  This is ok when the
 | |
| +		 * callback routine does very little (e.g. the
 | |
| +		 * /dev/crypto callback method just does a wakeup).
 | |
| +		 */
 | |
| +		crp->crp_callback(crp);
 | |
| +	} else {
 | |
| +		unsigned long r_flags;
 | |
| +		/*
 | |
| +		 * Normal case; queue the callback for the thread.
 | |
| +		 */
 | |
| +		CRYPTO_RETQ_LOCK();
 | |
| +		if (CRYPTO_RETQ_EMPTY())
 | |
| +			wake_up_interruptible(&cryptoretproc_wait);/* shared wait channel */
 | |
| +		TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next);
 | |
| +		CRYPTO_RETQ_UNLOCK();
 | |
| +	}
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Invoke the callback on behalf of the driver.
 | |
| + */
 | |
| +void
 | |
| +crypto_kdone(struct cryptkop *krp)
 | |
| +{
 | |
| +	struct cryptocap *cap;
 | |
| +	unsigned long d_flags;
 | |
| +
 | |
| +	if ((krp->krp_flags & CRYPTO_KF_DONE) != 0)
 | |
| +		printk("crypto: crypto_kdone op already done, flags 0x%x",
 | |
| +				krp->krp_flags);
 | |
| +	krp->krp_flags |= CRYPTO_KF_DONE;
 | |
| +	if (krp->krp_status != 0)
 | |
| +		cryptostats.cs_kerrs++;
 | |
| +
 | |
| +	CRYPTO_DRIVER_LOCK();
 | |
| +	/* XXX: What if driver is loaded in the meantime? */
 | |
| +	if (krp->krp_hid < crypto_drivers_num) {
 | |
| +		cap = &crypto_drivers[krp->krp_hid];
 | |
| +		cap->cc_koperations--;
 | |
| +		KASSERT(cap->cc_koperations >= 0, ("cc_koperations < 0"));
 | |
| +		if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
 | |
| +			crypto_remove(cap);
 | |
| +	}
 | |
| +	CRYPTO_DRIVER_UNLOCK();
 | |
| +
 | |
| +	/*
 | |
| +	 * CBIMM means unconditionally do the callback immediately;
 | |
| +	 * This is used to avoid doing extraneous context switches
 | |
| +	 */
 | |
| +	if ((krp->krp_flags & CRYPTO_KF_CBIMM)) {
 | |
| +		/*
 | |
| +		 * Do the callback directly.  This is ok when the
 | |
| +		 * callback routine does very little (e.g. the
 | |
| +		 * /dev/crypto callback method just does a wakeup).
 | |
| +		 */
 | |
| +		krp->krp_callback(krp);
 | |
| +	} else {
 | |
| +		unsigned long r_flags;
 | |
| +		/*
 | |
| +		 * Normal case; queue the callback for the thread.
 | |
| +		 */
 | |
| +		CRYPTO_RETQ_LOCK();
 | |
| +		if (CRYPTO_RETQ_EMPTY())
 | |
| +			wake_up_interruptible(&cryptoretproc_wait);/* shared wait channel */
 | |
| +		TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next);
 | |
| +		CRYPTO_RETQ_UNLOCK();
 | |
| +	}
 | |
| +}
 | |
| +
 | |
| +int
 | |
| +crypto_getfeat(int *featp)
 | |
| +{
 | |
| +	int hid, kalg, feat = 0;
 | |
| +	unsigned long d_flags;
 | |
| +
 | |
| +	CRYPTO_DRIVER_LOCK();
 | |
| +	for (hid = 0; hid < crypto_drivers_num; hid++) {
 | |
| +		const struct cryptocap *cap = &crypto_drivers[hid];
 | |
| +
 | |
| +		if ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) &&
 | |
| +		    !crypto_devallowsoft) {
 | |
| +			continue;
 | |
| +		}
 | |
| +		for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
 | |
| +			if (cap->cc_kalg[kalg] & CRYPTO_ALG_FLAG_SUPPORTED)
 | |
| +				feat |=  1 << kalg;
 | |
| +	}
 | |
| +	CRYPTO_DRIVER_UNLOCK();
 | |
| +	*featp = feat;
 | |
| +	return (0);
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Crypto thread, dispatches crypto requests.
 | |
| + */
 | |
| +static int
 | |
| +crypto_proc(void *arg)
 | |
| +{
 | |
| +	struct cryptop *crp, *submit;
 | |
| +	struct cryptkop *krp, *krpp;
 | |
| +	struct cryptocap *cap;
 | |
| +	u_int32_t hid;
 | |
| +	int result, hint;
 | |
| +	unsigned long q_flags;
 | |
| +
 | |
| +	ocf_daemonize("crypto");
 | |
| +
 | |
| +	CRYPTO_Q_LOCK();
 | |
| +	for (;;) {
 | |
| +		/*
 | |
| +		 * we need to make sure we don't get into a busy loop with nothing
 | |
| +		 * to do,  the two crypto_all_*blocked vars help us find out when
 | |
| +		 * we are all full and can do nothing on any driver or Q.  If so we
 | |
| +		 * wait for an unblock.
 | |
| +		 */
 | |
| +		crypto_all_qblocked  = !list_empty(&crp_q);
 | |
| +
 | |
| +		/*
 | |
| +		 * Find the first element in the queue that can be
 | |
| +		 * processed and look-ahead to see if multiple ops
 | |
| +		 * are ready for the same driver.
 | |
| +		 */
 | |
| +		submit = NULL;
 | |
| +		hint = 0;
 | |
| +		list_for_each_entry(crp, &crp_q, crp_next) {
 | |
| +			hid = CRYPTO_SESID2HID(crp->crp_sid);
 | |
| +			cap = crypto_checkdriver(hid);
 | |
| +			/*
 | |
| +			 * Driver cannot disappear when there is an active
 | |
| +			 * session.
 | |
| +			 */
 | |
| +			KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
 | |
| +			    __func__, __LINE__));
 | |
| +			if (cap == NULL || cap->cc_dev == NULL) {
 | |
| +				/* Op needs to be migrated, process it. */
 | |
| +				if (submit == NULL)
 | |
| +					submit = crp;
 | |
| +				break;
 | |
| +			}
 | |
| +			if (!cap->cc_qblocked) {
 | |
| +				if (submit != NULL) {
 | |
| +					/*
 | |
| +					 * We stop on finding another op,
 | |
| +					 * regardless whether its for the same
 | |
| +					 * driver or not.  We could keep
 | |
| +					 * searching the queue but it might be
 | |
| +					 * better to just use a per-driver
 | |
| +					 * queue instead.
 | |
| +					 */
 | |
| +					if (CRYPTO_SESID2HID(submit->crp_sid) == hid)
 | |
| +						hint = CRYPTO_HINT_MORE;
 | |
| +					break;
 | |
| +				} else {
 | |
| +					submit = crp;
 | |
| +					if ((submit->crp_flags & CRYPTO_F_BATCH) == 0)
 | |
| +						break;
 | |
| +					/* keep scanning for more are q'd */
 | |
| +				}
 | |
| +			}
 | |
| +		}
 | |
| +		if (submit != NULL) {
 | |
| +			hid = CRYPTO_SESID2HID(submit->crp_sid);
 | |
| +			crypto_all_qblocked = 0;
 | |
| +			list_del(&submit->crp_next);
 | |
| +			crypto_drivers[hid].cc_qblocked = 1;
 | |
| +			cap = crypto_checkdriver(hid);
 | |
| +			CRYPTO_Q_UNLOCK();
 | |
| +			KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
 | |
| +			    __func__, __LINE__));
 | |
| +			result = crypto_invoke(cap, submit, hint);
 | |
| +			CRYPTO_Q_LOCK();
 | |
| +			if (result == ERESTART) {
 | |
| +				/*
 | |
| +				 * The driver ran out of resources, mark the
 | |
| +				 * driver ``blocked'' for cryptop's and put
 | |
| +				 * the request back in the queue.  It would
 | |
| +				 * best to put the request back where we got
 | |
| +				 * it but that's hard so for now we put it
 | |
| +				 * at the front.  This should be ok; putting
 | |
| +				 * it at the end does not work.
 | |
| +				 */
 | |
| +				/* XXX validate sid again? */
 | |
| +				list_add(&submit->crp_next, &crp_q);
 | |
| +				cryptostats.cs_blocks++;
 | |
| +			} else
 | |
| +				crypto_drivers[hid].cc_qblocked=0;
 | |
| +		}
 | |
| +
 | |
| +		crypto_all_kqblocked = !list_empty(&crp_kq);
 | |
| +
 | |
| +		/* As above, but for key ops */
 | |
| +		krp = NULL;
 | |
| +		list_for_each_entry(krpp, &crp_kq, krp_next) {
 | |
| +			cap = crypto_checkdriver(krpp->krp_hid);
 | |
| +			if (cap == NULL || cap->cc_dev == NULL) {
 | |
| +				/*
 | |
| +				 * Operation needs to be migrated, invalidate
 | |
| +				 * the assigned device so it will reselect a
 | |
| +				 * new one below.  Propagate the original
 | |
| +				 * crid selection flags if supplied.
 | |
| +				 */
 | |
| +				krp->krp_hid = krp->krp_crid &
 | |
| +				    (CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE);
 | |
| +				if (krp->krp_hid == 0)
 | |
| +					krp->krp_hid =
 | |
| +				    CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE;
 | |
| +				break;
 | |
| +			}
 | |
| +			if (!cap->cc_kqblocked) {
 | |
| +				krp = krpp;
 | |
| +				break;
 | |
| +			}
 | |
| +		}
 | |
| +		if (krp != NULL) {
 | |
| +			crypto_all_kqblocked = 0;
 | |
| +			list_del(&krp->krp_next);
 | |
| +			crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
 | |
| +			CRYPTO_Q_UNLOCK();
 | |
| +			result = crypto_kinvoke(krp, krp->krp_hid);
 | |
| +			CRYPTO_Q_LOCK();
 | |
| +			if (result == ERESTART) {
 | |
| +				/*
 | |
| +				 * The driver ran out of resources, mark the
 | |
| +				 * driver ``blocked'' for cryptkop's and put
 | |
| +				 * the request back in the queue.  It would
 | |
| +				 * best to put the request back where we got
 | |
| +				 * it but that's hard so for now we put it
 | |
| +				 * at the front.  This should be ok; putting
 | |
| +				 * it at the end does not work.
 | |
| +				 */
 | |
| +				/* XXX validate sid again? */
 | |
| +				list_add(&krp->krp_next, &crp_kq);
 | |
| +				cryptostats.cs_kblocks++;
 | |
| +			} else
 | |
| +				crypto_drivers[krp->krp_hid].cc_kqblocked = 0;
 | |
| +		}
 | |
| +
 | |
| +		if (submit == NULL && krp == NULL) {
 | |
| +			/*
 | |
| +			 * Nothing more to be processed.  Sleep until we're
 | |
| +			 * woken because there are more ops to process.
 | |
| +			 * This happens either by submission or by a driver
 | |
| +			 * becoming unblocked and notifying us through
 | |
| +			 * crypto_unblock.  Note that when we wakeup we
 | |
| +			 * start processing each queue again from the
 | |
| +			 * front. It's not clear that it's important to
 | |
| +			 * preserve this ordering since ops may finish
 | |
| +			 * out of order if dispatched to different devices
 | |
| +			 * and some become blocked while others do not.
 | |
| +			 */
 | |
| +			dprintk("%s - sleeping (qe=%d qb=%d kqe=%d kqb=%d)\n",
 | |
| +					__FUNCTION__,
 | |
| +					list_empty(&crp_q), crypto_all_qblocked,
 | |
| +					list_empty(&crp_kq), crypto_all_kqblocked);
 | |
| +			CRYPTO_Q_UNLOCK();
 | |
| +			crp_sleep = 1;
 | |
| +			wait_event_interruptible(cryptoproc_wait,
 | |
| +					!(list_empty(&crp_q) || crypto_all_qblocked) ||
 | |
| +					!(list_empty(&crp_kq) || crypto_all_kqblocked) ||
 | |
| +					cryptoproc == (pid_t) -1);
 | |
| +			crp_sleep = 0;
 | |
| +			if (signal_pending (current)) {
 | |
| +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
 | |
| +				spin_lock_irq(¤t->sigmask_lock);
 | |
| +#endif
 | |
| +				flush_signals(current);
 | |
| +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
 | |
| +				spin_unlock_irq(¤t->sigmask_lock);
 | |
| +#endif
 | |
| +			}
 | |
| +			CRYPTO_Q_LOCK();
 | |
| +			dprintk("%s - awake\n", __FUNCTION__);
 | |
| +			if (cryptoproc == (pid_t) -1)
 | |
| +				break;
 | |
| +			cryptostats.cs_intrs++;
 | |
| +		}
 | |
| +	}
 | |
| +	CRYPTO_Q_UNLOCK();
 | |
| +	complete_and_exit(&cryptoproc_exited, 0);
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Crypto returns thread, does callbacks for processed crypto requests.
 | |
| + * Callbacks are done here, rather than in the crypto drivers, because
 | |
| + * callbacks typically are expensive and would slow interrupt handling.
 | |
| + */
 | |
| +static int
 | |
| +crypto_ret_proc(void *arg)
 | |
| +{
 | |
| +	struct cryptop *crpt;
 | |
| +	struct cryptkop *krpt;
 | |
| +	unsigned long  r_flags;
 | |
| +
 | |
| +	ocf_daemonize("crypto_ret");
 | |
| +
 | |
| +	CRYPTO_RETQ_LOCK();
 | |
| +	for (;;) {
 | |
| +		/* Harvest return q's for completed ops */
 | |
| +		crpt = NULL;
 | |
| +		if (!list_empty(&crp_ret_q))
 | |
| +			crpt = list_entry(crp_ret_q.next, typeof(*crpt), crp_next);
 | |
| +		if (crpt != NULL)
 | |
| +			list_del(&crpt->crp_next);
 | |
| +
 | |
| +		krpt = NULL;
 | |
| +		if (!list_empty(&crp_ret_kq))
 | |
| +			krpt = list_entry(crp_ret_kq.next, typeof(*krpt), krp_next);
 | |
| +		if (krpt != NULL)
 | |
| +			list_del(&krpt->krp_next);
 | |
| +
 | |
| +		if (crpt != NULL || krpt != NULL) {
 | |
| +			CRYPTO_RETQ_UNLOCK();
 | |
| +			/*
 | |
| +			 * Run callbacks unlocked.
 | |
| +			 */
 | |
| +			if (crpt != NULL)
 | |
| +				crpt->crp_callback(crpt);
 | |
| +			if (krpt != NULL)
 | |
| +				krpt->krp_callback(krpt);
 | |
| +			CRYPTO_RETQ_LOCK();
 | |
| +		} else {
 | |
| +			/*
 | |
| +			 * Nothing more to be processed.  Sleep until we're
 | |
| +			 * woken because there are more returns to process.
 | |
| +			 */
 | |
| +			dprintk("%s - sleeping\n", __FUNCTION__);
 | |
| +			CRYPTO_RETQ_UNLOCK();
 | |
| +			wait_event_interruptible(cryptoretproc_wait,
 | |
| +					cryptoretproc == (pid_t) -1 ||
 | |
| +					!list_empty(&crp_ret_q) ||
 | |
| +					!list_empty(&crp_ret_kq));
 | |
| +			if (signal_pending (current)) {
 | |
| +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
 | |
| +				spin_lock_irq(¤t->sigmask_lock);
 | |
| +#endif
 | |
| +				flush_signals(current);
 | |
| +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
 | |
| +				spin_unlock_irq(¤t->sigmask_lock);
 | |
| +#endif
 | |
| +			}
 | |
| +			CRYPTO_RETQ_LOCK();
 | |
| +			dprintk("%s - awake\n", __FUNCTION__);
 | |
| +			if (cryptoretproc == (pid_t) -1) {
 | |
| +				dprintk("%s - EXITING!\n", __FUNCTION__);
 | |
| +				break;
 | |
| +			}
 | |
| +			cryptostats.cs_rets++;
 | |
| +		}
 | |
| +	}
 | |
| +	CRYPTO_RETQ_UNLOCK();
 | |
| +	complete_and_exit(&cryptoretproc_exited, 0);
 | |
| +}
 | |
| +
 | |
| +
 | |
| +#if 0 /* should put this into /proc or something */
 | |
| +static void
 | |
| +db_show_drivers(void)
 | |
| +{
 | |
| +	int hid;
 | |
| +
 | |
| +	db_printf("%12s %4s %4s %8s %2s %2s\n"
 | |
| +		, "Device"
 | |
| +		, "Ses"
 | |
| +		, "Kops"
 | |
| +		, "Flags"
 | |
| +		, "QB"
 | |
| +		, "KB"
 | |
| +	);
 | |
| +	for (hid = 0; hid < crypto_drivers_num; hid++) {
 | |
| +		const struct cryptocap *cap = &crypto_drivers[hid];
 | |
| +		if (cap->cc_dev == NULL)
 | |
| +			continue;
 | |
| +		db_printf("%-12s %4u %4u %08x %2u %2u\n"
 | |
| +		    , device_get_nameunit(cap->cc_dev)
 | |
| +		    , cap->cc_sessions
 | |
| +		    , cap->cc_koperations
 | |
| +		    , cap->cc_flags
 | |
| +		    , cap->cc_qblocked
 | |
| +		    , cap->cc_kqblocked
 | |
| +		);
 | |
| +	}
 | |
| +}
 | |
| +
 | |
| +DB_SHOW_COMMAND(crypto, db_show_crypto)
 | |
| +{
 | |
| +	struct cryptop *crp;
 | |
| +
 | |
| +	db_show_drivers();
 | |
| +	db_printf("\n");
 | |
| +
 | |
| +	db_printf("%4s %8s %4s %4s %4s %4s %8s %8s\n",
 | |
| +	    "HID", "Caps", "Ilen", "Olen", "Etype", "Flags",
 | |
| +	    "Desc", "Callback");
 | |
| +	TAILQ_FOREACH(crp, &crp_q, crp_next) {
 | |
| +		db_printf("%4u %08x %4u %4u %4u %04x %8p %8p\n"
 | |
| +		    , (int) CRYPTO_SESID2HID(crp->crp_sid)
 | |
| +		    , (int) CRYPTO_SESID2CAPS(crp->crp_sid)
 | |
| +		    , crp->crp_ilen, crp->crp_olen
 | |
| +		    , crp->crp_etype
 | |
| +		    , crp->crp_flags
 | |
| +		    , crp->crp_desc
 | |
| +		    , crp->crp_callback
 | |
| +		);
 | |
| +	}
 | |
| +	if (!TAILQ_EMPTY(&crp_ret_q)) {
 | |
| +		db_printf("\n%4s %4s %4s %8s\n",
 | |
| +		    "HID", "Etype", "Flags", "Callback");
 | |
| +		TAILQ_FOREACH(crp, &crp_ret_q, crp_next) {
 | |
| +			db_printf("%4u %4u %04x %8p\n"
 | |
| +			    , (int) CRYPTO_SESID2HID(crp->crp_sid)
 | |
| +			    , crp->crp_etype
 | |
| +			    , crp->crp_flags
 | |
| +			    , crp->crp_callback
 | |
| +			);
 | |
| +		}
 | |
| +	}
 | |
| +}
 | |
| +
 | |
| +DB_SHOW_COMMAND(kcrypto, db_show_kcrypto)
 | |
| +{
 | |
| +	struct cryptkop *krp;
 | |
| +
 | |
| +	db_show_drivers();
 | |
| +	db_printf("\n");
 | |
| +
 | |
| +	db_printf("%4s %5s %4s %4s %8s %4s %8s\n",
 | |
| +	    "Op", "Status", "#IP", "#OP", "CRID", "HID", "Callback");
 | |
| +	TAILQ_FOREACH(krp, &crp_kq, krp_next) {
 | |
| +		db_printf("%4u %5u %4u %4u %08x %4u %8p\n"
 | |
| +		    , krp->krp_op
 | |
| +		    , krp->krp_status
 | |
| +		    , krp->krp_iparams, krp->krp_oparams
 | |
| +		    , krp->krp_crid, krp->krp_hid
 | |
| +		    , krp->krp_callback
 | |
| +		);
 | |
| +	}
 | |
| +	if (!TAILQ_EMPTY(&crp_ret_q)) {
 | |
| +		db_printf("%4s %5s %8s %4s %8s\n",
 | |
| +		    "Op", "Status", "CRID", "HID", "Callback");
 | |
| +		TAILQ_FOREACH(krp, &crp_ret_kq, krp_next) {
 | |
| +			db_printf("%4u %5u %08x %4u %8p\n"
 | |
| +			    , krp->krp_op
 | |
| +			    , krp->krp_status
 | |
| +			    , krp->krp_crid, krp->krp_hid
 | |
| +			    , krp->krp_callback
 | |
| +			);
 | |
| +		}
 | |
| +	}
 | |
| +}
 | |
| +#endif
 | |
| +
 | |
| +
 | |
| +static int
 | |
| +crypto_init(void)
 | |
| +{
 | |
| +	int error;
 | |
| +
 | |
| +	dprintk("%s(0x%x)\n", __FUNCTION__, (int) crypto_init);
 | |
| +
 | |
| +	if (crypto_initted)
 | |
| +		return 0;
 | |
| +	crypto_initted = 1;
 | |
| +
 | |
| +	spin_lock_init(&crypto_drivers_lock);
 | |
| +	spin_lock_init(&crypto_q_lock);
 | |
| +	spin_lock_init(&crypto_ret_q_lock);
 | |
| +
 | |
| +	cryptop_zone = kmem_cache_create("cryptop", sizeof(struct cryptop),
 | |
| +				       0, SLAB_HWCACHE_ALIGN, NULL
 | |
| +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
 | |
| +				       , NULL
 | |
| +#endif
 | |
| +					);
 | |
| +
 | |
| +	cryptodesc_zone = kmem_cache_create("cryptodesc", sizeof(struct cryptodesc),
 | |
| +				       0, SLAB_HWCACHE_ALIGN, NULL
 | |
| +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
 | |
| +				       , NULL
 | |
| +#endif
 | |
| +					);
 | |
| +
 | |
| +	if (cryptodesc_zone == NULL || cryptop_zone == NULL) {
 | |
| +		printk("crypto: crypto_init cannot setup crypto zones\n");
 | |
| +		error = ENOMEM;
 | |
| +		goto bad;
 | |
| +	}
 | |
| +
 | |
| +	crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;
 | |
| +	crypto_drivers = kmalloc(crypto_drivers_num * sizeof(struct cryptocap),
 | |
| +			GFP_KERNEL);
 | |
| +	if (crypto_drivers == NULL) {
 | |
| +		printk("crypto: crypto_init cannot setup crypto drivers\n");
 | |
| +		error = ENOMEM;
 | |
| +		goto bad;
 | |
| +	}
 | |
| +
 | |
| +	memset(crypto_drivers, 0, crypto_drivers_num * sizeof(struct cryptocap));
 | |
| +
 | |
| +	init_completion(&cryptoproc_exited);
 | |
| +	init_completion(&cryptoretproc_exited);
 | |
| +
 | |
| +	cryptoproc = 0; /* to avoid race condition where proc runs first */
 | |
| +	cryptoproc = kernel_thread(crypto_proc, NULL, CLONE_FS|CLONE_FILES);
 | |
| +	if (cryptoproc < 0) {
 | |
| +		error = cryptoproc;
 | |
| +		printk("crypto: crypto_init cannot start crypto thread; error %d",
 | |
| +			error);
 | |
| +		goto bad;
 | |
| +	}
 | |
| +
 | |
| +	cryptoretproc = 0; /* to avoid race condition where proc runs first */
 | |
| +	cryptoretproc = kernel_thread(crypto_ret_proc, NULL, CLONE_FS|CLONE_FILES);
 | |
| +	if (cryptoretproc < 0) {
 | |
| +		error = cryptoretproc;
 | |
| +		printk("crypto: crypto_init cannot start cryptoret thread; error %d",
 | |
| +				error);
 | |
| +		goto bad;
 | |
| +	}
 | |
| +
 | |
| +	return 0;
 | |
| +bad:
 | |
| +	crypto_exit();
 | |
| +	return error;
 | |
| +}
 | |
| +
 | |
| +
 | |
| +static void
 | |
| +crypto_exit(void)
 | |
| +{
 | |
| +	pid_t p;
 | |
| +	unsigned long d_flags;
 | |
| +
 | |
| +	dprintk("%s()\n", __FUNCTION__);
 | |
| +
 | |
| +	/*
 | |
| +	 * Terminate any crypto threads.
 | |
| +	 */
 | |
| +
 | |
| +	CRYPTO_DRIVER_LOCK();
 | |
| +	p = cryptoproc;
 | |
| +	cryptoproc = (pid_t) -1;
 | |
| +	kill_proc(p, SIGTERM, 1);
 | |
| +	wake_up_interruptible(&cryptoproc_wait);
 | |
| +	CRYPTO_DRIVER_UNLOCK();
 | |
| +
 | |
| +	wait_for_completion(&cryptoproc_exited);
 | |
| +
 | |
| +	CRYPTO_DRIVER_LOCK();
 | |
| +	p = cryptoretproc;
 | |
| +	cryptoretproc = (pid_t) -1;
 | |
| +	kill_proc(p, SIGTERM, 1);
 | |
| +	wake_up_interruptible(&cryptoretproc_wait);
 | |
| +	CRYPTO_DRIVER_UNLOCK();
 | |
| +
 | |
| +	wait_for_completion(&cryptoretproc_exited);
 | |
| +
 | |
| +	/* XXX flush queues??? */
 | |
| +
 | |
| +	/*
 | |
| +	 * Reclaim dynamically allocated resources.
 | |
| +	 */
 | |
| +	if (crypto_drivers != NULL)
 | |
| +		kfree(crypto_drivers);
 | |
| +
 | |
| +	if (cryptodesc_zone != NULL)
 | |
| +		kmem_cache_destroy(cryptodesc_zone);
 | |
| +	if (cryptop_zone != NULL)
 | |
| +		kmem_cache_destroy(cryptop_zone);
 | |
| +}
 | |
| +
 | |
| +
 | |
| +EXPORT_SYMBOL(crypto_newsession);
 | |
| +EXPORT_SYMBOL(crypto_freesession);
 | |
| +EXPORT_SYMBOL(crypto_get_driverid);
 | |
| +EXPORT_SYMBOL(crypto_kregister);
 | |
| +EXPORT_SYMBOL(crypto_register);
 | |
| +EXPORT_SYMBOL(crypto_unregister);
 | |
| +EXPORT_SYMBOL(crypto_unregister_all);
 | |
| +EXPORT_SYMBOL(crypto_unblock);
 | |
| +EXPORT_SYMBOL(crypto_dispatch);
 | |
| +EXPORT_SYMBOL(crypto_kdispatch);
 | |
| +EXPORT_SYMBOL(crypto_freereq);
 | |
| +EXPORT_SYMBOL(crypto_getreq);
 | |
| +EXPORT_SYMBOL(crypto_done);
 | |
| +EXPORT_SYMBOL(crypto_kdone);
 | |
| +EXPORT_SYMBOL(crypto_getfeat);
 | |
| +EXPORT_SYMBOL(crypto_userasymcrypto);
 | |
| +EXPORT_SYMBOL(crypto_getcaps);
 | |
| +EXPORT_SYMBOL(crypto_find_driver);
 | |
| +EXPORT_SYMBOL(crypto_find_device_byhid);
 | |
| +
 | |
| +module_init(crypto_init);
 | |
| +module_exit(crypto_exit);
 | |
| +
 | |
| +MODULE_LICENSE("BSD");
 | |
| +MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
 | |
| +MODULE_DESCRIPTION("OCF (OpenBSD Cryptographic Framework)");
 | |
| --- /dev/null
 | |
| +++ b/crypto/ocf/criov.c
 | |
| @@ -0,0 +1,215 @@
 | |
| +/*      $OpenBSD: criov.c,v 1.9 2002/01/29 15:48:29 jason Exp $	*/
 | |
| +
 | |
| +/*
 | |
| + * Linux port done by David McCullough <david_mccullough@securecomputing.com>
 | |
| + * Copyright (C) 2006-2007 David McCullough
 | |
| + * Copyright (C) 2004-2005 Intel Corporation.
 | |
| + * The license and original author are listed below.
 | |
| + *
 | |
| + * Copyright (c) 1999 Theo de Raadt
 | |
| + *
 | |
| + * Redistribution and use in source and binary forms, with or without
 | |
| + * modification, are permitted provided that the following conditions
 | |
| + * are met:
 | |
| + *
 | |
| + * 1. Redistributions of source code must retain the above copyright
 | |
| + *   notice, this list of conditions and the following disclaimer.
 | |
| + * 2. Redistributions in binary form must reproduce the above copyright
 | |
| + *   notice, this list of conditions and the following disclaimer in the
 | |
| + *   documentation and/or other materials provided with the distribution.
 | |
| + * 3. The name of the author may not be used to endorse or promote products
 | |
| + *   derived from this software without specific prior written permission.
 | |
| + *
 | |
| + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 | |
| + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 | |
| + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 | |
| + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 | |
| + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 | |
| + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 | |
| + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 | |
| + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 | |
| + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 | |
| + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 | |
| + *
 | |
| +__FBSDID("$FreeBSD: src/sys/opencrypto/criov.c,v 1.5 2006/06/04 22:15:13 pjd Exp $");
 | |
| + */
 | |
| +
 | |
| +#ifndef AUTOCONF_INCLUDED
 | |
| +#include <linux/config.h>
 | |
| +#endif
 | |
| +#include <linux/module.h>
 | |
| +#include <linux/init.h>
 | |
| +#include <linux/slab.h>
 | |
| +#include <linux/uio.h>
 | |
| +#include <linux/skbuff.h>
 | |
| +#include <linux/kernel.h>
 | |
| +#include <linux/mm.h>
 | |
| +#include <asm/io.h>
 | |
| +
 | |
| +#include <uio.h>
 | |
| +#include <cryptodev.h>
 | |
| +
 | |
| +/*
 | |
| + * This macro is only for avoiding code duplication, as we need to skip
 | |
| + * given number of bytes in the same way in three functions below.
 | |
| + */
 | |
| +#define	CUIO_SKIP()	do {						\
 | |
| +	KASSERT(off >= 0, ("%s: off %d < 0", __func__, off));		\
 | |
| +	KASSERT(len >= 0, ("%s: len %d < 0", __func__, len));		\
 | |
| +	while (off > 0) {						\
 | |
| +		KASSERT(iol >= 0, ("%s: empty in skip", __func__));	\
 | |
| +		if (off < iov->iov_len)					\
 | |
| +			break;						\
 | |
| +		off -= iov->iov_len;					\
 | |
| +		iol--;							\
 | |
| +		iov++;							\
 | |
| +	}								\
 | |
| +} while (0)
 | |
| +
 | |
| +void
 | |
| +cuio_copydata(struct uio* uio, int off, int len, caddr_t cp)
 | |
| +{
 | |
| +	struct iovec *iov = uio->uio_iov;
 | |
| +	int iol = uio->uio_iovcnt;
 | |
| +	unsigned count;
 | |
| +
 | |
| +	CUIO_SKIP();
 | |
| +	while (len > 0) {
 | |
| +		KASSERT(iol >= 0, ("%s: empty", __func__));
 | |
| +		count = min((int)(iov->iov_len - off), len);
 | |
| +		memcpy(cp, ((caddr_t)iov->iov_base) + off, count);
 | |
| +		len -= count;
 | |
| +		cp += count;
 | |
| +		off = 0;
 | |
| +		iol--;
 | |
| +		iov++;
 | |
| +	}
 | |
| +}
 | |
| +
 | |
| +void
 | |
| +cuio_copyback(struct uio* uio, int off, int len, caddr_t cp)
 | |
| +{
 | |
| +	struct iovec *iov = uio->uio_iov;
 | |
| +	int iol = uio->uio_iovcnt;
 | |
| +	unsigned count;
 | |
| +
 | |
| +	CUIO_SKIP();
 | |
| +	while (len > 0) {
 | |
| +		KASSERT(iol >= 0, ("%s: empty", __func__));
 | |
| +		count = min((int)(iov->iov_len - off), len);
 | |
| +		memcpy(((caddr_t)iov->iov_base) + off, cp, count);
 | |
| +		len -= count;
 | |
| +		cp += count;
 | |
| +		off = 0;
 | |
| +		iol--;
 | |
| +		iov++;
 | |
| +	}
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Return a pointer to iov/offset of location in iovec list.
 | |
| + */
 | |
| +struct iovec *
 | |
| +cuio_getptr(struct uio *uio, int loc, int *off)
 | |
| +{
 | |
| +	struct iovec *iov = uio->uio_iov;
 | |
| +	int iol = uio->uio_iovcnt;
 | |
| +
 | |
| +	while (loc >= 0) {
 | |
| +		/* Normal end of search */
 | |
| +		if (loc < iov->iov_len) {
 | |
| +	    		*off = loc;
 | |
| +	    		return (iov);
 | |
| +		}
 | |
| +
 | |
| +		loc -= iov->iov_len;
 | |
| +		if (iol == 0) {
 | |
| +			if (loc == 0) {
 | |
| +				/* Point at the end of valid data */
 | |
| +				*off = iov->iov_len;
 | |
| +				return (iov);
 | |
| +			} else
 | |
| +				return (NULL);
 | |
| +		} else {
 | |
| +			iov++, iol--;
 | |
| +		}
 | |
| +    	}
 | |
| +
 | |
| +	return (NULL);
 | |
| +}
 | |
| +
 | |
| +EXPORT_SYMBOL(cuio_copyback);
 | |
| +EXPORT_SYMBOL(cuio_copydata);
 | |
| +EXPORT_SYMBOL(cuio_getptr);
 | |
| +
 | |
| +
 | |
| +static void
 | |
| +skb_copy_bits_back(struct sk_buff *skb, int offset, caddr_t cp, int len)
 | |
| +{
 | |
| +	int i;
 | |
| +	if (offset < skb_headlen(skb)) {
 | |
| +		memcpy(skb->data + offset, cp, min_t(int, skb_headlen(skb), len));
 | |
| +		len -= skb_headlen(skb);
 | |
| +		cp += skb_headlen(skb);
 | |
| +	}
 | |
| +	offset -= skb_headlen(skb);
 | |
| +	for (i = 0; len > 0 && i < skb_shinfo(skb)->nr_frags; i++) {
 | |
| +		if (offset < skb_shinfo(skb)->frags[i].size) {
 | |
| +			memcpy(page_address(skb_shinfo(skb)->frags[i].page) +
 | |
| +					skb_shinfo(skb)->frags[i].page_offset,
 | |
| +					cp, min_t(int, skb_shinfo(skb)->frags[i].size, len));
 | |
| +			len -= skb_shinfo(skb)->frags[i].size;
 | |
| +			cp += skb_shinfo(skb)->frags[i].size;
 | |
| +		}
 | |
| +		offset -= skb_shinfo(skb)->frags[i].size;
 | |
| +	}
 | |
| +}
 | |
| +
 | |
| +void
 | |
| +crypto_copyback(int flags, caddr_t buf, int off, int size, caddr_t in)
 | |
| +{
 | |
| +
 | |
| +	if ((flags & CRYPTO_F_SKBUF) != 0)
 | |
| +		skb_copy_bits_back((struct sk_buff *)buf, off, in, size);
 | |
| +	else if ((flags & CRYPTO_F_IOV) != 0)
 | |
| +		cuio_copyback((struct uio *)buf, off, size, in);
 | |
| +	else
 | |
| +		bcopy(in, buf + off, size);
 | |
| +}
 | |
| +
 | |
| +void
 | |
| +crypto_copydata(int flags, caddr_t buf, int off, int size, caddr_t out)
 | |
| +{
 | |
| +
 | |
| +	if ((flags & CRYPTO_F_SKBUF) != 0)
 | |
| +		skb_copy_bits((struct sk_buff *)buf, off, out, size);
 | |
| +	else if ((flags & CRYPTO_F_IOV) != 0)
 | |
| +		cuio_copydata((struct uio *)buf, off, size, out);
 | |
| +	else
 | |
| +		bcopy(buf + off, out, size);
 | |
| +}
 | |
| +
 | |
| +int
 | |
| +crypto_apply(int flags, caddr_t buf, int off, int len,
 | |
| +    int (*f)(void *, void *, u_int), void *arg)
 | |
| +{
 | |
| +#if 0
 | |
| +	int error;
 | |
| +
 | |
| +	if ((flags & CRYPTO_F_SKBUF) != 0)
 | |
| +		error = XXXXXX((struct mbuf *)buf, off, len, f, arg);
 | |
| +	else if ((flags & CRYPTO_F_IOV) != 0)
 | |
| +		error = cuio_apply((struct uio *)buf, off, len, f, arg);
 | |
| +	else
 | |
| +		error = (*f)(arg, buf + off, len);
 | |
| +	return (error);
 | |
| +#else
 | |
| +	KASSERT(0, ("crypto_apply not implemented!\n"));
 | |
| +#endif
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +EXPORT_SYMBOL(crypto_copyback);
 | |
| +EXPORT_SYMBOL(crypto_copydata);
 | |
| +EXPORT_SYMBOL(crypto_apply);
 | |
| +
 | |
| --- /dev/null
 | |
| +++ b/crypto/ocf/uio.h
 | |
| @@ -0,0 +1,54 @@
 | |
| +#ifndef _OCF_UIO_H_
 | |
| +#define _OCF_UIO_H_
 | |
| +
 | |
| +#include <linux/uio.h>
 | |
| +
 | |
| +/*
 | |
| + * The linux uio.h doesn't have all we need.  To be fully api compatible
 | |
| + * with the BSD cryptodev,  we need to keep this around.  Perhaps this can
 | |
| + * be moved back into the linux/uio.h
 | |
| + *
 | |
| + * Linux port done by David McCullough <david_mccullough@securecomputing.com>
 | |
| + * Copyright (C) 2006-2007 David McCullough
 | |
| + * Copyright (C) 2004-2005 Intel Corporation.
 | |
| + *
 | |
| + * LICENSE TERMS
 | |
| + *
 | |
| + * The free distribution and use of this software in both source and binary
 | |
| + * form is allowed (with or without changes) provided that:
 | |
| + *
 | |
| + *   1. distributions of this source code include the above copyright
 | |
| + *      notice, this list of conditions and the following disclaimer;
 | |
| + *
 | |
| + *   2. distributions in binary form include the above copyright
 | |
| + *      notice, this list of conditions and the following disclaimer
 | |
| + *      in the documentation and/or other associated materials;
 | |
| + *
 | |
| + *   3. the copyright holder's name is not used to endorse products
 | |
| + *      built using this software without specific written permission.
 | |
| + *
 | |
| + * ALTERNATIVELY, provided that this notice is retained in full, this product
 | |
| + * may be distributed under the terms of the GNU General Public License (GPL),
 | |
| + * in which case the provisions of the GPL apply INSTEAD OF those given above.
 | |
| + *
 | |
| + * DISCLAIMER
 | |
| + *
 | |
| + * This software is provided 'as is' with no explicit or implied warranties
 | |
| + * in respect of its properties, including, but not limited to, correctness
 | |
| + * and/or fitness for purpose.
 | |
| + * ---------------------------------------------------------------------------
 | |
| + */
 | |
| +
 | |
| +struct uio {
 | |
| +	struct	iovec *uio_iov;
 | |
| +	int		uio_iovcnt;
 | |
| +	off_t	uio_offset;
 | |
| +	int		uio_resid;
 | |
| +#if 0
 | |
| +	enum	uio_seg uio_segflg;
 | |
| +	enum	uio_rw uio_rw;
 | |
| +	struct  thread *uio_td;
 | |
| +#endif
 | |
| +};
 | |
| +
 | |
| +#endif
 | |
| --- /dev/null
 | |
| +++ b/crypto/ocf/talitos/talitos.c
 | |
| @@ -0,0 +1,1359 @@
 | |
| +/*
 | |
| + * crypto/ocf/talitos/talitos.c
 | |
| + *
 | |
| + * An OCF-Linux module that uses Freescale's SEC to do the crypto.
 | |
| + * Based on crypto/ocf/hifn and crypto/ocf/safe OCF drivers
 | |
| + *
 | |
| + * Copyright (c) 2006 Freescale Semiconductor, Inc.
 | |
| + *
 | |
| + * This code written by Kim A. B. Phillips <kim.phillips@freescale.com>
 | |
| + * some code copied from files with the following:
 | |
| + * Copyright (C) 2004-2007 David McCullough <david_mccullough@securecomputing.com
 | |
| + *
 | |
| + * Redistribution and use in source and binary forms, with or without
 | |
| + * modification, are permitted provided that the following conditions
 | |
| + * are met:
 | |
| + *
 | |
| + * 1. Redistributions of source code must retain the above copyright
 | |
| + *    notice, this list of conditions and the following disclaimer.
 | |
| + * 2. Redistributions in binary form must reproduce the above copyright
 | |
| + *    notice, this list of conditions and the following disclaimer in the
 | |
| + *    documentation and/or other materials provided with the distribution.
 | |
| + * 3. The name of the author may not be used to endorse or promote products
 | |
| + *    derived from this software without specific prior written permission.
 | |
| + *
 | |
| + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 | |
| + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 | |
| + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 | |
| + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 | |
| + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 | |
| + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 | |
| + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 | |
| + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 | |
| + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 | |
| + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 | |
| + *
 | |
| + * ---------------------------------------------------------------------------
 | |
| + *
 | |
| + * NOTES:
 | |
| + *
 | |
| + * The Freescale SEC (also known as 'talitos') resides on the
 | |
| + * internal bus, and runs asynchronous to the processor core.  It has
 | |
| + * a wide gamut of cryptographic acceleration features, including single-
 | |
| + * pass IPsec (also known as algorithm chaining).  To properly utilize
 | |
| + * all of the SEC's performance enhancing features, further reworking
 | |
| + * of higher level code (framework, applications) will be necessary.
 | |
| + *
 | |
| + * The following table shows which SEC version is present in which devices:
 | |
| + *
 | |
| + * Devices       SEC version
 | |
| + *
 | |
| + * 8272, 8248    SEC 1.0
 | |
| + * 885, 875      SEC 1.2
 | |
| + * 8555E, 8541E  SEC 2.0
 | |
| + * 8349E         SEC 2.01
 | |
| + * 8548E         SEC 2.1
 | |
| + *
 | |
| + * The following table shows the features offered by each SEC version:
 | |
| + *
 | |
| + * 	                       Max.   chan-
 | |
| + * version  Bus I/F       Clock  nels  DEU AESU AFEU MDEU PKEU RNG KEU
 | |
| + *
 | |
| + * SEC 1.0  internal 64b  100MHz   4     1    1    1    1    1   1   0
 | |
| + * SEC 1.2  internal 32b   66MHz   1     1    1    0    1    0   0   0
 | |
| + * SEC 2.0  internal 64b  166MHz   4     1    1    1    1    1   1   0
 | |
| + * SEC 2.01 internal 64b  166MHz   4     1    1    1    1    1   1   0
 | |
| + * SEC 2.1  internal 64b  333MHz   4     1    1    1    1    1   1   1
 | |
| + *
 | |
| + * Each execution unit in the SEC has two modes of execution; channel and
 | |
| + * slave/debug.  This driver employs the channel infrastructure in the
 | |
| + * device for convenience.  Only the RNG is directly accessed due to the
 | |
| + * convenience of its random fifo pool.  The relationship between the
 | |
| + * channels and execution units is depicted in the following diagram:
 | |
| + *
 | |
| + *    -------   ------------
 | |
| + * ---| ch0 |---|          |
 | |
| + *    -------   |          |
 | |
| + *              |          |------+-------+-------+-------+------------
 | |
| + *    -------   |          |      |       |       |       |           |
 | |
| + * ---| ch1 |---|          |      |       |       |       |           |
 | |
| + *    -------   |          |   ------  ------  ------  ------      ------
 | |
| + *              |controller|   |DEU |  |AESU|  |MDEU|  |PKEU| ...  |RNG |
 | |
| + *    -------   |          |   ------  ------  ------  ------      ------
 | |
| + * ---| ch2 |---|          |      |       |       |       |           |
 | |
| + *    -------   |          |      |       |       |       |           |
 | |
| + *              |          |------+-------+-------+-------+------------
 | |
| + *    -------   |          |
 | |
| + * ---| ch3 |---|          |
 | |
| + *    -------   ------------
 | |
| + *
 | |
| + * Channel ch0 may drive an aes operation to the aes unit (AESU),
 | |
| + * and, at the same time, ch1 may drive a message digest operation
 | |
| + * to the mdeu. Each channel has an input descriptor FIFO, and the
 | |
| + * FIFO can contain, e.g. on the 8541E, up to 24 entries, before a
 | |
| + * a buffer overrun error is triggered. The controller is responsible
 | |
| + * for fetching the data from descriptor pointers, and passing the
 | |
| + * data to the appropriate EUs. The controller also writes the
 | |
| + * cryptographic operation's result to memory. The SEC notifies
 | |
| + * completion by triggering an interrupt and/or setting the 1st byte
 | |
| + * of the hdr field to 0xff.
 | |
| + *
 | |
| + * TODO:
 | |
| + * o support more algorithms
 | |
| + * o support more versions of the SEC
 | |
| + * o add support for linux 2.4
 | |
| + * o scatter-gather (sg) support
 | |
| + * o add support for public key ops (PKEU)
 | |
| + * o add statistics
 | |
| + */
 | |
| +
 | |
| +#ifndef AUTOCONF_INCLUDED
 | |
| +#include <linux/config.h>
 | |
| +#endif
 | |
| +#include <linux/module.h>
 | |
| +#include <linux/init.h>
 | |
| +#include <linux/interrupt.h>
 | |
| +#include <linux/spinlock.h>
 | |
| +#include <linux/random.h>
 | |
| +#include <linux/skbuff.h>
 | |
| +#include <asm/scatterlist.h>
 | |
| +#include <linux/dma-mapping.h>  /* dma_map_single() */
 | |
| +#include <linux/moduleparam.h>
 | |
| +
 | |
| +#include <linux/version.h>
 | |
| +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
 | |
| +#include <linux/platform_device.h>
 | |
| +#endif
 | |
| +
 | |
| +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
 | |
| +#include <linux/of_platform.h>
 | |
| +#endif
 | |
| +
 | |
| +#include <cryptodev.h>
 | |
| +#include <uio.h>
 | |
| +
 | |
| +#define DRV_NAME "talitos"
 | |
| +
 | |
| +#include "talitos_dev.h"
 | |
| +#include "talitos_soft.h"
 | |
| +
 | |
| +#define read_random(p,l) get_random_bytes(p,l)
 | |
| +
 | |
| +const char talitos_driver_name[] = "Talitos OCF";
 | |
| +const char talitos_driver_version[] = "0.2";
 | |
| +
 | |
| +static int talitos_newsession(device_t dev, u_int32_t *sidp,
 | |
| +								struct cryptoini *cri);
 | |
| +static int talitos_freesession(device_t dev, u_int64_t tid);
 | |
| +static int talitos_process(device_t dev, struct cryptop *crp, int hint);
 | |
| +static void dump_talitos_status(struct talitos_softc *sc);
 | |
| +static int talitos_submit(struct talitos_softc *sc, struct talitos_desc *td,
 | |
| +								int chsel);
 | |
| +static void talitos_doneprocessing(struct talitos_softc *sc);
 | |
| +static void talitos_init_device(struct talitos_softc *sc);
 | |
| +static void talitos_reset_device_master(struct talitos_softc *sc);
 | |
| +static void talitos_reset_device(struct talitos_softc *sc);
 | |
| +static void talitos_errorprocessing(struct talitos_softc *sc);
 | |
| +#ifdef CONFIG_PPC_MERGE
 | |
| +static int talitos_probe(struct of_device *ofdev, const struct of_device_id *match);
 | |
| +static int talitos_remove(struct of_device *ofdev);
 | |
| +#else
 | |
| +static int talitos_probe(struct platform_device *pdev);
 | |
| +static int talitos_remove(struct platform_device *pdev);
 | |
| +#endif
 | |
| +#ifdef CONFIG_OCF_RANDOMHARVEST
 | |
| +static int talitos_read_random(void *arg, u_int32_t *buf, int maxwords);
 | |
| +static void talitos_rng_init(struct talitos_softc *sc);
 | |
| +#endif
 | |
| +
 | |
| +static device_method_t talitos_methods = {
 | |
| +	/* crypto device methods */
 | |
| +	DEVMETHOD(cryptodev_newsession,	talitos_newsession),
 | |
| +	DEVMETHOD(cryptodev_freesession,talitos_freesession),
 | |
| +	DEVMETHOD(cryptodev_process,	talitos_process),
 | |
| +};
 | |
| +
 | |
| +#define debug talitos_debug
 | |
| +int talitos_debug = 0;
 | |
| +module_param(talitos_debug, int, 0644);
 | |
| +MODULE_PARM_DESC(talitos_debug, "Enable debug");
 | |
| +
 | |
| +static inline void talitos_write(volatile unsigned *addr, u32 val)
 | |
| +{
 | |
| +        out_be32(addr, val);
 | |
| +}
 | |
| +
 | |
| +static inline u32 talitos_read(volatile unsigned *addr)
 | |
| +{
 | |
| +        u32 val;
 | |
| +        val = in_be32(addr);
 | |
| +        return val;
 | |
| +}
 | |
| +
 | |
| +static void dump_talitos_status(struct talitos_softc *sc)
 | |
| +{
 | |
| +	unsigned int v, v_hi, i, *ptr;
 | |
| +	v = talitos_read(sc->sc_base_addr + TALITOS_MCR);
 | |
| +	v_hi = talitos_read(sc->sc_base_addr + TALITOS_MCR_HI);
 | |
| +	printk(KERN_INFO "%s: MCR          0x%08x_%08x\n",
 | |
| +			device_get_nameunit(sc->sc_cdev), v, v_hi);
 | |
| +	v = talitos_read(sc->sc_base_addr + TALITOS_IMR);
 | |
| +	v_hi = talitos_read(sc->sc_base_addr + TALITOS_IMR_HI);
 | |
| +	printk(KERN_INFO "%s: IMR          0x%08x_%08x\n",
 | |
| +			device_get_nameunit(sc->sc_cdev), v, v_hi);
 | |
| +	v = talitos_read(sc->sc_base_addr + TALITOS_ISR);
 | |
| +	v_hi = talitos_read(sc->sc_base_addr + TALITOS_ISR_HI);
 | |
| +	printk(KERN_INFO "%s: ISR          0x%08x_%08x\n",
 | |
| +			device_get_nameunit(sc->sc_cdev), v, v_hi);
 | |
| +	for (i = 0; i < sc->sc_num_channels; i++) {
 | |
| +		v = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
 | |
| +			TALITOS_CH_CDPR);
 | |
| +		v_hi = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
 | |
| +			TALITOS_CH_CDPR_HI);
 | |
| +		printk(KERN_INFO "%s: CDPR     ch%d 0x%08x_%08x\n",
 | |
| +				device_get_nameunit(sc->sc_cdev), i, v, v_hi);
 | |
| +	}
 | |
| +	for (i = 0; i < sc->sc_num_channels; i++) {
 | |
| +		v = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
 | |
| +			TALITOS_CH_CCPSR);
 | |
| +		v_hi = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
 | |
| +			TALITOS_CH_CCPSR_HI);
 | |
| +		printk(KERN_INFO "%s: CCPSR    ch%d 0x%08x_%08x\n",
 | |
| +				device_get_nameunit(sc->sc_cdev), i, v, v_hi);
 | |
| +	}
 | |
| +	ptr = sc->sc_base_addr + TALITOS_CH_DESCBUF;
 | |
| +	for (i = 0; i < 16; i++) {
 | |
| +		v = talitos_read(ptr++); v_hi = talitos_read(ptr++);
 | |
| +		printk(KERN_INFO "%s: DESCBUF  ch0 0x%08x_%08x (tdp%02d)\n",
 | |
| +				device_get_nameunit(sc->sc_cdev), v, v_hi, i);
 | |
| +	}
 | |
| +	return;
 | |
| +}
 | |
| +
 | |
| +
 | |
| +#ifdef CONFIG_OCF_RANDOMHARVEST
 | |
| +/*
 | |
| + * pull random numbers off the RNG FIFO, not exceeding amount available
 | |
| + */
 | |
| +static int
 | |
| +talitos_read_random(void *arg, u_int32_t *buf, int maxwords)
 | |
| +{
 | |
| +	struct talitos_softc *sc = (struct talitos_softc *) arg;
 | |
| +	int rc;
 | |
| +	u_int32_t v;
 | |
| +
 | |
| +	DPRINTF("%s()\n", __FUNCTION__);
 | |
| +
 | |
| +	/* check for things like FIFO underflow */
 | |
| +	v = talitos_read(sc->sc_base_addr + TALITOS_RNGISR_HI);
 | |
| +	if (unlikely(v)) {
 | |
| +		printk(KERN_ERR "%s: RNGISR_HI error %08x\n",
 | |
| +				device_get_nameunit(sc->sc_cdev), v);
 | |
| +		return 0;
 | |
| +	}
 | |
| +	/*
 | |
| +	 * OFL is number of available 64-bit words,
 | |
| +	 * shift and convert to a 32-bit word count
 | |
| +	 */
 | |
| +	v = talitos_read(sc->sc_base_addr + TALITOS_RNGSR_HI);
 | |
| +	v = (v & TALITOS_RNGSR_HI_OFL) >> (16 - 1);
 | |
| +	if (maxwords > v)
 | |
| +		maxwords = v;
 | |
| +	for (rc = 0; rc < maxwords; rc++) {
 | |
| +		buf[rc] = talitos_read(sc->sc_base_addr +
 | |
| +			TALITOS_RNG_FIFO + rc*sizeof(u_int32_t));
 | |
| +	}
 | |
| +	if (maxwords & 1) {
 | |
| +		/*
 | |
| +		 * RNG will complain with an AE in the RNGISR
 | |
| +		 * if we don't complete the pairs of 32-bit reads
 | |
| +		 * to its 64-bit register based FIFO
 | |
| +		 */
 | |
| +		v = talitos_read(sc->sc_base_addr +
 | |
| +			TALITOS_RNG_FIFO + rc*sizeof(u_int32_t));
 | |
| +	}
 | |
| +
 | |
| +	return rc;
 | |
| +}
 | |
| +
 | |
| +static void
 | |
| +talitos_rng_init(struct talitos_softc *sc)
 | |
| +{
 | |
| +	u_int32_t v;
 | |
| +
 | |
| +	DPRINTF("%s()\n", __FUNCTION__);
 | |
| +	/* reset RNG EU */
 | |
| +	v = talitos_read(sc->sc_base_addr + TALITOS_RNGRCR_HI);
 | |
| +	v |= TALITOS_RNGRCR_HI_SR;
 | |
| +	talitos_write(sc->sc_base_addr + TALITOS_RNGRCR_HI, v);
 | |
| +	while ((talitos_read(sc->sc_base_addr + TALITOS_RNGSR_HI)
 | |
| +		& TALITOS_RNGSR_HI_RD) == 0)
 | |
| +			cpu_relax();
 | |
| +	/*
 | |
| +	 * we tell the RNG to start filling the RNG FIFO
 | |
| +	 * by writing the RNGDSR
 | |
| +	 */
 | |
| +	v = talitos_read(sc->sc_base_addr + TALITOS_RNGDSR_HI);
 | |
| +	talitos_write(sc->sc_base_addr + TALITOS_RNGDSR_HI, v);
 | |
| +	/*
 | |
| +	 * 64 bits of data will be pushed onto the FIFO every
 | |
| +	 * 256 SEC cycles until the FIFO is full.  The RNG then
 | |
| +	 * attempts to keep the FIFO full.
 | |
| +	 */
 | |
| +	v = talitos_read(sc->sc_base_addr + TALITOS_RNGISR_HI);
 | |
| +	if (v) {
 | |
| +		printk(KERN_ERR "%s: RNGISR_HI error %08x\n",
 | |
| +			device_get_nameunit(sc->sc_cdev), v);
 | |
| +		return;
 | |
| +	}
 | |
| +	/*
 | |
| +	 * n.b. we need to add a FIPS test here - if the RNG is going
 | |
| +	 * to fail, it's going to fail at reset time
 | |
| +	 */
 | |
| +	return;
 | |
| +}
 | |
| +#endif /* CONFIG_OCF_RANDOMHARVEST */
 | |
| +
 | |
| +/*
 | |
| + * Generate a new software session.
 | |
| + */
 | |
| +static int
 | |
| +talitos_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
 | |
| +{
 | |
| +	struct cryptoini *c, *encini = NULL, *macini = NULL;
 | |
| +	struct talitos_softc *sc = device_get_softc(dev);
 | |
| +	struct talitos_session *ses = NULL;
 | |
| +	int sesn;
 | |
| +
 | |
| +	DPRINTF("%s()\n", __FUNCTION__);
 | |
| +	if (sidp == NULL || cri == NULL || sc == NULL) {
 | |
| +		DPRINTF("%s,%d - EINVAL\n", __FILE__, __LINE__);
 | |
| +		return EINVAL;
 | |
| +	}
 | |
| +	for (c = cri; c != NULL; c = c->cri_next) {
 | |
| +		if (c->cri_alg == CRYPTO_MD5 ||
 | |
| +		    c->cri_alg == CRYPTO_MD5_HMAC ||
 | |
| +		    c->cri_alg == CRYPTO_SHA1 ||
 | |
| +		    c->cri_alg == CRYPTO_SHA1_HMAC ||
 | |
| +		    c->cri_alg == CRYPTO_NULL_HMAC) {
 | |
| +			if (macini)
 | |
| +				return EINVAL;
 | |
| +			macini = c;
 | |
| +		} else if (c->cri_alg == CRYPTO_DES_CBC ||
 | |
| +		    c->cri_alg == CRYPTO_3DES_CBC ||
 | |
| +		    c->cri_alg == CRYPTO_AES_CBC ||
 | |
| +		    c->cri_alg == CRYPTO_NULL_CBC) {
 | |
| +			if (encini)
 | |
| +				return EINVAL;
 | |
| +			encini = c;
 | |
| +		} else {
 | |
| +			DPRINTF("UNKNOWN c->cri_alg %d\n", encini->cri_alg);
 | |
| +			return EINVAL;
 | |
| +		}
 | |
| +	}
 | |
| +	if (encini == NULL && macini == NULL)
 | |
| +		return EINVAL;
 | |
| +	if (encini) {
 | |
| +		/* validate key length */
 | |
| +		switch (encini->cri_alg) {
 | |
| +		case CRYPTO_DES_CBC:
 | |
| +			if (encini->cri_klen != 64)
 | |
| +				return EINVAL;
 | |
| +			break;
 | |
| +		case CRYPTO_3DES_CBC:
 | |
| +			if (encini->cri_klen != 192) {
 | |
| +				return EINVAL;
 | |
| +			}
 | |
| +			break;
 | |
| +		case CRYPTO_AES_CBC:
 | |
| +			if (encini->cri_klen != 128 &&
 | |
| +			    encini->cri_klen != 192 &&
 | |
| +			    encini->cri_klen != 256)
 | |
| +				return EINVAL;
 | |
| +			break;
 | |
| +		default:
 | |
| +			DPRINTF("UNKNOWN encini->cri_alg %d\n",
 | |
| +				encini->cri_alg);
 | |
| +			return EINVAL;
 | |
| +		}
 | |
| +	}
 | |
| +
 | |
| +	if (sc->sc_sessions == NULL) {
 | |
| +		ses = sc->sc_sessions = (struct talitos_session *)
 | |
| +			kmalloc(sizeof(struct talitos_session), SLAB_ATOMIC);
 | |
| +		if (ses == NULL)
 | |
| +			return ENOMEM;
 | |
| +		memset(ses, 0, sizeof(struct talitos_session));
 | |
| +		sesn = 0;
 | |
| +		sc->sc_nsessions = 1;
 | |
| +	} else {
 | |
| +		for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
 | |
| +			if (sc->sc_sessions[sesn].ses_used == 0) {
 | |
| +				ses = &sc->sc_sessions[sesn];
 | |
| +				break;
 | |
| +			}
 | |
| +		}
 | |
| +
 | |
| +		if (ses == NULL) {
 | |
| +			/* allocating session */
 | |
| +			sesn = sc->sc_nsessions;
 | |
| +			ses = (struct talitos_session *) kmalloc(
 | |
| +				(sesn + 1) * sizeof(struct talitos_session),
 | |
| +				SLAB_ATOMIC);
 | |
| +			if (ses == NULL)
 | |
| +				return ENOMEM;
 | |
| +			memset(ses, 0,
 | |
| +				(sesn + 1) * sizeof(struct talitos_session));
 | |
| +			memcpy(ses, sc->sc_sessions,
 | |
| +				sesn * sizeof(struct talitos_session));
 | |
| +			memset(sc->sc_sessions, 0,
 | |
| +				sesn * sizeof(struct talitos_session));
 | |
| +			kfree(sc->sc_sessions);
 | |
| +			sc->sc_sessions = ses;
 | |
| +			ses = &sc->sc_sessions[sesn];
 | |
| +			sc->sc_nsessions++;
 | |
| +		}
 | |
| +	}
 | |
| +
 | |
| +	ses->ses_used = 1;
 | |
| +
 | |
| +	if (encini) {
 | |
| +		/* get an IV */
 | |
| +		/* XXX may read fewer than requested */
 | |
| +		read_random(ses->ses_iv, sizeof(ses->ses_iv));
 | |
| +
 | |
| +		ses->ses_klen = (encini->cri_klen + 7) / 8;
 | |
| +		memcpy(ses->ses_key, encini->cri_key, ses->ses_klen);
 | |
| +		if (macini) {
 | |
| +			/* doing hash on top of cipher */
 | |
| +			ses->ses_hmac_len = (macini->cri_klen + 7) / 8;
 | |
| +			memcpy(ses->ses_hmac, macini->cri_key,
 | |
| +				ses->ses_hmac_len);
 | |
| +		}
 | |
| +	} else if (macini) {
 | |
| +		/* doing hash */
 | |
| +		ses->ses_klen = (macini->cri_klen + 7) / 8;
 | |
| +		memcpy(ses->ses_key, macini->cri_key, ses->ses_klen);
 | |
| +	}
 | |
| +
 | |
| +	/* back compat way of determining MSC result len */
 | |
| +	if (macini) {
 | |
| +		ses->ses_mlen = macini->cri_mlen;
 | |
| +		if (ses->ses_mlen == 0) {
 | |
| +			if (macini->cri_alg == CRYPTO_MD5_HMAC)
 | |
| +				ses->ses_mlen = MD5_HASH_LEN;
 | |
| +			else
 | |
| +				ses->ses_mlen = SHA1_HASH_LEN;
 | |
| +		}
 | |
| +	}
 | |
| +
 | |
| +	/* really should make up a template td here,
 | |
| +	 * and only fill things like i/o and direction in process() */
 | |
| +
 | |
| +	/* assign session ID */
 | |
| +	*sidp = TALITOS_SID(sc->sc_num, sesn);
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Deallocate a session.
 | |
| + */
 | |
| +static int
 | |
| +talitos_freesession(device_t dev, u_int64_t tid)
 | |
| +{
 | |
| +	struct talitos_softc *sc = device_get_softc(dev);
 | |
| +	int session, ret;
 | |
| +	u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
 | |
| +
 | |
| +	if (sc == NULL)
 | |
| +		return EINVAL;
 | |
| +	session = TALITOS_SESSION(sid);
 | |
| +	if (session < sc->sc_nsessions) {
 | |
| +		memset(&sc->sc_sessions[session], 0,
 | |
| +			sizeof(sc->sc_sessions[session]));
 | |
| +		ret = 0;
 | |
| +	} else
 | |
| +		ret = EINVAL;
 | |
| +	return ret;
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * launch device processing - it will come back with done notification
 | |
| + * in the form of an interrupt and/or HDR_DONE_BITS in header
 | |
| + */
 | |
| +static int
 | |
| +talitos_submit(
 | |
| +	struct talitos_softc *sc,
 | |
| +	struct talitos_desc *td,
 | |
| +	int chsel)
 | |
| +{
 | |
| +	u_int32_t v;
 | |
| +
 | |
| +	v = dma_map_single(NULL, td, sizeof(*td), DMA_TO_DEVICE);
 | |
| +	talitos_write(sc->sc_base_addr +
 | |
| +		chsel*TALITOS_CH_OFFSET + TALITOS_CH_FF, 0);
 | |
| +	talitos_write(sc->sc_base_addr +
 | |
| +		chsel*TALITOS_CH_OFFSET + TALITOS_CH_FF_HI, v);
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +static int
 | |
| +talitos_process(device_t dev, struct cryptop *crp, int hint)
 | |
| +{
 | |
| +	int i, err = 0, ivsize;
 | |
| +	struct talitos_softc *sc = device_get_softc(dev);
 | |
| +	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
 | |
| +	caddr_t iv;
 | |
| +	struct talitos_session *ses;
 | |
| +	struct talitos_desc *td;
 | |
| +	unsigned long flags;
 | |
| +	/* descriptor mappings */
 | |
| +	int hmac_key, hmac_data, cipher_iv, cipher_key,
 | |
| +		in_fifo, out_fifo, cipher_iv_out;
 | |
| +	static int chsel = -1;
 | |
| +
 | |
| +	DPRINTF("%s()\n", __FUNCTION__);
 | |
| +
 | |
| +	if (crp == NULL || crp->crp_callback == NULL || sc == NULL) {
 | |
| +		return EINVAL;
 | |
| +	}
 | |
| +	crp->crp_etype = 0;
 | |
| +	if (TALITOS_SESSION(crp->crp_sid) >= sc->sc_nsessions) {
 | |
| +		return EINVAL;
 | |
| +	}
 | |
| +
 | |
| +	ses = &sc->sc_sessions[TALITOS_SESSION(crp->crp_sid)];
 | |
| +
 | |
| +        /* enter the channel scheduler */
 | |
| +	spin_lock_irqsave(&sc->sc_chnfifolock[sc->sc_num_channels], flags);
 | |
| +
 | |
| +	/* reuse channel that already had/has requests for the required EU */
 | |
| +	for (i = 0; i < sc->sc_num_channels; i++) {
 | |
| +		if (sc->sc_chnlastalg[i] == crp->crp_desc->crd_alg)
 | |
| +			break;
 | |
| +	}
 | |
| +	if (i == sc->sc_num_channels) {
 | |
| +		/*
 | |
| +		 * haven't seen this algo the last sc_num_channels or more
 | |
| +		 * use round robin in this case
 | |
| +	 	 * nb: sc->sc_num_channels must be power of 2
 | |
| +		 */
 | |
| +		chsel = (chsel + 1) & (sc->sc_num_channels - 1);
 | |
| +	} else {
 | |
| +		/*
 | |
| +		 * matches channel with same target execution unit;
 | |
| +		 * use same channel in this case
 | |
| +		 */
 | |
| +		chsel = i;
 | |
| +	}
 | |
| +	sc->sc_chnlastalg[chsel] = crp->crp_desc->crd_alg;
 | |
| +
 | |
| +        /* release the channel scheduler lock */
 | |
| +	spin_unlock_irqrestore(&sc->sc_chnfifolock[sc->sc_num_channels], flags);
 | |
| +
 | |
| +	/* acquire the selected channel fifo lock */
 | |
| +	spin_lock_irqsave(&sc->sc_chnfifolock[chsel], flags);
 | |
| +
 | |
| +	/* find and reserve next available descriptor-cryptop pair */
 | |
| +	for (i = 0; i < sc->sc_chfifo_len; i++) {
 | |
| +		if (sc->sc_chnfifo[chsel][i].cf_desc.hdr == 0) {
 | |
| +			/*
 | |
| +			 * ensure correct descriptor formation by
 | |
| +			 * avoiding inadvertently setting "optional" entries
 | |
| +			 * e.g. not using "optional" dptr2 for MD/HMAC descs
 | |
| +			 */
 | |
| +			memset(&sc->sc_chnfifo[chsel][i].cf_desc,
 | |
| +				0, sizeof(*td));
 | |
| +			/* reserve it with done notification request bit */
 | |
| +			sc->sc_chnfifo[chsel][i].cf_desc.hdr |=
 | |
| +				TALITOS_DONE_NOTIFY;
 | |
| +			break;
 | |
| +		}
 | |
| +	}
 | |
| +	spin_unlock_irqrestore(&sc->sc_chnfifolock[chsel], flags);
 | |
| +
 | |
| +	if (i == sc->sc_chfifo_len) {
 | |
| +		/* fifo full */
 | |
| +		err = ERESTART;
 | |
| +		goto errout;
 | |
| +	}
 | |
| +
 | |
| +	td = &sc->sc_chnfifo[chsel][i].cf_desc;
 | |
| +	sc->sc_chnfifo[chsel][i].cf_crp = crp;
 | |
| +
 | |
| +	crd1 = crp->crp_desc;
 | |
| +	if (crd1 == NULL) {
 | |
| +		err = EINVAL;
 | |
| +		goto errout;
 | |
| +	}
 | |
| +	crd2 = crd1->crd_next;
 | |
| +	/* prevent compiler warning */
 | |
| +	hmac_key = 0;
 | |
| +	hmac_data = 0;
 | |
| +	if (crd2 == NULL) {
 | |
| +		td->hdr |= TD_TYPE_COMMON_NONSNOOP_NO_AFEU;
 | |
| +		/* assign descriptor dword ptr mappings for this desc. type */
 | |
| +		cipher_iv = 1;
 | |
| +		cipher_key = 2;
 | |
| +		in_fifo = 3;
 | |
| +		cipher_iv_out = 5;
 | |
| +		if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
 | |
| +		    crd1->crd_alg == CRYPTO_SHA1_HMAC ||
 | |
| +		    crd1->crd_alg == CRYPTO_SHA1 ||
 | |
| +		    crd1->crd_alg == CRYPTO_MD5) {
 | |
| +			out_fifo = 5;
 | |
| +			maccrd = crd1;
 | |
| +			enccrd = NULL;
 | |
| +		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
 | |
| +		    crd1->crd_alg == CRYPTO_3DES_CBC ||
 | |
| +		    crd1->crd_alg == CRYPTO_AES_CBC ||
 | |
| +		    crd1->crd_alg == CRYPTO_ARC4) {
 | |
| +			out_fifo = 4;
 | |
| +			maccrd = NULL;
 | |
| +			enccrd = crd1;
 | |
| +		} else {
 | |
| +			DPRINTF("UNKNOWN crd1->crd_alg %d\n", crd1->crd_alg);
 | |
| +			err = EINVAL;
 | |
| +			goto errout;
 | |
| +		}
 | |
| +	} else {
 | |
| +		if (sc->sc_desc_types & TALITOS_HAS_DT_IPSEC_ESP) {
 | |
| +			td->hdr |= TD_TYPE_IPSEC_ESP;
 | |
| +		} else {
 | |
| +			DPRINTF("unimplemented: multiple descriptor ipsec\n");
 | |
| +			err = EINVAL;
 | |
| +			goto errout;
 | |
| +		}
 | |
| +		/* assign descriptor dword ptr mappings for this desc. type */
 | |
| +		hmac_key = 0;
 | |
| +		hmac_data = 1;
 | |
| +		cipher_iv = 2;
 | |
| +		cipher_key = 3;
 | |
| +		in_fifo = 4;
 | |
| +		out_fifo = 5;
 | |
| +		cipher_iv_out = 6;
 | |
| +		if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
 | |
| +                     crd1->crd_alg == CRYPTO_SHA1_HMAC ||
 | |
| +                     crd1->crd_alg == CRYPTO_MD5 ||
 | |
| +                     crd1->crd_alg == CRYPTO_SHA1) &&
 | |
| +		    (crd2->crd_alg == CRYPTO_DES_CBC ||
 | |
| +		     crd2->crd_alg == CRYPTO_3DES_CBC ||
 | |
| +		     crd2->crd_alg == CRYPTO_AES_CBC ||
 | |
| +		     crd2->crd_alg == CRYPTO_ARC4) &&
 | |
| +		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
 | |
| +			maccrd = crd1;
 | |
| +			enccrd = crd2;
 | |
| +		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
 | |
| +		     crd1->crd_alg == CRYPTO_ARC4 ||
 | |
| +		     crd1->crd_alg == CRYPTO_3DES_CBC ||
 | |
| +		     crd1->crd_alg == CRYPTO_AES_CBC) &&
 | |
| +		    (crd2->crd_alg == CRYPTO_MD5_HMAC ||
 | |
| +                     crd2->crd_alg == CRYPTO_SHA1_HMAC ||
 | |
| +                     crd2->crd_alg == CRYPTO_MD5 ||
 | |
| +                     crd2->crd_alg == CRYPTO_SHA1) &&
 | |
| +		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
 | |
| +			enccrd = crd1;
 | |
| +			maccrd = crd2;
 | |
| +		} else {
 | |
| +			/* We cannot order the SEC as requested */
 | |
| +			printk("%s: cannot do the order\n",
 | |
| +					device_get_nameunit(sc->sc_cdev));
 | |
| +			err = EINVAL;
 | |
| +			goto errout;
 | |
| +		}
 | |
| +	}
 | |
| +	/* assign in_fifo and out_fifo based on input/output struct type */
 | |
| +	if (crp->crp_flags & CRYPTO_F_SKBUF) {
 | |
| +		/* using SKB buffers */
 | |
| +		struct sk_buff *skb = (struct sk_buff *)crp->crp_buf;
 | |
| +		if (skb_shinfo(skb)->nr_frags) {
 | |
| +			printk("%s: skb frags unimplemented\n",
 | |
| +					device_get_nameunit(sc->sc_cdev));
 | |
| +			err = EINVAL;
 | |
| +			goto errout;
 | |
| +		}
 | |
| +		td->ptr[in_fifo].ptr = dma_map_single(NULL, skb->data,
 | |
| +			skb->len, DMA_TO_DEVICE);
 | |
| +		td->ptr[in_fifo].len = skb->len;
 | |
| +		td->ptr[out_fifo].ptr = dma_map_single(NULL, skb->data,
 | |
| +			skb->len, DMA_TO_DEVICE);
 | |
| +		td->ptr[out_fifo].len = skb->len;
 | |
| +		td->ptr[hmac_data].ptr = dma_map_single(NULL, skb->data,
 | |
| +			skb->len, DMA_TO_DEVICE);
 | |
| +	} else if (crp->crp_flags & CRYPTO_F_IOV) {
 | |
| +		/* using IOV buffers */
 | |
| +		struct uio *uiop = (struct uio *)crp->crp_buf;
 | |
| +		if (uiop->uio_iovcnt > 1) {
 | |
| +			printk("%s: iov frags unimplemented\n",
 | |
| +					device_get_nameunit(sc->sc_cdev));
 | |
| +			err = EINVAL;
 | |
| +			goto errout;
 | |
| +		}
 | |
| +		td->ptr[in_fifo].ptr = dma_map_single(NULL,
 | |
| +			uiop->uio_iov->iov_base, crp->crp_ilen, DMA_TO_DEVICE);
 | |
| +		td->ptr[in_fifo].len = crp->crp_ilen;
 | |
| +		/* crp_olen is never set; always use crp_ilen */
 | |
| +		td->ptr[out_fifo].ptr = dma_map_single(NULL,
 | |
| +			uiop->uio_iov->iov_base,
 | |
| +			crp->crp_ilen, DMA_TO_DEVICE);
 | |
| +		td->ptr[out_fifo].len = crp->crp_ilen;
 | |
| +	} else {
 | |
| +		/* using contig buffers */
 | |
| +		td->ptr[in_fifo].ptr = dma_map_single(NULL,
 | |
| +			crp->crp_buf, crp->crp_ilen, DMA_TO_DEVICE);
 | |
| +		td->ptr[in_fifo].len = crp->crp_ilen;
 | |
| +		td->ptr[out_fifo].ptr = dma_map_single(NULL,
 | |
| +			crp->crp_buf, crp->crp_ilen, DMA_TO_DEVICE);
 | |
| +		td->ptr[out_fifo].len = crp->crp_ilen;
 | |
| +	}
 | |
| +	if (enccrd) {
 | |
| +		switch (enccrd->crd_alg) {
 | |
| +		case CRYPTO_3DES_CBC:
 | |
| +			td->hdr |= TALITOS_MODE0_DEU_3DES;
 | |
| +			/* FALLTHROUGH */
 | |
| +		case CRYPTO_DES_CBC:
 | |
| +			td->hdr |= TALITOS_SEL0_DEU
 | |
| +				|  TALITOS_MODE0_DEU_CBC;
 | |
| +			if (enccrd->crd_flags & CRD_F_ENCRYPT)
 | |
| +				td->hdr |= TALITOS_MODE0_DEU_ENC;
 | |
| +			ivsize = 2*sizeof(u_int32_t);
 | |
| +			DPRINTF("%cDES ses %d ch %d len %d\n",
 | |
| +				(td->hdr & TALITOS_MODE0_DEU_3DES)?'3':'1',
 | |
| +				(u32)TALITOS_SESSION(crp->crp_sid),
 | |
| +				chsel, td->ptr[in_fifo].len);
 | |
| +			break;
 | |
| +		case CRYPTO_AES_CBC:
 | |
| +			td->hdr |= TALITOS_SEL0_AESU
 | |
| +				|  TALITOS_MODE0_AESU_CBC;
 | |
| +			if (enccrd->crd_flags & CRD_F_ENCRYPT)
 | |
| +				td->hdr |= TALITOS_MODE0_AESU_ENC;
 | |
| +			ivsize = 4*sizeof(u_int32_t);
 | |
| +			DPRINTF("AES  ses %d ch %d len %d\n",
 | |
| +				(u32)TALITOS_SESSION(crp->crp_sid),
 | |
| +				chsel, td->ptr[in_fifo].len);
 | |
| +			break;
 | |
| +		default:
 | |
| +			printk("%s: unimplemented enccrd->crd_alg %d\n",
 | |
| +					device_get_nameunit(sc->sc_cdev), enccrd->crd_alg);
 | |
| +			err = EINVAL;
 | |
| +			goto errout;
 | |
| +		}
 | |
| +		/*
 | |
| +		 * Setup encrypt/decrypt state.  When using basic ops
 | |
| +		 * we can't use an inline IV because hash/crypt offset
 | |
| +		 * must be from the end of the IV to the start of the
 | |
| +		 * crypt data and this leaves out the preceding header
 | |
| +		 * from the hash calculation.  Instead we place the IV
 | |
| +		 * in the state record and set the hash/crypt offset to
 | |
| +		 * copy both the header+IV.
 | |
| +		 */
 | |
| +		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
 | |
| +			td->hdr |= TALITOS_DIR_OUTBOUND;
 | |
| +			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
 | |
| +				iv = enccrd->crd_iv;
 | |
| +			else
 | |
| +				iv = (caddr_t) ses->ses_iv;
 | |
| +			if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
 | |
| +				crypto_copyback(crp->crp_flags, crp->crp_buf,
 | |
| +				    enccrd->crd_inject, ivsize, iv);
 | |
| +			}
 | |
| +		} else {
 | |
| +			td->hdr |= TALITOS_DIR_INBOUND;
 | |
| +			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) {
 | |
| +				iv = enccrd->crd_iv;
 | |
| +				bcopy(enccrd->crd_iv, iv, ivsize);
 | |
| +			} else {
 | |
| +				iv = (caddr_t) ses->ses_iv;
 | |
| +				crypto_copydata(crp->crp_flags, crp->crp_buf,
 | |
| +				    enccrd->crd_inject, ivsize, iv);
 | |
| +			}
 | |
| +		}
 | |
| +		td->ptr[cipher_iv].ptr = dma_map_single(NULL, iv, ivsize,
 | |
| +			DMA_TO_DEVICE);
 | |
| +		td->ptr[cipher_iv].len = ivsize;
 | |
| +		/*
 | |
| +		 * we don't need the cipher iv out length/pointer
 | |
| +		 * field to do ESP IPsec. Therefore we set the len field as 0,
 | |
| +		 * which tells the SEC not to do anything with this len/ptr
 | |
| +		 * field. Previously, when length/pointer as pointing to iv,
 | |
| +		 * it gave us corruption of packets.
 | |
| +		 */
 | |
| +		td->ptr[cipher_iv_out].len = 0;
 | |
| +	}
 | |
| +	if (enccrd && maccrd) {
 | |
| +		/* this is ipsec only for now */
 | |
| +		td->hdr |= TALITOS_SEL1_MDEU
 | |
| +			|  TALITOS_MODE1_MDEU_INIT
 | |
| +			|  TALITOS_MODE1_MDEU_PAD;
 | |
| +		switch (maccrd->crd_alg) {
 | |
| +			case	CRYPTO_MD5:
 | |
| +				td->hdr |= TALITOS_MODE1_MDEU_MD5;
 | |
| +				break;
 | |
| +			case	CRYPTO_MD5_HMAC:
 | |
| +				td->hdr |= TALITOS_MODE1_MDEU_MD5_HMAC;
 | |
| +				break;
 | |
| +			case	CRYPTO_SHA1:
 | |
| +				td->hdr |= TALITOS_MODE1_MDEU_SHA1;
 | |
| +				break;
 | |
| +			case	CRYPTO_SHA1_HMAC:
 | |
| +				td->hdr |= TALITOS_MODE1_MDEU_SHA1_HMAC;
 | |
| +				break;
 | |
| +			default:
 | |
| +				/* We cannot order the SEC as requested */
 | |
| +				printk("%s: cannot do the order\n",
 | |
| +						device_get_nameunit(sc->sc_cdev));
 | |
| +				err = EINVAL;
 | |
| +				goto errout;
 | |
| +		}
 | |
| +		if ((maccrd->crd_alg == CRYPTO_MD5_HMAC) ||
 | |
| +		   (maccrd->crd_alg == CRYPTO_SHA1_HMAC)) {
 | |
| +			/*
 | |
| +			 * The offset from hash data to the start of
 | |
| +			 * crypt data is the difference in the skips.
 | |
| +			 */
 | |
| +			/* ipsec only for now */
 | |
| +			td->ptr[hmac_key].ptr = dma_map_single(NULL,
 | |
| +				ses->ses_hmac, ses->ses_hmac_len, DMA_TO_DEVICE);
 | |
| +			td->ptr[hmac_key].len = ses->ses_hmac_len;
 | |
| +			td->ptr[in_fifo].ptr  += enccrd->crd_skip;
 | |
| +			td->ptr[in_fifo].len  =  enccrd->crd_len;
 | |
| +			td->ptr[out_fifo].ptr += enccrd->crd_skip;
 | |
| +			td->ptr[out_fifo].len =  enccrd->crd_len;
 | |
| +			/* bytes of HMAC to postpend to ciphertext */
 | |
| +			td->ptr[out_fifo].extent =  ses->ses_mlen;
 | |
| +			td->ptr[hmac_data].ptr += maccrd->crd_skip;
 | |
| +			td->ptr[hmac_data].len = enccrd->crd_skip - maccrd->crd_skip;
 | |
| +		}
 | |
| +		if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT) {
 | |
| +			printk("%s: CRD_F_KEY_EXPLICIT unimplemented\n",
 | |
| +					device_get_nameunit(sc->sc_cdev));
 | |
| +		}
 | |
| +	}
 | |
| +	if (!enccrd && maccrd) {
 | |
| +		/* single MD5 or SHA */
 | |
| +		td->hdr |= TALITOS_SEL0_MDEU
 | |
| +				|  TALITOS_MODE0_MDEU_INIT
 | |
| +				|  TALITOS_MODE0_MDEU_PAD;
 | |
| +		switch (maccrd->crd_alg) {
 | |
| +			case	CRYPTO_MD5:
 | |
| +				td->hdr |= TALITOS_MODE0_MDEU_MD5;
 | |
| +				DPRINTF("MD5  ses %d ch %d len %d\n",
 | |
| +					(u32)TALITOS_SESSION(crp->crp_sid),
 | |
| +					chsel, td->ptr[in_fifo].len);
 | |
| +				break;
 | |
| +			case	CRYPTO_MD5_HMAC:
 | |
| +				td->hdr |= TALITOS_MODE0_MDEU_MD5_HMAC;
 | |
| +				break;
 | |
| +			case	CRYPTO_SHA1:
 | |
| +				td->hdr |= TALITOS_MODE0_MDEU_SHA1;
 | |
| +				DPRINTF("SHA1 ses %d ch %d len %d\n",
 | |
| +					(u32)TALITOS_SESSION(crp->crp_sid),
 | |
| +					chsel, td->ptr[in_fifo].len);
 | |
| +				break;
 | |
| +			case	CRYPTO_SHA1_HMAC:
 | |
| +				td->hdr |= TALITOS_MODE0_MDEU_SHA1_HMAC;
 | |
| +				break;
 | |
| +			default:
 | |
| +				/* We cannot order the SEC as requested */
 | |
| +				DPRINTF("cannot do the order\n");
 | |
| +				err = EINVAL;
 | |
| +				goto errout;
 | |
| +		}
 | |
| +
 | |
| +		if (crp->crp_flags & CRYPTO_F_IOV)
 | |
| +			td->ptr[out_fifo].ptr += maccrd->crd_inject;
 | |
| +
 | |
| +		if ((maccrd->crd_alg == CRYPTO_MD5_HMAC) ||
 | |
| +		   (maccrd->crd_alg == CRYPTO_SHA1_HMAC)) {
 | |
| +			td->ptr[hmac_key].ptr = dma_map_single(NULL,
 | |
| +				ses->ses_hmac, ses->ses_hmac_len,
 | |
| +				DMA_TO_DEVICE);
 | |
| +			td->ptr[hmac_key].len = ses->ses_hmac_len;
 | |
| +		}
 | |
| +	}
 | |
| +	else {
 | |
| +		/* using process key (session data has duplicate) */
 | |
| +		td->ptr[cipher_key].ptr = dma_map_single(NULL,
 | |
| +			enccrd->crd_key, (enccrd->crd_klen + 7) / 8,
 | |
| +			DMA_TO_DEVICE);
 | |
| +		td->ptr[cipher_key].len = (enccrd->crd_klen + 7) / 8;
 | |
| +	}
 | |
| +	/* descriptor complete - GO! */
 | |
| +	return talitos_submit(sc, td, chsel);
 | |
| +
 | |
| +errout:
 | |
| +	if (err != ERESTART) {
 | |
| +		crp->crp_etype = err;
 | |
| +		crypto_done(crp);
 | |
| +	}
 | |
| +	return err;
 | |
| +}
 | |
| +
 | |
| +/* go through all channels descriptors, notifying OCF what has
 | |
| + * _and_hasn't_ successfully completed and reset the device
 | |
| + * (otherwise it's up to decoding desc hdrs!)
 | |
| + */
 | |
| +static void talitos_errorprocessing(struct talitos_softc *sc)
 | |
| +{
 | |
| +	unsigned long flags;
 | |
| +	int i, j;
 | |
| +
 | |
| +	/* disable further scheduling until under control */
 | |
| +	spin_lock_irqsave(&sc->sc_chnfifolock[sc->sc_num_channels], flags);
 | |
| +
 | |
| +	if (debug) dump_talitos_status(sc);
 | |
| +	/* go through descriptors, try and salvage those successfully done,
 | |
| +	 * and EIO those that weren't
 | |
| +	 */
 | |
| +	for (i = 0; i < sc->sc_num_channels; i++) {
 | |
| +		spin_lock_irqsave(&sc->sc_chnfifolock[i], flags);
 | |
| +		for (j = 0; j < sc->sc_chfifo_len; j++) {
 | |
| +			if (sc->sc_chnfifo[i][j].cf_desc.hdr) {
 | |
| +				if ((sc->sc_chnfifo[i][j].cf_desc.hdr
 | |
| +					& TALITOS_HDR_DONE_BITS)
 | |
| +					!= TALITOS_HDR_DONE_BITS) {
 | |
| +					/* this one didn't finish */
 | |
| +					/* signify in crp->etype */
 | |
| +					sc->sc_chnfifo[i][j].cf_crp->crp_etype
 | |
| +						= EIO;
 | |
| +				}
 | |
| +			} else
 | |
| +				continue; /* free entry */
 | |
| +			/* either way, notify ocf */
 | |
| +			crypto_done(sc->sc_chnfifo[i][j].cf_crp);
 | |
| +			/* and tag it available again
 | |
| +			 *
 | |
| +			 * memset to ensure correct descriptor formation by
 | |
| +			 * avoiding inadvertently setting "optional" entries
 | |
| +			 * e.g. not using "optional" dptr2 MD/HMAC processing
 | |
| +			 */
 | |
| +			memset(&sc->sc_chnfifo[i][j].cf_desc,
 | |
| +				0, sizeof(struct talitos_desc));
 | |
| +		}
 | |
| +		spin_unlock_irqrestore(&sc->sc_chnfifolock[i], flags);
 | |
| +	}
 | |
| +	/* reset and initialize the SEC h/w device */
 | |
| +	talitos_reset_device(sc);
 | |
| +	talitos_init_device(sc);
 | |
| +#ifdef CONFIG_OCF_RANDOMHARVEST
 | |
| +	if (sc->sc_exec_units & TALITOS_HAS_EU_RNG)
 | |
| +		talitos_rng_init(sc);
 | |
| +#endif
 | |
| +
 | |
| +	/* Okay. Stand by. */
 | |
| +	spin_unlock_irqrestore(&sc->sc_chnfifolock[sc->sc_num_channels], flags);
 | |
| +
 | |
| +	return;
 | |
| +}
 | |
| +
 | |
| +/* go through all channels descriptors, notifying OCF what's been done */
 | |
| +static void talitos_doneprocessing(struct talitos_softc *sc)
 | |
| +{
 | |
| +	unsigned long flags;
 | |
| +	int i, j;
 | |
| +
 | |
| +	/* go through descriptors looking for done bits */
 | |
| +	for (i = 0; i < sc->sc_num_channels; i++) {
 | |
| +		spin_lock_irqsave(&sc->sc_chnfifolock[i], flags);
 | |
| +		for (j = 0; j < sc->sc_chfifo_len; j++) {
 | |
| +			/* descriptor has done bits set? */
 | |
| +			if ((sc->sc_chnfifo[i][j].cf_desc.hdr
 | |
| +				& TALITOS_HDR_DONE_BITS)
 | |
| +				== TALITOS_HDR_DONE_BITS) {
 | |
| +				/* notify ocf */
 | |
| +				crypto_done(sc->sc_chnfifo[i][j].cf_crp);
 | |
| +				/* and tag it available again
 | |
| +				 *
 | |
| +				 * memset to ensure correct descriptor formation by
 | |
| +				 * avoiding inadvertently setting "optional" entries
 | |
| +				 * e.g. not using "optional" dptr2 MD/HMAC processing
 | |
| +				 */
 | |
| +				memset(&sc->sc_chnfifo[i][j].cf_desc,
 | |
| +					0, sizeof(struct talitos_desc));
 | |
| +			}
 | |
| +		}
 | |
| +		spin_unlock_irqrestore(&sc->sc_chnfifolock[i], flags);
 | |
| +	}
 | |
| +	return;
 | |
| +}
 | |
| +
 | |
| +static irqreturn_t
 | |
| +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
 | |
| +talitos_intr(int irq, void *arg)
 | |
| +#else
 | |
| +talitos_intr(int irq, void *arg, struct pt_regs *regs)
 | |
| +#endif
 | |
| +{
 | |
| +	struct talitos_softc *sc = arg;
 | |
| +	u_int32_t v, v_hi;
 | |
| +
 | |
| +	/* ack */
 | |
| +	v = talitos_read(sc->sc_base_addr + TALITOS_ISR);
 | |
| +	v_hi = talitos_read(sc->sc_base_addr + TALITOS_ISR_HI);
 | |
| +	talitos_write(sc->sc_base_addr + TALITOS_ICR, v);
 | |
| +	talitos_write(sc->sc_base_addr + TALITOS_ICR_HI, v_hi);
 | |
| +
 | |
| +	if (unlikely(v & TALITOS_ISR_ERROR)) {
 | |
| +		/* Okay, Houston, we've had a problem here. */
 | |
| +		printk(KERN_DEBUG "%s: got error interrupt - ISR 0x%08x_%08x\n",
 | |
| +				device_get_nameunit(sc->sc_cdev), v, v_hi);
 | |
| +		talitos_errorprocessing(sc);
 | |
| +	} else
 | |
| +	if (likely(v & TALITOS_ISR_DONE)) {
 | |
| +		talitos_doneprocessing(sc);
 | |
| +	}
 | |
| +	return IRQ_HANDLED;
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Initialize registers we need to touch only once.
 | |
| + */
 | |
| +static void
 | |
| +talitos_init_device(struct talitos_softc *sc)
 | |
| +{
 | |
| +	u_int32_t v;
 | |
| +	int i;
 | |
| +
 | |
| +	DPRINTF("%s()\n", __FUNCTION__);
 | |
| +
 | |
| +	/* init all channels */
 | |
| +	for (i = 0; i < sc->sc_num_channels; i++) {
 | |
| +		v = talitos_read(sc->sc_base_addr +
 | |
| +			i*TALITOS_CH_OFFSET + TALITOS_CH_CCCR_HI);
 | |
| +		v |= TALITOS_CH_CCCR_HI_CDWE
 | |
| +		  |  TALITOS_CH_CCCR_HI_CDIE;  /* invoke interrupt if done */
 | |
| +		talitos_write(sc->sc_base_addr +
 | |
| +			i*TALITOS_CH_OFFSET + TALITOS_CH_CCCR_HI, v);
 | |
| +	}
 | |
| +	/* enable all interrupts */
 | |
| +	v = talitos_read(sc->sc_base_addr + TALITOS_IMR);
 | |
| +	v |= TALITOS_IMR_ALL;
 | |
| +	talitos_write(sc->sc_base_addr + TALITOS_IMR, v);
 | |
| +	v = talitos_read(sc->sc_base_addr + TALITOS_IMR_HI);
 | |
| +	v |= TALITOS_IMR_HI_ERRONLY;
 | |
| +	talitos_write(sc->sc_base_addr + TALITOS_IMR_HI, v);
 | |
| +	return;
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * set the master reset bit on the device.
 | |
| + */
 | |
| +static void
 | |
| +talitos_reset_device_master(struct talitos_softc *sc)
 | |
| +{
 | |
| +	u_int32_t v;
 | |
| +
 | |
| +	/* Reset the device by writing 1 to MCR:SWR and waiting 'til cleared */
 | |
| +	v = talitos_read(sc->sc_base_addr + TALITOS_MCR);
 | |
| +	talitos_write(sc->sc_base_addr + TALITOS_MCR, v | TALITOS_MCR_SWR);
 | |
| +
 | |
| +	while (talitos_read(sc->sc_base_addr + TALITOS_MCR) & TALITOS_MCR_SWR)
 | |
| +		cpu_relax();
 | |
| +
 | |
| +	return;
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Resets the device.  Values in the registers are left as is
 | |
| + * from the reset (i.e. initial values are assigned elsewhere).
 | |
| + */
 | |
| +static void
 | |
| +talitos_reset_device(struct talitos_softc *sc)
 | |
| +{
 | |
| +	u_int32_t v;
 | |
| +	int i;
 | |
| +
 | |
| +	DPRINTF("%s()\n", __FUNCTION__);
 | |
| +
 | |
| +	/*
 | |
| +	 * Master reset
 | |
| +	 * errata documentation: warning: certain SEC interrupts
 | |
| +	 * are not fully cleared by writing the MCR:SWR bit,
 | |
| +	 * set bit twice to completely reset
 | |
| +	 */
 | |
| +	talitos_reset_device_master(sc);	/* once */
 | |
| +	talitos_reset_device_master(sc);	/* and once again */
 | |
| +
 | |
| +	/* reset all channels */
 | |
| +	for (i = 0; i < sc->sc_num_channels; i++) {
 | |
| +		v = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
 | |
| +			TALITOS_CH_CCCR);
 | |
| +		talitos_write(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
 | |
| +			TALITOS_CH_CCCR, v | TALITOS_CH_CCCR_RESET);
 | |
| +	}
 | |
| +}
 | |
| +
 | |
| +/* Set up the crypto device structure, private data,
 | |
| + * and anything else we need before we start */
 | |
| +#ifdef CONFIG_PPC_MERGE
 | |
| +static int talitos_probe(struct of_device *ofdev, const struct of_device_id *match)
 | |
| +#else
 | |
| +static int talitos_probe(struct platform_device *pdev)
 | |
| +#endif
 | |
| +{
 | |
| +	struct talitos_softc *sc = NULL;
 | |
| +	struct resource *r;
 | |
| +#ifdef CONFIG_PPC_MERGE
 | |
| +	struct device *device = &ofdev->dev;
 | |
| +	struct device_node *np = ofdev->node;
 | |
| +	const unsigned int *prop;
 | |
| +	int err;
 | |
| +	struct resource res;
 | |
| +#endif
 | |
| +	static int num_chips = 0;
 | |
| +	int rc;
 | |
| +	int i;
 | |
| +
 | |
| +	DPRINTF("%s()\n", __FUNCTION__);
 | |
| +
 | |
| +	sc = (struct talitos_softc *) kmalloc(sizeof(*sc), GFP_KERNEL);
 | |
| +	if (!sc)
 | |
| +		return -ENOMEM;
 | |
| +	memset(sc, 0, sizeof(*sc));
 | |
| +
 | |
| +	softc_device_init(sc, DRV_NAME, num_chips, talitos_methods);
 | |
| +
 | |
| +	sc->sc_irq = -1;
 | |
| +	sc->sc_cid = -1;
 | |
| +#ifndef CONFIG_PPC_MERGE
 | |
| +	sc->sc_dev = pdev;
 | |
| +#endif
 | |
| +	sc->sc_num = num_chips++;
 | |
| +
 | |
| +#ifdef CONFIG_PPC_MERGE
 | |
| +	dev_set_drvdata(device, sc);
 | |
| +#else
 | |
| +	platform_set_drvdata(sc->sc_dev, sc);
 | |
| +#endif
 | |
| +
 | |
| +	/* get the irq line */
 | |
| +#ifdef CONFIG_PPC_MERGE
 | |
| +	err = of_address_to_resource(np, 0, &res);
 | |
| +	if (err)
 | |
| +		return -EINVAL;
 | |
| +	r = &res;
 | |
| +
 | |
| +	sc->sc_irq = irq_of_parse_and_map(np, 0);
 | |
| +#else
 | |
| +	/* get a pointer to the register memory */
 | |
| +	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 | |
| +
 | |
| +	sc->sc_irq = platform_get_irq(pdev, 0);
 | |
| +#endif
 | |
| +	rc = request_irq(sc->sc_irq, talitos_intr, 0,
 | |
| +			device_get_nameunit(sc->sc_cdev), sc);
 | |
| +	if (rc) {
 | |
| +		printk(KERN_ERR "%s: failed to hook irq %d\n",
 | |
| +				device_get_nameunit(sc->sc_cdev), sc->sc_irq);
 | |
| +		sc->sc_irq = -1;
 | |
| +		goto out;
 | |
| +	}
 | |
| +
 | |
| +	sc->sc_base_addr = (ocf_iomem_t) ioremap(r->start, (r->end - r->start));
 | |
| +	if (!sc->sc_base_addr) {
 | |
| +		printk(KERN_ERR "%s: failed to ioremap\n",
 | |
| +				device_get_nameunit(sc->sc_cdev));
 | |
| +		goto out;
 | |
| +	}
 | |
| +
 | |
| +	/* figure out our SEC's properties and capabilities */
 | |
| +	sc->sc_chiprev = (u64)talitos_read(sc->sc_base_addr + TALITOS_ID) << 32
 | |
| +		 | talitos_read(sc->sc_base_addr + TALITOS_ID_HI);
 | |
| +	DPRINTF("sec id 0x%llx\n", sc->sc_chiprev);
 | |
| +
 | |
| +#ifdef CONFIG_PPC_MERGE
 | |
| +	/* get SEC properties from device tree, defaulting to SEC 2.0 */
 | |
| +
 | |
| +	prop = of_get_property(np, "num-channels", NULL);
 | |
| +	sc->sc_num_channels = prop ? *prop : TALITOS_NCHANNELS_SEC_2_0;
 | |
| +
 | |
| +	prop = of_get_property(np, "channel-fifo-len", NULL);
 | |
| +	sc->sc_chfifo_len = prop ? *prop : TALITOS_CHFIFOLEN_SEC_2_0;
 | |
| +
 | |
| +	prop = of_get_property(np, "exec-units-mask", NULL);
 | |
| +	sc->sc_exec_units = prop ? *prop : TALITOS_HAS_EUS_SEC_2_0;
 | |
| +
 | |
| +	prop = of_get_property(np, "descriptor-types-mask", NULL);
 | |
| +	sc->sc_desc_types = prop ? *prop : TALITOS_HAS_DESCTYPES_SEC_2_0;
 | |
| +#else
 | |
| +	/* bulk should go away with openfirmware flat device tree support */
 | |
| +	if (sc->sc_chiprev & TALITOS_ID_SEC_2_0) {
 | |
| +		sc->sc_num_channels = TALITOS_NCHANNELS_SEC_2_0;
 | |
| +		sc->sc_chfifo_len = TALITOS_CHFIFOLEN_SEC_2_0;
 | |
| +		sc->sc_exec_units = TALITOS_HAS_EUS_SEC_2_0;
 | |
| +		sc->sc_desc_types = TALITOS_HAS_DESCTYPES_SEC_2_0;
 | |
| +	} else {
 | |
| +		printk(KERN_ERR "%s: failed to id device\n",
 | |
| +				device_get_nameunit(sc->sc_cdev));
 | |
| +		goto out;
 | |
| +	}
 | |
| +#endif
 | |
| +
 | |
| +	/* + 1 is for the meta-channel lock used by the channel scheduler */
 | |
| +	sc->sc_chnfifolock = (spinlock_t *) kmalloc(
 | |
| +		(sc->sc_num_channels + 1) * sizeof(spinlock_t), GFP_KERNEL);
 | |
| +	if (!sc->sc_chnfifolock)
 | |
| +		goto out;
 | |
| +	for (i = 0; i < sc->sc_num_channels + 1; i++) {
 | |
| +		spin_lock_init(&sc->sc_chnfifolock[i]);
 | |
| +	}
 | |
| +
 | |
| +	sc->sc_chnlastalg = (int *) kmalloc(
 | |
| +		sc->sc_num_channels * sizeof(int), GFP_KERNEL);
 | |
| +	if (!sc->sc_chnlastalg)
 | |
| +		goto out;
 | |
| +	memset(sc->sc_chnlastalg, 0, sc->sc_num_channels * sizeof(int));
 | |
| +
 | |
| +	sc->sc_chnfifo = (struct desc_cryptop_pair **) kmalloc(
 | |
| +		sc->sc_num_channels * sizeof(struct desc_cryptop_pair *),
 | |
| +		GFP_KERNEL);
 | |
| +	if (!sc->sc_chnfifo)
 | |
| +		goto out;
 | |
| +	for (i = 0; i < sc->sc_num_channels; i++) {
 | |
| +		sc->sc_chnfifo[i] = (struct desc_cryptop_pair *) kmalloc(
 | |
| +			sc->sc_chfifo_len * sizeof(struct desc_cryptop_pair),
 | |
| +			GFP_KERNEL);
 | |
| +		if (!sc->sc_chnfifo[i])
 | |
| +			goto out;
 | |
| +		memset(sc->sc_chnfifo[i], 0,
 | |
| +			sc->sc_chfifo_len * sizeof(struct desc_cryptop_pair));
 | |
| +	}
 | |
| +
 | |
| +	/* reset and initialize the SEC h/w device */
 | |
| +	talitos_reset_device(sc);
 | |
| +	talitos_init_device(sc);
 | |
| +
 | |
| +	sc->sc_cid = crypto_get_driverid(softc_get_device(sc),CRYPTOCAP_F_HARDWARE);
 | |
| +	if (sc->sc_cid < 0) {
 | |
| +		printk(KERN_ERR "%s: could not get crypto driver id\n",
 | |
| +				device_get_nameunit(sc->sc_cdev));
 | |
| +		goto out;
 | |
| +	}
 | |
| +
 | |
| +	/* register algorithms with the framework */
 | |
| +	printk("%s:", device_get_nameunit(sc->sc_cdev));
 | |
| +
 | |
| +	if (sc->sc_exec_units & TALITOS_HAS_EU_RNG)  {
 | |
| +		printk(" rng");
 | |
| +#ifdef CONFIG_OCF_RANDOMHARVEST
 | |
| +		talitos_rng_init(sc);
 | |
| +		crypto_rregister(sc->sc_cid, talitos_read_random, sc);
 | |
| +#endif
 | |
| +	}
 | |
| +	if (sc->sc_exec_units & TALITOS_HAS_EU_DEU) {
 | |
| +		printk(" des/3des");
 | |
| +		crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
 | |
| +		crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
 | |
| +	}
 | |
| +	if (sc->sc_exec_units & TALITOS_HAS_EU_AESU) {
 | |
| +		printk(" aes");
 | |
| +		crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
 | |
| +	}
 | |
| +	if (sc->sc_exec_units & TALITOS_HAS_EU_MDEU) {
 | |
| +		printk(" md5");
 | |
| +		crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
 | |
| +		/* HMAC support only with IPsec for now */
 | |
| +		crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
 | |
| +		printk(" sha1");
 | |
| +		crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
 | |
| +		/* HMAC support only with IPsec for now */
 | |
| +		crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
 | |
| +	}
 | |
| +	printk("\n");
 | |
| +	return 0;
 | |
| +
 | |
| +out:
 | |
| +#ifndef CONFIG_PPC_MERGE
 | |
| +	talitos_remove(pdev);
 | |
| +#endif
 | |
| +	return -ENOMEM;
 | |
| +}
 | |
| +
 | |
| +#ifdef CONFIG_PPC_MERGE
 | |
| +static int talitos_remove(struct of_device *ofdev)
 | |
| +#else
 | |
| +static int talitos_remove(struct platform_device *pdev)
 | |
| +#endif
 | |
| +{
 | |
| +#ifdef CONFIG_PPC_MERGE
 | |
| +	struct talitos_softc *sc = dev_get_drvdata(&ofdev->dev);
 | |
| +#else
 | |
| +	struct talitos_softc *sc = platform_get_drvdata(pdev);
 | |
| +#endif
 | |
| +	int i;
 | |
| +
 | |
| +	DPRINTF("%s()\n", __FUNCTION__);
 | |
| +	if (sc->sc_cid >= 0)
 | |
| +		crypto_unregister_all(sc->sc_cid);
 | |
| +	if (sc->sc_chnfifo) {
 | |
| +		for (i = 0; i < sc->sc_num_channels; i++)
 | |
| +			if (sc->sc_chnfifo[i])
 | |
| +				kfree(sc->sc_chnfifo[i]);
 | |
| +		kfree(sc->sc_chnfifo);
 | |
| +	}
 | |
| +	if (sc->sc_chnlastalg)
 | |
| +		kfree(sc->sc_chnlastalg);
 | |
| +	if (sc->sc_chnfifolock)
 | |
| +		kfree(sc->sc_chnfifolock);
 | |
| +	if (sc->sc_irq != -1)
 | |
| +		free_irq(sc->sc_irq, sc);
 | |
| +	if (sc->sc_base_addr)
 | |
| +		iounmap((void *) sc->sc_base_addr);
 | |
| +	kfree(sc);
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +#ifdef CONFIG_PPC_MERGE
 | |
| +static struct of_device_id talitos_match[] = {
 | |
| +	{
 | |
| +		.type = "crypto",
 | |
| +		.compatible = "talitos",
 | |
| +	},
 | |
| +	{},
 | |
| +};
 | |
| +
 | |
| +MODULE_DEVICE_TABLE(of, talitos_match);
 | |
| +
 | |
| +static struct of_platform_driver talitos_driver = {
 | |
| +	.name		= DRV_NAME,
 | |
| +	.match_table	= talitos_match,
 | |
| +	.probe		= talitos_probe,
 | |
| +	.remove		= talitos_remove,
 | |
| +};
 | |
| +
 | |
| +static int __init talitos_init(void)
 | |
| +{
 | |
| +	return of_register_platform_driver(&talitos_driver);
 | |
| +}
 | |
| +
 | |
| +static void __exit talitos_exit(void)
 | |
| +{
 | |
| +	of_unregister_platform_driver(&talitos_driver);
 | |
| +}
 | |
| +#else
 | |
| +/* Structure for a platform device driver */
 | |
| +static struct platform_driver talitos_driver = {
 | |
| +	.probe = talitos_probe,
 | |
| +	.remove = talitos_remove,
 | |
| +	.driver = {
 | |
| +		.name = "fsl-sec2",
 | |
| +	}
 | |
| +};
 | |
| +
 | |
| +static int __init talitos_init(void)
 | |
| +{
 | |
| +	return platform_driver_register(&talitos_driver);
 | |
| +}
 | |
| +
 | |
| +static void __exit talitos_exit(void)
 | |
| +{
 | |
| +	platform_driver_unregister(&talitos_driver);
 | |
| +}
 | |
| +#endif
 | |
| +
 | |
| +module_init(talitos_init);
 | |
| +module_exit(talitos_exit);
 | |
| +
 | |
| +MODULE_LICENSE("Dual BSD/GPL");
 | |
| +MODULE_AUTHOR("kim.phillips@freescale.com");
 | |
| +MODULE_DESCRIPTION("OCF driver for Freescale SEC (talitos)");
 | |
| --- /dev/null
 | |
| +++ b/crypto/ocf/talitos/talitos_soft.h
 | |
| @@ -0,0 +1,77 @@
 | |
| +/*
 | |
| + * Freescale SEC data structures for integration with ocf-linux
 | |
| + *
 | |
| + * Copyright (c) 2006 Freescale Semiconductor, Inc.
 | |
| + *
 | |
| + * Redistribution and use in source and binary forms, with or without
 | |
| + * modification, are permitted provided that the following conditions
 | |
| + * are met:
 | |
| + *
 | |
| + * 1. Redistributions of source code must retain the above copyright
 | |
| + *    notice, this list of conditions and the following disclaimer.
 | |
| + * 2. Redistributions in binary form must reproduce the above copyright
 | |
| + *    notice, this list of conditions and the following disclaimer in the
 | |
| + *    documentation and/or other materials provided with the distribution.
 | |
| + * 3. The name of the author may not be used to endorse or promote products
 | |
| + *    derived from this software without specific prior written permission.
 | |
| + *
 | |
| + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 | |
| + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 | |
| + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 | |
| + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 | |
| + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 | |
| + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 | |
| + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 | |
| + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 | |
| + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 | |
| + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 | |
| + */
 | |
| +
 | |
| +/*
 | |
| + * paired descriptor and associated crypto operation
 | |
| + */
 | |
| +struct desc_cryptop_pair {
 | |
| +	struct talitos_desc	cf_desc;	/* descriptor ptr */
 | |
| +	struct cryptop		*cf_crp;	/* cryptop ptr */
 | |
| +};
 | |
| +
 | |
| +/*
 | |
| + * Holds data specific to a single talitos device.
 | |
| + */
 | |
| +struct talitos_softc {
 | |
| +	softc_device_decl	sc_cdev;
 | |
| +	struct platform_device	*sc_dev;	/* device backpointer */
 | |
| +	ocf_iomem_t		sc_base_addr;
 | |
| +	int			sc_irq;
 | |
| +	int			sc_num;		/* if we have multiple chips */
 | |
| +	int32_t			sc_cid;		/* crypto tag */
 | |
| +	u64			sc_chiprev;	/* major/minor chip revision */
 | |
| +	int			sc_nsessions;
 | |
| +	struct talitos_session	*sc_sessions;
 | |
| +	int			sc_num_channels;/* number of crypto channels */
 | |
| +	int			sc_chfifo_len;	/* channel fetch fifo len */
 | |
| +	int			sc_exec_units;	/* execution units mask */
 | |
| +	int			sc_desc_types;	/* descriptor types mask */
 | |
| +	/*
 | |
| +	 * mutual exclusion for intra-channel resources, e.g. fetch fifos
 | |
| +	 * the last entry is a meta-channel lock used by the channel scheduler
 | |
| +	 */
 | |
| +	spinlock_t		*sc_chnfifolock;
 | |
| +	/* sc_chnlastalgo contains last algorithm for that channel */
 | |
| +	int			*sc_chnlastalg;
 | |
| +	/* sc_chnfifo holds pending descriptor--crypto operation pairs */
 | |
| +	struct desc_cryptop_pair	**sc_chnfifo;
 | |
| +};
 | |
| +
 | |
| +struct talitos_session {
 | |
| +	u_int32_t	ses_used;
 | |
| +	u_int32_t	ses_klen;		/* key length in bits */
 | |
| +	u_int32_t	ses_key[8];		/* DES/3DES/AES key */
 | |
| +	u_int32_t	ses_hmac[5];		/* hmac inner state */
 | |
| +	u_int32_t	ses_hmac_len;		/* hmac length */
 | |
| +	u_int32_t	ses_iv[4];		/* DES/3DES/AES iv */
 | |
| +	u_int32_t	ses_mlen;		/* desired hash result len (12=ipsec or 16) */
 | |
| +};
 | |
| +
 | |
| +#define	TALITOS_SESSION(sid)	((sid) & 0x0fffffff)
 | |
| +#define	TALITOS_SID(crd, sesn)	(((crd) << 28) | ((sesn) & 0x0fffffff))
 | |
| --- /dev/null
 | |
| +++ b/crypto/ocf/talitos/talitos_dev.h
 | |
| @@ -0,0 +1,277 @@
 | |
| +/*
 | |
| + * Freescale SEC (talitos) device dependent data structures
 | |
| + *
 | |
| + * Copyright (c) 2006 Freescale Semiconductor, Inc.
 | |
| + *
 | |
| + * Redistribution and use in source and binary forms, with or without
 | |
| + * modification, are permitted provided that the following conditions
 | |
| + * are met:
 | |
| + *
 | |
| + * 1. Redistributions of source code must retain the above copyright
 | |
| + *    notice, this list of conditions and the following disclaimer.
 | |
| + * 2. Redistributions in binary form must reproduce the above copyright
 | |
| + *    notice, this list of conditions and the following disclaimer in the
 | |
| + *    documentation and/or other materials provided with the distribution.
 | |
| + * 3. The name of the author may not be used to endorse or promote products
 | |
| + *    derived from this software without specific prior written permission.
 | |
| + *
 | |
| + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 | |
| + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 | |
| + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 | |
| + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 | |
| + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 | |
| + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 | |
| + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 | |
| + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 | |
| + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 | |
| + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 | |
| + *
 | |
| + */
 | |
| +
 | |
| +/* device ID register values */
 | |
| +#define TALITOS_ID_SEC_2_0	0x40
 | |
| +#define TALITOS_ID_SEC_2_1	0x40 /* cross ref with IP block revision reg */
 | |
| +
 | |
| +/*
 | |
| + * following num_channels, channel-fifo-depth, exec-unit-mask, and
 | |
| + * descriptor-types-mask are for forward-compatibility with openfirmware
 | |
| + * flat device trees
 | |
| + */
 | |
| +
 | |
| +/*
 | |
| + *  num_channels : the number of channels available in each SEC version.
 | |
| + */
 | |
| +
 | |
| +/* n.b. this driver requires these values be a power of 2 */
 | |
| +#define TALITOS_NCHANNELS_SEC_1_0	4
 | |
| +#define TALITOS_NCHANNELS_SEC_1_2	1
 | |
| +#define TALITOS_NCHANNELS_SEC_2_0	4
 | |
| +#define TALITOS_NCHANNELS_SEC_2_01	4
 | |
| +#define TALITOS_NCHANNELS_SEC_2_1	4
 | |
| +#define TALITOS_NCHANNELS_SEC_2_4	4
 | |
| +
 | |
| +/*
 | |
| + *  channel-fifo-depth : The number of descriptor
 | |
| + *  pointers a channel fetch fifo can hold.
 | |
| + */
 | |
| +#define TALITOS_CHFIFOLEN_SEC_1_0	1
 | |
| +#define TALITOS_CHFIFOLEN_SEC_1_2	1
 | |
| +#define TALITOS_CHFIFOLEN_SEC_2_0	24
 | |
| +#define TALITOS_CHFIFOLEN_SEC_2_01	24
 | |
| +#define TALITOS_CHFIFOLEN_SEC_2_1	24
 | |
| +#define TALITOS_CHFIFOLEN_SEC_2_4	24
 | |
| +
 | |
| +/*
 | |
| + *  exec-unit-mask : The bitmask representing what Execution Units (EUs)
 | |
| + *  are available. EU information should be encoded following the SEC's
 | |
| + *  EU_SEL0 bitfield documentation, i.e. as follows:
 | |
| + *
 | |
| + *    bit 31 = set if SEC permits no-EU selection (should be always set)
 | |
| + *    bit 30 = set if SEC has the ARC4 EU (AFEU)
 | |
| + *    bit 29 = set if SEC has the des/3des EU (DEU)
 | |
| + *    bit 28 = set if SEC has the message digest EU (MDEU)
 | |
| + *    bit 27 = set if SEC has the random number generator EU (RNG)
 | |
| + *    bit 26 = set if SEC has the public key EU (PKEU)
 | |
| + *    bit 25 = set if SEC has the aes EU (AESU)
 | |
| + *    bit 24 = set if SEC has the Kasumi EU (KEU)
 | |
| + *
 | |
| + */
 | |
| +#define TALITOS_HAS_EU_NONE		(1<<0)
 | |
| +#define TALITOS_HAS_EU_AFEU		(1<<1)
 | |
| +#define TALITOS_HAS_EU_DEU		(1<<2)
 | |
| +#define TALITOS_HAS_EU_MDEU		(1<<3)
 | |
| +#define TALITOS_HAS_EU_RNG		(1<<4)
 | |
| +#define TALITOS_HAS_EU_PKEU		(1<<5)
 | |
| +#define TALITOS_HAS_EU_AESU		(1<<6)
 | |
| +#define TALITOS_HAS_EU_KEU		(1<<7)
 | |
| +
 | |
| +/* the corresponding masks for each SEC version */
 | |
| +#define TALITOS_HAS_EUS_SEC_1_0		0x7f
 | |
| +#define TALITOS_HAS_EUS_SEC_1_2		0x4d
 | |
| +#define TALITOS_HAS_EUS_SEC_2_0		0x7f
 | |
| +#define TALITOS_HAS_EUS_SEC_2_01	0x7f
 | |
| +#define TALITOS_HAS_EUS_SEC_2_1		0xff
 | |
| +#define TALITOS_HAS_EUS_SEC_2_4		0x7f
 | |
| +
 | |
| +/*
 | |
| + *  descriptor-types-mask : The bitmask representing what descriptors
 | |
| + *  are available. Descriptor type information should be encoded
 | |
| + *  following the SEC's Descriptor Header Dword DESC_TYPE field
 | |
| + *  documentation, i.e. as follows:
 | |
| + *
 | |
| + *    bit 0  = set if SEC supports the aesu_ctr_nonsnoop desc. type
 | |
| + *    bit 1  = set if SEC supports the ipsec_esp descriptor type
 | |
| + *    bit 2  = set if SEC supports the common_nonsnoop desc. type
 | |
| + *    bit 3  = set if SEC supports the 802.11i AES ccmp desc. type
 | |
| + *    bit 4  = set if SEC supports the hmac_snoop_no_afeu desc. type
 | |
| + *    bit 5  = set if SEC supports the srtp descriptor type
 | |
| + *    bit 6  = set if SEC supports the non_hmac_snoop_no_afeu desc.type
 | |
| + *    bit 7  = set if SEC supports the pkeu_assemble descriptor type
 | |
| + *    bit 8  = set if SEC supports the aesu_key_expand_output desc.type
 | |
| + *    bit 9  = set if SEC supports the pkeu_ptmul descriptor type
 | |
| + *    bit 10 = set if SEC supports the common_nonsnoop_afeu desc. type
 | |
| + *    bit 11 = set if SEC supports the pkeu_ptadd_dbl descriptor type
 | |
| + *
 | |
| + *  ..and so on and so forth.
 | |
| + */
 | |
| +#define TALITOS_HAS_DT_AESU_CTR_NONSNOOP	(1<<0)
 | |
| +#define TALITOS_HAS_DT_IPSEC_ESP		(1<<1)
 | |
| +#define TALITOS_HAS_DT_COMMON_NONSNOOP		(1<<2)
 | |
| +
 | |
| +/* the corresponding masks for each SEC version */
 | |
| +#define TALITOS_HAS_DESCTYPES_SEC_2_0	0x01010ebf
 | |
| +#define TALITOS_HAS_DESCTYPES_SEC_2_1	0x012b0ebf
 | |
| +
 | |
| +/*
 | |
| + * a TALITOS_xxx_HI address points to the low data bits (32-63) of the register
 | |
| + */
 | |
| +
 | |
| +/* global register offset addresses */
 | |
| +#define TALITOS_ID		0x1020
 | |
| +#define TALITOS_ID_HI		0x1024
 | |
| +#define TALITOS_MCR		0x1030		/* master control register */
 | |
| +#define TALITOS_MCR_HI		0x1038		/* master control register */
 | |
| +#define TALITOS_MCR_SWR		0x1
 | |
| +#define TALITOS_IMR		0x1008		/* interrupt mask register */
 | |
| +#define TALITOS_IMR_ALL		0x00010fff	/* enable all interrupts mask */
 | |
| +#define TALITOS_IMR_ERRONLY	0x00010aaa	/* enable error interrupts */
 | |
| +#define TALITOS_IMR_HI		0x100C		/* interrupt mask register */
 | |
| +#define TALITOS_IMR_HI_ALL	0x00323333	/* enable all interrupts mask */
 | |
| +#define TALITOS_IMR_HI_ERRONLY	0x00222222	/* enable error interrupts */
 | |
| +#define TALITOS_ISR		0x1010		/* interrupt status register */
 | |
| +#define TALITOS_ISR_ERROR	0x00010faa	/* errors mask */
 | |
| +#define TALITOS_ISR_DONE	0x00000055	/* channel(s) done mask */
 | |
| +#define TALITOS_ISR_HI		0x1014		/* interrupt status register */
 | |
| +#define TALITOS_ICR		0x1018		/* interrupt clear register */
 | |
| +#define TALITOS_ICR_HI		0x101C		/* interrupt clear register */
 | |
| +
 | |
| +/* channel register address stride */
 | |
| +#define TALITOS_CH_OFFSET	0x100
 | |
| +
 | |
| +/* channel register offset addresses and bits */
 | |
| +#define TALITOS_CH_CCCR		0x1108	/* Crypto-Channel Config Register */
 | |
| +#define TALITOS_CH_CCCR_RESET	0x1	/* Channel Reset bit */
 | |
| +#define TALITOS_CH_CCCR_HI	0x110c	/* Crypto-Channel Config Register */
 | |
| +#define TALITOS_CH_CCCR_HI_CDWE	0x10	/* Channel done writeback enable bit */
 | |
| +#define TALITOS_CH_CCCR_HI_NT	0x4	/* Notification type bit */
 | |
| +#define TALITOS_CH_CCCR_HI_CDIE	0x2	/* Channel Done Interrupt Enable bit */
 | |
| +#define TALITOS_CH_CCPSR	0x1110	/* Crypto-Channel Pointer Status Reg */
 | |
| +#define TALITOS_CH_CCPSR_HI	0x1114	/* Crypto-Channel Pointer Status Reg */
 | |
| +#define TALITOS_CH_FF		0x1148	/* Fetch FIFO */
 | |
| +#define TALITOS_CH_FF_HI	0x114c	/* Fetch FIFO's FETCH_ADRS */
 | |
| +#define TALITOS_CH_CDPR		0x1140	/* Crypto-Channel Pointer Status Reg */
 | |
| +#define TALITOS_CH_CDPR_HI	0x1144	/* Crypto-Channel Pointer Status Reg */
 | |
| +#define TALITOS_CH_DESCBUF	0x1180	/* (thru 11bf) Crypto-Channel
 | |
| +					 * Descriptor Buffer (debug) */
 | |
| +
 | |
| +/* execution unit register offset addresses and bits */
 | |
| +#define TALITOS_DEUSR		0x2028	/* DEU status register */
 | |
| +#define TALITOS_DEUSR_HI	0x202c	/* DEU status register */
 | |
| +#define TALITOS_DEUISR		0x2030	/* DEU interrupt status register */
 | |
| +#define TALITOS_DEUISR_HI	0x2034	/* DEU interrupt status register */
 | |
| +#define TALITOS_DEUICR		0x2038	/* DEU interrupt control register */
 | |
| +#define TALITOS_DEUICR_HI	0x203c	/* DEU interrupt control register */
 | |
| +#define TALITOS_AESUISR		0x4030	/* AESU interrupt status register */
 | |
| +#define TALITOS_AESUISR_HI	0x4034	/* AESU interrupt status register */
 | |
| +#define TALITOS_AESUICR		0x4038	/* AESU interrupt control register */
 | |
| +#define TALITOS_AESUICR_HI	0x403c	/* AESU interrupt control register */
 | |
| +#define TALITOS_MDEUISR		0x6030	/* MDEU interrupt status register */
 | |
| +#define TALITOS_MDEUISR_HI	0x6034	/* MDEU interrupt status register */
 | |
| +#define TALITOS_RNGSR		0xa028	/* RNG status register */
 | |
| +#define TALITOS_RNGSR_HI	0xa02c	/* RNG status register */
 | |
| +#define TALITOS_RNGSR_HI_RD	0x1	/* RNG Reset done */
 | |
| +#define TALITOS_RNGSR_HI_OFL	0xff0000/* number of dwords in RNG output FIFO*/
 | |
| +#define TALITOS_RNGDSR		0xa010	/* RNG data size register */
 | |
| +#define TALITOS_RNGDSR_HI	0xa014	/* RNG data size register */
 | |
| +#define TALITOS_RNG_FIFO	0xa800	/* RNG FIFO - pool of random numbers */
 | |
| +#define TALITOS_RNGISR		0xa030	/* RNG Interrupt status register */
 | |
| +#define TALITOS_RNGISR_HI	0xa034	/* RNG Interrupt status register */
 | |
| +#define TALITOS_RNGRCR		0xa018	/* RNG Reset control register */
 | |
| +#define TALITOS_RNGRCR_HI	0xa01c	/* RNG Reset control register */
 | |
| +#define TALITOS_RNGRCR_HI_SR	0x1	/* RNG RNGRCR:Software Reset */
 | |
| +
 | |
| +/* descriptor pointer entry */
 | |
| +struct talitos_desc_ptr {
 | |
| +	u16	len;		/* length */
 | |
| +	u8	extent;		/* jump (to s/g link table) and extent */
 | |
| +	u8	res;		/* reserved */
 | |
| +	u32	ptr;		/* pointer */
 | |
| +};
 | |
| +
 | |
| +/* descriptor */
 | |
| +struct talitos_desc {
 | |
| +	u32	hdr;				/* header */
 | |
| +	u32	res;				/* reserved */
 | |
| +	struct talitos_desc_ptr		ptr[7];	/* ptr/len pair array */
 | |
| +};
 | |
| +
 | |
| +/* talitos descriptor header (hdr) bits */
 | |
| +
 | |
| +/* primary execution unit select */
 | |
| +#define	TALITOS_SEL0_AFEU	0x10000000
 | |
| +#define	TALITOS_SEL0_DEU	0x20000000
 | |
| +#define	TALITOS_SEL0_MDEU	0x30000000
 | |
| +#define	TALITOS_SEL0_RNG	0x40000000
 | |
| +#define	TALITOS_SEL0_PKEU	0x50000000
 | |
| +#define	TALITOS_SEL0_AESU	0x60000000
 | |
| +
 | |
| +/* primary execution unit mode (MODE0) and derivatives */
 | |
| +#define	TALITOS_MODE0_AESU_CBC		0x00200000
 | |
| +#define	TALITOS_MODE0_AESU_ENC		0x00100000
 | |
| +#define	TALITOS_MODE0_DEU_CBC		0x00400000
 | |
| +#define	TALITOS_MODE0_DEU_3DES		0x00200000
 | |
| +#define	TALITOS_MODE0_DEU_ENC		0x00100000
 | |
| +#define	TALITOS_MODE0_MDEU_INIT		0x01000000	/* init starting regs */
 | |
| +#define	TALITOS_MODE0_MDEU_HMAC		0x00800000
 | |
| +#define	TALITOS_MODE0_MDEU_PAD		0x00400000	/* PD */
 | |
| +#define	TALITOS_MODE0_MDEU_MD5		0x00200000
 | |
| +#define	TALITOS_MODE0_MDEU_SHA256	0x00100000
 | |
| +#define	TALITOS_MODE0_MDEU_SHA1		0x00000000	/* SHA-160 */
 | |
| +#define	TALITOS_MODE0_MDEU_MD5_HMAC	\
 | |
| +		(TALITOS_MODE0_MDEU_MD5 | TALITOS_MODE0_MDEU_HMAC)
 | |
| +#define	TALITOS_MODE0_MDEU_SHA256_HMAC	\
 | |
| +		(TALITOS_MODE0_MDEU_SHA256 | TALITOS_MODE0_MDEU_HMAC)
 | |
| +#define	TALITOS_MODE0_MDEU_SHA1_HMAC	\
 | |
| +		(TALITOS_MODE0_MDEU_SHA1 | TALITOS_MODE0_MDEU_HMAC)
 | |
| +
 | |
| +/* secondary execution unit select (SEL1) */
 | |
| +/* it's MDEU or nothing */
 | |
| +#define	TALITOS_SEL1_MDEU	0x00030000
 | |
| +
 | |
| +/* secondary execution unit mode (MODE1) and derivatives */
 | |
| +#define	TALITOS_MODE1_MDEU_INIT		0x00001000	/* init starting regs */
 | |
| +#define	TALITOS_MODE1_MDEU_HMAC		0x00000800
 | |
| +#define	TALITOS_MODE1_MDEU_PAD		0x00000400	/* PD */
 | |
| +#define	TALITOS_MODE1_MDEU_MD5		0x00000200
 | |
| +#define	TALITOS_MODE1_MDEU_SHA256	0x00000100
 | |
| +#define	TALITOS_MODE1_MDEU_SHA1		0x00000000	/* SHA-160 */
 | |
| +#define	TALITOS_MODE1_MDEU_MD5_HMAC	\
 | |
| +	(TALITOS_MODE1_MDEU_MD5 | TALITOS_MODE1_MDEU_HMAC)
 | |
| +#define	TALITOS_MODE1_MDEU_SHA256_HMAC	\
 | |
| +	(TALITOS_MODE1_MDEU_SHA256 | TALITOS_MODE1_MDEU_HMAC)
 | |
| +#define	TALITOS_MODE1_MDEU_SHA1_HMAC	\
 | |
| +	(TALITOS_MODE1_MDEU_SHA1 | TALITOS_MODE1_MDEU_HMAC)
 | |
| +
 | |
| +/* direction of overall data flow (DIR) */
 | |
| +#define	TALITOS_DIR_OUTBOUND	0x00000000
 | |
| +#define	TALITOS_DIR_INBOUND	0x00000002
 | |
| +
 | |
| +/* done notification (DN) */
 | |
| +#define	TALITOS_DONE_NOTIFY	0x00000001
 | |
| +
 | |
| +/* descriptor types */
 | |
| +/* odd numbers here are valid on SEC2 and greater only (e.g. ipsec_esp) */
 | |
| +#define TD_TYPE_AESU_CTR_NONSNOOP	(0 << 3)
 | |
| +#define TD_TYPE_IPSEC_ESP		(1 << 3)
 | |
| +#define TD_TYPE_COMMON_NONSNOOP_NO_AFEU	(2 << 3)
 | |
| +#define TD_TYPE_HMAC_SNOOP_NO_AFEU	(4 << 3)
 | |
| +
 | |
| +#define TALITOS_HDR_DONE_BITS	0xff000000
 | |
| +
 | |
| +#define	DPRINTF(a...)	do { \
 | |
| +						if (debug) { \
 | |
| +							printk("%s: ", sc ? \
 | |
| +								device_get_nameunit(sc->sc_cdev) : "talitos"); \
 | |
| +							printk(a); \
 | |
| +						} \
 | |
| +					} while (0)
 | |
| --- /dev/null
 | |
| +++ b/crypto/ocf/random.c
 | |
| @@ -0,0 +1,317 @@
 | |
| +/*
 | |
| + * A system independant way of adding entropy to the kernels pool
 | |
| + * this way the drivers can focus on the real work and we can take
 | |
| + * care of pushing it to the appropriate place in the kernel.
 | |
| + *
 | |
| + * This should be fast and callable from timers/interrupts
 | |
| + *
 | |
| + * Written by David McCullough <david_mccullough@securecomputing.com>
 | |
| + * Copyright (C) 2006-2007 David McCullough
 | |
| + * Copyright (C) 2004-2005 Intel Corporation.
 | |
| + *
 | |
| + * LICENSE TERMS
 | |
| + *
 | |
| + * The free distribution and use of this software in both source and binary
 | |
| + * form is allowed (with or without changes) provided that:
 | |
| + *
 | |
| + *   1. distributions of this source code include the above copyright
 | |
| + *      notice, this list of conditions and the following disclaimer;
 | |
| + *
 | |
| + *   2. distributions in binary form include the above copyright
 | |
| + *      notice, this list of conditions and the following disclaimer
 | |
| + *      in the documentation and/or other associated materials;
 | |
| + *
 | |
| + *   3. the copyright holder's name is not used to endorse products
 | |
| + *      built using this software without specific written permission.
 | |
| + *
 | |
| + * ALTERNATIVELY, provided that this notice is retained in full, this product
 | |
| + * may be distributed under the terms of the GNU General Public License (GPL),
 | |
| + * in which case the provisions of the GPL apply INSTEAD OF those given above.
 | |
| + *
 | |
| + * DISCLAIMER
 | |
| + *
 | |
| + * This software is provided 'as is' with no explicit or implied warranties
 | |
| + * in respect of its properties, including, but not limited to, correctness
 | |
| + * and/or fitness for purpose.
 | |
| + */
 | |
| +
 | |
| +#ifndef AUTOCONF_INCLUDED
 | |
| +#include <linux/config.h>
 | |
| +#endif
 | |
| +#include <linux/module.h>
 | |
| +#include <linux/init.h>
 | |
| +#include <linux/list.h>
 | |
| +#include <linux/slab.h>
 | |
| +#include <linux/wait.h>
 | |
| +#include <linux/sched.h>
 | |
| +#include <linux/spinlock.h>
 | |
| +#include <linux/version.h>
 | |
| +#include <linux/unistd.h>
 | |
| +#include <linux/poll.h>
 | |
| +#include <linux/random.h>
 | |
| +#include <cryptodev.h>
 | |
| +
 | |
| +#ifdef CONFIG_OCF_FIPS
 | |
| +#include "rndtest.h"
 | |
| +#endif
 | |
| +
 | |
| +#ifndef HAS_RANDOM_INPUT_WAIT
 | |
| +#error "Please do not enable OCF_RANDOMHARVEST unless you have applied patches"
 | |
| +#endif
 | |
| +
 | |
| +/*
 | |
| + * a hack to access the debug levels from the crypto driver
 | |
| + */
 | |
| +extern int crypto_debug;
 | |
| +#define debug crypto_debug
 | |
| +
 | |
| +/*
 | |
| + * a list of all registered random providers
 | |
| + */
 | |
| +static LIST_HEAD(random_ops);
 | |
| +static int started = 0;
 | |
| +static int initted = 0;
 | |
| +
 | |
| +struct random_op {
 | |
| +	struct list_head random_list;
 | |
| +	u_int32_t driverid;
 | |
| +	int (*read_random)(void *arg, u_int32_t *buf, int len);
 | |
| +	void *arg;
 | |
| +};
 | |
| +
 | |
| +static int random_proc(void *arg);
 | |
| +
 | |
| +static pid_t		randomproc = (pid_t) -1;
 | |
| +static spinlock_t	random_lock;
 | |
| +
 | |
| +/*
 | |
| + * just init the spin locks
 | |
| + */
 | |
| +static int
 | |
| +crypto_random_init(void)
 | |
| +{
 | |
| +	spin_lock_init(&random_lock);
 | |
| +	initted = 1;
 | |
| +	return(0);
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Add the given random reader to our list (if not present)
 | |
| + * and start the thread (if not already started)
 | |
| + *
 | |
| + * we have to assume that driver id is ok for now
 | |
| + */
 | |
| +int
 | |
| +crypto_rregister(
 | |
| +	u_int32_t driverid,
 | |
| +	int (*read_random)(void *arg, u_int32_t *buf, int len),
 | |
| +	void *arg)
 | |
| +{
 | |
| +	unsigned long flags;
 | |
| +	int ret = 0;
 | |
| +	struct random_op	*rops, *tmp;
 | |
| +
 | |
| +	dprintk("%s,%d: %s(0x%x, %p, %p)\n", __FILE__, __LINE__,
 | |
| +			__FUNCTION__, driverid, read_random, arg);
 | |
| +
 | |
| +	if (!initted)
 | |
| +		crypto_random_init();
 | |
| +
 | |
| +#if 0
 | |
| +	struct cryptocap	*cap;
 | |
| +
 | |
| +	cap = crypto_checkdriver(driverid);
 | |
| +	if (!cap)
 | |
| +		return EINVAL;
 | |
| +#endif
 | |
| +
 | |
| +	list_for_each_entry_safe(rops, tmp, &random_ops, random_list) {
 | |
| +		if (rops->driverid == driverid && rops->read_random == read_random)
 | |
| +			return EEXIST;
 | |
| +	}
 | |
| +
 | |
| +	rops = (struct random_op *) kmalloc(sizeof(*rops), GFP_KERNEL);
 | |
| +	if (!rops)
 | |
| +		return ENOMEM;
 | |
| +
 | |
| +	rops->driverid    = driverid;
 | |
| +	rops->read_random = read_random;
 | |
| +	rops->arg = arg;
 | |
| +
 | |
| +	spin_lock_irqsave(&random_lock, flags);
 | |
| +	list_add_tail(&rops->random_list, &random_ops);
 | |
| +	if (!started) {
 | |
| +		randomproc = kernel_thread(random_proc, NULL, CLONE_FS|CLONE_FILES);
 | |
| +		if (randomproc < 0) {
 | |
| +			ret = randomproc;
 | |
| +			printk("crypto: crypto_rregister cannot start random thread; "
 | |
| +					"error %d", ret);
 | |
| +		} else
 | |
| +			started = 1;
 | |
| +	}
 | |
| +	spin_unlock_irqrestore(&random_lock, flags);
 | |
| +
 | |
| +	return ret;
 | |
| +}
 | |
| +EXPORT_SYMBOL(crypto_rregister);
 | |
| +
 | |
| +int
 | |
| +crypto_runregister_all(u_int32_t driverid)
 | |
| +{
 | |
| +	struct random_op *rops, *tmp;
 | |
| +	unsigned long flags;
 | |
| +
 | |
| +	dprintk("%s,%d: %s(0x%x)\n", __FILE__, __LINE__, __FUNCTION__, driverid);
 | |
| +
 | |
| +	list_for_each_entry_safe(rops, tmp, &random_ops, random_list) {
 | |
| +		if (rops->driverid == driverid) {
 | |
| +			list_del(&rops->random_list);
 | |
| +			kfree(rops);
 | |
| +		}
 | |
| +	}
 | |
| +
 | |
| +	spin_lock_irqsave(&random_lock, flags);
 | |
| +	if (list_empty(&random_ops) && started)
 | |
| +		kill_proc(randomproc, SIGKILL, 1);
 | |
| +	spin_unlock_irqrestore(&random_lock, flags);
 | |
| +	return(0);
 | |
| +}
 | |
| +EXPORT_SYMBOL(crypto_runregister_all);
 | |
| +
 | |
| +/*
 | |
| + * while we can add entropy to random.c continue to read random data from
 | |
| + * the drivers and push it to random.
 | |
| + */
 | |
| +static int
 | |
| +random_proc(void *arg)
 | |
| +{
 | |
| +	int n;
 | |
| +	int wantcnt;
 | |
| +	int bufcnt = 0;
 | |
| +	int retval = 0;
 | |
| +	int *buf = NULL;
 | |
| +
 | |
| +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
 | |
| +	daemonize();
 | |
| +	spin_lock_irq(¤t->sigmask_lock);
 | |
| +	sigemptyset(¤t->blocked);
 | |
| +	recalc_sigpending(current);
 | |
| +	spin_unlock_irq(¤t->sigmask_lock);
 | |
| +	sprintf(current->comm, "ocf-random");
 | |
| +#else
 | |
| +	daemonize("ocf-random");
 | |
| +	allow_signal(SIGKILL);
 | |
| +#endif
 | |
| +
 | |
| +	(void) get_fs();
 | |
| +	set_fs(get_ds());
 | |
| +
 | |
| +#ifdef CONFIG_OCF_FIPS
 | |
| +#define NUM_INT (RNDTEST_NBYTES/sizeof(int))
 | |
| +#else
 | |
| +#define NUM_INT 32
 | |
| +#endif
 | |
| +
 | |
| +	/*
 | |
| +	 * some devices can transferr their RNG data direct into memory,
 | |
| +	 * so make sure it is device friendly
 | |
| +	 */
 | |
| +	buf = kmalloc(NUM_INT * sizeof(int), GFP_DMA);
 | |
| +	if (NULL == buf) {
 | |
| +		printk("crypto: RNG could not allocate memory\n");
 | |
| +		retval = -ENOMEM;
 | |
| +		goto bad_alloc;
 | |
| +	}
 | |
| +
 | |
| +	wantcnt = NUM_INT;   /* start by adding some entropy */
 | |
| +
 | |
| +	/*
 | |
| +	 * its possible due to errors or driver removal that we no longer
 | |
| +	 * have anything to do,  if so exit or we will consume all the CPU
 | |
| +	 * doing nothing
 | |
| +	 */
 | |
| +	while (!list_empty(&random_ops)) {
 | |
| +		struct random_op	*rops, *tmp;
 | |
| +
 | |
| +#ifdef CONFIG_OCF_FIPS
 | |
| +		if (wantcnt)
 | |
| +			wantcnt = NUM_INT; /* FIPs mode can do 20000 bits or none */
 | |
| +#endif
 | |
| +
 | |
| +		/* see if we can get enough entropy to make the world
 | |
| +		 * a better place.
 | |
| +		 */
 | |
| +		while (bufcnt < wantcnt && bufcnt < NUM_INT) {
 | |
| +			list_for_each_entry_safe(rops, tmp, &random_ops, random_list) {
 | |
| +
 | |
| +				n = (*rops->read_random)(rops->arg, &buf[bufcnt],
 | |
| +							 NUM_INT - bufcnt);
 | |
| +
 | |
| +				/* on failure remove the random number generator */
 | |
| +				if (n == -1) {
 | |
| +					list_del(&rops->random_list);
 | |
| +					printk("crypto: RNG (driverid=0x%x) failed, disabling\n",
 | |
| +							rops->driverid);
 | |
| +					kfree(rops);
 | |
| +				} else if (n > 0)
 | |
| +					bufcnt += n;
 | |
| +			}
 | |
| +			/* give up CPU for a bit, just in case as this is a loop */
 | |
| +			schedule();
 | |
| +		}
 | |
| +
 | |
| +
 | |
| +#ifdef CONFIG_OCF_FIPS
 | |
| +		if (bufcnt > 0 && rndtest_buf((unsigned char *) &buf[0])) {
 | |
| +			dprintk("crypto: buffer had fips errors, discarding\n");
 | |
| +			bufcnt = 0;
 | |
| +		}
 | |
| +#endif
 | |
| +
 | |
| +		/*
 | |
| +		 * if we have a certified buffer,  we can send some data
 | |
| +		 * to /dev/random and move along
 | |
| +		 */
 | |
| +		if (bufcnt > 0) {
 | |
| +			/* add what we have */
 | |
| +			random_input_words(buf, bufcnt, bufcnt*sizeof(int)*8);
 | |
| +			bufcnt = 0;
 | |
| +		}
 | |
| +
 | |
| +		/* give up CPU for a bit so we don't hog while filling */
 | |
| +		schedule();
 | |
| +
 | |
| +		/* wait for needing more */
 | |
| +		wantcnt = random_input_wait();
 | |
| +
 | |
| +		if (wantcnt <= 0)
 | |
| +			wantcnt = 0; /* try to get some info again */
 | |
| +		else
 | |
| +		 	/* round up to one word or we can loop forever */
 | |
| +			wantcnt = (wantcnt + (sizeof(int)*8)) / (sizeof(int)*8);
 | |
| +		if (wantcnt > NUM_INT) {
 | |
| +			wantcnt = NUM_INT;
 | |
| +		}
 | |
| +
 | |
| +		if (signal_pending(current)) {
 | |
| +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
 | |
| +			spin_lock_irq(¤t->sigmask_lock);
 | |
| +#endif
 | |
| +			flush_signals(current);
 | |
| +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
 | |
| +			spin_unlock_irq(¤t->sigmask_lock);
 | |
| +#endif
 | |
| +		}
 | |
| +	}
 | |
| +
 | |
| +	kfree(buf);
 | |
| +
 | |
| +bad_alloc:
 | |
| +	spin_lock_irq(&random_lock);
 | |
| +	randomproc = (pid_t) -1;
 | |
| +	started = 0;
 | |
| +	spin_unlock_irq(&random_lock);
 | |
| +
 | |
| +	return retval;
 | |
| +}
 | |
| +
 | |
| --- /dev/null
 | |
| +++ b/crypto/ocf/ocf-bench.c
 | |
| @@ -0,0 +1,436 @@
 | |
| +/*
 | |
| + * A loadable module that benchmarks the OCF crypto speed from kernel space.
 | |
| + *
 | |
| + * Copyright (C) 2004-2007 David McCullough <david_mccullough@securecomputing.com>
 | |
| + *
 | |
| + * LICENSE TERMS
 | |
| + *
 | |
| + * The free distribution and use of this software in both source and binary
 | |
| + * form is allowed (with or without changes) provided that:
 | |
| + *
 | |
| + *   1. distributions of this source code include the above copyright
 | |
| + *      notice, this list of conditions and the following disclaimer;
 | |
| + *
 | |
| + *   2. distributions in binary form include the above copyright
 | |
| + *      notice, this list of conditions and the following disclaimer
 | |
| + *      in the documentation and/or other associated materials;
 | |
| + *
 | |
| + *   3. the copyright holder's name is not used to endorse products
 | |
| + *      built using this software without specific written permission.
 | |
| + *
 | |
| + * ALTERNATIVELY, provided that this notice is retained in full, this product
 | |
| + * may be distributed under the terms of the GNU General Public License (GPL),
 | |
| + * in which case the provisions of the GPL apply INSTEAD OF those given above.
 | |
| + *
 | |
| + * DISCLAIMER
 | |
| + *
 | |
| + * This software is provided 'as is' with no explicit or implied warranties
 | |
| + * in respect of its properties, including, but not limited to, correctness
 | |
| + * and/or fitness for purpose.
 | |
| + */
 | |
| +
 | |
| +
 | |
| +#ifndef AUTOCONF_INCLUDED
 | |
| +#include <linux/config.h>
 | |
| +#endif
 | |
| +#include <linux/module.h>
 | |
| +#include <linux/init.h>
 | |
| +#include <linux/list.h>
 | |
| +#include <linux/slab.h>
 | |
| +#include <linux/wait.h>
 | |
| +#include <linux/sched.h>
 | |
| +#include <linux/spinlock.h>
 | |
| +#include <linux/version.h>
 | |
| +#include <linux/interrupt.h>
 | |
| +#include <cryptodev.h>
 | |
| +
 | |
| +#ifdef I_HAVE_AN_XSCALE_WITH_INTEL_SDK
 | |
| +#define BENCH_IXP_ACCESS_LIB 1
 | |
| +#endif
 | |
| +#ifdef BENCH_IXP_ACCESS_LIB
 | |
| +#include <IxTypes.h>
 | |
| +#include <IxOsBuffMgt.h>
 | |
| +#include <IxNpeDl.h>
 | |
| +#include <IxCryptoAcc.h>
 | |
| +#include <IxQMgr.h>
 | |
| +#include <IxOsServices.h>
 | |
| +#include <IxOsCacheMMU.h>
 | |
| +#endif
 | |
| +
 | |
| +/*
 | |
| + * support for access lib version 1.4
 | |
| + */
 | |
| +#ifndef IX_MBUF_PRIV
 | |
| +#define IX_MBUF_PRIV(x) ((x)->priv)
 | |
| +#endif
 | |
| +
 | |
| +/*
 | |
| + * the number of simultaneously active requests
 | |
| + */
 | |
| +static int request_q_len = 20;
 | |
| +module_param(request_q_len, int, 0);
 | |
| +MODULE_PARM_DESC(request_q_len, "Number of outstanding requests");
 | |
| +/*
 | |
| + * how many requests we want to have processed
 | |
| + */
 | |
| +static int request_num = 1024;
 | |
| +module_param(request_num, int, 0);
 | |
| +MODULE_PARM_DESC(request_num, "run for at least this many requests");
 | |
| +/*
 | |
| + * the size of each request
 | |
| + */
 | |
| +static int request_size = 1500;
 | |
| +module_param(request_size, int, 0);
 | |
| +MODULE_PARM_DESC(request_size, "size of each request");
 | |
| +
 | |
| +/*
 | |
| + * a structure for each request
 | |
| + */
 | |
| +typedef struct  {
 | |
| +	struct work_struct work;
 | |
| +#ifdef BENCH_IXP_ACCESS_LIB
 | |
| +	IX_MBUF mbuf;
 | |
| +#endif
 | |
| +	unsigned char *buffer;
 | |
| +} request_t;
 | |
| +
 | |
| +static request_t *requests;
 | |
| +
 | |
| +static int outstanding;
 | |
| +static int total;
 | |
| +
 | |
| +/*************************************************************************/
 | |
| +/*
 | |
| + * OCF benchmark routines
 | |
| + */
 | |
| +
 | |
| +static uint64_t ocf_cryptoid;
 | |
| +static int ocf_init(void);
 | |
| +static int ocf_cb(struct cryptop *crp);
 | |
| +static void ocf_request(void *arg);
 | |
| +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
 | |
| +static void ocf_request_wq(struct work_struct *work);
 | |
| +#endif
 | |
| +
 | |
| +static int
 | |
| +ocf_init(void)
 | |
| +{
 | |
| +	int error;
 | |
| +	struct cryptoini crie, cria;
 | |
| +	struct cryptodesc crda, crde;
 | |
| +
 | |
| +	memset(&crie, 0, sizeof(crie));
 | |
| +	memset(&cria, 0, sizeof(cria));
 | |
| +	memset(&crde, 0, sizeof(crde));
 | |
| +	memset(&crda, 0, sizeof(crda));
 | |
| +
 | |
| +	cria.cri_alg  = CRYPTO_SHA1_HMAC;
 | |
| +	cria.cri_klen = 20 * 8;
 | |
| +	cria.cri_key  = "0123456789abcdefghij";
 | |
| +
 | |
| +	crie.cri_alg  = CRYPTO_3DES_CBC;
 | |
| +	crie.cri_klen = 24 * 8;
 | |
| +	crie.cri_key  = "0123456789abcdefghijklmn";
 | |
| +
 | |
| +	crie.cri_next = &cria;
 | |
| +
 | |
| +	error = crypto_newsession(&ocf_cryptoid, &crie, 0);
 | |
| +	if (error) {
 | |
| +		printk("crypto_newsession failed %d\n", error);
 | |
| +		return -1;
 | |
| +	}
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +static int
 | |
| +ocf_cb(struct cryptop *crp)
 | |
| +{
 | |
| +	request_t *r = (request_t *) crp->crp_opaque;
 | |
| +
 | |
| +	if (crp->crp_etype)
 | |
| +		printk("Error in OCF processing: %d\n", crp->crp_etype);
 | |
| +	total++;
 | |
| +	crypto_freereq(crp);
 | |
| +	crp = NULL;
 | |
| +
 | |
| +	if (total > request_num) {
 | |
| +		outstanding--;
 | |
| +		return 0;
 | |
| +	}
 | |
| +
 | |
| +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
 | |
| +	INIT_WORK(&r->work, ocf_request_wq);
 | |
| +#else
 | |
| +	INIT_WORK(&r->work, ocf_request, r);
 | |
| +#endif
 | |
| +	schedule_work(&r->work);
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +
 | |
| +static void
 | |
| +ocf_request(void *arg)
 | |
| +{
 | |
| +	request_t *r = arg;
 | |
| +	struct cryptop *crp = crypto_getreq(2);
 | |
| +	struct cryptodesc *crde, *crda;
 | |
| +
 | |
| +	if (!crp) {
 | |
| +		outstanding--;
 | |
| +		return;
 | |
| +	}
 | |
| +
 | |
| +	crde = crp->crp_desc;
 | |
| +	crda = crde->crd_next;
 | |
| +
 | |
| +	crda->crd_skip = 0;
 | |
| +	crda->crd_flags = 0;
 | |
| +	crda->crd_len = request_size;
 | |
| +	crda->crd_inject = request_size;
 | |
| +	crda->crd_alg = CRYPTO_SHA1_HMAC;
 | |
| +	crda->crd_key = "0123456789abcdefghij";
 | |
| +	crda->crd_klen = 20 * 8;
 | |
| +
 | |
| +	crde->crd_skip = 0;
 | |
| +	crde->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_ENCRYPT;
 | |
| +	crde->crd_len = request_size;
 | |
| +	crde->crd_inject = request_size;
 | |
| +	crde->crd_alg = CRYPTO_3DES_CBC;
 | |
| +	crde->crd_key = "0123456789abcdefghijklmn";
 | |
| +	crde->crd_klen = 24 * 8;
 | |
| +
 | |
| +	crp->crp_ilen = request_size + 64;
 | |
| +	crp->crp_flags = CRYPTO_F_CBIMM;
 | |
| +	crp->crp_buf = (caddr_t) r->buffer;
 | |
| +	crp->crp_callback = ocf_cb;
 | |
| +	crp->crp_sid = ocf_cryptoid;
 | |
| +	crp->crp_opaque = (caddr_t) r;
 | |
| +	crypto_dispatch(crp);
 | |
| +}
 | |
| +
 | |
| +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
 | |
| +static void
 | |
| +ocf_request_wq(struct work_struct *work)
 | |
| +{
 | |
| +	request_t *r = container_of(work, request_t, work);
 | |
| +	ocf_request(r);
 | |
| +}
 | |
| +#endif
 | |
| +
 | |
| +/*************************************************************************/
 | |
| +#ifdef BENCH_IXP_ACCESS_LIB
 | |
| +/*************************************************************************/
 | |
| +/*
 | |
| + * CryptoAcc benchmark routines
 | |
| + */
 | |
| +
 | |
| +static IxCryptoAccCtx ixp_ctx;
 | |
| +static UINT32 ixp_ctx_id;
 | |
| +static IX_MBUF ixp_pri;
 | |
| +static IX_MBUF ixp_sec;
 | |
| +static int ixp_registered = 0;
 | |
| +
 | |
| +static void ixp_register_cb(UINT32 ctx_id, IX_MBUF *bufp,
 | |
| +					IxCryptoAccStatus status);
 | |
| +static void ixp_perform_cb(UINT32 ctx_id, IX_MBUF *sbufp, IX_MBUF *dbufp,
 | |
| +					IxCryptoAccStatus status);
 | |
| +static void ixp_request(void *arg);
 | |
| +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
 | |
| +static void ixp_request_wq(struct work_struct *work);
 | |
| +#endif
 | |
| +
 | |
| +static int
 | |
| +ixp_init(void)
 | |
| +{
 | |
| +	IxCryptoAccStatus status;
 | |
| +
 | |
| +	ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_3DES;
 | |
| +	ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
 | |
| +	ixp_ctx.cipherCtx.cipherKeyLen = 24;
 | |
| +	ixp_ctx.cipherCtx.cipherBlockLen = IX_CRYPTO_ACC_DES_BLOCK_64;
 | |
| +	ixp_ctx.cipherCtx.cipherInitialVectorLen = IX_CRYPTO_ACC_DES_IV_64;
 | |
| +	memcpy(ixp_ctx.cipherCtx.key.cipherKey, "0123456789abcdefghijklmn", 24);
 | |
| +
 | |
| +	ixp_ctx.authCtx.authAlgo = IX_CRYPTO_ACC_AUTH_SHA1;
 | |
| +	ixp_ctx.authCtx.authDigestLen = 12;
 | |
| +	ixp_ctx.authCtx.aadLen = 0;
 | |
| +	ixp_ctx.authCtx.authKeyLen = 20;
 | |
| +	memcpy(ixp_ctx.authCtx.key.authKey, "0123456789abcdefghij", 20);
 | |
| +
 | |
| +	ixp_ctx.useDifferentSrcAndDestMbufs = 0;
 | |
| +	ixp_ctx.operation = IX_CRYPTO_ACC_OP_ENCRYPT_AUTH ;
 | |
| +
 | |
| +	IX_MBUF_MLEN(&ixp_pri)  = IX_MBUF_PKT_LEN(&ixp_pri) = 128;
 | |
| +	IX_MBUF_MDATA(&ixp_pri) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
 | |
| +	IX_MBUF_MLEN(&ixp_sec)  = IX_MBUF_PKT_LEN(&ixp_sec) = 128;
 | |
| +	IX_MBUF_MDATA(&ixp_sec) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
 | |
| +
 | |
| +	status = ixCryptoAccCtxRegister(&ixp_ctx, &ixp_pri, &ixp_sec,
 | |
| +			ixp_register_cb, ixp_perform_cb, &ixp_ctx_id);
 | |
| +
 | |
| +	if (IX_CRYPTO_ACC_STATUS_SUCCESS == status) {
 | |
| +		while (!ixp_registered)
 | |
| +			schedule();
 | |
| +		return ixp_registered < 0 ? -1 : 0;
 | |
| +	}
 | |
| +
 | |
| +	printk("ixp: ixCryptoAccCtxRegister failed %d\n", status);
 | |
| +	return -1;
 | |
| +}
 | |
| +
 | |
| +static void
 | |
| +ixp_register_cb(UINT32 ctx_id, IX_MBUF *bufp, IxCryptoAccStatus status)
 | |
| +{
 | |
| +	if (bufp) {
 | |
| +		IX_MBUF_MLEN(bufp) = IX_MBUF_PKT_LEN(bufp) = 0;
 | |
| +		kfree(IX_MBUF_MDATA(bufp));
 | |
| +		IX_MBUF_MDATA(bufp) = NULL;
 | |
| +	}
 | |
| +
 | |
| +	if (IX_CRYPTO_ACC_STATUS_WAIT == status)
 | |
| +		return;
 | |
| +	if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
 | |
| +		ixp_registered = 1;
 | |
| +	else
 | |
| +		ixp_registered = -1;
 | |
| +}
 | |
| +
 | |
| +static void
 | |
| +ixp_perform_cb(
 | |
| +	UINT32 ctx_id,
 | |
| +	IX_MBUF *sbufp,
 | |
| +	IX_MBUF *dbufp,
 | |
| +	IxCryptoAccStatus status)
 | |
| +{
 | |
| +	request_t *r = NULL;
 | |
| +
 | |
| +	total++;
 | |
| +	if (total > request_num) {
 | |
| +		outstanding--;
 | |
| +		return;
 | |
| +	}
 | |
| +
 | |
| +	if (!sbufp || !(r = IX_MBUF_PRIV(sbufp))) {
 | |
| +		printk("crappo %p %p\n", sbufp, r);
 | |
| +		outstanding--;
 | |
| +		return;
 | |
| +	}
 | |
| +
 | |
| +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
 | |
| +	INIT_WORK(&r->work, ixp_request_wq);
 | |
| +#else
 | |
| +	INIT_WORK(&r->work, ixp_request, r);
 | |
| +#endif
 | |
| +	schedule_work(&r->work);
 | |
| +}
 | |
| +
 | |
| +static void
 | |
| +ixp_request(void *arg)
 | |
| +{
 | |
| +	request_t *r = arg;
 | |
| +	IxCryptoAccStatus status;
 | |
| +
 | |
| +	memset(&r->mbuf, 0, sizeof(r->mbuf));
 | |
| +	IX_MBUF_MLEN(&r->mbuf) = IX_MBUF_PKT_LEN(&r->mbuf) = request_size + 64;
 | |
| +	IX_MBUF_MDATA(&r->mbuf) = r->buffer;
 | |
| +	IX_MBUF_PRIV(&r->mbuf) = r;
 | |
| +	status = ixCryptoAccAuthCryptPerform(ixp_ctx_id, &r->mbuf, NULL,
 | |
| +			0, request_size, 0, request_size, request_size, r->buffer);
 | |
| +	if (IX_CRYPTO_ACC_STATUS_SUCCESS != status) {
 | |
| +		printk("status1 = %d\n", status);
 | |
| +		outstanding--;
 | |
| +		return;
 | |
| +	}
 | |
| +	return;
 | |
| +}
 | |
| +
 | |
| +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
 | |
| +static void
 | |
| +ixp_request_wq(struct work_struct *work)
 | |
| +{
 | |
| +	request_t *r = container_of(work, request_t, work);
 | |
| +	ixp_request(r);
 | |
| +}
 | |
| +#endif
 | |
| +
 | |
| +/*************************************************************************/
 | |
| +#endif /* BENCH_IXP_ACCESS_LIB */
 | |
| +/*************************************************************************/
 | |
| +
 | |
| +int
 | |
| +ocfbench_init(void)
 | |
| +{
 | |
| +	int i, jstart, jstop;
 | |
| +
 | |
| +	printk("Crypto Speed tests\n");
 | |
| +
 | |
| +	requests = kmalloc(sizeof(request_t) * request_q_len, GFP_KERNEL);
 | |
| +	if (!requests) {
 | |
| +		printk("malloc failed\n");
 | |
| +		return -EINVAL;
 | |
| +	}
 | |
| +
 | |
| +	for (i = 0; i < request_q_len; i++) {
 | |
| +		/* +64 for return data */
 | |
| +		requests[i].buffer = kmalloc(request_size + 128, GFP_DMA);
 | |
| +		if (!requests[i].buffer) {
 | |
| +			printk("malloc failed\n");
 | |
| +			return -EINVAL;
 | |
| +		}
 | |
| +		memset(requests[i].buffer, '0' + i, request_size + 128);
 | |
| +	}
 | |
| +
 | |
| +	/*
 | |
| +	 * OCF benchmark
 | |
| +	 */
 | |
| +	printk("OCF: testing ...\n");
 | |
| +	ocf_init();
 | |
| +	total = outstanding = 0;
 | |
| +	jstart = jiffies;
 | |
| +	for (i = 0; i < request_q_len; i++) {
 | |
| +		outstanding++;
 | |
| +		ocf_request(&requests[i]);
 | |
| +	}
 | |
| +	while (outstanding > 0)
 | |
| +		schedule();
 | |
| +	jstop = jiffies;
 | |
| +
 | |
| +	printk("OCF: %d requests of %d bytes in %d jiffies\n", total, request_size,
 | |
| +			jstop - jstart);
 | |
| +
 | |
| +#ifdef BENCH_IXP_ACCESS_LIB
 | |
| +	/*
 | |
| +	 * IXP benchmark
 | |
| +	 */
 | |
| +	printk("IXP: testing ...\n");
 | |
| +	ixp_init();
 | |
| +	total = outstanding = 0;
 | |
| +	jstart = jiffies;
 | |
| +	for (i = 0; i < request_q_len; i++) {
 | |
| +		outstanding++;
 | |
| +		ixp_request(&requests[i]);
 | |
| +	}
 | |
| +	while (outstanding > 0)
 | |
| +		schedule();
 | |
| +	jstop = jiffies;
 | |
| +
 | |
| +	printk("IXP: %d requests of %d bytes in %d jiffies\n", total, request_size,
 | |
| +			jstop - jstart);
 | |
| +#endif /* BENCH_IXP_ACCESS_LIB */
 | |
| +
 | |
| +	for (i = 0; i < request_q_len; i++)
 | |
| +		kfree(requests[i].buffer);
 | |
| +	kfree(requests);
 | |
| +	return -EINVAL; /* always fail to load so it can be re-run quickly ;-) */
 | |
| +}
 | |
| +
 | |
| +static void __exit ocfbench_exit(void)
 | |
| +{
 | |
| +}
 | |
| +
 | |
| +module_init(ocfbench_init);
 | |
| +module_exit(ocfbench_exit);
 | |
| +
 | |
| +MODULE_LICENSE("BSD");
 | |
| +MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
 | |
| +MODULE_DESCRIPTION("Benchmark various in-kernel crypto speeds");
 | |
| --- /dev/null
 | |
| +++ b/crypto/ocf/ixp4xx/ixp4xx.c
 | |
| @@ -0,0 +1,1328 @@
 | |
| +/*
 | |
| + * An OCF module that uses Intels IXP CryptACC API to do the crypto.
 | |
| + * This driver requires the IXP400 Access Library that is available
 | |
| + * from Intel in order to operate (or compile).
 | |
| + *
 | |
| + * Written by David McCullough <david_mccullough@securecomputing.com>
 | |
| + * Copyright (C) 2006-2007 David McCullough
 | |
| + * Copyright (C) 2004-2005 Intel Corporation.
 | |
| + *
 | |
| + * LICENSE TERMS
 | |
| + *
 | |
| + * The free distribution and use of this software in both source and binary
 | |
| + * form is allowed (with or without changes) provided that:
 | |
| + *
 | |
| + *   1. distributions of this source code include the above copyright
 | |
| + *      notice, this list of conditions and the following disclaimer;
 | |
| + *
 | |
| + *   2. distributions in binary form include the above copyright
 | |
| + *      notice, this list of conditions and the following disclaimer
 | |
| + *      in the documentation and/or other associated materials;
 | |
| + *
 | |
| + *   3. the copyright holder's name is not used to endorse products
 | |
| + *      built using this software without specific written permission.
 | |
| + *
 | |
| + * ALTERNATIVELY, provided that this notice is retained in full, this product
 | |
| + * may be distributed under the terms of the GNU General Public License (GPL),
 | |
| + * in which case the provisions of the GPL apply INSTEAD OF those given above.
 | |
| + *
 | |
| + * DISCLAIMER
 | |
| + *
 | |
| + * This software is provided 'as is' with no explicit or implied warranties
 | |
| + * in respect of its properties, including, but not limited to, correctness
 | |
| + * and/or fitness for purpose.
 | |
| + */
 | |
| +
 | |
| +#ifndef AUTOCONF_INCLUDED
 | |
| +#include <linux/config.h>
 | |
| +#endif
 | |
| +#include <linux/module.h>
 | |
| +#include <linux/init.h>
 | |
| +#include <linux/list.h>
 | |
| +#include <linux/slab.h>
 | |
| +#include <linux/sched.h>
 | |
| +#include <linux/wait.h>
 | |
| +#include <linux/crypto.h>
 | |
| +#include <linux/interrupt.h>
 | |
| +#include <asm/scatterlist.h>
 | |
| +
 | |
| +#include <IxTypes.h>
 | |
| +#include <IxOsBuffMgt.h>
 | |
| +#include <IxNpeDl.h>
 | |
| +#include <IxCryptoAcc.h>
 | |
| +#include <IxQMgr.h>
 | |
| +#include <IxOsServices.h>
 | |
| +#include <IxOsCacheMMU.h>
 | |
| +
 | |
| +#include <cryptodev.h>
 | |
| +#include <uio.h>
 | |
| +
 | |
| +#ifndef IX_MBUF_PRIV
 | |
| +#define IX_MBUF_PRIV(x) ((x)->priv)
 | |
| +#endif
 | |
| +
 | |
| +struct ixp_data;
 | |
| +
 | |
| +struct ixp_q {
 | |
| +	struct list_head	 ixp_q_list;
 | |
| +	struct ixp_data		*ixp_q_data;
 | |
| +	struct cryptop		*ixp_q_crp;
 | |
| +	struct cryptodesc	*ixp_q_ccrd;
 | |
| +	struct cryptodesc	*ixp_q_acrd;
 | |
| +	IX_MBUF				 ixp_q_mbuf;
 | |
| +	UINT8				*ixp_hash_dest; /* Location for hash in client buffer */
 | |
| +	UINT8				*ixp_hash_src; /* Location of hash in internal buffer */
 | |
| +	unsigned char		 ixp_q_iv_data[IX_CRYPTO_ACC_MAX_CIPHER_IV_LENGTH];
 | |
| +	unsigned char		*ixp_q_iv;
 | |
| +};
 | |
| +
 | |
| +struct ixp_data {
 | |
| +	int					 ixp_registered;	/* is the context registered */
 | |
| +	int					 ixp_crd_flags;		/* detect direction changes */
 | |
| +
 | |
| +	int					 ixp_cipher_alg;
 | |
| +	int					 ixp_auth_alg;
 | |
| +
 | |
| +	UINT32				 ixp_ctx_id;
 | |
| +	UINT32				 ixp_hash_key_id;	/* used when hashing */
 | |
| +	IxCryptoAccCtx		 ixp_ctx;
 | |
| +	IX_MBUF				 ixp_pri_mbuf;
 | |
| +	IX_MBUF				 ixp_sec_mbuf;
 | |
| +
 | |
| +	struct work_struct   ixp_pending_work;
 | |
| +	struct work_struct   ixp_registration_work;
 | |
| +	struct list_head	 ixp_q;				/* unprocessed requests */
 | |
| +};
 | |
| +
 | |
| +#ifdef __ixp46X
 | |
| +
 | |
| +#define	MAX_IOP_SIZE	64	/* words */
 | |
| +#define	MAX_OOP_SIZE	128
 | |
| +
 | |
| +#define	MAX_PARAMS		3
 | |
| +
 | |
| +struct ixp_pkq {
 | |
| +	struct list_head			 pkq_list;
 | |
| +	struct cryptkop				*pkq_krp;
 | |
| +
 | |
| +	IxCryptoAccPkeEauInOperands	 pkq_op;
 | |
| +	IxCryptoAccPkeEauOpResult	 pkq_result;
 | |
| +
 | |
| +	UINT32						 pkq_ibuf0[MAX_IOP_SIZE];
 | |
| +	UINT32						 pkq_ibuf1[MAX_IOP_SIZE];
 | |
| +	UINT32						 pkq_ibuf2[MAX_IOP_SIZE];
 | |
| +	UINT32						 pkq_obuf[MAX_OOP_SIZE];
 | |
| +};
 | |
| +
 | |
| +static LIST_HEAD(ixp_pkq); /* current PK wait list */
 | |
| +static struct ixp_pkq *ixp_pk_cur;
 | |
| +static spinlock_t ixp_pkq_lock;
 | |
| +
 | |
| +#endif /* __ixp46X */
 | |
| +
 | |
| +static int ixp_blocked = 0;
 | |
| +
 | |
| +static int32_t			 ixp_id = -1;
 | |
| +static struct ixp_data **ixp_sessions = NULL;
 | |
| +static u_int32_t		 ixp_sesnum = 0;
 | |
| +
 | |
| +static int ixp_process(device_t, struct cryptop *, int);
 | |
| +static int ixp_newsession(device_t, u_int32_t *, struct cryptoini *);
 | |
| +static int ixp_freesession(device_t, u_int64_t);
 | |
| +#ifdef __ixp46X
 | |
| +static int ixp_kprocess(device_t, struct cryptkop *krp, int hint);
 | |
| +#endif
 | |
| +
 | |
| +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
 | |
| +static kmem_cache_t *qcache;
 | |
| +#else
 | |
| +static struct kmem_cache *qcache;
 | |
| +#endif
 | |
| +
 | |
| +#define debug ixp_debug
 | |
| +static int ixp_debug = 0;
 | |
| +module_param(ixp_debug, int, 0644);
 | |
| +MODULE_PARM_DESC(ixp_debug, "Enable debug");
 | |
| +
 | |
| +static int ixp_init_crypto = 1;
 | |
| +module_param(ixp_init_crypto, int, 0444); /* RO after load/boot */
 | |
| +MODULE_PARM_DESC(ixp_init_crypto, "Call ixCryptoAccInit (default is 1)");
 | |
| +
 | |
| +static void ixp_process_pending(void *arg);
 | |
| +static void ixp_registration(void *arg);
 | |
| +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
 | |
| +static void ixp_process_pending_wq(struct work_struct *work);
 | |
| +static void ixp_registration_wq(struct work_struct *work);
 | |
| +#endif
 | |
| +
 | |
| +/*
 | |
| + * dummy device structure
 | |
| + */
 | |
| +
 | |
| +static struct {
 | |
| +	softc_device_decl	sc_dev;
 | |
| +} ixpdev;
 | |
| +
 | |
| +static device_method_t ixp_methods = {
 | |
| +	/* crypto device methods */
 | |
| +	DEVMETHOD(cryptodev_newsession,	ixp_newsession),
 | |
| +	DEVMETHOD(cryptodev_freesession,ixp_freesession),
 | |
| +	DEVMETHOD(cryptodev_process,	ixp_process),
 | |
| +#ifdef __ixp46X
 | |
| +	DEVMETHOD(cryptodev_kprocess,	ixp_kprocess),
 | |
| +#endif
 | |
| +};
 | |
| +
 | |
| +/*
 | |
| + * Generate a new software session.
 | |
| + */
 | |
| +static int
 | |
| +ixp_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
 | |
| +{
 | |
| +	struct ixp_data *ixp;
 | |
| +	u_int32_t i;
 | |
| +#define AUTH_LEN(cri, def) \
 | |
| +	(cri->cri_mlen ? cri->cri_mlen : (def))
 | |
| +
 | |
| +	dprintk("%s():alg %d\n", __FUNCTION__,cri->cri_alg);
 | |
| +	if (sid == NULL || cri == NULL) {
 | |
| +		dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
 | |
| +		return EINVAL;
 | |
| +	}
 | |
| +
 | |
| +	if (ixp_sessions) {
 | |
| +		for (i = 1; i < ixp_sesnum; i++)
 | |
| +			if (ixp_sessions[i] == NULL)
 | |
| +				break;
 | |
| +	} else
 | |
| +		i = 1;		/* NB: to silence compiler warning */
 | |
| +
 | |
| +	if (ixp_sessions == NULL || i == ixp_sesnum) {
 | |
| +		struct ixp_data **ixpd;
 | |
| +
 | |
| +		if (ixp_sessions == NULL) {
 | |
| +			i = 1; /* We leave ixp_sessions[0] empty */
 | |
| +			ixp_sesnum = CRYPTO_SW_SESSIONS;
 | |
| +		} else
 | |
| +			ixp_sesnum *= 2;
 | |
| +
 | |
| +		ixpd = kmalloc(ixp_sesnum * sizeof(struct ixp_data *), SLAB_ATOMIC);
 | |
| +		if (ixpd == NULL) {
 | |
| +			/* Reset session number */
 | |
| +			if (ixp_sesnum == CRYPTO_SW_SESSIONS)
 | |
| +				ixp_sesnum = 0;
 | |
| +			else
 | |
| +				ixp_sesnum /= 2;
 | |
| +			dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
 | |
| +			return ENOBUFS;
 | |
| +		}
 | |
| +		memset(ixpd, 0, ixp_sesnum * sizeof(struct ixp_data *));
 | |
| +
 | |
| +		/* Copy existing sessions */
 | |
| +		if (ixp_sessions) {
 | |
| +			memcpy(ixpd, ixp_sessions,
 | |
| +			    (ixp_sesnum / 2) * sizeof(struct ixp_data *));
 | |
| +			kfree(ixp_sessions);
 | |
| +		}
 | |
| +
 | |
| +		ixp_sessions = ixpd;
 | |
| +	}
 | |
| +
 | |
| +	ixp_sessions[i] = (struct ixp_data *) kmalloc(sizeof(struct ixp_data),
 | |
| +			SLAB_ATOMIC);
 | |
| +	if (ixp_sessions[i] == NULL) {
 | |
| +		ixp_freesession(NULL, i);
 | |
| +		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
 | |
| +		return ENOBUFS;
 | |
| +	}
 | |
| +
 | |
| +	*sid = i;
 | |
| +
 | |
| +	ixp = ixp_sessions[i];
 | |
| +	memset(ixp, 0, sizeof(*ixp));
 | |
| +
 | |
| +	ixp->ixp_cipher_alg = -1;
 | |
| +	ixp->ixp_auth_alg = -1;
 | |
| +	ixp->ixp_ctx_id = -1;
 | |
| +	INIT_LIST_HEAD(&ixp->ixp_q);
 | |
| +
 | |
| +	ixp->ixp_ctx.useDifferentSrcAndDestMbufs = 0;
 | |
| +
 | |
| +	while (cri) {
 | |
| +		switch (cri->cri_alg) {
 | |
| +		case CRYPTO_DES_CBC:
 | |
| +			ixp->ixp_cipher_alg = cri->cri_alg;
 | |
| +			ixp->ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_DES;
 | |
| +			ixp->ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
 | |
| +			ixp->ixp_ctx.cipherCtx.cipherKeyLen = (cri->cri_klen + 7) / 8;
 | |
| +			ixp->ixp_ctx.cipherCtx.cipherBlockLen = IX_CRYPTO_ACC_DES_BLOCK_64;
 | |
| +			ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen =
 | |
| +						IX_CRYPTO_ACC_DES_IV_64;
 | |
| +			memcpy(ixp->ixp_ctx.cipherCtx.key.cipherKey,
 | |
| +					cri->cri_key, (cri->cri_klen + 7) / 8);
 | |
| +			break;
 | |
| +
 | |
| +		case CRYPTO_3DES_CBC:
 | |
| +			ixp->ixp_cipher_alg = cri->cri_alg;
 | |
| +			ixp->ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_3DES;
 | |
| +			ixp->ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
 | |
| +			ixp->ixp_ctx.cipherCtx.cipherKeyLen = (cri->cri_klen + 7) / 8;
 | |
| +			ixp->ixp_ctx.cipherCtx.cipherBlockLen = IX_CRYPTO_ACC_DES_BLOCK_64;
 | |
| +			ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen =
 | |
| +						IX_CRYPTO_ACC_DES_IV_64;
 | |
| +			memcpy(ixp->ixp_ctx.cipherCtx.key.cipherKey,
 | |
| +					cri->cri_key, (cri->cri_klen + 7) / 8);
 | |
| +			break;
 | |
| +
 | |
| +		case CRYPTO_RIJNDAEL128_CBC:
 | |
| +			ixp->ixp_cipher_alg = cri->cri_alg;
 | |
| +			ixp->ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_AES;
 | |
| +			ixp->ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
 | |
| +			ixp->ixp_ctx.cipherCtx.cipherKeyLen = (cri->cri_klen + 7) / 8;
 | |
| +			ixp->ixp_ctx.cipherCtx.cipherBlockLen = 16;
 | |
| +			ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen = 16;
 | |
| +			memcpy(ixp->ixp_ctx.cipherCtx.key.cipherKey,
 | |
| +					cri->cri_key, (cri->cri_klen + 7) / 8);
 | |
| +			break;
 | |
| +
 | |
| +		case CRYPTO_MD5:
 | |
| +		case CRYPTO_MD5_HMAC:
 | |
| +			ixp->ixp_auth_alg = cri->cri_alg;
 | |
| +			ixp->ixp_ctx.authCtx.authAlgo = IX_CRYPTO_ACC_AUTH_MD5;
 | |
| +			ixp->ixp_ctx.authCtx.authDigestLen = AUTH_LEN(cri, MD5_HASH_LEN);
 | |
| +			ixp->ixp_ctx.authCtx.aadLen = 0;
 | |
| +			/* Only MD5_HMAC needs a key */
 | |
| +			if (cri->cri_alg == CRYPTO_MD5_HMAC) {
 | |
| +				ixp->ixp_ctx.authCtx.authKeyLen = (cri->cri_klen + 7) / 8;
 | |
| +				if (ixp->ixp_ctx.authCtx.authKeyLen >
 | |
| +						sizeof(ixp->ixp_ctx.authCtx.key.authKey)) {
 | |
| +					printk(
 | |
| +						"ixp4xx: Invalid key length for MD5_HMAC - %d bits\n",
 | |
| +							cri->cri_klen);
 | |
| +					ixp_freesession(NULL, i);
 | |
| +					return EINVAL;
 | |
| +				}
 | |
| +				memcpy(ixp->ixp_ctx.authCtx.key.authKey,
 | |
| +						cri->cri_key, (cri->cri_klen + 7) / 8);
 | |
| +			}
 | |
| +			break;
 | |
| +
 | |
| +		case CRYPTO_SHA1:
 | |
| +		case CRYPTO_SHA1_HMAC:
 | |
| +			ixp->ixp_auth_alg = cri->cri_alg;
 | |
| +			ixp->ixp_ctx.authCtx.authAlgo = IX_CRYPTO_ACC_AUTH_SHA1;
 | |
| +			ixp->ixp_ctx.authCtx.authDigestLen = AUTH_LEN(cri, SHA1_HASH_LEN);
 | |
| +			ixp->ixp_ctx.authCtx.aadLen = 0;
 | |
| +			/* Only SHA1_HMAC needs a key */
 | |
| +			if (cri->cri_alg == CRYPTO_SHA1_HMAC) {
 | |
| +				ixp->ixp_ctx.authCtx.authKeyLen = (cri->cri_klen + 7) / 8;
 | |
| +				if (ixp->ixp_ctx.authCtx.authKeyLen >
 | |
| +						sizeof(ixp->ixp_ctx.authCtx.key.authKey)) {
 | |
| +					printk(
 | |
| +						"ixp4xx: Invalid key length for SHA1_HMAC - %d bits\n",
 | |
| +							cri->cri_klen);
 | |
| +					ixp_freesession(NULL, i);
 | |
| +					return EINVAL;
 | |
| +				}
 | |
| +				memcpy(ixp->ixp_ctx.authCtx.key.authKey,
 | |
| +						cri->cri_key, (cri->cri_klen + 7) / 8);
 | |
| +			}
 | |
| +			break;
 | |
| +
 | |
| +		default:
 | |
| +			printk("ixp: unknown algo 0x%x\n", cri->cri_alg);
 | |
| +			ixp_freesession(NULL, i);
 | |
| +			return EINVAL;
 | |
| +		}
 | |
| +		cri = cri->cri_next;
 | |
| +	}
 | |
| +
 | |
| +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
 | |
| +	INIT_WORK(&ixp->ixp_pending_work, ixp_process_pending_wq);
 | |
| +	INIT_WORK(&ixp->ixp_registration_work, ixp_registration_wq);
 | |
| +#else
 | |
| +	INIT_WORK(&ixp->ixp_pending_work, ixp_process_pending, ixp);
 | |
| +	INIT_WORK(&ixp->ixp_registration_work, ixp_registration, ixp);
 | |
| +#endif
 | |
| +
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +
 | |
| +/*
 | |
| + * Free a session.
 | |
| + */
 | |
| +static int
 | |
| +ixp_freesession(device_t dev, u_int64_t tid)
 | |
| +{
 | |
| +	u_int32_t sid = CRYPTO_SESID2LID(tid);
 | |
| +
 | |
| +	dprintk("%s()\n", __FUNCTION__);
 | |
| +	if (sid > ixp_sesnum || ixp_sessions == NULL ||
 | |
| +			ixp_sessions[sid] == NULL) {
 | |
| +		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
 | |
| +		return EINVAL;
 | |
| +	}
 | |
| +
 | |
| +	/* Silently accept and return */
 | |
| +	if (sid == 0)
 | |
| +		return 0;
 | |
| +
 | |
| +	if (ixp_sessions[sid]) {
 | |
| +		if (ixp_sessions[sid]->ixp_ctx_id != -1) {
 | |
| +			ixCryptoAccCtxUnregister(ixp_sessions[sid]->ixp_ctx_id);
 | |
| +			ixp_sessions[sid]->ixp_ctx_id = -1;
 | |
| +		}
 | |
| +
 | |
| +		flush_scheduled_work();
 | |
| +
 | |
| +		kfree(ixp_sessions[sid]);
 | |
| +	}
 | |
| +	ixp_sessions[sid] = NULL;
 | |
| +	if (ixp_blocked) {
 | |
| +		ixp_blocked = 0;
 | |
| +		crypto_unblock(ixp_id, CRYPTO_SYMQ);
 | |
| +	}
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +
 | |
| +/*
 | |
| + * callback for when hash processing is complete
 | |
| + */
 | |
| +
 | |
| +static void
 | |
| +ixp_hash_perform_cb(
 | |
| +	UINT32 hash_key_id,
 | |
| +	IX_MBUF *bufp,
 | |
| +	IxCryptoAccStatus status)
 | |
| +{
 | |
| +	struct ixp_q *q;
 | |
| +
 | |
| +	dprintk("%s(%u, %p, 0x%x)\n", __FUNCTION__, hash_key_id, bufp, status);
 | |
| +
 | |
| +	if (bufp == NULL) {
 | |
| +		printk("ixp: NULL buf in %s\n", __FUNCTION__);
 | |
| +		return;
 | |
| +	}
 | |
| +
 | |
| +	q = IX_MBUF_PRIV(bufp);
 | |
| +	if (q == NULL) {
 | |
| +		printk("ixp: NULL priv in %s\n", __FUNCTION__);
 | |
| +		return;
 | |
| +	}
 | |
| +
 | |
| +	if (status == IX_CRYPTO_ACC_STATUS_SUCCESS) {
 | |
| +		/* On success, need to copy hash back into original client buffer */
 | |
| +		memcpy(q->ixp_hash_dest, q->ixp_hash_src,
 | |
| +				(q->ixp_q_data->ixp_auth_alg == CRYPTO_SHA1) ?
 | |
| +					SHA1_HASH_LEN : MD5_HASH_LEN);
 | |
| +	}
 | |
| +	else {
 | |
| +		printk("ixp: hash perform failed status=%d\n", status);
 | |
| +		q->ixp_q_crp->crp_etype = EINVAL;
 | |
| +	}
 | |
| +
 | |
| +	/* Free internal buffer used for hashing */
 | |
| +	kfree(IX_MBUF_MDATA(&q->ixp_q_mbuf));
 | |
| +
 | |
| +	crypto_done(q->ixp_q_crp);
 | |
| +	kmem_cache_free(qcache, q);
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * setup a request and perform it
 | |
| + */
 | |
| +static void
 | |
| +ixp_q_process(struct ixp_q *q)
 | |
| +{
 | |
| +	IxCryptoAccStatus status;
 | |
| +	struct ixp_data *ixp = q->ixp_q_data;
 | |
| +	int auth_off = 0;
 | |
| +	int auth_len = 0;
 | |
| +	int crypt_off = 0;
 | |
| +	int crypt_len = 0;
 | |
| +	int icv_off = 0;
 | |
| +	char *crypt_func;
 | |
| +
 | |
| +	dprintk("%s(%p)\n", __FUNCTION__, q);
 | |
| +
 | |
| +	if (q->ixp_q_ccrd) {
 | |
| +		if (q->ixp_q_ccrd->crd_flags & CRD_F_IV_EXPLICIT) {
 | |
| +			q->ixp_q_iv = q->ixp_q_ccrd->crd_iv;
 | |
| +		} else {
 | |
| +			q->ixp_q_iv = q->ixp_q_iv_data;
 | |
| +			crypto_copydata(q->ixp_q_crp->crp_flags, q->ixp_q_crp->crp_buf,
 | |
| +					q->ixp_q_ccrd->crd_inject,
 | |
| +					ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen,
 | |
| +					(caddr_t) q->ixp_q_iv);
 | |
| +		}
 | |
| +
 | |
| +		if (q->ixp_q_acrd) {
 | |
| +			auth_off = q->ixp_q_acrd->crd_skip;
 | |
| +			auth_len = q->ixp_q_acrd->crd_len;
 | |
| +			icv_off  = q->ixp_q_acrd->crd_inject;
 | |
| +		}
 | |
| +
 | |
| +		crypt_off = q->ixp_q_ccrd->crd_skip;
 | |
| +		crypt_len = q->ixp_q_ccrd->crd_len;
 | |
| +	} else { /* if (q->ixp_q_acrd) */
 | |
| +		auth_off = q->ixp_q_acrd->crd_skip;
 | |
| +		auth_len = q->ixp_q_acrd->crd_len;
 | |
| +		icv_off  = q->ixp_q_acrd->crd_inject;
 | |
| +	}
 | |
| +
 | |
| +	if (q->ixp_q_crp->crp_flags & CRYPTO_F_SKBUF) {
 | |
| +		struct sk_buff *skb = (struct sk_buff *) q->ixp_q_crp->crp_buf;
 | |
| +		if (skb_shinfo(skb)->nr_frags) {
 | |
| +			/*
 | |
| +			 * DAVIDM fix this limitation one day by using
 | |
| +			 * a buffer pool and chaining,  it is not currently
 | |
| +			 * needed for current user/kernel space acceleration
 | |
| +			 */
 | |
| +			printk("ixp: Cannot handle fragmented skb's yet !\n");
 | |
| +			q->ixp_q_crp->crp_etype = ENOENT;
 | |
| +			goto done;
 | |
| +		}
 | |
| +		IX_MBUF_MLEN(&q->ixp_q_mbuf) =
 | |
| +				IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) =  skb->len;
 | |
| +		IX_MBUF_MDATA(&q->ixp_q_mbuf) = skb->data;
 | |
| +	} else if (q->ixp_q_crp->crp_flags & CRYPTO_F_IOV) {
 | |
| +		struct uio *uiop = (struct uio *) q->ixp_q_crp->crp_buf;
 | |
| +		if (uiop->uio_iovcnt != 1) {
 | |
| +			/*
 | |
| +			 * DAVIDM fix this limitation one day by using
 | |
| +			 * a buffer pool and chaining,  it is not currently
 | |
| +			 * needed for current user/kernel space acceleration
 | |
| +			 */
 | |
| +			printk("ixp: Cannot handle more than 1 iovec yet !\n");
 | |
| +			q->ixp_q_crp->crp_etype = ENOENT;
 | |
| +			goto done;
 | |
| +		}
 | |
| +		IX_MBUF_MLEN(&q->ixp_q_mbuf) =
 | |
| +				IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) = uiop->uio_iov[0].iov_len;
 | |
| +		IX_MBUF_MDATA(&q->ixp_q_mbuf) = uiop->uio_iov[0].iov_base;
 | |
| +	} else /* contig buffer */ {
 | |
| +		IX_MBUF_MLEN(&q->ixp_q_mbuf)  =
 | |
| +				IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) = q->ixp_q_crp->crp_ilen;
 | |
| +		IX_MBUF_MDATA(&q->ixp_q_mbuf) = q->ixp_q_crp->crp_buf;
 | |
| +	}
 | |
| +
 | |
| +	IX_MBUF_PRIV(&q->ixp_q_mbuf) = q;
 | |
| +
 | |
| +	if (ixp->ixp_auth_alg == CRYPTO_SHA1 || ixp->ixp_auth_alg == CRYPTO_MD5) {
 | |
| +		/*
 | |
| +		 * For SHA1 and MD5 hash, need to create an internal buffer that is big
 | |
| +		 * enough to hold the original data + the appropriate padding for the
 | |
| +		 * hash algorithm.
 | |
| +		 */
 | |
| +		UINT8 *tbuf = NULL;
 | |
| +
 | |
| +		IX_MBUF_MLEN(&q->ixp_q_mbuf) = IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) =
 | |
| +			((IX_MBUF_MLEN(&q->ixp_q_mbuf) * 8) + 72 + 511) / 8;
 | |
| +		tbuf = kmalloc(IX_MBUF_MLEN(&q->ixp_q_mbuf), SLAB_ATOMIC);
 | |
| +
 | |
| +		if (IX_MBUF_MDATA(&q->ixp_q_mbuf) == NULL) {
 | |
| +			printk("ixp: kmalloc(%u, SLAB_ATOMIC) failed\n",
 | |
| +					IX_MBUF_MLEN(&q->ixp_q_mbuf));
 | |
| +			q->ixp_q_crp->crp_etype = ENOMEM;
 | |
| +			goto done;
 | |
| +		}
 | |
| +		memcpy(tbuf, &(IX_MBUF_MDATA(&q->ixp_q_mbuf))[auth_off], auth_len);
 | |
| +
 | |
| +		/* Set location in client buffer to copy hash into */
 | |
| +		q->ixp_hash_dest =
 | |
| +			&(IX_MBUF_MDATA(&q->ixp_q_mbuf))[auth_off + auth_len];
 | |
| +
 | |
| +		IX_MBUF_MDATA(&q->ixp_q_mbuf) = tbuf;
 | |
| +
 | |
| +		/* Set location in internal buffer for where hash starts */
 | |
| +		q->ixp_hash_src = &(IX_MBUF_MDATA(&q->ixp_q_mbuf))[auth_len];
 | |
| +
 | |
| +		crypt_func = "ixCryptoAccHashPerform";
 | |
| +		status = ixCryptoAccHashPerform(ixp->ixp_ctx.authCtx.authAlgo,
 | |
| +				&q->ixp_q_mbuf, ixp_hash_perform_cb, 0, auth_len, auth_len,
 | |
| +				&ixp->ixp_hash_key_id);
 | |
| +	}
 | |
| +	else {
 | |
| +		crypt_func = "ixCryptoAccAuthCryptPerform";
 | |
| +		status = ixCryptoAccAuthCryptPerform(ixp->ixp_ctx_id, &q->ixp_q_mbuf,
 | |
| +			NULL, auth_off, auth_len, crypt_off, crypt_len, icv_off,
 | |
| +			q->ixp_q_iv);
 | |
| +	}
 | |
| +
 | |
| +	if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
 | |
| +		return;
 | |
| +
 | |
| +	if (IX_CRYPTO_ACC_STATUS_QUEUE_FULL == status) {
 | |
| +		q->ixp_q_crp->crp_etype = ENOMEM;
 | |
| +		goto done;
 | |
| +	}
 | |
| +
 | |
| +	printk("ixp: %s failed %u\n", crypt_func, status);
 | |
| +	q->ixp_q_crp->crp_etype = EINVAL;
 | |
| +
 | |
| +done:
 | |
| +	crypto_done(q->ixp_q_crp);
 | |
| +	kmem_cache_free(qcache, q);
 | |
| +}
 | |
| +
 | |
| +
 | |
| +/*
 | |
| + * because we cannot process the Q from the Register callback
 | |
| + * we do it here on a task Q.
 | |
| + */
 | |
| +
 | |
| +static void
 | |
| +ixp_process_pending(void *arg)
 | |
| +{
 | |
| +	struct ixp_data *ixp = arg;
 | |
| +	struct ixp_q *q = NULL;
 | |
| +
 | |
| +	dprintk("%s(%p)\n", __FUNCTION__, arg);
 | |
| +
 | |
| +	if (!ixp)
 | |
| +		return;
 | |
| +
 | |
| +	while (!list_empty(&ixp->ixp_q)) {
 | |
| +		q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
 | |
| +		list_del(&q->ixp_q_list);
 | |
| +		ixp_q_process(q);
 | |
| +	}
 | |
| +}
 | |
| +
 | |
| +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
 | |
| +static void
 | |
| +ixp_process_pending_wq(struct work_struct *work)
 | |
| +{
 | |
| +	struct ixp_data *ixp = container_of(work, struct ixp_data,
 | |
| +								ixp_pending_work);
 | |
| +	ixp_process_pending(ixp);
 | |
| +}
 | |
| +#endif
 | |
| +
 | |
| +/*
 | |
| + * callback for when context registration is complete
 | |
| + */
 | |
| +
 | |
| +static void
 | |
| +ixp_register_cb(UINT32 ctx_id, IX_MBUF *bufp, IxCryptoAccStatus status)
 | |
| +{
 | |
| +	int i;
 | |
| +	struct ixp_data *ixp;
 | |
| +	struct ixp_q *q;
 | |
| +
 | |
| +	dprintk("%s(%d, %p, %d)\n", __FUNCTION__, ctx_id, bufp, status);
 | |
| +
 | |
| +	/*
 | |
| +	 * free any buffer passed in to this routine
 | |
| +	 */
 | |
| +	if (bufp) {
 | |
| +		IX_MBUF_MLEN(bufp) = IX_MBUF_PKT_LEN(bufp) = 0;
 | |
| +		kfree(IX_MBUF_MDATA(bufp));
 | |
| +		IX_MBUF_MDATA(bufp) = NULL;
 | |
| +	}
 | |
| +
 | |
| +	for (i = 0; i < ixp_sesnum; i++) {
 | |
| +		ixp = ixp_sessions[i];
 | |
| +		if (ixp && ixp->ixp_ctx_id == ctx_id)
 | |
| +			break;
 | |
| +	}
 | |
| +	if (i >= ixp_sesnum) {
 | |
| +		printk("ixp: invalid context id %d\n", ctx_id);
 | |
| +		return;
 | |
| +	}
 | |
| +
 | |
| +	if (IX_CRYPTO_ACC_STATUS_WAIT == status) {
 | |
| +		/* this is normal to free the first of two buffers */
 | |
| +		dprintk("ixp: register not finished yet.\n");
 | |
| +		return;
 | |
| +	}
 | |
| +
 | |
| +	if (IX_CRYPTO_ACC_STATUS_SUCCESS != status) {
 | |
| +		printk("ixp: register failed 0x%x\n", status);
 | |
| +		while (!list_empty(&ixp->ixp_q)) {
 | |
| +			q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
 | |
| +			list_del(&q->ixp_q_list);
 | |
| +			q->ixp_q_crp->crp_etype = EINVAL;
 | |
| +			crypto_done(q->ixp_q_crp);
 | |
| +			kmem_cache_free(qcache, q);
 | |
| +		}
 | |
| +		return;
 | |
| +	}
 | |
| +
 | |
| +	/*
 | |
| +	 * we are now registered,  we cannot start processing the Q here
 | |
| +	 * or we get strange errors with AES (DES/3DES seem to be ok).
 | |
| +	 */
 | |
| +	ixp->ixp_registered = 1;
 | |
| +	schedule_work(&ixp->ixp_pending_work);
 | |
| +}
 | |
| +
 | |
| +
 | |
| +/*
 | |
| + * callback for when data processing is complete
 | |
| + */
 | |
| +
 | |
| +static void
 | |
| +ixp_perform_cb(
 | |
| +	UINT32 ctx_id,
 | |
| +	IX_MBUF *sbufp,
 | |
| +	IX_MBUF *dbufp,
 | |
| +	IxCryptoAccStatus status)
 | |
| +{
 | |
| +	struct ixp_q *q;
 | |
| +
 | |
| +	dprintk("%s(%d, %p, %p, 0x%x)\n", __FUNCTION__, ctx_id, sbufp,
 | |
| +			dbufp, status);
 | |
| +
 | |
| +	if (sbufp == NULL) {
 | |
| +		printk("ixp: NULL sbuf in ixp_perform_cb\n");
 | |
| +		return;
 | |
| +	}
 | |
| +
 | |
| +	q = IX_MBUF_PRIV(sbufp);
 | |
| +	if (q == NULL) {
 | |
| +		printk("ixp: NULL priv in ixp_perform_cb\n");
 | |
| +		return;
 | |
| +	}
 | |
| +
 | |
| +	if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
 | |
| +		printk("ixp: perform failed status=%d\n", status);
 | |
| +		q->ixp_q_crp->crp_etype = EINVAL;
 | |
| +	}
 | |
| +
 | |
| +	crypto_done(q->ixp_q_crp);
 | |
| +	kmem_cache_free(qcache, q);
 | |
| +}
 | |
| +
 | |
| +
 | |
| +/*
 | |
| + * registration is not callable at IRQ time,  so we defer
 | |
| + * to a task queue,  this routines completes the registration for us
 | |
| + * when the task queue runs
 | |
| + *
 | |
| + * Unfortunately this means we cannot tell OCF that the driver is blocked,
 | |
| + * we do that on the next request.
 | |
| + */
 | |
| +
 | |
| +static void
 | |
| +ixp_registration(void *arg)
 | |
| +{
 | |
| +	struct ixp_data *ixp = arg;
 | |
| +	struct ixp_q *q = NULL;
 | |
| +	IX_MBUF *pri = NULL, *sec = NULL;
 | |
| +	int status = IX_CRYPTO_ACC_STATUS_SUCCESS;
 | |
| +
 | |
| +	if (!ixp) {
 | |
| +		printk("ixp: ixp_registration with no arg\n");
 | |
| +		return;
 | |
| +	}
 | |
| +
 | |
| +	if (ixp->ixp_ctx_id != -1) {
 | |
| +		ixCryptoAccCtxUnregister(ixp->ixp_ctx_id);
 | |
| +		ixp->ixp_ctx_id = -1;
 | |
| +	}
 | |
| +
 | |
| +	if (list_empty(&ixp->ixp_q)) {
 | |
| +		printk("ixp: ixp_registration with no Q\n");
 | |
| +		return;
 | |
| +	}
 | |
| +
 | |
| +	/*
 | |
| +	 * setup the primary and secondary buffers
 | |
| +	 */
 | |
| +	q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
 | |
| +	if (q->ixp_q_acrd) {
 | |
| +		pri = &ixp->ixp_pri_mbuf;
 | |
| +		sec = &ixp->ixp_sec_mbuf;
 | |
| +		IX_MBUF_MLEN(pri)  = IX_MBUF_PKT_LEN(pri) = 128;
 | |
| +		IX_MBUF_MDATA(pri) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
 | |
| +		IX_MBUF_MLEN(sec)  = IX_MBUF_PKT_LEN(sec) = 128;
 | |
| +		IX_MBUF_MDATA(sec) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
 | |
| +	}
 | |
| +
 | |
| +	/* Only need to register if a crypt op or HMAC op */
 | |
| +	if (!(ixp->ixp_auth_alg == CRYPTO_SHA1 ||
 | |
| +				ixp->ixp_auth_alg == CRYPTO_MD5)) {
 | |
| +		status = ixCryptoAccCtxRegister(
 | |
| +					&ixp->ixp_ctx,
 | |
| +					pri, sec,
 | |
| +					ixp_register_cb,
 | |
| +					ixp_perform_cb,
 | |
| +					&ixp->ixp_ctx_id);
 | |
| +	}
 | |
| +	else {
 | |
| +		/* Otherwise we start processing pending q */
 | |
| +		schedule_work(&ixp->ixp_pending_work);
 | |
| +	}
 | |
| +
 | |
| +	if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
 | |
| +		return;
 | |
| +
 | |
| +	if (IX_CRYPTO_ACC_STATUS_EXCEED_MAX_TUNNELS == status) {
 | |
| +		printk("ixp: ixCryptoAccCtxRegister failed (out of tunnels)\n");
 | |
| +		ixp_blocked = 1;
 | |
| +		/* perhaps we should return EGAIN on queued ops ? */
 | |
| +		return;
 | |
| +	}
 | |
| +
 | |
| +	printk("ixp: ixCryptoAccCtxRegister failed %d\n", status);
 | |
| +	ixp->ixp_ctx_id = -1;
 | |
| +
 | |
| +	/*
 | |
| +	 * everything waiting is toasted
 | |
| +	 */
 | |
| +	while (!list_empty(&ixp->ixp_q)) {
 | |
| +		q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
 | |
| +		list_del(&q->ixp_q_list);
 | |
| +		q->ixp_q_crp->crp_etype = ENOENT;
 | |
| +		crypto_done(q->ixp_q_crp);
 | |
| +		kmem_cache_free(qcache, q);
 | |
| +	}
 | |
| +}
 | |
| +
 | |
| +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
 | |
| +static void
 | |
| +ixp_registration_wq(struct work_struct *work)
 | |
| +{
 | |
| +	struct ixp_data *ixp = container_of(work, struct ixp_data,
 | |
| +								ixp_registration_work);
 | |
| +	ixp_registration(ixp);
 | |
| +}
 | |
| +#endif
 | |
| +
 | |
| +/*
 | |
| + * Process a request.
 | |
| + */
 | |
| +static int
 | |
| +ixp_process(device_t dev, struct cryptop *crp, int hint)
 | |
| +{
 | |
| +	struct ixp_data *ixp;
 | |
| +	unsigned int lid;
 | |
| +	struct ixp_q *q = NULL;
 | |
| +	int status;
 | |
| +
 | |
| +	dprintk("%s()\n", __FUNCTION__);
 | |
| +
 | |
| +	/* Sanity check */
 | |
| +	if (crp == NULL) {
 | |
| +		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
 | |
| +		return EINVAL;
 | |
| +	}
 | |
| +
 | |
| +	crp->crp_etype = 0;
 | |
| +
 | |
| +	if (ixp_blocked)
 | |
| +		return ERESTART;
 | |
| +
 | |
| +	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
 | |
| +		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
 | |
| +		crp->crp_etype = EINVAL;
 | |
| +		goto done;
 | |
| +	}
 | |
| +
 | |
| +	/*
 | |
| +	 * find the session we are using
 | |
| +	 */
 | |
| +
 | |
| +	lid = crp->crp_sid & 0xffffffff;
 | |
| +	if (lid >= ixp_sesnum || lid == 0 || ixp_sessions == NULL ||
 | |
| +			ixp_sessions[lid] == NULL) {
 | |
| +		crp->crp_etype = ENOENT;
 | |
| +		dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
 | |
| +		goto done;
 | |
| +	}
 | |
| +	ixp = ixp_sessions[lid];
 | |
| +
 | |
| +	/*
 | |
| +	 * setup a new request ready for queuing
 | |
| +	 */
 | |
| +	q = kmem_cache_alloc(qcache, SLAB_ATOMIC);
 | |
| +	if (q == NULL) {
 | |
| +		dprintk("%s,%d: ENOMEM\n", __FILE__, __LINE__);
 | |
| +		crp->crp_etype = ENOMEM;
 | |
| +		goto done;
 | |
| +	}
 | |
| +	/*
 | |
| +	 * save some cycles by only zeroing the important bits
 | |
| +	 */
 | |
| +	memset(&q->ixp_q_mbuf, 0, sizeof(q->ixp_q_mbuf));
 | |
| +	q->ixp_q_ccrd = NULL;
 | |
| +	q->ixp_q_acrd = NULL;
 | |
| +	q->ixp_q_crp = crp;
 | |
| +	q->ixp_q_data = ixp;
 | |
| +
 | |
| +	/*
 | |
| +	 * point the cipher and auth descriptors appropriately
 | |
| +	 * check that we have something to do
 | |
| +	 */
 | |
| +	if (crp->crp_desc->crd_alg == ixp->ixp_cipher_alg)
 | |
| +		q->ixp_q_ccrd = crp->crp_desc;
 | |
| +	else if (crp->crp_desc->crd_alg == ixp->ixp_auth_alg)
 | |
| +		q->ixp_q_acrd = crp->crp_desc;
 | |
| +	else {
 | |
| +		crp->crp_etype = ENOENT;
 | |
| +		dprintk("%s,%d: bad desc match: ENOENT\n", __FILE__, __LINE__);
 | |
| +		goto done;
 | |
| +	}
 | |
| +	if (crp->crp_desc->crd_next) {
 | |
| +		if (crp->crp_desc->crd_next->crd_alg == ixp->ixp_cipher_alg)
 | |
| +			q->ixp_q_ccrd = crp->crp_desc->crd_next;
 | |
| +		else if (crp->crp_desc->crd_next->crd_alg == ixp->ixp_auth_alg)
 | |
| +			q->ixp_q_acrd = crp->crp_desc->crd_next;
 | |
| +		else {
 | |
| +			crp->crp_etype = ENOENT;
 | |
| +			dprintk("%s,%d: bad desc match: ENOENT\n", __FILE__, __LINE__);
 | |
| +			goto done;
 | |
| +		}
 | |
| +	}
 | |
| +
 | |
| +	/*
 | |
| +	 * If there is a direction change for this context then we mark it as
 | |
| +	 * unregistered and re-register is for the new direction.  This is not
 | |
| +	 * a very expensive operation and currently only tends to happen when
 | |
| +	 * user-space application are doing benchmarks
 | |
| +	 *
 | |
| +	 * DM - we should be checking for pending requests before unregistering.
 | |
| +	 */
 | |
| +	if (q->ixp_q_ccrd && ixp->ixp_registered &&
 | |
| +			ixp->ixp_crd_flags != (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT)) {
 | |
| +		dprintk("%s - detected direction change on session\n", __FUNCTION__);
 | |
| +		ixp->ixp_registered = 0;
 | |
| +	}
 | |
| +
 | |
| +	/*
 | |
| +	 * if we are registered,  call straight into the perform code
 | |
| +	 */
 | |
| +	if (ixp->ixp_registered) {
 | |
| +		ixp_q_process(q);
 | |
| +		return 0;
 | |
| +	}
 | |
| +
 | |
| +	/*
 | |
| +	 * the only part of the context not set in newsession is the direction
 | |
| +	 * dependent parts
 | |
| +	 */
 | |
| +	if (q->ixp_q_ccrd) {
 | |
| +		ixp->ixp_crd_flags = (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT);
 | |
| +		if (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT) {
 | |
| +			ixp->ixp_ctx.operation = q->ixp_q_acrd ?
 | |
| +					IX_CRYPTO_ACC_OP_ENCRYPT_AUTH : IX_CRYPTO_ACC_OP_ENCRYPT;
 | |
| +		} else {
 | |
| +			ixp->ixp_ctx.operation = q->ixp_q_acrd ?
 | |
| +					IX_CRYPTO_ACC_OP_AUTH_DECRYPT : IX_CRYPTO_ACC_OP_DECRYPT;
 | |
| +		}
 | |
| +	} else {
 | |
| +		/* q->ixp_q_acrd must be set if we are here */
 | |
| +		ixp->ixp_ctx.operation = IX_CRYPTO_ACC_OP_AUTH_CALC;
 | |
| +	}
 | |
| +
 | |
| +	status = list_empty(&ixp->ixp_q);
 | |
| +	list_add_tail(&q->ixp_q_list, &ixp->ixp_q);
 | |
| +	if (status)
 | |
| +		schedule_work(&ixp->ixp_registration_work);
 | |
| +	return 0;
 | |
| +
 | |
| +done:
 | |
| +	if (q)
 | |
| +		kmem_cache_free(qcache, q);
 | |
| +	crypto_done(crp);
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +
 | |
| +#ifdef __ixp46X
 | |
| +/*
 | |
| + * key processing support for the ixp465
 | |
| + */
 | |
| +
 | |
| +
 | |
| +/*
 | |
| + * copy a BN (LE) into a buffer (BE) an fill out the op appropriately
 | |
| + * assume zeroed and only copy bits that are significant
 | |
| + */
 | |
| +
 | |
| +static int
 | |
| +ixp_copy_ibuf(struct crparam *p, IxCryptoAccPkeEauOperand *op, UINT32 *buf)
 | |
| +{
 | |
| +	unsigned char *src = (unsigned char *) p->crp_p;
 | |
| +	unsigned char *dst;
 | |
| +	int len, bits = p->crp_nbits;
 | |
| +
 | |
| +	dprintk("%s()\n", __FUNCTION__);
 | |
| +
 | |
| +	if (bits > MAX_IOP_SIZE * sizeof(UINT32) * 8) {
 | |
| +		dprintk("%s - ibuf too big (%d > %d)\n", __FUNCTION__,
 | |
| +				bits, MAX_IOP_SIZE * sizeof(UINT32) * 8);
 | |
| +		return -1;
 | |
| +	}
 | |
| +
 | |
| +	len = (bits + 31) / 32; /* the number UINT32's needed */
 | |
| +
 | |
| +	dst = (unsigned char *) &buf[len];
 | |
| +	dst--;
 | |
| +
 | |
| +	while (bits > 0) {
 | |
| +		*dst-- = *src++;
 | |
| +		bits -= 8;
 | |
| +	}
 | |
| +
 | |
| +#if 0 /* no need to zero remaining bits as it is done during request alloc */
 | |
| +	while (dst > (unsigned char *) buf)
 | |
| +		*dst-- = '\0';
 | |
| +#endif
 | |
| +
 | |
| +	op->pData = buf;
 | |
| +	op->dataLen = len;
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * copy out the result,  be as forgiving as we can about small output buffers
 | |
| + */
 | |
| +
 | |
| +static int
 | |
| +ixp_copy_obuf(struct crparam *p, IxCryptoAccPkeEauOpResult *op, UINT32 *buf)
 | |
| +{
 | |
| +	unsigned char *dst = (unsigned char *) p->crp_p;
 | |
| +	unsigned char *src = (unsigned char *) buf;
 | |
| +	int len, z, bits = p->crp_nbits;
 | |
| +
 | |
| +	dprintk("%s()\n", __FUNCTION__);
 | |
| +
 | |
| +	len = op->dataLen * sizeof(UINT32);
 | |
| +
 | |
| +	/* skip leading zeroes to be small buffer friendly */
 | |
| +	z = 0;
 | |
| +	while (z < len && src[z] == '\0')
 | |
| +		z++;
 | |
| +
 | |
| +	src += len;
 | |
| +	src--;
 | |
| +	len -= z;
 | |
| +
 | |
| +	while (len > 0 && bits > 0) {
 | |
| +		*dst++ = *src--;
 | |
| +		len--;
 | |
| +		bits -= 8;
 | |
| +	}
 | |
| +
 | |
| +	while (bits > 0) {
 | |
| +		*dst++ = '\0';
 | |
| +		bits -= 8;
 | |
| +	}
 | |
| +
 | |
| +	if (len > 0) {
 | |
| +		dprintk("%s - obuf is %d (z=%d, ob=%d) bytes too small\n",
 | |
| +				__FUNCTION__, len, z, p->crp_nbits / 8);
 | |
| +		return -1;
 | |
| +	}
 | |
| +
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +
 | |
| +/*
 | |
| + * the parameter offsets for exp_mod
 | |
| + */
 | |
| +
 | |
| +#define IXP_PARAM_BASE 0
 | |
| +#define IXP_PARAM_EXP  1
 | |
| +#define IXP_PARAM_MOD  2
 | |
| +#define IXP_PARAM_RES  3
 | |
| +
 | |
| +/*
 | |
| + * key processing complete callback,  is also used to start processing
 | |
| + * by passing a NULL for pResult
 | |
| + */
 | |
| +
 | |
| +static void
 | |
| +ixp_kperform_cb(
 | |
| +	IxCryptoAccPkeEauOperation operation,
 | |
| +	IxCryptoAccPkeEauOpResult *pResult,
 | |
| +	BOOL carryOrBorrow,
 | |
| +	IxCryptoAccStatus status)
 | |
| +{
 | |
| +	struct ixp_pkq *q, *tmp;
 | |
| +	unsigned long flags;
 | |
| +
 | |
| +	dprintk("%s(0x%x, %p, %d, 0x%x)\n", __FUNCTION__, operation, pResult,
 | |
| +			carryOrBorrow, status);
 | |
| +
 | |
| +	/* handle a completed request */
 | |
| +	if (pResult) {
 | |
| +		if (ixp_pk_cur && &ixp_pk_cur->pkq_result == pResult) {
 | |
| +			q = ixp_pk_cur;
 | |
| +			if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
 | |
| +				dprintk("%s() - op failed 0x%x\n", __FUNCTION__, status);
 | |
| +				q->pkq_krp->krp_status = ERANGE; /* could do better */
 | |
| +			} else {
 | |
| +				/* copy out the result */
 | |
| +				if (ixp_copy_obuf(&q->pkq_krp->krp_param[IXP_PARAM_RES],
 | |
| +						&q->pkq_result, q->pkq_obuf))
 | |
| +					q->pkq_krp->krp_status = ERANGE;
 | |
| +			}
 | |
| +			crypto_kdone(q->pkq_krp);
 | |
| +			kfree(q);
 | |
| +			ixp_pk_cur = NULL;
 | |
| +		} else
 | |
| +			printk("%s - callback with invalid result pointer\n", __FUNCTION__);
 | |
| +	}
 | |
| +
 | |
| +	spin_lock_irqsave(&ixp_pkq_lock, flags);
 | |
| +	if (ixp_pk_cur || list_empty(&ixp_pkq)) {
 | |
| +		spin_unlock_irqrestore(&ixp_pkq_lock, flags);
 | |
| +		return;
 | |
| +	}
 | |
| +
 | |
| +	list_for_each_entry_safe(q, tmp, &ixp_pkq, pkq_list) {
 | |
| +
 | |
| +		list_del(&q->pkq_list);
 | |
| +		ixp_pk_cur = q;
 | |
| +
 | |
| +		spin_unlock_irqrestore(&ixp_pkq_lock, flags);
 | |
| +
 | |
| +		status = ixCryptoAccPkeEauPerform(
 | |
| +				IX_CRYPTO_ACC_OP_EAU_MOD_EXP,
 | |
| +				&q->pkq_op,
 | |
| +				ixp_kperform_cb,
 | |
| +				&q->pkq_result);
 | |
| +
 | |
| +		if (status == IX_CRYPTO_ACC_STATUS_SUCCESS) {
 | |
| +			dprintk("%s() - ixCryptoAccPkeEauPerform SUCCESS\n", __FUNCTION__);
 | |
| +			return; /* callback will return here for callback */
 | |
| +		} else if (status == IX_CRYPTO_ACC_STATUS_RETRY) {
 | |
| +			printk("%s() - ixCryptoAccPkeEauPerform RETRY\n", __FUNCTION__);
 | |
| +		} else {
 | |
| +			printk("%s() - ixCryptoAccPkeEauPerform failed %d\n",
 | |
| +					__FUNCTION__, status);
 | |
| +		}
 | |
| +		q->pkq_krp->krp_status = ERANGE; /* could do better */
 | |
| +		crypto_kdone(q->pkq_krp);
 | |
| +		kfree(q);
 | |
| +		spin_lock_irqsave(&ixp_pkq_lock, flags);
 | |
| +	}
 | |
| +	spin_unlock_irqrestore(&ixp_pkq_lock, flags);
 | |
| +}
 | |
| +
 | |
| +
 | |
| +static int
 | |
| +ixp_kprocess(device_t dev, struct cryptkop *krp, int hint)
 | |
| +{
 | |
| +	struct ixp_pkq *q;
 | |
| +	int rc = 0;
 | |
| +	unsigned long flags;
 | |
| +
 | |
| +	dprintk("%s l1=%d l2=%d l3=%d l4=%d\n", __FUNCTION__,
 | |
| +			krp->krp_param[IXP_PARAM_BASE].crp_nbits,
 | |
| +			krp->krp_param[IXP_PARAM_EXP].crp_nbits,
 | |
| +			krp->krp_param[IXP_PARAM_MOD].crp_nbits,
 | |
| +			krp->krp_param[IXP_PARAM_RES].crp_nbits);
 | |
| +
 | |
| +
 | |
| +	if (krp->krp_op != CRK_MOD_EXP) {
 | |
| +		krp->krp_status = EOPNOTSUPP;
 | |
| +		goto err;
 | |
| +	}
 | |
| +
 | |
| +	q = (struct ixp_pkq *) kmalloc(sizeof(*q), GFP_KERNEL);
 | |
| +	if (q == NULL) {
 | |
| +		krp->krp_status = ENOMEM;
 | |
| +		goto err;
 | |
| +	}
 | |
| +
 | |
| +	/*
 | |
| +	 * The PKE engine does not appear to zero the output buffer
 | |
| +	 * appropriately, so we need to do it all here.
 | |
| +	 */
 | |
| +	memset(q, 0, sizeof(*q));
 | |
| +
 | |
| +	q->pkq_krp = krp;
 | |
| +	INIT_LIST_HEAD(&q->pkq_list);
 | |
| +
 | |
| +	if (ixp_copy_ibuf(&krp->krp_param[IXP_PARAM_BASE], &q->pkq_op.modExpOpr.M,
 | |
| +			q->pkq_ibuf0))
 | |
| +		rc = 1;
 | |
| +	if (!rc && ixp_copy_ibuf(&krp->krp_param[IXP_PARAM_EXP],
 | |
| +				&q->pkq_op.modExpOpr.e, q->pkq_ibuf1))
 | |
| +		rc = 2;
 | |
| +	if (!rc && ixp_copy_ibuf(&krp->krp_param[IXP_PARAM_MOD],
 | |
| +				&q->pkq_op.modExpOpr.N, q->pkq_ibuf2))
 | |
| +		rc = 3;
 | |
| +
 | |
| +	if (rc) {
 | |
| +		kfree(q);
 | |
| +		krp->krp_status = ERANGE;
 | |
| +		goto err;
 | |
| +	}
 | |
| +
 | |
| +	q->pkq_result.pData           = q->pkq_obuf;
 | |
| +	q->pkq_result.dataLen         =
 | |
| +			(krp->krp_param[IXP_PARAM_RES].crp_nbits + 31) / 32;
 | |
| +
 | |
| +	spin_lock_irqsave(&ixp_pkq_lock, flags);
 | |
| +	list_add_tail(&q->pkq_list, &ixp_pkq);
 | |
| +	spin_unlock_irqrestore(&ixp_pkq_lock, flags);
 | |
| +
 | |
| +	if (!ixp_pk_cur)
 | |
| +		ixp_kperform_cb(0, NULL, 0, 0);
 | |
| +	return (0);
 | |
| +
 | |
| +err:
 | |
| +	crypto_kdone(krp);
 | |
| +	return (0);
 | |
| +}
 | |
| +
 | |
| +
 | |
| +
 | |
| +#ifdef CONFIG_OCF_RANDOMHARVEST
 | |
| +/*
 | |
| + * We run the random number generator output through SHA so that it
 | |
| + * is FIPS compliant.
 | |
| + */
 | |
| +
 | |
| +static volatile int sha_done = 0;
 | |
| +static unsigned char sha_digest[20];
 | |
| +
 | |
| +static void
 | |
| +ixp_hash_cb(UINT8 *digest, IxCryptoAccStatus status)
 | |
| +{
 | |
| +	dprintk("%s(%p, %d)\n", __FUNCTION__, digest, status);
 | |
| +	if (sha_digest != digest)
 | |
| +		printk("digest error\n");
 | |
| +	if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
 | |
| +		sha_done = 1;
 | |
| +	else
 | |
| +		sha_done = -status;
 | |
| +}
 | |
| +
 | |
| +static int
 | |
| +ixp_read_random(void *arg, u_int32_t *buf, int maxwords)
 | |
| +{
 | |
| +	IxCryptoAccStatus status;
 | |
| +	int i, n, rc;
 | |
| +
 | |
| +	dprintk("%s(%p, %d)\n", __FUNCTION__, buf, maxwords);
 | |
| +	memset(buf, 0, maxwords * sizeof(*buf));
 | |
| +	status = ixCryptoAccPkePseudoRandomNumberGet(maxwords, buf);
 | |
| +	if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
 | |
| +		dprintk("%s: ixCryptoAccPkePseudoRandomNumberGet failed %d\n",
 | |
| +				__FUNCTION__, status);
 | |
| +		return 0;
 | |
| +	}
 | |
| +
 | |
| +	/*
 | |
| +	 * run the random data through SHA to make it look more random
 | |
| +	 */
 | |
| +
 | |
| +	n = sizeof(sha_digest); /* process digest bytes at a time */
 | |
| +
 | |
| +	rc = 0;
 | |
| +	for (i = 0; i < maxwords; i += n / sizeof(*buf)) {
 | |
| +		if ((maxwords - i) * sizeof(*buf) < n)
 | |
| +			n = (maxwords - i) * sizeof(*buf);
 | |
| +		sha_done = 0;
 | |
| +		status = ixCryptoAccPkeHashPerform(IX_CRYPTO_ACC_AUTH_SHA1,
 | |
| +				(UINT8 *) &buf[i], n, ixp_hash_cb, sha_digest);
 | |
| +		if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
 | |
| +			dprintk("ixCryptoAccPkeHashPerform failed %d\n", status);
 | |
| +			return -EIO;
 | |
| +		}
 | |
| +		while (!sha_done)
 | |
| +			schedule();
 | |
| +		if (sha_done < 0) {
 | |
| +			dprintk("ixCryptoAccPkeHashPerform failed CB %d\n", -sha_done);
 | |
| +			return 0;
 | |
| +		}
 | |
| +		memcpy(&buf[i], sha_digest, n);
 | |
| +		rc += n / sizeof(*buf);;
 | |
| +	}
 | |
| +
 | |
| +	return rc;
 | |
| +}
 | |
| +#endif /* CONFIG_OCF_RANDOMHARVEST */
 | |
| +
 | |
| +#endif /* __ixp46X */
 | |
| +
 | |
| +
 | |
| +
 | |
| +/*
 | |
| + * our driver startup and shutdown routines
 | |
| + */
 | |
| +
 | |
| +static int
 | |
| +ixp_init(void)
 | |
| +{
 | |
| +	dprintk("%s(%p)\n", __FUNCTION__, ixp_init);
 | |
| +
 | |
| +	if (ixp_init_crypto && ixCryptoAccInit() != IX_CRYPTO_ACC_STATUS_SUCCESS)
 | |
| +		printk("ixCryptoAccInit failed, assuming already initialised!\n");
 | |
| +
 | |
| +	qcache = kmem_cache_create("ixp4xx_q", sizeof(struct ixp_q), 0,
 | |
| +				SLAB_HWCACHE_ALIGN, NULL
 | |
| +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
 | |
| +				, NULL
 | |
| +#endif
 | |
| +				  );
 | |
| +	if (!qcache) {
 | |
| +		printk("failed to create Qcache\n");
 | |
| +		return -ENOENT;
 | |
| +	}
 | |
| +
 | |
| +	memset(&ixpdev, 0, sizeof(ixpdev));
 | |
| +	softc_device_init(&ixpdev, "ixp4xx", 0, ixp_methods);
 | |
| +
 | |
| +	ixp_id = crypto_get_driverid(softc_get_device(&ixpdev),
 | |
| +				CRYPTOCAP_F_HARDWARE);
 | |
| +	if (ixp_id < 0)
 | |
| +		panic("IXP/OCF crypto device cannot initialize!");
 | |
| +
 | |
| +#define	REGISTER(alg) \
 | |
| +	crypto_register(ixp_id,alg,0,0)
 | |
| +
 | |
| +	REGISTER(CRYPTO_DES_CBC);
 | |
| +	REGISTER(CRYPTO_3DES_CBC);
 | |
| +	REGISTER(CRYPTO_RIJNDAEL128_CBC);
 | |
| +#ifdef CONFIG_OCF_IXP4XX_SHA1_MD5
 | |
| +	REGISTER(CRYPTO_MD5);
 | |
| +	REGISTER(CRYPTO_SHA1);
 | |
| +#endif
 | |
| +	REGISTER(CRYPTO_MD5_HMAC);
 | |
| +	REGISTER(CRYPTO_SHA1_HMAC);
 | |
| +#undef REGISTER
 | |
| +
 | |
| +#ifdef __ixp46X
 | |
| +	spin_lock_init(&ixp_pkq_lock);
 | |
| +	/*
 | |
| +	 * we do not enable the go fast options here as they can potentially
 | |
| +	 * allow timing based attacks
 | |
| +	 *
 | |
| +	 * http://www.openssl.org/news/secadv_20030219.txt
 | |
| +	 */
 | |
| +	ixCryptoAccPkeEauExpConfig(0, 0);
 | |
| +	crypto_kregister(ixp_id, CRK_MOD_EXP, 0);
 | |
| +#ifdef CONFIG_OCF_RANDOMHARVEST
 | |
| +	crypto_rregister(ixp_id, ixp_read_random, NULL);
 | |
| +#endif
 | |
| +#endif
 | |
| +
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +static void
 | |
| +ixp_exit(void)
 | |
| +{
 | |
| +	dprintk("%s()\n", __FUNCTION__);
 | |
| +	crypto_unregister_all(ixp_id);
 | |
| +	ixp_id = -1;
 | |
| +	kmem_cache_destroy(qcache);
 | |
| +	qcache = NULL;
 | |
| +}
 | |
| +
 | |
| +module_init(ixp_init);
 | |
| +module_exit(ixp_exit);
 | |
| +
 | |
| +MODULE_LICENSE("Dual BSD/GPL");
 | |
| +MODULE_AUTHOR("David McCullough <dmccullough@cyberguard.com>");
 | |
| +MODULE_DESCRIPTION("ixp (OCF module for IXP4xx crypto)");
 | |
| --- /dev/null
 | |
| +++ b/crypto/ocf/cryptodev.c
 | |
| @@ -0,0 +1,1048 @@
 | |
| +/*	$OpenBSD: cryptodev.c,v 1.52 2002/06/19 07:22:46 deraadt Exp $	*/
 | |
| +
 | |
| +/*-
 | |
| + * Linux port done by David McCullough <david_mccullough@securecomputing.com>
 | |
| + * Copyright (C) 2006-2007 David McCullough
 | |
| + * Copyright (C) 2004-2005 Intel Corporation.
 | |
| + * The license and original author are listed below.
 | |
| + *
 | |
| + * Copyright (c) 2001 Theo de Raadt
 | |
| + * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
 | |
| + *
 | |
| + * Redistribution and use in source and binary forms, with or without
 | |
| + * modification, are permitted provided that the following conditions
 | |
| + * are met:
 | |
| + *
 | |
| + * 1. Redistributions of source code must retain the above copyright
 | |
| + *   notice, this list of conditions and the following disclaimer.
 | |
| + * 2. Redistributions in binary form must reproduce the above copyright
 | |
| + *   notice, this list of conditions and the following disclaimer in the
 | |
| + *   documentation and/or other materials provided with the distribution.
 | |
| + * 3. The name of the author may not be used to endorse or promote products
 | |
| + *   derived from this software without specific prior written permission.
 | |
| + *
 | |
| + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 | |
| + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 | |
| + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 | |
| + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 | |
| + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 | |
| + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 | |
| + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 | |
| + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 | |
| + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 | |
| + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 | |
| + *
 | |
| + * Effort sponsored in part by the Defense Advanced Research Projects
 | |
| + * Agency (DARPA) and Air Force Research Laboratory, Air Force
 | |
| + * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 | |
| + *
 | |
| +__FBSDID("$FreeBSD: src/sys/opencrypto/cryptodev.c,v 1.34 2007/05/09 19:37:02 gnn Exp $");
 | |
| + */
 | |
| +
 | |
| +#ifndef AUTOCONF_INCLUDED
 | |
| +#include <linux/config.h>
 | |
| +#endif
 | |
| +#include <linux/types.h>
 | |
| +#include <linux/time.h>
 | |
| +#include <linux/delay.h>
 | |
| +#include <linux/list.h>
 | |
| +#include <linux/init.h>
 | |
| +#include <linux/sched.h>
 | |
| +#include <linux/unistd.h>
 | |
| +#include <linux/module.h>
 | |
| +#include <linux/wait.h>
 | |
| +#include <linux/slab.h>
 | |
| +#include <linux/fs.h>
 | |
| +#include <linux/dcache.h>
 | |
| +#include <linux/fdtable.h>
 | |
| +#include <linux/mount.h>
 | |
| +#include <linux/miscdevice.h>
 | |
| +#include <linux/version.h>
 | |
| +#include <asm/uaccess.h>
 | |
| +
 | |
| +#include <cryptodev.h>
 | |
| +#include <uio.h>
 | |
| +
 | |
| +extern asmlinkage long sys_dup(unsigned int fildes);
 | |
| +
 | |
| +#define debug cryptodev_debug
 | |
| +int cryptodev_debug = 0;
 | |
| +module_param(cryptodev_debug, int, 0644);
 | |
| +MODULE_PARM_DESC(cryptodev_debug, "Enable cryptodev debug");
 | |
| +
 | |
| +struct csession_info {
 | |
| +	u_int16_t	blocksize;
 | |
| +	u_int16_t	minkey, maxkey;
 | |
| +
 | |
| +	u_int16_t	keysize;
 | |
| +	/* u_int16_t	hashsize;  */
 | |
| +	u_int16_t	authsize;
 | |
| +	/* u_int16_t	ctxsize; */
 | |
| +};
 | |
| +
 | |
| +struct csession {
 | |
| +	struct list_head	list;
 | |
| +	u_int64_t	sid;
 | |
| +	u_int32_t	ses;
 | |
| +
 | |
| +	wait_queue_head_t waitq;
 | |
| +
 | |
| +	u_int32_t	cipher;
 | |
| +
 | |
| +	u_int32_t	mac;
 | |
| +
 | |
| +	caddr_t		key;
 | |
| +	int		keylen;
 | |
| +	u_char		tmp_iv[EALG_MAX_BLOCK_LEN];
 | |
| +
 | |
| +	caddr_t		mackey;
 | |
| +	int		mackeylen;
 | |
| +
 | |
| +	struct csession_info info;
 | |
| +
 | |
| +	struct iovec	iovec;
 | |
| +	struct uio	uio;
 | |
| +	int		error;
 | |
| +};
 | |
| +
 | |
| +struct fcrypt {
 | |
| +	struct list_head	csessions;
 | |
| +	int		sesn;
 | |
| +};
 | |
| +
 | |
| +static struct csession *csefind(struct fcrypt *, u_int);
 | |
| +static int csedelete(struct fcrypt *, struct csession *);
 | |
| +static struct csession *cseadd(struct fcrypt *, struct csession *);
 | |
| +static struct csession *csecreate(struct fcrypt *, u_int64_t,
 | |
| +		struct cryptoini *crie, struct cryptoini *cria, struct csession_info *);
 | |
| +static int csefree(struct csession *);
 | |
| +
 | |
| +static	int cryptodev_op(struct csession *, struct crypt_op *);
 | |
| +static	int cryptodev_key(struct crypt_kop *);
 | |
| +static	int cryptodev_find(struct crypt_find_op *);
 | |
| +
 | |
| +static int cryptodev_cb(void *);
 | |
| +static int cryptodev_open(struct inode *inode, struct file *filp);
 | |
| +
 | |
| +/*
 | |
| + * Check a crypto identifier to see if it requested
 | |
| + * a valid crid and it's capabilities match.
 | |
| + */
 | |
| +static int
 | |
| +checkcrid(int crid)
 | |
| +{
 | |
| +	int hid = crid & ~(CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_HARDWARE);
 | |
| +	int typ = crid & (CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_HARDWARE);
 | |
| +	int caps = 0;
 | |
| +
 | |
| +	/* if the user hasn't selected a driver, then just call newsession */
 | |
| +	if (hid == 0 && typ != 0)
 | |
| +		return 0;
 | |
| +
 | |
| +	caps = crypto_getcaps(hid);
 | |
| +
 | |
| +	/* didn't find anything with capabilities */
 | |
| +	if (caps == 0) {
 | |
| +		dprintk("%s: hid=%x typ=%x not matched\n", __FUNCTION__, hid, typ);
 | |
| +		return EINVAL;
 | |
| +	}
 | |
| +
 | |
| +	/* the user didn't specify SW or HW, so the driver is ok */
 | |
| +	if (typ == 0)
 | |
| +		return 0;
 | |
| +
 | |
| +	/* if the type specified didn't match */
 | |
| +	if (typ != (caps & (CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_HARDWARE))) {
 | |
| +		dprintk("%s: hid=%x typ=%x caps=%x not matched\n", __FUNCTION__,
 | |
| +				hid, typ, caps);
 | |
| +		return EINVAL;
 | |
| +	}
 | |
| +
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +static int
 | |
| +cryptodev_op(struct csession *cse, struct crypt_op *cop)
 | |
| +{
 | |
| +	struct cryptop *crp = NULL;
 | |
| +	struct cryptodesc *crde = NULL, *crda = NULL;
 | |
| +	int error = 0;
 | |
| +
 | |
| +	dprintk("%s()\n", __FUNCTION__);
 | |
| +	if (cop->len > CRYPTO_MAX_DATA_LEN) {
 | |
| +		dprintk("%s: %d > %d\n", __FUNCTION__, cop->len, CRYPTO_MAX_DATA_LEN);
 | |
| +		return (E2BIG);
 | |
| +	}
 | |
| +
 | |
| +	if (cse->info.blocksize && (cop->len % cse->info.blocksize) != 0) {
 | |
| +		dprintk("%s: blocksize=%d len=%d\n", __FUNCTION__, cse->info.blocksize,
 | |
| +				cop->len);
 | |
| +		return (EINVAL);
 | |
| +	}
 | |
| +
 | |
| +	cse->uio.uio_iov = &cse->iovec;
 | |
| +	cse->uio.uio_iovcnt = 1;
 | |
| +	cse->uio.uio_offset = 0;
 | |
| +#if 0
 | |
| +	cse->uio.uio_resid = cop->len;
 | |
| +	cse->uio.uio_segflg = UIO_SYSSPACE;
 | |
| +	cse->uio.uio_rw = UIO_WRITE;
 | |
| +	cse->uio.uio_td = td;
 | |
| +#endif
 | |
| +	cse->uio.uio_iov[0].iov_len = cop->len;
 | |
| +	if (cse->info.authsize)
 | |
| +		cse->uio.uio_iov[0].iov_len += cse->info.authsize;
 | |
| +	cse->uio.uio_iov[0].iov_base = kmalloc(cse->uio.uio_iov[0].iov_len,
 | |
| +			GFP_KERNEL);
 | |
| +
 | |
| +	if (cse->uio.uio_iov[0].iov_base == NULL) {
 | |
| +		dprintk("%s: iov_base kmalloc(%d) failed\n", __FUNCTION__,
 | |
| +				cse->uio.uio_iov[0].iov_len);
 | |
| +		return (ENOMEM);
 | |
| +	}
 | |
| +
 | |
| +	crp = crypto_getreq((cse->info.blocksize != 0) + (cse->info.authsize != 0));
 | |
| +	if (crp == NULL) {
 | |
| +		dprintk("%s: ENOMEM\n", __FUNCTION__);
 | |
| +		error = ENOMEM;
 | |
| +		goto bail;
 | |
| +	}
 | |
| +
 | |
| +	if (cse->info.authsize) {
 | |
| +		crda = crp->crp_desc;
 | |
| +		if (cse->info.blocksize)
 | |
| +			crde = crda->crd_next;
 | |
| +	} else {
 | |
| +		if (cse->info.blocksize)
 | |
| +			crde = crp->crp_desc;
 | |
| +		else {
 | |
| +			dprintk("%s: bad request\n", __FUNCTION__);
 | |
| +			error = EINVAL;
 | |
| +			goto bail;
 | |
| +		}
 | |
| +	}
 | |
| +
 | |
| +	if ((error = copy_from_user(cse->uio.uio_iov[0].iov_base, cop->src,
 | |
| +					cop->len))) {
 | |
| +		dprintk("%s: bad copy\n", __FUNCTION__);
 | |
| +		goto bail;
 | |
| +	}
 | |
| +
 | |
| +	if (crda) {
 | |
| +		crda->crd_skip = 0;
 | |
| +		crda->crd_len = cop->len;
 | |
| +		crda->crd_inject = cop->len;
 | |
| +
 | |
| +		crda->crd_alg = cse->mac;
 | |
| +		crda->crd_key = cse->mackey;
 | |
| +		crda->crd_klen = cse->mackeylen * 8;
 | |
| +	}
 | |
| +
 | |
| +	if (crde) {
 | |
| +		if (cop->op == COP_ENCRYPT)
 | |
| +			crde->crd_flags |= CRD_F_ENCRYPT;
 | |
| +		else
 | |
| +			crde->crd_flags &= ~CRD_F_ENCRYPT;
 | |
| +		crde->crd_len = cop->len;
 | |
| +		crde->crd_inject = 0;
 | |
| +
 | |
| +		crde->crd_alg = cse->cipher;
 | |
| +		crde->crd_key = cse->key;
 | |
| +		crde->crd_klen = cse->keylen * 8;
 | |
| +	}
 | |
| +
 | |
| +	crp->crp_ilen = cse->uio.uio_iov[0].iov_len;
 | |
| +	crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_CBIMM
 | |
| +		       | (cop->flags & COP_F_BATCH);
 | |
| +	crp->crp_buf = (caddr_t)&cse->uio;
 | |
| +	crp->crp_callback = (int (*) (struct cryptop *)) cryptodev_cb;
 | |
| +	crp->crp_sid = cse->sid;
 | |
| +	crp->crp_opaque = (void *)cse;
 | |
| +
 | |
| +	if (cop->iv) {
 | |
| +		if (crde == NULL) {
 | |
| +			error = EINVAL;
 | |
| +			dprintk("%s no crde\n", __FUNCTION__);
 | |
| +			goto bail;
 | |
| +		}
 | |
| +		if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */
 | |
| +			error = EINVAL;
 | |
| +			dprintk("%s arc4 with IV\n", __FUNCTION__);
 | |
| +			goto bail;
 | |
| +		}
 | |
| +		if ((error = copy_from_user(cse->tmp_iv, cop->iv,
 | |
| +						cse->info.blocksize))) {
 | |
| +			dprintk("%s bad iv copy\n", __FUNCTION__);
 | |
| +			goto bail;
 | |
| +		}
 | |
| +		memcpy(crde->crd_iv, cse->tmp_iv, cse->info.blocksize);
 | |
| +		crde->crd_flags |= CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
 | |
| +		crde->crd_skip = 0;
 | |
| +	} else if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */
 | |
| +		crde->crd_skip = 0;
 | |
| +	} else if (crde) {
 | |
| +		crde->crd_flags |= CRD_F_IV_PRESENT;
 | |
| +		crde->crd_skip = cse->info.blocksize;
 | |
| +		crde->crd_len -= cse->info.blocksize;
 | |
| +	}
 | |
| +
 | |
| +	if (cop->mac && crda == NULL) {
 | |
| +		error = EINVAL;
 | |
| +		dprintk("%s no crda\n", __FUNCTION__);
 | |
| +		goto bail;
 | |
| +	}
 | |
| +
 | |
| +	/*
 | |
| +	 * Let the dispatch run unlocked, then, interlock against the
 | |
| +	 * callback before checking if the operation completed and going
 | |
| +	 * to sleep.  This insures drivers don't inherit our lock which
 | |
| +	 * results in a lock order reversal between crypto_dispatch forced
 | |
| +	 * entry and the crypto_done callback into us.
 | |
| +	 */
 | |
| +	error = crypto_dispatch(crp);
 | |
| +	if (error == 0) {
 | |
| +		dprintk("%s about to WAIT\n", __FUNCTION__);
 | |
| +		/*
 | |
| +		 * we really need to wait for driver to complete to maintain
 | |
| +		 * state,  luckily interrupts will be remembered
 | |
| +		 */
 | |
| +		do {
 | |
| +			error = wait_event_interruptible(crp->crp_waitq,
 | |
| +					((crp->crp_flags & CRYPTO_F_DONE) != 0));
 | |
| +			/*
 | |
| +			 * we can't break out of this loop or we will leave behind
 | |
| +			 * a huge mess,  however,  staying here means if your driver
 | |
| +			 * is broken user applications can hang and not be killed.
 | |
| +			 * The solution,  fix your driver :-)
 | |
| +			 */
 | |
| +			if (error) {
 | |
| +				schedule();
 | |
| +				error = 0;
 | |
| +			}
 | |
| +		} while ((crp->crp_flags & CRYPTO_F_DONE) == 0);
 | |
| +		dprintk("%s finished WAITING error=%d\n", __FUNCTION__, error);
 | |
| +	}
 | |
| +
 | |
| +	if (crp->crp_etype != 0) {
 | |
| +		error = crp->crp_etype;
 | |
| +		dprintk("%s error in crp processing\n", __FUNCTION__);
 | |
| +		goto bail;
 | |
| +	}
 | |
| +
 | |
| +	if (cse->error) {
 | |
| +		error = cse->error;
 | |
| +		dprintk("%s error in cse processing\n", __FUNCTION__);
 | |
| +		goto bail;
 | |
| +	}
 | |
| +
 | |
| +	if (cop->dst && (error = copy_to_user(cop->dst,
 | |
| +					cse->uio.uio_iov[0].iov_base, cop->len))) {
 | |
| +		dprintk("%s bad dst copy\n", __FUNCTION__);
 | |
| +		goto bail;
 | |
| +	}
 | |
| +
 | |
| +	if (cop->mac &&
 | |
| +			(error=copy_to_user(cop->mac,
 | |
| +				(caddr_t)cse->uio.uio_iov[0].iov_base + cop->len,
 | |
| +				cse->info.authsize))) {
 | |
| +		dprintk("%s bad mac copy\n", __FUNCTION__);
 | |
| +		goto bail;
 | |
| +	}
 | |
| +
 | |
| +bail:
 | |
| +	if (crp)
 | |
| +		crypto_freereq(crp);
 | |
| +	if (cse->uio.uio_iov[0].iov_base)
 | |
| +		kfree(cse->uio.uio_iov[0].iov_base);
 | |
| +
 | |
| +	return (error);
 | |
| +}
 | |
| +
 | |
| +static int
 | |
| +cryptodev_cb(void *op)
 | |
| +{
 | |
| +	struct cryptop *crp = (struct cryptop *) op;
 | |
| +	struct csession *cse = (struct csession *)crp->crp_opaque;
 | |
| +	int error;
 | |
| +
 | |
| +	dprintk("%s()\n", __FUNCTION__);
 | |
| +	error = crp->crp_etype;
 | |
| +	if (error == EAGAIN) {
 | |
| +		crp->crp_flags &= ~CRYPTO_F_DONE;
 | |
| +#ifdef NOTYET
 | |
| +		/*
 | |
| +		 * DAVIDM I am fairly sure that we should turn this into a batch
 | |
| +		 * request to stop bad karma/lockup, revisit
 | |
| +		 */
 | |
| +		crp->crp_flags |= CRYPTO_F_BATCH;
 | |
| +#endif
 | |
| +		return crypto_dispatch(crp);
 | |
| +	}
 | |
| +	if (error != 0 || (crp->crp_flags & CRYPTO_F_DONE)) {
 | |
| +		cse->error = error;
 | |
| +		wake_up_interruptible(&crp->crp_waitq);
 | |
| +	}
 | |
| +	return (0);
 | |
| +}
 | |
| +
 | |
| +static int
 | |
| +cryptodevkey_cb(void *op)
 | |
| +{
 | |
| +	struct cryptkop *krp = (struct cryptkop *) op;
 | |
| +	dprintk("%s()\n", __FUNCTION__);
 | |
| +	wake_up_interruptible(&krp->krp_waitq);
 | |
| +	return (0);
 | |
| +}
 | |
| +
 | |
| +static int
 | |
| +cryptodev_key(struct crypt_kop *kop)
 | |
| +{
 | |
| +	struct cryptkop *krp = NULL;
 | |
| +	int error = EINVAL;
 | |
| +	int in, out, size, i;
 | |
| +
 | |
| +	dprintk("%s()\n", __FUNCTION__);
 | |
| +	if (kop->crk_iparams + kop->crk_oparams > CRK_MAXPARAM) {
 | |
| +		dprintk("%s params too big\n", __FUNCTION__);
 | |
| +		return (EFBIG);
 | |
| +	}
 | |
| +
 | |
| +	in = kop->crk_iparams;
 | |
| +	out = kop->crk_oparams;
 | |
| +	switch (kop->crk_op) {
 | |
| +	case CRK_MOD_EXP:
 | |
| +		if (in == 3 && out == 1)
 | |
| +			break;
 | |
| +		return (EINVAL);
 | |
| +	case CRK_MOD_EXP_CRT:
 | |
| +		if (in == 6 && out == 1)
 | |
| +			break;
 | |
| +		return (EINVAL);
 | |
| +	case CRK_DSA_SIGN:
 | |
| +		if (in == 5 && out == 2)
 | |
| +			break;
 | |
| +		return (EINVAL);
 | |
| +	case CRK_DSA_VERIFY:
 | |
| +		if (in == 7 && out == 0)
 | |
| +			break;
 | |
| +		return (EINVAL);
 | |
| +	case CRK_DH_COMPUTE_KEY:
 | |
| +		if (in == 3 && out == 1)
 | |
| +			break;
 | |
| +		return (EINVAL);
 | |
| +	default:
 | |
| +		return (EINVAL);
 | |
| +	}
 | |
| +
 | |
| +	krp = (struct cryptkop *)kmalloc(sizeof *krp, GFP_KERNEL);
 | |
| +	if (!krp)
 | |
| +		return (ENOMEM);
 | |
| +	bzero(krp, sizeof *krp);
 | |
| +	krp->krp_op = kop->crk_op;
 | |
| +	krp->krp_status = kop->crk_status;
 | |
| +	krp->krp_iparams = kop->crk_iparams;
 | |
| +	krp->krp_oparams = kop->crk_oparams;
 | |
| +	krp->krp_crid = kop->crk_crid;
 | |
| +	krp->krp_status = 0;
 | |
| +	krp->krp_flags = CRYPTO_KF_CBIMM;
 | |
| +	krp->krp_callback = (int (*) (struct cryptkop *)) cryptodevkey_cb;
 | |
| +	init_waitqueue_head(&krp->krp_waitq);
 | |
| +
 | |
| +	for (i = 0; i < CRK_MAXPARAM; i++)
 | |
| +		krp->krp_param[i].crp_nbits = kop->crk_param[i].crp_nbits;
 | |
| +	for (i = 0; i < krp->krp_iparams + krp->krp_oparams; i++) {
 | |
| +		size = (krp->krp_param[i].crp_nbits + 7) / 8;
 | |
| +		if (size == 0)
 | |
| +			continue;
 | |
| +		krp->krp_param[i].crp_p = (caddr_t) kmalloc(size, GFP_KERNEL);
 | |
| +		if (i >= krp->krp_iparams)
 | |
| +			continue;
 | |
| +		error = copy_from_user(krp->krp_param[i].crp_p,
 | |
| +				kop->crk_param[i].crp_p, size);
 | |
| +		if (error)
 | |
| +			goto fail;
 | |
| +	}
 | |
| +
 | |
| +	error = crypto_kdispatch(krp);
 | |
| +	if (error)
 | |
| +		goto fail;
 | |
| +
 | |
| +	do {
 | |
| +		error = wait_event_interruptible(krp->krp_waitq,
 | |
| +				((krp->krp_flags & CRYPTO_KF_DONE) != 0));
 | |
| +		/*
 | |
| +		 * we can't break out of this loop or we will leave behind
 | |
| +		 * a huge mess,  however,  staying here means if your driver
 | |
| +		 * is broken user applications can hang and not be killed.
 | |
| +		 * The solution,  fix your driver :-)
 | |
| +		 */
 | |
| +		if (error) {
 | |
| +			schedule();
 | |
| +			error = 0;
 | |
| +		}
 | |
| +	} while ((krp->krp_flags & CRYPTO_KF_DONE) == 0);
 | |
| +
 | |
| +	dprintk("%s finished WAITING error=%d\n", __FUNCTION__, error);
 | |
| +
 | |
| +	kop->crk_crid = krp->krp_crid;		/* device that did the work */
 | |
| +	if (krp->krp_status != 0) {
 | |
| +		error = krp->krp_status;
 | |
| +		goto fail;
 | |
| +	}
 | |
| +
 | |
| +	for (i = krp->krp_iparams; i < krp->krp_iparams + krp->krp_oparams; i++) {
 | |
| +		size = (krp->krp_param[i].crp_nbits + 7) / 8;
 | |
| +		if (size == 0)
 | |
| +			continue;
 | |
| +		error = copy_to_user(kop->crk_param[i].crp_p, krp->krp_param[i].crp_p,
 | |
| +				size);
 | |
| +		if (error)
 | |
| +			goto fail;
 | |
| +	}
 | |
| +
 | |
| +fail:
 | |
| +	if (krp) {
 | |
| +		kop->crk_status = krp->krp_status;
 | |
| +		for (i = 0; i < CRK_MAXPARAM; i++) {
 | |
| +			if (krp->krp_param[i].crp_p)
 | |
| +				kfree(krp->krp_param[i].crp_p);
 | |
| +		}
 | |
| +		kfree(krp);
 | |
| +	}
 | |
| +	return (error);
 | |
| +}
 | |
| +
 | |
| +static int
 | |
| +cryptodev_find(struct crypt_find_op *find)
 | |
| +{
 | |
| +	device_t dev;
 | |
| +
 | |
| +	if (find->crid != -1) {
 | |
| +		dev = crypto_find_device_byhid(find->crid);
 | |
| +		if (dev == NULL)
 | |
| +			return (ENOENT);
 | |
| +		strlcpy(find->name, device_get_nameunit(dev),
 | |
| +		    sizeof(find->name));
 | |
| +	} else {
 | |
| +		find->crid = crypto_find_driver(find->name);
 | |
| +		if (find->crid == -1)
 | |
| +			return (ENOENT);
 | |
| +	}
 | |
| +	return (0);
 | |
| +}
 | |
| +
 | |
| +static struct csession *
 | |
| +csefind(struct fcrypt *fcr, u_int ses)
 | |
| +{
 | |
| +	struct csession *cse;
 | |
| +
 | |
| +	dprintk("%s()\n", __FUNCTION__);
 | |
| +	list_for_each_entry(cse, &fcr->csessions, list)
 | |
| +		if (cse->ses == ses)
 | |
| +			return (cse);
 | |
| +	return (NULL);
 | |
| +}
 | |
| +
 | |
| +static int
 | |
| +csedelete(struct fcrypt *fcr, struct csession *cse_del)
 | |
| +{
 | |
| +	struct csession *cse;
 | |
| +
 | |
| +	dprintk("%s()\n", __FUNCTION__);
 | |
| +	list_for_each_entry(cse, &fcr->csessions, list) {
 | |
| +		if (cse == cse_del) {
 | |
| +			list_del(&cse->list);
 | |
| +			return (1);
 | |
| +		}
 | |
| +	}
 | |
| +	return (0);
 | |
| +}
 | |
| +
 | |
| +static struct csession *
 | |
| +cseadd(struct fcrypt *fcr, struct csession *cse)
 | |
| +{
 | |
| +	dprintk("%s()\n", __FUNCTION__);
 | |
| +	list_add_tail(&cse->list, &fcr->csessions);
 | |
| +	cse->ses = fcr->sesn++;
 | |
| +	return (cse);
 | |
| +}
 | |
| +
 | |
| +static struct csession *
 | |
| +csecreate(struct fcrypt *fcr, u_int64_t sid, struct cryptoini *crie,
 | |
| +	struct cryptoini *cria, struct csession_info *info)
 | |
| +{
 | |
| +	struct csession *cse;
 | |
| +
 | |
| +	dprintk("%s()\n", __FUNCTION__);
 | |
| +	cse = (struct csession *) kmalloc(sizeof(struct csession), GFP_KERNEL);
 | |
| +	if (cse == NULL)
 | |
| +		return NULL;
 | |
| +	memset(cse, 0, sizeof(struct csession));
 | |
| +
 | |
| +	INIT_LIST_HEAD(&cse->list);
 | |
| +	init_waitqueue_head(&cse->waitq);
 | |
| +
 | |
| +	cse->key = crie->cri_key;
 | |
| +	cse->keylen = crie->cri_klen/8;
 | |
| +	cse->mackey = cria->cri_key;
 | |
| +	cse->mackeylen = cria->cri_klen/8;
 | |
| +	cse->sid = sid;
 | |
| +	cse->cipher = crie->cri_alg;
 | |
| +	cse->mac = cria->cri_alg;
 | |
| +	cse->info = *info;
 | |
| +	cseadd(fcr, cse);
 | |
| +	return (cse);
 | |
| +}
 | |
| +
 | |
| +static int
 | |
| +csefree(struct csession *cse)
 | |
| +{
 | |
| +	int error;
 | |
| +
 | |
| +	dprintk("%s()\n", __FUNCTION__);
 | |
| +	error = crypto_freesession(cse->sid);
 | |
| +	if (cse->key)
 | |
| +		kfree(cse->key);
 | |
| +	if (cse->mackey)
 | |
| +		kfree(cse->mackey);
 | |
| +	kfree(cse);
 | |
| +	return(error);
 | |
| +}
 | |
| +
 | |
| +static int
 | |
| +cryptodev_ioctl(
 | |
| +	struct inode *inode,
 | |
| +	struct file *filp,
 | |
| +	unsigned int cmd,
 | |
| +	unsigned long arg)
 | |
| +{
 | |
| +	struct cryptoini cria, crie;
 | |
| +	struct fcrypt *fcr = filp->private_data;
 | |
| +	struct csession *cse;
 | |
| +	struct csession_info info;
 | |
| +	struct session2_op sop;
 | |
| +	struct crypt_op cop;
 | |
| +	struct crypt_kop kop;
 | |
| +	struct crypt_find_op fop;
 | |
| +	u_int64_t sid;
 | |
| +	u_int32_t ses;
 | |
| +	int feat, fd, error = 0, crid;
 | |
| +	mm_segment_t fs;
 | |
| +
 | |
| +	dprintk("%s(cmd=%x arg=%lx)\n", __FUNCTION__, cmd, arg);
 | |
| +
 | |
| +	switch (cmd) {
 | |
| +
 | |
| +	case CRIOGET: {
 | |
| +		dprintk("%s(CRIOGET)\n", __FUNCTION__);
 | |
| +		fs = get_fs();
 | |
| +		set_fs(get_ds());
 | |
| +		for (fd = 0; fd < files_fdtable(current->files)->max_fds; fd++)
 | |
| +			if (files_fdtable(current->files)->fd[fd] == filp)
 | |
| +				break;
 | |
| +		fd = sys_dup(fd);
 | |
| +		set_fs(fs);
 | |
| +		put_user(fd, (int *) arg);
 | |
| +		return IS_ERR_VALUE(fd) ? fd : 0;
 | |
| +		}
 | |
| +
 | |
| +#define	CIOCGSESSSTR	(cmd == CIOCGSESSION ? "CIOCGSESSION" : "CIOCGSESSION2")
 | |
| +	case CIOCGSESSION:
 | |
| +	case CIOCGSESSION2:
 | |
| +		dprintk("%s(%s)\n", __FUNCTION__, CIOCGSESSSTR);
 | |
| +		memset(&crie, 0, sizeof(crie));
 | |
| +		memset(&cria, 0, sizeof(cria));
 | |
| +		memset(&info, 0, sizeof(info));
 | |
| +		memset(&sop, 0, sizeof(sop));
 | |
| +
 | |
| +		if (copy_from_user(&sop, (void*)arg, (cmd == CIOCGSESSION) ?
 | |
| +					sizeof(struct session_op) : sizeof(sop))) {
 | |
| +			dprintk("%s(%s) - bad copy\n", __FUNCTION__, CIOCGSESSSTR);
 | |
| +			error = EFAULT;
 | |
| +			goto bail;
 | |
| +		}
 | |
| +
 | |
| +		switch (sop.cipher) {
 | |
| +		case 0:
 | |
| +			dprintk("%s(%s) - no cipher\n", __FUNCTION__, CIOCGSESSSTR);
 | |
| +			break;
 | |
| +		case CRYPTO_NULL_CBC:
 | |
| +			info.blocksize = NULL_BLOCK_LEN;
 | |
| +			info.minkey = NULL_MIN_KEY_LEN;
 | |
| +			info.maxkey = NULL_MAX_KEY_LEN;
 | |
| +			break;
 | |
| +		case CRYPTO_DES_CBC:
 | |
| +			info.blocksize = DES_BLOCK_LEN;
 | |
| +			info.minkey = DES_MIN_KEY_LEN;
 | |
| +			info.maxkey = DES_MAX_KEY_LEN;
 | |
| +			break;
 | |
| +		case CRYPTO_3DES_CBC:
 | |
| +			info.blocksize = DES3_BLOCK_LEN;
 | |
| +			info.minkey = DES3_MIN_KEY_LEN;
 | |
| +			info.maxkey = DES3_MAX_KEY_LEN;
 | |
| +			break;
 | |
| +		case CRYPTO_BLF_CBC:
 | |
| +			info.blocksize = BLOWFISH_BLOCK_LEN;
 | |
| +			info.minkey = BLOWFISH_MIN_KEY_LEN;
 | |
| +			info.maxkey = BLOWFISH_MAX_KEY_LEN;
 | |
| +			break;
 | |
| +		case CRYPTO_CAST_CBC:
 | |
| +			info.blocksize = CAST128_BLOCK_LEN;
 | |
| +			info.minkey = CAST128_MIN_KEY_LEN;
 | |
| +			info.maxkey = CAST128_MAX_KEY_LEN;
 | |
| +			break;
 | |
| +		case CRYPTO_SKIPJACK_CBC:
 | |
| +			info.blocksize = SKIPJACK_BLOCK_LEN;
 | |
| +			info.minkey = SKIPJACK_MIN_KEY_LEN;
 | |
| +			info.maxkey = SKIPJACK_MAX_KEY_LEN;
 | |
| +			break;
 | |
| +		case CRYPTO_AES_CBC:
 | |
| +			info.blocksize = AES_BLOCK_LEN;
 | |
| +			info.minkey = AES_MIN_KEY_LEN;
 | |
| +			info.maxkey = AES_MAX_KEY_LEN;
 | |
| +			break;
 | |
| +		case CRYPTO_ARC4:
 | |
| +			info.blocksize = ARC4_BLOCK_LEN;
 | |
| +			info.minkey = ARC4_MIN_KEY_LEN;
 | |
| +			info.maxkey = ARC4_MAX_KEY_LEN;
 | |
| +			break;
 | |
| +		case CRYPTO_CAMELLIA_CBC:
 | |
| +			info.blocksize = CAMELLIA_BLOCK_LEN;
 | |
| +			info.minkey = CAMELLIA_MIN_KEY_LEN;
 | |
| +			info.maxkey = CAMELLIA_MAX_KEY_LEN;
 | |
| +			break;
 | |
| +		default:
 | |
| +			dprintk("%s(%s) - bad cipher\n", __FUNCTION__, CIOCGSESSSTR);
 | |
| +			error = EINVAL;
 | |
| +			goto bail;
 | |
| +		}
 | |
| +
 | |
| +		switch (sop.mac) {
 | |
| +		case 0:
 | |
| +			dprintk("%s(%s) - no mac\n", __FUNCTION__, CIOCGSESSSTR);
 | |
| +			break;
 | |
| +		case CRYPTO_NULL_HMAC:
 | |
| +			info.authsize = NULL_HASH_LEN;
 | |
| +			break;
 | |
| +		case CRYPTO_MD5:
 | |
| +			info.authsize = MD5_HASH_LEN;
 | |
| +			break;
 | |
| +		case CRYPTO_SHA1:
 | |
| +			info.authsize = SHA1_HASH_LEN;
 | |
| +			break;
 | |
| +		case CRYPTO_SHA2_256:
 | |
| +			info.authsize = SHA2_256_HASH_LEN;
 | |
| +			break;
 | |
| +		case CRYPTO_SHA2_384:
 | |
| +			info.authsize = SHA2_384_HASH_LEN;
 | |
| +  			break;
 | |
| +		case CRYPTO_SHA2_512:
 | |
| +			info.authsize = SHA2_512_HASH_LEN;
 | |
| +			break;
 | |
| +		case CRYPTO_RIPEMD160:
 | |
| +			info.authsize = RIPEMD160_HASH_LEN;
 | |
| +			break;
 | |
| +		case CRYPTO_MD5_HMAC:
 | |
| +			info.authsize = MD5_HASH_LEN;
 | |
| +			break;
 | |
| +		case CRYPTO_SHA1_HMAC:
 | |
| +			info.authsize = SHA1_HASH_LEN;
 | |
| +			break;
 | |
| +		case CRYPTO_SHA2_256_HMAC:
 | |
| +			info.authsize = SHA2_256_HASH_LEN;
 | |
| +			break;
 | |
| +		case CRYPTO_SHA2_384_HMAC:
 | |
| +			info.authsize = SHA2_384_HASH_LEN;
 | |
| +  			break;
 | |
| +		case CRYPTO_SHA2_512_HMAC:
 | |
| +			info.authsize = SHA2_512_HASH_LEN;
 | |
| +			break;
 | |
| +		case CRYPTO_RIPEMD160_HMAC:
 | |
| +			info.authsize = RIPEMD160_HASH_LEN;
 | |
| +			break;
 | |
| +		default:
 | |
| +			dprintk("%s(%s) - bad mac\n", __FUNCTION__, CIOCGSESSSTR);
 | |
| +			error = EINVAL;
 | |
| +			goto bail;
 | |
| +		}
 | |
| +
 | |
| +		if (info.blocksize) {
 | |
| +			crie.cri_alg = sop.cipher;
 | |
| +			crie.cri_klen = sop.keylen * 8;
 | |
| +			if ((info.maxkey && sop.keylen > info.maxkey) ||
 | |
| +				   	sop.keylen < info.minkey) {
 | |
| +				dprintk("%s(%s) - bad key\n", __FUNCTION__, CIOCGSESSSTR);
 | |
| +				error = EINVAL;
 | |
| +				goto bail;
 | |
| +			}
 | |
| +
 | |
| +			crie.cri_key = (u_int8_t *) kmalloc(crie.cri_klen/8+1, GFP_KERNEL);
 | |
| +			if (copy_from_user(crie.cri_key, sop.key,
 | |
| +							crie.cri_klen/8)) {
 | |
| +				dprintk("%s(%s) - bad copy\n", __FUNCTION__, CIOCGSESSSTR);
 | |
| +				error = EFAULT;
 | |
| +				goto bail;
 | |
| +			}
 | |
| +			if (info.authsize)
 | |
| +				crie.cri_next = &cria;
 | |
| +		}
 | |
| +
 | |
| +		if (info.authsize) {
 | |
| +			cria.cri_alg = sop.mac;
 | |
| +			cria.cri_klen = sop.mackeylen * 8;
 | |
| +			if ((info.maxkey && sop.mackeylen > info.maxkey) ||
 | |
| +					sop.keylen < info.minkey) {
 | |
| +				dprintk("%s(%s) - mackeylen %d\n", __FUNCTION__, CIOCGSESSSTR,
 | |
| +						sop.mackeylen);
 | |
| +				error = EINVAL;
 | |
| +				goto bail;
 | |
| +			}
 | |
| +
 | |
| +			if (cria.cri_klen) {
 | |
| +				cria.cri_key = (u_int8_t *) kmalloc(cria.cri_klen/8,GFP_KERNEL);
 | |
| +				if (copy_from_user(cria.cri_key, sop.mackey,
 | |
| +								cria.cri_klen / 8)) {
 | |
| +					dprintk("%s(%s) - bad copy\n", __FUNCTION__, CIOCGSESSSTR);
 | |
| +					error = EFAULT;
 | |
| +					goto bail;
 | |
| +				}
 | |
| +			}
 | |
| +		}
 | |
| +
 | |
| +		/* NB: CIOGSESSION2 has the crid */
 | |
| +		if (cmd == CIOCGSESSION2) {
 | |
| +			crid = sop.crid;
 | |
| +			error = checkcrid(crid);
 | |
| +			if (error) {
 | |
| +				dprintk("%s(%s) - checkcrid %x\n", __FUNCTION__,
 | |
| +						CIOCGSESSSTR, error);
 | |
| +				goto bail;
 | |
| +			}
 | |
| +		} else {
 | |
| +			/* allow either HW or SW to be used */
 | |
| +			crid = CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE;
 | |
| +		}
 | |
| +		error = crypto_newsession(&sid, (info.blocksize ? &crie : &cria), crid);
 | |
| +		if (error) {
 | |
| +			dprintk("%s(%s) - newsession %d\n",__FUNCTION__,CIOCGSESSSTR,error);
 | |
| +			goto bail;
 | |
| +		}
 | |
| +
 | |
| +		cse = csecreate(fcr, sid, &crie, &cria, &info);
 | |
| +		if (cse == NULL) {
 | |
| +			crypto_freesession(sid);
 | |
| +			error = EINVAL;
 | |
| +			dprintk("%s(%s) - csecreate failed\n", __FUNCTION__, CIOCGSESSSTR);
 | |
| +			goto bail;
 | |
| +		}
 | |
| +		sop.ses = cse->ses;
 | |
| +
 | |
| +		if (cmd == CIOCGSESSION2) {
 | |
| +			/* return hardware/driver id */
 | |
| +			sop.crid = CRYPTO_SESID2HID(cse->sid);
 | |
| +		}
 | |
| +
 | |
| +		if (copy_to_user((void*)arg, &sop, (cmd == CIOCGSESSION) ?
 | |
| +					sizeof(struct session_op) : sizeof(sop))) {
 | |
| +			dprintk("%s(%s) - bad copy\n", __FUNCTION__, CIOCGSESSSTR);
 | |
| +			error = EFAULT;
 | |
| +		}
 | |
| +bail:
 | |
| +		if (error) {
 | |
| +			dprintk("%s(%s) - bail %d\n", __FUNCTION__, CIOCGSESSSTR, error);
 | |
| +			if (crie.cri_key)
 | |
| +				kfree(crie.cri_key);
 | |
| +			if (cria.cri_key)
 | |
| +				kfree(cria.cri_key);
 | |
| +		}
 | |
| +		break;
 | |
| +	case CIOCFSESSION:
 | |
| +		dprintk("%s(CIOCFSESSION)\n", __FUNCTION__);
 | |
| +		get_user(ses, (uint32_t*)arg);
 | |
| +		cse = csefind(fcr, ses);
 | |
| +		if (cse == NULL) {
 | |
| +			error = EINVAL;
 | |
| +			dprintk("%s(CIOCFSESSION) - Fail %d\n", __FUNCTION__, error);
 | |
| +			break;
 | |
| +		}
 | |
| +		csedelete(fcr, cse);
 | |
| +		error = csefree(cse);
 | |
| +		break;
 | |
| +	case CIOCCRYPT:
 | |
| +		dprintk("%s(CIOCCRYPT)\n", __FUNCTION__);
 | |
| +		if(copy_from_user(&cop, (void*)arg, sizeof(cop))) {
 | |
| +			dprintk("%s(CIOCCRYPT) - bad copy\n", __FUNCTION__);
 | |
| +			error = EFAULT;
 | |
| +			goto bail;
 | |
| +		}
 | |
| +		cse = csefind(fcr, cop.ses);
 | |
| +		if (cse == NULL) {
 | |
| +			error = EINVAL;
 | |
| +			dprintk("%s(CIOCCRYPT) - Fail %d\n", __FUNCTION__, error);
 | |
| +			break;
 | |
| +		}
 | |
| +		error = cryptodev_op(cse, &cop);
 | |
| +		if(copy_to_user((void*)arg, &cop, sizeof(cop))) {
 | |
| +			dprintk("%s(CIOCCRYPT) - bad return copy\n", __FUNCTION__);
 | |
| +			error = EFAULT;
 | |
| +			goto bail;
 | |
| +		}
 | |
| +		break;
 | |
| +	case CIOCKEY:
 | |
| +	case CIOCKEY2:
 | |
| +		dprintk("%s(CIOCKEY)\n", __FUNCTION__);
 | |
| +		if (!crypto_userasymcrypto)
 | |
| +			return (EPERM);		/* XXX compat? */
 | |
| +		if(copy_from_user(&kop, (void*)arg, sizeof(kop))) {
 | |
| +			dprintk("%s(CIOCKEY) - bad copy\n", __FUNCTION__);
 | |
| +			error = EFAULT;
 | |
| +			goto bail;
 | |
| +		}
 | |
| +		if (cmd == CIOCKEY) {
 | |
| +			/* NB: crypto core enforces s/w driver use */
 | |
| +			kop.crk_crid =
 | |
| +			    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE;
 | |
| +		}
 | |
| +		error = cryptodev_key(&kop);
 | |
| +		if(copy_to_user((void*)arg, &kop, sizeof(kop))) {
 | |
| +			dprintk("%s(CIOCGKEY) - bad return copy\n", __FUNCTION__);
 | |
| +			error = EFAULT;
 | |
| +			goto bail;
 | |
| +		}
 | |
| +		break;
 | |
| +	case CIOCASYMFEAT:
 | |
| +		dprintk("%s(CIOCASYMFEAT)\n", __FUNCTION__);
 | |
| +		if (!crypto_userasymcrypto) {
 | |
| +			/*
 | |
| +			 * NB: if user asym crypto operations are
 | |
| +			 * not permitted return "no algorithms"
 | |
| +			 * so well-behaved applications will just
 | |
| +			 * fallback to doing them in software.
 | |
| +			 */
 | |
| +			feat = 0;
 | |
| +		} else
 | |
| +			error = crypto_getfeat(&feat);
 | |
| +		if (!error) {
 | |
| +		  error = copy_to_user((void*)arg, &feat, sizeof(feat));
 | |
| +		}
 | |
| +		break;
 | |
| +	case CIOCFINDDEV:
 | |
| +		if (copy_from_user(&fop, (void*)arg, sizeof(fop))) {
 | |
| +			dprintk("%s(CIOCFINDDEV) - bad copy\n", __FUNCTION__);
 | |
| +			error = EFAULT;
 | |
| +			goto bail;
 | |
| +		}
 | |
| +		error = cryptodev_find(&fop);
 | |
| +		if (copy_to_user((void*)arg, &fop, sizeof(fop))) {
 | |
| +			dprintk("%s(CIOCFINDDEV) - bad return copy\n", __FUNCTION__);
 | |
| +			error = EFAULT;
 | |
| +			goto bail;
 | |
| +		}
 | |
| +		break;
 | |
| +	default:
 | |
| +		dprintk("%s(unknown ioctl 0x%x)\n", __FUNCTION__, cmd);
 | |
| +		error = EINVAL;
 | |
| +		break;
 | |
| +	}
 | |
| +	return(-error);
 | |
| +}
 | |
| +
 | |
| +#ifdef HAVE_UNLOCKED_IOCTL
 | |
| +static long
 | |
| +cryptodev_unlocked_ioctl(
 | |
| +	struct file *filp,
 | |
| +	unsigned int cmd,
 | |
| +	unsigned long arg)
 | |
| +{
 | |
| +	return cryptodev_ioctl(NULL, filp, cmd, arg);
 | |
| +}
 | |
| +#endif
 | |
| +
 | |
| +static int
 | |
| +cryptodev_open(struct inode *inode, struct file *filp)
 | |
| +{
 | |
| +	struct fcrypt *fcr;
 | |
| +
 | |
| +	dprintk("%s()\n", __FUNCTION__);
 | |
| +	if (filp->private_data) {
 | |
| +		printk("cryptodev: Private data already exists !\n");
 | |
| +		return(0);
 | |
| +	}
 | |
| +
 | |
| +	fcr = kmalloc(sizeof(*fcr), GFP_KERNEL);
 | |
| +	if (!fcr) {
 | |
| +		dprintk("%s() - malloc failed\n", __FUNCTION__);
 | |
| +		return(-ENOMEM);
 | |
| +	}
 | |
| +	memset(fcr, 0, sizeof(*fcr));
 | |
| +
 | |
| +	INIT_LIST_HEAD(&fcr->csessions);
 | |
| +	filp->private_data = fcr;
 | |
| +	return(0);
 | |
| +}
 | |
| +
 | |
| +static int
 | |
| +cryptodev_release(struct inode *inode, struct file *filp)
 | |
| +{
 | |
| +	struct fcrypt *fcr = filp->private_data;
 | |
| +	struct csession *cse, *tmp;
 | |
| +
 | |
| +	dprintk("%s()\n", __FUNCTION__);
 | |
| +	if (!filp) {
 | |
| +		printk("cryptodev: No private data on release\n");
 | |
| +		return(0);
 | |
| +	}
 | |
| +
 | |
| +	list_for_each_entry_safe(cse, tmp, &fcr->csessions, list) {
 | |
| +		list_del(&cse->list);
 | |
| +		(void)csefree(cse);
 | |
| +	}
 | |
| +	filp->private_data = NULL;
 | |
| +	kfree(fcr);
 | |
| +	return(0);
 | |
| +}
 | |
| +
 | |
| +static struct file_operations cryptodev_fops = {
 | |
| +	.owner = THIS_MODULE,
 | |
| +	.open = cryptodev_open,
 | |
| +	.release = cryptodev_release,
 | |
| +	.ioctl = cryptodev_ioctl,
 | |
| +#ifdef HAVE_UNLOCKED_IOCTL
 | |
| +	.unlocked_ioctl = cryptodev_unlocked_ioctl,
 | |
| +#endif
 | |
| +};
 | |
| +
 | |
| +static struct miscdevice cryptodev = {
 | |
| +	.minor = CRYPTODEV_MINOR,
 | |
| +	.name = "crypto",
 | |
| +	.fops = &cryptodev_fops,
 | |
| +};
 | |
| +
 | |
| +static int __init
 | |
| +cryptodev_init(void)
 | |
| +{
 | |
| +	int rc;
 | |
| +
 | |
| +	dprintk("%s(%p)\n", __FUNCTION__, cryptodev_init);
 | |
| +	rc = misc_register(&cryptodev);
 | |
| +	if (rc) {
 | |
| +		printk(KERN_ERR "cryptodev: registration of /dev/crypto failed\n");
 | |
| +		return(rc);
 | |
| +	}
 | |
| +
 | |
| +	return(0);
 | |
| +}
 | |
| +
 | |
| +static void __exit
 | |
| +cryptodev_exit(void)
 | |
| +{
 | |
| +	dprintk("%s()\n", __FUNCTION__);
 | |
| +	misc_deregister(&cryptodev);
 | |
| +}
 | |
| +
 | |
| +module_init(cryptodev_init);
 | |
| +module_exit(cryptodev_exit);
 | |
| +
 | |
| +MODULE_LICENSE("BSD");
 | |
| +MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
 | |
| +MODULE_DESCRIPTION("Cryptodev (user interface to OCF)");
 | |
| --- /dev/null
 | |
| +++ b/crypto/ocf/cryptodev.h
 | |
| @@ -0,0 +1,478 @@
 | |
| +/*	$FreeBSD: src/sys/opencrypto/cryptodev.h,v 1.25 2007/05/09 19:37:02 gnn Exp $	*/
 | |
| +/*	$OpenBSD: cryptodev.h,v 1.31 2002/06/11 11:14:29 beck Exp $	*/
 | |
| +
 | |
| +/*-
 | |
| + * Linux port done by David McCullough <david_mccullough@securecomputing.com>
 | |
| + * Copyright (C) 2006-2007 David McCullough
 | |
| + * Copyright (C) 2004-2005 Intel Corporation.
 | |
| + * The license and original author are listed below.
 | |
| + *
 | |
| + * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 | |
| + * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
 | |
| + *
 | |
| + * This code was written by Angelos D. Keromytis in Athens, Greece, in
 | |
| + * February 2000. Network Security Technologies Inc. (NSTI) kindly
 | |
| + * supported the development of this code.
 | |
| + *
 | |
| + * Copyright (c) 2000 Angelos D. Keromytis
 | |
| + *
 | |
| + * Permission to use, copy, and modify this software with or without fee
 | |
| + * is hereby granted, provided that this entire notice is included in
 | |
| + * all source code copies of any software which is or includes a copy or
 | |
| + * modification of this software.
 | |
| + *
 | |
| + * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 | |
| + * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 | |
| + * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 | |
| + * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 | |
| + * PURPOSE.
 | |
| + *
 | |
| + * Copyright (c) 2001 Theo de Raadt
 | |
| + *
 | |
| + * Redistribution and use in source and binary forms, with or without
 | |
| + * modification, are permitted provided that the following conditions
 | |
| + * are met:
 | |
| + *
 | |
| + * 1. Redistributions of source code must retain the above copyright
 | |
| + *   notice, this list of conditions and the following disclaimer.
 | |
| + * 2. Redistributions in binary form must reproduce the above copyright
 | |
| + *   notice, this list of conditions and the following disclaimer in the
 | |
| + *   documentation and/or other materials provided with the distribution.
 | |
| + * 3. The name of the author may not be used to endorse or promote products
 | |
| + *   derived from this software without specific prior written permission.
 | |
| + *
 | |
| + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 | |
| + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 | |
| + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 | |
| + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 | |
| + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 | |
| + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 | |
| + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 | |
| + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 | |
| + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 | |
| + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 | |
| + *
 | |
| + * Effort sponsored in part by the Defense Advanced Research Projects
 | |
| + * Agency (DARPA) and Air Force Research Laboratory, Air Force
 | |
| + * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 | |
| + *
 | |
| + */
 | |
| +
 | |
| +#ifndef _CRYPTO_CRYPTO_H_
 | |
| +#define _CRYPTO_CRYPTO_H_
 | |
| +
 | |
| +/* Some initial values */
 | |
| +#define CRYPTO_DRIVERS_INITIAL	4
 | |
| +#define CRYPTO_SW_SESSIONS	32
 | |
| +
 | |
| +/* Hash values */
 | |
| +#define NULL_HASH_LEN		0
 | |
| +#define MD5_HASH_LEN		16
 | |
| +#define SHA1_HASH_LEN		20
 | |
| +#define RIPEMD160_HASH_LEN	20
 | |
| +#define SHA2_256_HASH_LEN	32
 | |
| +#define SHA2_384_HASH_LEN	48
 | |
| +#define SHA2_512_HASH_LEN	64
 | |
| +#define MD5_KPDK_HASH_LEN	16
 | |
| +#define SHA1_KPDK_HASH_LEN	20
 | |
| +/* Maximum hash algorithm result length */
 | |
| +#define HASH_MAX_LEN		SHA2_512_HASH_LEN /* Keep this updated */
 | |
| +
 | |
| +/* HMAC values */
 | |
| +#define NULL_HMAC_BLOCK_LEN			1
 | |
| +#define MD5_HMAC_BLOCK_LEN			64
 | |
| +#define SHA1_HMAC_BLOCK_LEN			64
 | |
| +#define RIPEMD160_HMAC_BLOCK_LEN	64
 | |
| +#define SHA2_256_HMAC_BLOCK_LEN		64
 | |
| +#define SHA2_384_HMAC_BLOCK_LEN		128
 | |
| +#define SHA2_512_HMAC_BLOCK_LEN		128
 | |
| +/* Maximum HMAC block length */
 | |
| +#define HMAC_MAX_BLOCK_LEN		SHA2_512_HMAC_BLOCK_LEN /* Keep this updated */
 | |
| +#define HMAC_IPAD_VAL			0x36
 | |
| +#define HMAC_OPAD_VAL			0x5C
 | |
| +
 | |
| +/* Encryption algorithm block sizes */
 | |
| +#define NULL_BLOCK_LEN			1
 | |
| +#define DES_BLOCK_LEN			8
 | |
| +#define DES3_BLOCK_LEN			8
 | |
| +#define BLOWFISH_BLOCK_LEN		8
 | |
| +#define SKIPJACK_BLOCK_LEN		8
 | |
| +#define CAST128_BLOCK_LEN		8
 | |
| +#define RIJNDAEL128_BLOCK_LEN	16
 | |
| +#define AES_BLOCK_LEN			RIJNDAEL128_BLOCK_LEN
 | |
| +#define CAMELLIA_BLOCK_LEN		16
 | |
| +#define ARC4_BLOCK_LEN			1
 | |
| +#define EALG_MAX_BLOCK_LEN		AES_BLOCK_LEN /* Keep this updated */
 | |
| +
 | |
| +/* Encryption algorithm min and max key sizes */
 | |
| +#define NULL_MIN_KEY_LEN		0
 | |
| +#define NULL_MAX_KEY_LEN		0
 | |
| +#define DES_MIN_KEY_LEN			8
 | |
| +#define DES_MAX_KEY_LEN			8
 | |
| +#define DES3_MIN_KEY_LEN		24
 | |
| +#define DES3_MAX_KEY_LEN		24
 | |
| +#define BLOWFISH_MIN_KEY_LEN	4
 | |
| +#define BLOWFISH_MAX_KEY_LEN	56
 | |
| +#define SKIPJACK_MIN_KEY_LEN	10
 | |
| +#define SKIPJACK_MAX_KEY_LEN	10
 | |
| +#define CAST128_MIN_KEY_LEN		5
 | |
| +#define CAST128_MAX_KEY_LEN		16
 | |
| +#define RIJNDAEL128_MIN_KEY_LEN	16
 | |
| +#define RIJNDAEL128_MAX_KEY_LEN	32
 | |
| +#define AES_MIN_KEY_LEN			RIJNDAEL128_MIN_KEY_LEN
 | |
| +#define AES_MAX_KEY_LEN			RIJNDAEL128_MAX_KEY_LEN
 | |
| +#define CAMELLIA_MIN_KEY_LEN	16
 | |
| +#define CAMELLIA_MAX_KEY_LEN	32
 | |
| +#define ARC4_MIN_KEY_LEN		1
 | |
| +#define ARC4_MAX_KEY_LEN		256
 | |
| +
 | |
| +/* Max size of data that can be processed */
 | |
| +#define CRYPTO_MAX_DATA_LEN		64*1024 - 1
 | |
| +
 | |
| +#define CRYPTO_ALGORITHM_MIN	1
 | |
| +#define CRYPTO_DES_CBC			1
 | |
| +#define CRYPTO_3DES_CBC			2
 | |
| +#define CRYPTO_BLF_CBC			3
 | |
| +#define CRYPTO_CAST_CBC			4
 | |
| +#define CRYPTO_SKIPJACK_CBC		5
 | |
| +#define CRYPTO_MD5_HMAC			6
 | |
| +#define CRYPTO_SHA1_HMAC		7
 | |
| +#define CRYPTO_RIPEMD160_HMAC	8
 | |
| +#define CRYPTO_MD5_KPDK			9
 | |
| +#define CRYPTO_SHA1_KPDK		10
 | |
| +#define CRYPTO_RIJNDAEL128_CBC	11 /* 128 bit blocksize */
 | |
| +#define CRYPTO_AES_CBC			11 /* 128 bit blocksize -- the same as above */
 | |
| +#define CRYPTO_ARC4				12
 | |
| +#define CRYPTO_MD5				13
 | |
| +#define CRYPTO_SHA1				14
 | |
| +#define CRYPTO_NULL_HMAC		15
 | |
| +#define CRYPTO_NULL_CBC			16
 | |
| +#define CRYPTO_DEFLATE_COMP		17 /* Deflate compression algorithm */
 | |
| +#define CRYPTO_SHA2_256_HMAC	18
 | |
| +#define CRYPTO_SHA2_384_HMAC	19
 | |
| +#define CRYPTO_SHA2_512_HMAC	20
 | |
| +#define CRYPTO_CAMELLIA_CBC		21
 | |
| +#define CRYPTO_SHA2_256			22
 | |
| +#define CRYPTO_SHA2_384			23
 | |
| +#define CRYPTO_SHA2_512			24
 | |
| +#define CRYPTO_RIPEMD160		25
 | |
| +#define CRYPTO_ALGORITHM_MAX	25 /* Keep updated - see below */
 | |
| +
 | |
| +/* Algorithm flags */
 | |
| +#define CRYPTO_ALG_FLAG_SUPPORTED	0x01 /* Algorithm is supported */
 | |
| +#define CRYPTO_ALG_FLAG_RNG_ENABLE	0x02 /* Has HW RNG for DH/DSA */
 | |
| +#define CRYPTO_ALG_FLAG_DSA_SHA		0x04 /* Can do SHA on msg */
 | |
| +
 | |
| +/*
 | |
| + * Crypto driver/device flags.  They can set in the crid
 | |
| + * parameter when creating a session or submitting a key
 | |
| + * op to affect the device/driver assigned.  If neither
 | |
| + * of these are specified then the crid is assumed to hold
 | |
| + * the driver id of an existing (and suitable) device that
 | |
| + * must be used to satisfy the request.
 | |
| + */
 | |
| +#define CRYPTO_FLAG_HARDWARE	0x01000000	/* hardware accelerated */
 | |
| +#define CRYPTO_FLAG_SOFTWARE	0x02000000	/* software implementation */
 | |
| +
 | |
| +/* NB: deprecated */
 | |
| +struct session_op {
 | |
| +	u_int32_t	cipher;		/* ie. CRYPTO_DES_CBC */
 | |
| +	u_int32_t	mac;		/* ie. CRYPTO_MD5_HMAC */
 | |
| +
 | |
| +	u_int32_t	keylen;		/* cipher key */
 | |
| +	caddr_t		key;
 | |
| +	int		mackeylen;	/* mac key */
 | |
| +	caddr_t		mackey;
 | |
| +
 | |
| +  	u_int32_t	ses;		/* returns: session # */
 | |
| +};
 | |
| +
 | |
| +struct session2_op {
 | |
| +	u_int32_t	cipher;		/* ie. CRYPTO_DES_CBC */
 | |
| +	u_int32_t	mac;		/* ie. CRYPTO_MD5_HMAC */
 | |
| +
 | |
| +	u_int32_t	keylen;		/* cipher key */
 | |
| +	caddr_t		key;
 | |
| +	int		mackeylen;	/* mac key */
 | |
| +	caddr_t		mackey;
 | |
| +
 | |
| +  	u_int32_t	ses;		/* returns: session # */
 | |
| +	int		crid;		/* driver id + flags (rw) */
 | |
| +	int		pad[4];		/* for future expansion */
 | |
| +};
 | |
| +
 | |
| +struct crypt_op {
 | |
| +	u_int32_t	ses;
 | |
| +	u_int16_t	op;		/* i.e. COP_ENCRYPT */
 | |
| +#define COP_NONE	0
 | |
| +#define COP_ENCRYPT	1
 | |
| +#define COP_DECRYPT	2
 | |
| +	u_int16_t	flags;
 | |
| +#define	COP_F_BATCH	0x0008		/* Batch op if possible */
 | |
| +	u_int		len;
 | |
| +	caddr_t		src, dst;	/* become iov[] inside kernel */
 | |
| +	caddr_t		mac;		/* must be big enough for chosen MAC */
 | |
| +	caddr_t		iv;
 | |
| +};
 | |
| +
 | |
| +/*
 | |
| + * Parameters for looking up a crypto driver/device by
 | |
| + * device name or by id.  The latter are returned for
 | |
| + * created sessions (crid) and completed key operations.
 | |
| + */
 | |
| +struct crypt_find_op {
 | |
| +	int		crid;		/* driver id + flags */
 | |
| +	char		name[32];	/* device/driver name */
 | |
| +};
 | |
| +
 | |
| +/* bignum parameter, in packed bytes, ... */
 | |
| +struct crparam {
 | |
| +	caddr_t		crp_p;
 | |
| +	u_int		crp_nbits;
 | |
| +};
 | |
| +
 | |
| +#define CRK_MAXPARAM	8
 | |
| +
 | |
| +struct crypt_kop {
 | |
| +	u_int		crk_op;		/* ie. CRK_MOD_EXP or other */
 | |
| +	u_int		crk_status;	/* return status */
 | |
| +	u_short		crk_iparams;	/* # of input parameters */
 | |
| +	u_short		crk_oparams;	/* # of output parameters */
 | |
| +	u_int		crk_crid;	/* NB: only used by CIOCKEY2 (rw) */
 | |
| +	struct crparam	crk_param[CRK_MAXPARAM];
 | |
| +};
 | |
| +#define CRK_ALGORITM_MIN	0
 | |
| +#define CRK_MOD_EXP		0
 | |
| +#define CRK_MOD_EXP_CRT		1
 | |
| +#define CRK_DSA_SIGN		2
 | |
| +#define CRK_DSA_VERIFY		3
 | |
| +#define CRK_DH_COMPUTE_KEY	4
 | |
| +#define CRK_ALGORITHM_MAX	4 /* Keep updated - see below */
 | |
| +
 | |
| +#define CRF_MOD_EXP		(1 << CRK_MOD_EXP)
 | |
| +#define CRF_MOD_EXP_CRT		(1 << CRK_MOD_EXP_CRT)
 | |
| +#define CRF_DSA_SIGN		(1 << CRK_DSA_SIGN)
 | |
| +#define CRF_DSA_VERIFY		(1 << CRK_DSA_VERIFY)
 | |
| +#define CRF_DH_COMPUTE_KEY	(1 << CRK_DH_COMPUTE_KEY)
 | |
| +
 | |
| +/*
 | |
| + * done against open of /dev/crypto, to get a cloned descriptor.
 | |
| + * Please use F_SETFD against the cloned descriptor.
 | |
| + */
 | |
| +#define CRIOGET		_IOWR('c', 100, u_int32_t)
 | |
| +#define CRIOASYMFEAT	CIOCASYMFEAT
 | |
| +#define CRIOFINDDEV	CIOCFINDDEV
 | |
| +
 | |
| +/* the following are done against the cloned descriptor */
 | |
| +#define CIOCGSESSION	_IOWR('c', 101, struct session_op)
 | |
| +#define CIOCFSESSION	_IOW('c', 102, u_int32_t)
 | |
| +#define CIOCCRYPT	_IOWR('c', 103, struct crypt_op)
 | |
| +#define CIOCKEY		_IOWR('c', 104, struct crypt_kop)
 | |
| +#define CIOCASYMFEAT	_IOR('c', 105, u_int32_t)
 | |
| +#define CIOCGSESSION2	_IOWR('c', 106, struct session2_op)
 | |
| +#define CIOCKEY2	_IOWR('c', 107, struct crypt_kop)
 | |
| +#define CIOCFINDDEV	_IOWR('c', 108, struct crypt_find_op)
 | |
| +
 | |
| +struct cryptotstat {
 | |
| +	struct timespec	acc;		/* total accumulated time */
 | |
| +	struct timespec	min;		/* min time */
 | |
| +	struct timespec	max;		/* max time */
 | |
| +	u_int32_t	count;		/* number of observations */
 | |
| +};
 | |
| +
 | |
| +struct cryptostats {
 | |
| +	u_int32_t	cs_ops;		/* symmetric crypto ops submitted */
 | |
| +	u_int32_t	cs_errs;	/* symmetric crypto ops that failed */
 | |
| +	u_int32_t	cs_kops;	/* asymetric/key ops submitted */
 | |
| +	u_int32_t	cs_kerrs;	/* asymetric/key ops that failed */
 | |
| +	u_int32_t	cs_intrs;	/* crypto swi thread activations */
 | |
| +	u_int32_t	cs_rets;	/* crypto return thread activations */
 | |
| +	u_int32_t	cs_blocks;	/* symmetric op driver block */
 | |
| +	u_int32_t	cs_kblocks;	/* symmetric op driver block */
 | |
| +	/*
 | |
| +	 * When CRYPTO_TIMING is defined at compile time and the
 | |
| +	 * sysctl debug.crypto is set to 1, the crypto system will
 | |
| +	 * accumulate statistics about how long it takes to process
 | |
| +	 * crypto requests at various points during processing.
 | |
| +	 */
 | |
| +	struct cryptotstat cs_invoke;	/* crypto_dipsatch -> crypto_invoke */
 | |
| +	struct cryptotstat cs_done;	/* crypto_invoke -> crypto_done */
 | |
| +	struct cryptotstat cs_cb;	/* crypto_done -> callback */
 | |
| +	struct cryptotstat cs_finis;	/* callback -> callback return */
 | |
| +
 | |
| +	u_int32_t	cs_drops;		/* crypto ops dropped due to congestion */
 | |
| +};
 | |
| +
 | |
| +#ifdef __KERNEL__
 | |
| +
 | |
| +/* Standard initialization structure beginning */
 | |
| +struct cryptoini {
 | |
| +	int		cri_alg;	/* Algorithm to use */
 | |
| +	int		cri_klen;	/* Key length, in bits */
 | |
| +	int		cri_mlen;	/* Number of bytes we want from the
 | |
| +					   entire hash. 0 means all. */
 | |
| +	caddr_t		cri_key;	/* key to use */
 | |
| +	u_int8_t	cri_iv[EALG_MAX_BLOCK_LEN];	/* IV to use */
 | |
| +	struct cryptoini *cri_next;
 | |
| +};
 | |
| +
 | |
| +/* Describe boundaries of a single crypto operation */
 | |
| +struct cryptodesc {
 | |
| +	int		crd_skip;	/* How many bytes to ignore from start */
 | |
| +	int		crd_len;	/* How many bytes to process */
 | |
| +	int		crd_inject;	/* Where to inject results, if applicable */
 | |
| +	int		crd_flags;
 | |
| +
 | |
| +#define CRD_F_ENCRYPT		0x01	/* Set when doing encryption */
 | |
| +#define CRD_F_IV_PRESENT	0x02	/* When encrypting, IV is already in
 | |
| +					   place, so don't copy. */
 | |
| +#define CRD_F_IV_EXPLICIT	0x04	/* IV explicitly provided */
 | |
| +#define CRD_F_DSA_SHA_NEEDED	0x08	/* Compute SHA-1 of buffer for DSA */
 | |
| +#define CRD_F_KEY_EXPLICIT	0x10	/* Key explicitly provided */
 | |
| +#define CRD_F_COMP		0x0f    /* Set when doing compression */
 | |
| +
 | |
| +	struct cryptoini	CRD_INI; /* Initialization/context data */
 | |
| +#define crd_iv		CRD_INI.cri_iv
 | |
| +#define crd_key		CRD_INI.cri_key
 | |
| +#define crd_alg		CRD_INI.cri_alg
 | |
| +#define crd_klen	CRD_INI.cri_klen
 | |
| +
 | |
| +	struct cryptodesc *crd_next;
 | |
| +};
 | |
| +
 | |
| +/* Structure describing complete operation */
 | |
| +struct cryptop {
 | |
| +	struct list_head crp_next;
 | |
| +	wait_queue_head_t crp_waitq;
 | |
| +
 | |
| +	u_int64_t	crp_sid;	/* Session ID */
 | |
| +	int		crp_ilen;	/* Input data total length */
 | |
| +	int		crp_olen;	/* Result total length */
 | |
| +
 | |
| +	int		crp_etype;	/*
 | |
| +					 * Error type (zero means no error).
 | |
| +					 * All error codes except EAGAIN
 | |
| +					 * indicate possible data corruption (as in,
 | |
| +					 * the data have been touched). On all
 | |
| +					 * errors, the crp_sid may have changed
 | |
| +					 * (reset to a new one), so the caller
 | |
| +					 * should always check and use the new
 | |
| +					 * value on future requests.
 | |
| +					 */
 | |
| +	int		crp_flags;
 | |
| +
 | |
| +#define CRYPTO_F_SKBUF		0x0001	/* Input/output are skbuf chains */
 | |
| +#define CRYPTO_F_IOV		0x0002	/* Input/output are uio */
 | |
| +#define CRYPTO_F_REL		0x0004	/* Must return data in same place */
 | |
| +#define CRYPTO_F_BATCH		0x0008	/* Batch op if possible */
 | |
| +#define CRYPTO_F_CBIMM		0x0010	/* Do callback immediately */
 | |
| +#define CRYPTO_F_DONE		0x0020	/* Operation completed */
 | |
| +#define CRYPTO_F_CBIFSYNC	0x0040	/* Do CBIMM if op is synchronous */
 | |
| +
 | |
| +	caddr_t		crp_buf;	/* Data to be processed */
 | |
| +	caddr_t		crp_opaque;	/* Opaque pointer, passed along */
 | |
| +	struct cryptodesc *crp_desc;	/* Linked list of processing descriptors */
 | |
| +
 | |
| +	int (*crp_callback)(struct cryptop *); /* Callback function */
 | |
| +};
 | |
| +
 | |
| +#define CRYPTO_BUF_CONTIG	0x0
 | |
| +#define CRYPTO_BUF_IOV		0x1
 | |
| +#define CRYPTO_BUF_SKBUF		0x2
 | |
| +
 | |
| +#define CRYPTO_OP_DECRYPT	0x0
 | |
| +#define CRYPTO_OP_ENCRYPT	0x1
 | |
| +
 | |
| +/*
 | |
| + * Hints passed to process methods.
 | |
| + */
 | |
| +#define CRYPTO_HINT_MORE	0x1	/* more ops coming shortly */
 | |
| +
 | |
| +struct cryptkop {
 | |
| +	struct list_head krp_next;
 | |
| +	wait_queue_head_t krp_waitq;
 | |
| +
 | |
| +	int		krp_flags;
 | |
| +#define CRYPTO_KF_DONE		0x0001	/* Operation completed */
 | |
| +#define CRYPTO_KF_CBIMM		0x0002	/* Do callback immediately */
 | |
| +
 | |
| +	u_int		krp_op;		/* ie. CRK_MOD_EXP or other */
 | |
| +	u_int		krp_status;	/* return status */
 | |
| +	u_short		krp_iparams;	/* # of input parameters */
 | |
| +	u_short		krp_oparams;	/* # of output parameters */
 | |
| +	u_int		krp_crid;	/* desired device, etc. */
 | |
| +	u_int32_t	krp_hid;
 | |
| +	struct crparam	krp_param[CRK_MAXPARAM];	/* kvm */
 | |
| +	int		(*krp_callback)(struct cryptkop *);
 | |
| +};
 | |
| +
 | |
| +#include <ocf-compat.h>
 | |
| +
 | |
| +/*
 | |
| + * Session ids are 64 bits.  The lower 32 bits contain a "local id" which
 | |
| + * is a driver-private session identifier.  The upper 32 bits contain a
 | |
| + * "hardware id" used by the core crypto code to identify the driver and
 | |
| + * a copy of the driver's capabilities that can be used by client code to
 | |
| + * optimize operation.
 | |
| + */
 | |
| +#define CRYPTO_SESID2HID(_sid)	(((_sid) >> 32) & 0x00ffffff)
 | |
| +#define CRYPTO_SESID2CAPS(_sid)	(((_sid) >> 32) & 0xff000000)
 | |
| +#define CRYPTO_SESID2LID(_sid)	(((u_int32_t) (_sid)) & 0xffffffff)
 | |
| +
 | |
| +extern	int crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard);
 | |
| +extern	int crypto_freesession(u_int64_t sid);
 | |
| +#define CRYPTOCAP_F_HARDWARE	CRYPTO_FLAG_HARDWARE
 | |
| +#define CRYPTOCAP_F_SOFTWARE	CRYPTO_FLAG_SOFTWARE
 | |
| +#define CRYPTOCAP_F_SYNC	0x04000000	/* operates synchronously */
 | |
| +extern	int32_t crypto_get_driverid(device_t dev, int flags);
 | |
| +extern	int crypto_find_driver(const char *);
 | |
| +extern	device_t crypto_find_device_byhid(int hid);
 | |
| +extern	int crypto_getcaps(int hid);
 | |
| +extern	int crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
 | |
| +	    u_int32_t flags);
 | |
| +extern	int crypto_kregister(u_int32_t, int, u_int32_t);
 | |
| +extern	int crypto_unregister(u_int32_t driverid, int alg);
 | |
| +extern	int crypto_unregister_all(u_int32_t driverid);
 | |
| +extern	int crypto_dispatch(struct cryptop *crp);
 | |
| +extern	int crypto_kdispatch(struct cryptkop *);
 | |
| +#define CRYPTO_SYMQ	0x1
 | |
| +#define CRYPTO_ASYMQ	0x2
 | |
| +extern	int crypto_unblock(u_int32_t, int);
 | |
| +extern	void crypto_done(struct cryptop *crp);
 | |
| +extern	void crypto_kdone(struct cryptkop *);
 | |
| +extern	int crypto_getfeat(int *);
 | |
| +
 | |
| +extern	void crypto_freereq(struct cryptop *crp);
 | |
| +extern	struct cryptop *crypto_getreq(int num);
 | |
| +
 | |
| +extern  int crypto_usercrypto;      /* userland may do crypto requests */
 | |
| +extern  int crypto_userasymcrypto;  /* userland may do asym crypto reqs */
 | |
| +extern  int crypto_devallowsoft;    /* only use hardware crypto */
 | |
| +
 | |
| +/*
 | |
| + * random number support,  crypto_unregister_all will unregister
 | |
| + */
 | |
| +extern int crypto_rregister(u_int32_t driverid,
 | |
| +		int (*read_random)(void *arg, u_int32_t *buf, int len), void *arg);
 | |
| +extern int crypto_runregister_all(u_int32_t driverid);
 | |
| +
 | |
| +/*
 | |
| + * Crypto-related utility routines used mainly by drivers.
 | |
| + *
 | |
| + * XXX these don't really belong here; but for now they're
 | |
| + *     kept apart from the rest of the system.
 | |
| + */
 | |
| +struct uio;
 | |
| +extern	void cuio_copydata(struct uio* uio, int off, int len, caddr_t cp);
 | |
| +extern	void cuio_copyback(struct uio* uio, int off, int len, caddr_t cp);
 | |
| +extern	struct iovec *cuio_getptr(struct uio *uio, int loc, int *off);
 | |
| +
 | |
| +extern	void crypto_copyback(int flags, caddr_t buf, int off, int size,
 | |
| +	    caddr_t in);
 | |
| +extern	void crypto_copydata(int flags, caddr_t buf, int off, int size,
 | |
| +	    caddr_t out);
 | |
| +extern	int crypto_apply(int flags, caddr_t buf, int off, int len,
 | |
| +	    int (*f)(void *, void *, u_int), void *arg);
 | |
| +
 | |
| +#endif /* __KERNEL__ */
 | |
| +#endif /* _CRYPTO_CRYPTO_H_ */
 | |
| --- /dev/null
 | |
| +++ b/crypto/ocf/ocfnull/ocfnull.c
 | |
| @@ -0,0 +1,203 @@
 | |
| +/*
 | |
| + * An OCF module for determining the cost of crypto versus the cost of
 | |
| + * IPSec processing outside of OCF.  This modules gives us the effect of
 | |
| + * zero cost encryption,  of course you will need to run it at both ends
 | |
| + * since it does no crypto at all.
 | |
| + *
 | |
| + * Written by David McCullough <david_mccullough@securecomputing.com>
 | |
| + * Copyright (C) 2006-2007 David McCullough
 | |
| + *
 | |
| + * LICENSE TERMS
 | |
| + *
 | |
| + * The free distribution and use of this software in both source and binary
 | |
| + * form is allowed (with or without changes) provided that:
 | |
| + *
 | |
| + *   1. distributions of this source code include the above copyright
 | |
| + *      notice, this list of conditions and the following disclaimer;
 | |
| + *
 | |
| + *   2. distributions in binary form include the above copyright
 | |
| + *      notice, this list of conditions and the following disclaimer
 | |
| + *      in the documentation and/or other associated materials;
 | |
| + *
 | |
| + *   3. the copyright holder's name is not used to endorse products
 | |
| + *      built using this software without specific written permission.
 | |
| + *
 | |
| + * ALTERNATIVELY, provided that this notice is retained in full, this product
 | |
| + * may be distributed under the terms of the GNU General Public License (GPL),
 | |
| + * in which case the provisions of the GPL apply INSTEAD OF those given above.
 | |
| + *
 | |
| + * DISCLAIMER
 | |
| + *
 | |
| + * This software is provided 'as is' with no explicit or implied warranties
 | |
| + * in respect of its properties, including, but not limited to, correctness
 | |
| + * and/or fitness for purpose.
 | |
| + */
 | |
| +
 | |
| +#ifndef AUTOCONF_INCLUDED
 | |
| +#include <linux/config.h>
 | |
| +#endif
 | |
| +#include <linux/module.h>
 | |
| +#include <linux/init.h>
 | |
| +#include <linux/list.h>
 | |
| +#include <linux/slab.h>
 | |
| +#include <linux/sched.h>
 | |
| +#include <linux/wait.h>
 | |
| +#include <linux/crypto.h>
 | |
| +#include <linux/interrupt.h>
 | |
| +
 | |
| +#include <cryptodev.h>
 | |
| +#include <uio.h>
 | |
| +
 | |
| +static int32_t			 null_id = -1;
 | |
| +static u_int32_t		 null_sesnum = 0;
 | |
| +
 | |
| +static int null_process(device_t, struct cryptop *, int);
 | |
| +static int null_newsession(device_t, u_int32_t *, struct cryptoini *);
 | |
| +static int null_freesession(device_t, u_int64_t);
 | |
| +
 | |
| +#define debug ocfnull_debug
 | |
| +int ocfnull_debug = 0;
 | |
| +module_param(ocfnull_debug, int, 0644);
 | |
| +MODULE_PARM_DESC(ocfnull_debug, "Enable debug");
 | |
| +
 | |
| +/*
 | |
| + * dummy device structure
 | |
| + */
 | |
| +
 | |
| +static struct {
 | |
| +	softc_device_decl	sc_dev;
 | |
| +} nulldev;
 | |
| +
 | |
| +static device_method_t null_methods = {
 | |
| +	/* crypto device methods */
 | |
| +	DEVMETHOD(cryptodev_newsession,	null_newsession),
 | |
| +	DEVMETHOD(cryptodev_freesession,null_freesession),
 | |
| +	DEVMETHOD(cryptodev_process,	null_process),
 | |
| +};
 | |
| +
 | |
| +/*
 | |
| + * Generate a new software session.
 | |
| + */
 | |
| +static int
 | |
| +null_newsession(device_t arg, u_int32_t *sid, struct cryptoini *cri)
 | |
| +{
 | |
| +	dprintk("%s()\n", __FUNCTION__);
 | |
| +	if (sid == NULL || cri == NULL) {
 | |
| +		dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
 | |
| +		return EINVAL;
 | |
| +	}
 | |
| +
 | |
| +	if (null_sesnum == 0)
 | |
| +		null_sesnum++;
 | |
| +	*sid = null_sesnum++;
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +
 | |
| +/*
 | |
| + * Free a session.
 | |
| + */
 | |
| +static int
 | |
| +null_freesession(device_t arg, u_int64_t tid)
 | |
| +{
 | |
| +	u_int32_t sid = CRYPTO_SESID2LID(tid);
 | |
| +
 | |
| +	dprintk("%s()\n", __FUNCTION__);
 | |
| +	if (sid > null_sesnum) {
 | |
| +		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
 | |
| +		return EINVAL;
 | |
| +	}
 | |
| +
 | |
| +	/* Silently accept and return */
 | |
| +	if (sid == 0)
 | |
| +		return 0;
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +
 | |
| +/*
 | |
| + * Process a request.
 | |
| + */
 | |
| +static int
 | |
| +null_process(device_t arg, struct cryptop *crp, int hint)
 | |
| +{
 | |
| +	unsigned int lid;
 | |
| +
 | |
| +	dprintk("%s()\n", __FUNCTION__);
 | |
| +
 | |
| +	/* Sanity check */
 | |
| +	if (crp == NULL) {
 | |
| +		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
 | |
| +		return EINVAL;
 | |
| +	}
 | |
| +
 | |
| +	crp->crp_etype = 0;
 | |
| +
 | |
| +	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
 | |
| +		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
 | |
| +		crp->crp_etype = EINVAL;
 | |
| +		goto done;
 | |
| +	}
 | |
| +
 | |
| +	/*
 | |
| +	 * find the session we are using
 | |
| +	 */
 | |
| +
 | |
| +	lid = crp->crp_sid & 0xffffffff;
 | |
| +	if (lid >= null_sesnum || lid == 0) {
 | |
| +		crp->crp_etype = ENOENT;
 | |
| +		dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
 | |
| +		goto done;
 | |
| +	}
 | |
| +
 | |
| +done:
 | |
| +	crypto_done(crp);
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +
 | |
| +/*
 | |
| + * our driver startup and shutdown routines
 | |
| + */
 | |
| +
 | |
| +static int
 | |
| +null_init(void)
 | |
| +{
 | |
| +	dprintk("%s(%p)\n", __FUNCTION__, null_init);
 | |
| +
 | |
| +	memset(&nulldev, 0, sizeof(nulldev));
 | |
| +	softc_device_init(&nulldev, "ocfnull", 0, null_methods);
 | |
| +
 | |
| +	null_id = crypto_get_driverid(softc_get_device(&nulldev),
 | |
| +				CRYPTOCAP_F_HARDWARE);
 | |
| +	if (null_id < 0)
 | |
| +		panic("ocfnull: crypto device cannot initialize!");
 | |
| +
 | |
| +#define	REGISTER(alg) \
 | |
| +	crypto_register(null_id,alg,0,0)
 | |
| +	REGISTER(CRYPTO_DES_CBC);
 | |
| +	REGISTER(CRYPTO_3DES_CBC);
 | |
| +	REGISTER(CRYPTO_RIJNDAEL128_CBC);
 | |
| +	REGISTER(CRYPTO_MD5);
 | |
| +	REGISTER(CRYPTO_SHA1);
 | |
| +	REGISTER(CRYPTO_MD5_HMAC);
 | |
| +	REGISTER(CRYPTO_SHA1_HMAC);
 | |
| +#undef REGISTER
 | |
| +
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +static void
 | |
| +null_exit(void)
 | |
| +{
 | |
| +	dprintk("%s()\n", __FUNCTION__);
 | |
| +	crypto_unregister_all(null_id);
 | |
| +	null_id = -1;
 | |
| +}
 | |
| +
 | |
| +module_init(null_init);
 | |
| +module_exit(null_exit);
 | |
| +
 | |
| +MODULE_LICENSE("Dual BSD/GPL");
 | |
| +MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
 | |
| +MODULE_DESCRIPTION("ocfnull - claims a lot but does nothing");
 | |
| --- /dev/null
 | |
| +++ b/crypto/ocf/cryptosoft.c
 | |
| @@ -0,0 +1,898 @@
 | |
| +/*
 | |
| + * An OCF module that uses the linux kernel cryptoapi, based on the
 | |
| + * original cryptosoft for BSD by Angelos D. Keromytis (angelos@cis.upenn.edu)
 | |
| + * but is mostly unrecognisable,
 | |
| + *
 | |
| + * Written by David McCullough <david_mccullough@securecomputing.com>
 | |
| + * Copyright (C) 2004-2007 David McCullough
 | |
| + * Copyright (C) 2004-2005 Intel Corporation.
 | |
| + *
 | |
| + * LICENSE TERMS
 | |
| + *
 | |
| + * The free distribution and use of this software in both source and binary
 | |
| + * form is allowed (with or without changes) provided that:
 | |
| + *
 | |
| + *   1. distributions of this source code include the above copyright
 | |
| + *      notice, this list of conditions and the following disclaimer;
 | |
| + *
 | |
| + *   2. distributions in binary form include the above copyright
 | |
| + *      notice, this list of conditions and the following disclaimer
 | |
| + *      in the documentation and/or other associated materials;
 | |
| + *
 | |
| + *   3. the copyright holder's name is not used to endorse products
 | |
| + *      built using this software without specific written permission.
 | |
| + *
 | |
| + * ALTERNATIVELY, provided that this notice is retained in full, this product
 | |
| + * may be distributed under the terms of the GNU General Public License (GPL),
 | |
| + * in which case the provisions of the GPL apply INSTEAD OF those given above.
 | |
| + *
 | |
| + * DISCLAIMER
 | |
| + *
 | |
| + * This software is provided 'as is' with no explicit or implied warranties
 | |
| + * in respect of its properties, including, but not limited to, correctness
 | |
| + * and/or fitness for purpose.
 | |
| + * ---------------------------------------------------------------------------
 | |
| + */
 | |
| +
 | |
| +#ifndef AUTOCONF_INCLUDED
 | |
| +#include <linux/config.h>
 | |
| +#endif
 | |
| +#include <linux/module.h>
 | |
| +#include <linux/init.h>
 | |
| +#include <linux/list.h>
 | |
| +#include <linux/slab.h>
 | |
| +#include <linux/sched.h>
 | |
| +#include <linux/wait.h>
 | |
| +#include <linux/crypto.h>
 | |
| +#include <linux/mm.h>
 | |
| +#include <linux/skbuff.h>
 | |
| +#include <linux/random.h>
 | |
| +#include <asm/scatterlist.h>
 | |
| +
 | |
| +#include <cryptodev.h>
 | |
| +#include <uio.h>
 | |
| +
 | |
| +struct {
 | |
| +	softc_device_decl	sc_dev;
 | |
| +} swcr_softc;
 | |
| +
 | |
| +#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
 | |
| +
 | |
| +/* Software session entry */
 | |
| +
 | |
| +#define SW_TYPE_CIPHER		0
 | |
| +#define SW_TYPE_HMAC		1
 | |
| +#define SW_TYPE_AUTH2		2
 | |
| +#define SW_TYPE_HASH		3
 | |
| +#define SW_TYPE_COMP		4
 | |
| +#define SW_TYPE_BLKCIPHER	5
 | |
| +
 | |
| +struct swcr_data {
 | |
| +	int					sw_type;
 | |
| +	int					sw_alg;
 | |
| +	struct crypto_tfm	*sw_tfm;
 | |
| +	union {
 | |
| +		struct {
 | |
| +			char *sw_key;
 | |
| +			int  sw_klen;
 | |
| +			int  sw_mlen;
 | |
| +		} hmac;
 | |
| +		void *sw_comp_buf;
 | |
| +	} u;
 | |
| +	struct swcr_data	*sw_next;
 | |
| +};
 | |
| +
 | |
| +#ifndef CRYPTO_TFM_MODE_CBC
 | |
| +/*
 | |
| + * As of linux-2.6.21 this is no longer defined, and presumably no longer
 | |
| + * needed to be passed into the crypto core code.
 | |
| + */
 | |
| +#define	CRYPTO_TFM_MODE_CBC	0
 | |
| +#define	CRYPTO_TFM_MODE_ECB	0
 | |
| +#endif
 | |
| +
 | |
| +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
 | |
| +	/*
 | |
| +	 * Linux 2.6.19 introduced a new Crypto API, setup macro's to convert new
 | |
| +	 * API into old API.
 | |
| +	 */
 | |
| +
 | |
| +	/* Symmetric/Block Cipher */
 | |
| +	struct blkcipher_desc
 | |
| +	{
 | |
| +		struct crypto_tfm *tfm;
 | |
| +		void *info;
 | |
| +	};
 | |
| +	#define ecb(X)								#X
 | |
| +	#define cbc(X)								#X
 | |
| +	#define crypto_has_blkcipher(X, Y, Z)		crypto_alg_available(X, 0)
 | |
| +	#define crypto_blkcipher_cast(X)			X
 | |
| +	#define crypto_blkcipher_tfm(X)				X
 | |
| +	#define crypto_alloc_blkcipher(X, Y, Z)		crypto_alloc_tfm(X, mode)
 | |
| +	#define crypto_blkcipher_ivsize(X)			crypto_tfm_alg_ivsize(X)
 | |
| +	#define crypto_blkcipher_blocksize(X)		crypto_tfm_alg_blocksize(X)
 | |
| +	#define crypto_blkcipher_setkey(X, Y, Z)	crypto_cipher_setkey(X, Y, Z)
 | |
| +	#define crypto_blkcipher_encrypt_iv(W, X, Y, Z)	\
 | |
| +				crypto_cipher_encrypt_iv((W)->tfm, X, Y, Z, (u8 *)((W)->info))
 | |
| +	#define crypto_blkcipher_decrypt_iv(W, X, Y, Z)	\
 | |
| +				crypto_cipher_decrypt_iv((W)->tfm, X, Y, Z, (u8 *)((W)->info))
 | |
| +
 | |
| +	/* Hash/HMAC/Digest */
 | |
| +	struct hash_desc
 | |
| +	{
 | |
| +		struct crypto_tfm *tfm;
 | |
| +	};
 | |
| +	#define hmac(X)							#X
 | |
| +	#define crypto_has_hash(X, Y, Z)		crypto_alg_available(X, 0)
 | |
| +	#define crypto_hash_cast(X)				X
 | |
| +	#define crypto_hash_tfm(X)				X
 | |
| +	#define crypto_alloc_hash(X, Y, Z)		crypto_alloc_tfm(X, mode)
 | |
| +	#define crypto_hash_digestsize(X)		crypto_tfm_alg_digestsize(X)
 | |
| +	#define crypto_hash_digest(W, X, Y, Z)	\
 | |
| +				crypto_digest_digest((W)->tfm, X, sg_num, Z)
 | |
| +
 | |
| +	/* Asymmetric Cipher */
 | |
| +	#define crypto_has_cipher(X, Y, Z)		crypto_alg_available(X, 0)
 | |
| +
 | |
| +	/* Compression */
 | |
| +	#define crypto_has_comp(X, Y, Z)		crypto_alg_available(X, 0)
 | |
| +	#define crypto_comp_tfm(X)				X
 | |
| +	#define crypto_comp_cast(X)				X
 | |
| +	#define crypto_alloc_comp(X, Y, Z)		crypto_alloc_tfm(X, mode)
 | |
| +#else
 | |
| +	#define ecb(X)	"ecb(" #X ")"
 | |
| +	#define cbc(X)	"cbc(" #X ")"
 | |
| +	#define hmac(X)	"hmac(" #X ")"
 | |
| +#endif /* if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) */
 | |
| +
 | |
| +struct crypto_details
 | |
| +{
 | |
| +	char *alg_name;
 | |
| +	int mode;
 | |
| +	int sw_type;
 | |
| +};
 | |
| +
 | |
| +/*
 | |
| + * This needs to be kept updated with CRYPTO_xxx list (cryptodev.h).
 | |
| + * If the Algorithm is not supported, then insert a {NULL, 0, 0} entry.
 | |
| + *
 | |
| + * IMPORTANT: The index to the array IS CRYPTO_xxx.
 | |
| + */
 | |
| +static struct crypto_details crypto_details[CRYPTO_ALGORITHM_MAX + 1] = {
 | |
| +	{ NULL,              0,                   0 },
 | |
| +	/* CRYPTO_xxx index starts at 1 */
 | |
| +	{ cbc(des),          CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
 | |
| +	{ cbc(des3_ede),     CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
 | |
| +	{ cbc(blowfish),     CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
 | |
| +	{ cbc(cast5),        CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
 | |
| +	{ cbc(skipjack),     CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
 | |
| +	{ hmac(md5),         0,                   SW_TYPE_HMAC },
 | |
| +	{ hmac(sha1),        0,                   SW_TYPE_HMAC },
 | |
| +	{ hmac(ripemd160),   0,                   SW_TYPE_HMAC },
 | |
| +	{ "md5-kpdk??",      0,                   SW_TYPE_HASH },
 | |
| +	{ "sha1-kpdk??",     0,                   SW_TYPE_HASH },
 | |
| +	{ cbc(aes),          CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
 | |
| +	{ ecb(arc4),         CRYPTO_TFM_MODE_ECB, SW_TYPE_BLKCIPHER },
 | |
| +	{ "md5",             0,                   SW_TYPE_HASH },
 | |
| +	{ "sha1",            0,                   SW_TYPE_HASH },
 | |
| +	{ hmac(digest_null), 0,                   SW_TYPE_HMAC },
 | |
| +	{ cbc(cipher_null),  CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
 | |
| +	{ "deflate",         0,                   SW_TYPE_COMP },
 | |
| +	{ hmac(sha256),      0,                   SW_TYPE_HMAC },
 | |
| +	{ hmac(sha384),      0,                   SW_TYPE_HMAC },
 | |
| +	{ hmac(sha512),      0,                   SW_TYPE_HMAC },
 | |
| +	{ cbc(camellia),     CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
 | |
| +	{ "sha256",          0,                   SW_TYPE_HASH },
 | |
| +	{ "sha384",          0,                   SW_TYPE_HASH },
 | |
| +	{ "sha512",          0,                   SW_TYPE_HASH },
 | |
| +	{ "ripemd160",       0,                   SW_TYPE_HASH },
 | |
| +};
 | |
| +
 | |
| +int32_t swcr_id = -1;
 | |
| +module_param(swcr_id, int, 0444);
 | |
| +MODULE_PARM_DESC(swcr_id, "Read-Only OCF ID for cryptosoft driver");
 | |
| +
 | |
| +int swcr_fail_if_compression_grows = 1;
 | |
| +module_param(swcr_fail_if_compression_grows, int, 0644);
 | |
| +MODULE_PARM_DESC(swcr_fail_if_compression_grows,
 | |
| +                "Treat compression that results in more data as a failure");
 | |
| +
 | |
| +static struct swcr_data **swcr_sessions = NULL;
 | |
| +static u_int32_t swcr_sesnum = 0;
 | |
| +
 | |
| +static	int swcr_process(device_t, struct cryptop *, int);
 | |
| +static	int swcr_newsession(device_t, u_int32_t *, struct cryptoini *);
 | |
| +static	int swcr_freesession(device_t, u_int64_t);
 | |
| +
 | |
| +static device_method_t swcr_methods = {
 | |
| +	/* crypto device methods */
 | |
| +	DEVMETHOD(cryptodev_newsession,	swcr_newsession),
 | |
| +	DEVMETHOD(cryptodev_freesession,swcr_freesession),
 | |
| +	DEVMETHOD(cryptodev_process,	swcr_process),
 | |
| +};
 | |
| +
 | |
| +#define debug swcr_debug
 | |
| +int swcr_debug = 0;
 | |
| +module_param(swcr_debug, int, 0644);
 | |
| +MODULE_PARM_DESC(swcr_debug, "Enable debug");
 | |
| +
 | |
| +/*
 | |
| + * Generate a new software session.
 | |
| + */
 | |
| +static int
 | |
| +swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
 | |
| +{
 | |
| +	struct swcr_data **swd;
 | |
| +	u_int32_t i;
 | |
| +	int error;
 | |
| +	char *algo;
 | |
| +	int mode, sw_type;
 | |
| +
 | |
| +	dprintk("%s()\n", __FUNCTION__);
 | |
| +	if (sid == NULL || cri == NULL) {
 | |
| +		dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
 | |
| +		return EINVAL;
 | |
| +	}
 | |
| +
 | |
| +	if (swcr_sessions) {
 | |
| +		for (i = 1; i < swcr_sesnum; i++)
 | |
| +			if (swcr_sessions[i] == NULL)
 | |
| +				break;
 | |
| +	} else
 | |
| +		i = 1;		/* NB: to silence compiler warning */
 | |
| +
 | |
| +	if (swcr_sessions == NULL || i == swcr_sesnum) {
 | |
| +		if (swcr_sessions == NULL) {
 | |
| +			i = 1; /* We leave swcr_sessions[0] empty */
 | |
| +			swcr_sesnum = CRYPTO_SW_SESSIONS;
 | |
| +		} else
 | |
| +			swcr_sesnum *= 2;
 | |
| +
 | |
| +		swd = kmalloc(swcr_sesnum * sizeof(struct swcr_data *), SLAB_ATOMIC);
 | |
| +		if (swd == NULL) {
 | |
| +			/* Reset session number */
 | |
| +			if (swcr_sesnum == CRYPTO_SW_SESSIONS)
 | |
| +				swcr_sesnum = 0;
 | |
| +			else
 | |
| +				swcr_sesnum /= 2;
 | |
| +			dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
 | |
| +			return ENOBUFS;
 | |
| +		}
 | |
| +		memset(swd, 0, swcr_sesnum * sizeof(struct swcr_data *));
 | |
| +
 | |
| +		/* Copy existing sessions */
 | |
| +		if (swcr_sessions) {
 | |
| +			memcpy(swd, swcr_sessions,
 | |
| +			    (swcr_sesnum / 2) * sizeof(struct swcr_data *));
 | |
| +			kfree(swcr_sessions);
 | |
| +		}
 | |
| +
 | |
| +		swcr_sessions = swd;
 | |
| +	}
 | |
| +
 | |
| +	swd = &swcr_sessions[i];
 | |
| +	*sid = i;
 | |
| +
 | |
| +	while (cri) {
 | |
| +		*swd = (struct swcr_data *) kmalloc(sizeof(struct swcr_data),
 | |
| +				SLAB_ATOMIC);
 | |
| +		if (*swd == NULL) {
 | |
| +			swcr_freesession(NULL, i);
 | |
| +			dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
 | |
| +			return ENOBUFS;
 | |
| +		}
 | |
| +		memset(*swd, 0, sizeof(struct swcr_data));
 | |
| +
 | |
| +		if (cri->cri_alg > CRYPTO_ALGORITHM_MAX) {
 | |
| +			printk("cryptosoft: Unknown algorithm 0x%x\n", cri->cri_alg);
 | |
| +			swcr_freesession(NULL, i);
 | |
| +			return EINVAL;
 | |
| +		}
 | |
| +
 | |
| +		algo = crypto_details[cri->cri_alg].alg_name;
 | |
| +		if (!algo || !*algo) {
 | |
| +			printk("cryptosoft: Unsupported algorithm 0x%x\n", cri->cri_alg);
 | |
| +			swcr_freesession(NULL, i);
 | |
| +			return EINVAL;
 | |
| +		}
 | |
| +
 | |
| +		mode = crypto_details[cri->cri_alg].mode;
 | |
| +		sw_type = crypto_details[cri->cri_alg].sw_type;
 | |
| +
 | |
| +		/* Algorithm specific configuration */
 | |
| +		switch (cri->cri_alg) {
 | |
| +		case CRYPTO_NULL_CBC:
 | |
| +			cri->cri_klen = 0; /* make it work with crypto API */
 | |
| +			break;
 | |
| +		default:
 | |
| +			break;
 | |
| +		}
 | |
| +
 | |
| +		if (sw_type == SW_TYPE_BLKCIPHER) {
 | |
| +			dprintk("%s crypto_alloc_blkcipher(%s, 0x%x)\n", __FUNCTION__,
 | |
| +					algo, mode);
 | |
| +
 | |
| +			(*swd)->sw_tfm = crypto_blkcipher_tfm(
 | |
| +								crypto_alloc_blkcipher(algo, 0,
 | |
| +									CRYPTO_ALG_ASYNC));
 | |
| +			if (!(*swd)->sw_tfm) {
 | |
| +				dprintk("cryptosoft: crypto_alloc_blkcipher failed(%s,0x%x)\n",
 | |
| +						algo,mode);
 | |
| +				swcr_freesession(NULL, i);
 | |
| +				return EINVAL;
 | |
| +			}
 | |
| +
 | |
| +			if (debug) {
 | |
| +				dprintk("%s key:cri->cri_klen=%d,(cri->cri_klen + 7)/8=%d",
 | |
| +						__FUNCTION__,cri->cri_klen,(cri->cri_klen + 7)/8);
 | |
| +				for (i = 0; i < (cri->cri_klen + 7) / 8; i++)
 | |
| +				{
 | |
| +					dprintk("%s0x%x", (i % 8) ? " " : "\n    ",cri->cri_key[i]);
 | |
| +				}
 | |
| +				dprintk("\n");
 | |
| +			}
 | |
| +			error = crypto_blkcipher_setkey(
 | |
| +						crypto_blkcipher_cast((*swd)->sw_tfm), cri->cri_key,
 | |
| +							(cri->cri_klen + 7) / 8);
 | |
| +			if (error) {
 | |
| +				printk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n", error,
 | |
| +						(*swd)->sw_tfm->crt_flags);
 | |
| +				swcr_freesession(NULL, i);
 | |
| +				return error;
 | |
| +			}
 | |
| +		} else if (sw_type == SW_TYPE_HMAC || sw_type == SW_TYPE_HASH) {
 | |
| +			dprintk("%s crypto_alloc_hash(%s, 0x%x)\n", __FUNCTION__,
 | |
| +					algo, mode);
 | |
| +
 | |
| +			(*swd)->sw_tfm = crypto_hash_tfm(
 | |
| +								crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC));
 | |
| +
 | |
| +			if (!(*swd)->sw_tfm) {
 | |
| +				dprintk("cryptosoft: crypto_alloc_hash failed(%s,0x%x)\n",
 | |
| +						algo, mode);
 | |
| +				swcr_freesession(NULL, i);
 | |
| +				return EINVAL;
 | |
| +			}
 | |
| +
 | |
| +			(*swd)->u.hmac.sw_klen = (cri->cri_klen + 7) / 8;
 | |
| +			(*swd)->u.hmac.sw_key = (char *)kmalloc((*swd)->u.hmac.sw_klen,
 | |
| +				SLAB_ATOMIC);
 | |
| +			if ((*swd)->u.hmac.sw_key == NULL) {
 | |
| +				swcr_freesession(NULL, i);
 | |
| +				dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
 | |
| +				return ENOBUFS;
 | |
| +			}
 | |
| +			memcpy((*swd)->u.hmac.sw_key, cri->cri_key, (*swd)->u.hmac.sw_klen);
 | |
| +			if (cri->cri_mlen) {
 | |
| +				(*swd)->u.hmac.sw_mlen = cri->cri_mlen;
 | |
| +			} else {
 | |
| +				(*swd)->u.hmac.sw_mlen =
 | |
| +						crypto_hash_digestsize(
 | |
| +								crypto_hash_cast((*swd)->sw_tfm));
 | |
| +			}
 | |
| +		} else if (sw_type == SW_TYPE_COMP) {
 | |
| +			(*swd)->sw_tfm = crypto_comp_tfm(
 | |
| +					crypto_alloc_comp(algo, 0, CRYPTO_ALG_ASYNC));
 | |
| +			if (!(*swd)->sw_tfm) {
 | |
| +				dprintk("cryptosoft: crypto_alloc_comp failed(%s,0x%x)\n",
 | |
| +						algo, mode);
 | |
| +				swcr_freesession(NULL, i);
 | |
| +				return EINVAL;
 | |
| +			}
 | |
| +			(*swd)->u.sw_comp_buf = kmalloc(CRYPTO_MAX_DATA_LEN, SLAB_ATOMIC);
 | |
| +			if ((*swd)->u.sw_comp_buf == NULL) {
 | |
| +				swcr_freesession(NULL, i);
 | |
| +				dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
 | |
| +				return ENOBUFS;
 | |
| +			}
 | |
| +		} else {
 | |
| +			printk("cryptosoft: Unhandled sw_type %d\n", sw_type);
 | |
| +			swcr_freesession(NULL, i);
 | |
| +			return EINVAL;
 | |
| +		}
 | |
| +
 | |
| +		(*swd)->sw_alg = cri->cri_alg;
 | |
| +		(*swd)->sw_type = sw_type;
 | |
| +
 | |
| +		cri = cri->cri_next;
 | |
| +		swd = &((*swd)->sw_next);
 | |
| +	}
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Free a session.
 | |
| + */
 | |
| +static int
 | |
| +swcr_freesession(device_t dev, u_int64_t tid)
 | |
| +{
 | |
| +	struct swcr_data *swd;
 | |
| +	u_int32_t sid = CRYPTO_SESID2LID(tid);
 | |
| +
 | |
| +	dprintk("%s()\n", __FUNCTION__);
 | |
| +	if (sid > swcr_sesnum || swcr_sessions == NULL ||
 | |
| +			swcr_sessions[sid] == NULL) {
 | |
| +		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
 | |
| +		return(EINVAL);
 | |
| +	}
 | |
| +
 | |
| +	/* Silently accept and return */
 | |
| +	if (sid == 0)
 | |
| +		return(0);
 | |
| +
 | |
| +	while ((swd = swcr_sessions[sid]) != NULL) {
 | |
| +		swcr_sessions[sid] = swd->sw_next;
 | |
| +		if (swd->sw_tfm)
 | |
| +			crypto_free_tfm(swd->sw_tfm);
 | |
| +		if (swd->sw_type == SW_TYPE_COMP) {
 | |
| +			if (swd->u.sw_comp_buf)
 | |
| +				kfree(swd->u.sw_comp_buf);
 | |
| +		} else {
 | |
| +			if (swd->u.hmac.sw_key)
 | |
| +				kfree(swd->u.hmac.sw_key);
 | |
| +		}
 | |
| +		kfree(swd);
 | |
| +	}
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Process a software request.
 | |
| + */
 | |
| +static int
 | |
| +swcr_process(device_t dev, struct cryptop *crp, int hint)
 | |
| +{
 | |
| +	struct cryptodesc *crd;
 | |
| +	struct swcr_data *sw;
 | |
| +	u_int32_t lid;
 | |
| +#define SCATTERLIST_MAX 16
 | |
| +	struct scatterlist sg[SCATTERLIST_MAX];
 | |
| +	int sg_num, sg_len, skip;
 | |
| +	struct sk_buff *skb = NULL;
 | |
| +	struct uio *uiop = NULL;
 | |
| +
 | |
| +	dprintk("%s()\n", __FUNCTION__);
 | |
| +	/* Sanity check */
 | |
| +	if (crp == NULL) {
 | |
| +		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
 | |
| +		return EINVAL;
 | |
| +	}
 | |
| +
 | |
| +	crp->crp_etype = 0;
 | |
| +
 | |
| +	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
 | |
| +		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
 | |
| +		crp->crp_etype = EINVAL;
 | |
| +		goto done;
 | |
| +	}
 | |
| +
 | |
| +	lid = crp->crp_sid & 0xffffffff;
 | |
| +	if (lid >= swcr_sesnum || lid == 0 || swcr_sessions == NULL ||
 | |
| +			swcr_sessions[lid] == NULL) {
 | |
| +		crp->crp_etype = ENOENT;
 | |
| +		dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
 | |
| +		goto done;
 | |
| +	}
 | |
| +
 | |
| +	/*
 | |
| +	 * do some error checking outside of the loop for SKB and IOV processing
 | |
| +	 * this leaves us with valid skb or uiop pointers for later
 | |
| +	 */
 | |
| +	if (crp->crp_flags & CRYPTO_F_SKBUF) {
 | |
| +		skb = (struct sk_buff *) crp->crp_buf;
 | |
| +		if (skb_shinfo(skb)->nr_frags >= SCATTERLIST_MAX) {
 | |
| +			printk("%s,%d: %d nr_frags > SCATTERLIST_MAX", __FILE__, __LINE__,
 | |
| +					skb_shinfo(skb)->nr_frags);
 | |
| +			goto done;
 | |
| +		}
 | |
| +	} else if (crp->crp_flags & CRYPTO_F_IOV) {
 | |
| +		uiop = (struct uio *) crp->crp_buf;
 | |
| +		if (uiop->uio_iovcnt > SCATTERLIST_MAX) {
 | |
| +			printk("%s,%d: %d uio_iovcnt > SCATTERLIST_MAX", __FILE__, __LINE__,
 | |
| +					uiop->uio_iovcnt);
 | |
| +			goto done;
 | |
| +		}
 | |
| +	}
 | |
| +
 | |
| +	/* Go through crypto descriptors, processing as we go */
 | |
| +	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
 | |
| +		/*
 | |
| +		 * Find the crypto context.
 | |
| +		 *
 | |
| +		 * XXX Note that the logic here prevents us from having
 | |
| +		 * XXX the same algorithm multiple times in a session
 | |
| +		 * XXX (or rather, we can but it won't give us the right
 | |
| +		 * XXX results). To do that, we'd need some way of differentiating
 | |
| +		 * XXX between the various instances of an algorithm (so we can
 | |
| +		 * XXX locate the correct crypto context).
 | |
| +		 */
 | |
| +		for (sw = swcr_sessions[lid]; sw && sw->sw_alg != crd->crd_alg;
 | |
| +				sw = sw->sw_next)
 | |
| +			;
 | |
| +
 | |
| +		/* No such context ? */
 | |
| +		if (sw == NULL) {
 | |
| +			crp->crp_etype = EINVAL;
 | |
| +			dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
 | |
| +			goto done;
 | |
| +		}
 | |
| +
 | |
| +		skip = crd->crd_skip;
 | |
| +
 | |
| +		/*
 | |
| +		 * setup the SG list skip from the start of the buffer
 | |
| +		 */
 | |
| +		memset(sg, 0, sizeof(sg));
 | |
| +		if (crp->crp_flags & CRYPTO_F_SKBUF) {
 | |
| +			int i, len;
 | |
| +
 | |
| +			sg_num = 0;
 | |
| +			sg_len = 0;
 | |
| +
 | |
| +			if (skip < skb_headlen(skb)) {
 | |
| +				len = skb_headlen(skb) - skip;
 | |
| +				if (len + sg_len > crd->crd_len)
 | |
| +					len = crd->crd_len - sg_len;
 | |
| +				sg_set_page(&sg[sg_num],
 | |
| +					virt_to_page(skb->data + skip), len,
 | |
| +					offset_in_page(skb->data + skip));
 | |
| +				sg_len += len;
 | |
| +				sg_num++;
 | |
| +				skip = 0;
 | |
| +			} else
 | |
| +				skip -= skb_headlen(skb);
 | |
| +
 | |
| +			for (i = 0; sg_len < crd->crd_len &&
 | |
| +						i < skb_shinfo(skb)->nr_frags &&
 | |
| +						sg_num < SCATTERLIST_MAX; i++) {
 | |
| +				if (skip < skb_shinfo(skb)->frags[i].size) {
 | |
| +					len = skb_shinfo(skb)->frags[i].size - skip;
 | |
| +					if (len + sg_len > crd->crd_len)
 | |
| +						len = crd->crd_len - sg_len;
 | |
| +					sg_set_page(&sg[sg_num],
 | |
| +						skb_shinfo(skb)->frags[i].page,
 | |
| +						len,
 | |
| +						skb_shinfo(skb)->frags[i].page_offset + skip);
 | |
| +					sg_len += len;
 | |
| +					sg_num++;
 | |
| +					skip = 0;
 | |
| +				} else
 | |
| +					skip -= skb_shinfo(skb)->frags[i].size;
 | |
| +			}
 | |
| +		} else if (crp->crp_flags & CRYPTO_F_IOV) {
 | |
| +			int len;
 | |
| +
 | |
| +			sg_len = 0;
 | |
| +			for (sg_num = 0; sg_len <= crd->crd_len &&
 | |
| +					sg_num < uiop->uio_iovcnt &&
 | |
| +					sg_num < SCATTERLIST_MAX; sg_num++) {
 | |
| +				if (skip <= uiop->uio_iov[sg_num].iov_len) {
 | |
| +					len = uiop->uio_iov[sg_num].iov_len - skip;
 | |
| +					if (len + sg_len > crd->crd_len)
 | |
| +						len = crd->crd_len - sg_len;
 | |
| +					sg_set_page(&sg[sg_num],
 | |
| +						virt_to_page(uiop->uio_iov[sg_num].iov_base+skip),
 | |
| +						len,
 | |
| +						offset_in_page(uiop->uio_iov[sg_num].iov_base+skip));
 | |
| +					sg_len += len;
 | |
| +					skip = 0;
 | |
| +				} else
 | |
| +					skip -= uiop->uio_iov[sg_num].iov_len;
 | |
| +			}
 | |
| +		} else {
 | |
| +			sg_len = (crp->crp_ilen - skip);
 | |
| +			if (sg_len > crd->crd_len)
 | |
| +				sg_len = crd->crd_len;
 | |
| +			sg_set_page(&sg[0], virt_to_page(crp->crp_buf + skip),
 | |
| +				sg_len, offset_in_page(crp->crp_buf + skip));
 | |
| +			sg_num = 1;
 | |
| +		}
 | |
| +
 | |
| +
 | |
| +		switch (sw->sw_type) {
 | |
| +		case SW_TYPE_BLKCIPHER: {
 | |
| +			unsigned char iv[EALG_MAX_BLOCK_LEN];
 | |
| +			unsigned char *ivp = iv;
 | |
| +			int ivsize =
 | |
| +				crypto_blkcipher_ivsize(crypto_blkcipher_cast(sw->sw_tfm));
 | |
| +			struct blkcipher_desc desc;
 | |
| +
 | |
| +			if (sg_len < crypto_blkcipher_blocksize(
 | |
| +					crypto_blkcipher_cast(sw->sw_tfm))) {
 | |
| +				crp->crp_etype = EINVAL;
 | |
| +				dprintk("%s,%d: EINVAL len %d < %d\n", __FILE__, __LINE__,
 | |
| +						sg_len, crypto_blkcipher_blocksize(
 | |
| +							crypto_blkcipher_cast(sw->sw_tfm)));
 | |
| +				goto done;
 | |
| +			}
 | |
| +
 | |
| +			if (ivsize > sizeof(iv)) {
 | |
| +				crp->crp_etype = EINVAL;
 | |
| +				dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
 | |
| +				goto done;
 | |
| +			}
 | |
| +
 | |
| +			if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
 | |
| +				int i, error;
 | |
| +
 | |
| +				if (debug) {
 | |
| +					dprintk("%s key:", __FUNCTION__);
 | |
| +					for (i = 0; i < (crd->crd_klen + 7) / 8; i++)
 | |
| +						dprintk("%s0x%x", (i % 8) ? " " : "\n    ",
 | |
| +								crd->crd_key[i]);
 | |
| +					dprintk("\n");
 | |
| +				}
 | |
| +				error = crypto_blkcipher_setkey(
 | |
| +							crypto_blkcipher_cast(sw->sw_tfm), crd->crd_key,
 | |
| +							(crd->crd_klen + 7) / 8);
 | |
| +				if (error) {
 | |
| +					dprintk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n",
 | |
| +							error, sw->sw_tfm->crt_flags);
 | |
| +					crp->crp_etype = -error;
 | |
| +				}
 | |
| +			}
 | |
| +
 | |
| +			memset(&desc, 0, sizeof(desc));
 | |
| +			desc.tfm = crypto_blkcipher_cast(sw->sw_tfm);
 | |
| +
 | |
| +			if (crd->crd_flags & CRD_F_ENCRYPT) { /* encrypt */
 | |
| +
 | |
| +				if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
 | |
| +					ivp = crd->crd_iv;
 | |
| +				} else {
 | |
| +					get_random_bytes(ivp, ivsize);
 | |
| +				}
 | |
| +				/*
 | |
| +				 * do we have to copy the IV back to the buffer ?
 | |
| +				 */
 | |
| +				if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
 | |
| +					crypto_copyback(crp->crp_flags, crp->crp_buf,
 | |
| +							crd->crd_inject, ivsize, (caddr_t)ivp);
 | |
| +				}
 | |
| +				desc.info = ivp;
 | |
| +				crypto_blkcipher_encrypt_iv(&desc, sg, sg, sg_len);
 | |
| +
 | |
| +			} else { /*decrypt */
 | |
| +
 | |
| +				if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
 | |
| +					ivp = crd->crd_iv;
 | |
| +				} else {
 | |
| +					crypto_copydata(crp->crp_flags, crp->crp_buf,
 | |
| +							crd->crd_inject, ivsize, (caddr_t)ivp);
 | |
| +				}
 | |
| +				desc.info = ivp;
 | |
| +				crypto_blkcipher_decrypt_iv(&desc, sg, sg, sg_len);
 | |
| +			}
 | |
| +			} break;
 | |
| +		case SW_TYPE_HMAC:
 | |
| +		case SW_TYPE_HASH:
 | |
| +			{
 | |
| +			char result[HASH_MAX_LEN];
 | |
| +			struct hash_desc desc;
 | |
| +
 | |
| +			/* check we have room for the result */
 | |
| +			if (crp->crp_ilen - crd->crd_inject < sw->u.hmac.sw_mlen) {
 | |
| +				dprintk(
 | |
| +			"cryptosoft: EINVAL crp_ilen=%d, len=%d, inject=%d digestsize=%d\n",
 | |
| +						crp->crp_ilen, crd->crd_skip + sg_len, crd->crd_inject,
 | |
| +						sw->u.hmac.sw_mlen);
 | |
| +				crp->crp_etype = EINVAL;
 | |
| +				goto done;
 | |
| +			}
 | |
| +
 | |
| +			memset(&desc, 0, sizeof(desc));
 | |
| +			desc.tfm = crypto_hash_cast(sw->sw_tfm);
 | |
| +
 | |
| +			memset(result, 0, sizeof(result));
 | |
| +
 | |
| +			if (sw->sw_type == SW_TYPE_HMAC) {
 | |
| +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
 | |
| +				crypto_hmac(sw->sw_tfm, sw->u.hmac.sw_key, &sw->u.hmac.sw_klen,
 | |
| +						sg, sg_num, result);
 | |
| +#else
 | |
| +				crypto_hash_setkey(desc.tfm, sw->u.hmac.sw_key,
 | |
| +						sw->u.hmac.sw_klen);
 | |
| +				crypto_hash_digest(&desc, sg, sg_len, result);
 | |
| +#endif /* #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) */
 | |
| +
 | |
| +			} else { /* SW_TYPE_HASH */
 | |
| +				crypto_hash_digest(&desc, sg, sg_len, result);
 | |
| +			}
 | |
| +
 | |
| +			crypto_copyback(crp->crp_flags, crp->crp_buf,
 | |
| +					crd->crd_inject, sw->u.hmac.sw_mlen, result);
 | |
| +			}
 | |
| +			break;
 | |
| +
 | |
| +		case SW_TYPE_COMP: {
 | |
| +			void *ibuf = NULL;
 | |
| +			void *obuf = sw->u.sw_comp_buf;
 | |
| +			int ilen = sg_len, olen = CRYPTO_MAX_DATA_LEN;
 | |
| +			int ret = 0;
 | |
| +
 | |
| +			/*
 | |
| +			 * we need to use an additional copy if there is more than one
 | |
| +			 * input chunk since the kernel comp routines do not handle
 | |
| +			 * SG yet.  Otherwise we just use the input buffer as is.
 | |
| +			 * Rather than allocate another buffer we just split the tmp
 | |
| +			 * buffer we already have.
 | |
| +			 * Perhaps we should just use zlib directly ?
 | |
| +			 */
 | |
| +			if (sg_num > 1) {
 | |
| +				int blk;
 | |
| +
 | |
| +				ibuf = obuf;
 | |
| +				for (blk = 0; blk < sg_num; blk++) {
 | |
| +					memcpy(obuf, sg_virt(&sg[blk]),
 | |
| +							sg[blk].length);
 | |
| +					obuf += sg[blk].length;
 | |
| +				}
 | |
| +				olen -= sg_len;
 | |
| +			} else
 | |
| +				ibuf = sg_virt(&sg[0]);
 | |
| +
 | |
| +			if (crd->crd_flags & CRD_F_ENCRYPT) { /* compress */
 | |
| +				ret = crypto_comp_compress(crypto_comp_cast(sw->sw_tfm),
 | |
| +						ibuf, ilen, obuf, &olen);
 | |
| +				if (!ret && olen > crd->crd_len) {
 | |
| +					dprintk("cryptosoft: ERANGE compress %d into %d\n",
 | |
| +							crd->crd_len, olen);
 | |
| +					if (swcr_fail_if_compression_grows)
 | |
| +						ret = ERANGE;
 | |
| +				}
 | |
| +			} else { /* decompress */
 | |
| +				ret = crypto_comp_decompress(crypto_comp_cast(sw->sw_tfm),
 | |
| +						ibuf, ilen, obuf, &olen);
 | |
| +				if (!ret && (olen + crd->crd_inject) > crp->crp_olen) {
 | |
| +					dprintk("cryptosoft: ETOOSMALL decompress %d into %d, "
 | |
| +							"space for %d,at offset %d\n",
 | |
| +							crd->crd_len, olen, crp->crp_olen, crd->crd_inject);
 | |
| +					ret = ETOOSMALL;
 | |
| +				}
 | |
| +			}
 | |
| +			if (ret)
 | |
| +				dprintk("%s,%d: ret = %d\n", __FILE__, __LINE__, ret);
 | |
| +
 | |
| +			/*
 | |
| +			 * on success copy result back,
 | |
| +			 * linux crpyto API returns -errno,  we need to fix that
 | |
| +			 */
 | |
| +			crp->crp_etype = ret < 0 ? -ret : ret;
 | |
| +			if (ret == 0) {
 | |
| +				/* copy back the result and return it's size */
 | |
| +				crypto_copyback(crp->crp_flags, crp->crp_buf,
 | |
| +						crd->crd_inject, olen, obuf);
 | |
| +				crp->crp_olen = olen;
 | |
| +			}
 | |
| +
 | |
| +
 | |
| +			} break;
 | |
| +
 | |
| +		default:
 | |
| +			/* Unknown/unsupported algorithm */
 | |
| +			dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
 | |
| +			crp->crp_etype = EINVAL;
 | |
| +			goto done;
 | |
| +		}
 | |
| +	}
 | |
| +
 | |
| +done:
 | |
| +	crypto_done(crp);
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +static int
 | |
| +cryptosoft_init(void)
 | |
| +{
 | |
| +	int i, sw_type, mode;
 | |
| +	char *algo;
 | |
| +
 | |
| +	dprintk("%s(%p)\n", __FUNCTION__, cryptosoft_init);
 | |
| +
 | |
| +	softc_device_init(&swcr_softc, "cryptosoft", 0, swcr_methods);
 | |
| +
 | |
| +	swcr_id = crypto_get_driverid(softc_get_device(&swcr_softc),
 | |
| +			CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
 | |
| +	if (swcr_id < 0) {
 | |
| +		printk("Software crypto device cannot initialize!");
 | |
| +		return -ENODEV;
 | |
| +	}
 | |
| +
 | |
| +#define	REGISTER(alg) \
 | |
| +		crypto_register(swcr_id, alg, 0,0);
 | |
| +
 | |
| +	for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX; ++i)
 | |
| +	{
 | |
| +
 | |
| +		algo = crypto_details[i].alg_name;
 | |
| +		if (!algo || !*algo)
 | |
| +		{
 | |
| +			dprintk("%s:Algorithm %d not supported\n", __FUNCTION__, i);
 | |
| +			continue;
 | |
| +		}
 | |
| +
 | |
| +		mode = crypto_details[i].mode;
 | |
| +		sw_type = crypto_details[i].sw_type;
 | |
| +
 | |
| +		switch (sw_type)
 | |
| +		{
 | |
| +			case SW_TYPE_CIPHER:
 | |
| +				if (crypto_has_cipher(algo, 0, CRYPTO_ALG_ASYNC))
 | |
| +				{
 | |
| +					REGISTER(i);
 | |
| +				}
 | |
| +				else
 | |
| +				{
 | |
| +					dprintk("%s:CIPHER algorithm %d:'%s' not supported\n",
 | |
| +								__FUNCTION__, i, algo);
 | |
| +				}
 | |
| +				break;
 | |
| +			case SW_TYPE_HMAC:
 | |
| +				if (crypto_has_hash(algo, 0, CRYPTO_ALG_ASYNC))
 | |
| +				{
 | |
| +					REGISTER(i);
 | |
| +				}
 | |
| +				else
 | |
| +				{
 | |
| +					dprintk("%s:HMAC algorithm %d:'%s' not supported\n",
 | |
| +								__FUNCTION__, i, algo);
 | |
| +				}
 | |
| +				break;
 | |
| +			case SW_TYPE_HASH:
 | |
| +				if (crypto_has_hash(algo, 0, CRYPTO_ALG_ASYNC))
 | |
| +				{
 | |
| +					REGISTER(i);
 | |
| +				}
 | |
| +				else
 | |
| +				{
 | |
| +					dprintk("%s:HASH algorithm %d:'%s' not supported\n",
 | |
| +								__FUNCTION__, i, algo);
 | |
| +				}
 | |
| +				break;
 | |
| +			case SW_TYPE_COMP:
 | |
| +				if (crypto_has_comp(algo, 0, CRYPTO_ALG_ASYNC))
 | |
| +				{
 | |
| +					REGISTER(i);
 | |
| +				}
 | |
| +				else
 | |
| +				{
 | |
| +					dprintk("%s:COMP algorithm %d:'%s' not supported\n",
 | |
| +								__FUNCTION__, i, algo);
 | |
| +				}
 | |
| +				break;
 | |
| +			case SW_TYPE_BLKCIPHER:
 | |
| +				if (crypto_has_blkcipher(algo, 0, CRYPTO_ALG_ASYNC))
 | |
| +				{
 | |
| +					REGISTER(i);
 | |
| +				}
 | |
| +				else
 | |
| +				{
 | |
| +					dprintk("%s:BLKCIPHER algorithm %d:'%s' not supported\n",
 | |
| +								__FUNCTION__, i, algo);
 | |
| +				}
 | |
| +				break;
 | |
| +			default:
 | |
| +				dprintk(
 | |
| +				"%s:Algorithm Type %d not supported (algorithm %d:'%s')\n",
 | |
| +					__FUNCTION__, sw_type, i, algo);
 | |
| +				break;
 | |
| +		}
 | |
| +	}
 | |
| +
 | |
| +	return(0);
 | |
| +}
 | |
| +
 | |
| +static void
 | |
| +cryptosoft_exit(void)
 | |
| +{
 | |
| +	dprintk("%s()\n", __FUNCTION__);
 | |
| +	crypto_unregister_all(swcr_id);
 | |
| +	swcr_id = -1;
 | |
| +}
 | |
| +
 | |
| +module_init(cryptosoft_init);
 | |
| +module_exit(cryptosoft_exit);
 | |
| +
 | |
| +MODULE_LICENSE("Dual BSD/GPL");
 | |
| +MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
 | |
| +MODULE_DESCRIPTION("Cryptosoft (OCF module for kernel crypto)");
 | |
| --- /dev/null
 | |
| +++ b/crypto/ocf/rndtest.c
 | |
| @@ -0,0 +1,300 @@
 | |
| +/*	$OpenBSD$	*/
 | |
| +
 | |
| +/*
 | |
| + * OCF/Linux port done by David McCullough <david_mccullough@securecomputing.com>
 | |
| + * Copyright (C) 2006-2007 David McCullough
 | |
| + * Copyright (C) 2004-2005 Intel Corporation.
 | |
| + * The license and original author are listed below.
 | |
| + *
 | |
| + * Copyright (c) 2002 Jason L. Wright (jason@thought.net)
 | |
| + * All rights reserved.
 | |
| + *
 | |
| + * Redistribution and use in source and binary forms, with or without
 | |
| + * modification, are permitted provided that the following conditions
 | |
| + * are met:
 | |
| + * 1. Redistributions of source code must retain the above copyright
 | |
| + *    notice, this list of conditions and the following disclaimer.
 | |
| + * 2. Redistributions in binary form must reproduce the above copyright
 | |
| + *    notice, this list of conditions and the following disclaimer in the
 | |
| + *    documentation and/or other materials provided with the distribution.
 | |
| + * 3. All advertising materials mentioning features or use of this software
 | |
| + *    must display the following acknowledgement:
 | |
| + *	This product includes software developed by Jason L. Wright
 | |
| + * 4. The name of the author may not be used to endorse or promote products
 | |
| + *    derived from this software without specific prior written permission.
 | |
| + *
 | |
| + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 | |
| + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 | |
| + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 | |
| + * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 | |
| + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 | |
| + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 | |
| + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 | |
| + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 | |
| + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 | |
| + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 | |
| + * POSSIBILITY OF SUCH DAMAGE.
 | |
| + */
 | |
| +
 | |
| +#ifndef AUTOCONF_INCLUDED
 | |
| +#include <linux/config.h>
 | |
| +#endif
 | |
| +#include <linux/module.h>
 | |
| +#include <linux/list.h>
 | |
| +#include <linux/wait.h>
 | |
| +#include <linux/time.h>
 | |
| +#include <linux/version.h>
 | |
| +#include <linux/unistd.h>
 | |
| +#include <linux/kernel.h>
 | |
| +#include <linux/string.h>
 | |
| +#include <linux/time.h>
 | |
| +#include <cryptodev.h>
 | |
| +#include "rndtest.h"
 | |
| +
 | |
| +static struct rndtest_stats rndstats;
 | |
| +
 | |
| +static	void rndtest_test(struct rndtest_state *);
 | |
| +
 | |
| +/* The tests themselves */
 | |
| +static	int rndtest_monobit(struct rndtest_state *);
 | |
| +static	int rndtest_runs(struct rndtest_state *);
 | |
| +static	int rndtest_longruns(struct rndtest_state *);
 | |
| +static	int rndtest_chi_4(struct rndtest_state *);
 | |
| +
 | |
| +static	int rndtest_runs_check(struct rndtest_state *, int, int *);
 | |
| +static	void rndtest_runs_record(struct rndtest_state *, int, int *);
 | |
| +
 | |
| +static const struct rndtest_testfunc {
 | |
| +	int (*test)(struct rndtest_state *);
 | |
| +} rndtest_funcs[] = {
 | |
| +	{ rndtest_monobit },
 | |
| +	{ rndtest_runs },
 | |
| +	{ rndtest_chi_4 },
 | |
| +	{ rndtest_longruns },
 | |
| +};
 | |
| +
 | |
| +#define	RNDTEST_NTESTS	(sizeof(rndtest_funcs)/sizeof(rndtest_funcs[0]))
 | |
| +
 | |
| +static void
 | |
| +rndtest_test(struct rndtest_state *rsp)
 | |
| +{
 | |
| +	int i, rv = 0;
 | |
| +
 | |
| +	rndstats.rst_tests++;
 | |
| +	for (i = 0; i < RNDTEST_NTESTS; i++)
 | |
| +		rv |= (*rndtest_funcs[i].test)(rsp);
 | |
| +	rsp->rs_discard = (rv != 0);
 | |
| +}
 | |
| +
 | |
| +
 | |
| +extern int crypto_debug;
 | |
| +#define rndtest_verbose 2
 | |
| +#define rndtest_report(rsp, failure, fmt, a...) \
 | |
| +	{ if (failure || crypto_debug) { printk("rng_test: " fmt "\n", a); } else; }
 | |
| +
 | |
| +#define	RNDTEST_MONOBIT_MINONES	9725
 | |
| +#define	RNDTEST_MONOBIT_MAXONES	10275
 | |
| +
 | |
| +static int
 | |
| +rndtest_monobit(struct rndtest_state *rsp)
 | |
| +{
 | |
| +	int i, ones = 0, j;
 | |
| +	u_int8_t r;
 | |
| +
 | |
| +	for (i = 0; i < RNDTEST_NBYTES; i++) {
 | |
| +		r = rsp->rs_buf[i];
 | |
| +		for (j = 0; j < 8; j++, r <<= 1)
 | |
| +			if (r & 0x80)
 | |
| +				ones++;
 | |
| +	}
 | |
| +	if (ones > RNDTEST_MONOBIT_MINONES &&
 | |
| +	    ones < RNDTEST_MONOBIT_MAXONES) {
 | |
| +		if (rndtest_verbose > 1)
 | |
| +			rndtest_report(rsp, 0, "monobit pass (%d < %d < %d)",
 | |
| +			    RNDTEST_MONOBIT_MINONES, ones,
 | |
| +			    RNDTEST_MONOBIT_MAXONES);
 | |
| +		return (0);
 | |
| +	} else {
 | |
| +		if (rndtest_verbose)
 | |
| +			rndtest_report(rsp, 1,
 | |
| +			    "monobit failed (%d ones)", ones);
 | |
| +		rndstats.rst_monobit++;
 | |
| +		return (-1);
 | |
| +	}
 | |
| +}
 | |
| +
 | |
| +#define	RNDTEST_RUNS_NINTERVAL	6
 | |
| +
 | |
| +static const struct rndtest_runs_tabs {
 | |
| +	u_int16_t min, max;
 | |
| +} rndtest_runs_tab[] = {
 | |
| +	{ 2343, 2657 },
 | |
| +	{ 1135, 1365 },
 | |
| +	{ 542, 708 },
 | |
| +	{ 251, 373 },
 | |
| +	{ 111, 201 },
 | |
| +	{ 111, 201 },
 | |
| +};
 | |
| +
 | |
| +static int
 | |
| +rndtest_runs(struct rndtest_state *rsp)
 | |
| +{
 | |
| +	int i, j, ones, zeros, rv = 0;
 | |
| +	int onei[RNDTEST_RUNS_NINTERVAL], zeroi[RNDTEST_RUNS_NINTERVAL];
 | |
| +	u_int8_t c;
 | |
| +
 | |
| +	bzero(onei, sizeof(onei));
 | |
| +	bzero(zeroi, sizeof(zeroi));
 | |
| +	ones = zeros = 0;
 | |
| +	for (i = 0; i < RNDTEST_NBYTES; i++) {
 | |
| +		c = rsp->rs_buf[i];
 | |
| +		for (j = 0; j < 8; j++, c <<= 1) {
 | |
| +			if (c & 0x80) {
 | |
| +				ones++;
 | |
| +				rndtest_runs_record(rsp, zeros, zeroi);
 | |
| +				zeros = 0;
 | |
| +			} else {
 | |
| +				zeros++;
 | |
| +				rndtest_runs_record(rsp, ones, onei);
 | |
| +				ones = 0;
 | |
| +			}
 | |
| +		}
 | |
| +	}
 | |
| +	rndtest_runs_record(rsp, ones, onei);
 | |
| +	rndtest_runs_record(rsp, zeros, zeroi);
 | |
| +
 | |
| +	rv |= rndtest_runs_check(rsp, 0, zeroi);
 | |
| +	rv |= rndtest_runs_check(rsp, 1, onei);
 | |
| +
 | |
| +	if (rv)
 | |
| +		rndstats.rst_runs++;
 | |
| +
 | |
| +	return (rv);
 | |
| +}
 | |
| +
 | |
| +static void
 | |
| +rndtest_runs_record(struct rndtest_state *rsp, int len, int *intrv)
 | |
| +{
 | |
| +	if (len == 0)
 | |
| +		return;
 | |
| +	if (len > RNDTEST_RUNS_NINTERVAL)
 | |
| +		len = RNDTEST_RUNS_NINTERVAL;
 | |
| +	len -= 1;
 | |
| +	intrv[len]++;
 | |
| +}
 | |
| +
 | |
| +static int
 | |
| +rndtest_runs_check(struct rndtest_state *rsp, int val, int *src)
 | |
| +{
 | |
| +	int i, rv = 0;
 | |
| +
 | |
| +	for (i = 0; i < RNDTEST_RUNS_NINTERVAL; i++) {
 | |
| +		if (src[i] < rndtest_runs_tab[i].min ||
 | |
| +		    src[i] > rndtest_runs_tab[i].max) {
 | |
| +			rndtest_report(rsp, 1,
 | |
| +			    "%s interval %d failed (%d, %d-%d)",
 | |
| +			    val ? "ones" : "zeros",
 | |
| +			    i + 1, src[i], rndtest_runs_tab[i].min,
 | |
| +			    rndtest_runs_tab[i].max);
 | |
| +			rv = -1;
 | |
| +		} else {
 | |
| +			rndtest_report(rsp, 0,
 | |
| +			    "runs pass %s interval %d (%d < %d < %d)",
 | |
| +			    val ? "ones" : "zeros",
 | |
| +			    i + 1, rndtest_runs_tab[i].min, src[i],
 | |
| +			    rndtest_runs_tab[i].max);
 | |
| +		}
 | |
| +	}
 | |
| +	return (rv);
 | |
| +}
 | |
| +
 | |
| +static int
 | |
| +rndtest_longruns(struct rndtest_state *rsp)
 | |
| +{
 | |
| +	int i, j, ones = 0, zeros = 0, maxones = 0, maxzeros = 0;
 | |
| +	u_int8_t c;
 | |
| +
 | |
| +	for (i = 0; i < RNDTEST_NBYTES; i++) {
 | |
| +		c = rsp->rs_buf[i];
 | |
| +		for (j = 0; j < 8; j++, c <<= 1) {
 | |
| +			if (c & 0x80) {
 | |
| +				zeros = 0;
 | |
| +				ones++;
 | |
| +				if (ones > maxones)
 | |
| +					maxones = ones;
 | |
| +			} else {
 | |
| +				ones = 0;
 | |
| +				zeros++;
 | |
| +				if (zeros > maxzeros)
 | |
| +					maxzeros = zeros;
 | |
| +			}
 | |
| +		}
 | |
| +	}
 | |
| +
 | |
| +	if (maxones < 26 && maxzeros < 26) {
 | |
| +		rndtest_report(rsp, 0, "longruns pass (%d ones, %d zeros)",
 | |
| +			maxones, maxzeros);
 | |
| +		return (0);
 | |
| +	} else {
 | |
| +		rndtest_report(rsp, 1, "longruns fail (%d ones, %d zeros)",
 | |
| +			maxones, maxzeros);
 | |
| +		rndstats.rst_longruns++;
 | |
| +		return (-1);
 | |
| +	}
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * chi^2 test over 4 bits: (this is called the poker test in FIPS 140-2,
 | |
| + * but it is really the chi^2 test over 4 bits (the poker test as described
 | |
| + * by Knuth vol 2 is something different, and I take him as authoritative
 | |
| + * on nomenclature over NIST).
 | |
| + */
 | |
| +#define	RNDTEST_CHI4_K	16
 | |
| +#define	RNDTEST_CHI4_K_MASK	(RNDTEST_CHI4_K - 1)
 | |
| +
 | |
| +/*
 | |
| + * The unnormalized values are used so that we don't have to worry about
 | |
| + * fractional precision.  The "real" value is found by:
 | |
| + *	(V - 1562500) * (16 / 5000) = Vn   (where V is the unnormalized value)
 | |
| + */
 | |
| +#define	RNDTEST_CHI4_VMIN	1563181		/* 2.1792 */
 | |
| +#define	RNDTEST_CHI4_VMAX	1576929		/* 46.1728 */
 | |
| +
 | |
| +static int
 | |
| +rndtest_chi_4(struct rndtest_state *rsp)
 | |
| +{
 | |
| +	unsigned int freq[RNDTEST_CHI4_K], i, sum;
 | |
| +
 | |
| +	for (i = 0; i < RNDTEST_CHI4_K; i++)
 | |
| +		freq[i] = 0;
 | |
| +
 | |
| +	/* Get number of occurances of each 4 bit pattern */
 | |
| +	for (i = 0; i < RNDTEST_NBYTES; i++) {
 | |
| +		freq[(rsp->rs_buf[i] >> 4) & RNDTEST_CHI4_K_MASK]++;
 | |
| +		freq[(rsp->rs_buf[i] >> 0) & RNDTEST_CHI4_K_MASK]++;
 | |
| +	}
 | |
| +
 | |
| +	for (i = 0, sum = 0; i < RNDTEST_CHI4_K; i++)
 | |
| +		sum += freq[i] * freq[i];
 | |
| +
 | |
| +	if (sum >= 1563181 && sum <= 1576929) {
 | |
| +		rndtest_report(rsp, 0, "chi^2(4): pass (sum %u)", sum);
 | |
| +		return (0);
 | |
| +	} else {
 | |
| +		rndtest_report(rsp, 1, "chi^2(4): failed (sum %u)", sum);
 | |
| +		rndstats.rst_chi++;
 | |
| +		return (-1);
 | |
| +	}
 | |
| +}
 | |
| +
 | |
| +int
 | |
| +rndtest_buf(unsigned char *buf)
 | |
| +{
 | |
| +	struct rndtest_state rsp;
 | |
| +
 | |
| +	memset(&rsp, 0, sizeof(rsp));
 | |
| +	rsp.rs_buf = buf;
 | |
| +	rndtest_test(&rsp);
 | |
| +	return(rsp.rs_discard);
 | |
| +}
 | |
| +
 | |
| --- /dev/null
 | |
| +++ b/crypto/ocf/rndtest.h
 | |
| @@ -0,0 +1,54 @@
 | |
| +/*	$FreeBSD: src/sys/dev/rndtest/rndtest.h,v 1.1 2003/03/11 22:54:44 sam Exp $	*/
 | |
| +/*	$OpenBSD$	*/
 | |
| +
 | |
| +/*
 | |
| + * Copyright (c) 2002 Jason L. Wright (jason@thought.net)
 | |
| + * All rights reserved.
 | |
| + *
 | |
| + * Redistribution and use in source and binary forms, with or without
 | |
| + * modification, are permitted provided that the following conditions
 | |
| + * are met:
 | |
| + * 1. Redistributions of source code must retain the above copyright
 | |
| + *    notice, this list of conditions and the following disclaimer.
 | |
| + * 2. Redistributions in binary form must reproduce the above copyright
 | |
| + *    notice, this list of conditions and the following disclaimer in the
 | |
| + *    documentation and/or other materials provided with the distribution.
 | |
| + * 3. All advertising materials mentioning features or use of this software
 | |
| + *    must display the following acknowledgement:
 | |
| + *	This product includes software developed by Jason L. Wright
 | |
| + * 4. The name of the author may not be used to endorse or promote products
 | |
| + *    derived from this software without specific prior written permission.
 | |
| + *
 | |
| + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 | |
| + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 | |
| + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 | |
| + * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 | |
| + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 | |
| + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 | |
| + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 | |
| + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 | |
| + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 | |
| + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 | |
| + * POSSIBILITY OF SUCH DAMAGE.
 | |
| + */
 | |
| +
 | |
| +
 | |
| +/* Some of the tests depend on these values */
 | |
| +#define	RNDTEST_NBYTES	2500
 | |
| +#define	RNDTEST_NBITS	(8 * RNDTEST_NBYTES)
 | |
| +
 | |
| +struct rndtest_state {
 | |
| +	int		rs_discard;	/* discard/accept random data */
 | |
| +	u_int8_t	*rs_buf;
 | |
| +};
 | |
| +
 | |
| +struct rndtest_stats {
 | |
| +	u_int32_t	rst_discard;	/* number of bytes discarded */
 | |
| +	u_int32_t	rst_tests;	/* number of test runs */
 | |
| +	u_int32_t	rst_monobit;	/* monobit test failures */
 | |
| +	u_int32_t	rst_runs;	/* 0/1 runs failures */
 | |
| +	u_int32_t	rst_longruns;	/* longruns failures */
 | |
| +	u_int32_t	rst_chi;	/* chi^2 failures */
 | |
| +};
 | |
| +
 | |
| +extern int rndtest_buf(unsigned char *buf);
 | |
| --- /dev/null
 | |
| +++ b/crypto/ocf/ocf-compat.h
 | |
| @@ -0,0 +1,268 @@
 | |
| +#ifndef _BSD_COMPAT_H_
 | |
| +#define _BSD_COMPAT_H_ 1
 | |
| +/****************************************************************************/
 | |
| +/*
 | |
| + * Provide compat routines for older linux kernels and BSD kernels
 | |
| + *
 | |
| + * Written by David McCullough <david_mccullough@securecomputing.com>
 | |
| + * Copyright (C) 2007 David McCullough <david_mccullough@securecomputing.com>
 | |
| + *
 | |
| + * LICENSE TERMS
 | |
| + *
 | |
| + * The free distribution and use of this software in both source and binary
 | |
| + * form is allowed (with or without changes) provided that:
 | |
| + *
 | |
| + *   1. distributions of this source code include the above copyright
 | |
| + *      notice, this list of conditions and the following disclaimer;
 | |
| + *
 | |
| + *   2. distributions in binary form include the above copyright
 | |
| + *      notice, this list of conditions and the following disclaimer
 | |
| + *      in the documentation and/or other associated materials;
 | |
| + *
 | |
| + *   3. the copyright holder's name is not used to endorse products
 | |
| + *      built using this software without specific written permission.
 | |
| + *
 | |
| + * ALTERNATIVELY, provided that this notice is retained in full, this file
 | |
| + * may be distributed under the terms of the GNU General Public License (GPL),
 | |
| + * in which case the provisions of the GPL apply INSTEAD OF those given above.
 | |
| + *
 | |
| + * DISCLAIMER
 | |
| + *
 | |
| + * This software is provided 'as is' with no explicit or implied warranties
 | |
| + * in respect of its properties, including, but not limited to, correctness
 | |
| + * and/or fitness for purpose.
 | |
| + */
 | |
| +/****************************************************************************/
 | |
| +#ifdef __KERNEL__
 | |
| +/*
 | |
| + * fake some BSD driver interface stuff specifically for OCF use
 | |
| + */
 | |
| +
 | |
| +typedef struct ocf_device *device_t;
 | |
| +
 | |
| +typedef struct {
 | |
| +	int (*cryptodev_newsession)(device_t dev, u_int32_t *sidp, struct cryptoini *cri);
 | |
| +	int (*cryptodev_freesession)(device_t dev, u_int64_t tid);
 | |
| +	int (*cryptodev_process)(device_t dev, struct cryptop *crp, int hint);
 | |
| +	int (*cryptodev_kprocess)(device_t dev, struct cryptkop *krp, int hint);
 | |
| +} device_method_t;
 | |
| +#define DEVMETHOD(id, func)	id: func
 | |
| +
 | |
| +struct ocf_device {
 | |
| +	char name[32];		/* the driver name */
 | |
| +	char nameunit[32];	/* the driver name + HW instance */
 | |
| +	int  unit;
 | |
| +	device_method_t	methods;
 | |
| +	void *softc;
 | |
| +};
 | |
| +
 | |
| +#define CRYPTODEV_NEWSESSION(dev, sid, cri) \
 | |
| +	((*(dev)->methods.cryptodev_newsession)(dev,sid,cri))
 | |
| +#define CRYPTODEV_FREESESSION(dev, sid) \
 | |
| +	((*(dev)->methods.cryptodev_freesession)(dev, sid))
 | |
| +#define CRYPTODEV_PROCESS(dev, crp, hint) \
 | |
| +	((*(dev)->methods.cryptodev_process)(dev, crp, hint))
 | |
| +#define CRYPTODEV_KPROCESS(dev, krp, hint) \
 | |
| +	((*(dev)->methods.cryptodev_kprocess)(dev, krp, hint))
 | |
| +
 | |
| +#define device_get_name(dev)	((dev)->name)
 | |
| +#define device_get_nameunit(dev)	((dev)->nameunit)
 | |
| +#define device_get_unit(dev)	((dev)->unit)
 | |
| +#define device_get_softc(dev)	((dev)->softc)
 | |
| +
 | |
| +#define	softc_device_decl \
 | |
| +		struct ocf_device _device; \
 | |
| +		device_t
 | |
| +
 | |
| +#define	softc_device_init(_sc, _name, _unit, _methods) \
 | |
| +	if (1) {\
 | |
| +	strncpy((_sc)->_device.name, _name, sizeof((_sc)->_device.name) - 1); \
 | |
| +	snprintf((_sc)->_device.nameunit, sizeof((_sc)->_device.name), "%s%d", _name, _unit); \
 | |
| +	(_sc)->_device.unit = _unit; \
 | |
| +	(_sc)->_device.methods = _methods; \
 | |
| +	(_sc)->_device.softc = (void *) _sc; \
 | |
| +	*(device_t *)((softc_get_device(_sc))+1) = &(_sc)->_device; \
 | |
| +	} else
 | |
| +
 | |
| +#define	softc_get_device(_sc)	(&(_sc)->_device)
 | |
| +
 | |
| +/*
 | |
| + * iomem support for 2.4 and 2.6 kernels
 | |
| + */
 | |
| +#include <linux/version.h>
 | |
| +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
 | |
| +#define ocf_iomem_t	unsigned long
 | |
| +
 | |
| +/*
 | |
| + * implement simple workqueue like support for older kernels
 | |
| + */
 | |
| +
 | |
| +#include <linux/tqueue.h>
 | |
| +
 | |
| +#define work_struct tq_struct
 | |
| +
 | |
| +#define INIT_WORK(wp, fp, ap) \
 | |
| +	do { \
 | |
| +		(wp)->sync = 0; \
 | |
| +		(wp)->routine = (fp); \
 | |
| +		(wp)->data = (ap); \
 | |
| +	} while (0)
 | |
| +
 | |
| +#define schedule_work(wp) \
 | |
| +	do { \
 | |
| +		queue_task((wp), &tq_immediate); \
 | |
| +		mark_bh(IMMEDIATE_BH); \
 | |
| +	} while (0)
 | |
| +
 | |
| +#define flush_scheduled_work()	run_task_queue(&tq_immediate)
 | |
| +
 | |
| +#else
 | |
| +#define ocf_iomem_t	void __iomem *
 | |
| +
 | |
| +#include <linux/workqueue.h>
 | |
| +
 | |
| +#endif
 | |
| +
 | |
| +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
 | |
| +#define files_fdtable(files)	(files)
 | |
| +#endif
 | |
| +
 | |
| +#ifdef MODULE_PARM
 | |
| +#undef module_param	/* just in case */
 | |
| +#define	module_param(a,b,c)		MODULE_PARM(a,"i")
 | |
| +#endif
 | |
| +
 | |
| +#define bzero(s,l)		memset(s,0,l)
 | |
| +#define bcopy(s,d,l)	memcpy(d,s,l)
 | |
| +#define bcmp(x, y, l)	memcmp(x,y,l)
 | |
| +
 | |
| +#define MIN(x,y)	((x) < (y) ? (x) : (y))
 | |
| +
 | |
| +#define device_printf(dev, a...) ({ \
 | |
| +				printk("%s: ", device_get_nameunit(dev)); printk(a); \
 | |
| +			})
 | |
| +
 | |
| +#undef printf
 | |
| +#define printf(fmt...)	printk(fmt)
 | |
| +
 | |
| +#define KASSERT(c,p)	if (!(c)) { printk p ; } else
 | |
| +
 | |
| +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
 | |
| +#define ocf_daemonize(str) \
 | |
| +	daemonize(); \
 | |
| +	spin_lock_irq(¤t->sigmask_lock); \
 | |
| +	sigemptyset(¤t->blocked); \
 | |
| +	recalc_sigpending(current); \
 | |
| +	spin_unlock_irq(¤t->sigmask_lock); \
 | |
| +	sprintf(current->comm, str);
 | |
| +#else
 | |
| +#define ocf_daemonize(str) daemonize(str);
 | |
| +#endif
 | |
| +
 | |
| +#define	TAILQ_INSERT_TAIL(q,d,m) list_add_tail(&(d)->m, (q))
 | |
| +#define	TAILQ_EMPTY(q)	list_empty(q)
 | |
| +#define	TAILQ_FOREACH(v, q, m) list_for_each_entry(v, q, m)
 | |
| +
 | |
| +#define read_random(p,l) get_random_bytes(p,l)
 | |
| +
 | |
| +#define DELAY(x)	((x) > 2000 ? mdelay((x)/1000) : udelay(x))
 | |
| +#define strtoul simple_strtoul
 | |
| +
 | |
| +#define pci_get_vendor(dev)	((dev)->vendor)
 | |
| +#define pci_get_device(dev)	((dev)->device)
 | |
| +
 | |
| +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
 | |
| +#define pci_set_consistent_dma_mask(dev, mask) (0)
 | |
| +#endif
 | |
| +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
 | |
| +#define pci_dma_sync_single_for_cpu pci_dma_sync_single
 | |
| +#endif
 | |
| +
 | |
| +#ifndef DMA_32BIT_MASK
 | |
| +#define DMA_32BIT_MASK  0x00000000ffffffffULL
 | |
| +#endif
 | |
| +
 | |
| +#define htole32(x)	cpu_to_le32(x)
 | |
| +#define htobe32(x)	cpu_to_be32(x)
 | |
| +#define htole16(x)	cpu_to_le16(x)
 | |
| +#define htobe16(x)	cpu_to_be16(x)
 | |
| +
 | |
| +/* older kernels don't have these */
 | |
| +
 | |
| +#ifndef IRQ_NONE
 | |
| +#define IRQ_NONE
 | |
| +#define IRQ_HANDLED
 | |
| +#define irqreturn_t void
 | |
| +#endif
 | |
| +#ifndef IRQF_SHARED
 | |
| +#define IRQF_SHARED	SA_SHIRQ
 | |
| +#endif
 | |
| +
 | |
| +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
 | |
| +# define strlcpy(dest,src,len) \
 | |
| +		({strncpy(dest,src,(len)-1); ((char *)dest)[(len)-1] = '\0'; })
 | |
| +#endif
 | |
| +
 | |
| +#ifndef MAX_ERRNO
 | |
| +#define MAX_ERRNO	4095
 | |
| +#endif
 | |
| +#ifndef IS_ERR_VALUE
 | |
| +#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)
 | |
| +#endif
 | |
| +
 | |
| +/*
 | |
| + * common debug for all
 | |
| + */
 | |
| +#if 1
 | |
| +#define dprintk(a...)	do { if (debug) printk(a); } while(0)
 | |
| +#else
 | |
| +#define dprintk(a...)
 | |
| +#endif
 | |
| +
 | |
| +#ifndef SLAB_ATOMIC
 | |
| +/* Changed in 2.6.20, must use GFP_ATOMIC now */
 | |
| +#define	SLAB_ATOMIC	GFP_ATOMIC
 | |
| +#endif
 | |
| +
 | |
| +/*
 | |
| + * need some additional support for older kernels */
 | |
| +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,2)
 | |
| +#define pci_register_driver_compat(driver, rc) \
 | |
| +	do { \
 | |
| +		if ((rc) > 0) { \
 | |
| +			(rc) = 0; \
 | |
| +		} else if (rc == 0) { \
 | |
| +			(rc) = -ENODEV; \
 | |
| +		} else { \
 | |
| +			pci_unregister_driver(driver); \
 | |
| +		} \
 | |
| +	} while (0)
 | |
| +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
 | |
| +#define pci_register_driver_compat(driver,rc) ((rc) = (rc) < 0 ? (rc) : 0)
 | |
| +#else
 | |
| +#define pci_register_driver_compat(driver,rc)
 | |
| +#endif
 | |
| +
 | |
| +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
 | |
| +
 | |
| +#include <asm/scatterlist.h>
 | |
| +
 | |
| +static inline void sg_set_page(struct scatterlist *sg,  struct page *page,
 | |
| +			       unsigned int len, unsigned int offset)
 | |
| +{
 | |
| +	sg->page = page;
 | |
| +	sg->offset = offset;
 | |
| +	sg->length = len;
 | |
| +}
 | |
| +
 | |
| +static inline void *sg_virt(struct scatterlist *sg)
 | |
| +{
 | |
| +	return page_address(sg->page) + sg->offset;
 | |
| +}
 | |
| +
 | |
| +#endif
 | |
| +
 | |
| +#endif /* __KERNEL__ */
 | |
| +
 | |
| +/****************************************************************************/
 | |
| +#endif /* _BSD_COMPAT_H_ */
 | |
| --- /dev/null
 | |
| +++ b/crypto/ocf/pasemi/pasemi.c
 | |
| @@ -0,0 +1,1009 @@
 | |
| +/*
 | |
| + * Copyright (C) 2007 PA Semi, Inc
 | |
| + *
 | |
| + * Driver for the PA Semi PWRficient DMA Crypto Engine
 | |
| + *
 | |
| + * This program is free software; you can redistribute it and/or modify
 | |
| + * it under the terms of the GNU General Public License version 2 as
 | |
| + * published by the Free Software Foundation.
 | |
| + *
 | |
| + * This program is distributed in the hope that it will be useful,
 | |
| + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 | |
| + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | |
| + * GNU General Public License for more details.
 | |
| + *
 | |
| + * You should have received a copy of the GNU General Public License
 | |
| + * along with this program; if not, write to the Free Software
 | |
| + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 | |
| + */
 | |
| +
 | |
| +#ifndef AUTOCONF_INCLUDED
 | |
| +#include <linux/config.h>
 | |
| +#endif
 | |
| +#include <linux/module.h>
 | |
| +#include <linux/init.h>
 | |
| +#include <linux/interrupt.h>
 | |
| +#include <linux/timer.h>
 | |
| +#include <linux/random.h>
 | |
| +#include <linux/skbuff.h>
 | |
| +#include <asm/scatterlist.h>
 | |
| +#include <linux/moduleparam.h>
 | |
| +#include <linux/pci.h>
 | |
| +#include <cryptodev.h>
 | |
| +#include <uio.h>
 | |
| +#include "pasemi_fnu.h"
 | |
| +
 | |
| +#define DRV_NAME "pasemi"
 | |
| +
 | |
| +#define TIMER_INTERVAL 1000
 | |
| +
 | |
| +static void __devexit pasemi_dma_remove(struct pci_dev *pdev);
 | |
| +static struct pasdma_status volatile * dma_status;
 | |
| +
 | |
| +static int debug;
 | |
| +module_param(debug, int, 0644);
 | |
| +MODULE_PARM_DESC(debug, "Enable debug");
 | |
| +
 | |
| +static void pasemi_desc_start(struct pasemi_desc *desc, u64 hdr)
 | |
| +{
 | |
| +	desc->postop = 0;
 | |
| +	desc->quad[0] = hdr;
 | |
| +	desc->quad_cnt = 1;
 | |
| +	desc->size = 1;
 | |
| +}
 | |
| +
 | |
| +static void pasemi_desc_build(struct pasemi_desc *desc, u64 val)
 | |
| +{
 | |
| +	desc->quad[desc->quad_cnt++] = val;
 | |
| +	desc->size = (desc->quad_cnt + 1) / 2;
 | |
| +}
 | |
| +
 | |
| +static void pasemi_desc_hdr(struct pasemi_desc *desc, u64 hdr)
 | |
| +{
 | |
| +	desc->quad[0] |= hdr;
 | |
| +}
 | |
| +
 | |
| +static int pasemi_desc_size(struct pasemi_desc *desc)
 | |
| +{
 | |
| +	return desc->size;
 | |
| +}
 | |
| +
 | |
| +static void pasemi_ring_add_desc(
 | |
| +				 struct pasemi_fnu_txring *ring,
 | |
| +				 struct pasemi_desc *desc,
 | |
| +				 struct cryptop *crp) {
 | |
| +	int i;
 | |
| +	int ring_index = 2 * (ring->next_to_fill & (TX_RING_SIZE-1));
 | |
| +
 | |
| +	TX_DESC_INFO(ring, ring->next_to_fill).desc_size = desc->size;
 | |
| +	TX_DESC_INFO(ring, ring->next_to_fill).desc_postop = desc->postop;
 | |
| +	TX_DESC_INFO(ring, ring->next_to_fill).cf_crp = crp;
 | |
| +
 | |
| +	for (i = 0; i < desc->quad_cnt; i += 2) {
 | |
| +		ring_index = 2 * (ring->next_to_fill & (TX_RING_SIZE-1));
 | |
| +		ring->desc[ring_index] = desc->quad[i];
 | |
| +		ring->desc[ring_index + 1] = desc->quad[i + 1];
 | |
| +		ring->next_to_fill++;
 | |
| +	}
 | |
| +
 | |
| +	if (desc->quad_cnt & 1)
 | |
| +		ring->desc[ring_index + 1] = 0;
 | |
| +}
 | |
| +
 | |
| +static void pasemi_ring_incr(struct pasemi_softc *sc, int chan_index, int incr)
 | |
| +{
 | |
| +	out_le32(sc->dma_regs + PAS_DMA_TXCHAN_INCR(sc->base_chan + chan_index),
 | |
| +		 incr);
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Generate a new software session.
 | |
| + */
 | |
| +static int
 | |
| +pasemi_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
 | |
| +{
 | |
| +	struct cryptoini *c, *encini = NULL, *macini = NULL;
 | |
| +	struct pasemi_softc *sc = device_get_softc(dev);
 | |
| +	struct pasemi_session *ses = NULL, **sespp;
 | |
| +	int sesn, blksz = 0;
 | |
| +	u64 ccmd = 0;
 | |
| +	unsigned long flags;
 | |
| +	struct pasemi_desc init_desc;
 | |
| +	struct pasemi_fnu_txring *txring;
 | |
| +
 | |
| +	DPRINTF("%s()\n", __FUNCTION__);
 | |
| +	if (sidp == NULL || cri == NULL || sc == NULL) {
 | |
| +		DPRINTF("%s,%d - EINVAL\n", __FILE__, __LINE__);
 | |
| +		return -EINVAL;
 | |
| +	}
 | |
| +	for (c = cri; c != NULL; c = c->cri_next) {
 | |
| +		if (ALG_IS_SIG(c->cri_alg)) {
 | |
| +			if (macini)
 | |
| +				return -EINVAL;
 | |
| +			macini = c;
 | |
| +		} else if (ALG_IS_CIPHER(c->cri_alg)) {
 | |
| +			if (encini)
 | |
| +				return -EINVAL;
 | |
| +			encini = c;
 | |
| +		} else {
 | |
| +			DPRINTF("UNKNOWN c->cri_alg %d\n", c->cri_alg);
 | |
| +			return -EINVAL;
 | |
| +		}
 | |
| +	}
 | |
| +	if (encini == NULL && macini == NULL)
 | |
| +		return -EINVAL;
 | |
| +	if (encini) {
 | |
| +		/* validate key length */
 | |
| +		switch (encini->cri_alg) {
 | |
| +		case CRYPTO_DES_CBC:
 | |
| +			if (encini->cri_klen != 64)
 | |
| +				return -EINVAL;
 | |
| +			ccmd = DMA_CALGO_DES;
 | |
| +			break;
 | |
| +		case CRYPTO_3DES_CBC:
 | |
| +			if (encini->cri_klen != 192)
 | |
| +				return -EINVAL;
 | |
| +			ccmd = DMA_CALGO_3DES;
 | |
| +			break;
 | |
| +		case CRYPTO_AES_CBC:
 | |
| +			if (encini->cri_klen != 128 &&
 | |
| +			    encini->cri_klen != 192 &&
 | |
| +			    encini->cri_klen != 256)
 | |
| +				return -EINVAL;
 | |
| +			ccmd = DMA_CALGO_AES;
 | |
| +			break;
 | |
| +		case CRYPTO_ARC4:
 | |
| +			if (encini->cri_klen != 128)
 | |
| +				return -EINVAL;
 | |
| +			ccmd = DMA_CALGO_ARC;
 | |
| +			break;
 | |
| +		default:
 | |
| +			DPRINTF("UNKNOWN encini->cri_alg %d\n",
 | |
| +				encini->cri_alg);
 | |
| +			return -EINVAL;
 | |
| +		}
 | |
| +	}
 | |
| +
 | |
| +	if (macini) {
 | |
| +		switch (macini->cri_alg) {
 | |
| +		case CRYPTO_MD5:
 | |
| +		case CRYPTO_MD5_HMAC:
 | |
| +			blksz = 16;
 | |
| +			break;
 | |
| +		case CRYPTO_SHA1:
 | |
| +		case CRYPTO_SHA1_HMAC:
 | |
| +			blksz = 20;
 | |
| +			break;
 | |
| +		default:
 | |
| +			DPRINTF("UNKNOWN macini->cri_alg %d\n",
 | |
| +				macini->cri_alg);
 | |
| +			return -EINVAL;
 | |
| +		}
 | |
| +		if (((macini->cri_klen + 7) / 8) > blksz) {
 | |
| +			DPRINTF("key length %d bigger than blksize %d not supported\n",
 | |
| +				((macini->cri_klen + 7) / 8), blksz);
 | |
| +			return -EINVAL;
 | |
| +		}
 | |
| +	}
 | |
| +
 | |
| +	for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
 | |
| +		if (sc->sc_sessions[sesn] == NULL) {
 | |
| +			sc->sc_sessions[sesn] = (struct pasemi_session *)
 | |
| +				kzalloc(sizeof(struct pasemi_session), GFP_ATOMIC);
 | |
| +			ses = sc->sc_sessions[sesn];
 | |
| +			break;
 | |
| +		} else if (sc->sc_sessions[sesn]->used == 0) {
 | |
| +			ses = sc->sc_sessions[sesn];
 | |
| +			break;
 | |
| +		}
 | |
| +	}
 | |
| +
 | |
| +	if (ses == NULL) {
 | |
| +		sespp = (struct pasemi_session **)
 | |
| +			kzalloc(sc->sc_nsessions * 2 *
 | |
| +				sizeof(struct pasemi_session *), GFP_ATOMIC);
 | |
| +		if (sespp == NULL)
 | |
| +			return -ENOMEM;
 | |
| +		memcpy(sespp, sc->sc_sessions,
 | |
| +		       sc->sc_nsessions * sizeof(struct pasemi_session *));
 | |
| +		kfree(sc->sc_sessions);
 | |
| +		sc->sc_sessions = sespp;
 | |
| +		sesn = sc->sc_nsessions;
 | |
| +		ses = sc->sc_sessions[sesn] = (struct pasemi_session *)
 | |
| +			kzalloc(sizeof(struct pasemi_session), GFP_ATOMIC);
 | |
| +		if (ses == NULL)
 | |
| +			return -ENOMEM;
 | |
| +		sc->sc_nsessions *= 2;
 | |
| +	}
 | |
| +
 | |
| +	ses->used = 1;
 | |
| +
 | |
| +	ses->dma_addr = pci_map_single(sc->dma_pdev, (void *) ses->civ,
 | |
| +				       sizeof(struct pasemi_session), DMA_TO_DEVICE);
 | |
| +
 | |
| +	/* enter the channel scheduler */
 | |
| +	spin_lock_irqsave(&sc->sc_chnlock, flags);
 | |
| +
 | |
| +	/* ARC4 has to be processed by the even channel */
 | |
| +	if (encini && (encini->cri_alg == CRYPTO_ARC4))
 | |
| +		ses->chan = sc->sc_lastchn & ~1;
 | |
| +	else
 | |
| +		ses->chan = sc->sc_lastchn;
 | |
| +	sc->sc_lastchn = (sc->sc_lastchn + 1) % sc->sc_num_channels;
 | |
| +
 | |
| +	spin_unlock_irqrestore(&sc->sc_chnlock, flags);
 | |
| +
 | |
| +	txring = &sc->tx[ses->chan];
 | |
| +
 | |
| +	if (encini) {
 | |
| +		ses->ccmd = ccmd;
 | |
| +
 | |
| +		/* get an IV */
 | |
| +		/* XXX may read fewer than requested */
 | |
| +		get_random_bytes(ses->civ, sizeof(ses->civ));
 | |
| +
 | |
| +		ses->keysz = (encini->cri_klen - 63) / 64;
 | |
| +		memcpy(ses->key, encini->cri_key, (ses->keysz + 1) * 8);
 | |
| +
 | |
| +		pasemi_desc_start(&init_desc,
 | |
| +				  XCT_CTRL_HDR(ses->chan, (encini && macini) ? 0x68 : 0x40, DMA_FN_CIV0));
 | |
| +		pasemi_desc_build(&init_desc,
 | |
| +				  XCT_FUN_SRC_PTR((encini && macini) ? 0x68 : 0x40, ses->dma_addr));
 | |
| +	}
 | |
| +	if (macini) {
 | |
| +		if (macini->cri_alg == CRYPTO_MD5_HMAC ||
 | |
| +		    macini->cri_alg == CRYPTO_SHA1_HMAC)
 | |
| +			memcpy(ses->hkey, macini->cri_key, blksz);
 | |
| +		else {
 | |
| +			/* Load initialization constants(RFC 1321, 3174) */
 | |
| +			ses->hiv[0] = 0x67452301efcdab89ULL;
 | |
| +			ses->hiv[1] = 0x98badcfe10325476ULL;
 | |
| +			ses->hiv[2] = 0xc3d2e1f000000000ULL;
 | |
| +		}
 | |
| +		ses->hseq = 0ULL;
 | |
| +	}
 | |
| +
 | |
| +	spin_lock_irqsave(&txring->fill_lock, flags);
 | |
| +
 | |
| +	if (((txring->next_to_fill + pasemi_desc_size(&init_desc)) -
 | |
| +	     txring->next_to_clean) > TX_RING_SIZE) {
 | |
| +		spin_unlock_irqrestore(&txring->fill_lock, flags);
 | |
| +		return ERESTART;
 | |
| +	}
 | |
| +
 | |
| +	if (encini) {
 | |
| +		pasemi_ring_add_desc(txring, &init_desc, NULL);
 | |
| +		pasemi_ring_incr(sc, ses->chan,
 | |
| +				 pasemi_desc_size(&init_desc));
 | |
| +	}
 | |
| +
 | |
| +	txring->sesn = sesn;
 | |
| +	spin_unlock_irqrestore(&txring->fill_lock, flags);
 | |
| +
 | |
| +	*sidp = PASEMI_SID(sesn);
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +/*
 | |
| + * Deallocate a session.
 | |
| + */
 | |
| +static int
 | |
| +pasemi_freesession(device_t dev, u_int64_t tid)
 | |
| +{
 | |
| +	struct pasemi_softc *sc = device_get_softc(dev);
 | |
| +	int session;
 | |
| +	u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
 | |
| +
 | |
| +	DPRINTF("%s()\n", __FUNCTION__);
 | |
| +
 | |
| +	if (sc == NULL)
 | |
| +		return -EINVAL;
 | |
| +	session = PASEMI_SESSION(sid);
 | |
| +	if (session >= sc->sc_nsessions || !sc->sc_sessions[session])
 | |
| +		return -EINVAL;
 | |
| +
 | |
| +	pci_unmap_single(sc->dma_pdev,
 | |
| +			 sc->sc_sessions[session]->dma_addr,
 | |
| +			 sizeof(struct pasemi_session), DMA_TO_DEVICE);
 | |
| +	memset(sc->sc_sessions[session], 0,
 | |
| +	       sizeof(struct pasemi_session));
 | |
| +
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +static int
 | |
| +pasemi_process(device_t dev, struct cryptop *crp, int hint)
 | |
| +{
 | |
| +
 | |
| +	int err = 0, ivsize, srclen = 0, reinit = 0, reinit_size = 0, chsel;
 | |
| +	struct pasemi_softc *sc = device_get_softc(dev);
 | |
| +	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
 | |
| +	caddr_t ivp;
 | |
| +	struct pasemi_desc init_desc, work_desc;
 | |
| +	struct pasemi_session *ses;
 | |
| +	struct sk_buff *skb;
 | |
| +	struct uio *uiop;
 | |
| +	unsigned long flags;
 | |
| +	struct pasemi_fnu_txring *txring;
 | |
| +
 | |
| +	DPRINTF("%s()\n", __FUNCTION__);
 | |
| +
 | |
| +	if (crp == NULL || crp->crp_callback == NULL || sc == NULL)
 | |
| +		return -EINVAL;
 | |
| +
 | |
| +	crp->crp_etype = 0;
 | |
| +	if (PASEMI_SESSION(crp->crp_sid) >= sc->sc_nsessions)
 | |
| +		return -EINVAL;
 | |
| +
 | |
| +	ses = sc->sc_sessions[PASEMI_SESSION(crp->crp_sid)];
 | |
| +
 | |
| +	crd1 = crp->crp_desc;
 | |
| +	if (crd1 == NULL) {
 | |
| +		err = -EINVAL;
 | |
| +		goto errout;
 | |
| +	}
 | |
| +	crd2 = crd1->crd_next;
 | |
| +
 | |
| +	if (ALG_IS_SIG(crd1->crd_alg)) {
 | |
| +		maccrd = crd1;
 | |
| +		if (crd2 == NULL)
 | |
| +			enccrd = NULL;
 | |
| +		else if (ALG_IS_CIPHER(crd2->crd_alg) &&
 | |
| +			 (crd2->crd_flags & CRD_F_ENCRYPT) == 0)
 | |
| +			enccrd = crd2;
 | |
| +		else
 | |
| +			goto erralg;
 | |
| +	} else if (ALG_IS_CIPHER(crd1->crd_alg)) {
 | |
| +		enccrd = crd1;
 | |
| +		if (crd2 == NULL)
 | |
| +			maccrd = NULL;
 | |
| +		else if (ALG_IS_SIG(crd2->crd_alg) &&
 | |
| +			 (crd1->crd_flags & CRD_F_ENCRYPT))
 | |
| +			maccrd = crd2;
 | |
| +		else
 | |
| +			goto erralg;
 | |
| +	} else
 | |
| +		goto erralg;
 | |
| +
 | |
| +	chsel = ses->chan;
 | |
| +
 | |
| +	txring = &sc->tx[chsel];
 | |
| +
 | |
| +	if (enccrd && !maccrd) {
 | |
| +		if (enccrd->crd_alg == CRYPTO_ARC4)
 | |
| +			reinit = 1;
 | |
| +		reinit_size = 0x40;
 | |
| +		srclen = crp->crp_ilen;
 | |
| +
 | |
| +		pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I
 | |
| +				  | XCT_FUN_FUN(chsel));
 | |
| +		if (enccrd->crd_flags & CRD_F_ENCRYPT)
 | |
| +			pasemi_desc_hdr(&work_desc, XCT_FUN_CRM_ENC);
 | |
| +		else
 | |
| +			pasemi_desc_hdr(&work_desc, XCT_FUN_CRM_DEC);
 | |
| +	} else if (enccrd && maccrd) {
 | |
| +		if (enccrd->crd_alg == CRYPTO_ARC4)
 | |
| +			reinit = 1;
 | |
| +		reinit_size = 0x68;
 | |
| +
 | |
| +		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
 | |
| +			/* Encrypt -> Authenticate */
 | |
| +			pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_ENC_SIG
 | |
| +					  | XCT_FUN_A | XCT_FUN_FUN(chsel));
 | |
| +			srclen = maccrd->crd_skip + maccrd->crd_len;
 | |
| +		} else {
 | |
| +			/* Authenticate -> Decrypt */
 | |
| +			pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_SIG_DEC
 | |
| +					  | XCT_FUN_24BRES | XCT_FUN_FUN(chsel));
 | |
| +			pasemi_desc_build(&work_desc, 0);
 | |
| +			pasemi_desc_build(&work_desc, 0);
 | |
| +			pasemi_desc_build(&work_desc, 0);
 | |
| +			work_desc.postop = PASEMI_CHECK_SIG;
 | |
| +			srclen = crp->crp_ilen;
 | |
| +		}
 | |
| +
 | |
| +		pasemi_desc_hdr(&work_desc, XCT_FUN_SHL(maccrd->crd_skip / 4));
 | |
| +		pasemi_desc_hdr(&work_desc, XCT_FUN_CHL(enccrd->crd_skip - maccrd->crd_skip));
 | |
| +	} else if (!enccrd && maccrd) {
 | |
| +		srclen = maccrd->crd_len;
 | |
| +
 | |
| +		pasemi_desc_start(&init_desc,
 | |
| +				  XCT_CTRL_HDR(chsel, 0x58, DMA_FN_HKEY0));
 | |
| +		pasemi_desc_build(&init_desc,
 | |
| +				  XCT_FUN_SRC_PTR(0x58, ((struct pasemi_session *)ses->dma_addr)->hkey));
 | |
| +
 | |
| +		pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_SIG
 | |
| +				  | XCT_FUN_A | XCT_FUN_FUN(chsel));
 | |
| +	}
 | |
| +
 | |
| +	if (enccrd) {
 | |
| +		switch (enccrd->crd_alg) {
 | |
| +		case CRYPTO_3DES_CBC:
 | |
| +			pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_3DES |
 | |
| +					XCT_FUN_BCM_CBC);
 | |
| +			ivsize = sizeof(u64);
 | |
| +			break;
 | |
| +		case CRYPTO_DES_CBC:
 | |
| +			pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_DES |
 | |
| +					XCT_FUN_BCM_CBC);
 | |
| +			ivsize = sizeof(u64);
 | |
| +			break;
 | |
| +		case CRYPTO_AES_CBC:
 | |
| +			pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_AES |
 | |
| +					XCT_FUN_BCM_CBC);
 | |
| +			ivsize = 2 * sizeof(u64);
 | |
| +			break;
 | |
| +		case CRYPTO_ARC4:
 | |
| +			pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_ARC);
 | |
| +			ivsize = 0;
 | |
| +			break;
 | |
| +		default:
 | |
| +			printk(DRV_NAME ": unimplemented enccrd->crd_alg %d\n",
 | |
| +			       enccrd->crd_alg);
 | |
| +			err = -EINVAL;
 | |
| +			goto errout;
 | |
| +		}
 | |
| +
 | |
| +		ivp = (ivsize == sizeof(u64)) ? (caddr_t) &ses->civ[1] : (caddr_t) &ses->civ[0];
 | |
| +		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
 | |
| +			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
 | |
| +				memcpy(ivp, enccrd->crd_iv, ivsize);
 | |
| +			/* If IV is not present in the buffer already, it has to be copied there */
 | |
| +			if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0)
 | |
| +				crypto_copyback(crp->crp_flags, crp->crp_buf,
 | |
| +						enccrd->crd_inject, ivsize, ivp);
 | |
| +		} else {
 | |
| +			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
 | |
| +				/* IV is provided expicitly in descriptor */
 | |
| +				memcpy(ivp, enccrd->crd_iv, ivsize);
 | |
| +			else
 | |
| +				/* IV is provided in the packet */
 | |
| +				crypto_copydata(crp->crp_flags, crp->crp_buf,
 | |
| +						enccrd->crd_inject, ivsize,
 | |
| +						ivp);
 | |
| +		}
 | |
| +	}
 | |
| +
 | |
| +	if (maccrd) {
 | |
| +		switch (maccrd->crd_alg) {
 | |
| +		case CRYPTO_MD5:
 | |
| +			pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_MD5 |
 | |
| +					XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
 | |
| +			break;
 | |
| +		case CRYPTO_SHA1:
 | |
| +			pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_SHA1 |
 | |
| +					XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
 | |
| +			break;
 | |
| +		case CRYPTO_MD5_HMAC:
 | |
| +			pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_HMAC_MD5 |
 | |
| +					XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
 | |
| +			break;
 | |
| +		case CRYPTO_SHA1_HMAC:
 | |
| +			pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_HMAC_SHA1 |
 | |
| +					XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
 | |
| +			break;
 | |
| +		default:
 | |
| +			printk(DRV_NAME ": unimplemented maccrd->crd_alg %d\n",
 | |
| +			       maccrd->crd_alg);
 | |
| +			err = -EINVAL;
 | |
| +			goto errout;
 | |
| +		}
 | |
| +	}
 | |
| +
 | |
| +	if (crp->crp_flags & CRYPTO_F_SKBUF) {
 | |
| +		/* using SKB buffers */
 | |
| +		skb = (struct sk_buff *)crp->crp_buf;
 | |
| +		if (skb_shinfo(skb)->nr_frags) {
 | |
| +			printk(DRV_NAME ": skb frags unimplemented\n");
 | |
| +			err = -EINVAL;
 | |
| +			goto errout;
 | |
| +		}
 | |
| +		pasemi_desc_build(
 | |
| +			&work_desc,
 | |
| +			XCT_FUN_DST_PTR(skb->len, pci_map_single(
 | |
| +						sc->dma_pdev, skb->data,
 | |
| +						skb->len, DMA_TO_DEVICE)));
 | |
| +		pasemi_desc_build(
 | |
| +			&work_desc,
 | |
| +			XCT_FUN_SRC_PTR(
 | |
| +				srclen, pci_map_single(
 | |
| +					sc->dma_pdev, skb->data,
 | |
| +					srclen, DMA_TO_DEVICE)));
 | |
| +		pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));
 | |
| +	} else if (crp->crp_flags & CRYPTO_F_IOV) {
 | |
| +		/* using IOV buffers */
 | |
| +		uiop = (struct uio *)crp->crp_buf;
 | |
| +		if (uiop->uio_iovcnt > 1) {
 | |
| +			printk(DRV_NAME ": iov frags unimplemented\n");
 | |
| +			err = -EINVAL;
 | |
| +			goto errout;
 | |
| +		}
 | |
| +
 | |
| +		/* crp_olen is never set; always use crp_ilen */
 | |
| +		pasemi_desc_build(
 | |
| +			&work_desc,
 | |
| +			XCT_FUN_DST_PTR(crp->crp_ilen, pci_map_single(
 | |
| +						sc->dma_pdev,
 | |
| +						uiop->uio_iov->iov_base,
 | |
| +						crp->crp_ilen, DMA_TO_DEVICE)));
 | |
| +		pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));
 | |
| +
 | |
| +		pasemi_desc_build(
 | |
| +			&work_desc,
 | |
| +			XCT_FUN_SRC_PTR(srclen, pci_map_single(
 | |
| +						sc->dma_pdev,
 | |
| +						uiop->uio_iov->iov_base,
 | |
| +						srclen, DMA_TO_DEVICE)));
 | |
| +	} else {
 | |
| +		/* using contig buffers */
 | |
| +		pasemi_desc_build(
 | |
| +			&work_desc,
 | |
| +			XCT_FUN_DST_PTR(crp->crp_ilen, pci_map_single(
 | |
| +						sc->dma_pdev,
 | |
| +						crp->crp_buf,
 | |
| +						crp->crp_ilen, DMA_TO_DEVICE)));
 | |
| +		pasemi_desc_build(
 | |
| +			&work_desc,
 | |
| +			XCT_FUN_SRC_PTR(srclen, pci_map_single(
 | |
| +						sc->dma_pdev,
 | |
| +						crp->crp_buf, srclen,
 | |
| +						DMA_TO_DEVICE)));
 | |
| +		pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));
 | |
| +	}
 | |
| +
 | |
| +	spin_lock_irqsave(&txring->fill_lock, flags);
 | |
| +
 | |
| +	if (txring->sesn != PASEMI_SESSION(crp->crp_sid)) {
 | |
| +		txring->sesn = PASEMI_SESSION(crp->crp_sid);
 | |
| +		reinit = 1;
 | |
| +	}
 | |
| +
 | |
| +	if (enccrd) {
 | |
| +		pasemi_desc_start(&init_desc,
 | |
| +				  XCT_CTRL_HDR(chsel, reinit ? reinit_size : 0x10, DMA_FN_CIV0));
 | |
| +		pasemi_desc_build(&init_desc,
 | |
| +				  XCT_FUN_SRC_PTR(reinit ? reinit_size : 0x10, ses->dma_addr));
 | |
| +	}
 | |
| +
 | |
| +	if (((txring->next_to_fill + pasemi_desc_size(&init_desc) +
 | |
| +	      pasemi_desc_size(&work_desc)) -
 | |
| +	     txring->next_to_clean) > TX_RING_SIZE) {
 | |
| +		spin_unlock_irqrestore(&txring->fill_lock, flags);
 | |
| +		err = ERESTART;
 | |
| +		goto errout;
 | |
| +	}
 | |
| +
 | |
| +	pasemi_ring_add_desc(txring, &init_desc, NULL);
 | |
| +	pasemi_ring_add_desc(txring, &work_desc, crp);
 | |
| +
 | |
| +	pasemi_ring_incr(sc, chsel,
 | |
| +			 pasemi_desc_size(&init_desc) +
 | |
| +			 pasemi_desc_size(&work_desc));
 | |
| +
 | |
| +	spin_unlock_irqrestore(&txring->fill_lock, flags);
 | |
| +
 | |
| +	mod_timer(&txring->crypto_timer, jiffies + TIMER_INTERVAL);
 | |
| +
 | |
| +	return 0;
 | |
| +
 | |
| +erralg:
 | |
| +	printk(DRV_NAME ": unsupported algorithm or algorithm order alg1 %d alg2 %d\n",
 | |
| +	       crd1->crd_alg, crd2->crd_alg);
 | |
| +	err = -EINVAL;
 | |
| +
 | |
| +errout:
 | |
| +	if (err != ERESTART) {
 | |
| +		crp->crp_etype = err;
 | |
| +		crypto_done(crp);
 | |
| +	}
 | |
| +	return err;
 | |
| +}
 | |
| +
 | |
| +static int pasemi_clean_tx(struct pasemi_softc *sc, int chan)
 | |
| +{
 | |
| +	int i, j, ring_idx;
 | |
| +	struct pasemi_fnu_txring *ring = &sc->tx[chan];
 | |
| +	u16 delta_cnt;
 | |
| +	int flags, loops = 10;
 | |
| +	int desc_size;
 | |
| +	struct cryptop *crp;
 | |
| +
 | |
| +	spin_lock_irqsave(&ring->clean_lock, flags);
 | |
| +
 | |
| +	while ((delta_cnt = (dma_status->tx_sta[sc->base_chan + chan]
 | |
| +			     & PAS_STATUS_PCNT_M) - ring->total_pktcnt)
 | |
| +	       && loops--) {
 | |
| +
 | |
| +		for (i = 0; i < delta_cnt; i++) {
 | |
| +			desc_size = TX_DESC_INFO(ring, ring->next_to_clean).desc_size;
 | |
| +			crp = TX_DESC_INFO(ring, ring->next_to_clean).cf_crp;
 | |
| +			if (crp) {
 | |
| +				ring_idx = 2 * (ring->next_to_clean & (TX_RING_SIZE-1));
 | |
| +				if (TX_DESC_INFO(ring, ring->next_to_clean).desc_postop & PASEMI_CHECK_SIG) {
 | |
| +					/* Need to make sure signature matched,
 | |
| +					 * if not - return error */
 | |
| +					if (!(ring->desc[ring_idx + 1] & (1ULL << 63)))
 | |
| +						crp->crp_etype = -EINVAL;
 | |
| +				}
 | |
| +				crypto_done(TX_DESC_INFO(ring,
 | |
| +							 ring->next_to_clean).cf_crp);
 | |
| +				TX_DESC_INFO(ring, ring->next_to_clean).cf_crp = NULL;
 | |
| +				pci_unmap_single(
 | |
| +					sc->dma_pdev,
 | |
| +					XCT_PTR_ADDR_LEN(ring->desc[ring_idx + 1]),
 | |
| +					PCI_DMA_TODEVICE);
 | |
| +
 | |
| +				ring->desc[ring_idx] = ring->desc[ring_idx + 1] = 0;
 | |
| +
 | |
| +				ring->next_to_clean++;
 | |
| +				for (j = 1; j < desc_size; j++) {
 | |
| +					ring_idx = 2 *
 | |
| +						(ring->next_to_clean &
 | |
| +						 (TX_RING_SIZE-1));
 | |
| +					pci_unmap_single(
 | |
| +						sc->dma_pdev,
 | |
| +						XCT_PTR_ADDR_LEN(ring->desc[ring_idx]),
 | |
| +						PCI_DMA_TODEVICE);
 | |
| +					if (ring->desc[ring_idx + 1])
 | |
| +						pci_unmap_single(
 | |
| +							sc->dma_pdev,
 | |
| +							XCT_PTR_ADDR_LEN(
 | |
| +								ring->desc[
 | |
| +									ring_idx + 1]),
 | |
| +							PCI_DMA_TODEVICE);
 | |
| +					ring->desc[ring_idx] =
 | |
| +						ring->desc[ring_idx + 1] = 0;
 | |
| +					ring->next_to_clean++;
 | |
| +				}
 | |
| +			} else {
 | |
| +				for (j = 0; j < desc_size; j++) {
 | |
| +					ring_idx = 2 * (ring->next_to_clean & (TX_RING_SIZE-1));
 | |
| +					ring->desc[ring_idx] =
 | |
| +						ring->desc[ring_idx + 1] = 0;
 | |
| +					ring->next_to_clean++;
 | |
| +				}
 | |
| +			}
 | |
| +		}
 | |
| +
 | |
| +		ring->total_pktcnt += delta_cnt;
 | |
| +	}
 | |
| +	spin_unlock_irqrestore(&ring->clean_lock, flags);
 | |
| +
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +static void sweepup_tx(struct pasemi_softc *sc)
 | |
| +{
 | |
| +	int i;
 | |
| +
 | |
| +	for (i = 0; i < sc->sc_num_channels; i++)
 | |
| +		pasemi_clean_tx(sc, i);
 | |
| +}
 | |
| +
 | |
| +static irqreturn_t pasemi_intr(int irq, void *arg, struct pt_regs *regs)
 | |
| +{
 | |
| +	struct pasemi_softc *sc = arg;
 | |
| +	unsigned int reg;
 | |
| +	int chan = irq - sc->base_irq;
 | |
| +	int chan_index = sc->base_chan + chan;
 | |
| +	u64 stat = dma_status->tx_sta[chan_index];
 | |
| +
 | |
| +	DPRINTF("%s()\n", __FUNCTION__);
 | |
| +
 | |
| +	if (!(stat & PAS_STATUS_CAUSE_M))
 | |
| +		return IRQ_NONE;
 | |
| +
 | |
| +	pasemi_clean_tx(sc, chan);
 | |
| +
 | |
| +	stat = dma_status->tx_sta[chan_index];
 | |
| +
 | |
| +	reg = PAS_IOB_DMA_TXCH_RESET_PINTC |
 | |
| +		PAS_IOB_DMA_TXCH_RESET_PCNT(sc->tx[chan].total_pktcnt);
 | |
| +
 | |
| +	if (stat & PAS_STATUS_SOFT)
 | |
| +		reg |= PAS_IOB_DMA_RXCH_RESET_SINTC;
 | |
| +
 | |
| +	out_le32(sc->iob_regs + PAS_IOB_DMA_TXCH_RESET(chan_index), reg);
 | |
| +
 | |
| +
 | |
| +	return IRQ_HANDLED;
 | |
| +}
 | |
| +
 | |
| +static int pasemi_dma_setup_tx_resources(struct pasemi_softc *sc, int chan)
 | |
| +{
 | |
| +	u32 val;
 | |
| +	int chan_index = chan + sc->base_chan;
 | |
| +	int ret;
 | |
| +	struct pasemi_fnu_txring *ring;
 | |
| +
 | |
| +	ring = &sc->tx[chan];
 | |
| +
 | |
| +	spin_lock_init(&ring->fill_lock);
 | |
| +	spin_lock_init(&ring->clean_lock);
 | |
| +
 | |
| +	ring->desc_info = kzalloc(sizeof(struct pasemi_desc_info) *
 | |
| +				  TX_RING_SIZE, GFP_KERNEL);
 | |
| +	if (!ring->desc_info)
 | |
| +		return -ENOMEM;
 | |
| +
 | |
| +	/* Allocate descriptors */
 | |
| +	ring->desc = dma_alloc_coherent(&sc->dma_pdev->dev,
 | |
| +					TX_RING_SIZE *
 | |
| +					2 * sizeof(u64),
 | |
| +					&ring->dma, GFP_KERNEL);
 | |
| +	if (!ring->desc)
 | |
| +		return -ENOMEM;
 | |
| +
 | |
| +	memset((void *) ring->desc, 0, TX_RING_SIZE * 2 * sizeof(u64));
 | |
| +
 | |
| +	out_le32(sc->iob_regs + PAS_IOB_DMA_TXCH_RESET(chan_index), 0x30);
 | |
| +
 | |
| +	ring->total_pktcnt = 0;
 | |
| +
 | |
| +	out_le32(sc->dma_regs + PAS_DMA_TXCHAN_BASEL(chan_index),
 | |
| +		 PAS_DMA_TXCHAN_BASEL_BRBL(ring->dma));
 | |
| +
 | |
| +	val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->dma >> 32);
 | |
| +	val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 2);
 | |
| +
 | |
| +	out_le32(sc->dma_regs + PAS_DMA_TXCHAN_BASEU(chan_index), val);
 | |
| +
 | |
| +	out_le32(sc->dma_regs + PAS_DMA_TXCHAN_CFG(chan_index),
 | |
| +		 PAS_DMA_TXCHAN_CFG_TY_FUNC |
 | |
| +		 PAS_DMA_TXCHAN_CFG_TATTR(chan) |
 | |
| +		 PAS_DMA_TXCHAN_CFG_WT(2));
 | |
| +
 | |
| +	/* enable tx channel */
 | |
| +	out_le32(sc->dma_regs +
 | |
| +		 PAS_DMA_TXCHAN_TCMDSTA(chan_index),
 | |
| +		 PAS_DMA_TXCHAN_TCMDSTA_EN);
 | |
| +
 | |
| +	out_le32(sc->iob_regs + PAS_IOB_DMA_TXCH_CFG(chan_index),
 | |
| +		 PAS_IOB_DMA_TXCH_CFG_CNTTH(1000));
 | |
| +
 | |
| +	ring->next_to_fill = 0;
 | |
| +	ring->next_to_clean = 0;
 | |
| +
 | |
| +	snprintf(ring->irq_name, sizeof(ring->irq_name),
 | |
| +		 "%s%d", "crypto", chan);
 | |
| +
 | |
| +	ring->irq = irq_create_mapping(NULL, sc->base_irq + chan);
 | |
| +	ret = request_irq(ring->irq, (irq_handler_t)
 | |
| +			  pasemi_intr, IRQF_DISABLED, ring->irq_name, sc);
 | |
| +	if (ret) {
 | |
| +		printk(KERN_ERR DRV_NAME ": failed to hook irq %d ret %d\n",
 | |
| +		       ring->irq, ret);
 | |
| +		ring->irq = -1;
 | |
| +		return ret;
 | |
| +	}
 | |
| +
 | |
| +	setup_timer(&ring->crypto_timer, (void *) sweepup_tx, (unsigned long) sc);
 | |
| +
 | |
| +	return 0;
 | |
| +}
 | |
| +
 | |
| +static device_method_t pasemi_methods = {
 | |
| +	/* crypto device methods */
 | |
| +	DEVMETHOD(cryptodev_newsession,		pasemi_newsession),
 | |
| +	DEVMETHOD(cryptodev_freesession,	pasemi_freesession),
 | |
| +	DEVMETHOD(cryptodev_process,		pasemi_process),
 | |
| +};
 | |
| +
 | |
| +/* Set up the crypto device structure, private data,
 | |
| + * and anything else we need before we start */
 | |
| +
 | |
| +static int __devinit
 | |
| +pasemi_dma_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 | |
| +{
 | |
| +	struct pasemi_softc *sc;
 | |
| +	int ret, i;
 | |
| +
 | |
| +	DPRINTF(KERN_ERR "%s()\n", __FUNCTION__);
 | |
| +
 | |
| +	sc = kzalloc(sizeof(*sc), GFP_KERNEL);
 | |
| +	if (!sc)
 | |
| +		return -ENOMEM;
 | |
| +
 | |
| +	softc_device_init(sc, DRV_NAME, 1, pasemi_methods);
 | |
| +
 | |
| +	pci_set_drvdata(pdev, sc);
 | |
| +
 | |
| +	spin_lock_init(&sc->sc_chnlock);
 | |
| +
 | |
| +	sc->sc_sessions = (struct pasemi_session **)
 | |
| +		kzalloc(PASEMI_INITIAL_SESSIONS *
 | |
| +			sizeof(struct pasemi_session *), GFP_ATOMIC);
 | |
| +	if (sc->sc_sessions == NULL) {
 | |
| +		ret = -ENOMEM;
 | |
| +		goto out;
 | |
| +	}
 | |
| +
 | |
| +	sc->sc_nsessions = PASEMI_INITIAL_SESSIONS;
 | |
| +	sc->sc_lastchn = 0;
 | |
| +	sc->base_irq = pdev->irq + 6;
 | |
| +	sc->base_chan = 6;
 | |
| +	sc->sc_cid = -1;
 | |
| +	sc->dma_pdev = pdev;
 | |
| +
 | |
| +	sc->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
 | |
| +	if (!sc->iob_pdev) {
 | |
| +		dev_err(&pdev->dev, "Can't find I/O Bridge\n");
 | |
| +		ret = -ENODEV;
 | |
| +		goto out;
 | |
| +	}
 | |
| +
 | |
| +	/* This is hardcoded and ugly, but we have some firmware versions
 | |
| +	 * who don't provide the register space in the device tree. Luckily
 | |
| +	 * they are at well-known locations so we can just do the math here.
 | |
| +	 */
 | |
| +	sc->dma_regs =
 | |
| +		ioremap(0xe0000000 + (sc->dma_pdev->devfn << 12), 0x2000);
 | |
| +	sc->iob_regs =
 | |
| +		ioremap(0xe0000000 + (sc->iob_pdev->devfn << 12), 0x2000);
 | |
| +	if (!sc->dma_regs || !sc->iob_regs) {
 | |
| +		dev_err(&pdev->dev, "Can't map registers\n");
 | |
| +		ret = -ENODEV;
 | |
| +		goto out;
 | |
| +	}
 | |
| +
 | |
| +	dma_status = __ioremap(0xfd800000, 0x1000, 0);
 | |
| +	if (!dma_status) {
 | |
| +		ret = -ENODEV;
 | |
| +		dev_err(&pdev->dev, "Can't map dmastatus space\n");
 | |
| +		goto out;
 | |
| +	}
 | |
| +
 | |
| +	sc->tx = (struct pasemi_fnu_txring *)
 | |
| +		kzalloc(sizeof(struct pasemi_fnu_txring)
 | |
| +			* 8, GFP_KERNEL);
 | |
| +	if (!sc->tx) {
 | |
| +		ret = -ENOMEM;
 | |
| +		goto out;
 | |
| +	}
 | |
| +
 | |
| +	/* Initialize the h/w */
 | |
| +	out_le32(sc->dma_regs + PAS_DMA_COM_CFG,
 | |
| +		 (in_le32(sc->dma_regs + PAS_DMA_COM_CFG) |
 | |
| +		  PAS_DMA_COM_CFG_FWF));
 | |
| +	out_le32(sc->dma_regs + PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN);
 | |
| +
 | |
| +	for (i = 0; i < PASEMI_FNU_CHANNELS; i++) {
 | |
| +		sc->sc_num_channels++;
 | |
| +		ret = pasemi_dma_setup_tx_resources(sc, i);
 | |
| +		if (ret)
 | |
| +			goto out;
 | |
| +	}
 | |
| +
 | |
| +	sc->sc_cid = crypto_get_driverid(softc_get_device(sc),
 | |
| +					 CRYPTOCAP_F_HARDWARE);
 | |
| +	if (sc->sc_cid < 0) {
 | |
| +		printk(KERN_ERR DRV_NAME ": could not get crypto driver id\n");
 | |
| +		ret = -ENXIO;
 | |
| +		goto out;
 | |
| +	}
 | |
| +
 | |
| +	/* register algorithms with the framework */
 | |
| +	printk(DRV_NAME ":");
 | |
| +
 | |
| +	crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
 | |
| +	crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
 | |
| +	crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
 | |
| +	crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0);
 | |
| +	crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
 | |
| +	crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
 | |
| +	crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
 | |
| +	crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
 | |
| +
 | |
| +	return 0;
 | |
| +
 | |
| +out:
 | |
| +	pasemi_dma_remove(pdev);
 | |
| +	return ret;
 | |
| +}
 | |
| +
 | |
| +#define MAX_RETRIES 5000
 | |
| +
 | |
| +static void pasemi_free_tx_resources(struct pasemi_softc *sc, int chan)
 | |
| +{
 | |
| +	struct pasemi_fnu_txring *ring = &sc->tx[chan];
 | |
| +	int chan_index = chan + sc->base_chan;
 | |
| +	int retries;
 | |
| +	u32 stat;
 | |
| +
 | |
| +	/* Stop the channel */
 | |
| +	out_le32(sc->dma_regs +
 | |
| +		 PAS_DMA_TXCHAN_TCMDSTA(chan_index),
 | |
| +		 PAS_DMA_TXCHAN_TCMDSTA_ST);
 | |
| +
 | |
| +	for (retries = 0; retries < MAX_RETRIES; retries++) {
 | |
| +		stat = in_le32(sc->dma_regs +
 | |
| +			       PAS_DMA_TXCHAN_TCMDSTA(chan_index));
 | |
| +		if (!(stat & PAS_DMA_TXCHAN_TCMDSTA_ACT))
 | |
| +			break;
 | |
| +		cond_resched();
 | |
| +	}
 | |
| +
 | |
| +	if (stat & PAS_DMA_TXCHAN_TCMDSTA_ACT)
 | |
| +		dev_err(&sc->dma_pdev->dev, "Failed to stop tx channel %d\n",
 | |
| +			chan_index);
 | |
| +
 | |
| +	/* Disable the channel */
 | |
| +	out_le32(sc->dma_regs +
 | |
| +		 PAS_DMA_TXCHAN_TCMDSTA(chan_index),
 | |
| +		 0);
 | |
| +
 | |
| +	if (ring->desc_info)
 | |
| +		kfree((void *) ring->desc_info);
 | |
| +	if (ring->desc)
 | |
| +		dma_free_coherent(&sc->dma_pdev->dev,
 | |
| +				  TX_RING_SIZE *
 | |
| +				  2 * sizeof(u64),
 | |
| +				  (void *) ring->desc, ring->dma);
 | |
| +	if (ring->irq != -1)
 | |
| +		free_irq(ring->irq, sc);
 | |
| +
 | |
| +	del_timer(&ring->crypto_timer);
 | |
| +}
 | |
| +
 | |
| +static void __devexit pasemi_dma_remove(struct pci_dev *pdev)
 | |
| +{
 | |
| +	struct pasemi_softc *sc = pci_get_drvdata(pdev);
 | |
| +	int i;
 | |
| +
 | |
| +	DPRINTF("%s()\n", __FUNCTION__);
 | |
| +
 | |
| +	if (sc->sc_cid >= 0) {
 | |
| +		crypto_unregister_all(sc->sc_cid);
 | |
| +	}
 | |
| +
 | |
| +	if (sc->tx) {
 | |
| +		for (i = 0; i < sc->sc_num_channels; i++)
 | |
| +			pasemi_free_tx_resources(sc, i);
 | |
| +
 | |
| +		kfree(sc->tx);
 | |
| +	}
 | |
| +	if (sc->sc_sessions) {
 | |
| +		for (i = 0; i < sc->sc_nsessions; i++)
 | |
| +			kfree(sc->sc_sessions[i]);
 | |
| +		kfree(sc->sc_sessions);
 | |
| +	}
 | |
| +	if (sc->iob_pdev)
 | |
| +		pci_dev_put(sc->iob_pdev);
 | |
| +	if (sc->dma_regs)
 | |
| +		iounmap(sc->dma_regs);
 | |
| +	if (sc->iob_regs)
 | |
| +		iounmap(sc->iob_regs);
 | |
| +	kfree(sc);
 | |
| +}
 | |
| +
 | |
| +static struct pci_device_id pasemi_dma_pci_tbl[] = {
 | |
| +	{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa007) },
 | |
| +};
 | |
| +
 | |
| +MODULE_DEVICE_TABLE(pci, pasemi_dma_pci_tbl);
 | |
| +
 | |
| +static struct pci_driver pasemi_dma_driver = {
 | |
| +	.name		= "pasemi_dma",
 | |
| +	.id_table	= pasemi_dma_pci_tbl,
 | |
| +	.probe		= pasemi_dma_probe,
 | |
| +	.remove		= __devexit_p(pasemi_dma_remove),
 | |
| +};
 | |
| +
 | |
| +static void __exit pasemi_dma_cleanup_module(void)
 | |
| +{
 | |
| +	pci_unregister_driver(&pasemi_dma_driver);
 | |
| +	__iounmap(dma_status);
 | |
| +	dma_status = NULL;
 | |
| +}
 | |
| +
 | |
| +int pasemi_dma_init_module(void)
 | |
| +{
 | |
| +	return pci_register_driver(&pasemi_dma_driver);
 | |
| +}
 | |
| +
 | |
| +module_init(pasemi_dma_init_module);
 | |
| +module_exit(pasemi_dma_cleanup_module);
 | |
| +
 | |
| +MODULE_LICENSE("Dual BSD/GPL");
 | |
| +MODULE_AUTHOR("Egor Martovetsky egor@pasemi.com");
 | |
| +MODULE_DESCRIPTION("OCF driver for PA Semi PWRficient DMA Crypto Engine");
 | |
| --- /dev/null
 | |
| +++ b/crypto/ocf/pasemi/pasemi_fnu.h
 | |
| @@ -0,0 +1,410 @@
 | |
| +/*
 | |
| + * Copyright (C) 2007 PA Semi, Inc
 | |
| + *
 | |
| + * Driver for the PA Semi PWRficient DMA Crypto Engine, soft state and
 | |
| + * hardware register layouts.
 | |
| + *
 | |
| + * This program is free software; you can redistribute it and/or modify
 | |
| + * it under the terms of the GNU General Public License version 2 as
 | |
| + * published by the Free Software Foundation.
 | |
| + *
 | |
| + * This program is distributed in the hope that it will be useful,
 | |
| + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 | |
| + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | |
| + * GNU General Public License for more details.
 | |
| + *
 | |
| + * You should have received a copy of the GNU General Public License
 | |
| + * along with this program; if not, write to the Free Software
 | |
| + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 | |
| + */
 | |
| +
 | |
| +#ifndef PASEMI_FNU_H
 | |
| +#define PASEMI_FNU_H
 | |
| +
 | |
| +#include <linux/spinlock.h>
 | |
| +
 | |
| +#define	PASEMI_SESSION(sid)	((sid) & 0xffffffff)
 | |
| +#define	PASEMI_SID(sesn)	((sesn) & 0xffffffff)
 | |
| +#define	DPRINTF(a...)	if (debug) { printk(DRV_NAME ": " a); }
 | |
| +
 | |
| +/* Must be a power of two */
 | |
| +#define RX_RING_SIZE 512
 | |
| +#define TX_RING_SIZE 512
 | |
| +#define TX_DESC(ring, num)	((ring)->desc[2 * (num & (TX_RING_SIZE-1))])
 | |
| +#define TX_DESC_INFO(ring, num)	((ring)->desc_info[(num) & (TX_RING_SIZE-1)])
 | |
| +#define MAX_DESC_SIZE 8
 | |
| +#define PASEMI_INITIAL_SESSIONS 10
 | |
| +#define PASEMI_FNU_CHANNELS 8
 | |
| +
 | |
| +/* DMA descriptor */
 | |
| +struct pasemi_desc {
 | |
| +	u64 quad[2*MAX_DESC_SIZE];
 | |
| +	int quad_cnt;
 | |
| +	int size;
 | |
| +	int postop;
 | |
| +};
 | |
| +
 | |
| +/*
 | |
| + * Holds per descriptor data
 | |
| + */
 | |
| +struct pasemi_desc_info {
 | |
| +	int			desc_size;
 | |
| +	int			desc_postop;
 | |
| +#define PASEMI_CHECK_SIG 0x1
 | |
| +
 | |
| +	struct cryptop          *cf_crp;
 | |
| +};
 | |
| +
 | |
| +/*
 | |
| + * Holds per channel data
 | |
| + */
 | |
| +struct pasemi_fnu_txring {
 | |
| +	volatile u64		*desc;
 | |
| +	volatile struct
 | |
| +	pasemi_desc_info	*desc_info;
 | |
| +	dma_addr_t		dma;
 | |
| +	struct timer_list       crypto_timer;
 | |
| +	spinlock_t		fill_lock;
 | |
| +	spinlock_t		clean_lock;
 | |
| +	unsigned int		next_to_fill;
 | |
| +	unsigned int		next_to_clean;
 | |
| +	u16			total_pktcnt;
 | |
| +	int			irq;
 | |
| +	int			sesn;
 | |
| +	char			irq_name[10];
 | |
| +};
 | |
| +
 | |
| +/*
 | |
| + * Holds data specific to a single pasemi device.
 | |
| + */
 | |
| +struct pasemi_softc {
 | |
| +	softc_device_decl	sc_cdev;
 | |
| +	struct pci_dev		*dma_pdev;	/* device backpointer */
 | |
| +	struct pci_dev		*iob_pdev;	/* device backpointer */
 | |
| +	void __iomem		*dma_regs;
 | |
| +	void __iomem		*iob_regs;
 | |
| +	int			base_irq;
 | |
| +	int			base_chan;
 | |
| +	int32_t			sc_cid;		/* crypto tag */
 | |
| +	int			sc_nsessions;
 | |
| +	struct pasemi_session	**sc_sessions;
 | |
| +	int			sc_num_channels;/* number of crypto channels */
 | |
| +
 | |
| +	/* pointer to the array of txring datastructures, one txring per channel */
 | |
| +	struct pasemi_fnu_txring *tx;
 | |
| +
 | |
| +	/*
 | |
| +	 * mutual exclusion for the channel scheduler
 | |
| +	 */
 | |
| +	spinlock_t		sc_chnlock;
 | |
| +	/* last channel used, for now use round-robin to allocate channels */
 | |
| +	int			sc_lastchn;
 | |
| +};
 | |
| +
 | |
| +struct pasemi_session {
 | |
| +	u64 civ[2];
 | |
| +	u64 keysz;
 | |
| +	u64 key[4];
 | |
| +	u64 ccmd;
 | |
| +	u64 hkey[4];
 | |
| +	u64 hseq;
 | |
| +	u64 giv[2];
 | |
| +	u64 hiv[4];
 | |
| +
 | |
| +	int used;
 | |
| +	dma_addr_t	dma_addr;
 | |
| +	int chan;
 | |
| +};
 | |
| +
 | |
| +/* status register layout in IOB region, at 0xfd800000 */
 | |
| +struct pasdma_status {
 | |
| +	u64 rx_sta[64];
 | |
| +	u64 tx_sta[20];
 | |
| +};
 | |
| +
 | |
| +#define ALG_IS_CIPHER(alg) ((alg == CRYPTO_DES_CBC)		|| \
 | |
| +				(alg == CRYPTO_3DES_CBC)	|| \
 | |
| +				(alg == CRYPTO_AES_CBC)		|| \
 | |
| +				(alg == CRYPTO_ARC4)		|| \
 | |
| +				(alg == CRYPTO_NULL_CBC))
 | |
| +
 | |
| +#define ALG_IS_SIG(alg) ((alg == CRYPTO_MD5)			|| \
 | |
| +				(alg == CRYPTO_MD5_HMAC)	|| \
 | |
| +				(alg == CRYPTO_SHA1)		|| \
 | |
| +				(alg == CRYPTO_SHA1_HMAC)	|| \
 | |
| +				(alg == CRYPTO_NULL_HMAC))
 | |
| +
 | |
| +enum {
 | |
| +	PAS_DMA_COM_TXCMD = 0x100,	/* Transmit Command Register  */
 | |
| +	PAS_DMA_COM_TXSTA = 0x104,	/* Transmit Status Register   */
 | |
| +	PAS_DMA_COM_RXCMD = 0x108,	/* Receive Command Register   */
 | |
| +	PAS_DMA_COM_RXSTA = 0x10c,	/* Receive Status Register    */
 | |
| +	PAS_DMA_COM_CFG   = 0x114,	/* DMA Configuration Register */
 | |
| +};
 | |
| +
 | |
| +/* All these registers live in the PCI configuration space for the DMA PCI
 | |
| + * device. Use the normal PCI config access functions for them.
 | |
| + */
 | |
| +
 | |
| +#define PAS_DMA_COM_CFG_FWF	0x18000000
 | |
| +
 | |
| +#define PAS_DMA_COM_TXCMD_EN	0x00000001 /* enable */
 | |
| +#define PAS_DMA_COM_TXSTA_ACT	0x00000001 /* active */
 | |
| +#define PAS_DMA_COM_RXCMD_EN	0x00000001 /* enable */
 | |
| +#define PAS_DMA_COM_RXSTA_ACT	0x00000001 /* active */
 | |
| +
 | |
| +#define _PAS_DMA_TXCHAN_STRIDE	0x20    /* Size per channel		*/
 | |
| +#define _PAS_DMA_TXCHAN_TCMDSTA	0x300	/* Command / Status		*/
 | |
| +#define _PAS_DMA_TXCHAN_CFG	0x304	/* Configuration		*/
 | |
| +#define _PAS_DMA_TXCHAN_DSCRBU	0x308	/* Descriptor BU Allocation	*/
 | |
| +#define _PAS_DMA_TXCHAN_INCR	0x310	/* Descriptor increment		*/
 | |
| +#define _PAS_DMA_TXCHAN_CNT	0x314	/* Descriptor count/offset	*/
 | |
| +#define _PAS_DMA_TXCHAN_BASEL	0x318	/* Descriptor ring base (low)	*/
 | |
| +#define _PAS_DMA_TXCHAN_BASEU	0x31c	/*			(high)	*/
 | |
| +#define PAS_DMA_TXCHAN_TCMDSTA(c) (0x300+(c)*_PAS_DMA_TXCHAN_STRIDE)
 | |
| +#define    PAS_DMA_TXCHAN_TCMDSTA_EN	0x00000001	/* Enabled */
 | |
| +#define    PAS_DMA_TXCHAN_TCMDSTA_ST	0x00000002	/* Stop interface */
 | |
| +#define    PAS_DMA_TXCHAN_TCMDSTA_ACT	0x00010000	/* Active */
 | |
| +#define PAS_DMA_TXCHAN_CFG(c)     (0x304+(c)*_PAS_DMA_TXCHAN_STRIDE)
 | |
| +#define    PAS_DMA_TXCHAN_CFG_TY_FUNC	0x00000002	/* Type = interface */
 | |
| +#define    PAS_DMA_TXCHAN_CFG_TY_IFACE	0x00000000	/* Type = interface */
 | |
| +#define    PAS_DMA_TXCHAN_CFG_TATTR_M	0x0000003c
 | |
| +#define    PAS_DMA_TXCHAN_CFG_TATTR_S	2
 | |
| +#define    PAS_DMA_TXCHAN_CFG_TATTR(x)	(((x) << PAS_DMA_TXCHAN_CFG_TATTR_S) & \
 | |
| +					 PAS_DMA_TXCHAN_CFG_TATTR_M)
 | |
| +#define    PAS_DMA_TXCHAN_CFG_WT_M	0x000001c0
 | |
| +#define    PAS_DMA_TXCHAN_CFG_WT_S	6
 | |
| +#define    PAS_DMA_TXCHAN_CFG_WT(x)	(((x) << PAS_DMA_TXCHAN_CFG_WT_S) & \
 | |
| +					 PAS_DMA_TXCHAN_CFG_WT_M)
 | |
| +#define    PAS_DMA_TXCHAN_CFG_LPSQ_FAST	0x00000400
 | |
| +#define    PAS_DMA_TXCHAN_CFG_LPDQ_FAST	0x00000800
 | |
| +#define    PAS_DMA_TXCHAN_CFG_CF	0x00001000	/* Clean first line */
 | |
| +#define    PAS_DMA_TXCHAN_CFG_CL	0x00002000	/* Clean last line */
 | |
| +#define    PAS_DMA_TXCHAN_CFG_UP	0x00004000	/* update tx descr when sent */
 | |
| +#define PAS_DMA_TXCHAN_INCR(c)    (0x310+(c)*_PAS_DMA_TXCHAN_STRIDE)
 | |
| +#define PAS_DMA_TXCHAN_BASEL(c)   (0x318+(c)*_PAS_DMA_TXCHAN_STRIDE)
 | |
| +#define    PAS_DMA_TXCHAN_BASEL_BRBL_M	0xffffffc0
 | |
| +#define    PAS_DMA_TXCHAN_BASEL_BRBL_S	0
 | |
| +#define    PAS_DMA_TXCHAN_BASEL_BRBL(x)	(((x) << PAS_DMA_TXCHAN_BASEL_BRBL_S) & \
 | |
| +					 PAS_DMA_TXCHAN_BASEL_BRBL_M)
 | |
| +#define PAS_DMA_TXCHAN_BASEU(c)   (0x31c+(c)*_PAS_DMA_TXCHAN_STRIDE)
 | |
| +#define    PAS_DMA_TXCHAN_BASEU_BRBH_M	0x00000fff
 | |
| +#define    PAS_DMA_TXCHAN_BASEU_BRBH_S	0
 | |
| +#define    PAS_DMA_TXCHAN_BASEU_BRBH(x)	(((x) << PAS_DMA_TXCHAN_BASEU_BRBH_S) & \
 | |
| +					 PAS_DMA_TXCHAN_BASEU_BRBH_M)
 | |
| +/* # of cache lines worth of buffer ring */
 | |
| +#define    PAS_DMA_TXCHAN_BASEU_SIZ_M	0x3fff0000
 | |
| +#define    PAS_DMA_TXCHAN_BASEU_SIZ_S	16		/* 0 = 16K */
 | |
| +#define    PAS_DMA_TXCHAN_BASEU_SIZ(x)	(((x) << PAS_DMA_TXCHAN_BASEU_SIZ_S) & \
 | |
| +					 PAS_DMA_TXCHAN_BASEU_SIZ_M)
 | |
| +
 | |
| +#define    PAS_STATUS_PCNT_M		0x000000000000ffffull
 | |
| +#define    PAS_STATUS_PCNT_S		0
 | |
| +#define    PAS_STATUS_DCNT_M		0x00000000ffff0000ull
 | |
| +#define    PAS_STATUS_DCNT_S		16
 | |
| +#define    PAS_STATUS_BPCNT_M		0x0000ffff00000000ull
 | |
| +#define    PAS_STATUS_BPCNT_S		32
 | |
| +#define    PAS_STATUS_CAUSE_M		0xf000000000000000ull
 | |
| +#define    PAS_STATUS_TIMER		0x1000000000000000ull
 | |
| +#define    PAS_STATUS_ERROR		0x2000000000000000ull
 | |
| +#define    PAS_STATUS_SOFT		0x4000000000000000ull
 | |
| +#define    PAS_STATUS_INT		0x8000000000000000ull
 | |
| +
 | |
| +#define PAS_IOB_DMA_RXCH_CFG(i)		(0x1100 + (i)*4)
 | |
| +#define    PAS_IOB_DMA_RXCH_CFG_CNTTH_M		0x00000fff
 | |
| +#define    PAS_IOB_DMA_RXCH_CFG_CNTTH_S		0
 | |
| +#define    PAS_IOB_DMA_RXCH_CFG_CNTTH(x)	(((x) << PAS_IOB_DMA_RXCH_CFG_CNTTH_S) & \
 | |
| +						 PAS_IOB_DMA_RXCH_CFG_CNTTH_M)
 | |
| +#define PAS_IOB_DMA_TXCH_CFG(i)		(0x1200 + (i)*4)
 | |
| +#define    PAS_IOB_DMA_TXCH_CFG_CNTTH_M		0x00000fff
 | |
| +#define    PAS_IOB_DMA_TXCH_CFG_CNTTH_S		0
 | |
| +#define    PAS_IOB_DMA_TXCH_CFG_CNTTH(x)	(((x) << PAS_IOB_DMA_TXCH_CFG_CNTTH_S) & \
 | |
| +						 PAS_IOB_DMA_TXCH_CFG_CNTTH_M)
 | |
| +#define PAS_IOB_DMA_RXCH_STAT(i)	(0x1300 + (i)*4)
 | |
| +#define    PAS_IOB_DMA_RXCH_STAT_INTGEN	0x00001000
 | |
| +#define    PAS_IOB_DMA_RXCH_STAT_CNTDEL_M	0x00000fff
 | |
| +#define    PAS_IOB_DMA_RXCH_STAT_CNTDEL_S	0
 | |
| +#define    PAS_IOB_DMA_RXCH_STAT_CNTDEL(x)	(((x) << PAS_IOB_DMA_RXCH_STAT_CNTDEL_S) &\
 | |
| +						 PAS_IOB_DMA_RXCH_STAT_CNTDEL_M)
 | |
| +#define PAS_IOB_DMA_TXCH_STAT(i)	(0x1400 + (i)*4)
 | |
| +#define    PAS_IOB_DMA_TXCH_STAT_INTGEN	0x00001000
 | |
| +#define    PAS_IOB_DMA_TXCH_STAT_CNTDEL_M	0x00000fff
 | |
| +#define    PAS_IOB_DMA_TXCH_STAT_CNTDEL_S	0
 | |
| +#define    PAS_IOB_DMA_TXCH_STAT_CNTDEL(x)	(((x) << PAS_IOB_DMA_TXCH_STAT_CNTDEL_S) &\
 | |
| +						 PAS_IOB_DMA_TXCH_STAT_CNTDEL_M)
 | |
| +#define PAS_IOB_DMA_RXCH_RESET(i)	(0x1500 + (i)*4)
 | |
| +#define    PAS_IOB_DMA_RXCH_RESET_PCNT_M	0xffff0000
 | |
| +#define    PAS_IOB_DMA_RXCH_RESET_PCNT_S	16
 | |
| +#define    PAS_IOB_DMA_RXCH_RESET_PCNT(x)	(((x) << PAS_IOB_DMA_RXCH_RESET_PCNT_S) & \
 | |
| +						 PAS_IOB_DMA_RXCH_RESET_PCNT_M)
 | |
| +#define    PAS_IOB_DMA_RXCH_RESET_PCNTRST	0x00000020
 | |
| +#define    PAS_IOB_DMA_RXCH_RESET_DCNTRST	0x00000010
 | |
| +#define    PAS_IOB_DMA_RXCH_RESET_TINTC		0x00000008
 | |
| +#define    PAS_IOB_DMA_RXCH_RESET_DINTC		0x00000004
 | |
| +#define    PAS_IOB_DMA_RXCH_RESET_SINTC		0x00000002
 | |
| +#define    PAS_IOB_DMA_RXCH_RESET_PINTC		0x00000001
 | |
| +#define PAS_IOB_DMA_TXCH_RESET(i)	(0x1600 + (i)*4)
 | |
| +#define    PAS_IOB_DMA_TXCH_RESET_PCNT_M	0xffff0000
 | |
| +#define    PAS_IOB_DMA_TXCH_RESET_PCNT_S	16
 | |
| +#define    PAS_IOB_DMA_TXCH_RESET_PCNT(x)	(((x) << PAS_IOB_DMA_TXCH_RESET_PCNT_S) & \
 | |
| +						 PAS_IOB_DMA_TXCH_RESET_PCNT_M)
 | |
| +#define    PAS_IOB_DMA_TXCH_RESET_PCNTRST	0x00000020
 | |
| +#define    PAS_IOB_DMA_TXCH_RESET_DCNTRST	0x00000010
 | |
| +#define    PAS_IOB_DMA_TXCH_RESET_TINTC		0x00000008
 | |
| +#define    PAS_IOB_DMA_TXCH_RESET_DINTC		0x00000004
 | |
| +#define    PAS_IOB_DMA_TXCH_RESET_SINTC		0x00000002
 | |
| +#define    PAS_IOB_DMA_TXCH_RESET_PINTC		0x00000001
 | |
| +
 | |
| +#define PAS_IOB_DMA_COM_TIMEOUTCFG		0x1700
 | |
| +#define    PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_M	0x00ffffff
 | |
| +#define    PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_S	0
 | |
| +#define    PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(x)	(((x) << PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_S) & \
 | |
| +						 PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_M)
 | |
| +
 | |
| +/* Transmit descriptor fields */
 | |
| +#define	XCT_MACTX_T		0x8000000000000000ull
 | |
| +#define	XCT_MACTX_ST		0x4000000000000000ull
 | |
| +#define XCT_MACTX_NORES		0x0000000000000000ull
 | |
| +#define XCT_MACTX_8BRES		0x1000000000000000ull
 | |
| +#define XCT_MACTX_24BRES	0x2000000000000000ull
 | |
| +#define XCT_MACTX_40BRES	0x3000000000000000ull
 | |
| +#define XCT_MACTX_I		0x0800000000000000ull
 | |
| +#define XCT_MACTX_O		0x0400000000000000ull
 | |
| +#define XCT_MACTX_E		0x0200000000000000ull
 | |
| +#define XCT_MACTX_VLAN_M	0x0180000000000000ull
 | |
| +#define XCT_MACTX_VLAN_NOP	0x0000000000000000ull
 | |
| +#define XCT_MACTX_VLAN_REMOVE	0x0080000000000000ull
 | |
| +#define XCT_MACTX_VLAN_INSERT   0x0100000000000000ull
 | |
| +#define XCT_MACTX_VLAN_REPLACE  0x0180000000000000ull
 | |
| +#define XCT_MACTX_CRC_M		0x0060000000000000ull
 | |
| +#define XCT_MACTX_CRC_NOP	0x0000000000000000ull
 | |
| +#define XCT_MACTX_CRC_INSERT	0x0020000000000000ull
 | |
| +#define XCT_MACTX_CRC_PAD	0x0040000000000000ull
 | |
| +#define XCT_MACTX_CRC_REPLACE	0x0060000000000000ull
 | |
| +#define XCT_MACTX_SS		0x0010000000000000ull
 | |
| +#define XCT_MACTX_LLEN_M	0x00007fff00000000ull
 | |
| +#define XCT_MACTX_LLEN_S	32ull
 | |
| +#define XCT_MACTX_LLEN(x)	((((long)(x)) << XCT_MACTX_LLEN_S) & \
 | |
| +				 XCT_MACTX_LLEN_M)
 | |
| +#define XCT_MACTX_IPH_M		0x00000000f8000000ull
 | |
| +#define XCT_MACTX_IPH_S		27ull
 | |
| +#define XCT_MACTX_IPH(x)	((((long)(x)) << XCT_MACTX_IPH_S) & \
 | |
| +				 XCT_MACTX_IPH_M)
 | |
| +#define XCT_MACTX_IPO_M		0x0000000007c00000ull
 | |
| +#define XCT_MACTX_IPO_S		22ull
 | |
| +#define XCT_MACTX_IPO(x)	((((long)(x)) << XCT_MACTX_IPO_S) & \
 | |
| +				 XCT_MACTX_IPO_M)
 | |
| +#define XCT_MACTX_CSUM_M	0x0000000000000060ull
 | |
| +#define XCT_MACTX_CSUM_NOP	0x0000000000000000ull
 | |
| +#define XCT_MACTX_CSUM_TCP	0x0000000000000040ull
 | |
| +#define XCT_MACTX_CSUM_UDP	0x0000000000000060ull
 | |
| +#define XCT_MACTX_V6		0x0000000000000010ull
 | |
| +#define XCT_MACTX_C		0x0000000000000004ull
 | |
| +#define XCT_MACTX_AL2		0x0000000000000002ull
 | |
| +
 | |
| +#define XCT_PTR_T		0x8000000000000000ull
 | |
| +#define XCT_PTR_LEN_M		0x7ffff00000000000ull
 | |
| +#define XCT_PTR_LEN_S		44
 | |
| +#define XCT_PTR_LEN(x)		((((long)(x)) << XCT_PTR_LEN_S) & \
 | |
| +				 XCT_PTR_LEN_M)
 | |
| +#define XCT_PTR_ADDR_M		0x00000fffffffffffull
 | |
| +#define XCT_PTR_ADDR_S		0
 | |
| +#define XCT_PTR_ADDR(x)		((((long)(x)) << XCT_PTR_ADDR_S) & \
 | |
| +				 XCT_PTR_ADDR_M)
 | |
| +
 | |
| +/* Function descriptor fields */
 | |
| +#define	XCT_FUN_T		0x8000000000000000ull
 | |
| +#define	XCT_FUN_ST		0x4000000000000000ull
 | |
| +#define XCT_FUN_NORES		0x0000000000000000ull
 | |
| +#define XCT_FUN_8BRES		0x1000000000000000ull
 | |
| +#define XCT_FUN_24BRES		0x2000000000000000ull
 | |
| +#define XCT_FUN_40BRES		0x3000000000000000ull
 | |
| +#define XCT_FUN_I		0x0800000000000000ull
 | |
| +#define XCT_FUN_O		0x0400000000000000ull
 | |
| +#define XCT_FUN_E		0x0200000000000000ull
 | |
| +#define XCT_FUN_FUN_S		54
 | |
| +#define XCT_FUN_FUN_M		0x01c0000000000000ull
 | |
| +#define XCT_FUN_FUN(num)	((((long)(num)) << XCT_FUN_FUN_S) & \
 | |
| +				XCT_FUN_FUN_M)
 | |
| +#define XCT_FUN_CRM_NOP		0x0000000000000000ull
 | |
| +#define XCT_FUN_CRM_SIG		0x0008000000000000ull
 | |
| +#define XCT_FUN_CRM_ENC		0x0010000000000000ull
 | |
| +#define XCT_FUN_CRM_DEC		0x0018000000000000ull
 | |
| +#define XCT_FUN_CRM_SIG_ENC	0x0020000000000000ull
 | |
| +#define XCT_FUN_CRM_ENC_SIG	0x0028000000000000ull
 | |
| +#define XCT_FUN_CRM_SIG_DEC	0x0030000000000000ull
 | |
| +#define XCT_FUN_CRM_DEC_SIG	0x0038000000000000ull
 | |
| +#define XCT_FUN_LLEN_M		0x0007ffff00000000ull
 | |
| +#define XCT_FUN_LLEN_S		32ULL
 | |
| +#define XCT_FUN_LLEN(x)		((((long)(x)) << XCT_FUN_LLEN_S) & \
 | |
| +				 XCT_FUN_LLEN_M)
 | |
| +#define XCT_FUN_SHL_M		0x00000000f8000000ull
 | |
| +#define XCT_FUN_SHL_S		27ull
 | |
| +#define XCT_FUN_SHL(x)		((((long)(x)) << XCT_FUN_SHL_S) & \
 | |
| +				 XCT_FUN_SHL_M)
 | |
| +#define XCT_FUN_CHL_M		0x0000000007c00000ull
 | |
| +#define XCT_FUN_CHL_S		22ull
 | |
| +#define XCT_FUN_CHL(x)		((((long)(x)) << XCT_FUN_CHL_S) & \
 | |
| +				 XCT_FUN_CHL_M)
 | |
| +#define XCT_FUN_HSZ_M		0x00000000003c0000ull
 | |
| +#define XCT_FUN_HSZ_S		18ull
 | |
| +#define XCT_FUN_HSZ(x)		((((long)(x)) << XCT_FUN_HSZ_S) & \
 | |
| +				 XCT_FUN_HSZ_M)
 | |
| +#define XCT_FUN_ALG_DES		0x0000000000000000ull
 | |
| +#define XCT_FUN_ALG_3DES	0x0000000000008000ull
 | |
| +#define XCT_FUN_ALG_AES		0x0000000000010000ull
 | |
| +#define XCT_FUN_ALG_ARC		0x0000000000018000ull
 | |
| +#define XCT_FUN_ALG_KASUMI	0x0000000000020000ull
 | |
| +#define XCT_FUN_BCM_ECB		0x0000000000000000ull
 | |
| +#define XCT_FUN_BCM_CBC		0x0000000000001000ull
 | |
| +#define XCT_FUN_BCM_CFB		0x0000000000002000ull
 | |
| +#define XCT_FUN_BCM_OFB		0x0000000000003000ull
 | |
| +#define XCT_FUN_BCM_CNT		0x0000000000003800ull
 | |
| +#define XCT_FUN_BCM_KAS_F8	0x0000000000002800ull
 | |
| +#define XCT_FUN_BCM_KAS_F9	0x0000000000001800ull
 | |
| +#define XCT_FUN_BCP_NO_PAD	0x0000000000000000ull
 | |
| +#define XCT_FUN_BCP_ZRO		0x0000000000000200ull
 | |
| +#define XCT_FUN_BCP_PL		0x0000000000000400ull
 | |
| +#define XCT_FUN_BCP_INCR	0x0000000000000600ull
 | |
| +#define XCT_FUN_SIG_MD5		(0ull << 4)
 | |
| +#define XCT_FUN_SIG_SHA1	(2ull << 4)
 | |
| +#define XCT_FUN_SIG_HMAC_MD5	(8ull << 4)
 | |
| +#define XCT_FUN_SIG_HMAC_SHA1	(10ull << 4)
 | |
| +#define XCT_FUN_A		0x0000000000000008ull
 | |
| +#define XCT_FUN_C		0x0000000000000004ull
 | |
| +#define XCT_FUN_AL2		0x0000000000000002ull
 | |
| +#define XCT_FUN_SE		0x0000000000000001ull
 | |
| +
 | |
| +#define XCT_FUN_SRC_PTR(len, addr)	(XCT_PTR_LEN(len) | XCT_PTR_ADDR(addr))
 | |
| +#define XCT_FUN_DST_PTR(len, addr)	(XCT_FUN_SRC_PTR(len, addr) | \
 | |
| +					0x8000000000000000ull)
 | |
| +
 | |
| +#define XCT_CTRL_HDR_FUN_NUM_M		0x01c0000000000000ull
 | |
| +#define XCT_CTRL_HDR_FUN_NUM_S		54
 | |
| +#define XCT_CTRL_HDR_LEN_M		0x0007ffff00000000ull
 | |
| +#define XCT_CTRL_HDR_LEN_S		32
 | |
| +#define XCT_CTRL_HDR_REG_M		0x00000000000000ffull
 | |
| +#define XCT_CTRL_HDR_REG_S		0
 | |
| +
 | |
| +#define XCT_CTRL_HDR(funcN,len,reg)	(0x9400000000000000ull | \
 | |
| +			((((long)(funcN)) << XCT_CTRL_HDR_FUN_NUM_S) \
 | |
| +			& XCT_CTRL_HDR_FUN_NUM_M) | \
 | |
| +			((((long)(len)) << \
 | |
| +			XCT_CTRL_HDR_LEN_S) & XCT_CTRL_HDR_LEN_M) | \
 | |
| +			((((long)(reg)) << \
 | |
| +			XCT_CTRL_HDR_REG_S) & XCT_CTRL_HDR_REG_M))
 | |
| +
 | |
| +/* Function config command options */
 | |
| +#define	DMA_CALGO_DES			0x00
 | |
| +#define	DMA_CALGO_3DES			0x01
 | |
| +#define	DMA_CALGO_AES			0x02
 | |
| +#define	DMA_CALGO_ARC			0x03
 | |
| +
 | |
| +#define DMA_FN_CIV0			0x02
 | |
| +#define DMA_FN_CIV1			0x03
 | |
| +#define DMA_FN_HKEY0			0x0a
 | |
| +
 | |
| +#define XCT_PTR_ADDR_LEN(ptr)		((ptr) & XCT_PTR_ADDR_M), \
 | |
| +			(((ptr) & XCT_PTR_LEN_M) >> XCT_PTR_LEN_S)
 | |
| +
 | |
| +#endif /* PASEMI_FNU_H */
 |