android_kernel_motorola_sm6225/crypto/tcrypt.c
Greg Kroah-Hartman f66335a3cf This is the 4.19.270 stable release
-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmPHynkACgkQONu9yGCS
 aT5AtBAAsdmYCYkmKZsRcS1EUTqdwKVN7FDILDcdMjfmrSp4ZDliaD1dUc0EmDRl
 yy+aNGCrhbuYACk9WdQsSUrUIh1dK0H5VsioB1m0cjCgifbNjsYqjYWK5ewXKUyX
 yjc+NmY1HVUFQDLnYHJxSbnB/o+nobWjts8nGuWHwQmoh7UmFe7lvMqZg753x6Bw
 wCiaC1DrU3aKHYK7IirdWgOiDiGia8DX1nX6PmLi6JTsXj+Io0i8PXKkFzANDf/p
 /rOyg7j8NOXIQPZGN0Zu88QiMWsNk7u2bOORZgtFbwo7r9BFbzXfWk/x8QxzDX1B
 iH1p02XQvBwm44xGJZKiWEY2nZdw4mpyzLXZNOL8V7vn9xhT6HDksVAPnyIkU8Dh
 wsij2r27x18VI9H7sstvAHvIyg6ihmq2E6WuC4W74tUcys7MXxCFc2DuJzMMocf0
 7LMTmx3/oUHvuM1riJ9STo9mzXbTmfNd6hnqRnFgGKiGGhOE+pX//RHfupaXRieQ
 Rq51ODFKcJdDIM7hxeyPdACYF/kso8sNEODCgQ5/+3opel1mzLdBJ1T2bV12DpQe
 ZhTsESPCVSoUAjCnC9Jje3g0u3qztClYq1faHOXtnjykn9mHmmedVwvdfJL/sOsr
 ec7NgqzM9xvMQVe4CNf0mouugaLpn2m6uQDTu+GWswRfEKuCx2Q=
 =ksam
 -----END PGP SIGNATURE-----

Merge 4.19.270 into android-4.19-stable

Changes in 4.19.270
	mm/khugepaged: fix GUP-fast interaction by sending IPI
	mm/khugepaged: invoke MMU notifiers in shmem/file collapse paths
	block: unhash blkdev part inode when the part is deleted
	nfp: fix use-after-free in area_cache_get()
	ASoC: ops: Check bounds for second channel in snd_soc_put_volsw_sx()
	pinctrl: meditatek: Startup with the IRQs disabled
	can: sja1000: fix size of OCR_MODE_MASK define
	can: mcba_usb: Fix termination command argument
	ASoC: ops: Correct bounds check for second channel on SX controls
	perf script python: Remove explicit shebang from tests/attr.c
	udf: Discard preallocation before extending file with a hole
	udf: Fix preallocation discarding at indirect extent boundary
	udf: Do not bother looking for prealloc extents if i_lenExtents matches i_size
	udf: Fix extending file within last block
	usb: gadget: uvc: Prevent buffer overflow in setup handler
	USB: serial: option: add Quectel EM05-G modem
	USB: serial: cp210x: add Kamstrup RF sniffer PIDs
	USB: serial: f81534: fix division by zero on line-speed change
	igb: Initialize mailbox message for VF reset
	Bluetooth: L2CAP: Fix u8 overflow
	net: loopback: use NET_NAME_PREDICTABLE for name_assign_type
	usb: musb: remove extra check in musb_gadget_vbus_draw
	ARM: dts: qcom: apq8064: fix coresight compatible
	drivers: soc: ti: knav_qmss_queue: Mark knav_acc_firmwares as static
	arm: dts: spear600: Fix clcd interrupt
	soc: ti: smartreflex: Fix PM disable depth imbalance in omap_sr_probe
	perf: arm_dsu: Fix hotplug callback leak in dsu_pmu_init()
	arm64: dts: mt2712e: Fix unit_address_vs_reg warning for oscillators
	arm64: dts: mt2712e: Fix unit address for pinctrl node
	arm64: dts: mt2712-evb: Fix vproc fixed regulators unit names
	arm64: dts: mediatek: mt6797: Fix 26M oscillator unit name
	ARM: dts: dove: Fix assigned-addresses for every PCIe Root Port
	ARM: dts: armada-370: Fix assigned-addresses for every PCIe Root Port
	ARM: dts: armada-xp: Fix assigned-addresses for every PCIe Root Port
	ARM: dts: armada-375: Fix assigned-addresses for every PCIe Root Port
	ARM: dts: armada-38x: Fix assigned-addresses for every PCIe Root Port
	ARM: dts: armada-39x: Fix assigned-addresses for every PCIe Root Port
	ARM: dts: turris-omnia: Add ethernet aliases
	ARM: dts: turris-omnia: Add switch port 6 node
	pstore/ram: Fix error return code in ramoops_probe()
	ARM: mmp: fix timer_read delay
	pstore: Avoid kcore oops by vmap()ing with VM_IOREMAP
	tpm/tpm_crb: Fix error message in __crb_relinquish_locality()
	cpuidle: dt: Return the correct numbers of parsed idle states
	alpha: fix syscall entry in !AUDUT_SYSCALL case
	fs: don't audit the capability check in simple_xattr_list()
	selftests/ftrace: event_triggers: wait longer for test_event_enable
	perf: Fix possible memleak in pmu_dev_alloc()
	timerqueue: Use rb_entry_safe() in timerqueue_getnext()
	proc: fixup uptime selftest
	ocfs2: fix memory leak in ocfs2_stack_glue_init()
	MIPS: vpe-mt: fix possible memory leak while module exiting
	MIPS: vpe-cmp: fix possible memory leak while module exiting
	PNP: fix name memory leak in pnp_alloc_dev()
	perf/x86/intel/uncore: Fix reference count leak in hswep_has_limit_sbox()
	irqchip: gic-pm: Use pm_runtime_resume_and_get() in gic_probe()
	cpufreq: amd_freq_sensitivity: Add missing pci_dev_put()
	libfs: add DEFINE_SIMPLE_ATTRIBUTE_SIGNED for signed value
	lib/notifier-error-inject: fix error when writing -errno to debugfs file
	debugfs: fix error when writing negative value to atomic_t debugfs file
	rapidio: fix possible name leaks when rio_add_device() fails
	rapidio: rio: fix possible name leak in rio_register_mport()
	clocksource/drivers/sh_cmt: Make sure channel clock supply is enabled
	ACPICA: Fix use-after-free in acpi_ut_copy_ipackage_to_ipackage()
	uprobes/x86: Allow to probe a NOP instruction with 0x66 prefix
	xen/events: only register debug interrupt for 2-level events
	x86/xen: Fix memory leak in xen_smp_intr_init{_pv}()
	x86/xen: Fix memory leak in xen_init_lock_cpu()
	xen/privcmd: Fix a possible warning in privcmd_ioctl_mmap_resource()
	PM: runtime: Improve path in rpm_idle() when no callback
	PM: runtime: Do not call __rpm_callback() from rpm_idle()
	platform/x86: mxm-wmi: fix memleak in mxm_wmi_call_mx[ds|mx]()
	MIPS: BCM63xx: Add check for NULL for clk in clk_enable
	fs: sysv: Fix sysv_nblocks() returns wrong value
	rapidio: fix possible UAF when kfifo_alloc() fails
	eventfd: change int to __u64 in eventfd_signal() ifndef CONFIG_EVENTFD
	relay: fix type mismatch when allocating memory in relay_create_buf()
	hfs: Fix OOB Write in hfs_asc2mac
	rapidio: devices: fix missing put_device in mport_cdev_open
	wifi: ath9k: hif_usb: fix memory leak of urbs in ath9k_hif_usb_dealloc_tx_urbs()
	wifi: ath9k: hif_usb: Fix use-after-free in ath9k_hif_usb_reg_in_cb()
	wifi: rtl8xxxu: Fix reading the vendor of combo chips
	pata_ipx4xx_cf: Fix unsigned comparison with less than zero
	media: i2c: ad5820: Fix error path
	can: kvaser_usb: do not increase tx statistics when sending error message frames
	can: kvaser_usb: kvaser_usb_leaf: Get capabilities from device
	can: kvaser_usb: kvaser_usb_leaf: Rename {leaf,usbcan}_cmd_error_event to {leaf,usbcan}_cmd_can_error_event
	can: kvaser_usb: kvaser_usb_leaf: Handle CMD_ERROR_EVENT
	can: kvaser_usb_leaf: Set Warning state even without bus errors
	can: kvaser_usb_leaf: Fix improved state not being reported
	can: kvaser_usb_leaf: Fix wrong CAN state after stopping
	can: kvaser_usb_leaf: Fix bogus restart events
	can: kvaser_usb: Add struct kvaser_usb_busparams
	can: kvaser_usb: Compare requested bittiming parameters with actual parameters in do_set_{,data}_bittiming
	spi: Update reference to struct spi_controller
	media: vivid: fix compose size exceed boundary
	mtd: Fix device name leak when register device failed in add_mtd_device()
	wifi: rsi: Fix handling of 802.3 EAPOL frames sent via control port
	media: camss: Clean up received buffers on failed start of streaming
	net, proc: Provide PROC_FS=n fallback for proc_create_net_single_write()
	drm/radeon: Add the missed acpi_put_table() to fix memory leak
	ASoC: pxa: fix null-pointer dereference in filter()
	regulator: core: fix unbalanced of node refcount in regulator_dev_lookup()
	ima: Fix misuse of dereference of pointer in template_desc_init_fields()
	wifi: ath10k: Fix return value in ath10k_pci_init()
	mtd: lpddr2_nvm: Fix possible null-ptr-deref
	Input: elants_i2c - properly handle the reset GPIO when power is off
	media: solo6x10: fix possible memory leak in solo_sysfs_init()
	media: platform: exynos4-is: Fix error handling in fimc_md_init()
	HID: hid-sensor-custom: set fixed size for custom attributes
	ALSA: seq: fix undefined behavior in bit shift for SNDRV_SEQ_FILTER_USE_EVENT
	clk: rockchip: Fix memory leak in rockchip_clk_register_pll()
	bonding: Export skip slave logic to function
	mtd: maps: pxa2xx-flash: fix memory leak in probe
	drbd: remove call to memset before free device/resource/connection
	media: imon: fix a race condition in send_packet()
	pinctrl: pinconf-generic: add missing of_node_put()
	media: dvb-core: Fix ignored return value in dvb_register_frontend()
	media: dvb-usb: az6027: fix null-ptr-deref in az6027_i2c_xfer()
	media: s5p-mfc: Add variant data for MFC v7 hardware for Exynos 3250 SoC
	drm/tegra: Add missing clk_disable_unprepare() in tegra_dc_probe()
	NFSv4.2: Fix a memory stomp in decode_attr_security_label
	NFSv4: Fix a deadlock between nfs4_open_recover_helper() and delegreturn
	ALSA: asihpi: fix missing pci_disable_device()
	drm/radeon: Fix PCI device refcount leak in radeon_atrm_get_bios()
	drm/amdgpu: Fix PCI device refcount leak in amdgpu_atrm_get_bios()
	ASoC: pcm512x: Fix PM disable depth imbalance in pcm512x_probe
	bonding: uninitialized variable in bond_miimon_inspect()
	wifi: cfg80211: Fix not unregister reg_pdev when load_builtin_regdb_keys() fails
	regulator: core: fix module refcount leak in set_supply()
	media: saa7164: fix missing pci_disable_device()
	ALSA: mts64: fix possible null-ptr-defer in snd_mts64_interrupt
	SUNRPC: Fix missing release socket in rpc_sockname()
	NFSv4.x: Fail client initialisation if state manager thread can't run
	mmc: moxart: fix return value check of mmc_add_host()
	mmc: mxcmmc: fix return value check of mmc_add_host()
	mmc: rtsx_usb_sdmmc: fix return value check of mmc_add_host()
	mmc: toshsd: fix return value check of mmc_add_host()
	mmc: vub300: fix return value check of mmc_add_host()
	mmc: wmt-sdmmc: fix return value check of mmc_add_host()
	mmc: atmel-mci: fix return value check of mmc_add_host()
	mmc: meson-gx: fix return value check of mmc_add_host()
	mmc: via-sdmmc: fix return value check of mmc_add_host()
	mmc: wbsd: fix return value check of mmc_add_host()
	mmc: mmci: fix return value check of mmc_add_host()
	media: c8sectpfe: Add of_node_put() when breaking out of loop
	media: coda: Add check for dcoda_iram_alloc
	media: coda: Add check for kmalloc
	clk: samsung: Fix memory leak in _samsung_clk_register_pll()
	wifi: rtl8xxxu: Add __packed to struct rtl8723bu_c2h
	rtl8xxxu: add enumeration for channel bandwidth
	wifi: brcmfmac: Fix error return code in brcmf_sdio_download_firmware()
	blktrace: Fix output non-blktrace event when blk_classic option enabled
	clk: socfpga: clk-pll: Remove unused variable 'rc'
	clk: socfpga: use clk_hw_register for a5/c5
	net: vmw_vsock: vmci: Check memcpy_from_msg()
	net: defxx: Fix missing err handling in dfx_init()
	drivers: net: qlcnic: Fix potential memory leak in qlcnic_sriov_init()
	ethernet: s2io: don't call dev_kfree_skb() under spin_lock_irqsave()
	net: farsync: Fix kmemleak when rmmods farsync
	net/tunnel: wait until all sk_user_data reader finish before releasing the sock
	net: apple: mace: don't call dev_kfree_skb() under spin_lock_irqsave()
	net: apple: bmac: don't call dev_kfree_skb() under spin_lock_irqsave()
	net: emaclite: don't call dev_kfree_skb() under spin_lock_irqsave()
	net: ethernet: dnet: don't call dev_kfree_skb() under spin_lock_irqsave()
	hamradio: don't call dev_kfree_skb() under spin_lock_irqsave()
	net: amd: lance: don't call dev_kfree_skb() under spin_lock_irqsave()
	net: amd-xgbe: Fix logic around active and passive cables
	net: amd-xgbe: Check only the minimum speed for active/passive cables
	net: lan9303: Fix read error execution path
	ntb_netdev: Use dev_kfree_skb_any() in interrupt context
	Bluetooth: btusb: don't call kfree_skb() under spin_lock_irqsave()
	Bluetooth: hci_qca: don't call kfree_skb() under spin_lock_irqsave()
	Bluetooth: hci_h5: don't call kfree_skb() under spin_lock_irqsave()
	Bluetooth: hci_bcsp: don't call kfree_skb() under spin_lock_irqsave()
	Bluetooth: hci_core: don't call kfree_skb() under spin_lock_irqsave()
	Bluetooth: RFCOMM: don't call kfree_skb() under spin_lock_irqsave()
	stmmac: fix potential division by 0
	apparmor: fix a memleak in multi_transaction_new()
	apparmor: fix lockdep warning when removing a namespace
	apparmor: Fix abi check to include v8 abi
	f2fs: fix normal discard process
	RDMA/nldev: Return "-EAGAIN" if the cm_id isn't from expected port
	scsi: scsi_debug: Fix a warning in resp_write_scat()
	PCI: Check for alloc failure in pci_request_irq()
	RDMA/hfi: Decrease PCI device reference count in error path
	crypto: ccree - Make cc_debugfs_global_fini() available for module init function
	RDMA/rxe: Fix NULL-ptr-deref in rxe_qp_do_cleanup() when socket create failed
	scsi: hpsa: use local workqueues instead of system workqueues
	scsi: hpsa: Fix possible memory leak in hpsa_init_one()
	crypto: tcrypt - Fix multibuffer skcipher speed test mem leak
	scsi: hpsa: Fix error handling in hpsa_add_sas_host()
	scsi: hpsa: Fix possible memory leak in hpsa_add_sas_device()
	scsi: fcoe: Fix possible name leak when device_register() fails
	scsi: ipr: Fix WARNING in ipr_init()
	scsi: fcoe: Fix transport not deattached when fcoe_if_init() fails
	scsi: snic: Fix possible UAF in snic_tgt_create()
	RDMA/hfi1: Fix error return code in parse_platform_config()
	orangefs: Fix sysfs not cleanup when dev init failed
	crypto: img-hash - Fix variable dereferenced before check 'hdev->req'
	hwrng: amd - Fix PCI device refcount leak
	hwrng: geode - Fix PCI device refcount leak
	IB/IPoIB: Fix queue count inconsistency for PKEY child interfaces
	drivers: dio: fix possible memory leak in dio_init()
	serial: tegra: avoid reg access when clk disabled
	serial: tegra: check for FIFO mode enabled status
	serial: tegra: set maximum num of uart ports to 8
	serial: tegra: add support to use 8 bytes trigger
	serial: tegra: add support to adjust baud rate
	serial: tegra: report clk rate errors
	serial: tegra: Add PIO mode support
	tty: serial: tegra: Activate RX DMA transfer by request
	serial: tegra: Read DMA status before terminating
	class: fix possible memory leak in __class_register()
	vfio: platform: Do not pass return buffer to ACPI _RST method
	uio: uio_dmem_genirq: Fix missing unlock in irq configuration
	uio: uio_dmem_genirq: Fix deadlock between irq config and handling
	usb: fotg210-udc: Fix ages old endianness issues
	staging: vme_user: Fix possible UAF in tsi148_dma_list_add
	usb: typec: Check for ops->exit instead of ops->enter in altmode_exit
	serial: amba-pl011: avoid SBSA UART accessing DMACR register
	serial: pl011: Do not clear RX FIFO & RX interrupt in unthrottle.
	serial: pch: Fix PCI device refcount leak in pch_request_dma()
	tty: serial: clean up stop-tx part in altera_uart_tx_chars()
	tty: serial: altera_uart_{r,t}x_chars() need only uart_port
	serial: altera_uart: fix locking in polling mode
	serial: sunsab: Fix error handling in sunsab_init()
	test_firmware: fix memory leak in test_firmware_init()
	misc: tifm: fix possible memory leak in tifm_7xx1_switch_media()
	misc: sgi-gru: fix use-after-free error in gru_set_context_option, gru_fault and gru_handle_user_call_os
	cxl: fix possible null-ptr-deref in cxl_guest_init_afu|adapter()
	cxl: fix possible null-ptr-deref in cxl_pci_init_afu|adapter()
	usb: gadget: f_hid: optional SETUP/SET_REPORT mode
	usb: gadget: f_hid: fix f_hidg lifetime vs cdev
	usb: gadget: f_hid: fix refcount leak on error path
	drivers: mcb: fix resource leak in mcb_probe()
	mcb: mcb-parse: fix error handing in chameleon_parse_gdd()
	chardev: fix error handling in cdev_device_add()
	i2c: pxa-pci: fix missing pci_disable_device() on error in ce4100_i2c_probe
	staging: rtl8192u: Fix use after free in ieee80211_rx()
	staging: rtl8192e: Fix potential use-after-free in rtllib_rx_Monitor()
	vme: Fix error not catched in fake_init()
	i2c: ismt: Fix an out-of-bounds bug in ismt_access()
	usb: storage: Add check for kcalloc
	tracing/hist: Fix issue of losting command info in error_log
	samples: vfio-mdev: Fix missing pci_disable_device() in mdpy_fb_probe()
	fbdev: ssd1307fb: Drop optional dependency
	fbdev: pm2fb: fix missing pci_disable_device()
	fbdev: via: Fix error in via_core_init()
	fbdev: vermilion: decrease reference count in error path
	fbdev: uvesafb: Fixes an error handling path in uvesafb_probe()
	HSI: omap_ssi_core: fix unbalanced pm_runtime_disable()
	HSI: omap_ssi_core: fix possible memory leak in ssi_probe()
	power: supply: fix residue sysfs file in error handle route of __power_supply_register()
	perf symbol: correction while adjusting symbol
	HSI: omap_ssi_core: Fix error handling in ssi_init()
	include/uapi/linux/swab: Fix potentially missing __always_inline
	rtc: snvs: Allow a time difference on clock register read
	iommu/amd: Fix pci device refcount leak in ppr_notifier()
	iommu/fsl_pamu: Fix resource leak in fsl_pamu_probe()
	macintosh: fix possible memory leak in macio_add_one_device()
	macintosh/macio-adb: check the return value of ioremap()
	powerpc/52xx: Fix a resource leak in an error handling path
	cxl: Fix refcount leak in cxl_calc_capp_routing
	powerpc/xive: add missing iounmap() in error path in xive_spapr_populate_irq_data()
	powerpc/perf: callchain validate kernel stack pointer bounds
	powerpc/83xx/mpc832x_rdb: call platform_device_put() in error case in of_fsl_spi_probe()
	powerpc/hv-gpci: Fix hv_gpci event list
	selftests/powerpc: Fix resource leaks
	rtc: st-lpc: Add missing clk_disable_unprepare in st_rtc_probe()
	nfsd: under NFSv4.1, fix double svc_xprt_put on rpc_create failure
	mISDN: hfcsusb: don't call dev_kfree_skb/kfree_skb() under spin_lock_irqsave()
	mISDN: hfcpci: don't call dev_kfree_skb/kfree_skb() under spin_lock_irqsave()
	mISDN: hfcmulti: don't call dev_kfree_skb/kfree_skb() under spin_lock_irqsave()
	nfc: pn533: Clear nfc_target before being used
	r6040: Fix kmemleak in probe and remove
	rtc: mxc_v2: Add missing clk_disable_unprepare()
	openvswitch: Fix flow lookup to use unmasked key
	skbuff: Account for tail adjustment during pull operations
	net_sched: reject TCF_EM_SIMPLE case for complex ematch module
	rxrpc: Fix missing unlock in rxrpc_do_sendmsg()
	myri10ge: Fix an error handling path in myri10ge_probe()
	net: stream: purge sk_error_queue in sk_stream_kill_queues()
	binfmt_misc: fix shift-out-of-bounds in check_special_flags
	fs: jfs: fix shift-out-of-bounds in dbAllocAG
	udf: Avoid double brelse() in udf_rename()
	fs: jfs: fix shift-out-of-bounds in dbDiscardAG
	ACPICA: Fix error code path in acpi_ds_call_control_method()
	nilfs2: fix shift-out-of-bounds/overflow in nilfs_sb2_bad_offset()
	acct: fix potential integer overflow in encode_comp_t()
	hfs: fix OOB Read in __hfs_brec_find
	wifi: ath9k: verify the expected usb_endpoints are present
	wifi: ar5523: Fix use-after-free on ar5523_cmd() timed out
	ASoC: codecs: rt298: Add quirk for KBL-R RVP platform
	ipmi: fix memleak when unload ipmi driver
	bpf: make sure skb->len != 0 when redirecting to a tunneling device
	net: ethernet: ti: Fix return type of netcp_ndo_start_xmit()
	hamradio: baycom_epp: Fix return type of baycom_send_packet()
	wifi: brcmfmac: Fix potential shift-out-of-bounds in brcmf_fw_alloc_request()
	igb: Do not free q_vector unless new one was allocated
	drm/amdgpu: Fix type of second parameter in trans_msg() callback
	s390/ctcm: Fix return type of ctc{mp,}m_tx()
	s390/netiucv: Fix return type of netiucv_tx()
	s390/lcs: Fix return type of lcs_start_xmit()
	drm/sti: Use drm_mode_copy()
	drivers/md/md-bitmap: check the return value of md_bitmap_get_counter()
	md/raid1: stop mdx_raid1 thread when raid1 array run failed
	mrp: introduce active flags to prevent UAF when applicant uninit
	ppp: associate skb with a device at tx
	media: dvb-frontends: fix leak of memory fw
	media: dvbdev: adopts refcnt to avoid UAF
	media: dvb-usb: fix memory leak in dvb_usb_adapter_init()
	blk-mq: fix possible memleak when register 'hctx' failed
	regulator: core: fix use_count leakage when handling boot-on
	mmc: f-sdh30: Add quirks for broken timeout clock capability
	media: si470x: Fix use-after-free in si470x_int_in_callback()
	clk: st: Fix memory leak in st_of_quadfs_setup()
	drm/fsl-dcu: Fix return type of fsl_dcu_drm_connector_mode_valid()
	drm/sti: Fix return type of sti_{dvo,hda,hdmi}_connector_mode_valid()
	orangefs: Fix kmemleak in orangefs_prepare_debugfs_help_string()
	ASoC: mediatek: mt8173-rt5650-rt5514: fix refcount leak in mt8173_rt5650_rt5514_dev_probe()
	ASoC: rockchip: pdm: Add missing clk_disable_unprepare() in rockchip_pdm_runtime_resume()
	ASoC: wm8994: Fix potential deadlock
	ASoC: rockchip: spdif: Add missing clk_disable_unprepare() in rk_spdif_runtime_resume()
	ASoC: rt5670: Remove unbalanced pm_runtime_put()
	pstore: Switch pmsg_lock to an rt_mutex to avoid priority inversion
	pstore: Make sure CONFIG_PSTORE_PMSG selects CONFIG_RT_MUTEXES
	usb: dwc3: core: defer probe on ulpi_read_id timeout
	HID: wacom: Ensure bootloader PID is usable in hidraw mode
	reiserfs: Add missing calls to reiserfs_security_free()
	iio: adc: ad_sigma_delta: do not use internal iio_dev lock
	gcov: add support for checksum field
	media: dvbdev: fix build warning due to comments
	media: dvbdev: fix refcnt bug
	ata: ahci: Fix PCS quirk application for suspend
	powerpc/rtas: avoid device tree lookups in rtas_os_term()
	powerpc/rtas: avoid scheduling in rtas_os_term()
	HID: plantronics: Additional PIDs for double volume key presses quirk
	hfsplus: fix bug causing custom uid and gid being unable to be assigned with mount
	ovl: Use ovl mounter's fsuid and fsgid in ovl_link()
	ALSA: line6: correct midi status byte when receiving data from podxt
	ALSA: line6: fix stack overflow in line6_midi_transmit
	pnode: terminate at peers of source
	md: fix a crash in mempool_free
	mmc: vub300: fix warning - do not call blocking ops when !TASK_RUNNING
	tpm: tpm_crb: Add the missed acpi_put_table() to fix memory leak
	tpm: tpm_tis: Add the missed acpi_put_table() to fix memory leak
	SUNRPC: Don't leak netobj memory when gss_read_proxy_verf() fails
	media: stv0288: use explicitly signed char
	soc: qcom: Select REMAP_MMIO for LLCC driver
	ktest.pl minconfig: Unset configs instead of just removing them
	ARM: ux500: do not directly dereference __iomem
	selftests: Use optional USERCFLAGS and USERLDFLAGS
	binfmt: Move install_exec_creds after setup_new_exec to match binfmt_elf
	binfmt: Fix error return code in load_elf_fdpic_binary()
	dm cache: Fix ABBA deadlock between shrink_slab and dm_cache_metadata_abort
	dm thin: Use last transaction's pmd->root when commit failed
	dm thin: Fix UAF in run_timer_softirq()
	dm cache: Fix UAF in destroy()
	dm cache: set needs_check flag after aborting metadata
	x86/microcode/intel: Do not retry microcode reloading on the APs
	tracing: Fix infinite loop in tracing_read_pipe on overflowed print_trace_line
	ARM: 9256/1: NWFPE: avoid compiler-generated __aeabi_uldivmod
	media: dvb-core: Fix double free in dvb_register_device()
	media: dvb-core: Fix UAF due to refcount races at releasing
	cifs: fix confusing debug message
	md/bitmap: Fix bitmap chunk size overflow issues
	ipmi: fix long wait in unload when IPMI disconnect
	ima: Fix a potential NULL pointer access in ima_restore_measurement_list
	ipmi: fix use after free in _ipmi_destroy_user()
	PCI: Fix pci_device_is_present() for VFs by checking PF
	PCI/sysfs: Fix double free in error path
	crypto: n2 - add missing hash statesize
	iommu/amd: Fix ivrs_acpihid cmdline parsing code
	parisc: led: Fix potential null-ptr-deref in start_task()
	device_cgroup: Roll back to original exceptions after copy failure
	drm/connector: send hotplug uevent on connector cleanup
	drm/vmwgfx: Validate the box size for the snooped cursor
	ext4: add inode table check in __ext4_get_inode_loc to aovid possible infinite loop
	ext4: fix undefined behavior in bit shift for ext4_check_flag_values
	ext4: add helper to check quota inums
	ext4: fix bug_on in __es_tree_search caused by bad boot loader inode
	ext4: init quota for 'old.inode' in 'ext4_rename'
	ext4: fix corruption when online resizing a 1K bigalloc fs
	ext4: fix error code return to user-space in ext4_get_branch()
	ext4: avoid BUG_ON when creating xattrs
	ext4: fix inode leak in ext4_xattr_inode_create() on an error path
	ext4: initialize quota before expanding inode in setproject ioctl
	ext4: avoid unaccounted block allocation when expanding inode
	ext4: allocate extended attribute value in vmalloc area
	btrfs: send: avoid unnecessary backref lookups when finding clone source
	btrfs: replace strncpy() with strscpy()
	media: s5p-mfc: Fix to handle reference queue during finishing
	media: s5p-mfc: Clear workbit to handle error condition
	media: s5p-mfc: Fix in register read and write for H264
	dm thin: resume even if in FAIL mode
	perf probe: Use dwarf_attr_integrate as generic DWARF attr accessor
	perf probe: Fix to get the DW_AT_decl_file and DW_AT_call_file as unsinged data
	ravb: Fix "failed to switch device to config mode" message during unbind
	driver core: Set deferred_probe_timeout to a longer default if CONFIG_MODULES is set
	ext4: goto right label 'failed_mount3a'
	ext4: correct inconsistent error msg in nojournal mode
	ext4: use kmemdup() to replace kmalloc + memcpy
	mbcache: don't reclaim used entries
	mbcache: add functions to delete entry if unused
	ext4: remove EA inode entry from mbcache on inode eviction
	ext4: unindent codeblock in ext4_xattr_block_set()
	ext4: fix race when reusing xattr blocks
	mbcache: automatically delete entries from cache on freeing
	ext4: fix deadlock due to mbcache entry corruption
	SUNRPC: ensure the matching upcall is in-flight upon downcall
	bpf: pull before calling skb_postpull_rcsum()
	qlcnic: prevent ->dcb use-after-free on qlcnic_dcb_enable() failure
	nfc: Fix potential resource leaks
	net: amd-xgbe: add missed tasklet_kill
	net: phy: xgmiitorgmii: Fix refcount leak in xgmiitorgmii_probe
	RDMA/mlx5: Fix validation of max_rd_atomic caps for DC
	net: sched: atm: dont intepret cls results when asked to drop
	usb: rndis_host: Secure rndis_query check against int overflow
	caif: fix memory leak in cfctrl_linkup_request()
	udf: Fix extension of the last extent in the file
	ASoC: Intel: bytcr_rt5640: Add quirk for the Advantech MICA-071 tablet
	x86/bugs: Flush IBP in ib_prctl_set()
	nfsd: fix handling of readdir in v4root vs. mount upcall timeout
	riscv: uaccess: fix type of 0 variable on error in get_user()
	ext4: don't allow journal inode to have encrypt flag
	hfs/hfsplus: use WARN_ON for sanity check
	hfs/hfsplus: avoid WARN_ON() for sanity check, use proper error handling
	mbcache: Avoid nesting of cache->c_list_lock under bit locks
	parisc: Align parisc MADV_XXX constants with all other architectures
	driver core: Fix bus_type.match() error handling in __driver_attach()
	net: sched: disallow noqueue for qdisc classes
	docs: Fix the docs build with Sphinx 6.0
	perf auxtrace: Fix address filter duplicate symbol selection
	s390/percpu: add READ_ONCE() to arch_this_cpu_to_op_simple()
	net/ulp: prevent ULP without clone op from entering the LISTEN status
	ALSA: pcm: Move rwsem lock inside snd_ctl_elem_read to prevent UAF
	cifs: Fix uninitialized memory read for smb311 posix symlink create
	platform/x86: sony-laptop: Don't turn off 0x153 keyboard backlight during probe
	ipv6: raw: Deduct extension header length in rawv6_push_pending_frames
	wifi: wilc1000: sdio: fix module autoloading
	ALSA: hda/hdmi: fix failures at PCM open on Intel ICL and later
	ktest: Add support for meta characters in GRUB_MENU
	ktest: introduce _get_grub_index
	ktest: cleanup get_grub_index
	ktest: introduce grub2bls REBOOT_TYPE option
	ktest.pl: Fix incorrect reboot for grub2bls
	kest.pl: Fix grub2 menu handling for rebooting
	usb: ulpi: defer ulpi_register on ulpi_read_id timeout
	quota: Factor out setup of quota inode
	ext4: fix bug_on in __es_tree_search caused by bad quota inode
	ext4: lost matching-pair of trace in ext4_truncate
	ext4: fix use-after-free in ext4_orphan_cleanup
	ext4: fix uninititialized value in 'ext4_evict_inode'
	ext4: generalize extents status tree search functions
	ext4: add new pending reservation mechanism
	ext4: fix reserved cluster accounting at delayed write time
	ext4: fix delayed allocation bug in ext4_clu_mapped for bigalloc + inline
	netfilter: ipset: Fix overflow before widen in the bitmap_ip_create() function.
	x86/boot: Avoid using Intel mnemonics in AT&T syntax asm
	EDAC/device: Fix period calculation in edac_device_reset_delay_period()
	regulator: da9211: Use irq handler when ready
	hvc/xen: lock console list traversal
	nfc: pn533: Wait for out_urb's completion in pn533_usb_send_frame()
	net/mlx5: Rename ptp clock info
	net/mlx5: Fix ptp max frequency adjustment range
	iommu/mediatek-v1: Add error handle for mtk_iommu_probe
	iommu/mediatek-v1: Fix an error handling path in mtk_iommu_v1_probe()
	x86/resctrl: Use task_curr() instead of task_struct->on_cpu to prevent unnecessary IPI
	x86/resctrl: Fix task CLOSID/RMID update race
	drm/virtio: Fix GEM handle creation UAF
	arm64: cmpxchg_double*: hazard against entire exchange variable
	efi: fix NULL-deref in init error path
	Revert "usb: ulpi: defer ulpi_register on ulpi_read_id timeout"
	tty: serial: tegra: Handle RX transfer in PIO mode if DMA wasn't started
	serial: tegra: Only print FIFO error message when an error occurs
	serial: tegra: Change lower tolerance baud rate limit for tegra20 and tegra30
	Linux 4.19.270

Change-Id: Ieb5e7f318a7e06effcc51e5f93751ec02dbb50c4
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
2023-01-18 12:02:44 +00:00

3011 lines
77 KiB
C

/*
* Quick & dirty crypto testing module.
*
* This will only exist until we have a better testing mechanism
* (e.g. a char device).
*
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
* Copyright (c) 2002 Jean-Francois Dive <jef@linuxbe.org>
* Copyright (c) 2007 Nokia Siemens Networks
*
* Updated RFC4106 AES-GCM testing.
* Authors: Aidan O'Mahony (aidan.o.mahony@intel.com)
* Adrian Hoban <adrian.hoban@intel.com>
* Gabriele Paoloni <gabriele.paoloni@intel.com>
* Tadeusz Struk (tadeusz.struk@intel.com)
* Copyright (c) 2010, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <crypto/aead.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/fips.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/moduleparam.h>
#include <linux/jiffies.h>
#include <linux/timex.h>
#include <linux/interrupt.h>
#include "tcrypt.h"
/*
* Need slab memory for testing (size in number of pages).
*/
#define TVMEMSIZE 4
/*
* Used by test_cipher_speed()
*/
#define ENCRYPT 1
#define DECRYPT 0
#define MAX_DIGEST_SIZE 64
/*
* return a string with the driver name
*/
#define get_driver_name(tfm_type, tfm) crypto_tfm_alg_driver_name(tfm_type ## _tfm(tfm))
/*
* Used by test_cipher_speed()
*/
/* Module parameters: duration of each speed test in seconds (0 = cycle count
 * mode), algorithm name to test (NULL = run the mode-selected suite),
 * crypto API type/mask bits, test mode number, and the number of
 * parallel (multibuffer) requests used by the *_mb speed tests. */
static unsigned int sec;
static char *alg = NULL;
static u32 type;
static u32 mask;
static int mode;
static u32 num_mb = 8;
/* TVMEMSIZE page-sized scratch buffers shared by the tests. */
static char *tvmem[TVMEMSIZE];
/* NULL-terminated list of algorithms exercised by the "test all" mode. */
static char *check[] = {
"des", "md5", "des3_ede", "rot13", "sha1", "sha224", "sha256", "sm3",
"blowfish", "twofish", "serpent", "sha384", "sha512", "md4", "aes",
"cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea",
"khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt",
"camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320",
"lzo", "cts", "zlib", "sha3-224", "sha3-256", "sha3-384", "sha3-512",
NULL
};
/* Zero-terminated buffer lengths (bytes) used by the speed tests. */
static u32 block_sizes[] = { 16, 64, 256, 1024, 8192, 0 };
static u32 aead_sizes[] = { 16, 64, 256, 512, 1024, 2048, 4096, 8192, 0 };
#define XBUFSIZE 8
#define MAX_IVLEN 32
/*
 * Allocate XBUFSIZE single pages into @buf.
 *
 * Returns 0 on success.  On allocation failure every page obtained so
 * far is released again and -ENOMEM is returned, leaving @buf unusable.
 */
static int testmgr_alloc_buf(char *buf[XBUFSIZE])
{
	int i = 0;

	while (i < XBUFSIZE) {
		buf[i] = (void *)__get_free_page(GFP_KERNEL);
		if (!buf[i]) {
			/* Unwind: free the pages already allocated. */
			while (i-- > 0)
				free_page((unsigned long)buf[i]);
			return -ENOMEM;
		}
		i++;
	}

	return 0;
}
/* Release the pages obtained by testmgr_alloc_buf(). */
static void testmgr_free_buf(char *buf[XBUFSIZE])
{
	unsigned int slot;

	for (slot = 0; slot < XBUFSIZE; slot++)
		free_page((unsigned long)buf[slot]);
}
/*
 * Build an AEAD scatterlist: entry 0 carries the associated data, the
 * following entries map up to XBUFSIZE pages of xbuf covering buflen
 * payload bytes (the final entry may be a partial page).
 */
static void sg_init_aead(struct scatterlist *sg, char *xbuf[XBUFSIZE],
			 unsigned int buflen, const void *assoc,
			 unsigned int aad_size)
{
	/* Pages needed for buflen bytes, rounded up. */
	int np = (buflen + PAGE_SIZE - 1)/PAGE_SIZE;
	int k, rem;

	if (np > XBUFSIZE) {
		/* Clamp to the buffers we have; last entry is a whole page. */
		rem = PAGE_SIZE;
		np = XBUFSIZE;
	} else {
		rem = buflen % PAGE_SIZE;
	}

	/* +1 slot for the leading associated-data entry. */
	sg_init_table(sg, np + 1);

	sg_set_buf(&sg[0], assoc, aad_size);

	/* With a partial tail, the last slot gets rem bytes instead. */
	if (rem)
		np--;
	for (k = 0; k < np; k++)
		sg_set_buf(&sg[k + 1], xbuf[k], PAGE_SIZE);

	if (rem)
		sg_set_buf(&sg[k + 1], xbuf[k], rem);
}
/*
 * Resolve an async AEAD submission to its final status, sleeping on the
 * crypto_wait stashed in the request's callback data if necessary.
 */
static inline int do_one_aead_op(struct aead_request *req, int ret)
{
	return crypto_wait_req(ret, req->base.data);
}
/* Per-request state for one slot of a multibuffer AEAD speed test. */
struct test_mb_aead_data {
	/*
	 * sg_init_aead() fills up to XBUFSIZE payload entries plus one
	 * leading entry for the associated data, so the tables need
	 * XBUFSIZE + 1 slots to avoid writing past the array when the
	 * buffer length reaches XBUFSIZE whole pages.
	 */
	struct scatterlist sg[XBUFSIZE + 1];
	struct scatterlist sgout[XBUFSIZE + 1];
	struct aead_request *req;
	struct crypto_wait wait;	/* completion for async requests */
	char *xbuf[XBUFSIZE];		/* input pages */
	char *xoutbuf[XBUFSIZE];	/* output pages */
	char *axbuf[XBUFSIZE];		/* associated-data pages */
};
/*
 * Submit num_mb AEAD requests back to back, then wait for them all.
 * Returns 0 on success or the last error observed.
 */
static int do_mult_aead_op(struct test_mb_aead_data *data, int enc,
			   u32 num_mb, int *rc)
{
	int err = 0;
	int i;

	/* Kick off the whole batch without waiting in between. */
	for (i = 0; i < num_mb; i++)
		rc[i] = (enc == ENCRYPT) ?
			crypto_aead_encrypt(data[i].req) :
			crypto_aead_decrypt(data[i].req);

	/* Collect completions; remember the last failure, if any. */
	for (i = 0; i < num_mb; i++) {
		rc[i] = crypto_wait_req(rc[i], &data[i].wait);
		if (rc[i]) {
			pr_info("concurrent request %d error %d\n", i, rc[i]);
			err = rc[i];
		}
	}

	return err;
}
/*
 * Run multibuffer AEAD batches for 'secs' seconds and report the
 * aggregate operation count and byte throughput.
 */
static int test_mb_aead_jiffies(struct test_mb_aead_data *data, int enc,
				int blen, int secs, u32 num_mb)
{
	unsigned long deadline;
	int batches = 0;
	int ret = 0;
	int *rc;

	rc = kcalloc(num_mb, sizeof(*rc), GFP_KERNEL);
	if (!rc)
		return -ENOMEM;

	deadline = jiffies + secs * HZ;
	while (time_before(jiffies, deadline)) {
		ret = do_mult_aead_op(data, enc, num_mb, rc);
		if (ret)
			goto out;
		batches++;
	}

	pr_cont("%d operations in %d seconds (%llu bytes)\n",
		batches * num_mb, secs, (u64)batches * blen * num_mb);

out:
	kfree(rc);
	return ret;
}
/*
 * Measure the per-request cycle cost of multibuffer AEAD operations:
 * 4 untimed warm-up batches, then 8 timed batches of num_mb requests.
 */
static int test_mb_aead_cycles(struct test_mb_aead_data *data, int enc,
			       int blen, u32 num_mb)
{
	unsigned long cycles = 0;
	int ret = 0;
	int i;
	int *rc;

	rc = kcalloc(num_mb, sizeof(*rc), GFP_KERNEL);
	if (!rc)
		return -ENOMEM;

	/* Warm-up run. */
	for (i = 0; i < 4; i++) {
		ret = do_mult_aead_op(data, enc, num_mb, rc);
		if (ret)
			goto out;
	}

	/* The real thing. */
	for (i = 0; i < 8; i++) {
		cycles_t start, end;

		start = get_cycles();
		ret = do_mult_aead_op(data, enc, num_mb, rc);
		end = get_cycles();

		if (ret)
			goto out;

		cycles += end - start;
	}

	/* Average over 8 batches x num_mb requests (+4 biases the /8). */
	pr_cont("1 operation in %lu cycles (%d bytes)\n",
		(cycles + 4) / (8 * num_mb), blen);

out:
	kfree(rc);
	return ret;
}
/*
 * Benchmark an AEAD transform with num_mb requests in flight for every
 * (keysize, block size) combination.  For decryption a valid auth tag is
 * first produced by running the encrypt path with the buffers swapped.
 * Results go to the kernel log; any error aborts the current run.
 */
static void test_mb_aead_speed(const char *algo, int enc, int secs,
			       struct aead_speed_template *template,
			       unsigned int tcount, u8 authsize,
			       unsigned int aad_size, u8 *keysize, u32 num_mb)
{
	struct test_mb_aead_data *data;
	struct crypto_aead *tfm;
	unsigned int i, j, iv_len;
	const char *key;
	const char *e;
	void *assoc;
	u32 *b_size;
	char *iv;
	int ret;

	/* The associated data must fit in the single page reserved for it. */
	if (aad_size >= PAGE_SIZE) {
		pr_err("associate data length (%u) too big\n", aad_size);
		return;
	}

	iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
	if (!iv)
		return;

	if (enc == ENCRYPT)
		e = "encryption";
	else
		e = "decryption";

	data = kcalloc(num_mb, sizeof(*data), GFP_KERNEL);
	if (!data)
		goto out_free_iv;

	tfm = crypto_alloc_aead(algo, 0, 0);
	if (IS_ERR(tfm)) {
		pr_err("failed to load transform for %s: %ld\n",
			algo, PTR_ERR(tfm));
		goto out_free_data;
	}

	/*
	 * Check the setauthsize() result: an unsupported auth size would
	 * otherwise make every later request fail with a confusing error
	 * (or benchmark the wrong configuration).
	 */
	ret = crypto_aead_setauthsize(tfm, authsize);
	if (ret) {
		pr_err("alg: aead: Failed to setauthsize for %s: %d\n",
		       algo, ret);
		goto out_free_tfm;
	}

	for (i = 0; i < num_mb; ++i)
		if (testmgr_alloc_buf(data[i].xbuf)) {
			while (i--)
				testmgr_free_buf(data[i].xbuf);
			goto out_free_tfm;
		}

	for (i = 0; i < num_mb; ++i)
		if (testmgr_alloc_buf(data[i].axbuf)) {
			while (i--)
				testmgr_free_buf(data[i].axbuf);
			goto out_free_xbuf;
		}

	for (i = 0; i < num_mb; ++i)
		if (testmgr_alloc_buf(data[i].xoutbuf)) {
			while (i--)
				testmgr_free_buf(data[i].xoutbuf);
			goto out_free_axbuf;
		}

	for (i = 0; i < num_mb; ++i) {
		data[i].req = aead_request_alloc(tfm, GFP_KERNEL);
		if (!data[i].req) {
			pr_err("alg: skcipher: Failed to allocate request for %s\n",
			       algo);
			while (i--)
				aead_request_free(data[i].req);
			goto out_free_xoutbuf;
		}
	}

	for (i = 0; i < num_mb; ++i) {
		crypto_init_wait(&data[i].wait);
		aead_request_set_callback(data[i].req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  crypto_req_done, &data[i].wait);
	}

	pr_info("\ntesting speed of multibuffer %s (%s) %s\n", algo,
		get_driver_name(crypto_aead, tfm), e);

	i = 0;
	do {
		b_size = aead_sizes;
		do {
			/* Payload plus tag must fit the per-slot pages. */
			if (*b_size + authsize > XBUFSIZE * PAGE_SIZE) {
				pr_err("template (%u) too big for buffer (%lu)\n",
				       authsize + *b_size,
				       XBUFSIZE * PAGE_SIZE);
				goto out;
			}

			pr_info("test %u (%d bit key, %d byte blocks): ", i,
				*keysize * 8, *b_size);

			/* Set up tfm global state, i.e. the key */
			memset(tvmem[0], 0xff, PAGE_SIZE);
			key = tvmem[0];
			for (j = 0; j < tcount; j++) {
				if (template[j].klen == *keysize) {
					key = template[j].key;
					break;
				}
			}

			crypto_aead_clear_flags(tfm, ~0);

			ret = crypto_aead_setkey(tfm, key, *keysize);
			if (ret) {
				pr_err("setkey() failed flags=%x\n",
				       crypto_aead_get_flags(tfm));
				goto out;
			}

			iv_len = crypto_aead_ivsize(tfm);
			if (iv_len)
				memset(iv, 0xff, iv_len);

			/* Now setup per request stuff, i.e. buffers */
			for (j = 0; j < num_mb; ++j) {
				struct test_mb_aead_data *cur = &data[j];

				assoc = cur->axbuf[0];
				memset(assoc, 0xff, aad_size);

				sg_init_aead(cur->sg, cur->xbuf,
					     *b_size + (enc ? 0 : authsize),
					     assoc, aad_size);

				sg_init_aead(cur->sgout, cur->xoutbuf,
					     *b_size + (enc ? authsize : 0),
					     assoc, aad_size);

				aead_request_set_ad(cur->req, aad_size);

				if (!enc) {
					/*
					 * Produce a valid auth tag by
					 * encrypting once with the buffers
					 * swapped (output -> input).
					 */
					aead_request_set_crypt(cur->req,
							       cur->sgout,
							       cur->sg,
							       *b_size, iv);
					ret = crypto_aead_encrypt(cur->req);
					ret = do_one_aead_op(cur->req, ret);

					if (ret) {
						pr_err("calculating auth failed failed (%d)\n",
						       ret);
						break;
					}
				}

				aead_request_set_crypt(cur->req, cur->sg,
						       cur->sgout, *b_size +
						       (enc ? 0 : authsize),
						       iv);
			}

			if (secs) {
				ret = test_mb_aead_jiffies(data, enc, *b_size,
							   secs, num_mb);
				cond_resched();
			} else {
				ret = test_mb_aead_cycles(data, enc, *b_size,
							  num_mb);
			}

			if (ret) {
				pr_err("%s() failed return code=%d\n", e, ret);
				break;
			}
			b_size++;
			i++;
		} while (*b_size);
		keysize++;
	} while (*keysize);

out:
	for (i = 0; i < num_mb; ++i)
		aead_request_free(data[i].req);
out_free_xoutbuf:
	for (i = 0; i < num_mb; ++i)
		testmgr_free_buf(data[i].xoutbuf);
out_free_axbuf:
	for (i = 0; i < num_mb; ++i)
		testmgr_free_buf(data[i].axbuf);
out_free_xbuf:
	for (i = 0; i < num_mb; ++i)
		testmgr_free_buf(data[i].xbuf);
out_free_tfm:
	crypto_free_aead(tfm);
out_free_data:
	kfree(data);
out_free_iv:
	kfree(iv);
}
static int test_aead_jiffies(struct aead_request *req, int enc,
int blen, int secs)
{
unsigned long start, end;
int bcount;
int ret;
for (start = jiffies, end = start + secs * HZ, bcount = 0;
time_before(jiffies, end); bcount++) {
if (enc)
ret = do_one_aead_op(req, crypto_aead_encrypt(req));
else
ret = do_one_aead_op(req, crypto_aead_decrypt(req));
if (ret)
return ret;
}
pr_cont("%d operations in %d seconds (%llu bytes)\n",
bcount, secs, (u64)bcount * blen);
return 0;
}
/*
 * Measure one AEAD operation in CPU cycles: 4 untimed warm-up runs,
 * then 8 timed runs whose average is reported.
 */
static int test_aead_cycles(struct aead_request *req, int enc, int blen)
{
	unsigned long cycles = 0;
	int ret = 0;
	int i;

	/* Warm-up run. */
	for (i = 0; i < 4; i++) {
		if (enc)
			ret = do_one_aead_op(req, crypto_aead_encrypt(req));
		else
			ret = do_one_aead_op(req, crypto_aead_decrypt(req));

		if (ret)
			goto out;
	}

	/* The real thing. */
	for (i = 0; i < 8; i++) {
		cycles_t start, end;

		start = get_cycles();
		if (enc)
			ret = do_one_aead_op(req, crypto_aead_encrypt(req));
		else
			ret = do_one_aead_op(req, crypto_aead_decrypt(req));
		end = get_cycles();

		if (ret)
			goto out;

		cycles += end - start;
	}

out:
	if (ret == 0)
		/*
		 * Use pr_cont like the other *_cycles() helpers: this
		 * completes the "test N (...): " line printed by the
		 * caller, whereas a bare printk would start a new line
		 * at the default loglevel.
		 */
		pr_cont("1 operation in %lu cycles (%d bytes)\n",
			(cycles + 4) / 8, blen);

	return ret;
}
/*
 * Benchmark a single AEAD transform for every (keysize, block size)
 * combination.  For decryption a valid auth tag is generated first by
 * running the encrypt path once with the buffers swapped.
 */
static void test_aead_speed(const char *algo, int enc, unsigned int secs,
			    struct aead_speed_template *template,
			    unsigned int tcount, u8 authsize,
			    unsigned int aad_size, u8 *keysize)
{
	unsigned int i, j;
	struct crypto_aead *tfm;
	int ret = -ENOMEM;
	const char *key;
	struct aead_request *req;
	struct scatterlist *sg;
	struct scatterlist *sgout;
	const char *e;
	void *assoc;
	char *iv;
	char *xbuf[XBUFSIZE];
	char *xoutbuf[XBUFSIZE];
	char *axbuf[XBUFSIZE];
	unsigned int *b_size;
	unsigned int iv_len;
	struct crypto_wait wait;

	iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
	if (!iv)
		return;

	/* The associated data must fit in the single page reserved for it. */
	if (aad_size >= PAGE_SIZE) {
		pr_err("associate data length (%u) too big\n", aad_size);
		goto out_noxbuf;
	}

	if (enc == ENCRYPT)
		e = "encryption";
	else
		e = "decryption";

	if (testmgr_alloc_buf(xbuf))
		goto out_noxbuf;
	if (testmgr_alloc_buf(axbuf))
		goto out_noaxbuf;
	if (testmgr_alloc_buf(xoutbuf))
		goto out_nooutbuf;

	/* 9 = XBUFSIZE payload entries + 1 for the associated data. */
	sg = kmalloc(sizeof(*sg) * 9 * 2, GFP_KERNEL);
	if (!sg)
		goto out_nosg;
	sgout = &sg[9];

	tfm = crypto_alloc_aead(algo, 0, 0);
	if (IS_ERR(tfm)) {
		pr_err("alg: aead: Failed to load transform for %s: %ld\n", algo,
		       PTR_ERR(tfm));
		goto out_notfm;
	}

	crypto_init_wait(&wait);
	printk(KERN_INFO "\ntesting speed of %s (%s) %s\n", algo,
			get_driver_name(crypto_aead, tfm), e);

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		pr_err("alg: aead: Failed to allocate request for %s\n",
		       algo);
		goto out_noreq;
	}

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);

	i = 0;
	do {
		b_size = aead_sizes;
		do {
			assoc = axbuf[0];
			memset(assoc, 0xff, aad_size);

			if ((*keysize + *b_size) > TVMEMSIZE * PAGE_SIZE) {
				pr_err("template (%u) too big for tvmem (%lu)\n",
				       *keysize + *b_size,
				       TVMEMSIZE * PAGE_SIZE);
				goto out;
			}

			key = tvmem[0];
			for (j = 0; j < tcount; j++) {
				if (template[j].klen == *keysize) {
					key = template[j].key;
					break;
				}
			}

			/*
			 * Check setkey() and setauthsize() independently.
			 * Previously the setkey() result was overwritten by
			 * the setauthsize() result, so a key failure could
			 * go unnoticed or be misreported as a setkey error.
			 */
			ret = crypto_aead_setkey(tfm, key, *keysize);
			if (ret) {
				pr_err("setkey() failed flags=%x\n",
				       crypto_aead_get_flags(tfm));
				goto out;
			}

			ret = crypto_aead_setauthsize(tfm, authsize);
			if (ret) {
				pr_err("setauthsize() failed flags=%x\n",
				       crypto_aead_get_flags(tfm));
				goto out;
			}

			iv_len = crypto_aead_ivsize(tfm);
			if (iv_len)
				memset(iv, 0xff, iv_len);

			crypto_aead_clear_flags(tfm, ~0);

			printk(KERN_INFO "test %u (%d bit key, %d byte blocks): ",
					i, *keysize * 8, *b_size);

			memset(tvmem[0], 0xff, PAGE_SIZE);

			sg_init_aead(sg, xbuf, *b_size + (enc ? 0 : authsize),
				     assoc, aad_size);

			sg_init_aead(sgout, xoutbuf,
				     *b_size + (enc ? authsize : 0), assoc,
				     aad_size);

			aead_request_set_ad(req, aad_size);

			if (!enc) {
				/*
				 * For decryption we need a proper auth so
				 * we do the encryption path once with buffers
				 * reversed (input <-> output) to calculate it
				 */
				aead_request_set_crypt(req, sgout, sg,
						       *b_size, iv);
				ret = do_one_aead_op(req,
						     crypto_aead_encrypt(req));

				if (ret) {
					pr_err("calculating auth failed failed (%d)\n",
					       ret);
					break;
				}
			}

			aead_request_set_crypt(req, sg, sgout,
					       *b_size + (enc ? 0 : authsize),
					       iv);

			if (secs) {
				ret = test_aead_jiffies(req, enc, *b_size,
							secs);
				cond_resched();
			} else {
				ret = test_aead_cycles(req, enc, *b_size);
			}

			if (ret) {
				pr_err("%s() failed return code=%d\n", e, ret);
				break;
			}
			b_size++;
			i++;
		} while (*b_size);
		keysize++;
	} while (*keysize);

out:
	aead_request_free(req);
out_noreq:
	crypto_free_aead(tfm);
out_notfm:
	kfree(sg);
out_nosg:
	testmgr_free_buf(xoutbuf);
out_nooutbuf:
	testmgr_free_buf(axbuf);
out_noaxbuf:
	testmgr_free_buf(xbuf);
out_noxbuf:
	kfree(iv);
}
/* Map the TVMEMSIZE scratch pages into sg and fill them with 0xff. */
static void test_hash_sg_init(struct scatterlist *sg)
{
	unsigned int idx;

	sg_init_table(sg, TVMEMSIZE);
	for (idx = 0; idx < TVMEMSIZE; idx++) {
		sg_set_buf(&sg[idx], tvmem[idx], PAGE_SIZE);
		memset(tvmem[idx], 0xff, PAGE_SIZE);
	}
}
/* Resolve an async hash submission to its final status. */
static inline int do_one_ahash_op(struct ahash_request *req, int ret)
{
	return crypto_wait_req(ret, req->base.data);
}
/* Per-request state for one slot of the multibuffer hash speed test. */
struct test_mb_ahash_data {
	struct scatterlist sg[XBUFSIZE];	/* maps the xbuf pages */
	char result[64];			/* digest output buffer */
	struct ahash_request *req;
	struct crypto_wait wait;		/* async completion */
	char *xbuf[XBUFSIZE];			/* input pages (filled 0xff) */
};
/*
 * Launch num_mb digest requests in one burst, then wait for them all.
 * Returns 0 on success or the last error observed.
 */
static inline int do_mult_ahash_op(struct test_mb_ahash_data *data, u32 num_mb,
				   int *rc)
{
	int err = 0;
	int i;

	/* Issue every digest request before waiting on any of them. */
	for (i = 0; i < num_mb; i++)
		rc[i] = crypto_ahash_digest(data[i].req);

	/* Reap them all, remembering the last failure for the caller. */
	for (i = 0; i < num_mb; i++) {
		rc[i] = crypto_wait_req(rc[i], &data[i].wait);
		if (rc[i]) {
			pr_info("concurrent request %d error %d\n", i, rc[i]);
			err = rc[i];
		}
	}

	return err;
}
/*
 * Run multibuffer digest batches for 'secs' seconds and report the
 * aggregate operation count and byte throughput.
 */
static int test_mb_ahash_jiffies(struct test_mb_ahash_data *data, int blen,
				 int secs, u32 num_mb)
{
	unsigned long deadline;
	int batches = 0;
	int ret = 0;
	int *rc;

	rc = kcalloc(num_mb, sizeof(*rc), GFP_KERNEL);
	if (!rc)
		return -ENOMEM;

	deadline = jiffies + secs * HZ;
	while (time_before(jiffies, deadline)) {
		ret = do_mult_ahash_op(data, num_mb, rc);
		if (ret)
			goto out;
		batches++;
	}

	pr_cont("%d operations in %d seconds (%llu bytes)\n",
		batches * num_mb, secs, (u64)batches * blen * num_mb);

out:
	kfree(rc);
	return ret;
}
/*
 * Measure the per-request cycle cost of multibuffer digests:
 * 4 untimed warm-up batches, then 8 timed batches of num_mb requests.
 */
static int test_mb_ahash_cycles(struct test_mb_ahash_data *data, int blen,
				u32 num_mb)
{
	unsigned long cycles = 0;
	int ret = 0;
	int i;
	int *rc;

	rc = kcalloc(num_mb, sizeof(*rc), GFP_KERNEL);
	if (!rc)
		return -ENOMEM;

	/* Warm-up run. */
	for (i = 0; i < 4; i++) {
		ret = do_mult_ahash_op(data, num_mb, rc);
		if (ret)
			goto out;
	}

	/* The real thing. */
	for (i = 0; i < 8; i++) {
		cycles_t start, end;

		start = get_cycles();
		ret = do_mult_ahash_op(data, num_mb, rc);
		end = get_cycles();

		if (ret)
			goto out;

		cycles += end - start;
	}

	/* Average over 8 batches x num_mb requests (+4 biases the /8). */
	pr_cont("1 operation in %lu cycles (%d bytes)\n",
		(cycles + 4) / (8 * num_mb), blen);

out:
	kfree(rc);
	return ret;
}
/*
 * Benchmark hashing with num_mb requests in flight.  Only whole-buffer
 * digests are measured (entries where blen != plen are skipped).
 */
static void test_mb_ahash_speed(const char *algo, unsigned int secs,
				struct hash_speed *speed, u32 num_mb)
{
	struct test_mb_ahash_data *data;
	struct crypto_ahash *tfm;
	unsigned int i, j, k;
	int ret;

	data = kcalloc(num_mb, sizeof(*data), GFP_KERNEL);
	if (!data)
		return;

	tfm = crypto_alloc_ahash(algo, 0, 0);
	if (IS_ERR(tfm)) {
		pr_err("failed to load transform for %s: %ld\n",
			algo, PTR_ERR(tfm));
		goto free_data;
	}

	for (i = 0; i < num_mb; ++i) {
		if (testmgr_alloc_buf(data[i].xbuf))
			goto out;

		crypto_init_wait(&data[i].wait);

		data[i].req = ahash_request_alloc(tfm, GFP_KERNEL);
		if (!data[i].req) {
			pr_err("alg: hash: Failed to allocate request for %s\n",
			       algo);
			goto out;
		}

		ahash_request_set_callback(data[i].req, 0, crypto_req_done,
					   &data[i].wait);

		sg_init_table(data[i].sg, XBUFSIZE);
		for (j = 0; j < XBUFSIZE; j++) {
			sg_set_buf(data[i].sg + j, data[i].xbuf[j], PAGE_SIZE);
			memset(data[i].xbuf[j], 0xff, PAGE_SIZE);
		}
	}

	pr_info("\ntesting speed of multibuffer %s (%s)\n", algo,
		get_driver_name(crypto_ahash, tfm));

	for (i = 0; speed[i].blen != 0; i++) {
		/* For some reason this only tests digests. */
		if (speed[i].blen != speed[i].plen)
			continue;

		if (speed[i].blen > XBUFSIZE * PAGE_SIZE) {
			pr_err("template (%u) too big for tvmem (%lu)\n",
			       speed[i].blen, XBUFSIZE * PAGE_SIZE);
			goto out;
		}

		if (speed[i].klen) {
			/*
			 * Check the setkey() result: benchmarking a keyed
			 * hash whose key failed to load would either fail
			 * every request or silently measure the wrong thing.
			 */
			ret = crypto_ahash_setkey(tfm, tvmem[0],
						  speed[i].klen);
			if (ret) {
				pr_err("setkey() failed ret=%d\n", ret);
				goto out;
			}
		}

		for (k = 0; k < num_mb; k++)
			ahash_request_set_crypt(data[k].req, data[k].sg,
						data[k].result, speed[i].blen);

		pr_info("test%3u "
			"(%5u byte blocks,%5u bytes per update,%4u updates): ",
			i, speed[i].blen, speed[i].plen,
			speed[i].blen / speed[i].plen);

		if (secs) {
			ret = test_mb_ahash_jiffies(data, speed[i].blen, secs,
						    num_mb);
			cond_resched();
		} else {
			ret = test_mb_ahash_cycles(data, speed[i].blen, num_mb);
		}

		if (ret) {
			pr_err("At least one hashing failed ret=%d\n", ret);
			break;
		}
	}

out:
	/* request/buffer frees tolerate the NULLs left by kcalloc(). */
	for (k = 0; k < num_mb; ++k)
		ahash_request_free(data[k].req);

	for (k = 0; k < num_mb; ++k)
		testmgr_free_buf(data[k].xbuf);

	crypto_free_ahash(tfm);

free_data:
	kfree(data);
}
/*
 * Time one-shot digests for 'secs' seconds and report the rates.
 * 'out' is unused here; the digest lands wherever the caller pointed
 * the request at via ahash_request_set_crypt().
 */
static int test_ahash_jiffies_digest(struct ahash_request *req, int blen,
				     char *out, int secs)
{
	unsigned long deadline = jiffies + secs * HZ;
	int count = 0;
	int ret;

	while (time_before(jiffies, deadline)) {
		ret = do_one_ahash_op(req, crypto_ahash_digest(req));
		if (ret)
			return ret;
		count++;
	}

	printk("%6u opers/sec, %9lu bytes/sec\n",
	       count / secs, ((long)count * blen) / secs);

	return 0;
}
/*
 * Time init/update/final hashing for 'secs' seconds.  When plen == blen
 * the whole buffer fits one operation, so delegate to the digest variant.
 */
static int test_ahash_jiffies(struct ahash_request *req, int blen,
			      int plen, char *out, int secs)
{
	unsigned long start, end;
	int bcount, pcount;
	int ret;

	if (plen == blen)
		return test_ahash_jiffies_digest(req, blen, out, secs);

	for (start = jiffies, end = start + secs * HZ, bcount = 0;
	     time_before(jiffies, end); bcount++) {
		ret = do_one_ahash_op(req, crypto_ahash_init(req));
		if (ret)
			return ret;
		/*
		 * NOTE(review): each update hashes the same region the
		 * caller configured via ahash_request_set_crypt(); the
		 * request is not re-pointed between updates.
		 */
		for (pcount = 0; pcount < blen; pcount += plen) {
			ret = do_one_ahash_op(req, crypto_ahash_update(req));
			if (ret)
				return ret;
		}
		/* we assume there is enough space in 'out' for the result */
		ret = do_one_ahash_op(req, crypto_ahash_final(req));
		if (ret)
			return ret;
	}

	pr_cont("%6u opers/sec, %9lu bytes/sec\n",
		bcount / secs, ((long)bcount * blen) / secs);

	return 0;
}
/*
 * Cycle-count one-shot digests: 4 untimed warm-up runs, then 8 timed
 * runs whose average is reported.  'out' is unused here (see caller).
 */
static int test_ahash_cycles_digest(struct ahash_request *req, int blen,
				    char *out)
{
	unsigned long cycles = 0;
	int ret, i;

	/* Warm-up run. */
	for (i = 0; i < 4; i++) {
		ret = do_one_ahash_op(req, crypto_ahash_digest(req));
		if (ret)
			goto out;
	}

	/* The real thing. */
	for (i = 0; i < 8; i++) {
		cycles_t start, end;

		start = get_cycles();

		ret = do_one_ahash_op(req, crypto_ahash_digest(req));
		if (ret)
			goto out;

		end = get_cycles();

		cycles += end - start;
	}

out:
	if (ret)
		return ret;

	pr_cont("%6lu cycles/operation, %4lu cycles/byte\n",
		cycles / 8, cycles / (8 * blen));

	return 0;
}
/*
 * Cycle-count init/update/final hashing (plen bytes per update over a
 * blen-byte message).  When plen == blen, use the digest variant.
 */
static int test_ahash_cycles(struct ahash_request *req, int blen,
			     int plen, char *out)
{
	unsigned long cycles = 0;
	int i, pcount, ret;

	if (plen == blen)
		return test_ahash_cycles_digest(req, blen, out);

	/* Warm-up run. */
	for (i = 0; i < 4; i++) {
		ret = do_one_ahash_op(req, crypto_ahash_init(req));
		if (ret)
			goto out;
		for (pcount = 0; pcount < blen; pcount += plen) {
			ret = do_one_ahash_op(req, crypto_ahash_update(req));
			if (ret)
				goto out;
		}
		ret = do_one_ahash_op(req, crypto_ahash_final(req));
		if (ret)
			goto out;
	}

	/* The real thing. */
	for (i = 0; i < 8; i++) {
		cycles_t start, end;

		start = get_cycles();

		ret = do_one_ahash_op(req, crypto_ahash_init(req));
		if (ret)
			goto out;
		for (pcount = 0; pcount < blen; pcount += plen) {
			ret = do_one_ahash_op(req, crypto_ahash_update(req));
			if (ret)
				goto out;
		}
		ret = do_one_ahash_op(req, crypto_ahash_final(req));
		if (ret)
			goto out;

		end = get_cycles();

		cycles += end - start;
	}

out:
	if (ret)
		return ret;

	pr_cont("%6lu cycles/operation, %4lu cycles/byte\n",
		cycles / 8, cycles / (8 * blen));

	return 0;
}
/*
 * Shared implementation behind test_ahash_speed()/test_hash_speed();
 * 'mask' selects whether async implementations are allowed.
 */
static void test_ahash_speed_common(const char *algo, unsigned int secs,
				    struct hash_speed *speed, unsigned mask)
{
	struct scatterlist sg[TVMEMSIZE];
	struct crypto_wait wait;
	struct ahash_request *req;
	struct crypto_ahash *tfm;
	char *output;
	int i, ret;

	tfm = crypto_alloc_ahash(algo, 0, mask);
	if (IS_ERR(tfm)) {
		pr_err("failed to load transform for %s: %ld\n",
		       algo, PTR_ERR(tfm));
		return;
	}

	printk(KERN_INFO "\ntesting speed of async %s (%s)\n", algo,
			get_driver_name(crypto_ahash, tfm));

	if (crypto_ahash_digestsize(tfm) > MAX_DIGEST_SIZE) {
		pr_err("digestsize(%u) > %d\n", crypto_ahash_digestsize(tfm),
		       MAX_DIGEST_SIZE);
		goto out;
	}

	test_hash_sg_init(sg);
	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		pr_err("ahash request allocation failure\n");
		goto out;
	}

	crypto_init_wait(&wait);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);

	output = kmalloc(MAX_DIGEST_SIZE, GFP_KERNEL);
	if (!output)
		goto out_nomem;

	for (i = 0; speed[i].blen != 0; i++) {
		if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) {
			pr_err("template (%u) too big for tvmem (%lu)\n",
			       speed[i].blen, TVMEMSIZE * PAGE_SIZE);
			break;
		}

		if (speed[i].klen) {
			/* Don't benchmark with a key that failed to load. */
			ret = crypto_ahash_setkey(tfm, tvmem[0],
						  speed[i].klen);
			if (ret) {
				pr_err("setkey() failed ret=%d\n", ret);
				break;
			}
		}

		pr_info("test%3u "
			"(%5u byte blocks,%5u bytes per update,%4u updates): ",
			i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen);

		ahash_request_set_crypt(req, sg, output, speed[i].plen);

		if (secs) {
			ret = test_ahash_jiffies(req, speed[i].blen,
						 speed[i].plen, output, secs);
			cond_resched();
		} else {
			ret = test_ahash_cycles(req, speed[i].blen,
						speed[i].plen, output);
		}

		if (ret) {
			pr_err("hashing failed ret=%d\n", ret);
			break;
		}
	}

	kfree(output);

out_nomem:
	ahash_request_free(req);

out:
	crypto_free_ahash(tfm);
}
/* Hash speed test allowing any (including async) implementation. */
static void test_ahash_speed(const char *algo, unsigned int secs,
			     struct hash_speed *speed)
{
	test_ahash_speed_common(algo, secs, speed, 0);
}
/* Hash speed test restricted to synchronous implementations. */
static void test_hash_speed(const char *algo, unsigned int secs,
			    struct hash_speed *speed)
{
	test_ahash_speed_common(algo, secs, speed, CRYPTO_ALG_ASYNC);
}
/* Per-request state for one slot of the multibuffer skcipher speed test. */
struct test_mb_skcipher_data {
	struct scatterlist sg[XBUFSIZE];	/* in-place src/dst mapping */
	struct skcipher_request *req;
	struct crypto_wait wait;		/* async completion */
	char *xbuf[XBUFSIZE];			/* data pages */
};
/*
 * Submit num_mb skcipher requests in one burst, then wait for them all.
 * Returns 0 on success or the last error observed.
 */
static int do_mult_acipher_op(struct test_mb_skcipher_data *data, int enc,
			      u32 num_mb, int *rc)
{
	int err = 0;
	int i;

	/* Submit the whole batch without waiting in between. */
	for (i = 0; i < num_mb; i++)
		rc[i] = (enc == ENCRYPT) ?
			crypto_skcipher_encrypt(data[i].req) :
			crypto_skcipher_decrypt(data[i].req);

	/* Reap every request, keeping the last error for the caller. */
	for (i = 0; i < num_mb; i++) {
		rc[i] = crypto_wait_req(rc[i], &data[i].wait);
		if (rc[i]) {
			pr_info("concurrent request %d error %d\n", i, rc[i]);
			err = rc[i];
		}
	}

	return err;
}
/*
 * Run multibuffer skcipher batches for 'secs' seconds and report the
 * aggregate operation count and byte throughput.
 */
static int test_mb_acipher_jiffies(struct test_mb_skcipher_data *data, int enc,
				   int blen, int secs, u32 num_mb)
{
	unsigned long deadline;
	int batches = 0;
	int ret = 0;
	int *rc;

	rc = kcalloc(num_mb, sizeof(*rc), GFP_KERNEL);
	if (!rc)
		return -ENOMEM;

	deadline = jiffies + secs * HZ;
	while (time_before(jiffies, deadline)) {
		ret = do_mult_acipher_op(data, enc, num_mb, rc);
		if (ret)
			goto out;
		batches++;
	}

	pr_cont("%d operations in %d seconds (%llu bytes)\n",
		batches * num_mb, secs, (u64)batches * blen * num_mb);

out:
	kfree(rc);
	return ret;
}
/*
 * Measure the per-request cycle cost of multibuffer skcipher operations:
 * 4 untimed warm-up batches, then 8 timed batches of num_mb requests.
 */
static int test_mb_acipher_cycles(struct test_mb_skcipher_data *data, int enc,
				  int blen, u32 num_mb)
{
	unsigned long cycles = 0;
	int ret = 0;
	int i;
	int *rc;

	rc = kcalloc(num_mb, sizeof(*rc), GFP_KERNEL);
	if (!rc)
		return -ENOMEM;

	/* Warm-up run. */
	for (i = 0; i < 4; i++) {
		ret = do_mult_acipher_op(data, enc, num_mb, rc);
		if (ret)
			goto out;
	}

	/* The real thing. */
	for (i = 0; i < 8; i++) {
		cycles_t start, end;

		start = get_cycles();
		ret = do_mult_acipher_op(data, enc, num_mb, rc);
		end = get_cycles();

		if (ret)
			goto out;

		cycles += end - start;
	}

	/* Average over 8 batches x num_mb requests (+4 biases the /8). */
	pr_cont("1 operation in %lu cycles (%d bytes)\n",
		(cycles + 4) / (8 * num_mb), blen);

out:
	kfree(rc);
	return ret;
}
/*
 * Benchmark a skcipher with num_mb requests in flight for every
 * (keysize, block size) combination.  Each request encrypts/decrypts
 * in place (cur->sg is both source and destination).
 */
static void test_mb_skcipher_speed(const char *algo, int enc, int secs,
				   struct cipher_speed_template *template,
				   unsigned int tcount, u8 *keysize, u32 num_mb)
{
	struct test_mb_skcipher_data *data;
	struct crypto_skcipher *tfm;
	unsigned int i, j, iv_len;
	const char *key;
	const char *e;
	u32 *b_size;
	char iv[128];
	int ret;

	if (enc == ENCRYPT)
		e = "encryption";
	else
		e = "decryption";

	data = kcalloc(num_mb, sizeof(*data), GFP_KERNEL);
	if (!data)
		return;

	tfm = crypto_alloc_skcipher(algo, 0, 0);
	if (IS_ERR(tfm)) {
		pr_err("failed to load transform for %s: %ld\n",
			algo, PTR_ERR(tfm));
		goto out_free_data;
	}

	for (i = 0; i < num_mb; ++i)
		if (testmgr_alloc_buf(data[i].xbuf)) {
			/* Unwind the slots allocated so far. */
			while (i--)
				testmgr_free_buf(data[i].xbuf);
			goto out_free_tfm;
		}

	for (i = 0; i < num_mb; ++i) {
		data[i].req = skcipher_request_alloc(tfm, GFP_KERNEL);
		if (!data[i].req) {
			pr_err("alg: skcipher: Failed to allocate request for %s\n",
			       algo);
			while (i--)
				skcipher_request_free(data[i].req);
			goto out_free_xbuf;
		}
	}

	/*
	 * NOTE(review): the wait is registered with the request before
	 * crypto_init_wait() runs; harmless because nothing is submitted
	 * until later, but initializing first would read more cleanly.
	 */
	for (i = 0; i < num_mb; ++i) {
		skcipher_request_set_callback(data[i].req,
					      CRYPTO_TFM_REQ_MAY_BACKLOG,
					      crypto_req_done, &data[i].wait);
		crypto_init_wait(&data[i].wait);
	}

	pr_info("\ntesting speed of multibuffer %s (%s) %s\n", algo,
		get_driver_name(crypto_skcipher, tfm), e);

	i = 0;
	do {
		b_size = block_sizes;
		do {
			/* The payload must fit the per-slot pages. */
			if (*b_size > XBUFSIZE * PAGE_SIZE) {
				pr_err("template (%u) too big for buffer (%lu)\n",
				       *b_size, XBUFSIZE * PAGE_SIZE);
				goto out;
			}

			pr_info("test %u (%d bit key, %d byte blocks): ", i,
				*keysize * 8, *b_size);

			/* Set up tfm global state, i.e. the key */
			memset(tvmem[0], 0xff, PAGE_SIZE);
			key = tvmem[0];
			for (j = 0; j < tcount; j++) {
				if (template[j].klen == *keysize) {
					key = template[j].key;
					break;
				}
			}

			crypto_skcipher_clear_flags(tfm, ~0);

			ret = crypto_skcipher_setkey(tfm, key, *keysize);
			if (ret) {
				pr_err("setkey() failed flags=%x\n",
				       crypto_skcipher_get_flags(tfm));
				goto out;
			}

			iv_len = crypto_skcipher_ivsize(tfm);
			if (iv_len)
				memset(&iv, 0xff, iv_len);

			/* Now setup per request stuff, i.e. buffers */
			for (j = 0; j < num_mb; ++j) {
				struct test_mb_skcipher_data *cur = &data[j];
				unsigned int k = *b_size;
				unsigned int pages = DIV_ROUND_UP(k, PAGE_SIZE);
				unsigned int p = 0;

				sg_init_table(cur->sg, pages);

				/* Whole pages first, then the partial tail. */
				while (k > PAGE_SIZE) {
					sg_set_buf(cur->sg + p, cur->xbuf[p],
						   PAGE_SIZE);
					memset(cur->xbuf[p], 0xff, PAGE_SIZE);
					p++;
					k -= PAGE_SIZE;
				}

				sg_set_buf(cur->sg + p, cur->xbuf[p], k);
				memset(cur->xbuf[p], 0xff, k);

				skcipher_request_set_crypt(cur->req, cur->sg,
							   cur->sg, *b_size,
							   iv);
			}

			if (secs) {
				ret = test_mb_acipher_jiffies(data, enc,
							      *b_size, secs,
							      num_mb);
				cond_resched();
			} else {
				ret = test_mb_acipher_cycles(data, enc,
							     *b_size, num_mb);
			}

			if (ret) {
				pr_err("%s() failed flags=%x\n", e,
				       crypto_skcipher_get_flags(tfm));
				break;
			}
			b_size++;
			i++;
		} while (*b_size);
		keysize++;
	} while (*keysize);

out:
	for (i = 0; i < num_mb; ++i)
		skcipher_request_free(data[i].req);
out_free_xbuf:
	for (i = 0; i < num_mb; ++i)
		testmgr_free_buf(data[i].xbuf);
out_free_tfm:
	crypto_free_skcipher(tfm);
out_free_data:
	kfree(data);
}
/* Resolve an async skcipher submission to its final status. */
static inline int do_one_acipher_op(struct skcipher_request *req, int ret)
{
	return crypto_wait_req(ret, req->base.data);
}
static int test_acipher_jiffies(struct skcipher_request *req, int enc,
int blen, int secs)
{
unsigned long start, end;
int bcount;
int ret;
for (start = jiffies, end = start + secs * HZ, bcount = 0;
time_before(jiffies, end); bcount++) {
if (enc)
ret = do_one_acipher_op(req,
crypto_skcipher_encrypt(req));
else
ret = do_one_acipher_op(req,
crypto_skcipher_decrypt(req));
if (ret)
return ret;
}
pr_cont("%d operations in %d seconds (%llu bytes)\n",
bcount, secs, (u64)bcount * blen);
return 0;
}
/*
 * Measure one skcipher operation in CPU cycles: 4 untimed warm-up runs,
 * then 8 timed runs whose average is reported.
 */
static int test_acipher_cycles(struct skcipher_request *req, int enc,
			       int blen)
{
	unsigned long cycles = 0;
	int ret = 0;
	int i;

	/* Warm-up run. */
	for (i = 0; i < 4; i++) {
		if (enc)
			ret = do_one_acipher_op(req,
						crypto_skcipher_encrypt(req));
		else
			ret = do_one_acipher_op(req,
						crypto_skcipher_decrypt(req));

		if (ret)
			goto out;
	}

	/* The real thing. */
	for (i = 0; i < 8; i++) {
		cycles_t start, end;

		start = get_cycles();
		if (enc)
			ret = do_one_acipher_op(req,
						crypto_skcipher_encrypt(req));
		else
			ret = do_one_acipher_op(req,
						crypto_skcipher_decrypt(req));
		end = get_cycles();

		if (ret)
			goto out;

		cycles += end - start;
	}

out:
	if (ret == 0)
		/* +4 biases the divide by the 8 timed runs toward nearest. */
		pr_cont("1 operation in %lu cycles (%d bytes)\n",
			(cycles + 4) / 8, blen);

	return ret;
}
/*
 * Benchmark a single skcipher; 'async' selects whether an async-capable
 * implementation may be chosen.  For each (keysize, block size) pair the
 * key occupies the start of tvmem page 0 and the payload is laid out
 * across the remaining scratch pages, processed in place.
 */
static void test_skcipher_speed(const char *algo, int enc, unsigned int secs,
				struct cipher_speed_template *template,
				unsigned int tcount, u8 *keysize, bool async)
{
	unsigned int ret, i, j, k, iv_len;
	struct crypto_wait wait;
	const char *key;
	char iv[128];
	struct skcipher_request *req;
	struct crypto_skcipher *tfm;
	const char *e;
	u32 *b_size;

	if (enc == ENCRYPT)
		e = "encryption";
	else
		e = "decryption";

	crypto_init_wait(&wait);

	tfm = crypto_alloc_skcipher(algo, 0, async ? 0 : CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		pr_err("failed to load transform for %s: %ld\n", algo,
		       PTR_ERR(tfm));
		return;
	}

	pr_info("\ntesting speed of async %s (%s) %s\n", algo,
		get_driver_name(crypto_skcipher, tfm), e);

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		pr_err("tcrypt: skcipher: Failed to allocate request for %s\n",
		       algo);
		goto out;
	}

	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);

	i = 0;
	do {
		b_size = block_sizes;

		do {
			struct scatterlist sg[TVMEMSIZE];

			/* Key plus payload must fit the scratch pages. */
			if ((*keysize + *b_size) > TVMEMSIZE * PAGE_SIZE) {
				pr_err("template (%u) too big for "
				       "tvmem (%lu)\n", *keysize + *b_size,
				       TVMEMSIZE * PAGE_SIZE);
				goto out_free_req;
			}

			pr_info("test %u (%d bit key, %d byte blocks): ", i,
				*keysize * 8, *b_size);

			memset(tvmem[0], 0xff, PAGE_SIZE);

			/* set key, plain text and IV */
			key = tvmem[0];
			for (j = 0; j < tcount; j++) {
				if (template[j].klen == *keysize) {
					key = template[j].key;
					break;
				}
			}

			crypto_skcipher_clear_flags(tfm, ~0);

			ret = crypto_skcipher_setkey(tfm, key, *keysize);
			if (ret) {
				pr_err("setkey() failed flags=%x\n",
				       crypto_skcipher_get_flags(tfm));
				goto out_free_req;
			}

			/*
			 * Map keysize + b_size bytes: page 0 contributes the
			 * bytes after the key; further pages are used whole,
			 * the last one possibly partially.
			 */
			k = *keysize + *b_size;
			sg_init_table(sg, DIV_ROUND_UP(k, PAGE_SIZE));

			if (k > PAGE_SIZE) {
				sg_set_buf(sg, tvmem[0] + *keysize,
					   PAGE_SIZE - *keysize);
				k -= PAGE_SIZE;
				j = 1;
				while (k > PAGE_SIZE) {
					sg_set_buf(sg + j, tvmem[j], PAGE_SIZE);
					memset(tvmem[j], 0xff, PAGE_SIZE);
					j++;
					k -= PAGE_SIZE;
				}
				sg_set_buf(sg + j, tvmem[j], k);
				memset(tvmem[j], 0xff, k);
			} else {
				sg_set_buf(sg, tvmem[0] + *keysize, *b_size);
			}

			iv_len = crypto_skcipher_ivsize(tfm);
			if (iv_len)
				memset(&iv, 0xff, iv_len);

			/* In-place operation: sg is both source and dest. */
			skcipher_request_set_crypt(req, sg, sg, *b_size, iv);

			if (secs) {
				ret = test_acipher_jiffies(req, enc,
							   *b_size, secs);
				cond_resched();
			} else {
				ret = test_acipher_cycles(req, enc,
							  *b_size);
			}

			if (ret) {
				pr_err("%s() failed flags=%x\n", e,
				       crypto_skcipher_get_flags(tfm));
				break;
			}
			b_size++;
			i++;
		} while (*b_size);
		keysize++;
	} while (*keysize);

out_free_req:
	skcipher_request_free(req);
out:
	crypto_free_skcipher(tfm);
}
/* Cipher speed test allowing async implementations. */
static void test_acipher_speed(const char *algo, int enc, unsigned int secs,
			       struct cipher_speed_template *template,
			       unsigned int tcount, u8 *keysize)
{
	test_skcipher_speed(algo, enc, secs, template, tcount, keysize,
			    true);
}
/* Cipher speed test restricted to synchronous implementations. */
static void test_cipher_speed(const char *algo, int enc, unsigned int secs,
			      struct cipher_speed_template *template,
			      unsigned int tcount, u8 *keysize)
{
	test_skcipher_speed(algo, enc, secs, template, tcount, keysize,
			    false);
}
/* Log, for every name in check[], whether the algorithm is available. */
static void test_available(void)
{
	char **name;

	for (name = check; *name; name++) {
		printk("alg %s ", *name);
		printk(crypto_has_alg(*name, 0, 0) ?
		       "found\n" : "not found\n");
	}
}
/*
 * Run the testmgr self-test for one algorithm.  In FIPS mode
 * non-approved algorithms report -EINVAL; treat that as success.
 */
static inline int tcrypt_test(const char *alg)
{
	int rc;

	pr_debug("testing %s\n", alg);

	rc = alg_test(alg, alg, 0, 0);
	/* non-fips algs return -EINVAL in fips mode */
	if (fips_enabled && rc == -EINVAL)
		rc = 0;

	return rc;
}
static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
{
int i;
int ret = 0;
switch (m) {
case 0:
if (alg) {
if (!crypto_has_alg(alg, type,
mask ?: CRYPTO_ALG_TYPE_MASK))
ret = -ENOENT;
break;
}
for (i = 1; i < 200; i++)
ret += do_test(NULL, 0, 0, i, num_mb);
break;
case 1:
ret += tcrypt_test("md5");
break;
case 2:
ret += tcrypt_test("sha1");
break;
case 3:
ret += tcrypt_test("ecb(des)");
ret += tcrypt_test("cbc(des)");
ret += tcrypt_test("ctr(des)");
break;
case 4:
ret += tcrypt_test("ecb(des3_ede)");
ret += tcrypt_test("cbc(des3_ede)");
ret += tcrypt_test("ctr(des3_ede)");
break;
case 5:
ret += tcrypt_test("md4");
break;
case 6:
ret += tcrypt_test("sha256");
break;
case 7:
ret += tcrypt_test("ecb(blowfish)");
ret += tcrypt_test("cbc(blowfish)");
ret += tcrypt_test("ctr(blowfish)");
break;
case 8:
ret += tcrypt_test("ecb(twofish)");
ret += tcrypt_test("cbc(twofish)");
ret += tcrypt_test("ctr(twofish)");
ret += tcrypt_test("lrw(twofish)");
ret += tcrypt_test("xts(twofish)");
break;
case 9:
ret += tcrypt_test("ecb(serpent)");
ret += tcrypt_test("cbc(serpent)");
ret += tcrypt_test("ctr(serpent)");
ret += tcrypt_test("lrw(serpent)");
ret += tcrypt_test("xts(serpent)");
break;
case 10:
ret += tcrypt_test("ecb(aes)");
ret += tcrypt_test("cbc(aes)");
ret += tcrypt_test("lrw(aes)");
ret += tcrypt_test("xts(aes)");
ret += tcrypt_test("ctr(aes)");
ret += tcrypt_test("rfc3686(ctr(aes))");
ret += tcrypt_test("cfb(aes)");
break;
case 11:
ret += tcrypt_test("sha384");
break;
case 12:
ret += tcrypt_test("sha512");
break;
case 13:
ret += tcrypt_test("deflate");
break;
case 14:
ret += tcrypt_test("ecb(cast5)");
ret += tcrypt_test("cbc(cast5)");
ret += tcrypt_test("ctr(cast5)");
break;
case 15:
ret += tcrypt_test("ecb(cast6)");
ret += tcrypt_test("cbc(cast6)");
ret += tcrypt_test("ctr(cast6)");
ret += tcrypt_test("lrw(cast6)");
ret += tcrypt_test("xts(cast6)");
break;
case 16:
ret += tcrypt_test("ecb(arc4)");
break;
case 17:
ret += tcrypt_test("michael_mic");
break;
case 18:
ret += tcrypt_test("crc32c");
break;
case 19:
ret += tcrypt_test("ecb(tea)");
break;
case 20:
ret += tcrypt_test("ecb(xtea)");
break;
case 21:
ret += tcrypt_test("ecb(khazad)");
break;
case 22:
ret += tcrypt_test("wp512");
break;
case 23:
ret += tcrypt_test("wp384");
break;
case 24:
ret += tcrypt_test("wp256");
break;
case 25:
ret += tcrypt_test("ecb(tnepres)");
break;
case 26:
ret += tcrypt_test("ecb(anubis)");
ret += tcrypt_test("cbc(anubis)");
break;
case 27:
ret += tcrypt_test("tgr192");
break;
case 28:
ret += tcrypt_test("tgr160");
break;
case 29:
ret += tcrypt_test("tgr128");
break;
case 30:
ret += tcrypt_test("ecb(xeta)");
break;
case 31:
ret += tcrypt_test("pcbc(fcrypt)");
break;
case 32:
ret += tcrypt_test("ecb(camellia)");
ret += tcrypt_test("cbc(camellia)");
ret += tcrypt_test("ctr(camellia)");
ret += tcrypt_test("lrw(camellia)");
ret += tcrypt_test("xts(camellia)");
break;
case 33:
ret += tcrypt_test("sha224");
break;
case 34:
ret += tcrypt_test("salsa20");
break;
case 35:
ret += tcrypt_test("gcm(aes)");
break;
case 36:
ret += tcrypt_test("lzo");
break;
case 37:
ret += tcrypt_test("ccm(aes)");
break;
case 38:
ret += tcrypt_test("cts(cbc(aes))");
break;
case 39:
ret += tcrypt_test("rmd128");
break;
case 40:
ret += tcrypt_test("rmd160");
break;
case 41:
ret += tcrypt_test("rmd256");
break;
case 42:
ret += tcrypt_test("rmd320");
break;
case 43:
ret += tcrypt_test("ecb(seed)");
break;
case 44:
ret += tcrypt_test("zlib");
break;
case 45:
ret += tcrypt_test("rfc4309(ccm(aes))");
break;
case 46:
ret += tcrypt_test("ghash");
break;
case 47:
ret += tcrypt_test("crct10dif");
break;
case 48:
ret += tcrypt_test("sha3-224");
break;
case 49:
ret += tcrypt_test("sha3-256");
break;
case 50:
ret += tcrypt_test("sha3-384");
break;
case 51:
ret += tcrypt_test("sha3-512");
break;
case 52:
ret += tcrypt_test("sm3");
break;
case 100:
ret += tcrypt_test("hmac(md5)");
break;
case 101:
ret += tcrypt_test("hmac(sha1)");
break;
case 102:
ret += tcrypt_test("hmac(sha256)");
break;
case 103:
ret += tcrypt_test("hmac(sha384)");
break;
case 104:
ret += tcrypt_test("hmac(sha512)");
break;
case 105:
ret += tcrypt_test("hmac(sha224)");
break;
case 106:
ret += tcrypt_test("xcbc(aes)");
break;
case 107:
ret += tcrypt_test("hmac(rmd128)");
break;
case 108:
ret += tcrypt_test("hmac(rmd160)");
break;
case 109:
ret += tcrypt_test("vmac64(aes)");
break;
case 111:
ret += tcrypt_test("hmac(sha3-224)");
break;
case 112:
ret += tcrypt_test("hmac(sha3-256)");
break;
case 113:
ret += tcrypt_test("hmac(sha3-384)");
break;
case 114:
ret += tcrypt_test("hmac(sha3-512)");
break;
case 150:
ret += tcrypt_test("ansi_cprng");
break;
case 151:
ret += tcrypt_test("rfc4106(gcm(aes))");
break;
case 152:
ret += tcrypt_test("rfc4543(gcm(aes))");
break;
case 153:
ret += tcrypt_test("cmac(aes)");
break;
case 154:
ret += tcrypt_test("cmac(des3_ede)");
break;
case 155:
ret += tcrypt_test("authenc(hmac(sha1),cbc(aes))");
break;
case 156:
ret += tcrypt_test("authenc(hmac(md5),ecb(cipher_null))");
break;
case 157:
ret += tcrypt_test("authenc(hmac(sha1),ecb(cipher_null))");
break;
case 181:
ret += tcrypt_test("authenc(hmac(sha1),cbc(des))");
break;
case 182:
ret += tcrypt_test("authenc(hmac(sha1),cbc(des3_ede))");
break;
case 183:
ret += tcrypt_test("authenc(hmac(sha224),cbc(des))");
break;
case 184:
ret += tcrypt_test("authenc(hmac(sha224),cbc(des3_ede))");
break;
case 185:
ret += tcrypt_test("authenc(hmac(sha256),cbc(des))");
break;
case 186:
ret += tcrypt_test("authenc(hmac(sha256),cbc(des3_ede))");
break;
case 187:
ret += tcrypt_test("authenc(hmac(sha384),cbc(des))");
break;
case 188:
ret += tcrypt_test("authenc(hmac(sha384),cbc(des3_ede))");
break;
case 189:
ret += tcrypt_test("authenc(hmac(sha512),cbc(des))");
break;
case 190:
ret += tcrypt_test("authenc(hmac(sha512),cbc(des3_ede))");
break;
case 191:
ret += tcrypt_test("ecb(sm4)");
break;
case 200:
test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("ecb(aes)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("cbc(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("cbc(aes)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("lrw(aes)", ENCRYPT, sec, NULL, 0,
speed_template_32_40_48);
test_cipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0,
speed_template_32_40_48);
test_cipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0,
speed_template_32_64);
test_cipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
speed_template_32_64);
test_cipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("ctr(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("cfb(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("cfb(aes)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
break;
case 201:
test_cipher_speed("ecb(des3_ede)", ENCRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
test_cipher_speed("ecb(des3_ede)", DECRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
test_cipher_speed("cbc(des3_ede)", ENCRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
test_cipher_speed("cbc(des3_ede)", DECRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
test_cipher_speed("ctr(des3_ede)", ENCRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
test_cipher_speed("ctr(des3_ede)", DECRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
break;
case 202:
test_cipher_speed("ecb(twofish)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("ecb(twofish)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("cbc(twofish)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("cbc(twofish)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("ctr(twofish)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("ctr(twofish)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("lrw(twofish)", ENCRYPT, sec, NULL, 0,
speed_template_32_40_48);
test_cipher_speed("lrw(twofish)", DECRYPT, sec, NULL, 0,
speed_template_32_40_48);
test_cipher_speed("xts(twofish)", ENCRYPT, sec, NULL, 0,
speed_template_32_48_64);
test_cipher_speed("xts(twofish)", DECRYPT, sec, NULL, 0,
speed_template_32_48_64);
break;
case 203:
test_cipher_speed("ecb(blowfish)", ENCRYPT, sec, NULL, 0,
speed_template_8_32);
test_cipher_speed("ecb(blowfish)", DECRYPT, sec, NULL, 0,
speed_template_8_32);
test_cipher_speed("cbc(blowfish)", ENCRYPT, sec, NULL, 0,
speed_template_8_32);
test_cipher_speed("cbc(blowfish)", DECRYPT, sec, NULL, 0,
speed_template_8_32);
test_cipher_speed("ctr(blowfish)", ENCRYPT, sec, NULL, 0,
speed_template_8_32);
test_cipher_speed("ctr(blowfish)", DECRYPT, sec, NULL, 0,
speed_template_8_32);
break;
case 204:
test_cipher_speed("ecb(des)", ENCRYPT, sec, NULL, 0,
speed_template_8);
test_cipher_speed("ecb(des)", DECRYPT, sec, NULL, 0,
speed_template_8);
test_cipher_speed("cbc(des)", ENCRYPT, sec, NULL, 0,
speed_template_8);
test_cipher_speed("cbc(des)", DECRYPT, sec, NULL, 0,
speed_template_8);
break;
case 205:
test_cipher_speed("ecb(camellia)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("ecb(camellia)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("cbc(camellia)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("cbc(camellia)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("ctr(camellia)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("ctr(camellia)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("lrw(camellia)", ENCRYPT, sec, NULL, 0,
speed_template_32_40_48);
test_cipher_speed("lrw(camellia)", DECRYPT, sec, NULL, 0,
speed_template_32_40_48);
test_cipher_speed("xts(camellia)", ENCRYPT, sec, NULL, 0,
speed_template_32_48_64);
test_cipher_speed("xts(camellia)", DECRYPT, sec, NULL, 0,
speed_template_32_48_64);
break;
case 206:
test_cipher_speed("salsa20", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
break;
case 207:
test_cipher_speed("ecb(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_cipher_speed("ecb(serpent)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_cipher_speed("cbc(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_cipher_speed("cbc(serpent)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_cipher_speed("ctr(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_cipher_speed("ctr(serpent)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_cipher_speed("lrw(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_32_48);
test_cipher_speed("lrw(serpent)", DECRYPT, sec, NULL, 0,
speed_template_32_48);
test_cipher_speed("xts(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_32_64);
test_cipher_speed("xts(serpent)", DECRYPT, sec, NULL, 0,
speed_template_32_64);
break;
case 208:
test_cipher_speed("ecb(arc4)", ENCRYPT, sec, NULL, 0,
speed_template_8);
break;
case 209:
test_cipher_speed("ecb(cast5)", ENCRYPT, sec, NULL, 0,
speed_template_8_16);
test_cipher_speed("ecb(cast5)", DECRYPT, sec, NULL, 0,
speed_template_8_16);
test_cipher_speed("cbc(cast5)", ENCRYPT, sec, NULL, 0,
speed_template_8_16);
test_cipher_speed("cbc(cast5)", DECRYPT, sec, NULL, 0,
speed_template_8_16);
test_cipher_speed("ctr(cast5)", ENCRYPT, sec, NULL, 0,
speed_template_8_16);
test_cipher_speed("ctr(cast5)", DECRYPT, sec, NULL, 0,
speed_template_8_16);
break;
case 210:
test_cipher_speed("ecb(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_cipher_speed("ecb(cast6)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_cipher_speed("cbc(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_cipher_speed("cbc(cast6)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_cipher_speed("ctr(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_cipher_speed("ctr(cast6)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_cipher_speed("lrw(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_32_48);
test_cipher_speed("lrw(cast6)", DECRYPT, sec, NULL, 0,
speed_template_32_48);
test_cipher_speed("xts(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_32_64);
test_cipher_speed("xts(cast6)", DECRYPT, sec, NULL, 0,
speed_template_32_64);
break;
case 211:
test_aead_speed("rfc4106(gcm(aes))", ENCRYPT, sec,
NULL, 0, 16, 16, aead_speed_template_20);
test_aead_speed("gcm(aes)", ENCRYPT, sec,
NULL, 0, 16, 8, speed_template_16_24_32);
test_aead_speed("rfc4106(gcm(aes))", DECRYPT, sec,
NULL, 0, 16, 16, aead_speed_template_20);
test_aead_speed("gcm(aes)", DECRYPT, sec,
NULL, 0, 16, 8, speed_template_16_24_32);
break;
case 212:
test_aead_speed("rfc4309(ccm(aes))", ENCRYPT, sec,
NULL, 0, 16, 16, aead_speed_template_19);
test_aead_speed("rfc4309(ccm(aes))", DECRYPT, sec,
NULL, 0, 16, 16, aead_speed_template_19);
break;
case 213:
test_aead_speed("rfc7539esp(chacha20,poly1305)", ENCRYPT, sec,
NULL, 0, 16, 8, aead_speed_template_36);
test_aead_speed("rfc7539esp(chacha20,poly1305)", DECRYPT, sec,
NULL, 0, 16, 8, aead_speed_template_36);
break;
case 214:
test_cipher_speed("chacha20", ENCRYPT, sec, NULL, 0,
speed_template_32);
break;
case 215:
test_mb_aead_speed("rfc4106(gcm(aes))", ENCRYPT, sec, NULL,
0, 16, 16, aead_speed_template_20, num_mb);
test_mb_aead_speed("gcm(aes)", ENCRYPT, sec, NULL, 0, 16, 8,
speed_template_16_24_32, num_mb);
test_mb_aead_speed("rfc4106(gcm(aes))", DECRYPT, sec, NULL,
0, 16, 16, aead_speed_template_20, num_mb);
test_mb_aead_speed("gcm(aes)", DECRYPT, sec, NULL, 0, 16, 8,
speed_template_16_24_32, num_mb);
break;
case 216:
test_mb_aead_speed("rfc4309(ccm(aes))", ENCRYPT, sec, NULL, 0,
16, 16, aead_speed_template_19, num_mb);
test_mb_aead_speed("rfc4309(ccm(aes))", DECRYPT, sec, NULL, 0,
16, 16, aead_speed_template_19, num_mb);
break;
case 217:
test_mb_aead_speed("rfc7539esp(chacha20,poly1305)", ENCRYPT,
sec, NULL, 0, 16, 8, aead_speed_template_36,
num_mb);
test_mb_aead_speed("rfc7539esp(chacha20,poly1305)", DECRYPT,
sec, NULL, 0, 16, 8, aead_speed_template_36,
num_mb);
break;
case 219:
test_cipher_speed("adiantum(xchacha12,aes)", ENCRYPT, sec, NULL,
0, speed_template_32);
test_cipher_speed("adiantum(xchacha12,aes)", DECRYPT, sec, NULL,
0, speed_template_32);
test_cipher_speed("adiantum(xchacha20,aes)", ENCRYPT, sec, NULL,
0, speed_template_32);
test_cipher_speed("adiantum(xchacha20,aes)", DECRYPT, sec, NULL,
0, speed_template_32);
break;
case 300:
if (alg) {
test_hash_speed(alg, sec, generic_hash_speed_template);
break;
}
/* fall through */
case 301:
test_hash_speed("md4", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
/* fall through */
case 302:
test_hash_speed("md5", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
/* fall through */
case 303:
test_hash_speed("sha1", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
/* fall through */
case 304:
test_hash_speed("sha256", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
/* fall through */
case 305:
test_hash_speed("sha384", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
/* fall through */
case 306:
test_hash_speed("sha512", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
/* fall through */
case 307:
test_hash_speed("wp256", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
/* fall through */
case 308:
test_hash_speed("wp384", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
/* fall through */
case 309:
test_hash_speed("wp512", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
/* fall through */
case 310:
test_hash_speed("tgr128", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
/* fall through */
case 311:
test_hash_speed("tgr160", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
/* fall through */
case 312:
test_hash_speed("tgr192", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
/* fall through */
case 313:
test_hash_speed("sha224", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
/* fall through */
case 314:
test_hash_speed("rmd128", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
/* fall through */
case 315:
test_hash_speed("rmd160", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
/* fall through */
case 316:
test_hash_speed("rmd256", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
/* fall through */
case 317:
test_hash_speed("rmd320", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
/* fall through */
case 318:
test_hash_speed("ghash-generic", sec, hash_speed_template_16);
if (mode > 300 && mode < 400) break;
/* fall through */
case 319:
test_hash_speed("crc32c", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
/* fall through */
case 320:
test_hash_speed("crct10dif", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
/* fall through */
case 321:
test_hash_speed("poly1305", sec, poly1305_speed_template);
if (mode > 300 && mode < 400) break;
/* fall through */
case 322:
test_hash_speed("sha3-224", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
/* fall through */
case 323:
test_hash_speed("sha3-256", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
/* fall through */
case 324:
test_hash_speed("sha3-384", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
/* fall through */
case 325:
test_hash_speed("sha3-512", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
/* fall through */
case 326:
test_hash_speed("sm3", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
/* fall through */
case 399:
break;
case 400:
if (alg) {
test_ahash_speed(alg, sec, generic_hash_speed_template);
break;
}
/* fall through */
case 401:
test_ahash_speed("md4", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
/* fall through */
case 402:
test_ahash_speed("md5", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
/* fall through */
case 403:
test_ahash_speed("sha1", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
/* fall through */
case 404:
test_ahash_speed("sha256", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
/* fall through */
case 405:
test_ahash_speed("sha384", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
/* fall through */
case 406:
test_ahash_speed("sha512", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
/* fall through */
case 407:
test_ahash_speed("wp256", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
/* fall through */
case 408:
test_ahash_speed("wp384", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
/* fall through */
case 409:
test_ahash_speed("wp512", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
/* fall through */
case 410:
test_ahash_speed("tgr128", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
/* fall through */
case 411:
test_ahash_speed("tgr160", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
/* fall through */
case 412:
test_ahash_speed("tgr192", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
/* fall through */
case 413:
test_ahash_speed("sha224", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
/* fall through */
case 414:
test_ahash_speed("rmd128", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
/* fall through */
case 415:
test_ahash_speed("rmd160", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
/* fall through */
case 416:
test_ahash_speed("rmd256", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
/* fall through */
case 417:
test_ahash_speed("rmd320", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
/* fall through */
case 418:
test_ahash_speed("sha3-224", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
/* fall through */
case 419:
test_ahash_speed("sha3-256", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
/* fall through */
case 420:
test_ahash_speed("sha3-384", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
/* fall through */
case 421:
test_ahash_speed("sha3-512", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
/* fall through */
case 422:
test_mb_ahash_speed("sha1", sec, generic_hash_speed_template,
num_mb);
if (mode > 400 && mode < 500) break;
/* fall through */
case 423:
test_mb_ahash_speed("sha256", sec, generic_hash_speed_template,
num_mb);
if (mode > 400 && mode < 500) break;
/* fall through */
case 424:
test_mb_ahash_speed("sha512", sec, generic_hash_speed_template,
num_mb);
if (mode > 400 && mode < 500) break;
/* fall through */
case 425:
test_mb_ahash_speed("sm3", sec, generic_hash_speed_template,
num_mb);
if (mode > 400 && mode < 500) break;
/* fall through */
case 499:
break;
case 500:
test_acipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("ecb(aes)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("cbc(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("cbc(aes)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("lrw(aes)", ENCRYPT, sec, NULL, 0,
speed_template_32_40_48);
test_acipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0,
speed_template_32_40_48);
test_acipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0,
speed_template_32_64);
test_acipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
speed_template_32_64);
test_acipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("ctr(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("cfb(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("cfb(aes)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("ofb(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("ofb(aes)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("rfc3686(ctr(aes))", ENCRYPT, sec, NULL, 0,
speed_template_20_28_36);
test_acipher_speed("rfc3686(ctr(aes))", DECRYPT, sec, NULL, 0,
speed_template_20_28_36);
break;
case 501:
test_acipher_speed("ecb(des3_ede)", ENCRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
test_acipher_speed("ecb(des3_ede)", DECRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
test_acipher_speed("cbc(des3_ede)", ENCRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
test_acipher_speed("cbc(des3_ede)", DECRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
test_acipher_speed("cfb(des3_ede)", ENCRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
test_acipher_speed("cfb(des3_ede)", DECRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
test_acipher_speed("ofb(des3_ede)", ENCRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
test_acipher_speed("ofb(des3_ede)", DECRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
break;
case 502:
test_acipher_speed("ecb(des)", ENCRYPT, sec, NULL, 0,
speed_template_8);
test_acipher_speed("ecb(des)", DECRYPT, sec, NULL, 0,
speed_template_8);
test_acipher_speed("cbc(des)", ENCRYPT, sec, NULL, 0,
speed_template_8);
test_acipher_speed("cbc(des)", DECRYPT, sec, NULL, 0,
speed_template_8);
test_acipher_speed("cfb(des)", ENCRYPT, sec, NULL, 0,
speed_template_8);
test_acipher_speed("cfb(des)", DECRYPT, sec, NULL, 0,
speed_template_8);
test_acipher_speed("ofb(des)", ENCRYPT, sec, NULL, 0,
speed_template_8);
test_acipher_speed("ofb(des)", DECRYPT, sec, NULL, 0,
speed_template_8);
break;
case 503:
test_acipher_speed("ecb(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("ecb(serpent)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("cbc(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("cbc(serpent)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("ctr(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("ctr(serpent)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("lrw(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_32_48);
test_acipher_speed("lrw(serpent)", DECRYPT, sec, NULL, 0,
speed_template_32_48);
test_acipher_speed("xts(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_32_64);
test_acipher_speed("xts(serpent)", DECRYPT, sec, NULL, 0,
speed_template_32_64);
break;
case 504:
test_acipher_speed("ecb(twofish)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("ecb(twofish)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("cbc(twofish)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("cbc(twofish)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("ctr(twofish)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("ctr(twofish)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("lrw(twofish)", ENCRYPT, sec, NULL, 0,
speed_template_32_40_48);
test_acipher_speed("lrw(twofish)", DECRYPT, sec, NULL, 0,
speed_template_32_40_48);
test_acipher_speed("xts(twofish)", ENCRYPT, sec, NULL, 0,
speed_template_32_48_64);
test_acipher_speed("xts(twofish)", DECRYPT, sec, NULL, 0,
speed_template_32_48_64);
break;
case 505:
test_acipher_speed("ecb(arc4)", ENCRYPT, sec, NULL, 0,
speed_template_8);
break;
case 506:
test_acipher_speed("ecb(cast5)", ENCRYPT, sec, NULL, 0,
speed_template_8_16);
test_acipher_speed("ecb(cast5)", DECRYPT, sec, NULL, 0,
speed_template_8_16);
test_acipher_speed("cbc(cast5)", ENCRYPT, sec, NULL, 0,
speed_template_8_16);
test_acipher_speed("cbc(cast5)", DECRYPT, sec, NULL, 0,
speed_template_8_16);
test_acipher_speed("ctr(cast5)", ENCRYPT, sec, NULL, 0,
speed_template_8_16);
test_acipher_speed("ctr(cast5)", DECRYPT, sec, NULL, 0,
speed_template_8_16);
break;
case 507:
test_acipher_speed("ecb(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("ecb(cast6)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("cbc(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("cbc(cast6)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("ctr(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("ctr(cast6)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("lrw(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_32_48);
test_acipher_speed("lrw(cast6)", DECRYPT, sec, NULL, 0,
speed_template_32_48);
test_acipher_speed("xts(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_32_64);
test_acipher_speed("xts(cast6)", DECRYPT, sec, NULL, 0,
speed_template_32_64);
break;
case 508:
test_acipher_speed("ecb(camellia)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("ecb(camellia)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("cbc(camellia)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("cbc(camellia)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("ctr(camellia)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("ctr(camellia)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("lrw(camellia)", ENCRYPT, sec, NULL, 0,
speed_template_32_48);
test_acipher_speed("lrw(camellia)", DECRYPT, sec, NULL, 0,
speed_template_32_48);
test_acipher_speed("xts(camellia)", ENCRYPT, sec, NULL, 0,
speed_template_32_64);
test_acipher_speed("xts(camellia)", DECRYPT, sec, NULL, 0,
speed_template_32_64);
break;
case 509:
test_acipher_speed("ecb(blowfish)", ENCRYPT, sec, NULL, 0,
speed_template_8_32);
test_acipher_speed("ecb(blowfish)", DECRYPT, sec, NULL, 0,
speed_template_8_32);
test_acipher_speed("cbc(blowfish)", ENCRYPT, sec, NULL, 0,
speed_template_8_32);
test_acipher_speed("cbc(blowfish)", DECRYPT, sec, NULL, 0,
speed_template_8_32);
test_acipher_speed("ctr(blowfish)", ENCRYPT, sec, NULL, 0,
speed_template_8_32);
test_acipher_speed("ctr(blowfish)", DECRYPT, sec, NULL, 0,
speed_template_8_32);
break;
case 600:
test_mb_skcipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32, num_mb);
test_mb_skcipher_speed("ecb(aes)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32, num_mb);
test_mb_skcipher_speed("cbc(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32, num_mb);
test_mb_skcipher_speed("cbc(aes)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32, num_mb);
test_mb_skcipher_speed("lrw(aes)", ENCRYPT, sec, NULL, 0,
speed_template_32_40_48, num_mb);
test_mb_skcipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0,
speed_template_32_40_48, num_mb);
test_mb_skcipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0,
speed_template_32_64, num_mb);
test_mb_skcipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
speed_template_32_64, num_mb);
test_mb_skcipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32, num_mb);
test_mb_skcipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0,
speed_template_16_24_32, num_mb);
test_mb_skcipher_speed("ctr(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32, num_mb);
test_mb_skcipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32, num_mb);
test_mb_skcipher_speed("cfb(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32, num_mb);
test_mb_skcipher_speed("cfb(aes)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32, num_mb);
test_mb_skcipher_speed("ofb(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32, num_mb);
test_mb_skcipher_speed("ofb(aes)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32, num_mb);
test_mb_skcipher_speed("rfc3686(ctr(aes))", ENCRYPT, sec, NULL,
0, speed_template_20_28_36, num_mb);
test_mb_skcipher_speed("rfc3686(ctr(aes))", DECRYPT, sec, NULL,
0, speed_template_20_28_36, num_mb);
break;
case 601:
test_mb_skcipher_speed("ecb(des3_ede)", ENCRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24, num_mb);
test_mb_skcipher_speed("ecb(des3_ede)", DECRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24, num_mb);
test_mb_skcipher_speed("cbc(des3_ede)", ENCRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24, num_mb);
test_mb_skcipher_speed("cbc(des3_ede)", DECRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24, num_mb);
test_mb_skcipher_speed("cfb(des3_ede)", ENCRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24, num_mb);
test_mb_skcipher_speed("cfb(des3_ede)", DECRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24, num_mb);
test_mb_skcipher_speed("ofb(des3_ede)", ENCRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24, num_mb);
test_mb_skcipher_speed("ofb(des3_ede)", DECRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24, num_mb);
break;
case 602:
test_mb_skcipher_speed("ecb(des)", ENCRYPT, sec, NULL, 0,
speed_template_8, num_mb);
test_mb_skcipher_speed("ecb(des)", DECRYPT, sec, NULL, 0,
speed_template_8, num_mb);
test_mb_skcipher_speed("cbc(des)", ENCRYPT, sec, NULL, 0,
speed_template_8, num_mb);
test_mb_skcipher_speed("cbc(des)", DECRYPT, sec, NULL, 0,
speed_template_8, num_mb);
test_mb_skcipher_speed("cfb(des)", ENCRYPT, sec, NULL, 0,
speed_template_8, num_mb);
test_mb_skcipher_speed("cfb(des)", DECRYPT, sec, NULL, 0,
speed_template_8, num_mb);
test_mb_skcipher_speed("ofb(des)", ENCRYPT, sec, NULL, 0,
speed_template_8, num_mb);
test_mb_skcipher_speed("ofb(des)", DECRYPT, sec, NULL, 0,
speed_template_8, num_mb);
break;
case 603:
test_mb_skcipher_speed("ecb(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_16_32, num_mb);
test_mb_skcipher_speed("ecb(serpent)", DECRYPT, sec, NULL, 0,
speed_template_16_32, num_mb);
test_mb_skcipher_speed("cbc(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_16_32, num_mb);
test_mb_skcipher_speed("cbc(serpent)", DECRYPT, sec, NULL, 0,
speed_template_16_32, num_mb);
test_mb_skcipher_speed("ctr(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_16_32, num_mb);
test_mb_skcipher_speed("ctr(serpent)", DECRYPT, sec, NULL, 0,
speed_template_16_32, num_mb);
test_mb_skcipher_speed("lrw(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_32_48, num_mb);
test_mb_skcipher_speed("lrw(serpent)", DECRYPT, sec, NULL, 0,
speed_template_32_48, num_mb);
test_mb_skcipher_speed("xts(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_32_64, num_mb);
test_mb_skcipher_speed("xts(serpent)", DECRYPT, sec, NULL, 0,
speed_template_32_64, num_mb);
break;
case 604:
test_mb_skcipher_speed("ecb(twofish)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32, num_mb);
test_mb_skcipher_speed("ecb(twofish)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32, num_mb);
test_mb_skcipher_speed("cbc(twofish)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32, num_mb);
test_mb_skcipher_speed("cbc(twofish)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32, num_mb);
test_mb_skcipher_speed("ctr(twofish)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32, num_mb);
test_mb_skcipher_speed("ctr(twofish)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32, num_mb);
test_mb_skcipher_speed("lrw(twofish)", ENCRYPT, sec, NULL, 0,
speed_template_32_40_48, num_mb);
test_mb_skcipher_speed("lrw(twofish)", DECRYPT, sec, NULL, 0,
speed_template_32_40_48, num_mb);
test_mb_skcipher_speed("xts(twofish)", ENCRYPT, sec, NULL, 0,
speed_template_32_48_64, num_mb);
test_mb_skcipher_speed("xts(twofish)", DECRYPT, sec, NULL, 0,
speed_template_32_48_64, num_mb);
break;
case 605:
test_mb_skcipher_speed("ecb(arc4)", ENCRYPT, sec, NULL, 0,
speed_template_8, num_mb);
break;
case 606:
test_mb_skcipher_speed("ecb(cast5)", ENCRYPT, sec, NULL, 0,
speed_template_8_16, num_mb);
test_mb_skcipher_speed("ecb(cast5)", DECRYPT, sec, NULL, 0,
speed_template_8_16, num_mb);
test_mb_skcipher_speed("cbc(cast5)", ENCRYPT, sec, NULL, 0,
speed_template_8_16, num_mb);
test_mb_skcipher_speed("cbc(cast5)", DECRYPT, sec, NULL, 0,
speed_template_8_16, num_mb);
test_mb_skcipher_speed("ctr(cast5)", ENCRYPT, sec, NULL, 0,
speed_template_8_16, num_mb);
test_mb_skcipher_speed("ctr(cast5)", DECRYPT, sec, NULL, 0,
speed_template_8_16, num_mb);
break;
case 607:
test_mb_skcipher_speed("ecb(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_16_32, num_mb);
test_mb_skcipher_speed("ecb(cast6)", DECRYPT, sec, NULL, 0,
speed_template_16_32, num_mb);
test_mb_skcipher_speed("cbc(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_16_32, num_mb);
test_mb_skcipher_speed("cbc(cast6)", DECRYPT, sec, NULL, 0,
speed_template_16_32, num_mb);
test_mb_skcipher_speed("ctr(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_16_32, num_mb);
test_mb_skcipher_speed("ctr(cast6)", DECRYPT, sec, NULL, 0,
speed_template_16_32, num_mb);
test_mb_skcipher_speed("lrw(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_32_48, num_mb);
test_mb_skcipher_speed("lrw(cast6)", DECRYPT, sec, NULL, 0,
speed_template_32_48, num_mb);
test_mb_skcipher_speed("xts(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_32_64, num_mb);
test_mb_skcipher_speed("xts(cast6)", DECRYPT, sec, NULL, 0,
speed_template_32_64, num_mb);
break;
case 608:
test_mb_skcipher_speed("ecb(camellia)", ENCRYPT, sec, NULL, 0,
speed_template_16_32, num_mb);
test_mb_skcipher_speed("ecb(camellia)", DECRYPT, sec, NULL, 0,
speed_template_16_32, num_mb);
test_mb_skcipher_speed("cbc(camellia)", ENCRYPT, sec, NULL, 0,
speed_template_16_32, num_mb);
test_mb_skcipher_speed("cbc(camellia)", DECRYPT, sec, NULL, 0,
speed_template_16_32, num_mb);
test_mb_skcipher_speed("ctr(camellia)", ENCRYPT, sec, NULL, 0,
speed_template_16_32, num_mb);
test_mb_skcipher_speed("ctr(camellia)", DECRYPT, sec, NULL, 0,
speed_template_16_32, num_mb);
test_mb_skcipher_speed("lrw(camellia)", ENCRYPT, sec, NULL, 0,
speed_template_32_48, num_mb);
test_mb_skcipher_speed("lrw(camellia)", DECRYPT, sec, NULL, 0,
speed_template_32_48, num_mb);
test_mb_skcipher_speed("xts(camellia)", ENCRYPT, sec, NULL, 0,
speed_template_32_64, num_mb);
test_mb_skcipher_speed("xts(camellia)", DECRYPT, sec, NULL, 0,
speed_template_32_64, num_mb);
break;
case 609:
test_mb_skcipher_speed("ecb(blowfish)", ENCRYPT, sec, NULL, 0,
speed_template_8_32, num_mb);
test_mb_skcipher_speed("ecb(blowfish)", DECRYPT, sec, NULL, 0,
speed_template_8_32, num_mb);
test_mb_skcipher_speed("cbc(blowfish)", ENCRYPT, sec, NULL, 0,
speed_template_8_32, num_mb);
test_mb_skcipher_speed("cbc(blowfish)", DECRYPT, sec, NULL, 0,
speed_template_8_32, num_mb);
test_mb_skcipher_speed("ctr(blowfish)", ENCRYPT, sec, NULL, 0,
speed_template_8_32, num_mb);
test_mb_skcipher_speed("ctr(blowfish)", DECRYPT, sec, NULL, 0,
speed_template_8_32, num_mb);
break;
case 1000:
test_available();
break;
}
return ret;
}
/*
 * Module entry point: grab one page per test-vector buffer slot, run the
 * tests selected by the module parameters exactly once, then release the
 * pages again.
 *
 * Returns 0 in fips mode on success so a successful load can be observed;
 * otherwise deliberately returns -EAGAIN so the module is never kept
 * resident — all of its work is done here at init time and it offers no
 * runtime functionality. (Originally noted by mludvig.)
 */
static int __init tcrypt_mod_init(void)
{
	int ret = -ENOMEM;
	int i;

	/* Allocate the scratch pages used by the test routines. */
	for (i = 0; i < TVMEMSIZE; i++) {
		tvmem[i] = (void *)__get_free_page(GFP_KERNEL);
		if (!tvmem[i])
			goto out_free;
	}

	ret = do_test(alg, type, mask, mode, num_mb);
	if (ret) {
		printk(KERN_ERR "tcrypt: one or more tests failed!\n");
		goto out_free;
	}
	pr_debug("all tests passed\n");

	/* See the function comment: refuse residency outside of fips mode. */
	if (!fips_enabled)
		ret = -EAGAIN;

out_free:
	/* tvmem is zero-initialized static storage, so stop at first NULL. */
	for (i = 0; i < TVMEMSIZE && tvmem[i]; i++)
		free_page((unsigned long)tvmem[i]);

	return ret;
}
/*
 * Intentionally empty exit handler: once an init function exists, an exit
 * function is also required, otherwise the module cannot be unloaded.
 */
static void __exit tcrypt_mod_fini(void)
{
}
module_init(tcrypt_mod_init);
module_exit(tcrypt_mod_fini);
/* Algorithm name to test; NULL selects the numeric 'mode' table instead. */
module_param(alg, charp, 0);
/* Algorithm type and type-mask passed through to the crypto API lookups. */
module_param(type, uint, 0);
module_param(mask, uint, 0);
/* Numeric test selector (the big switch in do_test()); 0 runs a default set. */
module_param(mode, int, 0);
module_param(sec, uint, 0);
MODULE_PARM_DESC(sec, "Length in seconds of speed tests "
"(defaults to zero which uses CPU cycles instead)");
module_param(num_mb, uint, 0000);
MODULE_PARM_DESC(num_mb, "Number of concurrent requests to be used in mb speed tests (defaults to 8)");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Quick & dirty crypto testing module");
MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");