summaryrefslogtreecommitdiff
path: root/src/libstrongswan/plugins
diff options
context:
space:
mode:
authorYves-Alexis Perez <corsac@debian.org>2015-06-01 14:46:30 +0200
committerYves-Alexis Perez <corsac@debian.org>2015-06-01 14:46:30 +0200
commitfc556ec2bc92a9d476c11406fad2c33db8bf7cb0 (patch)
tree7360889e50de867d72741213d534a756c73902c8 /src/libstrongswan/plugins
parent83b8aebb19fe6e49e13a05d4e8f5ab9a06177642 (diff)
downloadvyos-strongswan-fc556ec2bc92a9d476c11406fad2c33db8bf7cb0.tar.gz
vyos-strongswan-fc556ec2bc92a9d476c11406fad2c33db8bf7cb0.zip
Imported Upstream version 5.3.1
Diffstat (limited to 'src/libstrongswan/plugins')
-rw-r--r--src/libstrongswan/plugins/aesni/Makefile.am26
-rw-r--r--src/libstrongswan/plugins/aesni/Makefile.in793
-rw-r--r--src/libstrongswan/plugins/aesni/aesni_cbc.c671
-rw-r--r--src/libstrongswan/plugins/aesni/aesni_cbc.h48
-rw-r--r--src/libstrongswan/plugins/aesni/aesni_ccm.c914
-rw-r--r--src/libstrongswan/plugins/aesni/aesni_ccm.h50
-rw-r--r--src/libstrongswan/plugins/aesni/aesni_cmac.c371
-rw-r--r--src/libstrongswan/plugins/aesni/aesni_cmac.h52
-rw-r--r--src/libstrongswan/plugins/aesni/aesni_ctr.c643
-rw-r--r--src/libstrongswan/plugins/aesni/aesni_ctr.h48
-rw-r--r--src/libstrongswan/plugins/aesni/aesni_gcm.c1447
-rw-r--r--src/libstrongswan/plugins/aesni/aesni_gcm.h50
-rw-r--r--src/libstrongswan/plugins/aesni/aesni_key.c301
-rw-r--r--src/libstrongswan/plugins/aesni/aesni_key.h65
-rw-r--r--src/libstrongswan/plugins/aesni/aesni_plugin.c125
-rw-r--r--src/libstrongswan/plugins/aesni/aesni_plugin.h42
-rw-r--r--src/libstrongswan/plugins/aesni/aesni_xcbc.c367
-rw-r--r--src/libstrongswan/plugins/aesni/aesni_xcbc.h52
-rw-r--r--src/libstrongswan/plugins/af_alg/af_alg_signer.c2
-rw-r--r--src/libstrongswan/plugins/ccm/ccm_aead.c2
-rw-r--r--src/libstrongswan/plugins/fips_prf/fips_prf.c2
-rw-r--r--src/libstrongswan/plugins/gcm/gcm_aead.c2
-rw-r--r--src/libstrongswan/plugins/gcrypt/gcrypt_dh.c19
-rw-r--r--src/libstrongswan/plugins/gcrypt/gcrypt_plugin.c3
-rw-r--r--src/libstrongswan/plugins/gmp/gmp_diffie_hellman.c10
-rw-r--r--src/libstrongswan/plugins/gmp/gmp_rsa_public_key.c5
-rw-r--r--src/libstrongswan/plugins/openssl/openssl_crypter.c2
-rw-r--r--src/libstrongswan/plugins/openssl/openssl_diffie_hellman.c13
-rw-r--r--src/libstrongswan/plugins/openssl/openssl_ec_diffie_hellman.c44
-rw-r--r--src/libstrongswan/plugins/openssl/openssl_hmac.c22
-rw-r--r--src/libstrongswan/plugins/openssl/openssl_pkcs7.c2
-rw-r--r--src/libstrongswan/plugins/openssl/openssl_rsa_public_key.c2
-rw-r--r--src/libstrongswan/plugins/padlock/padlock_plugin.c97
-rw-r--r--src/libstrongswan/plugins/pkcs12/pkcs12_decode.c2
-rw-r--r--src/libstrongswan/plugins/pkcs7/pkcs7_signed_data.c2
-rw-r--r--src/libstrongswan/plugins/plugin_feature.c2
-rw-r--r--src/libstrongswan/plugins/rdrand/rdrand_plugin.c55
-rw-r--r--src/libstrongswan/plugins/sqlite/sqlite_database.c27
-rw-r--r--src/libstrongswan/plugins/sqlite/sqlite_plugin.c9
-rw-r--r--src/libstrongswan/plugins/test_vectors/Makefile.am4
-rw-r--r--src/libstrongswan/plugins/test_vectors/Makefile.in19
-rw-r--r--src/libstrongswan/plugins/test_vectors/test_vectors.h32
-rw-r--r--src/libstrongswan/plugins/test_vectors/test_vectors/aes_ccm.c79
-rw-r--r--src/libstrongswan/plugins/test_vectors/test_vectors/aes_gcm.c150
-rw-r--r--src/libstrongswan/plugins/test_vectors/test_vectors/ecp.c134
-rw-r--r--src/libstrongswan/plugins/test_vectors/test_vectors/ecpbp.c113
-rw-r--r--src/libstrongswan/plugins/test_vectors/test_vectors/modp.c731
-rw-r--r--src/libstrongswan/plugins/test_vectors/test_vectors/modpsub.c164
-rw-r--r--src/libstrongswan/plugins/test_vectors/test_vectors_plugin.c29
49 files changed, 7679 insertions, 165 deletions
diff --git a/src/libstrongswan/plugins/aesni/Makefile.am b/src/libstrongswan/plugins/aesni/Makefile.am
new file mode 100644
index 000000000..2fe85c66c
--- /dev/null
+++ b/src/libstrongswan/plugins/aesni/Makefile.am
@@ -0,0 +1,26 @@
+AM_CPPFLAGS = \
+ -I$(top_srcdir)/src/libstrongswan
+
+AM_CFLAGS = \
+ -maes \
+ -mpclmul \
+ -mssse3 \
+ $(PLUGIN_CFLAGS)
+
+if MONOLITHIC
+noinst_LTLIBRARIES = libstrongswan-aesni.la
+else
+plugin_LTLIBRARIES = libstrongswan-aesni.la
+endif
+
+libstrongswan_aesni_la_SOURCES = \
+ aesni_key.h aesni_key.c \
+ aesni_cbc.h aesni_cbc.c \
+ aesni_ctr.h aesni_ctr.c \
+ aesni_ccm.h aesni_ccm.c \
+ aesni_gcm.h aesni_gcm.c \
+ aesni_xcbc.h aesni_xcbc.c \
+ aesni_cmac.h aesni_cmac.c \
+ aesni_plugin.h aesni_plugin.c
+
+libstrongswan_aesni_la_LDFLAGS = -module -avoid-version
diff --git a/src/libstrongswan/plugins/aesni/Makefile.in b/src/libstrongswan/plugins/aesni/Makefile.in
new file mode 100644
index 000000000..34adaa390
--- /dev/null
+++ b/src/libstrongswan/plugins/aesni/Makefile.in
@@ -0,0 +1,793 @@
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+ case $${target_option-} in \
+ ?) ;; \
+ *) echo "am__make_running_with_option: internal error: invalid" \
+ "target option '$${target_option-}' specified" >&2; \
+ exit 1;; \
+ esac; \
+ has_opt=no; \
+ sane_makeflags=$$MAKEFLAGS; \
+ if $(am__is_gnu_make); then \
+ sane_makeflags=$$MFLAGS; \
+ else \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ bs=\\; \
+ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
+ esac; \
+ fi; \
+ skip_next=no; \
+ strip_trailopt () \
+ { \
+ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+ }; \
+ for flg in $$sane_makeflags; do \
+ test $$skip_next = yes && { skip_next=no; continue; }; \
+ case $$flg in \
+ *=*|--*) continue;; \
+ -*I) strip_trailopt 'I'; skip_next=yes;; \
+ -*I?*) strip_trailopt 'I';; \
+ -*O) strip_trailopt 'O'; skip_next=yes;; \
+ -*O?*) strip_trailopt 'O';; \
+ -*l) strip_trailopt 'l'; skip_next=yes;; \
+ -*l?*) strip_trailopt 'l';; \
+ -[dEDm]) skip_next=yes;; \
+ -[JT]) skip_next=yes;; \
+ esac; \
+ case $$flg in \
+ *$$target_option*) has_opt=yes; break;; \
+ esac; \
+ done; \
+ test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+subdir = src/libstrongswan/plugins/aesni
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
+ $(top_srcdir)/depcomp
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/m4/config/libtool.m4 \
+ $(top_srcdir)/m4/config/ltoptions.m4 \
+ $(top_srcdir)/m4/config/ltsugar.m4 \
+ $(top_srcdir)/m4/config/ltversion.m4 \
+ $(top_srcdir)/m4/config/lt~obsolete.m4 \
+ $(top_srcdir)/m4/macros/split-package-version.m4 \
+ $(top_srcdir)/m4/macros/with.m4 \
+ $(top_srcdir)/m4/macros/enable-disable.m4 \
+ $(top_srcdir)/m4/macros/add-plugin.m4 \
+ $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+ *) f=$$p;; \
+ esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+ if (++n[$$2] == $(am__install_max)) \
+ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+ END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+ test -z "$$files" \
+ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+ $(am__cd) "$$dir" && rm -f $$files; }; \
+ }
+am__installdirs = "$(DESTDIR)$(plugindir)"
+LTLIBRARIES = $(noinst_LTLIBRARIES) $(plugin_LTLIBRARIES)
+libstrongswan_aesni_la_LIBADD =
+am_libstrongswan_aesni_la_OBJECTS = aesni_key.lo aesni_cbc.lo \
+ aesni_ctr.lo aesni_ccm.lo aesni_gcm.lo aesni_xcbc.lo \
+ aesni_cmac.lo aesni_plugin.lo
+libstrongswan_aesni_la_OBJECTS = $(am_libstrongswan_aesni_la_OBJECTS)
+AM_V_lt = $(am__v_lt_@AM_V@)
+am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@)
+am__v_lt_0 = --silent
+am__v_lt_1 =
+libstrongswan_aesni_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC \
+ $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CCLD) \
+ $(AM_CFLAGS) $(CFLAGS) $(libstrongswan_aesni_la_LDFLAGS) \
+ $(LDFLAGS) -o $@
+@MONOLITHIC_FALSE@am_libstrongswan_aesni_la_rpath = -rpath \
+@MONOLITHIC_FALSE@ $(plugindir)
+@MONOLITHIC_TRUE@am_libstrongswan_aesni_la_rpath =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo " GEN " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 =
+DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir)
+depcomp = $(SHELL) $(top_srcdir)/depcomp
+am__depfiles_maybe = depfiles
+am__mv = mv -f
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \
+ $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \
+ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \
+ $(AM_CFLAGS) $(CFLAGS)
+AM_V_CC = $(am__v_CC_@AM_V@)
+am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@)
+am__v_CC_0 = @echo " CC " $@;
+am__v_CC_1 =
+CCLD = $(CC)
+LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \
+ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
+ $(AM_LDFLAGS) $(LDFLAGS) -o $@
+AM_V_CCLD = $(am__v_CCLD_@AM_V@)
+am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@)
+am__v_CCLD_0 = @echo " CCLD " $@;
+am__v_CCLD_1 =
+SOURCES = $(libstrongswan_aesni_la_SOURCES)
+DIST_SOURCES = $(libstrongswan_aesni_la_SOURCES)
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates. Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+ BEGIN { nonempty = 0; } \
+ { items[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique. This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+ list='$(am__tagged_files)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | $(am__uniquify_input)`
+ETAGS = etags
+CTAGS = ctags
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+ALLOCA = @ALLOCA@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AR = @AR@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+BFDLIB = @BFDLIB@
+BTLIB = @BTLIB@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+COVERAGE_CFLAGS = @COVERAGE_CFLAGS@
+COVERAGE_LDFLAGS = @COVERAGE_LDFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DLLIB = @DLLIB@
+DLLTOOL = @DLLTOOL@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+EASY_INSTALL = @EASY_INSTALL@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+FGREP = @FGREP@
+GEM = @GEM@
+GENHTML = @GENHTML@
+GPERF = @GPERF@
+GPRBUILD = @GPRBUILD@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LCOV = @LCOV@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LEX = @LEX@
+LEXLIB = @LEXLIB@
+LEX_OUTPUT_ROOT = @LEX_OUTPUT_ROOT@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAKEINFO = @MAKEINFO@
+MANIFEST_TOOL = @MANIFEST_TOOL@
+MKDIR_P = @MKDIR_P@
+MYSQLCFLAG = @MYSQLCFLAG@
+MYSQLCONFIG = @MYSQLCONFIG@
+MYSQLLIB = @MYSQLLIB@
+NM = @NM@
+NMEDIT = @NMEDIT@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OPENSSL_LIB = @OPENSSL_LIB@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PACKAGE_VERSION_BUILD = @PACKAGE_VERSION_BUILD@
+PACKAGE_VERSION_MAJOR = @PACKAGE_VERSION_MAJOR@
+PACKAGE_VERSION_MINOR = @PACKAGE_VERSION_MINOR@
+PACKAGE_VERSION_REVIEW = @PACKAGE_VERSION_REVIEW@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PERL = @PERL@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PLUGIN_CFLAGS = @PLUGIN_CFLAGS@
+PTHREADLIB = @PTHREADLIB@
+PYTHON = @PYTHON@
+PYTHONEGGINSTALLDIR = @PYTHONEGGINSTALLDIR@
+PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@
+PYTHON_PLATFORM = @PYTHON_PLATFORM@
+PYTHON_PREFIX = @PYTHON_PREFIX@
+PYTHON_VERSION = @PYTHON_VERSION@
+PY_TEST = @PY_TEST@
+RANLIB = @RANLIB@
+RTLIB = @RTLIB@
+RUBY = @RUBY@
+RUBYGEMDIR = @RUBYGEMDIR@
+RUBYINCLUDE = @RUBYINCLUDE@
+RUBYLIB = @RUBYLIB@
+SED = @SED@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SOCKLIB = @SOCKLIB@
+STRIP = @STRIP@
+UNWINDLIB = @UNWINDLIB@
+VERSION = @VERSION@
+YACC = @YACC@
+YFLAGS = @YFLAGS@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_AR = @ac_ct_AR@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+aikgen_plugins = @aikgen_plugins@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+attest_plugins = @attest_plugins@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+c_plugins = @c_plugins@
+charon_natt_port = @charon_natt_port@
+charon_plugins = @charon_plugins@
+charon_udp_port = @charon_udp_port@
+clearsilver_LIBS = @clearsilver_LIBS@
+cmd_plugins = @cmd_plugins@
+datadir = @datadir@
+datarootdir = @datarootdir@
+dbusservicedir = @dbusservicedir@
+dev_headers = @dev_headers@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+fips_mode = @fips_mode@
+gtk_CFLAGS = @gtk_CFLAGS@
+gtk_LIBS = @gtk_LIBS@
+h_plugins = @h_plugins@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+imcvdir = @imcvdir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+ipsec_script = @ipsec_script@
+ipsec_script_upper = @ipsec_script_upper@
+ipsecdir = @ipsecdir@
+ipsecgroup = @ipsecgroup@
+ipseclibdir = @ipseclibdir@
+ipsecuser = @ipsecuser@
+json_CFLAGS = @json_CFLAGS@
+json_LIBS = @json_LIBS@
+libdir = @libdir@
+libexecdir = @libexecdir@
+libiptc_CFLAGS = @libiptc_CFLAGS@
+libiptc_LIBS = @libiptc_LIBS@
+linux_headers = @linux_headers@
+localedir = @localedir@
+localstatedir = @localstatedir@
+maemo_CFLAGS = @maemo_CFLAGS@
+maemo_LIBS = @maemo_LIBS@
+manager_plugins = @manager_plugins@
+mandir = @mandir@
+medsrv_plugins = @medsrv_plugins@
+mkdir_p = @mkdir_p@
+nm_CFLAGS = @nm_CFLAGS@
+nm_LIBS = @nm_LIBS@
+nm_ca_dir = @nm_ca_dir@
+nm_plugins = @nm_plugins@
+oldincludedir = @oldincludedir@
+pcsclite_CFLAGS = @pcsclite_CFLAGS@
+pcsclite_LIBS = @pcsclite_LIBS@
+pdfdir = @pdfdir@
+piddir = @piddir@
+pkgpyexecdir = @pkgpyexecdir@
+pkgpythondir = @pkgpythondir@
+pki_plugins = @pki_plugins@
+plugindir = @plugindir@
+pool_plugins = @pool_plugins@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+pyexecdir = @pyexecdir@
+pythondir = @pythondir@
+random_device = @random_device@
+resolv_conf = @resolv_conf@
+routing_table = @routing_table@
+routing_table_prio = @routing_table_prio@
+s_plugins = @s_plugins@
+sbindir = @sbindir@
+scepclient_plugins = @scepclient_plugins@
+scripts_plugins = @scripts_plugins@
+sharedstatedir = @sharedstatedir@
+soup_CFLAGS = @soup_CFLAGS@
+soup_LIBS = @soup_LIBS@
+srcdir = @srcdir@
+starter_plugins = @starter_plugins@
+strongswan_conf = @strongswan_conf@
+strongswan_options = @strongswan_options@
+swanctldir = @swanctldir@
+sysconfdir = @sysconfdir@
+systemd_daemon_CFLAGS = @systemd_daemon_CFLAGS@
+systemd_daemon_LIBS = @systemd_daemon_LIBS@
+systemd_journal_CFLAGS = @systemd_journal_CFLAGS@
+systemd_journal_LIBS = @systemd_journal_LIBS@
+systemdsystemunitdir = @systemdsystemunitdir@
+t_plugins = @t_plugins@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+urandom_device = @urandom_device@
+xml_CFLAGS = @xml_CFLAGS@
+xml_LIBS = @xml_LIBS@
+AM_CPPFLAGS = \
+ -I$(top_srcdir)/src/libstrongswan
+
+AM_CFLAGS = \
+ -maes \
+ -mpclmul \
+ -mssse3 \
+ $(PLUGIN_CFLAGS)
+
+@MONOLITHIC_TRUE@noinst_LTLIBRARIES = libstrongswan-aesni.la
+@MONOLITHIC_FALSE@plugin_LTLIBRARIES = libstrongswan-aesni.la
+libstrongswan_aesni_la_SOURCES = \
+ aesni_key.h aesni_key.c \
+ aesni_cbc.h aesni_cbc.c \
+ aesni_ctr.h aesni_ctr.c \
+ aesni_ccm.h aesni_ccm.c \
+ aesni_gcm.h aesni_gcm.c \
+ aesni_xcbc.h aesni_xcbc.c \
+ aesni_cmac.h aesni_cmac.c \
+ aesni_plugin.h aesni_plugin.c
+
+libstrongswan_aesni_la_LDFLAGS = -module -avoid-version
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .c .lo .o .obj
+$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu src/libstrongswan/plugins/aesni/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu src/libstrongswan/plugins/aesni/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+clean-noinstLTLIBRARIES:
+ -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES)
+ @list='$(noinst_LTLIBRARIES)'; \
+ locs=`for p in $$list; do echo $$p; done | \
+ sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \
+ sort -u`; \
+ test -z "$$locs" || { \
+ echo rm -f $${locs}; \
+ rm -f $${locs}; \
+ }
+
+install-pluginLTLIBRARIES: $(plugin_LTLIBRARIES)
+ @$(NORMAL_INSTALL)
+ @list='$(plugin_LTLIBRARIES)'; test -n "$(plugindir)" || list=; \
+ list2=; for p in $$list; do \
+ if test -f $$p; then \
+ list2="$$list2 $$p"; \
+ else :; fi; \
+ done; \
+ test -z "$$list2" || { \
+ echo " $(MKDIR_P) '$(DESTDIR)$(plugindir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(plugindir)" || exit 1; \
+ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(plugindir)'"; \
+ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(plugindir)"; \
+ }
+
+uninstall-pluginLTLIBRARIES:
+ @$(NORMAL_UNINSTALL)
+ @list='$(plugin_LTLIBRARIES)'; test -n "$(plugindir)" || list=; \
+ for p in $$list; do \
+ $(am__strip_dir) \
+ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(plugindir)/$$f'"; \
+ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(plugindir)/$$f"; \
+ done
+
+clean-pluginLTLIBRARIES:
+ -test -z "$(plugin_LTLIBRARIES)" || rm -f $(plugin_LTLIBRARIES)
+ @list='$(plugin_LTLIBRARIES)'; \
+ locs=`for p in $$list; do echo $$p; done | \
+ sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \
+ sort -u`; \
+ test -z "$$locs" || { \
+ echo rm -f $${locs}; \
+ rm -f $${locs}; \
+ }
+
+libstrongswan-aesni.la: $(libstrongswan_aesni_la_OBJECTS) $(libstrongswan_aesni_la_DEPENDENCIES) $(EXTRA_libstrongswan_aesni_la_DEPENDENCIES)
+ $(AM_V_CCLD)$(libstrongswan_aesni_la_LINK) $(am_libstrongswan_aesni_la_rpath) $(libstrongswan_aesni_la_OBJECTS) $(libstrongswan_aesni_la_LIBADD) $(LIBS)
+
+mostlyclean-compile:
+ -rm -f *.$(OBJEXT)
+
+distclean-compile:
+ -rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/aesni_cbc.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/aesni_ccm.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/aesni_cmac.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/aesni_ctr.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/aesni_gcm.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/aesni_key.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/aesni_plugin.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/aesni_xcbc.Plo@am__quote@
+
+.c.o:
+@am__fastdepCC_TRUE@ $(AM_V_CC)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.o$$||'`;\
+@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\
+@am__fastdepCC_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ $<
+
+.c.obj:
+@am__fastdepCC_TRUE@ $(AM_V_CC)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.obj$$||'`;\
+@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ `$(CYGPATH_W) '$<'` &&\
+@am__fastdepCC_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
+
+.c.lo:
+@am__fastdepCC_TRUE@ $(AM_V_CC)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.lo$$||'`;\
+@am__fastdepCC_TRUE@ $(LTCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\
+@am__fastdepCC_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LTCOMPILE) -c -o $@ $<
+
+mostlyclean-libtool:
+ -rm -f *.lo
+
+clean-libtool:
+ -rm -rf .libs _libs
+
+ID: $(am__tagged_files)
+ $(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-am
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ set x; \
+ here=`pwd`; \
+ $(am__define_uniq_tagged_files); \
+ shift; \
+ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ if test $$# -gt 0; then \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ "$$@" $$unique; \
+ else \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$unique; \
+ fi; \
+ fi
+ctags: ctags-am
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+ $(am__define_uniq_tagged_files); \
+ test -z "$(CTAGS_ARGS)$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && $(am__cd) $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) "$$here"
+cscopelist: cscopelist-am
+
+cscopelist-am: $(am__tagged_files)
+ list='$(am__tagged_files)'; \
+ case "$(srcdir)" in \
+ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+ *) sdir=$(subdir)/$(srcdir) ;; \
+ esac; \
+ for i in $$list; do \
+ if test -f "$$i"; then \
+ echo "$(subdir)/$$i"; \
+ else \
+ echo "$$sdir/$$i"; \
+ fi; \
+ done >> $(top_builddir)/cscope.files
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(LTLIBRARIES)
+installdirs:
+ for dir in "$(DESTDIR)$(plugindir)"; do \
+ test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+ done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \
+ clean-pluginLTLIBRARIES mostlyclean-am
+
+distclean: distclean-am
+ -rm -rf ./$(DEPDIR)
+ -rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+ distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am: install-pluginLTLIBRARIES
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -rf ./$(DEPDIR)
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic \
+ mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-pluginLTLIBRARIES
+
+.MAKE: install-am install-strip
+
+.PHONY: CTAGS GTAGS TAGS all all-am check check-am clean clean-generic \
+ clean-libtool clean-noinstLTLIBRARIES clean-pluginLTLIBRARIES \
+ cscopelist-am ctags ctags-am distclean distclean-compile \
+ distclean-generic distclean-libtool distclean-tags distdir dvi \
+ dvi-am html html-am info info-am install install-am \
+ install-data install-data-am install-dvi install-dvi-am \
+ install-exec install-exec-am install-html install-html-am \
+ install-info install-info-am install-man install-pdf \
+ install-pdf-am install-pluginLTLIBRARIES install-ps \
+ install-ps-am install-strip installcheck installcheck-am \
+ installdirs maintainer-clean maintainer-clean-generic \
+ mostlyclean mostlyclean-compile mostlyclean-generic \
+ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-am uninstall \
+ uninstall-am uninstall-pluginLTLIBRARIES
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/src/libstrongswan/plugins/aesni/aesni_cbc.c b/src/libstrongswan/plugins/aesni/aesni_cbc.c
new file mode 100644
index 000000000..78ada7663
--- /dev/null
+++ b/src/libstrongswan/plugins/aesni/aesni_cbc.c
@@ -0,0 +1,671 @@
+/*
+ * Copyright (C) 2015 Martin Willi
+ * Copyright (C) 2015 revosec AG
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include "aesni_cbc.h"
+#include "aesni_key.h"
+
+/**
+ * Pipeline parallelism we use for CBC decryption
+ */
+#define CBC_DECRYPT_PARALLELISM 4
+
+typedef struct private_aesni_cbc_t private_aesni_cbc_t;
+
+/**
+ * CBC en/decryption method type
+ */
+typedef void (*aesni_cbc_fn_t)(aesni_key_t*, u_int, u_char*, u_char*, u_char*);
+
+/**
+ * Private data of an aesni_cbc_t object.
+ */
+struct private_aesni_cbc_t {
+
+ /**
+ * Public aesni_cbc_t interface.
+ */
+ aesni_cbc_t public;
+
+ /**
+ * Key size
+ */
+ u_int key_size;
+
+ /**
+ * Encryption key schedule
+ */
+ aesni_key_t *ekey;
+
+ /**
+ * Decryption key schedule
+ */
+ aesni_key_t *dkey;
+
+ /**
+ * Encryption method
+ */
+ aesni_cbc_fn_t encrypt;
+
+ /**
+ * Decryption method
+ */
+ aesni_cbc_fn_t decrypt;
+};
+
+/**
+ * AES-128 CBC encryption
+ */
+static void encrypt_cbc128(aesni_key_t *key, u_int blocks, u_char *in,
+ u_char *iv, u_char *out)
+{
+ __m128i *ks, t, fb, *bi, *bo;
+ int i;
+
+ ks = key->schedule;
+ bi = (__m128i*)in;
+ bo = (__m128i*)out;
+
+ fb = _mm_loadu_si128((__m128i*)iv);
+ for (i = 0; i < blocks; i++)
+ {
+ t = _mm_loadu_si128(bi + i);
+ fb = _mm_xor_si128(t, fb);
+ fb = _mm_xor_si128(fb, ks[0]);
+
+ fb = _mm_aesenc_si128(fb, ks[1]);
+ fb = _mm_aesenc_si128(fb, ks[2]);
+ fb = _mm_aesenc_si128(fb, ks[3]);
+ fb = _mm_aesenc_si128(fb, ks[4]);
+ fb = _mm_aesenc_si128(fb, ks[5]);
+ fb = _mm_aesenc_si128(fb, ks[6]);
+ fb = _mm_aesenc_si128(fb, ks[7]);
+ fb = _mm_aesenc_si128(fb, ks[8]);
+ fb = _mm_aesenc_si128(fb, ks[9]);
+
+ fb = _mm_aesenclast_si128(fb, ks[10]);
+ _mm_storeu_si128(bo + i, fb);
+ }
+}
+
+/**
+ * AES-128 CBC decryption
+ */
+static void decrypt_cbc128(aesni_key_t *key, u_int blocks, u_char *in,
+ u_char *iv, u_char *out)
+{
+ __m128i *ks, last, *bi, *bo;
+ __m128i t1, t2, t3, t4;
+ __m128i f1, f2, f3, f4;
+ u_int i, pblocks;
+
+ ks = key->schedule;
+ bi = (__m128i*)in;
+ bo = (__m128i*)out;
+ pblocks = blocks - (blocks % CBC_DECRYPT_PARALLELISM);
+
+ f1 = _mm_loadu_si128((__m128i*)iv);
+
+ for (i = 0; i < pblocks; i += CBC_DECRYPT_PARALLELISM)
+ {
+ t1 = _mm_loadu_si128(bi + i + 0);
+ t2 = _mm_loadu_si128(bi + i + 1);
+ t3 = _mm_loadu_si128(bi + i + 2);
+ t4 = _mm_loadu_si128(bi + i + 3);
+
+ f2 = t1;
+ f3 = t2;
+ f4 = t3;
+ last = t4;
+
+ t1 = _mm_xor_si128(t1, ks[0]);
+ t2 = _mm_xor_si128(t2, ks[0]);
+ t3 = _mm_xor_si128(t3, ks[0]);
+ t4 = _mm_xor_si128(t4, ks[0]);
+
+ t1 = _mm_aesdec_si128(t1, ks[1]);
+ t2 = _mm_aesdec_si128(t2, ks[1]);
+ t3 = _mm_aesdec_si128(t3, ks[1]);
+ t4 = _mm_aesdec_si128(t4, ks[1]);
+ t1 = _mm_aesdec_si128(t1, ks[2]);
+ t2 = _mm_aesdec_si128(t2, ks[2]);
+ t3 = _mm_aesdec_si128(t3, ks[2]);
+ t4 = _mm_aesdec_si128(t4, ks[2]);
+ t1 = _mm_aesdec_si128(t1, ks[3]);
+ t2 = _mm_aesdec_si128(t2, ks[3]);
+ t3 = _mm_aesdec_si128(t3, ks[3]);
+ t4 = _mm_aesdec_si128(t4, ks[3]);
+ t1 = _mm_aesdec_si128(t1, ks[4]);
+ t2 = _mm_aesdec_si128(t2, ks[4]);
+ t3 = _mm_aesdec_si128(t3, ks[4]);
+ t4 = _mm_aesdec_si128(t4, ks[4]);
+ t1 = _mm_aesdec_si128(t1, ks[5]);
+ t2 = _mm_aesdec_si128(t2, ks[5]);
+ t3 = _mm_aesdec_si128(t3, ks[5]);
+ t4 = _mm_aesdec_si128(t4, ks[5]);
+ t1 = _mm_aesdec_si128(t1, ks[6]);
+ t2 = _mm_aesdec_si128(t2, ks[6]);
+ t3 = _mm_aesdec_si128(t3, ks[6]);
+ t4 = _mm_aesdec_si128(t4, ks[6]);
+ t1 = _mm_aesdec_si128(t1, ks[7]);
+ t2 = _mm_aesdec_si128(t2, ks[7]);
+ t3 = _mm_aesdec_si128(t3, ks[7]);
+ t4 = _mm_aesdec_si128(t4, ks[7]);
+ t1 = _mm_aesdec_si128(t1, ks[8]);
+ t2 = _mm_aesdec_si128(t2, ks[8]);
+ t3 = _mm_aesdec_si128(t3, ks[8]);
+ t4 = _mm_aesdec_si128(t4, ks[8]);
+ t1 = _mm_aesdec_si128(t1, ks[9]);
+ t2 = _mm_aesdec_si128(t2, ks[9]);
+ t3 = _mm_aesdec_si128(t3, ks[9]);
+ t4 = _mm_aesdec_si128(t4, ks[9]);
+
+ t1 = _mm_aesdeclast_si128(t1, ks[10]);
+ t2 = _mm_aesdeclast_si128(t2, ks[10]);
+ t3 = _mm_aesdeclast_si128(t3, ks[10]);
+ t4 = _mm_aesdeclast_si128(t4, ks[10]);
+ t1 = _mm_xor_si128(t1, f1);
+ t2 = _mm_xor_si128(t2, f2);
+ t3 = _mm_xor_si128(t3, f3);
+ t4 = _mm_xor_si128(t4, f4);
+ _mm_storeu_si128(bo + i + 0, t1);
+ _mm_storeu_si128(bo + i + 1, t2);
+ _mm_storeu_si128(bo + i + 2, t3);
+ _mm_storeu_si128(bo + i + 3, t4);
+ f1 = last;
+ }
+
+ for (i = pblocks; i < blocks; i++)
+ {
+ last = _mm_loadu_si128(bi + i);
+ t1 = _mm_xor_si128(last, ks[0]);
+
+ t1 = _mm_aesdec_si128(t1, ks[1]);
+ t1 = _mm_aesdec_si128(t1, ks[2]);
+ t1 = _mm_aesdec_si128(t1, ks[3]);
+ t1 = _mm_aesdec_si128(t1, ks[4]);
+ t1 = _mm_aesdec_si128(t1, ks[5]);
+ t1 = _mm_aesdec_si128(t1, ks[6]);
+ t1 = _mm_aesdec_si128(t1, ks[7]);
+ t1 = _mm_aesdec_si128(t1, ks[8]);
+ t1 = _mm_aesdec_si128(t1, ks[9]);
+
+ t1 = _mm_aesdeclast_si128(t1, ks[10]);
+ t1 = _mm_xor_si128(t1, f1);
+ _mm_storeu_si128(bo + i, t1);
+ f1 = last;
+ }
+}
+
+/**
+ * AES-192 CBC encryption
+ */
+static void encrypt_cbc192(aesni_key_t *key, u_int blocks, u_char *in,
+ u_char *iv, u_char *out)
+{
+ __m128i *ks, t, fb, *bi, *bo;
+ int i;
+
+ ks = key->schedule;
+ bi = (__m128i*)in;
+ bo = (__m128i*)out;
+
+ fb = _mm_loadu_si128((__m128i*)iv);
+ for (i = 0; i < blocks; i++)
+ {
+ t = _mm_loadu_si128(bi + i);
+ fb = _mm_xor_si128(t, fb);
+ fb = _mm_xor_si128(fb, ks[0]);
+
+ fb = _mm_aesenc_si128(fb, ks[1]);
+ fb = _mm_aesenc_si128(fb, ks[2]);
+ fb = _mm_aesenc_si128(fb, ks[3]);
+ fb = _mm_aesenc_si128(fb, ks[4]);
+ fb = _mm_aesenc_si128(fb, ks[5]);
+ fb = _mm_aesenc_si128(fb, ks[6]);
+ fb = _mm_aesenc_si128(fb, ks[7]);
+ fb = _mm_aesenc_si128(fb, ks[8]);
+ fb = _mm_aesenc_si128(fb, ks[9]);
+ fb = _mm_aesenc_si128(fb, ks[10]);
+ fb = _mm_aesenc_si128(fb, ks[11]);
+
+ fb = _mm_aesenclast_si128(fb, ks[12]);
+ _mm_storeu_si128(bo + i, fb);
+ }
+}
+
+/**
+ * AES-192 CBC decryption
+ */
+static void decrypt_cbc192(aesni_key_t *key, u_int blocks, u_char *in,
+ u_char *iv, u_char *out)
+{
+ __m128i *ks, last, *bi, *bo;
+ __m128i t1, t2, t3, t4;
+ __m128i f1, f2, f3, f4;
+ u_int i, pblocks;
+
+ ks = key->schedule;
+ bi = (__m128i*)in;
+ bo = (__m128i*)out;
+ pblocks = blocks - (blocks % CBC_DECRYPT_PARALLELISM);
+
+ f1 = _mm_loadu_si128((__m128i*)iv);
+
+ for (i = 0; i < pblocks; i += CBC_DECRYPT_PARALLELISM)
+ {
+ t1 = _mm_loadu_si128(bi + i + 0);
+ t2 = _mm_loadu_si128(bi + i + 1);
+ t3 = _mm_loadu_si128(bi + i + 2);
+ t4 = _mm_loadu_si128(bi + i + 3);
+
+ f2 = t1;
+ f3 = t2;
+ f4 = t3;
+ last = t4;
+
+ t1 = _mm_xor_si128(t1, ks[0]);
+ t2 = _mm_xor_si128(t2, ks[0]);
+ t3 = _mm_xor_si128(t3, ks[0]);
+ t4 = _mm_xor_si128(t4, ks[0]);
+
+ t1 = _mm_aesdec_si128(t1, ks[1]);
+ t2 = _mm_aesdec_si128(t2, ks[1]);
+ t3 = _mm_aesdec_si128(t3, ks[1]);
+ t4 = _mm_aesdec_si128(t4, ks[1]);
+ t1 = _mm_aesdec_si128(t1, ks[2]);
+ t2 = _mm_aesdec_si128(t2, ks[2]);
+ t3 = _mm_aesdec_si128(t3, ks[2]);
+ t4 = _mm_aesdec_si128(t4, ks[2]);
+ t1 = _mm_aesdec_si128(t1, ks[3]);
+ t2 = _mm_aesdec_si128(t2, ks[3]);
+ t3 = _mm_aesdec_si128(t3, ks[3]);
+ t4 = _mm_aesdec_si128(t4, ks[3]);
+ t1 = _mm_aesdec_si128(t1, ks[4]);
+ t2 = _mm_aesdec_si128(t2, ks[4]);
+ t3 = _mm_aesdec_si128(t3, ks[4]);
+ t4 = _mm_aesdec_si128(t4, ks[4]);
+ t1 = _mm_aesdec_si128(t1, ks[5]);
+ t2 = _mm_aesdec_si128(t2, ks[5]);
+ t3 = _mm_aesdec_si128(t3, ks[5]);
+ t4 = _mm_aesdec_si128(t4, ks[5]);
+ t1 = _mm_aesdec_si128(t1, ks[6]);
+ t2 = _mm_aesdec_si128(t2, ks[6]);
+ t3 = _mm_aesdec_si128(t3, ks[6]);
+ t4 = _mm_aesdec_si128(t4, ks[6]);
+ t1 = _mm_aesdec_si128(t1, ks[7]);
+ t2 = _mm_aesdec_si128(t2, ks[7]);
+ t3 = _mm_aesdec_si128(t3, ks[7]);
+ t4 = _mm_aesdec_si128(t4, ks[7]);
+ t1 = _mm_aesdec_si128(t1, ks[8]);
+ t2 = _mm_aesdec_si128(t2, ks[8]);
+ t3 = _mm_aesdec_si128(t3, ks[8]);
+ t4 = _mm_aesdec_si128(t4, ks[8]);
+ t1 = _mm_aesdec_si128(t1, ks[9]);
+ t2 = _mm_aesdec_si128(t2, ks[9]);
+ t3 = _mm_aesdec_si128(t3, ks[9]);
+ t4 = _mm_aesdec_si128(t4, ks[9]);
+ t1 = _mm_aesdec_si128(t1, ks[10]);
+ t2 = _mm_aesdec_si128(t2, ks[10]);
+ t3 = _mm_aesdec_si128(t3, ks[10]);
+ t4 = _mm_aesdec_si128(t4, ks[10]);
+ t1 = _mm_aesdec_si128(t1, ks[11]);
+ t2 = _mm_aesdec_si128(t2, ks[11]);
+ t3 = _mm_aesdec_si128(t3, ks[11]);
+ t4 = _mm_aesdec_si128(t4, ks[11]);
+
+ t1 = _mm_aesdeclast_si128(t1, ks[12]);
+ t2 = _mm_aesdeclast_si128(t2, ks[12]);
+ t3 = _mm_aesdeclast_si128(t3, ks[12]);
+ t4 = _mm_aesdeclast_si128(t4, ks[12]);
+ t1 = _mm_xor_si128(t1, f1);
+ t2 = _mm_xor_si128(t2, f2);
+ t3 = _mm_xor_si128(t3, f3);
+ t4 = _mm_xor_si128(t4, f4);
+ _mm_storeu_si128(bo + i + 0, t1);
+ _mm_storeu_si128(bo + i + 1, t2);
+ _mm_storeu_si128(bo + i + 2, t3);
+ _mm_storeu_si128(bo + i + 3, t4);
+ f1 = last;
+ }
+
+ for (i = pblocks; i < blocks; i++)
+ {
+ last = _mm_loadu_si128(bi + i);
+ t1 = _mm_xor_si128(last, ks[0]);
+
+ t1 = _mm_aesdec_si128(t1, ks[1]);
+ t1 = _mm_aesdec_si128(t1, ks[2]);
+ t1 = _mm_aesdec_si128(t1, ks[3]);
+ t1 = _mm_aesdec_si128(t1, ks[4]);
+ t1 = _mm_aesdec_si128(t1, ks[5]);
+ t1 = _mm_aesdec_si128(t1, ks[6]);
+ t1 = _mm_aesdec_si128(t1, ks[7]);
+ t1 = _mm_aesdec_si128(t1, ks[8]);
+ t1 = _mm_aesdec_si128(t1, ks[9]);
+ t1 = _mm_aesdec_si128(t1, ks[10]);
+ t1 = _mm_aesdec_si128(t1, ks[11]);
+
+ t1 = _mm_aesdeclast_si128(t1, ks[12]);
+ t1 = _mm_xor_si128(t1, f1);
+ _mm_storeu_si128(bo + i, t1);
+ f1 = last;
+ }
+}
+
+/**
+ * AES-256 CBC encryption
+ */
+static void encrypt_cbc256(aesni_key_t *key, u_int blocks, u_char *in,
+ u_char *iv, u_char *out)
+{
+ __m128i *ks, t, fb, *bi, *bo;
+ int i;
+
+ ks = key->schedule;
+ bi = (__m128i*)in;
+ bo = (__m128i*)out;
+
+ fb = _mm_loadu_si128((__m128i*)iv);
+ for (i = 0; i < blocks; i++)
+ {
+ t = _mm_loadu_si128(bi + i);
+ fb = _mm_xor_si128(t, fb);
+ fb = _mm_xor_si128(fb, ks[0]);
+
+ fb = _mm_aesenc_si128(fb, ks[1]);
+ fb = _mm_aesenc_si128(fb, ks[2]);
+ fb = _mm_aesenc_si128(fb, ks[3]);
+ fb = _mm_aesenc_si128(fb, ks[4]);
+ fb = _mm_aesenc_si128(fb, ks[5]);
+ fb = _mm_aesenc_si128(fb, ks[6]);
+ fb = _mm_aesenc_si128(fb, ks[7]);
+ fb = _mm_aesenc_si128(fb, ks[8]);
+ fb = _mm_aesenc_si128(fb, ks[9]);
+ fb = _mm_aesenc_si128(fb, ks[10]);
+ fb = _mm_aesenc_si128(fb, ks[11]);
+ fb = _mm_aesenc_si128(fb, ks[12]);
+ fb = _mm_aesenc_si128(fb, ks[13]);
+
+ fb = _mm_aesenclast_si128(fb, ks[14]);
+ _mm_storeu_si128(bo + i, fb);
+ }
+}
+
+/**
+ * AES-256 CBC decryption
+ */
+static void decrypt_cbc256(aesni_key_t *key, u_int blocks, u_char *in,
+ u_char *iv, u_char *out)
+{
+ __m128i *ks, last, *bi, *bo;
+ __m128i t1, t2, t3, t4;
+ __m128i f1, f2, f3, f4;
+ u_int i, pblocks;
+
+ ks = key->schedule;
+ bi = (__m128i*)in;
+ bo = (__m128i*)out;
+ pblocks = blocks - (blocks % CBC_DECRYPT_PARALLELISM);
+
+ f1 = _mm_loadu_si128((__m128i*)iv);
+
+ for (i = 0; i < pblocks; i += CBC_DECRYPT_PARALLELISM)
+ {
+ t1 = _mm_loadu_si128(bi + i + 0);
+ t2 = _mm_loadu_si128(bi + i + 1);
+ t3 = _mm_loadu_si128(bi + i + 2);
+ t4 = _mm_loadu_si128(bi + i + 3);
+
+ f2 = t1;
+ f3 = t2;
+ f4 = t3;
+ last = t4;
+
+ t1 = _mm_xor_si128(t1, ks[0]);
+ t2 = _mm_xor_si128(t2, ks[0]);
+ t3 = _mm_xor_si128(t3, ks[0]);
+ t4 = _mm_xor_si128(t4, ks[0]);
+
+ t1 = _mm_aesdec_si128(t1, ks[1]);
+ t2 = _mm_aesdec_si128(t2, ks[1]);
+ t3 = _mm_aesdec_si128(t3, ks[1]);
+ t4 = _mm_aesdec_si128(t4, ks[1]);
+ t1 = _mm_aesdec_si128(t1, ks[2]);
+ t2 = _mm_aesdec_si128(t2, ks[2]);
+ t3 = _mm_aesdec_si128(t3, ks[2]);
+ t4 = _mm_aesdec_si128(t4, ks[2]);
+ t1 = _mm_aesdec_si128(t1, ks[3]);
+ t2 = _mm_aesdec_si128(t2, ks[3]);
+ t3 = _mm_aesdec_si128(t3, ks[3]);
+ t4 = _mm_aesdec_si128(t4, ks[3]);
+ t1 = _mm_aesdec_si128(t1, ks[4]);
+ t2 = _mm_aesdec_si128(t2, ks[4]);
+ t3 = _mm_aesdec_si128(t3, ks[4]);
+ t4 = _mm_aesdec_si128(t4, ks[4]);
+ t1 = _mm_aesdec_si128(t1, ks[5]);
+ t2 = _mm_aesdec_si128(t2, ks[5]);
+ t3 = _mm_aesdec_si128(t3, ks[5]);
+ t4 = _mm_aesdec_si128(t4, ks[5]);
+ t1 = _mm_aesdec_si128(t1, ks[6]);
+ t2 = _mm_aesdec_si128(t2, ks[6]);
+ t3 = _mm_aesdec_si128(t3, ks[6]);
+ t4 = _mm_aesdec_si128(t4, ks[6]);
+ t1 = _mm_aesdec_si128(t1, ks[7]);
+ t2 = _mm_aesdec_si128(t2, ks[7]);
+ t3 = _mm_aesdec_si128(t3, ks[7]);
+ t4 = _mm_aesdec_si128(t4, ks[7]);
+ t1 = _mm_aesdec_si128(t1, ks[8]);
+ t2 = _mm_aesdec_si128(t2, ks[8]);
+ t3 = _mm_aesdec_si128(t3, ks[8]);
+ t4 = _mm_aesdec_si128(t4, ks[8]);
+ t1 = _mm_aesdec_si128(t1, ks[9]);
+ t2 = _mm_aesdec_si128(t2, ks[9]);
+ t3 = _mm_aesdec_si128(t3, ks[9]);
+ t4 = _mm_aesdec_si128(t4, ks[9]);
+ t1 = _mm_aesdec_si128(t1, ks[10]);
+ t2 = _mm_aesdec_si128(t2, ks[10]);
+ t3 = _mm_aesdec_si128(t3, ks[10]);
+ t4 = _mm_aesdec_si128(t4, ks[10]);
+ t1 = _mm_aesdec_si128(t1, ks[11]);
+ t2 = _mm_aesdec_si128(t2, ks[11]);
+ t3 = _mm_aesdec_si128(t3, ks[11]);
+ t4 = _mm_aesdec_si128(t4, ks[11]);
+ t1 = _mm_aesdec_si128(t1, ks[12]);
+ t2 = _mm_aesdec_si128(t2, ks[12]);
+ t3 = _mm_aesdec_si128(t3, ks[12]);
+ t4 = _mm_aesdec_si128(t4, ks[12]);
+ t1 = _mm_aesdec_si128(t1, ks[13]);
+ t2 = _mm_aesdec_si128(t2, ks[13]);
+ t3 = _mm_aesdec_si128(t3, ks[13]);
+ t4 = _mm_aesdec_si128(t4, ks[13]);
+
+ t1 = _mm_aesdeclast_si128(t1, ks[14]);
+ t2 = _mm_aesdeclast_si128(t2, ks[14]);
+ t3 = _mm_aesdeclast_si128(t3, ks[14]);
+ t4 = _mm_aesdeclast_si128(t4, ks[14]);
+ t1 = _mm_xor_si128(t1, f1);
+ t2 = _mm_xor_si128(t2, f2);
+ t3 = _mm_xor_si128(t3, f3);
+ t4 = _mm_xor_si128(t4, f4);
+ _mm_storeu_si128(bo + i + 0, t1);
+ _mm_storeu_si128(bo + i + 1, t2);
+ _mm_storeu_si128(bo + i + 2, t3);
+ _mm_storeu_si128(bo + i + 3, t4);
+ f1 = last;
+ }
+
+ for (i = pblocks; i < blocks; i++)
+ {
+ last = _mm_loadu_si128(bi + i);
+ t1 = _mm_xor_si128(last, ks[0]);
+
+ t1 = _mm_aesdec_si128(t1, ks[1]);
+ t1 = _mm_aesdec_si128(t1, ks[2]);
+ t1 = _mm_aesdec_si128(t1, ks[3]);
+ t1 = _mm_aesdec_si128(t1, ks[4]);
+ t1 = _mm_aesdec_si128(t1, ks[5]);
+ t1 = _mm_aesdec_si128(t1, ks[6]);
+ t1 = _mm_aesdec_si128(t1, ks[7]);
+ t1 = _mm_aesdec_si128(t1, ks[8]);
+ t1 = _mm_aesdec_si128(t1, ks[9]);
+ t1 = _mm_aesdec_si128(t1, ks[10]);
+ t1 = _mm_aesdec_si128(t1, ks[11]);
+ t1 = _mm_aesdec_si128(t1, ks[12]);
+ t1 = _mm_aesdec_si128(t1, ks[13]);
+
+ t1 = _mm_aesdeclast_si128(t1, ks[14]);
+ t1 = _mm_xor_si128(t1, f1);
+ _mm_storeu_si128(bo + i, t1);
+ f1 = last;
+ }
+}
+
+/**
+ * Do inline or allocated de/encryption using key schedule
+ */
+static bool crypt(aesni_cbc_fn_t fn, aesni_key_t *key,
+ chunk_t data, chunk_t iv, chunk_t *out)
+{
+ u_char *buf;
+
+ if (!key || iv.len != AES_BLOCK_SIZE || data.len % AES_BLOCK_SIZE)
+ {
+ return FALSE;
+ }
+ if (out)
+ {
+ *out = chunk_alloc(data.len);
+ buf = out->ptr;
+ }
+ else
+ {
+ buf = data.ptr;
+ }
+ fn(key, data.len / AES_BLOCK_SIZE, data.ptr, iv.ptr, buf);
+ return TRUE;
+}
+
+METHOD(crypter_t, encrypt, bool,
+ private_aesni_cbc_t *this, chunk_t data, chunk_t iv, chunk_t *encrypted)
+{
+ return crypt(this->encrypt, this->ekey, data, iv, encrypted);
+}
+
+METHOD(crypter_t, decrypt, bool,
+ private_aesni_cbc_t *this, chunk_t data, chunk_t iv, chunk_t *decrypted)
+{
+ return crypt(this->decrypt, this->dkey, data, iv, decrypted);
+}
+
+METHOD(crypter_t, get_block_size, size_t,
+ private_aesni_cbc_t *this)
+{
+ return AES_BLOCK_SIZE;
+}
+
+METHOD(crypter_t, get_iv_size, size_t,
+ private_aesni_cbc_t *this)
+{
+ return AES_BLOCK_SIZE;
+}
+
+METHOD(crypter_t, get_key_size, size_t,
+ private_aesni_cbc_t *this)
+{
+ return this->key_size;
+}
+
+METHOD(crypter_t, set_key, bool,
+ private_aesni_cbc_t *this, chunk_t key)
+{
+ if (key.len != this->key_size)
+ {
+ return FALSE;
+ }
+
+ DESTROY_IF(this->ekey);
+ DESTROY_IF(this->dkey);
+
+ this->ekey = aesni_key_create(TRUE, key);
+ this->dkey = aesni_key_create(FALSE, key);
+
+ return this->ekey && this->dkey;
+}
+
+METHOD(crypter_t, destroy, void,
+ private_aesni_cbc_t *this)
+{
+ DESTROY_IF(this->ekey);
+ DESTROY_IF(this->dkey);
+ free_align(this);
+}
+
+/**
+ * See header
+ */
+aesni_cbc_t *aesni_cbc_create(encryption_algorithm_t algo, size_t key_size)
+{
+ private_aesni_cbc_t *this;
+
+ if (algo != ENCR_AES_CBC)
+ {
+ return NULL;
+ }
+ switch (key_size)
+ {
+ case 0:
+ key_size = 16;
+ break;
+ case 16:
+ case 24:
+ case 32:
+ break;
+ default:
+ return NULL;
+ }
+
+ INIT_ALIGN(this, sizeof(__m128i),
+ .public = {
+ .crypter = {
+ .encrypt = _encrypt,
+ .decrypt = _decrypt,
+ .get_block_size = _get_block_size,
+ .get_iv_size = _get_iv_size,
+ .get_key_size = _get_key_size,
+ .set_key = _set_key,
+ .destroy = _destroy,
+ },
+ },
+ .key_size = key_size,
+ );
+
+ switch (key_size)
+ {
+ case 16:
+ this->encrypt = encrypt_cbc128;
+ this->decrypt = decrypt_cbc128;
+ break;
+ case 24:
+ this->encrypt = encrypt_cbc192;
+ this->decrypt = decrypt_cbc192;
+ break;
+ case 32:
+ this->encrypt = encrypt_cbc256;
+ this->decrypt = decrypt_cbc256;
+ break;
+ }
+
+ return &this->public;
+}
diff --git a/src/libstrongswan/plugins/aesni/aesni_cbc.h b/src/libstrongswan/plugins/aesni/aesni_cbc.h
new file mode 100644
index 000000000..c004ec611
--- /dev/null
+++ b/src/libstrongswan/plugins/aesni/aesni_cbc.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2015 Martin Willi
+ * Copyright (C) 2015 revosec AG
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+/**
+ * @defgroup aesni_cbc aesni_cbc
+ * @{ @ingroup aesni
+ */
+
+#ifndef AESNI_CBC_H_
+#define AESNI_CBC_H_
+
+#include <library.h>
+
+typedef struct aesni_cbc_t aesni_cbc_t;
+
+/**
+ * CBC mode crypter using AES-NI
+ */
+struct aesni_cbc_t {
+
+ /**
+ * Implements crypter interface
+ */
+ crypter_t crypter;
+};
+
+/**
+ * Create an aesni_cbc instance.
+ *
+ * @param algo			encryption algorithm, ENCR_AES_CBC
+ * @param key_size AES key size, in bytes
+ * @return AES-CBC crypter, NULL if not supported
+ */
+aesni_cbc_t *aesni_cbc_create(encryption_algorithm_t algo, size_t key_size);
+
+#endif /** AESNI_CBC_H_ @}*/
diff --git a/src/libstrongswan/plugins/aesni/aesni_ccm.c b/src/libstrongswan/plugins/aesni/aesni_ccm.c
new file mode 100644
index 000000000..d523bc17a
--- /dev/null
+++ b/src/libstrongswan/plugins/aesni/aesni_ccm.c
@@ -0,0 +1,914 @@
+/*
+ * Copyright (C) 2010-2015 Martin Willi
+ * Copyright (C) 2010-2015 revosec AG
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include "aesni_ccm.h"
+#include "aesni_key.h"
+
+#include <crypto/iv/iv_gen_seq.h>
+
+#include <tmmintrin.h>
+
+#define SALT_SIZE 3
+#define IV_SIZE 8
+#define NONCE_SIZE (SALT_SIZE + IV_SIZE) /* 11 */
+#define Q_SIZE (AES_BLOCK_SIZE - NONCE_SIZE - 1) /* 4 */
+
+typedef struct private_aesni_ccm_t private_aesni_ccm_t;
+
+/**
+ * CCM en/decryption method type
+ */
+typedef void (*aesni_ccm_fn_t)(private_aesni_ccm_t*, size_t, u_char*, u_char*,
+ u_char*, size_t, u_char*, u_char*);
+
+/**
+ * Private data of an aesni_ccm_t object.
+ */
+struct private_aesni_ccm_t {
+
+ /**
+ * Public aesni_ccm_t interface.
+ */
+ aesni_ccm_t public;
+
+ /**
+ * Encryption key schedule
+ */
+ aesni_key_t *key;
+
+ /**
+ * IV generator.
+ */
+ iv_gen_t *iv_gen;
+
+ /**
+ * Length of the integrity check value
+ */
+ size_t icv_size;
+
+ /**
+ * Length of the key in bytes
+ */
+ size_t key_size;
+
+ /**
+ * CCM encryption function
+ */
+ aesni_ccm_fn_t encrypt;
+
+ /**
+ * CCM decryption function
+ */
+ aesni_ccm_fn_t decrypt;
+
+ /**
+ * salt to add to nonce
+ */
+ u_char salt[SALT_SIZE];
+};
+
+/**
+ * First block with control information
+ */
+typedef struct __attribute__((packed)) {
+ BITFIELD4(u_int8_t,
+ /* size of p length field q, as q-1 */
+ q_len: 3,
+ /* size of our ICV t, as (t-2)/2 */
+ t_len: 3,
+ /* do we have associated data */
+ assoc: 1,
+ reserved: 1,
+ ) flags;
+ /* nonce value */
+ struct __attribute__((packed)) {
+ u_char salt[SALT_SIZE];
+ u_char iv[IV_SIZE];
+ } nonce;
+ /* length of plain text, q */
+ u_char q[Q_SIZE];
+} b0_t;
+
+/**
+ * Counter block
+ */
+typedef struct __attribute__((packed)) {
+ BITFIELD3(u_int8_t,
+ /* size of p length field q, as q-1 */
+ q_len: 3,
+ zero: 3,
+ reserved: 2,
+ ) flags;
+ /* nonce value */
+ struct __attribute__((packed)) {
+ u_char salt[SALT_SIZE];
+ u_char iv[IV_SIZE];
+ } nonce;
+ /* counter value */
+ u_char i[Q_SIZE];
+} ctr_t;
+
+/**
+ * Build the first block B0
+ */
+static void build_b0(private_aesni_ccm_t *this, size_t len, size_t alen,
+ u_char *iv, void *out)
+{
+ b0_t *block = out;
+
+ block->flags.reserved = 0;
+ block->flags.assoc = alen ? 1 : 0;
+ block->flags.t_len = (this->icv_size - 2) / 2;
+ block->flags.q_len = Q_SIZE - 1;
+ memcpy(block->nonce.salt, this->salt, SALT_SIZE);
+ memcpy(block->nonce.iv, iv, IV_SIZE);
+ htoun32(block->q, len);
+}
+
+/**
+ * Build a counter block for counter i
+ */
+static void build_ctr(private_aesni_ccm_t *this, u_int32_t i, u_char *iv,
+ void *out)
+{
+ ctr_t *ctr = out;
+
+ ctr->flags.reserved = 0;
+ ctr->flags.zero = 0;
+ ctr->flags.q_len = Q_SIZE - 1;
+ memcpy(ctr->nonce.salt, this->salt, SALT_SIZE);
+ memcpy(ctr->nonce.iv, iv, IV_SIZE);
+ htoun32(ctr->i, i);
+}
+
+/**
+ * Calculate the ICV for the b0 and associated data
+ */
+static __m128i icv_header(private_aesni_ccm_t *this, size_t len, u_char *iv,
+ u_int16_t alen, u_char *assoc)
+{
+ __m128i *ks, b, t, c;
+ u_int i, round, blocks, rem;
+
+ ks = this->key->schedule;
+ build_b0(this, len, alen, iv, &b);
+ c = _mm_loadu_si128(&b);
+ c = _mm_xor_si128(c, ks[0]);
+ for (round = 1; round < this->key->rounds; round++)
+ {
+ c = _mm_aesenc_si128(c, ks[round]);
+ }
+ c = _mm_aesenclast_si128(c, ks[this->key->rounds]);
+
+ if (alen)
+ {
+ blocks = (alen + sizeof(alen)) / AES_BLOCK_SIZE;
+ rem = (alen + sizeof(alen)) % AES_BLOCK_SIZE;
+ if (rem)
+ {
+ blocks++;
+ }
+ for (i = 0; i < blocks; i++)
+ {
+ if (i == 0)
+ { /* first block */
+ memset(&b, 0, sizeof(b));
+ htoun16(&b, alen);
+ memcpy(((u_char*)&b) + sizeof(alen), assoc,
+ min(alen, sizeof(b) - sizeof(alen)));
+ t = _mm_loadu_si128(&b);
+ }
+ else if (i == blocks - 1 && rem)
+ { /* last block with padding */
+ memset(&b, 0, sizeof(b));
+ memcpy(&b, ((__m128i*)(assoc - sizeof(alen))) + i, rem);
+ t = _mm_loadu_si128(&b);
+ }
+ else
+ { /* full block */
+ t = _mm_loadu_si128(((__m128i*)(assoc - sizeof(alen))) + i);
+ }
+ c = _mm_xor_si128(t, c);
+ c = _mm_xor_si128(c, ks[0]);
+ for (round = 1; round < this->key->rounds; round++)
+ {
+ c = _mm_aesenc_si128(c, ks[round]);
+ }
+ c = _mm_aesenclast_si128(c, ks[this->key->rounds]);
+ }
+ }
+ return c;
+}
+
+/**
+ * En-/Decrypt the ICV, trim and store it
+ */
+static void crypt_icv(private_aesni_ccm_t *this, u_char *iv,
+ __m128i c, u_char *icv)
+{
+ __m128i *ks, b, t;
+ u_int round;
+
+ ks = this->key->schedule;
+ build_ctr(this, 0, iv, &b);
+
+ t = _mm_loadu_si128(&b);
+ t = _mm_xor_si128(t, ks[0]);
+ for (round = 1; round < this->key->rounds; round++)
+ {
+ t = _mm_aesenc_si128(t, ks[round]);
+ }
+ t = _mm_aesenclast_si128(t, ks[this->key->rounds]);
+
+ t = _mm_xor_si128(t, c);
+
+ _mm_storeu_si128(&b, t);
+ memcpy(icv, &b, this->icv_size);
+}
+
+/**
+ * Do big-endian increment on x
+ */
+static inline __m128i increment_be(__m128i x)
+{
+ __m128i swap;
+
+ swap = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+
+ x = _mm_shuffle_epi8(x, swap);
+ x = _mm_add_epi64(x, _mm_set_epi32(0, 0, 0, 1));
+ x = _mm_shuffle_epi8(x, swap);
+
+ return x;
+}
+
+/**
+ * Encrypt a remaining incomplete block
+ */
+static __m128i encrypt_ccm_rem(aesni_key_t *key, u_int rem, __m128i state,
+ void *in, void *out, __m128i c)
+{
+ __m128i *ks, t, b, d;
+ u_int round;
+
+ ks = key->schedule;
+ memset(&b, 0, sizeof(b));
+ memcpy(&b, in, rem);
+ d = _mm_loadu_si128(&b);
+
+ c = _mm_xor_si128(d, c);
+ c = _mm_xor_si128(c, ks[0]);
+ t = _mm_xor_si128(state, ks[0]);
+ for (round = 1; round < key->rounds; round++)
+ {
+ c = _mm_aesenc_si128(c, ks[round]);
+ t = _mm_aesenc_si128(t, ks[round]);
+ }
+ c = _mm_aesenclast_si128(c, ks[key->rounds]);
+ t = _mm_aesenclast_si128(t, ks[key->rounds]);
+
+ t = _mm_xor_si128(t, d);
+ _mm_storeu_si128(&b, t);
+
+ memcpy(out, &b, rem);
+
+ return c;
+}
+
+/**
+ * Decrypt a remaining incomplete block
+ */
+static __m128i decrypt_ccm_rem(aesni_key_t *key, u_int rem, __m128i state,
+ void *in, void *out, __m128i c)
+{
+ __m128i *ks, t, b, d;
+ u_int round;
+
+ ks = key->schedule;
+ memset(&b, 0, sizeof(b));
+ memcpy(&b, in, rem);
+ d = _mm_loadu_si128(&b);
+
+ t = _mm_xor_si128(state, ks[0]);
+ for (round = 1; round < key->rounds; round++)
+ {
+ t = _mm_aesenc_si128(t, ks[round]);
+ }
+ t = _mm_aesenclast_si128(t, ks[key->rounds]);
+ t = _mm_xor_si128(t, d);
+ _mm_storeu_si128(&b, t);
+
+ memset((u_char*)&b + rem, 0, sizeof(b) - rem);
+ t = _mm_loadu_si128(&b);
+ c = _mm_xor_si128(t, c);
+ c = _mm_xor_si128(c, ks[0]);
+ for (round = 1; round < key->rounds; round++)
+ {
+ c = _mm_aesenc_si128(c, ks[round]);
+ }
+ c = _mm_aesenclast_si128(c, ks[key->rounds]);
+
+ memcpy(out, &b, rem);
+
+ return c;
+}
+
+/**
+ * AES-128 CCM encryption/ICV generation
+ */
+static void encrypt_ccm128(private_aesni_ccm_t *this,
+ size_t len, u_char *in, u_char *out, u_char *iv,
+ size_t alen, u_char *assoc, u_char *icv)
+{
+ __m128i *ks, d, t, c, b, state, *bi, *bo;
+ u_int blocks, rem, i;
+
+ c = icv_header(this, len, iv, alen, assoc);
+ build_ctr(this, 1, iv, &b);
+ state = _mm_load_si128(&b);
+ blocks = len / AES_BLOCK_SIZE;
+ rem = len % AES_BLOCK_SIZE;
+ bi = (__m128i*)in;
+ bo = (__m128i*)out;
+
+ ks = this->key->schedule;
+
+ for (i = 0; i < blocks; i++)
+ {
+ d = _mm_loadu_si128(bi + i);
+
+ c = _mm_xor_si128(d, c);
+ c = _mm_xor_si128(c, ks[0]);
+ t = _mm_xor_si128(state, ks[0]);
+
+ c = _mm_aesenc_si128(c, ks[1]);
+ t = _mm_aesenc_si128(t, ks[1]);
+ c = _mm_aesenc_si128(c, ks[2]);
+ t = _mm_aesenc_si128(t, ks[2]);
+ c = _mm_aesenc_si128(c, ks[3]);
+ t = _mm_aesenc_si128(t, ks[3]);
+ c = _mm_aesenc_si128(c, ks[4]);
+ t = _mm_aesenc_si128(t, ks[4]);
+ c = _mm_aesenc_si128(c, ks[5]);
+ t = _mm_aesenc_si128(t, ks[5]);
+ c = _mm_aesenc_si128(c, ks[6]);
+ t = _mm_aesenc_si128(t, ks[6]);
+ c = _mm_aesenc_si128(c, ks[7]);
+ t = _mm_aesenc_si128(t, ks[7]);
+ c = _mm_aesenc_si128(c, ks[8]);
+ t = _mm_aesenc_si128(t, ks[8]);
+ c = _mm_aesenc_si128(c, ks[9]);
+ t = _mm_aesenc_si128(t, ks[9]);
+
+ c = _mm_aesenclast_si128(c, ks[10]);
+ t = _mm_aesenclast_si128(t, ks[10]);
+
+ t = _mm_xor_si128(t, d);
+ _mm_storeu_si128(bo + i, t);
+
+ state = increment_be(state);
+ }
+
+ if (rem)
+ {
+ c = encrypt_ccm_rem(this->key, rem, state, bi + blocks, bo + blocks, c);
+ }
+ crypt_icv(this, iv, c, icv);
+}
+
+/**
+ * AES-128 CCM decryption/ICV generation
+ */
+static void decrypt_ccm128(private_aesni_ccm_t *this,
+ size_t len, u_char *in, u_char *out, u_char *iv,
+ size_t alen, u_char *assoc, u_char *icv)
+{
+ __m128i *ks, d, t, c, b, state, *bi, *bo;
+ u_int blocks, rem, i;
+
+ c = icv_header(this, len, iv, alen, assoc);
+ build_ctr(this, 1, iv, &b);
+ state = _mm_load_si128(&b);
+ blocks = len / AES_BLOCK_SIZE;
+ rem = len % AES_BLOCK_SIZE;
+ bi = (__m128i*)in;
+ bo = (__m128i*)out;
+
+ ks = this->key->schedule;
+
+ for (i = 0; i < blocks; i++)
+ {
+ d = _mm_loadu_si128(bi + i);
+
+ t = _mm_xor_si128(state, ks[0]);
+
+ t = _mm_aesenc_si128(t, ks[1]);
+ t = _mm_aesenc_si128(t, ks[2]);
+ t = _mm_aesenc_si128(t, ks[3]);
+ t = _mm_aesenc_si128(t, ks[4]);
+ t = _mm_aesenc_si128(t, ks[5]);
+ t = _mm_aesenc_si128(t, ks[6]);
+ t = _mm_aesenc_si128(t, ks[7]);
+ t = _mm_aesenc_si128(t, ks[8]);
+ t = _mm_aesenc_si128(t, ks[9]);
+
+ t = _mm_aesenclast_si128(t, ks[10]);
+ t = _mm_xor_si128(t, d);
+ _mm_storeu_si128(bo + i, t);
+
+ c = _mm_xor_si128(t, c);
+ c = _mm_xor_si128(c, ks[0]);
+
+ c = _mm_aesenc_si128(c, ks[1]);
+ c = _mm_aesenc_si128(c, ks[2]);
+ c = _mm_aesenc_si128(c, ks[3]);
+ c = _mm_aesenc_si128(c, ks[4]);
+ c = _mm_aesenc_si128(c, ks[5]);
+ c = _mm_aesenc_si128(c, ks[6]);
+ c = _mm_aesenc_si128(c, ks[7]);
+ c = _mm_aesenc_si128(c, ks[8]);
+ c = _mm_aesenc_si128(c, ks[9]);
+
+ c = _mm_aesenclast_si128(c, ks[10]);
+
+ state = increment_be(state);
+ }
+
+ if (rem)
+ {
+ c = decrypt_ccm_rem(this->key, rem, state, bi + blocks, bo + blocks, c);
+ }
+ crypt_icv(this, iv, c, icv);
+}
+
+/**
+ * AES-192 CCM encryption/ICV generation
+ */
+static void encrypt_ccm192(private_aesni_ccm_t *this,
+ size_t len, u_char *in, u_char *out, u_char *iv,
+ size_t alen, u_char *assoc, u_char *icv)
+{
+ __m128i *ks, d, t, c, b, state, *bi, *bo;
+ u_int blocks, rem, i;
+
+ c = icv_header(this, len, iv, alen, assoc);
+ build_ctr(this, 1, iv, &b);
+ state = _mm_load_si128(&b);
+ blocks = len / AES_BLOCK_SIZE;
+ rem = len % AES_BLOCK_SIZE;
+ bi = (__m128i*)in;
+ bo = (__m128i*)out;
+
+ ks = this->key->schedule;
+
+ for (i = 0; i < blocks; i++)
+ {
+ d = _mm_loadu_si128(bi + i);
+
+ c = _mm_xor_si128(d, c);
+ c = _mm_xor_si128(c, ks[0]);
+ t = _mm_xor_si128(state, ks[0]);
+
+ c = _mm_aesenc_si128(c, ks[1]);
+ t = _mm_aesenc_si128(t, ks[1]);
+ c = _mm_aesenc_si128(c, ks[2]);
+ t = _mm_aesenc_si128(t, ks[2]);
+ c = _mm_aesenc_si128(c, ks[3]);
+ t = _mm_aesenc_si128(t, ks[3]);
+ c = _mm_aesenc_si128(c, ks[4]);
+ t = _mm_aesenc_si128(t, ks[4]);
+ c = _mm_aesenc_si128(c, ks[5]);
+ t = _mm_aesenc_si128(t, ks[5]);
+ c = _mm_aesenc_si128(c, ks[6]);
+ t = _mm_aesenc_si128(t, ks[6]);
+ c = _mm_aesenc_si128(c, ks[7]);
+ t = _mm_aesenc_si128(t, ks[7]);
+ c = _mm_aesenc_si128(c, ks[8]);
+ t = _mm_aesenc_si128(t, ks[8]);
+ c = _mm_aesenc_si128(c, ks[9]);
+ t = _mm_aesenc_si128(t, ks[9]);
+ c = _mm_aesenc_si128(c, ks[10]);
+ t = _mm_aesenc_si128(t, ks[10]);
+ c = _mm_aesenc_si128(c, ks[11]);
+ t = _mm_aesenc_si128(t, ks[11]);
+
+ c = _mm_aesenclast_si128(c, ks[12]);
+ t = _mm_aesenclast_si128(t, ks[12]);
+
+ t = _mm_xor_si128(t, d);
+ _mm_storeu_si128(bo + i, t);
+
+ state = increment_be(state);
+ }
+
+ if (rem)
+ {
+ c = encrypt_ccm_rem(this->key, rem, state, bi + blocks, bo + blocks, c);
+ }
+ crypt_icv(this, iv, c, icv);
+}
+
+/**
+ * AES-192 CCM decryption/ICV generation
+ *
+ * Combined one-pass mode: each ciphertext block is CTR-decrypted using the
+ * keystream block "t", then the recovered plaintext is chained into the
+ * CBC-MAC state "c". crypt_icv() derives the final ICV from "c".
+ */
+static void decrypt_ccm192(private_aesni_ccm_t *this,
+						   size_t len, u_char *in, u_char *out, u_char *iv,
+						   size_t alen, u_char *assoc, u_char *icv)
+{
+	__m128i *ks, d, t, c, b, state, *bi, *bo;
+	u_int blocks, rem, i;
+
+	/* MAC the CCM B0 block and associated data; CTR counter starts at 1 */
+	c = icv_header(this, len, iv, alen, assoc);
+	build_ctr(this, 1, iv, &b);
+	state = _mm_load_si128(&b);
+	blocks = len / AES_BLOCK_SIZE;
+	rem = len % AES_BLOCK_SIZE;
+	bi = (__m128i*)in;
+	bo = (__m128i*)out;
+
+	ks = this->key->schedule;
+
+	for (i = 0; i < blocks; i++)
+	{
+		d = _mm_loadu_si128(bi + i);
+
+		/* encrypt the counter block (12 rounds for AES-192) */
+		t = _mm_xor_si128(state, ks[0]);
+
+		t = _mm_aesenc_si128(t, ks[1]);
+		t = _mm_aesenc_si128(t, ks[2]);
+		t = _mm_aesenc_si128(t, ks[3]);
+		t = _mm_aesenc_si128(t, ks[4]);
+		t = _mm_aesenc_si128(t, ks[5]);
+		t = _mm_aesenc_si128(t, ks[6]);
+		t = _mm_aesenc_si128(t, ks[7]);
+		t = _mm_aesenc_si128(t, ks[8]);
+		t = _mm_aesenc_si128(t, ks[9]);
+		t = _mm_aesenc_si128(t, ks[10]);
+		t = _mm_aesenc_si128(t, ks[11]);
+
+		t = _mm_aesenclast_si128(t, ks[12]);
+		/* XOR keystream against ciphertext to recover the plaintext */
+		t = _mm_xor_si128(t, d);
+		_mm_storeu_si128(bo + i, t);
+
+		/* chain the plaintext into the CBC-MAC, then encrypt the MAC state */
+		c = _mm_xor_si128(t, c);
+		c = _mm_xor_si128(c, ks[0]);
+
+		c = _mm_aesenc_si128(c, ks[1]);
+		c = _mm_aesenc_si128(c, ks[2]);
+		c = _mm_aesenc_si128(c, ks[3]);
+		c = _mm_aesenc_si128(c, ks[4]);
+		c = _mm_aesenc_si128(c, ks[5]);
+		c = _mm_aesenc_si128(c, ks[6]);
+		c = _mm_aesenc_si128(c, ks[7]);
+		c = _mm_aesenc_si128(c, ks[8]);
+		c = _mm_aesenc_si128(c, ks[9]);
+		c = _mm_aesenc_si128(c, ks[10]);
+		c = _mm_aesenc_si128(c, ks[11]);
+
+		c = _mm_aesenclast_si128(c, ks[12]);
+
+		state = increment_be(state);
+	}
+
+	if (rem)
+	{
+		/* handle the partial trailing block */
+		c = decrypt_ccm_rem(this->key, rem, state, bi + blocks, bo + blocks, c);
+	}
+	crypt_icv(this, iv, c, icv);
+}
+
+/**
+ * AES-256 CCM encryption/ICV generation
+ *
+ * Combined one-pass mode: the CBC-MAC over the plaintext ("c") and the
+ * CTR keystream block ("t") are computed with interleaved AES rounds to
+ * exploit AES-NI pipelining. crypt_icv() derives the final ICV from "c".
+ */
+static void encrypt_ccm256(private_aesni_ccm_t *this,
+						   size_t len, u_char *in, u_char *out, u_char *iv,
+						   size_t alen, u_char *assoc, u_char *icv)
+{
+	__m128i *ks, d, t, c, b, state, *bi, *bo;
+	u_int blocks, rem, i;
+
+	/* MAC the CCM B0 block and associated data; CTR counter starts at 1 */
+	c = icv_header(this, len, iv, alen, assoc);
+	build_ctr(this, 1, iv, &b);
+	state = _mm_load_si128(&b);
+	blocks = len / AES_BLOCK_SIZE;
+	rem = len % AES_BLOCK_SIZE;
+	bi = (__m128i*)in;
+	bo = (__m128i*)out;
+
+	ks = this->key->schedule;
+
+	for (i = 0; i < blocks; i++)
+	{
+		d = _mm_loadu_si128(bi + i);
+
+		/* chain plaintext into the CBC-MAC while encrypting the counter;
+		 * the two AES operations run interleaved (14 rounds for AES-256) */
+		c = _mm_xor_si128(d, c);
+		c = _mm_xor_si128(c, ks[0]);
+		t = _mm_xor_si128(state, ks[0]);
+
+		c = _mm_aesenc_si128(c, ks[1]);
+		t = _mm_aesenc_si128(t, ks[1]);
+		c = _mm_aesenc_si128(c, ks[2]);
+		t = _mm_aesenc_si128(t, ks[2]);
+		c = _mm_aesenc_si128(c, ks[3]);
+		t = _mm_aesenc_si128(t, ks[3]);
+		c = _mm_aesenc_si128(c, ks[4]);
+		t = _mm_aesenc_si128(t, ks[4]);
+		c = _mm_aesenc_si128(c, ks[5]);
+		t = _mm_aesenc_si128(t, ks[5]);
+		c = _mm_aesenc_si128(c, ks[6]);
+		t = _mm_aesenc_si128(t, ks[6]);
+		c = _mm_aesenc_si128(c, ks[7]);
+		t = _mm_aesenc_si128(t, ks[7]);
+		c = _mm_aesenc_si128(c, ks[8]);
+		t = _mm_aesenc_si128(t, ks[8]);
+		c = _mm_aesenc_si128(c, ks[9]);
+		t = _mm_aesenc_si128(t, ks[9]);
+		c = _mm_aesenc_si128(c, ks[10]);
+		t = _mm_aesenc_si128(t, ks[10]);
+		c = _mm_aesenc_si128(c, ks[11]);
+		t = _mm_aesenc_si128(t, ks[11]);
+		c = _mm_aesenc_si128(c, ks[12]);
+		t = _mm_aesenc_si128(t, ks[12]);
+		c = _mm_aesenc_si128(c, ks[13]);
+		t = _mm_aesenc_si128(t, ks[13]);
+
+		c = _mm_aesenclast_si128(c, ks[14]);
+		t = _mm_aesenclast_si128(t, ks[14]);
+
+		/* XOR keystream against plaintext to produce the ciphertext */
+		t = _mm_xor_si128(t, d);
+		_mm_storeu_si128(bo + i, t);
+
+		state = increment_be(state);
+	}
+
+	if (rem)
+	{
+		/* handle the partial trailing block */
+		c = encrypt_ccm_rem(this->key, rem, state, bi + blocks, bo + blocks, c);
+	}
+	crypt_icv(this, iv, c, icv);
+}
+
+/**
+ * AES-256 CCM decryption/ICV generation
+ *
+ * Combined one-pass mode: each ciphertext block is CTR-decrypted using the
+ * keystream block "t", then the recovered plaintext is chained into the
+ * CBC-MAC state "c". crypt_icv() derives the final ICV from "c".
+ */
+static void decrypt_ccm256(private_aesni_ccm_t *this,
+						   size_t len, u_char *in, u_char *out, u_char *iv,
+						   size_t alen, u_char *assoc, u_char *icv)
+{
+	__m128i *ks, d, t, c, b, state, *bi, *bo;
+	u_int blocks, rem, i;
+
+	/* MAC the CCM B0 block and associated data; CTR counter starts at 1 */
+	c = icv_header(this, len, iv, alen, assoc);
+	build_ctr(this, 1, iv, &b);
+	state = _mm_load_si128(&b);
+	blocks = len / AES_BLOCK_SIZE;
+	rem = len % AES_BLOCK_SIZE;
+	bi = (__m128i*)in;
+	bo = (__m128i*)out;
+
+	ks = this->key->schedule;
+
+	for (i = 0; i < blocks; i++)
+	{
+		d = _mm_loadu_si128(bi + i);
+
+		/* encrypt the counter block (14 rounds for AES-256) */
+		t = _mm_xor_si128(state, ks[0]);
+
+		t = _mm_aesenc_si128(t, ks[1]);
+		t = _mm_aesenc_si128(t, ks[2]);
+		t = _mm_aesenc_si128(t, ks[3]);
+		t = _mm_aesenc_si128(t, ks[4]);
+		t = _mm_aesenc_si128(t, ks[5]);
+		t = _mm_aesenc_si128(t, ks[6]);
+		t = _mm_aesenc_si128(t, ks[7]);
+		t = _mm_aesenc_si128(t, ks[8]);
+		t = _mm_aesenc_si128(t, ks[9]);
+		t = _mm_aesenc_si128(t, ks[10]);
+		t = _mm_aesenc_si128(t, ks[11]);
+		t = _mm_aesenc_si128(t, ks[12]);
+		t = _mm_aesenc_si128(t, ks[13]);
+
+		t = _mm_aesenclast_si128(t, ks[14]);
+		/* XOR keystream against ciphertext to recover the plaintext */
+		t = _mm_xor_si128(t, d);
+		_mm_storeu_si128(bo + i, t);
+
+		/* chain the plaintext into the CBC-MAC, then encrypt the MAC state */
+		c = _mm_xor_si128(t, c);
+		c = _mm_xor_si128(c, ks[0]);
+
+		c = _mm_aesenc_si128(c, ks[1]);
+		c = _mm_aesenc_si128(c, ks[2]);
+		c = _mm_aesenc_si128(c, ks[3]);
+		c = _mm_aesenc_si128(c, ks[4]);
+		c = _mm_aesenc_si128(c, ks[5]);
+		c = _mm_aesenc_si128(c, ks[6]);
+		c = _mm_aesenc_si128(c, ks[7]);
+		c = _mm_aesenc_si128(c, ks[8]);
+		c = _mm_aesenc_si128(c, ks[9]);
+		c = _mm_aesenc_si128(c, ks[10]);
+		c = _mm_aesenc_si128(c, ks[11]);
+		c = _mm_aesenc_si128(c, ks[12]);
+		c = _mm_aesenc_si128(c, ks[13]);
+
+		c = _mm_aesenclast_si128(c, ks[14]);
+
+		state = increment_be(state);
+	}
+
+	if (rem)
+	{
+		/* handle the partial trailing block */
+		c = decrypt_ccm_rem(this->key, rem, state, bi + blocks, bo + blocks, c);
+	}
+	crypt_icv(this, iv, c, icv);
+}
+
+/**
+ * Implementation of aead_t.encrypt: AEAD-encrypt plain, appending the ICV
+ * after the ciphertext. Works in-place on plain.ptr if encr is NULL.
+ */
+METHOD(aead_t, encrypt, bool,
+	private_aesni_ccm_t *this, chunk_t plain, chunk_t assoc, chunk_t iv,
+	chunk_t *encr)
+{
+	u_char *out;
+
+	/* require a configured key and the expected IV length */
+	if (!this->key || iv.len != IV_SIZE)
+	{
+		return FALSE;
+	}
+	out = plain.ptr;
+	if (encr)
+	{
+		/* caller-owned buffer: ciphertext plus trailing ICV */
+		*encr = chunk_alloc(plain.len + this->icv_size);
+		out = encr->ptr;
+	}
+	/* dispatch to the key-size specific implementation; ICV goes after data */
+	this->encrypt(this, plain.len, plain.ptr, out, iv.ptr,
+				  assoc.len, assoc.ptr, out + plain.len);
+	return TRUE;
+}
+
+/**
+ * Implementation of aead_t.decrypt: AEAD-decrypt encr (which carries a
+ * trailing ICV) and verify the ICV in constant time.
+ *
+ * NOTE(review): the decrypted data is written to out/plain before the ICV
+ * is verified; on verification failure the caller still receives the
+ * (unauthenticated) plaintext buffer -- confirm callers discard it.
+ */
+METHOD(aead_t, decrypt, bool,
+	private_aesni_ccm_t *this, chunk_t encr, chunk_t assoc, chunk_t iv,
+	chunk_t *plain)
+{
+	u_char *out, icv[this->icv_size];
+
+	/* require a key, the expected IV length, and room for the ICV */
+	if (!this->key || iv.len != IV_SIZE || encr.len < this->icv_size)
+	{
+		return FALSE;
+	}
+	/* strip the ICV; decrypt in-place unless a separate buffer is wanted */
+	encr.len -= this->icv_size;
+	out = encr.ptr;
+	if (plain)
+	{
+		*plain = chunk_alloc(encr.len);
+		out = plain->ptr;
+	}
+
+	this->decrypt(this, encr.len, encr.ptr, out, iv.ptr,
+				  assoc.len, assoc.ptr, icv);
+	/* constant-time compare of computed vs. received ICV */
+	return memeq_const(icv, encr.ptr + encr.len, this->icv_size);
+}
+
+/**
+ * Implementation of aead_t.get_block_size: counter mode needs no block
+ * alignment, so a block size of 1 byte is advertised.
+ */
+METHOD(aead_t, get_block_size, size_t,
+	private_aesni_ccm_t *this)
+{
+	return 1;
+}
+
+/**
+ * Implementation of aead_t.get_icv_size: 8, 12 or 16 bytes, chosen from the
+ * ENCR_AES_CCM_ICV* algorithm in aesni_ccm_create().
+ */
+METHOD(aead_t, get_icv_size, size_t,
+	private_aesni_ccm_t *this)
+{
+	return this->icv_size;
+}
+
+/**
+ * Implementation of aead_t.get_iv_size: fixed external IV length (IV_SIZE).
+ */
+METHOD(aead_t, get_iv_size, size_t,
+	private_aesni_ccm_t *this)
+{
+	return IV_SIZE;
+}
+
+/**
+ * Implementation of aead_t.get_iv_gen: sequential IV generator, created and
+ * owned by this instance (see aesni_ccm_create/destroy).
+ */
+METHOD(aead_t, get_iv_gen, iv_gen_t*,
+	private_aesni_ccm_t *this)
+{
+	return this->iv_gen;
+}
+
+/**
+ * Implementation of aead_t.get_key_size: AES key plus the implicit nonce
+ * salt, matching the layout expected by set_key().
+ */
+METHOD(aead_t, get_key_size, size_t,
+	private_aesni_ccm_t *this)
+{
+	return this->key_size + SALT_SIZE;
+}
+
+/**
+ * Implementation of aead_t.set_key: key material is the AES key followed by
+ * SALT_SIZE bytes of implicit nonce salt.
+ */
+METHOD(aead_t, set_key, bool,
+	private_aesni_ccm_t *this, chunk_t key)
+{
+	if (key.len != this->key_size + SALT_SIZE)
+	{
+		return FALSE;
+	}
+
+	/* trailing bytes form the implicit nonce salt */
+	memcpy(this->salt, key.ptr + key.len - SALT_SIZE, SALT_SIZE);
+	key.len -= SALT_SIZE;
+
+	DESTROY_IF(this->key);
+	/* NOTE(review): the aesni_key_create() result is not checked here; a
+	 * failure is only caught later by the !this->key guards in
+	 * encrypt()/decrypt() -- confirm this is intentional */
+	this->key = aesni_key_create(TRUE, key);
+	return TRUE;
+}
+
+/**
+ * Implementation of aead_t.destroy: releases the key schedule (which wipes
+ * itself, presumably -- see aesni_key), the IV generator and the aligned
+ * instance itself.
+ */
+METHOD(aead_t, destroy, void,
+	private_aesni_ccm_t *this)
+{
+	DESTROY_IF(this->key);
+	this->iv_gen->destroy(this->iv_gen);
+	/* counterpart to INIT_ALIGN() in aesni_ccm_create() */
+	free_align(this);
+}
+
+/**
+ * See header
+ *
+ * Validates key size, salt size and algorithm, then sets up an aligned
+ * instance with key-size specific encrypt/decrypt function pointers. The
+ * actual key schedule is installed later via set_key().
+ */
+aesni_ccm_t *aesni_ccm_create(encryption_algorithm_t algo,
+							  size_t key_size, size_t salt_size)
+{
+	private_aesni_ccm_t *this;
+	size_t icv_size;
+
+	switch (key_size)
+	{
+		case 0:
+			/* default to AES-128 when no key size is requested */
+			key_size = 16;
+			break;
+		case 16:
+		case 24:
+		case 32:
+			break;
+		default:
+			return NULL;
+	}
+	if (salt_size && salt_size != SALT_SIZE)
+	{
+		/* currently not supported */
+		return NULL;
+	}
+	/* map the CCM variant to its ICV length.
+	 * NOTE(review): the "algo = ENCR_AES_CBC" assignments below look like
+	 * dead stores, algo is not read again in this function -- confirm */
+	switch (algo)
+	{
+		case ENCR_AES_CCM_ICV8:
+			algo = ENCR_AES_CBC;
+			icv_size = 8;
+			break;
+		case ENCR_AES_CCM_ICV12:
+			algo = ENCR_AES_CBC;
+			icv_size = 12;
+			break;
+		case ENCR_AES_CCM_ICV16:
+			algo = ENCR_AES_CBC;
+			icv_size = 16;
+			break;
+		default:
+			return NULL;
+	}
+
+	/* instance must be aligned for the __m128i key schedule member */
+	INIT_ALIGN(this, sizeof(__m128i),
+		.public = {
+			.aead = {
+				.encrypt = _encrypt,
+				.decrypt = _decrypt,
+				.get_block_size = _get_block_size,
+				.get_icv_size = _get_icv_size,
+				.get_iv_size = _get_iv_size,
+				.get_iv_gen = _get_iv_gen,
+				.get_key_size = _get_key_size,
+				.set_key = _set_key,
+				.destroy = _destroy,
+			},
+		},
+		.key_size = key_size,
+		.iv_gen = iv_gen_seq_create(),
+		.icv_size = icv_size,
+	);
+
+	/* select the unrolled implementation matching the AES key size */
+	switch (key_size)
+	{
+		case 16:
+			this->encrypt = encrypt_ccm128;
+			this->decrypt = decrypt_ccm128;
+			break;
+		case 24:
+			this->encrypt = encrypt_ccm192;
+			this->decrypt = decrypt_ccm192;
+			break;
+		case 32:
+			this->encrypt = encrypt_ccm256;
+			this->decrypt = decrypt_ccm256;
+			break;
+	}
+
+	return &this->public;
+}
diff --git a/src/libstrongswan/plugins/aesni/aesni_ccm.h b/src/libstrongswan/plugins/aesni/aesni_ccm.h
new file mode 100644
index 000000000..69612b515
--- /dev/null
+++ b/src/libstrongswan/plugins/aesni/aesni_ccm.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2015 Martin Willi
+ * Copyright (C) 2015 revosec AG
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+/**
+ * @defgroup aesni_ccm aesni_ccm
+ * @{ @ingroup aesni
+ */
+
+#ifndef AESNI_CCM_H_
+#define AESNI_CCM_H_
+
+#include <library.h>
+
+typedef struct aesni_ccm_t aesni_ccm_t;
+
+/**
+ * CCM mode AEAD using AES-NI
+ */
+struct aesni_ccm_t {
+
+	/**
+	 * Implements aead_t interface
+	 */
+	aead_t aead;
+};
+
+/**
+ * Create an aesni_ccm instance.
+ *
+ * @param algo			encryption algorithm, ENCR_AES_CCM*
+ * @param key_size		AES key size, in bytes (0, 16, 24 or 32)
+ * @param salt_size		size of salt value
+ * @return				AES-CCM AEAD, NULL if not supported
+ */
+aesni_ccm_t *aesni_ccm_create(encryption_algorithm_t algo,
+							  size_t key_size, size_t salt_size);
+
+#endif /** AESNI_CCM_H_ @}*/
diff --git a/src/libstrongswan/plugins/aesni/aesni_cmac.c b/src/libstrongswan/plugins/aesni/aesni_cmac.c
new file mode 100644
index 000000000..d6a87e6d7
--- /dev/null
+++ b/src/libstrongswan/plugins/aesni/aesni_cmac.c
@@ -0,0 +1,371 @@
+/*
+ * Copyright (C) 2012 Tobias Brunner
+ * Hochschule fuer Technik Rapperswil
+ * Copyright (C) 2015 Martin Willi
+ * Copyright (C) 2015 revosec AG
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include "aesni_cmac.h"
+#include "aesni_key.h"
+
+#include <crypto/prfs/mac_prf.h>
+#include <crypto/signers/mac_signer.h>
+
+typedef struct private_mac_t private_mac_t;
+
+/**
+ * Private data of a mac_t object implementing AES-CMAC (RFC 4493) with
+ * AES-NI intrinsics.
+ */
+struct private_mac_t {
+
+	/**
+	 * Public interface.
+	 */
+	mac_t public;
+
+	/**
+	 * Key schedule for key K, NULL until set_key() succeeded
+	 */
+	aesni_key_t *k;
+
+	/**
+	 * K1 subkey, used for a complete final block
+	 */
+	__m128i k1;
+
+	/**
+	 * K2 subkey, used for a padded final block
+	 */
+	__m128i k2;
+
+	/**
+	 * T, the running CBC-MAC state
+	 */
+	__m128i t;
+
+	/**
+	 * remaining, unprocessed bytes in append mode
+	 */
+	u_char rem[AES_BLOCK_SIZE];
+
+	/**
+	 * number of bytes in remaining; may reach AES_BLOCK_SIZE, as a full
+	 * final block is buffered until finalization (see get_mac())
+	 */
+	int rem_size;
+};
+
+/**
+ * Implementation of mac_t.get_mac: append data to the CMAC computation;
+ * if out is non-NULL, finalize per RFC 4493 and write the full-block MAC.
+ *
+ * The unrolled rounds below assume an AES-128 schedule (10 rounds); this
+ * holds because set_key() always derives a 16 byte key.
+ */
+METHOD(mac_t, get_mac, bool,
+	private_mac_t *this, chunk_t data, u_int8_t *out)
+{
+	__m128i *ks, t, l, *bi;
+	u_int blocks, rem, i;
+
+	if (!this->k)
+	{
+		return FALSE;
+	}
+
+	ks = this->k->schedule;
+	t = this->t;
+
+	/* strictly greater-than: an exactly filled buffer stays unprocessed, as
+	 * the final block needs K1/K2 treatment at finalization */
+	if (this->rem_size + data.len > AES_BLOCK_SIZE)
+	{
+		/* T := 0x00000000000000000000000000000000 (initially)
+		 * for each block M_i (except the last)
+		 *   X := T XOR M_i;
+		 *   T := AES-128(K, X);
+		 */
+
+		/* append data to remaining bytes, process block M_1 */
+		memcpy(this->rem + this->rem_size, data.ptr,
+			   AES_BLOCK_SIZE - this->rem_size);
+		data = chunk_skip(data, AES_BLOCK_SIZE - this->rem_size);
+
+		t = _mm_xor_si128(t, _mm_loadu_si128((__m128i*)this->rem));
+
+		t = _mm_xor_si128(t, ks[0]);
+		t = _mm_aesenc_si128(t, ks[1]);
+		t = _mm_aesenc_si128(t, ks[2]);
+		t = _mm_aesenc_si128(t, ks[3]);
+		t = _mm_aesenc_si128(t, ks[4]);
+		t = _mm_aesenc_si128(t, ks[5]);
+		t = _mm_aesenc_si128(t, ks[6]);
+		t = _mm_aesenc_si128(t, ks[7]);
+		t = _mm_aesenc_si128(t, ks[8]);
+		t = _mm_aesenc_si128(t, ks[9]);
+		t = _mm_aesenclast_si128(t, ks[10]);
+
+		/* process blocks M_2 ... M_n-1 */
+		bi = (__m128i*)data.ptr;
+		rem = data.len % AES_BLOCK_SIZE;
+		blocks = data.len / AES_BLOCK_SIZE;
+		if (!rem && blocks)
+		{	/* don't do last block */
+			rem = AES_BLOCK_SIZE;
+			blocks--;
+		}
+
+		/* process blocks M[2] ... M[n-1] */
+		for (i = 0; i < blocks; i++)
+		{
+			t = _mm_xor_si128(t, _mm_loadu_si128(bi + i));
+
+			t = _mm_xor_si128(t, ks[0]);
+			t = _mm_aesenc_si128(t, ks[1]);
+			t = _mm_aesenc_si128(t, ks[2]);
+			t = _mm_aesenc_si128(t, ks[3]);
+			t = _mm_aesenc_si128(t, ks[4]);
+			t = _mm_aesenc_si128(t, ks[5]);
+			t = _mm_aesenc_si128(t, ks[6]);
+			t = _mm_aesenc_si128(t, ks[7]);
+			t = _mm_aesenc_si128(t, ks[8]);
+			t = _mm_aesenc_si128(t, ks[9]);
+			t = _mm_aesenclast_si128(t, ks[10]);
+		}
+
+		/* store remaining bytes of block M_n */
+		memcpy(this->rem, data.ptr + data.len - rem, rem);
+		this->rem_size = rem;
+	}
+	else
+	{
+		/* no complete block (or last block), just copy into remaining.
+		 * NOTE(review): memcpy() with data.ptr == NULL is undefined even
+		 * when data.len == 0 -- confirm callers never pass a NULL chunk */
+		memcpy(this->rem + this->rem_size, data.ptr, data.len);
+		this->rem_size += data.len;
+	}
+	if (out)
+	{
+		/* if last block is complete
+		 *   M_last := M_n XOR K1;
+		 * else
+		 *   M_last := padding(M_n) XOR K2;
+		 */
+		if (this->rem_size == AES_BLOCK_SIZE)
+		{
+			l = _mm_loadu_si128((__m128i*)this->rem);
+			l = _mm_xor_si128(l, this->k1);
+		}
+		else
+		{
+			/* padding(x) = x || 10^i where i is 128-8*r-1
+			 * That is, padding(x) is the concatenation of x and a single '1',
+			 * followed by the minimum number of '0's, so that the total length is
+			 * equal to 128 bits.
+			 */
+			if (this->rem_size < AES_BLOCK_SIZE)
+			{
+				memset(this->rem + this->rem_size, 0,
+					   AES_BLOCK_SIZE - this->rem_size);
+				this->rem[this->rem_size] = 0x80;
+			}
+			l = _mm_loadu_si128((__m128i*)this->rem);
+			l = _mm_xor_si128(l, this->k2);
+		}
+		/* T := M_last XOR T;
+		 * T := AES-128(K,T);
+		 */
+		t = _mm_xor_si128(l, t);
+
+		t = _mm_xor_si128(t, ks[0]);
+		t = _mm_aesenc_si128(t, ks[1]);
+		t = _mm_aesenc_si128(t, ks[2]);
+		t = _mm_aesenc_si128(t, ks[3]);
+		t = _mm_aesenc_si128(t, ks[4]);
+		t = _mm_aesenc_si128(t, ks[5]);
+		t = _mm_aesenc_si128(t, ks[6]);
+		t = _mm_aesenc_si128(t, ks[7]);
+		t = _mm_aesenc_si128(t, ks[8]);
+		t = _mm_aesenc_si128(t, ks[9]);
+		t = _mm_aesenclast_si128(t, ks[10]);
+
+		/* writes a full 16 byte MAC; truncation is left to wrappers such as
+		 * mac_signer -- out must provide AES_BLOCK_SIZE bytes */
+		_mm_storeu_si128((__m128i*)out, t);
+
+		/* reset state */
+		t = _mm_setzero_si128();
+		this->rem_size = 0;
+	}
+	this->t = t;
+	return TRUE;
+}
+
+/**
+ * Implementation of mac_t.get_mac_size: the untruncated CMAC is one AES
+ * block (16 bytes).
+ */
+METHOD(mac_t, get_mac_size, size_t,
+	private_mac_t *this)
+{
+	return AES_BLOCK_SIZE;
+}
+
+/**
+ * Left-shift the given chunk by one bit.
+ *
+ * The chunk is treated as a big-endian bit string: the most significant bit
+ * of byte i+1 moves into the least significant bit of byte i; the overall
+ * most significant bit is discarded and a zero shifts in at the end.
+ */
+static void bit_shift(chunk_t chunk)
+{
+	size_t i;
+
+	for (i = 0; i < chunk.len; i++)
+	{
+		chunk.ptr[i] <<= 1;
+		/* carry the top bit of the following byte into this byte's LSB */
+		if (i < chunk.len - 1 && chunk.ptr[i + 1] & 0x80)
+		{
+			chunk.ptr[i] |= 0x01;
+		}
+	}
+}
+
+/**
+ * Implementation of mac_t.set_key: install key K and derive the RFC 4493
+ * subkeys K1/K2. Keys of any length are supported via RFC 4615 (CMAC over
+ * the key itself with an all-zero key).
+ */
+METHOD(mac_t, set_key, bool,
+	private_mac_t *this, chunk_t key)
+{
+	__m128i rb, msb, l, a;
+	u_int round;
+	chunk_t k;
+
+	/* reset any in-progress MAC computation */
+	this->t = _mm_setzero_si128();
+	this->rem_size = 0;
+
+	/* we support variable keys as defined in RFC 4615 */
+	if (key.len == AES_BLOCK_SIZE)
+	{
+		k = key;
+	}
+	else
+	{	/* use cmac recursively to resize longer or shorter keys */
+		k = chunk_alloca(AES_BLOCK_SIZE);
+		memset(k.ptr, 0, k.len);
+		if (!set_key(this, k) || !get_mac(this, key, k.ptr))
+		{
+			return FALSE;
+		}
+	}
+
+	DESTROY_IF(this->k);
+	this->k = aesni_key_create(TRUE, k);
+	if (!this->k)
+	{
+		return FALSE;
+	}
+
+	/*
+	 * Rb = 0x00000000000000000000000000000087
+	 * L = 0x00000000000000000000000000000000 encrypted with K
+	 * if MSB(L) == 0
+	 *   K1 = L << 1
+	 * else
+	 *   K1 = (L << 1) XOR Rb
+	 * if MSB(K1) == 0
+	 *   K2 = K1 << 1
+	 * else
+	 *   K2 = (K1 << 1) XOR Rb
+	 */
+
+	/* on little-endian x86 these place 0x87 in the last in-memory byte and
+	 * 0x80 in the first, i.e. the LSB/MSB of the big-endian bit string that
+	 * bit_shift() operates on */
+	rb = _mm_set_epi32(0x87000000, 0, 0, 0);
+	msb = _mm_set_epi32(0, 0, 0, 0x80);
+
+	/* L := AES(K, 0^128), using the full round count of the schedule */
+	l = _mm_setzero_si128();
+
+	l = _mm_xor_si128(l, this->k->schedule[0]);
+	for (round = 1; round < this->k->rounds; round++)
+	{
+		l = _mm_aesenc_si128(l, this->k->schedule[round]);
+	}
+	l = _mm_aesenclast_si128(l, this->k->schedule[this->k->rounds]);
+
+	this->k1 = l;
+	bit_shift(chunk_from_thing(this->k1));
+	/* memchr() inspects the first in-memory byte only: MSB(L) set? */
+	a = _mm_and_si128(l, msb);
+	if (memchr(&a, 0x80, 1))
+	{
+		this->k1 = _mm_xor_si128(this->k1, rb);
+	}
+	this->k2 = this->k1;
+	bit_shift(chunk_from_thing(this->k2));
+	a = _mm_and_si128(this->k1, msb);
+	if (memchr(&a, 0x80, 1))
+	{
+		this->k2 = _mm_xor_si128(this->k2, rb);
+	}
+
+	return TRUE;
+}
+
+/**
+ * Implementation of mac_t.destroy: wipe the subkeys and release the key
+ * schedule and the aligned instance.
+ *
+ * NOTE(review): this->t (running MAC state) and this->rem (buffered message
+ * bytes) are not wiped here -- confirm whether that residue matters.
+ */
+METHOD(mac_t, destroy, void,
+	private_mac_t *this)
+{
+	DESTROY_IF(this->k);
+	memwipe(&this->k1, sizeof(this->k1));
+	memwipe(&this->k2, sizeof(this->k2));
+	free_align(this);
+}
+
+/*
+ * Described in header
+ *
+ * NOTE(review): the algo and key_size arguments are not validated or used;
+ * the key is installed later via set_key(), which accepts any key length
+ * per RFC 4615 -- confirm ignoring the arguments is intentional.
+ */
+mac_t *aesni_cmac_create(encryption_algorithm_t algo, size_t key_size)
+{
+	private_mac_t *this;
+
+	/* aligned allocation for the __m128i members; k stays NULL until
+	 * set_key(), get_mac() guards against that */
+	INIT_ALIGN(this, sizeof(__m128i),
+		.public = {
+			.get_mac = _get_mac,
+			.get_mac_size = _get_mac_size,
+			.set_key = _set_key,
+			.destroy = _destroy,
+		},
+	);
+
+	return &this->public;
+}
+
+/*
+ * Described in header.
+ */
+prf_t *aesni_cmac_prf_create(pseudo_random_function_t algo)
+{
+	mac_t *cmac;
+
+	switch (algo)
+	{
+		case PRF_AES128_CMAC:
+			cmac = aesni_cmac_create(ENCR_AES_CBC, 16);
+			break;
+		default:
+			return NULL;
+	}
+	/* defensive: aesni_cmac_create() currently never returns NULL */
+	if (cmac)
+	{
+		/* wrapper takes ownership of cmac */
+		return mac_prf_create(cmac);
+	}
+	return NULL;
+}
+
+/*
+ * Described in header
+ */
+signer_t *aesni_cmac_signer_create(integrity_algorithm_t algo)
+{
+	size_t truncation;
+	mac_t *cmac;
+
+	switch (algo)
+	{
+		case AUTH_AES_CMAC_96:
+			cmac = aesni_cmac_create(ENCR_AES_CBC, 16);
+			/* AUTH_AES_CMAC_96 truncates the 16 byte MAC to 12 bytes */
+			truncation = 12;
+			break;
+		default:
+			return NULL;
+	}
+	/* defensive: aesni_cmac_create() currently never returns NULL */
+	if (cmac)
+	{
+		/* wrapper takes ownership of cmac */
+		return mac_signer_create(cmac, truncation);
+	}
+	return NULL;
+}
diff --git a/src/libstrongswan/plugins/aesni/aesni_cmac.h b/src/libstrongswan/plugins/aesni/aesni_cmac.h
new file mode 100644
index 000000000..5f0af7393
--- /dev/null
+++ b/src/libstrongswan/plugins/aesni/aesni_cmac.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2015 Martin Willi
+ * Copyright (C) 2015 revosec AG
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+/**
+ * @defgroup aesni_cmac aesni_cmac
+ * @{ @ingroup aesni
+ */
+
+#ifndef CMAC_H_
+#define CMAC_H_
+
+#include <crypto/mac.h>
+#include <crypto/prfs/prf.h>
+#include <crypto/signers/signer.h>
+
+/**
+ * Create a generic mac_t object using AESNI CMAC.
+ *
+ * @param algo			underlying encryption algorithm
+ * @param key_size		size of encryption key, in bytes
+ * @return				mac_t object, NULL if not supported
+ */
+mac_t *aesni_cmac_create(encryption_algorithm_t algo, size_t key_size);
+
+/**
+ * Creates a new prf_t object based AESNI CMAC.
+ *
+ * @param algo algorithm to implement
+ * @return prf_t object, NULL if not supported
+ */
+prf_t *aesni_cmac_prf_create(pseudo_random_function_t algo);
+
+/**
+ * Creates a new signer_t object based on AESNI CMAC.
+ *
+ * @param algo algorithm to implement
+ * @return signer_t, NULL if not supported
+ */
+signer_t *aesni_cmac_signer_create(integrity_algorithm_t algo);
+
+#endif /** CMAC_H_ @}*/
diff --git a/src/libstrongswan/plugins/aesni/aesni_ctr.c b/src/libstrongswan/plugins/aesni/aesni_ctr.c
new file mode 100644
index 000000000..989813814
--- /dev/null
+++ b/src/libstrongswan/plugins/aesni/aesni_ctr.c
@@ -0,0 +1,643 @@
+/*
+ * Copyright (C) 2015 Martin Willi
+ * Copyright (C) 2015 revosec AG
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include "aesni_ctr.h"
+#include "aesni_key.h"
+
+#include <tmmintrin.h>
+
+/**
+ * Pipeline parallelism we use for CTR en/decryption
+ */
+#define CTR_CRYPT_PARALLELISM 4
+
+typedef struct private_aesni_ctr_t private_aesni_ctr_t;
+
+/**
+ * CTR en/decryption method type
+ */
+typedef void (*aesni_ctr_fn_t)(private_aesni_ctr_t*, size_t, u_char*, u_char*);
+
+/**
+ * Private data of an aesni_ctr_t object.
+ */
+struct private_aesni_ctr_t {
+
+	/**
+	 * Public aesni_ctr_t interface.
+	 */
+	aesni_ctr_t public;
+
+	/**
+	 * Key size, in bytes (16, 24 or 32)
+	 */
+	u_int key_size;
+
+	/**
+	 * Key schedule
+	 */
+	aesni_key_t *key;
+
+	/**
+	 * Encryption method, selected to match key_size
+	 */
+	aesni_ctr_fn_t crypt;
+
+	/**
+	 * Counter state: nonce | IV | 32-bit block counter, exactly 16 bytes,
+	 * aligned so it can be loaded as a single __m128i
+	 * (presumably filled by set_key()/set_iv(), not visible here)
+	 */
+	struct {
+		char nonce[4];
+		char iv[8];
+		u_int32_t counter;
+	} __attribute__((packed, aligned(sizeof(__m128i)))) state;
+};
+
+/**
+ * Do big-endian increment on x
+ *
+ * The 16 bytes are reversed, 1 is added with a 64-bit lane addition, and
+ * the bytes are reversed back. NOTE(review): _mm_add_epi64() does not
+ * propagate a carry out of the low 64-bit lane, so only the trailing
+ * 8 bytes act as the counter -- presumably never exceeded per IV; confirm.
+ */
+static inline __m128i increment_be(__m128i x)
+{
+	__m128i swap;
+
+	swap = _mm_setr_epi8(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+
+	x = _mm_shuffle_epi8(x, swap);
+	x = _mm_add_epi64(x, _mm_set_epi32(0, 0, 0, 1));
+	x = _mm_shuffle_epi8(x, swap);
+
+	return x;
+}
+
+/**
+ * AES-128 CTR encryption
+ *
+ * Processes CTR_CRYPT_PARALLELISM counter blocks per iteration with
+ * interleaved AES rounds, then the leftover whole blocks one at a time,
+ * and finally a partial tail via a zero-padded stack block. The counter
+ * state is kept in a register and not written back to this->state.
+ */
+static void encrypt_ctr128(private_aesni_ctr_t *this,
+						   size_t len, u_char *in, u_char *out)
+{
+	__m128i t1, t2, t3, t4;
+	__m128i d1, d2, d3, d4;
+	__m128i *ks, state, b, *bi, *bo;
+	u_int i, blocks, pblocks, rem;
+
+	state = _mm_load_si128((__m128i*)&this->state);
+	blocks = len / AES_BLOCK_SIZE;
+	pblocks = blocks - (blocks % CTR_CRYPT_PARALLELISM);
+	rem = len % AES_BLOCK_SIZE;
+	bi = (__m128i*)in;
+	bo = (__m128i*)out;
+
+	ks = this->key->schedule;
+
+	/* 4-way pipelined main loop (10 rounds for AES-128) */
+	for (i = 0; i < pblocks; i += CTR_CRYPT_PARALLELISM)
+	{
+		d1 = _mm_loadu_si128(bi + i + 0);
+		d2 = _mm_loadu_si128(bi + i + 1);
+		d3 = _mm_loadu_si128(bi + i + 2);
+		d4 = _mm_loadu_si128(bi + i + 3);
+
+		t1 = _mm_xor_si128(state, ks[0]);
+		state = increment_be(state);
+		t2 = _mm_xor_si128(state, ks[0]);
+		state = increment_be(state);
+		t3 = _mm_xor_si128(state, ks[0]);
+		state = increment_be(state);
+		t4 = _mm_xor_si128(state, ks[0]);
+		state = increment_be(state);
+
+		t1 = _mm_aesenc_si128(t1, ks[1]);
+		t2 = _mm_aesenc_si128(t2, ks[1]);
+		t3 = _mm_aesenc_si128(t3, ks[1]);
+		t4 = _mm_aesenc_si128(t4, ks[1]);
+		t1 = _mm_aesenc_si128(t1, ks[2]);
+		t2 = _mm_aesenc_si128(t2, ks[2]);
+		t3 = _mm_aesenc_si128(t3, ks[2]);
+		t4 = _mm_aesenc_si128(t4, ks[2]);
+		t1 = _mm_aesenc_si128(t1, ks[3]);
+		t2 = _mm_aesenc_si128(t2, ks[3]);
+		t3 = _mm_aesenc_si128(t3, ks[3]);
+		t4 = _mm_aesenc_si128(t4, ks[3]);
+		t1 = _mm_aesenc_si128(t1, ks[4]);
+		t2 = _mm_aesenc_si128(t2, ks[4]);
+		t3 = _mm_aesenc_si128(t3, ks[4]);
+		t4 = _mm_aesenc_si128(t4, ks[4]);
+		t1 = _mm_aesenc_si128(t1, ks[5]);
+		t2 = _mm_aesenc_si128(t2, ks[5]);
+		t3 = _mm_aesenc_si128(t3, ks[5]);
+		t4 = _mm_aesenc_si128(t4, ks[5]);
+		t1 = _mm_aesenc_si128(t1, ks[6]);
+		t2 = _mm_aesenc_si128(t2, ks[6]);
+		t3 = _mm_aesenc_si128(t3, ks[6]);
+		t4 = _mm_aesenc_si128(t4, ks[6]);
+		t1 = _mm_aesenc_si128(t1, ks[7]);
+		t2 = _mm_aesenc_si128(t2, ks[7]);
+		t3 = _mm_aesenc_si128(t3, ks[7]);
+		t4 = _mm_aesenc_si128(t4, ks[7]);
+		t1 = _mm_aesenc_si128(t1, ks[8]);
+		t2 = _mm_aesenc_si128(t2, ks[8]);
+		t3 = _mm_aesenc_si128(t3, ks[8]);
+		t4 = _mm_aesenc_si128(t4, ks[8]);
+		t1 = _mm_aesenc_si128(t1, ks[9]);
+		t2 = _mm_aesenc_si128(t2, ks[9]);
+		t3 = _mm_aesenc_si128(t3, ks[9]);
+		t4 = _mm_aesenc_si128(t4, ks[9]);
+
+		t1 = _mm_aesenclast_si128(t1, ks[10]);
+		t2 = _mm_aesenclast_si128(t2, ks[10]);
+		t3 = _mm_aesenclast_si128(t3, ks[10]);
+		t4 = _mm_aesenclast_si128(t4, ks[10]);
+		t1 = _mm_xor_si128(t1, d1);
+		t2 = _mm_xor_si128(t2, d2);
+		t3 = _mm_xor_si128(t3, d3);
+		t4 = _mm_xor_si128(t4, d4);
+		_mm_storeu_si128(bo + i + 0, t1);
+		_mm_storeu_si128(bo + i + 1, t2);
+		_mm_storeu_si128(bo + i + 2, t3);
+		_mm_storeu_si128(bo + i + 3, t4);
+	}
+
+	/* leftover whole blocks, one at a time */
+	for (i = pblocks; i < blocks; i++)
+	{
+		d1 = _mm_loadu_si128(bi + i);
+
+		t1 = _mm_xor_si128(state, ks[0]);
+		state = increment_be(state);
+
+		t1 = _mm_aesenc_si128(t1, ks[1]);
+		t1 = _mm_aesenc_si128(t1, ks[2]);
+		t1 = _mm_aesenc_si128(t1, ks[3]);
+		t1 = _mm_aesenc_si128(t1, ks[4]);
+		t1 = _mm_aesenc_si128(t1, ks[5]);
+		t1 = _mm_aesenc_si128(t1, ks[6]);
+		t1 = _mm_aesenc_si128(t1, ks[7]);
+		t1 = _mm_aesenc_si128(t1, ks[8]);
+		t1 = _mm_aesenc_si128(t1, ks[9]);
+
+		t1 = _mm_aesenclast_si128(t1, ks[10]);
+		t1 = _mm_xor_si128(t1, d1);
+		_mm_storeu_si128(bo + i, t1);
+	}
+
+	if (rem)
+	{
+		/* partial tail: stage through a zero-padded stack block to avoid
+		 * out-of-bounds loads/stores */
+		memset(&b, 0, sizeof(b));
+		memcpy(&b, bi + blocks, rem);
+
+		d1 = _mm_loadu_si128(&b);
+		t1 = _mm_xor_si128(state, ks[0]);
+
+		t1 = _mm_aesenc_si128(t1, ks[1]);
+		t1 = _mm_aesenc_si128(t1, ks[2]);
+		t1 = _mm_aesenc_si128(t1, ks[3]);
+		t1 = _mm_aesenc_si128(t1, ks[4]);
+		t1 = _mm_aesenc_si128(t1, ks[5]);
+		t1 = _mm_aesenc_si128(t1, ks[6]);
+		t1 = _mm_aesenc_si128(t1, ks[7]);
+		t1 = _mm_aesenc_si128(t1, ks[8]);
+		t1 = _mm_aesenc_si128(t1, ks[9]);
+
+		t1 = _mm_aesenclast_si128(t1, ks[10]);
+		t1 = _mm_xor_si128(t1, d1);
+		_mm_storeu_si128(&b, t1);
+
+		memcpy(bo + blocks, &b, rem);
+	}
+}
+
+/**
+ * AES-192 CTR encryption
+ *
+ * Same structure as encrypt_ctr128(): a 4-way pipelined main loop, then
+ * leftover whole blocks, then a zero-padded stack block for the partial
+ * tail -- with 12 AES rounds for the 192-bit key schedule.
+ */
+static void encrypt_ctr192(private_aesni_ctr_t *this,
+						   size_t len, u_char *in, u_char *out)
+{
+	__m128i t1, t2, t3, t4;
+	__m128i d1, d2, d3, d4;
+	__m128i *ks, state, b, *bi, *bo;
+	u_int i, blocks, pblocks, rem;
+
+	state = _mm_load_si128((__m128i*)&this->state);
+	blocks = len / AES_BLOCK_SIZE;
+	pblocks = blocks - (blocks % CTR_CRYPT_PARALLELISM);
+	rem = len % AES_BLOCK_SIZE;
+	bi = (__m128i*)in;
+	bo = (__m128i*)out;
+
+	ks = this->key->schedule;
+
+	/* 4-way pipelined main loop (12 rounds for AES-192) */
+	for (i = 0; i < pblocks; i += CTR_CRYPT_PARALLELISM)
+	{
+		d1 = _mm_loadu_si128(bi + i + 0);
+		d2 = _mm_loadu_si128(bi + i + 1);
+		d3 = _mm_loadu_si128(bi + i + 2);
+		d4 = _mm_loadu_si128(bi + i + 3);
+
+		t1 = _mm_xor_si128(state, ks[0]);
+		state = increment_be(state);
+		t2 = _mm_xor_si128(state, ks[0]);
+		state = increment_be(state);
+		t3 = _mm_xor_si128(state, ks[0]);
+		state = increment_be(state);
+		t4 = _mm_xor_si128(state, ks[0]);
+		state = increment_be(state);
+
+		t1 = _mm_aesenc_si128(t1, ks[1]);
+		t2 = _mm_aesenc_si128(t2, ks[1]);
+		t3 = _mm_aesenc_si128(t3, ks[1]);
+		t4 = _mm_aesenc_si128(t4, ks[1]);
+		t1 = _mm_aesenc_si128(t1, ks[2]);
+		t2 = _mm_aesenc_si128(t2, ks[2]);
+		t3 = _mm_aesenc_si128(t3, ks[2]);
+		t4 = _mm_aesenc_si128(t4, ks[2]);
+		t1 = _mm_aesenc_si128(t1, ks[3]);
+		t2 = _mm_aesenc_si128(t2, ks[3]);
+		t3 = _mm_aesenc_si128(t3, ks[3]);
+		t4 = _mm_aesenc_si128(t4, ks[3]);
+		t1 = _mm_aesenc_si128(t1, ks[4]);
+		t2 = _mm_aesenc_si128(t2, ks[4]);
+		t3 = _mm_aesenc_si128(t3, ks[4]);
+		t4 = _mm_aesenc_si128(t4, ks[4]);
+		t1 = _mm_aesenc_si128(t1, ks[5]);
+		t2 = _mm_aesenc_si128(t2, ks[5]);
+		t3 = _mm_aesenc_si128(t3, ks[5]);
+		t4 = _mm_aesenc_si128(t4, ks[5]);
+		t1 = _mm_aesenc_si128(t1, ks[6]);
+		t2 = _mm_aesenc_si128(t2, ks[6]);
+		t3 = _mm_aesenc_si128(t3, ks[6]);
+		t4 = _mm_aesenc_si128(t4, ks[6]);
+		t1 = _mm_aesenc_si128(t1, ks[7]);
+		t2 = _mm_aesenc_si128(t2, ks[7]);
+		t3 = _mm_aesenc_si128(t3, ks[7]);
+		t4 = _mm_aesenc_si128(t4, ks[7]);
+		t1 = _mm_aesenc_si128(t1, ks[8]);
+		t2 = _mm_aesenc_si128(t2, ks[8]);
+		t3 = _mm_aesenc_si128(t3, ks[8]);
+		t4 = _mm_aesenc_si128(t4, ks[8]);
+		t1 = _mm_aesenc_si128(t1, ks[9]);
+		t2 = _mm_aesenc_si128(t2, ks[9]);
+		t3 = _mm_aesenc_si128(t3, ks[9]);
+		t4 = _mm_aesenc_si128(t4, ks[9]);
+		t1 = _mm_aesenc_si128(t1, ks[10]);
+		t2 = _mm_aesenc_si128(t2, ks[10]);
+		t3 = _mm_aesenc_si128(t3, ks[10]);
+		t4 = _mm_aesenc_si128(t4, ks[10]);
+		t1 = _mm_aesenc_si128(t1, ks[11]);
+		t2 = _mm_aesenc_si128(t2, ks[11]);
+		t3 = _mm_aesenc_si128(t3, ks[11]);
+		t4 = _mm_aesenc_si128(t4, ks[11]);
+
+		t1 = _mm_aesenclast_si128(t1, ks[12]);
+		t2 = _mm_aesenclast_si128(t2, ks[12]);
+		t3 = _mm_aesenclast_si128(t3, ks[12]);
+		t4 = _mm_aesenclast_si128(t4, ks[12]);
+		t1 = _mm_xor_si128(t1, d1);
+		t2 = _mm_xor_si128(t2, d2);
+		t3 = _mm_xor_si128(t3, d3);
+		t4 = _mm_xor_si128(t4, d4);
+		_mm_storeu_si128(bo + i + 0, t1);
+		_mm_storeu_si128(bo + i + 1, t2);
+		_mm_storeu_si128(bo + i + 2, t3);
+		_mm_storeu_si128(bo + i + 3, t4);
+	}
+
+	/* leftover whole blocks, one at a time */
+	for (i = pblocks; i < blocks; i++)
+	{
+		d1 = _mm_loadu_si128(bi + i);
+
+		t1 = _mm_xor_si128(state, ks[0]);
+		state = increment_be(state);
+
+		t1 = _mm_aesenc_si128(t1, ks[1]);
+		t1 = _mm_aesenc_si128(t1, ks[2]);
+		t1 = _mm_aesenc_si128(t1, ks[3]);
+		t1 = _mm_aesenc_si128(t1, ks[4]);
+		t1 = _mm_aesenc_si128(t1, ks[5]);
+		t1 = _mm_aesenc_si128(t1, ks[6]);
+		t1 = _mm_aesenc_si128(t1, ks[7]);
+		t1 = _mm_aesenc_si128(t1, ks[8]);
+		t1 = _mm_aesenc_si128(t1, ks[9]);
+		t1 = _mm_aesenc_si128(t1, ks[10]);
+		t1 = _mm_aesenc_si128(t1, ks[11]);
+
+		t1 = _mm_aesenclast_si128(t1, ks[12]);
+		t1 = _mm_xor_si128(t1, d1);
+		_mm_storeu_si128(bo + i, t1);
+	}
+
+	if (rem)
+	{
+		/* partial tail: stage through a zero-padded stack block to avoid
+		 * out-of-bounds loads/stores */
+		memset(&b, 0, sizeof(b));
+		memcpy(&b, bi + blocks, rem);
+
+		d1 = _mm_loadu_si128(&b);
+		t1 = _mm_xor_si128(state, ks[0]);
+
+		t1 = _mm_aesenc_si128(t1, ks[1]);
+		t1 = _mm_aesenc_si128(t1, ks[2]);
+		t1 = _mm_aesenc_si128(t1, ks[3]);
+		t1 = _mm_aesenc_si128(t1, ks[4]);
+		t1 = _mm_aesenc_si128(t1, ks[5]);
+		t1 = _mm_aesenc_si128(t1, ks[6]);
+		t1 = _mm_aesenc_si128(t1, ks[7]);
+		t1 = _mm_aesenc_si128(t1, ks[8]);
+		t1 = _mm_aesenc_si128(t1, ks[9]);
+		t1 = _mm_aesenc_si128(t1, ks[10]);
+		t1 = _mm_aesenc_si128(t1, ks[11]);
+
+		t1 = _mm_aesenclast_si128(t1, ks[12]);
+		t1 = _mm_xor_si128(t1, d1);
+		_mm_storeu_si128(&b, t1);
+
+		memcpy(bo + blocks, &b, rem);
+	}
+}
+
+/**
+ * AES-256 CTR encryption
+ */
+static void encrypt_ctr256(private_aesni_ctr_t *this,
+ size_t len, u_char *in, u_char *out)
+{
+ __m128i t1, t2, t3, t4;
+ __m128i d1, d2, d3, d4;
+ __m128i *ks, state, b, *bi, *bo;
+ u_int i, blocks, pblocks, rem;
+
+ state = _mm_load_si128((__m128i*)&this->state);
+ blocks = len / AES_BLOCK_SIZE;
+ pblocks = blocks - (blocks % CTR_CRYPT_PARALLELISM);
+ rem = len % AES_BLOCK_SIZE;
+ bi = (__m128i*)in;
+ bo = (__m128i*)out;
+
+ ks = this->key->schedule;
+
+ for (i = 0; i < pblocks; i += CTR_CRYPT_PARALLELISM)
+ {
+ d1 = _mm_loadu_si128(bi + i + 0);
+ d2 = _mm_loadu_si128(bi + i + 1);
+ d3 = _mm_loadu_si128(bi + i + 2);
+ d4 = _mm_loadu_si128(bi + i + 3);
+
+ t1 = _mm_xor_si128(state, ks[0]);
+ state = increment_be(state);
+ t2 = _mm_xor_si128(state, ks[0]);
+ state = increment_be(state);
+ t3 = _mm_xor_si128(state, ks[0]);
+ state = increment_be(state);
+ t4 = _mm_xor_si128(state, ks[0]);
+ state = increment_be(state);
+
+ t1 = _mm_aesenc_si128(t1, ks[1]);
+ t2 = _mm_aesenc_si128(t2, ks[1]);
+ t3 = _mm_aesenc_si128(t3, ks[1]);
+ t4 = _mm_aesenc_si128(t4, ks[1]);
+ t1 = _mm_aesenc_si128(t1, ks[2]);
+ t2 = _mm_aesenc_si128(t2, ks[2]);
+ t3 = _mm_aesenc_si128(t3, ks[2]);
+ t4 = _mm_aesenc_si128(t4, ks[2]);
+ t1 = _mm_aesenc_si128(t1, ks[3]);
+ t2 = _mm_aesenc_si128(t2, ks[3]);
+ t3 = _mm_aesenc_si128(t3, ks[3]);
+ t4 = _mm_aesenc_si128(t4, ks[3]);
+ t1 = _mm_aesenc_si128(t1, ks[4]);
+ t2 = _mm_aesenc_si128(t2, ks[4]);
+ t3 = _mm_aesenc_si128(t3, ks[4]);
+ t4 = _mm_aesenc_si128(t4, ks[4]);
+ t1 = _mm_aesenc_si128(t1, ks[5]);
+ t2 = _mm_aesenc_si128(t2, ks[5]);
+ t3 = _mm_aesenc_si128(t3, ks[5]);
+ t4 = _mm_aesenc_si128(t4, ks[5]);
+ t1 = _mm_aesenc_si128(t1, ks[6]);
+ t2 = _mm_aesenc_si128(t2, ks[6]);
+ t3 = _mm_aesenc_si128(t3, ks[6]);
+ t4 = _mm_aesenc_si128(t4, ks[6]);
+ t1 = _mm_aesenc_si128(t1, ks[7]);
+ t2 = _mm_aesenc_si128(t2, ks[7]);
+ t3 = _mm_aesenc_si128(t3, ks[7]);
+ t4 = _mm_aesenc_si128(t4, ks[7]);
+ t1 = _mm_aesenc_si128(t1, ks[8]);
+ t2 = _mm_aesenc_si128(t2, ks[8]);
+ t3 = _mm_aesenc_si128(t3, ks[8]);
+ t4 = _mm_aesenc_si128(t4, ks[8]);
+ t1 = _mm_aesenc_si128(t1, ks[9]);
+ t2 = _mm_aesenc_si128(t2, ks[9]);
+ t3 = _mm_aesenc_si128(t3, ks[9]);
+ t4 = _mm_aesenc_si128(t4, ks[9]);
+ t1 = _mm_aesenc_si128(t1, ks[10]);
+ t2 = _mm_aesenc_si128(t2, ks[10]);
+ t3 = _mm_aesenc_si128(t3, ks[10]);
+ t4 = _mm_aesenc_si128(t4, ks[10]);
+ t1 = _mm_aesenc_si128(t1, ks[11]);
+ t2 = _mm_aesenc_si128(t2, ks[11]);
+ t3 = _mm_aesenc_si128(t3, ks[11]);
+ t4 = _mm_aesenc_si128(t4, ks[11]);
+ t1 = _mm_aesenc_si128(t1, ks[12]);
+ t2 = _mm_aesenc_si128(t2, ks[12]);
+ t3 = _mm_aesenc_si128(t3, ks[12]);
+ t4 = _mm_aesenc_si128(t4, ks[12]);
+ t1 = _mm_aesenc_si128(t1, ks[13]);
+ t2 = _mm_aesenc_si128(t2, ks[13]);
+ t3 = _mm_aesenc_si128(t3, ks[13]);
+ t4 = _mm_aesenc_si128(t4, ks[13]);
+
+ t1 = _mm_aesenclast_si128(t1, ks[14]);
+ t2 = _mm_aesenclast_si128(t2, ks[14]);
+ t3 = _mm_aesenclast_si128(t3, ks[14]);
+ t4 = _mm_aesenclast_si128(t4, ks[14]);
+ t1 = _mm_xor_si128(t1, d1);
+ t2 = _mm_xor_si128(t2, d2);
+ t3 = _mm_xor_si128(t3, d3);
+ t4 = _mm_xor_si128(t4, d4);
+ _mm_storeu_si128(bo + i + 0, t1);
+ _mm_storeu_si128(bo + i + 1, t2);
+ _mm_storeu_si128(bo + i + 2, t3);
+ _mm_storeu_si128(bo + i + 3, t4);
+ }
+
+ for (i = pblocks; i < blocks; i++)
+ {
+ d1 = _mm_loadu_si128(bi + i);
+
+ t1 = _mm_xor_si128(state, ks[0]);
+ state = increment_be(state);
+
+ t1 = _mm_aesenc_si128(t1, ks[1]);
+ t1 = _mm_aesenc_si128(t1, ks[2]);
+ t1 = _mm_aesenc_si128(t1, ks[3]);
+ t1 = _mm_aesenc_si128(t1, ks[4]);
+ t1 = _mm_aesenc_si128(t1, ks[5]);
+ t1 = _mm_aesenc_si128(t1, ks[6]);
+ t1 = _mm_aesenc_si128(t1, ks[7]);
+ t1 = _mm_aesenc_si128(t1, ks[8]);
+ t1 = _mm_aesenc_si128(t1, ks[9]);
+ t1 = _mm_aesenc_si128(t1, ks[10]);
+ t1 = _mm_aesenc_si128(t1, ks[11]);
+ t1 = _mm_aesenc_si128(t1, ks[12]);
+ t1 = _mm_aesenc_si128(t1, ks[13]);
+
+ t1 = _mm_aesenclast_si128(t1, ks[14]);
+ t1 = _mm_xor_si128(t1, d1);
+ _mm_storeu_si128(bo + i, t1);
+ }
+
+ if (rem)
+ {
+ memset(&b, 0, sizeof(b));
+ memcpy(&b, bi + blocks, rem);
+
+ d1 = _mm_loadu_si128(&b);
+ t1 = _mm_xor_si128(state, ks[0]);
+
+ t1 = _mm_aesenc_si128(t1, ks[1]);
+ t1 = _mm_aesenc_si128(t1, ks[2]);
+ t1 = _mm_aesenc_si128(t1, ks[3]);
+ t1 = _mm_aesenc_si128(t1, ks[4]);
+ t1 = _mm_aesenc_si128(t1, ks[5]);
+ t1 = _mm_aesenc_si128(t1, ks[6]);
+ t1 = _mm_aesenc_si128(t1, ks[7]);
+ t1 = _mm_aesenc_si128(t1, ks[8]);
+ t1 = _mm_aesenc_si128(t1, ks[9]);
+ t1 = _mm_aesenc_si128(t1, ks[10]);
+ t1 = _mm_aesenc_si128(t1, ks[11]);
+ t1 = _mm_aesenc_si128(t1, ks[12]);
+ t1 = _mm_aesenc_si128(t1, ks[13]);
+
+ t1 = _mm_aesenclast_si128(t1, ks[14]);
+ t1 = _mm_xor_si128(t1, d1);
+ _mm_storeu_si128(&b, t1);
+
+ memcpy(bo + blocks, &b, rem);
+ }
+}
+
+/**
+ * En-/decrypt data in CTR mode. CTR is a keystream XOR, so the same
+ * routine backs both the encrypt() and decrypt() crypter methods.
+ */
+METHOD(crypter_t, crypt, bool,
+	private_aesni_ctr_t *this, chunk_t in, chunk_t iv, chunk_t *out)
+{
+	u_char *buf;
+
+	/* require a configured key and an IV of exactly the expected size */
+	if (!this->key || iv.len != sizeof(this->state.iv))
+	{
+		return FALSE;
+	}
+	memcpy(this->state.iv, iv.ptr, sizeof(this->state.iv));
+	/* (re)start the big-endian block counter at 1 for every message */
+	this->state.counter = htonl(1);
+
+	/* write to a freshly allocated buffer if requested, else in-place */
+	buf = in.ptr;
+	if (out)
+	{
+		*out = chunk_alloc(in.len);
+		buf = out->ptr;
+	}
+	this->crypt(this, in.len, in.ptr, buf);
+	return TRUE;
+}
+
+METHOD(crypter_t, get_block_size, size_t,
+	private_aesni_ctr_t *this)
+{
+	/* CTR turns AES into a stream cipher; no padding to a block required */
+	return 1;
+}
+
+METHOD(crypter_t, get_iv_size, size_t,
+	private_aesni_ctr_t *this)
+{
+	/* size of the explicit per-message IV part of the counter state */
+	return sizeof(this->state.iv);
+}
+
+METHOD(crypter_t, get_key_size, size_t,
+	private_aesni_ctr_t *this)
+{
+	/* keymat is the AES key followed by the implicit nonce (salt) */
+	return this->key_size + sizeof(this->state.nonce);
+}
+
+METHOD(crypter_t, set_key, bool,
+	private_aesni_ctr_t *this, chunk_t key)
+{
+	if (key.len != get_key_size(this))
+	{
+		return FALSE;
+	}
+
+	/* the trailing bytes of the keymat are the nonce stored in the
+	 * counter state; the leading bytes are the actual AES key */
+	memcpy(this->state.nonce, key.ptr + key.len - sizeof(this->state.nonce),
+		   sizeof(this->state.nonce));
+	key.len -= sizeof(this->state.nonce);
+
+	DESTROY_IF(this->key);
+	/* CTR uses the AES encryption direction only, hence TRUE */
+	this->key = aesni_key_create(TRUE, key);
+
+	/* non-NULL schedule pointer converts to TRUE on success */
+	return this->key;
+}
+
+METHOD(crypter_t, destroy, void,
+	private_aesni_ctr_t *this)
+{
+	DESTROY_IF(this->key);
+	/* instance was allocated with INIT_ALIGN(), release with free_align() */
+	free_align(this);
+}
+
+/**
+ * See header
+ *
+ * Creates an AES-CTR crypter instance, 16-byte aligned so the __m128i
+ * members may be accessed directly. Only ENCR_AES_CTR with 128/192/256-bit
+ * keys is supported; a key_size of 0 selects the AES-128 default.
+ */
+aesni_ctr_t *aesni_ctr_create(encryption_algorithm_t algo, size_t key_size)
+{
+	private_aesni_ctr_t *this;
+
+	if (algo != ENCR_AES_CTR)
+	{
+		return NULL;
+	}
+	switch (key_size)
+	{
+		case 0:
+			/* default to AES-128 if no explicit key size requested */
+			key_size = 16;
+			break;
+		case 16:
+		case 24:
+		case 32:
+			break;
+		default:
+			return NULL;
+	}
+
+	INIT_ALIGN(this, sizeof(__m128i),
+		.public = {
+			.crypter = {
+				.encrypt = _crypt,
+				.decrypt = _crypt,
+				.get_block_size = _get_block_size,
+				.get_iv_size = _get_iv_size,
+				.get_key_size = _get_key_size,
+				.set_key = _set_key,
+				.destroy = _destroy,
+			},
+		},
+		.key_size = key_size,
+	);
+
+	/* select the key-size specific unrolled CTR routine */
+	switch (key_size)
+	{
+		case 16:
+			this->crypt = encrypt_ctr128;
+			break;
+		case 24:
+			this->crypt = encrypt_ctr192;
+			break;
+		case 32:
+			this->crypt = encrypt_ctr256;
+			break;
+	}
+
+	return &this->public;
+}
diff --git a/src/libstrongswan/plugins/aesni/aesni_ctr.h b/src/libstrongswan/plugins/aesni/aesni_ctr.h
new file mode 100644
index 000000000..6126a2c75
--- /dev/null
+++ b/src/libstrongswan/plugins/aesni/aesni_ctr.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2015 Martin Willi
+ * Copyright (C) 2015 revosec AG
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+/**
+ * @defgroup aesni_ctr aesni_ctr
+ * @{ @ingroup aesni
+ */
+
+#ifndef AESNI_CTR_H_
+#define AESNI_CTR_H_
+
+#include <library.h>
+
+typedef struct aesni_ctr_t aesni_ctr_t;
+
+/**
+ * CTR mode crypter using AES-NI
+ */
+struct aesni_ctr_t {
+
+ /**
+ * Implements crypter interface
+ */
+ crypter_t crypter;
+};
+
+/**
+ * Create a aesni_ctr instance.
+ *
+ * @param algo			encryption algorithm, ENCR_AES_CTR
+ * @param key_size AES key size, in bytes
+ * @return AES-CTR crypter, NULL if not supported
+ */
+aesni_ctr_t *aesni_ctr_create(encryption_algorithm_t algo, size_t key_size);
+
+#endif /** AESNI_CTR_H_ @}*/
diff --git a/src/libstrongswan/plugins/aesni/aesni_gcm.c b/src/libstrongswan/plugins/aesni/aesni_gcm.c
new file mode 100644
index 000000000..53c0b144e
--- /dev/null
+++ b/src/libstrongswan/plugins/aesni/aesni_gcm.c
@@ -0,0 +1,1447 @@
+/*
+ * Copyright (C) 2015 Martin Willi
+ * Copyright (C) 2015 revosec AG
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include "aesni_gcm.h"
+#include "aesni_key.h"
+
+#include <crypto/iv/iv_gen_seq.h>
+
+#include <tmmintrin.h>
+
+#define NONCE_SIZE 12
+#define IV_SIZE 8
+#define SALT_SIZE (NONCE_SIZE - IV_SIZE)
+
+/**
+ * Parallel pipelining
+ */
+#define GCM_CRYPT_PARALLELISM 4
+
+typedef struct private_aesni_gcm_t private_aesni_gcm_t;
+
+/**
+ * GCM en/decryption method type
+ */
+typedef void (*aesni_gcm_fn_t)(private_aesni_gcm_t*, size_t, u_char*, u_char*,
+ u_char*, size_t, u_char*, u_char*);
+
+/**
+ * Private data of an aesni_gcm_t object.
+ */
+struct private_aesni_gcm_t {
+
+ /**
+ * Public aesni_gcm_t interface.
+ */
+ aesni_gcm_t public;
+
+ /**
+ * Encryption key schedule
+ */
+ aesni_key_t *key;
+
+ /**
+ * IV generator.
+ */
+ iv_gen_t *iv_gen;
+
+ /**
+ * Length of the integrity check value
+ */
+ size_t icv_size;
+
+ /**
+ * Length of the key in bytes
+ */
+ size_t key_size;
+
+ /**
+ * GCM encryption function
+ */
+ aesni_gcm_fn_t encrypt;
+
+ /**
+ * GCM decryption function
+ */
+ aesni_gcm_fn_t decrypt;
+
+ /**
+ * salt to add to nonce
+ */
+ u_char salt[SALT_SIZE];
+
+ /**
+ * GHASH subkey H, big-endian
+ */
+ __m128i h;
+
+ /**
+ * GHASH key H^2, big-endian
+ */
+ __m128i hh;
+
+ /**
+ * GHASH key H^3, big-endian
+ */
+ __m128i hhh;
+
+ /**
+ * GHASH key H^4, big-endian
+ */
+ __m128i hhhh;
+};
+
+/**
+ * Byte-swap a 128-bit integer, i.e. reverse all 16 bytes to convert
+ * between the little-endian SSE register view and GHASH's big-endian
+ * bit ordering.
+ */
+static inline __m128i swap128(__m128i x)
+{
+	return _mm_shuffle_epi8(x,
+			_mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15));
+}
+
+/**
+ * Multiply two blocks in GF(2^128) as defined for GHASH.
+ *
+ * h is expected pre-swapped (big-endian), y in register (little-endian)
+ * order; the result is swapped back before returning.
+ */
+static __m128i mult_block(__m128i h, __m128i y)
+{
+	__m128i t1, t2, t3, t4, t5, t6;
+
+	y = swap128(y);
+
+	/* 64x64 carry-less multiplies of all four half-block combinations */
+	t1 = _mm_clmulepi64_si128(h, y, 0x00);
+	t2 = _mm_clmulepi64_si128(h, y, 0x01);
+	t3 = _mm_clmulepi64_si128(h, y, 0x10);
+	t4 = _mm_clmulepi64_si128(h, y, 0x11);
+
+	/* combine the cross products into the 256-bit result t4:t1 */
+	t2 = _mm_xor_si128(t2, t3);
+	t3 = _mm_slli_si128(t2, 8);
+	t2 = _mm_srli_si128(t2, 8);
+	t1 = _mm_xor_si128(t1, t3);
+	t4 = _mm_xor_si128(t4, t2);
+
+	/* shift the 256-bit product left by one bit (bit-reflected domain) */
+	t5 = _mm_srli_epi32(t1, 31);
+	t1 = _mm_slli_epi32(t1, 1);
+	t6 = _mm_srli_epi32(t4, 31);
+	t4 = _mm_slli_epi32(t4, 1);
+
+	t3 = _mm_srli_si128(t5, 12);
+	t6 = _mm_slli_si128(t6, 4);
+	t5 = _mm_slli_si128(t5, 4);
+	t1 = _mm_or_si128(t1, t5);
+	t4 = _mm_or_si128(t4, t6);
+	t4 = _mm_or_si128(t4, t3);
+
+	/* reduce modulo the GHASH polynomial x^128 + x^7 + x^2 + x + 1,
+	 * implemented via shifts by 31/30/25 and 1/2/7 */
+	t5 = _mm_slli_epi32(t1, 31);
+	t6 = _mm_slli_epi32(t1, 30);
+	t3 = _mm_slli_epi32(t1, 25);
+
+	t5 = _mm_xor_si128(t5, t6);
+	t5 = _mm_xor_si128(t5, t3);
+	t6 = _mm_srli_si128(t5, 4);
+	t4 = _mm_xor_si128(t4, t6);
+	t5 = _mm_slli_si128(t5, 12);
+	t1 = _mm_xor_si128(t1, t5);
+	t4 = _mm_xor_si128(t4, t1);
+
+	t5 = _mm_srli_epi32(t1, 1);
+	t2 = _mm_srli_epi32(t1, 2);
+	t3 = _mm_srli_epi32(t1, 7);
+	t4 = _mm_xor_si128(t4, t2);
+	t4 = _mm_xor_si128(t4, t3);
+	t4 = _mm_xor_si128(t4, t5);
+
+	return swap128(t4);
+}
+
+/**
+ * Multiply four consecutive blocks by their respective GHASH key, XOR.
+ *
+ * Computes d1*h1 + d2*h2 + d3*h3 + d4*h4 in GF(2^128) with a single
+ * final reduction (aggregated reduction); the h arguments are the
+ * pre-swapped powers H^4..H^1 of the hash subkey.
+ */
+static inline __m128i mult4xor(__m128i h1, __m128i h2, __m128i h3, __m128i h4,
+							   __m128i d1, __m128i d2, __m128i d3, __m128i d4)
+{
+	__m128i t0, t1, t2, t3, t4, t5, t6, t7, t8, t9;
+
+	d1 = swap128(d1);
+	d2 = swap128(d2);
+	d3 = swap128(d3);
+	d4 = swap128(d4);
+
+	/* sum of the four low 64x64 products */
+	t0 = _mm_clmulepi64_si128(h1, d1, 0x00);
+	t1 = _mm_clmulepi64_si128(h2, d2, 0x00);
+	t2 = _mm_clmulepi64_si128(h3, d3, 0x00);
+	t3 = _mm_clmulepi64_si128(h4, d4, 0x00);
+	t8 = _mm_xor_si128(t0, t1);
+	t8 = _mm_xor_si128(t8, t2);
+	t8 = _mm_xor_si128(t8, t3);
+
+	/* sum of the four high 64x64 products */
+	t4 = _mm_clmulepi64_si128(h1, d1, 0x11);
+	t5 = _mm_clmulepi64_si128(h2, d2, 0x11);
+	t6 = _mm_clmulepi64_si128(h3, d3, 0x11);
+	t7 = _mm_clmulepi64_si128(h4, d4, 0x11);
+	t9 = _mm_xor_si128(t4, t5);
+	t9 = _mm_xor_si128(t9, t6);
+	t9 = _mm_xor_si128(t9, t7);
+
+	/* middle terms via (hi^lo)*(hi^lo), Karatsuba-style; shuffle 78
+	 * swaps the two 64-bit halves of a register */
+	t0 = _mm_shuffle_epi32(h1, 78);
+	t4 = _mm_shuffle_epi32(d1, 78);
+	t0 = _mm_xor_si128(t0, h1);
+	t4 = _mm_xor_si128(t4, d1);
+	t1 = _mm_shuffle_epi32(h2, 78);
+	t5 = _mm_shuffle_epi32(d2, 78);
+	t1 = _mm_xor_si128(t1, h2);
+	t5 = _mm_xor_si128(t5, d2);
+	t2 = _mm_shuffle_epi32(h3, 78);
+	t6 = _mm_shuffle_epi32(d3, 78);
+	t2 = _mm_xor_si128(t2, h3);
+	t6 = _mm_xor_si128(t6, d3);
+	t3 = _mm_shuffle_epi32(h4, 78);
+	t7 = _mm_shuffle_epi32(d4, 78);
+	t3 = _mm_xor_si128(t3, h4);
+	t7 = _mm_xor_si128(t7, d4);
+
+	t0 = _mm_clmulepi64_si128(t0, t4, 0x00);
+	t1 = _mm_clmulepi64_si128(t1, t5, 0x00);
+	t2 = _mm_clmulepi64_si128(t2, t6, 0x00);
+	t3 = _mm_clmulepi64_si128(t3, t7, 0x00);
+	t0 = _mm_xor_si128(t0, t8);
+	t0 = _mm_xor_si128(t0, t9);
+	t0 = _mm_xor_si128(t1, t0);
+	t0 = _mm_xor_si128(t2, t0);
+
+	/* fold the middle terms into the 256-bit sum t6:t3 */
+	t0 = _mm_xor_si128(t3, t0);
+	t4 = _mm_slli_si128(t0, 8);
+	t0 = _mm_srli_si128(t0, 8);
+	t3 = _mm_xor_si128(t4, t8);
+	t6 = _mm_xor_si128(t0, t9);
+	/* single bit-shift and reduction modulo the GHASH polynomial,
+	 * same scheme as in mult_block() */
+	t7 = _mm_srli_epi32(t3, 31);
+	t8 = _mm_srli_epi32(t6, 31);
+	t3 = _mm_slli_epi32(t3, 1);
+	t6 = _mm_slli_epi32(t6, 1);
+	t9 = _mm_srli_si128(t7, 12);
+	t8 = _mm_slli_si128(t8, 4);
+	t7 = _mm_slli_si128(t7, 4);
+	t3 = _mm_or_si128(t3, t7);
+	t6 = _mm_or_si128(t6, t8);
+	t6 = _mm_or_si128(t6, t9);
+	t7 = _mm_slli_epi32(t3, 31);
+	t8 = _mm_slli_epi32(t3, 30);
+	t9 = _mm_slli_epi32(t3, 25);
+	t7 = _mm_xor_si128(t7, t8);
+	t7 = _mm_xor_si128(t7, t9);
+	t8 = _mm_srli_si128(t7, 4);
+	t7 = _mm_slli_si128(t7, 12);
+	t3 = _mm_xor_si128(t3, t7);
+	t2 = _mm_srli_epi32(t3, 1);
+	t4 = _mm_srli_epi32(t3, 2);
+	t5 = _mm_srli_epi32(t3, 7);
+	t2 = _mm_xor_si128(t2, t4);
+	t2 = _mm_xor_si128(t2, t5);
+	t2 = _mm_xor_si128(t2, t8);
+	t3 = _mm_xor_si128(t3, t2);
+	t6 = _mm_xor_si128(t6, t3);
+
+	return swap128(t6);
+}
+
+/**
+ * GHASH on a single block: fold x into the accumulator y and multiply
+ * by the (pre-swapped) hash subkey h.
+ */
+static __m128i ghash(__m128i h, __m128i y, __m128i x)
+{
+	return mult_block(h, _mm_xor_si128(y, x));
+}
+
+/**
+ * Start constructing the ICV for the associated data.
+ *
+ * GHASHes alen bytes of assoc (zero-padding a trailing partial block)
+ * and returns the initial hash accumulator Y.
+ */
+static __m128i icv_header(private_aesni_gcm_t *this, void *assoc, size_t alen)
+{
+	u_int blocks, pblocks, rem, i;
+	__m128i h1, h2, h3, h4, d1, d2, d3, d4;
+	__m128i y, last, *ab;
+
+	/* hash key powers H^4..H^1 for four-block aggregated reduction */
+	h1 = this->hhhh;
+	h2 = this->hhh;
+	h3 = this->hh;
+	h4 = this->h;
+
+	y = _mm_setzero_si128();
+	ab = assoc;
+	blocks = alen / AES_BLOCK_SIZE;
+	pblocks = blocks - (blocks % GCM_CRYPT_PARALLELISM);
+	rem = alen % AES_BLOCK_SIZE;
+	/* four blocks at a time with a single reduction */
+	for (i = 0; i < pblocks; i += GCM_CRYPT_PARALLELISM)
+	{
+		d1 = _mm_loadu_si128(ab + i + 0);
+		d2 = _mm_loadu_si128(ab + i + 1);
+		d3 = _mm_loadu_si128(ab + i + 2);
+		d4 = _mm_loadu_si128(ab + i + 3);
+		y = _mm_xor_si128(y, d1);
+		y = mult4xor(h1, h2, h3, h4, y, d2, d3, d4);
+	}
+	/* remaining full blocks one at a time */
+	for (i = pblocks; i < blocks; i++)
+	{
+		y = ghash(this->h, y, _mm_loadu_si128(ab + i));
+	}
+	/* trailing partial block, zero-padded */
+	if (rem)
+	{
+		last = _mm_setzero_si128();
+		memcpy(&last, ab + blocks, rem);
+
+		y = ghash(this->h, y, last);
+	}
+
+	return y;
+}
+
+/**
+ * Complete the ICV by hashing a assoc/data length block.
+ *
+ * The final GHASH block holds the associated-data and payload lengths
+ * in bits, each as a 64-bit big-endian value.
+ */
+static __m128i icv_tailer(private_aesni_gcm_t *this, __m128i y,
+						  size_t alen, size_t dlen)
+{
+	__m128i b;
+
+	htoun64(&b, alen * 8);
+	htoun64((u_char*)&b + sizeof(u_int64_t), dlen * 8);
+
+	return ghash(this->h, y, b);
+}
+
+/**
+ * En-/Decrypt the ICV, trim and store it.
+ *
+ * Encrypts the pre-counter block J0 with the full AES key schedule,
+ * XORs it with the GHASH result y and copies the first icv_size bytes
+ * to icv (GCM tag truncation).
+ */
+static void icv_crypt(private_aesni_gcm_t *this, __m128i y, __m128i j,
+					  u_char *icv)
+{
+	__m128i *ks, t, b;
+	u_int round;
+
+	/* generic (non-unrolled) AES encryption of J0, any key size */
+	ks = this->key->schedule;
+	t = _mm_xor_si128(j, ks[0]);
+	for (round = 1; round < this->key->rounds; round++)
+	{
+		t = _mm_aesenc_si128(t, ks[round]);
+	}
+	t = _mm_aesenclast_si128(t, ks[this->key->rounds]);
+
+	t = _mm_xor_si128(y, t);
+
+	_mm_storeu_si128(&b, t);
+	memcpy(icv, &b, this->icv_size);
+}
+
+/**
+ * Do big-endian increment on x.
+ *
+ * NOTE(review): the add affects the low 64-bit lane only (no carry into
+ * the upper half) — sufficient here, as GCM counters never span 2^64
+ * blocks within a message.
+ */
+static inline __m128i increment_be(__m128i x)
+{
+	x = swap128(x);
+	x = _mm_add_epi64(x, _mm_set_epi32(0, 0, 0, 1));
+	x = swap128(x);
+
+	return x;
+}
+
+/**
+ * Generate the block J0: salt | explicit IV | counter, with the 32-bit
+ * counter initialized to 1.
+ */
+static inline __m128i create_j(private_aesni_gcm_t *this, u_char *iv)
+{
+	u_char j[AES_BLOCK_SIZE];
+
+	memcpy(j, this->salt, SALT_SIZE);
+	memcpy(j + SALT_SIZE, iv, IV_SIZE);
+	htoun32(j + SALT_SIZE + IV_SIZE, 1);
+
+	return _mm_loadu_si128((__m128i*)j);
+}
+
+/**
+ * Encrypt a remaining incomplete block, return updated Y.
+ *
+ * Encrypts rem < 16 plaintext bytes with the counter block cb and folds
+ * the zero-padded ciphertext into the GHASH accumulator.
+ */
+static __m128i encrypt_gcm_rem(private_aesni_gcm_t *this, u_int rem,
+							   void *in, void *out, __m128i cb, __m128i y)
+{
+	__m128i *ks, t, b;
+	u_int round;
+
+	memset(&b, 0, sizeof(b));
+	memcpy(&b, in, rem);
+
+	/* generic AES encryption of the counter block, any key size */
+	ks = this->key->schedule;
+	t = _mm_xor_si128(cb, ks[0]);
+	for (round = 1; round < this->key->rounds; round++)
+	{
+		t = _mm_aesenc_si128(t, ks[round]);
+	}
+	t = _mm_aesenclast_si128(t, ks[this->key->rounds]);
+	b = _mm_xor_si128(t, b);
+
+	memcpy(out, &b, rem);
+
+	/* hash only the rem ciphertext bytes; clear the keystream tail */
+	memset((u_char*)&b + rem, 0, AES_BLOCK_SIZE - rem);
+	return ghash(this->h, y, b);
+}
+
+/**
+ * Decrypt a remaining incomplete block, return updated Y.
+ *
+ * Counterpart to encrypt_gcm_rem(): the zero-padded ciphertext is
+ * hashed before decryption, as GHASH runs over the ciphertext.
+ */
+static __m128i decrypt_gcm_rem(private_aesni_gcm_t *this, u_int rem,
+							   void *in, void *out, __m128i cb, __m128i y)
+{
+	__m128i *ks, t, b;
+	u_int round;
+
+	memset(&b, 0, sizeof(b));
+	memcpy(&b, in, rem);
+
+	y = ghash(this->h, y, b);
+
+	/* generic AES encryption of the counter block, any key size */
+	ks = this->key->schedule;
+	t = _mm_xor_si128(cb, ks[0]);
+	for (round = 1; round < this->key->rounds; round++)
+	{
+		t = _mm_aesenc_si128(t, ks[round]);
+	}
+	t = _mm_aesenclast_si128(t, ks[this->key->rounds]);
+	b = _mm_xor_si128(t, b);
+
+	memcpy(out, &b, rem);
+
+	return y;
+}
+
+/**
+ * AES-128 GCM encryption/ICV generation.
+ *
+ * Encrypts len bytes from in to out in counter mode (10 unrolled AES
+ * rounds), folds the ciphertext into GHASH on the fly, then finalizes
+ * and writes the (truncated) ICV covering assoc and the ciphertext.
+ */
+static void encrypt_gcm128(private_aesni_gcm_t *this,
+						   size_t len, u_char *in, u_char *out, u_char *iv,
+						   size_t alen, u_char *assoc, u_char *icv)
+{
+	__m128i d1, d2, d3, d4, t1, t2, t3, t4;
+	__m128i *ks, y, j, cb, *bi, *bo;
+	u_int blocks, pblocks, rem, i;
+
+	j = create_j(this, iv);
+	/* cb is the running counter; J0 itself is reserved for the ICV */
+	cb = increment_be(j);
+	y = icv_header(this, assoc, alen);
+	blocks = len / AES_BLOCK_SIZE;
+	pblocks = blocks - (blocks % GCM_CRYPT_PARALLELISM);
+	rem = len % AES_BLOCK_SIZE;
+	bi = (__m128i*)in;
+	bo = (__m128i*)out;
+
+	ks = this->key->schedule;
+
+	/* four blocks per iteration: interleaved keystream generation to
+	 * keep the AES-NI pipeline busy, then XOR and aggregated GHASH */
+	for (i = 0; i < pblocks; i += GCM_CRYPT_PARALLELISM)
+	{
+		d1 = _mm_loadu_si128(bi + i + 0);
+		d2 = _mm_loadu_si128(bi + i + 1);
+		d3 = _mm_loadu_si128(bi + i + 2);
+		d4 = _mm_loadu_si128(bi + i + 3);
+
+		t1 = _mm_xor_si128(cb, ks[0]);
+		cb = increment_be(cb);
+		t2 = _mm_xor_si128(cb, ks[0]);
+		cb = increment_be(cb);
+		t3 = _mm_xor_si128(cb, ks[0]);
+		cb = increment_be(cb);
+		t4 = _mm_xor_si128(cb, ks[0]);
+		cb = increment_be(cb);
+
+		t1 = _mm_aesenc_si128(t1, ks[1]);
+		t2 = _mm_aesenc_si128(t2, ks[1]);
+		t3 = _mm_aesenc_si128(t3, ks[1]);
+		t4 = _mm_aesenc_si128(t4, ks[1]);
+		t1 = _mm_aesenc_si128(t1, ks[2]);
+		t2 = _mm_aesenc_si128(t2, ks[2]);
+		t3 = _mm_aesenc_si128(t3, ks[2]);
+		t4 = _mm_aesenc_si128(t4, ks[2]);
+		t1 = _mm_aesenc_si128(t1, ks[3]);
+		t2 = _mm_aesenc_si128(t2, ks[3]);
+		t3 = _mm_aesenc_si128(t3, ks[3]);
+		t4 = _mm_aesenc_si128(t4, ks[3]);
+		t1 = _mm_aesenc_si128(t1, ks[4]);
+		t2 = _mm_aesenc_si128(t2, ks[4]);
+		t3 = _mm_aesenc_si128(t3, ks[4]);
+		t4 = _mm_aesenc_si128(t4, ks[4]);
+		t1 = _mm_aesenc_si128(t1, ks[5]);
+		t2 = _mm_aesenc_si128(t2, ks[5]);
+		t3 = _mm_aesenc_si128(t3, ks[5]);
+		t4 = _mm_aesenc_si128(t4, ks[5]);
+		t1 = _mm_aesenc_si128(t1, ks[6]);
+		t2 = _mm_aesenc_si128(t2, ks[6]);
+		t3 = _mm_aesenc_si128(t3, ks[6]);
+		t4 = _mm_aesenc_si128(t4, ks[6]);
+		t1 = _mm_aesenc_si128(t1, ks[7]);
+		t2 = _mm_aesenc_si128(t2, ks[7]);
+		t3 = _mm_aesenc_si128(t3, ks[7]);
+		t4 = _mm_aesenc_si128(t4, ks[7]);
+		t1 = _mm_aesenc_si128(t1, ks[8]);
+		t2 = _mm_aesenc_si128(t2, ks[8]);
+		t3 = _mm_aesenc_si128(t3, ks[8]);
+		t4 = _mm_aesenc_si128(t4, ks[8]);
+		t1 = _mm_aesenc_si128(t1, ks[9]);
+		t2 = _mm_aesenc_si128(t2, ks[9]);
+		t3 = _mm_aesenc_si128(t3, ks[9]);
+		t4 = _mm_aesenc_si128(t4, ks[9]);
+
+		t1 = _mm_aesenclast_si128(t1, ks[10]);
+		t2 = _mm_aesenclast_si128(t2, ks[10]);
+		t3 = _mm_aesenclast_si128(t3, ks[10]);
+		t4 = _mm_aesenclast_si128(t4, ks[10]);
+
+		t1 = _mm_xor_si128(t1, d1);
+		t2 = _mm_xor_si128(t2, d2);
+		t3 = _mm_xor_si128(t3, d3);
+		t4 = _mm_xor_si128(t4, d4);
+
+		/* GHASH over the freshly produced ciphertext blocks */
+		y = _mm_xor_si128(y, t1);
+		y = mult4xor(this->hhhh, this->hhh, this->hh, this->h, y, t2, t3, t4);
+
+		_mm_storeu_si128(bo + i + 0, t1);
+		_mm_storeu_si128(bo + i + 1, t2);
+		_mm_storeu_si128(bo + i + 2, t3);
+		_mm_storeu_si128(bo + i + 3, t4);
+	}
+
+	/* remaining full blocks, serially */
+	for (i = pblocks; i < blocks; i++)
+	{
+		d1 = _mm_loadu_si128(bi + i);
+
+		t1 = _mm_xor_si128(cb, ks[0]);
+		t1 = _mm_aesenc_si128(t1, ks[1]);
+		t1 = _mm_aesenc_si128(t1, ks[2]);
+		t1 = _mm_aesenc_si128(t1, ks[3]);
+		t1 = _mm_aesenc_si128(t1, ks[4]);
+		t1 = _mm_aesenc_si128(t1, ks[5]);
+		t1 = _mm_aesenc_si128(t1, ks[6]);
+		t1 = _mm_aesenc_si128(t1, ks[7]);
+		t1 = _mm_aesenc_si128(t1, ks[8]);
+		t1 = _mm_aesenc_si128(t1, ks[9]);
+		t1 = _mm_aesenclast_si128(t1, ks[10]);
+
+		t1 = _mm_xor_si128(t1, d1);
+		_mm_storeu_si128(bo + i, t1);
+
+		y = ghash(this->h, y, t1);
+
+		cb = increment_be(cb);
+	}
+
+	if (rem)
+	{
+		y = encrypt_gcm_rem(this, rem, bi + blocks, bo + blocks, cb, y);
+	}
+	/* append the length block, then encrypt Y with J0 to form the ICV */
+	y = icv_tailer(this, y, alen, len);
+	icv_crypt(this, y, j, icv);
+}
+
+/**
+ * AES-128 GCM decryption/ICV generation.
+ *
+ * Mirror of encrypt_gcm128(): GHASH runs over the ciphertext, so the
+ * input blocks are hashed before being decrypted. The computed ICV is
+ * written to icv; comparison against the received tag happens in the
+ * caller.
+ */
+static void decrypt_gcm128(private_aesni_gcm_t *this,
+						   size_t len, u_char *in, u_char *out, u_char *iv,
+						   size_t alen, u_char *assoc, u_char *icv)
+{
+	__m128i d1, d2, d3, d4, t1, t2, t3, t4;
+	__m128i *ks, y, j, cb, *bi, *bo;
+	u_int blocks, pblocks, rem, i;
+
+	j = create_j(this, iv);
+	cb = increment_be(j);
+	y = icv_header(this, assoc, alen);
+	blocks = len / AES_BLOCK_SIZE;
+	pblocks = blocks - (blocks % GCM_CRYPT_PARALLELISM);
+	rem = len % AES_BLOCK_SIZE;
+	bi = (__m128i*)in;
+	bo = (__m128i*)out;
+
+	ks = this->key->schedule;
+
+	for (i = 0; i < pblocks; i += GCM_CRYPT_PARALLELISM)
+	{
+		d1 = _mm_loadu_si128(bi + i + 0);
+		d2 = _mm_loadu_si128(bi + i + 1);
+		d3 = _mm_loadu_si128(bi + i + 2);
+		d4 = _mm_loadu_si128(bi + i + 3);
+
+		/* hash the ciphertext blocks before decrypting them */
+		y = _mm_xor_si128(y, d1);
+		y = mult4xor(this->hhhh, this->hhh, this->hh, this->h, y, d2, d3, d4);
+
+		t1 = _mm_xor_si128(cb, ks[0]);
+		cb = increment_be(cb);
+		t2 = _mm_xor_si128(cb, ks[0]);
+		cb = increment_be(cb);
+		t3 = _mm_xor_si128(cb, ks[0]);
+		cb = increment_be(cb);
+		t4 = _mm_xor_si128(cb, ks[0]);
+		cb = increment_be(cb);
+
+		t1 = _mm_aesenc_si128(t1, ks[1]);
+		t2 = _mm_aesenc_si128(t2, ks[1]);
+		t3 = _mm_aesenc_si128(t3, ks[1]);
+		t4 = _mm_aesenc_si128(t4, ks[1]);
+		t1 = _mm_aesenc_si128(t1, ks[2]);
+		t2 = _mm_aesenc_si128(t2, ks[2]);
+		t3 = _mm_aesenc_si128(t3, ks[2]);
+		t4 = _mm_aesenc_si128(t4, ks[2]);
+		t1 = _mm_aesenc_si128(t1, ks[3]);
+		t2 = _mm_aesenc_si128(t2, ks[3]);
+		t3 = _mm_aesenc_si128(t3, ks[3]);
+		t4 = _mm_aesenc_si128(t4, ks[3]);
+		t1 = _mm_aesenc_si128(t1, ks[4]);
+		t2 = _mm_aesenc_si128(t2, ks[4]);
+		t3 = _mm_aesenc_si128(t3, ks[4]);
+		t4 = _mm_aesenc_si128(t4, ks[4]);
+		t1 = _mm_aesenc_si128(t1, ks[5]);
+		t2 = _mm_aesenc_si128(t2, ks[5]);
+		t3 = _mm_aesenc_si128(t3, ks[5]);
+		t4 = _mm_aesenc_si128(t4, ks[5]);
+		t1 = _mm_aesenc_si128(t1, ks[6]);
+		t2 = _mm_aesenc_si128(t2, ks[6]);
+		t3 = _mm_aesenc_si128(t3, ks[6]);
+		t4 = _mm_aesenc_si128(t4, ks[6]);
+		t1 = _mm_aesenc_si128(t1, ks[7]);
+		t2 = _mm_aesenc_si128(t2, ks[7]);
+		t3 = _mm_aesenc_si128(t3, ks[7]);
+		t4 = _mm_aesenc_si128(t4, ks[7]);
+		t1 = _mm_aesenc_si128(t1, ks[8]);
+		t2 = _mm_aesenc_si128(t2, ks[8]);
+		t3 = _mm_aesenc_si128(t3, ks[8]);
+		t4 = _mm_aesenc_si128(t4, ks[8]);
+		t1 = _mm_aesenc_si128(t1, ks[9]);
+		t2 = _mm_aesenc_si128(t2, ks[9]);
+		t3 = _mm_aesenc_si128(t3, ks[9]);
+		t4 = _mm_aesenc_si128(t4, ks[9]);
+
+		t1 = _mm_aesenclast_si128(t1, ks[10]);
+		t2 = _mm_aesenclast_si128(t2, ks[10]);
+		t3 = _mm_aesenclast_si128(t3, ks[10]);
+		t4 = _mm_aesenclast_si128(t4, ks[10]);
+
+		t1 = _mm_xor_si128(t1, d1);
+		t2 = _mm_xor_si128(t2, d2);
+		t3 = _mm_xor_si128(t3, d3);
+		t4 = _mm_xor_si128(t4, d4);
+
+		_mm_storeu_si128(bo + i + 0, t1);
+		_mm_storeu_si128(bo + i + 1, t2);
+		_mm_storeu_si128(bo + i + 2, t3);
+		_mm_storeu_si128(bo + i + 3, t4);
+	}
+
+	/* remaining full blocks, serially; hash first, then decrypt */
+	for (i = pblocks; i < blocks; i++)
+	{
+		d1 = _mm_loadu_si128(bi + i);
+
+		y = ghash(this->h, y, d1);
+
+		t1 = _mm_xor_si128(cb, ks[0]);
+		t1 = _mm_aesenc_si128(t1, ks[1]);
+		t1 = _mm_aesenc_si128(t1, ks[2]);
+		t1 = _mm_aesenc_si128(t1, ks[3]);
+		t1 = _mm_aesenc_si128(t1, ks[4]);
+		t1 = _mm_aesenc_si128(t1, ks[5]);
+		t1 = _mm_aesenc_si128(t1, ks[6]);
+		t1 = _mm_aesenc_si128(t1, ks[7]);
+		t1 = _mm_aesenc_si128(t1, ks[8]);
+		t1 = _mm_aesenc_si128(t1, ks[9]);
+		t1 = _mm_aesenclast_si128(t1, ks[10]);
+
+		t1 = _mm_xor_si128(t1, d1);
+		_mm_storeu_si128(bo + i, t1);
+
+		cb = increment_be(cb);
+	}
+
+	if (rem)
+	{
+		y = decrypt_gcm_rem(this, rem, bi + blocks, bo + blocks, cb, y);
+	}
+	y = icv_tailer(this, y, alen, len);
+	icv_crypt(this, y, j, icv);
+}
+
+/**
+ * AES-192 GCM encryption/ICV generation.
+ *
+ * Identical structure to encrypt_gcm128(), unrolled for the 12 rounds
+ * of AES-192 (round keys ks[0]..ks[12]).
+ */
+static void encrypt_gcm192(private_aesni_gcm_t *this,
+						   size_t len, u_char *in, u_char *out, u_char *iv,
+						   size_t alen, u_char *assoc, u_char *icv)
+{
+	__m128i d1, d2, d3, d4, t1, t2, t3, t4;
+	__m128i *ks, y, j, cb, *bi, *bo;
+	u_int blocks, pblocks, rem, i;
+
+	j = create_j(this, iv);
+	cb = increment_be(j);
+	y = icv_header(this, assoc, alen);
+	blocks = len / AES_BLOCK_SIZE;
+	pblocks = blocks - (blocks % GCM_CRYPT_PARALLELISM);
+	rem = len % AES_BLOCK_SIZE;
+	bi = (__m128i*)in;
+	bo = (__m128i*)out;
+
+	ks = this->key->schedule;
+
+	/* four blocks per iteration with interleaved keystream generation */
+	for (i = 0; i < pblocks; i += GCM_CRYPT_PARALLELISM)
+	{
+		d1 = _mm_loadu_si128(bi + i + 0);
+		d2 = _mm_loadu_si128(bi + i + 1);
+		d3 = _mm_loadu_si128(bi + i + 2);
+		d4 = _mm_loadu_si128(bi + i + 3);
+
+		t1 = _mm_xor_si128(cb, ks[0]);
+		cb = increment_be(cb);
+		t2 = _mm_xor_si128(cb, ks[0]);
+		cb = increment_be(cb);
+		t3 = _mm_xor_si128(cb, ks[0]);
+		cb = increment_be(cb);
+		t4 = _mm_xor_si128(cb, ks[0]);
+		cb = increment_be(cb);
+
+		t1 = _mm_aesenc_si128(t1, ks[1]);
+		t2 = _mm_aesenc_si128(t2, ks[1]);
+		t3 = _mm_aesenc_si128(t3, ks[1]);
+		t4 = _mm_aesenc_si128(t4, ks[1]);
+		t1 = _mm_aesenc_si128(t1, ks[2]);
+		t2 = _mm_aesenc_si128(t2, ks[2]);
+		t3 = _mm_aesenc_si128(t3, ks[2]);
+		t4 = _mm_aesenc_si128(t4, ks[2]);
+		t1 = _mm_aesenc_si128(t1, ks[3]);
+		t2 = _mm_aesenc_si128(t2, ks[3]);
+		t3 = _mm_aesenc_si128(t3, ks[3]);
+		t4 = _mm_aesenc_si128(t4, ks[3]);
+		t1 = _mm_aesenc_si128(t1, ks[4]);
+		t2 = _mm_aesenc_si128(t2, ks[4]);
+		t3 = _mm_aesenc_si128(t3, ks[4]);
+		t4 = _mm_aesenc_si128(t4, ks[4]);
+		t1 = _mm_aesenc_si128(t1, ks[5]);
+		t2 = _mm_aesenc_si128(t2, ks[5]);
+		t3 = _mm_aesenc_si128(t3, ks[5]);
+		t4 = _mm_aesenc_si128(t4, ks[5]);
+		t1 = _mm_aesenc_si128(t1, ks[6]);
+		t2 = _mm_aesenc_si128(t2, ks[6]);
+		t3 = _mm_aesenc_si128(t3, ks[6]);
+		t4 = _mm_aesenc_si128(t4, ks[6]);
+		t1 = _mm_aesenc_si128(t1, ks[7]);
+		t2 = _mm_aesenc_si128(t2, ks[7]);
+		t3 = _mm_aesenc_si128(t3, ks[7]);
+		t4 = _mm_aesenc_si128(t4, ks[7]);
+		t1 = _mm_aesenc_si128(t1, ks[8]);
+		t2 = _mm_aesenc_si128(t2, ks[8]);
+		t3 = _mm_aesenc_si128(t3, ks[8]);
+		t4 = _mm_aesenc_si128(t4, ks[8]);
+		t1 = _mm_aesenc_si128(t1, ks[9]);
+		t2 = _mm_aesenc_si128(t2, ks[9]);
+		t3 = _mm_aesenc_si128(t3, ks[9]);
+		t4 = _mm_aesenc_si128(t4, ks[9]);
+		t1 = _mm_aesenc_si128(t1, ks[10]);
+		t2 = _mm_aesenc_si128(t2, ks[10]);
+		t3 = _mm_aesenc_si128(t3, ks[10]);
+		t4 = _mm_aesenc_si128(t4, ks[10]);
+		t1 = _mm_aesenc_si128(t1, ks[11]);
+		t2 = _mm_aesenc_si128(t2, ks[11]);
+		t3 = _mm_aesenc_si128(t3, ks[11]);
+		t4 = _mm_aesenc_si128(t4, ks[11]);
+
+		t1 = _mm_aesenclast_si128(t1, ks[12]);
+		t2 = _mm_aesenclast_si128(t2, ks[12]);
+		t3 = _mm_aesenclast_si128(t3, ks[12]);
+		t4 = _mm_aesenclast_si128(t4, ks[12]);
+
+		t1 = _mm_xor_si128(t1, d1);
+		t2 = _mm_xor_si128(t2, d2);
+		t3 = _mm_xor_si128(t3, d3);
+		t4 = _mm_xor_si128(t4, d4);
+
+		/* GHASH over the freshly produced ciphertext blocks */
+		y = _mm_xor_si128(y, t1);
+		y = mult4xor(this->hhhh, this->hhh, this->hh, this->h, y, t2, t3, t4);
+
+		_mm_storeu_si128(bo + i + 0, t1);
+		_mm_storeu_si128(bo + i + 1, t2);
+		_mm_storeu_si128(bo + i + 2, t3);
+		_mm_storeu_si128(bo + i + 3, t4);
+	}
+
+	/* remaining full blocks, serially */
+	for (i = pblocks; i < blocks; i++)
+	{
+		d1 = _mm_loadu_si128(bi + i);
+
+		t1 = _mm_xor_si128(cb, ks[0]);
+		t1 = _mm_aesenc_si128(t1, ks[1]);
+		t1 = _mm_aesenc_si128(t1, ks[2]);
+		t1 = _mm_aesenc_si128(t1, ks[3]);
+		t1 = _mm_aesenc_si128(t1, ks[4]);
+		t1 = _mm_aesenc_si128(t1, ks[5]);
+		t1 = _mm_aesenc_si128(t1, ks[6]);
+		t1 = _mm_aesenc_si128(t1, ks[7]);
+		t1 = _mm_aesenc_si128(t1, ks[8]);
+		t1 = _mm_aesenc_si128(t1, ks[9]);
+		t1 = _mm_aesenc_si128(t1, ks[10]);
+		t1 = _mm_aesenc_si128(t1, ks[11]);
+		t1 = _mm_aesenclast_si128(t1, ks[12]);
+
+		t1 = _mm_xor_si128(t1, d1);
+		_mm_storeu_si128(bo + i, t1);
+
+		y = ghash(this->h, y, t1);
+
+		cb = increment_be(cb);
+	}
+
+	if (rem)
+	{
+		y = encrypt_gcm_rem(this, rem, bi + blocks, bo + blocks, cb, y);
+	}
+	y = icv_tailer(this, y, alen, len);
+	icv_crypt(this, y, j, icv);
+}
+
+/**
+ * AES-192 GCM decryption/ICV generation.
+ *
+ * Mirror of encrypt_gcm192(): the ciphertext is hashed before it is
+ * decrypted, unrolled for the 12 rounds of AES-192.
+ */
+static void decrypt_gcm192(private_aesni_gcm_t *this,
+						   size_t len, u_char *in, u_char *out, u_char *iv,
+						   size_t alen, u_char *assoc, u_char *icv)
+{
+	__m128i d1, d2, d3, d4, t1, t2, t3, t4;
+	__m128i *ks, y, j, cb, *bi, *bo;
+	u_int blocks, pblocks, rem, i;
+
+	j = create_j(this, iv);
+	cb = increment_be(j);
+	y = icv_header(this, assoc, alen);
+	blocks = len / AES_BLOCK_SIZE;
+	pblocks = blocks - (blocks % GCM_CRYPT_PARALLELISM);
+	rem = len % AES_BLOCK_SIZE;
+	bi = (__m128i*)in;
+	bo = (__m128i*)out;
+
+	ks = this->key->schedule;
+
+	for (i = 0; i < pblocks; i += GCM_CRYPT_PARALLELISM)
+	{
+		d1 = _mm_loadu_si128(bi + i + 0);
+		d2 = _mm_loadu_si128(bi + i + 1);
+		d3 = _mm_loadu_si128(bi + i + 2);
+		d4 = _mm_loadu_si128(bi + i + 3);
+
+		/* hash the ciphertext blocks before decrypting them */
+		y = _mm_xor_si128(y, d1);
+		y = mult4xor(this->hhhh, this->hhh, this->hh, this->h, y, d2, d3, d4);
+
+		t1 = _mm_xor_si128(cb, ks[0]);
+		cb = increment_be(cb);
+		t2 = _mm_xor_si128(cb, ks[0]);
+		cb = increment_be(cb);
+		t3 = _mm_xor_si128(cb, ks[0]);
+		cb = increment_be(cb);
+		t4 = _mm_xor_si128(cb, ks[0]);
+		cb = increment_be(cb);
+
+		t1 = _mm_aesenc_si128(t1, ks[1]);
+		t2 = _mm_aesenc_si128(t2, ks[1]);
+		t3 = _mm_aesenc_si128(t3, ks[1]);
+		t4 = _mm_aesenc_si128(t4, ks[1]);
+		t1 = _mm_aesenc_si128(t1, ks[2]);
+		t2 = _mm_aesenc_si128(t2, ks[2]);
+		t3 = _mm_aesenc_si128(t3, ks[2]);
+		t4 = _mm_aesenc_si128(t4, ks[2]);
+		t1 = _mm_aesenc_si128(t1, ks[3]);
+		t2 = _mm_aesenc_si128(t2, ks[3]);
+		t3 = _mm_aesenc_si128(t3, ks[3]);
+		t4 = _mm_aesenc_si128(t4, ks[3]);
+		t1 = _mm_aesenc_si128(t1, ks[4]);
+		t2 = _mm_aesenc_si128(t2, ks[4]);
+		t3 = _mm_aesenc_si128(t3, ks[4]);
+		t4 = _mm_aesenc_si128(t4, ks[4]);
+		t1 = _mm_aesenc_si128(t1, ks[5]);
+		t2 = _mm_aesenc_si128(t2, ks[5]);
+		t3 = _mm_aesenc_si128(t3, ks[5]);
+		t4 = _mm_aesenc_si128(t4, ks[5]);
+		t1 = _mm_aesenc_si128(t1, ks[6]);
+		t2 = _mm_aesenc_si128(t2, ks[6]);
+		t3 = _mm_aesenc_si128(t3, ks[6]);
+		t4 = _mm_aesenc_si128(t4, ks[6]);
+		t1 = _mm_aesenc_si128(t1, ks[7]);
+		t2 = _mm_aesenc_si128(t2, ks[7]);
+		t3 = _mm_aesenc_si128(t3, ks[7]);
+		t4 = _mm_aesenc_si128(t4, ks[7]);
+		t1 = _mm_aesenc_si128(t1, ks[8]);
+		t2 = _mm_aesenc_si128(t2, ks[8]);
+		t3 = _mm_aesenc_si128(t3, ks[8]);
+		t4 = _mm_aesenc_si128(t4, ks[8]);
+		t1 = _mm_aesenc_si128(t1, ks[9]);
+		t2 = _mm_aesenc_si128(t2, ks[9]);
+		t3 = _mm_aesenc_si128(t3, ks[9]);
+		t4 = _mm_aesenc_si128(t4, ks[9]);
+		t1 = _mm_aesenc_si128(t1, ks[10]);
+		t2 = _mm_aesenc_si128(t2, ks[10]);
+		t3 = _mm_aesenc_si128(t3, ks[10]);
+		t4 = _mm_aesenc_si128(t4, ks[10]);
+		t1 = _mm_aesenc_si128(t1, ks[11]);
+		t2 = _mm_aesenc_si128(t2, ks[11]);
+		t3 = _mm_aesenc_si128(t3, ks[11]);
+		t4 = _mm_aesenc_si128(t4, ks[11]);
+
+		t1 = _mm_aesenclast_si128(t1, ks[12]);
+		t2 = _mm_aesenclast_si128(t2, ks[12]);
+		t3 = _mm_aesenclast_si128(t3, ks[12]);
+		t4 = _mm_aesenclast_si128(t4, ks[12]);
+
+		t1 = _mm_xor_si128(t1, d1);
+		t2 = _mm_xor_si128(t2, d2);
+		t3 = _mm_xor_si128(t3, d3);
+		t4 = _mm_xor_si128(t4, d4);
+
+		_mm_storeu_si128(bo + i + 0, t1);
+		_mm_storeu_si128(bo + i + 1, t2);
+		_mm_storeu_si128(bo + i + 2, t3);
+		_mm_storeu_si128(bo + i + 3, t4);
+	}
+
+	/* remaining full blocks, serially; hash first, then decrypt */
+	for (i = pblocks; i < blocks; i++)
+	{
+		d1 = _mm_loadu_si128(bi + i);
+
+		y = ghash(this->h, y, d1);
+
+		t1 = _mm_xor_si128(cb, ks[0]);
+		t1 = _mm_aesenc_si128(t1, ks[1]);
+		t1 = _mm_aesenc_si128(t1, ks[2]);
+		t1 = _mm_aesenc_si128(t1, ks[3]);
+		t1 = _mm_aesenc_si128(t1, ks[4]);
+		t1 = _mm_aesenc_si128(t1, ks[5]);
+		t1 = _mm_aesenc_si128(t1, ks[6]);
+		t1 = _mm_aesenc_si128(t1, ks[7]);
+		t1 = _mm_aesenc_si128(t1, ks[8]);
+		t1 = _mm_aesenc_si128(t1, ks[9]);
+		t1 = _mm_aesenc_si128(t1, ks[10]);
+		t1 = _mm_aesenc_si128(t1, ks[11]);
+		t1 = _mm_aesenclast_si128(t1, ks[12]);
+
+		t1 = _mm_xor_si128(t1, d1);
+		_mm_storeu_si128(bo + i, t1);
+
+		cb = increment_be(cb);
+	}
+
+	if (rem)
+	{
+		y = decrypt_gcm_rem(this, rem, bi + blocks, bo + blocks, cb, y);
+	}
+	y = icv_tailer(this, y, alen, len);
+	icv_crypt(this, y, j, icv);
+}
+
+/**
+ * AES-256 GCM encryption/ICV generation
+ *
+ * Counter-mode encrypts len bytes from in to out using the 14-round
+ * AES-256 schedule, GHASHes the associated data and the produced
+ * ciphertext, and derives the ICV from the final GHASH state.
+ */
+static void encrypt_gcm256(private_aesni_gcm_t *this,
+ size_t len, u_char *in, u_char *out, u_char *iv,
+ size_t alen, u_char *assoc, u_char *icv)
+{
+ __m128i d1, d2, d3, d4, t1, t2, t3, t4;
+ __m128i *ks, y, j, cb, *bi, *bo;
+ u_int blocks, pblocks, rem, i;
+
+ /* J0 from IV/salt; the first counter block is J0 incremented once */
+ j = create_j(this, iv);
+ cb = increment_be(j);
+ /* GHASH the associated data before any ciphertext */
+ y = icv_header(this, assoc, alen);
+ blocks = len / AES_BLOCK_SIZE;
+ /* blocks handled by the four-way parallel loop below */
+ pblocks = blocks - (blocks % GCM_CRYPT_PARALLELISM);
+ rem = len % AES_BLOCK_SIZE;
+ bi = (__m128i*)in;
+ bo = (__m128i*)out;
+
+ ks = this->key->schedule;
+
+ /* process four blocks at a time to keep the AES-NI pipeline busy */
+ for (i = 0; i < pblocks; i += GCM_CRYPT_PARALLELISM)
+ {
+ d1 = _mm_loadu_si128(bi + i + 0);
+ d2 = _mm_loadu_si128(bi + i + 1);
+ d3 = _mm_loadu_si128(bi + i + 2);
+ d4 = _mm_loadu_si128(bi + i + 3);
+
+ /* whiten four consecutive counter values with the round 0 key */
+ t1 = _mm_xor_si128(cb, ks[0]);
+ cb = increment_be(cb);
+ t2 = _mm_xor_si128(cb, ks[0]);
+ cb = increment_be(cb);
+ t3 = _mm_xor_si128(cb, ks[0]);
+ cb = increment_be(cb);
+ t4 = _mm_xor_si128(cb, ks[0]);
+ cb = increment_be(cb);
+
+ /* 13 interleaved AES rounds plus the final round for all four blocks */
+ t1 = _mm_aesenc_si128(t1, ks[1]);
+ t2 = _mm_aesenc_si128(t2, ks[1]);
+ t3 = _mm_aesenc_si128(t3, ks[1]);
+ t4 = _mm_aesenc_si128(t4, ks[1]);
+ t1 = _mm_aesenc_si128(t1, ks[2]);
+ t2 = _mm_aesenc_si128(t2, ks[2]);
+ t3 = _mm_aesenc_si128(t3, ks[2]);
+ t4 = _mm_aesenc_si128(t4, ks[2]);
+ t1 = _mm_aesenc_si128(t1, ks[3]);
+ t2 = _mm_aesenc_si128(t2, ks[3]);
+ t3 = _mm_aesenc_si128(t3, ks[3]);
+ t4 = _mm_aesenc_si128(t4, ks[3]);
+ t1 = _mm_aesenc_si128(t1, ks[4]);
+ t2 = _mm_aesenc_si128(t2, ks[4]);
+ t3 = _mm_aesenc_si128(t3, ks[4]);
+ t4 = _mm_aesenc_si128(t4, ks[4]);
+ t1 = _mm_aesenc_si128(t1, ks[5]);
+ t2 = _mm_aesenc_si128(t2, ks[5]);
+ t3 = _mm_aesenc_si128(t3, ks[5]);
+ t4 = _mm_aesenc_si128(t4, ks[5]);
+ t1 = _mm_aesenc_si128(t1, ks[6]);
+ t2 = _mm_aesenc_si128(t2, ks[6]);
+ t3 = _mm_aesenc_si128(t3, ks[6]);
+ t4 = _mm_aesenc_si128(t4, ks[6]);
+ t1 = _mm_aesenc_si128(t1, ks[7]);
+ t2 = _mm_aesenc_si128(t2, ks[7]);
+ t3 = _mm_aesenc_si128(t3, ks[7]);
+ t4 = _mm_aesenc_si128(t4, ks[7]);
+ t1 = _mm_aesenc_si128(t1, ks[8]);
+ t2 = _mm_aesenc_si128(t2, ks[8]);
+ t3 = _mm_aesenc_si128(t3, ks[8]);
+ t4 = _mm_aesenc_si128(t4, ks[8]);
+ t1 = _mm_aesenc_si128(t1, ks[9]);
+ t2 = _mm_aesenc_si128(t2, ks[9]);
+ t3 = _mm_aesenc_si128(t3, ks[9]);
+ t4 = _mm_aesenc_si128(t4, ks[9]);
+ t1 = _mm_aesenc_si128(t1, ks[10]);
+ t2 = _mm_aesenc_si128(t2, ks[10]);
+ t3 = _mm_aesenc_si128(t3, ks[10]);
+ t4 = _mm_aesenc_si128(t4, ks[10]);
+ t1 = _mm_aesenc_si128(t1, ks[11]);
+ t2 = _mm_aesenc_si128(t2, ks[11]);
+ t3 = _mm_aesenc_si128(t3, ks[11]);
+ t4 = _mm_aesenc_si128(t4, ks[11]);
+ t1 = _mm_aesenc_si128(t1, ks[12]);
+ t2 = _mm_aesenc_si128(t2, ks[12]);
+ t3 = _mm_aesenc_si128(t3, ks[12]);
+ t4 = _mm_aesenc_si128(t4, ks[12]);
+ t1 = _mm_aesenc_si128(t1, ks[13]);
+ t2 = _mm_aesenc_si128(t2, ks[13]);
+ t3 = _mm_aesenc_si128(t3, ks[13]);
+ t4 = _mm_aesenc_si128(t4, ks[13]);
+
+ t1 = _mm_aesenclast_si128(t1, ks[14]);
+ t2 = _mm_aesenclast_si128(t2, ks[14]);
+ t3 = _mm_aesenclast_si128(t3, ks[14]);
+ t4 = _mm_aesenclast_si128(t4, ks[14]);
+
+ /* xor keystream with plaintext to produce ciphertext */
+ t1 = _mm_xor_si128(t1, d1);
+ t2 = _mm_xor_si128(t2, d2);
+ t3 = _mm_xor_si128(t3, d3);
+ t4 = _mm_xor_si128(t4, d4);
+
+ /* fold the four ciphertext blocks into the GHASH state at once */
+ y = _mm_xor_si128(y, t1);
+ y = mult4xor(this->hhhh, this->hhh, this->hh, this->h, y, t2, t3, t4);
+
+ _mm_storeu_si128(bo + i + 0, t1);
+ _mm_storeu_si128(bo + i + 1, t2);
+ _mm_storeu_si128(bo + i + 2, t3);
+ _mm_storeu_si128(bo + i + 3, t4);
+ }
+
+ /* single-block processing for the non-parallel tail */
+ for (i = pblocks; i < blocks; i++)
+ {
+ d1 = _mm_loadu_si128(bi + i);
+
+ t1 = _mm_xor_si128(cb, ks[0]);
+ t1 = _mm_aesenc_si128(t1, ks[1]);
+ t1 = _mm_aesenc_si128(t1, ks[2]);
+ t1 = _mm_aesenc_si128(t1, ks[3]);
+ t1 = _mm_aesenc_si128(t1, ks[4]);
+ t1 = _mm_aesenc_si128(t1, ks[5]);
+ t1 = _mm_aesenc_si128(t1, ks[6]);
+ t1 = _mm_aesenc_si128(t1, ks[7]);
+ t1 = _mm_aesenc_si128(t1, ks[8]);
+ t1 = _mm_aesenc_si128(t1, ks[9]);
+ t1 = _mm_aesenc_si128(t1, ks[10]);
+ t1 = _mm_aesenc_si128(t1, ks[11]);
+ t1 = _mm_aesenc_si128(t1, ks[12]);
+ t1 = _mm_aesenc_si128(t1, ks[13]);
+ t1 = _mm_aesenclast_si128(t1, ks[14]);
+
+ t1 = _mm_xor_si128(t1, d1);
+ _mm_storeu_si128(bo + i, t1);
+
+ /* encryption GHASHes the ciphertext, i.e. after the xor */
+ y = ghash(this->h, y, t1);
+
+ cb = increment_be(cb);
+ }
+
+ /* handle a final partial block, if any */
+ if (rem)
+ {
+ y = encrypt_gcm_rem(this, rem, bi + blocks, bo + blocks, cb, y);
+ }
+ /* GHASH the bit lengths, then encrypt the tag with J0 */
+ y = icv_tailer(this, y, alen, len);
+ icv_crypt(this, y, j, icv);
+}
+
+/**
+ * AES-256 GCM decryption/ICV generation
+ *
+ * Counter-mode decrypts len bytes from in to out using the 14-round
+ * AES-256 schedule, GHASHes the associated data and the received
+ * ciphertext, and derives the expected ICV from the final GHASH state.
+ * The caller compares the ICV against the received tag.
+ */
+static void decrypt_gcm256(private_aesni_gcm_t *this,
+ size_t len, u_char *in, u_char *out, u_char *iv,
+ size_t alen, u_char *assoc, u_char *icv)
+{
+ __m128i d1, d2, d3, d4, t1, t2, t3, t4;
+ __m128i *ks, y, j, cb, *bi, *bo;
+ u_int blocks, pblocks, rem, i;
+
+ /* J0 from IV/salt; the first counter block is J0 incremented once */
+ j = create_j(this, iv);
+ cb = increment_be(j);
+ /* GHASH the associated data before any ciphertext */
+ y = icv_header(this, assoc, alen);
+ blocks = len / AES_BLOCK_SIZE;
+ /* blocks handled by the four-way parallel loop below */
+ pblocks = blocks - (blocks % GCM_CRYPT_PARALLELISM);
+ rem = len % AES_BLOCK_SIZE;
+ bi = (__m128i*)in;
+ bo = (__m128i*)out;
+
+ ks = this->key->schedule;
+
+ /* process four blocks at a time to keep the AES-NI pipeline busy */
+ for (i = 0; i < pblocks; i += GCM_CRYPT_PARALLELISM)
+ {
+ d1 = _mm_loadu_si128(bi + i + 0);
+ d2 = _mm_loadu_si128(bi + i + 1);
+ d3 = _mm_loadu_si128(bi + i + 2);
+ d4 = _mm_loadu_si128(bi + i + 3);
+
+ /* decryption GHASHes the ciphertext, i.e. the loaded input blocks */
+ y = _mm_xor_si128(y, d1);
+ y = mult4xor(this->hhhh, this->hhh, this->hh, this->h, y, d2, d3, d4);
+
+ /* whiten four consecutive counter values with the round 0 key */
+ t1 = _mm_xor_si128(cb, ks[0]);
+ cb = increment_be(cb);
+ t2 = _mm_xor_si128(cb, ks[0]);
+ cb = increment_be(cb);
+ t3 = _mm_xor_si128(cb, ks[0]);
+ cb = increment_be(cb);
+ t4 = _mm_xor_si128(cb, ks[0]);
+ cb = increment_be(cb);
+
+ /* 13 interleaved AES rounds plus the final round for all four blocks */
+ t1 = _mm_aesenc_si128(t1, ks[1]);
+ t2 = _mm_aesenc_si128(t2, ks[1]);
+ t3 = _mm_aesenc_si128(t3, ks[1]);
+ t4 = _mm_aesenc_si128(t4, ks[1]);
+ t1 = _mm_aesenc_si128(t1, ks[2]);
+ t2 = _mm_aesenc_si128(t2, ks[2]);
+ t3 = _mm_aesenc_si128(t3, ks[2]);
+ t4 = _mm_aesenc_si128(t4, ks[2]);
+ t1 = _mm_aesenc_si128(t1, ks[3]);
+ t2 = _mm_aesenc_si128(t2, ks[3]);
+ t3 = _mm_aesenc_si128(t3, ks[3]);
+ t4 = _mm_aesenc_si128(t4, ks[3]);
+ t1 = _mm_aesenc_si128(t1, ks[4]);
+ t2 = _mm_aesenc_si128(t2, ks[4]);
+ t3 = _mm_aesenc_si128(t3, ks[4]);
+ t4 = _mm_aesenc_si128(t4, ks[4]);
+ t1 = _mm_aesenc_si128(t1, ks[5]);
+ t2 = _mm_aesenc_si128(t2, ks[5]);
+ t3 = _mm_aesenc_si128(t3, ks[5]);
+ t4 = _mm_aesenc_si128(t4, ks[5]);
+ t1 = _mm_aesenc_si128(t1, ks[6]);
+ t2 = _mm_aesenc_si128(t2, ks[6]);
+ t3 = _mm_aesenc_si128(t3, ks[6]);
+ t4 = _mm_aesenc_si128(t4, ks[6]);
+ t1 = _mm_aesenc_si128(t1, ks[7]);
+ t2 = _mm_aesenc_si128(t2, ks[7]);
+ t3 = _mm_aesenc_si128(t3, ks[7]);
+ t4 = _mm_aesenc_si128(t4, ks[7]);
+ t1 = _mm_aesenc_si128(t1, ks[8]);
+ t2 = _mm_aesenc_si128(t2, ks[8]);
+ t3 = _mm_aesenc_si128(t3, ks[8]);
+ t4 = _mm_aesenc_si128(t4, ks[8]);
+ t1 = _mm_aesenc_si128(t1, ks[9]);
+ t2 = _mm_aesenc_si128(t2, ks[9]);
+ t3 = _mm_aesenc_si128(t3, ks[9]);
+ t4 = _mm_aesenc_si128(t4, ks[9]);
+ t1 = _mm_aesenc_si128(t1, ks[10]);
+ t2 = _mm_aesenc_si128(t2, ks[10]);
+ t3 = _mm_aesenc_si128(t3, ks[10]);
+ t4 = _mm_aesenc_si128(t4, ks[10]);
+ t1 = _mm_aesenc_si128(t1, ks[11]);
+ t2 = _mm_aesenc_si128(t2, ks[11]);
+ t3 = _mm_aesenc_si128(t3, ks[11]);
+ t4 = _mm_aesenc_si128(t4, ks[11]);
+ t1 = _mm_aesenc_si128(t1, ks[12]);
+ t2 = _mm_aesenc_si128(t2, ks[12]);
+ t3 = _mm_aesenc_si128(t3, ks[12]);
+ t4 = _mm_aesenc_si128(t4, ks[12]);
+ t1 = _mm_aesenc_si128(t1, ks[13]);
+ t2 = _mm_aesenc_si128(t2, ks[13]);
+ t3 = _mm_aesenc_si128(t3, ks[13]);
+ t4 = _mm_aesenc_si128(t4, ks[13]);
+
+ t1 = _mm_aesenclast_si128(t1, ks[14]);
+ t2 = _mm_aesenclast_si128(t2, ks[14]);
+ t3 = _mm_aesenclast_si128(t3, ks[14]);
+ t4 = _mm_aesenclast_si128(t4, ks[14]);
+
+ /* xor keystream with ciphertext to recover plaintext */
+ t1 = _mm_xor_si128(t1, d1);
+ t2 = _mm_xor_si128(t2, d2);
+ t3 = _mm_xor_si128(t3, d3);
+ t4 = _mm_xor_si128(t4, d4);
+
+ _mm_storeu_si128(bo + i + 0, t1);
+ _mm_storeu_si128(bo + i + 1, t2);
+ _mm_storeu_si128(bo + i + 2, t3);
+ _mm_storeu_si128(bo + i + 3, t4);
+ }
+
+ /* single-block processing for the non-parallel tail */
+ for (i = pblocks; i < blocks; i++)
+ {
+ d1 = _mm_loadu_si128(bi + i);
+
+ /* GHASH the ciphertext block before decrypting it */
+ y = ghash(this->h, y, d1);
+
+ t1 = _mm_xor_si128(cb, ks[0]);
+ t1 = _mm_aesenc_si128(t1, ks[1]);
+ t1 = _mm_aesenc_si128(t1, ks[2]);
+ t1 = _mm_aesenc_si128(t1, ks[3]);
+ t1 = _mm_aesenc_si128(t1, ks[4]);
+ t1 = _mm_aesenc_si128(t1, ks[5]);
+ t1 = _mm_aesenc_si128(t1, ks[6]);
+ t1 = _mm_aesenc_si128(t1, ks[7]);
+ t1 = _mm_aesenc_si128(t1, ks[8]);
+ t1 = _mm_aesenc_si128(t1, ks[9]);
+ t1 = _mm_aesenc_si128(t1, ks[10]);
+ t1 = _mm_aesenc_si128(t1, ks[11]);
+ t1 = _mm_aesenc_si128(t1, ks[12]);
+ t1 = _mm_aesenc_si128(t1, ks[13]);
+ t1 = _mm_aesenclast_si128(t1, ks[14]);
+
+ t1 = _mm_xor_si128(t1, d1);
+ _mm_storeu_si128(bo + i, t1);
+
+ cb = increment_be(cb);
+ }
+
+ /* handle a final partial block, if any */
+ if (rem)
+ {
+ y = decrypt_gcm_rem(this, rem, bi + blocks, bo + blocks, cb, y);
+ }
+ /* GHASH the bit lengths, then encrypt the tag with J0 */
+ y = icv_tailer(this, y, alen, len);
+ icv_crypt(this, y, j, icv);
+}
+
+METHOD(aead_t, encrypt, bool,
+ private_aesni_gcm_t *this, chunk_t plain, chunk_t assoc, chunk_t iv,
+ chunk_t *encr)
+{
+ u_char *dst;
+
+ if (!this->key || iv.len != IV_SIZE)
+ {
+ return FALSE;
+ }
+ if (encr)
+ {
+ /* allocate room for ciphertext plus trailing ICV */
+ *encr = chunk_alloc(plain.len + this->icv_size);
+ dst = encr->ptr;
+ }
+ else
+ {
+ /* in-place operation; ICV is appended behind the plaintext */
+ dst = plain.ptr;
+ }
+ this->encrypt(this, plain.len, plain.ptr, dst, iv.ptr,
+ assoc.len, assoc.ptr, dst + plain.len);
+ return TRUE;
+}
+
+METHOD(aead_t, decrypt, bool,
+ private_aesni_gcm_t *this, chunk_t encr, chunk_t assoc, chunk_t iv,
+ chunk_t *plain)
+{
+ u_char *dst, icv[this->icv_size];
+
+ if (!this->key || iv.len != IV_SIZE || encr.len < this->icv_size)
+ {
+ return FALSE;
+ }
+ /* strip the trailing ICV from the ciphertext length */
+ encr.len -= this->icv_size;
+ if (plain)
+ {
+ *plain = chunk_alloc(encr.len);
+ dst = plain->ptr;
+ }
+ else
+ {
+ /* in-place operation */
+ dst = encr.ptr;
+ }
+ this->decrypt(this, encr.len, encr.ptr, dst, iv.ptr,
+ assoc.len, assoc.ptr, icv);
+ /* constant-time comparison of computed and received ICV */
+ return memeq_const(icv, encr.ptr + encr.len, this->icv_size);
+}
+
+METHOD(aead_t, get_block_size, size_t,
+ private_aesni_gcm_t *this)
+{
+ /* counter mode needs no block alignment, any length is fine */
+ return 1;
+}
+
+METHOD(aead_t, get_icv_size, size_t,
+ private_aesni_gcm_t *this)
+{
+ /* ICV size selected at construction from the GCM algorithm variant */
+ return this->icv_size;
+}
+
+METHOD(aead_t, get_iv_size, size_t,
+ private_aesni_gcm_t *this)
+{
+ /* size of the per-message explicit IV */
+ return IV_SIZE;
+}
+
+METHOD(aead_t, get_iv_gen, iv_gen_t*,
+ private_aesni_gcm_t *this)
+{
+ /* sequential generator created in aesni_gcm_create(); IVs must be unique */
+ return this->iv_gen;
+}
+
+METHOD(aead_t, get_key_size, size_t,
+ private_aesni_gcm_t *this)
+{
+ /* keymat is the AES key followed by the implicit nonce salt */
+ return this->key_size + SALT_SIZE;
+}
+
+METHOD(aead_t, set_key, bool,
+ private_aesni_gcm_t *this, chunk_t key)
+{
+ u_int round;
+ __m128i *ks, h;
+
+ if (key.len != this->key_size + SALT_SIZE)
+ {
+ return FALSE;
+ }
+
+ /* the trailing SALT_SIZE bytes of the keymat are the nonce salt */
+ memcpy(this->salt, key.ptr + key.len - SALT_SIZE, SALT_SIZE);
+ key.len -= SALT_SIZE;
+
+ DESTROY_IF(this->key);
+ this->key = aesni_key_create(TRUE, key);
+
+ /* derive the GHASH subkey H by encrypting the all-zero block */
+ ks = this->key->schedule;
+ h = _mm_xor_si128(_mm_setzero_si128(), ks[0]);
+ for (round = 1; round < this->key->rounds; round++)
+ {
+ h = _mm_aesenc_si128(h, ks[round]);
+ }
+ h = _mm_aesenclast_si128(h, ks[this->key->rounds]);
+
+ /* precompute H^2, H^3, H^4 for the four-way parallel GHASH (mult4xor),
+ * byte-swapping around mult_block as it works on swapped representation */
+ this->h = h;
+ h = swap128(h);
+ this->hh = mult_block(h, this->h);
+ this->hhh = mult_block(h, this->hh);
+ this->hhhh = mult_block(h, this->hhh);
+ this->h = swap128(this->h);
+ this->hh = swap128(this->hh);
+ this->hhh = swap128(this->hhh);
+ this->hhhh = swap128(this->hhhh);
+
+ return TRUE;
+}
+
+METHOD(aead_t, destroy, void,
+ private_aesni_gcm_t *this)
+{
+ DESTROY_IF(this->key);
+ /* wipe the precomputed GHASH subkeys, they are key material */
+ memwipe(&this->h, sizeof(this->h));
+ memwipe(&this->hh, sizeof(this->hh));
+ memwipe(&this->hhh, sizeof(this->hhh));
+ memwipe(&this->hhhh, sizeof(this->hhhh));
+ this->iv_gen->destroy(this->iv_gen);
+ free_align(this);
+}
+
+/**
+ * See header
+ *
+ * Creates an AES-GCM AEAD backed by AES-NI/PCLMULQDQ, selecting the
+ * key-size specific encrypt/decrypt implementation and the ICV length
+ * from the requested algorithm variant.
+ */
+aesni_gcm_t *aesni_gcm_create(encryption_algorithm_t algo,
+ size_t key_size, size_t salt_size)
+{
+ private_aesni_gcm_t *this;
+ size_t icv_size;
+
+ switch (key_size)
+ {
+ case 0:
+ /* default to AES-128 if no key size requested */
+ key_size = 16;
+ break;
+ case 16:
+ case 24:
+ case 32:
+ break;
+ default:
+ return NULL;
+ }
+ if (salt_size && salt_size != SALT_SIZE)
+ {
+ /* currently not supported */
+ return NULL;
+ }
+ /* map the GCM algorithm variant to its ICV length; the bogus
+ * reassignment of algo to ENCR_AES_CBC has been dropped, algo is
+ * not used beyond this switch */
+ switch (algo)
+ {
+ case ENCR_AES_GCM_ICV8:
+ icv_size = 8;
+ break;
+ case ENCR_AES_GCM_ICV12:
+ icv_size = 12;
+ break;
+ case ENCR_AES_GCM_ICV16:
+ icv_size = 16;
+ break;
+ default:
+ return NULL;
+ }
+
+ /* align the instance so the __m128i members can be used directly */
+ INIT_ALIGN(this, sizeof(__m128i),
+ .public = {
+ .aead = {
+ .encrypt = _encrypt,
+ .decrypt = _decrypt,
+ .get_block_size = _get_block_size,
+ .get_icv_size = _get_icv_size,
+ .get_iv_size = _get_iv_size,
+ .get_iv_gen = _get_iv_gen,
+ .get_key_size = _get_key_size,
+ .set_key = _set_key,
+ .destroy = _destroy,
+ },
+ },
+ .key_size = key_size,
+ .iv_gen = iv_gen_seq_create(),
+ .icv_size = icv_size,
+ );
+
+ /* pick the round-count specific implementation for the key size */
+ switch (key_size)
+ {
+ case 16:
+ this->encrypt = encrypt_gcm128;
+ this->decrypt = decrypt_gcm128;
+ break;
+ case 24:
+ this->encrypt = encrypt_gcm192;
+ this->decrypt = decrypt_gcm192;
+ break;
+ case 32:
+ this->encrypt = encrypt_gcm256;
+ this->decrypt = decrypt_gcm256;
+ break;
+ }
+
+ return &this->public;
+}
diff --git a/src/libstrongswan/plugins/aesni/aesni_gcm.h b/src/libstrongswan/plugins/aesni/aesni_gcm.h
new file mode 100644
index 000000000..5a256c8db
--- /dev/null
+++ b/src/libstrongswan/plugins/aesni/aesni_gcm.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2015 Martin Willi
+ * Copyright (C) 2015 revosec AG
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+/**
+ * @defgroup aesni_gcm aesni_gcm
+ * @{ @ingroup aesni
+ */
+
+#ifndef AESNI_GCM_H_
+#define AESNI_GCM_H_
+
+#include <library.h>
+
+typedef struct aesni_gcm_t aesni_gcm_t;
+
+/**
+ * GCM mode AEAD using AES-NI and PCLMULQDQ instructions
+ */
+struct aesni_gcm_t {
+
+	/**
+	 * Implements the aead_t interface
+	 */
+	aead_t aead;
+};
+
+/**
+ * Create a aesni_gcm instance.
+ *
+ * @param algo encryption algorithm, ENCR_AES_GCM*
+ * @param key_size AES key size, in bytes
+ * @param salt_size size of salt value
+ * @return AES-GCM AEAD, NULL if not supported
+ */
+aesni_gcm_t *aesni_gcm_create(encryption_algorithm_t algo,
+ size_t key_size, size_t salt_size);
+
+#endif /** AESNI_GCM_H_ @}*/
diff --git a/src/libstrongswan/plugins/aesni/aesni_key.c b/src/libstrongswan/plugins/aesni/aesni_key.c
new file mode 100644
index 000000000..523266a30
--- /dev/null
+++ b/src/libstrongswan/plugins/aesni/aesni_key.c
@@ -0,0 +1,301 @@
+/*
+ * Copyright (C) 2015 Martin Willi
+ * Copyright (C) 2015 revosec AG
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include "aesni_key.h"
+
+/**
+ * Rounds used for each AES key size
+ */
+#define AES128_ROUNDS 10
+#define AES192_ROUNDS 12
+#define AES256_ROUNDS 14
+
+typedef struct private_aesni_key_t private_aesni_key_t;
+
+/**
+ * Private data of an aesni_key_t object.
+ */
+struct private_aesni_key_t {
+
+ /**
+ * Public aesni_key_t interface.
+ */
+ aesni_key_t public;
+};
+
+/**
+ * Invert round encryption keys to get a decryption key schedule
+ *
+ * The decryption schedule is the encryption schedule in reverse order,
+ * with InvMixColumns applied to all but the first and last round keys.
+ */
+static void reverse_key(aesni_key_t *this)
+{
+ __m128i tmp[this->rounds + 1];
+ int round;
+
+ /* snapshot the schedule in reversed order */
+ for (round = 0; round <= this->rounds; round++)
+ {
+ tmp[round] = this->schedule[this->rounds - round];
+ }
+ this->schedule[0] = tmp[0];
+ for (round = 1; round < this->rounds; round++)
+ {
+ this->schedule[round] = _mm_aesimc_si128(tmp[round]);
+ }
+ this->schedule[this->rounds] = tmp[this->rounds];
+
+ /* the snapshot holds key material, wipe it */
+ memwipe(tmp, sizeof(tmp));
+}
+
+/**
+ * Assist in creating a 128-bit round key
+ *
+ * Propagates the previous round key by xoring it with byte-shifted
+ * copies of itself, then mixes in the broadcast keygen-assist word.
+ */
+static __m128i assist128(__m128i a, __m128i b)
+{
+ __m128i shifted;
+
+ /* broadcast the relevant assist word to all four lanes */
+ b = _mm_shuffle_epi32(b, 0xff);
+ shifted = _mm_slli_si128(a, 0x04);
+ a = _mm_xor_si128(a, shifted);
+ shifted = _mm_slli_si128(shifted, 0x04);
+ a = _mm_xor_si128(a, shifted);
+ shifted = _mm_slli_si128(shifted, 0x04);
+ a = _mm_xor_si128(a, shifted);
+
+ return _mm_xor_si128(a, b);
+}
+
+/**
+ * Expand a 128-bit key to encryption round keys
+ *
+ * AES-128 uses 10 rounds; each round key is derived from the previous
+ * one via AESKEYGENASSIST with the round constants 0x01..0x36.
+ */
+static void expand128(__m128i *key, __m128i *schedule)
+{
+ __m128i t;
+
+ /* round 0 key is the raw cipher key */
+ schedule[0] = t = _mm_loadu_si128(key);
+ schedule[1] = t = assist128(t, _mm_aeskeygenassist_si128(t, 0x01));
+ schedule[2] = t = assist128(t, _mm_aeskeygenassist_si128(t, 0x02));
+ schedule[3] = t = assist128(t, _mm_aeskeygenassist_si128(t, 0x04));
+ schedule[4] = t = assist128(t, _mm_aeskeygenassist_si128(t, 0x08));
+ schedule[5] = t = assist128(t, _mm_aeskeygenassist_si128(t, 0x10));
+ schedule[6] = t = assist128(t, _mm_aeskeygenassist_si128(t, 0x20));
+ schedule[7] = t = assist128(t, _mm_aeskeygenassist_si128(t, 0x40));
+ schedule[8] = t = assist128(t, _mm_aeskeygenassist_si128(t, 0x80));
+ schedule[9] = t = assist128(t, _mm_aeskeygenassist_si128(t, 0x1b));
+ schedule[10] = assist128(t, _mm_aeskeygenassist_si128(t, 0x36));
+}
+
+/**
+ * Assist in creating a 192-bit round key
+ *
+ * Updates the first 128-bit half through *a and returns the new upper
+ * 64-bit half derived from c. Note that the AES-192 schedule does not
+ * align to 128-bit boundaries, hence the split handling in expand192().
+ */
+static __m128i assist192(__m128i b, __m128i c, __m128i *a)
+{
+ __m128i t;
+
+ b = _mm_shuffle_epi32(b, 0x55);
+ t = _mm_slli_si128(*a, 0x04);
+ *a = _mm_xor_si128(*a, t);
+ t = _mm_slli_si128(t, 0x04);
+ *a = _mm_xor_si128(*a, t);
+ t = _mm_slli_si128(t, 0x04);
+ *a = _mm_xor_si128(*a, t);
+ *a = _mm_xor_si128(*a, b);
+ b = _mm_shuffle_epi32(*a, 0xff);
+ t = _mm_slli_si128(c, 0x04);
+ t = _mm_xor_si128(c, t);
+ t = _mm_xor_si128(t, b);
+
+ return t;
+}
+
+/**
+ * return a[63:0] | b[63:0] << 64
+ *
+ * Combines the low halves of a and b into one 128-bit value.
+ */
+static __m128i _mm_shuffle_i00(__m128i a, __m128i b)
+{
+ return (__m128i)_mm_shuffle_pd((__m128d)a, (__m128d)b, 0);
+}
+
+/**
+ * return a[127:64] >> 64 | b[63:0] << 64
+ *
+ * Combines the high half of a with the low half of b.
+ */
+static __m128i _mm_shuffle_i01(__m128i a, __m128i b)
+{
+ return (__m128i)_mm_shuffle_pd((__m128d)a, (__m128d)b, 1);
+}
+
+/**
+ * Expand a 192-bit encryption key to round keys
+ *
+ * AES-192 uses 12 rounds. The 192-bit key does not divide evenly into
+ * 128-bit round keys, so every derivation step fills one and a half
+ * schedule entries using the _mm_shuffle_i00/_mm_shuffle_i01 helpers.
+ */
+static void expand192(__m128i *key, __m128i *schedule)
+{
+ __m128i t1, t2, t3;
+
+ schedule[0] = t1 = _mm_loadu_si128(key);
+ t2 = t3 = _mm_loadu_si128(key + 1);
+
+ t2 = assist192(_mm_aeskeygenassist_si128(t2, 0x1), t2, &t1);
+ schedule[1] = _mm_shuffle_i00(t3, t1);
+ schedule[2] = _mm_shuffle_i01(t1, t2);
+ t2 = t3 = assist192(_mm_aeskeygenassist_si128(t2, 0x2), t2, &t1);
+ schedule[3] = t1;
+
+ t2 = assist192(_mm_aeskeygenassist_si128(t2, 0x4), t2, &t1);
+ schedule[4] = _mm_shuffle_i00(t3, t1);
+ schedule[5] = _mm_shuffle_i01(t1, t2);
+ t2 = t3 = assist192(_mm_aeskeygenassist_si128(t2, 0x8), t2, &t1);
+ schedule[6] = t1;
+
+ t2 = assist192(_mm_aeskeygenassist_si128(t2, 0x10), t2, &t1);
+ schedule[7] = _mm_shuffle_i00(t3, t1);
+ schedule[8] = _mm_shuffle_i01(t1, t2);
+ t2 = t3 = assist192(_mm_aeskeygenassist_si128(t2, 0x20), t2, &t1);
+ schedule[9] = t1;
+
+ t2 = assist192(_mm_aeskeygenassist_si128(t2, 0x40), t2, &t1);
+ schedule[10] = _mm_shuffle_i00(t3, t1);
+ schedule[11] = _mm_shuffle_i01(t1, t2);
+ assist192(_mm_aeskeygenassist_si128(t2, 0x80), t2, &t1);
+ schedule[12] = t1;
+}
+
+/**
+ * Assist in creating a 256-bit round key
+ *
+ * Derives an even-numbered round key from the previous even key and the
+ * keygen-assist word of the previous odd key.
+ */
+static __m128i assist256_1(__m128i a, __m128i b)
+{
+ __m128i acc, shifted;
+
+ /* broadcast the relevant assist word to all four lanes */
+ b = _mm_shuffle_epi32(b, 0xff);
+ shifted = _mm_slli_si128(a, 0x04);
+ acc = _mm_xor_si128(a, shifted);
+ shifted = _mm_slli_si128(shifted, 0x04);
+ acc = _mm_xor_si128(acc, shifted);
+ shifted = _mm_slli_si128(shifted, 0x04);
+ acc = _mm_xor_si128(acc, shifted);
+
+ return _mm_xor_si128(acc, b);
+}
+
+/**
+ * Assist in creating a 256-bit round key
+ *
+ * Derives an odd-numbered round key; uses AESKEYGENASSIST with round
+ * constant 0x00 as the schedule only needs SubWord here, no rotation.
+ */
+static __m128i assist256_2(__m128i a, __m128i b)
+{
+ __m128i x, y, z;
+
+ y = _mm_aeskeygenassist_si128(a, 0x00);
+ z = _mm_shuffle_epi32(y, 0xaa);
+ y = _mm_slli_si128(b, 0x04);
+ x = _mm_xor_si128(b, y);
+ y = _mm_slli_si128(y, 0x04);
+ x = _mm_xor_si128(x, y);
+ y = _mm_slli_si128(y, 0x04);
+ x = _mm_xor_si128(x, y);
+ x = _mm_xor_si128(x, z);
+
+ return x;
+}
+
+/**
+ * Expand a 256-bit encryption key to round keys
+ *
+ * AES-256 uses 14 rounds; the two raw key halves seed the schedule and
+ * subsequent keys alternate between the assist256_1/assist256_2 steps.
+ */
+static void expand256(__m128i *key, __m128i *schedule)
+{
+ __m128i t1, t2;
+
+ schedule[0] = t1 = _mm_loadu_si128(key);
+ schedule[1] = t2 = _mm_loadu_si128(key + 1);
+
+ schedule[2] = t1 = assist256_1(t1, _mm_aeskeygenassist_si128(t2, 0x01));
+ schedule[3] = t2 = assist256_2(t1, t2);
+
+ schedule[4] = t1 = assist256_1(t1, _mm_aeskeygenassist_si128(t2, 0x02));
+ schedule[5] = t2 = assist256_2(t1, t2);
+
+ schedule[6] = t1 = assist256_1(t1, _mm_aeskeygenassist_si128(t2, 0x04));
+ schedule[7] = t2 = assist256_2(t1, t2);
+
+ schedule[8] = t1 = assist256_1(t1, _mm_aeskeygenassist_si128(t2, 0x08));
+ schedule[9] = t2 = assist256_2(t1, t2);
+
+ schedule[10] = t1 = assist256_1(t1, _mm_aeskeygenassist_si128(t2, 0x10));
+ schedule[11] = t2 = assist256_2(t1, t2);
+
+ schedule[12] = t1 = assist256_1(t1, _mm_aeskeygenassist_si128(t2, 0x20));
+ schedule[13] = t2 = assist256_2(t1, t2);
+
+ schedule[14] = assist256_1(t1, _mm_aeskeygenassist_si128(t2, 0x40));
+}
+
+METHOD(aesni_key_t, destroy, void,
+ private_aesni_key_t *this)
+{
+ /* wipe struct plus the flexible schedule[] array allocated behind it */
+ memwipe(this, sizeof(*this) + (this->public.rounds + 1) * AES_BLOCK_SIZE);
+ free_align(this);
+}
+
+/**
+ * See header
+ *
+ * Allocates an aligned key schedule sized for the requested key length
+ * and fills it with encryption round keys, inverting them for
+ * decryption schedules.
+ */
+aesni_key_t *aesni_key_create(bool encrypt, chunk_t key)
+{
+ private_aesni_key_t *this;
+ int rounds;
+
+ if (key.len == 16)
+ {
+ rounds = AES128_ROUNDS;
+ }
+ else if (key.len == 24)
+ {
+ rounds = AES192_ROUNDS;
+ }
+ else if (key.len == 32)
+ {
+ rounds = AES256_ROUNDS;
+ }
+ else
+ {
+ return NULL;
+ }
+
+ /* extra space for rounds + 1 schedule entries, __m128i aligned */
+ INIT_EXTRA_ALIGN(this, (rounds + 1) * AES_BLOCK_SIZE, sizeof(__m128i),
+ .public = {
+ .destroy = _destroy,
+ .rounds = rounds,
+ },
+ );
+
+ if (key.len == 16)
+ {
+ expand128((__m128i*)key.ptr, this->public.schedule);
+ }
+ else if (key.len == 24)
+ {
+ expand192((__m128i*)key.ptr, this->public.schedule);
+ }
+ else
+ {
+ expand256((__m128i*)key.ptr, this->public.schedule);
+ }
+
+ if (!encrypt)
+ {
+ reverse_key(&this->public);
+ }
+
+ return &this->public;
+}
diff --git a/src/libstrongswan/plugins/aesni/aesni_key.h b/src/libstrongswan/plugins/aesni/aesni_key.h
new file mode 100644
index 000000000..12dcd221d
--- /dev/null
+++ b/src/libstrongswan/plugins/aesni/aesni_key.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2015 Martin Willi
+ * Copyright (C) 2015 revosec AG
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+/**
+ * @defgroup aesni_key aesni_key
+ * @{ @ingroup aesni
+ */
+
+#ifndef AESNI_KEY_H_
+#define AESNI_KEY_H_
+
+#include <library.h>
+
+#include <wmmintrin.h>
+
+/**
+ * AES block size, in bytes
+ */
+#define AES_BLOCK_SIZE 16
+
+typedef struct aesni_key_t aesni_key_t;
+
+/**
+ * Key schedule for encryption/decryption using on AES-NI.
+ */
+struct aesni_key_t {
+
+	/**
+	 * Destroy a aesni_key_t, wiping the schedule.
+	 */
+	void (*destroy)(aesni_key_t *this);
+
+	/**
+	 * Number of AES rounds (10, 12, 14)
+	 */
+	int rounds;
+
+	/**
+	 * Key schedule, for each round + the round 0 (whitening);
+	 * flexible array member, rounds + 1 entries are allocated
+	 */
+	__attribute__((aligned(sizeof(__m128i)))) __m128i schedule[];
+};
+
+/**
+ * Create a AESNI key schedule instance.
+ *
+ * @param encrypt TRUE for encryption schedule, FALSE for decryption
+ * @param key non-expanded crypto key, 16, 24 or 32 bytes
+ * @return key schedule, NULL on invalid key size
+ */
+aesni_key_t *aesni_key_create(bool encrypt, chunk_t key);
+
+#endif /** AESNI_KEY_H_ @}*/
diff --git a/src/libstrongswan/plugins/aesni/aesni_plugin.c b/src/libstrongswan/plugins/aesni/aesni_plugin.c
new file mode 100644
index 000000000..b92419dc4
--- /dev/null
+++ b/src/libstrongswan/plugins/aesni/aesni_plugin.c
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) 2015 Martin Willi
+ * Copyright (C) 2015 revosec AG
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include "aesni_plugin.h"
+#include "aesni_cbc.h"
+#include "aesni_ctr.h"
+#include "aesni_ccm.h"
+#include "aesni_gcm.h"
+#include "aesni_xcbc.h"
+#include "aesni_cmac.h"
+
+#include <stdio.h>
+
+#include <library.h>
+#include <utils/debug.h>
+#include <utils/cpu_feature.h>
+
+typedef struct private_aesni_plugin_t private_aesni_plugin_t;
+typedef enum cpuid_feature_t cpuid_feature_t;
+
+/**
+ * private data of aesni_plugin
+ */
+struct private_aesni_plugin_t {
+
+	/**
+	 * public functions, implements plugin_t
+	 */
+	aesni_plugin_t public;
+};
+
+METHOD(plugin_t, get_name, char*,
+ private_aesni_plugin_t *this)
+{
+ /* name the plugin loader uses to reference this plugin */
+ return "aesni";
+}
+
+METHOD(plugin_t, get_features, int,
+ private_aesni_plugin_t *this, plugin_feature_t *features[])
+{
+ /* crypters, AEADs, PRFs and signers backed by AES-NI instructions */
+ static plugin_feature_t f[] = {
+ PLUGIN_REGISTER(CRYPTER, aesni_cbc_create),
+ PLUGIN_PROVIDE(CRYPTER, ENCR_AES_CBC, 16),
+ PLUGIN_PROVIDE(CRYPTER, ENCR_AES_CBC, 24),
+ PLUGIN_PROVIDE(CRYPTER, ENCR_AES_CBC, 32),
+ PLUGIN_REGISTER(CRYPTER, aesni_ctr_create),
+ PLUGIN_PROVIDE(CRYPTER, ENCR_AES_CTR, 16),
+ PLUGIN_PROVIDE(CRYPTER, ENCR_AES_CTR, 24),
+ PLUGIN_PROVIDE(CRYPTER, ENCR_AES_CTR, 32),
+ PLUGIN_REGISTER(AEAD, aesni_ccm_create),
+ PLUGIN_PROVIDE(AEAD, ENCR_AES_CCM_ICV8, 16),
+ PLUGIN_PROVIDE(AEAD, ENCR_AES_CCM_ICV12, 16),
+ PLUGIN_PROVIDE(AEAD, ENCR_AES_CCM_ICV16, 16),
+ PLUGIN_PROVIDE(AEAD, ENCR_AES_CCM_ICV8, 24),
+ PLUGIN_PROVIDE(AEAD, ENCR_AES_CCM_ICV12, 24),
+ PLUGIN_PROVIDE(AEAD, ENCR_AES_CCM_ICV16, 24),
+ PLUGIN_PROVIDE(AEAD, ENCR_AES_CCM_ICV8, 32),
+ PLUGIN_PROVIDE(AEAD, ENCR_AES_CCM_ICV12, 32),
+ PLUGIN_PROVIDE(AEAD, ENCR_AES_CCM_ICV16, 32),
+ PLUGIN_REGISTER(AEAD, aesni_gcm_create),
+ PLUGIN_PROVIDE(AEAD, ENCR_AES_GCM_ICV8, 16),
+ PLUGIN_PROVIDE(AEAD, ENCR_AES_GCM_ICV12, 16),
+ PLUGIN_PROVIDE(AEAD, ENCR_AES_GCM_ICV16, 16),
+ PLUGIN_PROVIDE(AEAD, ENCR_AES_GCM_ICV8, 24),
+ PLUGIN_PROVIDE(AEAD, ENCR_AES_GCM_ICV12, 24),
+ PLUGIN_PROVIDE(AEAD, ENCR_AES_GCM_ICV16, 24),
+ PLUGIN_PROVIDE(AEAD, ENCR_AES_GCM_ICV8, 32),
+ PLUGIN_PROVIDE(AEAD, ENCR_AES_GCM_ICV12, 32),
+ PLUGIN_PROVIDE(AEAD, ENCR_AES_GCM_ICV16, 32),
+ PLUGIN_REGISTER(PRF, aesni_xcbc_prf_create),
+ PLUGIN_PROVIDE(PRF, PRF_AES128_XCBC),
+ PLUGIN_REGISTER(SIGNER, aesni_xcbc_signer_create),
+ PLUGIN_PROVIDE(SIGNER, AUTH_AES_XCBC_96),
+ PLUGIN_REGISTER(PRF, aesni_cmac_prf_create),
+ PLUGIN_PROVIDE(PRF, PRF_AES128_CMAC),
+ PLUGIN_REGISTER(SIGNER, aesni_cmac_signer_create),
+ PLUGIN_PROVIDE(SIGNER, AUTH_AES_CMAC_96),
+ };
+
+ *features = f;
+ /* all features require both AES-NI and PCLMULQDQ CPU support */
+ if (cpu_feature_available(CPU_FEATURE_AESNI | CPU_FEATURE_PCLMULQDQ))
+ {
+ return countof(f);
+ }
+ return 0;
+}
+
+METHOD(plugin_t, destroy, void,
+ private_aesni_plugin_t *this)
+{
+ /* plugin holds no resources beyond its own allocation */
+ free(this);
+}
+
+/*
+ * see header file
+ */
+plugin_t *aesni_plugin_create()
+{
+	private_aesni_plugin_t *this;
+
+	INIT(this,
+		.public = {
+			.plugin = {
+				.get_name = _get_name,
+				.get_features = _get_features,
+				/* no runtime reconfiguration supported */
+				.reload = (void*)return_false,
+				.destroy = _destroy,
+			},
+		},
+	);
+
+	return &this->public.plugin;
+}
diff --git a/src/libstrongswan/plugins/aesni/aesni_plugin.h b/src/libstrongswan/plugins/aesni/aesni_plugin.h
new file mode 100644
index 000000000..2b0c92c25
--- /dev/null
+++ b/src/libstrongswan/plugins/aesni/aesni_plugin.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2015 Martin Willi
+ * Copyright (C) 2015 revosec AG
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+/**
+ * @defgroup aesni aesni
+ * @ingroup plugins
+ *
+ * @defgroup aesni_plugin aesni_plugin
+ * @{ @ingroup aesni
+ */
+
+#ifndef AESNI_PLUGIN_H_
+#define AESNI_PLUGIN_H_
+
+#include <plugins/plugin.h>
+
+typedef struct aesni_plugin_t aesni_plugin_t;
+
+/**
+ * Plugin providing crypto primitives based on Intel AES-NI instructions.
+ */
+struct aesni_plugin_t {
+
+ /**
+ * implements plugin interface
+ */
+ plugin_t plugin;
+};
+
+#endif /** AESNI_PLUGIN_H_ @}*/
diff --git a/src/libstrongswan/plugins/aesni/aesni_xcbc.c b/src/libstrongswan/plugins/aesni/aesni_xcbc.c
new file mode 100644
index 000000000..24a75cec0
--- /dev/null
+++ b/src/libstrongswan/plugins/aesni/aesni_xcbc.c
@@ -0,0 +1,367 @@
+/*
+ * Copyright (C) 2008-2015 Martin Willi
+ * Copyright (C) 2012 Tobias Brunner
+ * Hochschule fuer Technik Rapperswil
+ * Copyright (C) 2015 revosec AG
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include "aesni_xcbc.h"
+#include "aesni_key.h"
+
+#include <crypto/prfs/mac_prf.h>
+#include <crypto/signers/mac_signer.h>
+
+typedef struct private_aesni_mac_t private_aesni_mac_t;
+
+/**
+ * Private data of a mac_t object.
+ */
+struct private_aesni_mac_t {
+
+ /**
+ * Public mac_t interface.
+ */
+ mac_t public;
+
+ /**
+ * Key schedule for K1
+ */
+ aesni_key_t *k1;
+
+ /**
+ * k2
+ */
+ __m128i k2;
+
+ /**
+ * k3
+ */
+ __m128i k3;
+
+ /**
+ * E
+ */
+ __m128i e;
+
+ /**
+ * remaining, unprocessed bytes in append mode
+ */
+ u_char rem[AES_BLOCK_SIZE];
+
+ /**
+ * number of bytes used in remaining
+ */
+ int rem_size;
+
+ /**
+ * TRUE if we have zero bytes to xcbc in final()
+ */
+ bool zero;
+};
+
+METHOD(mac_t, get_mac, bool,
+ private_aesni_mac_t *this, chunk_t data, u_int8_t *out)
+{
+ __m128i *ks, e, *bi;
+ u_int blocks, rem, i;
+
+ if (!this->k1)
+ {
+ return FALSE;
+ }
+
+ ks = this->k1->schedule;
+
+ e = this->e;
+
+ if (data.len)
+ {
+ this->zero = FALSE;
+ }
+
+ if (this->rem_size + data.len > AES_BLOCK_SIZE)
+ {
+ /* (3) For each block M[i], where i = 1 ... n-1:
+ * XOR M[i] with E[i-1], then encrypt the result with Key K1,
+ * yielding E[i].
+ */
+
+ /* append data to remaining bytes, process block M[1] */
+ memcpy(this->rem + this->rem_size, data.ptr,
+ AES_BLOCK_SIZE - this->rem_size);
+ data = chunk_skip(data, AES_BLOCK_SIZE - this->rem_size);
+
+ e = _mm_xor_si128(e, _mm_loadu_si128((__m128i*)this->rem));
+
+ e = _mm_xor_si128(e, ks[0]);
+ e = _mm_aesenc_si128(e, ks[1]);
+ e = _mm_aesenc_si128(e, ks[2]);
+ e = _mm_aesenc_si128(e, ks[3]);
+ e = _mm_aesenc_si128(e, ks[4]);
+ e = _mm_aesenc_si128(e, ks[5]);
+ e = _mm_aesenc_si128(e, ks[6]);
+ e = _mm_aesenc_si128(e, ks[7]);
+ e = _mm_aesenc_si128(e, ks[8]);
+ e = _mm_aesenc_si128(e, ks[9]);
+ e = _mm_aesenclast_si128(e, ks[10]);
+
+ bi = (__m128i*)data.ptr;
+ rem = data.len % AES_BLOCK_SIZE;
+ blocks = data.len / AES_BLOCK_SIZE;
+ if (!rem && blocks)
+ { /* don't do last block */
+ rem = AES_BLOCK_SIZE;
+ blocks--;
+ }
+
+ /* process blocks M[2] ... M[n-1] */
+ for (i = 0; i < blocks; i++)
+ {
+ e = _mm_xor_si128(e, _mm_loadu_si128(bi + i));
+
+ e = _mm_xor_si128(e, ks[0]);
+ e = _mm_aesenc_si128(e, ks[1]);
+ e = _mm_aesenc_si128(e, ks[2]);
+ e = _mm_aesenc_si128(e, ks[3]);
+ e = _mm_aesenc_si128(e, ks[4]);
+ e = _mm_aesenc_si128(e, ks[5]);
+ e = _mm_aesenc_si128(e, ks[6]);
+ e = _mm_aesenc_si128(e, ks[7]);
+ e = _mm_aesenc_si128(e, ks[8]);
+ e = _mm_aesenc_si128(e, ks[9]);
+ e = _mm_aesenclast_si128(e, ks[10]);
+ }
+
+ /* store remaining bytes of block M[n] */
+ memcpy(this->rem, data.ptr + data.len - rem, rem);
+ this->rem_size = rem;
+ }
+ else
+ {
+ /* no complete block, just copy into remaining */
+ memcpy(this->rem + this->rem_size, data.ptr, data.len);
+ this->rem_size += data.len;
+ }
+
+ if (out)
+ {
+ /* (4) For block M[n]: */
+ if (this->rem_size == AES_BLOCK_SIZE && !this->zero)
+ {
+ /* a) If the blocksize of M[n] is 128 bits:
+ * XOR M[n] with E[n-1] and Key K2, then encrypt the result with
+ * Key K1, yielding E[n].
+ */
+ e = _mm_xor_si128(e, this->k2);
+ }
+ else
+ {
+ /* b) If the blocksize of M[n] is less than 128 bits:
+ *
+ * i) Pad M[n] with a single "1" bit, followed by the number of
+ * "0" bits (possibly none) required to increase M[n]'s
+ * blocksize to 128 bits.
+ */
+ if (this->rem_size < AES_BLOCK_SIZE)
+ {
+ memset(this->rem + this->rem_size, 0,
+ AES_BLOCK_SIZE - this->rem_size);
+ this->rem[this->rem_size] = 0x80;
+ }
+ /* ii) XOR M[n] with E[n-1] and Key K3, then encrypt the result
+ * with Key K1, yielding E[n].
+ */
+ e = _mm_xor_si128(e, this->k3);
+ }
+ e = _mm_xor_si128(e, _mm_loadu_si128((__m128i*)this->rem));
+
+ e = _mm_xor_si128(e, ks[0]);
+ e = _mm_aesenc_si128(e, ks[1]);
+ e = _mm_aesenc_si128(e, ks[2]);
+ e = _mm_aesenc_si128(e, ks[3]);
+ e = _mm_aesenc_si128(e, ks[4]);
+ e = _mm_aesenc_si128(e, ks[5]);
+ e = _mm_aesenc_si128(e, ks[6]);
+ e = _mm_aesenc_si128(e, ks[7]);
+ e = _mm_aesenc_si128(e, ks[8]);
+ e = _mm_aesenc_si128(e, ks[9]);
+ e = _mm_aesenclast_si128(e, ks[10]);
+ _mm_storeu_si128((__m128i*)out, e);
+
+ /* (2) Define E[0] = 0x00000000000000000000000000000000 */
+ e = _mm_setzero_si128();
+ this->rem_size = 0;
+ this->zero = TRUE;
+ }
+ this->e = e;
+ return TRUE;
+}
+
+METHOD(mac_t, get_mac_size, size_t,
+ private_aesni_mac_t *this)
+{
+ return AES_BLOCK_SIZE;
+}
+
+METHOD(mac_t, set_key, bool,
+ private_aesni_mac_t *this, chunk_t key)
+{
+ __m128i t1, t2, t3;
+ u_char k1[AES_BLOCK_SIZE];
+ u_int round;
+ chunk_t k;
+
+ /* reset state */
+ this->e = _mm_setzero_si128();
+ this->rem_size = 0;
+ this->zero = TRUE;
+
+ /* Create RFC4434 variable keys if required */
+ if (key.len == AES_BLOCK_SIZE)
+ {
+ k = key;
+ }
+ else if (key.len < AES_BLOCK_SIZE)
+ { /* pad short keys */
+ k = chunk_alloca(AES_BLOCK_SIZE);
+ memset(k.ptr, 0, k.len);
+ memcpy(k.ptr, key.ptr, key.len);
+ }
+ else
+ { /* shorten key using XCBC */
+ k = chunk_alloca(AES_BLOCK_SIZE);
+ memset(k.ptr, 0, k.len);
+ if (!set_key(this, k) || !get_mac(this, key, k.ptr))
+ {
+ return FALSE;
+ }
+ }
+
+ /*
+ * (1) Derive 3 128-bit keys (K1, K2 and K3) from the 128-bit secret
+ * key K, as follows:
+ * K1 = 0x01010101010101010101010101010101 encrypted with Key K
+ * K2 = 0x02020202020202020202020202020202 encrypted with Key K
+ * K3 = 0x03030303030303030303030303030303 encrypted with Key K
+ */
+
+ DESTROY_IF(this->k1);
+ this->k1 = aesni_key_create(TRUE, k);
+ if (!this->k1)
+ {
+ return FALSE;
+ }
+
+ t1 = _mm_set1_epi8(0x01);
+ t2 = _mm_set1_epi8(0x02);
+ t3 = _mm_set1_epi8(0x03);
+
+ t1 = _mm_xor_si128(t1, this->k1->schedule[0]);
+ t2 = _mm_xor_si128(t2, this->k1->schedule[0]);
+ t3 = _mm_xor_si128(t3, this->k1->schedule[0]);
+
+ for (round = 1; round < this->k1->rounds; round++)
+ {
+ t1 = _mm_aesenc_si128(t1, this->k1->schedule[round]);
+ t2 = _mm_aesenc_si128(t2, this->k1->schedule[round]);
+ t3 = _mm_aesenc_si128(t3, this->k1->schedule[round]);
+ }
+
+ t1 = _mm_aesenclast_si128(t1, this->k1->schedule[this->k1->rounds]);
+ t2 = _mm_aesenclast_si128(t2, this->k1->schedule[this->k1->rounds]);
+ t3 = _mm_aesenclast_si128(t3, this->k1->schedule[this->k1->rounds]);
+
+ _mm_storeu_si128((__m128i*)k1, t1);
+ this->k2 = t2;
+ this->k3 = t3;
+
+ this->k1->destroy(this->k1);
+ this->k1 = aesni_key_create(TRUE, chunk_from_thing(k1));
+
+ memwipe(k1, AES_BLOCK_SIZE);
+ return this->k1 != NULL;
+}
+
+METHOD(mac_t, destroy, void,
+ private_aesni_mac_t *this)
+{
+ DESTROY_IF(this->k1);
+ memwipe(&this->k2, sizeof(this->k2));
+ memwipe(&this->k3, sizeof(this->k3));
+ free_align(this);
+}
+
+/*
+ * Described in header
+ */
+mac_t *aesni_xcbc_create(encryption_algorithm_t algo, size_t key_size)
+{
+ private_aesni_mac_t *this;
+
+ INIT_ALIGN(this, sizeof(__m128i),
+ .public = {
+ .get_mac = _get_mac,
+ .get_mac_size = _get_mac_size,
+ .set_key = _set_key,
+ .destroy = _destroy,
+ },
+ );
+
+ return &this->public;
+}
+
+/*
+ * Described in header.
+ */
+prf_t *aesni_xcbc_prf_create(pseudo_random_function_t algo)
+{
+ mac_t *xcbc;
+
+ switch (algo)
+ {
+ case PRF_AES128_XCBC:
+ xcbc = aesni_xcbc_create(ENCR_AES_CBC, 16);
+ break;
+ default:
+ return NULL;
+ }
+ if (xcbc)
+ {
+ return mac_prf_create(xcbc);
+ }
+ return NULL;
+}
+
+/*
+ * Described in header
+ */
+signer_t *aesni_xcbc_signer_create(integrity_algorithm_t algo)
+{
+ size_t trunc;
+ mac_t *xcbc;
+
+ switch (algo)
+ {
+ case AUTH_AES_XCBC_96:
+ xcbc = aesni_xcbc_create(ENCR_AES_CBC, 16);
+ trunc = 12;
+ break;
+ default:
+ return NULL;
+ }
+ if (xcbc)
+ {
+ return mac_signer_create(xcbc, trunc);
+ }
+ return NULL;
+}
diff --git a/src/libstrongswan/plugins/aesni/aesni_xcbc.h b/src/libstrongswan/plugins/aesni/aesni_xcbc.h
new file mode 100644
index 000000000..53f559feb
--- /dev/null
+++ b/src/libstrongswan/plugins/aesni/aesni_xcbc.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2015 Martin Willi
+ * Copyright (C) 2015 revosec AG
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+/**
+ * @defgroup aesni_xcbc aesni_xcbc
+ * @{ @ingroup aesni
+ */
+
+#ifndef AESNI_XCBC_H_
+#define AESNI_XCBC_H_
+
+#include <crypto/mac.h>
+#include <crypto/prfs/prf.h>
+#include <crypto/signers/signer.h>
+
+/**
+ * Create a generic mac_t object using AESNI XCBC
+ *
+ * @param algo underlying encryption algorithm
+ * @param key_size size of encryption key, in bytes
+ */
+mac_t *aesni_xcbc_create(encryption_algorithm_t algo, size_t key_size);
+
+/**
+ * Creates a new prf_t object based AESNI XCBC.
+ *
+ * @param algo algorithm to implement
+ * @return prf_t object, NULL if not supported
+ */
+prf_t *aesni_xcbc_prf_create(pseudo_random_function_t algo);
+
+/**
+ * Creates a new signer_t object based on AESNI XCBC.
+ *
+ * @param algo algorithm to implement
+ * @return signer_t, NULL if not supported
+ */
+signer_t *aesni_xcbc_signer_create(integrity_algorithm_t algo);
+
+#endif /** AESNI_XCBC_H_ @}*/
diff --git a/src/libstrongswan/plugins/af_alg/af_alg_signer.c b/src/libstrongswan/plugins/af_alg/af_alg_signer.c
index 9ad01103a..1403144ab 100644
--- a/src/libstrongswan/plugins/af_alg/af_alg_signer.c
+++ b/src/libstrongswan/plugins/af_alg/af_alg_signer.c
@@ -138,7 +138,7 @@ METHOD(signer_t, verify_signature, bool,
{
return FALSE;
}
- return memeq(signature.ptr, sig, signature.len);
+ return memeq_const(signature.ptr, sig, signature.len);
}
METHOD(signer_t, get_key_size, size_t,
diff --git a/src/libstrongswan/plugins/ccm/ccm_aead.c b/src/libstrongswan/plugins/ccm/ccm_aead.c
index 6d4b2e13c..676d67681 100644
--- a/src/libstrongswan/plugins/ccm/ccm_aead.c
+++ b/src/libstrongswan/plugins/ccm/ccm_aead.c
@@ -256,7 +256,7 @@ static bool verify_icv(private_ccm_aead_t *this, chunk_t plain, chunk_t assoc,
char buf[this->icv_size];
return create_icv(this, plain, assoc, iv, buf) &&
- memeq(buf, icv, this->icv_size);
+ memeq_const(buf, icv, this->icv_size);
}
METHOD(aead_t, encrypt, bool,
diff --git a/src/libstrongswan/plugins/fips_prf/fips_prf.c b/src/libstrongswan/plugins/fips_prf/fips_prf.c
index 25accf996..92977909e 100644
--- a/src/libstrongswan/plugins/fips_prf/fips_prf.c
+++ b/src/libstrongswan/plugins/fips_prf/fips_prf.c
@@ -15,8 +15,6 @@
#include "fips_prf.h"
-#include <arpa/inet.h>
-
#include <utils/debug.h>
typedef struct private_fips_prf_t private_fips_prf_t;
diff --git a/src/libstrongswan/plugins/gcm/gcm_aead.c b/src/libstrongswan/plugins/gcm/gcm_aead.c
index 4ab17017f..6e1694a34 100644
--- a/src/libstrongswan/plugins/gcm/gcm_aead.c
+++ b/src/libstrongswan/plugins/gcm/gcm_aead.c
@@ -276,7 +276,7 @@ static bool verify_icv(private_gcm_aead_t *this, chunk_t assoc, chunk_t crypt,
char tmp[this->icv_size];
return create_icv(this, assoc, crypt, j, tmp) &&
- memeq(tmp, icv, this->icv_size);
+ memeq_const(tmp, icv, this->icv_size);
}
METHOD(aead_t, encrypt, bool,
diff --git a/src/libstrongswan/plugins/gcrypt/gcrypt_dh.c b/src/libstrongswan/plugins/gcrypt/gcrypt_dh.c
index 744ec0bbf..cee25ea74 100644
--- a/src/libstrongswan/plugins/gcrypt/gcrypt_dh.c
+++ b/src/libstrongswan/plugins/gcrypt/gcrypt_dh.c
@@ -145,6 +145,24 @@ METHOD(diffie_hellman_t, get_my_public_value, bool,
return TRUE;
}
+METHOD(diffie_hellman_t, set_private_value, bool,
+ private_gcrypt_dh_t *this, chunk_t value)
+{
+ gcry_error_t err;
+ gcry_mpi_t xa;
+
+ err = gcry_mpi_scan(&xa, GCRYMPI_FMT_USG, value.ptr, value.len, NULL);
+ if (!err)
+ {
+ gcry_mpi_release(this->xa);
+ this->xa = xa;
+ gcry_mpi_powm(this->ya, this->g, this->xa, this->p);
+ gcry_mpi_release(this->zz);
+ this->zz = NULL;
+ }
+ return !err;
+}
+
METHOD(diffie_hellman_t, get_shared_secret, bool,
private_gcrypt_dh_t *this, chunk_t *secret)
{
@@ -191,6 +209,7 @@ gcrypt_dh_t *create_generic(diffie_hellman_group_t group, size_t exp_len,
.get_shared_secret = _get_shared_secret,
.set_other_public_value = _set_other_public_value,
.get_my_public_value = _get_my_public_value,
+ .set_private_value = _set_private_value,
.get_dh_group = _get_dh_group,
.destroy = _destroy,
},
diff --git a/src/libstrongswan/plugins/gcrypt/gcrypt_plugin.c b/src/libstrongswan/plugins/gcrypt/gcrypt_plugin.c
index 480c083c0..04f1f43ef 100644
--- a/src/libstrongswan/plugins/gcrypt/gcrypt_plugin.c
+++ b/src/libstrongswan/plugins/gcrypt/gcrypt_plugin.c
@@ -158,6 +158,9 @@ plugin_t *gcrypt_plugin_create()
}
gcry_control(GCRYCTL_INITIALIZATION_FINISHED, 0);
+ /* initialize static allocations we want to exclude from leak-detective */
+ gcry_create_nonce(NULL, 0);
+
INIT(this,
.public = {
.plugin = {
diff --git a/src/libstrongswan/plugins/gmp/gmp_diffie_hellman.c b/src/libstrongswan/plugins/gmp/gmp_diffie_hellman.c
index 4fcb168fa..b7ee94ee0 100644
--- a/src/libstrongswan/plugins/gmp/gmp_diffie_hellman.c
+++ b/src/libstrongswan/plugins/gmp/gmp_diffie_hellman.c
@@ -162,6 +162,15 @@ METHOD(diffie_hellman_t, get_my_public_value, bool,
return TRUE;
}
+METHOD(diffie_hellman_t, set_private_value, bool,
+ private_gmp_diffie_hellman_t *this, chunk_t value)
+{
+ mpz_import(this->xa, value.len, 1, 1, 1, 0, value.ptr);
+ mpz_powm(this->ya, this->g, this->xa, this->p);
+ this->computed = FALSE;
+ return TRUE;
+}
+
METHOD(diffie_hellman_t, get_shared_secret, bool,
private_gmp_diffie_hellman_t *this, chunk_t *secret)
{
@@ -212,6 +221,7 @@ static gmp_diffie_hellman_t *create_generic(diffie_hellman_group_t group,
.get_shared_secret = _get_shared_secret,
.set_other_public_value = _set_other_public_value,
.get_my_public_value = _get_my_public_value,
+ .set_private_value = _set_private_value,
.get_dh_group = _get_dh_group,
.destroy = _destroy,
},
diff --git a/src/libstrongswan/plugins/gmp/gmp_rsa_public_key.c b/src/libstrongswan/plugins/gmp/gmp_rsa_public_key.c
index ad659e4d7..e738908e2 100644
--- a/src/libstrongswan/plugins/gmp/gmp_rsa_public_key.c
+++ b/src/libstrongswan/plugins/gmp/gmp_rsa_public_key.c
@@ -187,7 +187,7 @@ static bool verify_emsa_pkcs1_signature(private_gmp_rsa_public_key_t *this,
" %u bytes", em.len, data.len);
goto end;
}
- success = memeq(em.ptr, data.ptr, data.len);
+ success = memeq_const(em.ptr, data.ptr, data.len);
}
else
{ /* IKEv2 and X.509 certificate signatures */
@@ -258,7 +258,7 @@ static bool verify_emsa_pkcs1_signature(private_gmp_rsa_public_key_t *this,
goto end_parser;
}
hasher->destroy(hasher);
- success = memeq(object.ptr, hash.ptr, hash.len);
+ success = memeq_const(object.ptr, hash.ptr, hash.len);
free(hash.ptr);
break;
}
@@ -500,4 +500,3 @@ gmp_rsa_public_key_t *gmp_rsa_public_key_load(key_type_t type, va_list args)
return &this->public;
}
-
diff --git a/src/libstrongswan/plugins/openssl/openssl_crypter.c b/src/libstrongswan/plugins/openssl/openssl_crypter.c
index c2478a4ed..26f4700b8 100644
--- a/src/libstrongswan/plugins/openssl/openssl_crypter.c
+++ b/src/libstrongswan/plugins/openssl/openssl_crypter.c
@@ -226,10 +226,12 @@ openssl_crypter_t *openssl_crypter_create(encryption_algorithm_t algo,
return NULL;
}
break;
+#ifndef OPENSSL_NO_DES
case ENCR_DES_ECB:
key_size = 8;
this->cipher = EVP_des_ecb();
break;
+#endif
default:
{
char* name;
diff --git a/src/libstrongswan/plugins/openssl/openssl_diffie_hellman.c b/src/libstrongswan/plugins/openssl/openssl_diffie_hellman.c
index 2615d60a2..cac442fc0 100644
--- a/src/libstrongswan/plugins/openssl/openssl_diffie_hellman.c
+++ b/src/libstrongswan/plugins/openssl/openssl_diffie_hellman.c
@@ -112,6 +112,18 @@ METHOD(diffie_hellman_t, set_other_public_value, bool,
return TRUE;
}
+METHOD(diffie_hellman_t, set_private_value, bool,
+ private_openssl_diffie_hellman_t *this, chunk_t value)
+{
+ if (BN_bin2bn(value.ptr, value.len, this->dh->priv_key))
+ {
+ chunk_clear(&this->shared_secret);
+ this->computed = FALSE;
+ return DH_generate_key(this->dh);
+ }
+ return FALSE;
+}
+
METHOD(diffie_hellman_t, get_dh_group, diffie_hellman_group_t,
private_openssl_diffie_hellman_t *this)
{
@@ -160,6 +172,7 @@ openssl_diffie_hellman_t *openssl_diffie_hellman_create(
.get_shared_secret = _get_shared_secret,
.set_other_public_value = _set_other_public_value,
.get_my_public_value = _get_my_public_value,
+ .set_private_value = _set_private_value,
.get_dh_group = _get_dh_group,
.destroy = _destroy,
},
diff --git a/src/libstrongswan/plugins/openssl/openssl_ec_diffie_hellman.c b/src/libstrongswan/plugins/openssl/openssl_ec_diffie_hellman.c
index 550a5432f..a1af500e2 100644
--- a/src/libstrongswan/plugins/openssl/openssl_ec_diffie_hellman.c
+++ b/src/libstrongswan/plugins/openssl/openssl_ec_diffie_hellman.c
@@ -248,6 +248,49 @@ METHOD(diffie_hellman_t, get_my_public_value, bool,
return TRUE;
}
+METHOD(diffie_hellman_t, set_private_value, bool,
+ private_openssl_ec_diffie_hellman_t *this, chunk_t value)
+{
+ EC_POINT *pub = NULL;
+ BIGNUM *priv = NULL;
+ bool ret = FALSE;
+
+ priv = BN_bin2bn(value.ptr, value.len, NULL);
+ if (!priv)
+ {
+ goto error;
+ }
+ pub = EC_POINT_new(EC_KEY_get0_group(this->key));
+ if (!pub)
+ {
+ goto error;
+ }
+ if (EC_POINT_mul(this->ec_group, pub, priv, NULL, NULL, NULL) != 1)
+ {
+ goto error;
+ }
+ if (EC_KEY_set_private_key(this->key, priv) != 1)
+ {
+ goto error;
+ }
+ if (EC_KEY_set_public_key(this->key, pub) != 1)
+ {
+ goto error;
+ }
+ ret = TRUE;
+
+error:
+ if (pub)
+ {
+ EC_POINT_free(pub);
+ }
+ if (priv)
+ {
+ BN_free(priv);
+ }
+ return ret;
+}
+
METHOD(diffie_hellman_t, get_shared_secret, bool,
private_openssl_ec_diffie_hellman_t *this, chunk_t *secret)
{
@@ -558,6 +601,7 @@ openssl_ec_diffie_hellman_t *openssl_ec_diffie_hellman_create(diffie_hellman_gro
.get_shared_secret = _get_shared_secret,
.set_other_public_value = _set_other_public_value,
.get_my_public_value = _get_my_public_value,
+ .set_private_value = _set_private_value,
.get_dh_group = _get_dh_group,
.destroy = _destroy,
},
diff --git a/src/libstrongswan/plugins/openssl/openssl_hmac.c b/src/libstrongswan/plugins/openssl/openssl_hmac.c
index 4f0bcc7c3..065187a8c 100644
--- a/src/libstrongswan/plugins/openssl/openssl_hmac.c
+++ b/src/libstrongswan/plugins/openssl/openssl_hmac.c
@@ -69,15 +69,26 @@ struct private_mac_t {
* Current HMAC context
*/
HMAC_CTX hmac;
+
+ /**
+ * Key set on HMAC_CTX?
+ */
+ bool key_set;
};
METHOD(mac_t, set_key, bool,
private_mac_t *this, chunk_t key)
{
#if OPENSSL_VERSION_NUMBER >= 0x10000000L
- return HMAC_Init_ex(&this->hmac, key.ptr, key.len, this->hasher, NULL);
+ if (HMAC_Init_ex(&this->hmac, key.ptr, key.len, this->hasher, NULL))
+ {
+ this->key_set = TRUE;
+ return TRUE;
+ }
+ return FALSE;
#else /* OPENSSL_VERSION_NUMBER < 1.0 */
HMAC_Init_ex(&this->hmac, key.ptr, key.len, this->hasher, NULL);
+ this->key_set = TRUE;
return TRUE;
#endif
}
@@ -85,6 +96,10 @@ METHOD(mac_t, set_key, bool,
METHOD(mac_t, get_mac, bool,
private_mac_t *this, chunk_t data, u_int8_t *out)
{
+ if (!this->key_set)
+ {
+ return FALSE;
+ }
#if OPENSSL_VERSION_NUMBER >= 0x10000000L
if (!HMAC_Update(&this->hmac, data.ptr, data.len))
{
@@ -153,11 +168,6 @@ static mac_t *hmac_create(hash_algorithm_t algo)
}
HMAC_CTX_init(&this->hmac);
- if (!set_key(this, chunk_empty))
- {
- destroy(this);
- return NULL;
- }
return &this->public;
}
diff --git a/src/libstrongswan/plugins/openssl/openssl_pkcs7.c b/src/libstrongswan/plugins/openssl/openssl_pkcs7.c
index 9c3c4040c..891e829ae 100644
--- a/src/libstrongswan/plugins/openssl/openssl_pkcs7.c
+++ b/src/libstrongswan/plugins/openssl/openssl_pkcs7.c
@@ -305,7 +305,7 @@ static bool verify_digest(CMS_ContentInfo *cms, CMS_SignerInfo *si, int hash_oid
}
hasher->destroy(hasher);
- if (!chunk_equals(digest, hash))
+ if (!chunk_equals_const(digest, hash))
{
free(hash.ptr);
DBG1(DBG_LIB, "invalid messageDigest");
diff --git a/src/libstrongswan/plugins/openssl/openssl_rsa_public_key.c b/src/libstrongswan/plugins/openssl/openssl_rsa_public_key.c
index 9748e28f2..aa54d3bbd 100644
--- a/src/libstrongswan/plugins/openssl/openssl_rsa_public_key.c
+++ b/src/libstrongswan/plugins/openssl/openssl_rsa_public_key.c
@@ -74,7 +74,7 @@ static bool verify_emsa_pkcs1_signature(private_openssl_rsa_public_key_t *this,
RSA_PKCS1_PADDING);
if (len != -1)
{
- valid = chunk_equals(data, chunk_create(buf, len));
+ valid = chunk_equals_const(data, chunk_create(buf, len));
}
free(buf);
}
diff --git a/src/libstrongswan/plugins/padlock/padlock_plugin.c b/src/libstrongswan/plugins/padlock/padlock_plugin.c
index 2005ef648..9ce210961 100644
--- a/src/libstrongswan/plugins/padlock/padlock_plugin.c
+++ b/src/libstrongswan/plugins/padlock/padlock_plugin.c
@@ -23,32 +23,13 @@
#include <library.h>
#include <plugins/plugin_feature.h>
+#include <utils/cpu_feature.h>
#include <utils/debug.h>
typedef struct private_padlock_plugin_t private_padlock_plugin_t;
typedef enum padlock_feature_t padlock_feature_t;
/**
- * Feature flags of padlock, received via cpuid()
- */
-enum padlock_feature_t {
- PADLOCK_RESERVED_1 = (1<<0),
- PADLOCK_RESERVED_2 = (1<<1),
- PADLOCK_RNG_AVAILABLE = (1<<2),
- PADLOCK_RNG_ENABLED = (1<<3),
- PADLOCK_RESERVED_3 = (1<<4),
- PADLOCK_RESERVED_4 = (1<<5),
- PADLOCK_ACE_AVAILABLE = (1<<6),
- PADLOCK_ACE_ENABLED = (1<<7),
- PADLOCK_ACE2_AVAILABLE = (1<<8),
- PADLOCK_ACE2_ENABLED = (1<<9),
- PADLOCK_PHE_AVAILABLE = (1<<10),
- PADLOCK_PHE_ENABLED = (1<<11),
- PADLOCK_PMM_AVAILABLE = (1<<12),
- PADLOCK_PMM_ENABLED = (1<<13),
-};
-
-/**
* private data of aes_plugin
*/
struct private_padlock_plugin_t {
@@ -61,48 +42,9 @@ struct private_padlock_plugin_t {
/**
* features supported by Padlock
*/
- padlock_feature_t features;
+ cpu_feature_t features;
};
-/**
- * Get cpuid for info, return eax, ebx, ecx and edx. -fPIC requires to save ebx.
- */
-#define cpuid(op, a, b, c, d)\
- asm (\
- "pushl %%ebx \n\t"\
- "cpuid \n\t"\
- "movl %%ebx, %1 \n\t"\
- "popl %%ebx \n\t"\
- : "=a" (a), "=r" (b), "=c" (c), "=d" (d) \
- : "a" (op));
-
-/**
- * Get features supported by Padlock
- */
-static padlock_feature_t get_padlock_features()
-{
- char vendor[3 * sizeof(int) + 1];
- int a, b, c, d;
-
- cpuid(0, a, b, c, d);
- /* VendorID string is in b-d-c (yes, in this order) */
- snprintf(vendor, sizeof(vendor), "%.4s%.4s%.4s", &b, &d, &c);
-
- /* check if we have a VIA chip */
- if (streq(vendor, "CentaurHauls"))
- {
- cpuid(0xC0000000, a, b, c, d);
- /* check Centaur Extended Feature Flags */
- if (a >= 0xC0000001)
- {
- cpuid(0xC0000001, a, b, c, d);
- return d;
- }
- }
- DBG1(DBG_LIB, "Padlock not found, CPU is %s", vendor);
- return 0;
-}
-
METHOD(plugin_t, get_name, char*,
private_padlock_plugin_t *this)
{
@@ -132,15 +74,15 @@ METHOD(plugin_t, get_features, int,
if (!count)
{ /* initialize only once */
- if (this->features & PADLOCK_RNG_ENABLED)
+ if (this->features & CPU_FEATURE_PADLOCK_RNG_ENABLED)
{
plugin_features_add(f, f_rng, countof(f_rng), &count);
}
- if (this->features & PADLOCK_ACE2_ENABLED)
+ if (this->features & CPU_FEATURE_PADLOCK_ACE2_ENABLED)
{
plugin_features_add(f, f_aes, countof(f_aes), &count);
}
- if (this->features & PADLOCK_PHE_ENABLED)
+ if (this->features & CPU_FEATURE_PADLOCK_PHE_ENABLED)
{
plugin_features_add(f, f_sha1, countof(f_sha1), &count);
}
@@ -170,25 +112,20 @@ plugin_t *padlock_plugin_create()
.destroy = _destroy,
},
},
- .features = get_padlock_features(),
+ .features = cpu_feature_get_all(),
);
- if (!this->features)
- {
- free(this);
- return NULL;
- }
- DBG1(DBG_LIB, "Padlock found, supports:%s%s%s%s%s, enabled:%s%s%s%s%s",
- this->features & PADLOCK_RNG_AVAILABLE ? " RNG" : "",
- this->features & PADLOCK_ACE_AVAILABLE ? " ACE" : "",
- this->features & PADLOCK_ACE2_AVAILABLE ? " ACE2" : "",
- this->features & PADLOCK_PHE_AVAILABLE ? " PHE" : "",
- this->features & PADLOCK_PMM_AVAILABLE ? " PMM" : "",
- this->features & PADLOCK_RNG_ENABLED ? " RNG" : "",
- this->features & PADLOCK_ACE_ENABLED ? " ACE" : "",
- this->features & PADLOCK_ACE2_ENABLED ? " ACE2" : "",
- this->features & PADLOCK_PHE_ENABLED ? " PHE" : "",
- this->features & PADLOCK_PMM_ENABLED ? " PMM" : "");
+ DBG1(DBG_LIB, "Padlock features supported:%s%s%s%s%s, enabled:%s%s%s%s%s",
+ this->features & CPU_FEATURE_PADLOCK_RNG_AVAILABLE ? " RNG" : "",
+ this->features & CPU_FEATURE_PADLOCK_ACE_AVAILABLE ? " ACE" : "",
+ this->features & CPU_FEATURE_PADLOCK_ACE2_AVAILABLE ? " ACE2" : "",
+ this->features & CPU_FEATURE_PADLOCK_PHE_AVAILABLE ? " PHE" : "",
+ this->features & CPU_FEATURE_PADLOCK_PMM_AVAILABLE ? " PMM" : "",
+ this->features & CPU_FEATURE_PADLOCK_RNG_ENABLED ? " RNG" : "",
+ this->features & CPU_FEATURE_PADLOCK_ACE_ENABLED ? " ACE" : "",
+ this->features & CPU_FEATURE_PADLOCK_ACE2_ENABLED ? " ACE2" : "",
+ this->features & CPU_FEATURE_PADLOCK_PHE_ENABLED ? " PHE" : "",
+ this->features & CPU_FEATURE_PADLOCK_PMM_ENABLED ? " PMM" : "");
return &this->public.plugin;
}
diff --git a/src/libstrongswan/plugins/pkcs12/pkcs12_decode.c b/src/libstrongswan/plugins/pkcs12/pkcs12_decode.c
index 379f24796..4441b278f 100644
--- a/src/libstrongswan/plugins/pkcs12/pkcs12_decode.c
+++ b/src/libstrongswan/plugins/pkcs12/pkcs12_decode.c
@@ -356,7 +356,7 @@ static bool verify_mac(hash_algorithm_t hash, chunk_t salt,
{
break;
}
- if (chunk_equals(mac, calculated))
+ if (chunk_equals_const(mac, calculated))
{
success = TRUE;
break;
diff --git a/src/libstrongswan/plugins/pkcs7/pkcs7_signed_data.c b/src/libstrongswan/plugins/pkcs7/pkcs7_signed_data.c
index 48fb5e6a4..d224ef3aa 100644
--- a/src/libstrongswan/plugins/pkcs7/pkcs7_signed_data.c
+++ b/src/libstrongswan/plugins/pkcs7/pkcs7_signed_data.c
@@ -269,7 +269,7 @@ METHOD(enumerator_t, enumerate, bool,
hasher->destroy(hasher);
DBG3(DBG_LIB, "hash: %B", &hash);
- valid = chunk_equals(chunk, hash);
+ valid = chunk_equals_const(chunk, hash);
free(hash.ptr);
if (!valid)
{
diff --git a/src/libstrongswan/plugins/plugin_feature.c b/src/libstrongswan/plugins/plugin_feature.c
index 65cdbe9d9..2d0ce8a4c 100644
--- a/src/libstrongswan/plugins/plugin_feature.c
+++ b/src/libstrongswan/plugins/plugin_feature.c
@@ -437,10 +437,12 @@ bool plugin_feature_load(plugin_t *plugin, plugin_feature_t *feature,
{
case FEATURE_CRYPTER:
lib->crypto->add_crypter(lib->crypto, feature->arg.crypter.alg,
+ feature->arg.crypter.key_size,
name, reg->arg.reg.f);
break;
case FEATURE_AEAD:
lib->crypto->add_aead(lib->crypto, feature->arg.aead.alg,
+ feature->arg.aead.key_size,
name, reg->arg.reg.f);
break;
case FEATURE_SIGNER:
diff --git a/src/libstrongswan/plugins/rdrand/rdrand_plugin.c b/src/libstrongswan/plugins/rdrand/rdrand_plugin.c
index b416c872f..b63bc2f43 100644
--- a/src/libstrongswan/plugins/rdrand/rdrand_plugin.c
+++ b/src/libstrongswan/plugins/rdrand/rdrand_plugin.c
@@ -20,6 +20,7 @@
#include <library.h>
#include <utils/debug.h>
+#include <utils/cpu_feature.h>
typedef struct private_rdrand_plugin_t private_rdrand_plugin_t;
typedef enum cpuid_feature_t cpuid_feature_t;
@@ -35,56 +36,6 @@ struct private_rdrand_plugin_t {
rdrand_plugin_t public;
};
-/**
- * CPU feature flags, returned via cpuid(1)
- */
-enum cpuid_feature_t {
- CPUID_RDRAND = (1<<30),
-};
-
-/**
- * Get cpuid for info, return eax, ebx, ecx and edx.
- * -fPIC requires to save ebx on IA-32.
- */
-static void cpuid(u_int op, u_int *a, u_int *b, u_int *c, u_int *d)
-{
-#ifdef __x86_64__
- asm("cpuid" : "=a" (*a), "=b" (*b), "=c" (*c), "=d" (*d) : "a" (op));
-#else /* __i386__ */
- asm("pushl %%ebx;"
- "cpuid;"
- "movl %%ebx, %1;"
- "popl %%ebx;"
- : "=a" (*a), "=r" (*b), "=c" (*c), "=d" (*d) : "a" (op));
-#endif /* __x86_64__ / __i386__*/
-}
-
-/**
- * Check if we have RDRAND instruction
- */
-static bool have_rdrand()
-{
- char vendor[3 * sizeof(u_int32_t) + 1];
- u_int a, b, c, d;
-
- cpuid(0, &a, &b, &c, &d);
- /* VendorID string is in b-d-c (yes, in this order) */
- snprintf(vendor, sizeof(vendor), "%.4s%.4s%.4s", &b, &d, &c);
-
- /* check if we have an Intel CPU */
- if (streq(vendor, "GenuineIntel"))
- {
- cpuid(1, &a, &b, &c, &d);
- if (c & CPUID_RDRAND)
- {
- DBG2(DBG_LIB, "detected RDRAND support on %s CPU", vendor);
- return TRUE;
- }
- }
- DBG2(DBG_LIB, "no RDRAND support on %s CPU, disabled", vendor);
- return FALSE;
-}
-
METHOD(plugin_t, get_name, char*,
private_rdrand_plugin_t *this)
{
@@ -102,10 +53,12 @@ METHOD(plugin_t, get_features, int,
PLUGIN_DEPENDS(CRYPTER, ENCR_AES_CBC, 16),
};
*features = f;
- if (have_rdrand())
+ if (cpu_feature_available(CPU_FEATURE_RDRAND))
{
+ DBG2(DBG_LIB, "detected RDRAND support, enabled");
return countof(f);
}
+ DBG2(DBG_LIB, "no RDRAND support detected, disabled");
return 0;
}
diff --git a/src/libstrongswan/plugins/sqlite/sqlite_database.c b/src/libstrongswan/plugins/sqlite/sqlite_database.c
index ec1ca1404..0a35e3017 100644
--- a/src/libstrongswan/plugins/sqlite/sqlite_database.c
+++ b/src/libstrongswan/plugins/sqlite/sqlite_database.c
@@ -69,6 +69,18 @@ typedef struct {
} transaction_t;
/**
+ * Check if the SQLite library is thread safe
+ */
+static bool is_threadsave()
+{
+#if SQLITE_VERSION_NUMBER >= 3005000
+ return sqlite3_threadsafe() > 0;
+#endif
+ /* sqlite connections prior to 3.5 may be used by a single thread only */
+ return FALSE;
+}
+
+/**
* Create and run a sqlite stmt using a sql string and args
*/
static sqlite3_stmt* run(private_sqlite_database_t *this, char *sql,
@@ -168,9 +180,10 @@ typedef struct {
static void sqlite_enumerator_destroy(sqlite_enumerator_t *this)
{
sqlite3_finalize(this->stmt);
-#if SQLITE_VERSION_NUMBER < 3005000
- this->database->mutex->unlock(this->database->mutex);
-#endif
+ if (!is_threadsave())
+ {
+ this->database->mutex->unlock(this->database->mutex);
+ }
free(this->columns);
free(this);
}
@@ -248,10 +261,10 @@ METHOD(database_t, query, enumerator_t*,
sqlite_enumerator_t *enumerator = NULL;
int i;
-#if SQLITE_VERSION_NUMBER < 3005000
- /* sqlite connections prior to 3.5 may be used by a single thread only, */
- this->mutex->lock(this->mutex);
-#endif
+ if (!is_threadsave())
+ {
+ this->mutex->lock(this->mutex);
+ }
va_start(args, sql);
stmt = run(this, sql, &args);
diff --git a/src/libstrongswan/plugins/sqlite/sqlite_plugin.c b/src/libstrongswan/plugins/sqlite/sqlite_plugin.c
index f554a9e4f..7f46aced7 100644
--- a/src/libstrongswan/plugins/sqlite/sqlite_plugin.c
+++ b/src/libstrongswan/plugins/sqlite/sqlite_plugin.c
@@ -16,6 +16,7 @@
#include "sqlite_plugin.h"
#include <library.h>
+#include <sqlite3.h>
#include "sqlite_database.h"
typedef struct private_sqlite_plugin_t private_sqlite_plugin_t;
@@ -60,6 +61,7 @@ METHOD(plugin_t, destroy, void,
plugin_t *sqlite_plugin_create()
{
private_sqlite_plugin_t *this;
+ int threadsave = 0;
INIT(this,
.public = {
@@ -71,6 +73,11 @@ plugin_t *sqlite_plugin_create()
},
);
+#if SQLITE_VERSION_NUMBER >= 3005000
+ threadsave = sqlite3_threadsafe();
+#endif
+ DBG2(DBG_LIB, "using SQLite %s, thread safety %d",
+ sqlite3_libversion(), threadsave);
+
return &this->public.plugin;
}
-
diff --git a/src/libstrongswan/plugins/test_vectors/Makefile.am b/src/libstrongswan/plugins/test_vectors/Makefile.am
index 85f86726b..bde27b873 100644
--- a/src/libstrongswan/plugins/test_vectors/Makefile.am
+++ b/src/libstrongswan/plugins/test_vectors/Makefile.am
@@ -40,6 +40,10 @@ libstrongswan_test_vectors_la_SOURCES = \
test_vectors/sha2.c \
test_vectors/sha2_hmac.c \
test_vectors/fips_prf.c \
+ test_vectors/modp.c \
+ test_vectors/modpsub.c \
+ test_vectors/ecp.c \
+ test_vectors/ecpbp.c \
test_vectors/rng.c
libstrongswan_test_vectors_la_LDFLAGS = -module -avoid-version
diff --git a/src/libstrongswan/plugins/test_vectors/Makefile.in b/src/libstrongswan/plugins/test_vectors/Makefile.in
index 8980ec46c..e98119b85 100644
--- a/src/libstrongswan/plugins/test_vectors/Makefile.in
+++ b/src/libstrongswan/plugins/test_vectors/Makefile.in
@@ -143,7 +143,8 @@ am_libstrongswan_test_vectors_la_OBJECTS = test_vectors_plugin.lo \
test_vectors/md5_hmac.lo test_vectors/sha1.lo \
test_vectors/sha1_hmac.lo test_vectors/sha2.lo \
test_vectors/sha2_hmac.lo test_vectors/fips_prf.lo \
- test_vectors/rng.lo
+ test_vectors/modp.lo test_vectors/modpsub.lo \
+ test_vectors/ecp.lo test_vectors/ecpbp.lo test_vectors/rng.lo
libstrongswan_test_vectors_la_OBJECTS = \
$(am_libstrongswan_test_vectors_la_OBJECTS)
AM_V_lt = $(am__v_lt_@AM_V@)
@@ -481,6 +482,10 @@ libstrongswan_test_vectors_la_SOURCES = \
test_vectors/sha2.c \
test_vectors/sha2_hmac.c \
test_vectors/fips_prf.c \
+ test_vectors/modp.c \
+ test_vectors/modpsub.c \
+ test_vectors/ecp.c \
+ test_vectors/ecpbp.c \
test_vectors/rng.c
libstrongswan_test_vectors_la_LDFLAGS = -module -avoid-version
@@ -626,6 +631,14 @@ test_vectors/sha2_hmac.lo: test_vectors/$(am__dirstamp) \
test_vectors/$(DEPDIR)/$(am__dirstamp)
test_vectors/fips_prf.lo: test_vectors/$(am__dirstamp) \
test_vectors/$(DEPDIR)/$(am__dirstamp)
+test_vectors/modp.lo: test_vectors/$(am__dirstamp) \
+ test_vectors/$(DEPDIR)/$(am__dirstamp)
+test_vectors/modpsub.lo: test_vectors/$(am__dirstamp) \
+ test_vectors/$(DEPDIR)/$(am__dirstamp)
+test_vectors/ecp.lo: test_vectors/$(am__dirstamp) \
+ test_vectors/$(DEPDIR)/$(am__dirstamp)
+test_vectors/ecpbp.lo: test_vectors/$(am__dirstamp) \
+ test_vectors/$(DEPDIR)/$(am__dirstamp)
test_vectors/rng.lo: test_vectors/$(am__dirstamp) \
test_vectors/$(DEPDIR)/$(am__dirstamp)
@@ -654,12 +667,16 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@test_vectors/$(DEPDIR)/camellia_xcbc.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@test_vectors/$(DEPDIR)/cast.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@test_vectors/$(DEPDIR)/des.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@test_vectors/$(DEPDIR)/ecp.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@test_vectors/$(DEPDIR)/ecpbp.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@test_vectors/$(DEPDIR)/fips_prf.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@test_vectors/$(DEPDIR)/idea.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@test_vectors/$(DEPDIR)/md2.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@test_vectors/$(DEPDIR)/md4.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@test_vectors/$(DEPDIR)/md5.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@test_vectors/$(DEPDIR)/md5_hmac.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@test_vectors/$(DEPDIR)/modp.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@test_vectors/$(DEPDIR)/modpsub.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@test_vectors/$(DEPDIR)/null.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@test_vectors/$(DEPDIR)/rc2.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@test_vectors/$(DEPDIR)/rc5.Plo@am__quote@
diff --git a/src/libstrongswan/plugins/test_vectors/test_vectors.h b/src/libstrongswan/plugins/test_vectors/test_vectors.h
index 33c13d9f4..f7450aa9e 100644
--- a/src/libstrongswan/plugins/test_vectors/test_vectors.h
+++ b/src/libstrongswan/plugins/test_vectors/test_vectors.h
@@ -86,6 +86,11 @@ TEST_VECTOR_AEAD(aes_ccm8)
TEST_VECTOR_AEAD(aes_ccm9)
TEST_VECTOR_AEAD(aes_ccm10)
TEST_VECTOR_AEAD(aes_ccm11)
+TEST_VECTOR_AEAD(aes_ccm12)
+TEST_VECTOR_AEAD(aes_ccm13)
+TEST_VECTOR_AEAD(aes_ccm14)
+TEST_VECTOR_AEAD(aes_ccm15)
+TEST_VECTOR_AEAD(aes_ccm16)
TEST_VECTOR_AEAD(aes_gcm1)
TEST_VECTOR_AEAD(aes_gcm2)
TEST_VECTOR_AEAD(aes_gcm3_1)
@@ -100,6 +105,13 @@ TEST_VECTOR_AEAD(aes_gcm13)
TEST_VECTOR_AEAD(aes_gcm14)
TEST_VECTOR_AEAD(aes_gcm15)
TEST_VECTOR_AEAD(aes_gcm16)
+TEST_VECTOR_AEAD(aes_gcm17)
+TEST_VECTOR_AEAD(aes_gcm18)
+TEST_VECTOR_AEAD(aes_gcm19)
+TEST_VECTOR_AEAD(aes_gcm20)
+TEST_VECTOR_AEAD(aes_gcm21)
+TEST_VECTOR_AEAD(aes_gcm22)
+TEST_VECTOR_AEAD(aes_gcm23)
TEST_VECTOR_SIGNER(aes_xcbc_s1)
TEST_VECTOR_SIGNER(aes_xcbc_s2)
@@ -228,3 +240,23 @@ TEST_VECTOR_RNG(rng_runs_1)
TEST_VECTOR_RNG(rng_runs_2)
TEST_VECTOR_RNG(rng_runs_3)
+TEST_VECTOR_DH(modp768)
+TEST_VECTOR_DH(modp1024)
+TEST_VECTOR_DH(modp1536)
+TEST_VECTOR_DH(modp2048)
+TEST_VECTOR_DH(modp3072)
+TEST_VECTOR_DH(modp4096)
+TEST_VECTOR_DH(modp6144)
+TEST_VECTOR_DH(modp8192)
+TEST_VECTOR_DH(modp1024_160)
+TEST_VECTOR_DH(modp2048_224)
+TEST_VECTOR_DH(modp2048_256)
+TEST_VECTOR_DH(ecp192)
+TEST_VECTOR_DH(ecp224)
+TEST_VECTOR_DH(ecp256)
+TEST_VECTOR_DH(ecp384)
+TEST_VECTOR_DH(ecp521)
+TEST_VECTOR_DH(ecp224bp)
+TEST_VECTOR_DH(ecp256bp)
+TEST_VECTOR_DH(ecp384bp)
+TEST_VECTOR_DH(ecp512bp)
diff --git a/src/libstrongswan/plugins/test_vectors/test_vectors/aes_ccm.c b/src/libstrongswan/plugins/test_vectors/test_vectors/aes_ccm.c
index 95c41ecbc..cb45254ea 100644
--- a/src/libstrongswan/plugins/test_vectors/test_vectors/aes_ccm.c
+++ b/src/libstrongswan/plugins/test_vectors/test_vectors/aes_ccm.c
@@ -166,3 +166,82 @@ aead_test_vector_t aes_ccm11 = {
"\x66\xca\x61\x1e\x96\x7a\x61\xb3\x1c\x16\x45\x52\xba\x04\x9c\x9f"
"\xb1\xd2\x40\xbc\x52\x7c\x6f\xb1",
};
+
+/**
+ * The vectors below are defined by ourselves
+ */
+aead_test_vector_t aes_ccm12 = {
+ .alg = ENCR_AES_CCM_ICV8, .key_size = 24, .salt_size = 3,
+ .len = 32, .alen = 27,
+ .key = "\x58\x5d\xa0\x96\x65\x1a\x04\xd7\x96\xe5\xc5\x68\xaa\x95\x35\xe0"
+ "\x29\xa0\xba\x9e\x48\x78\xd1\xba\xee\x49\x83",
+ .iv = "\xe9\xa9\xff\xe9\x57\xba\xfd\x9e",
+ .adata = "\x44\xa6\x2c\x05\xe9\xe1\x43\xb1\x58\x7c\xf2\x5c\x6d\x39\x0a\x64"
+ "\xa4\xf0\x13\x05\xd1\x77\x99\x67\x11\xc4\xc6",
+ .plain = "\x85\x34\x66\x42\xc8\x92\x0f\x36\x58\xe0\x6b\x91\x3c\x98\x5c\xbb"
+ "\x0a\x85\xcc\x02\xad\x7a\x96\xe9\x65\x43\xa4\xc3\x0f\xdc\x55\x81",
+ .cipher = "\xfb\xe5\x5d\x34\xbe\xe5\xe8\xe7\x5a\xef\x2f\xbf\x1f\x7f\xd4\xb2"
+ "\x66\xca\x61\x1e\x96\x7a\x61\xb3\x1c\x16\x45\x52\xba\x04\x9c\x9f"
+ "\x24\x0e\xd1\xa5\x40\x74\xc8\x4e",
+};
+
+aead_test_vector_t aes_ccm13 = {
+ .alg = ENCR_AES_CCM_ICV8, .key_size = 24, .salt_size = 3,
+ .len = 27, .alen = 32,
+ .key = "\x58\x5d\xa0\x96\x65\x1a\x04\xd7\x96\xe5\xc5\x68\xaa\x95\x35\xe0"
+ "\x29\xa0\xba\x9e\x48\x78\xd1\xba\xee\x49\x83",
+ .iv = "\xe9\xa9\xff\xe9\x57\xba\xfd\x9e",
+ .adata = "\x44\xa6\x2c\x05\xe9\xe1\x43\xb1\x58\x7c\xf2\x5c\x6d\x39\x0a\x64"
+ "\xa4\xf0\x13\x05\xd1\x77\x99\x67\x11\xc4\xc6\xdb\x00\x56\x36\x61",
+ .plain = "\x85\x34\x66\x42\xc8\x92\x0f\x36\x58\xe0\x6b\x91\x3c\x98\x5c\xbb"
+ "\x0a\x85\xcc\x02\xad\x7a\x96\xe9\x65\x43\xa4",
+ .cipher = "\xfb\xe5\x5d\x34\xbe\xe5\xe8\xe7\x5a\xef\x2f\xbf\x1f\x7f\xd4\xb2"
+ "\x66\xca\x61\x1e\x96\x7a\x61\xb3\x1c\x16\x45\xa6\xe9\x3c\xa8\x50"
+ "\x4e\x62\x97",
+};
+
+aead_test_vector_t aes_ccm14 = {
+ .alg = ENCR_AES_CCM_ICV8, .key_size = 24, .salt_size = 3,
+ .len = 27, .alen = 27,
+ .key = "\x58\x5d\xa0\x96\x65\x1a\x04\xd7\x96\xe5\xc5\x68\xaa\x95\x35\xe0"
+ "\x29\xa0\xba\x9e\x48\x78\xd1\xba\xee\x49\x83",
+ .iv = "\xe9\xa9\xff\xe9\x57\xba\xfd\x9e",
+ .adata = "\x44\xa6\x2c\x05\xe9\xe1\x43\xb1\x58\x7c\xf2\x5c\x6d\x39\x0a\x64"
+ "\xa4\xf0\x13\x05\xd1\x77\x99\x67\x11\xc4\xc6",
+ .plain = "\x85\x34\x66\x42\xc8\x92\x0f\x36\x58\xe0\x6b\x91\x3c\x98\x5c\xbb"
+ "\x0a\x85\xcc\x02\xad\x7a\x96\xe9\x65\x43\xa4",
+ .cipher = "\xfb\xe5\x5d\x34\xbe\xe5\xe8\xe7\x5a\xef\x2f\xbf\x1f\x7f\xd4\xb2"
+ "\x66\xca\x61\x1e\x96\x7a\x61\xb3\x1c\x16\x45\x11\x03\x16\x48\xfb"
+ "\xb7\xde\xf1",
+};
+
+aead_test_vector_t aes_ccm15 = {
+ .alg = ENCR_AES_CCM_ICV12, .key_size = 16, .salt_size = 3,
+ .len = 32, .alen = 32,
+ .key = "\x7c\xc8\x18\x3b\x8d\x99\xe0\x7c\x45\x41\xb8\xbd\x5c\xa7\xc2\x32"
+ "\x8a\xb8\x02\x59\xa4\xfe\xa9\x2c\x09\x75\x9a\x9b\x3c\x9b\x27\x39"
+ "\xf9\xd9\x4e",
+ .iv = "\x63\xb5\x3d\x9d\x43\xf6\x1e\x50",
+ .adata = "\x57\xf5\x6b\x8b\x57\x5c\x3d\x3b\x13\x02\x01\x0c\x83\x4c\x96\x35"
+ "\x8e\xd6\x39\xcf\x7d\x14\x9b\x94\xb0\x39\x36\xe6\x8f\x57\xe0\x13",
+ .plain = "\x3b\x6c\x29\x36\xb6\xef\x07\xa6\x83\x72\x07\x4f\xcf\xfa\x66\x89"
+ "\x5f\xca\xb1\xba\xd5\x8f\x2c\x27\x30\xdb\x75\x09\x93\xd4\x65\xe4",
+ .cipher = "\x2b\x94\x71\x1a\xd3\x28\x21\xe5\xe2\xeb\x75\xe8\x09\x98\x9c\x0a"
+ "\xc9\xea\x3e\xe4\x3a\xf9\x71\x4c\x4f\x16\x73\x1d\xa5\x10\x93\x5b"
+ "\x83\xcd\xdd\x30\xb9\x3f\x86\xb3\x14\xbb\x7d\x81",
+};
+
+aead_test_vector_t aes_ccm16 = {
+ .alg = ENCR_AES_CCM_ICV12, .key_size = 24, .salt_size = 3,
+ .len = 32, .alen = 32,
+ .key = "\x7c\xc8\x18\x3b\x8d\x99\xe0\x7c\x45\x41\xb8\xbd\x5c\xa7\xc2\x32"
+ "\x8a\xb8\x02\x59\xa4\xfe\xa9\x2c\xf9\xd9\x4e",
+ .iv = "\x63\xb5\x3d\x9d\x43\xf6\x1e\x50",
+ .adata = "\x57\xf5\x6b\x8b\x57\x5c\x3d\x3b\x13\x02\x01\x0c\x83\x4c\x96\x35"
+ "\x8e\xd6\x39\xcf\x7d\x14\x9b\x94\xb0\x39\x36\xe6\x8f\x57\xe0\x13",
+ .plain = "\x3b\x6c\x29\x36\xb6\xef\x07\xa6\x83\x72\x07\x4f\xcf\xfa\x66\x89"
+ "\x5f\xca\xb1\xba\xd5\x8f\x2c\x27\x30\xdb\x75\x09\x93\xd4\x65\xe4",
+ .cipher = "\x48\x19\x60\xbb\x65\xa8\x00\xb8\x26\xf1\x7f\x16\x1f\x3c\xfc\x6d"
+ "\x86\x62\x10\xc5\x51\xcf\xef\x74\xac\xc6\xdf\x28\xac\x36\x6f\xa0"
+ "\x3a\x38\x24\x50\x68\x0f\x40\x1e\xaf\xea\x42\x16",
+};
diff --git a/src/libstrongswan/plugins/test_vectors/test_vectors/aes_gcm.c b/src/libstrongswan/plugins/test_vectors/test_vectors/aes_gcm.c
index 1f33bcbd5..f348cd4d1 100644
--- a/src/libstrongswan/plugins/test_vectors/test_vectors/aes_gcm.c
+++ b/src/libstrongswan/plugins/test_vectors/test_vectors/aes_gcm.c
@@ -220,3 +220,153 @@ aead_test_vector_t aes_gcm16 = {
"\xc5\xf6\x1e\x63\x93\xba\x7a\x0a\xbc\xc9\xf6\x62\x76\xfc\x6e\xce"
"\x0f\x4e\x17\x68\xcd\xdf\x88\x53\xbb\x2d\x55\x1b",
};
+
+/**
+ * Some self-made vectors for AES-192/256 with ICV8/12
+ */
+aead_test_vector_t aes_gcm17 = {
+ .alg = ENCR_AES_GCM_ICV8, .key_size = 24, .salt_size = 4,
+ .len = 70, .alen = 0,
+ .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+ "\xfe\xff\xe9\x92\x86\x65\x73\x1c\xca\xfe\xba\xbe",
+ .iv = "\xfa\xce\xdb\xad\xde\xca\xf8\x88",
+ .plain = "\xd9\x31\x32\x25\xf8\x84\x06\xe5\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
+ "\x86\xa7\xa9\x53\x15\x34\xf7\xda\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
+ "\x1c\x3c\x0c\x95\x95\x68\x09\x53\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
+ "\x1c\x3c\x0c\x95\x95\x68\x09\x53\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
+ "\xb1\x6a\xed\xf5\xaa\x0d",
+ .cipher = "\x39\x80\xca\x0b\x3c\x00\xe8\x41\xeb\x06\xfa\xc4\x87\x2a\x27\x57"
+ "\x85\x9e\x1c\xea\xa6\xef\xd9\x84\x62\x85\x93\xb4\x0c\xa1\xe1\x9c"
+ "\x7d\x77\x3d\x00\xc1\x44\xc5\x25\xac\x61\x9d\x18\xc8\x4a\x3f\x47"
+ "\xb5\xb4\xa5\xeb\x10\x86\xcb\xdd\x59\x76\x52\x0d\xff\xa4\x85\x26"
+ "\x4b\x54\x22\xa0\xc6\x65\x4d\xa8\x46\x73\xec\xc0\x61\x68",
+};
+aead_test_vector_t aes_gcm18 = {
+ .alg = ENCR_AES_GCM_ICV12, .key_size = 24, .salt_size = 4,
+ .len = 70, .alen = 0,
+ .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+ "\xfe\xff\xe9\x92\x86\x65\x73\x1c\xca\xfe\xba\xbe",
+ .iv = "\xfa\xce\xdb\xad\xde\xca\xf8\x88",
+ .plain = "\xd9\x31\x32\x25\xf8\x84\x06\xe5\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
+ "\x86\xa7\xa9\x53\x15\x34\xf7\xda\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
+ "\x1c\x3c\x0c\x95\x95\x68\x09\x53\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
+ "\x1c\x3c\x0c\x95\x95\x68\x09\x53\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
+ "\xb1\x6a\xed\xf5\xaa\x0d",
+ .cipher = "\x39\x80\xca\x0b\x3c\x00\xe8\x41\xeb\x06\xfa\xc4\x87\x2a\x27\x57"
+ "\x85\x9e\x1c\xea\xa6\xef\xd9\x84\x62\x85\x93\xb4\x0c\xa1\xe1\x9c"
+ "\x7d\x77\x3d\x00\xc1\x44\xc5\x25\xac\x61\x9d\x18\xc8\x4a\x3f\x47"
+ "\xb5\xb4\xa5\xeb\x10\x86\xcb\xdd\x59\x76\x52\x0d\xff\xa4\x85\x26"
+ "\x4b\x54\x22\xa0\xc6\x65\x4d\xa8\x46\x73\xec\xc0\x61\x68\x0f\x00"
+ "\x0c\x32",
+};
+aead_test_vector_t aes_gcm19 = {
+ .alg = ENCR_AES_GCM_ICV8, .key_size = 32, .salt_size = 4,
+ .len = 70, .alen = 0,
+ .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+ "\xfe\xff\xe9\x92\x86\x65\x73\x1c\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+ "\xca\xfe\xba\xbe",
+ .iv = "\xfa\xce\xdb\xad\xde\xca\xf8\x88",
+ .plain = "\xd9\x31\x32\x25\xf8\x84\x06\xe5\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
+ "\x86\xa7\xa9\x53\x15\x34\xf7\xda\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
+ "\x1c\x3c\x0c\x95\x95\x68\x09\x53\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
+ "\x1c\x3c\x0c\x95\x95\x68\x09\x53\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
+ "\xb1\x6a\xed\xf5\xaa\x0d",
+ .cipher = "\x52\x2d\xc1\xf0\x99\x56\x7d\x07\xf4\x7f\x37\xa3\x2a\x84\x42\x7d"
+ "\x64\x3a\x8c\xdc\xbf\xe5\xc0\xc9\x75\x98\xa2\xbd\x25\x55\xd1\xaa"
+ "\x8c\xb0\x8e\x48\x59\x0d\xbb\x3d\xa7\xb0\x8b\x10\x56\x82\x88\x38"
+ "\x68\xa0\xff\x03\xac\xdf\x95\x0e\x29\x65\x83\x7f\xda\x89\x72\xdd"
+ "\xd5\xc5\x96\xa3\x4a\xe0\xe6\x2f\x1e\xe2\x04\x80\xd7\xb7",
+};
+aead_test_vector_t aes_gcm20 = {
+ .alg = ENCR_AES_GCM_ICV12, .key_size = 32, .salt_size = 4,
+ .len = 70, .alen = 0,
+ .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+ "\xfe\xff\xe9\x92\x86\x65\x73\x1c\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+ "\xca\xfe\xba\xbe",
+ .iv = "\xfa\xce\xdb\xad\xde\xca\xf8\x88",
+ .plain = "\xd9\x31\x32\x25\xf8\x84\x06\xe5\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
+ "\x86\xa7\xa9\x53\x15\x34\xf7\xda\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
+ "\x1c\x3c\x0c\x95\x95\x68\x09\x53\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
+ "\x1c\x3c\x0c\x95\x95\x68\x09\x53\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
+ "\xb1\x6a\xed\xf5\xaa\x0d",
+ .cipher = "\x52\x2d\xc1\xf0\x99\x56\x7d\x07\xf4\x7f\x37\xa3\x2a\x84\x42\x7d"
+ "\x64\x3a\x8c\xdc\xbf\xe5\xc0\xc9\x75\x98\xa2\xbd\x25\x55\xd1\xaa"
+ "\x8c\xb0\x8e\x48\x59\x0d\xbb\x3d\xa7\xb0\x8b\x10\x56\x82\x88\x38"
+ "\x68\xa0\xff\x03\xac\xdf\x95\x0e\x29\x65\x83\x7f\xda\x89\x72\xdd"
+ "\xd5\xc5\x96\xa3\x4a\xe0\xe6\x2f\x1e\xe2\x04\x80\xd7\xb7\x5b\x65"
+ "\x9a\xad",
+};
+
+/**
+ * Some self-made vectors using more associated data
+ */
+aead_test_vector_t aes_gcm21 = {
+ .alg = ENCR_AES_GCM_ICV16, .key_size = 16, .salt_size = 4,
+ .len = 70, .alen = 69,
+ .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+ "\xca\xfe\xba\xbe",
+ .iv = "\xfa\xce\xdb\xad\xde\xca\xf8\x88",
+ .adata = "\xfe\xed\xfa\xce\xde\xad\xbe\xef\xfe\xed\xfa\xce\xde\xad\xbe\xef"
+ "\xab\xad\xda\xd2\xfe\xed\xfa\xce\xde\xad\xbe\xef\xfe\xed\xfa\xce"
+ "\xde\xad\xbe\xef\xda\xd2\xfe\xed\xfa\xce\xde\xad\xbe\xef\xfe\xda"
+ "\xd2\xfe\xed\xfa\xce\xde\xad\xbe\xef\xfe\xda\xd2\xfe\xed\xfa\xce"
+ "\xde\xad\xbe\xef\xfe",
+ .plain = "\xd9\x31\x32\x25\xf8\x84\x06\xe5\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
+ "\x86\xa7\xa9\x53\x15\x34\xf7\xda\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
+ "\x1c\x3c\x0c\x95\x95\x68\x09\x53\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
+ "\x1c\x3c\x0c\x95\x95\x68\x09\x53\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
+ "\xb1\x6a\xed\xf5\xaa\x0d",
+ .cipher = "\x42\x83\x1e\xc2\x21\x77\x74\x24\x4b\x72\x21\xb7\x84\xd0\xd4\x9c"
+ "\xe3\xaa\x21\x2f\x2c\x02\xa4\xe0\x35\xc1\x7e\x23\x29\xac\xa1\x2e"
+ "\x21\xd5\x14\xb2\x54\x66\x93\x1c\x7d\x8f\x6a\x5a\xac\x84\xaa\x05"
+ "\xb6\xf5\xea\x59\x55\x6f\x43\x93\xa8\xf4\x95\x8c\x14\x36\x3e\xf5"
+ "\x6c\xc2\x8a\x31\x64\xff\xe9\x24\x77\xc3\xaf\x6b\x64\xc7\x8b\xb9"
+ "\xec\xb9\x48\x84\xa2\xdb",
+};
+aead_test_vector_t aes_gcm22 = {
+ .alg = ENCR_AES_GCM_ICV16, .key_size = 24, .salt_size = 4,
+ .len = 70, .alen = 69,
+ .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+ "\xfe\xff\xe9\x92\x86\x65\x73\x1c\xca\xfe\xba\xbe",
+ .iv = "\xfa\xce\xdb\xad\xde\xca\xf8\x88",
+ .adata = "\xfe\xed\xfa\xce\xde\xad\xbe\xef\xfe\xed\xfa\xce\xde\xad\xbe\xef"
+ "\xab\xad\xda\xd2\xfe\xed\xfa\xce\xde\xad\xbe\xef\xfe\xed\xfa\xce"
+ "\xde\xad\xbe\xef\xda\xd2\xfe\xed\xfa\xce\xde\xad\xbe\xef\xfe\xda"
+ "\xd2\xfe\xed\xfa\xce\xde\xad\xbe\xef\xfe\xda\xd2\xfe\xed\xfa\xce"
+ "\xde\xad\xbe\xef\xfe",
+ .plain = "\xd9\x31\x32\x25\xf8\x84\x06\xe5\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
+ "\x86\xa7\xa9\x53\x15\x34\xf7\xda\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
+ "\x1c\x3c\x0c\x95\x95\x68\x09\x53\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
+ "\x1c\x3c\x0c\x95\x95\x68\x09\x53\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
+ "\xb1\x6a\xed\xf5\xaa\x0d",
+ .cipher = "\x39\x80\xca\x0b\x3c\x00\xe8\x41\xeb\x06\xfa\xc4\x87\x2a\x27\x57"
+ "\x85\x9e\x1c\xea\xa6\xef\xd9\x84\x62\x85\x93\xb4\x0c\xa1\xe1\x9c"
+ "\x7d\x77\x3d\x00\xc1\x44\xc5\x25\xac\x61\x9d\x18\xc8\x4a\x3f\x47"
+ "\xb5\xb4\xa5\xeb\x10\x86\xcb\xdd\x59\x76\x52\x0d\xff\xa4\x85\x26"
+ "\x4b\x54\x22\xa0\xc6\x65\x82\x33\xf3\x2d\x00\xe5\x03\x29\x8f\x7f"
+ "\x70\x74\xe6\xfe\x60\x75",
+};
+aead_test_vector_t aes_gcm23 = {
+ .alg = ENCR_AES_GCM_ICV16, .key_size = 32, .salt_size = 4,
+ .len = 70, .alen = 69,
+ .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+ "\xfe\xff\xe9\x92\x86\x65\x73\x1c\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+ "\xca\xfe\xba\xbe",
+ .iv = "\xfa\xce\xdb\xad\xde\xca\xf8\x88",
+ .adata = "\xfe\xed\xfa\xce\xde\xad\xbe\xef\xfe\xed\xfa\xce\xde\xad\xbe\xef"
+ "\xab\xad\xda\xd2\xfe\xed\xfa\xce\xde\xad\xbe\xef\xfe\xed\xfa\xce"
+ "\xde\xad\xbe\xef\xda\xd2\xfe\xed\xfa\xce\xde\xad\xbe\xef\xfe\xda"
+ "\xd2\xfe\xed\xfa\xce\xde\xad\xbe\xef\xfe\xda\xd2\xfe\xed\xfa\xce"
+ "\xde\xad\xbe\xef\xfe",
+ .plain = "\xd9\x31\x32\x25\xf8\x84\x06\xe5\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
+ "\x86\xa7\xa9\x53\x15\x34\xf7\xda\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
+ "\x1c\x3c\x0c\x95\x95\x68\x09\x53\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
+ "\x1c\x3c\x0c\x95\x95\x68\x09\x53\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
+ "\xb1\x6a\xed\xf5\xaa\x0d",
+ .cipher = "\x52\x2d\xc1\xf0\x99\x56\x7d\x07\xf4\x7f\x37\xa3\x2a\x84\x42\x7d"
+ "\x64\x3a\x8c\xdc\xbf\xe5\xc0\xc9\x75\x98\xa2\xbd\x25\x55\xd1\xaa"
+ "\x8c\xb0\x8e\x48\x59\x0d\xbb\x3d\xa7\xb0\x8b\x10\x56\x82\x88\x38"
+ "\x68\xa0\xff\x03\xac\xdf\x95\x0e\x29\x65\x83\x7f\xda\x89\x72\xdd"
+ "\xd5\xc5\x96\xa3\x4a\xe0\xa8\xb6\x0f\xfe\xd5\xe5\x33\xf4\x37\x74"
+ "\x83\x93\xf8\xaf\x80\x43",
+};
diff --git a/src/libstrongswan/plugins/test_vectors/test_vectors/ecp.c b/src/libstrongswan/plugins/test_vectors/test_vectors/ecp.c
new file mode 100644
index 000000000..b3c94b2de
--- /dev/null
+++ b/src/libstrongswan/plugins/test_vectors/test_vectors/ecp.c
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2015 Martin Willi
+ * Copyright (C) 2015 revosec AG
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <crypto/crypto_tester.h>
+
+/**
+ * Test vectors from RFC5114
+ */
+
+dh_test_vector_t ecp192 = {
+ .group = ECP_192_BIT, .priv_len = 24, .pub_len = 48, .shared_len = 24,
+ .priv_a = "\x32\x3f\xa3\x16\x9d\x8e\x9c\x65\x93\xf5\x94\x76\xbc\x14\x20\x00"
+ "\xab\x5b\xe0\xe2\x49\xc4\x34\x26",
+ .priv_b = "\x63\x1f\x95\xbb\x4a\x67\x63\x2c\x9c\x47\x6e\xee\x9a\xb6\x95\xab"
+ "\x24\x0a\x04\x99\x30\x7f\xcf\x62",
+ .pub_a = "\xcd\x46\x48\x9e\xcf\xd6\xc1\x05\xe7\xb3\xd3\x25\x66\xe2\xb1\x22"
+ "\xe2\x49\xab\xaa\xdd\x87\x06\x12\x68\x88\x7b\x48\x77\xdf\x51\xdd"
+ "\x4d\xc3\xd6\xfd\x11\xf0\xa2\x6f\x8f\xd3\x84\x43\x17\x91\x6e\x9a",
+ .pub_b = "\x51\x9a\x12\x16\x80\xe0\x04\x54\x66\xba\x21\xdf\x2e\xee\x47\xf5"
+ "\x97\x3b\x50\x05\x77\xef\x13\xd5\xff\x61\x3a\xb4\xd6\x4c\xee\x3a"
+ "\x20\x87\x5b\xdb\x10\xf9\x53\xf6\xb3\x0c\xa0\x72\xc6\x0a\xa5\x7f",
+ .shared = "\xad\x42\x01\x82\x63\x3f\x85\x26\xbf\xe9\x54\xac\xda\x37\x6f\x05"
+ "\xe5\xff\x4f\x83\x7f\x54\xfe\xbe",
+};
+
+dh_test_vector_t ecp224 = {
+ .group = ECP_224_BIT, .priv_len = 28, .pub_len = 56, .shared_len = 28,
+ .priv_a = "\xb5\x58\xeb\x6c\x28\x8d\xa7\x07\xbb\xb4\xf8\xfb\xae\x2a\xb9\xe9"
+ "\xcb\x62\xe3\xbc\x5c\x75\x73\xe2\x2e\x26\xd3\x7f",
+ .priv_b = "\xac\x3b\x1a\xdd\x3d\x97\x70\xe6\xf6\xa7\x08\xee\x9f\x3b\x8e\x0a"
+ "\xb3\xb4\x80\xe9\xf2\x7f\x85\xc8\x8b\x5e\x6d\x18",
+ .pub_a = "\x49\xdf\xef\x30\x9f\x81\x48\x8c\x30\x4c\xff\x5a\xb3\xee\x5a\x21"
+ "\x54\x36\x7d\xc7\x83\x31\x50\xe0\xa5\x1f\x3e\xeb\x4f\x2b\x5e\xe4"
+ "\x57\x62\xc4\xf6\x54\xc1\xa0\xc6\x7f\x54\xcf\x88\xb0\x16\xb5\x1b"
+ "\xce\x3d\x7c\x22\x8d\x57\xad\xb4",
+ .pub_b = "\x6b\x3a\xc9\x6a\x8d\x0c\xde\x6a\x55\x99\xbe\x80\x32\xed\xf1\x0c"
+ "\x16\x2d\x0a\x8a\xd2\x19\x50\x6d\xcd\x42\xa2\x07\xd4\x91\xbe\x99"
+ "\xc2\x13\xa7\xd1\xca\x37\x06\xde\xbf\xe3\x05\xf3\x61\xaf\xcb\xb3"
+ "\x3e\x26\x09\xc8\xb1\x61\x8a\xd5",
+ .shared = "\x52\x27\x2f\x50\xf4\x6f\x4e\xdc\x91\x51\x56\x90\x92\xf4\x6d\xf2"
+ "\xd9\x6e\xcc\x3b\x6d\xc1\x71\x4a\x4e\xa9\x49\xfa",
+};
+
+dh_test_vector_t ecp256 = {
+ .group = ECP_256_BIT, .priv_len = 32, .pub_len = 64, .shared_len = 32,
+ .priv_a = "\x81\x42\x64\x14\x5f\x2f\x56\xf2\xe9\x6a\x8e\x33\x7a\x12\x84\x99"
+ "\x3f\xaf\x43\x2a\x5a\xbc\xe5\x9e\x86\x7b\x72\x91\xd5\x07\xa3\xaf",
+ .priv_b = "\x2c\xe1\x78\x8e\xc1\x97\xe0\x96\xdb\x95\xa2\x00\xcc\x0a\xb2\x6a"
+ "\x19\xce\x6b\xcc\xad\x56\x2b\x8e\xee\x1b\x59\x37\x61\xcf\x7f\x41",
+ .pub_a = "\x2a\xf5\x02\xf3\xbe\x89\x52\xf2\xc9\xb5\xa8\xd4\x16\x0d\x09\xe9"
+ "\x71\x65\xbe\x50\xbc\x42\xae\x4a\x5e\x8d\x3b\x4b\xa8\x3a\xeb\x15"
+ "\xeb\x0f\xaf\x4c\xa9\x86\xc4\xd3\x86\x81\xa0\xf9\x87\x2d\x79\xd5"
+ "\x67\x95\xbd\x4b\xff\x6e\x6d\xe3\xc0\xf5\x01\x5e\xce\x5e\xfd\x85",
+ .pub_b = "\xb1\x20\xde\x4a\xa3\x64\x92\x79\x53\x46\xe8\xde\x6c\x2c\x86\x46"
+ "\xae\x06\xaa\xea\x27\x9f\xa7\x75\xb3\xab\x07\x15\xf6\xce\x51\xb0"
+ "\x9f\x1b\x7e\xec\xe2\x0d\x7b\x5e\xd8\xec\x68\x5f\xa3\xf0\x71\xd8"
+ "\x37\x27\x02\x70\x92\xa8\x41\x13\x85\xc3\x4d\xde\x57\x08\xb2\xb6",
+ .shared = "\xdd\x0f\x53\x96\x21\x9d\x1e\xa3\x93\x31\x04\x12\xd1\x9a\x08\xf1"
+ "\xf5\x81\x1e\x9d\xc8\xec\x8e\xea\x7f\x80\xd2\x1c\x82\x0c\x27\x88",
+};
+
+dh_test_vector_t ecp384 = {
+ .group = ECP_384_BIT, .priv_len = 48, .pub_len = 96, .shared_len = 48,
+ .priv_a = "\xd2\x73\x35\xea\x71\x66\x4a\xf2\x44\xdd\x14\xe9\xfd\x12\x60\x71"
+ "\x5d\xfd\x8a\x79\x65\x57\x1c\x48\xd7\x09\xee\x7a\x79\x62\xa1\x56"
+ "\xd7\x06\xa9\x0c\xbc\xb5\xdf\x29\x86\xf0\x5f\xea\xdb\x93\x76\xf1",
+ .priv_b = "\x52\xd1\x79\x1f\xdb\x4b\x70\xf8\x9c\x0f\x00\xd4\x56\xc2\xf7\x02"
+ "\x3b\x61\x25\x26\x2c\x36\xa7\xdf\x1f\x80\x23\x11\x21\xcc\xe3\xd3"
+ "\x9b\xe5\x2e\x00\xc1\x94\xa4\x13\x2c\x4a\x6c\x76\x8b\xcd\x94\xd2",
+ .pub_a = "\x79\x31\x48\xf1\x78\x76\x34\xd5\xda\x4c\x6d\x90\x74\x41\x7d\x05"
+ "\xe0\x57\xab\x62\xf8\x20\x54\xd1\x0e\xe6\xb0\x40\x3d\x62\x79\x54"
+ "\x7e\x6a\x8e\xa9\xd1\xfd\x77\x42\x7d\x01\x6f\xe2\x7a\x8b\x8c\x66"
+ "\xc6\xc4\x12\x94\x33\x1d\x23\xe6\xf4\x80\xf4\xfb\x4c\xd4\x05\x04"
+ "\xc9\x47\x39\x2e\x94\xf4\xc3\xf0\x6b\x8f\x39\x8b\xb2\x9e\x42\x36"
+ "\x8f\x7a\x68\x59\x23\xde\x3b\x67\xba\xce\xd2\x14\xa1\xa1\xd1\x28",
+ .pub_b = "\x5c\xd4\x2a\xb9\xc4\x1b\x53\x47\xf7\x4b\x8d\x4e\xfb\x70\x8b\x3d"
+ "\x5b\x36\xdb\x65\x91\x53\x59\xb4\x4a\xbc\x17\x64\x7b\x6b\x99\x99"
+ "\x78\x9d\x72\xa8\x48\x65\xae\x2f\x22\x3f\x12\xb5\xa1\xab\xc1\x20"
+ "\xe1\x71\x45\x8f\xea\xa9\x39\xaa\xa3\xa8\xbf\xac\x46\xb4\x04\xbd"
+ "\x8f\x6d\x5b\x34\x8c\x0f\xa4\xd8\x0c\xec\xa1\x63\x56\xca\x93\x32"
+ "\x40\xbd\xe8\x72\x34\x15\xa8\xec\xe0\x35\xb0\xed\xf3\x67\x55\xde",
+ .shared = "\x5e\xa1\xfc\x4a\xf7\x25\x6d\x20\x55\x98\x1b\x11\x05\x75\xe0\xa8"
+ "\xca\xe5\x31\x60\x13\x7d\x90\x4c\x59\xd9\x26\xeb\x1b\x84\x56\xe4"
+ "\x27\xaa\x8a\x45\x40\x88\x4c\x37\xde\x15\x9a\x58\x02\x8a\xbc\x0e",
+};
+
+dh_test_vector_t ecp521 = {
+ .group = ECP_521_BIT, .priv_len = 66, .pub_len = 132, .shared_len = 66,
+ .priv_a = "\x01\x13\xf8\x2d\xa8\x25\x73\x5e\x3d\x97\x27\x66\x83\xb2\xb7\x42"
+ "\x77\xba\xd2\x73\x35\xea\x71\x66\x4a\xf2\x43\x0c\xc4\xf3\x34\x59"
+ "\xb9\x66\x9e\xe7\x8b\x3f\xfb\x9b\x86\x83\x01\x5d\x34\x4d\xcb\xfe"
+ "\xf6\xfb\x9a\xf4\xc6\xc4\x70\xbe\x25\x45\x16\xcd\x3c\x1a\x1f\xb4"
+ "\x73\x62",
+ .priv_b = "\x00\xce\xe3\x48\x0d\x86\x45\xa1\x7d\x24\x9f\x27\x76\xd2\x8b\xae"
+ "\x61\x69\x52\xd1\x79\x1f\xdb\x4b\x70\xf7\xc3\x37\x87\x32\xaa\x1b"
+ "\x22\x92\x84\x48\xbc\xd1\xdc\x24\x96\xd4\x35\xb0\x10\x48\x06\x6e"
+ "\xbe\x4f\x72\x90\x3c\x36\x1b\x1a\x9d\xc1\x19\x3d\xc2\xc9\xd0\x89"
+ "\x1b\x96",
+ .pub_a = "\x01\xeb\xb3\x4d\xd7\x57\x21\xab\xf8\xad\xc9\xdb\xed\x17\x88\x9c"
+ "\xbb\x97\x65\xd9\x0a\x7c\x60\xf2\xce\xf0\x07\xbb\x0f\x2b\x26\xe1"
+ "\x48\x81\xfd\x44\x42\xe6\x89\xd6\x1c\xb2\xdd\x04\x6e\xe3\x0e\x3f"
+ "\xfd\x20\xf9\xa4\x5b\xbd\xf6\x41\x3d\x58\x3a\x2d\xbf\x59\x92\x4f"
+ "\xd3\x5c\x00\xf6\xb6\x32\xd1\x94\xc0\x38\x8e\x22\xd8\x43\x7e\x55"
+ "\x8c\x55\x2a\xe1\x95\xad\xfd\x15\x3f\x92\xd7\x49\x08\x35\x1b\x2f"
+ "\x8c\x4e\xda\x94\xed\xb0\x91\x6d\x1b\x53\xc0\x20\xb5\xee\xca\xed"
+ "\x1a\x5f\xc3\x8a\x23\x3e\x48\x30\x58\x7b\xb2\xee\x34\x89\xb3\xb4"
+ "\x2a\x5a\x86\xa4",
+ .pub_b = "\x01\x0e\xbf\xaf\xc6\xe8\x5e\x08\xd2\x4b\xff\xfc\xc1\xa4\x51\x1d"
+ "\xb0\xe6\x34\xbe\xeb\x1b\x6d\xec\x8c\x59\x39\xae\x44\x76\x62\x01"
+ "\xaf\x62\x00\x43\x0b\xa9\x7c\x8a\xc6\xa0\xe9\xf0\x8b\x33\xce\x7e"
+ "\x9f\xee\xb5\xba\x4e\xe5\xe0\xd8\x15\x10\xc2\x42\x95\xb8\xa0\x8d"
+ "\x02\x35\x00\xa4\xa6\xec\x30\x0d\xf9\xe2\x57\xb0\x37\x2b\x5e\x7a"
+ "\xbf\xef\x09\x34\x36\x71\x9a\x77\x88\x7e\xbb\x0b\x18\xcf\x80\x99"
+ "\xb9\xf4\x21\x2b\x6e\x30\xa1\x41\x9c\x18\xe0\x29\xd3\x68\x63\xcc"
+ "\x9d\x44\x8f\x4d\xba\x4d\x2a\x0e\x60\x71\x1b\xe5\x72\x91\x5f\xbd"
+ "\x4f\xef\x26\x95",
+ .shared = "\x00\xcd\xea\x89\x62\x1c\xfa\x46\xb1\x32\xf9\xe4\xcf\xe2\x26\x1c"
+ "\xde\x2d\x43\x68\xeb\x56\x56\x63\x4c\x7c\xc9\x8c\x7a\x00\xcd\xe5"
+ "\x4e\xd1\x86\x6a\x0d\xd3\xe6\x12\x6c\x9d\x2f\x84\x5d\xaf\xf8\x2c"
+ "\xeb\x1d\xa0\x8f\x5d\x87\x52\x1b\xb0\xeb\xec\xa7\x79\x11\x16\x9c"
+ "\x20\xcc",
+};
diff --git a/src/libstrongswan/plugins/test_vectors/test_vectors/ecpbp.c b/src/libstrongswan/plugins/test_vectors/test_vectors/ecpbp.c
new file mode 100644
index 000000000..de4399868
--- /dev/null
+++ b/src/libstrongswan/plugins/test_vectors/test_vectors/ecpbp.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2015 Martin Willi
+ * Copyright (C) 2015 revosec AG
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <crypto/crypto_tester.h>
+
+/**
+ * Test vectors from RFC6923/RFC7027
+ */
+
+dh_test_vector_t ecp224bp = {
+ .group = ECP_224_BP, .priv_len = 28, .pub_len = 56, .shared_len = 28,
+ .priv_a = "\x7c\x4b\x7a\x2c\x8a\x4b\xad\x1f\xbb\x7d\x79\xcc\x09\x55\xdb\x7c"
+ "\x6a\x46\x60\xca\x64\xcc\x47\x78\x15\x9b\x49\x5e",
+ .priv_b = "\x63\x97\x6d\x4a\xae\x6c\xd0\xf6\xdd\x18\xde\xfe\xf5\x5d\x96\x56"
+ "\x9d\x05\x07\xc0\x3e\x74\xd6\x48\x6f\xfa\x28\xfb",
+ .pub_a = "\xb1\x04\xa6\x7a\x6f\x6e\x85\xe1\x4e\xc1\x82\x5e\x15\x39\xe8\xec"
+ "\xdb\xbf\x58\x49\x22\x36\x7d\xd8\x8c\x6b\xdc\xf2\x46\xd7\x82\xe7"
+ "\xfd\xb5\xf6\x0c\xd8\x40\x43\x01\xac\x59\x49\xc5\x8e\xdb\x26\xbc"
+ "\x68\xba\x07\x69\x5b\x75\x0a\x94",
+ .pub_b = "\x2a\x97\x08\x9a\x92\x96\x14\x7b\x71\xb2\x1a\x4b\x57\x4e\x12\x78"
+ "\x24\x5b\x53\x6f\x14\xd8\xc2\xb9\xd0\x7a\x87\x4e\x9b\x90\x0d\x7c"
+ "\x77\xa7\x09\xa7\x97\x27\x6b\x8c\xa1\xba\x61\xbb\x95\xb5\x46\xfc"
+ "\x29\xf8\x62\xe4\x4d\x59\xd2\x5b",
+ .shared = "\x31\x2d\xfd\x98\x78\x3f\x9f\xb7\x7b\x97\x04\x94\x5a\x73\xbe\xb6"
+ "\xdc\xcb\xe3\xb6\x5d\x0f\x96\x7d\xca\xb5\x74\xeb",
+};
+
+dh_test_vector_t ecp256bp = {
+ .group = ECP_256_BP, .priv_len = 32, .pub_len = 64, .shared_len = 32,
+ .priv_a = "\x81\xdb\x1e\xe1\x00\x15\x0f\xf2\xea\x33\x8d\x70\x82\x71\xbe\x38"
+ "\x30\x0c\xb5\x42\x41\xd7\x99\x50\xf7\x7b\x06\x30\x39\x80\x4f\x1d",
+ .priv_b = "\x55\xe4\x0b\xc4\x1e\x37\xe3\xe2\xad\x25\xc3\xc6\x65\x45\x11\xff"
+ "\xa8\x47\x4a\x91\xa0\x03\x20\x87\x59\x38\x52\xd3\xe7\xd7\x6b\xd3",
+ .pub_a = "\x44\x10\x6e\x91\x3f\x92\xbc\x02\xa1\x70\x5d\x99\x53\xa8\x41\x4d"
+ "\xb9\x5e\x1a\xaa\x49\xe8\x1d\x9e\x85\xf9\x29\xa8\xe3\x10\x0b\xe5"
+ "\x8a\xb4\x84\x6f\x11\xca\xcc\xb7\x3c\xe4\x9c\xbd\xd1\x20\xf5\xa9"
+ "\x00\xa6\x9f\xd3\x2c\x27\x22\x23\xf7\x89\xef\x10\xeb\x08\x9b\xdc",
+ .pub_b = "\x8d\x2d\x68\x8c\x6c\xf9\x3e\x11\x60\xad\x04\xcc\x44\x29\x11\x7d"
+ "\xc2\xc4\x18\x25\xe1\xe9\xfc\xa0\xad\xdd\x34\xe6\xf1\xb3\x9f\x7b"
+ "\x99\x0c\x57\x52\x08\x12\xbe\x51\x26\x41\xe4\x70\x34\x83\x21\x06"
+ "\xbc\x7d\x3e\x8d\xd0\xe4\xc7\xf1\x13\x6d\x70\x06\x54\x7c\xec\x6a",
+ .shared = "\x89\xaf\xc3\x9d\x41\xd3\xb3\x27\x81\x4b\x80\x94\x0b\x04\x25\x90"
+ "\xf9\x65\x56\xec\x91\xe6\xae\x79\x39\xbc\xe3\x1f\x3a\x18\xbf\x2b",
+};
+
+dh_test_vector_t ecp384bp = {
+ .group = ECP_384_BP, .priv_len = 48, .pub_len = 96, .shared_len = 48,
+ .priv_a = "\x1e\x20\xf5\xe0\x48\xa5\x88\x6f\x1f\x15\x7c\x74\xe9\x1b\xde\x2b"
+ "\x98\xc8\xb5\x2d\x58\xe5\x00\x3d\x57\x05\x3f\xc4\xb0\xbd\x65\xd6"
+ "\xf1\x5e\xb5\xd1\xee\x16\x10\xdf\x87\x07\x95\x14\x36\x27\xd0\x42",
+ .priv_b = "\x03\x26\x40\xbc\x60\x03\xc5\x92\x60\xf7\x25\x0c\x3d\xb5\x8c\xe6"
+ "\x47\xf9\x8e\x12\x60\xac\xce\x4a\xcd\xa3\xdd\x86\x9f\x74\xe0\x1f"
+ "\x8b\xa5\xe0\x32\x43\x09\xdb\x6a\x98\x31\x49\x7a\xba\xc9\x66\x70",
+ .pub_a = "\x68\xb6\x65\xdd\x91\xc1\x95\x80\x06\x50\xcd\xd3\x63\xc6\x25\xf4"
+ "\xe7\x42\xe8\x13\x46\x67\xb7\x67\xb1\xb4\x76\x79\x35\x88\xf8\x85"
+ "\xab\x69\x8c\x85\x2d\x4a\x6e\x77\xa2\x52\xd6\x38\x0f\xca\xf0\x68"
+ "\x55\xbc\x91\xa3\x9c\x9e\xc0\x1d\xee\x36\x01\x7b\x7d\x67\x3a\x93"
+ "\x12\x36\xd2\xf1\xf5\xc8\x39\x42\xd0\x49\xe3\xfa\x20\x60\x74\x93"
+ "\xe0\xd0\x38\xff\x2f\xd3\x0c\x2a\xb6\x7d\x15\xc8\x5f\x7f\xaa\x59",
+ .pub_b = "\x4d\x44\x32\x6f\x26\x9a\x59\x7a\x5b\x58\xbb\xa5\x65\xda\x55\x56"
+ "\xed\x7f\xd9\xa8\xa9\xeb\x76\xc2\x5f\x46\xdb\x69\xd1\x9d\xc8\xce"
+ "\x6a\xd1\x8e\x40\x4b\x15\x73\x8b\x20\x86\xdf\x37\xe7\x1d\x1e\xb4"
+ "\x62\xd6\x92\x13\x6d\xe5\x6c\xbe\x93\xbf\x5f\xa3\x18\x8e\xf5\x8b"
+ "\xc8\xa3\xa0\xec\x6c\x1e\x15\x1a\x21\x03\x8a\x42\xe9\x18\x53\x29"
+ "\xb5\xb2\x75\x90\x3d\x19\x2f\x8d\x4e\x1f\x32\xfe\x9c\xc7\x8c\x48",
+ .shared = "\x0b\xd9\xd3\xa7\xea\x0b\x3d\x51\x9d\x09\xd8\xe4\x8d\x07\x85\xfb"
+ "\x74\x4a\x6b\x35\x5e\x63\x04\xbc\x51\xc2\x29\xfb\xbc\xe2\x39\xbb"
+ "\xad\xf6\x40\x37\x15\xc3\x5d\x4f\xb2\xa5\x44\x4f\x57\x5d\x4f\x42",
+};
+
+dh_test_vector_t ecp512bp = {
+ .group = ECP_512_BP, .priv_len = 64, .pub_len = 128, .shared_len = 64,
+ .priv_a = "\x16\x30\x2f\xf0\xdb\xbb\x5a\x8d\x73\x3d\xab\x71\x41\xc1\xb4\x5a"
+ "\xcb\xc8\x71\x59\x39\x67\x7f\x6a\x56\x85\x0a\x38\xbd\x87\xbd\x59"
+ "\xb0\x9e\x80\x27\x96\x09\xff\x33\x3e\xb9\xd4\xc0\x61\x23\x1f\xb2"
+ "\x6f\x92\xee\xb0\x49\x82\xa5\xf1\xd1\x76\x4c\xad\x57\x66\x54\x22",
+ .priv_b = "\x23\x0e\x18\xe1\xbc\xc8\x8a\x36\x2f\xa5\x4e\x4e\xa3\x90\x20\x09"
+ "\x29\x2f\x7f\x80\x33\x62\x4f\xd4\x71\xb5\xd8\xac\xe4\x9d\x12\xcf"
+ "\xab\xbc\x19\x96\x3d\xab\x8e\x2f\x1e\xba\x00\xbf\xfb\x29\xe4\xd7"
+ "\x2d\x13\xf2\x22\x45\x62\xf4\x05\xcb\x80\x50\x36\x66\xb2\x54\x29",
+ .pub_a = "\x0a\x42\x05\x17\xe4\x06\xaa\xc0\xac\xdc\xe9\x0f\xcd\x71\x48\x77"
+ "\x18\xd3\xb9\x53\xef\xd7\xfb\xec\x5f\x7f\x27\xe2\x8c\x61\x49\x99"
+ "\x93\x97\xe9\x1e\x02\x9e\x06\x45\x7d\xb2\xd3\xe6\x40\x66\x8b\x39"
+ "\x2c\x2a\x7e\x73\x7a\x7f\x0b\xf0\x44\x36\xd1\x16\x40\xfd\x09\xfd"
+ "\x72\xe6\x88\x2e\x8d\xb2\x8a\xad\x36\x23\x7c\xd2\x5d\x58\x0d\xb2"
+ "\x37\x83\x96\x1c\x8d\xc5\x2d\xfa\x2e\xc1\x38\xad\x47\x2a\x0f\xce"
+ "\xf3\x88\x7c\xf6\x2b\x62\x3b\x2a\x87\xde\x5c\x58\x83\x01\xea\x3e"
+ "\x5f\xc2\x69\xb3\x73\xb6\x07\x24\xf5\xe8\x2a\x6a\xd1\x47\xfd\xe7",
+ .pub_b = "\x9d\x45\xf6\x6d\xe5\xd6\x7e\x2e\x6d\xb6\xe9\x3a\x59\xce\x0b\xb4"
+ "\x81\x06\x09\x7f\xf7\x8a\x08\x1d\xe7\x81\xcd\xb3\x1f\xce\x8c\xcb"
+ "\xaa\xea\x8d\xd4\x32\x0c\x41\x19\xf1\xe9\xcd\x43\x7a\x2e\xab\x37"
+ "\x31\xfa\x96\x68\xab\x26\x8d\x87\x1d\xed\xa5\x5a\x54\x73\x19\x9f"
+ "\x2f\xdc\x31\x30\x95\xbc\xdd\x5f\xb3\xa9\x16\x36\xf0\x7a\x95\x9c"
+ "\x8e\x86\xb5\x63\x6a\x1e\x93\x0e\x83\x96\x04\x9c\xb4\x81\x96\x1d"
+ "\x36\x5c\xc1\x14\x53\xa0\x6c\x71\x98\x35\x47\x5b\x12\xcb\x52\xfc"
+ "\x3c\x38\x3b\xce\x35\xe2\x7e\xf1\x94\x51\x2b\x71\x87\x62\x85\xfa",
+ .shared = "\xa7\x92\x70\x98\x65\x5f\x1f\x99\x76\xfa\x50\xa9\xd5\x66\x86\x5d"
+ "\xc5\x30\x33\x18\x46\x38\x1c\x87\x25\x6b\xaf\x32\x26\x24\x4b\x76"
+ "\xd3\x64\x03\xc0\x24\xd7\xbb\xf0\xaa\x08\x03\xea\xff\x40\x5d\x3d"
+ "\x24\xf1\x1a\x9b\x5c\x0b\xef\x67\x9f\xe1\x45\x4b\x21\xc4\xcd\x1f",
+};
diff --git a/src/libstrongswan/plugins/test_vectors/test_vectors/modp.c b/src/libstrongswan/plugins/test_vectors/test_vectors/modp.c
new file mode 100644
index 000000000..482e41c36
--- /dev/null
+++ b/src/libstrongswan/plugins/test_vectors/test_vectors/modp.c
@@ -0,0 +1,731 @@
+/*
+ * Copyright (C) 2015 Martin Willi
+ * Copyright (C) 2015 revosec AG
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <crypto/crypto_tester.h>
+
+/**
+ * As I couldn't find any test vectors for common MODP diffie hellman groups
+ * these have been generated.
+ */
+
+dh_test_vector_t modp768 = {
+ .group = MODP_768_BIT, .priv_len = 32, .pub_len = 96, .shared_len = 96,
+ .priv_a = "\x60\x91\xff\xc6\xde\x28\xc1\xcc\xc7\xc6\x5d\xa6\x11\xab\xfa\xe8"
+ "\x6a\x10\x74\xb2\x22\x43\xe3\x70\x6c\xb6\xde\x2f\xe2\x9d\x11\x42",
+ .priv_b = "\x76\xe0\x2f\xc3\xea\xbe\x6a\x0f\xce\xd6\xc3\x1e\x59\x45\xd1\x67"
+ "\xfa\xd0\x02\x00\xb4\xaf\x12\xcd\x6f\xc6\xd7\xe3\x81\x48\x62\x78",
+ .pub_a = "\xd7\xa2\x54\x62\x99\x01\xc8\x89\x53\x2c\x3e\x44\xda\x3d\x0b\x7e"
+ "\x92\x27\x37\x70\xc6\x26\xc3\x91\x90\x52\x2d\xab\x67\x07\xca\xff"
+ "\x36\x59\x10\x9f\x2f\x43\x24\xa4\x17\xeb\x7d\xc4\x56\x3a\x09\xba"
+ "\x04\xcd\x3c\x9b\x27\xd3\xc1\x22\x7e\xb1\x9d\xcb\x69\xfe\xf3\xb6"
+ "\xe2\xaa\x13\x81\x23\x24\x06\x64\x2d\xd1\x50\x78\x57\x07\xf4\x7c"
+ "\x3d\x74\x8a\x3d\x6b\x96\xd0\x00\xc5\x2c\x99\xd1\x0b\x65\xf2\xd1",
+ .pub_b = "\xf0\xe9\xdc\x78\x10\x4c\x97\x99\xb6\x70\x74\xb0\x7d\x8f\x09\x7a"
+ "\xa8\x82\xbd\xe4\x64\xc8\xeb\x9a\x0a\xcd\xef\x13\x86\x19\x4f\x49"
+ "\xc0\x63\xc6\x7d\x68\xf1\x4c\x5e\x3b\x04\x19\x57\x67\x8e\xa7\xcb"
+ "\x39\x7d\x87\x07\x20\x81\x9e\xa1\x08\x5a\x28\xd8\x13\xe3\x73\x9a"
+ "\x64\x00\x6f\x24\x66\xe8\x6c\x09\xe0\xc6\x9c\x2c\xa4\xf1\x0a\x04"
+ "\xc4\x9b\xb3\x01\x86\xbd\xfa\xb5\x4d\xf4\x20\x83\x14\xd9\x3c\x52",
+ .shared = "\x29\xcb\x14\x7d\x2a\x2b\x0d\x02\x59\x4d\xb0\x7b\xf0\x44\x70\x5c"
+ "\xb1\x44\x89\xd0\xf0\xa9\x32\xcc\x87\xf6\x04\x05\x1d\x1c\xb6\xe2"
+ "\xbe\x90\x39\x16\xe2\x5c\x65\x39\x93\xc8\x05\x5e\xd4\x37\x6c\xa4"
+ "\xdb\xa8\x40\x12\x39\x1a\x92\xa3\xf3\x42\x64\xaf\x64\x1f\xd8\x18"
+ "\xe1\xba\x4e\x99\x34\x30\x09\x97\xd7\x32\xd7\x0d\x0d\x73\x73\xcd"
+ "\x3d\xaa\xad\x22\xe8\x68\xb7\xe2\x50\x02\x9d\x30\x7e\xe5\x41\x48",
+};
+
+dh_test_vector_t modp1024 = {
+ .group = MODP_1024_BIT, .priv_len = 32, .pub_len = 128, .shared_len = 128,
+ .priv_a = "\xe5\x3c\x20\x5d\xa0\xd8\xe4\xbf\xb4\x17\x49\x44\x32\x0f\xc6\xe4"
+ "\xea\x66\xfe\x44\xe3\xc9\x31\xac\x5d\xa1\x45\x0a\xea\x47\xeb\xcf",
+ .priv_b = "\x7f\x9a\xf7\x21\xeb\x7c\xd2\xa9\x00\xa3\x6e\x39\x9e\xbc\x5c\x65"
+ "\xee\xcc\xe6\x62\x9c\x8e\x1c\x5a\x7f\xf3\x32\x93\x64\x5f\xd3\xe3",
+ .pub_a = "\x66\x61\x7c\x9b\xbe\x91\xee\x99\x00\xd8\x06\x41\x5b\x73\x84\xec"
+ "\xb0\xb6\xb8\x0a\x39\xbd\x5b\x07\x03\x96\xee\x32\x37\x5a\x8f\x68"
+ "\x37\x6b\x35\x2b\x97\xba\xf3\x2f\x95\xc4\xd1\x8c\x06\xab\x96\xbf"
+ "\xe3\xf3\x75\x2d\xf1\xe3\xc5\x57\x41\xb6\xf4\x24\x41\x17\xee\xbd"
+ "\xa1\x01\x59\x83\xc6\xba\x00\x8f\xe8\x9f\xe5\x1c\xf4\xc9\x69\x25"
+ "\x92\xeb\xf7\x42\x43\x6c\x39\x3d\xf6\x1a\x60\xcc\xc6\x4f\xd7\x90"
+ "\x7a\x6d\x26\x26\x7c\x0f\x15\x56\x23\x0c\x3e\x33\x2b\x2e\x11\xd1"
+ "\x18\xa6\x98\x25\x89\x79\x3a\x2f\x6d\x66\xb8\x66\x76\xba\xe4\x7a",
+ .pub_b = "\x7b\x90\x10\x60\x9e\xbf\x2c\x49\x70\x17\x45\x8a\xfa\xab\x42\x02"
+ "\x88\x5d\x25\xde\x7b\x5e\x5c\xe9\xb0\x5b\xd4\x42\xa3\xe9\x7b\x52"
+ "\xce\xa7\x60\xd7\xdb\xcb\x21\xdd\x71\xd8\x0c\xd4\x34\x7c\xaa\x9e"
+ "\xdf\xbc\x2d\xf4\xc1\xcd\xc1\x66\x9b\x8a\xd2\x44\xeb\x34\x5b\x33"
+ "\x1d\x87\x54\x92\x88\x3d\xf0\x4a\x3d\x0b\x1a\x8b\x89\x27\xd5\x09"
+ "\x91\xfe\x03\xe7\x35\x7e\xb6\xbd\xfc\xe3\xd8\xc6\x84\xc9\x86\x1b"
+ "\xc5\xce\x03\x96\x2f\xfb\x87\xbf\x05\xda\xbd\x5a\x37\x27\x99\x4d"
+ "\xde\xe5\xd3\xc6\xec\xc5\x89\x85\x99\x91\xb9\x32\x55\x76\x1f\xd5",
+ .shared = "\x95\x79\x0f\x5c\x46\xae\x7c\xa1\xa4\x71\xdd\x78\x6a\xa8\xe4\x44"
+ "\x07\x3e\xce\xc9\x69\x5d\x00\x46\x20\xcd\x7c\x9d\x36\x09\xa6\x97"
+ "\x3c\x89\xa2\x30\x75\x09\x35\x63\x8b\x86\xd1\xe6\x5b\x27\xb7\x84"
+ "\x88\x81\xf9\x01\x3a\xbd\x03\x62\x80\xd1\x86\x2b\xb0\x3c\xa6\x0b"
+ "\xa9\x0b\x70\xf9\xae\x7e\xdf\x71\xff\x80\xf5\xa7\xee\xfb\xe2\x67"
+ "\x29\xb5\xbb\xb1\xbb\x1f\xeb\x0d\x24\x0e\x53\xc9\xd7\x4b\x4f\xe9"
+ "\xd2\x62\xb5\x3b\xd5\xd8\xa7\x38\x3f\x90\xf0\x1e\x35\x96\x47\xd0"
+ "\x48\x02\xd7\x4a\x4f\x75\x3b\x29\x4a\x96\x50\x3f\x26\x05\xd3\xf1",
+};
+
+dh_test_vector_t modp1536 = {
+ .group = MODP_1536_BIT, .priv_len = 32, .pub_len = 192, .shared_len = 192,
+ .priv_a = "\x22\xd9\xdc\xc7\x30\x79\x93\x6a\x85\x8c\x07\xaa\x85\xed\x07\xb3"
+ "\xd1\xe8\xb6\x70\xe7\xca\xaf\xa1\x92\x83\x76\x96\x07\x0f\xef\x29",
+ .priv_b = "\x32\x34\x74\xde\x77\x88\xe0\x03\x6b\x30\x95\x49\x56\x0b\x00\x0d"
+ "\x81\xf8\x2f\xdb\x69\x78\xf3\xc0\x3b\x70\x16\x80\xde\x6e\x28\x10",
+ .pub_a = "\x3d\x7d\x1d\xd1\xbc\xa7\x13\x7a\x64\x23\x73\xd5\xd9\xb8\x6e\xf9"
+ "\x95\x84\x9e\xa5\x1c\xb6\xcd\x51\xa6\xb9\x3e\x83\xb3\x4f\x96\x7d"
+ "\xbf\x7d\x66\xbc\x7c\xe5\xd9\x58\xbf\x01\x90\x2b\x60\xf1\xc3\x07"
+ "\x6c\xfe\x14\x7b\xeb\x45\xf0\x83\x97\xcf\xf4\xc3\xa8\x02\x7c\xaa"
+ "\xe1\x84\x78\x8e\xf3\xeb\x0d\xd5\x6e\x14\xc6\xdd\x2c\xe9\x54\xe3"
+ "\xd5\xcc\x80\xdb\x84\xde\xb5\x34\xce\x38\x24\x45\xe7\xa4\x41\xdb"
+ "\x97\x12\x30\x02\x0a\x1a\x45\xca\x00\x70\xd6\x94\xf0\x93\xc7\x16"
+ "\xd8\x07\x68\x33\x6e\x61\xb5\x6f\xf7\x8b\x35\x09\x39\xfe\x4e\x9e"
+ "\x03\x2c\x85\xbb\x58\x81\xc4\xc8\xd7\xdb\xd5\x30\xa6\xfc\x50\x13"
+ "\x00\xf7\xe9\xe6\x5b\xff\xb9\x83\x34\x8a\xd0\x5c\xc5\x6e\x2c\x19"
+ "\xf5\x97\xa9\x9f\xb9\x68\x91\x4b\xe9\xb5\x7a\xcf\x91\x11\xe7\x5b"
+ "\x57\x6a\x61\x37\x67\x15\x76\x57\x90\x0d\xcf\x22\xf4\x20\x0c\x3d",
+ .pub_b = "\xd6\x32\x58\xd5\x54\x35\x3d\x6b\x2b\xcc\x0b\x53\x53\xfa\x80\x00"
+ "\xb3\xa3\x54\xa2\x41\x1d\x7f\x17\xab\xca\x69\x1d\xe8\x22\x7c\xd7"
+ "\xd4\x3e\x7f\xef\x8b\x3e\xe7\xa0\xa9\x1a\x66\x3d\x30\xc5\x4f\x3c"
+ "\x5f\x4b\x95\xc9\xfe\x38\xc6\xcf\x19\x39\xb4\x80\x2b\xb6\xf0\xa9"
+ "\x51\x12\x8f\xdc\x39\x1d\x90\xfa\x8b\x40\x48\x4f\x45\xb6\xda\x02"
+ "\xc7\x6c\xf9\x1b\x43\x31\xc4\xcf\x78\x51\xe5\x50\xa2\xd1\xc0\x25"
+ "\x53\x14\x03\xe0\x40\x3a\xf4\x72\xb3\x83\x41\xdc\x56\x2b\xe9\x8d"
+ "\x1a\xa2\x80\x49\x4d\x62\x64\x31\x6a\x6f\x77\x4c\xe0\xee\xd4\x01"
+ "\x57\xb6\x37\xc5\x4e\x69\x50\x3b\xec\xb8\xf0\xd7\x57\xb2\x86\xe4"
+ "\xd8\x43\xc2\x24\x21\x9a\x92\x3f\x73\xab\x57\x83\x15\xaf\x44\x5a"
+ "\xfa\xd9\x56\x9d\xf2\x5a\xcf\xca\x3a\x9f\x7c\x93\xd3\x03\xf4\xef"
+ "\x24\x32\xbf\x62\xce\x52\x5c\x8a\x56\xba\xbd\x2f\xfb\x54\x38\x32",
+ .shared = "\x6e\x3d\xf6\xe2\x52\xba\x11\x53\xca\x45\xe9\xa8\xbb\xe5\x48\x33"
+ "\x7b\x69\x57\x2a\xff\x4e\x61\xb4\x6e\xc9\x86\xb8\x70\xa8\x63\xd3"
+ "\x85\x3a\xb9\xa5\x4d\x9c\x63\x2a\x09\x48\xce\x8c\x65\xf1\xa2\x9b"
+ "\x06\x7a\x14\x51\x19\x8d\xab\x9f\x24\x77\x6e\x86\x42\x5a\x06\xbd"
+ "\xaa\x9f\x26\xdc\xe9\xe8\x9e\x36\x8d\x0e\x16\x70\x27\x74\x01\x5a"
+ "\x14\x30\xaa\xa1\xe8\x33\x22\x7f\x9d\xea\x7a\x58\x18\xce\x47\x43"
+ "\x20\xb3\x9c\xe8\x67\xca\x79\xa4\x9b\x31\xe6\xab\xce\xa6\xc8\xda"
+ "\xff\x00\xb8\x06\x4e\x2a\x75\x73\x72\xd4\x0d\x58\xa5\x92\xe7\xa2"
+ "\xde\xb1\xf9\xa0\xd9\xab\xab\x1f\xc0\x81\x2e\xe1\xff\xa6\x2a\x20"
+ "\xff\x68\xce\x4d\x02\xac\xb8\x4a\x1a\x03\x3d\x03\xe4\xf0\x5e\x97"
+ "\xa4\xfa\xd8\x9e\xc2\x3a\xee\x34\x9e\x26\x4e\xfa\x61\xae\x59\xe9"
+ "\x38\x1b\x1e\x5b\x7a\xa5\xd0\x9a\xb6\x6b\x74\x99\x7c\xba\xed\x20",
+};
+
+dh_test_vector_t modp2048 = {
+ .group = MODP_2048_BIT, .priv_len = 48, .pub_len = 256, .shared_len = 256,
+ .priv_a = "\xaf\x3b\xfd\x38\x62\xca\xa1\x17\x74\xce\x2b\x74\x84\x08\x07\xc1"
+ "\xde\x5c\xd6\xa7\x61\x9b\xb3\xa0\xc7\xaf\x39\xee\xda\xa6\xeb\x89"
+ "\xe2\xe9\xc1\x44\xb3\x62\x5b\x27\x31\x87\x9c\xb5\x8f\xa3\x76\x6d",
+ .priv_b = "\x77\xa4\x8e\x47\x72\xe2\x3e\x28\x4c\xe4\xaf\x81\x39\x9d\xcd\x58"
+ "\x9b\xeb\x7c\xef\xbc\xc9\xd1\x96\xf0\x6d\xcf\xdf\xc3\xa8\x8e\x3b"
+ "\x4c\x1c\x82\xbe\xfe\xc0\xe6\x4b\xa4\x95\xcc\xde\x32\x99\x36\xce",
+ .pub_a = "\x1a\x27\xc5\xa7\x23\x7a\xe8\xfe\x0a\x71\xc0\xaf\x64\x94\xfa\xec"
+ "\xcd\xd4\xa5\x03\x1d\x47\xa1\xa6\xb7\x16\x2d\xfc\xdf\x03\xbb\x17"
+ "\xb7\xbb\x08\x9b\xd6\x52\xa5\xf3\x1b\x36\x61\xbb\x11\x4d\x7c\x48"
+ "\xd8\x3d\x24\x4f\x46\xdb\x64\x87\x0c\x9c\x83\x27\xf1\xa8\x30\xab"
+ "\xf5\x31\xe5\x18\xdd\x52\x1a\x2e\x94\xe7\x06\x1e\x94\x42\x09\xba"
+ "\x53\x31\x01\x2b\x3d\xff\x00\x84\x9e\xaa\xb5\x8c\x7c\x7a\xf1\x52"
+ "\x65\x21\x0f\xbc\xf0\xf8\x7b\x9d\xd9\x32\xf8\xba\x4e\x9a\x9f\x91"
+ "\xb6\x32\x94\x53\x0a\x12\x00\xb8\x8b\x3a\x03\xe1\xa0\xc7\xfd\x34"
+ "\xde\xec\x6f\xad\x50\x1e\xde\x06\xce\x7e\xc9\xe2\x91\xd4\x7b\x69"
+ "\xb3\xa4\xb6\x1b\xb0\x87\x63\xb6\xd4\x6b\xfb\xda\xb9\x7b\xde\xce"
+ "\xa0\xff\x09\xa8\x72\x22\x1f\xb4\xb6\xcc\xca\x6b\x5c\x6f\xaa\xd8"
+ "\xeb\x87\x3b\x9a\x2f\x39\xb6\x3e\x62\xcf\xad\xd4\x76\x07\xe6\x8e"
+ "\x2c\x29\x05\x83\xf7\x26\x8f\xb4\x3f\xc0\x0a\x7d\xc8\x81\x0e\xcd"
+ "\xac\x1a\x59\xbd\x1b\x47\x7a\xaf\x41\xcb\x4b\x24\xad\xa3\xe5\xf7"
+ "\xb8\xcb\x98\x6f\x1a\xe4\x76\xaf\xc7\x67\x7a\xa6\x25\x70\xa1\xb1"
+ "\x83\x8d\xda\x26\xbe\x78\x63\xee\x2f\x40\xb5\xe5\x38\xa4\xe1\x81",
+ .pub_b = "\x66\x72\x82\x31\x8d\x65\xcb\x05\xcd\x32\x3c\x25\x09\x1e\x66\x4c"
+ "\xc7\x5a\x3a\x20\xc0\x14\xb2\xf6\x38\x9d\x3b\x27\xf5\xc1\xe9\x08"
+ "\xe0\xd4\x0f\xf8\x55\x2b\xd5\x87\x75\x25\x50\x30\x22\x85\xb4\x9f"
+ "\x75\xf8\xff\xae\xc9\x37\xfb\x2e\x69\xc8\x15\x0f\x88\xd6\x50\x8b"
+ "\xf0\xfb\x1f\x16\xe9\x67\x6f\x5e\xcf\x71\x8f\x87\x4f\x3d\x8d\xe3"
+ "\xc8\x68\x87\x49\xdb\xcf\x1a\xff\xd4\xd6\x2f\xf9\xea\x5d\x9e\x30"
+ "\x35\xb2\xce\x4a\x6e\xae\x28\x1c\x40\x8f\xc8\x08\x4a\xe2\x35\xd9"
+ "\x98\xa0\x37\x61\x0a\x8c\xb4\x8c\x87\x8c\xd0\x63\x5f\xa1\x36\x9f"
+ "\xc9\x0b\x86\x4c\x53\x23\x67\x38\x45\x97\xfd\xee\xfd\xb6\x74\x2b"
+ "\x00\x6a\xde\xda\xe2\x01\x5b\xc4\xce\x2f\x9c\x54\x51\xc9\x63\x6a"
+ "\x16\xed\x7d\x10\xb4\xe4\x0d\x82\x5b\x50\xaa\x76\x33\xa0\xe8\xfb"
+ "\x81\x3b\xeb\xc0\x49\xf7\xff\x6a\x71\xec\xfb\x14\xe8\x80\xf5\x09"
+ "\xd3\x8b\x57\x1a\x80\x98\x08\xa4\x96\xb4\x51\xb5\xb5\x56\x7a\x36"
+ "\x08\xb0\x1a\x22\xe4\x99\x83\x55\xf1\x81\xb8\x79\xde\x26\x23\x04"
+ "\xf2\x9d\xe1\x42\xff\x5e\x52\xcc\x56\x4f\x3a\x2d\x1e\x84\xa0\xc3"
+ "\x3d\x8f\x3d\xa7\xbf\x64\x12\xb3\xf0\x18\xe0\x0d\x90\xfa\x5b\x94",
+ .shared = "\x26\xf9\xe8\x45\x7c\xf2\xd7\xd5\x6d\xd8\x19\xc2\xa8\x67\xd7\xc3"
+ "\xa9\x0a\xa2\xb9\x44\xe2\x0b\x8b\xde\xd3\xa5\xbf\x91\xaa\x1f\xf8"
+ "\x1a\x7e\xa8\x9d\x52\x4c\x36\x5f\x38\x23\x51\x08\x35\xcd\x86\x44"
+ "\xc4\xd5\xda\xcf\x89\x3e\x61\x0d\x2c\x1a\x14\x06\x16\xd1\xc8\xc1"
+ "\xd3\x5f\x46\xde\x2c\x5f\xaa\xd9\xae\xce\xc6\xb6\x77\xac\xd9\x84"
+ "\x09\x81\x42\xa8\xaa\x0c\x85\x42\xe9\xac\x98\xb3\xbc\x7b\x57\x7c"
+ "\x59\xb8\xec\x53\x34\x6e\x15\x67\xa9\x00\x39\xac\x92\xb2\x24\x63"
+ "\x5a\xb2\x16\x73\x1b\x06\x35\x39\x25\x64\x2c\x33\x0a\x20\x1b\xa4"
+ "\xb3\x12\x84\xa9\x51\x58\x60\xf3\x5e\x93\x08\xf5\x51\x5a\x77\x99"
+ "\x84\xfb\xd9\xf5\xce\x41\x77\xdb\x78\xd1\xcb\x03\x84\xb6\x3c\x73"
+ "\x9c\x6d\x74\x4a\xd7\xa7\x00\xb9\x5a\x53\x1b\x29\xf3\xb7\x44\xed"
+ "\x38\xe6\x9a\xee\x67\x07\x2c\x45\xa9\x1c\xee\x6b\x14\x21\x5e\x04"
+ "\xf2\x7c\x31\x35\x8c\x86\xdc\xe4\x48\xd6\x0a\x22\x3b\xdc\x55\x4e"
+ "\xda\xa3\xe4\x07\x2e\xf6\x03\xa0\xf4\x61\x9f\x8d\xb3\x9c\xec\x29"
+ "\x1c\x86\x01\x74\x15\x5d\x8a\xbf\x9e\x10\x82\x93\x1d\xf5\x8e\xd9"
+ "\xee\x9c\x12\x15\xdd\x23\x93\xde\x02\xf5\xc1\x76\x7f\x07\x0e\x28",
+};
+
+dh_test_vector_t modp3072 = {
+ .group = MODP_3072_BIT, .priv_len = 48, .pub_len = 384, .shared_len = 384,
+ .priv_a = "\x51\xf8\xaa\xb6\x63\x20\x1e\xb2\x86\xba\xd2\x99\x32\xb2\xe5\x8a"
+ "\x92\x96\xbf\x2a\xa7\x78\x79\xcc\x8c\x64\x29\xd5\xa6\x68\xad\xf7"
+ "\x60\x57\xad\xc3\x77\xcc\x75\xfd\x86\x47\x96\xb8\xfa\x7b\x42\x8c",
+ .priv_b = "\xaf\x7f\x44\x29\x0b\xaa\xc8\x81\x5e\x70\x75\x9b\x6b\x27\x42\x27"
+ "\x12\x22\x50\xea\x8d\x5a\x12\xef\x4b\x0a\x82\xcf\xe9\x1d\x52\x98"
+ "\x9d\x96\xc1\xa8\x40\x89\x2a\x9f\xfa\x9f\x3a\x65\xc7\x7c\x8c\xd9",
+ .pub_a = "\xae\x98\x27\xa0\x7a\x37\x2e\x16\x01\x67\x20\x63\x72\xc5\x5b\x61"
+ "\x5c\xd4\x9e\x8c\x43\xf7\x1a\x6e\x6b\x22\x83\x8c\x0f\x9b\x9d\x90"
+ "\x3a\xe5\x97\x97\xf3\x95\xc9\xca\x7c\x78\x0f\xa8\x8d\x79\x27\xa3"
+ "\xac\x48\xf1\xb5\x05\xe6\x7a\x0d\xb1\xeb\xad\x57\x8e\xb1\x57\x2b"
+ "\x86\x52\x5e\x84\x0c\x4c\xe2\x43\x5e\xd4\xcd\xe1\x75\x68\xa2\xcb"
+ "\x27\x08\x85\xc9\xd3\xa7\x24\xf8\x73\x46\xe5\x19\xa9\x47\x84\xee"
+ "\x88\xaf\xec\x27\x86\x68\x9f\x0b\xc1\x23\xe0\xe4\x65\x79\x57\xb4"
+ "\xf7\x5f\x24\xb5\x2d\xd4\x80\x39\x09\x3a\xe8\xeb\x5d\xa4\x0d\x1e"
+ "\x31\xfc\x53\xfd\x98\xa0\x58\xb0\x38\x9b\xd5\xc7\xaf\x5b\x0d\xf4"
+ "\x49\xd6\x13\x90\x1c\xa2\x71\xb5\x8e\x47\x0f\x88\xd3\x45\x93\x1f"
+ "\x3f\x64\xe8\x1f\x03\x1d\xe9\x5a\xda\xef\x78\xf6\x0e\x64\x5e\xca"
+ "\x7d\xd5\x61\x8a\xa6\xed\xd3\x9f\xa8\x91\x22\x46\x39\xa9\xae\x42"
+ "\xa8\xa9\x37\x4d\x68\x3f\xd3\xf4\x82\x6b\x97\x4c\x8d\x00\x39\x2f"
+ "\x31\x12\x45\xa7\x1c\x86\xdb\x4e\xb5\xef\xc4\x78\x34\xce\x6b\xaf"
+ "\xb2\x08\xaa\xba\xc6\x8c\x35\xe3\x4f\x79\x15\x78\x40\x1c\x7b\x75"
+ "\x01\x15\xb1\x6e\x5c\x3a\xc3\x62\x42\xf4\xa9\x0a\x99\x73\x4c\x08"
+ "\x36\x8d\x48\xab\x5d\x50\xec\x92\xf3\xf1\x26\x7e\x54\xb9\x33\x34"
+ "\xa5\x0b\x00\xd7\x57\x1d\x1c\xeb\x1e\x66\x21\x58\xd5\x66\x36\x20"
+ "\xd1\xf0\x22\x8a\x64\xf4\x92\xea\x8e\x1f\xe1\x81\x7e\x35\x83\x57"
+ "\x7f\x1e\x48\xa5\x84\x91\xc1\x0d\x98\xff\xa0\xea\xa8\x64\x05\xd0"
+ "\x2c\x94\xb4\x9f\x99\xfd\xdc\x6a\x3b\x7b\xd6\xe3\xbc\xe2\x26\x1e"
+ "\xa3\xcb\xbd\x82\x1f\x49\x5a\x2a\xa7\x1a\xe9\x09\xb3\xcd\x0d\x2c"
+ "\x9b\xec\x0b\x06\x8b\xa7\x26\x20\xff\x06\x32\x4c\xdb\x80\x0b\xf8"
+ "\x56\x5e\x4b\x78\x21\x84\x61\x0e\xf0\x77\xbc\x4e\x8f\xc5\x17\xf7",
+ .pub_b = "\x2b\x27\x0c\x8e\x66\x74\x09\x31\x45\xef\x84\xab\x0c\x34\x56\x0f"
+ "\x5c\x4f\x94\x82\xa5\x0f\x2f\x44\x72\xfd\xe9\x87\xb4\x6e\xb8\x40"
+ "\x34\x02\x03\xd6\x31\x10\x2f\x2c\x7f\x9e\xb4\x8d\x20\x74\x44\xfb"
+ "\x21\x71\x7d\xaf\x76\xf6\x60\x04\xa7\x9e\xa8\xee\xe7\x7c\xc9\x80"
+ "\x19\xba\x12\x34\xf1\x6f\xbf\xf7\xdd\xa9\x45\xa4\x46\x81\x1d\x28"
+ "\x5d\x31\xaf\xd3\x31\xdf\xa8\x0c\x87\x0e\xfb\x19\x96\x68\xbb\x38"
+ "\x76\x18\xe1\x16\x21\xf5\x32\x92\x90\x8f\xd3\x29\xaa\xdb\x91\x63"
+ "\x5e\x47\x5f\x33\x00\x4d\x9e\xff\xca\x65\xe7\x3f\xdf\xe5\x58\xf3"
+ "\x4b\xa9\xee\xf3\xcf\x96\x1d\xab\x56\x5c\x5a\x72\x2a\xfb\x22\x8c"
+ "\x26\xbf\x38\xd3\x9c\x84\xf7\xca\x70\xf8\x51\xbf\x21\x8a\x7e\x2c"
+ "\x2d\xb5\xd3\x56\x93\x7a\x01\x5a\x65\x50\x31\x13\xf6\xc0\xd5\x2d"
+ "\x3b\x2a\x21\x21\xa0\xca\x42\xc6\xcc\xde\xbb\x0b\x2c\xfc\x68\xaa"
+ "\x66\x2f\x3e\x08\xdd\x69\x82\x42\x7a\x0a\x11\xb2\xe1\x44\xe0\xf1"
+ "\xa3\x47\xd1\xd2\x7f\xf8\x96\xee\x56\xf5\xd9\xfb\x25\xf7\x40\xb5"
+ "\xc7\x09\x88\xfc\xdc\x91\x12\xdf\xce\xa2\xde\x11\x7c\x12\x87\xd3"
+ "\x2d\xa8\x42\x70\x49\xce\xef\x09\x7b\xf4\x57\x81\x84\x6a\x02\x1d"
+ "\x38\x3e\x73\x1a\xa6\xe5\xc3\xdc\x0a\x46\x22\x6e\x54\xc5\xa6\x36"
+ "\x7a\xd8\x90\x0b\x8e\xad\xba\x54\x09\x3e\xc6\xf7\x1c\x29\x74\x4a"
+ "\xdc\x73\x02\xe8\x11\x85\xed\x80\xdb\x14\xb3\xe4\x3f\x29\x36\x1a"
+ "\xf4\xe6\x1c\x33\x1e\x5d\x77\x4d\x76\x17\xbf\x31\x55\x71\x3b\x0d"
+ "\x4f\x90\x38\x97\x6f\x2f\xf8\x51\x99\xc1\xd7\x0d\xcb\x3e\xfa\x2b"
+ "\x63\xe0\xc7\x7c\xee\x78\x9f\xa0\x05\x8f\xdf\x52\x67\xc6\xb1\xbd"
+ "\xed\xc1\xd1\xef\x47\xca\x08\x22\x2a\x9a\x27\xe6\x5f\x89\x2e\xef"
+ "\xde\x06\x12\xf4\x6b\x51\xbe\x1a\x14\x77\x4d\x45\x14\xd7\xe7\xca",
+ .shared = "\x45\x3c\xaf\x89\xd6\xbf\xf3\x9b\xda\xe1\x85\x46\x17\x61\xd6\xde"
+ "\xf5\x89\xa5\x82\x66\x9c\x1a\xc8\x6b\x0b\x7e\xe4\x69\x56\xc9\xf9"
+ "\x4d\x74\x77\xdd\xd8\x1c\xde\x12\x75\x76\xda\xeb\xd3\x52\x95\x15"
+ "\xf0\x6a\xa7\xdf\x5e\xb1\x31\x27\x67\x5c\xe1\xb4\xdc\xa2\xac\xb6"
+ "\x47\xe3\x55\x76\xb9\x40\x50\xbc\xc3\x85\xb3\xb4\x89\x44\xdd\x1e"
+ "\xca\xbe\x6c\x76\x70\x45\xcd\xcd\xdd\x3a\x1d\x65\x83\x8c\xcc\xb0"
+ "\x82\xf5\x44\xf0\xfd\x9b\xe6\xc3\xd4\xff\xe7\x55\xe4\xd5\x6d\xfb"
+ "\xb4\x20\x93\x52\x3f\x45\xd2\x41\x5e\xad\xf3\x6a\x18\x5c\x87\xa6"
+ "\xe0\x14\x27\xbb\xcc\x98\x95\x68\xa2\x93\x66\xb1\xc6\x91\x72\x4d"
+ "\x81\x0e\x99\xfb\x05\x83\x00\x26\x72\xa5\x3e\x49\x9f\xf2\x30\x4a"
+ "\x4b\xcc\x10\xa3\xb8\x0b\xf4\x8a\x9e\x31\x5b\xe9\x98\xbf\x17\x0c"
+ "\xe9\x0e\xc4\xeb\x87\x05\x57\x0e\x5e\xc4\x80\xc2\x1c\x4d\xd0\xa5"
+ "\x3a\x1c\xb2\x06\xa3\x42\x45\x2e\xa8\xa6\x84\xd5\x69\xd8\x10\xf8"
+ "\xe9\x8f\xea\xd7\x03\x9c\x64\xef\xef\x59\x35\x24\xf5\x07\x5c\xb8"
+ "\x7e\x75\x63\xa4\x2a\xd1\xb5\x78\x19\xc7\xb0\x19\x96\x3d\x07\x0b"
+ "\x4c\xef\xe2\x2c\x59\x6b\x3a\x96\xa8\xba\x41\xbd\xab\xe0\x54\xa9"
+ "\xa9\xb4\xd1\xc4\x27\x8a\x83\x64\xea\xd9\x96\xa0\x39\xe8\xbd\xdc"
+ "\x00\x60\x1e\xcf\x10\x2d\x20\xd6\xab\xce\xd2\xff\x19\x29\x49\xf5"
+ "\xf8\x1c\x36\xfb\x47\x96\x44\x39\xcd\x44\x03\x6a\x23\xac\x30\x86"
+ "\x8c\xa5\x23\x25\x84\x7b\x31\x00\xd3\xd4\x28\x16\x76\xbf\x94\x84"
+ "\xa0\x05\x66\xf1\x3b\x6a\xe8\x9c\x57\xb7\x73\x2f\x8e\x60\x43\x4e"
+ "\x8e\x48\x85\xcb\x1b\xf9\x47\xce\xd1\x4d\x31\x53\xe1\xb6\xc8\x1b"
+ "\x33\x12\x0f\xfb\xd8\x45\x94\x91\xd2\xc5\x78\xaa\xb0\xa9\x7e\x83"
+ "\xdc\x87\x87\x5b\xe5\x88\xc0\xcd\xee\xee\xfd\x19\xcc\x4f\x1d\x40",
+};
+
+dh_test_vector_t modp4096 = {
+ .group = MODP_4096_BIT, .priv_len = 64, .pub_len = 512, .shared_len = 512,
+ .priv_a = "\xab\x69\xbc\xe9\x61\xf9\x8a\xa9\xd5\x91\xe3\xfd\x9a\xbc\x46\xc8"
+ "\x0d\xde\x39\x02\x84\xf1\x91\x42\xe8\x81\x5a\xb0\x80\x54\x72\x2b"
+ "\xbd\x2e\x14\x1e\x27\x9e\xc7\xfd\x30\xaa\xfa\xca\x66\x40\x93\x73"
+ "\x1e\xcc\x75\xa2\xbd\x07\xe4\xa5\x88\x3f\x56\x08\x93\xc8\x33\x50",
+ .priv_b = "\xef\x3e\xcb\x88\xf4\xf9\x88\xf7\x2a\xcd\x5a\xa2\x2e\xbe\xbf\x19"
+ "\x0f\xde\x7e\x8e\x4d\x0d\x50\xcc\xde\x80\xf9\x41\xb3\xbb\xd6\xa2"
+ "\xf9\xa1\x2a\xee\x9b\xb3\xe5\xc5\x80\x3f\x67\xaf\xfe\x27\xc8\x0d"
+ "\x05\x46\x9b\x52\x54\xeb\x1f\x32\x6c\x18\x73\x1f\xd3\x4a\xc2\xcd",
+ .pub_a = "\x3e\xc6\x3c\x0c\x68\x32\x05\xc3\x9d\x4b\x97\x72\x39\xfe\x7f\x96"
+ "\x17\x56\x0b\x1a\x56\xc4\x7f\xd9\x07\x02\x6a\xd8\x09\x0a\xa4\x4b"
+ "\xcc\x29\xd3\xc2\xb3\x8d\x34\xe1\x5c\x9e\x14\xc8\x05\x81\xa5\xc4"
+ "\x83\x2f\x5c\xd8\x8e\x43\x73\xbf\x59\x0e\xad\x63\x6f\x68\xc8\x99"
+ "\x59\xdf\xeb\xc4\x33\x03\x48\x97\x4e\xc4\x80\x7c\xa4\x4f\xb7\xd4"
+ "\x4f\x02\xb7\x97\x70\x9e\x16\x0e\x3c\x0e\xc4\x06\x1f\x75\x9a\xec"
+ "\x63\xd9\x96\xd2\x37\x7a\x18\xdc\xc5\x94\xc2\x08\xfa\x83\x7a\xeb"
+ "\xc1\x68\x9d\x5c\x0a\xb4\x30\x5c\x1b\xbe\x86\xb1\xd4\xa1\x6f\x4c"
+ "\xb5\x25\xfc\xcc\xf1\x00\x6e\x21\x23\xc7\x76\xb1\xff\xd1\xfe\xa2"
+ "\x97\x7b\x1e\xac\x82\xd1\xee\xec\x4c\x46\x73\xa5\x17\xdb\xc4\x2e"
+ "\x0f\x89\x30\xdb\x28\xd8\xc8\xe8\xb4\x0f\x8a\x49\x8d\xa5\x83\x05"
+ "\x5c\x9c\x12\x35\x34\xcc\x2c\x53\x34\xd0\xbe\xbe\xa3\x76\x26\x78"
+ "\xd1\xf5\x34\xba\x64\x6f\x75\x22\xf2\x68\x57\xa0\xff\x28\x8f\x7e"
+ "\xfc\x38\xc1\xdf\xa0\xa6\x4c\x3e\xb5\x31\x64\xc1\x8f\x6c\x2a\x4e"
+ "\x51\x5c\x13\xc1\xb7\x39\x35\xd9\xbd\xb4\x58\x27\x33\x55\x34\x29"
+ "\x67\xa8\x54\xc7\x8b\x16\x8b\x58\x90\xf7\xfe\x08\xd0\x8c\x2e\x4a"
+ "\x6f\x16\x63\x32\x2e\x7b\x52\xef\x02\x17\xc8\x15\x13\x72\x2f\x34"
+ "\xc0\x95\x48\x02\xe4\x2c\xfb\x6c\x9e\xe9\xd2\xc3\x98\x68\x71\x1f"
+ "\x1a\xfc\x4e\x47\x78\x52\x2d\x9c\x45\x09\x0b\x26\x23\xf4\xaf\xa3"
+ "\x88\x42\x6b\x89\x75\x64\x05\x93\xbe\x79\x2b\xb8\xa7\x15\xbe\xf2"
+ "\xc4\x6c\x2c\x50\xa6\x8a\xa6\x30\x13\x8a\xb2\x6d\xf9\xda\xe2\xb0"
+ "\xe2\xc4\xeb\xc1\x0a\x86\x36\xda\xd9\xbe\x2f\xfb\x79\x19\xcd\x52"
+ "\x78\x85\x84\x3c\xb7\x6c\x71\xf2\x8e\x71\xf6\xf8\x23\xd9\x9c\x2f"
+ "\xe3\x1a\xd7\xfb\xcc\x9a\xbb\x24\xef\x26\x66\xa1\x21\xcc\xf2\x1f"
+ "\x88\x1f\xa6\x6a\xb7\x18\xba\xf9\x2b\xef\x5c\xec\xaa\xcf\x4c\x92"
+ "\x9c\x9c\xb5\x44\x24\xd1\x7a\xb7\x73\x8c\xa6\x80\x7b\x4e\xfa\x49"
+ "\x36\x0a\x3d\xb6\x52\x7c\x72\x4c\x45\x31\x62\x9e\x9d\xb1\x84\x5c"
+ "\x6a\x1e\xb4\x63\x2e\x0c\x07\xab\x6c\x31\x37\x7c\x80\x83\x70\x49"
+ "\x46\x44\xa3\x5b\xd4\x78\x5f\x49\x5f\x60\x63\x8b\x98\x81\x71\xde"
+ "\xce\x5d\x5c\x44\x90\x35\x7d\x02\x89\xb3\x94\x4b\xc9\xf7\x85\x4d"
+ "\xb8\x06\x97\xd5\xf6\x3b\xf3\xe3\x1d\xb4\x4e\xfb\x9a\x43\xe1\xdc"
+ "\x9d\x17\xee\x47\x01\x2d\xfb\xac\xfd\xaf\x4d\xeb\xfe\xf0\x8b\x5c",
+ .pub_b = "\x20\xb7\xb4\x5f\x69\xe2\xc3\x0b\xcc\xf8\x41\xca\xe8\x04\x2c\x36"
+ "\x35\x37\x25\x42\x05\x99\x33\x5f\xa2\xe8\x7b\xbe\x59\xce\xbb\xc7"
+ "\xa7\xd3\xf6\x1e\xb6\x69\x82\x50\x3a\x75\x76\xc3\x47\x63\xdf\xef"
+ "\x6a\x6b\x18\x0a\x93\xaf\x66\xe4\xf7\x2f\x12\xd5\x8c\x93\x84\x6d"
+ "\x16\x05\x58\xb0\xd3\x16\x03\x9f\x6b\xa9\x9e\xa6\x4f\x00\x5b\xa1"
+ "\x1e\x59\xf3\xa9\xcb\x99\x3d\x28\x27\x1a\x4f\xb8\x30\xc3\xf6\xc4"
+ "\xce\xb9\xb0\x16\x2c\xcc\xa1\x97\xff\x65\x15\x78\x9d\x43\x6c\x94"
+ "\x7e\xb5\xd8\x01\x09\x74\xeb\xcd\x36\x6b\xc4\x76\x83\x41\x09\x0a"
+ "\x6f\xb5\x5c\xa8\x4e\x31\xd2\x48\x9e\x35\x27\xa2\x60\x77\x6f\x9b"
+ "\x8a\x58\x57\x7b\xdc\xd6\x89\xd8\xe4\xb7\x25\x14\xcf\x15\xee\xa5"
+ "\xa4\x96\x29\xa2\xf2\xc4\x86\xc5\x1b\x5d\x14\xd4\x9b\x11\x93\x09"
+ "\xbf\xe9\xc9\x32\xb6\x04\xc6\xf1\xc0\xe9\x2c\x44\x8d\xc1\x9f\x54"
+ "\xf4\x21\x11\x2f\x28\x87\x23\x8c\x91\x37\xc5\x59\xb6\x9a\x93\xac"
+ "\xf3\x6c\xc1\xf0\xbd\xfe\x4c\xca\x0b\x60\x47\x71\xee\x2a\xf1\x7c"
+ "\x34\x04\x5d\x42\x29\xb2\xb8\x0a\xcd\xfb\x7f\x56\xe4\xea\xee\x81"
+ "\xed\x6c\x88\x5a\x2e\x45\xaf\xc9\x8d\xe1\x21\x2e\x5f\x71\x4f\x5f"
+ "\x00\x88\x12\xd7\x17\x06\x89\x6d\x2d\x5e\xd8\x59\x0c\xee\x66\x41"
+ "\x08\x79\xdc\x31\x95\xa9\x21\xef\xe0\x85\xdb\x41\xd6\x87\xec\x2d"
+ "\xe9\x06\xa8\x10\x33\x6f\xa7\x57\x0d\x43\x11\x2d\xec\xf9\xff\xa4"
+ "\xae\x50\xf5\x4f\xad\x3e\xec\x2b\xb3\xed\x86\xdd\xa3\x66\x2b\xc0"
+ "\xfc\x1c\x28\x94\xd4\xde\x7d\xa9\x26\x0f\x74\x73\xbe\x67\x6d\xbf"
+ "\x60\x90\x33\x32\x0d\xba\xa4\x91\x72\x0c\xe2\xd3\x5d\x07\x8d\xbd"
+ "\xde\x84\x5b\x4a\x49\x5a\xd2\xec\xc2\xe6\xda\x5f\x1c\x0c\x89\x20"
+ "\xe4\xea\x0e\xcc\xa0\xe0\x77\xda\xc5\x9b\x97\xf0\xe9\x4b\x6c\xca"
+ "\xd3\xf4\x2a\x34\xb6\x63\x4e\xde\x83\xb2\x9d\x9c\x76\x15\xf7\x41"
+ "\x90\xdf\x33\xb2\x0e\x7d\x69\x08\xd6\x63\x69\x8a\xdb\x8a\x2d\xac"
+ "\xd9\x79\xf1\xca\x13\xf2\xcc\x84\x02\xf8\xf6\xcb\xf9\x11\x92\xf1"
+ "\x11\xdd\xf5\xad\x29\x39\x05\x95\x54\xac\x47\xb4\x78\x74\x7b\x78"
+ "\xa2\x71\x92\x97\xae\xda\x20\xe3\xd8\x53\x38\x26\x9b\x30\x80\xfc"
+ "\x3f\xd7\x8d\xe4\xac\xf3\x4e\x09\x47\x7a\x61\xca\xc7\xc1\xb7\xdc"
+ "\x76\x94\xed\x14\x1a\x51\x48\x0e\x6b\x2b\x43\xc0\x25\x56\xe8\xa8"
+ "\x49\x7c\x4f\xca\x56\xf7\xfd\x56\xc8\xeb\x36\xa1\xf8\xc3\xd1\x24",
+ .shared = "\x23\xb0\x6f\x49\x39\x60\x9b\x0f\x67\x08\x85\x2d\x4f\x87\xb3\x56"
+ "\x98\x8f\xb4\x5b\x1e\x4c\x95\x1b\x0b\x1d\x59\xbb\xa8\xca\x8b\x60"
+ "\xc2\x55\xa1\x8f\x37\x55\xa6\xdb\x05\xe4\x28\x5f\xe8\xf1\xf9\xd2"
+ "\x6a\xdd\x24\x14\x19\xf0\xfc\xa9\x82\x37\xfd\x7a\x5e\x52\x6d\x57"
+ "\xab\x5a\xd0\x69\xc3\xf1\x89\xf3\xc2\x91\x50\x11\x44\xa6\x55\x14"
+ "\xa5\x66\xb7\x4d\x0d\xc6\x41\x9c\x13\x06\xcf\xbf\xf3\x5d\xbe\xb4"
+ "\x3a\xbd\x5e\xd3\x9c\xe1\x37\x53\xb2\x8a\xe5\xe1\x05\xf7\x19\x1b"
+ "\xc6\xd7\x0a\xc3\x55\x2c\x82\x37\x3c\x6b\xd9\xdb\x8e\xd5\xee\x15"
+ "\x46\xfd\xb5\x49\x9a\xe7\x0a\x6b\xcb\xd1\x85\x9a\x31\xec\x43\xdc"
+ "\xcf\x05\x52\x5e\xe4\x0c\x94\x98\x87\xdd\x81\x68\xae\x29\xcc\x53"
+ "\x7a\x6f\x57\xa5\x26\xf4\x25\xdd\x07\x5f\x39\xd6\xee\x71\xcb\x49"
+ "\x7a\x57\x1f\xe5\x79\x58\xc7\xde\x32\x1d\x64\xf4\xe4\x89\x22\x43"
+ "\xe7\x8f\xef\x47\xb3\x31\xa6\x76\x84\x49\x89\x19\x1f\x97\xad\xf7"
+ "\x91\x32\x60\x7a\x14\x8f\x19\x3c\x7d\xd5\xe6\xd5\x99\x25\x7e\x1b"
+ "\xf1\x21\x93\x24\x68\xdb\xbe\x21\x60\xc9\x7a\xf0\x3f\x9d\x1b\x19"
+ "\xb1\x6b\x4f\x8f\xec\x5d\xcb\xc7\x98\x34\x4a\x87\xdb\xd4\x02\xa6"
+ "\x26\x6e\x10\xc0\xe8\xa7\x22\xfe\x9f\x67\xe8\x63\x6c\xb0\xa7\x3e"
+ "\x22\x4d\x53\x23\xde\x9b\x7f\xa7\xc6\x6c\x62\xa1\xf4\x00\x42\x04"
+ "\x1c\xba\xde\xf2\x4b\x4f\xaa\xfd\xa9\x14\x79\xec\x91\x97\x64\xb0"
+ "\xf4\x8b\x95\x9e\x67\x99\xf0\x94\x96\x6d\x24\x61\x27\xc0\x0e\x9c"
+ "\xc7\xd8\xf5\xa8\x43\xd1\xa4\xd6\x1c\x5c\x0a\x64\xb6\xb1\x6c\xa7"
+ "\x32\x44\x93\x75\xd9\xcf\x5d\x32\xd3\x99\xf6\x56\xfd\x51\x4f\xbf"
+ "\xe6\x6e\xea\x82\xe4\x79\xfc\x73\x18\x0b\x72\x1d\xd4\xc5\xbb\x20"
+ "\xd4\x50\xc5\xa1\x95\x9e\x1f\x8f\xed\x9d\xd2\x8c\x97\x05\x12\x72"
+ "\xf6\x64\x00\xcd\xd4\x13\x45\x7d\xdd\xe6\x9a\xc7\x43\x5e\xe4\xa4"
+ "\xf7\x2a\x37\x02\x49\x82\xb4\xa7\xf6\xf5\x5e\x03\x07\x03\x82\xb8"
+ "\x3b\x2c\xed\xb7\x75\x25\x17\xf0\x48\xb7\xc6\x91\xd1\xf1\xd7\xb8"
+ "\x52\xa5\xb7\xcd\x3b\x2b\xde\x97\x62\x0e\x9b\x2c\xd9\xc7\x7f\xd8"
+ "\xcc\xb6\x92\x5a\xde\xf7\x06\xa6\x77\x0a\x2e\xfb\x62\x1c\x93\xf1"
+ "\xca\x24\xf0\x9a\x68\x6e\x8b\x86\x05\x81\x49\x47\x39\x92\x15\x33"
+ "\x9a\x1f\x29\xfb\x57\xac\xf9\xce\x9e\xba\x2c\xbd\x49\x69\xc8\x9e"
+ "\x4f\xb9\x39\x02\x12\xb9\xb2\xa3\x5d\x4a\xfa\x17\xb3\xee\xc0\x8a",
+};
+
+dh_test_vector_t modp6144 = {
+ .group = MODP_6144_BIT, .priv_len = 64, .pub_len = 768, .shared_len = 768,
+ .priv_a = "\xab\x36\xf0\x65\x7c\x4f\xba\xdc\x2a\x3b\x07\xed\xd1\xc8\xaf\xcb"
+ "\x42\xaf\xcd\x7f\xf9\x1c\x57\x01\x37\x25\x50\x0d\x89\x42\x9f\x34"
+ "\x79\x8f\x99\xf5\xde\x53\xd1\x08\x8f\xd9\xf6\x60\xa1\xa5\x2b\xe4"
+ "\x54\xf9\x63\x4d\x15\xbb\x4e\x4c\xe0\x9e\x5a\x69\xe0\xf9\x9d\x59",
+ .priv_b = "\x59\xa5\x52\xa4\x0d\x17\x80\xaf\x64\x33\xbc\x9e\x8a\x9b\x6d\x48"
+ "\x30\xdd\xd3\x57\x53\x07\x40\x9a\x90\xc9\x2b\xda\x40\x4b\xb7\x99"
+ "\xf0\x09\x3e\xfe\x21\xbe\xad\x85\xf1\x6a\x23\x7b\x05\x90\xc3\x35"
+ "\x6d\x96\x8c\xc0\x9d\xcd\x5b\x2d\x96\x86\x07\xd6\xd1\xbf\x70\xc2",
+ .pub_a = "\x0a\x78\x01\x5c\x3e\x0d\x36\x2a\xff\xe5\xd9\x3a\x9c\x2d\xf2\xdf"
+ "\x5b\xb9\x6b\x7f\xf2\xfc\xc2\xdd\x96\x6c\x53\x9d\x1d\x4c\xdb\xac"
+ "\xec\xe2\x6d\x16\xab\x6d\x3d\xe8\x24\xe0\x75\x87\x29\x12\xeb\xa0"
+ "\x44\x33\x66\x11\x0e\x0d\x7e\x17\x27\x4b\x95\x78\xaf\x12\x46\x63"
+ "\xe6\x55\x17\x0b\xf7\xb7\xb2\xbd\xb4\xbf\xe4\x7a\x88\x97\x68\x95"
+ "\x6a\x98\xf1\xce\x1a\xc8\xc6\xc7\xc4\x8e\x13\xa3\x6c\x16\x1c\x82"
+ "\x00\x7c\x98\x35\xb2\x2e\x7b\x47\x99\x38\x92\xa1\x71\x2b\x27\x5d"
+ "\xdc\x84\x54\xf4\x33\x0b\xd1\xfc\x08\xab\xfc\x48\x8e\x71\x8b\x43"
+ "\xa6\x21\x3a\x09\xc5\x68\x74\xce\xef\xb3\xa9\xfa\xe7\xe1\x11\xc0"
+ "\x0a\x10\x43\x3f\x4b\x23\x75\xef\xab\x04\x2b\xd5\xc1\xc2\x9a\xaf"
+ "\x97\x0c\xeb\xae\xb5\x7f\x10\xf0\x10\x6e\xa6\x3d\x25\x72\x59\x93"
+ "\x0a\xf5\xb3\x3b\xc2\x64\x27\xe6\xef\x47\x32\xde\x43\xdc\xea\x0a"
+ "\x88\x72\x9b\x93\x7e\x6a\x9a\xfc\xf2\x92\xa2\x9f\x8b\xe9\x9b\x71"
+ "\x88\xd8\x2d\xfc\x13\x7e\x69\x19\x9a\x53\x5d\x92\xdc\x61\x37\x60"
+ "\x03\x38\x67\x1e\x6d\x46\x76\x31\xff\xc2\xbd\x69\x59\x42\xcd\x7e"
+ "\xbb\x40\x2c\x54\xea\x39\x1b\x9a\x02\xde\x1f\x28\x99\xfe\xd5\xbd"
+ "\xb7\x1f\x27\xab\x71\x0b\xba\xaa\x32\xb2\xc2\xe0\xec\x8d\x4b\xd4"
+ "\xca\x6c\xc5\x07\xd9\x72\x0b\x20\xaf\x9c\xce\xcd\x7f\x4e\x88\xed"
+ "\x55\x0a\xea\xbc\x43\xdd\x0b\x3d\xc0\x20\xdb\x3e\x14\x89\x76\xc7"
+ "\x61\xf5\x44\x21\x8a\x79\xb7\x3b\x37\x77\x24\x99\xf3\x61\xba\x0b"
+ "\x1d\x3c\xf2\x10\x23\x75\x36\xfb\x89\xd8\x57\x86\x51\xed\x67\x51"
+ "\xd1\xe8\x10\x95\x61\x2b\x0f\xcf\x65\x36\xbc\xb0\xff\x17\x2c\x3d"
+ "\x54\xdc\x07\x13\x19\x99\xd4\x11\x98\xf9\x7e\xa8\x32\x9a\xbb\x04"
+ "\xc3\x75\x3f\x83\xe1\xfd\x3b\x92\x78\x72\x3c\x98\x67\xf4\xc1\xff"
+ "\x19\xe1\xd2\xad\x7d\x34\x65\xf0\xb8\xc2\xdd\x9d\x4c\xcd\x36\x1a"
+ "\xbd\xf8\x56\x66\xd6\xfe\x04\x2c\x98\x04\x2b\xec\xa9\x4b\x66\x4b"
+ "\x71\xcf\x78\x07\x56\xe5\xba\x9c\x8a\x72\xb8\xc9\xe4\x82\xd6\x59"
+ "\x22\x59\x39\x75\xd6\xdd\x00\xf3\x16\xc7\xb2\x0c\x81\xeb\x67\x4f"
+ "\x0b\xbe\xa8\x1e\xed\xe6\x7b\xbf\xf1\x17\x38\x3f\xf4\x86\x0b\x97"
+ "\x75\xa7\x81\x86\x14\xb8\x6d\x48\x5e\x88\x98\xa9\x2f\x54\xfd\x7f"
+ "\x05\x45\xb4\x32\xcd\x5f\xab\xec\x2e\xa8\x42\xd8\x3b\x51\xc2\x18"
+ "\x91\x7a\xb6\x10\x5e\x26\x8b\xc8\x50\x08\x2c\x79\xa1\xd0\x74\x12"
+ "\xce\xc4\xd2\x3c\xb0\x04\x54\xa8\x87\x2b\x9f\xb3\x05\x4a\x89\x64"
+ "\xb5\xaf\x7f\x64\x4b\x45\xcd\xd7\xf2\xb8\xa8\x8c\xd8\x7c\x6e\xe6"
+ "\x19\xd9\xaf\x59\xb5\x2b\x07\x37\x1b\x8f\xb0\x57\xe7\x05\x9f\x21"
+ "\x52\x6d\xc4\x89\x4a\xff\xce\xda\xc8\x5b\x73\xf3\xd4\x07\xc7\x29"
+ "\x02\x7e\xa6\x79\x82\xd3\x01\xba\x93\x0e\x37\x17\x3d\xfc\x38\xd3"
+ "\x25\x7e\x52\xd2\x53\xba\x20\xe8\xe9\xef\xa2\x96\x38\x49\x14\xd2"
+ "\x83\x8b\x2c\x62\xb0\x27\xc6\x5d\x36\x34\xd4\x58\x14\x25\x6e\xc1"
+ "\xcf\xd0\x2d\x21\xa3\xc0\x9c\x9b\x14\x20\x83\xec\x1a\xeb\x14\x2a"
+ "\xd3\x97\x40\xad\xd0\xeb\x51\x8f\xa2\x10\x62\xb4\x50\x94\xff\x35"
+ "\x43\xc2\x29\x88\x0e\xf6\xb9\x4c\x85\x80\x13\xed\x2f\x56\x15\xdc"
+ "\x0f\x09\xd2\xe5\x40\x11\x70\x34\x76\x2c\xed\xb1\xac\xe5\x82\x77"
+ "\x45\x42\x3e\x8e\x8d\x08\x6e\x5b\xbe\x34\xf9\x93\x0f\x8a\x43\xec"
+ "\xa6\x9f\x7c\x56\xe6\x95\x31\x85\x9d\xb1\x97\xaf\x2d\xac\x76\x81"
+ "\x1f\xfb\x4d\x53\xfe\x04\xe2\x48\xbe\xac\x50\xe2\xb3\x74\x77\x5f"
+ "\x48\xec\x26\xd0\x9b\xb9\xa3\x28\x23\xa9\x2c\xc2\x0a\xb7\xd3\x80"
+ "\x87\x03\xa2\x3b\x74\x07\xaf\xa5\x5f\x2b\x9b\x90\xa2\xf8\x89\x3c",
+ .pub_b = "\x75\x3a\x06\x23\x48\x41\x6e\x90\x3b\x5b\x3b\x25\x89\x38\xf1\xa4"
+ "\x3f\xe0\x96\x2a\xcb\x3c\xd2\x7a\x71\xb3\xed\x8a\xd4\xa5\x62\x77"
+ "\x4b\x6f\xf4\xf2\x29\x31\x2a\xfc\xb4\x7b\x34\xfe\x9c\xb0\x83\x62"
+ "\xe7\x45\xc9\x93\x19\x89\xdb\x90\x99\xc5\x77\x85\x06\x97\xa6\x2f"
+ "\xde\x6d\x98\x01\xbc\x4f\x51\x92\x94\x6f\x10\x3a\x7a\x56\x14\x48"
+ "\xad\x7d\x1d\x15\x0c\x8c\xda\xc9\x01\xf1\x3b\xfd\x27\x09\x2e\xf7"
+ "\xec\x0f\x82\x1e\x0e\xa6\xb9\x1b\x63\x90\xc3\x3e\x7e\xf1\xad\x5c"
+ "\xaf\x6f\x6d\x9d\x3f\x25\x4f\xe9\x53\xaf\x03\x6e\xdc\x24\xf3\x2c"
+ "\x65\x67\xc7\x08\x61\x80\x18\x7c\x19\x97\x44\x56\x5e\xf0\xa2\x94"
+ "\x7c\x59\x01\x94\x5b\x46\xa8\x0b\x28\x6c\xa0\xfc\xa0\xad\xe4\x4a"
+ "\x2c\x87\x77\x7b\x44\x28\x25\xd4\xa2\x24\x70\x69\x9a\x83\xf7\x65"
+ "\xde\xe3\xeb\x03\x14\x00\x4c\xba\x87\x87\xf2\x47\x4c\x3e\x44\x67"
+ "\x66\x85\x48\xb4\x12\xa6\x15\x22\x0d\x39\x02\x07\x66\x59\x07\x3a"
+ "\x64\x9e\xba\x6e\xc4\xdc\x29\x07\x5b\x67\xff\x42\xca\xe0\x1d\xcd"
+ "\x39\x08\xff\x63\x03\xb1\x76\xc4\xa3\xdc\x81\x33\xfb\x4c\x28\xa1"
+ "\xe4\x7e\xbe\x5f\x73\x24\x92\x7a\x40\x8d\x75\xc5\x94\x13\x26\x91"
+ "\xef\x9a\xee\x45\xaa\xff\xfc\xae\x61\x34\xdb\x20\x96\x99\xe9\x18"
+ "\x30\x95\x37\x23\xaf\x3d\x2f\x3b\x03\x69\x4b\xfa\x92\x92\x57\x8e"
+ "\x66\xe2\x89\xf0\x62\xe5\x2b\x2c\x23\xca\xcd\x8d\xdd\x88\x92\xb4"
+ "\xc9\x8e\x9d\x57\x62\x69\x3e\xd1\xd7\xc8\x7e\x99\xac\x4e\x7c\x65"
+ "\xaf\xea\x99\xfa\x69\xd8\x46\xb2\xc7\x1f\x76\xf1\x3e\x99\xb7\x23"
+ "\x2c\x7c\x80\x8b\x3a\x5e\x86\x2c\x50\x5a\x36\x48\x0a\x23\x23\xdf"
+ "\x69\x95\xa0\x1b\xfd\x1f\x4e\x06\xc5\x0b\x17\x3c\x62\xce\x5a\x63"
+ "\x82\xcd\x14\x64\xb8\x60\x36\xb9\x74\x9c\xa4\xe1\xa5\x0c\xc0\x77"
+ "\x05\x41\x46\xac\x16\xdb\xb5\x16\x71\x71\x6e\x62\x93\x17\xd6\xdc"
+ "\xbb\xbd\xb3\x01\x5f\x08\xa9\x71\x91\x97\x92\xb1\x1d\xa8\x0a\xf9"
+ "\xc3\xaa\x4c\xc2\x63\x48\xd1\xae\x21\xbb\xf3\xb7\xda\x04\x5e\x6e"
+ "\x82\x89\x5d\xdc\xfb\xae\x76\xaf\x50\x42\x71\x06\x8b\x0c\xfd\xb9"
+ "\x0f\x00\x24\x97\xe0\x0c\x9f\xf2\x95\x11\x63\x6f\xcf\x59\xfb\xd2"
+ "\xcc\x10\xec\xaa\xef\x89\xff\x1b\x48\xc9\xce\x78\x22\x50\xf6\x31"
+ "\x47\x78\x38\x3b\xae\x32\xed\xf6\xaa\xa9\x7a\x53\x71\xc6\xbd\x10"
+ "\xcf\x17\xf4\x1b\x1e\xb0\x90\x4d\xd1\xd2\xa2\x9b\x5c\x37\xd3\x9c"
+ "\x31\xb2\xb8\x5b\x8c\xa2\xde\x11\xf7\x97\x03\xea\x45\x38\xc5\x5c"
+ "\x22\x8e\x3d\x60\x4a\xc7\x32\xaa\xee\x7a\x67\x9a\xa5\x85\x1f\x64"
+ "\xb1\x45\xe7\xe1\x69\x68\x5c\x65\x1e\x0a\xf3\xf3\x11\x26\x98\x7b"
+ "\xf8\x27\x23\xad\xf4\x25\x6f\xab\x83\x48\xc4\x5e\xba\xea\x73\x6a"
+ "\x2b\x82\x66\x02\xf5\x21\x5a\xbc\xf5\xbf\xf1\xa4\x72\x1c\xd9\x9d"
+ "\xb6\x46\xe9\xb5\x61\xbe\xe5\x59\x8a\xf9\x8e\xfa\x79\x2e\xa6\x02"
+ "\xad\x22\xea\x06\x2c\x42\x66\xb9\x0c\x6d\x4b\x2b\x8b\xd9\xa3\x8b"
+ "\x60\xe2\x63\xe0\x44\x54\x02\x2f\x75\xb7\x41\x81\x9c\xe7\xce\xc4"
+ "\x3e\x82\x05\x5e\x0e\x4c\x16\x0a\x59\xfa\xb1\x13\x02\x87\xb8\xd8"
+ "\xa7\xbc\x15\xb2\x5a\xb5\xea\x50\x76\x76\x73\xa1\xf4\xc2\x71\x88"
+ "\x5d\x0c\x8c\xbe\x32\x3d\x60\x15\xdb\xad\xde\x37\xf8\x8e\xb8\xd2"
+ "\x24\xc3\x3c\x97\xe7\x9c\xc6\xdc\xcd\xcd\x43\x93\x06\xd8\x64\x9f"
+ "\xca\x07\x15\x47\xca\x13\x39\x8d\xd6\x75\xe0\x61\x7f\x7f\x15\x28"
+ "\x8f\xe8\x4d\x19\xb6\x41\x20\x93\x17\x03\xaf\x1b\x16\x13\x07\xc7"
+ "\x50\xfe\xeb\x97\x7c\xe3\x72\x32\x9b\x87\xab\xab\x2d\x47\xa0\x93"
+ "\xc3\xc9\x17\x58\xc6\x2c\x8a\xa8\x78\x6e\x6c\x30\x6c\xbf\x3f\x66",
+ .shared = "\xfc\xe8\xe5\xeb\xf2\xb0\x07\xfc\x46\x60\x17\xa8\xed\xf0\xf1\xa6"
+ "\x9f\x5a\xf2\xea\x49\x2a\x09\xd8\x08\xd2\x84\xc2\x2d\x9b\x3d\x07"
+ "\x2b\x9f\x89\x1b\x0c\x0f\x09\xe4\x67\xd1\x6e\x33\x8d\x2e\x6d\xd1"
+ "\xf5\x1b\xdb\xc4\xe8\x64\x61\xf5\x49\x47\xd0\x07\x0f\xbe\xc1\xfc"
+ "\xe3\xe4\xf3\xd8\xa6\xeb\x73\x8c\xb1\xd5\xcf\xc6\x54\x6d\xe2\x07"
+ "\xba\x55\x17\xe8\x2b\x39\xc2\xab\x30\x8d\x2d\x48\xe1\xe7\x3f\x7f"
+ "\x5c\xf8\x4a\xfa\x7b\x71\xb3\xf2\x4c\x52\x45\x5d\xfd\xa0\x94\x98"
+ "\xe9\xde\x3f\x14\x52\x7f\xa5\xd6\xf7\xa4\x67\x27\x4d\x84\x4e\xaf"
+ "\x7f\x20\xef\xc6\xaf\x5d\xac\xaf\xd7\xe1\x52\x02\x6f\xd4\x84\x73"
+ "\x15\x7b\x74\x89\x65\xcd\xa7\xb8\xd5\x8d\x54\x94\x44\x88\xf6\xd2"
+ "\xa9\x7f\xa9\x12\xd1\x19\xa7\x75\x2f\xcf\x3c\xb9\xc2\xc7\x66\x18"
+ "\x91\x83\xd9\xa1\x69\xe2\x09\xc0\x38\x7f\x7b\xff\x5e\x44\x9d\x34"
+ "\x3c\xef\xb3\xbc\x86\x57\xaa\x28\x22\x09\x7a\xa1\x64\xad\xf0\x16"
+ "\xcb\x15\x8f\xaf\x9e\xb8\x0f\x9f\x53\x1c\x8a\xf9\xf3\x2d\xee\xf7"
+ "\x31\xa8\xcd\x44\x6a\x5f\xea\x66\x95\x81\x96\x49\x83\xd7\x1d\x54"
+ "\xaf\x34\x81\x5d\x3c\x4e\x4e\xc5\xe6\x5c\x04\x4d\x7a\x5c\x4b\x7b"
+ "\x54\x14\xa1\x19\x31\xe3\xda\x28\x4f\x8a\x3a\x99\x12\x3c\x22\xb5"
+ "\xce\xe7\x8b\x1c\xf0\x3a\xdf\x88\x5b\xe5\xdb\x0a\x88\xcd\x84\x9d"
+ "\x41\xdd\xf1\x63\x00\xf3\x6c\x23\xa4\x4b\x81\x04\x74\xd1\x35\x8f"
+ "\xef\x99\x9d\xf9\x46\x7f\x96\x98\x15\xbc\xa6\x25\x39\x6e\x32\xd1"
+ "\x7f\xa0\xcc\xe9\x60\x41\xc7\xcf\x0d\xd3\x16\x99\xee\xb7\x63\xbb"
+ "\xb2\x4c\x93\x7f\x50\xd0\xfe\x90\x39\x33\x87\xc6\x17\x97\x44\x83"
+ "\xad\x3c\x20\x2f\xd5\xc0\x30\xf4\xfa\xde\xdf\x42\x43\x16\x46\x6a"
+ "\x6b\x36\x59\xcd\xf1\x8c\x39\x82\xba\x54\xf6\x34\x3f\x6b\xf2\xf2"
+ "\x1f\x58\x58\xea\xdb\x8f\x80\xa2\x4d\x01\xe8\x2d\x70\x0c\x58\xf9"
+ "\x56\x4f\x6e\xea\xe2\x98\x0c\xc7\x01\x74\x40\x25\x24\xcc\x87\x16"
+ "\x98\x67\x5a\x7e\x95\x87\xa1\x41\x0e\xa6\x22\x89\x13\x26\x0f\x6f"
+ "\x81\xdc\xd5\x3d\x75\xe0\xd6\x71\xe7\xde\x0a\x6c\x62\x3b\x64\xab"
+ "\x2f\x67\xa0\xad\xdc\x32\x70\x1c\xa1\xfa\xe9\x08\xed\x5d\x90\x09"
+ "\xad\x42\x74\x5d\xcd\x12\x24\xfb\x14\xe8\xb9\x89\xa4\x4f\x0b\x07"
+ "\xa9\x4a\x7a\x27\xce\x19\x8d\x32\x6f\x74\x9a\xc5\x10\x91\xf1\xfc"
+ "\x03\xfa\x1c\x87\xe8\x70\x5c\x36\xa8\xef\x2c\xc7\xb6\xe4\x47\xac"
+ "\xb7\x3d\x0e\x44\x12\xbd\xa2\xf9\xdb\x49\x5f\xc5\xe6\xdf\x7a\x37"
+ "\x87\x32\x73\xb3\x6d\x08\xac\xef\xb8\xa3\x0a\x14\xb6\xd7\x7f\x76"
+ "\x0f\x02\x9b\x93\x45\x7b\xee\xd4\x92\x53\x40\x6c\x9e\xe1\x52\xe4"
+ "\x22\x31\xcf\xcb\xc5\x30\x11\xe0\xb6\xe4\x17\xad\x03\xe9\x8d\xaf"
+ "\x2e\xc8\x5a\x2c\x89\x49\x9d\xde\x67\xc4\x03\x1d\x91\x8f\x30\x7d"
+ "\x11\xd1\x93\x8c\x7b\xb9\x17\x71\x7c\x3b\x14\x34\x03\xba\x76\x8a"
+ "\x4b\xe0\xd0\x93\xc9\x25\x59\xce\x3f\xcc\x5d\x38\xfe\x18\xfa\xbb"
+ "\x0d\xde\xe0\xd4\x17\x3e\x18\x27\x58\x73\x86\x89\x71\xa0\x7f\xcd"
+ "\xd4\x8e\x54\xcc\x3f\x63\x1e\x44\x3d\xb9\x39\x77\x4c\xb8\xaa\x57"
+ "\x5f\x7b\xd2\xa9\x86\x9c\x15\xbe\xa7\xf1\xea\xa9\xdf\x95\x32\x2c"
+ "\xbf\x93\xb0\x31\xaf\x6e\x74\xe0\x37\x7c\x94\xf0\x4f\x6c\x44\x3c"
+ "\xd9\x1c\x3b\x48\x24\x8b\x28\x2f\x65\x54\xce\x69\xf4\x5b\xb6\x11"
+ "\xef\xab\xec\x45\x4d\x10\x58\xa7\xf2\xa7\xc7\x1a\x9f\xfa\xdc\xcd"
+ "\x54\xfa\x0d\xe8\x96\xbc\x0d\x55\x0a\x49\xf1\x2a\x31\x28\x15\x26"
+ "\x56\xf2\xa0\x6c\x84\x51\xbd\x6b\xee\x20\xc5\xd6\x4b\x36\x5f\x7b"
+ "\xb8\xd0\xeb\x41\xb6\x7c\xfb\x9d\x9d\xfd\x62\x0e\xb7\x99\xca\x17",
+};
+
+dh_test_vector_t modp8192 = {
+ .group = MODP_8192_BIT, .priv_len = 64, .pub_len = 1024, .shared_len = 1024,
+ .priv_a = "\xa6\x48\x81\x47\x45\xfe\x1e\xd1\x98\x9b\x75\xba\x6d\xd5\x01\xe4"
+ "\x4e\x77\x6d\xc9\x97\xa0\xae\x27\x37\x64\x61\xb0\xee\x79\x65\x94"
+ "\xc2\xe6\xdb\x07\xe5\xf9\xd8\x7d\x94\x4d\x37\x01\x22\x38\xe5\x70"
+ "\xaf\x52\x3a\x47\xf3\xe8\x6e\x2a\x4c\xd0\xdb\x3a\x49\x40\xcf\x0b",
+ .priv_b = "\xd8\xf2\xb0\x9f\x17\xbe\x6f\x13\x64\x5c\xb4\x57\xbe\x0a\xa8\x02"
+ "\x02\xe8\x43\xf5\x10\xd2\x93\xf9\x32\x55\x98\x2f\x89\x87\x26\x61"
+ "\x79\xd6\x01\x90\x95\x70\xa3\xf9\xcc\x58\x50\x8b\x62\xf8\x8d\x64"
+ "\xe0\xd0\x59\xf1\xa2\xed\xea\xd4\x42\x82\x04\xcc\x09\x2a\x2d\x82",
+ .pub_a = "\x1f\x6d\x66\x0a\xfb\x04\x87\x42\x55\xe6\x83\xee\x89\x9a\xd2\xfd"
+ "\xbc\xf7\x35\x7f\xdf\xe2\x47\xef\x3b\x2f\xf9\x41\x19\x48\x45\x26"
+ "\x44\x53\x41\xc5\xdf\xea\x28\xa7\x45\xa0\xff\x56\x95\x13\x24\x72"
+ "\x34\x1b\x11\xb2\xe6\x9b\xd8\xe9\x98\x18\xf0\x5c\x22\x74\x26\x65"
+ "\x9c\xf0\x34\x81\x55\x50\x1d\xce\x5a\x7b\x5d\x11\xae\xaf\xfb\xd0"
+ "\x52\xb5\xff\x8a\xc8\xe3\x5e\x78\x50\x29\x79\x53\x9e\xf4\xe4\xa7"
+ "\xa9\x51\x55\x1a\xcc\x10\x02\x72\x6b\xa8\xee\x95\x95\x70\x19\x92"
+ "\x34\x15\xd8\xec\x61\x27\xbe\x25\x2c\x44\x65\xb0\xef\xae\x8a\xee"
+ "\xcd\x39\x0f\x33\x55\xcf\x94\x52\x5e\xef\x38\xdb\x92\x62\x79\xb8"
+ "\xfd\xce\xe1\x3c\x43\x8e\xdd\xb2\xa5\x22\x44\xa7\x1b\xda\x1e\x85"
+ "\x30\x91\x82\xfa\x8c\x57\xff\x0c\xd4\x06\x9d\x4f\xfb\xcc\x42\xe5"
+ "\xe6\x60\xc1\xf0\x07\x3c\x85\x6a\xc6\x11\xcb\xf7\xf9\xf3\x2c\x95"
+ "\x67\xa1\x8e\x44\x94\xed\xe7\x42\x7b\x1a\x18\x16\x55\xdb\xc7\x91"
+ "\xc7\x9d\x54\x7d\x88\xfd\x49\x45\x9b\xd3\x56\xee\x47\x26\xa9\x5c"
+ "\xd3\x7b\xf2\x42\xe4\x8a\x94\xd9\x22\xcc\x06\x66\xce\x4f\x29\x91"
+ "\x2a\x1e\x4e\xc8\x8e\x3e\x40\xde\xdc\x95\xe1\x2c\xe2\x50\x47\x2e"
+ "\x58\x37\x49\x32\xe4\x64\x81\xe7\x03\x11\x81\x13\xb7\xdd\x6f\xef"
+ "\xae\xf5\xc5\xe6\x1c\x13\x6b\x76\xa1\xb0\xbf\x4d\xf0\x01\xc3\x09"
+ "\xd8\x59\xff\x13\x34\x6d\x52\x11\x9b\xa6\xc9\xca\x38\xe6\x71\x80"
+ "\x39\xae\x37\x4b\x1c\xe1\x13\xfb\xc6\x91\x8f\x32\xc3\x6a\x04\xd8"
+ "\xc6\x80\x08\x4e\xef\xf6\x80\xf9\x8f\xda\xaf\x27\x79\x9b\x21\x2d"
+ "\x2d\xea\x87\x4b\x4d\xeb\x5a\x87\xfb\x72\x36\xe2\x3e\x9b\x42\xcf"
+ "\xa9\xeb\x49\x41\xe6\xc2\xb8\xc0\xad\xbd\xb3\x61\xc1\x61\x5f\x9c"
+ "\xb6\xbd\x8f\x99\x3a\xe8\xca\x86\xb8\xd8\x7f\x2f\xb2\x33\xc0\x9f"
+ "\xd3\x8e\x44\xdc\x6d\x0c\x19\x89\xb9\x05\xc6\x36\xf4\x7a\xc3\x06"
+ "\x46\xa4\x22\x6e\xef\x76\xba\x92\xfd\xaf\xce\x71\xbc\x0c\x05\xfe"
+ "\xec\x8f\x8d\xfb\x59\x46\xf9\x7c\xc1\x1f\x1c\x52\x62\x4d\x01\x57"
+ "\x4e\x86\x03\x94\x8b\xba\xf7\xbd\xca\xbb\x15\xaa\x61\xdd\xbc\x32"
+ "\xdf\x9e\x5f\xad\x3b\xf8\xf6\xfb\x4d\x1e\x9c\x86\x9e\x7e\x0f\xaf"
+ "\xf6\x91\x7e\x08\xf4\xfb\x55\xd8\xe0\x4b\xd9\xcd\x23\x57\x83\x58"
+ "\x59\xd7\x56\x93\xee\x14\x2f\x2b\xd2\x83\xa4\xce\x45\xa2\x90\xba"
+ "\x15\x4d\xca\x0c\x8b\x29\x7a\xe5\xbc\xba\xa0\x45\xd0\x08\x27\x75"
+ "\x61\x3a\x83\x99\x2f\x39\xc6\x41\x03\xc0\xb7\xd6\xfb\x5b\x94\x64"
+ "\xc3\xfe\xca\x32\x1c\xe3\x66\xc8\xb4\x49\x44\x1c\x63\xb0\xaa\x18"
+ "\x31\x4b\x15\x8a\xda\x77\xc5\xfd\xea\x33\x6c\x9a\x45\x4c\x24\xb5"
+ "\x1c\xd2\x5d\x56\x98\x3c\xdf\xb1\x5f\x10\xee\xc1\x17\xec\xbe\x7c"
+ "\xdb\x24\xdd\xdb\x22\xf6\x47\x8a\xfc\xc5\xb5\xa8\x9a\x8f\xb8\x27"
+ "\x72\xa1\xd4\x5e\x43\xcd\x7b\x33\x2e\xe3\x09\x94\x77\x20\xfe\xba"
+ "\xae\x2e\x9b\xc1\x43\xdd\x9d\x44\xd8\xd6\xfe\xff\xe3\x0f\xf6\xd3"
+ "\x71\xa1\xf8\xda\x1c\xff\x41\x21\x6f\x07\xc9\x55\x99\x6f\x0a\xef"
+ "\xd6\x5a\x6c\xa5\xdd\xba\x98\x46\x30\x43\xad\x27\xe4\xf1\x1e\x3a"
+ "\x89\x4a\xb3\xc8\x6d\xf7\xe9\x2f\x48\xd0\xd7\x29\x38\x5c\xe7\xac"
+ "\xbc\x3f\x65\x5e\x23\xdd\xc1\xad\x73\xed\x1a\xee\x81\xf3\x63\x29"
+ "\x7e\x72\x8f\x1a\xfc\x2d\x03\xf9\xbb\x3c\x38\x42\xc2\xfb\x53\x2f"
+ "\x56\xd6\xca\xb9\xeb\xa4\x17\x46\xdb\x53\x56\xf0\xdd\x1d\x8a\xfc"
+ "\x03\x06\x4d\x8c\x97\x7e\xf0\xc6\x5d\x6d\x5a\x23\xed\xee\xf9\x11"
+ "\xed\x04\x34\x0c\x04\xa0\x60\xf9\xa8\xfe\x8f\xfa\xd6\xf3\x27\x3d"
+ "\x32\x48\xbe\x3b\x56\x3a\xe8\x76\xe9\x54\xe7\x81\xef\xe3\x8f\xd9"
+ "\x03\x42\x5b\xa7\xd2\x69\x96\x39\x05\x8f\x41\x25\x35\x3a\x56\x66"
+ "\x5a\xc8\x36\xda\x84\xc5\xe9\x2e\x55\xac\xe9\xeb\xdc\x8f\xd8\x26"
+ "\x06\x3b\x61\x7f\xd7\x78\x4e\x5b\xe5\xd7\xca\x76\xf0\xd0\x71\xf6"
+ "\x45\x21\x30\xdf\xfc\x69\x82\xcf\xc3\x02\xda\x4f\x4e\x51\x59\x4f"
+ "\x56\xd1\x60\x5a\xd9\x12\x21\x7a\xa9\x99\x81\x5a\xa8\xa8\x10\x94"
+ "\xd7\x3e\x58\x7e\xd9\xde\xbc\xf5\x83\xf0\x92\x4f\xe6\xdd\x17\xb3"
+ "\x9c\xdd\xd5\x7b\xc0\xb4\xb5\x8c\xae\x7b\x63\x1a\xaa\xd0\x93\x55"
+ "\x31\x28\xd7\x8e\xec\x8c\x28\x64\x81\x9e\x2e\x47\xa9\xb3\xa0\x06"
+ "\x2a\x3d\xfe\x27\x40\x9c\x13\x96\xa0\xff\x65\xd9\x05\xd8\x92\xa2"
+ "\x63\x0e\xbf\xe2\xc5\xe3\x51\xbf\x20\x74\xdc\xbe\x5a\xfb\xd5\x5c"
+ "\x1b\x8f\x41\x21\x43\xaf\xdf\x35\xd1\x46\x80\x16\xf0\x28\x5d\x55"
+ "\x52\x0c\x05\x24\x63\x68\x5d\x50\x7a\x50\xde\x2d\xfa\xae\xd0\x71"
+ "\x9a\xad\x2a\x56\xb6\xa2\x52\xbc\xe1\x93\xc4\x93\x36\xc6\x08\xec"
+ "\xf3\x26\x14\x0b\xa5\x4c\x9e\x15\xf3\x48\xf6\xd1\xd4\x45\x41\xd7"
+ "\xb7\x5c\xd9\x89\x89\xe6\x62\xe6\x4b\x9f\x3a\xf7\xc7\x77\xd7\xc3"
+ "\xae\x18\x3c\x5c\xee\x2a\xf1\x80\x8a\x73\x83\xa4\x0e\x63\x12\x04",
+ .pub_b = "\x8e\x2b\x1b\xac\x90\x76\x05\x3f\xf4\xd5\xde\xa3\xad\xf0\xe9\x40"
+ "\x27\x69\xb2\x65\x1a\x54\x5c\x89\x0a\x82\xbd\x59\xc3\x4f\x55\x02"
+ "\x77\xdd\xac\x4c\x3b\x43\xb5\x34\xf5\xa2\x15\x85\xba\xa2\x40\x6f"
+ "\xb7\x37\x6c\xdb\x3d\xd1\xc0\xc9\xa0\x6c\x17\x3c\xa3\xc1\xcf\x7f"
+ "\x86\x86\xcb\xaf\x9a\x5c\x27\x36\x8e\x47\xac\x3f\x30\x46\xe7\xd9"
+ "\xd9\x56\x54\x22\xfe\x43\xb6\xc9\x04\xf9\xd0\x63\x02\x02\xcd\x7d"
+ "\x77\xcc\x21\x1b\x7b\xea\x4a\xc1\x9c\xdb\x68\x0e\x34\x00\x43\x4b"
+ "\xd1\x66\x03\x07\x42\xc9\x79\x7f\x81\x0f\xcc\xff\x4b\x2c\x00\xf9"
+ "\xb9\xd4\x70\xde\x92\x65\x45\x32\xac\x64\x76\x49\xe6\xc0\x26\x77"
+ "\xbc\xb8\x09\xa6\xbd\xa1\x22\x73\x13\x4b\x1c\x7c\xa8\x88\xe1\x69"
+ "\xb1\xe2\x64\x48\x3f\x4b\x2f\xd8\xa3\xb4\xf6\x4e\x5a\x79\x06\xf0"
+ "\xcb\x72\x9c\x72\x0e\xe6\x39\xa6\x69\x7b\xa3\x32\x9e\xa0\x81\xb0"
+ "\xd6\xa2\x10\xee\xbb\x5b\xd5\x51\xd0\xd6\xbf\x28\x80\x0c\x67\xf4"
+ "\x38\xf4\xc3\x16\x3b\x83\xd2\x6c\xd3\xf3\x02\x34\x64\xeb\xa1\x6c"
+ "\xb3\xa5\x13\x6a\x64\xb5\xa6\x3a\x1b\x63\x5e\xe7\x03\x96\xdb\x37"
+ "\x4f\xc8\xb8\x60\x86\x45\x30\x61\x97\xfa\x8e\x3d\xae\x48\xa4\x7d"
+ "\xfe\x72\x6a\xe9\x98\xeb\x77\x13\x4a\x4e\x6a\xae\x24\xf2\xd2\xad"
+ "\xa6\xca\x7e\x98\x38\x53\x24\xea\x98\x09\x2c\x5b\x0f\x46\x9c\x6a"
+ "\x50\x0c\x46\x27\x98\x51\x56\x06\x6e\xca\xe9\xb8\x47\xe7\x20\xd7"
+ "\x71\x38\xdc\x17\x74\x4e\x0b\x9d\xa6\xe3\x8e\x69\x28\x2a\x90\x45"
+ "\x3e\x72\xdf\x62\x88\x3a\x8a\x04\x59\x05\x43\x2a\xa3\x22\x90\x1f"
+ "\x62\xa2\x46\x03\x90\x81\xd8\x1a\x12\x77\x37\x08\x34\x58\x0d\x0b"
+ "\x5d\x02\x5f\xa3\x66\xfb\x6e\xeb\x22\x5c\xe3\xbc\xcc\x8d\xa5\x94"
+ "\xe8\x14\xd5\x08\xfe\x8b\x75\x6a\xf1\x9a\xde\x32\x26\x10\xb4\xb9"
+ "\x9f\x5d\x60\x83\x66\xdf\xfc\xfe\x4d\xdc\xd9\x95\xcf\xa9\x89\xc4"
+ "\xe6\x92\x8a\xd5\x99\x35\x7e\xc5\x5b\x42\x62\x92\x3f\x7e\x2a\x32"
+ "\xd0\x64\x8b\x0a\x61\x0f\x3d\xa8\x83\xb8\x95\x77\x34\x3c\xd0\x31"
+ "\xe6\xf9\x01\x69\x2f\x3f\xb4\xa1\x03\x18\x1b\xf1\xbd\xac\xe1\x8d"
+ "\x61\xb8\xd0\xf6\x7f\xd9\x7a\x3f\x26\xc1\x81\x16\x80\x93\xe6\x65"
+ "\x56\x5b\xc1\x56\xfb\xf1\x65\xd9\x5a\x21\xe9\xf2\xd2\xde\x7d\x89"
+ "\x12\xa2\x4f\x8f\xd9\x61\x0c\x51\xbf\xfc\x36\x65\x36\x5b\x51\x20"
+ "\x90\x42\x3a\x99\xd1\x88\x21\xf1\xd7\x1a\xe1\xed\xca\xdd\x08\x4e"
+ "\xb5\x29\xa3\x72\x83\xed\x85\x15\xe5\x20\x5c\x6e\xbb\x39\x2a\x38"
+ "\x12\xde\xe0\x5a\x1c\x10\x7b\xb2\x5b\x46\xa2\x80\xb4\x3e\x59\x1a"
+ "\x7d\x09\x70\x9d\xbc\x6f\x76\x8a\xf4\xb4\xc9\x35\x26\xfb\xcc\x7a"
+ "\xb5\x52\x8b\x06\xe3\x6f\x50\xb1\xf6\x4d\xf4\xbd\xa9\x1e\x39\x37"
+ "\x56\x1b\x25\x62\x0f\x7a\x4a\xfb\xe1\xff\xf5\x28\xf8\x40\xd4\xf2"
+ "\x99\xd1\xad\xd9\x23\x5b\x4e\x3b\x8a\x57\x87\x54\x23\xd7\x73\x54"
+ "\xf9\xa0\x0e\xa5\x49\x5f\x8e\x93\x8e\x41\xeb\x69\x7b\xef\x48\xeb"
+ "\x1b\xb6\x6b\x48\x29\xff\x4e\x89\xb1\xf2\xbd\xef\x77\x7a\x92\x71"
+ "\x97\xac\x21\x76\x6e\x54\xd3\x89\xe2\xca\x07\xbe\x00\xe8\xb4\xd4"
+ "\xd5\x63\xf1\x4b\xbb\x76\x16\xc8\xdb\xf1\x14\x18\x4b\xbe\x39\x00"
+ "\x75\xf2\x85\x2b\x8e\xd8\xc0\x81\x12\x9b\xe4\x9f\x98\x74\x6f\x70"
+ "\xa8\xa4\x7f\x14\x66\x21\x91\x61\x86\x2b\xa6\x01\xf5\xd1\x00\x6c"
+ "\x7b\x3a\x39\xb8\x3b\x6c\xe8\x4b\x16\xd0\x99\xcc\x2a\x9f\x63\xdd"
+ "\x77\xc1\x1d\x14\x98\x38\xca\x3f\x5a\xdf\xc4\x9d\xf5\xe6\xa8\x2a"
+ "\x4b\xfb\x53\x68\x6e\x31\x39\x12\x07\x2b\x7e\xe3\x78\x3d\x23\x79"
+ "\xc4\x2b\xc1\xac\x29\x20\x59\x91\xf0\xd8\xfb\x9d\x20\x5b\x21\x70"
+ "\x0e\xa8\x46\xca\xfc\x09\x38\x12\x1c\x2c\xa4\x67\xa8\x94\x04\x87"
+ "\xa4\xaa\x2c\x25\xea\x57\x35\xc2\x4e\xa9\x15\x0e\x7c\x46\xe3\xdf"
+ "\x40\x77\x81\xd6\xf3\x9c\xee\xd4\x22\x3b\xe9\x20\x98\x1a\x18\x4e"
+ "\xd3\x3d\xe6\xd0\xfc\xcc\x2d\x5c\x72\x69\xe7\xc4\xca\x12\xe9\xb7"
+ "\xbe\x84\x31\x9e\x5d\x6f\xcf\x91\x37\x15\xeb\xf8\x08\xba\x50\xc3"
+ "\xb4\xad\x36\x09\xe0\xd3\x61\xea\x9b\xe6\x28\x56\x33\x73\x2b\xe7"
+ "\x51\x52\xc5\x40\x66\x34\x47\x03\xe4\xd9\xff\x4b\x23\xaf\x80\x63"
+ "\xbd\x30\xe0\x2d\xe2\x08\x4a\x09\xd3\x04\x0a\x4d\xbc\x7b\xac\x80"
+ "\x94\x8c\x4d\xd7\x4a\x35\x72\xa4\x18\x66\xf2\x0e\x01\x6a\x0f\x31"
+ "\x17\x31\x07\xb9\x65\x68\xd6\xac\x2f\x53\x36\x01\x3f\x06\x18\x77"
+ "\xae\xbe\xb4\xac\x60\x9a\x73\x3d\x7d\x40\xdc\xed\x19\x27\x03\x57"
+ "\x0a\xca\xe4\xcc\xc6\xfe\x3b\x7a\x9d\x73\xb2\xa4\xf1\x21\x32\x11"
+ "\x21\x86\x77\x1e\x25\x96\xc8\xb0\xce\xd5\x1e\x4c\xea\xd0\x04\xde"
+ "\x67\xa8\x36\x23\x89\x85\x7b\xa9\x2a\x0c\x52\xd5\x63\xbe\x17\xdb"
+ "\x82\xd5\xf6\x51\xc3\x9a\x9f\xab\x58\x46\x87\x0c\xb3\xc1\x23\x87"
+ "\x61\xac\x00\xed\x46\xe5\x79\x2c\x5b\xd6\x6e\xaa\xee\x4b\x08\xb2",
+ .shared = "\x7d\xc5\xc9\x35\xc9\x62\x79\x93\x1d\xfc\x55\xc6\x20\x36\x95\x7c"
+ "\xa1\x5d\x9c\x0f\xdf\x95\xa7\x1f\x59\xae\xb7\xa6\x39\x0f\xd1\x7e"
+ "\x93\x34\x7f\xb6\x72\x96\xf2\xe3\x20\x30\x40\xe7\x77\x51\xc1\x0a"
+ "\x08\xf3\xad\x86\x40\xec\xf0\xd2\x9d\x88\xba\xff\x50\xf7\x4e\x88"
+ "\x1f\xf0\xdd\x12\x17\x05\x97\x1e\x3c\xe3\x91\x65\x10\xd9\x16\x32"
+ "\x21\x44\xd8\x81\xed\x17\x9a\x60\x34\xee\x4d\x81\x03\xbc\x05\x56"
+ "\xba\x7f\xa7\x31\xc9\x3b\x57\x5e\x38\x1c\x45\xbf\xfc\x51\x48\xf3"
+ "\x05\xa9\x74\x39\xdf\xa1\x34\x48\x62\x31\x5d\x58\x45\x16\xc1\x9e"
+ "\x26\x38\xb4\x59\x95\xdd\x92\x52\x1e\x26\x20\xed\xd2\xb2\xb3\x98"
+ "\x6b\xde\xbe\xf1\xa0\xbc\x52\xc9\xfe\x97\x65\x78\xd6\xce\x91\xb1"
+ "\x8e\x9b\x04\xfc\x74\xb9\x1d\x52\x7d\x0b\xf0\x1d\x2a\x3c\xde\x2e"
+ "\x4c\x49\xee\x62\x9c\x59\x09\x12\xd7\x4a\xd7\x0d\x03\x72\x3d\x04"
+ "\x58\xd5\x19\x9e\x42\x97\x2c\x32\xb9\xda\x5c\xee\xaf\x40\x44\xc9"
+ "\xfd\x69\xcf\x6e\x81\x6b\x2b\xa4\x21\x3b\xde\x2d\xc3\xb7\x03\xe3"
+ "\xa3\xf2\x77\xd8\x70\xf5\x6f\x24\x48\xbf\x48\xb5\xa5\xff\x86\x06"
+ "\x0c\x23\xc2\xc9\x8e\xfa\x0f\xa6\xaa\x77\x95\x1a\x1a\x91\xfe\x30"
+ "\x6b\x18\xc5\xfb\x13\x76\x7a\xa5\x29\xa3\x02\xce\x2b\x46\x74\x5a"
+ "\x1c\xe6\x7c\x3a\xb7\x43\xea\xf9\x3f\x53\xa9\xfb\x63\x94\x26\x42"
+ "\x82\xdc\x9a\x4a\xd6\xbb\xa9\xd1\x6e\xc2\xc7\x92\x28\x3b\x33\xc3"
+ "\x58\xf6\xfb\x44\x5c\x5d\xea\xe0\x62\xaa\x30\x97\x30\x38\x36\x47"
+ "\xb1\xae\x3b\x99\x35\xab\xc9\x45\x2a\x07\xec\x34\x88\x61\x70\xb0"
+ "\x47\xf9\xd6\x61\x23\x48\x71\x6f\x85\x3b\xf1\x8a\x17\x72\xc1\x99"
+ "\x6d\x4f\x94\x2b\x11\xf2\x75\x33\x08\x67\x08\xba\xa6\x50\x2a\x1f"
+ "\x6b\x0e\x38\x0e\x52\xea\xe0\xd8\xcd\xd0\x11\x80\xa7\xb9\x97\xd9"
+ "\x68\x1b\x21\x7c\xe2\x69\x8e\x14\x61\x39\x49\x65\xbb\xc2\x7c\x2e"
+ "\x8e\xb1\x83\x28\x7b\xf0\xf0\x62\xdc\x35\x33\xea\xc9\x14\x3f\x83"
+ "\x60\xd3\xec\x37\xd4\xe3\x87\x86\x2c\x43\x53\x80\x3f\x39\x36\x4d"
+ "\x1e\x73\x2a\x5c\x8c\xfb\x2d\x2e\x78\xd1\xf1\xe5\x8c\x89\x35\x1e"
+ "\x0a\xd3\x07\x3c\x05\x0d\x4e\xa0\x81\x6a\xee\xb5\x27\xef\xa6\x2e"
+ "\x04\x07\x01\xf7\x40\x9b\xbf\x7b\xf8\x3d\x0f\x68\x79\xc6\x96\x16"
+ "\x53\x5b\xb0\x8f\xb1\xbd\xd4\x28\xa7\x73\x37\x7b\x06\xbf\x9c\xd2"
+ "\xb4\x28\x82\xb4\xd2\x63\xf9\xb2\x56\x7c\x28\x02\x4b\x91\x63\x03"
+ "\xdc\x58\x6e\xb5\x88\x2e\x76\x3a\x53\x4a\xd4\x22\x7c\xa5\xca\xdc"
+ "\x96\x14\xa6\xfb\x64\xe9\xa5\x09\xf2\x3e\x4d\x7e\x40\x01\x93\x05"
+ "\xe0\xad\x2e\x20\x7c\x4e\x17\x45\xa8\x70\x7e\xd1\xc7\xbb\x10\x94"
+ "\x81\x36\x22\x62\x3f\x44\xe2\xfa\x36\x9e\x4e\x37\x75\xdf\x8d\x6f"
+ "\x15\x73\x41\xf0\x6f\x32\xce\xa9\xfc\x06\x8f\x39\xcf\xfb\x34\x16"
+ "\xba\x74\x7d\xe6\x02\x18\xd3\xe4\x8c\xee\xd1\xef\xdc\xd4\x59\x8d"
+ "\xeb\x25\x7a\x09\xc8\xcc\x38\x7b\xcb\x8b\x3c\xb8\x77\xb4\xad\x1a"
+ "\x9e\x78\x20\x58\x6b\x36\x4d\x5d\xbb\x07\x38\xba\x65\xf4\xfe\x4d"
+ "\x86\x94\xdf\x77\x52\x08\xb1\x75\x07\x85\xaf\x77\xe9\xb3\xab\x3a"
+ "\x35\x30\x8a\xfd\x34\x99\x3b\x8f\xfc\x9a\x9e\xe1\x46\xcb\xbf\xf3"
+ "\xbd\xb9\x9c\x8c\x4e\x29\x6d\xa3\x29\x5a\x1f\x88\x23\xdf\x89\xa7"
+ "\x88\x7c\x90\x75\x5a\xd5\x97\x60\x17\xdf\x5e\x96\x04\xf8\xae\x24"
+ "\xd8\xf1\xa3\x3a\xa1\x40\x40\xd1\x45\x8e\xa5\xe5\x32\x32\xcc\xcc"
+ "\x1c\x8a\x9f\x6d\xa0\xa8\x89\x9d\x75\x2b\xd2\xc2\x07\xb2\xe1\x1a"
+ "\xcd\x4f\x31\x80\xb2\x19\x5f\x06\x9d\x8e\x7a\xf1\x2e\x15\x1d\xbc"
+ "\x4c\xb4\x75\x6f\x7b\xb7\xe4\x70\xd6\x0c\xe1\x27\x93\xce\x4a\x63"
+ "\xde\xe7\x48\x87\x5c\x75\xae\x34\x82\x5a\x71\xda\x1f\x93\x72\x64"
+ "\x7c\x7b\xda\x6a\xd7\xf9\xe8\x1f\x72\x67\x71\x68\x6f\x85\x64\x53"
+ "\x72\x80\x8f\x1f\x7a\x15\x1c\x11\xfd\x11\xf7\x8c\xc4\x43\x41\x60"
+ "\x03\xf2\x79\x19\x27\xdf\x98\x37\xaf\x07\xb3\x8d\x0b\xee\x01\x85"
+ "\x6f\x6d\xa9\xbd\x0e\x1f\x79\xdf\x21\x78\x24\xbf\xc5\x09\x29\x9e"
+ "\x64\x0d\x8e\xff\xc9\xe7\xe8\x51\xd3\xe8\xfe\xa5\xe0\xa9\x8d\x54"
+ "\x17\x3a\x34\xaf\xd9\xbd\xfd\xaa\x8c\x81\x79\xfb\x65\x85\x67\x73"
+ "\x32\x2d\x32\x00\xcb\x3b\xe2\x0e\x43\xe8\x40\xf6\x42\xf2\x74\x00"
+ "\xa0\xfd\xe6\x96\x4a\x2b\xbb\xa9\xd9\x95\xc4\x42\x75\x12\x3f\xbb"
+ "\x79\x35\x9c\x91\xb5\x24\x10\xc4\xc5\xbd\x4a\x4c\x47\xd8\x89\x92"
+ "\x70\xa5\xe6\xc9\xed\x2e\xbd\x98\xc0\x17\xb0\xad\x8c\x31\x95\x81"
+ "\x84\x86\xb1\xaa\x42\xf6\x2e\x10\x92\x2f\x67\x73\x33\xb9\x02\x43"
+ "\x52\x24\x05\xdb\x9c\xec\xc5\xf1\x3e\x78\x05\xcb\x04\xd6\x91\xa8"
+ "\x51\x9e\x48\xa8\xae\xa8\x8d\x13\x2d\xcd\xa1\xbe\x23\x9e\x00\x4c"
+ "\x0a\x59\xf8\x18\xb0\x0a\x06\xe2\x0a\xb4\x16\x02\xa7\x21\x4c\xac"
+ "\x9a\x80\x62\x7f\xb6\xd6\xa0\x3b\x11\xd3\x30\xf9\x3d\xfd\x26\x27",
+};
diff --git a/src/libstrongswan/plugins/test_vectors/test_vectors/modpsub.c b/src/libstrongswan/plugins/test_vectors/test_vectors/modpsub.c
new file mode 100644
index 000000000..bb64e2df9
--- /dev/null
+++ b/src/libstrongswan/plugins/test_vectors/test_vectors/modpsub.c
@@ -0,0 +1,164 @@
+/*
+ * Copyright (C) 2015 Martin Willi
+ * Copyright (C) 2015 revosec AG
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <crypto/crypto_tester.h>
+
+/**
+ * Test vectors from RFC5114
+ */
+
+dh_test_vector_t modp1024_160 = {
+ .group = MODP_1024_160, .priv_len = 20, .pub_len = 128, .shared_len = 128,
+ .priv_a = "\xB9\xA3\xB3\xAE\x8F\xEF\xC1\xA2\x93\x04\x96\x50\x70\x86\xF8\x45"
+ "\x5D\x48\x94\x3E",
+ .priv_b = "\x93\x92\xC9\xF9\xEB\x6A\x7A\x6A\x90\x22\xF7\xD8\x3E\x72\x23\xC6"
+ "\x83\x5B\xBD\xDA",
+ .pub_a = "\x2A\x85\x3B\x3D\x92\x19\x75\x01\xB9\x01\x5B\x2D\xEB\x3E\xD8\x4F"
+ "\x5E\x02\x1D\xCC\x3E\x52\xF1\x09\xD3\x27\x3D\x2B\x75\x21\x28\x1C"
+ "\xBA\xBE\x0E\x76\xFF\x57\x27\xFA\x8A\xCC\xE2\x69\x56\xBA\x9A\x1F"
+ "\xCA\x26\xF2\x02\x28\xD8\x69\x3F\xEB\x10\x84\x1D\x84\xA7\x36\x00"
+ "\x54\xEC\xE5\xA7\xF5\xB7\xA6\x1A\xD3\xDF\xB3\xC6\x0D\x2E\x43\x10"
+ "\x6D\x87\x27\xDA\x37\xDF\x9C\xCE\x95\xB4\x78\x75\x5D\x06\xBC\xEA"
+ "\x8F\x9D\x45\x96\x5F\x75\xA5\xF3\xD1\xDF\x37\x01\x16\x5F\xC9\xE5"
+ "\x0C\x42\x79\xCE\xB0\x7F\x98\x95\x40\xAE\x96\xD5\xD8\x8E\xD7\x76",
+ .pub_b = "\x71\x7A\x6C\xB0\x53\x37\x1F\xF4\xA3\xB9\x32\x94\x1C\x1E\x56\x63"
+ "\xF8\x61\xA1\xD6\xAD\x34\xAE\x66\x57\x6D\xFB\x98\xF6\xC6\xCB\xF9"
+ "\xDD\xD5\xA5\x6C\x78\x33\xF6\xBC\xFD\xFF\x09\x55\x82\xAD\x86\x8E"
+ "\x44\x0E\x8D\x09\xFD\x76\x9E\x3C\xEC\xCD\xC3\xD3\xB1\xE4\xCF\xA0"
+ "\x57\x77\x6C\xAA\xF9\x73\x9B\x6A\x9F\xEE\x8E\x74\x11\xF8\xD6\xDA"
+ "\xC0\x9D\x6A\x4E\xDB\x46\xCC\x2B\x5D\x52\x03\x09\x0E\xAE\x61\x26"
+ "\x31\x1E\x53\xFD\x2C\x14\xB5\x74\xE6\xA3\x10\x9A\x3D\xA1\xBE\x41"
+ "\xBD\xCE\xAA\x18\x6F\x5C\xE0\x67\x16\xA2\xB6\xA0\x7B\x3C\x33\xFE",
+ .shared = "\x5C\x80\x4F\x45\x4D\x30\xD9\xC4\xDF\x85\x27\x1F\x93\x52\x8C\x91"
+ "\xDF\x6B\x48\xAB\x5F\x80\xB3\xB5\x9C\xAA\xC1\xB2\x8F\x8A\xCB\xA9"
+ "\xCD\x3E\x39\xF3\xCB\x61\x45\x25\xD9\x52\x1D\x2E\x64\x4C\x53\xB8"
+ "\x07\xB8\x10\xF3\x40\x06\x2F\x25\x7D\x7D\x6F\xBF\xE8\xD5\xE8\xF0"
+ "\x72\xE9\xB6\xE9\xAF\xDA\x94\x13\xEA\xFB\x2E\x8B\x06\x99\xB1\xFB"
+ "\x5A\x0C\xAC\xED\xDE\xAE\xAD\x7E\x9C\xFB\xB3\x6A\xE2\xB4\x20\x83"
+ "\x5B\xD8\x3A\x19\xFB\x0B\x5E\x96\xBF\x8F\xA4\xD0\x9E\x34\x55\x25"
+ "\x16\x7E\xCD\x91\x55\x41\x6F\x46\xF4\x08\xED\x31\xB6\x3C\x6E\x6D",
+};
+
+dh_test_vector_t modp2048_224 = {
+ .group = MODP_2048_224, .priv_len = 28, .pub_len = 256, .shared_len = 256,
+ .priv_a = "\x22\xe6\x26\x01\xdb\xff\xd0\x67\x08\xa6\x80\xf7\x47\xf3\x61\xf7"
+ "\x6d\x8f\x4f\x72\x1a\x05\x48\xe4\x83\x29\x4b\x0c",
+ .priv_b = "\x4f\xf3\xbc\x96\xc7\xfc\x6a\x6d\x71\xd3\xb3\x63\x80\x0a\x7c\xdf"
+ "\xef\x6f\xc4\x1b\x44\x17\xea\x15\x35\x3b\x75\x90",
+ .pub_a = "\x1b\x3a\x63\x45\x1b\xd8\x86\xe6\x99\xe6\x7b\x49\x4e\x28\x8b\xd7"
+ "\xf8\xe0\xd3\x70\xba\xdd\xa7\xa0\xef\xd2\xfd\xe7\xd8\xf6\x61\x45"
+ "\xcc\x9f\x28\x04\x19\x97\x5e\xb8\x08\x87\x7c\x8a\x4c\x0c\x8e\x0b"
+ "\xd4\x8d\x4a\x54\x01\xeb\x1e\x87\x76\xbf\xee\xe1\x34\xc0\x38\x31"
+ "\xac\x27\x3c\xd9\xd6\x35\xab\x0c\xe0\x06\xa4\x2a\x88\x7e\x3f\x52"
+ "\xfb\x87\x66\xb6\x50\xf3\x80\x78\xbc\x8e\xe8\x58\x0c\xef\xe2\x43"
+ "\x96\x8c\xfc\x4f\x8d\xc3\xdb\x08\x45\x54\x17\x1d\x41\xbf\x2e\x86"
+ "\x1b\x7b\xb4\xd6\x9d\xd0\xe0\x1e\xa3\x87\xcb\xaa\x5c\xa6\x72\xaf"
+ "\xcb\xe8\xbd\xb9\xd6\x2d\x4c\xe1\x5f\x17\xdd\x36\xf9\x1e\xd1\xee"
+ "\xdd\x65\xca\x4a\x06\x45\x5c\xb9\x4c\xd4\x0a\x52\xec\x36\x0e\x84"
+ "\xb3\xc9\x26\xe2\x2c\x43\x80\xa3\xbf\x30\x9d\x56\x84\x97\x68\xb7"
+ "\xf5\x2c\xfd\xf6\x55\xfd\x05\x3a\x7e\xf7\x06\x97\x9e\x7e\x58\x06"
+ "\xb1\x7d\xfa\xe5\x3a\xd2\xa5\xbc\x56\x8e\xbb\x52\x9a\x7a\x61\xd6"
+ "\x8d\x25\x6f\x8f\xc9\x7c\x07\x4a\x86\x1d\x82\x7e\x2e\xbc\x8c\x61"
+ "\x34\x55\x31\x15\xb7\x0e\x71\x03\x92\x0a\xa1\x6d\x85\xe5\x2b\xcb"
+ "\xab\x8d\x78\x6a\x68\x17\x8f\xa8\xff\x7c\x2f\x5c\x71\x64\x8d\x6f",
+ .pub_b = "\x4d\xce\xe9\x92\xa9\x76\x2a\x13\xf2\xf8\x38\x44\xad\x3d\x77\xee"
+ "\x0e\x31\xc9\x71\x8b\x3d\xb6\xc2\x03\x5d\x39\x61\x18\x2c\x3e\x0b"
+ "\xa2\x47\xec\x41\x82\xd7\x60\xcd\x48\xd9\x95\x99\x97\x06\x22\xa1"
+ "\x88\x1b\xba\x2d\xc8\x22\x93\x9c\x78\xc3\x91\x2c\x66\x61\xfa\x54"
+ "\x38\xb2\x07\x66\x22\x2b\x75\xe2\x4c\x2e\x3a\xd0\xc7\x28\x72\x36"
+ "\x12\x95\x25\xee\x15\xb5\xdd\x79\x98\xaa\x04\xc4\xa9\x69\x6c\xac"
+ "\xd7\x17\x20\x83\xa9\x7a\x81\x66\x4e\xad\x2c\x47\x9e\x44\x4e\x4c"
+ "\x06\x54\xcc\x19\xe2\x8d\x77\x03\xce\xe8\xda\xcd\x61\x26\xf5\xd6"
+ "\x65\xec\x52\xc6\x72\x55\xdb\x92\x01\x4b\x03\x7e\xb6\x21\xa2\xac"
+ "\x8e\x36\x5d\xe0\x71\xff\xc1\x40\x0a\xcf\x07\x7a\x12\x91\x3d\xd8"
+ "\xde\x89\x47\x34\x37\xab\x7b\xa3\x46\x74\x3c\x1b\x21\x5d\xd9\xc1"
+ "\x21\x64\xa7\xe4\x05\x31\x18\xd1\x99\xbe\xc8\xef\x6f\xc5\x61\x17"
+ "\x0c\x84\xc8\x7d\x10\xee\x9a\x67\x4a\x1f\xa8\xff\xe1\x3b\xdf\xba"
+ "\x1d\x44\xde\x48\x94\x6d\x68\xdc\x0c\xdd\x77\x76\x35\xa7\xab\x5b"
+ "\xfb\x1e\x4b\xb7\xb8\x56\xf9\x68\x27\x73\x4c\x18\x41\x38\xe9\x15"
+ "\xd9\xc3\x00\x2e\xbc\xe5\x31\x20\x54\x6a\x7e\x20\x02\x14\x2b\x6c",
+ .shared = "\x34\xd9\xbd\xdc\x1b\x42\x17\x6c\x31\x3f\xea\x03\x4c\x21\x03\x4d"
+ "\x07\x4a\x63\x13\xbb\x4e\xcd\xb3\x70\x3f\xff\x42\x45\x67\xa4\x6b"
+ "\xdf\x75\x53\x0e\xde\x0a\x9d\xa5\x22\x9d\xe7\xd7\x67\x32\x28\x6c"
+ "\xbc\x0f\x91\xda\x4c\x3c\x85\x2f\xc0\x99\xc6\x79\x53\x1d\x94\xc7"
+ "\x8a\xb0\x3d\x9d\xec\xb0\xa4\xe4\xca\x8b\x2b\xb4\x59\x1c\x40\x21"
+ "\xcf\x8c\xe3\xa2\x0a\x54\x1d\x33\x99\x40\x17\xd0\x20\x0a\xe2\xc9"
+ "\x51\x6e\x2f\xf5\x14\x57\x79\x26\x9e\x86\x2b\x0f\xb4\x74\xa2\xd5"
+ "\x6d\xc3\x1e\xd5\x69\xa7\x70\x0b\x4c\x4a\xb1\x6b\x22\xa4\x55\x13"
+ "\x53\x1e\xf5\x23\xd7\x12\x12\x07\x7b\x5a\x16\x9b\xde\xff\xad\x7a"
+ "\xd9\x60\x82\x84\xc7\x79\x5b\x6d\x5a\x51\x83\xb8\x70\x66\xde\x17"
+ "\xd8\xd6\x71\xc9\xeb\xd8\xec\x89\x54\x4d\x45\xec\x06\x15\x93\xd4"
+ "\x42\xc6\x2a\xb9\xce\x3b\x1c\xb9\x94\x3a\x1d\x23\xa5\xea\x3b\xcf"
+ "\x21\xa0\x14\x71\xe6\x7e\x00\x3e\x7f\x8a\x69\xc7\x28\xbe\x49\x0b"
+ "\x2f\xc8\x8c\xfe\xb9\x2d\xb6\xa2\x15\xe5\xd0\x3c\x17\xc4\x64\xc9"
+ "\xac\x1a\x46\xe2\x03\xe1\x3f\x95\x29\x95\xfb\x03\xc6\x9d\x3c\xc4"
+ "\x7f\xcb\x51\x0b\x69\x98\xff\xd3\xaa\x6d\xe7\x3c\xf9\xf6\x38\x69",
+};
+
+dh_test_vector_t modp2048_256 = {
+ .group = MODP_2048_256, .priv_len = 32, .pub_len = 256, .shared_len = 256,
+ .priv_a = "\x08\x81\x38\x2c\xdb\x87\x66\x0c\x6d\xc1\x3e\x61\x49\x38\xd5\xb9"
+ "\xc8\xb2\xf2\x48\x58\x1c\xc5\xe3\x1b\x35\x45\x43\x97\xfc\xe5\x0e",
+ .priv_b = "\x7d\x62\xa7\xe3\xef\x36\xde\x61\x7b\x13\xd1\xaf\xb8\x2c\x78\x0d"
+ "\x83\xa2\x3b\xd4\xee\x67\x05\x64\x51\x21\xf3\x71\xf5\x46\xa5\x3d",
+ .pub_a = "\x2e\x93\x80\xc8\x32\x3a\xf9\x75\x45\xbc\x49\x41\xde\xb0\xec\x37"
+ "\x42\xc6\x2f\xe0\xec\xe8\x24\xa6\xab\xdb\xe6\x6c\x59\xbe\xe0\x24"
+ "\x29\x11\xbf\xb9\x67\x23\x5c\xeb\xa3\x5a\xe1\x3e\x4e\xc7\x52\xbe"
+ "\x63\x0b\x92\xdc\x4b\xde\x28\x47\xa9\xc6\x2c\xb8\x15\x27\x45\x42"
+ "\x1f\xb7\xeb\x60\xa6\x3c\x0f\xe9\x15\x9f\xcc\xe7\x26\xce\x7c\xd8"
+ "\x52\x3d\x74\x50\x66\x7e\xf8\x40\xe4\x91\x91\x21\xeb\x5f\x01\xc8"
+ "\xc9\xb0\xd3\xd6\x48\xa9\x3b\xfb\x75\x68\x9e\x82\x44\xac\x13\x4a"
+ "\xf5\x44\x71\x1c\xe7\x9a\x02\xdc\xc3\x42\x26\x68\x47\x80\xdd\xdc"
+ "\xb4\x98\x59\x41\x06\xc3\x7f\x5b\xc7\x98\x56\x48\x7a\xf5\xab\x02"
+ "\x2a\x2e\x5e\x42\xf0\x98\x97\xc1\xa8\x5a\x11\xea\x02\x12\xaf\x04"
+ "\xd9\xb4\xce\xbc\x93\x7c\x3c\x1a\x3e\x15\xa8\xa0\x34\x2e\x33\x76"
+ "\x15\xc8\x4e\x7f\xe3\xb8\xb9\xb8\x7f\xb1\xe7\x3a\x15\xaf\x12\xa3"
+ "\x0d\x74\x6e\x06\xdf\xc3\x4f\x29\x0d\x79\x7c\xe5\x1a\xa1\x3a\xa7"
+ "\x85\xbf\x66\x58\xaf\xf5\xe4\xb0\x93\x00\x3c\xbe\xaf\x66\x5b\x3c"
+ "\x2e\x11\x3a\x3a\x4e\x90\x52\x69\x34\x1d\xc0\x71\x14\x26\x68\x5f"
+ "\x4e\xf3\x7e\x86\x8a\x81\x26\xff\x3f\x22\x79\xb5\x7c\xa6\x7e\x29",
+ .pub_b = "\x57\x5f\x03\x51\xbd\x2b\x1b\x81\x74\x48\xbd\xf8\x7a\x6c\x36\x2c"
+ "\x1e\x28\x9d\x39\x03\xa3\x0b\x98\x32\xc5\x74\x1f\xa2\x50\x36\x3e"
+ "\x7a\xcb\xc7\xf7\x7f\x3d\xac\xbc\x1f\x13\x1a\xdd\x8e\x03\x36\x7e"
+ "\xff\x8f\xbb\xb3\xe1\xc5\x78\x44\x24\x80\x9b\x25\xaf\xe4\xd2\x26"
+ "\x2a\x1a\x6f\xd2\xfa\xb6\x41\x05\xca\x30\xa6\x74\xe0\x7f\x78\x09"
+ "\x85\x20\x88\x63\x2f\xc0\x49\x23\x37\x91\xad\x4e\xdd\x08\x3a\x97"
+ "\x8b\x88\x3e\xe6\x18\xbc\x5e\x0d\xd0\x47\x41\x5f\x2d\x95\xe6\x83"
+ "\xcf\x14\x82\x6b\x5f\xbe\x10\xd3\xce\x41\xc6\xc1\x20\xc7\x8a\xb2"
+ "\x00\x08\xc6\x98\xbf\x7f\x0b\xca\xb9\xd7\xf4\x07\xbe\xd0\xf4\x3a"
+ "\xfb\x29\x70\xf5\x7f\x8d\x12\x04\x39\x63\xe6\x6d\xdd\x32\x0d\x59"
+ "\x9a\xd9\x93\x6c\x8f\x44\x13\x7c\x08\xb1\x80\xec\x5e\x98\x5c\xeb"
+ "\xe1\x86\xf3\xd5\x49\x67\x7e\x80\x60\x73\x31\xee\x17\xaf\x33\x80"
+ "\xa7\x25\xb0\x78\x23\x17\xd7\xdd\x43\xf5\x9d\x7a\xf9\x56\x8a\x9b"
+ "\xb6\x3a\x84\xd3\x65\xf9\x22\x44\xed\x12\x09\x88\x21\x93\x02\xf4"
+ "\x29\x24\xc7\xca\x90\xb8\x9d\x24\xf7\x1b\x0a\xb6\x97\x82\x3d\x7d"
+ "\xeb\x1a\xff\x5b\x0e\x8e\x4a\x45\xd4\x9f\x7f\x53\x75\x7e\x19\x13",
+ .shared = "\x86\xc7\x0b\xf8\xd0\xbb\x81\xbb\x01\x07\x8a\x17\x21\x9c\xb7\xd2"
+ "\x72\x03\xdb\x2a\x19\xc8\x77\xf1\xd1\xf1\x9f\xd7\xd7\x7e\xf2\x25"
+ "\x46\xa6\x8f\x00\x5a\xd5\x2d\xc8\x45\x53\xb7\x8f\xc6\x03\x30\xbe"
+ "\x51\xea\x7c\x06\x72\xca\xc1\x51\x5e\x4b\x35\xc0\x47\xb9\xa5\x51"
+ "\xb8\x8f\x39\xdc\x26\xda\x14\xa0\x9e\xf7\x47\x74\xd4\x7c\x76\x2d"
+ "\xd1\x77\xf9\xed\x5b\xc2\xf1\x1e\x52\xc8\x79\xbd\x95\x09\x85\x04"
+ "\xcd\x9e\xec\xd8\xa8\xf9\xb3\xef\xbd\x1f\x00\x8a\xc5\x85\x30\x97"
+ "\xd9\xd1\x83\x7f\x2b\x18\xf7\x7c\xd7\xbe\x01\xaf\x80\xa7\xc7\xb5"
+ "\xea\x3c\xa5\x4c\xc0\x2d\x0c\x11\x6f\xee\x3f\x95\xbb\x87\x39\x93"
+ "\x85\x87\x5d\x7e\x86\x74\x7e\x67\x6e\x72\x89\x38\xac\xbf\xf7\x09"
+ "\x8e\x05\xbe\x4d\xcf\xb2\x40\x52\xb8\x3a\xef\xfb\x14\x78\x3f\x02"
+ "\x9a\xdb\xde\x7f\x53\xfa\xe9\x20\x84\x22\x40\x90\xe0\x07\xce\xe9"
+ "\x4d\x4b\xf2\xba\xce\x9f\xfd\x4b\x57\xd2\xaf\x7c\x72\x4d\x0c\xaa"
+ "\x19\xbf\x05\x01\xf6\xf1\x7b\x4a\xa1\x0f\x42\x5e\x3e\xa7\x60\x80"
+ "\xb4\xb9\xd6\xb3\xce\xfe\xa1\x15\xb2\xce\xb8\x78\x9b\xb8\xa3\xb0"
+ "\xea\x87\xfe\xbe\x63\xb6\xc8\xf8\x46\xec\x6d\xb0\xc2\x6c\x5d\x7c",
+};
diff --git a/src/libstrongswan/plugins/test_vectors/test_vectors_plugin.c b/src/libstrongswan/plugins/test_vectors/test_vectors_plugin.c
index cd0a12a5c..0505e2c40 100644
--- a/src/libstrongswan/plugins/test_vectors/test_vectors_plugin.c
+++ b/src/libstrongswan/plugins/test_vectors/test_vectors_plugin.c
@@ -19,12 +19,13 @@
#include <crypto/crypto_tester.h>
/* define symbols of all test vectors */
-#define TEST_VECTOR_CRYPTER(x) crypter_test_vector_t x;
-#define TEST_VECTOR_AEAD(x) aead_test_vector_t x;
-#define TEST_VECTOR_SIGNER(x) signer_test_vector_t x;
-#define TEST_VECTOR_HASHER(x) hasher_test_vector_t x;
-#define TEST_VECTOR_PRF(x) prf_test_vector_t x;
-#define TEST_VECTOR_RNG(x) rng_test_vector_t x;
+#define TEST_VECTOR_CRYPTER(x) extern crypter_test_vector_t x;
+#define TEST_VECTOR_AEAD(x) extern aead_test_vector_t x;
+#define TEST_VECTOR_SIGNER(x) extern signer_test_vector_t x;
+#define TEST_VECTOR_HASHER(x) extern hasher_test_vector_t x;
+#define TEST_VECTOR_PRF(x) extern prf_test_vector_t x;
+#define TEST_VECTOR_RNG(x) extern rng_test_vector_t x;
+#define TEST_VECTOR_DH(x) extern dh_test_vector_t x;
#include "test_vectors.h"
@@ -34,6 +35,7 @@
#undef TEST_VECTOR_HASHER
#undef TEST_VECTOR_PRF
#undef TEST_VECTOR_RNG
+#undef TEST_VECTOR_DH
#define TEST_VECTOR_CRYPTER(x)
#define TEST_VECTOR_AEAD(x)
@@ -41,6 +43,7 @@
#define TEST_VECTOR_HASHER(x)
#define TEST_VECTOR_PRF(x)
#define TEST_VECTOR_RNG(x)
+#define TEST_VECTOR_DH(x)
/* create test vector arrays */
#undef TEST_VECTOR_CRYPTER
@@ -91,6 +94,14 @@ static rng_test_vector_t *rng[] = {
#undef TEST_VECTOR_RNG
#define TEST_VECTOR_RNG(x)
+#undef TEST_VECTOR_DH
+#define TEST_VECTOR_DH(x) &x,
+static dh_test_vector_t *dh[] = {
+#include "test_vectors.h"
+};
+#undef TEST_VECTOR_DH
+#define TEST_VECTOR_DH(x)
+
typedef struct private_test_vectors_plugin_t private_test_vectors_plugin_t;
/**
@@ -175,7 +186,11 @@ plugin_t *test_vectors_plugin_create()
lib->crypto->add_test_vector(lib->crypto,
RANDOM_NUMBER_GENERATOR, rng[i]);
}
+ for (i = 0; i < countof(dh); i++)
+ {
+ lib->crypto->add_test_vector(lib->crypto,
+ DIFFIE_HELLMAN_GROUP, dh[i]);
+ }
return &this->public.plugin;
}
-