From 7ca1e3783deb50175b9c37f23fc0797818ef32b3 Mon Sep 17 00:00:00 2001 From: Brian Smith Date: Sun, 16 Feb 2025 11:34:47 -0800 Subject: [PATCH] aes-gcm: Enable AVX-512 implementation. --- Cargo.toml | 1 + build.rs | 5 + .../aes/asm/aes-gcm-avx512-x86_64.pl | 12 ++- src/aead/aes_gcm.rs | 23 +++++ src/aead/aes_gcm/vaesclmulavx512.rs | 91 +++++++++++++++++++ src/aead/gcm.rs | 10 ++ src/aead/gcm/vclmulavx2.rs | 1 + src/aead/gcm/vclmulavx512.rs | 49 ++++++++++ src/cpu.rs | 14 +++ src/cpu/intel.rs | 39 ++++++++ 10 files changed, 242 insertions(+), 3 deletions(-) create mode 100644 src/aead/aes_gcm/vaesclmulavx512.rs create mode 100644 src/aead/gcm/vclmulavx512.rs diff --git a/Cargo.toml b/Cargo.toml index 8f8da17dad..af9cf85d5a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -51,6 +51,7 @@ include = [ "crypto/curve25519/internal.h", "crypto/fipsmodule/aes/aes_nohw.c", "crypto/fipsmodule/aes/asm/aes-gcm-avx2-x86_64.pl", + "crypto/fipsmodule/aes/asm/aes-gcm-avx512-x86_64.pl", "crypto/fipsmodule/aes/asm/aesni-x86.pl", "crypto/fipsmodule/aes/asm/aesni-gcm-x86_64.pl", "crypto/fipsmodule/aes/asm/aesni-x86_64.pl", diff --git a/build.rs b/build.rs index 8062d6bd21..bb3cbd7d98 100644 --- a/build.rs +++ b/build.rs @@ -77,6 +77,7 @@ const RING_SRCS: &[(&[&str], &str)] = &[ (&[X86_64], "crypto/chacha/asm/chacha-x86_64.pl"), (&[X86_64], "crypto/curve25519/curve25519_64_adx.c"), (&[X86_64], "crypto/fipsmodule/aes/asm/aes-gcm-avx2-x86_64.pl"), + (&[X86_64], "crypto/fipsmodule/aes/asm/aes-gcm-avx512-x86_64.pl"), (&[X86_64], "crypto/fipsmodule/aes/asm/aesni-gcm-x86_64.pl"), (&[X86_64], "crypto/fipsmodule/aes/asm/aesni-x86_64.pl"), (&[X86_64], "crypto/fipsmodule/aes/asm/ghash-x86_64.pl"), @@ -889,8 +890,10 @@ fn prefix_all_symbols(pp: char, prefix_prefix: &str, prefix: &str) -> String { "OPENSSL_cpuid_setup", "aes_gcm_dec_kernel", "aes_gcm_dec_update_vaes_avx2", + "aes_gcm_dec_update_vaes_avx512", "aes_gcm_enc_kernel", "aes_gcm_enc_update_vaes_avx2", + "aes_gcm_enc_update_vaes_avx512", 
"aes_hw_ctr32_encrypt_blocks", "aes_hw_set_encrypt_key", "aes_hw_set_encrypt_key_alt", @@ -950,12 +953,14 @@ fn prefix_all_symbols(pp: char, prefix_prefix: &str, prefix: &str) -> String { "gcm_ghash_clmul", "gcm_ghash_neon", "gcm_ghash_vpclmulqdq_avx2_16", + "gcm_ghash_vpclmulqdq_avx512_16", "gcm_gmult_clmul", "gcm_gmult_neon", "gcm_init_avx", "gcm_init_clmul", "gcm_init_neon", "gcm_init_vpclmulqdq_avx2", + "gcm_init_vpclmulqdq_avx512", "k25519Precomp", "limbs_mul_add_limb", "little_endian_bytes_from_scalar", diff --git a/crypto/fipsmodule/aes/asm/aes-gcm-avx512-x86_64.pl b/crypto/fipsmodule/aes/asm/aes-gcm-avx512-x86_64.pl index 4b98b770ca..5629216e31 100644 --- a/crypto/fipsmodule/aes/asm/aes-gcm-avx512-x86_64.pl +++ b/crypto/fipsmodule/aes/asm/aes-gcm-avx512-x86_64.pl @@ -588,10 +588,11 @@ sub _ghash_4x { return $code; } -# void gcm_gmult_vpclmulqdq_avx512(uint8_t Xi[16], const u128 Htable[16]); -$code .= _begin_func "gcm_gmult_vpclmulqdq_avx512", 1; +# void gcm_ghash_vpclmulqdq_avx512_16(uint8_t Xi[16], const u128 Htable[16], +# const uint8_t aad[16], size_t aad_len_16);); +$code .= _begin_func "gcm_ghash_vpclmulqdq_avx512_16", 1; { - my ( $GHASH_ACC_PTR, $HTABLE ) = @argregs[ 0 .. 1 ]; + my ( $GHASH_ACC_PTR, $HTABLE, $AAD, $AAD_LEN_16 ) = @argregs[ 0 .. 3 ]; my ( $GHASH_ACC, $BSWAP_MASK, $H_POW1, $GFPOLY, $T0, $T1, $T2 ) = map( "%xmm$_", ( 0 .. 6 ) ); @@ -599,7 +600,12 @@ sub _ghash_4x { @{[ _save_xmmregs (6) ]} .seh_endprologue + # Load the GHASH accumulator. vmovdqu ($GHASH_ACC_PTR), $GHASH_ACC + + # XOR the AAD into the accumulator. 
+ vpxor ($AAD), $GHASH_ACC, $GHASH_ACC + vmovdqu .Lbswap_mask(%rip), $BSWAP_MASK vmovdqu $OFFSETOFEND_H_POWERS-16($HTABLE), $H_POW1 vmovdqu .Lgfpoly(%rip), $GFPOLY diff --git a/src/aead/aes_gcm.rs b/src/aead/aes_gcm.rs index a1791e4b18..d0210938c3 100644 --- a/src/aead/aes_gcm.rs +++ b/src/aead/aes_gcm.rs @@ -36,6 +36,7 @@ use cpu::GetFeature as _; mod aarch64; mod aeshwclmulmovbe; mod vaesclmulavx2; +mod vaesclmulavx512; #[derive(Clone)] pub(super) struct Key(DynKey); @@ -51,6 +52,9 @@ impl Key { #[derive(Clone)] enum DynKey { + #[cfg(target_arch = "x86_64")] + VAesClMulAvx512(Combo), + #[cfg(target_arch = "x86_64")] VAesClMulAvx2(Combo), @@ -85,6 +89,9 @@ impl DynKey { let aes_key = aes::hw::Key::new(key, aes, cpu.get_feature())?; let gcm_key_value = derive_gcm_key_value(&aes_key); let combo = if let Some(cpu) = cpu.get_feature() { + let gcm_key = gcm::vclmulavx512::Key::new(gcm_key_value, cpu); + Self::VAesClMulAvx512(Combo { aes_key, gcm_key }) + } else if let Some(cpu) = cpu.get_feature() { let gcm_key = gcm::vclmulavx2::Key::new(gcm_key_value, cpu); Self::VAesClMulAvx2(Combo { aes_key, gcm_key }) } else if let Some(cpu) = cpu.get_feature() { @@ -189,6 +196,11 @@ pub(super) fn seal( seal_whole_partial(c, aad, in_out, ctr, tag_iv, aarch64::seal_whole) } + #[cfg(target_arch = "x86_64")] + DynKey::VAesClMulAvx512(c) => { + seal_whole_partial(c, aad, in_out, ctr, tag_iv, vaesclmulavx512::seal_whole) + } + #[cfg(target_arch = "x86_64")] DynKey::VAesClMulAvx2(c) => seal_whole_partial( c, @@ -316,6 +328,17 @@ pub(super) fn open( open_whole_partial(c, aad, in_out_slice, src, ctr, tag_iv, aarch64::open_whole) } + #[cfg(target_arch = "x86_64")] + DynKey::VAesClMulAvx512(c) => open_whole_partial( + c, + aad, + in_out_slice, + src, + ctr, + tag_iv, + vaesclmulavx512::open_whole, + ), + #[cfg(target_arch = "x86_64")] DynKey::VAesClMulAvx2(c) => open_whole_partial( c, diff --git a/src/aead/aes_gcm/vaesclmulavx512.rs b/src/aead/aes_gcm/vaesclmulavx512.rs new file mode 100644 
index 0000000000..6ebb27166b --- /dev/null +++ b/src/aead/aes_gcm/vaesclmulavx512.rs @@ -0,0 +1,91 @@ +// Copyright 2015-2025 Brian Smith. +// +// Permission to use, copy, modify, and/or distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY +// SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION +// OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +// CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +#![cfg(target_arch = "x86_64")] + +use super::{aes, gcm, Counter, BLOCK_LEN}; +use crate::{aead::aes::Overlapping, c, polyfill::slice::AsChunksMut}; +use core::num::{NonZeroU32, NonZeroUsize}; + +pub(super) fn seal_whole( + aes_key: &aes::hw::Key, + auth: &mut gcm::Context, + ctr: &mut Counter, + mut in_out: AsChunksMut, +) { + prefixed_extern! { + fn aes_gcm_enc_update_vaes_avx512( + input: *const u8, + output: *mut u8, + len: c::NonZero_size_t, // TODO? zero OK? + key: &aes::AES_KEY, + ivec: &Counter, + Htable: &gcm::HTable, + Xi: &mut gcm::Xi); + } + + let in_out = in_out.as_flattened_mut(); + + // Precondition: Since we have a `gcm::Context` then the number of blocks + // must fit in `u32`. 
+ let blocks = u32::try_from(in_out.len() / BLOCK_LEN).unwrap(); + + if let Some(len) = NonZeroUsize::new(in_out.len()) { + let aes_key = aes_key.inner_less_safe(); + let (htable, xi) = auth.inner(); + let input = in_out.as_ptr(); + let output = in_out.as_mut_ptr(); + unsafe { aes_gcm_enc_update_vaes_avx512(input, output, len, aes_key, ctr, htable, xi) }; + let blocks = NonZeroU32::new(blocks).unwrap_or_else(|| { + unreachable!() // Due to previous checks. + }); + ctr.increment_by_less_safe(blocks); + } +} + +pub(super) fn open_whole( + aes_key: &aes::hw::Key, + auth: &mut gcm::Context, + in_out: Overlapping, + ctr: &mut Counter, +) { + prefixed_extern! { + fn aes_gcm_dec_update_vaes_avx512( + input: *const u8, + output: *mut u8, + len: c::NonZero_size_t, // TODO? zero OK? + key: &aes::AES_KEY, + ivec: &mut Counter, + Htable: &gcm::HTable, + Xi: &mut gcm::Xi); + } + + // Precondition. TODO: Create an overlapping::AsChunks for this. + assert_eq!(in_out.len() % BLOCK_LEN, 0); + // Precondition: Since we have a `gcm::Context` then the number of blocks + // must fit in `u32`. + let blocks = u32::try_from(in_out.len() / BLOCK_LEN).unwrap(); + + in_out.with_input_output_len(|input, output, len| { + if let Some(len) = NonZeroUsize::new(len) { + let aes_key = aes_key.inner_less_safe(); + let (htable, xi) = auth.inner(); + unsafe { aes_gcm_dec_update_vaes_avx512(input, output, len, aes_key, ctr, htable, xi) }; + let blocks = NonZeroU32::new(blocks).unwrap_or_else(|| { + unreachable!() // Due to previous checks. 
+ }); + ctr.increment_by_less_safe(blocks); + } + }) +} diff --git a/src/aead/gcm.rs b/src/aead/gcm.rs index 443c19e16b..b95ff8c70e 100644 --- a/src/aead/gcm.rs +++ b/src/aead/gcm.rs @@ -39,6 +39,7 @@ pub(super) mod clmulavxmovbe; pub(super) mod fallback; pub(super) mod neon; pub(super) mod vclmulavx2; +pub(super) mod vclmulavx512; pub(super) struct Context<'key, K> { Xi: Xi, @@ -128,6 +129,15 @@ impl Context<'_, vclmulavx2::Key> { } } +#[cfg(target_arch = "x86_64")] +impl Context<'_, vclmulavx512::Key> { + /// Access to `inner` for the integrated AES-GCM implementations only. + #[inline] + pub(super) fn inner(&mut self) -> (&HTable, &mut Xi) { + (self.key.inner(), &mut self.Xi) + } +} + impl Context<'_, K> { #[inline(always)] pub fn update_blocks(&mut self, input: AsChunks) { diff --git a/src/aead/gcm/vclmulavx2.rs b/src/aead/gcm/vclmulavx2.rs index 2646b52f8f..8dad064a7f 100644 --- a/src/aead/gcm/vclmulavx2.rs +++ b/src/aead/gcm/vclmulavx2.rs @@ -27,6 +27,7 @@ pub struct Key { } impl Key { + #[inline(never)] pub(in super::super) fn new(value: KeyValue, _cpu: (Avx2, VAesClmul)) -> Self { Self { h_table: unsafe { htable_new!(gcm_init_vpclmulqdq_avx2, value) }, diff --git a/src/aead/gcm/vclmulavx512.rs b/src/aead/gcm/vclmulavx512.rs new file mode 100644 index 0000000000..e804d719d5 --- /dev/null +++ b/src/aead/gcm/vclmulavx512.rs @@ -0,0 +1,49 @@ +// Copyright 2018-2025 Brian Smith. +// +// Permission to use, copy, modify, and/or distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY +// SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION +// OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +// CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +#![cfg(target_arch = "x86_64")] + +use super::{ffi::KeyValue, HTable, UpdateBlock, Xi}; +use crate::{ + aead::gcm::ffi::BLOCK_LEN, + cpu::intel::{Avx2, Avx512_BW_VL_ZMM, Bmi2, VAesClmul}, + polyfill::slice::AsChunks, +}; + +#[derive(Clone)] +pub struct Key { + h_table: HTable, +} + +impl Key { + pub(in super::super) fn new( + value: KeyValue, + _cpu: (Avx2, Avx512_BW_VL_ZMM, Bmi2, VAesClmul), + ) -> Self { + Self { + h_table: unsafe { htable_new!(gcm_init_vpclmulqdq_avx512, value) }, + } + } + + pub(super) fn inner(&self) -> &HTable { + &self.h_table + } +} + +impl UpdateBlock for Key { + fn update_block(&self, xi: &mut Xi, a: [u8; BLOCK_LEN]) { + let input: AsChunks = (&a).into(); + unsafe { ghash!(gcm_ghash_vpclmulqdq_avx512_16, xi, &self.h_table, input) } + } +} diff --git a/src/cpu.rs b/src/cpu.rs index 293e1b5355..cc73e32fb6 100644 --- a/src/cpu.rs +++ b/src/cpu.rs @@ -113,6 +113,20 @@ where } } +impl GetFeature<(A, B, C, D)> for features::Values +where + features::Values: GetFeature<(A, B)>, + features::Values: GetFeature<(C, D)>, +{ + #[inline(always)] + fn get_feature(&self) -> Option<(A, B, C, D)> { + match (self.get_feature(), self.get_feature()) { + (Some((a, b)), Some((c, d))) => Some((a, b, c, d)), + _ => None, + } + } +} + impl GetFeature for Features where features::Values: GetFeature, diff --git a/src/cpu/intel.rs b/src/cpu/intel.rs index f34f6e385b..5acb9c71cd 100644 --- a/src/cpu/intel.rs +++ b/src/cpu/intel.rs @@ -139,6 +139,11 @@ fn cpuid_to_caps_and_set_c_flags(cpuid: &[u32; 4]) -> u32 { #[cfg(target_arch = "x86_64")] let (extended_features_ebx, extended_features_ecx) = (cpuid[2], cpuid[3]); + // 
`OPENSSL_cpuid_setup` synthesizes this bit when it detects an Intel + // CPU family that is known to downclock when ZMM registers are used. + #[cfg(target_arch = "x86_64")] + let avoid_zmm = check(cpuid[2], 14); + let mut caps = 0; // AMD: "Collectively the SSE1, [...] are referred to as the legacy SSE @@ -236,6 +241,35 @@ fn cpuid_to_caps_and_set_c_flags(cpuid: &[u32; 4]) -> u32 { // calling into the C code. let flag = unsafe { &avx2_available }; flag.store(1, core::sync::atomic::Ordering::Relaxed); + + // AVX-512. + // Initial releases of macOS 12 had a serious bug w.r.t. AVX-512 + // support; see https://go-review.googlesource.com/c/sys/+/620256. + // Given that, plus Apple's transition to ARM, AVX-512 isn't worth + // supporting for their targets. + #[cfg(not(target_vendor = "apple"))] + { + // Intel: "15.3 DETECTION OF 512-BIT INSTRUCTION GROUPS OF THE INTEL + // AVX-512 FAMILY". + // `OPENSSL_cpuid_setup` clears these bits when XCR0[7:5] isn't 0b111, + // i.e. when the OS doesn't support AVX-512 state. + let f = check(extended_features_ebx, 16); + let bw = check(extended_features_ebx, 30); + + // Intel: "15.4 DETECTION OF INTEL AVX-512 INSTRUCTION GROUPS + // OPERATING AT 256 AND 128-BIT VECTOR LENGTHS" + let vl = check(extended_features_ebx, 31); + + // Intel: "15.4 DETECTION OF INTEL AVX-512 INSTRUCTION GROUPS + // OPERATING AT 256 AND 128-BIT VECTOR LENGTHS." + if !avoid_zmm && f { + // Intel: "Table 15-2. Feature Flag Collection Required of + // 256/128 Bit Vector Lengths for Each Instruction Group." + if bw && vl { + set(&mut caps, Shift::Avx512_BW_VL_ZMM) + } + } + } } // Intel: "12.13.4 Checking for Intel AES-NI Support" @@ -348,6 +382,11 @@ impl_get_feature! { { ("x86", "x86_64") => Aes }, { ("x86", "x86_64") => Avx }, { ("x86_64") => Bmi1 }, + + // AVX512BW + AVX512VL + AND using ZMM registers isn't expected to cause + // downclocking. + { ("x86_64") => Avx512_BW_VL_ZMM }, + + { ("x86_64") => Avx2 }, { ("x86_64") => Bmi2 }, { ("x86_64") => Adx },