diff --git a/backport-CVE-2021-3580.patch b/backport-CVE-2021-3580.patch
new file mode 100644
index 0000000..0cf8ae3
--- /dev/null
+++ b/backport-CVE-2021-3580.patch
@@ -0,0 +1,382 @@
+Description: Fix for CVE-2021-3580 - potential crash on invalid input to the RSA decryption functions.
+Origin: upstream, https://git.lysator.liu.se/nettle/nettle/-/compare/7a5f86321f4c67d7219aa87ea4e2ddca677d7378...0ad0b5df315665250dfdaa4a1e087f4799edaefe
+Bug-RedHat: https://bugzilla.redhat.com/show_bug.cgi?id=CVE-2021-3580
+
+--- a/pkcs1-sec-decrypt.c
++++ b/pkcs1-sec-decrypt.c
+@@ -64,7 +64,9 @@ _pkcs1_sec_decrypt (size_t length, uint8
+   volatile int ok;
+   size_t i, t;
+
+-  assert (padded_message_length >= length);
++  /* Message independent branch */
++  if (length + 11 > padded_message_length)
++    return 0;
+
+   t = padded_message_length - length - 1;
+
+--- a/rsa-decrypt-tr.c
++++ b/rsa-decrypt-tr.c
+@@ -52,14 +52,17 @@ rsa_decrypt_tr(const struct rsa_public_k
+   mp_size_t key_limb_size;
+   int res;
+
+-  key_limb_size = NETTLE_OCTET_SIZE_TO_LIMB_SIZE(key->size);
++  /* First check that input is in range. */
++  if (mpz_sgn (gibberish) < 0 || mpz_cmp (gibberish, pub->n) >= 0)
++    return 0;
++
++  key_limb_size = mpz_size(pub->n);
+
+   TMP_GMP_ALLOC (m, key_limb_size);
+   TMP_GMP_ALLOC (em, key->size);
++  mpz_limbs_copy(m, gibberish, key_limb_size);
+
+-  res = _rsa_sec_compute_root_tr (pub, key, random_ctx, random, m,
+-                                  mpz_limbs_read(gibberish),
+-                                  mpz_size(gibberish));
++  res = _rsa_sec_compute_root_tr (pub, key, random_ctx, random, m, m);
+
+   mpn_get_base256 (em, key->size, m, key_limb_size);
+
+--- a/rsa-decrypt.c
++++ b/rsa-decrypt.c
+@@ -48,6 +48,16 @@ rsa_decrypt(const struct rsa_private_key
+   int res;
+
+   mpz_init(m);
++
++  /* First check that input is in range. Since we don't have the
++     public key available here, we need to reconstruct n. */
++  mpz_mul (m, key->p, key->q);
++  if (mpz_sgn (gibberish) < 0 || mpz_cmp (gibberish, m) >= 0)
++    {
++      mpz_clear (m);
++      return 0;
++    }
++
+   rsa_compute_root(key, m, gibberish);
+
+   res = pkcs1_decrypt (key->size, m, length, message);
+--- a/rsa-internal.h
++++ b/rsa-internal.h
+@@ -53,11 +53,11 @@ _rsa_sec_compute_root(const struct rsa_p
+                       mp_limb_t *scratch);
+
+ /* Safe side-channel silent variant, using RSA blinding, and checking the
+- * result after CRT. */
++ * result after CRT. In-place calls, with x == m, is allowed. */
+ int
+ _rsa_sec_compute_root_tr(const struct rsa_public_key *pub,
+                          const struct rsa_private_key *key,
+                          void *random_ctx, nettle_random_func *random,
+-                         mp_limb_t *x, const mp_limb_t *m, size_t mn);
++                         mp_limb_t *x, const mp_limb_t *m);
+
+ #endif /* NETTLE_RSA_INTERNAL_H_INCLUDED */
+--- a/rsa-sec-decrypt.c
++++ b/rsa-sec-decrypt.c
+@@ -54,12 +54,19 @@ rsa_sec_decrypt(const struct rsa_public_
+   TMP_GMP_DECL (em, uint8_t);
+   int res;
+
++  /* First check that input is in range. */
++  if (mpz_sgn (gibberish) < 0 || mpz_cmp (gibberish, pub->n) >= 0)
++    return 0;
++
+   TMP_GMP_ALLOC (m, mpz_size(pub->n));
+   TMP_GMP_ALLOC (em, key->size);
+
+-  res = _rsa_sec_compute_root_tr (pub, key, random_ctx, random, m,
+-                                  mpz_limbs_read(gibberish),
+-                                  mpz_size(gibberish));
++  /* We need a copy because m can be shorter than key_size,
++   * but _rsa_sec_compute_root_tr expect all inputs to be
++   * normalized to a key_size long buffer length */
++  mpz_limbs_copy(m, gibberish, mpz_size(pub->n));
++
++  res = _rsa_sec_compute_root_tr (pub, key, random_ctx, random, m, m);
+
+   mpn_get_base256 (em, key->size, m, mpz_size(pub->n));
+
+--- a/rsa-sign-tr.c
++++ b/rsa-sign-tr.c
+@@ -131,35 +131,34 @@ int
+ _rsa_sec_compute_root_tr(const struct rsa_public_key *pub,
+                          const struct rsa_private_key *key,
+                          void *random_ctx, nettle_random_func *random,
+-                         mp_limb_t *x, const mp_limb_t *m, size_t mn)
++                         mp_limb_t *x, const mp_limb_t *m)
+ {
++  mp_size_t nn;
+   mpz_t mz;
+   mpz_t xz;
+   int res;
+
+-  mpz_init(mz);
+   mpz_init(xz);
+
+-  mpn_copyi(mpz_limbs_write(mz, mn), m, mn);
+-  mpz_limbs_finish(mz, mn);
++  nn = mpz_size (pub->n);
+
+-  res = rsa_compute_root_tr(pub, key, random_ctx, random, xz, mz);
++  res = rsa_compute_root_tr(pub, key, random_ctx, random, xz,
++                            mpz_roinit_n(mz, m, nn));
+
+   if (res)
+-    mpz_limbs_copy(x, xz, mpz_size(pub->n));
++    mpz_limbs_copy(x, xz, nn);
+
+-  mpz_clear(mz);
+   mpz_clear(xz);
+   return res;
+ }
+ #else
+ /* Blinds m, by computing c = m r^e (mod n), for a random r. Also
+-   returns the inverse (ri), for use by rsa_unblind. */
++   returns the inverse (ri), for use by rsa_unblind. Must have c != m,
++   no in-place operation.*/
+ static void
+ rsa_sec_blind (const struct rsa_public_key *pub,
+                void *random_ctx, nettle_random_func *random,
+-               mp_limb_t *c, mp_limb_t *ri, const mp_limb_t *m,
+-               mp_size_t mn)
++               mp_limb_t *c, mp_limb_t *ri, const mp_limb_t *m)
+ {
+   const mp_limb_t *ep = mpz_limbs_read (pub->e);
+   const mp_limb_t *np = mpz_limbs_read (pub->n);
+@@ -177,15 +176,15 @@ rsa_sec_blind (const struct rsa_public_k
+
+   /* c = m*(r^e) mod n */
+   itch = mpn_sec_powm_itch(nn, ebn, nn);
+-  i2 = mpn_sec_mul_itch(nn, mn);
++  i2 = mpn_sec_mul_itch(nn, nn);
+   itch = MAX(itch, i2);
+-  i2 = mpn_sec_div_r_itch(nn + mn, nn);
++  i2 = mpn_sec_div_r_itch(2*nn, nn);
+   itch = MAX(itch, i2);
+   i2 = mpn_sec_invert_itch(nn);
+   itch = MAX(itch, i2);
+
+-  TMP_GMP_ALLOC (tp, nn + mn + itch);
+-  scratch = tp + nn + mn;
++  TMP_GMP_ALLOC (tp, 2*nn + itch);
++  scratch = tp + 2*nn;
+
+   /* ri = r^(-1) */
+   do
+@@ -198,9 +197,8 @@ rsa_sec_blind (const struct rsa_public_k
+   while (!mpn_sec_invert (ri, tp, np, nn, 2 * nn * GMP_NUMB_BITS, scratch));
+
+   mpn_sec_powm (c, rp, nn, ep, ebn, np, nn, scratch);
+-  /* normally mn == nn, but m can be smaller in some cases */
+-  mpn_sec_mul (tp, c, nn, m, mn, scratch);
+-  mpn_sec_div_r (tp, nn + mn, np, nn, scratch);
++  mpn_sec_mul (tp, c, nn, m, nn, scratch);
++  mpn_sec_div_r (tp, 2*nn, np, nn, scratch);
+   mpn_copyi(c, tp, nn);
+
+   TMP_GMP_FREE (r);
+@@ -208,7 +206,7 @@ rsa_sec_blind (const struct rsa_public_k
+   TMP_GMP_FREE (tp);
+ }
+
+-/* m = c ri mod n */
++/* m = c ri mod n. Allows x == c. */
+ static void
+ rsa_sec_unblind (const struct rsa_public_key *pub,
+                  mp_limb_t *x, mp_limb_t *ri, const mp_limb_t *c)
+@@ -298,7 +296,7 @@ int
+ _rsa_sec_compute_root_tr(const struct rsa_public_key *pub,
+                          const struct rsa_private_key *key,
+                          void *random_ctx, nettle_random_func *random,
+-                         mp_limb_t *x, const mp_limb_t *m, size_t mn)
++                         mp_limb_t *x, const mp_limb_t *m)
+ {
+   TMP_GMP_DECL (c, mp_limb_t);
+   TMP_GMP_DECL (ri, mp_limb_t);
+@@ -306,7 +304,7 @@ _rsa_sec_compute_root_tr(const struct rs
+   size_t key_limb_size;
+   int ret;
+
+-  key_limb_size = NETTLE_OCTET_SIZE_TO_LIMB_SIZE(key->size);
++  key_limb_size = mpz_size(pub->n);
+
+   /* mpz_powm_sec handles only odd moduli. If p, q or n is even, the
+      key is invalid and rejected by rsa_private_key_prepare. However,
+@@ -320,19 +318,18 @@ _rsa_sec_compute_root_tr(const struct rs
+     }
+
+   assert(mpz_size(pub->n) == key_limb_size);
+-  assert(mn <= key_limb_size);
+
+   TMP_GMP_ALLOC (c, key_limb_size);
+   TMP_GMP_ALLOC (ri, key_limb_size);
+   TMP_GMP_ALLOC (scratch, _rsa_sec_compute_root_itch(key));
+
+-  rsa_sec_blind (pub, random_ctx, random, x, ri, m, mn);
++  rsa_sec_blind (pub, random_ctx, random, c, ri, m);
+
+-  _rsa_sec_compute_root(key, c, x, scratch);
++  _rsa_sec_compute_root(key, x, c, scratch);
+
+-  ret = rsa_sec_check_root(pub, c, x);
++  ret = rsa_sec_check_root(pub, x, c);
+
+-  rsa_sec_unblind(pub, x, ri, c);
++  rsa_sec_unblind(pub, x, ri, x);
+
+   cnd_mpn_zero(1 - ret, x, key_limb_size);
+
+@@ -356,17 +353,17 @@ rsa_compute_root_tr(const struct rsa_pub
+                     mpz_t x, const mpz_t m)
+ {
+   TMP_GMP_DECL (l, mp_limb_t);
++  mp_size_t nn = mpz_size(pub->n);
+   int res;
+
+-  mp_size_t l_size = NETTLE_OCTET_SIZE_TO_LIMB_SIZE(key->size);
+-  TMP_GMP_ALLOC (l, l_size);
++  TMP_GMP_ALLOC (l, nn);
++  mpz_limbs_copy(l, m, nn);
+
+-  res = _rsa_sec_compute_root_tr (pub, key, random_ctx, random, l,
+-                                  mpz_limbs_read(m), mpz_size(m));
++  res = _rsa_sec_compute_root_tr (pub, key, random_ctx, random, l, l);
+   if (res) {
+-    mp_limb_t *xp = mpz_limbs_write (x, l_size);
+-    mpn_copyi (xp, l, l_size);
+-    mpz_limbs_finish (x, l_size);
++    mp_limb_t *xp = mpz_limbs_write (x, nn);
++    mpn_copyi (xp, l, nn);
++    mpz_limbs_finish (x, nn);
+   }
+
+   TMP_GMP_FREE (l);
+--- a/rsa.h
++++ b/rsa.h
+@@ -433,13 +433,14 @@ rsa_sec_decrypt(const struct rsa_public_
+                 size_t length, uint8_t *message,
+                 const mpz_t gibberish);
+
+-/* Compute x, the e:th root of m. Calling it with x == m is allowed. */
++/* Compute x, the e:th root of m. Calling it with x == m is allowed.
++   It is required that 0 <= m < n. */
+ void
+ rsa_compute_root(const struct rsa_private_key *key,
+                  mpz_t x, const mpz_t m);
+
+ /* Safer variant, using RSA blinding, and checking the result after
+-   CRT. */
++   CRT. It is required that 0 <= m < n. */
+ int
+ rsa_compute_root_tr(const struct rsa_public_key *pub,
+                     const struct rsa_private_key *key,
+--- a/testsuite/rsa-encrypt-test.c
++++ b/testsuite/rsa-encrypt-test.c
+@@ -19,10 +19,12 @@ test_main(void)
+   uint8_t after;
+
+   mpz_t gibberish;
++  mpz_t bad_input;
+
+   rsa_private_key_init(&key);
+   rsa_public_key_init(&pub);
+   mpz_init(gibberish);
++  mpz_init(bad_input);
+
+   knuth_lfib_init(&lfib, 17);
+
+@@ -101,6 +103,42 @@
+   ASSERT(decrypted[decrypted_length] == after);
+   ASSERT(decrypted[0] == 'A');
+
++  /* Test zero input. */
++  mpz_set_ui (bad_input, 0);
++  decrypted_length = msg_length;
++  ASSERT(!rsa_decrypt(&key, &decrypted_length, decrypted, bad_input));
++  ASSERT(!rsa_decrypt_tr(&pub, &key,
++                         &lfib, (nettle_random_func *) knuth_lfib_random,
++                         &decrypted_length, decrypted, bad_input));
++  ASSERT(!rsa_sec_decrypt(&pub, &key,
++                          &lfib, (nettle_random_func *) knuth_lfib_random,
++                          decrypted_length, decrypted, bad_input));
++  ASSERT(decrypted_length == msg_length);
++
++  /* Test input that is slightly larger than n */
++  mpz_add(bad_input, gibberish, pub.n);
++  decrypted_length = msg_length;
++  ASSERT(!rsa_decrypt(&key, &decrypted_length, decrypted, bad_input));
++  ASSERT(!rsa_decrypt_tr(&pub, &key,
++                         &lfib, (nettle_random_func *) knuth_lfib_random,
++                         &decrypted_length, decrypted, bad_input));
++  ASSERT(!rsa_sec_decrypt(&pub, &key,
++                          &lfib, (nettle_random_func *) knuth_lfib_random,
++                          decrypted_length, decrypted, bad_input));
++  ASSERT(decrypted_length == msg_length);
++
++  /* Test input that is considerably larger than n */
++  mpz_mul_2exp (bad_input, pub.n, 100);
++  mpz_add (bad_input, bad_input, gibberish);
++  decrypted_length = msg_length;
++  ASSERT(!rsa_decrypt(&key, &decrypted_length, decrypted, bad_input));
++  ASSERT(!rsa_decrypt_tr(&pub, &key,
++                         &lfib, (nettle_random_func *) knuth_lfib_random,
++                         &decrypted_length, decrypted, bad_input));
++  ASSERT(!rsa_sec_decrypt(&pub, &key,
++                          &lfib, (nettle_random_func *) knuth_lfib_random,
++                          decrypted_length, decrypted, bad_input));
++  ASSERT(decrypted_length == msg_length);
+
+   /* Test invalid key. */
+   mpz_add_ui (key.q, key.q, 2);
+@@ -112,6 +150,6 @@
+   rsa_private_key_clear(&key);
+   rsa_public_key_clear(&pub);
+   mpz_clear(gibberish);
++  mpz_clear(bad_input);
+   free(decrypted);
+ }
+-
+--- a/testsuite/rsa-sec-decrypt-test.c
++++ b/testsuite/rsa-sec-decrypt-test.c
+@@ -55,6 +55,7 @@ rsa_decrypt_for_test(const struct rsa_pu
+ #endif
+
+ #define PAYLOAD_SIZE 50
++#define DECRYPTED_SIZE 256
+ void
+ test_main(void)
+ {
+@@ -63,7 +64,7 @@
+   struct knuth_lfib_ctx random_ctx;
+
+   uint8_t plaintext[PAYLOAD_SIZE];
+-  uint8_t decrypted[PAYLOAD_SIZE];
++  uint8_t decrypted[DECRYPTED_SIZE];
+   uint8_t verifybad[PAYLOAD_SIZE];
+   unsigned n_size = 1024;
+   mpz_t gibberish;
+@@ -98,6 +99,20 @@
+                                 PAYLOAD_SIZE, decrypted, gibberish) == 1);
+   ASSERT (MEMEQ (PAYLOAD_SIZE, plaintext, decrypted));
+
++  ASSERT (pub.size > 10);
++  ASSERT (pub.size <= DECRYPTED_SIZE);
++
++  /* Check that too large message length is rejected, largest
++     valid size is pub.size - 11. */
++  ASSERT (!rsa_decrypt_for_test (&pub, &key, &random_ctx,
++                                 (nettle_random_func *) knuth_lfib_random,
++                                 pub.size - 10, decrypted, gibberish));
++
++  /* This case used to result in arithmetic underflow and a crash. */
++  ASSERT (!rsa_decrypt_for_test (&pub, &key, &random_ctx,
++                                 (nettle_random_func *) knuth_lfib_random,
++                                 pub.size, decrypted, gibberish));
++
+   /* bad one */
+   memcpy(decrypted, verifybad, PAYLOAD_SIZE);
+   nettle_mpz_random_size(garbage, &random_ctx,
+
diff --git a/nettle.spec b/nettle.spec
index 68491ee..0cc52aa 100644
--- a/nettle.spec
+++ b/nettle.spec
@@ -1,6 +1,6 @@
 Name: nettle
 Version: 3.6
-Release: 6
+Release: 7
 Summary: A low-level cryptographic library
 License: LGPLv3+ or GPLv2+
 URL: https://www.lysator.liu.se/~nisse/nettle/
@@ -15,6 +15,7 @@
 Patch6003: backport-0004-CVE-2021-20305.patch
 Patch6004: backport-0005-CVE-2021-20305.patch
 Patch6005: backport-0006-CVE-2021-20305.patch
 Patch6006: backport-0007-CVE-2021-20305.patch
+Patch6007: backport-CVE-2021-3580.patch
 BuildRequires: automake autoconf fipscheck gcc gettext-devel gmp-devel libtool m4
 BuildRequires: nettle
@@ -86,6 +87,11 @@
 make check
 %ldconfig_scriptlets
 %changelog
+* Mon Aug 16 2021 gaihuiying - 3.6-7
+- Type:CVE
+- CVE:CVE-2021-3580
+- SUG:NA
+- DESC:fix CVE-2021-3580
* Mon Apr 19 2021 xihaochen - 3.6-6
- Type:CVE
- CVE:CVE-2021-20305
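
Reviewer note, not part of the patch or the spec: a minimal standalone sketch of how a caller can exercise the behaviour this backport enforces, namely that rsa_decrypt() now returns 0 for a ciphertext outside the range 0 <= c < n instead of operating on out-of-range input. The key size (1024), e_size (17), RNG seed and buffer size below are arbitrary illustration values, and the knuth_lfib generator is a test-only RNG, exactly as used in the patched testsuite. Link with -lhogweed -lnettle -lgmp.

#include <assert.h>
#include <nettle/rsa.h>
#include <nettle/knuth-lfib.h>

int
main(void)
{
  struct rsa_public_key pub;
  struct rsa_private_key key;
  struct knuth_lfib_ctx lfib;   /* non-cryptographic RNG, test use only */
  mpz_t bad;
  uint8_t buf[256];
  size_t len = sizeof(buf);

  rsa_public_key_init(&pub);
  rsa_private_key_init(&key);
  knuth_lfib_init(&lfib, 4711);
  mpz_init(bad);

  /* Generate a throwaway 1024-bit key pair. */
  assert(rsa_generate_keypair(&pub, &key, &lfib,
                              (nettle_random_func *) knuth_lfib_random,
                              NULL, NULL, 1024, 17));

  /* A ciphertext >= n is out of range; with the backport applied,
     rsa_decrypt rejects it instead of proceeding on invalid input. */
  mpz_add_ui(bad, pub.n, 1);
  assert(rsa_decrypt(&key, &len, buf, bad) == 0);

  mpz_clear(bad);
  rsa_public_key_clear(&pub);
  rsa_private_key_clear(&key);
  return 0;
}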