@@ -690,14 +690,10 @@ static int sec_skcipher_fbtfm_init(struct crypto_skcipher *tfm)
 
 	c_ctx->fallback = false;
 
-	/* Currently, only XTS mode need fallback tfm when using 192bit key */
-	if (likely(strncmp(alg, "xts", SEC_XTS_NAME_SZ)))
-		return 0;
-
 	c_ctx->fbtfm = crypto_alloc_sync_skcipher(alg, 0,
 						  CRYPTO_ALG_NEED_FALLBACK);
 	if (IS_ERR(c_ctx->fbtfm)) {
-		pr_err("failed to alloc xts mode fallback tfm!\n");
+		pr_err("failed to alloc fallback tfm for %s!\n", alg);
 		return PTR_ERR(c_ctx->fbtfm);
 	}
 
@@ -857,7 +853,7 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
 	}
 
 	memcpy(c_ctx->c_key, key, keylen);
-	if (c_ctx->fallback && c_ctx->fbtfm) {
+	if (c_ctx->fbtfm) {
 		ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen);
 		if (ret) {
 			dev_err(dev, "failed to set fallback skcipher key!\n");
@@ -1159,8 +1155,10 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
 	}
 
 	ret = crypto_authenc_extractkeys(&keys, key, keylen);
-	if (ret)
+	if (ret) {
+		dev_err(dev, "sec extract aead keys err!\n");
 		goto bad_key;
+	}
 
 	ret = sec_aead_aes_set_key(c_ctx, &keys);
 	if (ret) {
@@ -1174,12 +1172,6 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
 		goto bad_key;
 	}
 
-	if (ctx->a_ctx.a_key_len & WORD_MASK) {
-		ret = -EINVAL;
-		dev_err(dev, "AUTH key length error!\n");
-		goto bad_key;
-	}
-
 	ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
 	if (ret) {
 		dev_err(dev, "set sec fallback key err!\n");
@@ -1999,8 +1991,7 @@ static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm)
 	return sec_aead_ctx_init(tfm, "sha512");
 }
 
-static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx,
-				       struct sec_req *sreq)
+static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx, struct sec_req *sreq)
 {
 	u32 cryptlen = sreq->c_req.sk_req->cryptlen;
 	struct device *dev = ctx->dev;
@@ -2022,10 +2013,6 @@ static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx,
 		}
 		break;
 	case SEC_CMODE_CTR:
-		if (unlikely(ctx->sec->qm.ver < QM_HW_V3)) {
-			dev_err(dev, "skcipher HW version error!\n");
-			ret = -EINVAL;
-		}
 		break;
 	default:
 		ret = -EINVAL;
@@ -2034,17 +2021,21 @@ static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx,
 	return ret;
 }
 
-static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
+static int sec_skcipher_param_check(struct sec_ctx *ctx,
+				    struct sec_req *sreq, bool *need_fallback)
 {
 	struct skcipher_request *sk_req = sreq->c_req.sk_req;
 	struct device *dev = ctx->dev;
 	u8 c_alg = ctx->c_ctx.c_alg;
 
-	if (unlikely(!sk_req->src || !sk_req->dst ||
-		     sk_req->cryptlen > MAX_INPUT_DATA_LEN)) {
+	if (unlikely(!sk_req->src || !sk_req->dst)) {
 		dev_err(dev, "skcipher input param error!\n");
 		return -EINVAL;
 	}
+
+	if (sk_req->cryptlen > MAX_INPUT_DATA_LEN)
+		*need_fallback = true;
+
 	sreq->c_req.c_len = sk_req->cryptlen;
 
 	if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ)
@@ -2102,6 +2093,7 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
 	struct sec_req *req = skcipher_request_ctx(sk_req);
 	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+	bool need_fallback = false;
 	int ret;
 
 	if (!sk_req->cryptlen) {
@@ -2115,11 +2107,11 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
 	req->c_req.encrypt = encrypt;
 	req->ctx = ctx;
 
-	ret = sec_skcipher_param_check(ctx, req);
+	ret = sec_skcipher_param_check(ctx, req, &need_fallback);
 	if (unlikely(ret))
 		return -EINVAL;
 
-	if (unlikely(ctx->c_ctx.fallback))
+	if (unlikely(ctx->c_ctx.fallback || need_fallback))
 		return sec_skcipher_soft_crypto(ctx, sk_req, encrypt);
 
 	return ctx->req_op->process(ctx, req);
@@ -2227,52 +2219,35 @@ static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	size_t sz = crypto_aead_authsize(tfm);
 	u8 c_mode = ctx->c_ctx.c_mode;
-	struct device *dev = ctx->dev;
 	int ret;
 
-	/* Hardware does not handle cases where authsize is not 4 bytes aligned */
-	if (c_mode == SEC_CMODE_CBC && (sz & WORD_MASK)) {
-		sreq->aead_req.fallback = true;
+	if (unlikely(ctx->sec->qm.ver == QM_HW_V2 && !sreq->c_req.c_len))
 		return -EINVAL;
-	}
 
 	if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
-	    req->assoclen > SEC_MAX_AAD_LEN)) {
-		dev_err(dev, "aead input spec error!\n");
+	    req->assoclen > SEC_MAX_AAD_LEN))
 		return -EINVAL;
-	}
 
 	if (c_mode == SEC_CMODE_CCM) {
-		if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN)) {
-			dev_err_ratelimited(dev, "CCM input aad parameter is too long!\n");
+		if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN))
 			return -EINVAL;
-		}
-		ret = aead_iv_demension_check(req);
-		if (ret) {
-			dev_err(dev, "aead input iv param error!\n");
-			return ret;
-		}
-	}
 
-	if (sreq->c_req.encrypt)
-		sreq->c_req.c_len = req->cryptlen;
-	else
-		sreq->c_req.c_len = req->cryptlen - sz;
-	if (c_mode == SEC_CMODE_CBC) {
-		if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
-			dev_err(dev, "aead crypto length error!\n");
+		ret = aead_iv_demension_check(req);
+		if (unlikely(ret))
+			return -EINVAL;
+	} else if (c_mode == SEC_CMODE_CBC) {
+		if (unlikely(sz & WORD_MASK))
+			return -EINVAL;
+		if (unlikely(ctx->a_ctx.a_key_len & WORD_MASK))
 			return -EINVAL;
-		}
 	}
 
 	return 0;
 }
 
-static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
+static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq, bool *need_fallback)
 {
 	struct aead_request *req = sreq->aead_req.aead_req;
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	size_t authsize = crypto_aead_authsize(tfm);
 	struct device *dev = ctx->dev;
 	u8 c_alg = ctx->c_ctx.c_alg;
 
@@ -2281,12 +2256,10 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
 		return -EINVAL;
 	}
 
-	if (ctx->sec->qm.ver == QM_HW_V2) {
-		if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt &&
-		    req->cryptlen <= authsize))) {
-			sreq->aead_req.fallback = true;
-			return -EINVAL;
-		}
+	if (unlikely(ctx->c_ctx.c_mode == SEC_CMODE_CBC &&
+	    sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
+		dev_err(dev, "aead cbc mode input data length error!\n");
+		return -EINVAL;
 	}
 
 	/* Support AES or SM4 */
@@ -2295,8 +2268,10 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
 		return -EINVAL;
 	}
 
-	if (unlikely(sec_aead_spec_check(ctx, sreq)))
+	if (unlikely(sec_aead_spec_check(ctx, sreq))) {
+		*need_fallback = true;
 		return -EINVAL;
+	}
 
 	if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <=
 			SEC_PBUF_SZ)
@@ -2340,17 +2315,19 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
 	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
 	struct sec_req *req = aead_request_ctx(a_req);
 	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+	size_t sz = crypto_aead_authsize(tfm);
+	bool need_fallback = false;
 	int ret;
 
 	req->flag = a_req->base.flags;
 	req->aead_req.aead_req = a_req;
 	req->c_req.encrypt = encrypt;
 	req->ctx = ctx;
-	req->aead_req.fallback = false;
+	req->c_req.c_len = a_req->cryptlen - (req->c_req.encrypt ? 0 : sz);
 
-	ret = sec_aead_param_check(ctx, req);
+	ret = sec_aead_param_check(ctx, req, &need_fallback);
 	if (unlikely(ret)) {
-		if (req->aead_req.fallback)
+		if (need_fallback)
 			return sec_aead_soft_crypto(ctx, a_req, encrypt);
 		return -EINVAL;
 	}