#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <sys/debug.h>
#include <sys/types.h>
#include <security/cryptoki.h>
#include <aes_impl.h>
#include <cryptoutil.h>
#include "softSession.h"
#include "softObject.h"
#include "softCrypt.h"
#include "softOps.h"
/*
 * Validate an AES mechanism and its parameter, and allocate the matching
 * mode-specific AES context.  On success, *ctxp points at the newly
 * allocated context and CKR_OK is returned; on failure *ctxp is left
 * untouched.
 */
static CK_RV
soft_aes_check_mech_param(CK_MECHANISM_PTR mech, aes_ctx_t **ctxp)
{
	void *(*ctx_alloc)(int) = NULL;
	size_t expect_len = 0;
	boolean_t need_param = B_TRUE;

	switch (mech->mechanism) {
	case CKM_AES_ECB:
		need_param = B_FALSE;
		ctx_alloc = ecb_alloc_ctx;
		break;
	case CKM_AES_CMAC:
		need_param = B_FALSE;
		ctx_alloc = cmac_alloc_ctx;
		break;
	case CKM_AES_CMAC_GENERAL:
		expect_len = sizeof (CK_MAC_GENERAL_PARAMS);
		ctx_alloc = cmac_alloc_ctx;
		break;
	case CKM_AES_GMAC:
		/* The GMAC parameter is a fixed-length IV. */
		expect_len = AES_GMAC_IV_LEN;
		ctx_alloc = gmac_alloc_ctx;
		break;
	case CKM_AES_CBC:
	case CKM_AES_CBC_PAD:
		/* The CBC parameter is the IV, one block long. */
		expect_len = AES_BLOCK_LEN;
		ctx_alloc = cbc_alloc_ctx;
		break;
	case CKM_AES_CTR:
		expect_len = sizeof (CK_AES_CTR_PARAMS);
		ctx_alloc = ctr_alloc_ctx;
		break;
	case CKM_AES_CCM:
		expect_len = sizeof (CK_CCM_PARAMS);
		ctx_alloc = ccm_alloc_ctx;
		break;
	case CKM_AES_GCM:
		expect_len = sizeof (CK_GCM_PARAMS);
		ctx_alloc = gcm_alloc_ctx;
		break;
	default:
		return (CKR_MECHANISM_INVALID);
	}

	/* Mechanisms that take a parameter must supply one of exact size. */
	if (need_param &&
	    (mech->pParameter == NULL || mech->ulParameterLen != expect_len))
		return (CKR_MECHANISM_PARAM_INVALID);

	if ((*ctxp = ctx_alloc(0)) == NULL)
		return (CKR_HOST_MEMORY);

	return (CKR_OK);
}
/*
 * Allocate a per-operation AES key schedule for aes_ctx and initialize it
 * from the raw key value in key_p.  For non-sensitive keys, the expanded
 * schedule is also cached on the key object (guarded by object_mutex) so
 * subsequent operations copy it instead of re-expanding the key; sensitive
 * keys are never cached.  Returns CKR_KEY_SIZE_RANGE for unsupported key
 * lengths and CKR_HOST_MEMORY on allocation failure.
 */
static CK_RV
soft_aes_init_key(aes_ctx_t *aes_ctx, soft_object_t *key_p)
{
	void *ks = NULL;
	size_t size = 0;
	CK_RV rv = CKR_OK;

	(void) pthread_mutex_lock(&key_p->object_mutex);

	/* Only 128-, 192-, and 256-bit keys are valid for AES. */
	switch (OBJ_SEC_VALUE_LEN(key_p)) {
	case AES_MIN_KEY_BYTES:
	case AES_MAX_KEY_BYTES:
	case AES_192_KEY_BYTES:
		break;
	default:
		rv = CKR_KEY_SIZE_RANGE;
		goto done;
	}

	/* Per-operation copy of the key schedule. */
	ks = aes_alloc_keysched(&size, 0);
	if (ks == NULL) {
		rv = CKR_HOST_MEMORY;
		goto done;
	}

	/*
	 * Sensitive keys are expanded directly into the per-operation
	 * schedule and are never cached on the key object.
	 */
	if (key_p->bool_attr_mask & SENSITIVE_BOOL_ON) {
#ifdef __sparcv9
		aes_init_keysched(OBJ_SEC_VALUE(key_p), (uint_t)
		    (OBJ_SEC_VALUE_LEN(key_p) * NBBY), ks);
#else	/* !__sparcv9 */
		aes_init_keysched(OBJ_SEC_VALUE(key_p),
		    (OBJ_SEC_VALUE_LEN(key_p) * NBBY), ks);
#endif	/* __sparcv9 */
		goto done;
	}

	/*
	 * First use of a non-sensitive key: expand the key once and cache
	 * the schedule on the key object itself.
	 */
	if (OBJ_KEY_SCHED(key_p) == NULL) {
		void *obj_ks = NULL;

		obj_ks = aes_alloc_keysched(&size, 0);
		if (obj_ks == NULL) {
			rv = CKR_HOST_MEMORY;
			goto done;
		}
#ifdef __sparcv9
		aes_init_keysched(OBJ_SEC_VALUE(key_p),
		    (uint_t)(OBJ_SEC_VALUE_LEN(key_p) * 8), obj_ks);
#else	/* !__sparcv9 */
		aes_init_keysched(OBJ_SEC_VALUE(key_p),
		    (OBJ_SEC_VALUE_LEN(key_p) * 8), obj_ks);
#endif	/* __sparcv9 */
		OBJ_KEY_SCHED_LEN(key_p) = size;
		OBJ_KEY_SCHED(key_p) = obj_ks;
	}

	/* Copy the cached schedule into the per-operation schedule. */
	(void) memcpy(ks, OBJ_KEY_SCHED(key_p), OBJ_KEY_SCHED_LEN(key_p));

done:
	(void) pthread_mutex_unlock(&key_p->object_mutex);

	if (rv == CKR_OK) {
		aes_ctx->ac_keysched = ks;
		aes_ctx->ac_keysched_len = size;
	} else {
		/* Scrub and free the partially initialized schedule. */
		freezero(ks, size);
	}

	return (rv);
}
/*
 * Perform the mode-specific initialization of an AES context whose key
 * schedule has already been attached by soft_aes_init_key().  The
 * mechanism parameter is expected to have been validated by
 * soft_aes_check_mech_param() beforehand.
 */
static CK_RV
soft_aes_init_ctx(aes_ctx_t *aes_ctx, CK_MECHANISM_PTR mech_p,
    boolean_t encrypt)
{
	int rc = CRYPTO_SUCCESS;

	switch (mech_p->mechanism) {
	case CKM_AES_ECB:
		/* ECB takes no IV or parameter; just tag the mode. */
		aes_ctx->ac_flags |= ECB_MODE;
		break;
	case CKM_AES_CMAC:
	case CKM_AES_CMAC_GENERAL:
		rc = cmac_init_ctx((cbc_ctx_t *)aes_ctx, AES_BLOCK_LEN);
		break;
	case CKM_AES_GMAC: {
		/*
		 * CKM_AES_GMAC's raw parameter is just the IV; wrap it in
		 * the params structure the GMAC implementation expects.
		 */
		CK_AES_GMAC_PARAMS gmac_params = {
			.pIv = mech_p->pParameter,
			.pAAD = NULL,
			.ulAADLen = 0
		};

		rc = gmac_init_ctx((gcm_ctx_t *)aes_ctx, (char *)&gmac_params,
		    AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
		    aes_xor_block);
		break;
	}
	case CKM_AES_CBC:
	case CKM_AES_CBC_PAD:
		/* The mechanism parameter is the IV. */
		rc = cbc_init_ctx((cbc_ctx_t *)aes_ctx, mech_p->pParameter,
		    mech_p->ulParameterLen, AES_BLOCK_LEN, aes_copy_block64);
		break;
	case CKM_AES_CTR:
	{
		CK_AES_CTR_PARAMS *pp = (CK_AES_CTR_PARAMS *)mech_p->pParameter;

		rc = ctr_init_ctx((ctr_ctx_t *)aes_ctx, pp->ulCounterBits,
		    pp->cb, aes_encrypt_block, aes_copy_block);
		break;
	}
	case CKM_AES_CCM: {
		/*
		 * Translate the PKCS#11 CK_CCM_PARAMS layout into the
		 * CK_AES_CCM_PARAMS layout used by the CCM implementation.
		 */
		CK_CCM_PARAMS *pp = (CK_CCM_PARAMS *)mech_p->pParameter;
		CK_AES_CCM_PARAMS ccm_params = {
			.ulMACSize = pp->ulMACLen,
			.ulNonceSize = pp->ulNonceLen,
			.ulAuthDataSize = pp->ulAADLen,
			.ulDataSize = pp->ulDataLen,
			.nonce = pp->pNonce,
			.authData = pp->pAAD
		};

		rc = ccm_init_ctx((ccm_ctx_t *)aes_ctx, (char *)&ccm_params, 0,
		    encrypt, AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
		break;
	}
	case CKM_AES_GCM:
		rc = gcm_init_ctx((gcm_ctx_t *)aes_ctx, mech_p->pParameter,
		    AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
		    aes_xor_block);
		break;
	}
	/*
	 * NOTE(review): no default case -- an unknown mechanism leaves rc
	 * at CRYPTO_SUCCESS.  Callers are expected to have rejected such
	 * mechanisms via soft_aes_check_mech_param() first.
	 */

	return (crypto2pkcs11_error_number(rc));
}
/*
 * Common initialization for C_EncryptInit/C_DecryptInit with an AES
 * mechanism: validate the mechanism and key, build a fully initialized
 * AES context, and install it as the session's encrypt or decrypt
 * context (as selected by 'encrypt').
 */
CK_RV
soft_aes_crypt_init_common(soft_session_t *session_p,
    CK_MECHANISM_PTR pMechanism, soft_object_t *key_p,
    boolean_t encrypt)
{
	aes_ctx_t *aes_ctx = NULL;
	CK_RV rv;

	if (key_p->key_type != CKK_AES)
		return (CKR_KEY_TYPE_INCONSISTENT);

	/*
	 * Validate the mechanism parameter and allocate the context.  On
	 * failure nothing was allocated, so we can return directly.
	 */
	rv = soft_aes_check_mech_param(pMechanism, &aes_ctx);
	if (rv != CKR_OK)
		return (rv);

	/* Attach the key schedule, then do mode-specific setup. */
	if ((rv = soft_aes_init_key(aes_ctx, key_p)) == CKR_OK)
		rv = soft_aes_init_ctx(aes_ctx, pMechanism, encrypt);

	if (rv != CKR_OK) {
		soft_aes_free_ctx(aes_ctx);
		return (rv);
	}

	/* Publish the fully initialized context on the session. */
	(void) pthread_mutex_lock(&session_p->session_mutex);
	if (encrypt) {
		session_p->encrypt.context = aes_ctx;
		session_p->encrypt.mech.mechanism = pMechanism->mechanism;
	} else {
		session_p->decrypt.context = aes_ctx;
		session_p->decrypt.mech.mechanism = pMechanism->mechanism;
	}
	(void) pthread_mutex_unlock(&session_p->session_mutex);

	return (CKR_OK);
}
/*
 * Single-part AES encrypt (C_Encrypt).
 *
 * Implements the PKCS#11 output-buffer convention: if pEncryptedData is
 * NULL, only the required output size is reported and the operation stays
 * active; if the supplied buffer is too small, CKR_BUFFER_TOO_SMALL is
 * returned and the operation stays active; any other outcome terminates
 * the operation and frees the session's encrypt context.
 */
CK_RV
soft_aes_encrypt(soft_session_t *session_p, CK_BYTE_PTR pData,
    CK_ULONG ulDataLen, CK_BYTE_PTR pEncryptedData,
    CK_ULONG_PTR pulEncryptedDataLen)
{
	aes_ctx_t *aes_ctx = session_p->encrypt.context;
	CK_MECHANISM_TYPE mech = session_p->encrypt.mech.mechanism;
	size_t length_needed;
	size_t remainder;
	int rc = CRYPTO_SUCCESS;
	CK_RV rv = CKR_OK;
	crypto_data_t out = {
		.cd_format = CRYPTO_DATA_RAW,
		.cd_offset = 0,
		.cd_length = *pulEncryptedDataLen,
		.cd_raw.iov_base = (char *)pEncryptedData,
		.cd_raw.iov_len = *pulEncryptedDataLen
	};

	/*
	 * Zero-length input is only meaningful for the authenticated and
	 * MAC modes, which still emit a tag/MAC for empty data.
	 */
	if ((pData == NULL || ulDataLen == 0) &&
	    !(aes_ctx->ac_flags & (CCM_MODE|GCM_MODE|CMAC_MODE|GMAC_MODE))) {
		return (CKR_ARGUMENTS_BAD);
	}

	remainder = ulDataLen % AES_BLOCK_LEN;

	/*
	 * Mechanisms that neither pad nor stream require the input to be
	 * a multiple of the AES block size (e.g. ECB, CBC).
	 */
	switch (mech) {
	case CKM_AES_CBC_PAD:
	case CKM_AES_CMAC:
	case CKM_AES_CMAC_GENERAL:
	case CKM_AES_CTR:
	case CKM_AES_CCM:
	case CKM_AES_GCM:
	case CKM_AES_GMAC:
		break;
	default:
		if (remainder != 0) {
			rv = CKR_DATA_LEN_RANGE;
			goto cleanup;
		}
	}

	/* Compute the total output size for this mechanism. */
	switch (mech) {
	case CKM_AES_CCM:
		/* Ciphertext plus the CCM MAC. */
		length_needed = ulDataLen + aes_ctx->ac_mac_len;
		break;
	case CKM_AES_GCM:
		/* Ciphertext plus the GCM tag. */
		length_needed = ulDataLen + aes_ctx->ac_tag_len;
		break;
	case CKM_AES_CMAC:
	case CKM_AES_CMAC_GENERAL:
		/* Only the MAC is emitted. */
		length_needed = AES_BLOCK_LEN;
		break;
	case CKM_AES_GMAC:
		length_needed = AES_BLOCK_LEN;
		break;
	case CKM_AES_CBC_PAD:
		/* Input rounded up to a full block of PKCS#7 padding. */
		length_needed = ulDataLen + AES_BLOCK_LEN - remainder;
		break;
	default:
		length_needed = ulDataLen;
		break;
	}

	/* Size-only query: report the size; operation stays active. */
	if (pEncryptedData == NULL) {
		*pulEncryptedDataLen = length_needed;
		return (CKR_OK);
	}

	if (*pulEncryptedDataLen < length_needed) {
		*pulEncryptedDataLen = length_needed;
		return (CKR_BUFFER_TOO_SMALL);
	}

	if (ulDataLen > 0) {
		rv = soft_aes_encrypt_update(session_p, pData, ulDataLen,
		    pEncryptedData, pulEncryptedDataLen);

		if (rv != CKR_OK) {
			rv = CKR_FUNCTION_FAILED;
			goto cleanup;
		}

		/*
		 * Position the output at the end of what the update call
		 * produced so the finalization below appends to it.
		 */
		out.cd_offset = *pulEncryptedDataLen;
	}

	/* Mechanism-specific finalization. */
	switch (mech) {
	case CKM_AES_CBC_PAD: {
		/*
		 * Append PKCS#7 padding: 'amt' bytes, each of value 'amt',
		 * complete the final block (the partial input block, if
		 * any, is still buffered in the context).
		 */
		char block[AES_BLOCK_LEN];
		size_t amt = AES_BLOCK_LEN - remainder;

		VERIFY3U(remainder, ==, aes_ctx->ac_remainder_len);
		(void) memset(block, amt & 0xff, sizeof (block));
		rc = aes_encrypt_contiguous_blocks(aes_ctx, block, amt, &out);
		rv = crypto2pkcs11_error_number(rc);
		explicit_bzero(block, sizeof (block));
		break;
	}
	case CKM_AES_CCM:
		rc = ccm_encrypt_final((ccm_ctx_t *)aes_ctx, &out,
		    AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
		rv = crypto2pkcs11_error_number(rc);
		break;
	case CKM_AES_GCM:
		rc = gcm_encrypt_final((gcm_ctx_t *)aes_ctx, &out,
		    AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
		    aes_xor_block);
		rv = crypto2pkcs11_error_number(rc);
		break;
	case CKM_AES_CMAC:
	case CKM_AES_CMAC_GENERAL:
		rc = cmac_mode_final((cbc_ctx_t *)aes_ctx, &out,
		    aes_encrypt_block, aes_xor_block);
		rv = crypto2pkcs11_error_number(rc);
		aes_ctx->ac_remainder_len = 0;
		break;
	case CKM_AES_GMAC:
		rc = gmac_mode_final((gcm_ctx_t *)aes_ctx, &out,
		    AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
		rv = crypto2pkcs11_error_number(rc);
		break;
	case CKM_AES_CTR:
		/* Stream mode: nothing left to flush. */
		break;
	case CKM_AES_ECB:
	case CKM_AES_CBC:
		/* Block-multiple input was verified above; nothing to do. */
		break;
	default:
		rv = CKR_MECHANISM_INVALID;
		break;
	}

cleanup:
	switch (rv) {
	case CKR_OK:
		*pulEncryptedDataLen = out.cd_offset;
		break;
	case CKR_BUFFER_TOO_SMALL:
		/* Defensive: too-small buffers normally return early above. */
		break;
	default:
		*pulEncryptedDataLen = 0;
		break;
	}

	/* Single-part operation: always terminate and free the context. */
	(void) pthread_mutex_lock(&session_p->session_mutex);
	soft_aes_free_ctx(aes_ctx);
	session_p->encrypt.context = NULL;
	(void) pthread_mutex_unlock(&session_p->session_mutex);

	return (rv);
}
/*
 * Decrypt a complete CKM_AES_CBC_PAD ciphertext and strip the PKCS#7
 * padding.
 *
 * The plaintext is shorter than the ciphertext by the pad length, which
 * is unknown until the data is decrypted, so an output buffer smaller
 * than ulEncryptedDataLen may still be large enough.  In that case the
 * ciphertext is decrypted "speculatively" into a temporary buffer using
 * copies of the AES context and key schedule (so that a failure does not
 * disturb the caller's context), and the result is copied out only if
 * the unpadded plaintext actually fits.
 */
static CK_RV
soft_aes_cbc_pad_decrypt(aes_ctx_t *aes_ctx, CK_BYTE_PTR pEncryptedData,
    CK_ULONG ulEncryptedDataLen, crypto_data_t *out_orig)
{
	aes_ctx_t *ctx = aes_ctx;
	uint8_t *buf = NULL;
	uint8_t *outbuf = (uint8_t *)out_orig->cd_raw.iov_base;
	crypto_data_t out = *out_orig;
	size_t i;
	int rc;
	CK_RV rv = CKR_OK;
	uint8_t pad_len;
	boolean_t speculate = B_FALSE;

	/*
	 * Size-only query: the unpadded plaintext cannot be longer than
	 * the ciphertext, so report that as the upper bound.
	 */
	if (out_orig->cd_raw.iov_base == NULL) {
		out_orig->cd_length = ulEncryptedDataLen;
		return (CKR_OK);
	}

	/* Output buffer may be too small; decrypt into copies first. */
	if (out_orig->cd_length < ulEncryptedDataLen) {
		void *ks = malloc(aes_ctx->ac_keysched_len);

		ctx = malloc(sizeof (*aes_ctx));
		buf = malloc(ulEncryptedDataLen);
		if (ks == NULL || ctx == NULL || buf == NULL) {
			free(ks);
			free(ctx);
			free(buf);
			return (CKR_HOST_MEMORY);
		}

		/* Work on copies so the caller's context is untouched. */
		bcopy(aes_ctx, ctx, sizeof (*ctx));
		bcopy(aes_ctx->ac_keysched, ks, aes_ctx->ac_keysched_len);
		ctx->ac_keysched = ks;

		out.cd_length = ulEncryptedDataLen;
		out.cd_raw.iov_base = (char *)buf;
		out.cd_raw.iov_len = ulEncryptedDataLen;

		outbuf = buf;
		speculate = B_TRUE;
	}

	rc = aes_decrypt_contiguous_blocks(ctx, (char *)pEncryptedData,
	    ulEncryptedDataLen, &out);
	if (rc != CRYPTO_SUCCESS) {
		out_orig->cd_offset = 0;
		rv = CKR_FUNCTION_FAILED;
		goto done;
	}

	/* PKCS#7: the final byte gives the pad length (1..AES_BLOCK_LEN). */
	pad_len = outbuf[ulEncryptedDataLen - 1];
	if (pad_len == 0 || pad_len > AES_BLOCK_LEN) {
		rv = CKR_ENCRYPTED_DATA_INVALID;
		goto done;
	}

	/* Strip the padding from the reported plaintext length. */
	out.cd_offset -= pad_len;

	/*
	 * Validate every pad byte; the loop deliberately does not exit
	 * early, so all pad bytes are always examined.
	 */
	for (i = ulEncryptedDataLen - pad_len; i < ulEncryptedDataLen; i++) {
		if (outbuf[i] != pad_len) {
			rv = CKR_ENCRYPTED_DATA_INVALID;
		}
	}

	if (rv != CKR_OK) {
		goto done;
	}

	/* Copy the unpadded plaintext out if it fits after all. */
	if (speculate) {
		if (out.cd_offset <= out_orig->cd_length) {
			bcopy(out.cd_raw.iov_base, out_orig->cd_raw.iov_base,
			    out.cd_offset);
		} else {
			rv = CKR_BUFFER_TOO_SMALL;
		}
	}

	out_orig->cd_offset = out.cd_offset;

done:
	/* Scrub the temporary plaintext and the context/keysched copies. */
	freezero(buf, ulEncryptedDataLen);
	if (ctx != aes_ctx) {
		VERIFY(speculate);
		soft_aes_free_ctx(ctx);
	}

	return (rv);
}
/*
 * Single-part AES decrypt (C_Decrypt).  Follows the PKCS#11 output
 * buffer convention (NULL output = size query; too-small buffer =
 * CKR_BUFFER_TOO_SMALL, both leaving the operation active); any other
 * outcome terminates the operation and frees the decrypt context.
 */
CK_RV
soft_aes_decrypt(soft_session_t *session_p, CK_BYTE_PTR pEncryptedData,
    CK_ULONG ulEncryptedDataLen, CK_BYTE_PTR pData, CK_ULONG_PTR pulDataLen)
{
	aes_ctx_t *aes_ctx = session_p->decrypt.context;
	CK_MECHANISM_TYPE mech = session_p->decrypt.mech.mechanism;
	size_t length_needed;
	size_t remainder;
	int rc = CRYPTO_SUCCESS;
	CK_RV rv = CKR_OK;
	crypto_data_t out = {
		.cd_format = CRYPTO_DATA_RAW,
		.cd_offset = 0,
		.cd_length = *pulDataLen,
		.cd_raw.iov_base = (char *)pData,
		.cd_raw.iov_len = *pulDataLen
	};

	/*
	 * Zero-length ciphertext is only meaningful for the authenticated
	 * modes, which still carry a MAC/tag to verify.
	 */
	if ((pEncryptedData == NULL || ulEncryptedDataLen == 0) &&
	    !(aes_ctx->ac_flags & (CCM_MODE|GCM_MODE))) {
		return (CKR_ARGUMENTS_BAD);
	}

	remainder = ulEncryptedDataLen % AES_BLOCK_LEN;

	/*
	 * Mechanisms that neither pad nor stream require the ciphertext
	 * to be a multiple of the AES block size.
	 */
	switch (mech) {
	case CKM_AES_CMAC:
	case CKM_AES_CMAC_GENERAL:
	case CKM_AES_CTR:
	case CKM_AES_CCM:
	case CKM_AES_GCM:
	case CKM_AES_GMAC:
		break;
	default:
		if (remainder != 0) {
			rv = CKR_DATA_LEN_RANGE;
			goto cleanup;
		}
	}

	/* CBC_PAD has its own size negotiation and pad handling. */
	if (mech == CKM_AES_CBC_PAD) {
		rv = soft_aes_cbc_pad_decrypt(aes_ctx, pEncryptedData,
		    ulEncryptedDataLen, &out);
		if (pData == NULL || rv == CKR_BUFFER_TOO_SMALL) {
			/* Size query / retry: keep the operation active. */
			*pulDataLen = out.cd_offset;
			return (rv);
		}
		goto cleanup;
	}

	/* Compute the plaintext size for the remaining mechanisms. */
	switch (aes_ctx->ac_flags & (CCM_MODE|GCM_MODE)) {
	case CCM_MODE:
		length_needed = aes_ctx->ac_processed_data_len;
		break;
	case GCM_MODE:
		/* The trailing tag is not part of the plaintext. */
		length_needed = ulEncryptedDataLen - aes_ctx->ac_tag_len;
		break;
	default:
		length_needed = ulEncryptedDataLen;
	}

	/* Size-only query: keep the operation active. */
	if (pData == NULL) {
		*pulDataLen = length_needed;
		return (CKR_OK);
	}

	if (*pulDataLen < length_needed) {
		*pulDataLen = length_needed;
		return (CKR_BUFFER_TOO_SMALL);
	}

	if (ulEncryptedDataLen > 0) {
		rv = soft_aes_decrypt_update(session_p, pEncryptedData,
		    ulEncryptedDataLen, pData, pulDataLen);
	}

	if (rv != CKR_OK) {
		rv = CKR_FUNCTION_FAILED;
		goto cleanup;
	}

	/* Continue output where the update call left off. */
	out.cd_offset = *pulDataLen;

	if (aes_ctx->ac_flags & CCM_MODE) {
		ASSERT3U(aes_ctx->ac_processed_data_len, ==,
		    aes_ctx->ac_data_len);
		ASSERT3U(aes_ctx->ac_processed_mac_len, ==,
		    aes_ctx->ac_mac_len);

		rc = ccm_decrypt_final((ccm_ctx_t *)aes_ctx, &out,
		    AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
		    aes_xor_block);
		rv = crypto2pkcs11_error_number(rc);
	} else if (aes_ctx->ac_flags & GCM_MODE) {
		rc = gcm_decrypt_final((gcm_ctx_t *)aes_ctx, &out,
		    AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
		rv = crypto2pkcs11_error_number(rc);
	}

cleanup:
	if (rv == CKR_OK) {
		*pulDataLen = out.cd_offset;
	} else {
		*pulDataLen = 0;
	}

	/* Single-part operation: always terminate and free the context. */
	(void) pthread_mutex_lock(&session_p->session_mutex);
	soft_aes_free_ctx(aes_ctx);
	session_p->decrypt.context = NULL;
	(void) pthread_mutex_unlock(&session_p->session_mutex);

	return (rv);
}
/*
 * Multi-part AES encrypt (C_EncryptUpdate).  Computes how much output
 * this chunk produces, honors the PKCS#11 size-query / buffer-too-small
 * convention, and feeds the data through the underlying mode
 * implementation.  On failure the caller is expected to terminate the
 * operation.
 */
CK_RV
soft_aes_encrypt_update(soft_session_t *session_p, CK_BYTE_PTR pData,
    CK_ULONG ulDataLen, CK_BYTE_PTR pEncryptedData,
    CK_ULONG_PTR pulEncryptedDataLen)
{
	aes_ctx_t *aes_ctx = session_p->encrypt.context;
	crypto_data_t out = {
		.cd_format = CRYPTO_DATA_RAW,
		.cd_offset = 0,
		.cd_length = *pulEncryptedDataLen,
		.cd_raw.iov_base = (char *)pEncryptedData,
		.cd_raw.iov_len = *pulEncryptedDataLen
	};
	CK_MECHANISM_TYPE mech = session_p->encrypt.mech.mechanism;
	CK_RV rv = CKR_OK;
	size_t out_len;
	int rc;

	IMPLY(pData == NULL, ulDataLen == 0);

	/* Determine how much output this chunk will produce. */
	switch (mech) {
	case CKM_AES_CMAC:
	case CKM_AES_CMAC_GENERAL:
	case CKM_AES_GMAC:
		/*
		 * MAC modes buffer everything internally and emit output
		 * only at *_final.  CKM_AES_CMAC_GENERAL is handled
		 * together with CKM_AES_CMAC here just as it is in every
		 * other mechanism switch in this file (previously it fell
		 * into the default block-rounding branch and advertised a
		 * nonzero output size).
		 */
		out_len = 0;
		break;
	case CKM_AES_CTR:
		/* Stream mode: output size always equals input size. */
		out_len = ulDataLen;
		break;
	default:
		/* Block modes emit full blocks; the rest stays buffered. */
		out_len = aes_ctx->ac_remainder_len + ulDataLen;
		out_len &= ~(AES_BLOCK_LEN - 1);
		break;
	}

	/* Size-only query: keep the operation active. */
	if (pEncryptedData == NULL) {
		*pulEncryptedDataLen = out_len;
		return (CKR_OK);
	}

	if (*pulEncryptedDataLen < out_len) {
		*pulEncryptedDataLen = out_len;
		return (CKR_BUFFER_TOO_SMALL);
	}

	rc = aes_encrypt_contiguous_blocks(aes_ctx, (char *)pData, ulDataLen,
	    &out);

	/* Report how much was actually produced even on failure. */
	*pulEncryptedDataLen = out.cd_offset;
	if (rc != CRYPTO_SUCCESS) {
		return (CKR_FUNCTION_FAILED);
	}

	rv = crypto2pkcs11_error_number(rc);
	return (rv);
}
/*
 * Multi-part AES decrypt (C_DecryptUpdate).
 *
 * For CKM_AES_CBC_PAD, the final full ciphertext block is always held
 * back in aes_ctx->ac_remainder: until more data (or *_final) arrives it
 * is impossible to know whether that block is the one carrying the
 * padding, and it must not be emitted with the pad bytes attached.
 */
CK_RV
soft_aes_decrypt_update(soft_session_t *session_p, CK_BYTE_PTR pEncryptedData,
    CK_ULONG ulEncryptedDataLen, CK_BYTE_PTR pData, CK_ULONG_PTR pulDataLen)
{
	aes_ctx_t *aes_ctx = session_p->decrypt.context;
	uint8_t *buffer_block = NULL;
	crypto_data_t out = {
		.cd_format = CRYPTO_DATA_RAW,
		.cd_offset = 0,
		.cd_length = *pulDataLen,
		.cd_raw.iov_base = (char *)pData,
		.cd_raw.iov_len = *pulDataLen
	};
	CK_MECHANISM_TYPE mech = session_p->decrypt.mech.mechanism;
	CK_RV rv = CKR_OK;
	size_t in_len = ulEncryptedDataLen;
	size_t out_len;
	int rc = CRYPTO_SUCCESS;

	switch (mech) {
	case CKM_AES_CCM:
	case CKM_AES_GCM:
		/* Authenticated modes buffer everything until *_final. */
		out_len = 0;
		break;
	case CKM_AES_CBC_PAD:
		VERIFY3U(aes_ctx->ac_remainder_len, <=, AES_BLOCK_LEN);
		/* Guard the ac_remainder_len + in_len sum below. */
		if (in_len >= SIZE_MAX - AES_BLOCK_LEN)
			return (CKR_ENCRYPTED_DATA_LEN_RANGE);

		out_len = aes_ctx->ac_remainder_len + in_len;

		if (out_len <= AES_BLOCK_LEN) {
			/*
			 * Not even one full block yet: stash the input in
			 * the context and emit nothing.
			 */
			uint8_t *dest = (uint8_t *)aes_ctx->ac_remainder +
			    aes_ctx->ac_remainder_len;

			bcopy(pEncryptedData, dest, in_len);
			aes_ctx->ac_remainder_len += in_len;
			*pulDataLen = 0;
			return (CKR_OK);
		} else if (out_len % AES_BLOCK_LEN == 0) {
			/*
			 * An exact multiple of blocks: hold back the last
			 * input block (it may carry the padding).
			 * Remember where it lives so it can be stashed
			 * into the context after the decrypt below.
			 */
			VERIFY3U(in_len, >=, AES_BLOCK_LEN);

			buffer_block = pEncryptedData + in_len - AES_BLOCK_LEN;
			in_len -= AES_BLOCK_LEN;
			out_len -= AES_BLOCK_LEN;
		} else {
			/* Emit full blocks; the rest stays buffered. */
			out_len &= ~(AES_BLOCK_LEN - 1);
		}
		break;
	case CKM_AES_CTR:
		/* Stream mode: output size equals input size. */
		out_len = in_len;
		break;
	default:
		/* Block modes emit full blocks; the rest stays buffered. */
		out_len = aes_ctx->ac_remainder_len + in_len;
		out_len &= ~(AES_BLOCK_LEN - 1);
		break;
	}

	/* Size-only query: keep the operation active. */
	if (pData == NULL) {
		*pulDataLen = out_len;
		return (CKR_OK);
	}

	if (*pulDataLen < out_len) {
		*pulDataLen = out_len;
		return (CKR_BUFFER_TOO_SMALL);
	}

	rc = aes_decrypt_contiguous_blocks(aes_ctx, (char *)pEncryptedData,
	    in_len, &out);

	if (rc != CRYPTO_SUCCESS) {
		rv = CKR_FUNCTION_FAILED;
		goto done;
	}

	*pulDataLen = out.cd_offset;

	switch (mech) {
	case CKM_AES_CBC_PAD:
		if (buffer_block == NULL) {
			break;
		}

		/* Stash the held-back final block for *_final. */
		VERIFY0(aes_ctx->ac_remainder_len);
		bcopy(buffer_block, aes_ctx->ac_remainder, AES_BLOCK_LEN);
		aes_ctx->ac_remainder_len = AES_BLOCK_LEN;
		break;
	}

done:
	return (rv);
}
/*
 * Finish a multi-part AES encrypt (C_EncryptFinal).  Flushes any
 * mechanism-specific trailing output (padding block, MAC, or tag) and
 * terminates the operation.  Follows the PKCS#11 convention: a NULL
 * output pointer is a size query and a too-small buffer returns
 * CKR_BUFFER_TOO_SMALL, both leaving the operation active.
 */
CK_RV
soft_aes_encrypt_final(soft_session_t *session_p,
    CK_BYTE_PTR pLastEncryptedPart, CK_ULONG_PTR pulLastEncryptedPartLen)
{
	aes_ctx_t *aes_ctx = session_p->encrypt.context;
	crypto_data_t data = {
		.cd_format = CRYPTO_DATA_RAW,
		.cd_offset = 0,
		.cd_length = *pulLastEncryptedPartLen,
		.cd_raw.iov_base = (char *)pLastEncryptedPart,
		.cd_raw.iov_len = *pulLastEncryptedPartLen
	};
	CK_MECHANISM_TYPE mech = session_p->encrypt.mech.mechanism;
	CK_RV rv = CKR_OK;
	size_t out_len;
	int rc = CRYPTO_SUCCESS;

	/* Determine how much trailing output the mechanism produces. */
	switch (mech) {
	case CKM_AES_CBC_PAD:
		/* Always exactly one final (padding) block. */
		out_len = AES_BLOCK_LEN;
		break;
	case CKM_AES_CTR:
		/* Stream mode: all output was produced by the updates. */
		out_len = 0;
		break;
	case CKM_AES_CCM:
		/* Any buffered data plus the CCM MAC. */
		out_len = aes_ctx->ac_remainder_len +
		    aes_ctx->acu.acu_ccm.ccm_mac_len;
		break;
	case CKM_AES_GCM:
		/* Any buffered data plus the GCM tag. */
		out_len = aes_ctx->ac_remainder_len +
		    aes_ctx->acu.acu_gcm.gcm_tag_len;
		break;
	case CKM_AES_CMAC:
	case CKM_AES_CMAC_GENERAL:
		/* The MAC is emitted here. */
		out_len = AES_BLOCK_LEN;
		break;
	case CKM_AES_GMAC:
		out_len = AES_BLOCK_LEN;
		break;
	default:
		/* ECB/CBC: leftover input means non-block-multiple data. */
		if (aes_ctx->ac_remainder_len > 0) {
			rv = CKR_DATA_LEN_RANGE;
			goto done;
		}
		out_len = 0;
	}

	/* Size query or too-small buffer: keep the operation active. */
	if (*pulLastEncryptedPartLen < out_len || pLastEncryptedPart == NULL) {
		*pulLastEncryptedPartLen = out_len;
		return ((pLastEncryptedPart == NULL) ?
		    CKR_OK : CKR_BUFFER_TOO_SMALL);
	}

	switch (mech) {
	case CKM_AES_CBC_PAD: {
		/*
		 * Append PKCS#7 padding: padlen bytes of value padlen,
		 * completing the buffered partial block (a whole block of
		 * padding when no partial block is buffered).
		 */
		char block[AES_BLOCK_LEN] = { 0 };
		size_t padlen = AES_BLOCK_LEN - aes_ctx->ac_remainder_len;

		if (padlen == 0) {
			padlen = AES_BLOCK_LEN;
		}

		(void) memset(block, padlen & 0xff, sizeof (block));
		rc = aes_encrypt_contiguous_blocks(aes_ctx, block,
		    padlen, &data);
		explicit_bzero(block, sizeof (block));
		break;
	}
	case CKM_AES_CTR:
		break;
	case CKM_AES_CCM:
		rc = ccm_encrypt_final((ccm_ctx_t *)aes_ctx, &data,
		    AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
		break;
	case CKM_AES_GCM:
		rc = gcm_encrypt_final((gcm_ctx_t *)aes_ctx, &data,
		    AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
		    aes_xor_block);
		break;
	case CKM_AES_CMAC:
	case CKM_AES_CMAC_GENERAL:
		rc = cmac_mode_final((cbc_ctx_t *)aes_ctx, &data,
		    aes_encrypt_block, aes_xor_block);
		break;
	case CKM_AES_GMAC:
		rc = gmac_mode_final((gcm_ctx_t *)aes_ctx, &data,
		    AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
		break;
	default:
		break;
	}

	rv = crypto2pkcs11_error_number(rc);

done:
	if (rv == CKR_OK) {
		*pulLastEncryptedPartLen = data.cd_offset;
	}

	/*
	 * NOTE(review): the context is freed and cleared without taking
	 * session_mutex here, unlike soft_aes_encrypt() -- presumably the
	 * caller serializes access to the session; confirm.
	 */
	soft_aes_free_ctx(aes_ctx);
	session_p->encrypt.context = NULL;

	return (rv);
}
/*
 * Finish a multi-part AES decrypt (C_DecryptFinal).
 *
 * For CKM_AES_CBC_PAD the final ciphertext block was held back by
 * soft_aes_decrypt_update(); it is decrypted here and its PKCS#7 padding
 * validated and stripped.  The P11_DECRYPTED flag records that this has
 * already happened, so a repeated call (e.g. after a size query) does
 * not decrypt the block a second time.
 */
CK_RV
soft_aes_decrypt_final(soft_session_t *session_p, CK_BYTE_PTR pLastPart,
    CK_ULONG_PTR pulLastPartLen)
{
	aes_ctx_t *aes_ctx = session_p->decrypt.context;
	CK_MECHANISM_TYPE mech = session_p->decrypt.mech.mechanism;
	CK_RV rv = CKR_OK;
	int rc = CRYPTO_SUCCESS;
	size_t out_len;
	crypto_data_t out = {
		.cd_format = CRYPTO_DATA_RAW,
		.cd_offset = 0,
		.cd_length = *pulLastPartLen,
		.cd_raw.iov_base = (char *)pLastPart,
		.cd_raw.iov_len = *pulLastPartLen
	};

	switch (mech) {
	case CKM_AES_CBC_PAD:
		if ((aes_ctx->ac_flags & P11_DECRYPTED) == 0) {
			uint8_t block[AES_BLOCK_LEN] = { 0 };
			crypto_data_t block_out = {
				.cd_format = CRYPTO_DATA_RAW,
				.cd_offset = 0,
				.cd_length = sizeof (block),
				.cd_raw.iov_base = (char *)block,
				.cd_raw.iov_len = sizeof (block)
			};
			size_t amt, i;
			uint8_t pad_len;

			/*
			 * A full held-back block must be buffered;
			 * otherwise the ciphertext was not a multiple of
			 * the block size.
			 */
			if (aes_ctx->ac_remainder_len != AES_BLOCK_LEN) {
				return (CKR_DATA_LEN_RANGE);
			}

			/*
			 * A zero-length input flushes the buffered block
			 * through the cipher into block_out.
			 */
			rc = aes_decrypt_contiguous_blocks(aes_ctx,
			    (char *)block, 0, &block_out);
			if (rc != CRYPTO_SUCCESS) {
				explicit_bzero(block, sizeof (block));
				return (CKR_FUNCTION_FAILED);
			}

			/* PKCS#7: last byte is the pad length (1..16). */
			pad_len = block[AES_BLOCK_LEN - 1];
			if (pad_len == 0 || pad_len > AES_BLOCK_LEN) {
				rv = CKR_ENCRYPTED_DATA_INVALID;
				explicit_bzero(block, sizeof (block));
				goto done;
			}

			/*
			 * Validate every pad byte; the loop deliberately
			 * does not exit early.
			 */
			amt = AES_BLOCK_LEN - pad_len;
			for (i = amt; i < AES_BLOCK_LEN; i++) {
				if (block[i] != pad_len) {
					rv = CKR_ENCRYPTED_DATA_INVALID;
				}
			}

			if (rv != CKR_OK) {
				explicit_bzero(block, sizeof (block));
				goto done;
			}

			/*
			 * Keep the unpadded plaintext bytes in the
			 * context until they are copied out below.
			 */
			bcopy(block, aes_ctx->ac_remainder, amt);
			explicit_bzero(block, sizeof (block));

			aes_ctx->ac_flags |= P11_DECRYPTED;
			aes_ctx->ac_remainder_len = amt;
		}

		out_len = aes_ctx->ac_remainder_len;
		break;
	case CKM_AES_CTR:
		/* Stream mode: everything was emitted by the updates. */
		out_len = 0;
		break;
	case CKM_AES_CCM:
		out_len = aes_ctx->ac_data_len;
		break;
	case CKM_AES_GCM:
		/* Buffered ciphertext minus the trailing tag. */
		out_len = aes_ctx->acu.acu_gcm.gcm_processed_data_len -
		    aes_ctx->acu.acu_gcm.gcm_tag_len;
		break;
	default:
		/* ECB/CBC: leftover input means non-block-multiple data. */
		if (aes_ctx->ac_remainder_len > 0) {
			rv = CKR_DATA_LEN_RANGE;
			goto done;
		}
		out_len = 0;
		break;
	}

	/* Size query or too-small buffer: keep the operation active. */
	if (*pulLastPartLen < out_len || pLastPart == NULL) {
		*pulLastPartLen = out_len;
		return ((pLastPart == NULL) ? CKR_OK : CKR_BUFFER_TOO_SMALL);
	}

	switch (mech) {
	case CKM_AES_CBC_PAD:
		/* Emit the unpadded bytes stashed in the context. */
		*pulLastPartLen = out_len;
		if (out_len == 0) {
			break;
		}
		bcopy(aes_ctx->ac_remainder, pLastPart, out_len);
		out.cd_offset += out_len;
		break;
	case CKM_AES_CCM:
		ASSERT3U(aes_ctx->ac_processed_data_len, ==, out_len);
		ASSERT3U(aes_ctx->ac_processed_mac_len, ==,
		    aes_ctx->ac_mac_len);

		rc = ccm_decrypt_final((ccm_ctx_t *)aes_ctx, &out,
		    AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
		    aes_xor_block);
		break;
	case CKM_AES_GCM:
		rc = gcm_decrypt_final((gcm_ctx_t *)aes_ctx, &out,
		    AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
		break;
	default:
		break;
	}

	VERIFY3U(out.cd_offset, ==, out_len);
	rv = crypto2pkcs11_error_number(rc);

done:
	if (rv == CKR_OK) {
		*pulLastPartLen = out.cd_offset;
	}

	/*
	 * NOTE(review): the context is freed without taking session_mutex,
	 * unlike soft_aes_decrypt() -- presumably the caller serializes
	 * access to the session; confirm.
	 */
	soft_aes_free_ctx(aes_ctx);
	session_p->decrypt.context = NULL;

	return (rv);
}
/*
 * Initialize a CMAC/GMAC sign or verify operation.  The MAC is computed
 * by driving the session's encrypt machinery (soft_encrypt_*), so an
 * internal encrypt operation is initialized here alongside the
 * sign/verify context.  For CKM_AES_CMAC_GENERAL, the full CMAC is
 * computed and later truncated to the requested mac_len.
 */
CK_RV
soft_aes_sign_verify_init_common(soft_session_t *session_p,
    CK_MECHANISM_PTR pMechanism, soft_object_t *key_p, boolean_t sign_op)
{
	soft_aes_sign_ctx_t *ctx = NULL;
	size_t mac_len = AES_BLOCK_LEN;
	CK_BYTE iv_zero[AES_BLOCK_LEN] = { 0 };
	CK_MECHANISM encrypt_mech = { 0 };
	CK_RV rv;

	switch (pMechanism->mechanism) {
	case CKM_AES_CMAC_GENERAL:
		/* The parameter is the requested (truncated) MAC length. */
		if (pMechanism->pParameter == NULL) {
			return (CKR_MECHANISM_PARAM_INVALID);
		}

		mac_len = *(CK_MAC_GENERAL_PARAMS *)pMechanism->pParameter;
		if (mac_len > AES_BLOCK_LEN) {
			return (CKR_MECHANISM_PARAM_INVALID);
		}

		/* Computed as full CMAC; truncated when output. */
		encrypt_mech.mechanism = CKM_AES_CMAC;
		encrypt_mech.pParameter = iv_zero;
		encrypt_mech.ulParameterLen = AES_BLOCK_LEN;
		break;
	case CKM_AES_CMAC:
		encrypt_mech.mechanism = CKM_AES_CMAC;
		encrypt_mech.pParameter = iv_zero;
		encrypt_mech.ulParameterLen = AES_BLOCK_LEN;
		break;
	case CKM_AES_GMAC:
		/* The GMAC parameter is a fixed-length IV. */
		if (pMechanism->pParameter == NULL ||
		    pMechanism->ulParameterLen != AES_GMAC_IV_LEN)
			return (CKR_MECHANISM_PARAM_INVALID);

		encrypt_mech.mechanism = CKM_AES_GMAC;
		encrypt_mech.pParameter = pMechanism->pParameter;
		encrypt_mech.ulParameterLen = pMechanism->ulParameterLen;
		break;
	default:
		return (CKR_MECHANISM_INVALID);
	}

	if (key_p->key_type != CKK_AES)
		return (CKR_KEY_TYPE_INCONSISTENT);

	ctx = calloc(1, sizeof (*ctx));
	if (ctx == NULL) {
		return (CKR_HOST_MEMORY);
	}

	/*
	 * BUGFIX: the error paths below used to call
	 * soft_aes_free_ctx(ctx->aes_ctx) before jumping to done:, which
	 * frees ctx->aes_ctx again -- a double free.  The cleanup at
	 * done: is the single owner of ctx and ctx->aes_ctx on failure.
	 */
	rv = soft_aes_check_mech_param(pMechanism, &ctx->aes_ctx);
	if (rv != CKR_OK) {
		goto done;
	}

	if ((rv = soft_encrypt_init_internal(session_p, &encrypt_mech,
	    key_p)) != CKR_OK) {
		goto done;
	}

	ctx->mac_len = mac_len;

	(void) pthread_mutex_lock(&session_p->session_mutex);

	if (sign_op) {
		session_p->sign.context = ctx;
		session_p->sign.mech.mechanism = pMechanism->mechanism;
	} else {
		session_p->verify.context = ctx;
		session_p->verify.mech.mechanism = pMechanism->mechanism;
	}

	(void) pthread_mutex_unlock(&session_p->session_mutex);

done:
	if (rv != CKR_OK) {
		soft_aes_free_ctx(ctx->aes_ctx);
		free(ctx);
	}

	return (rv);
}
/*
 * Compute the MAC for a sign or verify operation (single-part when Final
 * is B_FALSE, the finish of a multi-part operation when B_TRUE) by
 * driving the session's encrypt machinery.  Always terminates the
 * sign/verify operation on exit, except for the PKCS#11 size-query and
 * buffer-too-small early returns.
 */
CK_RV
soft_aes_sign_verify_common(soft_session_t *session_p, CK_BYTE_PTR pData,
    CK_ULONG ulDataLen, CK_BYTE_PTR pSigned, CK_ULONG_PTR pulSignedLen,
    boolean_t sign_op, boolean_t Final)
{
	soft_aes_sign_ctx_t *soft_aes_ctx_sign_verify;
	/*
	 * BUGFIX: rv was uninitialized; the mac_len == 0 early exit below
	 * jumps to clean_exit, which returns rv -- previously returning
	 * an indeterminate value.
	 */
	CK_RV rv = CKR_OK;
	CK_ULONG ulEncryptedLen = AES_BLOCK_LEN;
	CK_BYTE last_block[AES_BLOCK_LEN];

	if (sign_op) {
		soft_aes_ctx_sign_verify =
		    (soft_aes_sign_ctx_t *)session_p->sign.context;

		/* A zero-length MAC needs no crypto at all. */
		if (soft_aes_ctx_sign_verify->mac_len == 0) {
			*pulSignedLen = 0;
			goto clean_exit;
		}

		/* Size-only query: keep the operation active. */
		if (pSigned == NULL) {
			*pulSignedLen = soft_aes_ctx_sign_verify->mac_len;
			return (CKR_OK);
		}

		if (*pulSignedLen < soft_aes_ctx_sign_verify->mac_len) {
			*pulSignedLen = soft_aes_ctx_sign_verify->mac_len;
			return (CKR_BUFFER_TOO_SMALL);
		}
	} else {
		soft_aes_ctx_sign_verify =
		    (soft_aes_sign_ctx_t *)session_p->verify.context;
	}

	/* The MAC is produced by the underlying encrypt operation. */
	if (Final) {
		rv = soft_encrypt_final(session_p, last_block,
		    &ulEncryptedLen);
	} else {
		rv = soft_encrypt(session_p, pData, ulDataLen,
		    last_block, &ulEncryptedLen);
	}

	if (rv == CKR_OK) {
		*pulSignedLen = soft_aes_ctx_sign_verify->mac_len;
		/* CKM_AES_CMAC_GENERAL MACs are truncated to mac_len. */
		(void) memcpy(pSigned, last_block, *pulSignedLen);
	}

clean_exit:
	(void) pthread_mutex_lock(&session_p->session_mutex);

	if (sign_op) {
		free(session_p->sign.context);
		session_p->sign.context = NULL;
	} else {
		free(session_p->verify.context);
		session_p->verify.context = NULL;
	}

	/* The borrowed encrypt slot is finished with as well. */
	session_p->encrypt.flags = 0;
	(void) pthread_mutex_unlock(&session_p->session_mutex);

	/* Don't leave the computed MAC on the stack. */
	explicit_bzero(last_block, sizeof (last_block));

	return (rv);
}
/*
 * Feed a chunk of data into an in-progress CMAC/GMAC sign or verify
 * operation by passing it through the session's encrypt machinery.  The
 * MAC modes emit nothing until finalized, so the scratch output buffer
 * is only there to satisfy the interface and is scrubbed afterwards.
 */
CK_RV
soft_aes_mac_sign_verify_update(soft_session_t *session_p, CK_BYTE_PTR pPart,
    CK_ULONG ulPartLen)
{
	CK_BYTE mac_buf[AES_BLOCK_LEN];
	CK_ULONG mac_buf_len = sizeof (mac_buf);
	CK_RV rv;

	rv = soft_encrypt_update(session_p, pPart, ulPartLen, mac_buf,
	    &mac_buf_len);
	explicit_bzero(mac_buf, sizeof (mac_buf));

	return (rv);
}
/*
 * Securely dispose of an AES context: scrub and free any mode-private
 * plaintext buffers, the key schedule, and the context itself.  A NULL
 * ctx is a no-op.
 */
void
soft_aes_free_ctx(aes_ctx_t *ctx)
{
	size_t ctxsz = 0;

	if (ctx == NULL)
		return;

	/*
	 * Determine the size of the mode-specific context so freezero()
	 * can scrub all of it.  The chain order matters: GMAC is checked
	 * before GCM so a GMAC context is sized as such even if it also
	 * carries GCM state.
	 */
	if (ctx->ac_flags & ECB_MODE) {
		ctxsz = sizeof (ecb_ctx_t);
	} else if (ctx->ac_flags & (CBC_MODE|CMAC_MODE)) {
		ctxsz = sizeof (cbc_ctx_t);
	} else if (ctx->ac_flags & CTR_MODE) {
		ctxsz = sizeof (ctr_ctx_t);
	} else if (ctx->ac_flags & CCM_MODE) {
		ccm_ctx_t *ccm = &ctx->acu.acu_ccm;

		/* CCM buffers plaintext internally; scrub it too. */
		if (ccm->ccm_pt_buf != NULL) {
			freezero(ccm->ccm_pt_buf, ccm->ccm_data_len);
			ccm->ccm_pt_buf = NULL;
		}
		ctxsz = sizeof (ccm_ctx_t);
	} else if (ctx->ac_flags & GMAC_MODE) {
		ctxsz = sizeof (gcm_ctx_t);
	} else if (ctx->ac_flags & GCM_MODE) {
		gcm_ctx_t *gcm = &ctx->acu.acu_gcm;

		/* GCM buffers plaintext internally; scrub it too. */
		if (gcm->gcm_pt_buf != NULL) {
			freezero(gcm->gcm_pt_buf, gcm->gcm_pt_buf_len);
			gcm->gcm_pt_buf = NULL;
		}
		ctxsz = sizeof (gcm_ctx_t);
	}

	freezero(ctx->ac_keysched, ctx->ac_keysched_len);
	freezero(ctx, ctxsz);
}