#include <crypto/skcipher.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/scatterlist.h>
#include "fscrypt_private.h"
static unsigned int num_prealloc_crypto_pages = 32;

module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
		"Number of crypto pages to preallocate");

static mempool_t *fscrypt_bounce_page_pool = NULL;

static struct workqueue_struct *fscrypt_read_workqueue;
static DEFINE_MUTEX(fscrypt_init_mutex);

struct kmem_cache *fscrypt_inode_info_cachep;
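
/*
 * Queue a decryption work item on the fscrypt read workqueue.  Decryption of
 * data read from encrypted files is deferred to this workqueue so that it
 * runs in process context rather than in the bio completion handler.
 */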
void fscrypt_enqueue_decrypt_work(struct work_struct *work)
{
queue_work(fscrypt_read_workqueue, work);
}
EXPORT_SYMBOL(fscrypt_enqueue_decrypt_work);
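
/*
 * Allocate a bounce page to hold ciphertext during writes.  The page comes
 * from a mempool, so with blocking @gfp_flags the allocation itself won't
 * fail; a NULL return with a warning means the bounce page pool was never
 * created at all.
 */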
struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags)
{
if (WARN_ON_ONCE(!fscrypt_bounce_page_pool)) {
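		/*
		 * The pool is only created for filesystems that declare
		 * needs_bounce_pages, so getting here means the filesystem
		 * forgot to set that flag.
		 */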
return NULL;
}
return mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
}
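
/**
 * fscrypt_free_bounce_page() - free a ciphertext bounce page
 * @bounce_page: the bounce page to free, or NULL
 *
 * Clear the page's link back to its pagecache folio, then return the page to
 * the bounce page mempool.
 */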
void fscrypt_free_bounce_page(struct page *bounce_page)
{
if (!bounce_page)
return;
set_page_private(bounce_page, (unsigned long)NULL);
ClearPagePrivate(bounce_page);
mempool_free(bounce_page, fscrypt_bounce_page_pool);
}
EXPORT_SYMBOL(fscrypt_free_bounce_page);
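
/*
 * Generate the IV for the given data unit index within the given file:
 * IV_INO_LBLK_64 packs the inode number into bits 32..63 of the index,
 * IV_INO_LBLK_32 adds the hashed inode number to the index and truncates the
 * sum to 32 bits, and DIRECT_KEY copies the per-file nonce into the IV.  The
 * (possibly adjusted) index occupies the IV's low 64 bits, little-endian.
 */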
void fscrypt_generate_iv(union fscrypt_iv *iv, u64 index,
const struct fscrypt_inode_info *ci)
{
u8 flags = fscrypt_policy_flags(&ci->ci_policy);

	memset(iv, 0, ci->ci_mode->ivsize);

if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) {
WARN_ON_ONCE(index > U32_MAX);
WARN_ON_ONCE(ci->ci_inode->i_ino > U32_MAX);
index |= (u64)ci->ci_inode->i_ino << 32;
} else if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) {
WARN_ON_ONCE(index > U32_MAX);
index = (u32)(ci->ci_hashed_ino + index);
} else if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) {
memcpy(iv->nonce, ci->ci_nonce, FSCRYPT_FILE_NONCE_SIZE);
}
iv->index = cpu_to_le64(index);
}
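
/*
 * Encrypt or decrypt one "data unit" of file contents: generate the IV for
 * @index, then run the file's cipher over @len bytes at offset @offs of
 * @src_page, writing the result to the same offset of @dest_page (which may
 * equal @src_page for in-place operation).  @len must be a nonzero multiple
 * of FSCRYPT_CONTENTS_ALIGNMENT.
 */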
int fscrypt_crypt_data_unit(const struct fscrypt_inode_info *ci,
fscrypt_direction_t rw, u64 index,
struct page *src_page, struct page *dest_page,
unsigned int len, unsigned int offs)
{
struct crypto_sync_skcipher *tfm = ci->ci_enc_key.tfm;
SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
union fscrypt_iv iv;
struct scatterlist dst, src;
	int err;

if (WARN_ON_ONCE(len <= 0))
return -EINVAL;
if (WARN_ON_ONCE(len % FSCRYPT_CONTENTS_ALIGNMENT != 0))
return -EINVAL;

	fscrypt_generate_iv(&iv, index, ci);

skcipher_request_set_callback(
req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
NULL, NULL);
sg_init_table(&dst, 1);
sg_set_page(&dst, dest_page, len, offs);
sg_init_table(&src, 1);
sg_set_page(&src, src_page, len, offs);
skcipher_request_set_crypt(req, &src, &dst, len, &iv);
if (rw == FS_DECRYPT)
err = crypto_skcipher_decrypt(req);
else
err = crypto_skcipher_encrypt(req);
if (err)
fscrypt_err(ci->ci_inode,
"%scryption failed for data unit %llu: %d",
(rw == FS_DECRYPT ? "De" : "En"), index, err);
return err;
}
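
/**
 * fscrypt_encrypt_pagecache_blocks() - Encrypt data from a pagecache folio
 * @folio: the locked pagecache folio containing the data to encrypt
 * @len: size of the data to encrypt, in bytes
 * @offs: offset within @folio of the data to encrypt, in bytes
 * @gfp_flags: memory allocation flags for the bounce page
 *
 * This allocates a new bounce page and encrypts the given data into it.  Both
 * @len and @offs must be aligned to the file's crypto data unit size.  In the
 * bounce page, the ciphertext ends up at the same offsets at which the
 * plaintext was located in the pagecache folio.
 *
 * Return: the bounce page containing the ciphertext, with its page_private
 *	   pointing back to @folio; or an ERR_PTR() on failure
 */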
struct page *fscrypt_encrypt_pagecache_blocks(struct folio *folio,
size_t len, size_t offs, gfp_t gfp_flags)
{
const struct inode *inode = folio->mapping->host;
const struct fscrypt_inode_info *ci = fscrypt_get_inode_info_raw(inode);
const unsigned int du_bits = ci->ci_data_unit_bits;
const unsigned int du_size = 1U << du_bits;
struct page *ciphertext_page;
u64 index = ((u64)folio->index << (PAGE_SHIFT - du_bits)) +
(offs >> du_bits);
unsigned int i;
int err;
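
	/* Large folios aren't supported here yet. */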
	VM_BUG_ON_FOLIO(folio_test_large(folio), folio);

if (WARN_ON_ONCE(!folio_test_locked(folio)))
return ERR_PTR(-EINVAL);
if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, du_size)))
return ERR_PTR(-EINVAL);

	ciphertext_page = fscrypt_alloc_bounce_page(gfp_flags);
	if (!ciphertext_page)
		return ERR_PTR(-ENOMEM);

for (i = offs; i < offs + len; i += du_size, index++) {
err = fscrypt_crypt_data_unit(ci, FS_ENCRYPT, index,
&folio->page, ciphertext_page,
du_size, i);
if (err) {
fscrypt_free_bounce_page(ciphertext_page);
return ERR_PTR(err);
}
}
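
	/*
	 * Mark the bounce page as private and point it back at the pagecache
	 * folio it was encrypted from, so the writeback code can later map
	 * the bounce page back to its folio.
	 */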
SetPagePrivate(ciphertext_page);
set_page_private(ciphertext_page, (unsigned long)folio);
return ciphertext_page;
}
EXPORT_SYMBOL(fscrypt_encrypt_pagecache_blocks);
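
/**
 * fscrypt_encrypt_block_inplace() - Encrypt a filesystem block in-place
 * @inode: the file's inode
 * @page: the page containing the block to encrypt
 * @len: size of the block to encrypt, in bytes
 * @offs: byte offset within @page at which the block starts
 * @lblk_num: the block's logical block number within the file
 *
 * Encrypt a block whose location and logical block number can't be derived
 * from a pagecache folio, replacing the plaintext with the ciphertext in the
 * same page.  @lblk_num is used directly as the data unit index, which
 * assumes one data unit per filesystem block, so this is rejected on
 * filesystems that declare supports_subblock_data_units.
 *
 * Return: 0 on success; -errno on failure
 */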
int fscrypt_encrypt_block_inplace(const struct inode *inode, struct page *page,
unsigned int len, unsigned int offs,
u64 lblk_num)
{
if (WARN_ON_ONCE(inode->i_sb->s_cop->supports_subblock_data_units))
return -EOPNOTSUPP;

	return fscrypt_crypt_data_unit(fscrypt_get_inode_info_raw(inode),
FS_ENCRYPT, lblk_num, page, page, len,
offs);
}
EXPORT_SYMBOL(fscrypt_encrypt_block_inplace);
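
/**
 * fscrypt_decrypt_pagecache_blocks() - Decrypt data in a pagecache folio
 * @folio: the locked pagecache folio containing the data to decrypt
 * @len: size of the data to decrypt, in bytes
 * @offs: offset within @folio of the data to decrypt, in bytes
 *
 * Decrypt data, in place, that was just read from an encrypted file.  Both
 * @len and @offs must be aligned to the file's crypto data unit size.
 *
 * Return: 0 on success; -errno on failure
 */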
int fscrypt_decrypt_pagecache_blocks(struct folio *folio, size_t len,
size_t offs)
{
const struct inode *inode = folio->mapping->host;
const struct fscrypt_inode_info *ci = fscrypt_get_inode_info_raw(inode);
const unsigned int du_bits = ci->ci_data_unit_bits;
const unsigned int du_size = 1U << du_bits;
u64 index = ((u64)folio->index << (PAGE_SHIFT - du_bits)) +
(offs >> du_bits);
size_t i;
	int err;

if (WARN_ON_ONCE(!folio_test_locked(folio)))
return -EINVAL;
if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, du_size)))
return -EINVAL;

	for (i = offs; i < offs + len; i += du_size, index++) {
		struct page *page = folio_page(folio, i >> PAGE_SHIFT);

err = fscrypt_crypt_data_unit(ci, FS_DECRYPT, index, page,
page, du_size, i & ~PAGE_MASK);
if (err)
return err;
}

	return 0;
}
EXPORT_SYMBOL(fscrypt_decrypt_pagecache_blocks);
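
/**
 * fscrypt_decrypt_block_inplace() - Decrypt a filesystem block in-place
 * @inode: the file's inode
 * @page: the page containing the block to decrypt
 * @len: size of the block to decrypt, in bytes
 * @offs: byte offset within @page at which the block starts
 * @lblk_num: the block's logical block number within the file
 *
 * Decrypt a block in-place, the counterpart to
 * fscrypt_encrypt_block_inplace().  Like that function, it uses @lblk_num
 * directly as the data unit index and is therefore rejected on filesystems
 * that declare supports_subblock_data_units.
 *
 * Return: 0 on success; -errno on failure
 */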
int fscrypt_decrypt_block_inplace(const struct inode *inode, struct page *page,
unsigned int len, unsigned int offs,
u64 lblk_num)
{
if (WARN_ON_ONCE(inode->i_sb->s_cop->supports_subblock_data_units))
return -EOPNOTSUPP;

	return fscrypt_crypt_data_unit(fscrypt_get_inode_info_raw(inode),
FS_DECRYPT, lblk_num, page, page, len,
offs);
}
EXPORT_SYMBOL(fscrypt_decrypt_block_inplace);
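
/**
 * fscrypt_initialize() - lazily allocate the global bounce page pool
 * @sb: the superblock of the filesystem being accessed
 *
 * Create the bounce page mempool on first access to an encrypted file, so
 * the memory isn't consumed unless fscrypt is actually used.  Filesystems
 * that don't set needs_bounce_pages skip the allocation entirely.
 *
 * Return: 0 on success; -errno on failure
 */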
int fscrypt_initialize(struct super_block *sb)
{
int err = 0;
mempool_t *pool;
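
	/* Pairs with the smp_store_release() below */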
if (likely(smp_load_acquire(&fscrypt_bounce_page_pool)))
return 0;
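
	/* No need to create the bounce page pool if this FS won't use it. */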
if (!sb->s_cop->needs_bounce_pages)
return 0;

	mutex_lock(&fscrypt_init_mutex);
if (fscrypt_bounce_page_pool)
goto out_unlock;

	err = -ENOMEM;
pool = mempool_create_page_pool(num_prealloc_crypto_pages, 0);
if (!pool)
goto out_unlock;
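
	/* Pairs with the smp_load_acquire() above */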
smp_store_release(&fscrypt_bounce_page_pool, pool);
err = 0;
out_unlock:
mutex_unlock(&fscrypt_init_mutex);
return err;
}
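
/*
 * Log a ratelimited fscrypt message at the given printk level, prefixed with
 * the filesystem name and, when known, the inode number.
 */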
void fscrypt_msg(const struct inode *inode, const char *level,
const char *fmt, ...)
{
static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
struct va_format vaf;
va_list args;

	if (!__ratelimit(&rs))
return;

	va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
if (inode && inode->i_ino)
printk("%sfscrypt (%s, inode %lu): %pV\n",
level, inode->i_sb->s_id, inode->i_ino, &vaf);
else if (inode)
printk("%sfscrypt (%s): %pV\n", level, inode->i_sb->s_id, &vaf);
else
printk("%sfscrypt: %pV\n", level, &vaf);
va_end(args);
}
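
/*
 * fscrypt_init() - set up the module's global state: the read workqueue, the
 * inode info cache, and the fscrypt keyring.
 */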
static int __init fscrypt_init(void)
{
int err = -ENOMEM;
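
	/*
	 * The read workqueue is WQ_UNBOUND so the work items can run on any
	 * CPU, and WQ_HIGHPRI so that decryption doesn't add excessive
	 * latency to reads.  Concurrency is limited to the number of CPUs
	 * online at init time, as the work is CPU-bound.
	 */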
	fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue",
						 WQ_UNBOUND | WQ_HIGHPRI,
						 num_online_cpus());
	if (!fscrypt_read_workqueue)
		goto fail;

	fscrypt_inode_info_cachep = KMEM_CACHE(fscrypt_inode_info,
					       SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_inode_info_cachep)
		goto fail_free_queue;

	err = fscrypt_init_keyring();
	if (err)
		goto fail_free_inode_info;

	return 0;

fail_free_inode_info:
	kmem_cache_destroy(fscrypt_inode_info_cachep);
fail_free_queue:
	destroy_workqueue(fscrypt_read_workqueue);
fail:
	return err;
}

late_initcall(fscrypt_init)