#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include "internal.h"
/*
 * Check the validity of a vnode/inode and its callback promise without
 * taking any locks.
 *
 * Returns true if the vnode may be used as-is — including the deleted
 * case, which the caller must test for separately — and false if the
 * status needs refetching from the server (a tracepoint records why).
 */
bool afs_check_validity(const struct afs_vnode *vnode)
{
	const struct afs_volume *volume = vnode->volume;
	enum afs_vnode_invalid_trace trace = afs_vnode_valid_trace;
	time64_t cb_expires_at = atomic64_read(&vnode->cb_expires_at);
	time64_t deadline = ktime_get_real_seconds() + 10; /* treat promises expiring within 10s as expired */

	/* A deleted vnode has nothing to revalidate. */
	if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
		return true;

	/* Volume-level break not yet checked? */
	if (atomic_read(&volume->cb_v_check) != atomic_read(&volume->cb_v_break))
		trace = afs_vnode_invalid_trace_cb_v_break;
	else if (cb_expires_at == AFS_NO_CB_PROMISE)
		trace = afs_vnode_invalid_trace_no_cb_promise;
	else if (cb_expires_at <= deadline)
		trace = afs_vnode_invalid_trace_expired;
	else if (volume->cb_expires_at <= deadline)
		trace = afs_vnode_invalid_trace_vol_expired;
	/* Volume moved to a new RO snapshot or flagged for scrub since we
	 * last validated this vnode? */
	else if (vnode->cb_ro_snapshot != atomic_read(&volume->cb_ro_snapshot))
		trace = afs_vnode_invalid_trace_cb_ro_snapshot;
	else if (vnode->cb_scrub != atomic_read(&volume->cb_scrub))
		trace = afs_vnode_invalid_trace_cb_scrub;
	else if (test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags))
		trace = afs_vnode_invalid_trace_zap_data;
	else
		return true;

	trace_afs_vnode_invalid(vnode, trace);
	return false;
}
/*
 * Look up op->server in the volume's current server list and report
 * whether it is marked excluded.  A server that doesn't appear in the
 * list at all is also treated as excluded.
 */
static bool __afs_is_server_excluded(struct afs_operation *op, struct afs_volume *volume)
{
	const struct afs_server_list *list;
	bool excluded = true;
	int n;

	rcu_read_lock();

	list = rcu_dereference(volume->servers);
	for (n = 0; n < list->nr_servers; n++) {
		const struct afs_server_entry *entry = &list->servers[n];

		if (entry->server == op->server) {
			excluded = test_bit(AFS_SE_EXCLUDED, &entry->flags);
			break;
		}
	}

	rcu_read_unlock();
	return excluded;
}
/*
 * Determine if the server used for this operation is excluded, forcing a
 * refresh of the volume's server list and rechecking if it didn't appear
 * excluded on the first pass.
 *
 * Returns 1 if excluded, 0 if not, or a negative error code from the
 * volume status update.
 */
static int afs_is_server_excluded(struct afs_operation *op, struct afs_volume *volume)
{
	int err;

	if (__afs_is_server_excluded(op, volume))
		return 1;

	/* The cached list says "not excluded" — refresh it and recheck. */
	set_bit(AFS_VOLUME_NEEDS_UPDATE, &volume->flags);
	err = afs_check_volume_status(op->volume, op);
	if (err < 0)
		return err;

	return __afs_is_server_excluded(op, volume) ? 1 : 0;
}
/*
 * Compare the volume creation time returned in a volsync record against
 * the value we have cached and decide whether the volume content moved
 * forwards (bump cb_ro_snapshot so vnodes revalidate) or regressed, e.g.
 * restored from backup (bump cb_scrub so cached data is discarded).
 * Called with volume->volsync_lock held (see afs_update_volume_times()).
 *
 * Returns 0 on success, 1 if the reply came from an excluded server and
 * the operation should be redone, or a negative error code.
 */
static int afs_update_volume_creation_time(struct afs_operation *op, struct afs_volume *volume)
{
	unsigned int snap;
	time64_t cur = volume->creation_time;
	time64_t old = op->pre_volsync.creation;
	time64_t new = op->volsync.creation;
	int ret;

	_enter("%llx,%llx,%llx->%llx", volume->vid, cur, old, new);

	/* First time we've seen a creation time: just record it. */
	if (cur == TIME64_MIN) {
		volume->creation_time = new;
		return 0;
	}

	/* No change since we last recorded it. */
	if (new == cur)
		return 0;

	/* The cached value changed under us whilst this op was in flight;
	 * leave it to whoever changed it. */
	if (cur != old)
		return 0;

	/* A creation-time change on an RW volume suggests it was reset or
	 * restored — treat as a regression. */
	if (volume->type == AFSVL_RWVOL)
		goto regressed;
	if (volume->type == AFSVL_BACKVOL) {
		if (new < old)
			goto regressed;
		goto advance;
	}

	/* RO volume: if the reply came from an excluded server, disregard
	 * it rather than advancing the snapshot counter. */
	ret = afs_is_server_excluded(op, volume);
	if (ret < 0)
		return ret;
	if (ret > 0) {
		snap = atomic_read(&volume->cb_ro_snapshot);
		trace_afs_cb_v_break(volume->vid, snap, afs_cb_break_volume_excluded);
		return ret;
	}

advance:
	/* Content moved forward (e.g. vos release): new RO snapshot. */
	snap = atomic_inc_return(&volume->cb_ro_snapshot);
	trace_afs_cb_v_break(volume->vid, snap, afs_cb_break_for_vos_release);
	volume->creation_time = new;
	return 0;

regressed:
	/* Time went backwards: force a scrub of all cached state. */
	atomic_inc(&volume->cb_scrub);
	trace_afs_cb_v_break(volume->vid, 0, afs_cb_break_for_creation_regress);
	volume->creation_time = new;
	return 0;
}
/*
 * Compare the volume update time returned in a volsync record against the
 * cached value, noting a regression (and forcing a scrub) if it went
 * backwards.  Called with volume->volsync_lock held.
 */
static void afs_update_volume_update_time(struct afs_operation *op, struct afs_volume *volume)
{
	time64_t cur = volume->update_time;
	time64_t old = op->pre_volsync.update;
	time64_t new = op->volsync.update;

	_enter("%llx,%llx,%llx->%llx", volume->vid, cur, old, new);

	/* First sighting of an update time: just record it. */
	if (cur == TIME64_MIN) {
		volume->update_time = new;
		return;
	}

	/* Unchanged since we last recorded it. */
	if (new == cur)
		return;

	/* Only act if the cached value didn't change under us whilst this
	 * op was in flight. */
	if (cur == old) {
		if (new < old) {
			/* Update time regressed: scrub cached state. */
			atomic_inc(&volume->cb_scrub);
			trace_afs_cb_v_break(volume->vid, 0, afs_cb_break_for_update_regress);
		}
		volume->update_time = new;
	}
}
/*
 * Fold the creation and update times from a volsync record into the
 * volume, serialised against other updaters by volume->volsync_lock.
 *
 * Returns 0 or whatever afs_update_volume_creation_time() returned.
 */
static int afs_update_volume_times(struct afs_operation *op, struct afs_volume *volume)
{
	int ret = 0;

	/* Lockless fast path: nothing changed. */
	if (likely(op->volsync.creation == volume->creation_time &&
		   op->volsync.update == volume->update_time))
		return 0;

	mutex_lock(&volume->volsync_lock);

	if (op->volsync.creation != volume->creation_time)
		ret = afs_update_volume_creation_time(op, volume);

	/* Skip the update time only on a hard error (ret == 1 means
	 * "redo op" and still processes the update time). */
	if (ret >= 0 && op->volsync.update != volume->update_time)
		afs_update_volume_update_time(op, volume);

	mutex_unlock(&volume->volsync_lock);
	return ret;
}
/*
 * Update the state of a volume after an operation completes, folding in
 * the returned volsync information and recording the expiry time of any
 * callback promise the server gave us.
 *
 * Returns 0 on success, a positive value (propagated from the volsync
 * handling, meaning the op should be redone from the start — confirm
 * against call sites), or a negative error code.
 */
int afs_update_volume_state(struct afs_operation *op)
{
	struct afs_server_list *slist = op->server_list;
	struct afs_server_entry *se = &slist->servers[op->server_index];
	struct afs_callback *cb = &op->file[0].scb.callback;
	struct afs_volume *volume = op->volume;
	unsigned int cb_v_break = atomic_read(&volume->cb_v_break);
	unsigned int cb_v_check = atomic_read(&volume->cb_v_check);
	int ret;

	_enter("%llx", op->volume->vid);

	/* Process any volsync record the server returned. */
	if (op->volsync.creation != TIME64_MIN || op->volsync.update != TIME64_MIN) {
		ret = afs_update_volume_times(op, volume);
		if (ret != 0) {
			_leave(" = %d", ret);
			return ret;
		}
	}

	/* Only record a callback promise if no volume-level break happened
	 * whilst the op was in flight and the server actually sent one. */
	if (op->cb_v_break == cb_v_break &&
	    (op->file[0].scb.have_cb || op->file[1].scb.have_cb)) {
		time64_t expires_at = cb->expires_at;

		/* Prefer file[0]'s callback, fall back to file[1]'s. */
		if (!op->file[0].scb.have_cb)
			expires_at = op->file[1].scb.callback.expires_at;

		se->cb_expires_at = expires_at;
		volume->cb_expires_at = expires_at;
	}

	/* Note that we've now checked up to the break value seen when the
	 * op started; cmpxchg so a concurrent advance isn't lost. */
	if (cb_v_check < op->cb_v_break)
		atomic_cmpxchg(&volume->cb_v_check, cb_v_check, op->cb_v_break);
	return 0;
}
static void afs_zap_data(struct afs_vnode *vnode)
{
_enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode);
afs_invalidate_cache(vnode, 0);
if (S_ISREG(vnode->netfs.inode.i_mode))
filemap_invalidate_inode(&vnode->netfs.inode, true, 0, LLONG_MAX);
else
filemap_invalidate_inode(&vnode->netfs.inode, false, 0, LLONG_MAX);
}
/*
 * Validate a vnode/inode, refetching its status from the server if the
 * callback state indicates our cached metadata may be stale, and zapping
 * cached data if required.
 *
 * Returns 0 on success, -ESTALE if the vnode has been deleted, or a
 * negative error from lock acquisition or the status fetch.
 */
int afs_validate(struct afs_vnode *vnode, struct key *key)
{
	struct afs_volume *volume = vnode->volume;
	unsigned int cb_ro_snapshot, cb_scrub;
	time64_t deadline = ktime_get_real_seconds() + 10; /* 10s expiry slack, as in afs_check_validity() */
	bool zap = false, locked_vol = false;
	int ret;

	_enter("{v={%llx:%llu} fl=%lx},%x",
	       vnode->fid.vid, vnode->fid.vnode, vnode->flags,
	       key_serial(key));

	/* Lockless fast path: still valid (or deleted, reported -ESTALE). */
	if (afs_check_validity(vnode))
		return test_bit(AFS_VNODE_DELETED, &vnode->flags) ? -ESTALE : 0;

	ret = down_write_killable(&vnode->validate_lock);
	if (ret < 0)
		goto error;

	/* Recheck under the lock: another validator may have found the
	 * vnode deleted whilst we waited. */
	if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
		ret = -ESTALE;
		goto error_unlock;
	}

	/* If volume-level state needs rechecking, serialise with other
	 * volume-wide checkers via cb_check_lock. */
	if (volume->cb_expires_at <= deadline ||
	    atomic_read(&volume->cb_v_check) != atomic_read(&volume->cb_v_break)) {
		ret = mutex_lock_interruptible(&volume->cb_check_lock);
		if (ret < 0)
			goto error_unlock;
		locked_vol = true;
	}

	cb_ro_snapshot = atomic_read(&volume->cb_ro_snapshot);
	cb_scrub = atomic_read(&volume->cb_scrub);

	/* A snapshot or scrub change means existing mappings may show stale
	 * data; unmap so userspace refaults after revalidation. */
	if (vnode->cb_ro_snapshot != cb_ro_snapshot ||
	    vnode->cb_scrub != cb_scrub)
		unmap_mapping_pages(vnode->netfs.inode.i_mapping, 0, 0, false);

	/* Refetch the status if anything suggests our callback promise or
	 * cached metadata is out of date. */
	if (vnode->cb_ro_snapshot != cb_ro_snapshot ||
	    vnode->cb_scrub != cb_scrub ||
	    volume->cb_expires_at <= deadline ||
	    atomic_read(&volume->cb_v_check) != atomic_read(&volume->cb_v_break) ||
	    atomic64_read(&vnode->cb_expires_at) <= deadline
	    ) {
		ret = afs_fetch_status(vnode, key, false, NULL);
		if (ret < 0) {
			if (ret == -ENOENT) {
				set_bit(AFS_VNODE_DELETED, &vnode->flags);
				ret = -ESTALE;
			}
			goto error_unlock;
		}

		_debug("new promise [fl=%lx]", vnode->flags);
	}

	/* Drop the volume lock before the (possibly slow) data zap. */
	if (locked_vol) {
		mutex_unlock(&volume->cb_check_lock);
		locked_vol = false;
	}

	/* Resample the counters: the fetch above may have advanced them. */
	cb_ro_snapshot = atomic_read(&volume->cb_ro_snapshot);
	cb_scrub = atomic_read(&volume->cb_scrub);
	_debug("vnode inval %x==%x %x==%x",
	       vnode->cb_ro_snapshot, cb_ro_snapshot,
	       vnode->cb_scrub, cb_scrub);
	if (vnode->cb_scrub != cb_scrub)
		zap = true;
	vnode->cb_ro_snapshot = cb_ro_snapshot;
	vnode->cb_scrub = cb_scrub;

	/* Honour any deferred request to discard cached data. */
	zap |= test_and_clear_bit(AFS_VNODE_ZAP_DATA, &vnode->flags);
	if (zap)
		afs_zap_data(vnode);
	up_write(&vnode->validate_lock);
	_leave(" = 0");
	return 0;

error_unlock:
	if (locked_vol)
		mutex_unlock(&volume->cb_check_lock);
	up_write(&vnode->validate_lock);
error:
	_leave(" = %d", ret);
	return ret;
}