__next
struct task_struct *__next = (next); \
__switch_to_fpu(__prev, __next); \
struct task_struct *__next = (next); \
__switch_to_fpu(__prev, __next); \
__switch_to_vector(__prev, __next); \
if (switch_to_should_flush_icache(__next)) \
__switch_to_envcfg(__next); \
((last) = __switch_to(__prev, __next)); \
/* No-op stub for FPU state switching — presumably the fallback used when
 * FPU support is compiled out (e.g. !CONFIG_FPU); confirm against the
 * surrounding #ifdef in the original header. do-while(0) keeps it safe
 * as a single statement. */
#define __switch_to_fpu(__prev, __next) do { } while (0)
/* No-op stub for vector-unit state switching — presumably the fallback
 * when vector support is compiled out; confirm against the surrounding
 * #ifdef in the original header. do-while(0) keeps it usable as a
 * single statement. */
#define __switch_to_vector(__prev, __next) do {} while (0)
struct drm_gpusvm_range *range, *__next;
drm_gpusvm_for_each_range_safe(range, __next, notifier, 0,
struct drm_gpusvm_range *range, *__next;
drm_gpusvm_for_each_range_safe(range, __next, notifier, start, end) {
goto __next;
__next:
typeof((array)[0]) *__next = prev ? ((typeof(__next)) prev) + 1 : (array); \
if (__next - (array) < ARRAY_SIZE((array))) { \
void (*__get_desc)(typeof(__next), char *) = get_desc; \
__get_desc(__next, desc); \
return __next; \
typeof((array)[0]) *__next = prev ? ((typeof(__next)) prev) + 1 : (array); \
if (__next - (array) < ARRAY_SIZE((array))) { \
strscpy(desc, __next->desc_member, KUNIT_PARAM_DESC_SIZE); \
return __next; \
struct list_head *__next = READ_ONCE(__ptr->next); \
likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \
struct list_head *__next = READ_ONCE(__ptr->next); \
likely(__next != __head) ? list_entry_rcu(__next, type, \
/* Start a VLA packing group: declares a running-offset variable named
 * <groupname>__next (via ## token pasting), initialized to 0. Companion
 * macros (vla_item et al., not visible here) advance it as items are
 * appended; NOTE(review): SIZE_MAX appears to be used elsewhere as an
 * overflow sentinel — confirm in the full header. */
#define vla_group(groupname) size_t groupname##__next = 0
/* Total accumulated size of a VLA packing group: expands to the
 * group's running-offset variable declared by vla_group(). Presumably
 * SIZE_MAX here signals that an overflow occurred during accumulation —
 * confirm against the vla_item definition in the full header. */
#define vla_group_size(groupname) groupname##__next
if (groupname##__next != SIZE_MAX) { \
offset = (groupname##__next + align_mask) & \
&groupname##__next)) { \
groupname##__next = SIZE_MAX; \
if (groupname##__next != SIZE_MAX) { \
offset = (groupname##__next + align_mask) & \
&groupname##__next)) { \
groupname##__next = SIZE_MAX; \
return __next(m, v, pos, TRACE_PIDS);
return __next(m, v, pos, TRACE_NO_PIDS);
return __next(m, pos);
return __next(m, pos);
goto __next;
__next: