arch/arm/include/asm/procinfo.h
38
struct cpu_user_fns *user;
arch/arm/include/asm/tls.h
14
mrc p15, 0, \tmp2, c13, c0, 2 @ get the user r/w register
arch/arm/include/asm/tls.h
15
@ TLS register update is deferred until return to user space
arch/arm/include/asm/tls.h
16
mcr p15, 0, \tpuser, c13, c0, 2 @ set the user r/w register
arch/arm/kernel/ptrace.c
267
else if (off >= sizeof(struct user))
arch/arm/kernel/ptrace.c
280
if (off & 3 || off >= sizeof(struct user))
arch/arm/kernel/setup.c
713
cpu_user = *list->user;
arch/arm/kernel/sys_oabi-compat.c
202
struct oabi_flock64 user;
arch/arm/kernel/sys_oabi-compat.c
204
if (copy_from_user(&user, (struct oabi_flock64 __user *)arg,
arch/arm/kernel/sys_oabi-compat.c
205
sizeof(user)))
arch/arm/kernel/sys_oabi-compat.c
208
kernel->l_type = user.l_type;
arch/arm/kernel/sys_oabi-compat.c
209
kernel->l_whence = user.l_whence;
arch/arm/kernel/sys_oabi-compat.c
210
kernel->l_start = user.l_start;
arch/arm/kernel/sys_oabi-compat.c
211
kernel->l_len = user.l_len;
arch/arm/kernel/sys_oabi-compat.c
212
kernel->l_pid = user.l_pid;
arch/arm/kernel/sys_oabi-compat.c
219
struct oabi_flock64 user;
arch/arm/kernel/sys_oabi-compat.c
221
user.l_type = kernel->l_type;
arch/arm/kernel/sys_oabi-compat.c
222
user.l_whence = kernel->l_whence;
arch/arm/kernel/sys_oabi-compat.c
223
user.l_start = kernel->l_start;
arch/arm/kernel/sys_oabi-compat.c
224
user.l_len = kernel->l_len;
arch/arm/kernel/sys_oabi-compat.c
225
user.l_pid = kernel->l_pid;
arch/arm/kernel/sys_oabi-compat.c
228
&user, sizeof(user)))
arch/arm/kernel/sys_oabi-compat.c
286
struct oabi_epoll_event user;
arch/arm/kernel/sys_oabi-compat.c
290
copy_from_user(&user, event, sizeof(user)))
arch/arm/kernel/sys_oabi-compat.c
293
kernel.events = user.events;
arch/arm/kernel/sys_oabi-compat.c
294
kernel.data = user.data;
arch/arm/mach-omap2/clockdomain.c
544
int clkdm_for_each(int (*fn)(struct clockdomain *clkdm, void *user),
arch/arm/mach-omap2/clockdomain.c
545
void *user)
arch/arm/mach-omap2/clockdomain.c
554
ret = (*fn)(clkdm, user);
arch/arm/mach-omap2/clockdomain.h
190
int clkdm_for_each(int (*fn)(struct clockdomain *clkdm, void *user),
arch/arm/mach-omap2/clockdomain.h
191
void *user);
arch/arm/mach-omap2/omap_hwmod.c
1129
if (os->user & OCP_USER_MPU) {
arch/arm/mach-omap2/omap_hwmod.c
2626
if (!oi || !oi->master || !oi->slave || !oi->user)
arch/arm/mach-omap2/omap_hwmod.c
3435
oi->user = OCP_USER_MPU | OCP_USER_SDMA;
arch/arm/mach-omap2/omap_hwmod.c
3829
void *user),
arch/arm/mach-omap2/omap_hwmod.c
3830
void *user)
arch/arm/mach-omap2/omap_hwmod.c
3845
ret = (*fn)(temp_oh, user);
arch/arm/mach-omap2/omap_hwmod.h
250
u8 user;
arch/arm/mach-omap2/omap_hwmod.h
644
void *user),
arch/arm/mach-omap2/omap_hwmod.h
645
void *user);
arch/arm/mach-omap2/omap_hwmod.h
655
int (*fn)(struct omap_hwmod *oh, void *user),
arch/arm/mach-omap2/omap_hwmod.h
656
void *user)
arch/arm/mach-omap2/omap_hwmod_2420_data.c
239
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2420_data.c
247
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2420_data.c
255
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2420_data.c
263
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2420_data.c
271
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2420_data.c
279
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2420_data.c
287
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2420_data.c
295
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2420_data.c
303
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2420_data.c
310
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2420_data.c
318
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2420_data.c
326
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2420_data.c
334
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2420_data.c
342
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2420_data.c
350
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2430_data.c
380
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_2430_data.c
388
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2430_data.c
396
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2430_data.c
404
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_2430_data.c
412
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2430_data.c
420
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2430_data.c
428
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2430_data.c
436
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2430_data.c
444
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2430_data.c
452
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2430_data.c
460
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2430_data.c
468
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2430_data.c
476
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2430_data.c
484
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2430_data.c
491
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2430_data.c
499
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2430_data.c
507
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2430_data.c
515
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2430_data.c
523
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2430_data.c
531
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2430_data.c
539
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2430_data.c
547
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2xxx_interconnect_data.c
102
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2xxx_interconnect_data.c
110
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2xxx_interconnect_data.c
118
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2xxx_interconnect_data.c
126
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2xxx_interconnect_data.c
134
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2xxx_interconnect_data.c
142
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2xxx_interconnect_data.c
150
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2xxx_interconnect_data.c
158
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2xxx_interconnect_data.c
166
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2xxx_interconnect_data.c
174
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2xxx_interconnect_data.c
188
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2xxx_interconnect_data.c
202
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2xxx_interconnect_data.c
216
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2xxx_interconnect_data.c
231
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2xxx_interconnect_data.c
239
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2xxx_interconnect_data.c
247
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2xxx_interconnect_data.c
255
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2xxx_interconnect_data.c
27
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2xxx_interconnect_data.c
34
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_2xxx_interconnect_data.c
47
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2xxx_interconnect_data.c
54
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2xxx_interconnect_data.c
62
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2xxx_interconnect_data.c
70
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2xxx_interconnect_data.c
78
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2xxx_interconnect_data.c
86
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_2xxx_interconnect_data.c
94
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1459
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1466
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1474
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1482
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1489
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1501
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1509
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1516
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1524
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1532
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1541
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1549
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1559
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1569
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1578
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1587
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1596
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1605
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1620
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1635
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1651
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1659
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1666
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1675
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1682
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1689
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1697
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1705
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1714
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1723
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1732
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1741
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1750
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1759
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1767
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1775
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1784
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1799
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1813
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1828
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1843
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1858
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1874
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1882
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1890
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1898
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1929
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1951
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1976
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1984
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
1992
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
2000
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
2009
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
2018
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
2027
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
2036
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
2045
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
2054
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
2061
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
2069
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
2077
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
2085
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
2094
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
2101
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
2109
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
2117
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
2125
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
2149
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
2162
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
2190
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
2203
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
2210
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
2248
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
2290
.user = OCP_USER_MPU | OCP_USER_SDMA,
arch/arm/mach-omap2/omap_hwmod_81xx_data.c
1012
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_81xx_data.c
1088
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_81xx_data.c
1095
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_81xx_data.c
1102
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_81xx_data.c
1109
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_81xx_data.c
1144
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_81xx_data.c
1179
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_81xx_data.c
150
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_81xx_data.c
157
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_81xx_data.c
178
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_81xx_data.c
185
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_81xx_data.c
205
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_81xx_data.c
212
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_81xx_data.c
248
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_81xx_data.c
287
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_81xx_data.c
308
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_81xx_data.c
329
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_81xx_data.c
366
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_81xx_data.c
403
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_81xx_data.c
423
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_81xx_data.c
453
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_81xx_data.c
496
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_81xx_data.c
522
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_81xx_data.c
548
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_81xx_data.c
574
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_81xx_data.c
610
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_81xx_data.c
646
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_81xx_data.c
666
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_81xx_data.c
701
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_81xx_data.c
721
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_81xx_data.c
741
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_81xx_data.c
761
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_81xx_data.c
781
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_81xx_data.c
812
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_81xx_data.c
841
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_81xx_data.c
862
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_81xx_data.c
895
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_81xx_data.c
941
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_81xx_data.c
965
.user = OCP_USER_MPU,
arch/arm/mach-omap2/omap_hwmod_81xx_data.c
989
.user = OCP_USER_MPU,
arch/arm/mach-omap2/pm-debug.c
111
static int pwrdm_dbg_show_timer(struct powerdomain *pwrdm, void *user)
arch/arm/mach-omap2/pm-debug.c
113
struct seq_file *s = (struct seq_file *)user;
arch/arm/mach-omap2/pm-debug.c
67
static int clkdm_dbg_show_counter(struct clockdomain *clkdm, void *user)
arch/arm/mach-omap2/pm-debug.c
69
struct seq_file *s = (struct seq_file *)user;
arch/arm/mach-omap2/pm-debug.c
82
static int pwrdm_dbg_show_counter(struct powerdomain *pwrdm, void *user)
arch/arm/mach-omap2/pm-debug.c
84
struct seq_file *s = (struct seq_file *)user;
arch/arm/mach-omap2/powerdomain.c
437
int pwrdm_for_each(int (*fn)(struct powerdomain *pwrdm, void *user),
arch/arm/mach-omap2/powerdomain.c
438
void *user)
arch/arm/mach-omap2/powerdomain.c
447
ret = (*fn)(temp_pwrdm, user);
arch/arm/mach-omap2/powerdomain.h
209
int pwrdm_for_each(int (*fn)(struct powerdomain *pwrdm, void *user),
arch/arm/mach-omap2/powerdomain.h
210
void *user);
arch/arm/mach-omap2/sr_device.c
152
static int __init sr_dev_init(struct omap_hwmod *oh, void *user)
arch/arm/mach-omap2/sr_device.c
166
static int __init sr_dev_init(struct omap_hwmod *oh, void *user)
arch/arm/mm/alignment.c
348
goto user;
arch/arm/mm/alignment.c
364
user:
arch/arm/mm/alignment.c
410
goto user;
arch/arm/mm/alignment.c
425
user:
arch/arm/mm/alignment.c
848
goto user;
arch/arm/mm/alignment.c
970
user:
arch/arm64/kernel/signal.c
1009
err = parse_user_sigframe(&user, sf);
arch/arm64/kernel/signal.c
1012
if (!user.fpsimd)
arch/arm64/kernel/signal.c
1015
if (user.sve)
arch/arm64/kernel/signal.c
1016
err = restore_sve_fpsimd_context(&user);
arch/arm64/kernel/signal.c
1018
err = restore_fpsimd_context(&user);
arch/arm64/kernel/signal.c
1021
if (err == 0 && system_supports_gcs() && user.gcs)
arch/arm64/kernel/signal.c
1022
err = restore_gcs_context(&user);
arch/arm64/kernel/signal.c
1024
if (err == 0 && system_supports_tpidr2() && user.tpidr2)
arch/arm64/kernel/signal.c
1025
err = restore_tpidr2_context(&user);
arch/arm64/kernel/signal.c
1027
if (err == 0 && system_supports_fpmr() && user.fpmr)
arch/arm64/kernel/signal.c
1028
err = restore_fpmr_context(&user);
arch/arm64/kernel/signal.c
1030
if (err == 0 && system_supports_sme() && user.za)
arch/arm64/kernel/signal.c
1031
err = restore_za_context(&user);
arch/arm64/kernel/signal.c
1033
if (err == 0 && system_supports_sme2() && user.zt)
arch/arm64/kernel/signal.c
1034
err = restore_zt_context(&user);
arch/arm64/kernel/signal.c
1036
if (err == 0 && system_supports_poe() && user.poe)
arch/arm64/kernel/signal.c
1037
err = restore_poe_context(&user, ua_state);
arch/arm64/kernel/signal.c
1140
static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
arch/arm64/kernel/signal.c
1146
err = sigframe_alloc(user, &user->fpsimd_offset,
arch/arm64/kernel/signal.c
1154
err = sigframe_alloc(user, &user->esr_offset,
arch/arm64/kernel/signal.c
1162
err = sigframe_alloc(user, &user->gcs_offset,
arch/arm64/kernel/signal.c
1182
err = sigframe_alloc(user, &user->sve_offset,
arch/arm64/kernel/signal.c
1189
err = sigframe_alloc(user, &user->tpidr2_offset,
arch/arm64/kernel/signal.c
1207
err = sigframe_alloc(user, &user->za_offset,
arch/arm64/kernel/signal.c
1215
err = sigframe_alloc(user, &user->zt_offset,
arch/arm64/kernel/signal.c
1223
err = sigframe_alloc(user, &user->fpmr_offset,
arch/arm64/kernel/signal.c
1230
err = sigframe_alloc(user, &user->poe_offset,
arch/arm64/kernel/signal.c
1236
return sigframe_alloc_end(user);
arch/arm64/kernel/signal.c
1239
static int setup_sigframe(struct rt_sigframe_user_layout *user,
arch/arm64/kernel/signal.c
1244
struct rt_sigframe __user *sf = user->sigframe;
arch/arm64/kernel/signal.c
1247
__put_user_error(regs->regs[29], &user->next_frame->fp, err);
arch/arm64/kernel/signal.c
1248
__put_user_error(regs->regs[30], &user->next_frame->lr, err);
arch/arm64/kernel/signal.c
1263
apply_user_offset(user, user->fpsimd_offset);
arch/arm64/kernel/signal.c
1268
if (err == 0 && user->esr_offset) {
arch/arm64/kernel/signal.c
1270
apply_user_offset(user, user->esr_offset);
arch/arm64/kernel/signal.c
1277
if (system_supports_gcs() && err == 0 && user->gcs_offset) {
arch/arm64/kernel/signal.c
1279
apply_user_offset(user, user->gcs_offset);
arch/arm64/kernel/signal.c
1285
err == 0 && user->sve_offset) {
arch/arm64/kernel/signal.c
1287
apply_user_offset(user, user->sve_offset);
arch/arm64/kernel/signal.c
129
static void init_user_layout(struct rt_sigframe_user_layout *user)
arch/arm64/kernel/signal.c
1294
apply_user_offset(user, user->tpidr2_offset);
arch/arm64/kernel/signal.c
1301
apply_user_offset(user, user->fpmr_offset);
arch/arm64/kernel/signal.c
1307
apply_user_offset(user, user->poe_offset);
arch/arm64/kernel/signal.c
1313
if (system_supports_sme() && err == 0 && user->za_offset) {
arch/arm64/kernel/signal.c
1315
apply_user_offset(user, user->za_offset);
arch/arm64/kernel/signal.c
132
sizeof(user->sigframe->uc.uc_mcontext.__reserved);
arch/arm64/kernel/signal.c
1320
if (system_supports_sme2() && err == 0 && user->zt_offset) {
arch/arm64/kernel/signal.c
1322
apply_user_offset(user, user->zt_offset);
arch/arm64/kernel/signal.c
1326
if (err == 0 && user->extra_offset) {
arch/arm64/kernel/signal.c
1327
char __user *sfp = (char __user *)user->sigframe;
arch/arm64/kernel/signal.c
1329
apply_user_offset(user, user->extra_offset);
arch/arm64/kernel/signal.c
134
memset(user, 0, sizeof(*user));
arch/arm64/kernel/signal.c
1348
extra_size = sfp + round_up(user->size, 16) - userp;
arch/arm64/kernel/signal.c
135
user->size = offsetof(struct rt_sigframe, uc.uc_mcontext.__reserved);
arch/arm64/kernel/signal.c
1363
apply_user_offset(user, user->end_offset);
arch/arm64/kernel/signal.c
137
user->limit = user->size + reserved_size;
arch/arm64/kernel/signal.c
1372
static int get_sigframe(struct rt_sigframe_user_layout *user,
arch/arm64/kernel/signal.c
1378
init_user_layout(user);
arch/arm64/kernel/signal.c
1379
err = setup_sigframe_layout(user, false);
arch/arm64/kernel/signal.c
1386
user->next_frame = (struct frame_record __user *)sp;
arch/arm64/kernel/signal.c
1388
sp = round_down(sp, 16) - sigframe_size(user);
arch/arm64/kernel/signal.c
1389
user->sigframe = (struct rt_sigframe __user *)sp;
arch/arm64/kernel/signal.c
139
user->limit -= TERMINATOR_SIZE;
arch/arm64/kernel/signal.c
1394
if (!access_ok(user->sigframe, sp_top - sp))
arch/arm64/kernel/signal.c
140
user->limit -= EXTRA_CONTEXT_SIZE;
arch/arm64/kernel/signal.c
144
static size_t sigframe_size(struct rt_sigframe_user_layout const *user)
arch/arm64/kernel/signal.c
1444
struct rt_sigframe_user_layout *user, int usig)
arch/arm64/kernel/signal.c
146
return round_up(max(user->size, sizeof(struct rt_sigframe)), 16);
arch/arm64/kernel/signal.c
1468
regs->regs[1] = (unsigned long)&user->sigframe->info;
arch/arm64/kernel/signal.c
1469
regs->regs[2] = (unsigned long)&user->sigframe->uc;
arch/arm64/kernel/signal.c
1471
regs->sp = (unsigned long)user->sigframe;
arch/arm64/kernel/signal.c
1472
regs->regs[29] = (unsigned long)&user->next_frame->fp;
arch/arm64/kernel/signal.c
1508
struct rt_sigframe_user_layout user;
arch/arm64/kernel/signal.c
1515
if (get_sigframe(&user, ksig, regs))
arch/arm64/kernel/signal.c
1519
frame = user.sigframe;
arch/arm64/kernel/signal.c
1525
err |= setup_sigframe(&user, regs, set, &ua_state);
arch/arm64/kernel/signal.c
1530
err = setup_return(regs, ksig, &user, usig);
arch/arm64/kernel/signal.c
157
static int __sigframe_alloc(struct rt_sigframe_user_layout *user,
arch/arm64/kernel/signal.c
162
if (padded_size > user->limit - user->size &&
arch/arm64/kernel/signal.c
163
!user->extra_offset &&
arch/arm64/kernel/signal.c
167
user->limit += EXTRA_CONTEXT_SIZE;
arch/arm64/kernel/signal.c
1674
struct rt_sigframe_user_layout user;
arch/arm64/kernel/signal.c
1676
init_user_layout(&user);
arch/arm64/kernel/signal.c
168
ret = __sigframe_alloc(user, &user->extra_offset,
arch/arm64/kernel/signal.c
1682
if (WARN_ON(setup_sigframe_layout(&user, true)))
arch/arm64/kernel/signal.c
1685
signal_minsigstksz = sigframe_size(&user) +
arch/arm64/kernel/signal.c
171
user->limit -= EXTRA_CONTEXT_SIZE;
arch/arm64/kernel/signal.c
176
user->size += TERMINATOR_SIZE;
arch/arm64/kernel/signal.c
182
user->limit = SIGFRAME_MAXSZ - TERMINATOR_SIZE;
arch/arm64/kernel/signal.c
186
if (padded_size > user->limit - user->size)
arch/arm64/kernel/signal.c
189
*offset = user->size;
arch/arm64/kernel/signal.c
190
user->size += padded_size;
arch/arm64/kernel/signal.c
200
static int sigframe_alloc(struct rt_sigframe_user_layout *user,
arch/arm64/kernel/signal.c
203
return __sigframe_alloc(user, offset, size, true);
arch/arm64/kernel/signal.c
207
static int sigframe_alloc_end(struct rt_sigframe_user_layout *user)
arch/arm64/kernel/signal.c
212
user->limit += TERMINATOR_SIZE;
arch/arm64/kernel/signal.c
214
ret = sigframe_alloc(user, &user->end_offset,
arch/arm64/kernel/signal.c
220
user->limit = user->size;
arch/arm64/kernel/signal.c
225
struct rt_sigframe_user_layout const *user, unsigned long offset)
arch/arm64/kernel/signal.c
227
char __user *base = (char __user *)user->sigframe;
arch/arm64/kernel/signal.c
272
struct user_ctxs *user)
arch/arm64/kernel/signal.c
277
if (user->fpsimd_size != sizeof(struct fpsimd_context))
arch/arm64/kernel/signal.c
281
err = __copy_from_user(fpsimd->vregs, &(user->fpsimd->vregs),
arch/arm64/kernel/signal.c
283
__get_user_error(fpsimd->fpsr, &(user->fpsimd->fpsr), err);
arch/arm64/kernel/signal.c
284
__get_user_error(fpsimd->fpcr, &(user->fpsimd->fpcr), err);
arch/arm64/kernel/signal.c
289
static int restore_fpsimd_context(struct user_ctxs *user)
arch/arm64/kernel/signal.c
294
err = read_fpsimd_context(&fpsimd, user);
arch/arm64/kernel/signal.c
318
static int restore_fpmr_context(struct user_ctxs *user)
arch/arm64/kernel/signal.c
323
if (user->fpmr_size != sizeof(*user->fpmr))
arch/arm64/kernel/signal.c
326
__get_user_error(fpmr, &user->fpmr->fpmr, err);
arch/arm64/kernel/signal.c
345
static int restore_poe_context(struct user_ctxs *user,
arch/arm64/kernel/signal.c
351
if (user->poe_size != sizeof(*user->poe))
arch/arm64/kernel/signal.c
354
__get_user_error(por_el0, &(user->poe->por_el0), err);
arch/arm64/kernel/signal.c
398
static int restore_sve_fpsimd_context(struct user_ctxs *user)
arch/arm64/kernel/signal.c
406
if (user->sve_size < sizeof(*user->sve))
arch/arm64/kernel/signal.c
409
__get_user_error(user_vl, &(user->sve->vl), err);
arch/arm64/kernel/signal.c
410
__get_user_error(flags, &(user->sve->flags), err);
arch/arm64/kernel/signal.c
444
if (!sm && user->sve_size == sizeof(*user->sve))
arch/arm64/kernel/signal.c
445
return restore_fpsimd_context(user);
arch/arm64/kernel/signal.c
449
if (user->sve_size < SVE_SIG_CONTEXT_SIZE(vq))
arch/arm64/kernel/signal.c
475
(char __user const *)user->sve +
arch/arm64/kernel/signal.c
481
err = read_fpsimd_context(&fpsimd, user);
arch/arm64/kernel/signal.c
493
static int restore_sve_fpsimd_context(struct user_ctxs *user)
arch/arm64/kernel/signal.c
518
static int restore_tpidr2_context(struct user_ctxs *user)
arch/arm64/kernel/signal.c
523
if (user->tpidr2_size != sizeof(*user->tpidr2))
arch/arm64/kernel/signal.c
526
__get_user_error(tpidr2_el0, &user->tpidr2->tpidr2, err);
arch/arm64/kernel/signal.c
563
static int restore_za_context(struct user_ctxs *user)
arch/arm64/kernel/signal.c
569
if (user->za_size < sizeof(*user->za))
arch/arm64/kernel/signal.c
572
__get_user_error(user_vl, &(user->za->vl), err);
arch/arm64/kernel/signal.c
579
if (user->za_size == sizeof(*user->za)) {
arch/arm64/kernel/signal.c
586
if (user->za_size < ZA_SIG_CONTEXT_SIZE(vq))
arch/arm64/kernel/signal.c
601
(char __user const *)user->za +
arch/arm64/kernel/signal.c
637
static int restore_zt_context(struct user_ctxs *user)
arch/arm64/kernel/signal.c
646
if (user->zt_size != ZT_SIG_CONTEXT_SIZE(1))
arch/arm64/kernel/signal.c
649
if (__copy_from_user(&nregs, &(user->zt->nregs), sizeof(nregs)))
arch/arm64/kernel/signal.c
656
(char __user const *)user->zt +
arch/arm64/kernel/signal.c
669
extern int restore_tpidr2_context(struct user_ctxs *user);
arch/arm64/kernel/signal.c
671
extern int restore_za_context(struct user_ctxs *user);
arch/arm64/kernel/signal.c
673
extern int restore_zt_context(struct user_ctxs *user);
arch/arm64/kernel/signal.c
704
static int restore_gcs_context(struct user_ctxs *user)
arch/arm64/kernel/signal.c
709
if (user->gcs_size != sizeof(*user->gcs))
arch/arm64/kernel/signal.c
712
__get_user_error(gcspr, &user->gcs->gcspr, err);
arch/arm64/kernel/signal.c
713
__get_user_error(enabled, &user->gcs->features_enabled, err);
arch/arm64/kernel/signal.c
749
extern int restore_gcs_context(struct user_ctxs *user);
arch/arm64/kernel/signal.c
753
static int parse_user_sigframe(struct user_ctxs *user,
arch/arm64/kernel/signal.c
764
user->fpsimd = NULL;
arch/arm64/kernel/signal.c
765
user->sve = NULL;
arch/arm64/kernel/signal.c
766
user->tpidr2 = NULL;
arch/arm64/kernel/signal.c
767
user->za = NULL;
arch/arm64/kernel/signal.c
768
user->zt = NULL;
arch/arm64/kernel/signal.c
769
user->fpmr = NULL;
arch/arm64/kernel/signal.c
770
user->poe = NULL;
arch/arm64/kernel/signal.c
771
user->gcs = NULL;
arch/arm64/kernel/signal.c
811
if (user->fpsimd)
arch/arm64/kernel/signal.c
814
user->fpsimd = (struct fpsimd_context __user *)head;
arch/arm64/kernel/signal.c
815
user->fpsimd_size = size;
arch/arm64/kernel/signal.c
826
if (user->poe)
arch/arm64/kernel/signal.c
829
user->poe = (struct poe_context __user *)head;
arch/arm64/kernel/signal.c
830
user->poe_size = size;
arch/arm64/kernel/signal.c
837
if (user->sve)
arch/arm64/kernel/signal.c
840
user->sve = (struct sve_context __user *)head;
arch/arm64/kernel/signal.c
841
user->sve_size = size;
arch/arm64/kernel/signal.c
848
if (user->tpidr2)
arch/arm64/kernel/signal.c
851
user->tpidr2 = (struct tpidr2_context __user *)head;
arch/arm64/kernel/signal.c
852
user->tpidr2_size = size;
arch/arm64/kernel/signal.c
859
if (user->za)
arch/arm64/kernel/signal.c
862
user->za = (struct za_context __user *)head;
arch/arm64/kernel/signal.c
863
user->za_size = size;
arch/arm64/kernel/signal.c
870
if (user->zt)
arch/arm64/kernel/signal.c
873
user->zt = (struct zt_context __user *)head;
arch/arm64/kernel/signal.c
874
user->zt_size = size;
arch/arm64/kernel/signal.c
881
if (user->fpmr)
arch/arm64/kernel/signal.c
884
user->fpmr = (struct fpmr_context __user *)head;
arch/arm64/kernel/signal.c
885
user->fpmr_size = size;
arch/arm64/kernel/signal.c
892
if (user->gcs)
arch/arm64/kernel/signal.c
895
user->gcs = (struct gcs_context __user *)head;
arch/arm64/kernel/signal.c
896
user->gcs_size = size;
arch/arm64/kernel/signal.c
987
struct user_ctxs user;
arch/arm64/kvm/pvtime.c
110
u64 __user *user = (u64 __user *)attr->addr;
arch/arm64/kvm/pvtime.c
119
if (put_user(ipa, user))
arch/arm64/kvm/pvtime.c
78
u64 __user *user = (u64 __user *)attr->addr;
arch/arm64/kvm/pvtime.c
88
if (get_user(ipa, user))
arch/arm64/kvm/sys_regs.c
2058
u8 host, user;
arch/arm64/kvm/sys_regs.c
2065
user = SYS_FIELD_GET(ID_AA64DFR0_EL1, DoubleLock, val);
arch/arm64/kvm/sys_regs.c
2068
user == ID_AA64DFR0_EL1_DoubleLock_IMP;
arch/arm64/kvm/sys_regs.c
2231
#define tgran2_val_allowed(tg, safe, user) \
arch/arm64/kvm/sys_regs.c
2234
u8 __u = SYS_FIELD_GET(ID_AA64MMFR0_EL1, tg, user); \
arch/loongarch/kernel/access-helper.h
10
static inline int __get_addr(unsigned long *a, unsigned long *p, bool user)
arch/loongarch/kernel/access-helper.h
12
return user ? get_user(*a, (unsigned long __user *)p) : get_kernel_nofault(*a, p);
arch/loongarch/kernel/access-helper.h
5
static inline int __get_inst(u32 *i, u32 *p, bool user)
arch/loongarch/kernel/access-helper.h
7
return user ? get_user(*i, (u32 __user *)p) : get_kernel_nofault(*i, p);
arch/loongarch/kernel/traps.c
103
const struct pt_regs *regs, const char *loglvl, bool user)
arch/loongarch/kernel/traps.c
122
if (__get_addr(&stackdata, sp++, user)) {
arch/loongarch/kernel/traps.c
131
show_backtrace(task, regs, loglvl, user);
arch/loongarch/kernel/traps.c
157
static void show_code(unsigned int *pc, bool user)
arch/loongarch/kernel/traps.c
165
if (__get_inst(&insn, pc + i, user)) {
arch/loongarch/kernel/traps.c
630
bool user = user_mode(regs);
arch/loongarch/kernel/traps.c
649
if (__get_inst(&insn.word, (u32 *)era, user))
arch/loongarch/kernel/traps.c
720
bool user = user_mode(regs);
arch/loongarch/kernel/traps.c
729
if (__get_inst(&opcode, (u32 *)era, user))
arch/loongarch/kernel/traps.c
84
const char *loglvl, bool user)
arch/loongarch/kernel/unaligned.c
271
bool user = user_mode(regs);
arch/loongarch/kernel/unaligned.c
278
__get_inst(&insn.word, pc, user);
arch/loongarch/kernel/unaligned.c
439
if (user && !access_ok(addr, size))
arch/loongarch/kernel/unaligned.c
473
if (user)
arch/loongarch/kvm/vcpu.c
1121
u64 __user *user = (u64 __user *)attr->addr;
arch/loongarch/kvm/vcpu.c
1128
if (put_user(gpa, user))
arch/loongarch/kvm/vcpu.c
1157
u64 __user *user = (u64 __user *)attr->addr;
arch/loongarch/kvm/vcpu.c
1162
if (get_user(val, user))
arch/loongarch/kvm/vcpu.c
1184
u64 gpa, __user *user = (u64 __user *)attr->addr;
arch/loongarch/kvm/vcpu.c
1191
if (get_user(gpa, user))
arch/m68k/fpsp040/fpsp.h
63
| because it needs to report an exception back to the user. This
arch/m68k/fpsp040/fpsp.h
83
.set USER_D0,LV+0 | saved user D0
arch/m68k/fpsp040/fpsp.h
84
.set USER_D1,LV+4 | saved user D1
arch/m68k/fpsp040/fpsp.h
85
.set USER_A0,LV+8 | saved user A0
arch/m68k/fpsp040/fpsp.h
86
.set USER_A1,LV+12 | saved user A1
arch/m68k/fpsp040/fpsp.h
87
.set USER_FP0,LV+16 | saved user FP0
arch/m68k/fpsp040/fpsp.h
88
.set USER_FP1,LV+28 | saved user FP1
arch/m68k/fpsp040/fpsp.h
89
.set USER_FP2,LV+40 | saved user FP2
arch/m68k/fpsp040/fpsp.h
90
.set USER_FP3,LV+52 | saved user FP3
arch/m68k/fpsp040/fpsp.h
91
.set USER_FPCR,LV+64 | saved user FPCR
arch/m68k/fpsp040/fpsp.h
94
.set USER_FPSR,LV+68 | saved user FPSR
arch/m68k/fpsp040/fpsp.h
99
.set USER_FPIAR,LV+72 | saved user FPIAR
arch/m68k/ifpsp060/src/isp.S
578
# if exception occurred in user mode, then we have to restore a7 in case it
arch/m68k/math-emu/fp_decode.h
405
| get the absolute short address from user space
arch/m68k/math-emu/fp_decode.h
411
| get the absolute long address from user space
arch/mips/include/asm/unaligned-emul.h
764
#define LoadHWUE(addr, value, res) _LoadHWU(addr, value, res, user)
arch/mips/include/asm/unaligned-emul.h
766
#define LoadWUE(addr, value, res) _LoadWU(addr, value, res, user)
arch/mips/include/asm/unaligned-emul.h
768
#define LoadHWE(addr, value, res) _LoadHW(addr, value, res, user)
arch/mips/include/asm/unaligned-emul.h
770
#define LoadWE(addr, value, res) _LoadW(addr, value, res, user)
arch/mips/include/asm/unaligned-emul.h
774
#define StoreHWE(addr, value, res) _StoreHW(addr, value, res, user)
arch/mips/include/asm/unaligned-emul.h
776
#define StoreWE(addr, value, res) _StoreW(addr, value, res, user)
arch/mips/kernel/access-helper.h
11
static inline int __get_inst16(u16 *i, u16 *p, bool user)
arch/mips/kernel/access-helper.h
13
return user ? get_user(*i, (u16 __user *)p) : get_kernel_nofault(*i, p);
arch/mips/kernel/access-helper.h
16
static inline int __get_inst32(u32 *i, u32 *p, bool user)
arch/mips/kernel/access-helper.h
18
return user ? get_user(*i, (u32 __user *)p) : get_kernel_nofault(*i, p);
arch/mips/kernel/access-helper.h
5
static inline int __get_addr(unsigned long *a, unsigned long *p, bool user)
arch/mips/kernel/access-helper.h
7
return user ? get_user(*a, (unsigned long __user *)p) :
arch/mips/kernel/traps.c
1034
bool user = user_mode(regs);
arch/mips/kernel/traps.c
1041
if (__get_inst16(&instr[0], (u16 *)epc, user))
arch/mips/kernel/traps.c
1052
if (__get_inst16(&instr[1], (u16 *)(epc + 2), user))
arch/mips/kernel/traps.c
1058
if (__get_inst32(&opcode, (u32 *)epc, user))
arch/mips/kernel/traps.c
1121
bool user = user_mode(regs);
arch/mips/kernel/traps.c
1127
if (__get_inst16(&instr[0], (u16 *)(epc + 0), user) ||
arch/mips/kernel/traps.c
1128
__get_inst16(&instr[1], (u16 *)(epc + 2), user))
arch/mips/kernel/traps.c
1135
if (__get_inst32(&opcode, (u32 *)epc, user))
arch/mips/kernel/traps.c
122
bool user)
arch/mips/kernel/traps.c
132
if (__get_addr(&addr, sp++, user)) {
arch/mips/kernel/traps.c
153
const char *loglvl, bool user)
arch/mips/kernel/traps.c
163
show_raw_backtrace(sp, loglvl, user);
arch/mips/kernel/traps.c
179
const struct pt_regs *regs, const char *loglvl, bool user)
arch/mips/kernel/traps.c
198
if (__get_addr(&stackdata, sp++, user)) {
arch/mips/kernel/traps.c
207
show_backtrace(task, regs, loglvl, user);
arch/mips/kernel/traps.c
231
static void show_code(void *pc, bool user)
arch/mips/kernel/traps.c
245
if (__get_inst16(&insn16, pc16 + i, user))
arch/mips/kernel/traps.c
252
if (__get_inst32(&insn32, (u32 *)pc + i, user))
arch/mips/kernel/unaligned.c
1014
if (user && !access_ok(addr, 4 * rvar))
arch/mips/kernel/unaligned.c
1034
if (user && !access_ok(addr, 4 * rvar))
arch/mips/kernel/unaligned.c
1088
if (user && !access_ok(addr, 2))
arch/mips/kernel/unaligned.c
1098
if (user && !access_ok(addr, 2))
arch/mips/kernel/unaligned.c
1108
if (user && !access_ok(addr, 4))
arch/mips/kernel/unaligned.c
1126
if (user && !access_ok(addr, 4))
arch/mips/kernel/unaligned.c
1148
if (user && !access_ok(addr, 8))
arch/mips/kernel/unaligned.c
1162
if (user && !access_ok(addr, 2))
arch/mips/kernel/unaligned.c
1172
if (user && !access_ok(addr, 4))
arch/mips/kernel/unaligned.c
118
bool user = user_mode(regs);
arch/mips/kernel/unaligned.c
1190
if (user && !access_ok(addr, 8))
arch/mips/kernel/unaligned.c
1247
bool user = user_mode(regs);
arch/mips/kernel/unaligned.c
128
__get_inst32(&insn.word, pc, user);
arch/mips/kernel/unaligned.c
1349
if (user && !access_ok(addr, 2))
arch/mips/kernel/unaligned.c
1360
if (user && !access_ok(addr, 2))
arch/mips/kernel/unaligned.c
1373
if (user && !access_ok(addr, 4))
arch/mips/kernel/unaligned.c
1392
if (user && !access_ok(addr, 4))
arch/mips/kernel/unaligned.c
1416
if (user && !access_ok(addr, 8))
arch/mips/kernel/unaligned.c
1431
if (user && !access_ok(addr, 2))
arch/mips/kernel/unaligned.c
1444
if (user && !access_ok(addr, 4))
arch/mips/kernel/unaligned.c
1464
if (user && !access_ok(addr, 8))
arch/mips/kernel/unaligned.c
171
if (user && !access_ok(addr, 4))
arch/mips/kernel/unaligned.c
180
if (user && !access_ok(addr, 2))
arch/mips/kernel/unaligned.c
189
if (user && !access_ok(addr, 2))
arch/mips/kernel/unaligned.c
209
if (user && !access_ok(addr, 4))
arch/mips/kernel/unaligned.c
218
if (user && !access_ok(addr, 2))
arch/mips/kernel/unaligned.c
290
if (user && !access_ok(addr, 2))
arch/mips/kernel/unaligned.c
293
if (IS_ENABLED(CONFIG_EVA) && user)
arch/mips/kernel/unaligned.c
305
if (user && !access_ok(addr, 4))
arch/mips/kernel/unaligned.c
308
if (IS_ENABLED(CONFIG_EVA) && user)
arch/mips/kernel/unaligned.c
320
if (user && !access_ok(addr, 2))
arch/mips/kernel/unaligned.c
323
if (IS_ENABLED(CONFIG_EVA) && user)
arch/mips/kernel/unaligned.c
343
if (user && !access_ok(addr, 4))
arch/mips/kernel/unaligned.c
366
if (user && !access_ok(addr, 8))
arch/mips/kernel/unaligned.c
381
if (user && !access_ok(addr, 2))
arch/mips/kernel/unaligned.c
387
if (IS_ENABLED(CONFIG_EVA) && user)
arch/mips/kernel/unaligned.c
397
if (user && !access_ok(addr, 4))
arch/mips/kernel/unaligned.c
403
if (IS_ENABLED(CONFIG_EVA) && user)
arch/mips/kernel/unaligned.c
421
if (user && !access_ok(addr, 8))
arch/mips/kernel/unaligned.c
629
bool user = user_mode(regs);
arch/mips/kernel/unaligned.c
693
if (user && !access_ok(addr, 8))
arch/mips/kernel/unaligned.c
712
if (user && !access_ok(addr, 8))
arch/mips/kernel/unaligned.c
732
if (user && !access_ok(addr, 16))
arch/mips/kernel/unaligned.c
755
if (user && !access_ok(addr, 16))
arch/mips/kernel/unaligned.c
778
if (user && !access_ok(addr, 4 * (rvar + 1)))
arch/mips/kernel/unaligned.c
781
if (user && !access_ok(addr, 4 * rvar))
arch/mips/kernel/unaligned.c
814
if (user && !access_ok(addr, 4 * (rvar + 1)))
arch/mips/kernel/unaligned.c
817
if (user && !access_ok(addr, 4 * rvar))
arch/mips/kernel/unaligned.c
851
if (user && !access_ok(addr, 8 * (rvar + 1)))
arch/mips/kernel/unaligned.c
854
if (user && !access_ok(addr, 8 * rvar))
arch/mips/kernel/unaligned.c
892
if (user && !access_ok(addr, 8 * (rvar + 1)))
arch/mips/kernel/unaligned.c
895
if (user && !access_ok(addr, 8 * rvar))
arch/mips/mm/c-r4k.c
638
bool user;
arch/mips/mm/c-r4k.c
644
bool user)
arch/mips/mm/c-r4k.c
652
if (user)
arch/mips/mm/c-r4k.c
669
if (user)
arch/mips/mm/c-r4k.c
696
bool user = fir_args->user;
arch/mips/mm/c-r4k.c
698
__local_r4k_flush_icache_range(start, end, type, user);
arch/mips/mm/c-r4k.c
702
bool user)
arch/mips/mm/c-r4k.c
710
args.user = user;
arch/parisc/kernel/traps.c
129
int i, user;
arch/parisc/kernel/traps.c
133
user = user_mode(regs);
arch/parisc/kernel/traps.c
134
level = user ? KERN_DEBUG : KERN_CRIT;
arch/parisc/kernel/traps.c
143
if (user)
arch/parisc/kernel/traps.c
157
if (user) {
arch/powerpc/platforms/powernv/pci-ioda-tce.c
83
static __be64 *pnv_tce(struct iommu_table *tbl, bool user, long idx, bool alloc)
arch/powerpc/platforms/powernv/pci-ioda-tce.c
85
__be64 *tmp = user ? tbl->it_userspace : (__be64 *) tbl->it_base;
arch/riscv/kernel/traps.c
271
bool user = user_mode(regs);
arch/riscv/kernel/traps.c
273
return user ? uprobe_single_step_handler(regs) : kprobe_single_step_handler(regs);
arch/riscv/kernel/traps.c
278
bool user = user_mode(regs);
arch/riscv/kernel/traps.c
280
return user ? uprobe_breakpoint_handler(regs) : kprobe_breakpoint_handler(regs);
arch/s390/kernel/ptrace.c
205
if (addr < offsetof(struct user, regs.acrs)) {
arch/s390/kernel/ptrace.c
210
if (addr == offsetof(struct user, regs.psw.mask)) {
arch/s390/kernel/ptrace.c
216
} else if (addr < offsetof(struct user, regs.orig_gpr2)) {
arch/s390/kernel/ptrace.c
220
offset = addr - offsetof(struct user, regs.acrs);
arch/s390/kernel/ptrace.c
226
if (addr == offsetof(struct user, regs.acrs[15]))
arch/s390/kernel/ptrace.c
231
} else if (addr == offsetof(struct user, regs.orig_gpr2)) {
arch/s390/kernel/ptrace.c
237
} else if (addr < offsetof(struct user, regs.fp_regs)) {
arch/s390/kernel/ptrace.c
244
} else if (addr == offsetof(struct user, regs.fp_regs.fpc)) {
arch/s390/kernel/ptrace.c
251
} else if (addr < offsetof(struct user, regs.fp_regs) + sizeof(s390_fp_regs)) {
arch/s390/kernel/ptrace.c
255
offset = addr - offsetof(struct user, regs.fp_regs.fprs);
arch/s390/kernel/ptrace.c
257
} else if (addr < offsetof(struct user, regs.per_info) + sizeof(per_struct)) {
arch/s390/kernel/ptrace.c
261
addr -= offsetof(struct user, regs.per_info);
arch/s390/kernel/ptrace.c
280
if (addr >= offsetof(struct user, regs.acrs) &&
arch/s390/kernel/ptrace.c
281
addr < offsetof(struct user, regs.orig_gpr2))
arch/s390/kernel/ptrace.c
283
if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
arch/s390/kernel/ptrace.c
328
if (addr < offsetof(struct user, regs.acrs)) {
arch/s390/kernel/ptrace.c
333
if (addr == offsetof(struct user, regs.psw.mask)) {
arch/s390/kernel/ptrace.c
349
addr == offsetof(struct user, regs.gprs[2])) {
arch/s390/kernel/ptrace.c
355
} else if (addr < offsetof(struct user, regs.orig_gpr2)) {
arch/s390/kernel/ptrace.c
359
offset = addr - offsetof(struct user, regs.acrs);
arch/s390/kernel/ptrace.c
366
if (addr == offsetof(struct user, regs.acrs[15]))
arch/s390/kernel/ptrace.c
371
} else if (addr == offsetof(struct user, regs.orig_gpr2)) {
arch/s390/kernel/ptrace.c
377
} else if (addr < offsetof(struct user, regs.fp_regs)) {
arch/s390/kernel/ptrace.c
384
} else if (addr == offsetof(struct user, regs.fp_regs.fpc)) {
arch/s390/kernel/ptrace.c
392
} else if (addr < offsetof(struct user, regs.fp_regs) + sizeof(s390_fp_regs)) {
arch/s390/kernel/ptrace.c
396
offset = addr - offsetof(struct user, regs.fp_regs.fprs);
arch/s390/kernel/ptrace.c
398
} else if (addr < offsetof(struct user, regs.per_info) + sizeof(per_struct)) {
arch/s390/kernel/ptrace.c
402
addr -= offsetof(struct user, regs.per_info);
arch/s390/kernel/ptrace.c
419
if (addr >= offsetof(struct user, regs.acrs) &&
arch/s390/kernel/ptrace.c
420
addr < offsetof(struct user, regs.orig_gpr2))
arch/s390/kernel/ptrace.c
422
if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
arch/s390/kernel/vtime.c
119
u64 timer, clock, user, guest, system, hardirq, softirq;
arch/s390/kernel/vtime.c
143
user = update_tsk_timer(&tsk->thread.user_timer, lc->user_timer);
arch/s390/kernel/vtime.c
148
lc->steal_timer += clock - user - guest - system - hardirq - softirq;
arch/s390/kernel/vtime.c
151
if (user) {
arch/s390/kernel/vtime.c
152
account_user_time(tsk, cputime_to_nsecs(user));
arch/s390/kernel/vtime.c
153
tsk->utimescaled += cputime_to_nsecs(scale_vtime(user));
arch/s390/kernel/vtime.c
168
return virt_timer_forward(user + guest + system + hardirq + softirq);
arch/s390/kvm/pci.c
195
struct user_struct *user = get_uid(current_user());
arch/s390/kvm/pci.c
197
if (user)
arch/s390/kvm/pci.c
198
atomic_long_sub(nr_pages, &user->locked_vm);
arch/s390/kvm/pci.c
205
struct user_struct *user = get_uid(current_user());
arch/s390/kvm/pci.c
210
cur_pages = atomic_long_read(&user->locked_vm);
arch/s390/kvm/pci.c
215
} while (!atomic_long_try_cmpxchg(&user->locked_vm, &cur_pages, new_pages));
arch/sh/kernel/ptrace_32.c
351
addr > sizeof(struct user) - 3)
arch/sh/kernel/ptrace_32.c
356
else if (addr >= offsetof(struct user, fpu) &&
arch/sh/kernel/ptrace_32.c
357
addr < offsetof(struct user, u_fpvalid)) {
arch/sh/kernel/ptrace_32.c
359
if (addr == offsetof(struct user, fpu.fpscr))
arch/sh/kernel/ptrace_32.c
368
index = addr - offsetof(struct user, fpu);
arch/sh/kernel/ptrace_32.c
372
} else if (addr == offsetof(struct user, u_fpvalid))
arch/sh/kernel/ptrace_32.c
391
addr > sizeof(struct user) - 3)
arch/sh/kernel/ptrace_32.c
396
else if (addr >= offsetof(struct user, fpu) &&
arch/sh/kernel/ptrace_32.c
397
addr < offsetof(struct user, u_fpvalid)) {
arch/sh/kernel/ptrace_32.c
402
index = addr - offsetof(struct user, fpu);
arch/sh/kernel/ptrace_32.c
407
} else if (addr == offsetof(struct user, u_fpvalid)) {
arch/um/drivers/vector_kern.h
134
char user[];
arch/x86/include/asm/pgtable.h
33
bool user);
arch/x86/include/asm/pm-trace.h
18
generate_pm_trace(tracedata, user); \
arch/x86/include/asm/pm-trace.h
22
#define TRACE_SUSPEND(user) TRACE_RESUME(user)
arch/x86/include/asm/pm-trace.h
7
#define TRACE_RESUME(user) \
arch/x86/kernel/ptrace.c
1148
if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||
arch/x86/kernel/ptrace.c
1155
else if (addr >= offsetof(struct user, u_debugreg[0]) &&
arch/x86/kernel/ptrace.c
1156
addr <= offsetof(struct user, u_debugreg[7])) {
arch/x86/kernel/ptrace.c
1157
addr -= offsetof(struct user, u_debugreg[0]);
arch/x86/kernel/ptrace.c
1169
if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||
arch/x86/kernel/ptrace.c
1175
else if (addr >= offsetof(struct user, u_debugreg[0]) &&
arch/x86/kernel/ptrace.c
1176
addr <= offsetof(struct user, u_debugreg[7])) {
arch/x86/kernel/ptrace.c
1177
addr -= offsetof(struct user, u_debugreg[0]);
arch/x86/kernel/ptrace.c
750
if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
arch/x86/kernel/ptrace.c
756
else if (addr >= offsetof(struct user, u_debugreg[0]) &&
arch/x86/kernel/ptrace.c
757
addr <= offsetof(struct user, u_debugreg[7])) {
arch/x86/kernel/ptrace.c
758
addr -= offsetof(struct user, u_debugreg[0]);
arch/x86/kernel/ptrace.c
767
if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
arch/x86/kernel/ptrace.c
772
else if (addr >= offsetof(struct user, u_debugreg[0]) &&
arch/x86/kernel/ptrace.c
773
addr <= offsetof(struct user, u_debugreg[7])) {
arch/x86/kernel/ptrace.c
774
addr -= offsetof(struct user, u_debugreg[0]);
arch/x86/kernel/vm86_32.c
112
user = vm86->user_vm86;
arch/x86/kernel/vm86_32.c
114
if (!user_access_begin(user, vm86->vm86plus.is_vm86pus ?
arch/x86/kernel/vm86_32.c
119
unsafe_put_user(regs->pt.bx, &user->regs.ebx, Efault_end);
arch/x86/kernel/vm86_32.c
120
unsafe_put_user(regs->pt.cx, &user->regs.ecx, Efault_end);
arch/x86/kernel/vm86_32.c
121
unsafe_put_user(regs->pt.dx, &user->regs.edx, Efault_end);
arch/x86/kernel/vm86_32.c
122
unsafe_put_user(regs->pt.si, &user->regs.esi, Efault_end);
arch/x86/kernel/vm86_32.c
123
unsafe_put_user(regs->pt.di, &user->regs.edi, Efault_end);
arch/x86/kernel/vm86_32.c
124
unsafe_put_user(regs->pt.bp, &user->regs.ebp, Efault_end);
arch/x86/kernel/vm86_32.c
125
unsafe_put_user(regs->pt.ax, &user->regs.eax, Efault_end);
arch/x86/kernel/vm86_32.c
126
unsafe_put_user(regs->pt.ip, &user->regs.eip, Efault_end);
arch/x86/kernel/vm86_32.c
127
unsafe_put_user(regs->pt.cs, &user->regs.cs, Efault_end);
arch/x86/kernel/vm86_32.c
128
unsafe_put_user(regs->pt.flags, &user->regs.eflags, Efault_end);
arch/x86/kernel/vm86_32.c
129
unsafe_put_user(regs->pt.sp, &user->regs.esp, Efault_end);
arch/x86/kernel/vm86_32.c
130
unsafe_put_user(regs->pt.ss, &user->regs.ss, Efault_end);
arch/x86/kernel/vm86_32.c
131
unsafe_put_user(regs->es, &user->regs.es, Efault_end);
arch/x86/kernel/vm86_32.c
132
unsafe_put_user(regs->ds, &user->regs.ds, Efault_end);
arch/x86/kernel/vm86_32.c
133
unsafe_put_user(regs->fs, &user->regs.fs, Efault_end);
arch/x86/kernel/vm86_32.c
134
unsafe_put_user(regs->gs, &user->regs.gs, Efault_end);
arch/x86/kernel/vm86_32.c
99
struct vm86plus_struct __user *user;
arch/x86/kvm/mmu/mmu_internal.h
237
const bool user;
arch/x86/kvm/mmu/mmu_internal.h
351
.user = err & PFERR_USER_MASK,
arch/x86/kvm/mmu/paging_tmpl.h
815
!is_cr0_wp(vcpu->arch.mmu) && !fault->user && fault->slot) {
arch/x86/mm/dump_pagetables.c
480
bool user)
arch/x86/mm/dump_pagetables.c
484
if (user && boot_cpu_has(X86_FEATURE_PTI))
arch/x86/um/ptrace_32.c
107
else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
arch/x86/um/ptrace_32.c
108
(addr <= offsetof(struct user, u_debugreg[7]))) {
arch/x86/um/ptrace_32.c
109
addr -= offsetof(struct user, u_debugreg[0]);
arch/x86/um/ptrace_32.c
163
else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
arch/x86/um/ptrace_32.c
164
(addr <= offsetof(struct user, u_debugreg[7]))) {
arch/x86/um/ptrace_32.c
165
addr -= offsetof(struct user, u_debugreg[0]);
arch/x86/um/ptrace_64.c
119
else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
arch/x86/um/ptrace_64.c
120
(addr <= offsetof(struct user, u_debugreg[7]))) {
arch/x86/um/ptrace_64.c
121
addr -= offsetof(struct user, u_debugreg[0]);
arch/x86/um/ptrace_64.c
183
else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
arch/x86/um/ptrace_64.c
184
(addr <= offsetof(struct user, u_debugreg[7]))) {
arch/x86/um/ptrace_64.c
185
addr -= offsetof(struct user, u_debugreg[0]);
arch/xtensa/include/asm/elf.h
180
xtregs_user_t user;
arch/xtensa/kernel/ptrace.c
132
newregs->user = ti->xtregs_user;
arch/xtensa/kernel/ptrace.c
170
ti->xtregs_user = newregs->user;
arch/xtensa/kernel/signal.c
173
err |= __copy_to_user(&frame->xtregs.user, &ti->xtregs_user,
arch/xtensa/kernel/signal.c
235
err |= __copy_from_user(&ti->xtregs_user, &frame->xtregs.user,
arch/xtensa/kernel/signal.c
41
xtregs_user_t user;
block/blk-iocost.c
3221
bool enable, user;
block/blk-iocost.c
3254
user = ioc->user_qos_params;
block/blk-iocost.c
3274
user = false;
block/blk-iocost.c
3276
user = true;
block/blk-iocost.c
3316
user = true;
block/blk-iocost.c
3332
if (user) {
block/blk-iocost.c
3414
bool user;
block/blk-iocost.c
3444
user = ioc->user_cost_model;
block/blk-iocost.c
3459
user = false;
block/blk-iocost.c
3461
user = true;
block/blk-iocost.c
3478
user = true;
block/blk-iocost.c
3481
if (user) {
block/ioprio.c
110
user = current_user();
block/ioprio.c
112
user = find_user(uid);
block/ioprio.c
114
if (!user)
block/ioprio.c
127
free_uid(user);
block/ioprio.c
183
struct user_struct *user;
block/ioprio.c
220
user = current_user();
block/ioprio.c
222
user = find_user(uid);
block/ioprio.c
224
if (!user)
block/ioprio.c
228
if (!uid_eq(task_uid(p), user->uid) ||
block/ioprio.c
241
free_uid(user);
block/ioprio.c
68
struct user_struct *user;
drivers/accel/ivpu/ivpu_drv.c
175
args->value = vdev->hw->ranges.user.start;
drivers/accel/ivpu/ivpu_fw.c
672
boot_params->global_aliased_pio_base = vdev->hw->ranges.user.start;
drivers/accel/ivpu/ivpu_fw.c
673
boot_params->global_aliased_pio_size = ivpu_hw_range_size(&vdev->hw->ranges.user);
drivers/accel/ivpu/ivpu_gem.c
300
range = &vdev->hw->ranges.user;
drivers/accel/ivpu/ivpu_hw.c
178
ivpu_hw_range_init(vdev, &vdev->hw->ranges.user, 0xa0000000, 511 * SZ_1M);
drivers/accel/ivpu/ivpu_hw.c
185
ivpu_hw_range_init(vdev, &vdev->hw->ranges.user, 0x100000000, SZ_256G);
drivers/accel/ivpu/ivpu_hw.c
186
vdev->hw->ranges.dma = vdev->hw->ranges.user;
drivers/accel/ivpu/ivpu_hw.h
26
struct ivpu_addr_range user;
drivers/accel/ivpu/ivpu_job.c
40
cmdq->primary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &vdev->hw->ranges.user,
drivers/accel/ivpu/ivpu_mmu_context.c
591
start = min_t(u64, vdev->hw->ranges.user.start, vdev->hw->ranges.shave.start);
drivers/accel/ivpu/ivpu_mmu_context.c
592
end = max_t(u64, vdev->hw->ranges.user.end, vdev->hw->ranges.dma.end);
drivers/accessibility/speakup/i18n.c
513
static bool fmt_validate(char *template, char *user)
drivers/accessibility/speakup/i18n.c
518
char *user_ptr = user;
drivers/acpi/acpi_ipmi.c
118
struct ipmi_user *user;
drivers/acpi/acpi_ipmi.c
133
ipmi_device, &user);
drivers/acpi/acpi_ipmi.c
139
ipmi_device->user_interface = user;
drivers/acpi/acpi_ipmi.c
383
if (msg->user != ipmi_device->user_interface) {
drivers/acpi/acpi_ipmi.c
386
msg->user, ipmi_device->user_interface);
drivers/acpi/processor_throttling.c
233
if (p_limit->user.tx > target_state)
drivers/acpi/processor_throttling.c
234
target_state = p_limit->user.tx;
drivers/acpi/processor_throttling.c
327
if (limit->user.tx > target_state)
drivers/acpi/processor_throttling.c
328
target_state = limit->user.tx;
drivers/base/power/trace.c
167
void generate_pm_trace(const void *tracedata, unsigned int user)
drivers/base/power/trace.c
176
user_hash_value = user % USERHASH;
drivers/base/power/trace.c
287
unsigned int user, file, dev;
drivers/base/power/trace.c
292
user = val % USERHASH;
drivers/base/power/trace.c
298
pr_info(" Magic number: %d:%d:%d\n", user, file, dev);
drivers/base/power/trace.c
86
static int set_magic_time(unsigned int user, unsigned int file, unsigned int device)
drivers/base/power/trace.c
88
unsigned int n = user + USERHASH*(file + FILEHASH*device);
drivers/char/ipmi/ipmi_devintf.c
100
&priv->user);
drivers/char/ipmi/ipmi_devintf.c
127
ipmi_destroy_user(priv->user);
drivers/char/ipmi/ipmi_devintf.c
137
static int handle_send_req(struct ipmi_user *user,
drivers/char/ipmi/ipmi_devintf.c
182
rv = ipmi_request_settime(user,
drivers/char/ipmi/ipmi_devintf.c
29
struct ipmi_user *user;
drivers/char/ipmi/ipmi_devintf.c
317
rv = handle_send_req(priv->user, &req, retries, retry_time_ms);
drivers/char/ipmi/ipmi_devintf.c
330
rv = handle_send_req(priv->user,
drivers/char/ipmi/ipmi_devintf.c
359
rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd,
drivers/char/ipmi/ipmi_devintf.c
373
rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd,
drivers/char/ipmi/ipmi_devintf.c
387
rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd,
drivers/char/ipmi/ipmi_devintf.c
401
rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd,
drivers/char/ipmi/ipmi_devintf.c
415
rv = ipmi_set_gets_events(priv->user, val);
drivers/char/ipmi/ipmi_devintf.c
429
rv = ipmi_set_my_address(priv->user, 0, val);
drivers/char/ipmi/ipmi_devintf.c
438
rv = ipmi_get_my_address(priv->user, 0, &rval);
drivers/char/ipmi/ipmi_devintf.c
460
rv = ipmi_set_my_LUN(priv->user, 0, val);
drivers/char/ipmi/ipmi_devintf.c
469
rv = ipmi_get_my_LUN(priv->user, 0, &rval);
drivers/char/ipmi/ipmi_devintf.c
491
return ipmi_set_my_address(priv->user, val.channel, val.value);
drivers/char/ipmi/ipmi_devintf.c
503
rv = ipmi_get_my_address(priv->user, val.channel, &val.value);
drivers/char/ipmi/ipmi_devintf.c
523
rv = ipmi_set_my_LUN(priv->user, val.channel, val.value);
drivers/char/ipmi/ipmi_devintf.c
536
rv = ipmi_get_my_LUN(priv->user, val.channel, &val.value);
drivers/char/ipmi/ipmi_devintf.c
586
mode = ipmi_get_maintenance_mode(priv->user);
drivers/char/ipmi/ipmi_devintf.c
603
rv = ipmi_set_maintenance_mode(priv->user, mode);
drivers/char/ipmi/ipmi_devintf.c
739
return handle_send_req(priv->user, &rp,
drivers/char/ipmi/ipmi_devintf.c
752
return handle_send_req(priv->user, &sp.req,
drivers/char/ipmi/ipmi_msghandler.c
1230
struct ipmi_user **user)
drivers/char/ipmi/ipmi_msghandler.c
1313
*user = new_user;
drivers/char/ipmi/ipmi_msghandler.c
1343
static void _ipmi_destroy_user(struct ipmi_user *user)
drivers/char/ipmi/ipmi_msghandler.c
1345
struct ipmi_smi *intf = user->intf;
drivers/char/ipmi/ipmi_msghandler.c
1351
if (!refcount_dec_if_one(&user->destroyed))
drivers/char/ipmi/ipmi_msghandler.c
1354
if (user->handler->shutdown)
drivers/char/ipmi/ipmi_msghandler.c
1355
user->handler->shutdown(user->handler_data);
drivers/char/ipmi/ipmi_msghandler.c
1357
if (user->handler->ipmi_watchdog_pretimeout)
drivers/char/ipmi/ipmi_msghandler.c
1360
if (user->gets_events)
drivers/char/ipmi/ipmi_msghandler.c
1364
list_del(&user->link);
drivers/char/ipmi/ipmi_msghandler.c
1370
&& (intf->seq_table[i].recv_msg->user == user)) {
drivers/char/ipmi/ipmi_msghandler.c
1387
if (rcvr->user == user) {
drivers/char/ipmi/ipmi_msghandler.c
1402
if (msg->user != user)
drivers/char/ipmi/ipmi_msghandler.c
1409
release_ipmi_user(user);
drivers/char/ipmi/ipmi_msghandler.c
1412
void ipmi_destroy_user(struct ipmi_user *user)
drivers/char/ipmi/ipmi_msghandler.c
1414
struct ipmi_smi *intf = user->intf;
drivers/char/ipmi/ipmi_msghandler.c
1417
_ipmi_destroy_user(user);
drivers/char/ipmi/ipmi_msghandler.c
1420
kref_put(&user->refcount, free_ipmi_user);
drivers/char/ipmi/ipmi_msghandler.c
1424
int ipmi_get_version(struct ipmi_user *user,
drivers/char/ipmi/ipmi_msghandler.c
1431
user = acquire_ipmi_user(user);
drivers/char/ipmi/ipmi_msghandler.c
1432
if (!user)
drivers/char/ipmi/ipmi_msghandler.c
1435
rv = bmc_get_device_id(user->intf, NULL, &id, NULL, NULL);
drivers/char/ipmi/ipmi_msghandler.c
1440
release_ipmi_user(user);
drivers/char/ipmi/ipmi_msghandler.c
1446
int ipmi_set_my_address(struct ipmi_user *user,
drivers/char/ipmi/ipmi_msghandler.c
1452
user = acquire_ipmi_user(user);
drivers/char/ipmi/ipmi_msghandler.c
1453
if (!user)
drivers/char/ipmi/ipmi_msghandler.c
1460
user->intf->addrinfo[channel].address = address;
drivers/char/ipmi/ipmi_msghandler.c
1462
release_ipmi_user(user);
drivers/char/ipmi/ipmi_msghandler.c
1468
int ipmi_get_my_address(struct ipmi_user *user,
drivers/char/ipmi/ipmi_msghandler.c
1474
user = acquire_ipmi_user(user);
drivers/char/ipmi/ipmi_msghandler.c
1475
if (!user)
drivers/char/ipmi/ipmi_msghandler.c
1482
*address = user->intf->addrinfo[channel].address;
drivers/char/ipmi/ipmi_msghandler.c
1484
release_ipmi_user(user);
drivers/char/ipmi/ipmi_msghandler.c
1490
int ipmi_set_my_LUN(struct ipmi_user *user,
drivers/char/ipmi/ipmi_msghandler.c
1496
user = acquire_ipmi_user(user);
drivers/char/ipmi/ipmi_msghandler.c
1497
if (!user)
drivers/char/ipmi/ipmi_msghandler.c
1504
user->intf->addrinfo[channel].lun = LUN & 0x3;
drivers/char/ipmi/ipmi_msghandler.c
1506
release_ipmi_user(user);
drivers/char/ipmi/ipmi_msghandler.c
1512
int ipmi_get_my_LUN(struct ipmi_user *user,
drivers/char/ipmi/ipmi_msghandler.c
1518
user = acquire_ipmi_user(user);
drivers/char/ipmi/ipmi_msghandler.c
1519
if (!user)
drivers/char/ipmi/ipmi_msghandler.c
1526
*address = user->intf->addrinfo[channel].lun;
drivers/char/ipmi/ipmi_msghandler.c
1528
release_ipmi_user(user);
drivers/char/ipmi/ipmi_msghandler.c
1534
int ipmi_get_maintenance_mode(struct ipmi_user *user)
drivers/char/ipmi/ipmi_msghandler.c
1539
user = acquire_ipmi_user(user);
drivers/char/ipmi/ipmi_msghandler.c
1540
if (!user)
drivers/char/ipmi/ipmi_msghandler.c
1543
spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
drivers/char/ipmi/ipmi_msghandler.c
1544
mode = user->intf->maintenance_mode;
drivers/char/ipmi/ipmi_msghandler.c
1545
spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
drivers/char/ipmi/ipmi_msghandler.c
1546
release_ipmi_user(user);
drivers/char/ipmi/ipmi_msghandler.c
1566
int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
drivers/char/ipmi/ipmi_msghandler.c
1570
struct ipmi_smi *intf = user->intf;
drivers/char/ipmi/ipmi_msghandler.c
1572
user = acquire_ipmi_user(user);
drivers/char/ipmi/ipmi_msghandler.c
1573
if (!user)
drivers/char/ipmi/ipmi_msghandler.c
1603
release_ipmi_user(user);
drivers/char/ipmi/ipmi_msghandler.c
1609
int ipmi_set_gets_events(struct ipmi_user *user, bool val)
drivers/char/ipmi/ipmi_msghandler.c
1611
struct ipmi_smi *intf = user->intf;
drivers/char/ipmi/ipmi_msghandler.c
1615
user = acquire_ipmi_user(user);
drivers/char/ipmi/ipmi_msghandler.c
1616
if (!user)
drivers/char/ipmi/ipmi_msghandler.c
1622
if (user->gets_events == val)
drivers/char/ipmi/ipmi_msghandler.c
1625
user->gets_events = val;
drivers/char/ipmi/ipmi_msghandler.c
1635
while (user->gets_events && !list_empty(&intf->waiting_events)) {
drivers/char/ipmi/ipmi_msghandler.c
1645
ipmi_set_recv_msg_user(msg, user);
drivers/char/ipmi/ipmi_msghandler.c
1652
release_ipmi_user(user);
drivers/char/ipmi/ipmi_msghandler.c
1690
int ipmi_register_for_cmd(struct ipmi_user *user,
drivers/char/ipmi/ipmi_msghandler.c
1695
struct ipmi_smi *intf = user->intf;
drivers/char/ipmi/ipmi_msghandler.c
1699
user = acquire_ipmi_user(user);
drivers/char/ipmi/ipmi_msghandler.c
1700
if (!user)
drivers/char/ipmi/ipmi_msghandler.c
1711
rcvr->user = user;
drivers/char/ipmi/ipmi_msghandler.c
1729
release_ipmi_user(user);
drivers/char/ipmi/ipmi_msghandler.c
1735
int ipmi_unregister_for_cmd(struct ipmi_user *user,
drivers/char/ipmi/ipmi_msghandler.c
1740
struct ipmi_smi *intf = user->intf;
drivers/char/ipmi/ipmi_msghandler.c
1745
user = acquire_ipmi_user(user);
drivers/char/ipmi/ipmi_msghandler.c
1746
if (!user)
drivers/char/ipmi/ipmi_msghandler.c
1756
if (rcvr->user == user) {
drivers/char/ipmi/ipmi_msghandler.c
1768
release_ipmi_user(user);
drivers/char/ipmi/ipmi_msghandler.c
206
struct ipmi_user *user;
drivers/char/ipmi/ipmi_msghandler.c
2308
static int i_ipmi_request(struct ipmi_user *user,
drivers/char/ipmi/ipmi_msghandler.c
2330
recv_msg->user = user;
drivers/char/ipmi/ipmi_msghandler.c
2331
if (user) {
drivers/char/ipmi/ipmi_msghandler.c
2332
atomic_inc(&user->nr_msgs);
drivers/char/ipmi/ipmi_msghandler.c
2334
kref_get(&user->refcount);
drivers/char/ipmi/ipmi_msghandler.c
2337
recv_msg = ipmi_alloc_recv_msg(user);
drivers/char/ipmi/ipmi_msghandler.c
2440
int ipmi_request_settime(struct ipmi_user *user,
drivers/char/ipmi/ipmi_msghandler.c
2452
if (!user)
drivers/char/ipmi/ipmi_msghandler.c
2455
user = acquire_ipmi_user(user);
drivers/char/ipmi/ipmi_msghandler.c
2456
if (!user)
drivers/char/ipmi/ipmi_msghandler.c
2459
rv = check_addr(user->intf, addr, &saddr, &lun);
drivers/char/ipmi/ipmi_msghandler.c
2461
rv = i_ipmi_request(user,
drivers/char/ipmi/ipmi_msghandler.c
2462
user->intf,
drivers/char/ipmi/ipmi_msghandler.c
2474
release_ipmi_user(user);
drivers/char/ipmi/ipmi_msghandler.c
2479
int ipmi_request_supply_msgs(struct ipmi_user *user,
drivers/char/ipmi/ipmi_msghandler.c
2491
if (!user)
drivers/char/ipmi/ipmi_msghandler.c
2494
user = acquire_ipmi_user(user);
drivers/char/ipmi/ipmi_msghandler.c
2495
if (!user)
drivers/char/ipmi/ipmi_msghandler.c
2498
rv = check_addr(user->intf, addr, &saddr, &lun);
drivers/char/ipmi/ipmi_msghandler.c
2500
rv = i_ipmi_request(user,
drivers/char/ipmi/ipmi_msghandler.c
2501
user->intf,
drivers/char/ipmi/ipmi_msghandler.c
2513
release_ipmi_user(user);
drivers/char/ipmi/ipmi_msghandler.c
3549
void ipmi_poll_interface(struct ipmi_user *user)
drivers/char/ipmi/ipmi_msghandler.c
3551
ipmi_poll(user->intf);
drivers/char/ipmi/ipmi_msghandler.c
3572
struct ipmi_user *user;
drivers/char/ipmi/ipmi_msghandler.c
3576
list_for_each_entry(user, &intf->users, link)
drivers/char/ipmi/ipmi_msghandler.c
3577
count += atomic_read(&user->nr_msgs);
drivers/char/ipmi/ipmi_msghandler.c
3863
struct ipmi_user *user = list_first_entry(&intf->users,
drivers/char/ipmi/ipmi_msghandler.c
3866
_ipmi_destroy_user(user);
drivers/char/ipmi/ipmi_msghandler.c
3949
struct ipmi_user *user = NULL;
drivers/char/ipmi/ipmi_msghandler.c
3971
user = rcvr->user;
drivers/char/ipmi/ipmi_msghandler.c
3972
recv_msg = ipmi_alloc_recv_msg(user);
drivers/char/ipmi/ipmi_msghandler.c
3976
if (user == NULL) {
drivers/char/ipmi/ipmi_msghandler.c
4048
struct ipmi_user *user = NULL;
drivers/char/ipmi/ipmi_msghandler.c
4058
user = rcvr->user;
drivers/char/ipmi/ipmi_msghandler.c
4059
recv_msg = ipmi_alloc_recv_msg(user);
drivers/char/ipmi/ipmi_msghandler.c
4063
if (user == NULL) {
drivers/char/ipmi/ipmi_msghandler.c
41
static struct ipmi_recv_msg *ipmi_alloc_recv_msg(struct ipmi_user *user);
drivers/char/ipmi/ipmi_msghandler.c
4225
struct ipmi_user *user = NULL;
drivers/char/ipmi/ipmi_msghandler.c
4247
user = rcvr->user;
drivers/char/ipmi/ipmi_msghandler.c
4248
recv_msg = ipmi_alloc_recv_msg(user);
drivers/char/ipmi/ipmi_msghandler.c
4252
if (user == NULL) {
drivers/char/ipmi/ipmi_msghandler.c
43
struct ipmi_user *user);
drivers/char/ipmi/ipmi_msghandler.c
4337
struct ipmi_user *user = NULL;
drivers/char/ipmi/ipmi_msghandler.c
4367
user = rcvr->user;
drivers/char/ipmi/ipmi_msghandler.c
4368
recv_msg = ipmi_alloc_recv_msg(user);
drivers/char/ipmi/ipmi_msghandler.c
4372
if (user == NULL) {
drivers/char/ipmi/ipmi_msghandler.c
4446
struct ipmi_user *user;
drivers/char/ipmi/ipmi_msghandler.c
4471
list_for_each_entry(user, &intf->users, link) {
drivers/char/ipmi/ipmi_msghandler.c
4472
if (!user->gets_events)
drivers/char/ipmi/ipmi_msghandler.c
4475
recv_msg = ipmi_alloc_recv_msg(user);
drivers/char/ipmi/ipmi_msghandler.c
4480
user = recv_msg->user;
drivers/char/ipmi/ipmi_msghandler.c
4483
kref_put(&user->refcount, free_ipmi_user);
drivers/char/ipmi/ipmi_msghandler.c
4916
struct ipmi_user *user;
drivers/char/ipmi/ipmi_msghandler.c
4919
list_for_each_entry(user, &intf->users, link) {
drivers/char/ipmi/ipmi_msghandler.c
4920
if (user->handler->ipmi_watchdog_pretimeout)
drivers/char/ipmi/ipmi_msghandler.c
4921
user->handler->ipmi_watchdog_pretimeout(
drivers/char/ipmi/ipmi_msghandler.c
4922
user->handler_data);
drivers/char/ipmi/ipmi_msghandler.c
4936
struct ipmi_user *user = msg->user;
drivers/char/ipmi/ipmi_msghandler.c
4940
if (refcount_read(&user->destroyed) == 0)
drivers/char/ipmi/ipmi_msghandler.c
4943
user->handler->ipmi_recv_hndl(msg, user->handler_data);
drivers/char/ipmi/ipmi_msghandler.c
5264
static struct ipmi_recv_msg *ipmi_alloc_recv_msg(struct ipmi_user *user)
drivers/char/ipmi/ipmi_msghandler.c
5268
if (user) {
drivers/char/ipmi/ipmi_msghandler.c
5269
if (atomic_add_return(1, &user->nr_msgs) > max_msgs_per_user) {
drivers/char/ipmi/ipmi_msghandler.c
5270
atomic_dec(&user->nr_msgs);
drivers/char/ipmi/ipmi_msghandler.c
5277
if (user)
drivers/char/ipmi/ipmi_msghandler.c
5278
atomic_dec(&user->nr_msgs);
drivers/char/ipmi/ipmi_msghandler.c
5282
rv->user = user;
drivers/char/ipmi/ipmi_msghandler.c
5284
if (user)
drivers/char/ipmi/ipmi_msghandler.c
5285
kref_get(&user->refcount);
drivers/char/ipmi/ipmi_msghandler.c
5292
if (msg->user && !oops_in_progress) {
drivers/char/ipmi/ipmi_msghandler.c
5293
atomic_dec(&msg->user->nr_msgs);
drivers/char/ipmi/ipmi_msghandler.c
5294
kref_put(&msg->user->refcount, free_ipmi_user);
drivers/char/ipmi/ipmi_msghandler.c
5301
struct ipmi_user *user)
drivers/char/ipmi/ipmi_msghandler.c
5303
WARN_ON_ONCE(msg->user); /* User should not be set. */
drivers/char/ipmi/ipmi_msghandler.c
5304
msg->user = user;
drivers/char/ipmi/ipmi_msghandler.c
5305
atomic_inc(&user->nr_msgs);
drivers/char/ipmi/ipmi_msghandler.c
5306
kref_get(&user->refcount);
drivers/char/ipmi/ipmi_msghandler.c
5356
void ipmi_panic_request_and_wait(struct ipmi_user *user,
drivers/char/ipmi/ipmi_msghandler.c
5360
user->intf->run_to_completion = 1;
drivers/char/ipmi/ipmi_msghandler.c
5361
_ipmi_panic_request_and_wait(user->intf, addr, msg);
drivers/char/ipmi/ipmi_msghandler.c
5541
struct ipmi_user *user;
drivers/char/ipmi/ipmi_msghandler.c
5578
list_for_each_entry(user, &intf->users, link) {
drivers/char/ipmi/ipmi_msghandler.c
5579
if (user->handler->ipmi_panic_handler)
drivers/char/ipmi/ipmi_msghandler.c
5580
user->handler->ipmi_panic_handler(
drivers/char/ipmi/ipmi_msghandler.c
5581
user->handler_data);
drivers/char/ipmi/ipmi_msghandler.c
623
struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
drivers/char/ipmi/ipmi_msghandler.c
626
owner = user->intf->owner;
drivers/char/ipmi/ipmi_msghandler.c
627
kref_put(&user->intf->refcount, intf_free);
drivers/char/ipmi/ipmi_msghandler.c
629
vfree(user);
drivers/char/ipmi/ipmi_msghandler.c
632
static void release_ipmi_user(struct ipmi_user *user)
drivers/char/ipmi/ipmi_msghandler.c
634
kref_put(&user->refcount, free_ipmi_user);
drivers/char/ipmi/ipmi_msghandler.c
637
static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user)
drivers/char/ipmi/ipmi_msghandler.c
639
if (!kref_get_unless_zero(&user->refcount))
drivers/char/ipmi/ipmi_msghandler.c
641
return user;
drivers/char/ipmi/ipmi_msghandler.c
966
if (!msg->user) {
drivers/char/ipmi/ipmi_poweroff.c
118
static int ipmi_request_wait_for_response(struct ipmi_user *user,
drivers/char/ipmi/ipmi_poweroff.c
127
rv = ipmi_request_supply_msgs(user, addr, 0, send_msg, &comp,
drivers/char/ipmi/ipmi_poweroff.c
138
static int ipmi_request_in_rc_mode(struct ipmi_user *user,
drivers/char/ipmi/ipmi_poweroff.c
145
rv = ipmi_request_supply_msgs(user, addr, 0, send_msg, NULL,
drivers/char/ipmi/ipmi_poweroff.c
156
ipmi_poll_interface(user);
drivers/char/ipmi/ipmi_poweroff.c
178
static void (*atca_oem_poweroff_hook)(struct ipmi_user *user);
drivers/char/ipmi/ipmi_poweroff.c
180
static void pps_poweroff_atca(struct ipmi_user *user)
drivers/char/ipmi/ipmi_poweroff.c
198
rv = ipmi_request_in_rc_mode(user,
drivers/char/ipmi/ipmi_poweroff.c
207
static int ipmi_atca_detect(struct ipmi_user *user)
drivers/char/ipmi/ipmi_poweroff.c
229
rv = ipmi_request_wait_for_response(user,
drivers/char/ipmi/ipmi_poweroff.c
242
static void ipmi_poweroff_atca(struct ipmi_user *user)
drivers/char/ipmi/ipmi_poweroff.c
269
rv = ipmi_request_in_rc_mode(user,
drivers/char/ipmi/ipmi_poweroff.c
285
atca_oem_poweroff_hook(user);
drivers/char/ipmi/ipmi_poweroff.c
306
static int ipmi_cpi1_detect(struct ipmi_user *user)
drivers/char/ipmi/ipmi_poweroff.c
312
static void ipmi_poweroff_cpi1(struct ipmi_user *user)
drivers/char/ipmi/ipmi_poweroff.c
340
rv = ipmi_request_in_rc_mode(user,
drivers/char/ipmi/ipmi_poweroff.c
355
rv = ipmi_request_in_rc_mode(user,
drivers/char/ipmi/ipmi_poweroff.c
378
ipmi_request_in_rc_mode(user,
drivers/char/ipmi/ipmi_poweroff.c
390
rv = ipmi_request_in_rc_mode(user,
drivers/char/ipmi/ipmi_poweroff.c
404
rv = ipmi_request_in_rc_mode(user,
drivers/char/ipmi/ipmi_poweroff.c
421
static int ipmi_dell_chassis_detect(struct ipmi_user *user)
drivers/char/ipmi/ipmi_poweroff.c
442
static int ipmi_hp_chassis_detect(struct ipmi_user *user)
drivers/char/ipmi/ipmi_poweroff.c
45
static void (*specific_poweroff_func)(struct ipmi_user *user);
drivers/char/ipmi/ipmi_poweroff.c
458
static int ipmi_chassis_detect(struct ipmi_user *user)
drivers/char/ipmi/ipmi_poweroff.c
464
static void ipmi_poweroff_chassis(struct ipmi_user *user)
drivers/char/ipmi/ipmi_poweroff.c
493
rv = ipmi_request_in_rc_mode(user,
drivers/char/ipmi/ipmi_poweroff.c
514
int (*detect)(struct ipmi_user *user);
drivers/char/ipmi/ipmi_poweroff.c
515
void (*poweroff_func)(struct ipmi_user *user);
drivers/clk/clk.c
819
struct clk *user;
drivers/clk/clk.c
826
hlist_for_each_entry(user, &core->clks, clks_node)
drivers/clk/clk.c
827
if (min_rate > user->max_rate || max_rate < user->min_rate)
drivers/clk/ti/apll.c
128
static void __init omap_clk_register_apll(void *user,
drivers/clk/ti/apll.c
131
struct clk_hw *hw = user;
drivers/clk/ti/clk.c
249
void *user;
drivers/clk/ti/clk.c
265
int __init ti_clk_retry_init(struct device_node *node, void *user,
drivers/clk/ti/clk.c
277
retry->user = user;
drivers/clk/ti/clk.c
461
retry->func(retry->user, retry->node);
drivers/clk/ti/clock.h
221
int ti_clk_retry_init(struct device_node *node, void *user,
drivers/clk/ti/composite.c
111
static void __init _register_composite(void *user,
drivers/clk/ti/composite.c
114
struct clk_hw *hw = user;
drivers/clk/ti/dpll.c
145
static void __init _register_dpll(void *user,
drivers/clk/ti/dpll.c
148
struct clk_hw *hw = user;
drivers/clocksource/em_sti.c
148
static int em_sti_start(struct em_sti_priv *p, unsigned int user)
drivers/clocksource/em_sti.c
160
p->active[user] = 1;
drivers/clocksource/em_sti.c
166
static void em_sti_stop(struct em_sti_priv *p, unsigned int user)
drivers/clocksource/em_sti.c
173
p->active[user] = 0;
drivers/crypto/ccree/cc_cipher.c
202
ctx_p->user.key = kzalloc(max_key_buf_size, GFP_KERNEL);
drivers/crypto/ccree/cc_cipher.c
203
if (!ctx_p->user.key)
drivers/crypto/ccree/cc_cipher.c
207
ctx_p->user.key);
drivers/crypto/ccree/cc_cipher.c
210
ctx_p->user.key_dma_addr = dma_map_single(dev, ctx_p->user.key,
drivers/crypto/ccree/cc_cipher.c
213
if (dma_mapping_error(dev, ctx_p->user.key_dma_addr)) {
drivers/crypto/ccree/cc_cipher.c
215
max_key_buf_size, ctx_p->user.key);
drivers/crypto/ccree/cc_cipher.c
219
max_key_buf_size, ctx_p->user.key, &ctx_p->user.key_dma_addr);
drivers/crypto/ccree/cc_cipher.c
224
kfree(ctx_p->user.key);
drivers/crypto/ccree/cc_cipher.c
254
dma_unmap_single(dev, ctx_p->user.key_dma_addr, max_key_buf_size,
drivers/crypto/ccree/cc_cipher.c
257
&ctx_p->user.key_dma_addr);
drivers/crypto/ccree/cc_cipher.c
260
dev_dbg(dev, "Free key buffer in context. key=@%p\n", ctx_p->user.key);
drivers/crypto/ccree/cc_cipher.c
261
kfree_sensitive(ctx_p->user.key);
drivers/crypto/ccree/cc_cipher.c
463
dma_sync_single_for_cpu(dev, ctx_p->user.key_dma_addr,
drivers/crypto/ccree/cc_cipher.c
466
memcpy(ctx_p->user.key, key, keylen);
drivers/crypto/ccree/cc_cipher.c
473
ctx_p->user.key, keylen,
drivers/crypto/ccree/cc_cipher.c
474
ctx_p->user.key + keylen);
drivers/crypto/ccree/cc_cipher.c
482
dma_sync_single_for_device(dev, ctx_p->user.key_dma_addr,
drivers/crypto/ccree/cc_cipher.c
52
struct cc_user_key_info user;
drivers/crypto/ccree/cc_cipher.c
615
dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr;
drivers/crypto/ccree/cc_cipher.c
692
dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr;
drivers/crypto/tegra/tegra-se-aes.c
432
static int tegra_aes_kac_manifest(u32 user, u32 alg, u32 keylen)
drivers/crypto/tegra/tegra-se-hash.c
1088
static int tegra_hash_kac_manifest(u32 user, u32 alg, u32 keylen)
drivers/crypto/tegra/tegra-se.h
421
int (*manifest)(u32 user, u32 alg, u32 keylen);
drivers/fsi/fsi-sbefifo.c
1000
mutex_unlock(&user->file_lock);
drivers/fsi/fsi-sbefifo.c
782
static void sbefifo_release_command(struct sbefifo_user *user)
drivers/fsi/fsi-sbefifo.c
784
if (is_vmalloc_addr(user->pending_cmd))
drivers/fsi/fsi-sbefifo.c
785
vfree(user->pending_cmd);
drivers/fsi/fsi-sbefifo.c
786
user->pending_cmd = NULL;
drivers/fsi/fsi-sbefifo.c
787
user->pending_len = 0;
drivers/fsi/fsi-sbefifo.c
793
struct sbefifo_user *user;
drivers/fsi/fsi-sbefifo.c
795
user = kzalloc_obj(struct sbefifo_user);
drivers/fsi/fsi-sbefifo.c
796
if (!user)
drivers/fsi/fsi-sbefifo.c
799
file->private_data = user;
drivers/fsi/fsi-sbefifo.c
800
user->sbefifo = sbefifo;
drivers/fsi/fsi-sbefifo.c
801
user->cmd_page = (void *)__get_free_page(GFP_KERNEL);
drivers/fsi/fsi-sbefifo.c
802
if (!user->cmd_page) {
drivers/fsi/fsi-sbefifo.c
803
kfree(user);
drivers/fsi/fsi-sbefifo.c
806
mutex_init(&user->file_lock);
drivers/fsi/fsi-sbefifo.c
807
user->cmd_timeout_ms = SBEFIFO_TIMEOUT_IN_CMD;
drivers/fsi/fsi-sbefifo.c
808
user->read_timeout_ms = SBEFIFO_TIMEOUT_START_RSP;
drivers/fsi/fsi-sbefifo.c
816
struct sbefifo_user *user = file->private_data;
drivers/fsi/fsi-sbefifo.c
823
if (!user)
drivers/fsi/fsi-sbefifo.c
825
sbefifo = user->sbefifo;
drivers/fsi/fsi-sbefifo.c
829
mutex_lock(&user->file_lock);
drivers/fsi/fsi-sbefifo.c
832
if (user->pending_len == 0) {
drivers/fsi/fsi-sbefifo.c
836
if (user->pending_len < 8) {
drivers/fsi/fsi-sbefifo.c
840
cmd_len = user->pending_len >> 2;
drivers/fsi/fsi-sbefifo.c
851
sbefifo->timeout_in_cmd_ms = user->cmd_timeout_ms;
drivers/fsi/fsi-sbefifo.c
852
sbefifo->timeout_start_rsp_ms = user->read_timeout_ms;
drivers/fsi/fsi-sbefifo.c
853
rc = __sbefifo_submit(sbefifo, user->pending_cmd, cmd_len, &resp_iter);
drivers/fsi/fsi-sbefifo.c
863
sbefifo_release_command(user);
drivers/fsi/fsi-sbefifo.c
864
mutex_unlock(&user->file_lock);
drivers/fsi/fsi-sbefifo.c
871
struct sbefifo_user *user = file->private_data;
drivers/fsi/fsi-sbefifo.c
875
if (!user)
drivers/fsi/fsi-sbefifo.c
877
sbefifo = user->sbefifo;
drivers/fsi/fsi-sbefifo.c
883
mutex_lock(&user->file_lock);
drivers/fsi/fsi-sbefifo.c
887
user->pending_cmd = user->cmd_page;
drivers/fsi/fsi-sbefifo.c
889
user->pending_cmd = vmalloc(len);
drivers/fsi/fsi-sbefifo.c
890
if (!user->pending_cmd) {
drivers/fsi/fsi-sbefifo.c
896
if (copy_from_user(user->pending_cmd, buf, len)) {
drivers/fsi/fsi-sbefifo.c
902
if (len == 4 && be32_to_cpu(*(__be32 *)user->pending_cmd) ==
drivers/fsi/fsi-sbefifo.c
906
user->pending_len = 0;
drivers/fsi/fsi-sbefifo.c
912
rc = sbefifo_request_reset(user->sbefifo);
drivers/fsi/fsi-sbefifo.c
920
user->pending_len = len;
drivers/fsi/fsi-sbefifo.c
922
if (!user->pending_len)
drivers/fsi/fsi-sbefifo.c
923
sbefifo_release_command(user);
drivers/fsi/fsi-sbefifo.c
925
mutex_unlock(&user->file_lock);
drivers/fsi/fsi-sbefifo.c
933
struct sbefifo_user *user = file->private_data;
drivers/fsi/fsi-sbefifo.c
935
if (!user)
drivers/fsi/fsi-sbefifo.c
938
sbefifo_release_command(user);
drivers/fsi/fsi-sbefifo.c
939
free_page((unsigned long)user->cmd_page);
drivers/fsi/fsi-sbefifo.c
940
kfree(user);
drivers/fsi/fsi-sbefifo.c
945
static int sbefifo_cmd_timeout(struct sbefifo_user *user, void __user *argp)
drivers/fsi/fsi-sbefifo.c
947
struct device *dev = &user->sbefifo->dev;
drivers/fsi/fsi-sbefifo.c
954
user->cmd_timeout_ms = SBEFIFO_TIMEOUT_IN_CMD;
drivers/fsi/fsi-sbefifo.c
955
dev_dbg(dev, "Command timeout reset to %us\n", user->cmd_timeout_ms / 1000);
drivers/fsi/fsi-sbefifo.c
959
user->cmd_timeout_ms = timeout * 1000; /* user timeout is in sec */
drivers/fsi/fsi-sbefifo.c
964
static int sbefifo_read_timeout(struct sbefifo_user *user, void __user *argp)
drivers/fsi/fsi-sbefifo.c
966
struct device *dev = &user->sbefifo->dev;
drivers/fsi/fsi-sbefifo.c
973
user->read_timeout_ms = SBEFIFO_TIMEOUT_START_RSP;
drivers/fsi/fsi-sbefifo.c
974
dev_dbg(dev, "Timeout reset to %us\n", user->read_timeout_ms / 1000);
drivers/fsi/fsi-sbefifo.c
978
user->read_timeout_ms = timeout * 1000; /* user timeout is in sec */
drivers/fsi/fsi-sbefifo.c
985
struct sbefifo_user *user = file->private_data;
drivers/fsi/fsi-sbefifo.c
988
if (!user)
drivers/fsi/fsi-sbefifo.c
991
mutex_lock(&user->file_lock);
drivers/fsi/fsi-sbefifo.c
994
rc = sbefifo_cmd_timeout(user, (void __user *)arg);
drivers/fsi/fsi-sbefifo.c
997
rc = sbefifo_read_timeout(user, (void __user *)arg);
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
1541
struct drm_amdgpu_fence *user)
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
1548
ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
1552
r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance,
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
1553
user->ring, &entity);
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
1559
fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no);
drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
112
ret = copy_to_user(user, buf, to_copy);
drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
126
static ssize_t kfd_smi_ev_write(struct file *filep, const char __user *user,
drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
132
if (!access_ok(user, size) || size < sizeof(events))
drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
134
if (copy_from_user(&events, user, sizeof(events)))
drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
81
static ssize_t kfd_smi_ev_read(struct file *filep, char __user *user,
drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
153
void *user,
drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
170
if (is_switching_user(user, st->binding_user)) {
drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
175
st->binding_user = user;
drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
504
komeda_scaler_validate(void *user,
drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
529
drm_st, user, kcrtc_st->base.crtc);
drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
612
void *user,
drivers/gpu/drm/drm_fbdev_dma.c
19
static int drm_fbdev_dma_fb_open(struct fb_info *info, int user)
drivers/gpu/drm/drm_fbdev_dma.c
24
if (user && !try_module_get(fb_helper->dev->driver->fops->owner))
drivers/gpu/drm/drm_fbdev_dma.c
30
static int drm_fbdev_dma_fb_release(struct fb_info *info, int user)
drivers/gpu/drm/drm_fbdev_dma.c
34
if (user)
drivers/gpu/drm/drm_fbdev_shmem.c
18
static int drm_fbdev_shmem_fb_open(struct fb_info *info, int user)
drivers/gpu/drm/drm_fbdev_shmem.c
23
if (user && !try_module_get(fb_helper->dev->driver->fops->owner))
drivers/gpu/drm/drm_fbdev_shmem.c
29
static int drm_fbdev_shmem_fb_release(struct fb_info *info, int user)
drivers/gpu/drm/drm_fbdev_shmem.c
33
if (user)
drivers/gpu/drm/drm_fbdev_ttm.c
17
static int drm_fbdev_ttm_fb_open(struct fb_info *info, int user)
drivers/gpu/drm/drm_fbdev_ttm.c
22
if (user && !try_module_get(fb_helper->dev->driver->fops->owner))
drivers/gpu/drm/drm_fbdev_ttm.c
28
static int drm_fbdev_ttm_fb_release(struct fb_info *info, int user)
drivers/gpu/drm/drm_fbdev_ttm.c
32
if (user)
drivers/gpu/drm/i915/gem/i915_gem_context.c
1907
const struct drm_i915_gem_context_param_sseu *user,
drivers/gpu/drm/i915/gem/i915_gem_context.c
1915
if (!user->slice_mask || !user->subslice_mask ||
drivers/gpu/drm/i915/gem/i915_gem_context.c
1916
!user->min_eus_per_subslice || !user->max_eus_per_subslice)
drivers/gpu/drm/i915/gem/i915_gem_context.c
1920
if (user->max_eus_per_subslice < user->min_eus_per_subslice)
drivers/gpu/drm/i915/gem/i915_gem_context.c
1927
if (overflows_type(user->slice_mask, context->slice_mask) ||
drivers/gpu/drm/i915/gem/i915_gem_context.c
1928
overflows_type(user->subslice_mask, context->subslice_mask) ||
drivers/gpu/drm/i915/gem/i915_gem_context.c
1929
overflows_type(user->min_eus_per_subslice,
drivers/gpu/drm/i915/gem/i915_gem_context.c
1931
overflows_type(user->max_eus_per_subslice,
drivers/gpu/drm/i915/gem/i915_gem_context.c
1936
if (user->slice_mask & ~device->slice_mask)
drivers/gpu/drm/i915/gem/i915_gem_context.c
1939
if (user->subslice_mask & ~dev_subslice_mask)
drivers/gpu/drm/i915/gem/i915_gem_context.c
1942
if (user->max_eus_per_subslice > device->max_eus_per_subslice)
drivers/gpu/drm/i915/gem/i915_gem_context.c
1945
context->slice_mask = user->slice_mask;
drivers/gpu/drm/i915/gem/i915_gem_context.c
1946
context->subslice_mask = user->subslice_mask;
drivers/gpu/drm/i915/gem/i915_gem_context.c
1947
context->min_eus_per_subslice = user->min_eus_per_subslice;
drivers/gpu/drm/i915/gem/i915_gem_context.c
1948
context->max_eus_per_subslice = user->max_eus_per_subslice;
drivers/gpu/drm/i915/gem/i915_gem_context.c
1995
if ((user->min_eus_per_subslice !=
drivers/gpu/drm/i915/gem/i915_gem_context.c
1997
(user->max_eus_per_subslice !=
drivers/gpu/drm/i915/gem/i915_gem_context.c
2110
struct i915_gem_context_param_context_image user;
drivers/gpu/drm/i915/gem/i915_gem_context.c
2123
if (args->size < sizeof(user))
drivers/gpu/drm/i915/gem/i915_gem_context.c
2126
if (copy_from_user(&user, u64_to_user_ptr(args->value), sizeof(user)))
drivers/gpu/drm/i915/gem/i915_gem_context.c
2129
if (user.mbz)
drivers/gpu/drm/i915/gem/i915_gem_context.c
2132
if (user.flags & ~(I915_CONTEXT_IMAGE_FLAG_ENGINE_INDEX))
drivers/gpu/drm/i915/gem/i915_gem_context.c
2136
if (user.flags & I915_CONTEXT_IMAGE_FLAG_ENGINE_INDEX)
drivers/gpu/drm/i915/gem/i915_gem_context.c
2139
ce = lookup_user_engine(ctx, lookup, &user.engine);
drivers/gpu/drm/i915/gem/i915_gem_context.c
2143
if (user.size < ce->engine->context_size) {
drivers/gpu/drm/i915/gem/i915_gem_context.c
2160
state = memdup_user(u64_to_user_ptr(user.image), ce->engine->context_size);
drivers/gpu/drm/i915/gem/i915_gem_context.c
2181
args->size = sizeof(user);
drivers/gpu/drm/i915/gem/i915_gem_context.c
739
struct i915_context_param_engines __user *user =
drivers/gpu/drm/i915/gem/i915_gem_context.c
750
if (args->size < sizeof(*user) ||
drivers/gpu/drm/i915/gem/i915_gem_context.c
751
!IS_ALIGNED(args->size - sizeof(*user), sizeof(*user->engines))) {
drivers/gpu/drm/i915/gem/i915_gem_context.c
757
set.num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
drivers/gpu/drm/i915/gem/i915_gem_context.c
770
if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
drivers/gpu/drm/i915/gem/i915_gem_context.c
797
if (!get_user(extensions, &user->extensions))
drivers/gpu/drm/i915/gem/i915_gem_context.h
244
const struct drm_i915_gem_context_param_sseu *user,
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
2924
struct drm_i915_gem_exec_fence __user *user;
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
2937
ULONG_MAX / sizeof(*user),
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
2941
user = u64_to_user_ptr(args->cliprects_ptr);
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
2942
if (!access_ok(user, num_fences * sizeof(*user)))
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
2958
if (__copy_from_user(&user_fence, user++, sizeof(user_fence)))
drivers/gpu/drm/i915/intel_wakeref.h
273
struct ref_tracker *user = NULL;
drivers/gpu/drm/i915/intel_wakeref.h
275
ref_tracker_alloc(dir, &user, GFP_NOWAIT);
drivers/gpu/drm/i915/intel_wakeref.h
277
return user ?: INTEL_WAKEREF_DEF;
drivers/gpu/drm/nouveau/dispnv50/crc.c
513
if (core->chan.base.user.oclass >= GB202_DISP_CORE_CHANNEL_DMA)
drivers/gpu/drm/nouveau/dispnv50/crc.c
516
ret = nvif_object_ctor(&core->chan.base.user, "kmsCrcNtfyCtxDma",
drivers/gpu/drm/nouveau/dispnv50/curs507a.c
188
&args, sizeof(args), &wndw->wimm.base.user);
drivers/gpu/drm/nouveau/dispnv50/curs507a.c
194
nvif_object_map(&wndw->wimm.base.user, NULL, 0);
drivers/gpu/drm/nouveau/dispnv50/curs507a.c
38
if (NVIF_TV32(&wndw->wimm.base.user, NV507A, FREE, COUNT, >=, 4))
drivers/gpu/drm/nouveau/dispnv50/curs507a.c
49
struct nvif_object *user = &wndw->wimm.base.user;
drivers/gpu/drm/nouveau/dispnv50/curs507a.c
52
NVIF_WR32(user, NV507A, UPDATE,
drivers/gpu/drm/nouveau/dispnv50/curs507a.c
61
struct nvif_object *user = &wndw->wimm.base.user;
drivers/gpu/drm/nouveau/dispnv50/curs507a.c
64
NVIF_WR32(user, NV507A, SET_CURSOR_HOT_SPOT_POINT_OUT,
drivers/gpu/drm/nouveau/dispnv50/cursc37a.c
30
struct nvif_object *user = &wndw->wimm.base.user;
drivers/gpu/drm/nouveau/dispnv50/cursc37a.c
33
NVIF_WR32(user, NVC37A, UPDATE, 0x00000001);
drivers/gpu/drm/nouveau/dispnv50/cursc37a.c
40
struct nvif_object *user = &wndw->wimm.base.user;
drivers/gpu/drm/nouveau/dispnv50/cursc37a.c
43
NVIF_WR32(user, NVC37A, SET_CURSOR_HOT_SPOT_POINT_OUT(0),
drivers/gpu/drm/nouveau/dispnv50/disp.c
115
nvif_object_dtor(&chan->user);
drivers/gpu/drm/nouveau/dispnv50/disp.c
152
NVIF_WV32(&dmac->base.user, NV507C, PUT, PTR, dmac->cur);
drivers/gpu/drm/nouveau/dispnv50/disp.c
162
u32 get = NVIF_RV32(&dmac->base.user, NV507C, GET, PTR);
drivers/gpu/drm/nouveau/dispnv50/disp.c
174
u32 get = NVIF_RV32(&dmac->base.user, NV507C, GET, PTR);
drivers/gpu/drm/nouveau/dispnv50/disp.c
181
if (NVIF_TV32(&dmac->base.user, NV507C, GET, PTR, >, 0))
drivers/gpu/drm/nouveau/dispnv50/disp.c
292
ret = nvif_object_ctor(&dmac->base.user, "kmsSyncCtxDma", NV50_DISP_HANDLE_SYNCBUF,
drivers/gpu/drm/nouveau/dispnv50/disp.c
304
ret = nvif_object_ctor(&dmac->base.user, "kmsVramCtxDma", NV50_DISP_HANDLE_VRAM,
drivers/gpu/drm/nouveau/dispnv50/disp.c
95
&chan->user);
drivers/gpu/drm/nouveau/dispnv50/disp.c
97
ret = nvif_object_map(&chan->user, NULL, 0);
drivers/gpu/drm/nouveau/dispnv50/disp.c
99
nvif_object_dtor(&chan->user);
drivers/gpu/drm/nouveau/dispnv50/disp.h
58
struct nvif_object user;
drivers/gpu/drm/nouveau/dispnv50/oimm507b.c
37
&args, sizeof(args), &wndw->wimm.base.user);
drivers/gpu/drm/nouveau/dispnv50/oimm507b.c
43
nvif_object_map(&wndw->wimm.base.user, NULL, 0);
drivers/gpu/drm/nouveau/dispnv50/wndw.c
559
if (wndw->wndw.base.user.oclass < GB202_DISP_WINDOW_CHANNEL_DMA) {
drivers/gpu/drm/nouveau/dispnv50/wndw.c
873
wndw->ctxdma.parent = &wndw->wndw.base.user;
drivers/gpu/drm/nouveau/include/nvif/device.h
18
struct nvif_user user;
drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h
153
int nvkm_falcon_fw_boot(struct nvkm_falcon_fw *, struct nvkm_subdev *user,
drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h
83
nvkm_printk___((f)->owner, (f)->user, NV_DBG_##l, p, "%s:"fmt, (f)->name, ##a); \
drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h
85
nvkm_printk___((f)->owner, (f)->user, NV_DBG_##l, p, fmt, ##a); \
drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
28
struct nvkm_subdev *user;
drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
21
struct nvkm_device_oclass user;
drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
151
struct nvkm_device_oclass user;
drivers/gpu/drm/nouveau/include/nvkm/subdev/vfn.h
12
u32 user;
drivers/gpu/drm/nouveau/include/nvkm/subdev/vfn.h
17
struct nvkm_device_oclass user;
drivers/gpu/drm/nouveau/nouveau_abi16.c
453
ret = nvif_object_ctor(&chan->chan->user, "abi16CeWar", 0, VOLTA_DMA_COPY_A,
drivers/gpu/drm/nouveau/nouveau_abi16.c
459
ret = nvif_object_ctor(&chan->chan->user, "abi16CeWar", 0, TURING_DMA_COPY_A,
drivers/gpu/drm/nouveau/nouveau_abi16.c
547
ret = nvif_object_sclass_get(&chan->chan->user, &sclass);
drivers/gpu/drm/nouveau/nouveau_abi16.c
606
ret = nvif_object_ctor(&chan->chan->user, "abi16EngObj", init->handle,
drivers/gpu/drm/nouveau/nouveau_abi16.c
669
ret = nvif_object_ctor(&chan->chan->user, "abi16Ntfy", info->handle,
drivers/gpu/drm/nouveau/nouveau_abi16.c
795
ret = nvif_object_ctor(&chan->chan->user, "abi16EngObj", args->handle, args->oclass,
drivers/gpu/drm/nouveau/nouveau_abi16.c
823
ret = nvif_object_sclass_get(&chan->chan->user, &sclass);
drivers/gpu/drm/nouveau/nouveau_abi16.c
839
nouveau_abi16_ioctl(struct drm_file *filp, void __user *user, u32 size)
drivers/gpu/drm/nouveau/nouveau_abi16.c
855
if (copy_from_user(ioctl, user, size))
drivers/gpu/drm/nouveau/nouveau_abi16.c
883
if (copy_to_user(user, ioctl, size))
drivers/gpu/drm/nouveau/nouveau_abi16.h
42
int nouveau_abi16_ioctl(struct drm_file *, void __user *user, u32 size);
drivers/gpu/drm/nouveau/nouveau_bo.c
1045
ret = nvif_object_ctor(&chan->user, "ttmBoMove",
drivers/gpu/drm/nouveau/nouveau_chan.c
104
nvif_object_dtor(&chan->user);
drivers/gpu/drm/nouveau/nouveau_chan.c
333
chan->userd = &chan->user;
drivers/gpu/drm/nouveau/nouveau_chan.c
340
args, __struct_size(args), &chan->user);
drivers/gpu/drm/nouveau/nouveau_chan.c
366
if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO) {
drivers/gpu/drm/nouveau/nouveau_chan.c
375
ret = nvif_event_ctor(&chan->user, "abi16ChanKilled", chan->chid,
drivers/gpu/drm/nouveau/nouveau_chan.c
401
ret = nvif_object_ctor(&chan->user, "abi16ChanVramCtxDma", vram,
drivers/gpu/drm/nouveau/nouveau_chan.c
425
ret = nvif_object_ctor(&chan->user, "abi16ChanGartCtxDma", gart,
drivers/gpu/drm/nouveau/nouveau_chan.c
433
if (chan->user.oclass < NV50_CHANNEL_GPFIFO) {
drivers/gpu/drm/nouveau/nouveau_chan.c
438
if (chan->user.oclass < FERMI_CHANNEL_GPFIFO) {
drivers/gpu/drm/nouveau/nouveau_chan.c
445
if (chan->user.oclass < VOLTA_CHANNEL_GPFIFO_A) {
drivers/gpu/drm/nouveau/nouveau_chan.c
457
&drm->client.device.user, chan->token);
drivers/gpu/drm/nouveau/nouveau_chan.c
475
ret = nvif_object_ctor(&chan->user, "abi16NvswFence", 0x006e,
drivers/gpu/drm/nouveau/nouveau_chan.c
96
if (nvif_object_constructed(&chan->user))
drivers/gpu/drm/nouveau/nouveau_chan.h
49
struct nvif_object user;
drivers/gpu/drm/nouveau/nouveau_dma.h
85
nvif_wr32(&chan->user, chan->user_put, ((val) << 2) + chan->push.addr);\
drivers/gpu/drm/nouveau/nouveau_drm.c
391
ret = nvif_object_ctor(&drm->channel->user, "drmNvsw",
drivers/gpu/drm/nouveau/nouveau_drm.c
396
ret = nvif_object_ctor(&drm->channel->user, "drmBlit",
drivers/gpu/drm/nouveau/nouveau_drm.c
435
ret = nvif_object_ctor(&drm->channel->user, "drmM2mfNtfy",
drivers/gpu/drm/nouveau/nouveau_drv.h
194
u_memcpya(uint64_t user, unsigned int nmemb, unsigned int size)
drivers/gpu/drm/nouveau/nouveau_drv.h
196
void __user *userptr = u64_to_user_ptr(user);
drivers/gpu/drm/nouveau/nouveau_exec.c
382
if (chan->user.oclass < NV50_CHANNEL_GPFIFO)
drivers/gpu/drm/nouveau/nouveau_fence.c
199
ret = nvif_event_ctor(&chan->user, "fenceNonStallIntr", (chan->runlist << 16) | chan->chid,
drivers/gpu/drm/nouveau/nouveau_gem.c
853
if (chan->user.oclass >= NV50_CHANNEL_GPFIFO) {
drivers/gpu/drm/nouveau/nouveau_gem.c
963
if (chan->user.oclass >= NV50_CHANNEL_GPFIFO) {
drivers/gpu/drm/nouveau/nv10_fence.c
55
return NVIF_RD32(&chan->user, NV06E, REFERENCE);
drivers/gpu/drm/nouveau/nv17_fence.c
95
ret = nvif_object_ctor(&chan->user, "fenceCtxDma", NvSema,
drivers/gpu/drm/nouveau/nv50_fence.c
54
ret = nvif_object_ctor(&chan->user, "fenceCtxDma", NvSema,
drivers/gpu/drm/nouveau/nvif/device.c
30
if (!device->user.func) {
drivers/gpu/drm/nouveau/nvif/device.c
38
return device->user.func->time(&device->user);
drivers/gpu/drm/nouveau/nvif/device.c
62
device->user.func = NULL;
drivers/gpu/drm/nouveau/nvif/user.c
30
if (device->user.func) {
drivers/gpu/drm/nouveau/nvif/user.c
31
nvif_object_dtor(&device->user.object);
drivers/gpu/drm/nouveau/nvif/user.c
32
device->user.func = NULL;
drivers/gpu/drm/nouveau/nvif/user.c
53
if (device->user.func)
drivers/gpu/drm/nouveau/nvif/user.c
62
&device->user.object);
drivers/gpu/drm/nouveau/nvif/user.c
66
nvif_object_map(&device->user.object, NULL, 0);
drivers/gpu/drm/nouveau/nvif/user.c
67
device->user.func = users[cid].func;
drivers/gpu/drm/nouveau/nvif/userc361.c
25
nvif_userc361_time(struct nvif_user *user)
drivers/gpu/drm/nouveau/nvif/userc361.c
30
hi = nvif_rd32(&user->object, 0x084);
drivers/gpu/drm/nouveau/nvif/userc361.c
31
lo = nvif_rd32(&user->object, 0x080);
drivers/gpu/drm/nouveau/nvif/userc361.c
32
} while (hi != nvif_rd32(&user->object, 0x084));
drivers/gpu/drm/nouveau/nvif/userc361.c
38
nvif_userc361_doorbell(struct nvif_user *user, u32 token)
drivers/gpu/drm/nouveau/nvif/userc361.c
40
nvif_wr32(&user->object, 0x90, token);
drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
297
sclass = &device->mmu->user;
drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
299
sclass = &device->fault->user;
drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
301
sclass = &device->vfn->user;
drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c
156
if (disp->chan[chan->chid.user] == chan)
drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c
157
disp->chan[chan->chid.user] = NULL;
drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c
178
const struct nvkm_disp_chan_user *user = NULL;
drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c
183
for (i = 0; disp->func->user[i].ctor; i++) {
drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c
184
if (disp->func->user[i].base.oclass == oclass->base.oclass) {
drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c
185
user = disp->func->user[i].chan;
drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c
190
if (WARN_ON(!user))
drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c
195
if (args->v0.id >= nr || !args->v0.pushbuf != !user->func->push)
drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c
203
chan->func = user->func;
drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c
204
chan->mthd = user->mthd;
drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c
206
chan->chid.ctrl = user->ctrl + args->v0.id;
drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c
207
chan->chid.user = user->user + args->v0.id;
drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c
217
if (disp->chan[chan->chid.user]) {
drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c
221
disp->chan[chan->chid.user] = chan;
drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c
55
*addr = base + chan->func->user(chan, size);
drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.h
15
int user;
drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.h
40
u64 (*user)(struct nvkm_disp_chan *, u64 *size);
drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.h
70
int user;
drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
180
.user = 3,
drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
230
.user = 1,
drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
317
.user = 0,
drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
334
.user = {
drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
347
.user = 0,
drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
364
.user = {
drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c
137
.user = {
drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
1006
.user = 0,
drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
1246
.user = {
drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
507
const u32 mask = 0x00000001 << chan->chid.user;
drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
523
int user = chan->chid.user;
drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
530
nvkm_error(subdev, "ch %d fini: %08x\n", user,
drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
542
int user = chan->chid.user;
drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
551
nvkm_error(subdev, "ch %d init: %08x\n", user,
drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
564
.user = nv50_disp_chan_user,
drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
570
return nvkm_ramht_insert(chan->disp->ramht, object, chan->chid.user, -9, handle,
drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
571
chan->chid.user << 27 | 0x00000001);
drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
580
int user = chan->chid.user;
drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
589
nvkm_error(subdev, "ch %d fini: %08x\n", user,
drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
602
int user = chan->chid.user;
drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
617
nvkm_error(subdev, "ch %d init: %08x\n", user,
drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
631
.user = nv50_disp_chan_user,
drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
639
.user = 13,
drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
646
.user = 9,
drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
716
.user = 5,
drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
800
.user = 1,
drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
998
.user = nv50_disp_chan_user,
drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
199
.user = 5,
drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
301
.user = 0,
drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
318
.user = {
drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
44
.user = {
drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
98
.user = {
drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c
181
.user = {
drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c
70
.user = {
drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
135
.user = nv50_disp_chan_user,
drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
143
.user = 0,
drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
185
.user = {
drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
39
int user = chan->chid.user;
drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
54
nvkm_error(subdev, "ch %d init: %08x\n", user,
drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
68
.user = nv50_disp_chan_user,
drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
76
.user = 17,
drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
83
.user = 13,
drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
90
.user = 5,
drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
98
.user = 1,
drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c
77
.user = 3,
drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c
94
.user = {
drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
252
.user = {
drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
1239
.user = {
drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
336
return 0x690000 + ((chan->chid.user - 1) * 0x1000);
drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
356
return nvkm_ramht_insert(chan->disp->ramht, object, chan->chid.user, -9, handle,
drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
357
chan->chid.user << 25 | 0x00000040);
drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
407
.user = gv100_disp_chan_user,
drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
414
.user = 33,
drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
533
.user = gv100_disp_chan_user,
drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
541
.user = 1,
drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
600
.user = gv100_disp_chan_user,
drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
607
.user = 73,
drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
782
.user = gv100_disp_core_user,
drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
790
.user = 0,
drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
58
.user = {
drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c
72
.user = {
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c
110
.user = { {} },
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
1001
.user = nv50_disp_chan_user,
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
1009
.user = 0,
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
1780
.user = {
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
502
mthd->name, chan->chid.user);
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
553
return 0x640000 + (chan->chid.user * 0x1000);
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
560
const u32 mask = 0x00010001 << chan->chid.user;
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
561
const u32 data = en ? 0x00010000 << chan->chid.user : 0x00000000;
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
572
int user = chan->chid.user;
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
579
nvkm_error(subdev, "ch %d timeout: %08x\n", user,
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
591
int user = chan->chid.user;
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
598
nvkm_error(subdev, "ch %d timeout0: %08x\n", user,
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
609
nvkm_error(subdev, "ch %d timeout1: %08x\n", user,
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
622
.user = nv50_disp_chan_user,
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
628
return nvkm_ramht_insert(chan->disp->ramht, object, chan->chid.user, -10, handle,
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
629
chan->chid.user << 28 | chan->chid.user);
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
638
int user = chan->chid.user;
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
647
nvkm_error(subdev, "ch %d fini timeout, %08x\n", user,
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
660
int user = chan->chid.user;
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
675
nvkm_error(subdev, "ch %d init timeout, %08x\n", user,
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
711
.user = nv50_disp_chan_user,
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
719
.user = 7,
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
726
.user = 5,
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
772
.user = 3,
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
831
.user = 1,
drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
106
nvkm_outp_acquire_ior(struct nvkm_outp *outp, u8 user, struct nvkm_ior *ior)
drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
111
outp->acquired |= user;
drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
117
u8 user, bool hda)
drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
126
return nvkm_outp_acquire_ior(outp, user, ior);
drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
136
return nvkm_outp_acquire_ior(outp, user, ior);
drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
143
nvkm_outp_acquire_or(struct nvkm_outp *outp, u8 user, bool hda)
drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
149
OUTP_TRACE(outp, "acquire %02x |= %02x %p", outp->acquired, user, ior);
drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
151
outp->acquired |= user;
drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
165
return nvkm_outp_acquire_ior(outp, user, ior);
drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
185
return nvkm_outp_acquire_ior(outp, user, ior);
drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
193
if (!nvkm_outp_acquire_hda(outp, type, user, false))
drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
197
return nvkm_outp_acquire_hda(outp, type, user, true);
drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
201
if (!nvkm_outp_acquire_hda(outp, type, user, true))
drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
207
return nvkm_outp_acquire_hda(outp, type, user, false);
drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
92
nvkm_outp_release_or(struct nvkm_outp *outp, u8 user)
drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
95
OUTP_TRACE(outp, "release %02x &= %02x %p", outp->acquired, ~user, ior);
drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
97
outp->acquired &= ~user;
drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
83
int nvkm_outp_acquire_or(struct nvkm_outp *, u8 user, bool hda);
drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
84
int nvkm_outp_acquire_ior(struct nvkm_outp *, u8 user, struct nvkm_ior *);
drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
86
void nvkm_outp_release_or(struct nvkm_outp *, u8 user);
drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
46
} user[];
drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c
223
.user = {
drivers/gpu/drm/nouveau/nvkm/engine/disp/udisp.c
53
if (disp->func->user[index].ctor) {
drivers/gpu/drm/nouveau/nvkm/engine/disp/udisp.c
54
sclass->base = disp->func->user[index].base;
drivers/gpu/drm/nouveau/nvkm/engine/disp/udisp.c
55
sclass->ctor = disp->func->user[index].ctor;
drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf100.c
105
user = GF100_DMA_V0_PRIV_US;
drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf100.c
109
user = GF100_DMA_V0_PRIV_VM;
drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf100.c
115
if (user > 2)
drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf100.c
117
dmaobj->flags0 |= (kind << 22) | (user << 20) | oclass->base.oclass;
drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf100.c
78
u32 kind, user, unkn;
drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf100.c
99
user = args->v0.priv;
drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv50.c
106
user = NV50_DMA_V0_PRIV_US;
drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv50.c
111
user = NV50_DMA_V0_PRIV_VM;
drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv50.c
119
if (user > 2 || part > 2 || comp > 3 || kind > 0x7f)
drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv50.c
121
dmaobj->flags0 = (comp << 29) | (kind << 22) | (user << 20) |
drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv50.c
78
u32 user, part, comp, kind;
drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv50.c
99
user = args->v0.priv;
drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
102
if (cgrp->user.oclass) {
drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
104
oclass->base = cgrp->user;
drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
105
oclass->engn = &fifo->func->cgrp.user;
drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
112
if (chan->user.oclass) {
drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
114
oclass->base = chan->user;
drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
115
oclass->engn = &fifo->func->chan.user;
drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
78
if (oclass->engn == &fifo->func->cgrp.user)
drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
81
if (oclass->engn == &fifo->func->chan.user)
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
184
const u64 user = nvkm_memory_addr(chan->userd.mem) + chan->userd.base;
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
187
nvkm_wo32(memory, offset + 0x0, lower_32_bits(user) | chan->runq << 1);
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
188
nvkm_wo32(memory, offset + 0x4, upper_32_bits(user));
drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
54
struct nvkm_sclass user;
drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
60
struct nvkm_sclass user;
drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c
46
nvkm_wr32(device, device->vfn->addr.user + 0x0090, chan->func->doorbell_handle(chan));
drivers/gpu/drm/nouveau/nvkm/engine/fifo/ucgrp.c
54
if (chan->user.oclass) {
drivers/gpu/drm/nouveau/nvkm/engine/fifo/ucgrp.c
56
oclass->base = chan->user;
drivers/gpu/drm/nouveau/nvkm/falcon/base.c
159
if (nvkm_printk_ok(falcon->owner, falcon->user, NV_DBG_TRACE)) {
drivers/gpu/drm/nouveau/nvkm/falcon/base.c
194
if (nvkm_printk_ok(falcon->owner, falcon->user, NV_DBG_TRACE)) {
drivers/gpu/drm/nouveau/nvkm/falcon/base.c
216
nvkm_warn(falcon->user,
drivers/gpu/drm/nouveau/nvkm/falcon/base.c
286
nvkm_falcon_put(struct nvkm_falcon *falcon, struct nvkm_subdev *user)
drivers/gpu/drm/nouveau/nvkm/falcon/base.c
292
if (falcon->user == user) {
drivers/gpu/drm/nouveau/nvkm/falcon/base.c
293
nvkm_debug(falcon->user, "released %s falcon\n", falcon->name);
drivers/gpu/drm/nouveau/nvkm/falcon/base.c
294
falcon->user = NULL;
drivers/gpu/drm/nouveau/nvkm/falcon/base.c
300
nvkm_falcon_get(struct nvkm_falcon *falcon, struct nvkm_subdev *user)
drivers/gpu/drm/nouveau/nvkm/falcon/base.c
305
if (falcon->user) {
drivers/gpu/drm/nouveau/nvkm/falcon/base.c
306
nvkm_error(user, "%s falcon already acquired by %s!\n",
drivers/gpu/drm/nouveau/nvkm/falcon/base.c
307
falcon->name, falcon->user->name);
drivers/gpu/drm/nouveau/nvkm/falcon/base.c
312
nvkm_debug(user, "acquired %s falcon\n", falcon->name);
drivers/gpu/drm/nouveau/nvkm/falcon/base.c
315
falcon->user = user;
drivers/gpu/drm/nouveau/nvkm/falcon/base.c
89
if (img && nvkm_printk_ok(falcon->owner, falcon->user, NV_DBG_TRACE)) {
drivers/gpu/drm/nouveau/nvkm/falcon/fw.c
122
nvkm_falcon_put(falcon, user);
drivers/gpu/drm/nouveau/nvkm/falcon/fw.c
52
if (nvkm_printk_ok(falcon->owner, falcon->user, NV_DBG_TRACE)) {
drivers/gpu/drm/nouveau/nvkm/falcon/fw.c
74
nvkm_falcon_fw_boot(struct nvkm_falcon_fw *fw, struct nvkm_subdev *user,
drivers/gpu/drm/nouveau/nvkm/falcon/fw.c
80
ret = nvkm_falcon_get(falcon, user);
drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c
239
flcn_bl_dmem_desc_v1_dump(fw->falcon->user, &hsdesc);
drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c
63
flcn_bl_dmem_desc_dump(fw->falcon->user, &hsdesc);
drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c
45
flcn_bl_dmem_desc_v2_dump(fw->falcon->user, &hsdesc);
drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
163
fault->user.ctor = nvkm_ufault_new;
drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
164
fault->user.base = func->user.base;
drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c
81
.user = { { 0, 0, MAXWELL_FAULT_BUFFER_A }, 0 },
drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp10b.c
45
.user = { { 0, 0, MAXWELL_FAULT_BUFFER_A }, 0 },
drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
233
.user = { { 0, 0, VOLTA_FAULT_BUFFER_A }, 1 },
drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
43
} user;
drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c
172
.user = { { 0, 0, VOLTA_FAULT_BUFFER_A }, 1 },
drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c
97
struct nvkm_fault_buffer *buffer = fault->buffer[fault->func->user.rp];
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c
140
.user = r535_chan_user,
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c
146
.user = 73,
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c
152
return nvkm_ramht_insert(chan->disp->ramht, object, chan->chid.user, -9, handle,
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c
153
chan->chid.user << 25 |
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c
161
const u32 uoff = (chan->chid.user - 1) * 0x1000;
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c
1737
if (!(rm = kzalloc(sizeof(*rm) + 6 * sizeof(rm->user[0]), GFP_KERNEL)))
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c
1751
rm->user[0].base.oclass = gpu->disp.class.caps;
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c
1752
rm->user[0].ctor = gv100_disp_caps_new;
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c
1754
rm->user[1].base.oclass = gpu->disp.class.core;
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c
1755
rm->user[1].ctor = nvkm_disp_core_new;
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c
1756
rm->user[1].chan = &r535_core;
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c
1758
rm->user[2].base.oclass = gpu->disp.class.wndw;
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c
1759
rm->user[2].ctor = nvkm_disp_wndw_new;
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c
1760
rm->user[2].chan = &r535_wndw;
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c
1762
rm->user[3].base.oclass = gpu->disp.class.wimm;
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c
1763
rm->user[3].ctor = nvkm_disp_wndw_new;
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c
1764
rm->user[3].chan = &r535_wimm;
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c
1766
rm->user[4].base.oclass = gpu->disp.class.curs;
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c
1767
rm->user[4].ctor = nvkm_disp_chan_new;
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c
1768
rm->user[4].chan = &r535_curs;
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c
214
.user = r535_chan_user,
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c
224
.user = r535_chan_user,
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c
230
.user = 33,
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c
236
.user = 1,
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c
254
.user = r535_chan_user,
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c
261
.user = 0,
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c
600
rm->chan.user.oclass = gpu->fifo.chan.class;
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c
87
fifo->func->chan.user.oclass, sizeof(*args), chan);
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fifo.c
32
fifo->func->chan.user.oclass, sizeof(*args), chan);
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c
142
flcn_bl_dmem_desc_v2_dump(fw->falcon->user, &desc);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
429
mmu->user.ctor = nvkm_ummu_new;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
430
mmu->user.base = func->mmu.user;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
22
struct nvkm_sclass user;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
26
struct nvkm_sclass user;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
34
struct nvkm_sclass user;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c
37
if (mmu->func->mem.user.oclass) {
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c
39
oclass->base = mmu->func->mem.user;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c
45
if (mmu->func->vmm.user.oclass) {
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c
47
oclass->base = mmu->func->vmm.user;
drivers/gpu/drm/nouveau/nvkm/subdev/vfn/base.c
48
vfn->addr.user = vfn->addr.priv + func->user.addr;
drivers/gpu/drm/nouveau/nvkm/subdev/vfn/base.c
57
vfn->user.ctor = nvkm_uvfn_new;
drivers/gpu/drm/nouveau/nvkm/subdev/vfn/base.c
58
vfn->user.base = func->user.base;
drivers/gpu/drm/nouveau/nvkm/subdev/vfn/ga100.c
41
.user = { 0x030000, 0x010000, { -1, -1, AMPERE_USERMODE_A } },
drivers/gpu/drm/nouveau/nvkm/subdev/vfn/gv100.c
28
.user = { 0x810000, 0x010000, { -1, -1, VOLTA_USERMODE_A } },
drivers/gpu/drm/nouveau/nvkm/subdev/vfn/priv.h
17
} user;
drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c
46
rm->user.addr = 0x030000;
drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c
47
rm->user.size = 0x010000;
drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c
48
rm->user.base.minver = -1;
drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c
49
rm->user.base.maxver = -1;
drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c
50
rm->user.base.oclass = gpu->usermode.class;
drivers/gpu/drm/nouveau/nvkm/subdev/vfn/tu102.c
102
.user = { 0x030000, 0x010000, { -1, -1, TURING_USERMODE_A } },
drivers/gpu/drm/nouveau/nvkm/subdev/vfn/uvfn.c
39
*addr = device->func->resource_addr(device, NVKM_BAR0_PRI) + vfn->addr.user;
drivers/gpu/drm/nouveau/nvkm/subdev/vfn/uvfn.c
40
*size = vfn->func->user.size;
drivers/gpu/drm/panfrost/panfrost_perfcnt.c
103
user);
drivers/gpu/drm/panfrost/panfrost_perfcnt.c
169
perfcnt->user = user;
drivers/gpu/drm/panfrost/panfrost_perfcnt.c
189
struct panfrost_file_priv *user = file_priv->driver_priv;
drivers/gpu/drm/panfrost/panfrost_perfcnt.c
193
if (user != perfcnt->user)
drivers/gpu/drm/panfrost/panfrost_perfcnt.c
203
perfcnt->user = NULL;
drivers/gpu/drm/panfrost/panfrost_perfcnt.c
256
if (perfcnt->user != file_priv->driver_priv) {
drivers/gpu/drm/panfrost/panfrost_perfcnt.c
282
if (perfcnt->user == pfile)
drivers/gpu/drm/panfrost/panfrost_perfcnt.c
33
struct panfrost_file_priv *user;
drivers/gpu/drm/panfrost/panfrost_perfcnt.c
75
struct panfrost_file_priv *user = file_priv->driver_priv;
drivers/gpu/drm/panfrost/panfrost_perfcnt.c
82
if (user == perfcnt->user)
drivers/gpu/drm/panfrost/panfrost_perfcnt.c
84
else if (perfcnt->user)
drivers/gpu/drm/radeon/radeon_fbdev.c
144
static int radeon_fbdev_fb_open(struct fb_info *info, int user)
drivers/gpu/drm/radeon/radeon_fbdev.c
161
static int radeon_fbdev_fb_release(struct fb_info *info, int user)
drivers/gpu/drm/tests/drm_panic_test.c
56
DRM_TEST_MODE_LIST(user)
drivers/hwmon/ibmaem.c
277
data, &data->user);
drivers/hwmon/ibmaem.c
298
err = ipmi_request_settime(data->user, &data->address, data->tx_msgid,
drivers/hwmon/ibmaem.c
475
ipmi_destroy_user(data->ipmi.user);
drivers/hwmon/ibmaem.c
589
ipmi_destroy_user(data->ipmi.user);
drivers/hwmon/ibmaem.c
729
ipmi_destroy_user(data->ipmi.user);
drivers/hwmon/ibmaem.c
780
ipmi_destroy_user(probe.user);
drivers/hwmon/ibmaem.c
91
struct ipmi_user *user;
drivers/hwmon/ibmpex.c
120
err = ipmi_request_settime(data->user, &data->address, data->tx_msgid,
drivers/hwmon/ibmpex.c
452
data, &data->user);
drivers/hwmon/ibmpex.c
501
ipmi_destroy_user(data->user);
drivers/hwmon/ibmpex.c
525
ipmi_destroy_user(data->user);
drivers/hwmon/ibmpex.c
74
struct ipmi_user *user;
drivers/hwmon/occ/common.c
108
u16 user;
drivers/hwmon/occ/common.c
119
u16 user;
drivers/hwmon/occ/common.c
621
val = get_unaligned_be16(&caps->user) * 1000000ULL;
drivers/hwmon/occ/common.c
671
val = get_unaligned_be16(&caps->user) * 1000000ULL;
drivers/infiniband/core/restrack.c
127
res->user = true;
drivers/infiniband/hw/cxgb4/cq.c
106
if (user && ucontext->is_32b_cqe) {
drivers/infiniband/hw/cxgb4/cq.c
148
((user && ucontext->is_32b_cqe) ?
drivers/infiniband/hw/cxgb4/cq.c
165
user ? &cq->bar2_pa : NULL);
drivers/infiniband/hw/cxgb4/cq.c
166
if (user && !cq->bar2_pa) {
drivers/infiniband/hw/cxgb4/cq.c
77
int user = (uctx != &rdev->uctx);
drivers/infiniband/hw/cxgb4/cq.c
82
if (user)
drivers/infiniband/hw/cxgb4/cq.c
91
if (!user) {
drivers/infiniband/hw/cxgb4/qp.c
140
static int alloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq, int user)
drivers/infiniband/hw/cxgb4/qp.c
143
if (user)
drivers/infiniband/hw/cxgb4/qp.c
205
int user = (uctx != &rdev->uctx);
drivers/infiniband/hw/cxgb4/qp.c
225
if (!user) {
drivers/infiniband/hw/cxgb4/qp.c
2536
int user = (uctx != &rdev->uctx);
drivers/infiniband/hw/cxgb4/qp.c
254
ret = alloc_sq(rdev, &wq->sq, user);
drivers/infiniband/hw/cxgb4/qp.c
2549
if (!user) {
drivers/infiniband/hw/cxgb4/qp.c
2575
user ? &wq->bar2_pa : NULL);
drivers/infiniband/hw/cxgb4/qp.c
2581
if (user && !wq->bar2_va) {
drivers/infiniband/hw/cxgb4/qp.c
2654
if (!user)
drivers/infiniband/hw/cxgb4/qp.c
2657
if (!user)
drivers/infiniband/hw/cxgb4/qp.c
282
user ? &wq->sq.bar2_pa : NULL);
drivers/infiniband/hw/cxgb4/qp.c
287
user ? &wq->rq.bar2_pa : NULL);
drivers/infiniband/hw/cxgb4/qp.c
292
if (user && (!wq->sq.bar2_pa || (need_rq && !wq->rq.bar2_pa))) {
drivers/infiniband/hw/hfi1/firmware.c
1357
u8 user = (u8)read_csr(dd, ASIC_CFG_MUTEX);
drivers/infiniband/hw/hfi1/firmware.c
1359
if (user == mask) {
drivers/infiniband/hw/hfi1/firmware.c
1370
user = (u8)read_csr(dd, ASIC_CFG_MUTEX);
drivers/infiniband/hw/hfi1/firmware.c
1371
if (user == mask)
drivers/infiniband/hw/hfi1/firmware.c
1381
(u32)user, (u32)mask, (try == 0) ? "retrying" : "giving up");
drivers/infiniband/hw/hfi1/firmware.c
1396
u8 user = (u8)read_csr(dd, ASIC_CFG_MUTEX);
drivers/infiniband/hw/hfi1/firmware.c
1398
if (user != mask)
drivers/infiniband/hw/hfi1/firmware.c
1401
(u32)user, (u32)mask);
drivers/infiniband/hw/hns/hns_roce_qp.c
473
bool user)
drivers/infiniband/hw/hns/hns_roce_qp.c
486
if (user)
drivers/infiniband/hw/hns/hns_roce_qp.c
495
struct hns_roce_qp *hr_qp, int has_rq, bool user)
drivers/infiniband/hw/hns/hns_roce_qp.c
497
u32 max_sge = proc_rq_sge(hr_dev, hr_qp, user);
drivers/infiniband/hw/hns/hns_roce_srq.c
268
bool user)
drivers/infiniband/hw/hns/hns_roce_srq.c
281
if (user)
drivers/infiniband/sw/rdmavt/mr.c
292
if (ibpd_to_rvtpd(pd)->user)
drivers/infiniband/sw/rdmavt/mr.c
736
if (pd->user)
drivers/infiniband/sw/rdmavt/mr.c
849
if (pd->user)
drivers/infiniband/sw/rdmavt/pd.c
43
pd->user = !!udata;
drivers/infiniband/sw/rdmavt/qp.c
1872
ibpd_to_rvtpd(qp->ibqp.pd)->user)
drivers/infiniband/sw/rdmavt/trace_mr.h
112
__entry->user = ibpd_to_rvtpd(sge->mr->pd)->user;
drivers/infiniband/sw/rdmavt/trace_mr.h
128
__entry->user
drivers/infiniband/sw/rdmavt/trace_mr.h
95
__field(int, user)
drivers/iommu/iommu.c
3463
unsigned int user;
drivers/iommu/iommu.c
3466
user = group->owner_cnt;
drivers/iommu/iommu.c
3469
return user;
drivers/iommu/iommufd/io_pagetable.h
310
bool inc, struct pfn_reader_user *user);
drivers/iommu/iommufd/pages.c
1012
bool inc, struct pfn_reader_user *user)
drivers/iommu/iommufd/pages.c
1026
rc = update_mm_locked_vm(pages, npages, inc, user);
drivers/iommu/iommufd/pages.c
1057
static int pfn_reader_user_update_pinned(struct pfn_reader_user *user,
drivers/iommu/iommufd/pages.c
1077
return iopt_pages_update_pinned(pages, npages, inc, user);
drivers/iommu/iommufd/pages.c
1134
struct pfn_reader_user user;
drivers/iommu/iommufd/pages.c
1141
return pfn_reader_user_update_pinned(&pfns->user, pfns->pages);
drivers/iommu/iommufd/pages.c
1172
struct pfn_reader_user *user;
drivers/iommu/iommufd/pages.c
1208
user = &pfns->user;
drivers/iommu/iommufd/pages.c
1209
if (start_index >= user->upages_end) {
drivers/iommu/iommufd/pages.c
1210
rc = pfn_reader_user_pin(user, pfns->pages, start_index,
drivers/iommu/iommufd/pages.c
1216
npages = user->upages_end - start_index;
drivers/iommu/iommufd/pages.c
1217
start_index -= user->upages_start;
drivers/iommu/iommufd/pages.c
1220
if (!user->file)
drivers/iommu/iommufd/pages.c
1221
batch_from_pages(&pfns->batch, user->upages + start_index,
drivers/iommu/iommufd/pages.c
1224
rc = batch_from_folios(&pfns->batch, &user->ufolios_next,
drivers/iommu/iommufd/pages.c
1225
&user->ufolios_offset, npages);
drivers/iommu/iommufd/pages.c
1281
pfn_reader_user_init(&pfns->user, pages);
drivers/iommu/iommufd/pages.c
1302
struct pfn_reader_user *user;
drivers/iommu/iommufd/pages.c
1307
user = &pfns->user;
drivers/iommu/iommufd/pages.c
1308
if (user->upages_end > pfns->batch_end_index) {
drivers/iommu/iommufd/pages.c
1311
unsigned long npages = user->upages_end - pfns->batch_end_index;
drivers/iommu/iommufd/pages.c
1313
user->upages_start;
drivers/iommu/iommufd/pages.c
1315
if (!user->file) {
drivers/iommu/iommufd/pages.c
1316
unpin_user_pages(user->upages + start_index, npages);
drivers/iommu/iommufd/pages.c
1318
long n = user->ufolios_len / sizeof(*user->ufolios);
drivers/iommu/iommufd/pages.c
1320
unpin_folios(user->ufolios_next,
drivers/iommu/iommufd/pages.c
1321
user->ufolios + n - user->ufolios_next);
drivers/iommu/iommufd/pages.c
1324
user->upages_end = pfns->batch_end_index;
drivers/iommu/iommufd/pages.c
1338
pfn_reader_user_destroy(&pfns->user, pfns->pages);
drivers/iommu/iommufd/pages.c
2139
struct pfn_reader_user *user,
drivers/iommu/iommufd/pages.c
2148
user->upages = out_pages + (cur_index - start_index);
drivers/iommu/iommufd/pages.c
2149
rc = pfn_reader_user_pin(user, pages, cur_index, last_index);
drivers/iommu/iommufd/pages.c
2152
cur_index = user->upages_end;
drivers/iommu/iommufd/pages.c
2182
struct pfn_reader_user user;
drivers/iommu/iommufd/pages.c
2187
pfn_reader_user_init(&user, pages);
drivers/iommu/iommufd/pages.c
2188
user.upages_len = (last_index - start_index + 1) * sizeof(*out_pages);
drivers/iommu/iommufd/pages.c
2216
rc = iopt_pages_fill(pages, &user, span.start_hole,
drivers/iommu/iommufd/pages.c
2229
rc = pfn_reader_user_update_pinned(&user, pages);
drivers/iommu/iommufd/pages.c
2232
user.upages = NULL;
drivers/iommu/iommufd/pages.c
2233
pfn_reader_user_destroy(&user, pages);
drivers/iommu/iommufd/pages.c
2239
user.upages = NULL;
drivers/iommu/iommufd/pages.c
2240
pfn_reader_user_destroy(&user, pages);
drivers/iommu/iommufd/pages.c
788
static void pfn_reader_user_init(struct pfn_reader_user *user,
drivers/iommu/iommufd/pages.c
791
user->upages = NULL;
drivers/iommu/iommufd/pages.c
792
user->upages_len = 0;
drivers/iommu/iommufd/pages.c
793
user->upages_start = 0;
drivers/iommu/iommufd/pages.c
794
user->upages_end = 0;
drivers/iommu/iommufd/pages.c
795
user->locked = -1;
drivers/iommu/iommufd/pages.c
796
user->gup_flags = FOLL_LONGTERM;
drivers/iommu/iommufd/pages.c
798
user->gup_flags |= FOLL_WRITE;
drivers/iommu/iommufd/pages.c
800
user->file = (pages->type == IOPT_ADDRESS_FILE) ? pages->file : NULL;
drivers/iommu/iommufd/pages.c
801
user->ufolios = NULL;
drivers/iommu/iommufd/pages.c
802
user->ufolios_len = 0;
drivers/iommu/iommufd/pages.c
803
user->ufolios_next = NULL;
drivers/iommu/iommufd/pages.c
804
user->ufolios_offset = 0;
drivers/iommu/iommufd/pages.c
807
static void pfn_reader_user_destroy(struct pfn_reader_user *user,
drivers/iommu/iommufd/pages.c
810
if (user->locked != -1) {
drivers/iommu/iommufd/pages.c
811
if (user->locked)
drivers/iommu/iommufd/pages.c
813
if (!user->file && pages->source_mm != current->mm)
drivers/iommu/iommufd/pages.c
815
user->locked = -1;
drivers/iommu/iommufd/pages.c
818
kfree(user->upages);
drivers/iommu/iommufd/pages.c
819
user->upages = NULL;
drivers/iommu/iommufd/pages.c
820
kfree(user->ufolios);
drivers/iommu/iommufd/pages.c
821
user->ufolios = NULL;
drivers/iommu/iommufd/pages.c
824
static long pin_memfd_pages(struct pfn_reader_user *user, unsigned long start,
drivers/iommu/iommufd/pages.c
830
struct page **upages = user->upages;
drivers/iommu/iommufd/pages.c
832
long nfolios = user->ufolios_len / sizeof(*user->ufolios);
drivers/iommu/iommufd/pages.c
839
nfolios = memfd_pin_folios(user->file, start, end, user->ufolios,
drivers/iommu/iommufd/pages.c
845
user->ufolios_next = user->ufolios;
drivers/iommu/iommufd/pages.c
846
user->ufolios_offset = offset;
drivers/iommu/iommufd/pages.c
849
struct folio *folio = user->ufolios[i];
drivers/iommu/iommufd/pages.c
876
static int pfn_reader_user_pin(struct pfn_reader_user *user,
drivers/iommu/iommufd/pages.c
892
if (!user->file && !user->upages) {
drivers/iommu/iommufd/pages.c
894
user->upages_len = npages * sizeof(*user->upages);
drivers/iommu/iommufd/pages.c
895
user->upages = temp_kmalloc(&user->upages_len, NULL, 0);
drivers/iommu/iommufd/pages.c
896
if (!user->upages)
drivers/iommu/iommufd/pages.c
900
if (user->file && !user->ufolios) {
drivers/iommu/iommufd/pages.c
901
user->ufolios_len = npages * sizeof(*user->ufolios);
drivers/iommu/iommufd/pages.c
902
user->ufolios = temp_kmalloc(&user->ufolios_len, NULL, 0);
drivers/iommu/iommufd/pages.c
903
if (!user->ufolios)
drivers/iommu/iommufd/pages.c
907
if (user->locked == -1) {
drivers/iommu/iommufd/pages.c
913
if (!user->file && remote_mm) {
drivers/iommu/iommufd/pages.c
917
user->locked = 0;
drivers/iommu/iommufd/pages.c
920
unum = user->file ? user->ufolios_len / sizeof(*user->ufolios) :
drivers/iommu/iommufd/pages.c
921
user->upages_len / sizeof(*user->upages);
drivers/iommu/iommufd/pages.c
927
if (user->file) {
drivers/iommu/iommufd/pages.c
929
rc = pin_memfd_pages(user, start, npages);
drivers/iommu/iommufd/pages.c
932
rc = pin_user_pages_fast(uptr, npages, user->gup_flags,
drivers/iommu/iommufd/pages.c
933
user->upages);
drivers/iommu/iommufd/pages.c
936
if (!user->locked) {
drivers/iommu/iommufd/pages.c
938
user->locked = 1;
drivers/iommu/iommufd/pages.c
941
user->gup_flags, user->upages,
drivers/iommu/iommufd/pages.c
942
&user->locked);
drivers/iommu/iommufd/pages.c
950
user->upages_start = start_index;
drivers/iommu/iommufd/pages.c
951
user->upages_end = start_index + rc;
drivers/iommu/iommufd/pages.c
984
bool inc, struct pfn_reader_user *user)
drivers/iommu/iommufd/pages.c
989
if (user && user->locked) {
drivers/iommu/iommufd/pages.c
991
user->locked = 0;
drivers/iommu/iommufd/pages.c
994
} else if ((!user || (!user->upages && !user->ufolios)) &&
drivers/md/dm-ioctl.c
1898
static int check_version(unsigned int cmd, struct dm_ioctl __user *user,
drivers/md/dm-ioctl.c
1906
if (copy_from_user(kernel_params->version, user->version, sizeof(kernel_params->version)))
drivers/md/dm-ioctl.c
1927
if (copy_to_user(user->version, kernel_params->version, sizeof(kernel_params->version)))
drivers/md/dm-ioctl.c
1945
static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kernel,
drivers/md/dm-ioctl.c
1954
(char __user *)user + sizeof(param_kernel->version),
drivers/md/dm-ioctl.c
1984
if (secure_data && clear_user(user, param_kernel->data_size))
drivers/md/dm-ioctl.c
1994
if (copy_from_user(&dmi->data, (char __user *)user + minimum_data_size,
drivers/md/dm-ioctl.c
1999
if (secure_data && clear_user(user, param_kernel->data_size))
drivers/md/dm-ioctl.c
2042
static int ctl_ioctl(struct file *file, uint command, struct dm_ioctl __user *user)
drivers/md/dm-ioctl.c
2066
r = check_version(cmd, user, &param_kernel);
drivers/md/dm-ioctl.c
2085
r = copy_params(user, &param_kernel, ioctl_flags, &param, &param_flags);
drivers/md/dm-ioctl.c
2108
if (!r && copy_to_user(user, param, param->data_size))
drivers/md/dm-vdo/indexer/config.c
30
struct uds_configuration *user)
drivers/md/dm-vdo/indexer/config.c
32
struct index_geometry *geometry = user->geometry;
drivers/md/dm-vdo/indexer/config.c
56
if (saved_config->cache_chapters != user->cache_chapters) {
drivers/md/dm-vdo/indexer/config.c
58
saved_config->cache_chapters, user->cache_chapters);
drivers/md/dm-vdo/indexer/config.c
62
if (saved_config->volume_index_mean_delta != user->volume_index_mean_delta) {
drivers/md/dm-vdo/indexer/config.c
65
user->volume_index_mean_delta);
drivers/md/dm-vdo/indexer/config.c
75
if (saved_config->sparse_sample_rate != user->sparse_sample_rate) {
drivers/md/dm-vdo/indexer/config.c
78
user->sparse_sample_rate);
drivers/md/dm-vdo/indexer/config.c
82
if (saved_config->nonce != user->nonce) {
drivers/md/dm-vdo/indexer/config.c
85
(unsigned long long) user->nonce);
drivers/media/dvb-frontends/cxd2880/cxd2880_io.h
39
void *user;
drivers/media/dvb-frontends/cxd2880/cxd2880_spi.h
31
void *user;
drivers/media/dvb-frontends/cxd2880/cxd2880_spi_device.c
110
spi->user = spi_device;
drivers/media/dvb-frontends/cxd2880/cxd2880_spi_device.c
22
if (!spi || !spi->user || !data || size == 0)
drivers/media/dvb-frontends/cxd2880/cxd2880_spi_device.c
25
spi_device = spi->user;
drivers/media/dvb-frontends/cxd2880/cxd2880_spi_device.c
50
if (!spi || !spi->user || !tx_data ||
drivers/media/dvb-frontends/cxd2880/cxd2880_spi_device.c
54
spi_device = spi->user;
drivers/media/dvb-frontends/cxd2880/cxd2880_tnrdmd.h
225
void *user;
drivers/media/usb/pvrusb2/pvrusb2-context.c
297
cp->stream->user = NULL;
drivers/media/usb/pvrusb2/pvrusb2-context.c
381
if (sp && sp->user) {
drivers/media/usb/pvrusb2/pvrusb2-context.c
387
sp->user = cp;
drivers/media/usb/pvrusb2/pvrusb2-context.h
22
struct pvr2_channel *user;
drivers/misc/atmel-ssc.c
52
if (ssc->user) {
drivers/misc/atmel-ssc.c
57
ssc->user++;
drivers/misc/atmel-ssc.c
71
if (ssc->user)
drivers/misc/atmel-ssc.c
72
ssc->user--;
drivers/misc/fastrpc.c
1590
list_del(&fl->user);
drivers/misc/fastrpc.c
1642
INIT_LIST_HEAD(&fl->user);
drivers/misc/fastrpc.c
1656
list_add_tail(&fl->user, &cctx->users);
drivers/misc/fastrpc.c
2461
static void fastrpc_notify_users(struct fastrpc_user *user)
drivers/misc/fastrpc.c
2465
spin_lock(&user->lock);
drivers/misc/fastrpc.c
2466
list_for_each_entry(ctx, &user->pending, node) {
drivers/misc/fastrpc.c
2470
spin_unlock(&user->lock);
drivers/misc/fastrpc.c
2477
struct fastrpc_user *user;
drivers/misc/fastrpc.c
2483
list_for_each_entry(user, &cctx->users, user)
drivers/misc/fastrpc.c
2484
fastrpc_notify_users(user);
drivers/misc/fastrpc.c
297
struct list_head user;
drivers/misc/fastrpc.c
595
struct fastrpc_user *user, u32 kernel, u32 sc,
drivers/misc/fastrpc.c
598
struct fastrpc_channel_ctx *cctx = user->cctx;
drivers/misc/fastrpc.c
608
ctx->fl = user;
drivers/misc/fastrpc.c
635
ctx->client_id = user->client_id;
drivers/misc/fastrpc.c
640
spin_lock(&user->lock);
drivers/misc/fastrpc.c
641
list_add_tail(&ctx->node, &user->pending);
drivers/misc/fastrpc.c
642
spin_unlock(&user->lock);
drivers/misc/fastrpc.c
658
spin_lock(&user->lock);
drivers/misc/fastrpc.c
660
spin_unlock(&user->lock);
drivers/mmc/core/block.c
420
struct mmc_ioc_cmd __user *user)
drivers/mmc/core/block.c
431
if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
drivers/mmc/core/block.c
717
struct mmc_ioc_multi_cmd __user *user,
drivers/mmc/core/block.c
721
struct mmc_ioc_cmd __user *cmds = user->cmds;
drivers/mmc/core/block.c
729
if (copy_from_user(&num_of_cmds, &user->num_of_cmds,
drivers/mtd/nand/ecc-mxic.c
224
struct nand_ecc_props *user = &nand->ecc.user_conf;
drivers/mtd/nand/ecc-mxic.c
252
if (user->step_size && user->strength) {
drivers/mtd/nand/ecc-mxic.c
253
step_size = user->step_size;
drivers/mtd/nand/ecc-mxic.c
254
strength = user->strength;
drivers/mtd/nand/raw/atmel/pmecc.c
347
struct atmel_pmecc_user *user;
drivers/mtd/nand/raw/atmel/pmecc.c
355
size = sizeof(*user);
drivers/mtd/nand/raw/atmel/pmecc.c
366
user = devm_kzalloc(pmecc->dev, size, GFP_KERNEL);
drivers/mtd/nand/raw/atmel/pmecc.c
367
if (!user)
drivers/mtd/nand/raw/atmel/pmecc.c
370
user->pmecc = pmecc;
drivers/mtd/nand/raw/atmel/pmecc.c
372
user->partial_syn = (s16 *)PTR_ALIGN(user + 1, sizeof(u16));
drivers/mtd/nand/raw/atmel/pmecc.c
373
user->si = user->partial_syn + ((2 * req->ecc.strength) + 1);
drivers/mtd/nand/raw/atmel/pmecc.c
374
user->lmu = user->si + ((2 * req->ecc.strength) + 1);
drivers/mtd/nand/raw/atmel/pmecc.c
375
user->smu = user->lmu + (req->ecc.strength + 1);
drivers/mtd/nand/raw/atmel/pmecc.c
376
user->mu = (s32 *)PTR_ALIGN(user->smu +
drivers/mtd/nand/raw/atmel/pmecc.c
380
user->dmu = user->mu + req->ecc.strength + 1;
drivers/mtd/nand/raw/atmel/pmecc.c
381
user->delta = user->dmu + req->ecc.strength + 1;
drivers/mtd/nand/raw/atmel/pmecc.c
387
user->gf_tables = gf_tables;
drivers/mtd/nand/raw/atmel/pmecc.c
389
user->eccbytes = req->ecc.bytes / req->ecc.nsectors;
drivers/mtd/nand/raw/atmel/pmecc.c
396
user->cache.cfg = PMECC_CFG_BCH_STRENGTH(strength) |
drivers/mtd/nand/raw/atmel/pmecc.c
400
user->cache.cfg |= PMECC_CFG_SECTOR1024;
drivers/mtd/nand/raw/atmel/pmecc.c
402
user->cache.sarea = req->oobsize - 1;
drivers/mtd/nand/raw/atmel/pmecc.c
403
user->cache.saddr = req->ecc.ooboffset;
drivers/mtd/nand/raw/atmel/pmecc.c
404
user->cache.eaddr = req->ecc.ooboffset + req->ecc.bytes - 1;
drivers/mtd/nand/raw/atmel/pmecc.c
406
return user;
drivers/mtd/nand/raw/atmel/pmecc.c
410
static int get_strength(struct atmel_pmecc_user *user)
drivers/mtd/nand/raw/atmel/pmecc.c
412
const int *strengths = user->pmecc->caps->strengths;
drivers/mtd/nand/raw/atmel/pmecc.c
414
return strengths[user->cache.cfg & PMECC_CFG_BCH_STRENGTH_MASK];
drivers/mtd/nand/raw/atmel/pmecc.c
417
static int get_sectorsize(struct atmel_pmecc_user *user)
drivers/mtd/nand/raw/atmel/pmecc.c
419
return user->cache.cfg & PMECC_CFG_SECTOR1024 ? 1024 : 512;
drivers/mtd/nand/raw/atmel/pmecc.c
422
static void atmel_pmecc_gen_syndrome(struct atmel_pmecc_user *user, int sector)
drivers/mtd/nand/raw/atmel/pmecc.c
424
int strength = get_strength(user);
drivers/mtd/nand/raw/atmel/pmecc.c
430
value = readl_relaxed(user->pmecc->regs.base +
drivers/mtd/nand/raw/atmel/pmecc.c
435
user->partial_syn[(2 * i) + 1] = value;
drivers/mtd/nand/raw/atmel/pmecc.c
439
static void atmel_pmecc_substitute(struct atmel_pmecc_user *user)
drivers/mtd/nand/raw/atmel/pmecc.c
441
int degree = get_sectorsize(user) == 512 ? 13 : 14;
drivers/mtd/nand/raw/atmel/pmecc.c
443
int strength = get_strength(user);
drivers/mtd/nand/raw/atmel/pmecc.c
444
s16 *alpha_to = user->gf_tables->alpha_to;
drivers/mtd/nand/raw/atmel/pmecc.c
445
s16 *index_of = user->gf_tables->index_of;
drivers/mtd/nand/raw/atmel/pmecc.c
446
s16 *partial_syn = user->partial_syn;
drivers/mtd/nand/raw/atmel/pmecc.c
454
si = user->si;
drivers/mtd/nand/raw/atmel/pmecc.c
480
static void atmel_pmecc_get_sigma(struct atmel_pmecc_user *user)
drivers/mtd/nand/raw/atmel/pmecc.c
482
s16 *lmu = user->lmu;
drivers/mtd/nand/raw/atmel/pmecc.c
483
s16 *si = user->si;
drivers/mtd/nand/raw/atmel/pmecc.c
484
s32 *mu = user->mu;
drivers/mtd/nand/raw/atmel/pmecc.c
485
s32 *dmu = user->dmu;
drivers/mtd/nand/raw/atmel/pmecc.c
486
s32 *delta = user->delta;
drivers/mtd/nand/raw/atmel/pmecc.c
487
int degree = get_sectorsize(user) == 512 ? 13 : 14;
drivers/mtd/nand/raw/atmel/pmecc.c
489
int strength = get_strength(user);
drivers/mtd/nand/raw/atmel/pmecc.c
491
s16 *index_of = user->gf_tables->index_of;
drivers/mtd/nand/raw/atmel/pmecc.c
492
s16 *alpha_to = user->gf_tables->alpha_to;
drivers/mtd/nand/raw/atmel/pmecc.c
495
s16 *smu = user->smu;
drivers/mtd/nand/raw/atmel/pmecc.c
634
static int atmel_pmecc_err_location(struct atmel_pmecc_user *user)
drivers/mtd/nand/raw/atmel/pmecc.c
636
int sector_size = get_sectorsize(user);
drivers/mtd/nand/raw/atmel/pmecc.c
638
struct atmel_pmecc *pmecc = user->pmecc;
drivers/mtd/nand/raw/atmel/pmecc.c
639
int strength = get_strength(user);
drivers/mtd/nand/raw/atmel/pmecc.c
642
s16 *smu = user->smu;
drivers/mtd/nand/raw/atmel/pmecc.c
647
for (i = 0; i <= user->lmu[strength + 1] >> 1; i++) {
drivers/mtd/nand/raw/atmel/pmecc.c
673
if (roots_nbr == user->lmu[strength + 1] >> 1)
drivers/mtd/nand/raw/atmel/pmecc.c
683
int atmel_pmecc_correct_sector(struct atmel_pmecc_user *user, int sector,
drivers/mtd/nand/raw/atmel/pmecc.c
686
struct atmel_pmecc *pmecc = user->pmecc;
drivers/mtd/nand/raw/atmel/pmecc.c
687
int sectorsize = get_sectorsize(user);
drivers/mtd/nand/raw/atmel/pmecc.c
688
int eccbytes = user->eccbytes;
drivers/mtd/nand/raw/atmel/pmecc.c
691
if (!(user->isr & BIT(sector)))
drivers/mtd/nand/raw/atmel/pmecc.c
694
atmel_pmecc_gen_syndrome(user, sector);
drivers/mtd/nand/raw/atmel/pmecc.c
695
atmel_pmecc_substitute(user);
drivers/mtd/nand/raw/atmel/pmecc.c
696
atmel_pmecc_get_sigma(user);
drivers/mtd/nand/raw/atmel/pmecc.c
698
nerrors = atmel_pmecc_err_location(user);
drivers/mtd/nand/raw/atmel/pmecc.c
739
bool atmel_pmecc_correct_erased_chunks(struct atmel_pmecc_user *user)
drivers/mtd/nand/raw/atmel/pmecc.c
741
return user->pmecc->caps->correct_erased_chunks;
drivers/mtd/nand/raw/atmel/pmecc.c
745
void atmel_pmecc_get_generated_eccbytes(struct atmel_pmecc_user *user,
drivers/mtd/nand/raw/atmel/pmecc.c
748
struct atmel_pmecc *pmecc = user->pmecc;
drivers/mtd/nand/raw/atmel/pmecc.c
752
for (i = 0; i < user->eccbytes; i++)
drivers/mtd/nand/raw/atmel/pmecc.c
765
int atmel_pmecc_enable(struct atmel_pmecc_user *user, int op)
drivers/mtd/nand/raw/atmel/pmecc.c
767
struct atmel_pmecc *pmecc = user->pmecc;
drivers/mtd/nand/raw/atmel/pmecc.c
775
mutex_lock(&user->pmecc->lock);
drivers/mtd/nand/raw/atmel/pmecc.c
777
cfg = user->cache.cfg;
drivers/mtd/nand/raw/atmel/pmecc.c
784
writel(user->cache.sarea, pmecc->regs.base + ATMEL_PMECC_SAREA);
drivers/mtd/nand/raw/atmel/pmecc.c
785
writel(user->cache.saddr, pmecc->regs.base + ATMEL_PMECC_SADDR);
drivers/mtd/nand/raw/atmel/pmecc.c
786
writel(user->cache.eaddr, pmecc->regs.base + ATMEL_PMECC_EADDR);
drivers/mtd/nand/raw/atmel/pmecc.c
795
void atmel_pmecc_disable(struct atmel_pmecc_user *user)
drivers/mtd/nand/raw/atmel/pmecc.c
797
atmel_pmecc_reset(user->pmecc);
drivers/mtd/nand/raw/atmel/pmecc.c
798
mutex_unlock(&user->pmecc->lock);
drivers/mtd/nand/raw/atmel/pmecc.c
802
int atmel_pmecc_wait_rdy(struct atmel_pmecc_user *user)
drivers/mtd/nand/raw/atmel/pmecc.c
804
struct atmel_pmecc *pmecc = user->pmecc;
drivers/mtd/nand/raw/atmel/pmecc.c
818
user->isr = readl_relaxed(pmecc->regs.base + ATMEL_PMECC_ISR);
drivers/mtd/nand/raw/atmel/pmecc.h
59
int atmel_pmecc_enable(struct atmel_pmecc_user *user, int op);
drivers/mtd/nand/raw/atmel/pmecc.h
60
void atmel_pmecc_disable(struct atmel_pmecc_user *user);
drivers/mtd/nand/raw/atmel/pmecc.h
61
int atmel_pmecc_wait_rdy(struct atmel_pmecc_user *user);
drivers/mtd/nand/raw/atmel/pmecc.h
62
int atmel_pmecc_correct_sector(struct atmel_pmecc_user *user, int sector,
drivers/mtd/nand/raw/atmel/pmecc.h
64
bool atmel_pmecc_correct_erased_chunks(struct atmel_pmecc_user *user);
drivers/mtd/nand/raw/atmel/pmecc.h
65
void atmel_pmecc_get_generated_eccbytes(struct atmel_pmecc_user *user,
drivers/mtd/nand/spi/esmt.c
112
struct otp_info *buf, size_t *retlen, bool user)
drivers/mtd/nand/spi/esmt.c
119
buf->length = user ? spinand_user_otp_size(spinand) :
drivers/mtd/nand/spi/micron.c
217
bool user)
drivers/mtd/nand/spi/micron.c
230
buf->length = user ? spinand_user_otp_size(spinand) :
drivers/net/dsa/bcm_sf2.c
929
netif_carrier_off(dsa_to_port(ds, port)->user);
drivers/net/dsa/microchip/ksz9477.c
1541
struct net_device *user;
drivers/net/dsa/microchip/ksz9477.c
1573
user = dsa_to_port(ds, port)->user;
drivers/net/dsa/microchip/ksz9477.c
1574
user->features |= KSZ9477_SUPPORTED_HSR_FEATURES;
drivers/net/dsa/microchip/ksz_common.c
3175
if (!netif_carrier_ok(dp->user))
drivers/net/dsa/microchip/ksz_common.c
3284
if (!netif_carrier_ok(dp->user))
drivers/net/dsa/microchip/ksz_common.c
4783
struct net_device *user = dsa_to_port(ds, port)->user;
drivers/net/dsa/microchip/ksz_common.c
4784
const unsigned char *addr = user->dev_addr;
drivers/net/dsa/microchip/ksz_common.c
4815
struct net_device *user = dsa_to_port(ds, port)->user;
drivers/net/dsa/microchip/ksz_common.c
4816
const unsigned char *addr = user->dev_addr;
drivers/net/dsa/mt7530.c
2901
phydev = dp->user->phydev;
drivers/net/dsa/mv88e6xxx/leds.c
724
if (dp->user)
drivers/net/dsa/mv88e6xxx/leds.c
725
return &dp->user->dev;
drivers/net/dsa/ocelot/felix.c
2277
if (!(dsa_to_port(ds, port)->user->flags & IFF_UP))
drivers/net/dsa/ocelot/felix.c
2292
if (!(dsa_to_port(ds, port)->user->flags & IFF_UP))
drivers/net/dsa/ocelot/felix.c
2444
return dsa_to_port(ds, port)->user;
drivers/net/dsa/ocelot/felix.c
537
struct net_device *user = other_dp->user;
drivers/net/dsa/ocelot/felix.c
539
if (other_dp != dp && (user->flags & IFF_UP) &&
drivers/net/dsa/qca/qca8k-leds.c
359
if (dp->user)
drivers/net/dsa/qca/qca8k-leds.c
360
return &dp->user->dev;
drivers/net/dsa/sja1105/sja1105_main.c
2628
dsa_enqueue_skb(skb, dsa_to_port(ds, port)->user);
drivers/net/dsa/xrs700x/xrs700x.c
567
struct net_device *user;
drivers/net/dsa/xrs700x/xrs700x.c
593
ret = hsr_get_port_type(hsr, dsa_to_port(ds, port)->user, &type);
drivers/net/dsa/xrs700x/xrs700x.c
662
user = dsa_to_port(ds, hsr_pair[i])->user;
drivers/net/dsa/xrs700x/xrs700x.c
663
user->features |= XRS7000X_SUPPORTED_HSR_FEATURES;
drivers/net/dsa/xrs700x/xrs700x.c
674
struct net_device *user;
drivers/net/dsa/xrs700x/xrs700x.c
716
user = dsa_to_port(ds, hsr_pair[i])->user;
drivers/net/dsa/xrs700x/xrs700x.c
717
user->features &= ~XRS7000X_SUPPORTED_HSR_FEATURES;
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
1876
int user,
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2345
int user,
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2354
user,
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
532
int user,
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
9256
int user,
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
9265
if (!user && is_t4(adapter->params.chip))
drivers/net/ethernet/freescale/enetc/enetc4_pf.c
456
struct ntmp_user *user = &si->ntmp_user;
drivers/net/ethernet/freescale/enetc/enetc4_pf.c
459
memset(&user->tbl, 0, sizeof(user->tbl));
drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
65
struct ntmp_user *user = &si->ntmp_user;
drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
70
user->cbdr_num = 1;
drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
71
user->dev = dev;
drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
72
user->ring = devm_kcalloc(dev, user->cbdr_num,
drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
74
if (!user->ring)
drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
84
return ntmp_init_cbdr(user->ring, dev, &regs);
drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
90
struct ntmp_user *user = &si->ntmp_user;
drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
92
ntmp_free_cbdr(user->ring);
drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
93
user->dev = NULL;
drivers/net/ethernet/freescale/enetc/ntmp.c
110
static int netc_xmit_ntmp_cmd(struct ntmp_user *user, union netc_cbd *cbd)
drivers/net/ethernet/freescale/enetc/ntmp.c
121
cbdr = &user->ring[0];
drivers/net/ethernet/freescale/enetc/ntmp.c
154
dev_err(user->dev, "Command BD error: 0x%04x\n", status);
drivers/net/ethernet/freescale/enetc/ntmp.c
233
static int ntmp_delete_entry_by_id(struct ntmp_user *user, int tbl_id,
drivers/net/ethernet/freescale/enetc/ntmp.c
238
.dev = user->dev,
drivers/net/ethernet/freescale/enetc/ntmp.c
253
err = netc_xmit_ntmp_cmd(user, &cbd);
drivers/net/ethernet/freescale/enetc/ntmp.c
255
dev_err(user->dev,
drivers/net/ethernet/freescale/enetc/ntmp.c
264
static int ntmp_query_entry_by_id(struct ntmp_user *user, int tbl_id,
drivers/net/ethernet/freescale/enetc/ntmp.c
280
err = netc_xmit_ntmp_cmd(user, &cbd);
drivers/net/ethernet/freescale/enetc/ntmp.c
282
dev_err(user->dev,
drivers/net/ethernet/freescale/enetc/ntmp.c
296
dev_err(user->dev,
drivers/net/ethernet/freescale/enetc/ntmp.c
305
int ntmp_maft_add_entry(struct ntmp_user *user, u32 entry_id,
drivers/net/ethernet/freescale/enetc/ntmp.c
309
.dev = user->dev,
drivers/net/ethernet/freescale/enetc/ntmp.c
321
ntmp_fill_crd_eid(&req->rbe, user->tbl.maft_ver, 0, 0, entry_id);
drivers/net/ethernet/freescale/enetc/ntmp.c
327
err = netc_xmit_ntmp_cmd(user, &cbd);
drivers/net/ethernet/freescale/enetc/ntmp.c
329
dev_err(user->dev, "Failed to add MAFT entry 0x%x, err: %pe\n",
drivers/net/ethernet/freescale/enetc/ntmp.c
338
int ntmp_maft_query_entry(struct ntmp_user *user, u32 entry_id,
drivers/net/ethernet/freescale/enetc/ntmp.c
342
.dev = user->dev,
drivers/net/ethernet/freescale/enetc/ntmp.c
353
ntmp_fill_crd_eid(req, user->tbl.maft_ver, 0, 0, entry_id);
drivers/net/ethernet/freescale/enetc/ntmp.c
354
err = ntmp_query_entry_by_id(user, NTMP_MAFT_ID,
drivers/net/ethernet/freescale/enetc/ntmp.c
371
int ntmp_maft_delete_entry(struct ntmp_user *user, u32 entry_id)
drivers/net/ethernet/freescale/enetc/ntmp.c
373
return ntmp_delete_entry_by_id(user, NTMP_MAFT_ID, user->tbl.maft_ver,
drivers/net/ethernet/freescale/enetc/ntmp.c
378
int ntmp_rsst_update_entry(struct ntmp_user *user, const u32 *table,
drivers/net/ethernet/freescale/enetc/ntmp.c
381
struct ntmp_dma_buf data = {.dev = user->dev};
drivers/net/ethernet/freescale/enetc/ntmp.c
396
ntmp_fill_crd_eid(&req->rbe, user->tbl.rsst_ver, 0,
drivers/net/ethernet/freescale/enetc/ntmp.c
404
err = netc_xmit_ntmp_cmd(user, &cbd);
drivers/net/ethernet/freescale/enetc/ntmp.c
406
dev_err(user->dev, "Failed to update RSST entry, err: %pe\n",
drivers/net/ethernet/freescale/enetc/ntmp.c
415
int ntmp_rsst_query_entry(struct ntmp_user *user, u32 *table, int count)
drivers/net/ethernet/freescale/enetc/ntmp.c
417
struct ntmp_dma_buf data = {.dev = user->dev};
drivers/net/ethernet/freescale/enetc/ntmp.c
434
ntmp_fill_crd_eid(req, user->tbl.rsst_ver, 0, 0, 0);
drivers/net/ethernet/freescale/enetc/ntmp.c
437
err = netc_xmit_ntmp_cmd(user, &cbd);
drivers/net/ethernet/freescale/enetc/ntmp.c
439
dev_err(user->dev, "Failed to query RSST entry, err: %pe\n",
drivers/net/ethernet/ibm/ehea/ehea.h
166
struct h_epa_user user; /* user space accessible resource
drivers/net/ethernet/ibm/ehea/ehea_phyp.h
55
epas->user.addr = paddr_user;
drivers/net/ethernet/ibm/ehea/ehea_phyp.h
63
epas->user.addr = 0;
drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
1330
struct ice_rx_flow_userdef *user)
drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
1408
if (user && user->flex_fltr) {
drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
1410
ice_flow_add_fld_raw(seg, user->flex_offset,
drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
1414
ice_flow_add_fld_raw(&tun_seg[1], user->flex_offset,
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1582
struct idpf_vport_user_config_data *user;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1630
user = &vport_config->user_config;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1631
user->num_req_rx_qs = le16_to_cpu(vport_msg->num_rx_q);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1632
user->num_req_tx_qs = le16_to_cpu(vport_msg->num_tx_q);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1635
num_xdpsq = libeth_xdpsq_num(user->num_req_rx_qs,
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1636
user->num_req_tx_qs,
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1641
vport_msg->num_tx_q = cpu_to_le16(user->num_req_tx_qs + num_xdpsq);
drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
506
erule->user = VCAP_USER_TC_EXTRA;
drivers/net/ethernet/microchip/vcap/vcap_api.c
2073
static u32 vcap_sort_key(u32 max_size, u32 size, u8 user, u16 prio)
drivers/net/ethernet/microchip/vcap/vcap_api.c
2075
return ((max_size - size) << 24) | (user << 16) | prio;
drivers/net/ethernet/microchip/vcap/vcap_api.c
2107
ri->sort_key = vcap_sort_key(sw_count, ri->size, ri->data.user,
drivers/net/ethernet/microchip/vcap/vcap_api.c
2249
if (ri->data.user <= VCAP_USER_QOS)
drivers/net/ethernet/microchip/vcap/vcap_api.c
2313
enum vcap_user user, u16 priority,
drivers/net/ethernet/microchip/vcap/vcap_api.c
2355
ri->data.user = user;
drivers/net/ethernet/microchip/vcap/vcap_api.c
3579
ri->data.user, ri->data.priority, 0);
drivers/net/ethernet/microchip/vcap/vcap_api.h
186
enum vcap_user user; /* rule owner */
drivers/net/ethernet/microchip/vcap/vcap_api_client.h
161
enum vcap_user user,
drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c
248
out->prf(out->dst, " user: %d\n", ri->data.user);
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
1357
enum vcap_user user = VCAP_USER_VCAP_UTIL;
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
1380
rule = vcap_alloc_rule(&test_vctrl, &test_netdev, vcap_chain_id, user,
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
247
enum vcap_user user, u16 priority,
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
288
rule = vcap_alloc_rule(&test_vctrl, &test_netdev, cid, user, priority,
drivers/net/ethernet/sfc/falcon/falcon.c
482
static int dummy_rx_push_rss_config(struct ef4_nic *efx, bool user,
drivers/net/ethernet/sfc/falcon/falcon.c
486
(void) user;
drivers/net/ethernet/sfc/falcon/falcon.c
491
static int falcon_b0_rx_push_rss_config(struct ef4_nic *efx, bool user,
drivers/net/ethernet/sfc/falcon/falcon.c
496
(void) user;
drivers/net/ethernet/sfc/falcon/net_driver.h
1088
int (*rx_push_rss_config)(struct ef4_nic *efx, bool user,
drivers/net/ethernet/sfc/mcdi_filters.c
2222
int efx_mcdi_pf_rx_push_rss_config(struct efx_nic *efx, bool user,
drivers/net/ethernet/sfc/mcdi_filters.c
2236
if (rc == -ENOBUFS && !user) {
drivers/net/ethernet/sfc/mcdi_filters.c
2271
int efx_mcdi_vf_rx_push_rss_config(struct efx_nic *efx, bool user,
drivers/net/ethernet/sfc/mcdi_filters.c
2277
if (user)
drivers/net/ethernet/sfc/mcdi_filters.h
151
int efx_mcdi_pf_rx_push_rss_config(struct efx_nic *efx, bool user,
drivers/net/ethernet/sfc/mcdi_filters.h
154
int efx_mcdi_vf_rx_push_rss_config(struct efx_nic *efx, bool user,
drivers/net/ethernet/sfc/net_driver.h
1454
int (*rx_push_rss_config)(struct efx_nic *efx, bool user,
drivers/net/ethernet/sfc/siena/net_driver.h
1353
int (*rx_push_rss_config)(struct efx_nic *efx, bool user,
drivers/net/ethernet/sfc/siena/siena.c
368
static int siena_rx_push_rss_config(struct efx_nic *efx, bool user,
drivers/net/ethernet/sfc/tc.c
2337
act->user = &rule->acts;
drivers/net/ethernet/sfc/tc.h
80
struct efx_tc_action_set_list *user;
drivers/net/ethernet/sfc/tc_encap_actions.c
441
acts = act->user;
drivers/net/ethernet/sfc/tc_encap_actions.c
489
acts = act->user;
drivers/net/ethernet/ti/davinci_mdio.c
361
reg = readl(&regs->user[0].access);
drivers/net/ethernet/ti/davinci_mdio.c
381
reg = readl(&regs->user[0].access);
drivers/net/ethernet/ti/davinci_mdio.c
426
writel(reg, &data->regs->user[0].access);
drivers/net/ethernet/ti/davinci_mdio.c
434
reg = readl(&data->regs->user[0].access);
drivers/net/ethernet/ti/davinci_mdio.c
467
writel(reg, &data->regs->user[0].access);
drivers/net/ethernet/ti/davinci_mdio.c
86
} user[];
drivers/net/fddi/skfp/fplustm.c
1053
struct fddi_addr *user,
drivers/net/fddi/skfp/fplustm.c
1065
*own = *user ;
drivers/net/fddi/skfp/h/smt.h
762
struct smp_p_user user ; /* user field */
drivers/net/fddi/skfp/smt.c
1074
smt_fill_user(smc,&sif->user) ; /* set user field */
drivers/net/fddi/skfp/smt.c
1499
static void smt_fill_user(struct s_smc *smc, struct smp_p_user *user)
drivers/net/fddi/skfp/smt.c
1501
SMTSETPARA(user,SMT_P_USER) ;
drivers/net/fddi/skfp/smt.c
1502
memcpy((char *) user->us_data,
drivers/net/fddi/skfp/smt.c
1504
sizeof(user->us_data)) ;
drivers/net/fddi/skfp/smt.c
89
static void smt_fill_user(struct s_smc *smc, struct smp_p_user *user);
drivers/net/team/team_core.c
1437
ctx->data.bool_val = port->user.linkup;
drivers/net/team/team_core.c
1447
port->user.linkup = ctx->data.bool_val;
drivers/net/team/team_core.c
1458
ctx->data.bool_val = port->user.linkup_enabled;
drivers/net/team/team_core.c
1466
port->user.linkup_enabled = ctx->data.bool_val;
drivers/net/team/team_core.c
96
bool new_linkup = port->user.linkup_enabled ? port->user.linkup :
drivers/net/wireless/ath/ath11k/dp_rx.c
1364
struct htt_ppdu_stats *ppdu_stats, u8 user)
drivers/net/wireless/ath/ath11k/dp_rx.c
1372
struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
drivers/net/wireless/ath/ath11k/dp_rx.c
1514
u8 user;
drivers/net/wireless/ath/ath11k/dp_rx.c
1516
for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
drivers/net/wireless/ath/ath11k/dp_rx.c
1517
ath11k_update_per_peer_tx_stats(ar, ppdu_stats, user);
drivers/net/wireless/ath/ath12k/dp_htt.c
187
struct htt_ppdu_stats *ppdu_stats, u8 user)
drivers/net/wireless/ath/ath12k/dp_htt.c
194
struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
drivers/net/wireless/ath/ath12k/dp_htt.c
360
u8 user;
drivers/net/wireless/ath/ath12k/dp_htt.c
362
for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
drivers/net/wireless/ath/ath12k/dp_htt.c
363
ath12k_update_per_peer_tx_stats(dp_pdev, ppdu_stats, user);
drivers/net/wireless/ath/ath12k/dp_mon.c
416
u8 user;
drivers/net/wireless/ath/ath12k/dp_mon.c
450
for (user = 0; user < ppduinfo->eht_info.num_user_info; user++)
drivers/net/wireless/ath/ath12k/dp_mon.c
451
put_unaligned_le32(ppduinfo->eht_info.user_info[user],
drivers/net/wireless/ath/ath12k/dp_mon.c
452
&eht->user_info[user]);
drivers/net/wireless/ath/ath12k/wifi7/dp_mon.c
1236
ath12k_wifi7_dp_mon_hal_rx_parse_eht_mumimo_user(const struct hal_eht_sig_mu_mimo *user,
drivers/net/wireless/ath/ath12k/wifi7/dp_mon.c
1252
ATH12K_LE32_DEC_ENC(user->info0,
drivers/net/wireless/ath/ath12k/wifi7/dp_mon.c
1255
ATH12K_LE32_DEC_ENC(user->info0,
drivers/net/wireless/ath/ath12k/wifi7/dp_mon.c
1258
ATH12K_LE32_DEC_ENC(user->info0,
drivers/net/wireless/ath/ath12k/wifi7/dp_mon.c
1261
ATH12K_LE32_DEC_ENC(user->info0,
drivers/net/wireless/ath/ath12k/wifi7/dp_mon.c
1265
ppdu_info->mcs = le32_get_bits(user->info0,
drivers/net/wireless/ath/ath12k/wifi7/dp_mon.c
1270
ath12k_wifi7_dp_mon_hal_rx_parse_eht_non_mumimo_user(const struct hal_eht_sig_non_mu_mimo *user,
drivers/net/wireless/ath/ath12k/wifi7/dp_mon.c
1287
ATH12K_LE32_DEC_ENC(user->info0,
drivers/net/wireless/ath/ath12k/wifi7/dp_mon.c
1290
ATH12K_LE32_DEC_ENC(user->info0,
drivers/net/wireless/ath/ath12k/wifi7/dp_mon.c
1293
ATH12K_LE32_DEC_ENC(user->info0,
drivers/net/wireless/ath/ath12k/wifi7/dp_mon.c
1296
ATH12K_LE32_DEC_ENC(user->info0,
drivers/net/wireless/ath/ath12k/wifi7/dp_mon.c
1299
ATH12K_LE32_DEC_ENC(user->info0,
drivers/net/wireless/ath/ath12k/wifi7/dp_mon.c
1303
ppdu_info->mcs = le32_get_bits(user->info0,
drivers/net/wireless/ath/ath12k/wifi7/dp_mon.c
1306
ppdu_info->nss = le32_get_bits(user->info0,
drivers/net/wireless/ath/carl9170/fwcmd.h
107
__le16 user;
drivers/net/wireless/ath/carl9170/fwcmd.h
98
__le16 user;
drivers/net/wireless/ath/carl9170/mac.c
480
key.user = cpu_to_le16(id);
drivers/net/wireless/ath/carl9170/mac.c
495
key.user = cpu_to_le16(id);
drivers/net/wireless/broadcom/b43/phy_lp.c
716
static void lpphy_set_deaf(struct b43_wldev *dev, bool user)
drivers/net/wireless/broadcom/b43/phy_lp.c
720
if (user)
drivers/net/wireless/broadcom/b43/phy_lp.c
727
static void lpphy_clear_deaf(struct b43_wldev *dev, bool user)
drivers/net/wireless/broadcom/b43/phy_lp.c
731
if (user)
drivers/net/wireless/broadcom/b43/phy_lp.c
753
static void lpphy_disable_crs(struct b43_wldev *dev, bool user)
drivers/net/wireless/broadcom/b43/phy_lp.c
755
lpphy_set_deaf(dev, user);
drivers/net/wireless/broadcom/b43/phy_lp.c
782
static void lpphy_restore_crs(struct b43_wldev *dev, bool user)
drivers/net/wireless/broadcom/b43/phy_lp.c
784
lpphy_clear_deaf(dev, user);
drivers/net/wireless/intel/iwlwifi/mld/rx.c
263
int user = le32_get_bits(phy_data->ntfy->sigs.vht.a1,
drivers/net/wireless/intel/iwlwifi/mld/rx.c
288
vht->mcs_nss[user] &= 0x0f;
drivers/net/wireless/intel/iwlwifi/mld/rx.c
289
vht->mcs_nss[user] |= rx_status->rate_idx << 4;
drivers/net/wireless/intel/iwlwifi/mld/rx.c
293
vht->coding = 0x1 << user;
drivers/net/wireless/realtek/rtw89/core.c
1885
const struct rtw89_rxinfo_user *user;
drivers/net/wireless/realtek/rtw89/core.c
1918
user = &rxinfo->user[i];
drivers/net/wireless/realtek/rtw89/core.c
1919
if (!le32_get_bits(user->w0, RTW89_RXINFO_USER_MAC_ID_VALID))
drivers/net/wireless/realtek/rtw89/core.c
1926
le32_get_bits(user->w0, RTW89_RXINFO_USER_MACID);
drivers/net/wireless/realtek/rtw89/core.c
1929
le32_get_bits(user->w0, RTW89_RXINFO_USER_MACID_V1);
drivers/net/wireless/realtek/rtw89/core.c
1932
le32_get_bits(user->w0, RTW89_RXINFO_USER_DATA);
drivers/net/wireless/realtek/rtw89/core.c
1934
le32_get_bits(user->w0, RTW89_RXINFO_USER_BCN);
drivers/net/wireless/realtek/rtw89/txrx.h
445
struct rtw89_rxinfo_user user[];
drivers/perf/apple_m1_cpu_pmu.c
344
static void __m1_pmu_configure_event_filter(unsigned int index, bool user,
drivers/perf/apple_m1_cpu_pmu.c
363
if (user)
drivers/perf/apple_m1_cpu_pmu.c
413
bool user = config_base & M1_PMU_CFG_COUNT_USER;
drivers/perf/apple_m1_cpu_pmu.c
416
__m1_pmu_configure_event_filter(index, user && host, kernel && host, true);
drivers/perf/apple_m1_cpu_pmu.c
417
__m1_pmu_configure_event_filter(index, user && guest, kernel && guest, false);
drivers/perf/apple_m1_cpu_pmu.c
424
bool user, kernel;
drivers/perf/apple_m1_cpu_pmu.c
428
user = event->hw.config_base & M1_PMU_CFG_COUNT_USER;
drivers/rtc/rtc-isl12022.c
433
u32 user, val;
drivers/rtc/rtc-isl12022.c
442
user = 0;
drivers/rtc/rtc-isl12022.c
444
user |= RTC_VL_BACKUP_LOW;
drivers/rtc/rtc-isl12022.c
447
user |= RTC_VL_BACKUP_EMPTY;
drivers/rtc/rtc-isl12022.c
449
return put_user(user, (u32 __user *)arg);
drivers/sbus/char/openprom.c
374
static char * copyin_string(char __user *user, size_t len)
drivers/sbus/char/openprom.c
379
return memdup_user_nul(user, len);
drivers/scsi/aic7xxx/aic79xx.h
757
struct ahd_transinfo user;
drivers/scsi/aic7xxx/aic79xx_core.c
3689
transinfo = &tinfo->user;
drivers/scsi/aic7xxx/aic79xx_core.c
3771
*offset = min(*offset, (u_int)tinfo->user.offset);
drivers/scsi/aic7xxx/aic79xx_core.c
3799
*bus_width = min((u_int)tinfo->user.width, *bus_width);
drivers/scsi/aic7xxx/aic79xx_core.c
3879
tinfo->user.period = period;
drivers/scsi/aic7xxx/aic79xx_core.c
3880
tinfo->user.offset = offset;
drivers/scsi/aic7xxx/aic79xx_core.c
3881
tinfo->user.ppr_options = ppr_options;
drivers/scsi/aic7xxx/aic79xx_core.c
4016
tinfo->user.width = width;
drivers/scsi/aic7xxx/aic79xx_core.c
7568
tinfo->user.protocol_version = 4;
drivers/scsi/aic7xxx/aic79xx_core.c
7569
tinfo->user.transport_version = 4;
drivers/scsi/aic7xxx/aic79xx_core.c
7576
tinfo->user.period = AHD_SYNCRATE_DT;
drivers/scsi/aic7xxx/aic79xx_core.c
7578
tinfo->user.period = AHD_SYNCRATE_160;
drivers/scsi/aic7xxx/aic79xx_core.c
7580
tinfo->user.offset = MAX_OFFSET;
drivers/scsi/aic7xxx/aic79xx_core.c
7581
tinfo->user.ppr_options = MSG_EXT_PPR_RD_STRM
drivers/scsi/aic7xxx/aic79xx_core.c
7588
tinfo->user.ppr_options |= MSG_EXT_PPR_RTI;
drivers/scsi/aic7xxx/aic79xx_core.c
7590
tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT;
drivers/scsi/aic7xxx/aic79xx_core.c
7645
user_tinfo = &tinfo->user;
drivers/scsi/aic7xxx/aic79xx_core.c
7650
tinfo->user.protocol_version = 4;
drivers/scsi/aic7xxx/aic79xx_core.c
7651
tinfo->user.transport_version = 4;
drivers/scsi/aic7xxx/aic79xx_osm.c
648
spi_min_period(starget) = tinfo->user.period;
drivers/scsi/aic7xxx/aic79xx_osm.c
649
spi_max_offset(starget) = tinfo->user.offset;
drivers/scsi/aic7xxx/aic79xx_proc.c
169
ahd_format_transinfo(m, &tinfo->user);
drivers/scsi/aic7xxx/aic7xxx.h
703
struct ahc_transinfo user;
drivers/scsi/aic7xxx/aic7xxx_core.c
2223
transinfo = &tinfo->user;
drivers/scsi/aic7xxx/aic7xxx_core.c
2384
*offset = min(*offset, (u_int)tinfo->user.offset);
drivers/scsi/aic7xxx/aic7xxx_core.c
2412
*bus_width = min((u_int)tinfo->user.width, *bus_width);
drivers/scsi/aic7xxx/aic7xxx_core.c
2492
tinfo->user.period = period;
drivers/scsi/aic7xxx/aic7xxx_core.c
2493
tinfo->user.offset = offset;
drivers/scsi/aic7xxx/aic7xxx_core.c
2494
tinfo->user.ppr_options = ppr_options;
drivers/scsi/aic7xxx/aic7xxx_core.c
2610
tinfo->user.width = width;
drivers/scsi/aic7xxx/aic7xxx_core.c
5443
tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT;
drivers/scsi/aic7xxx/aic7xxx_core.c
5449
tinfo->user.period = ahc_syncrates->period;
drivers/scsi/aic7xxx/aic7xxx_core.c
5450
tinfo->user.offset = MAX_OFFSET;
drivers/scsi/aic7xxx/aic7xxx_core.c
5480
tinfo->user.period =
drivers/scsi/aic7xxx/aic7xxx_core.c
5483
tinfo->user.period = 0;
drivers/scsi/aic7xxx/aic7xxx_core.c
5485
tinfo->user.offset = MAX_OFFSET;
drivers/scsi/aic7xxx/aic7xxx_core.c
5488
tinfo->user.ppr_options =
drivers/scsi/aic7xxx/aic7xxx_core.c
5497
tinfo->user.period =
drivers/scsi/aic7xxx/aic7xxx_core.c
5502
if (tinfo->user.period != 0)
drivers/scsi/aic7xxx/aic7xxx_core.c
5503
tinfo->user.offset = MAX_OFFSET;
drivers/scsi/aic7xxx/aic7xxx_core.c
5505
if (tinfo->user.period == 0)
drivers/scsi/aic7xxx/aic7xxx_core.c
5506
tinfo->user.offset = 0;
drivers/scsi/aic7xxx/aic7xxx_core.c
5509
tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT;
drivers/scsi/aic7xxx/aic7xxx_core.c
5510
tinfo->user.protocol_version = 4;
drivers/scsi/aic7xxx/aic7xxx_core.c
5512
tinfo->user.transport_version = 3;
drivers/scsi/aic7xxx/aic7xxx_core.c
5514
tinfo->user.transport_version = 2;
drivers/scsi/aic7xxx/aic7xxx_proc.c
149
ahc_format_transinfo(m, &tinfo->user);
drivers/scsi/isci/init.c
431
struct sci_user_parameters *user = &ihost->user_parameters;
drivers/scsi/isci/init.c
454
user->phys[i].max_speed_generation = SCIC_SDS_PARM_GEN2_SPEED;
drivers/scsi/isci/init.c
457
user->phys[i].align_insertion_frequency = 0x7f;
drivers/scsi/isci/init.c
458
user->phys[i].in_connection_align_insertion_frequency = 0xff;
drivers/scsi/isci/init.c
459
user->phys[i].notify_enable_spin_up_insertion_frequency = 0x33;
drivers/scsi/isci/init.c
470
user->stp_inactivity_timeout = 5;
drivers/scsi/isci/init.c
471
user->ssp_inactivity_timeout = 5;
drivers/scsi/isci/init.c
472
user->stp_max_occupancy_timeout = 5;
drivers/scsi/isci/init.c
473
user->ssp_max_occupancy_timeout = 20;
drivers/scsi/isci/init.c
474
user->no_outbound_task_timeout = 2;
drivers/scsi/isci/port.c
366
struct sci_user_parameters *user = &ihost->user_parameters;
drivers/scsi/isci/port.c
390
user->phys[phy_index].max_speed_generation !=
drivers/scsi/isci/port.c
391
user->phys[existing_phy_index].max_speed_generation)
drivers/scsi/ncr53c8xx.c
1742
struct usrcmd user; /* Command from user */
drivers/scsi/qla4xxx/ql4_os.c
7229
uint16_t *idx, int user)
drivers/scsi/qla4xxx/ql4_os.c
7253
if (user) {
drivers/spi/spi-mtk-snfi.c
665
struct nand_ecc_props *user = &nand->ecc.user_conf;
drivers/spi/spi-mtk-snfi.c
684
if (user->step_size && user->strength) {
drivers/spi/spi-mtk-snfi.c
685
step_size = user->step_size;
drivers/spi/spi-mtk-snfi.c
686
strength = user->strength;
drivers/spi/spi-qpic-snand.c
252
struct nand_ecc_props *user = &nand->ecc.user_conf;
drivers/spi/spi-qpic-snand.c
265
if (user->step_size && user->strength) {
drivers/spi/spi-qpic-snand.c
266
ecc_cfg->step_size = user->step_size;
drivers/spi/spi-qpic-snand.c
267
ecc_cfg->strength = user->strength;
drivers/staging/media/ipu3/ipu3-css-params.c
2744
struct ipu3_uapi_params *user)
drivers/staging/media/ipu3/ipu3-css-params.c
2764
&user->lin_vmem_params, vmem0_old, vmem0,
drivers/staging/media/ipu3/ipu3-css-params.c
2785
&user->tnr3_vmem_params,
drivers/staging/media/ipu3/ipu3-css-params.c
2800
&user->xnr3_vmem_params, vmem0_old, vmem0,
drivers/staging/media/ipu3/ipu3-css-params.c
2823
struct ipu3_uapi_params *user)
drivers/staging/media/ipu3/ipu3-css-params.c
2845
&user->tnr3_dmem_params,
drivers/staging/media/ipu3/ipu3-css-params.c
2859
&user->xnr3_dmem_params, dmem0_old, dmem0,
drivers/staging/media/ipu3/ipu3-css-params.h
16
struct ipu3_uapi_params *user);
drivers/staging/media/ipu3/ipu3-css-params.h
21
struct ipu3_uapi_params *user);
drivers/vfio/iommufd.c
251
struct iommufd_access *user;
drivers/vfio/iommufd.c
255
user = iommufd_access_create(ictx, &vfio_user_ops, vdev, out_device_id);
drivers/vfio/iommufd.c
256
if (IS_ERR(user))
drivers/vfio/iommufd.c
257
return PTR_ERR(user);
drivers/vfio/iommufd.c
258
vdev->iommufd_access = user;
drivers/video/fbdev/arcfb.c
183
static int arcfb_open(struct fb_info *info, int user)
drivers/video/fbdev/arcfb.c
191
static int arcfb_release(struct fb_info *info, int user)
drivers/video/fbdev/arkfb.c
512
static int arkfb_open(struct fb_info *info, int user)
drivers/video/fbdev/arkfb.c
536
static int arkfb_release(struct fb_info *info, int user)
drivers/video/fbdev/aty/atyfb_base.c
1590
static int atyfb_open(struct fb_info *info, int user)
drivers/video/fbdev/aty/atyfb_base.c
1594
if (user) {
drivers/video/fbdev/aty/atyfb_base.c
1683
static int atyfb_release(struct fb_info *info, int user)
drivers/video/fbdev/aty/atyfb_base.c
1690
if (!user)
drivers/video/fbdev/aty/atyfb_base.c
237
static int atyfb_open(struct fb_info *info, int user);
drivers/video/fbdev/aty/atyfb_base.c
238
static int atyfb_release(struct fb_info *info, int user);
drivers/video/fbdev/cirrusfb.c
429
static int cirrusfb_open(struct fb_info *info, int user)
drivers/video/fbdev/cirrusfb.c
437
static int cirrusfb_release(struct fb_info *info, int user)
drivers/video/fbdev/core/fbcon.c
871
static int set_con2fb_map(int unit, int newidx, int user)
drivers/video/fbdev/core/fbcon.c
914
show_logo = (fg_console == 0 && !user &&
drivers/video/fbdev/fsl-diu-fb.c
1397
static int fsl_diu_open(struct fb_info *info, int user)
drivers/video/fbdev/fsl-diu-fb.c
1425
static int fsl_diu_release(struct fb_info *info, int user)
drivers/video/fbdev/i740fb.c
181
static int i740fb_open(struct fb_info *info, int user)
drivers/video/fbdev/i740fb.c
192
static int i740fb_release(struct fb_info *info, int user)
drivers/video/fbdev/i810/i810_main.c
1240
static int i810fb_open(struct fb_info *info, int user)
drivers/video/fbdev/i810/i810_main.c
1260
static int i810fb_release(struct fb_info *info, int user)
drivers/video/fbdev/matrox/matroxfb_base.c
388
static int matroxfb_open(struct fb_info *info, int user)
drivers/video/fbdev/matrox/matroxfb_base.c
398
if (user) {
drivers/video/fbdev/matrox/matroxfb_base.c
404
static int matroxfb_release(struct fb_info *info, int user)
drivers/video/fbdev/matrox/matroxfb_base.c
410
if (user) {
drivers/video/fbdev/matrox/matroxfb_crtc2.c
263
static int matroxfb_dh_open(struct fb_info* info, int user) {
drivers/video/fbdev/matrox/matroxfb_crtc2.c
273
err = minfo->fbops.fb_open(&minfo->fbcon, user);
drivers/video/fbdev/matrox/matroxfb_crtc2.c
282
static int matroxfb_dh_release(struct fb_info* info, int user) {
drivers/video/fbdev/matrox/matroxfb_crtc2.c
288
err = minfo->fbops.fb_release(&minfo->fbcon, user);
drivers/video/fbdev/neofb.c
550
neofb_open(struct fb_info *info, int user)
drivers/video/fbdev/neofb.c
565
neofb_release(struct fb_info *info, int user)
drivers/video/fbdev/nvidia/nvidia.c
1008
static int nvidiafb_release(struct fb_info *info, int user)
drivers/video/fbdev/nvidia/nvidia.c
995
static int nvidiafb_open(struct fb_info *info, int user)
drivers/video/fbdev/omap/omapfb_main.c
218
static int omapfb_open(struct fb_info *info, int user)
drivers/video/fbdev/omap/omapfb_main.c
227
static int omapfb_release(struct fb_info *info, int user)
drivers/video/fbdev/omap2/omapfb/omapfb-main.c
750
static int omapfb_open(struct fb_info *fbi, int user)
drivers/video/fbdev/omap2/omapfb/omapfb-main.c
755
static int omapfb_release(struct fb_info *fbi, int user)
drivers/video/fbdev/ps3fb.c
505
static int ps3fb_open(struct fb_info *info, int user)
drivers/video/fbdev/ps3fb.c
511
static int ps3fb_release(struct fb_info *info, int user)
drivers/video/fbdev/pxafb.c
729
static int overlayfb_open(struct fb_info *info, int user)
drivers/video/fbdev/pxafb.c
734
if (user == 0)
drivers/video/fbdev/pxafb.c
747
static int overlayfb_release(struct fb_info *info, int user)
drivers/video/fbdev/riva/fbdev.c
1022
static int rivafb_open(struct fb_info *info, int user)
drivers/video/fbdev/riva/fbdev.c
1049
static int rivafb_release(struct fb_info *info, int user)
drivers/video/fbdev/s3fb.c
505
static int s3fb_open(struct fb_info *info, int user)
drivers/video/fbdev/s3fb.c
529
static int s3fb_release(struct fb_info *info, int user)
drivers/video/fbdev/savage/savagefb_driver.c
1607
static int savagefb_open(struct fb_info *info, int user)
drivers/video/fbdev/savage/savagefb_driver.c
1627
static int savagefb_release(struct fb_info *info, int user)
drivers/video/fbdev/sh_mobile_lcdcfb.c
1782
static int sh_mobile_lcdc_release(struct fb_info *info, int user)
drivers/video/fbdev/sh_mobile_lcdcfb.c
1792
if (user) {
drivers/video/fbdev/sh_mobile_lcdcfb.c
1803
static int sh_mobile_lcdc_open(struct fb_info *info, int user)
drivers/video/fbdev/sis/sis_main.c
1363
sisfb_open(struct fb_info *info, int user)
drivers/video/fbdev/sis/sis_main.c
1369
sisfb_release(struct fb_info *info, int user)
drivers/video/fbdev/skeletonfb.c
149
static int xxxfb_open(struct fb_info *info, int user)
drivers/video/fbdev/skeletonfb.c
168
static int xxxfb_release(struct fb_info *info, int user)
drivers/video/fbdev/smscufx.c
1013
static int ufx_ops_open(struct fb_info *info, int user)
drivers/video/fbdev/smscufx.c
1020
if (user == 0 && !console)
drivers/video/fbdev/smscufx.c
1051
info->node, user, info, dev->fb_count);
drivers/video/fbdev/smscufx.c
1114
static int ufx_ops_release(struct fb_info *info, int user)
drivers/video/fbdev/smscufx.c
1133
info->node, user, dev->fb_count);
drivers/video/fbdev/udlfb.c
898
static int dlfb_ops_open(struct fb_info *info, int user)
drivers/video/fbdev/udlfb.c
907
if ((user == 0) && (!console))
drivers/video/fbdev/udlfb.c
934
user, info, dlfb->fb_count);
drivers/video/fbdev/udlfb.c
974
static int dlfb_ops_release(struct fb_info *info, int user)
drivers/video/fbdev/udlfb.c
986
dev_dbg(info->dev, "release, user=%d count=%d\n", user, dlfb->fb_count);
drivers/video/fbdev/uvesafb.c
1162
static int uvesafb_open(struct fb_info *info, int user)
drivers/video/fbdev/uvesafb.c
1182
static int uvesafb_release(struct fb_info *info, int user)
drivers/video/fbdev/vga16fb.c
299
static int vga16fb_open(struct fb_info *info, int user)
drivers/video/fbdev/vga16fb.c
314
static int vga16fb_release(struct fb_info *info, int user)
drivers/video/fbdev/via/viafbdev.c
162
static int viafb_open(struct fb_info *info, int user)
drivers/video/fbdev/via/viafbdev.c
168
static int viafb_release(struct fb_info *info, int user)
drivers/video/fbdev/vt8623fb.c
280
static int vt8623fb_open(struct fb_info *info, int user)
drivers/video/fbdev/vt8623fb.c
302
static int vt8623fb_release(struct fb_info *info, int user)
drivers/virt/coco/sev-guest/sev-guest.c
203
if (!access_ok(certs_address.user, report_req->certs_len))
drivers/xen/evtchn.c
165
struct per_user_data *u = evtchn->user;
drivers/xen/evtchn.c
393
evtchn->user = u;
drivers/xen/evtchn.c
85
struct per_user_data *user;
fs/autofs/dev-ioctl.c
663
struct autofs_dev_ioctl __user *user)
fs/autofs/dev-ioctl.c
689
param = copy_dev_ioctl(user);
fs/autofs/dev-ioctl.c
750
if (err >= 0 && copy_to_user(user, param, AUTOFS_DEV_IOCTL_SIZE))
fs/efivarfs/file.c
89
while (!__ratelimit(&file->f_cred->user->ratelimit))
fs/eventpoll.c
1161
ep->user = get_current_user();
fs/eventpoll.c
1578
if (unlikely(percpu_counter_compare(&ep->user->epoll_watches,
fs/eventpoll.c
1581
percpu_counter_inc(&ep->user->epoll_watches);
fs/eventpoll.c
1584
percpu_counter_dec(&ep->user->epoll_watches);
fs/eventpoll.c
1602
percpu_counter_dec(&ep->user->epoll_watches);
fs/eventpoll.c
214
struct user_struct *user;
fs/eventpoll.c
820
free_uid(ep->user);
fs/eventpoll.c
886
percpu_counter_dec(&ep->user->epoll_watches);
fs/init.c
110
error = chown_common(&path, user, group);
fs/init.c
99
int __init init_chown(const char *filename, uid_t user, gid_t group, int flags)
fs/internal.h
204
int do_fchownat(int dfd, const char __user *filename, uid_t user, gid_t group,
fs/internal.h
206
int chown_common(const struct path *path, uid_t user, gid_t group);
fs/ntfs3/file.c
102
u8 user[FSLABEL_MAX] = { 0 };
fs/ntfs3/file.c
108
if (copy_from_user(user, buf, FSLABEL_MAX))
fs/ntfs3/file.c
111
len = strnlen(user, FSLABEL_MAX);
fs/ntfs3/file.c
113
return ntfs_set_label(sbi, user, len);
fs/open.c
741
int chown_common(const struct path *path, uid_t user, gid_t group)
fs/open.c
752
uid = make_kuid(current_user_ns(), user);
fs/open.c
762
if ((user != (uid_t)-1) && !setattr_vfsuid(&newattrs, uid))
fs/open.c
789
int do_fchownat(int dfd, const char __user *filename, uid_t user, gid_t group,
fs/open.c
806
error = chown_common(&path, user, group);
fs/open.c
818
SYSCALL_DEFINE5(fchownat, int, dfd, const char __user *, filename, uid_t, user,
fs/open.c
821
return do_fchownat(dfd, filename, user, group, flag);
fs/open.c
824
SYSCALL_DEFINE3(chown, const char __user *, filename, uid_t, user, gid_t, group)
fs/open.c
826
return do_fchownat(AT_FDCWD, filename, user, group, 0);
fs/open.c
829
SYSCALL_DEFINE3(lchown, const char __user *, filename, uid_t, user, gid_t, group)
fs/open.c
831
return do_fchownat(AT_FDCWD, filename, user, group,
fs/open.c
835
int vfs_fchown(struct file *file, uid_t user, gid_t group)
fs/open.c
843
error = chown_common(&file->f_path, user, group);
fs/open.c
848
int ksys_fchown(unsigned int fd, uid_t user, gid_t group)
fs/open.c
855
return vfs_fchown(fd_file(f), user, group);
fs/open.c
858
SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group)
fs/open.c
860
return ksys_fchown(fd, user, group);
fs/pipe.c
1389
user_bufs = account_pipe_buffers(pipe->user, pipe->nr_accounted, nr_slots);
fs/pipe.c
1406
(void) account_pipe_buffers(pipe->user, nr_slots, pipe->nr_accounted);
fs/pipe.c
767
unsigned long account_pipe_buffers(struct user_struct *user,
fs/pipe.c
770
return atomic_long_add_return(new - old, &user->pipe_bufs);
fs/pipe.c
796
struct user_struct *user = get_current_user();
fs/pipe.c
807
user_bufs = account_pipe_buffers(user, 0, pipe_bufs);
fs/pipe.c
810
user_bufs = account_pipe_buffers(user, pipe_bufs, PIPE_MIN_DEF_BUFFERS);
fs/pipe.c
827
pipe->user = user;
fs/pipe.c
834
(void) account_pipe_buffers(user, pipe_bufs, 0);
fs/pipe.c
837
free_uid(user);
fs/pipe.c
850
(void) account_pipe_buffers(pipe->user, pipe->nr_accounted, 0);
fs/pipe.c
851
free_uid(pipe->user);
fs/proc/stat.c
105
user += cpustat[CPUTIME_USER];
fs/proc/stat.c
127
seq_put_decimal_ull(p, "cpu ", nsec_to_clock_t(user));
fs/proc/stat.c
146
user = cpustat[CPUTIME_USER];
fs/proc/stat.c
157
seq_put_decimal_ull(p, " ", nsec_to_clock_t(user));
fs/proc/stat.c
85
u64 user, nice, system, idle, iowait, irq, softirq, steal;
fs/proc/stat.c
92
user = nice = system = idle = iowait =
fs/smb/client/cifsencrypt.c
264
__le16 *user;
fs/smb/client/cifsencrypt.c
275
user = kmalloc(2 + (len * 2), GFP_KERNEL);
fs/smb/client/cifsencrypt.c
276
if (user == NULL)
fs/smb/client/cifsencrypt.c
280
len = cifs_strtoUTF16(user, ses->user_name, len, nls_cp);
fs/smb/client/cifsencrypt.c
281
UniStrupr(user);
fs/smb/client/cifsencrypt.c
283
*(u16 *)user = 0;
fs/smb/client/cifsencrypt.c
286
hmac_md5_update(&hmac_ctx, (const u8 *)user, 2 * len);
fs/smb/client/cifsencrypt.c
287
kfree(user);
fs/smb/client/trace.h
1708
__string(user, ses->user_name)
fs/smb/client/trace.h
1719
__assign_str(user);
fs/smb/client/trace.h
1739
__entry->sec, __entry->uid, __entry->cruid, __get_str(user),
fs/smb/server/auth.c
395
struct ksmbd_user *user = NULL;
fs/smb/server/auth.c
426
user = ksmbd_alloc_user(&resp->login_response, resp_ext);
fs/smb/server/auth.c
427
if (!user) {
fs/smb/server/auth.c
433
if (!sess->user) {
fs/smb/server/auth.c
435
sess->user = user;
fs/smb/server/auth.c
437
if (!ksmbd_compare_user(sess->user, user)) {
fs/smb/server/auth.c
440
ksmbd_free_user(user);
fs/smb/server/auth.c
443
ksmbd_free_user(user);
fs/smb/server/auth.c
83
hmac_md5_init_usingrawkey(&ctx, user_passkey(sess->user),
fs/smb/server/auth.c
87
len = strlen(user_name(sess->user));
fs/smb/server/auth.c
94
conv_len = smb_strtoUTF16(uniname, user_name(sess->user), len,
fs/smb/server/mgmt/tree_connect.c
76
tree_conn->user = sess->user;
fs/smb/server/mgmt/tree_connect.h
29
struct ksmbd_user *user;
fs/smb/server/mgmt/user_config.c
16
struct ksmbd_user *user = NULL;
fs/smb/server/mgmt/user_config.c
28
user = ksmbd_alloc_user(resp, resp_ext);
fs/smb/server/mgmt/user_config.c
31
return user;
fs/smb/server/mgmt/user_config.c
37
struct ksmbd_user *user;
fs/smb/server/mgmt/user_config.c
39
user = kmalloc_obj(struct ksmbd_user, KSMBD_DEFAULT_GFP);
fs/smb/server/mgmt/user_config.c
40
if (!user)
fs/smb/server/mgmt/user_config.c
43
user->name = kstrdup(resp->account, KSMBD_DEFAULT_GFP);
fs/smb/server/mgmt/user_config.c
44
user->flags = resp->status;
fs/smb/server/mgmt/user_config.c
45
user->gid = resp->gid;
fs/smb/server/mgmt/user_config.c
46
user->uid = resp->uid;
fs/smb/server/mgmt/user_config.c
47
user->passkey_sz = resp->hash_sz;
fs/smb/server/mgmt/user_config.c
48
user->passkey = kmalloc(resp->hash_sz, KSMBD_DEFAULT_GFP);
fs/smb/server/mgmt/user_config.c
49
if (user->passkey)
fs/smb/server/mgmt/user_config.c
50
memcpy(user->passkey, resp->hash, resp->hash_sz);
fs/smb/server/mgmt/user_config.c
52
user->ngroups = 0;
fs/smb/server/mgmt/user_config.c
53
user->sgid = NULL;
fs/smb/server/mgmt/user_config.c
55
if (!user->name || !user->passkey)
fs/smb/server/mgmt/user_config.c
65
user->sgid = kmemdup(resp_ext->____payload,
fs/smb/server/mgmt/user_config.c
68
if (!user->sgid)
fs/smb/server/mgmt/user_config.c
71
user->ngroups = resp_ext->ngroups;
fs/smb/server/mgmt/user_config.c
72
ksmbd_debug(SMB, "supplementary groups : %d\n", user->ngroups);
fs/smb/server/mgmt/user_config.c
75
return user;
fs/smb/server/mgmt/user_config.c
78
kfree(user->name);
fs/smb/server/mgmt/user_config.c
79
kfree(user->passkey);
fs/smb/server/mgmt/user_config.c
80
kfree(user);
fs/smb/server/mgmt/user_config.c
84
void ksmbd_free_user(struct ksmbd_user *user)
fs/smb/server/mgmt/user_config.c
86
ksmbd_ipc_logout_request(user->name, user->flags);
fs/smb/server/mgmt/user_config.c
87
kfree(user->sgid);
fs/smb/server/mgmt/user_config.c
88
kfree(user->name);
fs/smb/server/mgmt/user_config.c
89
kfree(user->passkey);
fs/smb/server/mgmt/user_config.c
90
kfree(user);
fs/smb/server/mgmt/user_config.c
93
bool ksmbd_anonymous_user(struct ksmbd_user *user)
fs/smb/server/mgmt/user_config.c
95
return user->name[0] == '\0';
fs/smb/server/mgmt/user_config.h
25
static inline bool user_guest(struct ksmbd_user *user)
fs/smb/server/mgmt/user_config.h
27
return user->flags & KSMBD_USER_FLAG_GUEST_ACCOUNT;
fs/smb/server/mgmt/user_config.h
30
static inline void set_user_flag(struct ksmbd_user *user, int flag)
fs/smb/server/mgmt/user_config.h
32
user->flags |= flag;
fs/smb/server/mgmt/user_config.h
35
static inline int test_user_flag(struct ksmbd_user *user, int flag)
fs/smb/server/mgmt/user_config.h
37
return user->flags & flag;
fs/smb/server/mgmt/user_config.h
40
static inline void set_user_guest(struct ksmbd_user *user)
fs/smb/server/mgmt/user_config.h
44
static inline char *user_passkey(struct ksmbd_user *user)
fs/smb/server/mgmt/user_config.h
46
return user->passkey;
fs/smb/server/mgmt/user_config.h
49
static inline char *user_name(struct ksmbd_user *user)
fs/smb/server/mgmt/user_config.h
51
return user->name;
fs/smb/server/mgmt/user_config.h
54
static inline unsigned int user_uid(struct ksmbd_user *user)
fs/smb/server/mgmt/user_config.h
56
return user->uid;
fs/smb/server/mgmt/user_config.h
59
static inline unsigned int user_gid(struct ksmbd_user *user)
fs/smb/server/mgmt/user_config.h
61
return user->gid;
fs/smb/server/mgmt/user_config.h
67
void ksmbd_free_user(struct ksmbd_user *user);
fs/smb/server/mgmt/user_config.h
68
bool ksmbd_anonymous_user(struct ksmbd_user *user);
fs/smb/server/mgmt/user_session.c
386
if (sess->user)
fs/smb/server/mgmt/user_session.c
387
ksmbd_free_user(sess->user);
fs/smb/server/mgmt/user_session.c
595
struct ksmbd_user *user, u64 id)
fs/smb/server/mgmt/user_session.c
607
prev_user = prev_sess->user;
fs/smb/server/mgmt/user_session.c
609
strcmp(user->name, prev_user->name) ||
fs/smb/server/mgmt/user_session.c
610
user->passkey_sz != prev_user->passkey_sz ||
fs/smb/server/mgmt/user_session.c
611
memcmp(user->passkey, prev_user->passkey, user->passkey_sz))
fs/smb/server/mgmt/user_session.c
74
if (user_guest(session->user))
fs/smb/server/mgmt/user_session.c
76
else if (ksmbd_anonymous_user(session->user))
fs/smb/server/mgmt/user_session.c
78
return session->user->name;
fs/smb/server/mgmt/user_session.h
103
struct ksmbd_user *user, u64 id);
fs/smb/server/mgmt/user_session.h
38
struct ksmbd_user *user;
fs/smb/server/smb2pdu.c
1424
struct ksmbd_user *user;
fs/smb/server/smb2pdu.c
1453
user = ksmbd_login_user(name);
fs/smb/server/smb2pdu.c
1455
return user;
fs/smb/server/smb2pdu.c
1465
struct ksmbd_user *user;
fs/smb/server/smb2pdu.c
1485
user = session_user(conn, req);
fs/smb/server/smb2pdu.c
1486
if (!user) {
fs/smb/server/smb2pdu.c
1494
destroy_previous_session(conn, user, prev_id);
fs/smb/server/smb2pdu.c
1501
if (conn->binding == false && ksmbd_anonymous_user(user)) {
fs/smb/server/smb2pdu.c
1502
ksmbd_free_user(user);
fs/smb/server/smb2pdu.c
1506
if (!ksmbd_compare_user(sess->user, user)) {
fs/smb/server/smb2pdu.c
1507
ksmbd_free_user(user);
fs/smb/server/smb2pdu.c
1510
ksmbd_free_user(user);
fs/smb/server/smb2pdu.c
1512
sess->user = user;
fs/smb/server/smb2pdu.c
1515
if (conn->binding == false && user_guest(sess->user)) {
fs/smb/server/smb2pdu.c
1527
set_user_flag(sess->user, KSMBD_USER_FLAG_BAD_PASSWORD);
fs/smb/server/smb2pdu.c
1628
destroy_previous_session(conn, sess->user, prev_sess_id);
fs/smb/server/smb2pdu.c
1786
if (user_guest(sess->user)) {
fs/smb/server/smb2pdu.c
1939
if (sess->user && sess->user->flags & KSMBD_USER_FLAG_DELAY_SESSION)
fs/smb/server/smb2pdu.c
3242
sess->user->uid);
fs/smb/server/smb2pdu.c
3390
rc = smb_inherit_dacl(conn, &path, sess->user->uid,
fs/smb/server/smb2pdu.c
3391
sess->user->gid);
fs/smb/server/smb2pdu.c
9266
sess->user && !user_guest(sess->user) &&
fs/smb/server/smb_common.c
767
struct ksmbd_user *user = sess->user;
fs/smb/server/smb_common.c
774
uid = user_uid(user);
fs/smb/server/smb_common.c
775
gid = user_gid(user);
fs/smb/server/smb_common.c
788
gi = groups_alloc(user->ngroups);
fs/smb/server/smb_common.c
794
for (i = 0; i < user->ngroups; i++)
fs/smb/server/smb_common.c
795
gi->gid[i] = make_kgid(&init_user_ns, user->sgid[i]);
fs/smb/server/smb_common.c
797
if (user->ngroups)
fs/smb/server/transport_ipc.c
231
if (user_guest(sess->user))
fs/smb/server/transport_ipc.c
678
if (strlen(user_name(sess->user)) >= KSMBD_REQ_MAX_ACCOUNT_NAME_SZ)
fs/smb/server/transport_ipc.c
692
req->account_flags = sess->user->flags;
fs/smb/server/transport_ipc.c
695
strscpy(req->account, user_name(sess->user), KSMBD_REQ_MAX_ACCOUNT_NAME_SZ);
include/acpi/processor.h
206
struct acpi_processor_lx user; /* user limit */
include/keys/user-type.h
43
extern void user_describe(const struct key *user, struct seq_file *m);
include/linux/atmel-ssc.h
21
int user;
include/linux/bpf.h
1724
struct user_struct *user;
include/linux/bpfptr.h
23
return (bpfptr_t) { .user = p };
include/linux/bpfptr.h
38
return !bpfptr.user;
include/linux/bpfptr.h
46
bpfptr->user += val;
include/linux/bpfptr.h
53
return copy_from_user(dst, src.user + offset, size);
include/linux/bpfptr.h
86
return strncpy_from_user(dst, src.user, count);
include/linux/compiler_types.h
66
# define __user __attribute__((user))
include/linux/compiler_types.h
68
# define __user BTF_TYPE_TAG(user)
include/linux/cred.h
140
struct user_struct *user; /* real user ID subscription */
include/linux/cred.h
341
__u = get_uid(__cred->user); \
include/linux/cred.h
387
#define current_user() (current_cred_xxx(user))
include/linux/fb.h
247
int (*fb_open)(struct fb_info *info, int user);
include/linux/fb.h
248
int (*fb_release)(struct fb_info *info, int user);
include/linux/filelock.h
257
struct flock __user *user)
include/linux/filelock.h
263
unsigned int cmd, struct flock __user *user)
include/linux/filelock.h
270
struct flock64 *user)
include/linux/filelock.h
276
unsigned int cmd, struct flock64 *user)
include/linux/fs.h
1823
int vfs_fchown(struct file *file, uid_t user, gid_t group);
include/linux/fsl/ntmp.h
102
static inline int ntmp_maft_delete_entry(struct ntmp_user *user, u32 entry_id)
include/linux/fsl/ntmp.h
107
static inline int ntmp_rsst_update_entry(struct ntmp_user *user,
include/linux/fsl/ntmp.h
113
static inline int ntmp_rsst_query_entry(struct ntmp_user *user,
include/linux/fsl/ntmp.h
70
int ntmp_maft_add_entry(struct ntmp_user *user, u32 entry_id,
include/linux/fsl/ntmp.h
72
int ntmp_maft_query_entry(struct ntmp_user *user, u32 entry_id,
include/linux/fsl/ntmp.h
74
int ntmp_maft_delete_entry(struct ntmp_user *user, u32 entry_id);
include/linux/fsl/ntmp.h
75
int ntmp_rsst_update_entry(struct ntmp_user *user, const u32 *table,
include/linux/fsl/ntmp.h
77
int ntmp_rsst_query_entry(struct ntmp_user *user,
include/linux/fsl/ntmp.h
90
static inline int ntmp_maft_add_entry(struct ntmp_user *user, u32 entry_id,
include/linux/fsl/ntmp.h
96
static inline int ntmp_maft_query_entry(struct ntmp_user *user, u32 entry_id,
include/linux/if_team.h
47
} user;
include/linux/init_syscalls.h
8
int __init init_chown(const char *filename, uid_t user, gid_t group, int flags);
include/linux/io_uring_types.h
442
struct user_struct *user;
include/linux/ipmi.h
120
struct ipmi_user **user);
include/linux/ipmi.h
130
void ipmi_destroy_user(struct ipmi_user *user);
include/linux/ipmi.h
133
int ipmi_get_version(struct ipmi_user *user,
include/linux/ipmi.h
146
int ipmi_set_my_address(struct ipmi_user *user,
include/linux/ipmi.h
149
int ipmi_get_my_address(struct ipmi_user *user,
include/linux/ipmi.h
152
int ipmi_set_my_LUN(struct ipmi_user *user,
include/linux/ipmi.h
155
int ipmi_get_my_LUN(struct ipmi_user *user,
include/linux/ipmi.h
172
int ipmi_request_settime(struct ipmi_user *user,
include/linux/ipmi.h
190
int ipmi_request_supply_msgs(struct ipmi_user *user,
include/linux/ipmi.h
206
void ipmi_poll_interface(struct ipmi_user *user);
include/linux/ipmi.h
217
int ipmi_register_for_cmd(struct ipmi_user *user,
include/linux/ipmi.h
221
int ipmi_unregister_for_cmd(struct ipmi_user *user,
include/linux/ipmi.h
252
int ipmi_get_maintenance_mode(struct ipmi_user *user);
include/linux/ipmi.h
253
int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode);
include/linux/ipmi.h
261
int ipmi_set_gets_events(struct ipmi_user *user, bool val);
include/linux/ipmi.h
353
void ipmi_panic_request_and_wait(struct ipmi_user *user,
include/linux/ipmi.h
48
struct ipmi_user *user;
include/linux/isdn/capilli.h
21
int user; /* data in userspace ? */
include/linux/kernel.h
43
struct user;
include/linux/kernel_stat.h
130
static inline void account_process_tick(struct task_struct *tsk, int user)
include/linux/kernel_stat.h
135
extern void account_process_tick(struct task_struct *, int user);
include/linux/key.h
206
struct key_user *user; /* owner of this key */
include/linux/netfilter.h
193
int (*get)(struct sock *sk, int optval, void __user *user, int *len);
include/linux/netfilter/x_tables.h
454
} user;
include/linux/netfilter/x_tables.h
470
} user;
include/linux/perf_event.h
1738
get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
include/linux/pipe_fs_i.h
106
struct user_struct *user;
include/linux/pipe_fs_i.h
325
unsigned long account_pipe_buffers(struct user_struct *user,
include/linux/pm-trace.h
24
extern void generate_pm_trace(const void *tracedata, unsigned int user);
include/linux/rcupdate.h
132
void rcu_sched_clock_irq(int user);
include/linux/skbuff.h
569
struct user_struct *user;
include/linux/sockptr.h
114
return copy_to_user(dst.user + offset, src, size);
include/linux/sockptr.h
161
return strncpy_from_user(dst, src.user, count);
include/linux/sockptr.h
168
return check_zeroed_user(src.user + offset, size);
include/linux/sockptr.h
17
void __user *user;
include/linux/sockptr.h
34
return (sockptr_t) { .user = p };
include/linux/sockptr.h
41
return !sockptr.user;
include/linux/sockptr.h
48
return copy_from_user(dst, src.user + offset, size);
include/linux/sockptr.h
94
return copy_struct_from_user(dst, ksize, src.user, size);
include/linux/syscalls.h
1045
uid_t user, gid_t group);
include/linux/syscalls.h
1049
uid_t user, gid_t group);
include/linux/syscalls.h
1158
old_uid_t user, old_gid_t group);
include/linux/syscalls.h
1160
old_uid_t user, old_gid_t group);
include/linux/syscalls.h
1161
asmlinkage long sys_fchown16(unsigned int fd, old_uid_t user, old_gid_t group);
include/linux/syscalls.h
1236
int ksys_fchown(unsigned int fd, uid_t user, gid_t group);
include/linux/syscalls.h
1270
extern int do_fchownat(int dfd, const char __user *filename, uid_t user,
include/linux/syscalls.h
1273
static inline long ksys_chown(const char __user *filename, uid_t user,
include/linux/syscalls.h
1276
return do_fchownat(AT_FDCWD, filename, user, group, 0);
include/linux/syscalls.h
1279
static inline long ksys_lchown(const char __user *filename, uid_t user,
include/linux/syscalls.h
1282
return do_fchownat(AT_FDCWD, filename, user, group,
include/linux/syscalls.h
461
asmlinkage long sys_fchownat(int dfd, const char __user *filename, uid_t user,
include/linux/syscalls.h
463
asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group);
include/net/bluetooth/l2cap.h
680
int (*probe) (struct l2cap_conn *conn, struct l2cap_user *user);
include/net/bluetooth/l2cap.h
681
void (*remove) (struct l2cap_conn *conn, struct l2cap_user *user);
include/net/bluetooth/l2cap.h
979
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user);
include/net/bluetooth/l2cap.h
980
void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user);
include/net/dsa.h
245
struct net_device *user;
include/net/dsa.h
761
return dp->user;
include/net/inet_frag.h
51
u32 user;
include/net/inet_frag.h
60
u32 user;
include/net/ip.h
745
static inline bool ip_defrag_user_in_between(u32 user,
include/net/ip.h
749
return user >= lower_bond && user <= upper_bond;
include/net/ip.h
752
int ip_defrag(struct net *net, struct sk_buff *skb, u32 user);
include/net/ip.h
754
struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user);
include/net/ip.h
756
static inline struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
include/net/netfilter/ipv6/nf_defrag_ipv6.h
13
int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user);
include/net/page_pool/types.h
252
} user;
include/net/scm.h
40
struct user_struct *user;
include/net/xdp_sock.h
30
struct user_struct *user;
include/rdma/rdma_vt.h
157
bool user;
include/rdma/restrack.h
119
bool user;
include/rdma/restrack.h
134
return !res->user;
include/uapi/linux/atmsap.h
112
unsigned char user; /* user-specified l2 information */
include/uapi/linux/atmsap.h
123
unsigned char user; /* user specified l3 information */
include/uapi/linux/net_dropmon.h
35
struct net_dm_config_msg user;
include/uapi/linux/netfilter/x_tables.h
19
} user;
include/uapi/linux/netfilter/x_tables.h
42
} user;
include/uapi/linux/netfilter/x_tables.h
59
.target.u.user = { \
include/uapi/linux/xfrm.h
62
__kernel_uid32_t user;
init/init_task.c
86
.user = INIT_USER,
io_uring/io_uring.c
2066
io_free_region(ctx->user, &ctx->sq_region);
io_uring/io_uring.c
2067
io_free_region(ctx->user, &ctx->ring_region);
io_uring/io_uring.c
2161
io_free_region(ctx->user, &ctx->param_region);
io_uring/io_uring.c
2180
free_uid(ctx->user);
io_uring/io_uring.c
2982
ctx->user = get_uid(current_user());
io_uring/kbuf.c
448
io_free_region(ctx->user, &bl->region);
io_uring/kbuf.c
693
io_free_region(ctx->user, &bl->region);
io_uring/memmap.c
108
if (mr->nr_pages && user)
io_uring/memmap.c
109
__io_unaccount_mem(user, mr->nr_pages);
io_uring/memmap.c
210
if (ctx->user) {
io_uring/memmap.c
211
ret = __io_account_mem(ctx->user, nr_pages);
io_uring/memmap.c
229
io_free_region(ctx->user, mr);
io_uring/memmap.c
91
void io_free_region(struct user_struct *user, struct io_mapped_region *mr)
io_uring/memmap.h
20
void io_free_region(struct user_struct *user, struct io_mapped_region *mr);
io_uring/notif.c
33
if (nd->account_pages && notif->ctx->user) {
io_uring/notif.c
34
__io_unaccount_mem(notif->ctx->user, nd->account_pages);
io_uring/notif.h
50
if (ctx->user) {
io_uring/notif.h
51
ret = __io_account_mem(ctx->user, nr_pages);
io_uring/register.c
483
io_free_region(ctx->user, &r->sq_region);
io_uring/register.c
484
io_free_region(ctx->user, &r->ring_region);
io_uring/register.c
706
io_free_region(ctx->user, ®ion);
io_uring/rsrc.c
1160
if (ctx->user != src_ctx->user || ctx->mm_account != src_ctx->mm_account)
io_uring/rsrc.c
135
io_unaccount_mem(ctx->user, ctx->mm_account, imu->acct_pages);
io_uring/rsrc.c
39
int __io_account_mem(struct user_struct *user, unsigned long nr_pages)
io_uring/rsrc.c
49
cur_pages = atomic_long_read(&user->locked_vm);
io_uring/rsrc.c
54
} while (!atomic_long_try_cmpxchg(&user->locked_vm,
io_uring/rsrc.c
59
void io_unaccount_mem(struct user_struct *user, struct mm_struct *mm_account,
io_uring/rsrc.c
62
if (user)
io_uring/rsrc.c
63
__io_unaccount_mem(user, nr_pages);
io_uring/rsrc.c
671
ret = io_account_mem(ctx->user, ctx->mm_account, imu->acct_pages);
io_uring/rsrc.c
69
int io_account_mem(struct user_struct *user, struct mm_struct *mm_account,
io_uring/rsrc.c
74
if (user) {
io_uring/rsrc.c
75
ret = __io_account_mem(user, nr_pages);
io_uring/rsrc.h
126
int __io_account_mem(struct user_struct *user, unsigned long nr_pages);
io_uring/rsrc.h
127
int io_account_mem(struct user_struct *user, struct mm_struct *mm_account,
io_uring/rsrc.h
129
void io_unaccount_mem(struct user_struct *user, struct mm_struct *mm_account,
io_uring/rsrc.h
132
static inline void __io_unaccount_mem(struct user_struct *user,
io_uring/rsrc.h
135
atomic_long_sub(nr_pages, &user->locked_vm);
io_uring/zcrx.c
217
ret = io_account_mem(ifq->user, ifq->mm_account, mem->account_pages);
io_uring/zcrx.c
408
io_free_region(ifq->user, &ifq->region);
io_uring/zcrx.c
420
io_unaccount_mem(ifq->user, ifq->mm_account,
io_uring/zcrx.c
568
free_uid(ifq->user);
io_uring/zcrx.c
801
if (ctx->user) {
io_uring/zcrx.c
802
get_uid(ctx->user);
io_uring/zcrx.c
803
ifq->user = ctx->user;
io_uring/zcrx.h
44
struct user_struct *user;
kernel/bpf/stackmap.c
166
u32 trace_nr, bool user, bool may_fault)
kernel/bpf/stackmap.c
178
if (!user || !current || !current->mm || irq_work_busy ||
kernel/bpf/stackmap.c
256
bool user = flags & BPF_F_USER_STACK;
kernel/bpf/stackmap.c
289
stack_map_get_build_id_offset(id_offs, trace_nr, user, false /* !may_fault */);
kernel/bpf/stackmap.c
327
bool user = flags & BPF_F_USER_STACK;
kernel/bpf/stackmap.c
329
bool kernel = !user;
kernel/bpf/stackmap.c
337
trace = get_perf_callchain(regs, kernel, user, max_depth,
kernel/bpf/stackmap.c
373
bool kernel, user;
kernel/bpf/stackmap.c
386
user = flags & BPF_F_USER_STACK;
kernel/bpf/stackmap.c
387
kernel = !user;
kernel/bpf/stackmap.c
433
bool user = flags & BPF_F_USER_STACK;
kernel/bpf/stackmap.c
435
bool kernel = !user;
kernel/bpf/stackmap.c
450
if (task && user && !user_mode(regs))
kernel/bpf/stackmap.c
456
if (crosstask && user) {
kernel/bpf/stackmap.c
472
trace = get_perf_callchain(regs, kernel, user, max_depth,
kernel/bpf/stackmap.c
501
stack_map_get_build_id_offset(buf, trace_nr, user, may_fault);
kernel/bpf/stackmap.c
603
bool kernel, user;
kernel/bpf/stackmap.c
614
user = flags & BPF_F_USER_STACK;
kernel/bpf/stackmap.c
615
kernel = !user;
kernel/bpf/syscall.c
107
res = check_zeroed_user(uaddr.user + expected_size,
kernel/bpf/syscall.c
2371
free_uid(aux->user);
kernel/bpf/syscall.c
3015
prog->aux->user = get_current_user();
kernel/bpf/syscall.c
3139
free_uid(prog->aux->user);
kernel/bpf/syscall.c
5039
prog->aux->user->uid);
kernel/bpf/syscall.c
6264
err = bpf_prog_query(&attr, uattr.user);
kernel/bpf/syscall.c
6267
err = bpf_prog_test_run(&attr, uattr.user);
kernel/bpf/syscall.c
6270
err = bpf_obj_get_next_id(&attr, uattr.user,
kernel/bpf/syscall.c
6274
err = bpf_obj_get_next_id(&attr, uattr.user,
kernel/bpf/syscall.c
6278
err = bpf_obj_get_next_id(&attr, uattr.user,
kernel/bpf/syscall.c
6288
err = bpf_obj_get_info_by_fd(&attr, uattr.user);
kernel/bpf/syscall.c
6300
err = bpf_task_fd_query(&attr, uattr.user);
kernel/bpf/syscall.c
6306
err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_LOOKUP_BATCH);
kernel/bpf/syscall.c
6309
err = bpf_map_do_batch(&attr, uattr.user,
kernel/bpf/syscall.c
6313
err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_UPDATE_BATCH);
kernel/bpf/syscall.c
6316
err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_DELETE_BATCH);
kernel/bpf/syscall.c
6328
err = bpf_obj_get_next_id(&attr, uattr.user,
kernel/cgroup/rstat.c
678
u64 user = 0;
kernel/cgroup/rstat.c
683
user += cpustat[CPUTIME_USER];
kernel/cgroup/rstat.c
684
user += cpustat[CPUTIME_NICE];
kernel/cgroup/rstat.c
685
cputime->utime += user;
kernel/cgroup/rstat.c
692
cputime->sum_exec_runtime += user;
kernel/context_tracking.c
103
static void noinstr ct_kernel_exit(bool user, int offset)
kernel/context_tracking.c
120
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
kernel/context_tracking.c
142
static void noinstr ct_kernel_enter(bool user, int offset)
kernel/context_tracking.c
165
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
kernel/context_tracking.c
405
static __always_inline void ct_kernel_exit(bool user, int offset) { }
kernel/context_tracking.c
406
static __always_inline void ct_kernel_enter(bool user, int offset) { }
kernel/cred.c
197
get_uid(new->user);
kernel/cred.c
412
if (new->user != old->user || new->user_ns != old->user_ns)
kernel/cred.c
417
if (new->user != old->user || new->user_ns != old->user_ns)
kernel/cred.c
577
get_uid(new->user);
kernel/cred.c
58
free_uid(cred->user);
kernel/events/callchain.c
220
get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
kernel/events/callchain.c
228
if (crosstask && user && !kernel)
kernel/events/callchain.c
247
if (user && !crosstask) {
kernel/events/core.c
7225
struct user_struct *user = current_user();
kernel/events/core.c
7231
user_locked = atomic_long_read(&user->locked_vm);
kernel/events/core.c
7259
struct user_struct *user = current_user();
kernel/events/core.c
7261
atomic_long_add(user_extra, &user->locked_vm);
kernel/events/core.c
8507
bool user = !event->attr.exclude_callchain_user &&
kernel/events/core.c
8511
bool defer_user = IS_ENABLED(CONFIG_UNWIND_USER) && user &&
kernel/events/core.c
8518
user = false;
kernel/events/core.c
8520
if (!kernel && !user)
kernel/events/core.c
8523
if (!(user && defer_user && !crosstask &&
kernel/events/core.c
8527
callchain = get_perf_callchain(regs, kernel, user, max_stack,
kernel/fork.c
2090
if (p->real_cred->user != INIT_USER &&
kernel/printk/printk.c
745
struct devkmsg_user *user = file->private_data;
kernel/printk/printk.c
758
if (!___ratelimit(&user->rs, current->comm))
kernel/printk/printk.c
804
struct devkmsg_user *user = file->private_data;
kernel/printk/printk.c
805
char *outbuf = &user->pbufs.outbuf[0];
kernel/printk/printk.c
807
.pbufs = &user->pbufs,
kernel/printk/printk.c
811
ret = mutex_lock_interruptible(&user->lock);
kernel/printk/printk.c
815
if (!printk_get_next_message(&pmsg, atomic64_read(&user->seq), true, false)) {
kernel/printk/printk.c
832
printk_get_next_message(&pmsg, atomic64_read(&user->seq), true,
kernel/printk/printk.c
840
atomic64_set(&user->seq, pmsg.seq);
kernel/printk/printk.c
845
atomic64_set(&user->seq, pmsg.seq + 1);
kernel/printk/printk.c
858
mutex_unlock(&user->lock);
kernel/printk/printk.c
872
struct devkmsg_user *user = file->private_data;
kernel/printk/printk.c
881
atomic64_set(&user->seq, prb_first_valid_seq(prb));
kernel/printk/printk.c
889
atomic64_set(&user->seq, latched_seq_read_nolock(&clear_seq));
kernel/printk/printk.c
893
atomic64_set(&user->seq, prb_next_seq(prb));
kernel/printk/printk.c
903
struct devkmsg_user *user = file->private_data;
kernel/printk/printk.c
909
if (prb_read_valid_info(prb, atomic64_read(&user->seq), &info, NULL)) {
kernel/printk/printk.c
911
if (info.seq != atomic64_read(&user->seq))
kernel/printk/printk.c
922
struct devkmsg_user *user;
kernel/printk/printk.c
936
user = kvmalloc_obj(struct devkmsg_user);
kernel/printk/printk.c
937
if (!user)
kernel/printk/printk.c
940
ratelimit_default_init(&user->rs);
kernel/printk/printk.c
941
ratelimit_set_flags(&user->rs, RATELIMIT_MSG_ON_RELEASE);
kernel/printk/printk.c
943
mutex_init(&user->lock);
kernel/printk/printk.c
945
atomic64_set(&user->seq, prb_first_valid_seq(prb));
kernel/printk/printk.c
947
file->private_data = user;
kernel/printk/printk.c
953
struct devkmsg_user *user = file->private_data;
kernel/printk/printk.c
955
ratelimit_state_exit(&user->rs);
kernel/printk/printk.c
957
mutex_destroy(&user->lock);
kernel/printk/printk.c
958
kvfree(user);
kernel/rcu/tiny.c
71
void rcu_sched_clock_irq(int user)
kernel/rcu/tiny.c
73
if (user)
kernel/rcu/tree.c
2696
void rcu_sched_clock_irq(int user)
kernel/rcu/tree.c
2711
if (!rcu_is_cpu_rrupt_from_idle() && !user)
kernel/rcu/tree.c
2715
rcu_flavor_sched_clock_irq(user);
kernel/rcu/tree.c
2716
if (rcu_pending(user))
kernel/rcu/tree.c
2718
if (user || rcu_is_cpu_rrupt_from_idle())
kernel/rcu/tree.c
3670
static int rcu_pending(int user)
kernel/rcu/tree.c
3687
if ((user || rcu_is_cpu_rrupt_from_idle() ||
kernel/rcu/tree.c
516
static int rcu_pending(int user);
kernel/rcu/tree.h
488
static void rcu_flavor_sched_clock_irq(int user);
kernel/rcu/tree_plugin.h
1066
static void rcu_flavor_sched_clock_irq(int user)
kernel/rcu/tree_plugin.h
1068
if (user || rcu_is_cpu_rrupt_from_idle() ||
kernel/rcu/tree_plugin.h
810
static void rcu_flavor_sched_clock_irq(int user)
kernel/sched/sched.h
4063
extern int __sched_setscheduler(struct task_struct *p, const struct sched_attr *attr, bool user, bool pi);
kernel/sched/syscalls.c
495
bool user, bool pi)
kernel/sched/syscalls.c
535
if (user) {
kernel/sched/syscalls.c
608
if (user) {
kernel/sys.c
262
struct user_struct *user;
kernel/sys.c
301
user = cred->user;
kernel/sys.c
305
user = find_user(uid);
kernel/sys.c
306
if (!user)
kernel/sys.c
314
free_uid(user); /* For find_user() */
kernel/sys.c
332
struct user_struct *user;
kernel/sys.c
369
user = cred->user;
kernel/sys.c
373
user = find_user(uid);
kernel/sys.c
374
if (!user)
kernel/sys.c
385
free_uid(user); /* for find_user() */
kernel/sys.c
531
free_uid(new->user);
kernel/sys.c
532
new->user = new_user;
kernel/sys.c
549
new->user != INIT_USER)
kernel/time/timekeeping.h
25
extern void update_process_times(int user);
kernel/trace/trace_events_user.c
1086
static void user_event_destroy_validators(struct user_event *user)
kernel/trace/trace_events_user.c
1089
struct list_head *head = &user->validators;
kernel/trace/trace_events_user.c
1097
static void user_event_destroy_fields(struct user_event *user)
kernel/trace/trace_events_user.c
1100
struct list_head *head = &user->fields;
kernel/trace/trace_events_user.c
1108
static int user_event_add_field(struct user_event *user, const char *type,
kernel/trace/trace_events_user.c
1146
list_add_tail(&validator->user_event_link, &user->validators);
kernel/trace/trace_events_user.c
1159
list_add(&field->link, &user->fields);
kernel/trace/trace_events_user.c
1165
user->min_size = (offset + size) - sizeof(struct trace_entry);
kernel/trace/trace_events_user.c
1174
static int user_event_parse_field(char *field, struct user_event *user,
kernel/trace/trace_events_user.c
1261
return user_event_add_field(user, type, name, saved_offset, size,
kernel/trace/trace_events_user.c
1265
static int user_event_parse_fields(struct user_event *user, char *args)
kernel/trace/trace_events_user.c
1275
ret = user_event_parse_field(field, user, &offset);
kernel/trace/trace_events_user.c
1386
static int user_event_set_print_fmt(struct user_event *user, char *buf, int len)
kernel/trace/trace_events_user.c
1389
struct list_head *head = &user->fields;
kernel/trace/trace_events_user.c
1420
static int user_event_create_print_fmt(struct user_event *user)
kernel/trace/trace_events_user.c
1425
len = user_event_set_print_fmt(user, NULL, 0);
kernel/trace/trace_events_user.c
1432
user_event_set_print_fmt(user, print_fmt, len);
kernel/trace/trace_events_user.c
1434
user->call.print_fmt = print_fmt;
kernel/trace/trace_events_user.c
1450
static int user_event_set_call_visible(struct user_event *user, bool visible)
kernel/trace/trace_events_user.c
1469
return trace_add_event_call(&user->call);
kernel/trace/trace_events_user.c
1471
return trace_remove_event_call(&user->call);
kernel/trace/trace_events_user.c
1475
static int destroy_user_event(struct user_event *user)
kernel/trace/trace_events_user.c
1482
user_event_destroy_fields(user);
kernel/trace/trace_events_user.c
1484
ret = user_event_set_call_visible(user, false);
kernel/trace/trace_events_user.c
1489
dyn_event_remove(&user->devent);
kernel/trace/trace_events_user.c
1490
hash_del(&user->node);
kernel/trace/trace_events_user.c
1492
user_event_destroy_validators(user);
kernel/trace/trace_events_user.c
1495
if (EVENT_NAME(user) != EVENT_TP_NAME(user))
kernel/trace/trace_events_user.c
1496
kfree(EVENT_TP_NAME(user));
kernel/trace/trace_events_user.c
1498
kfree(user->call.print_fmt);
kernel/trace/trace_events_user.c
1499
kfree(EVENT_NAME(user));
kernel/trace/trace_events_user.c
1500
kfree(user);
kernel/trace/trace_events_user.c
1514
struct user_event *user;
kernel/trace/trace_events_user.c
1519
hash_for_each_possible(group->register_table, user, node, key) {
kernel/trace/trace_events_user.c
1525
if (EVENT_MULTI_FORMAT(flags) != EVENT_MULTI_FORMAT(user->reg_flags))
kernel/trace/trace_events_user.c
1528
if (strcmp(EVENT_NAME(user), name))
kernel/trace/trace_events_user.c
1531
if (user_fields_match(user, argc, argv))
kernel/trace/trace_events_user.c
1532
return user_event_get(user);
kernel/trace/trace_events_user.c
1544
static int user_event_validate(struct user_event *user, void *data, int len)
kernel/trace/trace_events_user.c
1546
struct list_head *head = &user->validators;
kernel/trace/trace_events_user.c
1580
static void user_event_ftrace(struct user_event *user, struct iov_iter *i,
kernel/trace/trace_events_user.c
1604
if (!list_empty(&user->validators) &&
kernel/trace/trace_events_user.c
1605
unlikely(user_event_validate(user, entry, size)))
kernel/trace/trace_events_user.c
1621
static void user_event_perf(struct user_event *user, struct iov_iter *i,
kernel/trace/trace_events_user.c
1626
perf_head = this_cpu_ptr(user->call.perf_events);
kernel/trace/trace_events_user.c
1645
if (!list_empty(&user->validators) &&
kernel/trace/trace_events_user.c
1646
unlikely(user_event_validate(user, perf_entry, size)))
kernel/trace/trace_events_user.c
1650
user->call.event.type, 1, regs,
kernel/trace/trace_events_user.c
1664
static void update_enable_bit_for(struct user_event *user)
kernel/trace/trace_events_user.c
1666
struct tracepoint *tp = &user->tracepoint;
kernel/trace/trace_events_user.c
1695
user->status = status;
kernel/trace/trace_events_user.c
1697
user_event_enabler_update(user);
kernel/trace/trace_events_user.c
1707
struct user_event *user = (struct user_event *)call->data;
kernel/trace/trace_events_user.c
1710
if (!user)
kernel/trace/trace_events_user.c
1753
user_event_get(user);
kernel/trace/trace_events_user.c
1754
update_enable_bit_for(user);
kernel/trace/trace_events_user.c
1757
update_enable_bit_for(user);
kernel/trace/trace_events_user.c
1758
user_event_put(user, true);
kernel/trace/trace_events_user.c
1765
struct user_event *user;
kernel/trace/trace_events_user.c
1790
ret = user_event_parse_cmd(group, name, &user, USER_EVENT_REG_PERSIST);
kernel/trace/trace_events_user.c
1793
user_event_put(user, false);
kernel/trace/trace_events_user.c
1805
struct user_event *user = container_of(ev, struct user_event, devent);
kernel/trace/trace_events_user.c
1810
seq_printf(m, "%s%s", USER_EVENTS_PREFIX, EVENT_NAME(user));
kernel/trace/trace_events_user.c
1812
head = trace_get_fields(&user->call);
kernel/trace/trace_events_user.c
1835
struct user_event *user = container_of(ev, struct user_event, devent);
kernel/trace/trace_events_user.c
1837
return !user_event_last_ref(user);
kernel/trace/trace_events_user.c
1842
struct user_event *user = container_of(ev, struct user_event, devent);
kernel/trace/trace_events_user.c
1844
if (!user_event_last_ref(user))
kernel/trace/trace_events_user.c
1847
if (!user_event_capable(user->reg_flags))
kernel/trace/trace_events_user.c
1850
return destroy_user_event(user);
kernel/trace/trace_events_user.c
1890
static bool user_fields_match(struct user_event *user, int argc,
kernel/trace/trace_events_user.c
1894
struct list_head *head = &user->fields;
kernel/trace/trace_events_user.c
1914
struct user_event *user = container_of(ev, struct user_event, devent);
kernel/trace/trace_events_user.c
1917
match = strcmp(EVENT_NAME(user), event) == 0;
kernel/trace/trace_events_user.c
1920
match = strcmp(system, user->group->system_name) == 0 ||
kernel/trace/trace_events_user.c
1921
strcmp(system, user->group->system_multi_name) == 0;
kernel/trace/trace_events_user.c
1925
match = user_fields_match(user, argc, argv);
kernel/trace/trace_events_user.c
1938
static int user_event_trace_register(struct user_event *user)
kernel/trace/trace_events_user.c
1942
ret = register_trace_event(&user->call.event);
kernel/trace/trace_events_user.c
1947
ret = user_event_set_call_visible(user, true);
kernel/trace/trace_events_user.c
1950
unregister_trace_event(&user->call.event);
kernel/trace/trace_events_user.c
1955
static int user_event_set_tp_name(struct user_event *user)
kernel/trace/trace_events_user.c
1957
lockdep_assert_held(&user->group->reg_mutex);
kernel/trace/trace_events_user.c
1959
if (EVENT_MULTI_FORMAT(user->reg_flags)) {
kernel/trace/trace_events_user.c
1963
user->reg_name, user->group->multi_id);
kernel/trace/trace_events_user.c
1968
user->call.name = multi_name;
kernel/trace/trace_events_user.c
1969
user->tracepoint.name = multi_name;
kernel/trace/trace_events_user.c
1972
user->group->multi_id++;
kernel/trace/trace_events_user.c
1975
user->call.name = user->reg_name;
kernel/trace/trace_events_user.c
1976
user->tracepoint.name = user->reg_name;
kernel/trace/trace_events_user.c
201
typedef void (*user_event_func_t) (struct user_event *user, struct iov_iter *i,
kernel/trace/trace_events_user.c
2065
struct user_event *user;
kernel/trace/trace_events_user.c
2087
user = find_user_event(group, name, argc, (const char **)argv,
kernel/trace/trace_events_user.c
209
static struct user_event_mm *user_event_mm_get_all(struct user_event *user);
kernel/trace/trace_events_user.c
2094
if (IS_ERR(user))
kernel/trace/trace_events_user.c
2095
return PTR_ERR(user);
kernel/trace/trace_events_user.c
2097
if (user) {
kernel/trace/trace_events_user.c
2098
*newuser = user;
kernel/trace/trace_events_user.c
2108
user = kzalloc_obj(*user, GFP_KERNEL_ACCOUNT);
kernel/trace/trace_events_user.c
211
static int destroy_user_event(struct user_event *user);
kernel/trace/trace_events_user.c
2110
if (!user)
kernel/trace/trace_events_user.c
2113
INIT_LIST_HEAD(&user->class.fields);
kernel/trace/trace_events_user.c
2114
INIT_LIST_HEAD(&user->fields);
kernel/trace/trace_events_user.c
2115
INIT_LIST_HEAD(&user->validators);
kernel/trace/trace_events_user.c
2117
user->group = group;
kernel/trace/trace_events_user.c
2118
user->reg_name = name;
kernel/trace/trace_events_user.c
2119
user->reg_flags = reg_flags;
kernel/trace/trace_events_user.c
212
static bool user_fields_match(struct user_event *user, int argc,
kernel/trace/trace_events_user.c
2121
ret = user_event_set_tp_name(user);
kernel/trace/trace_events_user.c
2126
ret = user_event_parse_fields(user, args);
kernel/trace/trace_events_user.c
2131
ret = user_event_create_print_fmt(user);
kernel/trace/trace_events_user.c
2136
user->call.data = user;
kernel/trace/trace_events_user.c
2137
user->call.class = &user->class;
kernel/trace/trace_events_user.c
2138
user->call.flags = TRACE_EVENT_FL_TRACEPOINT;
kernel/trace/trace_events_user.c
2139
user->call.tp = &user->tracepoint;
kernel/trace/trace_events_user.c
2140
user->call.event.funcs = &user_event_funcs;
kernel/trace/trace_events_user.c
2142
if (EVENT_MULTI_FORMAT(user->reg_flags))
kernel/trace/trace_events_user.c
2143
user->class.system = group->system_multi_name;
kernel/trace/trace_events_user.c
2145
user->class.system = group->system_name;
kernel/trace/trace_events_user.c
2147
user->class.fields_array = user_event_fields_array;
kernel/trace/trace_events_user.c
2148
user->class.get_fields = user_event_get_fields;
kernel/trace/trace_events_user.c
2149
user->class.reg = user_event_reg;
kernel/trace/trace_events_user.c
2150
user->class.probe = user_event_ftrace;
kernel/trace/trace_events_user.c
2152
user->class.perf_probe = user_event_perf;
kernel/trace/trace_events_user.c
2162
ret = user_event_trace_register(user);
kernel/trace/trace_events_user.c
2167
if (user->reg_flags & USER_EVENT_REG_PERSIST) {
kernel/trace/trace_events_user.c
2169
refcount_set(&user->refcnt, 2);
kernel/trace/trace_events_user.c
2172
refcount_set(&user->refcnt, 1);
kernel/trace/trace_events_user.c
2175
dyn_event_init(&user->devent, &user_event_dops);
kernel/trace/trace_events_user.c
2176
dyn_event_add(&user->devent, &user->call);
kernel/trace/trace_events_user.c
2177
hash_add(group->register_table, &user->node, key);
kernel/trace/trace_events_user.c
2182
*newuser = user;
kernel/trace/trace_events_user.c
2187
user_event_destroy_fields(user);
kernel/trace/trace_events_user.c
2188
user_event_destroy_validators(user);
kernel/trace/trace_events_user.c
2189
kfree(user->call.print_fmt);
kernel/trace/trace_events_user.c
2192
if (EVENT_NAME(user) != EVENT_TP_NAME(user))
kernel/trace/trace_events_user.c
2193
kfree(EVENT_TP_NAME(user));
kernel/trace/trace_events_user.c
2195
kfree(user);
kernel/trace/trace_events_user.c
2204
struct user_event *user;
kernel/trace/trace_events_user.c
2210
hash_for_each_possible_safe(group->register_table, user, tmp, node, key) {
kernel/trace/trace_events_user.c
2211
if (strcmp(EVENT_NAME(user), name))
kernel/trace/trace_events_user.c
2214
if (!user_event_last_ref(user))
kernel/trace/trace_events_user.c
2217
if (!user_event_capable(user->reg_flags))
kernel/trace/trace_events_user.c
2220
ret = destroy_user_event(user);
kernel/trace/trace_events_user.c
2236
struct user_event *user = NULL;
kernel/trace/trace_events_user.c
2257
user = refs->events[idx];
kernel/trace/trace_events_user.c
2261
if (unlikely(user == NULL))
kernel/trace/trace_events_user.c
2264
if (unlikely(i->count < user->min_size))
kernel/trace/trace_events_user.c
2267
tp = &user->tracepoint;
kernel/trace/trace_events_user.c
2294
probe_func(user, ©, tpdata, &faulted);
kernel/trace/trace_events_user.c
231
static struct user_event *user_event_get(struct user_event *user)
kernel/trace/trace_events_user.c
233
refcount_inc(&user->refcnt);
kernel/trace/trace_events_user.c
235
return user;
kernel/trace/trace_events_user.c
2350
struct user_event *user)
kernel/trace/trace_events_user.c
2363
if (refs->events[i] == user)
kernel/trace/trace_events_user.c
2379
new_refs->events[i] = user_event_get(user);
kernel/trace/trace_events_user.c
240
struct user_event *user = container_of(
kernel/trace/trace_events_user.c
245
if (!refcount_dec_and_test(&user->refcnt))
kernel/trace/trace_events_user.c
2454
struct user_event *user;
kernel/trace/trace_events_user.c
248
if (destroy_user_event(user)) {
kernel/trace/trace_events_user.c
2483
ret = user_event_parse_cmd(info->group, name, &user, reg.flags);
kernel/trace/trace_events_user.c
2490
ret = user_events_ref_add(info, user);
kernel/trace/trace_events_user.c
2493
user_event_put(user, false);
kernel/trace/trace_events_user.c
2511
enabler = user_event_enabler_create(®, user, &write_result);
kernel/trace/trace_events_user.c
256
refcount_set(&user->refcnt, 1);
kernel/trace/trace_events_user.c
262
static void user_event_put(struct user_event *user, bool locked)
kernel/trace/trace_events_user.c
266
if (unlikely(!user))
kernel/trace/trace_events_user.c
2772
struct user_event *user;
kernel/trace/trace_events_user.c
2781
hash_for_each(group->register_table, i, user, node) {
kernel/trace/trace_events_user.c
2782
status = user->status;
kernel/trace/trace_events_user.c
2784
seq_printf(m, "%s", EVENT_TP_NAME(user));
kernel/trace/trace_events_user.c
282
delete = refcount_dec_and_mutex_lock(&user->refcnt, &event_mutex);
kernel/trace/trace_events_user.c
285
delete = refcount_dec_and_test(&user->refcnt);
kernel/trace/trace_events_user.c
298
if (user->reg_flags & USER_EVENT_REG_PERSIST) {
kernel/trace/trace_events_user.c
309
INIT_WORK(&user->put_work, delayed_destroy_user_event);
kernel/trace/trace_events_user.c
322
refcount_set(&user->refcnt, 1);
kernel/trace/trace_events_user.c
324
if (WARN_ON_ONCE(!schedule_work(&user->put_work))) {
kernel/trace/trace_events_user.c
456
struct user_event *user = enabler->event;
kernel/trace/trace_events_user.c
459
mm->mm, (unsigned long long)uaddr, EVENT_NAME(user));
kernel/trace/trace_events_user.c
592
static void user_event_enabler_update(struct user_event *user)
kernel/trace/trace_events_user.c
612
mm = user_event_mm_get_all(user);
kernel/trace/trace_events_user.c
619
if (enabler->event == user) {
kernel/trace/trace_events_user.c
664
static struct user_event_mm *user_event_mm_get_all(struct user_event *user)
kernel/trace/trace_events_user.c
692
if (enabler->event == user) {
kernel/trace/trace_events_user.c
882
*user_event_enabler_create(struct user_reg *reg, struct user_event *user,
kernel/trace/trace_events_user.c
900
enabler->event = user;
kernel/trace/trace_events_user.c
928
user_event_get(user);
kernel/trace/trace_events_user.c
949
bool user_event_last_ref(struct user_event *user)
kernel/trace/trace_events_user.c
953
if (user->reg_flags & USER_EVENT_REG_PERSIST)
kernel/trace/trace_events_user.c
956
return refcount_read(&user->refcnt) == last;
kernel/trace/trace_events_user.c
975
struct user_event *user = (struct user_event *)call->data;
kernel/trace/trace_events_user.c
977
return &user->fields;
kernel/uid16.c
23
SYSCALL_DEFINE3(chown16, const char __user *, filename, old_uid_t, user, old_gid_t, group)
kernel/uid16.c
25
return ksys_chown(filename, low2highuid(user), low2highgid(group));
kernel/uid16.c
28
SYSCALL_DEFINE3(lchown16, const char __user *, filename, old_uid_t, user, old_gid_t, group)
kernel/uid16.c
30
return ksys_lchown(filename, low2highuid(user), low2highgid(group));
kernel/uid16.c
33
SYSCALL_DEFINE3(fchown16, unsigned int, fd, old_uid_t, user, old_gid_t, group)
kernel/uid16.c
35
return ksys_fchown(fd, low2highuid(user), low2highgid(group));
kernel/user.c
129
struct user_struct *user;
kernel/user.c
131
hlist_for_each_entry(user, hashent, uidhash_node) {
kernel/user.c
132
if (uid_eq(user->uid, uid)) {
kernel/user.c
133
refcount_inc(&user->__count);
kernel/user.c
134
return user;
kernel/watch_queue.c
256
user_bufs = account_pipe_buffers(pipe->user, pipe->nr_accounted, nr_pages);
kernel/watch_queue.c
308
(void) account_pipe_buffers(pipe->user, nr_pages, pipe->nr_accounted);
kernel/watch_queue.c
427
atomic_dec(&watch->cred->user->nr_watches);
kernel/watch_queue.c
474
if (atomic_inc_return(&cred->user->nr_watches) > task_rlimit(current, RLIMIT_NOFILE)) {
kernel/watch_queue.c
475
atomic_dec(&cred->user->nr_watches);
lib/tests/stackinit_kunit.c
451
DEFINE_TEST(user, struct test_user, STRUCT, none, USER_PASS);
net/ax25/af_ax25.c
1102
ax25_uid_assoc *user;
net/ax25/af_ax25.c
1119
user = ax25_findbyuid(current_euid());
net/ax25/af_ax25.c
1120
if (user) {
net/ax25/af_ax25.c
1121
call = user->call;
net/ax25/af_ax25.c
1122
ax25_uid_put(user);
net/ax25/ax25_uid.c
72
ax25_uid_assoc *user;
net/ax25/ax25_uid.c
97
user = ax25_findbyuid(sax25_kuid);
net/ax25/ax25_uid.c
98
if (user) {
net/ax25/ax25_uid.c
99
ax25_uid_put(user);
net/bluetooth/hidp/core.c
1099
struct l2cap_user *user)
net/bluetooth/hidp/core.c
1101
struct hidp_session *session = container_of(user,
net/bluetooth/hidp/core.c
1103
user);
net/bluetooth/hidp/core.c
1160
struct l2cap_user *user)
net/bluetooth/hidp/core.c
1162
struct hidp_session *session = container_of(user,
net/bluetooth/hidp/core.c
1164
user);
net/bluetooth/hidp/core.c
1315
l2cap_unregister_user(session->conn, &session->user);
net/bluetooth/hidp/core.c
1390
ret = l2cap_register_user(conn, &session->user);
net/bluetooth/hidp/core.c
1421
l2cap_unregister_user(session->conn, &session->user);
net/bluetooth/hidp/core.c
66
struct l2cap_user *user);
net/bluetooth/hidp/core.c
68
struct l2cap_user *user);
net/bluetooth/hidp/core.c
936
session->user.probe = hidp_session_probe;
net/bluetooth/hidp/core.c
937
session->user.remove = hidp_session_remove;
net/bluetooth/hidp/core.c
938
INIT_LIST_HEAD(&session->user.list);
net/bluetooth/hidp/hidp.h
151
struct l2cap_user user;
net/bluetooth/l2cap_core.c
1702
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
net/bluetooth/l2cap_core.c
1714
if (!list_empty(&user->list)) {
net/bluetooth/l2cap_core.c
1725
ret = user->probe(conn, user);
net/bluetooth/l2cap_core.c
1729
list_add(&user->list, &conn->users);
net/bluetooth/l2cap_core.c
1738
void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
net/bluetooth/l2cap_core.c
1742
if (list_empty(&user->list))
net/bluetooth/l2cap_core.c
1745
list_del_init(&user->list);
net/bluetooth/l2cap_core.c
1746
user->remove(conn, user);
net/bluetooth/l2cap_core.c
1755
struct l2cap_user *user;
net/bluetooth/l2cap_core.c
1758
user = list_first_entry(&conn->users, struct l2cap_user, list);
net/bluetooth/l2cap_core.c
1759
list_del_init(&user->list);
net/bluetooth/l2cap_core.c
1760
user->remove(conn, user);
net/bpf/test_run.c
1656
const struct nf_hook_state *user,
net/bpf/test_run.c
1659
if (user->in || user->out)
net/bpf/test_run.c
1662
if (user->net || user->sk || user->okfn)
net/bpf/test_run.c
1665
switch (user->pf) {
net/bpf/test_run.c
1692
state->pf = user->pf;
net/bpf/test_run.c
1693
state->hook = user->hook;
net/bridge/netfilter/ebt_limit.c
59
user2credits(u_int32_t user)
net/bridge/netfilter/ebt_limit.c
62
if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
net/bridge/netfilter/ebt_limit.c
64
return (user / EBT_LIMIT_SCALE) * HZ * CREDITS_PER_JIFFY;
net/bridge/netfilter/ebt_limit.c
66
return (user * HZ * CREDITS_PER_JIFFY) / EBT_LIMIT_SCALE;
net/bridge/netfilter/ebt_mark.c
64
const struct compat_ebt_mark_t_info *user = src;
net/bridge/netfilter/ebt_mark.c
67
kern->mark = user->mark;
net/bridge/netfilter/ebt_mark.c
68
kern->target = user->target;
net/bridge/netfilter/ebt_mark.c
73
struct compat_ebt_mark_t_info __user *user = dst;
net/bridge/netfilter/ebt_mark.c
76
if (put_user(kern->mark, &user->mark) ||
net/bridge/netfilter/ebt_mark.c
77
put_user(kern->target, &user->target))
net/bridge/netfilter/ebt_mark_m.c
48
const struct compat_ebt_mark_m_info *user = src;
net/bridge/netfilter/ebt_mark_m.c
51
kern->mark = user->mark;
net/bridge/netfilter/ebt_mark_m.c
52
kern->mask = user->mask;
net/bridge/netfilter/ebt_mark_m.c
53
kern->invert = user->invert;
net/bridge/netfilter/ebt_mark_m.c
54
kern->bitmask = user->bitmask;
net/bridge/netfilter/ebt_mark_m.c
59
struct compat_ebt_mark_m_info __user *user = dst;
net/bridge/netfilter/ebt_mark_m.c
62
if (put_user(kern->mark, &user->mark) ||
net/bridge/netfilter/ebt_mark_m.c
63
put_user(kern->mask, &user->mask) ||
net/bridge/netfilter/ebt_mark_m.c
64
put_user(kern->invert, &user->invert) ||
net/bridge/netfilter/ebt_mark_m.c
65
put_user(kern->bitmask, &user->bitmask))
net/bridge/netfilter/ebtables.c
1516
void __user *user, unsigned int num_counters,
net/bridge/netfilter/ebtables.c
1537
if (copy_to_user(user, counterstmp,
net/bridge/netfilter/ebtables.c
1545
static int copy_everything_to_user(struct ebt_table *t, void __user *user,
net/bridge/netfilter/ebtables.c
1566
if (copy_from_user(&tmp, user, sizeof(tmp)))
net/bridge/netfilter/ebtables.c
1852
void __user *user, int *len, int cmd)
net/bridge/netfilter/ebtables.c
1874
if (copy_from_user(&tmp, user, sizeof(tmp)))
net/bridge/netfilter/ebtables.c
2373
void __user *user, int *len)
net/bridge/netfilter/ebtables.c
2384
if (copy_from_user(&tmp, user, sizeof(tmp)))
net/bridge/netfilter/ebtables.c
2402
if (copy_to_user(user, &tmp, *len) != 0) {
net/bridge/netfilter/ebtables.c
2413
if (copy_to_user(user, &tmp, *len) != 0) {
net/bridge/netfilter/ebtables.c
2429
if (copy_everything_to_user(t, user, len, cmd) == 0)
net/bridge/netfilter/ebtables.c
2432
ret = compat_copy_everything_to_user(t, user, len, cmd);
net/bridge/netfilter/ebtables.c
2445
static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
net/bridge/netfilter/ebtables.c
2460
return compat_do_ebt_get_ctl(sk, cmd, user, len);
net/bridge/netfilter/ebtables.c
2463
if (copy_from_user(&tmp, user, sizeof(tmp)))
net/bridge/netfilter/ebtables.c
2490
if (copy_to_user(user, &tmp, *len) != 0) {
net/bridge/netfilter/ebtables.c
2499
ret = copy_everything_to_user(t, user, len, cmd);
net/core/page_pool.c
1226
__func__, pool->user.id, inflight, sec);
net/core/page_pool_user.c
131
if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_ID, pool->user.id) ||
net/core/page_pool_user.c
227
if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_ID, pool->user.id))
net/core/page_pool_user.c
246
if (pool->user.detach_time &&
net/core/page_pool_user.c
248
ktime_divns(pool->user.detach_time, NSEC_PER_SEC)))
net/core/page_pool_user.c
271
if (hlist_unhashed(&pool->user.list))
net/core/page_pool_user.c
317
err = xa_alloc_cyclic(&page_pools, &pool->user.id, pool, xa_limit_32b,
net/core/page_pool_user.c
322
INIT_HLIST_NODE(&pool->user.list);
net/core/page_pool_user.c
324
hlist_add_head(&pool->user.list,
net/core/page_pool_user.c
340
pool->user.detach_time = ktime_get_boottime();
net/core/page_pool_user.c
349
xa_erase(&page_pools, pool->user.id);
net/core/page_pool_user.c
350
if (!hlist_unhashed(&pool->user.list))
net/core/page_pool_user.c
351
hlist_del(&pool->user.list);
net/core/page_pool_user.c
366
hlist_for_each_entry_safe(pool, n, &dev->page_pools, user.list) {
net/core/page_pool_user.c
385
hlist_for_each_entry_safe(pool, n, &netdev->page_pools, user.list) {
net/core/page_pool_user.c
386
hlist_del_init(&pool->user.list);
net/core/page_pool_user.c
401
hlist_for_each_entry(pool, &netdev->page_pools, user.list) {
net/core/page_pool_user.c
408
hlist_splice_init(&netdev->page_pools, &last->user.list,
net/core/page_pool_user.c
48
if (!pool || hlist_unhashed(&pool->user.list) ||
net/core/page_pool_user.c
94
hlist_for_each_entry(pool, &netdev->page_pools, user.list) {
net/core/page_pool_user.c
95
if (state->pp_id && state->pp_id < pool->user.id)
net/core/page_pool_user.c
98
state->pp_id = pool->user.id;
net/core/scm.c
129
if (!fpl->user)
net/core/scm.c
130
fpl->user = get_uid(current_user());
net/core/scm.c
144
free_uid(fpl->user);
net/core/scm.c
417
new_fpl->user = get_uid(fpl->user);
net/core/scm.c
93
fpl->user = NULL;
net/core/skbuff.c
1689
struct user_struct *user;
net/core/skbuff.c
1700
user = mmp->user ? : current_user();
net/core/skbuff.c
1702
old_pg = atomic_long_read(&user->locked_vm);
net/core/skbuff.c
1707
} while (!atomic_long_try_cmpxchg(&user->locked_vm, &old_pg, new_pg));
net/core/skbuff.c
1709
if (!mmp->user) {
net/core/skbuff.c
1710
mmp->user = get_uid(user);
net/core/skbuff.c
1722
if (mmp->user) {
net/core/skbuff.c
1723
atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm);
net/core/skbuff.c
1724
free_uid(mmp->user);
net/core/skbuff.c
1743
uarg->mmp.user = NULL;
net/dsa/dsa.c
1002
if (dp->user->flags & IFF_UP)
net/dsa/dsa.c
1630
user_dev = dp->user;
net/dsa/dsa.c
1653
return dp->type == DSA_PORT_TYPE_USER && dp->user;
net/dsa/dsa.c
1666
ret = dsa_user_suspend(dp->user);
net/dsa/dsa.c
1694
ret = dsa_user_resume(dp->user);
net/dsa/dsa.c
1783
err = hsr_get_port_type(hsr, dsa_to_port(ds, port)->user, &type);
net/dsa/dsa.c
1810
dp->user->features |= NETIF_F_HW_HSR_DUP;
net/dsa/dsa.c
1811
other_dp->user->features |= NETIF_F_HW_HSR_DUP;
net/dsa/dsa.c
1827
dp->user->features &= ~NETIF_F_HW_HSR_DUP;
net/dsa/dsa.c
1828
other_dp->user->features &= ~NETIF_F_HW_HSR_DUP;
net/dsa/dsa.c
551
if (dp->user) {
net/dsa/dsa.c
552
dsa_user_destroy(dp->user);
net/dsa/dsa.c
553
dp->user = NULL;
net/dsa/port.c
1432
struct net_device *dev = dp->user;
net/dsa/port.c
488
struct net_device *dev = dp->user;
net/dsa/port.c
740
struct net_device *upper_dev, *user = dp->user;
net/dsa/port.c
743
netdev_for_each_upper_dev_rcu(user, upper_dev, iter) {
net/dsa/port.c
826
struct net_device *user = other_dp->user;
net/dsa/port.c
831
if (!user)
net/dsa/port.c
834
err = dsa_user_manage_vlan_filtering(user,
net/dsa/port.c
842
err = dsa_user_manage_vlan_filtering(dp->user,
net/dsa/switch.c
897
struct net_device *user = dp->user;
net/dsa/switch.c
899
dsa_user_setup_tagger(user);
net/dsa/switch.c
902
dsa_user_change_mtu(user, user->mtu);
net/dsa/tag.h
196
struct net_device *user;
net/dsa/tag.h
218
user = dp->user;
net/dsa/tag.h
220
err = br_vlan_get_info_rcu(user, vid, &vinfo);
net/dsa/tag.h
224
return user;
net/dsa/tag.h
42
return dp->user;
net/dsa/tag_8021q.c
496
return dp->user;
net/dsa/user.c
1967
int dsa_user_manage_vlan_filtering(struct net_device *user,
net/dsa/user.c
1973
user->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
net/dsa/user.c
1975
err = vlan_for_each(user, dsa_user_restore_vlan, user);
net/dsa/user.c
1977
vlan_for_each(user, dsa_user_clear_vlan, user);
net/dsa/user.c
1978
user->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
net/dsa/user.c
1982
err = vlan_for_each(user, dsa_user_clear_vlan, user);
net/dsa/user.c
1986
user->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
net/dsa/user.c
2057
struct net_device *user;
net/dsa/user.c
2068
user = other_dp->user;
net/dsa/user.c
2070
if (min_mtu > user->mtu)
net/dsa/user.c
2071
min_mtu = user->mtu;
net/dsa/user.c
2077
hw_port->dev = user;
net/dsa/user.c
2078
hw_port->old_mtu = user->mtu;
net/dsa/user.c
2088
err = dsa_hw_port_list_set_mtu(&hw_port_list, dp->user->mtu);
net/dsa/user.c
2127
if (!other_dp->user)
net/dsa/user.c
2136
user_mtu = other_dp->user->mtu;
net/dsa/user.c
2259
struct net_device *user = other_dp->user;
net/dsa/user.c
2261
if (!user || user == dev)
net/dsa/user.c
2264
err = setdel(user, app);
net/dsa/user.c
2280
struct net_device *user = other_dp->user;
net/dsa/user.c
2282
if (!user || user == dev)
net/dsa/user.c
2285
restore_err = setdel(user, app);
net/dsa/user.c
2287
netdev_err(user, "Failed to restore DSCP prio entry configuration\n");
net/dsa/user.c
2698
void dsa_user_setup_tagger(struct net_device *user)
net/dsa/user.c
2700
struct dsa_port *dp = dsa_user_to_port(user);
net/dsa/user.c
2702
struct dsa_user_priv *p = netdev_priv(user);
net/dsa/user.c
2706
user->needed_headroom = cpu_dp->tag_ops->needed_headroom;
net/dsa/user.c
2707
user->needed_tailroom = cpu_dp->tag_ops->needed_tailroom;
net/dsa/user.c
2712
user->needed_headroom += conduit->needed_headroom;
net/dsa/user.c
2713
user->needed_tailroom += conduit->needed_tailroom;
net/dsa/user.c
2717
user->features = conduit->vlan_features | NETIF_F_HW_TC;
net/dsa/user.c
2718
user->hw_features |= NETIF_F_HW_TC;
net/dsa/user.c
2719
if (user->needed_tailroom)
net/dsa/user.c
2720
user->features &= ~(NETIF_F_SG | NETIF_F_FRAGLIST);
net/dsa/user.c
2722
user->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
net/dsa/user.c
2724
user->lltx = true;
net/dsa/user.c
2817
port->user = user_dev;
net/dsa/user.c
2876
port->user = NULL;
net/dsa/user.c
3122
struct net_device *user, *br;
net/dsa/user.c
3130
user = vlan_dev_real_dev(dev);
net/dsa/user.c
3131
if (!dsa_user_dev_check(user))
net/dsa/user.c
3134
dp = dsa_user_to_port(user);
net/dsa/user.c
3359
err = dsa_user_change_conduit(dp->user, new_conduit, NULL);
net/dsa/user.c
3361
netdev_err(dp->user,
net/dsa/user.c
3386
err = dsa_user_change_conduit(dp->user, lag_dev, extack);
net/dsa/user.c
3398
err = dsa_user_change_conduit(dp->user, conduit, NULL);
net/dsa/user.c
3400
netdev_err(dp->user,
net/dsa/user.c
3603
list_add(&dp->user->close_list, &close_list);
net/dsa/user.c
656
dsa_user_vlan_check_for_8021q_uppers(struct net_device *user,
net/dsa/user.c
662
netdev_for_each_upper_dev_rcu(user, upper_dev, iter) {
net/dsa/user.h
49
void dsa_user_setup_tagger(struct net_device *user);
net/ipv4/ip_fragment.c
111
static bool frag_expire_skip_icmp(u32 user)
net/ipv4/ip_fragment.c
113
return user == IP_DEFRAG_AF_PACKET ||
net/ipv4/ip_fragment.c
114
ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_IN,
net/ipv4/ip_fragment.c
116
ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_BRIDGE_IN,
net/ipv4/ip_fragment.c
180
if (frag_expire_skip_icmp(qp->q.key.v4.user) &&
net/ipv4/ip_fragment.c
200
u32 user, int vif)
net/ipv4/ip_fragment.c
205
.user = user,
net/ipv4/ip_fragment.c
396
return qp->q.key.v4.user == IP_DEFRAG_LOCAL_DELIVER;
net/ipv4/ip_fragment.c
473
int ip_defrag(struct net *net, struct sk_buff *skb, u32 user)
net/ipv4/ip_fragment.c
485
qp = ip_find(net, ip_hdr(skb), user, vif);
net/ipv4/ip_fragment.c
506
struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
net/ipv4/ip_fragment.c
539
if (ip_defrag(net, skb, user))
net/ipv4/ip_sockglue.c
1656
msg.msg_control_user = optval.user;
net/ipv4/netfilter/arp_tables.c
1106
target = xt_request_find_target(NFPROTO_ARP, t->u.user.name,
net/ipv4/netfilter/arp_tables.c
1107
t->u.user.revision);
net/ipv4/netfilter/arp_tables.c
1443
static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
net/ipv4/netfilter/arp_tables.c
1452
ret = get_info(sock_net(sk), user, len);
net/ipv4/netfilter/arp_tables.c
1458
ret = compat_get_entries(sock_net(sk), user, len);
net/ipv4/netfilter/arp_tables.c
1461
ret = get_entries(sock_net(sk), user, len);
net/ipv4/netfilter/arp_tables.c
1471
if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
net/ipv4/netfilter/arp_tables.c
334
(strcmp(t->target.u.user.name,
net/ipv4/netfilter/arp_tables.c
365
if (strcmp(t->target.u.user.name,
net/ipv4/netfilter/arp_tables.c
417
target = xt_request_find_target(NFPROTO_ARP, t->u.user.name,
net/ipv4/netfilter/arp_tables.c
418
t->u.user.revision);
net/ipv4/netfilter/arp_tables.c
445
if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
net/ipv4/netfilter/arp_tables.c
556
if (strcmp(arpt_get_target(iter)->u.user.name,
net/ipv4/netfilter/arp_tables.c
791
static int get_info(struct net *net, void __user *user, const int *len)
net/ipv4/netfilter/arp_tables.c
800
if (copy_from_user(name, user, sizeof(name)) != 0)
net/ipv4/netfilter/arp_tables.c
831
if (copy_to_user(user, &info, *len) != 0)
net/ipv4/netfilter/ip_tables.c
1263
match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
net/ipv4/netfilter/ip_tables.c
1264
m->u.user.revision);
net/ipv4/netfilter/ip_tables.c
1327
target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
net/ipv4/netfilter/ip_tables.c
1328
t->u.user.revision);
net/ipv4/netfilter/ip_tables.c
1650
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
net/ipv4/netfilter/ip_tables.c
1659
ret = get_info(sock_net(sk), user, len);
net/ipv4/netfilter/ip_tables.c
1665
ret = compat_get_entries(sock_net(sk), user, len);
net/ipv4/netfilter/ip_tables.c
1668
ret = get_entries(sock_net(sk), user, len);
net/ipv4/netfilter/ip_tables.c
1680
if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
net/ipv4/netfilter/ip_tables.c
397
(strcmp(t->target.u.user.name,
net/ipv4/netfilter/ip_tables.c
427
if (strcmp(t->target.u.user.name,
net/ipv4/netfilter/ip_tables.c
481
match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
net/ipv4/netfilter/ip_tables.c
482
m->u.user.revision);
net/ipv4/netfilter/ip_tables.c
544
target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
net/ipv4/netfilter/ip_tables.c
545
t->u.user.revision);
net/ipv4/netfilter/ip_tables.c
579
if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
net/ipv4/netfilter/ip_tables.c
694
if (strcmp(ipt_get_target(iter)->u.user.name,
net/ipv4/netfilter/ip_tables.c
946
static int get_info(struct net *net, void __user *user, const int *len)
net/ipv4/netfilter/ip_tables.c
955
if (copy_from_user(name, user, sizeof(name)) != 0)
net/ipv4/netfilter/ip_tables.c
986
if (copy_to_user(user, &info, *len) != 0)
net/ipv4/netfilter/nf_defrag_ipv4.c
27
u_int32_t user)
net/ipv4/netfilter/nf_defrag_ipv4.c
32
err = ip_defrag(net, skb, user);
net/ipv4/netfilter/nf_defrag_ipv4.c
84
enum ip_defrag_users user =
net/ipv4/netfilter/nf_defrag_ipv4.c
87
if (nf_ct_ipv4_gather_frags(state->net, skb, user))
net/ipv6/ipv6_sockglue.c
1125
msg.msg_control_user = optval.user;
net/ipv6/netfilter/ip6_tables.c
1002
if (copy_to_user(user, &info, *len) != 0)
net/ipv6/netfilter/ip6_tables.c
1279
match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
net/ipv6/netfilter/ip6_tables.c
1280
m->u.user.revision);
net/ipv6/netfilter/ip6_tables.c
1343
target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
net/ipv6/netfilter/ip6_tables.c
1344
t->u.user.revision);
net/ipv6/netfilter/ip6_tables.c
1659
do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
net/ipv6/netfilter/ip6_tables.c
1668
ret = get_info(sock_net(sk), user, len);
net/ipv6/netfilter/ip6_tables.c
1674
ret = compat_get_entries(sock_net(sk), user, len);
net/ipv6/netfilter/ip6_tables.c
1677
ret = get_entries(sock_net(sk), user, len);
net/ipv6/netfilter/ip6_tables.c
1689
if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
net/ipv6/netfilter/ip6_tables.c
415
(strcmp(t->target.u.user.name,
net/ipv6/netfilter/ip6_tables.c
445
if (strcmp(t->target.u.user.name,
net/ipv6/netfilter/ip6_tables.c
498
match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
net/ipv6/netfilter/ip6_tables.c
499
m->u.user.revision);
net/ipv6/netfilter/ip6_tables.c
563
target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
net/ipv6/netfilter/ip6_tables.c
564
t->u.user.revision);
net/ipv6/netfilter/ip6_tables.c
597
if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
net/ipv6/netfilter/ip6_tables.c
711
if (strcmp(ip6t_get_target(iter)->u.user.name,
net/ipv6/netfilter/ip6_tables.c
962
static int get_info(struct net *net, void __user *user, const int *len)
net/ipv6/netfilter/ip6_tables.c
971
if (copy_from_user(name, user, sizeof(name)) != 0)
net/ipv6/netfilter/nf_conntrack_reasm.c
145
static struct frag_queue *fq_find(struct net *net, __be32 id, u32 user,
net/ipv6/netfilter/nf_conntrack_reasm.c
153
.user = user,
net/ipv6/netfilter/nf_conntrack_reasm.c
445
int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
net/ipv6/netfilter/nf_conntrack_reasm.c
481
fq = fq_find(net, fhdr->identification, user, hdr,
net/ipv6/reassembly.c
91
.user = IP6_DEFRAG_LOCAL_DELIVER,
net/netfilter/ipset/ip_set_core.c
2242
ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
net/netfilter/ipset/ip_set_core.c
2260
if (copy_from_user(data, user, *len) != 0) {
net/netfilter/ipset/ip_set_core.c
2291
if (copy_to_user(user, req_version,
net/netfilter/ipset/ip_set_core.c
2352
if (copy_to_user(user, data, copylen))
net/netfilter/ipvs/ip_vs_core.c
706
struct sk_buff *skb, u_int32_t user)
net/netfilter/ipvs/ip_vs_core.c
711
err = ip_defrag(ipvs->net, skb, user);
net/netfilter/ipvs/ip_vs_ctl.c
3014
do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
net/netfilter/ipvs/ip_vs_ctl.c
3036
if (copy_from_user(arg, user, copylen) != 0)
net/netfilter/ipvs/ip_vs_ctl.c
3058
if (copy_to_user(user, &d, sizeof(d)) != 0)
net/netfilter/ipvs/ip_vs_ctl.c
3072
if (copy_to_user(user, buf, strlen(buf)+1) != 0) {
net/netfilter/ipvs/ip_vs_ctl.c
3086
if (copy_to_user(user, &info, sizeof(info)) != 0)
net/netfilter/ipvs/ip_vs_ctl.c
3103
ret = __ip_vs_get_service_entries(ipvs, get, user);
net/netfilter/ipvs/ip_vs_ctl.c
3125
if (copy_to_user(user, entry, sizeof(*entry)) != 0)
net/netfilter/ipvs/ip_vs_ctl.c
3144
ret = __ip_vs_get_dest_entries(ipvs, get, user);
net/netfilter/ipvs/ip_vs_ctl.c
3153
if (copy_to_user(user, &t, sizeof(t)) != 0)
net/netfilter/nf_conntrack_ovs.c
150
enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;
net/netfilter/nf_conntrack_ovs.c
154
err = ip_defrag(net, skb, user);
net/netfilter/nf_conntrack_ovs.c
162
enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
net/netfilter/nf_conntrack_ovs.c
165
err = nf_ct_frag6_gather(net, skb, user);
net/netfilter/nf_conntrack_proto.c
265
getorigdst(struct sock *sk, int optval, void __user *user, int *len)
net/netfilter/nf_conntrack_proto.c
303
if (copy_to_user(user, &sin, sizeof(sin)) != 0)
net/netfilter/nf_conntrack_proto.c
321
ipv6_getorigdst(struct sock *sk, int optval, void __user *user, int *len)
net/netfilter/nf_conntrack_proto.c
364
return copy_to_user(user, &sin6, sizeof(sin6)) ? -EFAULT : 0;
net/netfilter/x_tables.c
1160
u_int16_t tsize = ct->u.user.target_size;
net/netfilter/x_tables.c
1161
char name[sizeof(t->u.user.name)];
net/netfilter/x_tables.c
1172
t->u.user.target_size = tsize;
net/netfilter/x_tables.c
1175
strscpy_pad(t->u.user.name, name, sizeof(t->u.user.name));
net/netfilter/x_tables.c
1188
u_int16_t tsize = t->u.user.target_size - off;
net/netfilter/x_tables.c
309
U->u.user.name, K->u.kernel.TYPE->name, \
net/netfilter/x_tables.c
310
&U->u.user.revision, K->u.kernel.TYPE->revision)
net/netfilter/x_tables.c
768
u_int16_t msize = cm->u.user.match_size;
net/netfilter/x_tables.c
769
char name[sizeof(m->u.user.name)];
net/netfilter/x_tables.c
779
m->u.user.match_size = msize;
net/netfilter/x_tables.c
782
strscpy_pad(m->u.user.name, name, sizeof(m->u.user.name));
net/netfilter/x_tables.c
801
u_int16_t msize = m->u.user.match_size - off;
net/netfilter/x_tables.c
852
if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0) {
net/netfilter/x_tables.c
860
} else if (strcmp(t->u.user.name, XT_ERROR_TARGET) == 0) {
net/netfilter/x_tables.c
946
if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0) {
net/netfilter/x_tables.c
954
} else if (strcmp(t->u.user.name, XT_ERROR_TARGET) == 0) {
net/netfilter/xt_hashlimit.c
491
static u64 user2credits(u64 user, int revision)
net/netfilter/xt_hashlimit.c
500
return div64_u64(user, div64_u64(scale, HZ * cpj));
net/netfilter/xt_hashlimit.c
502
return user * div64_u64(HZ * cpj, scale);
net/netfilter/xt_hashlimit.c
505
static u32 user2credits_byte(u32 user)
net/netfilter/xt_hashlimit.c
507
u64 us = user;
net/netfilter/xt_hashlimit.c
512
static u64 user2rate(u64 user)
net/netfilter/xt_hashlimit.c
514
if (user != 0) {
net/netfilter/xt_hashlimit.c
515
return div64_u64(XT_HASHLIMIT_SCALE_v2, user);
net/netfilter/xt_hashlimit.c
518
user);
net/netfilter/xt_hashlimit.c
523
static u64 user2rate_bytes(u32 user)
net/netfilter/xt_hashlimit.c
527
r = user ? U32_MAX / user : U32_MAX;
net/netfilter/xt_limit.c
100
return (user / XT_LIMIT_SCALE) * HZ * CREDITS_PER_JIFFY;
net/netfilter/xt_limit.c
102
return (user * HZ * CREDITS_PER_JIFFY) / XT_LIMIT_SCALE;
net/netfilter/xt_limit.c
95
static u32 user2credits(u32 user)
net/netfilter/xt_limit.c
98
if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
net/netrom/af_netrom.c
570
ax25_uid_assoc *user;
net/netrom/af_netrom.c
609
user = ax25_findbyuid(current_euid());
net/netrom/af_netrom.c
610
if (user) {
net/netrom/af_netrom.c
611
nr->user_addr = user->call;
net/netrom/af_netrom.c
612
ax25_uid_put(user);
net/netrom/af_netrom.c
642
ax25_uid_assoc *user;
net/netrom/af_netrom.c
688
user = ax25_findbyuid(current_euid());
net/netrom/af_netrom.c
689
if (user) {
net/netrom/af_netrom.c
690
nr->user_addr = user->call;
net/netrom/af_netrom.c
691
ax25_uid_put(user);
net/netrom/af_netrom.c
876
ax25_address *src, *dest, *user;
net/netrom/af_netrom.c
966
user = (ax25_address *)(skb->data + 21);
net/netrom/af_netrom.c
989
nr_make->user_addr = *user;
net/rose/af_rose.c
703
ax25_uid_assoc *user;
net/rose/af_rose.c
731
user = ax25_findbyuid(current_euid());
net/rose/af_rose.c
732
if (user) {
net/rose/af_rose.c
733
rose->source_call = user->call;
net/rose/af_rose.c
734
ax25_uid_put(user);
net/rose/af_rose.c
775
ax25_uid_assoc *user;
net/rose/af_rose.c
848
user = ax25_findbyuid(current_euid());
net/rose/af_rose.c
849
if (!user) {
net/rose/af_rose.c
857
rose->source_call = user->call;
net/rose/af_rose.c
861
ax25_uid_put(user);
net/socket.c
2383
err = ops->getsockopt(sock, level, optname, optval.user,
net/socket.c
2384
optlen.user);
net/tipc/crypto.c
1000
return (ehdr->user != LINK_CONFIG) ? EHDR_SIZE : EHDR_CFG_SIZE;
net/tipc/crypto.c
1045
u32 user = msg_user(hdr);
net/tipc/crypto.c
1050
ehsz = (user != LINK_CONFIG) ? EHDR_SIZE : EHDR_CFG_SIZE;
net/tipc/crypto.c
1072
ehdr->user = 0;
net/tipc/crypto.c
1082
switch (user) {
net/tipc/crypto.c
1084
ehdr->user = LINK_CONFIG;
net/tipc/crypto.c
1088
if (user == LINK_PROTOCOL && msg_type(hdr) == STATE_MSG) {
net/tipc/crypto.c
1089
ehdr->user = LINK_PROTOCOL;
net/tipc/crypto.c
1699
u32 user = msg_user(hdr);
net/tipc/crypto.c
1720
if (user == LINK_CONFIG || user == LINK_PROTOCOL)
net/tipc/crypto.c
1732
user, type);
net/tipc/crypto.c
1735
if (user == LINK_CONFIG ||
net/tipc/crypto.c
1736
(user == LINK_PROTOCOL && type == RESET_MSG) ||
net/tipc/crypto.c
1737
(user == MSG_CRYPTO && type == KEY_DISTR_MSG) ||
net/tipc/crypto.c
1929
WARN_ON(ehdr->user != LINK_CONFIG);
net/tipc/crypto.h
122
user:4,
net/tipc/crypto.h
132
user:4,
net/tipc/msg.c
109
tipc_msg_init(onode, msg, user, type, hdr_sz, dnode);
net/tipc/msg.c
81
void tipc_msg_init(u32 own_node, struct tipc_msg *m, u32 user, u32 type,
net/tipc/msg.c
86
msg_set_user(m, user);
net/tipc/msg.c
97
struct sk_buff *tipc_msg_create(uint user, uint type,
net/tipc/msg.h
1160
void tipc_msg_init(u32 own_addr, struct tipc_msg *m, u32 user, u32 type,
net/tipc/msg.h
1162
struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz,
net/tipc/node.c
2101
if (likely(ehdr->user != LINK_CONFIG)) {
net/tipc/socket.c
1200
int user, mtyp, hlen;
net/tipc/socket.c
1210
user = msg_user(hdr);
net/tipc/socket.c
1222
if (mtyp == TIPC_GRP_UCAST_MSG || user == GROUP_PROTOCOL) {
net/unix/af_unix.c
1929
struct user_struct *user = current_user();
net/unix/af_unix.c
1931
if (unlikely(READ_ONCE(user->unix_inflight) > task_rlimit(p, RLIMIT_NOFILE)))
net/unix/af_unix.h
33
void unix_schedule_gc(struct user_struct *user);
net/unix/garbage.c
229
WRITE_ONCE(fpl->user->unix_inflight, fpl->user->unix_inflight + fpl->count);
net/unix/garbage.c
259
WRITE_ONCE(fpl->user->unix_inflight, fpl->user->unix_inflight - fpl->count);
net/unix/garbage.c
303
unix_schedule_gc(fpl->user);
net/unix/garbage.c
640
void unix_schedule_gc(struct user_struct *user)
net/unix/garbage.c
648
if (user &&
net/unix/garbage.c
649
READ_ONCE(user->unix_inflight) < UNIX_INFLIGHT_SANE_USER)
net/unix/garbage.c
657
if (user && READ_ONCE(unix_graph_cyclic_sccs))
net/xdp/xdp_umem.c
137
umem->user = get_uid(current_user());
net/xdp/xdp_umem.c
140
old_npgs = atomic_long_read(&umem->user->locked_vm);
net/xdp/xdp_umem.c
143
free_uid(umem->user);
net/xdp/xdp_umem.c
144
umem->user = NULL;
net/xdp/xdp_umem.c
147
} while (atomic_long_cmpxchg(&umem->user->locked_vm, old_npgs,
net/xdp/xdp_umem.c
221
umem->user = NULL;
net/xdp/xdp_umem.c
34
if (umem->user) {
net/xdp/xdp_umem.c
35
atomic_long_sub(umem->npgs, &umem->user->locked_vm);
net/xdp/xdp_umem.c
36
free_uid(umem->user);
security/keys/gc.c
158
atomic_dec(&key->user->nkeys);
security/keys/gc.c
160
atomic_dec(&key->user->nikeys);
security/keys/gc.c
162
key_user_put(key->user);
security/keys/internal.h
71
extern void key_user_put(struct key_user *user);
security/keys/key.c
104
user = candidate;
security/keys/key.c
109
refcount_inc(&user->usage);
security/keys/key.c
113
return user;
security/keys/key.c
119
void key_user_put(struct key_user *user)
security/keys/key.c
121
if (refcount_dec_and_lock(&user->usage, &key_user_lock)) {
security/keys/key.c
122
rb_erase(&user->node, &key_user_tree);
security/keys/key.c
125
kfree(user);
security/keys/key.c
229
struct key_user *user = NULL;
security/keys/key.c
251
user = key_user_lookup(uid);
security/keys/key.c
252
if (!user)
security/keys/key.c
263
spin_lock_irqsave(&user->lock, irqflags);
security/keys/key.c
265
if (user->qnkeys + 1 > maxkeys ||
security/keys/key.c
266
user->qnbytes + quotalen > maxbytes ||
security/keys/key.c
267
user->qnbytes + quotalen < user->qnbytes)
security/keys/key.c
271
user->qnkeys++;
security/keys/key.c
272
user->qnbytes += quotalen;
security/keys/key.c
273
spin_unlock_irqrestore(&user->lock, irqflags);
security/keys/key.c
291
key->user = user;
security/keys/key.c
322
atomic_inc(&user->nkeys);
security/keys/key.c
332
spin_lock_irqsave(&user->lock, irqflags);
security/keys/key.c
333
user->qnkeys--;
security/keys/key.c
334
user->qnbytes -= quotalen;
security/keys/key.c
335
spin_unlock_irqrestore(&user->lock, irqflags);
security/keys/key.c
337
key_user_put(user);
security/keys/key.c
345
spin_lock_irqsave(&user->lock, irqflags);
security/keys/key.c
346
user->qnkeys--;
security/keys/key.c
347
user->qnbytes -= quotalen;
security/keys/key.c
348
spin_unlock_irqrestore(&user->lock, irqflags);
security/keys/key.c
350
key_user_put(user);
security/keys/key.c
356
spin_unlock_irqrestore(&user->lock, irqflags);
security/keys/key.c
357
key_user_put(user);
security/keys/key.c
383
unsigned maxbytes = uid_eq(key->user->uid, GLOBAL_ROOT_UID) ?
security/keys/key.c
387
spin_lock_irqsave(&key->user->lock, flags);
security/keys/key.c
390
(key->user->qnbytes + delta > maxbytes ||
security/keys/key.c
391
key->user->qnbytes + delta < key->user->qnbytes)) {
security/keys/key.c
395
key->user->qnbytes += delta;
security/keys/key.c
398
spin_unlock_irqrestore(&key->user->lock, flags);
security/keys/key.c
450
atomic_inc(&key->user->nikeys);
security/keys/key.c
52
struct key_user *candidate = NULL, *user;
security/keys/key.c
607
atomic_inc(&key->user->nikeys);
security/keys/key.c
63
user = rb_entry(parent, struct key_user, node);
security/keys/key.c
65
if (uid_lt(uid, user->uid))
security/keys/key.c
657
spin_lock_irqsave(&key->user->lock, flags);
security/keys/key.c
658
key->user->qnkeys--;
security/keys/key.c
659
key->user->qnbytes -= key->quotalen;
security/keys/key.c
660
spin_unlock_irqrestore(&key->user->lock, flags);
security/keys/key.c
67
else if (uid_gt(uid, user->uid))
security/keys/key.c
79
user = NULL;
security/keys/keyctl.c
1001
if (user != (uid_t) -1 && !uid_eq(uid, key->uid)) {
security/keys/keyctl.c
1025
spin_lock_irqsave(&key->user->lock, flags);
security/keys/keyctl.c
1026
key->user->qnkeys--;
security/keys/keyctl.c
1027
key->user->qnbytes -= key->quotalen;
security/keys/keyctl.c
1028
spin_unlock_irqrestore(&key->user->lock, flags);
security/keys/keyctl.c
1031
atomic_dec(&key->user->nkeys);
security/keys/keyctl.c
1035
atomic_dec(&key->user->nikeys);
security/keys/keyctl.c
1039
zapowner = key->user;
security/keys/keyctl.c
1040
key->user = newowner;
security/keys/keyctl.c
949
long keyctl_chown_key(key_serial_t id, uid_t user, gid_t group)
security/keys/keyctl.c
959
uid = make_kuid(current_user_ns(), user);
security/keys/keyctl.c
962
if ((user != (uid_t) -1) && !uid_valid(uid))
security/keys/keyctl.c
968
if (user == (uid_t) -1 && group == (gid_t) -1)
security/keys/keyctl.c
988
if (user != (uid_t) -1 && !uid_eq(key->uid, uid))
security/keys/keyring.c
1158
if (!kuid_has_mapping(ns, keyring->user->uid))
security/keys/proc.c
103
if (kuid_has_mapping(user_ns, minkey->user->uid))
security/keys/proc.c
255
struct key_user *user = rb_entry(n, struct key_user, node);
security/keys/proc.c
256
if (kuid_has_mapping(user_ns, user->uid))
security/keys/proc.c
306
struct key_user *user = rb_entry(_p, struct key_user, node);
security/keys/proc.c
307
unsigned maxkeys = uid_eq(user->uid, GLOBAL_ROOT_UID) ?
security/keys/proc.c
309
unsigned maxbytes = uid_eq(user->uid, GLOBAL_ROOT_UID) ?
security/keys/proc.c
313
from_kuid_munged(seq_user_ns(m), user->uid),
security/keys/proc.c
314
refcount_read(&user->usage),
security/keys/proc.c
315
atomic_read(&user->nkeys),
security/keys/proc.c
316
atomic_read(&user->nikeys),
security/keys/proc.c
317
user->qnkeys,
security/keys/proc.c
319
user->qnbytes,
security/keys/proc.c
71
if (kuid_has_mapping(user_ns, key->user->uid))
security/keys/process_keys.c
106
uid_keyring = keyring_alloc(buf, cred->user->uid, INVALID_GID,
security/keys/process_keys.c
128
session_keyring = keyring_alloc(buf, cred->user->uid, INVALID_GID,
security/keys/process_keys.c
206
cred->user->uid));
security/keys/process_keys.c
82
uid_t uid = from_kuid(user_ns, cred->user->uid);
security/keys/process_keys.c
936
new->user = get_uid(old->user);
security/keys/request_key.c
372
struct key_user *user,
security/keys/request_key.c
385
mutex_lock(&user->cons_lock);
security/keys/request_key.c
437
mutex_unlock(&user->cons_lock);
security/keys/request_key.c
459
mutex_unlock(&user->cons_lock);
security/keys/request_key.c
465
mutex_unlock(&user->cons_lock);
security/keys/request_key.c
475
mutex_unlock(&user->cons_lock);
security/keys/request_key.c
481
mutex_unlock(&user->cons_lock);
security/keys/request_key.c
496
struct key_user *user;
security/keys/request_key.c
509
user = key_user_lookup(current_fsuid());
security/keys/request_key.c
510
if (!user) {
security/keys/request_key.c
515
ret = construct_alloc_key(ctx, dest_keyring, flags, user, &key);
security/keys/request_key.c
516
key_user_put(user);
security/selinux/selinuxfs.c
1076
char *con = NULL, *user = NULL, *ptr;
security/selinux/selinuxfs.c
1100
user = kzalloc(size + 1, GFP_KERNEL);
security/selinux/selinuxfs.c
1101
if (!user)
security/selinux/selinuxfs.c
1105
if (sscanf(buf, "%s %s", con, user) != 2)
security/selinux/selinuxfs.c
1112
length = security_get_user_sids(sid, user, &sids, &nsids);
security/selinux/selinuxfs.c
1136
kfree(user);
security/selinux/ss/context.c
29
hash = jhash_3words(c->user, c->role, c->type, hash);
security/selinux/ss/context.h
160
dst->user = src->user;
security/selinux/ss/context.h
184
c->user = c->role = c->type = 0;
security/selinux/ss/context.h
198
return ((c1->user == c2->user) && (c1->role == c2->role) &&
security/selinux/ss/context.h
29
u32 user;
security/selinux/ss/mls.c
205
if (!c->user || c->user > p->p_users.nprim)
security/selinux/ss/mls.c
207
usrdatum = p->user_val_to_struct[c->user - 1];
security/selinux/ss/mls.c
396
struct user_datum *user, struct context *usercon)
security/selinux/ss/mls.c
401
struct mls_level *user_low = &(user->range.level[0]);
security/selinux/ss/mls.c
402
struct mls_level *user_clr = &(user->range.level[1]);
security/selinux/ss/mls.c
403
struct mls_level *user_def = &(user->dfltlevel);
security/selinux/ss/mls.h
50
struct user_datum *user, struct context *usercon);
security/selinux/ss/policydb.c
1074
c->user = le32_to_cpu(buf[0]);
security/selinux/ss/policydb.c
1685
struct user_datum *upper, *user;
security/selinux/ss/policydb.c
1689
upper = user = datum;
security/selinux/ss/policydb.c
1702
ebitmap_for_each_positive_bit(&user->roles, node, bit)
security/selinux/ss/policydb.c
1709
sym_name(p, SYM_USERS, user->value - 1),
security/selinux/ss/policydb.c
2948
buf[0] = cpu_to_le32(c->user);
security/selinux/ss/policydb.c
967
if (!c->user || c->user > p->p_users.nprim)
security/selinux/ss/policydb.c
985
usrdatum = p->user_val_to_struct[c->user - 1];
security/selinux/ss/services.c
1282
*scontext_len += strlen(sym_name(p, SYM_USERS, context->user - 1)) + 1;
security/selinux/ss/services.c
1300
sym_name(p, SYM_USERS, context->user - 1),
security/selinux/ss/services.c
1507
ctx->user = usrdatum->value;
security/selinux/ss/services.c
1831
newcontext.user = tcontext->user;
security/selinux/ss/services.c
1835
newcontext.user = scontext->user;
security/selinux/ss/services.c
1840
newcontext.user = tcontext->user;
security/selinux/ss/services.c
2104
sym_name(args->oldp, SYM_USERS, oldc->user - 1));
security/selinux/ss/services.c
2107
newc->user = usrdatum->value;
security/selinux/ss/services.c
2776
struct user_datum *user;
security/selinux/ss/services.c
2806
user = symtab_search(&policydb->p_users, username);
security/selinux/ss/services.c
2807
if (!user)
security/selinux/ss/services.c
2810
usercon.user = user->value;
security/selinux/ss/services.c
2812
ebitmap_for_each_positive_bit(&user->roles, rnode, i) {
security/selinux/ss/services.c
2818
if (mls_setup_user_range(policydb, fromcon, user,
security/selinux/ss/services.c
300
val1 = scontext->user;
security/selinux/ss/services.c
301
val2 = tcontext->user;
security/selinux/ss/services.c
3275
newcon.user = context1->user;
security/selinux/ss/services.c
3649
tmprule->au_ctxt.user = userdatum->value;
security/selinux/ss/services.c
3757
match = (ctxt->user == rule->au_ctxt.user);
security/selinux/ss/services.c
3760
match = (ctxt->user != rule->au_ctxt.user);
security/selinux/ss/services.c
3910
ctx_new.user = ctx->user;
security/selinux/ss/services.c
408
val1 = c->user;
security/selinux/ss/services.c
757
u16 orig_tclass, bool user)
security/selinux/ss/services.c
780
if (!user)
security/selinux/ss/services.c
820
if (user)
sound/core/seq/seq_clientmgr.c
1094
client->data.user.fifo) {
sound/core/seq/seq_clientmgr.c
1097
if (snd_seq_fifo_poll_wait(client->data.user.fifo, file, wait))
sound/core/seq/seq_clientmgr.c
1194
info->pid = pid_vnr(cptr->data.user.owner);
sound/core/seq/seq_clientmgr.c
1805
info->input_pool = cptr->data.user.fifo_pool_size;
sound/core/seq/seq_clientmgr.c
1807
info->input_free = snd_seq_fifo_unused_cells(cptr->data.user.fifo);
sound/core/seq/seq_clientmgr.c
1842
if (client->type == USER_CLIENT && client->data.user.fifo != NULL &&
sound/core/seq/seq_clientmgr.c
1845
info->input_pool != client->data.user.fifo_pool_size) {
sound/core/seq/seq_clientmgr.c
1847
rc = snd_seq_fifo_resize(client->data.user.fifo, info->input_pool);
sound/core/seq/seq_clientmgr.c
1850
client->data.user.fifo_pool_size = info->input_pool;
sound/core/seq/seq_clientmgr.c
1875
if (client->type == USER_CLIENT && client->data.user.fifo)
sound/core/seq/seq_clientmgr.c
1876
snd_seq_fifo_clear(client->data.user.fifo);
sound/core/seq/seq_clientmgr.c
2622
if (client->type == USER_CLIENT && client->data.user.fifo &&
sound/core/seq/seq_clientmgr.c
2623
client->data.user.fifo->pool) {
sound/core/seq/seq_clientmgr.c
2625
snd_seq_info_pool(buffer, client->data.user.fifo->pool, " ");
sound/core/seq/seq_clientmgr.c
298
struct snd_seq_user_client *user;
sound/core/seq/seq_clientmgr.c
316
user = &client->data.user;
sound/core/seq/seq_clientmgr.c
317
user->fifo = NULL;
sound/core/seq/seq_clientmgr.c
318
user->fifo_pool_size = 0;
sound/core/seq/seq_clientmgr.c
321
user->fifo_pool_size = SNDRV_SEQ_DEFAULT_CLIENT_EVENTS;
sound/core/seq/seq_clientmgr.c
322
user->fifo = snd_seq_fifo_new(user->fifo_pool_size);
sound/core/seq/seq_clientmgr.c
323
if (user->fifo == NULL) {
sound/core/seq/seq_clientmgr.c
338
user->file = file;
sound/core/seq/seq_clientmgr.c
340
client->data.user.owner = get_pid(task_pid(current));
sound/core/seq/seq_clientmgr.c
355
if (client->data.user.fifo)
sound/core/seq/seq_clientmgr.c
356
snd_seq_fifo_delete(&client->data.user.fifo);
sound/core/seq/seq_clientmgr.c
360
put_pid(client->data.user.owner);
sound/core/seq/seq_clientmgr.c
406
fifo = client->data.user.fifo;
sound/core/seq/seq_clientmgr.c
593
if (!dest->data.user.fifo)
sound/core/seq/seq_clientmgr.c
595
return snd_seq_fifo_event_in(dest->data.user.fifo, event);
sound/core/seq/seq_clientmgr.h
59
struct snd_seq_user_client user;
sound/core/timer_compat.c
35
struct snd_timer_gparams32 __user *user)
sound/core/timer_compat.c
39
if (copy_from_user(&gparams.tid, &user->tid, sizeof(gparams.tid)) ||
sound/core/timer_compat.c
40
get_user(gparams.period_num, &user->period_num) ||
sound/core/timer_compat.c
41
get_user(gparams.period_den, &user->period_den))
sound/pci/ctxfi/ctdaio.c
181
entry->user = entry->addr = daio->rscl.ops->index(&daio->rscl);
sound/pci/ctxfi/ctdaio.c
210
entry->user = entry->addr = daio->rscr.ops->index(&daio->rscr);
sound/pci/ctxfi/ctdaio.c
699
entry->slot = entry->addr = entry->next = entry->user = 0;
sound/pci/ctxfi/cthardware.h
128
int (*srcimp_mgr_set_imapuser)(void *blk, unsigned int user);
sound/pci/ctxfi/cthw20k1.c
532
static int srcimp_mgr_set_imapuser(void *blk, unsigned int user)
sound/pci/ctxfi/cthw20k1.c
536
set_field(&ctl->srcimap.srcaim, SRCAIM_SRC, user);
sound/pci/ctxfi/cthw20k2.c
532
static int srcimp_mgr_set_imapuser(void *blk, unsigned int user)
sound/pci/ctxfi/cthw20k2.c
536
set_field(&ctl->srcimap.srcaim, SRCAIM_SRC, user);
sound/pci/ctxfi/ctimap.c
80
entry->next = entry->addr = entry->user = entry->slot = 0;
sound/pci/ctxfi/ctimap.h
22
unsigned short user; /* the id of the user resource consuming data */
sound/pci/ctxfi/ctsrc.c
624
entry->user = src->rsc.ops->index(&src->rsc);
sound/pci/ctxfi/ctsrc.c
772
hw->srcimp_mgr_set_imapuser(mgr->ctrl_blk, entry->user);
sound/pci/ctxfi/ctsrc.c
829
entry->slot = entry->addr = entry->next = entry->user = 0;
sound/pci/ctxfi/ctsrc.h
113
int (*map)(struct srcimp *srcimp, struct src *user, struct rsc *input);
tools/perf/arch/x86/tests/bp-modify.c
155
offsetof(struct user, u_debugreg[0]), bp_1)) {
tools/perf/arch/x86/tests/bp-modify.c
161
offsetof(struct user, u_debugreg[7]), dr7)) {
tools/perf/arch/x86/tests/bp-modify.c
167
offsetof(struct user, u_debugreg[0]), (unsigned long) (-1))) {
tools/perf/arch/x86/tests/bp-modify.c
82
offsetof(struct user, u_debugreg[0]), bp_2)) {
tools/perf/arch/x86/tests/bp-modify.c
88
offsetof(struct user, u_debugreg[0]), bp_1)) {
tools/perf/arch/x86/tests/bp-modify.c
94
offsetof(struct user, u_debugreg[7]), dr7)) {
tools/perf/util/evsel.c
718
MOD_PRINT(user, 'u');
tools/perf/util/parse-events.c
1816
if (mod.user) {
tools/perf/util/parse-events.h
207
bool user : 1; /* 'u' */
tools/perf/util/probe-event.c
1067
bool user)
tools/perf/util/probe-event.c
1086
ret = get_alternative_line_range(dinfo, lr, module, user);
tools/perf/util/probe-event.c
1166
struct nsinfo *nsi, bool user)
tools/perf/util/probe-event.c
1171
ret = init_probe_symbol_maps(user);
tools/perf/util/probe-event.c
1175
ret = __show_line_range(lr, module, user);
tools/perf/util/probe-event.c
1304
bool user __maybe_unused)
tools/perf/util/probe-event.c
202
struct map *get_target_map(const char *target, struct nsinfo *nsi, bool user)
tools/perf/util/probe-event.c
205
if (user) {
tools/perf/util/probe-event.c
3780
struct strfilter *_filter, bool user)
tools/perf/util/probe-event.c
3786
ret = init_probe_symbol_maps(user);
tools/perf/util/probe-event.c
3791
map = get_target_map(target, nsi, user);
tools/perf/util/probe-event.c
469
const char *target, bool user)
tools/perf/util/probe-event.c
482
target, NULL, user);
tools/perf/util/probe-event.h
176
struct nsinfo *nsi, bool user);
tools/perf/util/probe-event.h
180
struct strfilter *filter, bool user);
tools/perf/util/probe-event.h
194
struct map *get_target_map(const char *target, struct nsinfo *nsi, bool user);
tools/testing/selftests/breakpoints/breakpoint_test.c
109
offsetof(struct user, u_debugreg[7]), dr7);
tools/testing/selftests/breakpoints/breakpoint_test.c
47
offsetof(struct user, u_debugreg[n]), addr);
tools/testing/selftests/breakpoints/breakpoint_test.c
89
offsetof(struct user, u_debugreg[7]), 0);
tools/testing/selftests/kvm/include/x86/kvm_util_arch.h
16
uint64_t user;
tools/testing/selftests/kvm/include/x86/processor.h
1470
#define PTE_USER_MASK(mmu) ((mmu)->arch.pte_masks.user)
tools/testing/selftests/kvm/lib/x86/processor.c
185
.user = BIT_ULL(2),
tools/testing/selftests/kvm/lib/x86/svm.c
79
pte_masks.always_set = pte_masks.user;
tools/testing/selftests/kvm/lib/x86/vmx.c
56
.user = 0,
tools/testing/selftests/rlimits/rlimits-per-userns.c
27
static uid_t user = 60000;
tools/testing/selftests/rlimits/rlimits-per-userns.c
63
warnx("(pid=%d): Changing to uid=%d, gid=%d", pid, user, group);
tools/testing/selftests/rlimits/rlimits-per-userns.c
67
if (setuid(user) < 0)
tools/testing/selftests/rlimits/rlimits-per-userns.c
68
err(EXIT_FAILURE, "(pid=%d): setuid(%d)", pid, user);
tools/testing/selftests/user_events/abi_test.c
216
FIXTURE(user) {
tools/testing/selftests/user_events/abi_test.c
222
FIXTURE_SETUP(user) {
tools/testing/selftests/user_events/abi_test.c
230
FIXTURE_TEARDOWN(user) {
tools/testing/selftests/user_events/abi_test.c
234
TEST_F(user, enablement) {
tools/testing/selftests/user_events/abi_test.c
255
TEST_F(user, flags) {
tools/testing/selftests/user_events/abi_test.c
275
TEST_F(user, bit_sizes) {
tools/testing/selftests/user_events/abi_test.c
301
TEST_F(user, multi_format) {
tools/testing/selftests/user_events/abi_test.c
344
TEST_F(user, forks) {
tools/testing/selftests/user_events/abi_test.c
399
TEST_F(user, clones) {
tools/testing/selftests/user_events/dyn_test.c
199
FIXTURE(user) {
tools/testing/selftests/user_events/dyn_test.c
204
FIXTURE_SETUP(user) {
tools/testing/selftests/user_events/dyn_test.c
208
FIXTURE_TEARDOWN(user) {
tools/testing/selftests/user_events/dyn_test.c
214
TEST_F(user, basic_types) {
tools/testing/selftests/user_events/dyn_test.c
238
TEST_F(user, loc_types) {
tools/testing/selftests/user_events/dyn_test.c
246
TEST_F(user, size_types) {
tools/testing/selftests/user_events/dyn_test.c
255
TEST_F(user, matching) {
tools/testing/selftests/user_events/ftrace_test.c
202
FIXTURE(user) {
tools/testing/selftests/user_events/ftrace_test.c
210
FIXTURE_SETUP(user) {
tools/testing/selftests/user_events/ftrace_test.c
222
FIXTURE_TEARDOWN(user) {
tools/testing/selftests/user_events/ftrace_test.c
237
TEST_F(user, register_events) {
tools/testing/selftests/user_events/ftrace_test.c
306
TEST_F(user, write_events) {
tools/testing/selftests/user_events/ftrace_test.c
364
TEST_F(user, write_empty_events) {
tools/testing/selftests/user_events/ftrace_test.c
397
TEST_F(user, write_fault) {
tools/testing/selftests/user_events/ftrace_test.c
434
TEST_F(user, write_validator) {
tools/testing/selftests/user_events/ftrace_test.c
505
TEST_F(user, print_fmt) {
tools/testing/selftests/user_events/perf_test.c
111
FIXTURE(user) {
tools/testing/selftests/user_events/perf_test.c
117
FIXTURE_SETUP(user) {
tools/testing/selftests/user_events/perf_test.c
124
FIXTURE_TEARDOWN(user) {
tools/testing/selftests/user_events/perf_test.c
133
TEST_F(user, perf_write) {
tools/testing/selftests/user_events/perf_test.c
198
TEST_F(user, perf_empty_events) {
tools/testing/selftests/x86/mov_ss_trap.c
79
if (ptrace(PTRACE_POKEUSER, parent, (void *)offsetof(struct user, u_debugreg[0]), dr0) != 0)
tools/testing/selftests/x86/mov_ss_trap.c
82
if (ptrace(PTRACE_POKEUSER, parent, (void *)offsetof(struct user, u_debugreg[1]), dr1) != 0)
tools/testing/selftests/x86/mov_ss_trap.c
85
if (ptrace(PTRACE_POKEUSER, parent, (void *)offsetof(struct user, u_debugreg[7]), dr7) != 0)
tools/tracing/rtla/src/common.c
261
params->user.should_run = 1;
tools/tracing/rtla/src/common.c
263
params->user.stopped_running = 0;
tools/tracing/rtla/src/common.c
265
params->user.set = ¶ms->monitored_cpus;
tools/tracing/rtla/src/common.c
267
params->user.sched_param = ¶ms->sched_param;
tools/tracing/rtla/src/common.c
269
params->user.sched_param = NULL;
tools/tracing/rtla/src/common.c
271
params->user.cgroup_name = params->cgroup_name;
tools/tracing/rtla/src/common.c
273
retval = pthread_create(&user_thread, NULL, timerlat_u_dispatcher, ¶ms->user);
tools/tracing/rtla/src/common.c
289
if (params->user_workload && !params->user.stopped_running) {
tools/tracing/rtla/src/common.c
290
params->user.should_run = 0;
tools/tracing/rtla/src/common.c
371
if (params->user.stopped_running) {
tools/tracing/rtla/src/common.c
422
if (params->user.stopped_running) {
tools/tracing/rtla/src/common.h
107
struct timerlat_u_params user;
tools/tracing/rtla/src/timerlat_hist.c
114
data->hist[cpu].user = calloc(1, sizeof(*data->hist->user) * (entries + 1));
tools/tracing/rtla/src/timerlat_hist.c
115
if (!data->hist[cpu].user)
tools/tracing/rtla/src/timerlat_hist.c
165
hist = data->hist[cpu].user;
tools/tracing/rtla/src/timerlat_hist.c
220
data->hist[j].user[i] = value_user[j];
tools/tracing/rtla/src/timerlat_hist.c
24
int *user;
tools/tracing/rtla/src/timerlat_hist.c
277
data->hist[i].user[data->entries] = value_user[i];
tools/tracing/rtla/src/timerlat_hist.c
648
total += data->hist[cpu].user[bucket];
tools/tracing/rtla/src/timerlat_hist.c
650
data->hist[cpu].user[bucket]);
tools/tracing/rtla/src/timerlat_hist.c
66
if (data->hist[cpu].user)
tools/tracing/rtla/src/timerlat_hist.c
67
free(data->hist[cpu].user);
tools/tracing/rtla/src/timerlat_hist.c
683
data->hist[cpu].user[data->entries]);
tools/tracing/rtla/src/timerlat_top.c
853
if (params->common.user.stopped_running) {