#ifndef _ASM_POWERPC_GUEST_STATE_BUFFER_H
#define _ASM_POWERPC_GUEST_STATE_BUFFER_H
#include "asm/hvcall.h"
#include <linux/gfp.h>
#include <linux/bitmap.h>
#include <asm/plpar_wrappers.h>
/*
 * Element identifiers (GSIDs) for guest state buffer elements.
 * IDs are grouped into contiguous ranges by class; the *_START/*_END
 * macros below depend on each range staying contiguous.
 */

/* Guest-wide elements: apply to the whole guest, not a single vCPU */
#define KVMPPC_GSID_BLANK 0x0000
#define KVMPPC_GSID_HOST_STATE_SIZE 0x0001
#define KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE 0x0002
#define KVMPPC_GSID_LOGICAL_PVR 0x0003
#define KVMPPC_GSID_TB_OFFSET 0x0004
#define KVMPPC_GSID_PARTITION_TABLE 0x0005
#define KVMPPC_GSID_PROCESS_TABLE 0x0006

/* Host-wide elements: L0 hypervisor resource usage */
#define KVMPPC_GSID_L0_GUEST_HEAP 0x0800
#define KVMPPC_GSID_L0_GUEST_HEAP_MAX 0x0801
#define KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE 0x0802
#define KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX 0x0803
#define KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM 0x0804

/* Metadata elements: run input/output buffers and the VPA */
#define KVMPPC_GSID_RUN_INPUT 0x0C00
#define KVMPPC_GSID_RUN_OUTPUT 0x0C01
#define KVMPPC_GSID_VPA 0x0C02

/* Doubleword (64-bit) register elements */
#define KVMPPC_GSID_GPR(x) (0x1000 + (x))
#define KVMPPC_GSID_HDEC_EXPIRY_TB 0x1020
#define KVMPPC_GSID_NIA 0x1021
#define KVMPPC_GSID_MSR 0x1022
#define KVMPPC_GSID_LR 0x1023
#define KVMPPC_GSID_XER 0x1024
#define KVMPPC_GSID_CTR 0x1025
#define KVMPPC_GSID_CFAR 0x1026
#define KVMPPC_GSID_SRR0 0x1027
#define KVMPPC_GSID_SRR1 0x1028
#define KVMPPC_GSID_DAR 0x1029
#define KVMPPC_GSID_DEC_EXPIRY_TB 0x102A
#define KVMPPC_GSID_VTB 0x102B
#define KVMPPC_GSID_LPCR 0x102C
#define KVMPPC_GSID_HFSCR 0x102D
#define KVMPPC_GSID_FSCR 0x102E
#define KVMPPC_GSID_FPSCR 0x102F
#define KVMPPC_GSID_DAWR0 0x1030
#define KVMPPC_GSID_DAWR1 0x1031
#define KVMPPC_GSID_CIABR 0x1032
#define KVMPPC_GSID_PURR 0x1033
#define KVMPPC_GSID_SPURR 0x1034
#define KVMPPC_GSID_IC 0x1035
#define KVMPPC_GSID_SPRG0 0x1036
#define KVMPPC_GSID_SPRG1 0x1037
#define KVMPPC_GSID_SPRG2 0x1038
#define KVMPPC_GSID_SPRG3 0x1039
#define KVMPPC_GSID_PPR 0x103A
/* MMCR(x) occupies 0x103B onward; MMCRA follows at 0x103F */
#define KVMPPC_GSID_MMCR(x) (0x103B + (x))
#define KVMPPC_GSID_MMCRA 0x103F
#define KVMPPC_GSID_SIER(x) (0x1040 + (x))
#define KVMPPC_GSID_BESCR 0x1043
#define KVMPPC_GSID_EBBHR 0x1044
#define KVMPPC_GSID_EBBRR 0x1045
#define KVMPPC_GSID_AMR 0x1046
#define KVMPPC_GSID_IAMR 0x1047
#define KVMPPC_GSID_AMOR 0x1048
#define KVMPPC_GSID_UAMOR 0x1049
#define KVMPPC_GSID_SDAR 0x104A
#define KVMPPC_GSID_SIAR 0x104B
#define KVMPPC_GSID_DSCR 0x104C
#define KVMPPC_GSID_TAR 0x104D
#define KVMPPC_GSID_DEXCR 0x104E
#define KVMPPC_GSID_HDEXCR 0x104F
#define KVMPPC_GSID_HASHKEYR 0x1050
#define KVMPPC_GSID_HASHPKEYR 0x1051
#define KVMPPC_GSID_CTRL 0x1052
#define KVMPPC_GSID_DPDES 0x1053

/* Word (32-bit) register elements */
#define KVMPPC_GSID_CR 0x2000
#define KVMPPC_GSID_PIDR 0x2001
#define KVMPPC_GSID_DSISR 0x2002
#define KVMPPC_GSID_VSCR 0x2003
#define KVMPPC_GSID_VRSAVE 0x2004
#define KVMPPC_GSID_DAWRX0 0x2005
#define KVMPPC_GSID_DAWRX1 0x2006
#define KVMPPC_GSID_PMC(x) (0x2007 + (x))
#define KVMPPC_GSID_WORT 0x200D
#define KVMPPC_GSID_PSPB 0x200E

/* Vector state: 64 VSRs */
#define KVMPPC_GSID_VSRS(x) (0x3000 + (x))

/* Interrupt-related elements */
#define KVMPPC_GSID_HDAR 0xF000
#define KVMPPC_GSID_HDSISR 0xF001
#define KVMPPC_GSID_HEIR 0xF002
#define KVMPPC_GSID_ASDR 0xF003
/*
 * Per-class contiguous identifier ranges and their sizes.
 * KVMPPC_GSE_IDEN_COUNT is the total number of identifiers and sizes
 * the bitmaps/arrays in struct kvmppc_gs_bitmap and
 * struct kvmppc_gs_parser.
 */
#define KVMPPC_GSE_GUESTWIDE_START KVMPPC_GSID_BLANK
#define KVMPPC_GSE_GUESTWIDE_END KVMPPC_GSID_PROCESS_TABLE
#define KVMPPC_GSE_GUESTWIDE_COUNT \
	(KVMPPC_GSE_GUESTWIDE_END - KVMPPC_GSE_GUESTWIDE_START + 1)

#define KVMPPC_GSE_HOSTWIDE_START KVMPPC_GSID_L0_GUEST_HEAP
#define KVMPPC_GSE_HOSTWIDE_END KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM
#define KVMPPC_GSE_HOSTWIDE_COUNT \
	(KVMPPC_GSE_HOSTWIDE_END - KVMPPC_GSE_HOSTWIDE_START + 1)

#define KVMPPC_GSE_META_START KVMPPC_GSID_RUN_INPUT
#define KVMPPC_GSE_META_END KVMPPC_GSID_VPA
#define KVMPPC_GSE_META_COUNT (KVMPPC_GSE_META_END - KVMPPC_GSE_META_START + 1)

#define KVMPPC_GSE_DW_REGS_START KVMPPC_GSID_GPR(0)
#define KVMPPC_GSE_DW_REGS_END KVMPPC_GSID_DPDES
#define KVMPPC_GSE_DW_REGS_COUNT \
	(KVMPPC_GSE_DW_REGS_END - KVMPPC_GSE_DW_REGS_START + 1)

#define KVMPPC_GSE_W_REGS_START KVMPPC_GSID_CR
#define KVMPPC_GSE_W_REGS_END KVMPPC_GSID_PSPB
#define KVMPPC_GSE_W_REGS_COUNT \
	(KVMPPC_GSE_W_REGS_END - KVMPPC_GSE_W_REGS_START + 1)

#define KVMPPC_GSE_VSRS_START KVMPPC_GSID_VSRS(0)
#define KVMPPC_GSE_VSRS_END KVMPPC_GSID_VSRS(63)
#define KVMPPC_GSE_VSRS_COUNT (KVMPPC_GSE_VSRS_END - KVMPPC_GSE_VSRS_START + 1)

#define KVMPPC_GSE_INTR_REGS_START KVMPPC_GSID_HDAR
#define KVMPPC_GSE_INTR_REGS_END KVMPPC_GSID_ASDR
#define KVMPPC_GSE_INTR_REGS_COUNT \
	(KVMPPC_GSE_INTR_REGS_END - KVMPPC_GSE_INTR_REGS_START + 1)

#define KVMPPC_GSE_IDEN_COUNT \
	(KVMPPC_GSE_HOSTWIDE_COUNT + \
	 KVMPPC_GSE_GUESTWIDE_COUNT + KVMPPC_GSE_META_COUNT + \
	 KVMPPC_GSE_DW_REGS_COUNT + KVMPPC_GSE_W_REGS_COUNT + \
	 KVMPPC_GSE_VSRS_COUNT + KVMPPC_GSE_INTR_REGS_COUNT)
/*
 * Element classes, one per identifier range above.
 *
 * NOTE(review): the values look like single-bit flags, but
 * KVMPPC_GS_CLASS_VECTOR is 0x18 (= 0x08 | 0x10), which overlaps
 * DWORD_REG and WORD_REG rather than occupying its own bit — confirm
 * whether class values are ever combined as a bitmask.
 */
enum {
	KVMPPC_GS_CLASS_GUESTWIDE = 0x01,
	KVMPPC_GS_CLASS_HOSTWIDE = 0x02,
	KVMPPC_GS_CLASS_META = 0x04,
	KVMPPC_GS_CLASS_DWORD_REG = 0x08,
	KVMPPC_GS_CLASS_WORD_REG = 0x10,
	KVMPPC_GS_CLASS_VECTOR = 0x18,
	KVMPPC_GS_CLASS_INTR = 0x20,
};
/* Wire encodings for an element's payload */
enum {
	KVMPPC_GSE_BE32,		/* 32-bit big-endian scalar */
	KVMPPC_GSE_BE64,		/* 64-bit big-endian scalar */
	KVMPPC_GSE_VEC128,		/* 128-bit vector (two be64 words) */
	KVMPPC_GSE_PARTITION_TABLE,	/* struct kvmppc_gs_part_table */
	KVMPPC_GSE_PROCESS_TABLE,	/* struct kvmppc_gs_proc_table */
	KVMPPC_GSE_BUFFER,		/* struct kvmppc_gs_buff_info */
	__KVMPPC_GSE_TYPE_MAX,
};
/* Flags describing an identifier's scope (see kvmppc_gsid_flags()) */
enum {
	KVMPPC_GS_FLAGS_WIDE = 0x01,		/* guest-wide element */
	KVMPPC_GS_FLAGS_HOST_WIDE = 0x02,	/* host-wide element */
};
/* Payload of a KVMPPC_GSID_PARTITION_TABLE element */
struct kvmppc_gs_part_table {
	u64 address;	/* partition table base address */
	u64 ea_bits;	/* effective address bits */
	u64 gpd_size;	/* group pagetable directory size */
};

/* Payload of a KVMPPC_GSID_PROCESS_TABLE element */
struct kvmppc_gs_proc_table {
	u64 address;	/* process table base address */
	u64 gpd_size;	/* group pagetable directory size */
};

/* Payload describing another buffer (e.g. run input/output) */
struct kvmppc_gs_buff_info {
	u64 address;	/* buffer address */
	u64 size;	/* buffer size in bytes */
};
/* On-the-wire buffer header: element count followed by packed elements */
struct kvmppc_gs_header {
	__be32 nelems;	/* number of elements that follow */
	char data[];	/* packed struct kvmppc_gs_elem entries */
} __packed;

/* On-the-wire element: TLV-style identifier, length, payload */
struct kvmppc_gs_elem {
	__be16 iden;	/* element identifier (KVMPPC_GSID_*) */
	__be16 len;	/* payload length in bytes */
	char data[];	/* payload, big-endian encoded */
} __packed;

/* Host-side handle for a guest state buffer */
struct kvmppc_gs_buff {
	size_t capacity;		/* total allocated bytes, header included */
	size_t len;			/* bytes currently used, header included */
	unsigned long guest_id;		/* guest the buffer belongs to */
	unsigned long vcpu_id;		/* vCPU the buffer belongs to */
	struct kvmppc_gs_header *hdr;	/* the wire-format buffer itself */
};
/* One bit per guest state identifier */
struct kvmppc_gs_bitmap {
	DECLARE_BITMAP(bitmap, KVMPPC_GSE_IDEN_COUNT);
};

/* Result of parsing a buffer: per-identifier element pointers */
struct kvmppc_gs_parser {
	struct kvmppc_gs_bitmap iterator;	/* which identifiers were seen */
	struct kvmppc_gs_elem *gses[KVMPPC_GSE_IDEN_COUNT];
};
/* Flags for a guest state message (struct kvmppc_gs_msg.flags) */
enum {
	GSM_GUEST_WIDE = 0x1,	/* message carries guest-wide elements */
	GSM_SEND = 0x2,		/* message is sent to the hypervisor */
	GSM_RECEIVE = 0x4,	/* message is received from the hypervisor */
	GSM_GSB_OWNER = 0x8,	/* message owns (frees) its buffer */
};

struct kvmppc_gs_msg;

/* Callbacks binding a message to its backing data */
struct kvmppc_gs_msg_ops {
	/* buffer size needed to hold the message's elements */
	size_t (*get_size)(struct kvmppc_gs_msg *gsm);
	/* serialize message data into the buffer */
	int (*fill_info)(struct kvmppc_gs_buff *gsb, struct kvmppc_gs_msg *gsm);
	/* deserialize the buffer back into the message data */
	int (*refresh_info)(struct kvmppc_gs_msg *gsm,
			    struct kvmppc_gs_buff *gsb);
};

/* A set of identifiers plus ops and opaque data to (de)serialize them */
struct kvmppc_gs_msg {
	struct kvmppc_gs_bitmap bitmap;	/* identifiers included in the message */
	struct kvmppc_gs_msg_ops *ops;
	unsigned long flags;		/* GSM_* flags */
	void *data;			/* opaque context passed to ops */
};
/* Per-identifier metadata lookups (implemented in guest-state-buffer.c) */
u16 kvmppc_gsid_size(u16 iden);			/* payload size in bytes */
unsigned long kvmppc_gsid_flags(u16 iden);	/* KVMPPC_GS_FLAGS_* */
u64 kvmppc_gsid_mask(u16 iden);			/* valid-bits mask for the value */

/* Buffer lifetime and transport */
struct kvmppc_gs_buff *kvmppc_gsb_new(size_t size, unsigned long guest_id,
				      unsigned long vcpu_id, gfp_t flags);
void kvmppc_gsb_free(struct kvmppc_gs_buff *gsb);
/* Reserve @size bytes in the buffer; returns a pointer to the space */
void *kvmppc_gsb_put(struct kvmppc_gs_buff *gsb, size_t size);
int kvmppc_gsb_send(struct kvmppc_gs_buff *gsb, unsigned long flags);
int kvmppc_gsb_recv(struct kvmppc_gs_buff *gsb, unsigned long flags);
/* Wire-format header at the start of the buffer. */
static inline struct kvmppc_gs_header *
kvmppc_gsb_header(struct kvmppc_gs_buff *gsb)
{
	return gsb->hdr;
}

/* First element slot, immediately after the header. */
static inline struct kvmppc_gs_elem *kvmppc_gsb_data(struct kvmppc_gs_buff *gsb)
{
	return (struct kvmppc_gs_elem *)kvmppc_gsb_header(gsb)->data;
}

/* Bytes currently used, header included. */
static inline size_t kvmppc_gsb_len(struct kvmppc_gs_buff *gsb)
{
	return gsb->len;
}

/* Total allocated bytes, header included. */
static inline size_t kvmppc_gsb_capacity(struct kvmppc_gs_buff *gsb)
{
	return gsb->capacity;
}

/* Physical address of the buffer header. */
static inline u64 kvmppc_gsb_paddress(struct kvmppc_gs_buff *gsb)
{
	return __pa(kvmppc_gsb_header(gsb));
}

/* Number of elements in the buffer, converted to host byte order. */
static inline u32 kvmppc_gsb_nelems(struct kvmppc_gs_buff *gsb)
{
	return be32_to_cpu(kvmppc_gsb_header(gsb)->nelems);
}

/* Empty the buffer: zero the element count, keep only the header. */
static inline void kvmppc_gsb_reset(struct kvmppc_gs_buff *gsb)
{
	kvmppc_gsb_header(gsb)->nelems = cpu_to_be32(0);
	gsb->len = sizeof(struct kvmppc_gs_header);
}

/* Bytes of element data currently stored (header excluded). */
static inline size_t kvmppc_gsb_data_len(struct kvmppc_gs_buff *gsb)
{
	return gsb->len - sizeof(struct kvmppc_gs_header);
}

/* Bytes available for element data (header excluded). */
static inline size_t kvmppc_gsb_data_cap(struct kvmppc_gs_buff *gsb)
{
	return gsb->capacity - sizeof(struct kvmppc_gs_header);
}
/* Iterate over every element in a buffer; @rem tracks remaining bytes */
#define kvmppc_gsb_for_each_elem(i, pos, gsb, rem) \
	kvmppc_gse_for_each_elem(i, kvmppc_gsb_nelems(gsb), pos, \
				 kvmppc_gsb_data(gsb), \
				 kvmppc_gsb_data_cap(gsb), rem)
/* Element identifier, converted to host byte order. */
static inline u16 kvmppc_gse_iden(const struct kvmppc_gs_elem *gse)
{
	return be16_to_cpu(gse->iden);
}

/* Payload length in bytes, converted to host byte order. */
static inline u16 kvmppc_gse_len(const struct kvmppc_gs_elem *gse)
{
	return be16_to_cpu(gse->len);
}

/* Total on-wire size of the element: header plus payload. */
static inline u16 kvmppc_gse_total_len(const struct kvmppc_gs_elem *gse)
{
	return be16_to_cpu(gse->len) + sizeof(*gse);
}

/* On-wire size an element with a @size-byte payload would occupy. */
static inline u16 kvmppc_gse_total_size(u16 size)
{
	return sizeof(struct kvmppc_gs_elem) + size;
}

/* Pointer to the element's payload. */
static inline void *kvmppc_gse_data(const struct kvmppc_gs_elem *gse)
{
	return (void *)gse->data;
}

/* True if the element fits entirely within @remaining bytes. */
static inline bool kvmppc_gse_ok(const struct kvmppc_gs_elem *gse,
				 int remaining)
{
	return remaining >= kvmppc_gse_total_len(gse);
}
static inline struct kvmppc_gs_elem *
kvmppc_gse_next(const struct kvmppc_gs_elem *gse, int *remaining)
{
int len = sizeof(*gse) + kvmppc_gse_len(gse);
*remaining -= len;
return (struct kvmppc_gs_elem *)(gse->data + kvmppc_gse_len(gse));
}
/*
 * Walk up to @max packed elements starting at @head; stops early if an
 * element would overrun the @len-byte region (kvmppc_gse_ok check).
 */
#define kvmppc_gse_for_each_elem(i, max, pos, head, len, rem) \
	for (i = 0, pos = head, rem = len; kvmppc_gse_ok(pos, rem) && i < max; \
	     pos = kvmppc_gse_next(pos, &(rem)), i++)
/* Append an element of @size bytes with payload @data to the buffer */
int __kvmppc_gse_put(struct kvmppc_gs_buff *gsb, u16 iden, u16 size,
		     const void *data);
/* Populate a parser from the elements in a buffer */
int kvmppc_gse_parse(struct kvmppc_gs_parser *gsp, struct kvmppc_gs_buff *gsb);
/* Append a 32-bit value that is already big-endian encoded. */
static inline int kvmppc_gse_put_be32(struct kvmppc_gs_buff *gsb, u16 iden,
				      __be32 val)
{
	/* val is a by-value copy, safe to take its address directly */
	return __kvmppc_gse_put(gsb, iden, sizeof(val), &val);
}
/*
 * Append a host-order 32-bit value: mask it to the identifier's valid
 * bits, then byte-swap to big-endian for the wire.
 */
static inline int kvmppc_gse_put_u32(struct kvmppc_gs_buff *gsb, u16 iden,
				     u32 val)
{
	__be32 wire = cpu_to_be32(val & kvmppc_gsid_mask(iden));

	return kvmppc_gse_put_be32(gsb, iden, wire);
}
/* Append a 64-bit value that is already big-endian encoded. */
static inline int kvmppc_gse_put_be64(struct kvmppc_gs_buff *gsb, u16 iden,
				      __be64 val)
{
	/* val is a by-value copy, safe to take its address directly */
	return __kvmppc_gse_put(gsb, iden, sizeof(val), &val);
}
/*
 * Append a host-order 64-bit value: mask it to the identifier's valid
 * bits, then byte-swap to big-endian for the wire.
 */
static inline int kvmppc_gse_put_u64(struct kvmppc_gs_buff *gsb, u16 iden,
				     u64 val)
{
	__be64 wire = cpu_to_be64(val & kvmppc_gsid_mask(iden));

	return kvmppc_gse_put_be64(gsb, iden, wire);
}
/*
 * Append a register value, choosing the wire width from the
 * identifier's declared size. Returns -EINVAL if the masked value does
 * not fit a 32-bit identifier, or if the size is neither 4 nor 8 bytes.
 */
static inline int __kvmppc_gse_put_reg(struct kvmppc_gs_buff *gsb, u16 iden,
				       u64 val)
{
	val &= kvmppc_gsid_mask(iden);

	switch (kvmppc_gsid_size(iden)) {
	case sizeof(u64):
		return kvmppc_gse_put_u64(gsb, iden, val);
	case sizeof(u32): {
		u32 narrowed = (u32)val;

		/* reject values that would be truncated */
		if (narrowed != val)
			return -EINVAL;
		return kvmppc_gse_put_u32(gsb, iden, narrowed);
	}
	default:
		return -EINVAL;
	}
}
/*
 * Append a 128-bit vector register as two big-endian doublewords.
 * Without CONFIG_VSX only the FP half is populated; the low doubleword
 * is sent as zero.
 */
static inline int kvmppc_gse_put_vector128(struct kvmppc_gs_buff *gsb, u16 iden,
					   vector128 *val)
{
	__be64 tmp[2] = { 0 };
	union {
		__vector128 v;
		u64 dw[2];
	} u;

	u.v = *val;
	tmp[0] = cpu_to_be64(u.dw[TS_FPROFFSET]);
#ifdef CONFIG_VSX
	tmp[1] = cpu_to_be64(u.dw[TS_VSRLOWOFFSET]);
#endif
	return __kvmppc_gse_put(gsb, iden, sizeof(tmp), &tmp);
}
/*
 * Append a partition table element (three big-endian doublewords:
 * address, ea_bits, gpd_size).
 *
 * Fix: the element is now stored under the caller-supplied @iden; the
 * previous code ignored @iden and hardcoded KVMPPC_GSID_PARTITION_TABLE,
 * silently mislabeling any other requested identifier.
 */
static inline int kvmppc_gse_put_part_table(struct kvmppc_gs_buff *gsb,
					    u16 iden,
					    struct kvmppc_gs_part_table val)
{
	__be64 tmp[3];

	tmp[0] = cpu_to_be64(val.address);
	tmp[1] = cpu_to_be64(val.ea_bits);
	tmp[2] = cpu_to_be64(val.gpd_size);
	return __kvmppc_gse_put(gsb, iden, sizeof(tmp), &tmp);
}
/*
 * Append a process table element (two big-endian doublewords: address,
 * gpd_size).
 *
 * Fix: the element is now stored under the caller-supplied @iden; the
 * previous code ignored @iden and hardcoded KVMPPC_GSID_PROCESS_TABLE,
 * silently mislabeling any other requested identifier.
 */
static inline int kvmppc_gse_put_proc_table(struct kvmppc_gs_buff *gsb,
					    u16 iden,
					    struct kvmppc_gs_proc_table val)
{
	__be64 tmp[2];

	tmp[0] = cpu_to_be64(val.address);
	tmp[1] = cpu_to_be64(val.gpd_size);
	return __kvmppc_gse_put(gsb, iden, sizeof(tmp), &tmp);
}
/* Append a buffer descriptor (address, size) as two big-endian doublewords. */
static inline int kvmppc_gse_put_buff_info(struct kvmppc_gs_buff *gsb, u16 iden,
					   struct kvmppc_gs_buff_info val)
{
	__be64 tmp[2] = {
		cpu_to_be64(val.address),
		cpu_to_be64(val.size),
	};

	return __kvmppc_gse_put(gsb, iden, sizeof(tmp), &tmp);
}
/* NOTE(review): redundant re-declaration of __kvmppc_gse_put (harmless) */
int __kvmppc_gse_put(struct kvmppc_gs_buff *gsb, u16 iden, u16 size,
		     const void *data);
/* Raw big-endian 32-bit payload; warns and returns 0 on a length mismatch. */
static inline __be32 kvmppc_gse_get_be32(const struct kvmppc_gs_elem *gse)
{
	if (WARN_ON(kvmppc_gse_len(gse) != sizeof(__be32)))
		return 0;
	return *(__be32 *)kvmppc_gse_data(gse);
}

/* 32-bit payload converted to host byte order. */
static inline u32 kvmppc_gse_get_u32(const struct kvmppc_gs_elem *gse)
{
	return be32_to_cpu(kvmppc_gse_get_be32(gse));
}

/* Raw big-endian 64-bit payload; warns and returns 0 on a length mismatch. */
static inline __be64 kvmppc_gse_get_be64(const struct kvmppc_gs_elem *gse)
{
	if (WARN_ON(kvmppc_gse_len(gse) != sizeof(__be64)))
		return 0;
	return *(__be64 *)kvmppc_gse_data(gse);
}

/* 64-bit payload converted to host byte order. */
static inline u64 kvmppc_gse_get_u64(const struct kvmppc_gs_elem *gse)
{
	return be64_to_cpu(kvmppc_gse_get_be64(gse));
}
/*
 * Decode a 128-bit vector element into *@v. Without CONFIG_VSX only the
 * FP half is restored.
 *
 * Fix: on a payload-length mismatch the function now returns after
 * zeroing *@v. Previously it fell through, read sizeof(__vector128)
 * bytes from an element known to be a different size (potentially out
 * of bounds), and overwrote the zero value it had just stored.
 */
static inline void kvmppc_gse_get_vector128(const struct kvmppc_gs_elem *gse,
					    vector128 *v)
{
	union {
		__vector128 v;
		u64 dw[2];
	} u = { 0 };
	__be64 *src;

	if (WARN_ON(kvmppc_gse_len(gse) != sizeof(__vector128))) {
		*v = u.v;	/* malformed element: yield a zero vector */
		return;
	}

	src = (__be64 *)kvmppc_gse_data(gse);
	u.dw[TS_FPROFFSET] = be64_to_cpu(src[0]);
#ifdef CONFIG_VSX
	u.dw[TS_VSRLOWOFFSET] = be64_to_cpu(src[1]);
#endif
	*v = u.v;
}
/* Per-identifier bitmap operations (implemented in guest-state-buffer.c) */
bool kvmppc_gsbm_test(struct kvmppc_gs_bitmap *gsbm, u16 iden);
void kvmppc_gsbm_set(struct kvmppc_gs_bitmap *gsbm, u16 iden);
void kvmppc_gsbm_clear(struct kvmppc_gs_bitmap *gsbm, u16 iden);
/* Next set identifier after @prev; 0 when exhausted */
u16 kvmppc_gsbm_next(struct kvmppc_gs_bitmap *gsbm, u16 prev);

/* Clear every identifier bit. */
static inline void kvmppc_gsbm_zero(struct kvmppc_gs_bitmap *gsbm)
{
	bitmap_zero(gsbm->bitmap, KVMPPC_GSE_IDEN_COUNT);
}

/* Set every identifier bit except bit 0 (KVMPPC_GSID_BLANK). */
static inline void kvmppc_gsbm_fill(struct kvmppc_gs_bitmap *gsbm)
{
	bitmap_fill(gsbm->bitmap, KVMPPC_GSE_IDEN_COUNT);
	clear_bit(0, gsbm->bitmap);
}

/* Iterate over every set identifier; relies on 0 never being set */
#define kvmppc_gsbm_for_each(gsbm, iden) \
	for (iden = kvmppc_gsbm_next(gsbm, 0); iden != 0; \
	     iden = kvmppc_gsbm_next(gsbm, iden))
/* Record element @gse under identifier @iden in the parser */
void kvmppc_gsp_insert(struct kvmppc_gs_parser *gsp, u16 iden,
		       struct kvmppc_gs_elem *gse);
/* Element previously recorded for @iden, or NULL */
struct kvmppc_gs_elem *kvmppc_gsp_lookup(struct kvmppc_gs_parser *gsp,
					 u16 iden);

/* Iterate over every (identifier, element) pair recorded in the parser */
#define kvmppc_gsp_for_each(gsp, iden, gse) \
	for (iden = kvmppc_gsbm_next(&(gsp)->iterator, 0), \
	     gse = kvmppc_gsp_lookup((gsp), iden); \
	     iden != 0; iden = kvmppc_gsbm_next(&(gsp)->iterator, iden), \
	     gse = kvmppc_gsp_lookup((gsp), iden))

/* Iterate over every identifier included in a message's bitmap */
#define kvmppc_gsm_for_each(gsm, iden) \
	for (iden = kvmppc_gsbm_next(&gsm->bitmap, 0); iden != 0; \
	     iden = kvmppc_gsbm_next(&gsm->bitmap, iden))
/* Message lifetime and serialization (implemented in guest-state-buffer.c) */
/* NOTE(review): parameter named "mgs" is likely a typo for "gsm" */
int kvmppc_gsm_init(struct kvmppc_gs_msg *mgs, struct kvmppc_gs_msg_ops *ops,
		    void *data, unsigned long flags);
struct kvmppc_gs_msg *kvmppc_gsm_new(struct kvmppc_gs_msg_ops *ops, void *data,
				     unsigned long flags, gfp_t gfp_flags);
void kvmppc_gsm_free(struct kvmppc_gs_msg *gsm);
size_t kvmppc_gsm_size(struct kvmppc_gs_msg *gsm);
int kvmppc_gsm_fill_info(struct kvmppc_gs_msg *gsm, struct kvmppc_gs_buff *gsb);
int kvmppc_gsm_refresh_info(struct kvmppc_gs_msg *gsm,
			    struct kvmppc_gs_buff *gsb);

/* Mark identifier @iden as part of the message. */
static inline void kvmppc_gsm_include(struct kvmppc_gs_msg *gsm, u16 iden)
{
	kvmppc_gsbm_set(&gsm->bitmap, iden);
}

/* True if identifier @iden is part of the message. */
static inline bool kvmppc_gsm_includes(struct kvmppc_gs_msg *gsm, u16 iden)
{
	return kvmppc_gsbm_test(&gsm->bitmap, iden);
}

/* Include every identifier (except KVMPPC_GSID_BLANK). */
static inline void kvmppc_gsm_include_all(struct kvmppc_gs_msg *gsm)
{
	kvmppc_gsbm_fill(&gsm->bitmap);
}

/* Remove every identifier from the message. */
static inline void kvmppc_gsm_reset(struct kvmppc_gs_msg *gsm)
{
	kvmppc_gsbm_zero(&gsm->bitmap);
}
/*
 * Fetch the message's elements from the hypervisor: reset the buffer,
 * serialize the request, receive, then deserialize the reply back into
 * the message's data. Returns 0 on success or a negative error code
 * from the first stage that fails.
 */
static inline int kvmppc_gsb_receive_data(struct kvmppc_gs_buff *gsb,
					  struct kvmppc_gs_msg *gsm)
{
	int rc;

	kvmppc_gsb_reset(gsb);

	rc = kvmppc_gsm_fill_info(gsm, gsb);
	if (rc >= 0) {
		rc = kvmppc_gsb_recv(gsb, gsm->flags);
		if (rc >= 0) {
			rc = kvmppc_gsm_refresh_info(gsm, gsb);
			if (rc >= 0)
				rc = 0;
		}
	}
	return rc;
}
/*
 * Receive a single element: temporarily include @iden in the message,
 * fetch it, then clear the message's identifier set again on success.
 */
static inline int kvmppc_gsb_receive_datum(struct kvmppc_gs_buff *gsb,
					   struct kvmppc_gs_msg *gsm, u16 iden)
{
	int rc;

	kvmppc_gsm_include(gsm, iden);
	rc = kvmppc_gsb_receive_data(gsb, gsm);
	if (rc >= 0) {
		kvmppc_gsm_reset(gsm);
		rc = 0;
	}
	return rc;
}
/*
 * Push the message's elements to the hypervisor: reset the buffer,
 * serialize the message into it, then send. Returns the result of the
 * send, or the (negative) serialization error.
 */
static inline int kvmppc_gsb_send_data(struct kvmppc_gs_buff *gsb,
				       struct kvmppc_gs_msg *gsm)
{
	int rc;

	kvmppc_gsb_reset(gsb);
	rc = kvmppc_gsm_fill_info(gsm, gsb);
	if (rc < 0)
		return rc;

	return kvmppc_gsb_send(gsb, gsm->flags);
}
/*
 * Send a single element: temporarily include @iden in the message,
 * send it, then clear the message's identifier set again on success.
 */
static inline int kvmppc_gsb_send_datum(struct kvmppc_gs_buff *gsb,
					struct kvmppc_gs_msg *gsm, u16 iden)
{
	int rc;

	kvmppc_gsm_include(gsm, iden);
	rc = kvmppc_gsb_send_data(gsb, gsm);
	if (rc >= 0) {
		kvmppc_gsm_reset(gsm);
		rc = 0;
	}
	return rc;
}
#endif