smi
PMU_EVENT_ATTR_STRING(smi, attr_smi, "event=0x04");
PMU_EVENT_GROUP(events, smi);
} smi;
events->smi.smm = is_smm(vcpu);
events->smi.pending = vcpu->arch.smi_pending;
events->smi.smm_inside_nmi = !!(vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK);
events->smi.latched_init = kvm_lapic_latched_init(vcpu);
if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) {
kvm_smm_changed(vcpu, events->smi.smm);
vcpu->arch.smi_pending = events->smi.pending;
if (events->smi.smm) {
if (events->smi.smm_inside_nmi)
if (events->smi.smm || events->smi.pending ||
events->smi.smm_inside_nmi)
if (events->smi.latched_init)
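/*
 * Minimal, self-contained sketch (not the KVM code itself) of the check a
 * few lines above: "!!(flags & MASK)" collapses a mask bit to 0/1 so it can
 * be compared against a boolean field, and the mode-change handler runs only
 * on a real transition.  All demo_* names below are hypothetical.
 */
#include <stdbool.h>

#define DEMO_HF_SMM_MASK	(1u << 1)

struct demo_vcpu {
	unsigned int hflags;		/* DEMO_HF_SMM_MASK tracks SMM mode */
	bool smi_pending;
};

static void demo_smm_changed(struct demo_vcpu *v, bool entering_smm)
{
	if (entering_smm)
		v->hflags |= DEMO_HF_SMM_MASK;
	else
		v->hflags &= ~DEMO_HF_SMM_MASK;
}

static void demo_set_smi_state(struct demo_vcpu *v, bool smm, bool pending)
{
	/* Normalise the flag bit to 0/1 before comparing it with 'smm'. */
	if (!!(v->hflags & DEMO_HF_SMM_MASK) != smm)
		demo_smm_changed(v, smm);

	v->smi_pending = pending;
}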
smi->cur_msg = msg;
spin_unlock_irqrestore(&smi->msg_lock, flags);
spin_unlock_irqrestore(&smi->msg_lock, flags);
static int ipmi_powernv_recv(struct ipmi_smi_powernv *smi)
smi->interface_id);
spin_lock_irqsave(&smi->msg_lock, flags);
if (!smi->cur_msg) {
spin_unlock_irqrestore(&smi->msg_lock, flags);
msg = smi->cur_msg;
opal_msg = smi->opal_msg;
rc = opal_ipmi_recv(smi->interface_id, opal_msg, &size);
spin_unlock_irqrestore(&smi->msg_lock, flags);
smi->cur_msg = NULL;
spin_unlock_irqrestore(&smi->msg_lock, flags);
send_error_reply(smi, msg, IPMI_ERR_UNSPECIFIED);
spin_unlock_irqrestore(&smi->msg_lock, flags);
spin_unlock_irqrestore(&smi->msg_lock, flags);
smi->cur_msg = NULL;
spin_unlock_irqrestore(&smi->msg_lock, flags);
ipmi_smi_msg_received(smi->intf, msg);
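/*
 * Hedged sketch (not the powernv driver itself) of the receive-path locking
 * visible above: the in-flight message is claimed and cur_msg cleared under
 * msg_lock, but the message is handed to the IPMI core only after the lock
 * is dropped, because the core may immediately submit the next request.
 * struct demo_smi is a hypothetical stand-in; the firmware read that fills
 * msg->rsp (opal_ipmi_recv() in the real driver) is omitted here.
 */
#include <linux/spinlock.h>
#include <linux/ipmi_smi.h>

struct demo_smi {
	struct ipmi_smi *intf;
	spinlock_t msg_lock;
	struct ipmi_smi_msg *cur_msg;
};

static int demo_recv(struct demo_smi *smi)
{
	struct ipmi_smi_msg *msg;
	unsigned long flags;

	spin_lock_irqsave(&smi->msg_lock, flags);

	if (!smi->cur_msg) {
		/* Spurious notification: nothing is in flight. */
		spin_unlock_irqrestore(&smi->msg_lock, flags);
		return 0;
	}

	msg = smi->cur_msg;
	smi->cur_msg = NULL;		/* the slot is free for the next send */
	spin_unlock_irqrestore(&smi->msg_lock, flags);

	/* Response bytes would be filled into msg->rsp before this point. */
	ipmi_smi_msg_received(smi->intf, msg);
	return 0;
}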
struct ipmi_smi_powernv *smi = send_info;
ipmi_powernv_recv(smi);
struct ipmi_smi_powernv *smi = data;
ipmi_powernv_recv(smi);
struct ipmi_smi_powernv *smi = dev_get_drvdata(&pdev->dev);
ipmi_unregister_smi(smi->intf);
free_irq(smi->irq, smi);
irq_dispose_mapping(smi->irq);
struct ipmi_smi_powernv *smi = send_info;
smi->intf = intf;
static void send_error_reply(struct ipmi_smi_powernv *smi, struct ipmi_smi_msg *msg, u8 completion_code)
ipmi_smi_msg_received(smi->intf, msg);
struct ipmi_smi_powernv *smi = send_info;
spin_lock_irqsave(&smi->msg_lock, flags);
if (smi->cur_msg) {
opal_msg = smi->opal_msg;
smi->interface_id, opal_msg, size);
rc = opal_ipmi_send(smi->interface_id, opal_msg, size);
struct smi_info *smi = send_info;
data->addr_src = smi->io.addr_source;
data->dev = smi->io.dev;
data->addr_info = smi->io.addr_info;
get_device(smi->io.dev);
#define smi_inc_stat(smi, stat) \
atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
#define smi_get_stat(smi, stat) \
((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
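/*
 * Minimal usage sketch for the stat macros defined just above: the
 * "SI_STAT_ ## stat" token pasting lets callers name a counter without
 * spelling out the enum constant.  The enum values and struct layout below
 * are hypothetical stand-ins for the driver's real definitions; only the
 * two macros above are relied on as-is.
 */
#include <linux/atomic.h>

enum demo_si_stat {
	SI_STAT_short_timeouts,
	SI_STAT_long_timeouts,
	SI_NUM_STATS,
};

struct demo_smi_info {
	atomic_t stats[SI_NUM_STATS];
};

static unsigned int demo_note_timeout(struct demo_smi_info *smi)
{
	/* Expands to atomic_inc(&(smi)->stats[SI_STAT_short_timeouts]); */
	smi_inc_stat(smi, short_timeouts);

	/* Expands to atomic_read() on the same counter. */
	return smi_get_stat(smi, short_timeouts);
}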
static int try_smi_init(struct smi_info *smi);
unsigned int (*init_data)(struct si_sm_data *smi, struct si_sm_io *io);
int (*start_transaction)(struct si_sm_data *smi, unsigned char *data, unsigned int size);
int (*get_result)(struct si_sm_data *smi, unsigned char *data, unsigned int length);
enum si_sm_result (*event)(struct si_sm_data *smi, long time);
int (*detect)(struct si_sm_data *smi);
void (*cleanup)(struct si_sm_data *smi);
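/*
 * Hedged sketch of how the function pointers above fit together: each
 * low-level IPMI state machine exports one table of these operations and
 * the core drives whichever table matched the hardware.  The struct name,
 * the enum values (a subset), and the demo_* helper are illustrative, not
 * copied from the driver.
 */
struct si_sm_data;
struct si_sm_io;

enum si_sm_result {
	SI_SM_CALL_WITHOUT_DELAY,	/* run the state machine again now */
	SI_SM_CALL_WITH_DELAY,		/* run it again after a short delay */
	SI_SM_TRANSACTION_COMPLETE,	/* a response is ready to collect */
	SI_SM_IDLE,			/* nothing in flight */
	SI_SM_HOSED,			/* the hardware wedged */
};

struct demo_si_sm_handlers {
	unsigned int (*init_data)(struct si_sm_data *smi, struct si_sm_io *io);
	int (*start_transaction)(struct si_sm_data *smi,
				 unsigned char *data, unsigned int size);
	int (*get_result)(struct si_sm_data *smi,
			  unsigned char *data, unsigned int length);
	enum si_sm_result (*event)(struct si_sm_data *smi, long time);
	int (*detect)(struct si_sm_data *smi);
	void (*cleanup)(struct si_sm_data *smi);
};

/* Busy-poll one request to completion; a real caller bounds the loop. */
static int demo_run_transaction(const struct demo_si_sm_handlers *ops,
				struct si_sm_data *sm,
				unsigned char *req, unsigned int req_len,
				unsigned char *rsp, unsigned int rsp_len)
{
	enum si_sm_result res;

	if (ops->start_transaction(sm, req, req_len))
		return -1;

	do {
		res = ops->event(sm, 0);	/* 0: no time has elapsed */
	} while (res != SI_SM_TRANSACTION_COMPLETE && res != SI_SM_HOSED);

	if (res == SI_SM_HOSED)
		return -1;

	return ops->get_result(sm, rsp, rsp_len);
}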
DEFINE_CLK_RPM(smi, QCOM_RPM_SMI_CLK);
struct mtk_smi smi;
dev_err(larb->smi.dev, "sleep ctrl is not ready(0x%x).\n", tmp);
static int mtk_smi_dts_clk_init(struct device *dev, struct mtk_smi *smi,
smi->clks[i].id = clks[i];
ret = devm_clk_bulk_get(dev, clk_nr_required, smi->clks);
smi->clks[i].id = clks[i];
smi->clks + clk_nr_required);
smi->clk_num = clk_nr_required + clk_nr_optional;
ret = mtk_smi_dts_clk_init(dev, &larb->smi, mtk_smi_larb_clks,
larb->smi.dev = dev;
ret = clk_bulk_prepare_enable(larb->smi.clk_num, larb->smi.clks);
clk_bulk_disable_unprepare(larb->smi.clk_num, larb->smi.clks);
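/*
 * Hedged sketch of the clk_bulk pattern used above: the clock names are
 * filled into a clk_bulk_data array once at probe time, after which the
 * whole set is gated and ungated with single calls.  The two clock names
 * and the demo_* helpers are assumptions, not the driver's exact list.
 */
#include <linux/clk.h>
#include <linux/device.h>

#define DEMO_NR_CLKS	2

struct demo_smi {
	struct clk_bulk_data clks[DEMO_NR_CLKS];
};

static int demo_smi_clk_init(struct device *dev, struct demo_smi *smi)
{
	smi->clks[0].id = "apb";
	smi->clks[1].id = "smi";

	/* Looks up all clocks at once; devm_ releases them on detach. */
	return devm_clk_bulk_get(dev, DEMO_NR_CLKS, smi->clks);
}

static int demo_smi_power_on(struct demo_smi *smi)
{
	return clk_bulk_prepare_enable(DEMO_NR_CLKS, smi->clks);
}

static void demo_smi_power_off(struct demo_smi *smi)
{
	clk_bulk_disable_unprepare(DEMO_NR_CLKS, smi->clks);
}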
struct sg_mapping_iter *smi = &host->smi;
if (smi->consumed >= smi->length) {
if (!sg_miter_next(smi))
smi->consumed = 0;
while (smi->consumed < smi->length && shift >= 0) {
((u8 *)smi->addr)[smi->consumed] = (dat >> shift) & 0xff;
smi->consumed++;
sg_miter_stop(smi);
sg_miter_start(&host->smi, mrq->data->sg, mrq->data->sg_len,
struct sg_mapping_iter *smi = &host->smi;
sg_miter_start(smi, mrq->data->sg, mrq->data->sg_len, SG_MITER_FROM_SG);
if (smi->consumed >= smi->length) {
if (!sg_miter_next(smi))
smi->consumed = 0;
while (smi->consumed < smi->length && shift >= 0) {
dat |= (u64)((u8 *)smi->addr)[smi->consumed] << shift;
smi->consumed++;
sg_miter_stop(smi);
struct sg_mapping_iter smi;
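/*
 * Hedged, self-contained sketch of the sg_mapping_iter usage above: the
 * iterator maps one scatterlist segment at a time, the caller records how
 * much of the mapped window it used via .consumed, and sg_miter_stop()
 * releases the final mapping.  demo_sg_copy_out() is hypothetical, not a
 * function from the host driver.
 */
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/minmax.h>

/* Copy up to buflen bytes out of a scatterlist into a flat buffer. */
static size_t demo_sg_copy_out(struct scatterlist *sgl, unsigned int nents,
			       u8 *buf, size_t buflen)
{
	struct sg_mapping_iter miter;
	size_t copied = 0;

	sg_miter_start(&miter, sgl, nents, SG_MITER_FROM_SG);

	while (copied < buflen && sg_miter_next(&miter)) {
		size_t len = min(miter.length, buflen - copied);

		memcpy(buf + copied, miter.addr, len);
		miter.consumed = len;	/* how much of this window was used */
		copied += len;
	}

	sg_miter_stop(&miter);
	return copied;
}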
NPCM8XX_SFUNC(smi);
NPCM8XX_MKFUNC(smi),
NPCM8XX_PINCFG(170, smi, MFSEL1, 22, smb21, MFSEL5, 29, none, NONE, 0, none, NONE, 0, none, NONE, 0, 0),
NPCM8XX_GRP(smi), \
WPCM450_GRP(smi), \
WPCM450_SFUNC(smi);
WPCM450_MKFUNC(smi),
static int smi_spi_probe(struct platform_device *pdev, struct smi *smi,
smi->spi_devs = devm_kcalloc(dev, count, sizeof(*smi->spi_devs), GFP_KERNEL);
if (!smi->spi_devs)
smi->spi_devs[i] = spi_dev;
smi->spi_num++;
if (smi->spi_num < count) {
dev_info(dev, "Instantiated %d SPI devices.\n", smi->spi_num);
smi_devs_unregister(smi);
static int smi_i2c_probe(struct platform_device *pdev, struct smi *smi,
smi->i2c_devs = devm_kcalloc(dev, count, sizeof(*smi->i2c_devs), GFP_KERNEL);
if (!smi->i2c_devs)
smi->i2c_devs[i] = i2c_acpi_new_device(dev, i, &board_info);
if (IS_ERR(smi->i2c_devs[i])) {
ret = dev_err_probe(dev, PTR_ERR(smi->i2c_devs[i]),
smi->i2c_num++;
if (smi->i2c_num < count) {
dev_info(dev, "Instantiated %d I2C devices.\n", smi->i2c_num);
smi_devs_unregister(smi);
struct smi *smi;
smi = devm_kzalloc(dev, sizeof(*smi), GFP_KERNEL);
if (!smi)
platform_set_drvdata(pdev, smi);
return smi_i2c_probe(pdev, smi, node->instances);
return smi_spi_probe(pdev, smi, node->instances);
ret = smi_i2c_probe(pdev, smi, node->instances);
return smi_spi_probe(pdev, smi, node->instances);
struct smi *smi = platform_get_drvdata(pdev);
smi_devs_unregister(smi);
static void smi_devs_unregister(struct smi *smi)
while (smi->i2c_num--)
i2c_unregister_device(smi->i2c_devs[smi->i2c_num]);
while (smi->spi_num--)
spi_unregister_device(smi->spi_devs[smi->spi_num]);
} smi;
int smi;
if (sysfs__read_int(FREEZE_ON_SMI_PATH, &smi) < 0) {
if (!smi) {
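/*
 * Hedged sketch of the checks above, as done for perf's --smi-cost mode:
 * read the current freeze_on_smi value via the tools/lib/api sysfs helper
 * and enable it only when it is not already set, remembering to restore it
 * afterwards.  The path macro and the smi_reset flag are assumptions here.
 */
#include <stdbool.h>
#include <stdio.h>
#include <api/fs/fs.h>		/* sysfs__read_int(), sysfs__write_int() */

#define FREEZE_ON_SMI_PATH	"devices/cpu/freeze_on_smi"

static bool smi_reset;

static int demo_enable_freeze_on_smi(void)
{
	int smi;

	if (sysfs__read_int(FREEZE_ON_SMI_PATH, &smi) < 0) {
		fprintf(stderr, "freeze_on_smi is not supported.\n");
		return -1;
	}

	if (!smi) {
		if (sysfs__write_int(FREEZE_ON_SMI_PATH, 1) < 0) {
			fprintf(stderr, "failed to set freeze_on_smi\n");
			return -1;
		}
		smi_reset = true;	/* write 0 back when finished */
	}

	return 0;
}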
events.smi.pending = 1;