#include "ice.h"
#include "ice_lib.h"
#include "ice_trace.h"
/* Human-readable names for the pins exposed through the PTP pin_config
 * interface in the non-DPLL configurations. Indexed by the name field of
 * struct ice_ptp_pin_desc entries below.
 */
static const char ice_pin_names[][64] = {
"SDP0",
"SDP1",
"SDP2",
"SDP3",
"TIME_SYNC",
"1PPS"
};
/* Pin descriptors for E82x devices.
 * Layout per entry (as used elsewhere in this file):
 *   { name index, { EXTTS gpio or -1, PEROUT gpio or -1 },
 *     { EXTTS delay, PEROUT propagation delay } }
 * gpio[0] is consumed by the external-timestamp path and gpio[1] by the
 * periodic-output path; delay[0] is subtracted from captured EXTTS events
 * and delay[1] is compensated when programming periodic output.
 */
static const struct ice_ptp_pin_desc ice_pin_desc_e82x[] = {
{ TIME_SYNC, { 4, -1 }, { 0, 0 }},
{ ONE_PPS, { -1, 5 }, { 0, 11 }},
};
/* Pin descriptors for E825-C devices: four bidirectional SDP pins plus
 * dedicated TIME_SYNC (input only) and 1PPS (output only) pins.
 * Entry layout: { name index, { EXTTS gpio, PEROUT gpio }, { delays } }.
 */
static const struct ice_ptp_pin_desc ice_pin_desc_e825c[] = {
{ SDP0, { 0, 0 }, { 15, 14 }},
{ SDP1, { 1, 1 }, { 15, 14 }},
{ SDP2, { 2, 2 }, { 15, 14 }},
{ SDP3, { 3, 3 }, { 15, 14 }},
{ TIME_SYNC, { 4, -1 }, { 11, 0 }},
{ ONE_PPS, { -1, 5 }, { 0, 9 }},
};
/* Pin descriptors for E810 devices: four bidirectional SDP pins and an
 * output-only 1PPS pin. Entry layout matches ice_pin_desc_e82x.
 */
static const struct ice_ptp_pin_desc ice_pin_desc_e810[] = {
{ SDP0, { 0, 0 }, { 0, 1 }},
{ SDP1, { 1, 1 }, { 0, 1 }},
{ SDP2, { 2, 2 }, { 0, 1 }},
{ SDP3, { 3, 3 }, { 0, 1 }},
{ ONE_PPS, { -1, 5 }, { 0, 1 }},
};
/* Pin names used when the device is in a DPLL-enabled configuration. */
static const char ice_pin_names_dpll[][64] = {
"SDP20",
"SDP21",
"SDP22",
"SDP23",
};
/* Pin descriptors for the DPLL configuration: each SDP is either input
 * (EXTTS) only or output (PEROUT) only, alternating.
 * Entry layout matches ice_pin_desc_e82x.
 */
static const struct ice_ptp_pin_desc ice_pin_desc_dpll[] = {
{ SDP0, { -1, 0 }, { 0, 1 }},
{ SDP1, { 1, -1 }, { 0, 0 }},
{ SDP2, { -1, 2 }, { 0, 1 }},
{ SDP3, { 3, -1 }, { 0, 0 }},
};
/**
 * ice_get_ctrl_pf - Look up the control PF for this device
 * @pf: any PF of the adapter
 *
 * Return: the adapter's control PF, or NULL when no adapter is attached.
 */
static struct ice_pf *ice_get_ctrl_pf(struct ice_pf *pf)
{
	if (!pf->adapter)
		return NULL;

	return pf->adapter->ctrl_pf;
}
/**
 * ice_get_ctrl_ptp - Get the PTP state of the control PF
 * @pf: any PF of the adapter
 *
 * Return: pointer to the control PF's ice_ptp, or NULL when there is no
 * control PF.
 */
static struct ice_ptp *ice_get_ctrl_ptp(struct ice_pf *pf)
{
	struct ice_pf *ctrl_pf = ice_get_ctrl_pf(pf);

	if (!ctrl_pf)
		return NULL;

	return &ctrl_pf->ptp;
}
static int ice_ptp_find_pin_idx(struct ice_pf *pf, enum ptp_pin_function func,
unsigned int chan)
{
const struct ptp_clock_info *info = &pf->ptp.info;
int i;
for (i = 0; i < info->n_pins; i++) {
if (info->pin_config[i].func == func &&
info->pin_config[i].chan == chan)
return i;
}
return -1;
}
/* Configure the Tx timestamp interrupt according to the PF's interrupt
 * mode: ALL enables the per-PF timestamp mask for every index, NONE clears
 * it, and SELF enables the OICR Tx timestamp cause only when Tx
 * timestamping is currently requested. Finally the OICR enable register is
 * updated to match.
 */
static void ice_ptp_cfg_tx_interrupt(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
bool enable;
u32 val;
switch (pf->ptp.tx_interrupt_mode) {
case ICE_PTP_TX_INTERRUPT_ALL:
/* Enable all five timestamp index bits for this PF */
wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x1f);
enable = true;
break;
case ICE_PTP_TX_INTERRUPT_NONE:
wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x0);
enable = false;
break;
case ICE_PTP_TX_INTERRUPT_SELF:
default:
/* Follow the current hwtstamp Tx configuration */
enable = pf->ptp.tstamp_config.tx_type == HWTSTAMP_TX_ON;
break;
}
/* Read-modify-write the OICR enable bit for Tx timestamps */
val = rd32(hw, PFINT_OICR_ENA);
if (enable)
val |= PFINT_OICR_TSYN_TX_M;
else
val &= ~PFINT_OICR_TSYN_TX_M;
wr32(hw, PFINT_OICR_ENA, val);
}
/**
 * ice_set_rx_tstamp - Enable or disable Rx timestamping on the main VSI
 * @pf: PF private structure
 * @on: true to mark every Rx ring for timestamping, false to clear it
 *
 * Silently returns when the main VSI or its rings are not available.
 */
static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
{
	struct ice_vsi *vsi = ice_get_main_vsi(pf);
	u16 q;

	if (!vsi || !vsi->rx_rings)
		return;

	ice_for_each_rxq(vsi, q) {
		struct ice_rx_ring *ring = vsi->rx_rings[q];

		if (ring)
			ring->ptp_rx = on;
	}
}
/* Disable all timestamping: mask the Tx timestamp interrupt cause in OICR
 * and turn Rx timestamping off on the main VSI's rings.
 */
static void ice_ptp_disable_timestamp_mode(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
u32 val;
val = rd32(hw, PFINT_OICR_ENA);
val &= ~PFINT_OICR_TSYN_TX_M;
wr32(hw, PFINT_OICR_ENA, val);
ice_set_rx_tstamp(pf, false);
}
/**
 * ice_ptp_restore_timestamp_mode - Re-apply the cached timestamp config
 * @pf: PF private structure
 *
 * Reconfigures the Tx interrupt from pf->ptp.tx_interrupt_mode, restores
 * Rx timestamping from the cached rx_filter, then acknowledges any stale
 * Tx timestamp interrupt and flushes the writes.
 */
void ice_ptp_restore_timestamp_mode(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
bool enable_rx;
ice_ptp_cfg_tx_interrupt(pf);
enable_rx = pf->ptp.tstamp_config.rx_filter == HWTSTAMP_FILTER_ALL;
ice_set_rx_tstamp(pf, enable_rx);
/* Clear any pending Tx timestamp interrupt left from the old mode */
wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
ice_flush(hw);
}
/**
 * ice_ptp_read_src_clk_reg - Read the source clock (PHC) registers
 * @pf: PF private structure
 * @sts: optional location for system timestamps bracketing the read
 *
 * Reads the GLTSYN time registers of the source timer. On E830 a single
 * atomic 64-bit read suffices; otherwise the low word is read, then high,
 * then low again to detect a rollover between the two reads — the exact
 * read order here must be preserved. The adapter-wide GLTSYN time lock is
 * held (via cleanup guard) for the duration of the read sequence.
 *
 * Return: current PHC time in nanoseconds.
 */
u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf,
struct ptp_system_timestamp *sts)
{
struct ice_hw *hw = &pf->hw;
u32 hi, lo, lo2;
u8 tmr_idx;
/* Always read time through the primary device's registers */
if (!ice_is_primary(hw))
hw = ice_get_primary_hw(pf);
tmr_idx = ice_get_ptp_src_clock_index(hw);
guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock);
ptp_read_system_prets(sts);
if (hw->mac_type == ICE_MAC_E830) {
/* E830 exposes the full 64-bit time in one register read */
u64 clk_time = rd64(hw, E830_GLTSYN_TIME_L(tmr_idx));
ptp_read_system_postts(sts);
return clk_time;
}
lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
ptp_read_system_postts(sts);
hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
lo2 = rd32(hw, GLTSYN_TIME_L(tmr_idx));
if (lo2 < lo) {
/* The low word rolled over between reads; redo the sequence */
ptp_read_system_prets(sts);
lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
ptp_read_system_postts(sts);
hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
}
return ((u64)hi << 32) | lo;
}
/**
 * ice_ptp_extend_32b_ts - Extend a 32-bit timestamp using the cached PHC time
 * @cached_phc_time: recently cached full 64-bit PHC time
 * @in_tstamp: 32-bit timestamp captured by hardware
 *
 * Treats the captured value as the low 32 bits of a time near the cached
 * PHC time. If the unsigned difference exceeds half the 32-bit range, the
 * capture is assumed to lie in the past relative to the cache; otherwise
 * in the future.
 *
 * Return: the extended 64-bit timestamp in nanoseconds.
 */
static u64 ice_ptp_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp)
{
	u32 phc_lo = (u32)cached_phc_time;
	u32 fwd = in_tstamp - phc_lo;

	if (fwd > (U32_MAX / 2)) {
		/* Capture precedes the cached time */
		u32 back = phc_lo - in_tstamp;

		return cached_phc_time - back;
	}

	return cached_phc_time + fwd;
}
/**
 * ice_ptp_extend_40b_ts - Extend a 40-bit hardware Tx timestamp
 * @pf: PF private structure
 * @in_tstamp: raw 40-bit timestamp (low 8 bits are sub-ns/valid bits)
 *
 * Discards the timestamp (returning 0 and bumping the discard counter)
 * when the cached PHC time is more than 2 seconds old, since the 32-bit
 * extension would then be ambiguous.
 *
 * Return: extended 64-bit nanosecond timestamp, or 0 when discarded.
 */
static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp)
{
	unsigned long cache_expiry =
		pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);

	if (time_is_before_jiffies(cache_expiry)) {
		pf->ptp.tx_hwtstamp_discarded++;
		return 0;
	}

	/* Strip the low byte, keep the 32-bit nanosecond field */
	return ice_ptp_extend_32b_ts(pf->ptp.cached_phc_time,
				     (in_tstamp >> 8) & GENMASK_ULL(31, 0));
}
static bool
ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx)
{
lockdep_assert_held(&tx->lock);
return tx->init && !tx->calibrating;
}
/**
 * ice_ptp_req_tx_single_tstamp - Request one Tx timestamp via the
 * low-latency interface
 * @tx: the Tx tracking structure for this port
 * @idx: index of the timestamp slot to request
 *
 * Drops the skb and frees its slot if it has been pending for more than
 * 2 seconds; otherwise issues a low-latency timestamp-read request to
 * firmware and records the index for the completion handler.
 *
 * Fixes a character-encoding corruption where "&params" had been mangled
 * into a pilcrow character, which would not compile.
 */
void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx)
{
	struct ice_e810_params *params;
	struct ice_ptp_port *ptp_port;
	unsigned long flags;
	struct sk_buff *skb;
	struct ice_pf *pf;

	if (!tx->init)
		return;

	ptp_port = container_of(tx, struct ice_ptp_port, tx);
	pf = ptp_port_to_pf(ptp_port);
	params = &pf->hw.ptp.phy.e810;

	/* Drop packets which have waited for more than 2 seconds */
	if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
		pf->ptp.tx_hwtstamp_timeouts++;
		skb = tx->tstamps[idx].skb;
		tx->tstamps[idx].skb = NULL;
		clear_bit(idx, tx->in_use);
		dev_kfree_skb_any(skb);
		return;
	}

	ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);

	spin_lock_irqsave(&params->atqbal_wq.lock, flags);

	params->atqbal_flags |= ATQBAL_FLAGS_INTR_IN_PROGRESS;

	/* Write TS index to read to the PF register so the FW can read it */
	wr32(&pf->hw, REG_LL_PROXY_H,
	     REG_LL_PROXY_H_TS_INTR_ENA | FIELD_PREP(REG_LL_PROXY_H_TS_IDX, idx) |
	     REG_LL_PROXY_H_EXEC);
	tx->last_ll_ts_idx_read = idx;

	spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);
}
/**
 * ice_ptp_complete_tx_single_tstamp - Complete a low-latency Tx timestamp
 * @tx: the Tx tracking structure for this port
 *
 * Reads the timestamp delivered by firmware for the index recorded in
 * tx->last_ll_ts_idx_read, extends it to 64 bits, and reports it to the
 * stack via skb_tstamp_tx(). The skb is dropped without a report when the
 * slot is stale, the firmware did not complete the request, or the raw
 * value matches the previously cached timestamp (no new capture).
 *
 * Fixes a character-encoding corruption where "&params" had been mangled
 * into a pilcrow character (three occurrences), which would not compile.
 */
void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx)
{
	struct skb_shared_hwtstamps shhwtstamps = {};
	u8 idx = tx->last_ll_ts_idx_read;
	struct ice_e810_params *params;
	struct ice_ptp_port *ptp_port;
	u64 raw_tstamp, tstamp;
	bool drop_ts = false;
	struct sk_buff *skb;
	unsigned long flags;
	struct device *dev;
	struct ice_pf *pf;
	u32 reg_ll_high;

	if (!tx->init || tx->last_ll_ts_idx_read < 0)
		return;

	ptp_port = container_of(tx, struct ice_ptp_port, tx);
	pf = ptp_port_to_pf(ptp_port);
	dev = ice_pf_to_dev(pf);
	params = &pf->hw.ptp.phy.e810;

	ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);

	spin_lock_irqsave(&params->atqbal_wq.lock, flags);

	if (!(params->atqbal_flags & ATQBAL_FLAGS_INTR_IN_PROGRESS))
		dev_dbg(dev, "%s: low latency interrupt request not in progress?\n",
			__func__);

	/* Read the low 32 bit value, then the status/high bits */
	raw_tstamp = rd32(&pf->hw, REG_LL_PROXY_L);
	reg_ll_high = rd32(&pf->hw, REG_LL_PROXY_H);

	/* Wake anyone waiting for the low-latency interface to be free */
	params->atqbal_flags &= ~ATQBAL_FLAGS_INTR_IN_PROGRESS;
	wake_up_locked(&params->atqbal_wq);

	spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);

	/* When the bit is cleared, the TS is ready in the register */
	if (reg_ll_high & REG_LL_PROXY_H_EXEC) {
		dev_err(ice_pf_to_dev(pf), "Failed to get the Tx tstamp - FW not ready");
		return;
	}

	/* High 8 bit value of the TS is on the bits 16:23 */
	raw_tstamp |= ((u64)FIELD_GET(REG_LL_PROXY_H_TS_HIGH, reg_ll_high)) << 32;

	/* Devices using this interface always verify the timestamp differs
	 * relative to the last cached timestamp value.
	 */
	if (raw_tstamp == tx->tstamps[idx].cached_tstamp)
		return;

	tx->tstamps[idx].cached_tstamp = raw_tstamp;
	clear_bit(idx, tx->in_use);
	skb = tx->tstamps[idx].skb;
	tx->tstamps[idx].skb = NULL;
	if (test_and_clear_bit(idx, tx->stale))
		drop_ts = true;

	if (!skb)
		return;

	if (drop_ts) {
		dev_kfree_skb_any(skb);
		return;
	}

	/* Extend the timestamp using cached PHC time */
	tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
	if (tstamp) {
		shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
		ice_trace(tx_tstamp_complete, skb, idx);
		pf->ptp.tx_hwtstamp_good++;
	}

	skb_tstamp_tx(skb, &shhwtstamps);
	dev_kfree_skb_any(skb);
}
/**
 * ice_ptp_process_tx_tstamp - Process pending Tx timestamps for a tracker
 * @tx: the Tx tracking structure for a port
 *
 * Walks the in_use bitmap, reads each ready timestamp from the PHY,
 * extends it and reports it to the stack. Entries are dropped (skb freed
 * without a report) on link-down, on 2-second timeout, on a stale mark,
 * or when the PHY reports an invalid value. The tracker state (bitmaps,
 * skb pointer, cached timestamp) is only mutated under tx->lock; the
 * ordering of the reads/updates below is deliberate and should not be
 * rearranged.
 */
static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
{
struct ice_ptp_port *ptp_port;
unsigned long flags;
u32 tstamp_good = 0;
struct ice_pf *pf;
struct ice_hw *hw;
u64 tstamp_ready;
bool link_up;
int err;
u8 idx;
ptp_port = container_of(tx, struct ice_ptp_port, tx);
pf = ptp_port_to_pf(ptp_port);
hw = &pf->hw;
if (!tx->init)
return;
/* Read the ready bitmap once up front when the PHY provides one */
if (tx->has_ready_bitmap) {
err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
if (err)
return;
}
link_up = ptp_port->link_up;
for_each_set_bit(idx, tx->in_use, tx->len) {
struct skb_shared_hwtstamps shhwtstamps = {};
u8 phy_idx = idx + tx->offset;
u64 raw_tstamp = 0, tstamp;
bool drop_ts = !link_up;
struct sk_buff *skb;
/* Drop packets which have waited for more than 2 seconds */
if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
drop_ts = true;
pf->ptp.tx_hwtstamp_timeouts++;
}
/* Not ready yet: skip the PHY read, but still flush if dropping */
if (tx->has_ready_bitmap &&
!(tstamp_ready & BIT_ULL(phy_idx))) {
if (drop_ts)
goto skip_ts_read;
continue;
}
ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);
err = ice_read_phy_tstamp(hw, tx->block, phy_idx, &raw_tstamp);
if (err && !drop_ts)
continue;
ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);
/* Without a ready bitmap, an unchanged raw value means no new
 * capture has occurred for this index.
 */
if (!drop_ts && !tx->has_ready_bitmap &&
raw_tstamp == tx->tstamps[idx].cached_tstamp)
continue;
if (!(raw_tstamp & ICE_PTP_TS_VALID))
drop_ts = true;
skip_ts_read:
spin_lock_irqsave(&tx->lock, flags);
if (!tx->has_ready_bitmap && raw_tstamp)
tx->tstamps[idx].cached_tstamp = raw_tstamp;
clear_bit(idx, tx->in_use);
skb = tx->tstamps[idx].skb;
tx->tstamps[idx].skb = NULL;
if (test_and_clear_bit(idx, tx->stale))
drop_ts = true;
spin_unlock_irqrestore(&tx->lock, flags);
/* It is possible the skb was already discarded */
if (!skb)
continue;
if (drop_ts) {
dev_kfree_skb_any(skb);
continue;
}
/* Extend the timestamp using cached PHC time */
tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
if (tstamp) {
shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
ice_trace(tx_tstamp_complete, skb, idx);
tstamp_good++;
}
skb_tstamp_tx(skb, &shhwtstamps);
dev_kfree_skb_any(skb);
}
pf->ptp.tx_hwtstamp_good += tstamp_good;
}
/**
 * ice_ptp_tx_tstamp_owner - Process Tx timestamps for all adapter ports
 * @pf: PF private structure of the clock owner
 *
 * Iterates the adapter's port list under the ports mutex and processes
 * each initialized Tx tracker.
 *
 * Note: the original also checked "!tx" — but tx is the address of an
 * embedded member (&port->tx) and can never be NULL, so that check was
 * dead code and has been removed.
 */
static void ice_ptp_tx_tstamp_owner(struct ice_pf *pf)
{
	struct ice_ptp_port *port;

	mutex_lock(&pf->adapter->ports.lock);
	list_for_each_entry(port, &pf->adapter->ports.ports, list_node) {
		struct ice_ptp_tx *tx = &port->tx;

		if (!tx->init)
			continue;

		ice_ptp_process_tx_tstamp(tx);
	}
	mutex_unlock(&pf->adapter->ports.lock);
}
/**
 * ice_ptp_alloc_tx_tracker - Allocate the Tx timestamp tracker arrays
 * @tx: tracker to initialize; tx->len must already be set
 *
 * Allocates the timestamp slot array and the in_use/stale bitmaps. On any
 * allocation failure everything is freed (kfree/bitmap_free accept NULL)
 * and -ENOMEM is returned. On success the tracker is marked initialized
 * and its lock is set up.
 */
static int
ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx)
{
unsigned long *in_use, *stale;
struct ice_tx_tstamp *tstamps;
tstamps = kzalloc_objs(*tstamps, tx->len);
in_use = bitmap_zalloc(tx->len, GFP_KERNEL);
stale = bitmap_zalloc(tx->len, GFP_KERNEL);
if (!tstamps || !in_use || !stale) {
kfree(tstamps);
bitmap_free(in_use);
bitmap_free(stale);
return -ENOMEM;
}
tx->tstamps = tstamps;
tx->in_use = in_use;
tx->stale = stale;
tx->init = 1;
/* -1 means no low-latency read is outstanding */
tx->last_ll_ts_idx_read = -1;
spin_lock_init(&tx->lock);
return 0;
}
/**
 * ice_ptp_flush_tx_tracker - Flush all outstanding Tx timestamp requests
 * @pf: PF private structure
 * @tx: tracker to flush
 *
 * Clears any PHY timestamp marked ready (unless a reset is ongoing, when
 * register access is unsafe), releases every in-flight skb, and clears
 * the in_use and stale bits under tx->lock. Each flushed entry bumps the
 * tx_hwtstamp_flushed counter.
 */
static void
ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
struct ice_hw *hw = &pf->hw;
unsigned long flags;
u64 tstamp_ready;
int err;
u8 idx;
err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
if (err) {
dev_dbg(ice_pf_to_dev(pf), "Failed to get the Tx tstamp ready bitmap for block %u, err %d\n",
tx->block, err);
/* Fall through and flush the software state anyway */
tstamp_ready = 0;
}
for_each_set_bit(idx, tx->in_use, tx->len) {
u8 phy_idx = idx + tx->offset;
struct sk_buff *skb;
/* In case this timestamp is ready, we need to clear it. */
if (!hw->reset_ongoing && (tstamp_ready & BIT_ULL(phy_idx)))
ice_clear_phy_tstamp(hw, tx->block, phy_idx);
spin_lock_irqsave(&tx->lock, flags);
skb = tx->tstamps[idx].skb;
tx->tstamps[idx].skb = NULL;
clear_bit(idx, tx->in_use);
clear_bit(idx, tx->stale);
spin_unlock_irqrestore(&tx->lock, flags);
/* Count the number of Tx timestamps flushed */
pf->ptp.tx_hwtstamp_flushed++;
/* Free the SKB after we've cleared the bit */
dev_kfree_skb_any(skb);
}
}
/**
 * ice_ptp_mark_tx_tracker_stale - Mark all in-flight timestamps as stale
 * @tx: tracker to mark
 *
 * ORs the in_use bitmap into the stale bitmap under tx->lock so that any
 * timestamp captured before this point will be dropped rather than
 * reported (used when the PHC time changes and old captures become
 * meaningless).
 */
static void
ice_ptp_mark_tx_tracker_stale(struct ice_ptp_tx *tx)
{
unsigned long flags;
spin_lock_irqsave(&tx->lock, flags);
bitmap_or(tx->stale, tx->stale, tx->in_use, tx->len);
spin_unlock_irqrestore(&tx->lock, flags);
}
/**
 * ice_ptp_flush_all_tx_tracker - Flush the Tx tracker of every port
 * @pf: PF private structure
 *
 * Walks the adapter's port list and flushes each port's Tx tracker.
 * NOTE(review): the list is walked without taking pf->adapter->ports.lock
 * here, unlike ice_ptp_tx_tstamp_owner — presumably callers guarantee the
 * list is stable at this point; confirm against call sites.
 */
static void
ice_ptp_flush_all_tx_tracker(struct ice_pf *pf)
{
struct ice_ptp_port *port;
list_for_each_entry(port, &pf->adapter->ports.ports, list_node)
ice_ptp_flush_tx_tracker(ptp_port_to_pf(port), &port->tx);
}
/**
 * ice_ptp_release_tx_tracker - Tear down a Tx timestamp tracker
 * @pf: PF private structure
 * @tx: tracker to release
 *
 * Marks the tracker uninitialized under its lock, waits for any running
 * interrupt handler to finish (synchronize_irq), flushes remaining
 * entries, then frees the slot array and bitmaps. The ordering matters:
 * init must be cleared and the IRQ quiesced before freeing memory.
 */
static void
ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
unsigned long flags;
spin_lock_irqsave(&tx->lock, flags);
tx->init = 0;
spin_unlock_irqrestore(&tx->lock, flags);
/* wait for potentially outstanding interrupt to complete */
synchronize_irq(pf->oicr_irq.virq);
ice_ptp_flush_tx_tracker(pf, tx);
kfree(tx->tstamps);
tx->tstamps = NULL;
bitmap_free(tx->in_use);
tx->in_use = NULL;
bitmap_free(tx->stale);
tx->stale = NULL;
tx->len = 0;
}
/**
 * ice_ptp_init_tx_e82x - Initialize a Tx tracker for an E82x port
 * @pf: PF private structure (unused here, kept for interface symmetry)
 * @tx: tracker to initialize
 * @port: logical port number
 *
 * E82x PHYs share timestamp blocks per quad, so the tracker addresses the
 * quad block with a per-port offset. E82x provides a ready bitmap.
 *
 * Return: 0 on success, negative errno on allocation failure.
 */
static int ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx,
				u8 port)
{
	tx->has_ready_bitmap = 1;
	tx->block = ICE_GET_QUAD_NUM(port);
	tx->len = INDEX_PER_PORT_E82X;
	tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X;

	return ice_ptp_alloc_tx_tracker(tx);
}
/**
 * ice_ptp_init_tx - Initialize a per-port Tx tracker (non-quad layouts)
 * @pf: PF private structure, used to detect the MAC type
 * @tx: tracker to initialize
 * @port: logical port number (used directly as the timestamp block)
 *
 * All MAC types except E810 expose a timestamp-ready bitmap.
 *
 * Return: 0 on success, negative errno on allocation failure.
 */
static int ice_ptp_init_tx(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
{
	tx->offset = 0;
	tx->block = port;
	tx->len = INDEX_PER_PORT;
	tx->has_ready_bitmap = pf->hw.mac_type != ICE_MAC_E810;

	return ice_ptp_alloc_tx_tracker(tx);
}
/**
 * ice_ptp_update_cached_phctime - Refresh the cached PHC time
 * @pf: PF private structure
 *
 * Reads the PHC and updates the PF-level cache plus the per-Rx-ring
 * cached copies used for Rx timestamp extension. Warns and counts a late
 * update when more than 2 seconds elapsed since the previous refresh.
 *
 * Return: 0 on success, -EAGAIN when ICE_CFG_BUSY prevented updating the
 * ring copies (the PF-level cache is still updated in that case).
 */
static int ice_ptp_update_cached_phctime(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
unsigned long update_before;
u64 systime;
int i;
update_before = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
if (pf->ptp.cached_phc_time &&
time_is_before_jiffies(update_before)) {
unsigned long time_taken = jiffies - pf->ptp.cached_phc_jiffies;
dev_warn(dev, "%u msecs passed between update to cached PHC time\n",
jiffies_to_msecs(time_taken));
pf->ptp.late_cached_phc_updates++;
}
/* Read the current PHC time */
systime = ice_ptp_read_src_clk_reg(pf, NULL);
/* Update the cached PHC time stored in the PF structure */
WRITE_ONCE(pf->ptp.cached_phc_time, systime);
WRITE_ONCE(pf->ptp.cached_phc_jiffies, jiffies);
if (test_and_set_bit(ICE_CFG_BUSY, pf->state))
return -EAGAIN;
ice_for_each_vsi(pf, i) {
struct ice_vsi *vsi = pf->vsi[i];
int j;
if (!vsi)
continue;
if (vsi->type != ICE_VSI_PF)
continue;
ice_for_each_rxq(vsi, j) {
if (!vsi->rx_rings[j])
continue;
WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
}
}
clear_bit(ICE_CFG_BUSY, pf->state);
return 0;
}
/**
 * ice_ptp_reset_cached_phctime - Reset cached PHC time after a clock jump
 * @pf: PF private structure
 *
 * Immediately refreshes the cached PHC time; if the rings are busy, a
 * retry is scheduled on the PTP kworker. Any Tx timestamps captured
 * against the old clock value are marked stale so they will be dropped.
 */
static void ice_ptp_reset_cached_phctime(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
int err;
err = ice_ptp_update_cached_phctime(pf);
if (err) {
/* Retry shortly; the rings were busy being reconfigured */
dev_warn(dev, "%s: ICE_CFG_BUSY, unable to immediately update cached PHC time\n",
__func__);
kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work,
msecs_to_jiffies(10));
}
ice_ptp_mark_tx_tracker_stale(&pf->ptp.port.tx);
}
/**
 * ice_ptp_write_init - Program the PHC with an absolute time
 * @pf: PF private structure
 * @ts: time to program, as a timespec64
 *
 * Return: result of ice_ptp_init_time() (0 on success).
 */
static int ice_ptp_write_init(struct ice_pf *pf, struct timespec64 *ts)
{
	return ice_ptp_init_time(&pf->hw, timespec64_to_ns(ts));
}
/**
 * ice_ptp_write_adj - Apply a signed nanosecond adjustment to the PHC
 * @pf: PF private structure
 * @adj: adjustment in nanoseconds
 *
 * Return: result of ice_ptp_adj_clock() (0 on success).
 */
static int ice_ptp_write_adj(struct ice_pf *pf, s32 adj)
{
	return ice_ptp_adj_clock(&pf->hw, adj);
}
/**
 * ice_base_incval - Get the nominal clock increment value
 * @pf: PF private structure
 *
 * Return: the hardware's base increment value, after logging it at debug
 * level.
 */
static u64 ice_base_incval(struct ice_pf *pf)
{
	u64 incval = ice_get_base_incval(&pf->hw);

	dev_dbg(ice_pf_to_dev(pf), "PTP: using base increment value of 0x%016llx\n",
		incval);

	return incval;
}
/**
 * ice_ptp_check_tx_fifo - Check whether the port's Tx FIFO has drained
 * @port: PTP port to check
 *
 * Reads the quad FIFO status register for the port's lane pair and checks
 * the FIFO-empty flag. A non-empty FIFO increments a retry counter; after
 * ICE_PTP_FIFO_NUM_CHECKS retries the quad's timestamp memory is reset
 * and the port is treated as OK. Once a port reaches FIFO_OK it is never
 * re-checked.
 *
 * Return: 0 when the FIFO is (or is considered) drained, -EAGAIN to retry,
 * or a negative error from the register read.
 */
static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port)
{
int offs = port->port_num % ICE_PORTS_PER_QUAD;
int quad = ICE_GET_QUAD_NUM(port->port_num);
struct ice_pf *pf;
struct ice_hw *hw;
u32 val, phy_sts;
int err;
pf = ptp_port_to_pf(port);
hw = &pf->hw;
if (port->tx_fifo_busy_cnt == FIFO_OK)
return 0;
/* Ports 0/1 share one status register, ports 2/3 the other */
if (offs == 0 || offs == 1)
err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO01_STATUS,
&val);
else
err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO23_STATUS,
&val);
if (err) {
dev_err(ice_pf_to_dev(pf), "PTP failed to check port %d Tx FIFO, err %d\n",
port->port_num, err);
return err;
}
if (offs & 0x1)
phy_sts = FIELD_GET(Q_REG_FIFO13_M, val);
else
phy_sts = FIELD_GET(Q_REG_FIFO02_M, val);
if (phy_sts & FIFO_EMPTY) {
port->tx_fifo_busy_cnt = FIFO_OK;
return 0;
}
port->tx_fifo_busy_cnt++;
dev_dbg(ice_pf_to_dev(pf), "Try %d, port %d FIFO not empty\n",
port->tx_fifo_busy_cnt, port->port_num);
if (port->tx_fifo_busy_cnt == ICE_PTP_FIFO_NUM_CHECKS) {
dev_dbg(ice_pf_to_dev(pf),
"Port %d Tx FIFO still not empty; resetting quad %d\n",
port->port_num, quad);
/* Give up waiting: reset the quad's timestamp memory */
ice_ptp_reset_ts_memory_quad_e82x(hw, quad);
port->tx_fifo_busy_cnt = FIFO_OK;
return 0;
}
return -EAGAIN;
}
/**
 * ice_ptp_wait_for_offsets - Delayed work to configure Tx/Rx offsets
 * @work: the kthread_work embedded in the port's ov_work
 *
 * Checks the Tx FIFO and, once it is drained, programs the E82x Tx and Rx
 * timestamp offset calibration for the port. While a reset is in progress
 * or either configuration fails, the work re-queues itself with a 100 ms
 * delay.
 */
static void ice_ptp_wait_for_offsets(struct kthread_work *work)
{
struct ice_ptp_port *port;
struct ice_pf *pf;
struct ice_hw *hw;
int tx_err;
int rx_err;
port = container_of(work, struct ice_ptp_port, ov_work.work);
pf = ptp_port_to_pf(port);
hw = &pf->hw;
if (ice_is_reset_in_progress(pf->state)) {
/* Try again later when the reset completes */
kthread_queue_delayed_work(pf->ptp.kworker,
&port->ov_work,
msecs_to_jiffies(100));
return;
}
tx_err = ice_ptp_check_tx_fifo(port);
if (!tx_err)
tx_err = ice_phy_cfg_tx_offset_e82x(hw, port->port_num);
rx_err = ice_phy_cfg_rx_offset_e82x(hw, port->port_num);
if (tx_err || rx_err) {
/* Tx and/or Rx offset not yet configured, try again later */
kthread_queue_delayed_work(pf->ptp.kworker,
&port->ov_work,
msecs_to_jiffies(100));
return;
}
}
/**
 * ice_ptp_port_phy_stop - Stop the PHY timer for a port
 * @ptp_port: port whose timer should be stopped
 *
 * Dispatches on MAC type under the port-state mutex: E810/E830 need no
 * action; E82x cancels pending offset work and stops its timer; E825
 * (3K) stops its eth56g timer. -EBUSY from the stop routine is not
 * logged as an error.
 *
 * Return: 0 on success or when no action is needed, negative errno
 * otherwise.
 */
static int
ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port)
{
struct ice_pf *pf = ptp_port_to_pf(ptp_port);
u8 port = ptp_port->port_num;
struct ice_hw *hw = &pf->hw;
int err;
mutex_lock(&ptp_port->ps_lock);
switch (hw->mac_type) {
case ICE_MAC_E810:
case ICE_MAC_E830:
err = 0;
break;
case ICE_MAC_GENERIC:
kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
err = ice_stop_phy_timer_e82x(hw, port, true);
break;
case ICE_MAC_GENERIC_3K_E825:
err = ice_stop_phy_timer_eth56g(hw, port, true);
break;
default:
err = -ENODEV;
}
if (err && err != -EBUSY)
dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, err %d\n",
port, err);
mutex_unlock(&ptp_port->ps_lock);
return err;
}
/**
 * ice_ptp_port_phy_restart - (Re)start the PHY timer for a port
 * @ptp_port: port whose timer should be restarted
 *
 * When the link is down, delegates to ice_ptp_port_phy_stop(). Otherwise
 * restarts the timer per MAC type. For E82x the Tx tracker is marked
 * calibrating around the timer start (so timestamp requests are rejected
 * until calibration completes), the FIFO retry counter is reset, and the
 * offset-wait work is queued — the order of these steps is deliberate.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int
ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
{
struct ice_pf *pf = ptp_port_to_pf(ptp_port);
u8 port = ptp_port->port_num;
struct ice_hw *hw = &pf->hw;
unsigned long flags;
int err;
if (!ptp_port->link_up)
return ice_ptp_port_phy_stop(ptp_port);
mutex_lock(&ptp_port->ps_lock);
switch (hw->mac_type) {
case ICE_MAC_E810:
case ICE_MAC_E830:
err = 0;
break;
case ICE_MAC_GENERIC:
/* Start the PHY timer in Vernier mode */
kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
/* temporarily disable Tx timestamps while calibrating */
spin_lock_irqsave(&ptp_port->tx.lock, flags);
ptp_port->tx.calibrating = true;
spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
ptp_port->tx_fifo_busy_cnt = 0;
/* Start the PHY timer in Vernier mode */
err = ice_start_phy_timer_e82x(hw, port);
if (err)
break;
/* Enable Tx timestamps right away */
spin_lock_irqsave(&ptp_port->tx.lock, flags);
ptp_port->tx.calibrating = false;
spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work,
0);
break;
case ICE_MAC_GENERIC_3K_E825:
err = ice_start_phy_timer_eth56g(hw, port);
break;
default:
err = -ENODEV;
}
if (err)
dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d up, err %d\n",
port, err);
mutex_unlock(&ptp_port->ps_lock);
return err;
}
/**
 * ice_ptp_link_change - React to a link state change on the PTP port
 * @pf: PF private structure
 * @linkup: true when the link came up, false when it went down
 *
 * Records the new link state, then — unless a reset is ongoing — performs
 * MAC-specific handling: E825 first reconfigures the SyncE Ethernet
 * divider for every active bypass-mux clock pin (only when DPLL is
 * enabled), and restarts the PHY timer on link-up; E82x restarts the PHY
 * timer unconditionally; E810/E830 need no PHY action.
 */
void ice_ptp_link_change(struct ice_pf *pf, bool linkup)
{
struct ice_ptp_port *ptp_port;
struct ice_hw *hw = &pf->hw;
if (pf->ptp.state != ICE_PTP_READY)
return;
ptp_port = &pf->ptp.port;
/* Update cached link status for this port immediately */
ptp_port->link_up = linkup;
/* Skip HW writes when a reset is in progress */
if (pf->hw.reset_ongoing)
return;
if (hw->mac_type == ICE_MAC_GENERIC_3K_E825) {
int pin, err;
if (!test_bit(ICE_FLAG_DPLL, pf->flags))
return;
mutex_lock(&pf->dplls.lock);
for (pin = 0; pin < ICE_SYNCE_CLK_NUM; pin++) {
enum ice_synce_clk clk_pin;
bool active;
u8 port_num;
port_num = ptp_port->port_num;
clk_pin = (enum ice_synce_clk)pin;
err = ice_tspll_bypass_mux_active_e825c(hw,
port_num,
&active,
clk_pin);
if (WARN_ON_ONCE(err)) {
mutex_unlock(&pf->dplls.lock);
return;
}
/* Reconfigure the divider; only fatal if the pin is active */
err = ice_tspll_cfg_synce_ethdiv_e825c(hw, clk_pin);
if (active && WARN_ON_ONCE(err)) {
mutex_unlock(&pf->dplls.lock);
return;
}
}
mutex_unlock(&pf->dplls.lock);
}
switch (hw->mac_type) {
case ICE_MAC_E810:
case ICE_MAC_E830:
/* Do not reconfigure E810 or E830 PHY */
return;
case ICE_MAC_GENERIC:
ice_ptp_port_phy_restart(ptp_port);
return;
case ICE_MAC_GENERIC_3K_E825:
if (linkup)
ice_ptp_port_phy_restart(ptp_port);
return;
default:
dev_warn(ice_pf_to_dev(pf), "%s: Unknown PHY type\n", __func__);
}
}
/**
 * ice_ptp_cfg_phy_interrupt - Enable/disable the PHY timestamp interrupt
 * @pf: PF private structure
 * @ena: true to enable the interrupt, false to disable
 * @threshold: number of ready timestamps that triggers the interrupt
 *
 * Resets the timestamp memory first, then configures the interrupt on
 * every quad (E82x) or every port (E825). E810/E830 deliver timestamps
 * without a PHY interrupt and need no configuration.
 *
 * Return: 0 on success, negative errno on failure or unknown MAC type.
 */
static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
ice_ptp_reset_ts_memory(hw);
switch (hw->mac_type) {
case ICE_MAC_E810:
case ICE_MAC_E830:
return 0;
case ICE_MAC_GENERIC: {
int quad;
for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports);
quad++) {
int err;
err = ice_phy_cfg_intr_e82x(hw, quad, ena, threshold);
if (err) {
dev_err(dev, "Failed to configure PHY interrupt for quad %d, err %d\n",
quad, err);
return err;
}
}
return 0;
}
case ICE_MAC_GENERIC_3K_E825: {
int port;
for (port = 0; port < hw->ptp.num_lports; port++) {
int err;
err = ice_phy_cfg_intr_eth56g(hw, port, ena, threshold);
if (err) {
dev_err(dev, "Failed to configure PHY interrupt for port %d, err %d\n",
port, err);
return err;
}
}
return 0;
}
case ICE_MAC_UNKNOWN:
default:
return -EOPNOTSUPP;
}
}
/* Restart PHY timestamping on this PF's own PTP port. */
static void ice_ptp_reset_phy_timestamping(struct ice_pf *pf)
{
ice_ptp_port_phy_restart(&pf->ptp.port);
}
static void ice_ptp_restart_all_phy(struct ice_pf *pf)
{
struct list_head *entry;
list_for_each(entry, &pf->adapter->ports.ports) {
struct ice_ptp_port *port = list_entry(entry,
struct ice_ptp_port,
list_node);
if (port->link_up)
ice_ptp_port_phy_restart(port);
}
}
/**
 * ice_ptp_adjfine - ptp_clock_info adjfine callback
 * @info: PTP clock info structure
 * @scaled_ppm: frequency adjustment in scaled parts per million
 *
 * Scales the base increment value by @scaled_ppm and programs it into the
 * hardware under the PTP semaphore.
 *
 * Return: 0 on success, -EIO when the write fails.
 */
static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	u64 incval;
	int err;

	incval = adjust_by_scaled_ppm(ice_base_incval(pf), scaled_ppm);

	err = ice_ptp_write_incval_locked(&pf->hw, incval);
	if (!err)
		return 0;

	dev_err(ice_pf_to_dev(pf), "PTP failed to set incval, err %d\n",
		err);
	return -EIO;
}
/**
 * ice_ptp_extts_event - Report external timestamp events to the PTP stack
 * @pf: PF private structure
 *
 * For each channel flagged in pf->ptp.ext_ts_irq, reads the captured
 * event time from the GLTSYN_EVNT registers, subtracts the pin's input
 * delay (delay[0] of the pin descriptor, when one is configured for this
 * channel), and delivers a PTP_CLOCK_EXTTS event. The per-channel IRQ
 * flag is cleared after delivery.
 */
void ice_ptp_extts_event(struct ice_pf *pf)
{
struct ptp_clock_event event;
struct ice_hw *hw = &pf->hw;
u8 chan, tmr_idx;
u32 hi, lo;
if (pf->ptp.state != ICE_PTP_READY)
return;
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
for (chan = 0; chan < GLTSYN_EVNT_H_IDX_MAX; chan++) {
int pin_desc_idx;
if (!(pf->ptp.ext_ts_irq & (1 << chan)))
continue;
lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx));
hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx));
event.timestamp = (u64)hi << 32 | lo;
/* Compensate for the pin's input propagation delay */
pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_EXTTS, chan);
if (pin_desc_idx >= 0) {
const struct ice_ptp_pin_desc *desc;
desc = &pf->ptp.ice_pin_desc[pin_desc_idx];
event.timestamp -= desc->delay[0];
}
event.type = PTP_CLOCK_EXTTS;
event.index = chan;
pf->ptp.ext_ts_irq &= ~(1 << chan);
ptp_clock_event(pf->ptp.clock, &event);
}
}
/**
 * ice_ptp_cfg_extts - Configure an external timestamp (EXTTS) channel
 * @pf: PF private structure
 * @rq: the EXTTS request describing channel and edge flags
 * @on: non-zero to enable, zero to disable
 *
 * Programs the GLTSYN_AUX_IN and GPIO control registers for the channel.
 * On enable, the rising/falling edge selection is taken from rq->flags.
 * On disable, the OICR event interrupt cause is only masked when no other
 * EXTTS channel remains enabled.
 *
 * Return: 0 on success, -EIO when no EXTTS pin is mapped to the channel.
 */
static int ice_ptp_cfg_extts(struct ice_pf *pf, struct ptp_extts_request *rq,
int on)
{
u32 aux_reg, gpio_reg, irq_reg;
struct ice_hw *hw = &pf->hw;
unsigned int chan, gpio_pin;
int pin_desc_idx;
u8 tmr_idx;
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
chan = rq->index;
pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_EXTTS, chan);
if (pin_desc_idx < 0)
return -EIO;
/* gpio[0] is the EXTTS GPIO for this pin */
gpio_pin = pf->ptp.ice_pin_desc[pin_desc_idx].gpio[0];
irq_reg = rd32(hw, PFINT_OICR_ENA);
if (on) {
/* Enable the interrupt */
irq_reg |= PFINT_OICR_TSYN_EVNT_M;
aux_reg = GLTSYN_AUX_IN_0_INT_ENA_M;
#define GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE BIT(0)
#define GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE BIT(1)
/* set event level to requested edge */
if (rq->flags & PTP_FALLING_EDGE)
aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE;
if (rq->flags & PTP_RISING_EDGE)
aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE;
/* Write GPIO CTL to configure the input function */
gpio_reg = FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M,
1 + chan + (tmr_idx * 3));
} else {
bool last_enabled = true;
aux_reg = 0;
gpio_reg = 0;
/* Keep the OICR cause enabled while any other channel is on */
for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts; i++)
if ((pf->ptp.extts_rqs[i].flags &
PTP_ENABLE_FEATURE) &&
i != chan) {
last_enabled = false;
}
if (last_enabled)
irq_reg &= ~PFINT_OICR_TSYN_EVNT_M;
}
wr32(hw, PFINT_OICR_ENA, irq_reg);
wr32(hw, GLTSYN_AUX_IN(chan, tmr_idx), aux_reg);
wr32(hw, GLGEN_GPIO_CTL(gpio_pin), gpio_reg);
return 0;
}
/**
 * ice_ptp_disable_all_extts - Disable every enabled EXTTS channel
 * @pf: PF private structure
 *
 * Used around operations that invalidate event captures. Waits for any
 * in-flight OICR interrupt handler to finish afterwards.
 */
static void ice_ptp_disable_all_extts(struct ice_pf *pf)
{
	unsigned int i;

	for (i = 0; i < pf->ptp.info.n_ext_ts; i++) {
		if (!(pf->ptp.extts_rqs[i].flags & PTP_ENABLE_FEATURE))
			continue;

		ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[i], false);
	}

	synchronize_irq(pf->oicr_irq.virq);
}
/**
 * ice_ptp_enable_all_extts - Re-enable every cached EXTTS request
 * @pf: PF private structure
 *
 * Replays the cached extts_rqs entries whose enable flag is set.
 */
static void ice_ptp_enable_all_extts(struct ice_pf *pf)
{
	unsigned int i;

	for (i = 0; i < pf->ptp.info.n_ext_ts; i++) {
		if (!(pf->ptp.extts_rqs[i].flags & PTP_ENABLE_FEATURE))
			continue;

		ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[i], true);
	}
}
/**
 * ice_ptp_write_perout - Program a periodic output channel
 * @hw: pointer to the HW struct
 * @chan: output channel to program
 * @gpio_pin: GPIO pin driving the output
 * @start: absolute start time in nanoseconds
 * @period: full period in nanoseconds (0 disables the output)
 *
 * Disables the channel first, configures the E825 PPS PLL output when
 * applicable, then writes the half-period (the CLKO register holds the
 * half-cycle length), the target start time, and finally re-enables the
 * AUX output and GPIO function. The half-period must fit in 32 bits and
 * exceed MIN_PULSE ticks.
 *
 * Return: 0 on success, -EIO for an out-of-range period, or an error from
 * the E825 PLL configuration.
 */
static int ice_ptp_write_perout(struct ice_hw *hw, unsigned int chan,
unsigned int gpio_pin, u64 start, u64 period)
{
u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
u32 val = 0;
/* 0. Reset mode & out_en in AUX_OUT */
wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0);
if (hw->mac_type == ICE_MAC_GENERIC_3K_E825) {
int err;
/* Enable/disable CGU 1PPS output for E825C */
err = ice_tspll_cfg_pps_out_e825c(hw, !!period);
if (err)
return err;
}
/* 1. Write perout with half of required period value.
 * HW toggles output when target time is reached.
 */
period >>= 1;
/* allow 2 * NSEC_PER_SEC + 1 as max period */
#define MIN_PULSE 3
if (!!period && (period <= MIN_PULSE || period > U32_MAX)) {
dev_err(ice_hw_to_dev(hw), "CLK period ticks must be >= %d && <= 2^32",
MIN_PULSE);
return -EIO;
}
wr32(hw, GLTSYN_CLKO(chan, tmr_idx), lower_32_bits(period));
/* 2. Write TARGET time */
wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start));
wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), upper_32_bits(start));
/* 3. Write AUX_OUT register */
if (!!period)
val = GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M;
wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), val);
/* 4. write GPIO CTL reg */
val = GLGEN_GPIO_CTL_PIN_DIR_M;
if (!!period)
val |= FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M,
8 + chan + (tmr_idx * 4));
wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);
ice_flush(hw);
return 0;
}
/**
 * ice_ptp_cfg_perout - Configure a periodic output request
 * @pf: PF private structure
 * @rq: the periodic output request (channel, period, start, flags)
 * @on: non-zero to enable, zero to disable
 *
 * Validates the request (E82x's 1PPS pin only supports a 1 s period; the
 * period must be even since hardware toggles at half-period), computes an
 * aligned start time in the future relative to the current PHC time, and
 * compensates for the pin's output propagation delay (delay[1]) before
 * programming the hardware.
 *
 * Return: 0 on success, -EIO on bad pin/period, -EOPNOTSUPP for an
 * unsupported 1PPS period.
 */
static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq,
int on)
{
unsigned int gpio_pin, prop_delay_ns;
u64 clk, period, start, phase;
struct ice_hw *hw = &pf->hw;
int pin_desc_idx;
pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_PEROUT, rq->index);
if (pin_desc_idx < 0)
return -EIO;
/* gpio[1]/delay[1] are the PEROUT GPIO and propagation delay */
gpio_pin = pf->ptp.ice_pin_desc[pin_desc_idx].gpio[1];
prop_delay_ns = pf->ptp.ice_pin_desc[pin_desc_idx].delay[1];
period = rq->period.sec * NSEC_PER_SEC + rq->period.nsec;
/* If we're disabling the output or period is 0, clear out CLKO and TGT */
if (!on || !period)
return ice_ptp_write_perout(hw, rq->index, gpio_pin, 0, 0);
if (strncmp(pf->ptp.pin_desc[pin_desc_idx].name, "1PPS", 64) == 0 &&
period != NSEC_PER_SEC && hw->mac_type == ICE_MAC_GENERIC) {
dev_err(ice_pf_to_dev(pf), "1PPS pin supports only 1 s period\n");
return -EOPNOTSUPP;
}
if (period & 0x1) {
dev_err(ice_pf_to_dev(pf), "CLK Period must be an even value\n");
return -EIO;
}
start = rq->start.sec * NSEC_PER_SEC + rq->start.nsec;
/* If PTP_PEROUT_PHASE is set, rq has phase instead of start time */
if (rq->flags & PTP_PEROUT_PHASE)
phase = start;
else
div64_u64_rem(start, period, &phase);
/* If we have only phase or start time is in the past, start the timer
 * at the next multiple of period, maintaining phase at least 0.5 second
 * from now, so we have time to write it to HW.
 */
clk = ice_ptp_read_src_clk_reg(pf, NULL) + NSEC_PER_MSEC * 500;
if (rq->flags & PTP_PEROUT_PHASE || start <= clk - prop_delay_ns)
start = div64_u64(clk + period - 1, period) * period + phase;
/* Compensate for propagation delay from the generator to the pin. */
start -= prop_delay_ns;
return ice_ptp_write_perout(hw, rq->index, gpio_pin, start, period);
}
static void ice_ptp_disable_all_perout(struct ice_pf *pf)
{
for (unsigned int i = 0; i < pf->ptp.info.n_per_out; i++)
if (pf->ptp.perout_rqs[i].period.sec ||
pf->ptp.perout_rqs[i].period.nsec)
ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[i],
false);
}
static void ice_ptp_enable_all_perout(struct ice_pf *pf)
{
for (unsigned int i = 0; i < pf->ptp.info.n_per_out; i++)
if (pf->ptp.perout_rqs[i].period.sec ||
pf->ptp.perout_rqs[i].period.nsec)
ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[i],
true);
}
/**
 * ice_verify_pin - ptp_clock_info verify callback
 * @info: PTP clock info structure
 * @pin: pin index to verify
 * @func: requested pin function
 * @chan: channel (unused for this check)
 *
 * A function is supported only when the pin descriptor carries a valid
 * GPIO for it: gpio[0] for EXTTS, gpio[1] for PEROUT. PTP_PF_NONE is
 * always accepted; PHYSYNC and unknown functions are rejected.
 *
 * Return: 0 when supported, -EOPNOTSUPP otherwise.
 */
static int ice_verify_pin(struct ptp_clock_info *info, unsigned int pin,
			  enum ptp_pin_function func, unsigned int chan)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	const struct ice_ptp_pin_desc *desc = &pf->ptp.ice_pin_desc[pin];

	switch (func) {
	case PTP_PF_NONE:
		return 0;
	case PTP_PF_EXTTS:
		return desc->gpio[0] < 0 ? -EOPNOTSUPP : 0;
	case PTP_PF_PEROUT:
		return desc->gpio[1] < 0 ? -EOPNOTSUPP : 0;
	case PTP_PF_PHYSYNC:
	default:
		return -EOPNOTSUPP;
	}
}
/**
 * ice_ptp_gpio_enable - ptp_clock_info enable callback for GPIO features
 * @info: PTP clock info structure
 * @rq: the requested feature (periodic output or external timestamp)
 * @on: non-zero to enable, zero to disable
 *
 * Applies the request to hardware and caches it so it can be replayed
 * later (e.g. after a clock change). On failure the cached entry is
 * invalidated instead (zero period for perout, cleared enable flag for
 * extts).
 *
 * Return: 0 on success, negative errno on failure or unsupported type.
 */
static int ice_ptp_gpio_enable(struct ptp_clock_info *info,
			       struct ptp_clock_request *rq, int on)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	int err;

	switch (rq->type) {
	case PTP_CLK_REQ_PEROUT: {
		struct ptp_perout_request *cached =
			&pf->ptp.perout_rqs[rq->perout.index];

		err = ice_ptp_cfg_perout(pf, &rq->perout, on);
		if (err) {
			/* Zero period marks the cached slot inactive */
			cached->period.sec = 0;
			cached->period.nsec = 0;
		} else {
			*cached = rq->perout;
		}
		return err;
	}
	case PTP_CLK_REQ_EXTTS: {
		struct ptp_extts_request *cached =
			&pf->ptp.extts_rqs[rq->extts.index];

		err = ice_ptp_cfg_extts(pf, &rq->extts, on);
		if (err)
			cached->flags &= ~PTP_ENABLE_FEATURE;
		else
			*cached = rq->extts;
		return err;
	}
	default:
		return -EOPNOTSUPP;
	}
}
/**
 * ice_ptp_gettimex64 - ptp_clock_info gettimex64 callback
 * @info: PTP clock info structure
 * @ts: output location for the device time
 * @sts: optional location for bracketing system timestamps
 *
 * Return: 0 (the underlying register read cannot fail).
 */
static int
ice_ptp_gettimex64(struct ptp_clock_info *info, struct timespec64 *ts,
		   struct ptp_system_timestamp *sts)
{
	struct ice_pf *pf = ptp_info_to_pf(info);

	*ts = ns_to_timespec64(ice_ptp_read_src_clk_reg(pf, sts));

	return 0;
}
/**
 * ice_ptp_settime64 - ptp_clock_info settime64 callback
 * @info: PTP clock info structure
 * @ts: the new absolute time
 *
 * On E82x, first invalidates pending PHY timestamps. Then, under the PTP
 * hardware semaphore, disables all periodic outputs, programs the new
 * time, and on success resets the cached PHC time. Outputs are re-enabled
 * afterwards and, on E82x, all PHY timers are restarted since the clock
 * jumped.
 *
 * Return: 0 on success, -EBUSY when the semaphore could not be taken, or
 * a negative error from the time write.
 */
static int
ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
{
struct ice_pf *pf = ptp_info_to_pf(info);
struct timespec64 ts64 = *ts;
struct ice_hw *hw = &pf->hw;
int err;
if (hw->mac_type == ICE_MAC_GENERIC) {
err = ice_ptp_clear_phy_offset_ready_e82x(hw);
if (err)
dev_warn(ice_pf_to_dev(pf), "Failed to mark timestamps as invalid before settime\n");
}
if (!ice_ptp_lock(hw)) {
err = -EBUSY;
goto exit;
}
/* Disable periodic outputs while the clock jumps */
ice_ptp_disable_all_perout(pf);
err = ice_ptp_write_init(pf, &ts64);
ice_ptp_unlock(hw);
if (!err)
ice_ptp_reset_cached_phctime(pf);
ice_ptp_enable_all_perout(pf);
/* Recalibrate and re-enable timestamp blocks for E822/E823 */
if (hw->mac_type == ICE_MAC_GENERIC)
ice_ptp_restart_all_phy(pf);
exit:
if (err) {
dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err);
return err;
}
return 0;
}
/**
 * ice_ptp_adjtime_nonatomic - Apply a large offset via read-modify-write
 * @info: PTP clock info structure
 * @delta: offset in nanoseconds (too large for the atomic adjust path)
 *
 * Reads the current time, adds the delta, and writes the result back.
 * Not atomic: the clock keeps running between the read and the write.
 *
 * Return: 0 on success, negative errno from the read or write.
 */
static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta)
{
	struct timespec64 cur, offset;
	int err;

	err = ice_ptp_gettimex64(info, &cur, NULL);
	if (err)
		return err;

	offset = ns_to_timespec64(delta);
	cur = timespec64_add(cur, offset);

	return ice_ptp_settime64(info, (const struct timespec64 *)&cur);
}
/**
 * ice_ptp_adjtime - ptp_clock_info adjtime callback
 * @info: PTP clock info structure
 * @delta: offset in nanoseconds
 *
 * Deltas outside the s32 range fall back to the non-atomic
 * read-modify-write path. Otherwise the hardware adjustment register is
 * written under the PTP semaphore with periodic outputs temporarily
 * disabled, and the cached PHC time is reset afterwards.
 *
 * Return: 0 on success, -EBUSY when the semaphore is unavailable, or a
 * negative error from the adjustment write.
 */
static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
{
struct ice_pf *pf = ptp_info_to_pf(info);
struct ice_hw *hw = &pf->hw;
struct device *dev;
int err;
dev = ice_pf_to_dev(pf);
/* Hardware only supports atomic adjustments using signed 32-bit
 * integers. For any adjustment outside this range, perform
 * a non-atomic get->adjust->set flow.
 */
if (delta > S32_MAX || delta < S32_MIN) {
dev_dbg(dev, "delta = %lld, adjtime non-atomic\n", delta);
return ice_ptp_adjtime_nonatomic(info, delta);
}
if (!ice_ptp_lock(hw)) {
dev_err(dev, "PTP failed to acquire semaphore in adjtime\n");
return -EBUSY;
}
/* Disable periodic outputs while the clock is adjusted */
ice_ptp_disable_all_perout(pf);
err = ice_ptp_write_adj(pf, delta);
ice_ptp_enable_all_perout(pf);
ice_ptp_unlock(hw);
if (err) {
dev_err(dev, "PTP failed to adjust time, err %d\n", err);
return err;
}
ice_ptp_reset_cached_phctime(pf);
return 0;
}
/* Register layout for a hardware cross-timestamp capture. lock_reg/
 * lock_busy gate concurrent captures; ctl_reg/ctl_active trigger and
 * signal completion; art_time_* hold the captured ART time and
 * dev_time_*[tmr_idx] the captured device time for each of the two
 * timers.
 */
struct ice_crosststamp_cfg {
/* HW semaphore used to serialize captures */
u32 lock_reg;
u32 lock_busy;
/* Capture control register and its "in progress" bit */
u32 ctl_reg;
u32 ctl_active;
/* Captured ART (system counter) time, low/high */
u32 art_time_l;
u32 art_time_h;
/* Captured device time per timer index, low/high */
u32 dev_time_l[2];
u32 dev_time_h[2];
};
/* Cross-timestamp register set for E82x (and E825) devices. */
static const struct ice_crosststamp_cfg ice_crosststamp_cfg_e82x = {
.lock_reg = PFHH_SEM,
.lock_busy = PFHH_SEM_BUSY_M,
.ctl_reg = GLHH_ART_CTL,
.ctl_active = GLHH_ART_CTL_ACTIVE_M,
.art_time_l = GLHH_ART_TIME_L,
.art_time_h = GLHH_ART_TIME_H,
.dev_time_l[0] = GLTSYN_HHTIME_L(0),
.dev_time_h[0] = GLTSYN_HHTIME_H(0),
.dev_time_l[1] = GLTSYN_HHTIME_L(1),
.dev_time_h[1] = GLTSYN_HHTIME_H(1),
};
#ifdef CONFIG_ICE_HWTS
/* Cross-timestamp register set for E830 devices (PTM-based capture);
 * only built when hardware cross-timestamping support is configured.
 */
static const struct ice_crosststamp_cfg ice_crosststamp_cfg_e830 = {
.lock_reg = E830_PFPTM_SEM,
.lock_busy = E830_PFPTM_SEM_BUSY_M,
.ctl_reg = E830_GLPTM_ART_CTL,
.ctl_active = E830_GLPTM_ART_CTL_ACTIVE_M,
.art_time_l = E830_GLPTM_ART_TIME_L,
.art_time_h = E830_GLPTM_ART_TIME_H,
.dev_time_l[0] = E830_GLTSYN_PTMTIME_L(0),
.dev_time_h[0] = E830_GLTSYN_PTMTIME_H(0),
.dev_time_l[1] = E830_GLTSYN_PTMTIME_L(1),
.dev_time_h[1] = E830_GLTSYN_PTMTIME_H(1),
};
#endif
/* Context passed through get_device_system_crosststamp() to the capture
 * callback: the system snapshot to fill, the owning PF, and the
 * device-specific register layout.
 */
struct ice_crosststamp_ctx {
struct system_time_snapshot snapshot;
struct ice_pf *pf;
const struct ice_crosststamp_cfg *cfg;
};
/**
 * ice_capture_crosststamp - Capture a device/system cross-timestamp
 * @device: output location for the captured device time
 * @system: output location for the captured system (ART) counter value
 * @__ctx: pointer to a struct ice_crosststamp_ctx
 *
 * Takes the HW cross-timestamp semaphore, snapshots system time, arms the
 * capture via the control register, polls for completion, then reads both
 * the ART counter and the device time for the associated timer index.
 * The semaphore is always released and the source command reset, even on
 * timeout.
 *
 * Return: 0 on success, -EINVAL for a bad timer index, -EBUSY when the
 * semaphore could not be taken, or a poll-timeout error.
 */
static int ice_capture_crosststamp(ktime_t *device,
struct system_counterval_t *system,
void *__ctx)
{
struct ice_crosststamp_ctx *ctx = __ctx;
const struct ice_crosststamp_cfg *cfg;
u32 lock, ctl, ts_lo, ts_hi, tmr_idx;
struct ice_pf *pf;
struct ice_hw *hw;
int err;
u64 ts;
cfg = ctx->cfg;
pf = ctx->pf;
hw = &pf->hw;
tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
if (tmr_idx > 1)
return -EINVAL;
/* Poll until we obtain the cross-timestamp hardware semaphore */
err = rd32_poll_timeout(hw, cfg->lock_reg, lock,
!(lock & cfg->lock_busy),
10 * USEC_PER_MSEC, 50 * USEC_PER_MSEC);
if (err) {
dev_err(ice_pf_to_dev(pf), "PTP failed to get cross timestamp lock\n");
return -EBUSY;
}
/* Snapshot system time for historic interpolation */
ktime_get_snapshot(&ctx->snapshot);
/* Program cmd to master timer */
ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME);
/* Start the ART and device clock sync sequence */
ctl = rd32(hw, cfg->ctl_reg);
ctl |= cfg->ctl_active;
wr32(hw, cfg->ctl_reg, ctl);
/* Poll until hardware completes the capture */
err = rd32_poll_timeout(hw, cfg->ctl_reg, ctl, !(ctl & cfg->ctl_active),
5, 20 * USEC_PER_MSEC);
if (err)
goto err_timeout;
/* Read ART system time */
ts_lo = rd32(hw, cfg->art_time_l);
ts_hi = rd32(hw, cfg->art_time_h);
ts = ((u64)ts_hi << 32) | ts_lo;
system->cycles = ts;
system->cs_id = CSID_X86_ART;
system->use_nsecs = true;
/* Read Device source clock time */
ts_lo = rd32(hw, cfg->dev_time_l[tmr_idx]);
ts_hi = rd32(hw, cfg->dev_time_h[tmr_idx]);
ts = ((u64)ts_hi << 32) | ts_lo;
*device = ns_to_ktime(ts);
err_timeout:
/* Clear the master timer */
ice_ptp_src_cmd(hw, ICE_PTP_NOP);
/* Release HW lock */
lock = rd32(hw, cfg->lock_reg);
lock &= ~cfg->lock_busy;
wr32(hw, cfg->lock_reg, lock);
return err;
}
/**
 * ice_ptp_getcrosststamp - ptp_clock_info getcrosststamp callback
 * @info: PTP clock info structure
 * @cts: output location for the correlated timestamps
 *
 * Selects the register layout for the device family (E830 only when
 * CONFIG_ICE_HWTS is enabled) and delegates the correlated capture to
 * get_device_system_crosststamp().
 *
 * Return: 0 on success, -EOPNOTSUPP for unsupported devices, or an error
 * from the capture.
 */
static int ice_ptp_getcrosststamp(struct ptp_clock_info *info,
struct system_device_crosststamp *cts)
{
struct ice_pf *pf = ptp_info_to_pf(info);
struct ice_crosststamp_ctx ctx = {
.pf = pf,
};
switch (pf->hw.mac_type) {
case ICE_MAC_GENERIC:
case ICE_MAC_GENERIC_3K_E825:
ctx.cfg = &ice_crosststamp_cfg_e82x;
break;
#ifdef CONFIG_ICE_HWTS
case ICE_MAC_E830:
ctx.cfg = &ice_crosststamp_cfg_e830;
break;
#endif
default:
return -EOPNOTSUPP;
}
return get_device_system_crosststamp(ice_capture_crosststamp, &ctx,
&ctx.snapshot, cts);
}
int ice_ptp_hwtstamp_get(struct net_device *netdev,
struct kernel_hwtstamp_config *config)
{
struct ice_pf *pf = ice_netdev_to_pf(netdev);
if (pf->ptp.state != ICE_PTP_READY)
return -EIO;
*config = pf->ptp.tstamp_config;
return 0;
}
/**
 * ice_ptp_set_timestamp_mode - Setup driver for requested timestamp mode
 * @pf: Board private structure
 * @config: the requested hardware timestamp configuration
 *
 * Validate the whole request before committing anything, so that an invalid
 * rx_filter cannot leave the cached tx_type modified while the hardware is
 * never reprogrammed (the original code updated tx_type before validating
 * rx_filter). On success, cache the configuration and reprogram the
 * hardware via ice_ptp_restore_timestamp_mode().
 *
 * Return: 0 on success, -ERANGE if either field is unsupported.
 */
static int ice_ptp_set_timestamp_mode(struct ice_pf *pf,
				      struct kernel_hwtstamp_config *config)
{
	/* Validate Tx mode first; do not touch cached state yet */
	switch (config->tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	/* Rx timestamping is all-or-nothing: any PTP filter enables ALL */
	switch (config->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_NTP_ALL:
	case HWTSTAMP_FILTER_ALL:
		break;
	default:
		return -ERANGE;
	}

	/* Both fields validated; commit the cached configuration */
	pf->ptp.tstamp_config.tx_type = config->tx_type;
	pf->ptp.tstamp_config.rx_filter =
		config->rx_filter == HWTSTAMP_FILTER_NONE ?
		HWTSTAMP_FILTER_NONE : HWTSTAMP_FILTER_ALL;

	/* Immediately update the device, so the caller sees the new mode */
	ice_ptp_restore_timestamp_mode(pf);

	return 0;
}
/**
 * ice_ptp_hwtstamp_set - Apply a requested timestamping configuration
 * @netdev: the network device being configured
 * @config: the requested configuration; updated to the applied values
 * @extack: netlink extended ack (unused)
 *
 * Return: 0 on success, -EAGAIN if PTP is not ready, or an error from
 * ice_ptp_set_timestamp_mode().
 */
int ice_ptp_hwtstamp_set(struct net_device *netdev,
			 struct kernel_hwtstamp_config *config,
			 struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	int err;

	if (pf->ptp.state != ICE_PTP_READY)
		return -EAGAIN;

	err = ice_ptp_set_timestamp_mode(pf, config);
	if (!err)
		*config = pf->ptp.tstamp_config;

	return err;
}
/**
 * ice_ptp_get_rx_hwts - Extract a 64-bit Rx timestamp from a descriptor
 * @rx_desc: the Rx flex descriptor carrying the 32-bit timestamp
 * @pkt_ctx: packet context holding the cached PHC time
 *
 * Return: the timestamp extended to 64 bits using the cached PHC time, or
 * 0 if the descriptor timestamp is invalid or no cached time is available.
 */
u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
			const struct ice_pkt_ctx *pkt_ctx)
{
	u64 cached;

	/* Descriptor must carry a valid timestamp */
	if (!(rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID))
		return 0;

	/* Extension is impossible without a recent cached PHC value */
	cached = READ_ONCE(pkt_ctx->cached_phctime);
	if (!cached)
		return 0;

	return ice_ptp_extend_32b_ts(cached,
				     le32_to_cpu(rx_desc->wb.flex_ts.ts_high));
}
static void ice_ptp_setup_pin_cfg(struct ice_pf *pf)
{
for (unsigned int i = 0; i < pf->ptp.info.n_pins; i++) {
const struct ice_ptp_pin_desc *desc = &pf->ptp.ice_pin_desc[i];
struct ptp_pin_desc *pin = &pf->ptp.pin_desc[i];
const char *name;
if (!ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
name = ice_pin_names[desc->name_idx];
else
name = ice_pin_names_dpll[desc->name_idx];
strscpy(pin->name, name, sizeof(pin->name));
pin->index = i;
}
pf->ptp.info.pin_config = pf->ptp.pin_desc;
}
/**
 * ice_ptp_disable_pins - Disable PTP pin support after a setup failure
 * @pf: Board private structure
 *
 * Clear all pin-related capabilities and callbacks so the clock registers
 * without GPIO pin support.
 */
static void ice_ptp_disable_pins(struct ice_pf *pf)
{
	struct ptp_clock_info *info = &pf->ptp.info;

	dev_warn(ice_pf_to_dev(pf), "Failed to configure PTP pin control\n");

	/* Zero the counts first, then drop the callbacks */
	info->n_pins = 0;
	info->n_ext_ts = 0;
	info->n_per_out = 0;
	info->enable = NULL;
	info->verify = NULL;
}
/**
 * ice_ptp_parse_sdp_entries - Parse NVM SDP AC entries into pin descriptors
 * @pf: Board private structure
 * @entries: SDP connection section entries read from the NVM
 * @num_entries: number of valid entries in @entries
 * @pins: output array of ICE_N_PINS_MAX descriptors to fill in
 *
 * Each entry encodes a pin bitmap, a direction bit, and an SDP number.
 * Entries sharing an SDP number are merged into one descriptor with a GPIO
 * assignment per direction. The resulting pin count is stored in
 * pf->ptp.info.n_pins.
 *
 * Return: 0 (the current implementation never fails).
 */
static int ice_ptp_parse_sdp_entries(struct ice_pf *pf, __le16 *entries,
				     unsigned int num_entries,
				     struct ice_ptp_pin_desc *pins)
{
	unsigned int n_pins = 0;
	unsigned int i;

	/* Mark every descriptor slot unused (-1) before parsing */
	for (i = 0; i < ICE_N_PINS_MAX; i++) {
		pins[i].name_idx = -1;
		pins[i].gpio[0] = -1;
		pins[i].gpio[1] = -1;
	}

	for (i = 0; i < num_entries; i++) {
		u16 entry = le16_to_cpu(entries[i]);
		DECLARE_BITMAP(bitmap, GPIO_NA);
		unsigned int idx;
		bool dir;
		u16 gpio;

		*bitmap = FIELD_GET(ICE_AQC_NVM_SDP_AC_PIN_M, entry);
		/* Skip entries with no pin bits set */
		if (bitmap_empty(bitmap, GPIO_NA))
			continue;

		dir = !!FIELD_GET(ICE_AQC_NVM_SDP_AC_DIR_M, entry);
		gpio = FIELD_GET(ICE_AQC_NVM_SDP_AC_SDP_NUM_M, entry);

		/* Look for an existing descriptor with this SDP number */
		for (idx = 0; idx < ICE_N_PINS_MAX; idx++) {
			if (pins[idx].name_idx == gpio)
				break;
		}

		if (idx == ICE_N_PINS_MAX) {
			/* Not found: allocate the next descriptor slot */
			idx = n_pins++;
			pins[idx].name_idx = gpio;
		}
		/* dir indexes the gpio pair: 0 = input, 1 = output
		 * (matches the gpio_out/gpio_in ordering logged below)
		 */
		pins[idx].gpio[dir] = gpio;
	}

	for (i = 0; i < n_pins; i++) {
		dev_dbg(ice_pf_to_dev(pf),
			"NVM pin entry[%d] : name_idx %d gpio_out %d gpio_in %d\n",
			i, pins[i].name_idx, pins[i].gpio[1], pins[i].gpio[0]);
	}

	pf->ptp.info.n_pins = n_pins;
	return 0;
}
static void ice_ptp_set_funcs_e82x(struct ice_pf *pf)
{
pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp;
if (pf->hw.mac_type == ICE_MAC_GENERIC_3K_E825) {
pf->ptp.ice_pin_desc = ice_pin_desc_e825c;
pf->ptp.info.n_pins = ARRAY_SIZE(ice_pin_desc_e825c);
} else {
pf->ptp.ice_pin_desc = ice_pin_desc_e82x;
pf->ptp.info.n_pins = ARRAY_SIZE(ice_pin_desc_e82x);
}
ice_ptp_setup_pin_cfg(pf);
}
static void ice_ptp_set_funcs_e810(struct ice_pf *pf)
{
__le16 entries[ICE_AQC_NVM_SDP_AC_MAX_SIZE];
struct ice_ptp_pin_desc *desc = NULL;
struct ice_ptp *ptp = &pf->ptp;
unsigned int num_entries;
int err;
err = ice_ptp_read_sdp_ac(&pf->hw, entries, &num_entries);
if (err) {
if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
ptp->ice_pin_desc = ice_pin_desc_dpll;
ptp->info.n_pins = ARRAY_SIZE(ice_pin_desc_dpll);
} else {
pf->ptp.ice_pin_desc = ice_pin_desc_e810;
pf->ptp.info.n_pins = ARRAY_SIZE(ice_pin_desc_e810);
}
err = 0;
} else {
desc = devm_kcalloc(ice_pf_to_dev(pf), ICE_N_PINS_MAX,
sizeof(struct ice_ptp_pin_desc),
GFP_KERNEL);
if (!desc)
goto err;
err = ice_ptp_parse_sdp_entries(pf, entries, num_entries, desc);
if (err)
goto err;
ptp->ice_pin_desc = (const struct ice_ptp_pin_desc *)desc;
}
ptp->info.pin_config = ptp->pin_desc;
ice_ptp_setup_pin_cfg(pf);
err:
if (err) {
devm_kfree(ice_pf_to_dev(pf), desc);
ice_ptp_disable_pins(pf);
}
}
static void ice_ptp_set_funcs_e830(struct ice_pf *pf)
{
#ifdef CONFIG_ICE_HWTS
if (pcie_ptm_enabled(pf->pdev) && boot_cpu_has(X86_FEATURE_ART))
pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp;
#endif
pf->ptp.ice_pin_desc = ice_pin_desc_e810;
pf->ptp.info.n_pins = ARRAY_SIZE(ice_pin_desc_e810);
ice_ptp_setup_pin_cfg(pf);
}
/**
 * ice_ptp_set_caps - Set PTP capabilities
 * @pf: Board private structure
 *
 * Fill in the common ptp_clock_info fields, then dispatch to the
 * family-specific setup routine for pins and cross timestamping.
 */
static void ice_ptp_set_caps(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ptp_clock_info *info = &pf->ptp.info;

	snprintf(info->name, sizeof(info->name) - 1, "%s-%s-clk",
		 dev_driver_string(dev), dev_name(dev));
	info->owner = THIS_MODULE;
	info->max_adj = 100000000;
	info->settime64 = ice_ptp_settime64;
	info->gettimex64 = ice_ptp_gettimex64;
	info->adjfine = ice_ptp_adjfine;
	info->adjtime = ice_ptp_adjtime;
	info->n_ext_ts = GLTSYN_EVNT_H_IDX_MAX;
	info->n_per_out = GLTSYN_TGT_H_IDX_MAX;
	info->verify = ice_verify_pin;
	info->enable = ice_ptp_gpio_enable;
	info->supported_extts_flags = PTP_RISING_EDGE |
				      PTP_FALLING_EDGE |
				      PTP_STRICT_FLAGS;
	info->supported_perout_flags = PTP_PEROUT_PHASE;

	switch (pf->hw.mac_type) {
	case ICE_MAC_E810:
		ice_ptp_set_funcs_e810(pf);
		break;
	case ICE_MAC_E830:
		ice_ptp_set_funcs_e830(pf);
		break;
	case ICE_MAC_GENERIC:
	case ICE_MAC_GENERIC_3K_E825:
		ice_ptp_set_funcs_e82x(pf);
		break;
	default:
		break;
	}
}
/**
 * ice_ptp_create_clock - Register a PTP clock device if none exists yet
 * @pf: Board private structure
 *
 * Return: 0 on success (or if a clock is already registered), or the
 * PTR_ERR() from ptp_clock_register() on failure.
 */
static long ice_ptp_create_clock(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);

	/* Nothing to do if the clock was already registered */
	if (pf->ptp.clock)
		return 0;

	ice_ptp_set_caps(pf);

	pf->ptp.clock = ptp_clock_register(&pf->ptp.info, dev);
	if (IS_ERR(pf->ptp.clock)) {
		dev_err(dev, "Failed to register PTP clock device");
		return PTR_ERR(pf->ptp.clock);
	}

	return 0;
}
/**
 * ice_ptp_request_ts - Request an available Tx timestamp index
 * @tx: the PTP Tx timestamp tracker to request from
 * @skb: the SKB to associate with this timestamp request
 *
 * Claim a free index in the tracker, take a reference on @skb, and mark it
 * as in-progress for hardware timestamping.
 *
 * Return: the claimed index plus tx->offset, or -1 if the tracker is down
 * or no index is free.
 */
s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
{
	unsigned long flags;
	u8 idx;

	spin_lock_irqsave(&tx->lock, flags);

	/* Drop the request if the tracker is not accepting timestamps */
	if (!ice_ptp_is_tx_tracker_up(tx)) {
		spin_unlock_irqrestore(&tx->lock, flags);
		return -1;
	}

	/* Prefer an index after the last one read (last_ll_ts_idx_read),
	 * wrapping to a front-of-bitmap search if none is free past it
	 */
	idx = find_next_zero_bit(tx->in_use, tx->len,
				 tx->last_ll_ts_idx_read + 1);
	if (idx == tx->len)
		idx = find_first_zero_bit(tx->in_use, tx->len);

	if (idx < tx->len) {
		/* Claim the slot under the lock: record the start time for
		 * staleness checks and hold a reference to the skb
		 */
		set_bit(idx, tx->in_use);
		clear_bit(idx, tx->stale);
		tx->tstamps[idx].start = jiffies;
		tx->tstamps[idx].skb = skb_get(skb);
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		ice_trace(tx_tstamp_request, skb, idx);
	}

	spin_unlock_irqrestore(&tx->lock, flags);

	/* idx == tx->len means both searches failed to find a free slot */
	if (idx >= tx->len)
		return -1;
	else
		return idx + tx->offset;
}
/**
 * ice_ptp_process_ts - Process outstanding Tx timestamps
 * @pf: Board private structure
 *
 * Dispatch timestamp processing according to the configured Tx interrupt
 * mode: the owner handles all ports, a non-owner handles only its own port,
 * and NONE mode does nothing.
 */
void ice_ptp_process_ts(struct ice_pf *pf)
{
	switch (pf->ptp.tx_interrupt_mode) {
	case ICE_PTP_TX_INTERRUPT_ALL:
		ice_ptp_tx_tstamp_owner(pf);
		break;
	case ICE_PTP_TX_INTERRUPT_SELF:
		ice_ptp_process_tx_tstamp(&pf->ptp.port.tx);
		break;
	case ICE_PTP_TX_INTERRUPT_NONE:
		break;
	default:
		WARN_ONCE(1, "Unexpected Tx timestamp interrupt mode %u\n",
			  pf->ptp.tx_interrupt_mode);
		break;
	}
}
static bool ice_port_has_timestamps(struct ice_ptp_tx *tx)
{
bool more_timestamps;
scoped_guard(spinlock_irqsave, &tx->lock) {
if (!tx->init)
return false;
more_timestamps = !bitmap_empty(tx->in_use, tx->len);
}
return more_timestamps;
}
static bool ice_any_port_has_timestamps(struct ice_pf *pf)
{
struct ice_ptp_port *port;
scoped_guard(mutex, &pf->adapter->ports.lock) {
list_for_each_entry(port, &pf->adapter->ports.ports,
list_node) {
struct ice_ptp_tx *tx = &port->tx;
if (ice_port_has_timestamps(tx))
return true;
}
}
return false;
}
/**
 * ice_ptp_tx_tstamps_pending - Check whether Tx timestamp work remains
 * @pf: Board private structure
 *
 * First consult the software trackers per the interrupt mode, then fall
 * back to polling the PHY tstamp_ready bitmaps for every quad.
 *
 * Return: true if any timestamps are outstanding (or a PHY read fails),
 * false otherwise.
 */
bool ice_ptp_tx_tstamps_pending(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	unsigned int quad;

	switch (pf->ptp.tx_interrupt_mode) {
	case ICE_PTP_TX_INTERRUPT_NONE:
		return false;
	case ICE_PTP_TX_INTERRUPT_SELF:
		if (ice_port_has_timestamps(&pf->ptp.port.tx))
			return true;
		break;
	case ICE_PTP_TX_INTERRUPT_ALL:
		if (ice_any_port_has_timestamps(pf))
			return true;
		break;
	default:
		WARN_ONCE(1, "Unexpected Tx timestamp interrupt mode %u\n",
			  pf->ptp.tx_interrupt_mode);
		break;
	}

	/* Nothing tracked in software; check the PHY ready bitmaps. A read
	 * failure is conservatively treated as "pending".
	 */
	for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports); quad++) {
		u64 tstamp_ready = 0;

		if (ice_get_phy_tx_tstamp_ready(&pf->hw, quad, &tstamp_ready))
			return true;
		if (tstamp_ready)
			return true;
	}

	return false;
}
/**
 * ice_ptp_ts_irq - Handle the PTP Tx timestamp interrupt (top half)
 * @pf: Board private structure
 *
 * Return: IRQ_WAKE_THREAD when timestamp processing must be deferred to the
 * threaded handler, IRQ_HANDLED otherwise.
 */
irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	switch (hw->mac_type) {
	case ICE_MAC_E810:
		/* With low-latency interrupt reads, request a single
		 * outstanding timestamp directly from the top half; the
		 * result arrives via a later interrupt
		 */
		if (hw->dev_caps.ts_dev_info.ts_ll_int_read) {
			struct ice_ptp_tx *tx = &pf->ptp.port.tx;
			u8 idx, last;

			/* Skip while the PF is resetting or otherwise
			 * not in a nominal state
			 */
			if (!ice_pf_state_is_nominal(pf))
				return IRQ_HANDLED;

			spin_lock(&tx->lock);
			if (tx->init) {
				/* Resume the in_use scan just past the last
				 * index read, wrapping around the bitmap
				 */
				last = tx->last_ll_ts_idx_read + 1;
				idx = find_next_bit_wrap(tx->in_use, tx->len,
							 last);
				if (idx != tx->len)
					ice_ptp_req_tx_single_tstamp(tx, idx);
			}
			spin_unlock(&tx->lock);

			return IRQ_HANDLED;
		}
		fallthrough; /* E810 without LL reads defers like E82x */
	case ICE_MAC_GENERIC:
	case ICE_MAC_GENERIC_3K_E825:
		if (!ice_ptp_pf_handles_tx_interrupt(pf))
			return IRQ_HANDLED;

		/* Hand timestamp processing to the misc IRQ thread */
		set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
		return IRQ_WAKE_THREAD;
	case ICE_MAC_E830:
		/* E830 processes timestamps directly in the hard IRQ and
		 * re-arms itself while work remains
		 */
		ice_ptp_process_ts(pf);
		if (ice_ptp_tx_tstamps_pending(pf)) {
			wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
			ice_flush(hw);
		}
		return IRQ_HANDLED;
	default:
		return IRQ_HANDLED;
	}
}
/**
 * ice_ptp_maybe_trigger_tx_interrupt - Re-kick a missed Tx timestamp IRQ
 * @pf: Board private structure
 *
 * On the source timer owner, poll every quad's tstamp_ready bitmap and
 * software-trigger the Tx timestamp interrupt if any timestamps are waiting
 * in the PHY.
 */
static void ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	unsigned int quad;

	if (!pf->ptp.port.tx.has_ready_bitmap)
		return;

	if (!ice_pf_src_tmr_owned(pf))
		return;

	for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports); quad++) {
		u64 tstamp_ready;

		/* Ignore quads whose ready bitmap cannot be read */
		if (ice_get_phy_tx_tstamp_ready(&pf->hw, quad, &tstamp_ready))
			continue;
		if (!tstamp_ready)
			continue;

		dev_dbg(ice_pf_to_dev(pf), "PTP periodic task detected waiting timestamps. Triggering Tx timestamp interrupt now.\n");
		wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
		ice_flush(hw);
		return;
	}
}
/**
 * ice_ptp_periodic_work - Periodic PTP maintenance work item
 * @work: the kthread delayed work embedded in struct ice_ptp
 *
 * Refresh the cached PHC time and re-trigger any missed Tx timestamp
 * interrupts, then reschedule: 500 ms normally, 10 ms after a failed
 * cached-time update.
 */
static void ice_ptp_periodic_work(struct kthread_work *work)
{
	struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work);
	struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
	unsigned long delay;

	if (pf->ptp.state != ICE_PTP_READY)
		return;

	delay = msecs_to_jiffies(ice_ptp_update_cached_phctime(pf) ? 10 : 500);

	ice_ptp_maybe_trigger_tx_interrupt(pf);

	kthread_queue_delayed_work(ptp->kworker, &ptp->work, delay);
}
/**
 * ice_ptp_queue_work - Queue the PTP periodic work immediately
 * @pf: Board private structure
 *
 * No-op unless PTP is supported and in the READY state.
 */
void ice_ptp_queue_work(struct ice_pf *pf)
{
	if (!test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
		return;
	if (pf->ptp.state != ICE_PTP_READY)
		return;

	kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work, 0);
}
/**
 * ice_ptp_prepare_rebuild_sec - Prepare or rebuild PTP on secondary PFs
 * @pf: Board private structure of the primary PF
 * @rebuild: true to rebuild PTP, false to prepare it for reset
 * @reset_type: the reset type being performed
 *
 * Walk the adapter's port list and, for each non-primary PF, either rebuild
 * PTP or prepare it for reset. Uses the idiomatic list_for_each_entry()
 * instead of the manual list_for_each() + list_entry() combination.
 */
static void ice_ptp_prepare_rebuild_sec(struct ice_pf *pf, bool rebuild,
					enum ice_reset_req reset_type)
{
	struct ice_ptp_port *port;

	list_for_each_entry(port, &pf->adapter->ports.ports, list_node) {
		struct ice_pf *peer_pf = ptp_port_to_pf(port);

		/* The primary PF handles itself elsewhere */
		if (ice_is_primary(&peer_pf->hw))
			continue;

		if (rebuild)
			ice_ptp_rebuild(peer_pf, reset_type);
		else
			ice_ptp_prepare_for_reset(peer_pf, reset_type);
	}
}
/**
 * ice_ptp_prepare_for_reset - Prepare PTP for the upcoming reset
 * @pf: Board private structure
 * @reset_type: the reset type being performed
 *
 * Stop timestamping and, for resets deeper than a PF reset, tear down the
 * Tx tracker, periodic outputs, and the source clock, recording the wall
 * time so it can be restored after the reset.
 */
void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct ice_ptp *ptp = &pf->ptp;
	struct ice_hw *hw = &pf->hw;
	u8 src_tmr;

	if (ptp->state != ICE_PTP_READY)
		return;

	ptp->state = ICE_PTP_RESETTING;

	/* Disable timestamping */
	ice_ptp_disable_timestamp_mode(pf);

	kthread_cancel_delayed_work_sync(&ptp->work);

	/* A PF reset does not disturb the clock hardware; nothing more to do */
	if (reset_type == ICE_RESET_PFR)
		return;

	/* On E825 the source timer owner also prepares the other PFs */
	if (ice_pf_src_tmr_owned(pf) && hw->mac_type == ICE_MAC_GENERIC_3K_E825)
		ice_ptp_prepare_rebuild_sec(pf, false, reset_type);

	ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);

	/* Disable periodic outputs */
	ice_ptp_disable_all_perout(pf);

	src_tmr = ice_get_ptp_src_clock_index(&pf->hw);

	/* Disable source clock */
	wr32(&pf->hw, GLTSYN_ENA(src_tmr), (u32)~GLTSYN_ENA_TSYN_ENA_M);

	/* Record the current wall time to restore the PHC after reset */
	ptp->reset_time = ktime_get_real_ns();
}
/**
 * ice_ptp_rebuild_owner - Re-initialize clock hardware after a reset
 * @pf: Board private structure
 *
 * Companion to ice_ptp_rebuild(): performs the tasks only the source timer
 * owner must do — re-initialize the PHC and TSPLL, reprogram the increment
 * value and initial time, and re-enable interrupts and pin functions.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int ice_ptp_rebuild_owner(struct ice_pf *pf)
{
	struct ice_ptp *ptp = &pf->ptp;
	struct ice_hw *hw = &pf->hw;
	struct timespec64 ts;
	u64 time_diff;
	int err;

	err = ice_ptp_init_phc(hw);
	if (err)
		return err;

	err = ice_tspll_init(hw);
	if (err)
		return err;

	/* Acquire the global hardware semaphore before touching timers */
	if (!ice_ptp_lock(hw)) {
		err = -EBUSY;
		return err;
	}

	/* Write the increment time value */
	err = ice_ptp_write_incval(hw, ice_base_incval(pf));
	if (err)
		goto err_unlock;

	/* Restore the pre-reset PHC time advanced by the elapsed wall time,
	 * or fall back to the current wall clock if nothing was cached
	 */
	if (ptp->cached_phc_time) {
		time_diff = ktime_get_real_ns() - ptp->reset_time;
		ts = ns_to_timespec64(ptp->cached_phc_time + time_diff);
	} else {
		ts = ktime_to_timespec64(ktime_get_real());
	}
	err = ice_ptp_write_init(pf, &ts);
	if (err)
		goto err_unlock;

	ice_ptp_unlock(hw);

	/* Drop any timestamps still tracked from before the reset */
	ice_ptp_flush_all_tx_tracker(pf);

	err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
	if (err)
		return err;

	ice_ptp_restart_all_phy(pf);

	/* Re-enable periodic outputs and external timestamp events */
	ice_ptp_enable_all_perout(pf);
	ice_ptp_enable_all_extts(pf);

	return 0;

err_unlock:
	ice_ptp_unlock(hw);
	return err;
}
/**
 * ice_ptp_rebuild - Restore PTP support after a reset
 * @pf: Board private structure
 * @reset_type: the reset type being performed
 *
 * Runs the owner-specific rebuild where needed and returns PTP to the READY
 * state, or marks it as ICE_PTP_ERROR on failure.
 */
void ice_ptp_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct ice_ptp *ptp = &pf->ptp;
	int err;

	if (ptp->state == ICE_PTP_READY) {
		/* Reset arrived without preparation; stop PTP first */
		ice_ptp_prepare_for_reset(pf, reset_type);
	} else if (ptp->state != ICE_PTP_RESETTING) {
		err = -EINVAL;
		dev_err(ice_pf_to_dev(pf), "PTP was not initialized\n");
		goto err;
	}

	/* Only the source timer owner touches the clock hardware, and a PF
	 * reset does not disturb it
	 */
	if (ice_pf_src_tmr_owned(pf) && reset_type != ICE_RESET_PFR) {
		err = ice_ptp_rebuild_owner(pf);
		if (err)
			goto err;
	}

	ptp->state = ICE_PTP_READY;

	dev_info(ice_pf_to_dev(pf), "PTP reset successful\n");
	return;

err:
	ptp->state = ICE_PTP_ERROR;
	dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err);
}
static int ice_ptp_setup_adapter(struct ice_pf *pf)
{
if (!ice_pf_src_tmr_owned(pf) || !ice_is_primary(&pf->hw))
return -EPERM;
pf->adapter->ctrl_pf = pf;
return 0;
}
/**
 * ice_ptp_setup_pf - Add this PF's port to the adapter's port list
 * @pf: Board private structure
 *
 * Return: 0 on success, -ENODEV if no control PF exists or the MAC type is
 * unknown.
 */
static int ice_ptp_setup_pf(struct ice_pf *pf)
{
	struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf);
	struct ice_ptp *ptp = &pf->ptp;

	if (WARN_ON(!ctrl_ptp) || pf->hw.mac_type == ICE_MAC_UNKNOWN)
		return -ENODEV;

	INIT_LIST_HEAD(&ptp->port.list_node);

	mutex_lock(&pf->adapter->ports.lock);
	list_add(&ptp->port.list_node, &pf->adapter->ports.ports);
	mutex_unlock(&pf->adapter->ports.lock);

	return 0;
}
/**
 * ice_ptp_cleanup_pf - Remove this PF's port from the adapter's port list
 * @pf: Board private structure
 */
static void ice_ptp_cleanup_pf(struct ice_pf *pf)
{
	/* Nothing was registered for unknown MAC types */
	if (pf->hw.mac_type == ICE_MAC_UNKNOWN)
		return;

	mutex_lock(&pf->adapter->ports.lock);
	list_del(&pf->ptp.port.list_node);
	mutex_unlock(&pf->adapter->ports.lock);
}
/**
 * ice_ptp_clock_index - Return the POSIX clock index for this device
 * @pf: Board private structure
 *
 * Return: the clock index of the control PF's registered PTP clock, or -1
 * if there is no control PF or no clock.
 */
int ice_ptp_clock_index(struct ice_pf *pf)
{
	struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf);

	if (!ctrl_ptp || !ctrl_ptp->clock)
		return -1;

	return ptp_clock_index(ctrl_ptp->clock);
}
/**
 * ice_ptp_init_owner - Initialize PTP on the source timer owner PF
 * @pf: Board private structure
 *
 * Initialize the PHC and TSPLL, program the increment value and initial
 * time under the hardware semaphore, enable PHY interrupts, and register
 * the PTP clock device.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int ice_ptp_init_owner(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	struct timespec64 ts;
	int err;

	err = ice_ptp_init_phc(hw);
	if (err) {
		dev_err(ice_pf_to_dev(pf), "Failed to initialize PHC, err %d\n",
			err);
		return err;
	}

	err = ice_tspll_init(hw);
	if (err) {
		dev_err(ice_pf_to_dev(pf), "Failed to initialize CGU, status %d\n",
			err);
		return err;
	}

	/* Acquire the global hardware semaphore before writing timers */
	if (!ice_ptp_lock(hw)) {
		err = -EBUSY;
		goto err_exit;
	}

	/* Write the increment time value */
	err = ice_ptp_write_incval(hw, ice_base_incval(pf));
	if (err)
		goto err_unlock;

	/* Seed the device clock from the current wall clock */
	ts = ktime_to_timespec64(ktime_get_real());
	err = ice_ptp_write_init(pf, &ts);
	if (err)
		goto err_unlock;

	ice_ptp_unlock(hw);

	err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
	if (err)
		goto err_exit;

	/* Register the PTP clock device (last, once HW is consistent) */
	err = ice_ptp_create_clock(pf);
	if (err)
		goto err_clk;

	return 0;

err_clk:
	/* Clear the possible ERR_PTR left by ptp_clock_register() */
	pf->ptp.clock = NULL;
err_exit:
	return err;

err_unlock:
	/* Semaphore is still held only on this path */
	ice_ptp_unlock(hw);
	return err;
}
/**
 * ice_ptp_init_work - Create and start the PTP periodic work thread
 * @pf: Board private structure
 * @ptp: the PTP state for this PF
 *
 * Return: 0 on success, or the PTR_ERR() from kthread_run_worker().
 */
static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp)
{
	struct kthread_worker *worker;

	kthread_init_delayed_work(&ptp->work, ice_ptp_periodic_work);

	worker = kthread_run_worker(0, "ice-ptp-%s",
				    dev_name(ice_pf_to_dev(pf)));
	if (IS_ERR(worker))
		return PTR_ERR(worker);

	ptp->kworker = worker;

	/* Kick off the periodic maintenance immediately */
	kthread_queue_delayed_work(worker, &ptp->work, 0);

	return 0;
}
/**
 * ice_ptp_init_port - Initialize PTP state for a single port
 * @pf: Board private structure
 * @ptp_port: the port structure to initialize
 *
 * Initialize the port lock and the family-specific Tx timestamp tracker.
 * E82x ports also get the offset-wait work item.
 *
 * Return: 0 on success, -ENODEV for unknown MAC types.
 */
static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
{
	mutex_init(&ptp_port->ps_lock);

	switch (pf->hw.mac_type) {
	case ICE_MAC_GENERIC:
		kthread_init_delayed_work(&ptp_port->ov_work,
					  ice_ptp_wait_for_offsets);
		return ice_ptp_init_tx_e82x(pf, &ptp_port->tx,
					    ptp_port->port_num);
	case ICE_MAC_E810:
	case ICE_MAC_E830:
	case ICE_MAC_GENERIC_3K_E825:
		return ice_ptp_init_tx(pf, &ptp_port->tx, ptp_port->port_num);
	default:
		return -ENODEV;
	}
}
static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf)
{
switch (pf->hw.mac_type) {
case ICE_MAC_GENERIC:
case ICE_MAC_GENERIC_3K_E825:
if (ice_pf_src_tmr_owned(pf))
pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_ALL;
else
pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_NONE;
break;
default:
pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_SELF;
}
}
/**
 * ice_ptp_init - Initialize PTP hardware clock support
 * @pf: Board private structure
 *
 * Set up PTP for this function. The source timer owner additionally claims
 * the adapter's control PF slot and registers the PTP clock device; every
 * PF registers its port, initializes its Tx tracker, and starts the
 * periodic work thread. On failure, partially-created state is unwound and
 * the state is set back to ICE_PTP_UNINIT.
 */
void ice_ptp_init(struct ice_pf *pf)
{
	struct ice_ptp *ptp = &pf->ptp;
	struct ice_hw *hw = &pf->hw;
	int err;

	ptp->state = ICE_PTP_INITIALIZING;

	/* A negative lane number indicates a prior discovery error */
	if (hw->lane_num < 0) {
		err = hw->lane_num;
		goto err_exit;
	}
	ptp->port.port_num = hw->lane_num;

	ice_ptp_init_hw(hw);

	ice_ptp_init_tx_interrupt_mode(pf);

	/* Only the primary source-timer owner initializes the clock device */
	if (ice_pf_src_tmr_owned(pf) && ice_is_primary(hw)) {
		err = ice_ptp_setup_adapter(pf);
		if (err)
			goto err_exit;
		err = ice_ptp_init_owner(pf);
		if (err)
			goto err_exit;
	}

	err = ice_ptp_setup_pf(pf);
	if (err)
		goto err_exit;

	err = ice_ptp_init_port(pf, &ptp->port);
	if (err)
		goto err_clean_pf;

	/* Start the PHY timestamping block */
	ice_ptp_reset_phy_timestamping(pf);

	/* Configure initial Tx interrupt settings */
	ice_ptp_cfg_tx_interrupt(pf);

	ptp->state = ICE_PTP_READY;

	err = ice_ptp_init_work(pf, ptp);
	if (err)
		goto err_exit;

	dev_info(ice_pf_to_dev(pf), "PTP init successful\n");
	return;

err_clean_pf:
	mutex_destroy(&ptp->port.ps_lock);
	ice_ptp_cleanup_pf(pf);
err_exit:
	/* Unregister the clock if this PF created one before failing */
	if (pf->ptp.clock) {
		ptp_clock_unregister(ptp->clock);
		pf->ptp.clock = NULL;
	}
	ptp->state = ICE_PTP_UNINIT;
	dev_err(ice_pf_to_dev(pf), "PTP failed %d\n", err);
}
/**
 * ice_ptp_release - Clean up PTP state at driver unload
 * @pf: Board private structure
 *
 * Stop timestamping, tear down the trackers and work thread, and unregister
 * the PTP clock device if this PF registered one. Handles both a fully
 * READY instance and one left partially initialized.
 */
void ice_ptp_release(struct ice_pf *pf)
{
	if (pf->ptp.state == ICE_PTP_UNINIT)
		return;

	/* Partially initialized: only minimal teardown is safe */
	if (pf->ptp.state != ICE_PTP_READY) {
		mutex_destroy(&pf->ptp.port.ps_lock);
		ice_ptp_cleanup_pf(pf);
		if (pf->ptp.clock) {
			ptp_clock_unregister(pf->ptp.clock);
			pf->ptp.clock = NULL;
		}
		return;
	}

	pf->ptp.state = ICE_PTP_UNINIT;

	/* Disable timestamping */
	ice_ptp_disable_timestamp_mode(pf);

	ice_ptp_cleanup_pf(pf);

	ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);

	ice_ptp_disable_all_extts(pf);

	kthread_cancel_delayed_work_sync(&pf->ptp.work);

	ice_ptp_port_phy_stop(&pf->ptp.port);
	mutex_destroy(&pf->ptp.port.ps_lock);
	if (pf->ptp.kworker) {
		kthread_destroy_worker(pf->ptp.kworker);
		pf->ptp.kworker = NULL;
	}

	/* Nothing left to do if no clock device was registered */
	if (!pf->ptp.clock)
		return;

	/* Disable periodic outputs */
	ice_ptp_disable_all_perout(pf);

	ptp_clock_unregister(pf->ptp.clock);
	pf->ptp.clock = NULL;

	dev_info(ice_pf_to_dev(pf), "Removed PTP clock\n");
}