/*
*/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _PSIF_HW_SETGET_H
#define _PSIF_HW_SETGET_H
#ifdef __cplusplus
extern "C" {
#endif
#include "psif_api.h"
#if defined(__arm__)
#include "epsfw_misc.h"
#endif /* __arm__ */
#include "psif_endian.h"
#include "os_header.h"
/*
 * NOTE(review): generated accessor pair for psif_csr_mmu_config.ta_upper_twelve
 * (bits [43:32] of a 64-bit word, per the group/shift/bits comments below).
 * This copy of the generated header is truncated: the setter has lost its
 * value-parameter line (presumably "u16 data)", matching the getter's return
 * type — TODO confirm against the generator) and both bodies have lost their
 * bit-manipulation statements, so this does not compile as-is. Regenerate
 * from the PSIF hardware description rather than hand-repairing.
 */
static inline void set_psif_csr_mmu_config__ta_upper_twelve(
volatile struct psif_csr_mmu_config *ptr,
{
/* group=0 shift=32 bits=12 */
}
static inline u16 get_psif_csr_mmu_config__ta_upper_twelve(volatile struct psif_csr_mmu_config *ptr)
{
/* group=0 shift=32 bits=12 */
}
/*
 * NOTE(review): same truncation as the pair above; pa_upper_twelve occupies
 * bits [59:48] of the same word per the group/shift/bits comments.
 */
static inline void set_psif_csr_mmu_config__pa_upper_twelve(
volatile struct psif_csr_mmu_config *ptr,
{
/* group=0 shift=48 bits=12 */
}
static inline u16 get_psif_csr_mmu_config__pa_upper_twelve(volatile struct psif_csr_mmu_config *ptr)
{
/* group=0 shift=48 bits=12 */
}
/*
* PSIF_WR_INVALIDATE_LKEY: key to invalidate/flush from the DMA VT cache.
* PSIF_WR_INVALIDATE_RKEY: key to invalidate/flush from the DMA VT cache.
* PSIF_WR_INVALIDATE_BOTH_KEYS: key to invalidate/flush from the DMA VT
* cache. PSIF_WR_INVALIDATE_TLB: this is the address vector to invalidate in
* the TLB.
*/
/*
 * NOTE(review): truncated generated accessors for psif_wr_su.key (bits
 * [63:32] of word 2). The setter is missing its value-parameter line and
 * body; the orphan brace block that follows is the remains of the
 * corresponding getter, whose signature line was dropped entirely.
 * Regenerate this header rather than hand-repairing.
 */
static inline void set_psif_wr_su__key(
volatile struct psif_wr_su *ptr,
{
/* group=2 shift=32 bits=32 */
}
{
/* group=2 shift=32 bits=32 */
}
/*
 * NOTE(review): the four accessor pairs below (psif_wr fields sq_seq,
 * local_qp, completion, checksum) are truncated in this copy: the setters
 * have lost both their "volatile struct psif_wr *ptr," and value-parameter
 * lines, the getter signatures were dropped entirely (leaving orphan brace
 * blocks), and all bodies lost their bit-manipulation statements. Field
 * placement is given by the group/shift/bits comments. Regenerate rather
 * than hand-repairing.
 */
/*
* Send queue sequence number. Used to map request to a particular work
* request in the send queue.
*/
static inline void set_psif_wr__sq_seq(
{
/* group=0 shift=0 bits=16 */
}
{
/* group=0 shift=0 bits=16 */
}
/*
* QP sending this request. XXX: Should name be own_qp_num as defined in QP
* state?
*/
static inline void set_psif_wr__local_qp(
{
/* group=0 shift=32 bits=24 */
}
{
/* group=0 shift=32 bits=24 */
}
/* Completion notification identifier. */
static inline void set_psif_wr__completion(
{
/* group=1 shift=31 bits=1 */
}
{
/* group=1 shift=31 bits=1 */
}
/*
* Checksum used for data protection and consistency between work request and
* QP state.
*/
static inline void set_psif_wr__checksum(
{
/* group=2 shift=32 bits=32 */
}
{
/* group=2 shift=32 bits=32 */
}
/*
* Index to where elements are added to the send queue by SW. SW is
* responsible for keeping track of how many entries there are in the send
* queue. I.e. SW needs to keep track of the head_index so it doesn't
* overwrite entries in the send queue which is not yet completed.
*/
/*
 * NOTE(review): truncated generated accessors for psif_sq_sw.tail_indx
 * (bits [47:32] of word 0). The setter lost its value-parameter line and
 * body; the orphan brace block is the getter's remains (its signature line
 * was dropped). Regenerate rather than hand-repairing.
 */
static inline void set_psif_sq_sw__tail_indx(
volatile struct psif_sq_sw *ptr,
{
/* group=0 shift=32 bits=16 */
}
{
/* group=0 shift=32 bits=16 */
}
/*
 * NOTE(review): the psif_sq_hw accessor pairs below (last_seq, sq_next,
 * destroyed) are truncated in this copy: setters lost their value-parameter
 * line, getter signatures were dropped (leaving orphan brace blocks), and
 * all bodies lost their bit-manipulation statements. Field placement is
 * given by the group/shift/bits comments. Regenerate rather than
 * hand-repairing.
 */
/*
* Send queue sequence number used by the SQS to maintain ordering and keep
* track of which send queue elements to fetch. This field is not in
* sync with the field in qp_t. This number is typically a little bit before
* the number in the qp_t as SQS has to fetch the elements from host memory.
* This is also used as tail_index when checking if there are more elements
* in the send queue.
*/
static inline void set_psif_sq_hw__last_seq(
volatile struct psif_sq_hw *ptr,
{
/* group=0 shift=16 bits=16 */
}
{
/* group=0 shift=16 bits=16 */
}
/* QP and UF to be processed next. */
static inline void set_psif_sq_hw__sq_next(
volatile struct psif_sq_hw *ptr,
{
/* group=0 shift=32 bits=32 */
}
{
/* group=0 shift=32 bits=32 */
}
/*
* This bit is set through the doorbell. SW should check this bit plus
* psif_next = null to ensure SW can own the SQ descriptor.
*/
static inline void set_psif_sq_hw__destroyed(
volatile struct psif_sq_hw *ptr,
{
/* group=1 shift=27 bits=1 */
}
{
/* group=1 shift=27 bits=1 */
}
/* Software modified index pointing to the tail receive entry in host memory. */
/*
 * NOTE(review): the receive-queue accessor pairs below (psif_rq_sw.tail_indx,
 * psif_rq_hw.head_indx, psif_rq_hw.valid, psif_rq_entry.rqe_id) are
 * truncated: setters lost their value-parameter line, getter signatures were
 * dropped (leaving orphan brace blocks), and all bodies lost their
 * bit-manipulation statements. Field placement is given by the
 * group/shift/bits comments. Regenerate rather than hand-repairing.
 */
static inline void set_psif_rq_sw__tail_indx(
volatile struct psif_rq_sw *ptr,
{
/* group=0 shift=32 bits=14 */
}
{
/* group=0 shift=32 bits=14 */
}
/*
* Hardware modified index pointing to the head of the receive queue. TSU is
* using this to find the address of the receive queue entry.
*/
static inline void set_psif_rq_hw__head_indx(
volatile struct psif_rq_hw *ptr,
{
/* group=0 shift=14 bits=14 */
}
{
/* group=0 shift=14 bits=14 */
}
/* The descriptor is valid. */
static inline void set_psif_rq_hw__valid(
volatile struct psif_rq_hw *ptr,
{
/* group=3 shift=55 bits=1 */
}
{
/* group=3 shift=55 bits=1 */
}
/*
* Receive queue entry ID. This is added to the receive completion using this
* receive queue entry.
*/
static inline void set_psif_rq_entry__rqe_id(
volatile struct psif_rq_entry *ptr,
{
/* group=0 shift=0 bits=64 */
}
{
/* group=0 shift=0 bits=64 */
}
/*
 * NOTE(review): the psif_qp_core accessor pairs below are truncated in this
 * copy of the generated header: most setters lost their value-parameter
 * line (enum-typed parameters survived, e.g. set_psif_qp_core__state),
 * getter signatures were dropped (leaving orphan brace blocks), and every
 * body lost its bit-manipulation statements. Field placement is given by
 * the group/shift/bits comments. Regenerate rather than hand-repairing.
 */
/*
* This retry tag is the one used by tsu_rqs and added to the packets sent to
* tsu_dma. It is the responsibility of tsu_rqs to update this retry tag
* whenever the sq_sequence_number in QP state is equal to the one in the
* request.
*/
static inline void set_psif_qp_core__retry_tag_committed(
volatile struct psif_qp_core *ptr,
{
/* group=0 shift=0 bits=3 */
}
{
/* group=0 shift=0 bits=3 */
}
/*
* This retry tag is updated by the error block when an error occurs. If
* tsu_rqs reads this retry tag and it is different than the
* retry_tag_committed, tsu_rqs must update retry_tag_committed to the value of
* retry_tag_err when the sq_sequence_number indicates this is the valid
* request. The sq_sequence_number has been updated by tsu_err at the same
* time the retry_tag_err is updated.
*/
static inline void set_psif_qp_core__retry_tag_err(
volatile struct psif_qp_core *ptr,
{
/* group=0 shift=3 bits=3 */
}
{
/* group=0 shift=3 bits=3 */
}
/*
* Error retry counter initial value. Read by tsu_dma and used by tsu_cmpl to
* calculate exp_backoff etc..
*/
static inline void set_psif_qp_core__error_retry_init(
volatile struct psif_qp_core *ptr,
{
/* group=0 shift=32 bits=3 */
}
{
/* group=0 shift=32 bits=3 */
}
/*
* Retry counter associated with retries to received NAK or implied NAK. If
* it expires, a path migration will be attempted if it is armed, or the QP
* will go to error state. Read by tsu_dma and used by tsu_cmpl.
*/
static inline void set_psif_qp_core__error_retry_count(
volatile struct psif_qp_core *ptr,
{
/* group=0 shift=35 bits=3 */
}
{
/* group=0 shift=35 bits=3 */
}
/* A hit in the set locally spun out of tsu_cmpl is found. */
static inline void set_psif_qp_core__spin_hit(
volatile struct psif_qp_core *ptr,
{
/* group=0 shift=39 bits=1 */
}
{
/* group=0 shift=39 bits=1 */
}
/*
* Minium RNR NAK timeout. This is added to RNR NAK packets and the requester
* receiving the RNR NAK must wait until the timer has expired before the
* retry is sent.
*/
static inline void set_psif_qp_core__min_rnr_nak_time(
volatile struct psif_qp_core *ptr,
{
/* group=1 shift=0 bits=5 */
}
{
/* group=1 shift=0 bits=5 */
}
/* QP State for this QP. */
/*
 * NOTE(review): this setter's full signature survived (enum psif_qp_state
 * parameter); only the body's bit-manipulation statements are missing.
 */
static inline void set_psif_qp_core__state(
volatile struct psif_qp_core *ptr,
enum psif_qp_state data)
{
/* group=1 shift=5 bits=3 */
}
{
/* group=1 shift=5 bits=3 */
}
/* QP number for the remote node. */
static inline void set_psif_qp_core__remote_qp(
volatile struct psif_qp_core *ptr,
{
/* group=1 shift=8 bits=24 */
}
{
/* group=1 shift=8 bits=24 */
}
static inline void set_psif_qp_core__retry_sq_seq(
volatile struct psif_qp_core *ptr,
{
/* group=2 shift=32 bits=16 */
}
{
/* group=2 shift=32 bits=16 */
}
static inline void set_psif_qp_core__sq_seq(
volatile struct psif_qp_core *ptr,
{
/* group=2 shift=48 bits=16 */
}
{
/* group=2 shift=48 bits=16 */
}
/*
* Magic number used to verify use of QP state. This is done by calculating a
* checksum of the work request incorporating the magic number. This checksum
* is checked against the checksum in the work request.
*/
static inline void set_psif_qp_core__magic(
volatile struct psif_qp_core *ptr,
{
/* group=3 shift=0 bits=32 */
}
{
/* group=3 shift=0 bits=32 */
}
/*
 * NOTE(review): further truncated psif_qp_core accessors — same damage
 * pattern as the rest of this extraction (setters missing value-parameter
 * lines except where enum-typed, getter signatures dropped leaving orphan
 * brace blocks, all bodies emptied of bit manipulation). Field placement is
 * given by the group/shift/bits comments. Regenerate rather than
 * hand-repairing.
 */
/*
* Q-Key received in incoming IB packet is checked towards this Q-Key. Q-Key
* used on transmit if top bit of Q-Key in WR is set.
*/
static inline void set_psif_qp_core__qkey(
volatile struct psif_qp_core *ptr,
{
/* group=4 shift=0 bits=32 */
}
{
/* group=4 shift=0 bits=32 */
}
/*
* Sequence number of the last ACK received. Read and written by tsu_cmpl.
* Used to verify that the received response packet is a valid response.
*/
static inline void set_psif_qp_core__last_acked_psn(
volatile struct psif_qp_core *ptr,
{
/* group=4 shift=40 bits=24 */
}
{
/* group=4 shift=40 bits=24 */
}
/* Index to scatter element of in progress SEND. */
static inline void set_psif_qp_core__scatter_indx(
volatile struct psif_qp_core *ptr,
{
/* group=5 shift=32 bits=5 */
}
{
/* group=5 shift=32 bits=5 */
}
/*
* Expected packet sequence number: Sequence number on next expected packet.
*/
static inline void set_psif_qp_core__expected_psn(
volatile struct psif_qp_core *ptr,
{
/* group=5 shift=40 bits=24 */
}
{
/* group=5 shift=40 bits=24 */
}
/*
* TSU quality of service level. Can take values indicating low latency and
* to PSIF. The qosl bit in the doorbell request must match this bit in the
* QP state, otherwise the QP must be put in error. This check only applies
* to tsu_rqs.
*/
static inline void set_psif_qp_core__qosl(
volatile struct psif_qp_core *ptr,
enum psif_tsu_qos data)
{
/* group=6 shift=49 bits=1 */
}
{
/* group=6 shift=49 bits=1 */
}
/*
* Migration state (migrated, re-arm and armed). Since path migration is
* handled by tsu_qps, this is controlled by tsu_qps. XXX: Should error
* handler also be able to change the path?
*/
static inline void set_psif_qp_core__mstate(
volatile struct psif_qp_core *ptr,
enum psif_migration data)
{
/* group=6 shift=50 bits=2 */
}
{
/* group=6 shift=50 bits=2 */
}
/* This is an IB over IB QP. */
static inline void set_psif_qp_core__ipoib_enable(
volatile struct psif_qp_core *ptr,
{
/* group=6 shift=53 bits=1 */
}
{
/* group=6 shift=53 bits=1 */
}
/* IB defined capability enable for receiving Atomic operations. */
static inline void set_psif_qp_core__atomic_enable(
volatile struct psif_qp_core *ptr,
{
/* group=6 shift=61 bits=1 */
}
{
/* group=6 shift=61 bits=1 */
}
/* IB defined capability enable for receiving RDMA WR. */
static inline void set_psif_qp_core__rdma_wr_enable(
volatile struct psif_qp_core *ptr,
{
/* group=6 shift=62 bits=1 */
}
{
/* group=6 shift=62 bits=1 */
}
/* IB defined capability enable for receiving RDMA RD. */
static inline void set_psif_qp_core__rdma_rd_enable(
volatile struct psif_qp_core *ptr,
{
/* group=6 shift=63 bits=1 */
}
{
/* group=6 shift=63 bits=1 */
}
/*
* Transmit packet sequence number. Read and updated by tsu_dma before
* sending packets to tsu_ibpb and tsu_cmpl.
*/
static inline void set_psif_qp_core__xmit_psn(
volatile struct psif_qp_core *ptr,
{
/* group=7 shift=0 bits=24 */
}
{
/* group=7 shift=0 bits=24 */
}
/*
* TSU Service Level used to decide the TSU VL for requests associated with
* this QP.
*/
static inline void set_psif_qp_core__tsl(
volatile struct psif_qp_core *ptr,
{
/* group=7 shift=55 bits=4 */
}
{
/* group=7 shift=55 bits=4 */
}
/*
* Maximum number of outstanding read or atomic requests allowed by the
* remote HCA. Initialized by software.
*/
static inline void set_psif_qp_core__max_outstanding(
volatile struct psif_qp_core *ptr,
{
/* group=7 shift=59 bits=5 */
}
{
/* group=7 shift=59 bits=5 */
}
/*
 * NOTE(review): remaining psif_qp_core accessors, same truncation pattern
 * as the rest of this extraction (missing value-parameter lines, dropped
 * getter signatures leaving orphan brace blocks, emptied bodies). A few
 * pairs here kept both signatures (transport_type,
 * last_received_outstanding_msn) but still have empty bodies — the getters
 * are missing their return statements. Field placement is given by the
 * group/shift/bits comments. Regenerate rather than hand-repairing.
 */
/* Send Queue RNR retry count initialization value. */
static inline void set_psif_qp_core__rnr_retry_init(
volatile struct psif_qp_core *ptr,
{
/* group=8 shift=32 bits=3 */
}
{
/* group=8 shift=32 bits=3 */
}
/*
* Retry counter associated with RNR NAK retries. If it expires, a path
* migration will be attempted if it is armed, or the QP will go to error
* state.
*/
static inline void set_psif_qp_core__rnr_retry_count(
volatile struct psif_qp_core *ptr,
{
/* group=8 shift=35 bits=3 */
}
{
/* group=8 shift=35 bits=3 */
}
/*
* When set, RQS should only check that the orig_checksum is equal to magic
* number. When not set, RQS should perform the checksum check towards the
* checksum in the psif_wr.
*/
static inline void set_psif_qp_core__no_checksum(
volatile struct psif_qp_core *ptr,
{
/* group=8 shift=39 bits=1 */
}
{
/* group=8 shift=39 bits=1 */
}
/*
* Transport type of the QP (RC, UC, UD, XRC, MANSP1). MANSP1 is set for
* privileged QPs.
*/
static inline void set_psif_qp_core__transport_type(
volatile struct psif_qp_core *ptr,
enum psif_qp_trans data)
{
/* group=9 shift=0 bits=3 */
}
static inline enum psif_qp_trans get_psif_qp_core__transport_type(volatile struct psif_qp_core *ptr)
{
/* group=9 shift=0 bits=3 */
}
/*
* This is an index to completion queue descriptor. The descriptor points to
* a receive completion queue, which may or may not be the same as the send
* completion queue. For XRC QPs, this field is written by the CQ descriptor
* received by the XRCSRQ on the first packet. This way we don't need to look
* up the XRCSRQ for every packet. of the message.
*/
static inline void set_psif_qp_core__rcv_cq_indx(
volatile struct psif_qp_core *ptr,
{
/* group=9 shift=8 bits=24 */
}
{
/* group=9 shift=8 bits=24 */
}
/*
* Number of bytes received of in progress RDMA Write or SEND. The data
* should be added to the msg_length.
*/
static inline void set_psif_qp_core__bytes_received(
volatile struct psif_qp_core *ptr,
{
/* group=9 shift=32 bits=32 */
}
{
/* group=9 shift=32 bits=32 */
}
/* This QP is running IP over IB. */
static inline void set_psif_qp_core__ipoib(
volatile struct psif_qp_core *ptr,
{
/* group=10 shift=5 bits=1 */
}
{
/* group=10 shift=5 bits=1 */
}
/*
* Combined 'Last Received MSN' and 'Last Outstanding MSN', used to maintain
* 'spin set floor' and indicate 'all retries completed', respectively.
*/
static inline void set_psif_qp_core__last_received_outstanding_msn(
volatile struct psif_qp_core *ptr,
{
/* group=11 shift=0 bits=16 */
}
static inline u16 get_psif_qp_core__last_received_outstanding_msn(volatile struct psif_qp_core *ptr)
{
/* group=11 shift=0 bits=16 */
}
static inline void set_psif_qp_core__path_mtu(
volatile struct psif_qp_core *ptr,
enum psif_path_mtu data)
{
/* group=13 shift=4 bits=3 */
}
{
/* group=13 shift=4 bits=3 */
}
/* This PSN is committed - ACKs sent will contain this PSN. */
static inline void set_psif_qp_core__committed_received_psn(
volatile struct psif_qp_core *ptr,
{
/* group=13 shift=8 bits=24 */
}
{
/* group=13 shift=8 bits=24 */
}
/*
* Message sequence number used in AETH when sending ACKs. The number is
* incremented every time a new inbound message is processed.
*/
static inline void set_psif_qp_core__msn(
volatile struct psif_qp_core *ptr,
{
/* group=14 shift=0 bits=24 */
}
{
/* group=14 shift=0 bits=24 */
}
/*
* This is an index to send completion queue descriptor. The descriptor
* points to a send completion queue, which may or may not be the same as the
* send completion queue.
*/
static inline void set_psif_qp_core__send_cq_indx(
volatile struct psif_qp_core *ptr,
{
/* group=14 shift=24 bits=24 */
}
{
/* group=14 shift=24 bits=24 */
}
/*
* Committed MSN - the MSN of the newest committed request for this QP. Only
* the bottom 16 bits of the MSN is used.
*/
static inline void set_psif_qp_core__last_committed_msn(
volatile struct psif_qp_core *ptr,
{
/* group=14 shift=48 bits=16 */
}
{
/* group=14 shift=48 bits=16 */
}
static inline void set_psif_qp_core__srq_pd(
volatile struct psif_qp_core *ptr,
{
/* group=15 shift=0 bits=24 */
}
{
/* group=15 shift=0 bits=24 */
}
/*
 * NOTE(review): psif_qp_path accessors, truncated like the rest of this
 * extraction — setters missing value-parameter lines (except the surviving
 * enum-typed ones: loopback, use_grh), getter signatures dropped leaving
 * orphan brace blocks, and all bodies emptied of bit manipulation. Field
 * placement is given by the group/shift/bits comments. Regenerate rather
 * than hand-repairing.
 */
static inline void set_psif_qp_path__remote_gid_0(
volatile struct psif_qp_path *ptr,
{
/* group=0 shift=0 bits=64 */
}
{
/* group=0 shift=0 bits=64 */
}
static inline void set_psif_qp_path__remote_gid_1(
volatile struct psif_qp_path *ptr,
{
/* group=1 shift=0 bits=64 */
}
{
/* group=1 shift=0 bits=64 */
}
static inline void set_psif_qp_path__remote_lid(
volatile struct psif_qp_path *ptr,
{
/* group=2 shift=0 bits=16 */
}
{
/* group=2 shift=0 bits=16 */
}
static inline void set_psif_qp_path__port(
volatile struct psif_qp_path *ptr,
{
/* group=2 shift=17 bits=1 */
}
{
/* group=2 shift=17 bits=1 */
}
static inline void set_psif_qp_path__loopback(
volatile struct psif_qp_path *ptr,
enum psif_loopback data)
{
/* group=2 shift=18 bits=1 */
}
{
/* group=2 shift=18 bits=1 */
}
static inline void set_psif_qp_path__use_grh(
volatile struct psif_qp_path *ptr,
enum psif_use_grh data)
{
/* group=2 shift=19 bits=1 */
}
{
/* group=2 shift=19 bits=1 */
}
static inline void set_psif_qp_path__sl(
volatile struct psif_qp_path *ptr,
{
/* group=2 shift=20 bits=4 */
}
{
/* group=2 shift=20 bits=4 */
}
static inline void set_psif_qp_path__hoplmt(
volatile struct psif_qp_path *ptr,
{
/* group=2 shift=28 bits=8 */
}
{
/* group=2 shift=28 bits=8 */
}
static inline void set_psif_qp_path__flowlabel(
volatile struct psif_qp_path *ptr,
{
/* group=2 shift=44 bits=20 */
}
{
/* group=2 shift=44 bits=20 */
}
static inline void set_psif_qp_path__local_ack_timeout(
volatile struct psif_qp_path *ptr,
{
/* group=3 shift=27 bits=5 */
}
{
/* group=3 shift=27 bits=5 */
}
static inline void set_psif_qp_path__ipd(
volatile struct psif_qp_path *ptr,
{
/* group=3 shift=32 bits=8 */
}
{
/* group=3 shift=32 bits=8 */
}
/*
* This is the LID path bits. This is used by tsu_ibpb when generating the
* SLID in the packet, and it is used by tsu_rcv when checking the DLID.
*/
static inline void set_psif_qp_path__local_lid_path(
volatile struct psif_qp_path *ptr,
{
/* group=3 shift=48 bits=7 */
}
{
/* group=3 shift=48 bits=7 */
}
static inline void set_psif_qp_path__pkey_indx(
volatile struct psif_qp_path *ptr,
{
/* group=3 shift=55 bits=9 */
}
{
/* group=3 shift=55 bits=9 */
}
/*
 * NOTE(review): psif_key accessors, truncated — here the setters lost their
 * "volatile struct psif_key *ptr," line (the enum value-parameter lines for
 * lkey_state/rkey_state survived, while length/mmu_context/base_addr lost
 * their whole parameter lists), getter signatures were dropped (orphan brace
 * blocks), and all bodies were emptied. Field placement is given by the
 * group/shift/bits comments. Regenerate rather than hand-repairing.
 */
/* L-key state for this DMA validation entry */
static inline void set_psif_key__lkey_state(
enum psif_dma_vt_key_states data)
{
/* group=0 shift=60 bits=2 */
}
{
/* group=0 shift=60 bits=2 */
}
/* R-key state for this DMA validation entry */
static inline void set_psif_key__rkey_state(
enum psif_dma_vt_key_states data)
{
/* group=0 shift=62 bits=2 */
}
{
/* group=0 shift=62 bits=2 */
}
/* Length of memory region this validation entry is associated with. */
static inline void set_psif_key__length(
{
/* group=1 shift=0 bits=64 */
}
{
/* group=1 shift=0 bits=64 */
}
static inline void set_psif_key__mmu_context(
{
/* group=2 shift=0 bits=64 */
}
{
/* group=2 shift=0 bits=64 */
}
static inline void set_psif_key__base_addr(
{
/* group=3 shift=0 bits=64 */
}
{
/* group=3 shift=0 bits=64 */
}
/*
 * NOTE(review): truncated accessors for psif_eq_entry.seq_num (bits [31:0]
 * of word 7). Setter is missing its value-parameter line; the orphan brace
 * block is the getter's remains. Regenerate rather than hand-repairing.
 */
/* sequence number for sanity checking */
static inline void set_psif_eq_entry__seq_num(
volatile struct psif_eq_entry *ptr,
{
/* group=7 shift=0 bits=32 */
}
{
/* group=7 shift=0 bits=32 */
}
/*
 * NOTE(review): EPSC mailbox request/response accessors. The opcode pairs
 * kept both signatures, but their getters retained only a bare
 * "return((enum ...)(*pte));" — the declaration of pte and the byte-order
 * conversion / shift / mask extraction lines are missing, so these do not
 * compile and would not extract the documented bit field even if they did.
 * The seq_num pairs show the same truncation as the rest of this file.
 * Regenerate rather than hand-repairing.
 */
static inline void set_psif_epsc_csr_rsp__opcode(
volatile struct psif_epsc_csr_rsp *ptr,
enum psif_epsc_csr_opcode data)
{
/* group=0 shift=48 bits=8 */
}
static inline enum psif_epsc_csr_opcode get_psif_epsc_csr_rsp__opcode(volatile struct psif_epsc_csr_rsp *ptr)
{
/* group=0 shift=48 bits=8 */
return((enum psif_epsc_csr_opcode)(*pte));
}
/* Sequence number from request */
static inline void set_psif_epsc_csr_rsp__seq_num(
volatile struct psif_epsc_csr_rsp *ptr,
{
/* group=3 shift=0 bits=64 */
}
{
/* group=3 shift=0 bits=64 */
}
/* Sequence number - included in response */
static inline void set_psif_epsc_csr_req__seq_num(
volatile struct psif_epsc_csr_req *ptr,
{
/* group=0 shift=32 bits=16 */
}
{
/* group=0 shift=32 bits=16 */
}
static inline void set_psif_epsc_csr_req__opcode(
volatile struct psif_epsc_csr_req *ptr,
enum psif_epsc_csr_opcode data)
{
/* group=0 shift=56 bits=8 */
}
static inline enum psif_epsc_csr_opcode get_psif_epsc_csr_req__opcode(volatile struct psif_epsc_csr_req *ptr)
{
/* group=0 shift=56 bits=8 */
return((enum psif_epsc_csr_opcode)(*pte));
}
/*
 * NOTE(review): completion-queue accessors (psif_cq_sw, psif_cq_hw,
 * psif_cq_entry), truncated like the rest of this extraction — setters
 * missing value-parameter lines (enum-typed ones survived), getter
 * signatures dropped leaving orphan brace blocks, bodies emptied. The
 * opcode/status getter remains still carry a bare "return((enum ...)(*pte));"
 * with pte undeclared and no bit extraction. Field placement is given by
 * the group/shift/bits comments. Regenerate rather than hand-repairing.
 */
/* Index to completion elements added by SW. */
static inline void set_psif_cq_sw__head_indx(
volatile struct psif_cq_sw *ptr,
{
/* group=0 shift=32 bits=32 */
}
{
/* group=0 shift=32 bits=32 */
}
/*
* EPS-A core number completions are forwarded to if the proxy_enabled bit is
* set.
*/
static inline void set_psif_cq_hw__eps_core(
volatile struct psif_cq_hw *ptr,
enum psif_eps_a_core data)
{
/* group=0 shift=52 bits=2 */
}
{
/* group=0 shift=52 bits=2 */
}
/*
* If set, this completion queue is proxy enabled and should send completions
* to EPS core indicated by the eps_core field.
*/
static inline void set_psif_cq_hw__proxy_en(
volatile struct psif_cq_hw *ptr,
{
/* group=0 shift=54 bits=1 */
}
{
/* group=0 shift=54 bits=1 */
}
/* The descriptor is valid. */
static inline void set_psif_cq_hw__valid(
volatile struct psif_cq_hw *ptr,
{
/* group=0 shift=60 bits=1 */
}
{
/* group=0 shift=60 bits=1 */
}
/*
* VA or PA of the base of the completion queue. If PA the MMU context above
* will be a bypass context. Updated by software. The head and tail pointers
* can be calculated by the following calculations: Address = base_ptr +
* (head * ($bits(completion_entry_t)/8 ) Head Pointer and Tail Pointer will
* use the same MMU context as the base, and all need to be VA from one
* address space, or all need to be PA. In typical use, to allow direct user
* access to the head and tail pointer VAs are used.
*/
static inline void set_psif_cq_hw__base_addr(
volatile struct psif_cq_hw *ptr,
{
/* group=2 shift=0 bits=64 */
}
{
/* group=2 shift=0 bits=64 */
}
/* Index to completion elements to be consumed by HW. */
static inline void set_psif_cq_hw__tail_indx(
volatile struct psif_cq_hw *ptr,
{
/* group=3 shift=32 bits=32 */
}
{
/* group=3 shift=32 bits=32 */
}
/*
* Work queue completion ID. For receive completions this is the entry number
* in the receive queue and the receive queue descriptor index. For send
* completions this is the sq_sequence number.
*/
static inline void set_psif_cq_entry__wc_id(
volatile struct psif_cq_entry *ptr,
{
/* group=0 shift=0 bits=64 */
}
{
/* group=0 shift=0 bits=64 */
}
static inline void set_psif_cq_entry__qp(
volatile struct psif_cq_entry *ptr,
{
/* group=1 shift=0 bits=24 */
}
{
/* group=1 shift=0 bits=24 */
}
static inline void set_psif_cq_entry__opcode(
volatile struct psif_cq_entry *ptr,
enum psif_wc_opcode data)
{
/* group=1 shift=24 bits=8 */
}
{
/* group=1 shift=24 bits=8 */
return((enum psif_wc_opcode)(*pte));
}
static inline void set_psif_cq_entry__status(
volatile struct psif_cq_entry *ptr,
enum psif_wc_status data)
{
/* group=2 shift=24 bits=8 */
}
{
/* group=2 shift=24 bits=8 */
return((enum psif_wc_status)(*pte));
}
/* sequence number for sanity checking */
static inline void set_psif_cq_entry__seq_num(
volatile struct psif_cq_entry *ptr,
{
/* group=7 shift=0 bits=32 */
}
{
/* group=7 shift=0 bits=32 */
}
/*
 * NOTE(review): psif_ah accessors, truncated — both setters lost their
 * entire parameter lists (including the "volatile struct psif_ah *ptr,"
 * line), getter signatures were dropped (orphan brace blocks), and bodies
 * were emptied. Field placement is given by the group/shift/bits comments.
 * Regenerate rather than hand-repairing.
 */
static inline void set_psif_ah__remote_lid(
{
/* group=2 shift=0 bits=16 */
}
{
/* group=2 shift=0 bits=16 */
}
static inline void set_psif_ah__sl(
{
/* group=2 shift=20 bits=4 */
}
{
/* group=2 shift=20 bits=4 */
}
#if defined(HOST_LITTLE_ENDIAN)
#elif defined(HOST_BIG_ENDIAN)
#else
#error "Could not determine byte order in psif_hw_setget.h !?"
#endif
#ifdef __cplusplus
}
#endif
#endif /* _PSIF_HW_SETGET_H */