NVIDIA DriveOS Linux NSR SDK API Reference
7.0.3.0 Release
Go to the documentation of this file.
/*
 * Legacy single-bit mask helpers: BIT(b) == (1 << b).  Guarded so that a
 * client may define NVIDIA_UNDEF_LEGACY_BIT_MACROS before inclusion to
 * avoid the short BIT* names, which are prone to collide with other headers.
 * NOTE(review): the matching #endif is not visible in this extraction.
 * Caution: BIT()/NVBIT() shift a signed int, so b == 31 shifts into the
 * sign bit; prefer the explicitly typed BIT32/BIT64 forms for wide bits.
 */
52 #if !defined(NVIDIA_UNDEF_LEGACY_BIT_MACROS)
56 #define BIT(b) (1<<(b))
57 #define BIT32(b) ((NvU32)1<<(b))
58 #define BIT64(b) ((NvU64)1<<(b))
/* Preferred NV-prefixed spellings of the same single-bit masks. */
66 #define NVBIT(b) (1<<(b))
67 #define NVBIT32(b) ((NvU32)1<<(b))
68 #define NVBIT64(b) ((NvU64)1<<(b))
/*
 * BIT_IDX_32(n): bit index (0..31) of the set bit in n, assuming exactly
 * one bit is set.  Branch-free binary search: each mask tests one bit of
 * the index and ORs in its weight.  Result is meaningless if zero or
 * multiple bits are set.
 */
72 #define BIT_IDX_32(n) \
73 ((((n) & 0xFFFF0000)? 0x10: 0) | \
74 (((n) & 0xFF00FF00)? 0x08: 0) | \
75 (((n) & 0xF0F0F0F0)? 0x04: 0) | \
76 (((n) & 0xCCCCCCCC)? 0x02: 0) | \
77 (((n) & 0xAAAAAAAA)? 0x01: 0) )
/*
 * BIT_IDX_64(n): 64-bit counterpart of BIT_IDX_32 — index (0..63) of the
 * single set bit in n; undefined for zero or multi-bit inputs.
 */
81 #define BIT_IDX_64(n) \
82 ((((n) & 0xFFFFFFFF00000000ULL)? 0x20: 0) | \
83 (((n) & 0xFFFF0000FFFF0000ULL)? 0x10: 0) | \
84 (((n) & 0xFF00FF00FF00FF00ULL)? 0x08: 0) | \
85 (((n) & 0xF0F0F0F0F0F0F0F0ULL)? 0x04: 0) | \
86 (((n) & 0xCCCCCCCCCCCCCCCCULL)? 0x02: 0) | \
87 (((n) & 0xAAAAAAAAAAAAAAAAULL)? 0x01: 0) )
/*
 * NOTE(review): body fragment of a highest-set-bit index macro — the doc
 * extraction dropped the enclosing #define and surrounding statements.
 * Each test adds the weight of the half-range that still contains a set
 * bit, accumulating floor(log2(n32)) into idx.
 */
94 if ((n32) & 0xFFFF0000) idx += 16; \
95 if ((n32) & 0xFF00FF00) idx += 8; \
96 if ((n32) & 0xF0F0F0F0) idx += 4; \
97 if ((n32) & 0xCCCCCCCC) idx += 2; \
98 if ((n32) & 0xAAAAAAAA) idx += 1; \
/*
 * DRF (Device, Register, Field) macros.  Hardware headers define a field
 * as  #define NV_<dev><reg>_<field>  extent:base  — a ternary-trick
 * encoding where (0?extent:base) evaluates to the base (low) bit and
 * (1?extent:base) to the extent (high) bit.
 * NOTE(review): DRF_ISBIT's replacement list was dropped by the doc
 * extraction; presumably it selects (bitval?drf) — confirm against the
 * shipped nvmisc.h.
 */
103 #ifndef _NVMISC_MACROS_H
105 #define DRF_ISBIT(bitval,drf) \
108 #define DEVICE_BASE(d) (0?d) // what's up with this name? totally non-parallel to the macros below
109 #define DEVICE_EXTENT(d) (1?d) // what's up with this name? totally non-parallel to the macros below
110 #define DRF_BASE(drf) (0?drf) // much better
111 #define DRF_EXTENT(drf) (1?drf) // much better
/* Shift, right-shift, mask (in-place at bit 0), full shifted mask and
 * width of a field within a 32-bit register. */
112 #define DRF_SHIFT(drf) ((DRF_ISBIT(0,drf)) % 32)
113 #define DRF_SHIFT_RT(drf) ((DRF_ISBIT(1,drf)) % 32)
114 #define DRF_MASK(drf) (0xFFFFFFFF>>(31-((DRF_ISBIT(1,drf)) % 32)+((DRF_ISBIT(0,drf)) % 32)))
115 #define DRF_SHIFTMASK(drf) (DRF_MASK(drf)<<(DRF_SHIFT(drf)))
116 #define DRF_SIZE(drf) (DRF_EXTENT(drf)-DRF_BASE(drf)+1)
/* Build a field from a named constant (DEF) or a number (NUM); extract a
 * field from a register value (VAL).  d/r/f/c are token-pasted onto NV. */
118 #define DRF_DEF(d,r,f,c) ((NV ## d ## r ## f ## c)<<DRF_SHIFT(NV ## d ## r ## f))
119 #define DRF_NUM(d,r,f,n) (((n)&DRF_MASK(NV ## d ## r ## f))<<DRF_SHIFT(NV ## d ## r ## f))
120 #define DRF_VAL(d,r,f,v) (((v)>>DRF_SHIFT(NV ## d ## r ## f))&DRF_MASK(NV ## d ## r ## f))
/* Sign-extend an extracted field: XOR/subtract of the top bit of the
 * field propagates its sign through the wider integer. */
124 #define DRF_VAL_SIGNED(d,r,f,v) (((DRF_VAL(d,r,f,v) ^ (NVBIT(DRF_SIZE(NV ## d ## r ## f)-1)))) - (NVBIT(DRF_SIZE(NV ## d ## r ## f)-1)))
/* Indexed variants: the field symbol is a function-like macro NV...f(i)
 * (or f(i,o)) that yields a per-index extent:base pair. */
125 #define DRF_IDX_DEF(d,r,f,i,c) ((NV ## d ## r ## f ## c)<<DRF_SHIFT(NV##d##r##f(i)))
126 #define DRF_IDX_OFFSET_DEF(d,r,f,i,o,c) ((NV ## d ## r ## f ## c)<<DRF_SHIFT(NV##d##r##f(i,o)))
127 #define DRF_IDX_NUM(d,r,f,i,n) (((n)&DRF_MASK(NV##d##r##f(i)))<<DRF_SHIFT(NV##d##r##f(i)))
128 #define DRF_IDX_VAL(d,r,f,i,v) (((v)>>DRF_SHIFT(NV##d##r##f(i)))&DRF_MASK(NV##d##r##f(i)))
129 #define DRF_IDX_OFFSET_VAL(d,r,f,i,o,v) (((v)>>DRF_SHIFT(NV##d##r##f(i,o)))&DRF_MASK(NV##d##r##f(i,o)))
/* Fixed-point read: integer part x scaled by z plus fractional part y
 * scaled by z / 2^width(y). */
131 #define DRF_VAL_FRAC(d,r,x,y,v,z) ((DRF_VAL(d,r,x,v)*z) + ((DRF_VAL(d,r,y,v)*z) / (1<<DRF_SIZE(NV##d##r##y))))
/*
 * 64-bit variants of the DRF machinery for registers/fields wider than
 * 32 bits.  Same extent:base encoding; all arithmetic done in NvU64.
 */
136 #define DRF_SHIFT64(drf) ((DRF_ISBIT(0,drf)) % 64)
137 #define DRF_MASK64(drf) (NV_U64_MAX>>(63-((DRF_ISBIT(1,drf)) % 64)+((DRF_ISBIT(0,drf)) % 64)))
138 #define DRF_SHIFTMASK64(drf) (DRF_MASK64(drf)<<(DRF_SHIFT64(drf)))
/* 64-bit build/extract, plain and indexed, mirroring the 32-bit set. */
140 #define DRF_DEF64(d,r,f,c) (((NvU64)(NV ## d ## r ## f ## c))<<DRF_SHIFT64(NV ## d ## r ## f))
141 #define DRF_NUM64(d,r,f,n) ((((NvU64)(n))&DRF_MASK64(NV ## d ## r ## f))<<DRF_SHIFT64(NV ## d ## r ## f))
142 #define DRF_VAL64(d,r,f,v) ((((NvU64)(v))>>DRF_SHIFT64(NV ## d ## r ## f))&DRF_MASK64(NV ## d ## r ## f))
144 #define DRF_VAL_SIGNED64(d,r,f,v) (((DRF_VAL64(d,r,f,v) ^ (NVBIT64(DRF_SIZE(NV ## d ## r ## f)-1)))) - (NVBIT64(DRF_SIZE(NV ## d ## r ## f)-1)))
145 #define DRF_IDX_DEF64(d,r,f,i,c) (((NvU64)(NV ## d ## r ## f ## c))<<DRF_SHIFT64(NV##d##r##f(i)))
146 #define DRF_IDX_OFFSET_DEF64(d,r,f,i,o,c) ((NvU64)(NV ## d ## r ## f ## c)<<DRF_SHIFT64(NV##d##r##f(i,o)))
147 #define DRF_IDX_NUM64(d,r,f,i,n) ((((NvU64)(n))&DRF_MASK64(NV##d##r##f(i)))<<DRF_SHIFT64(NV##d##r##f(i)))
148 #define DRF_IDX_VAL64(d,r,f,i,v) ((((NvU64)(v))>>DRF_SHIFT64(NV##d##r##f(i)))&DRF_MASK64(NV##d##r##f(i)))
149 #define DRF_IDX_OFFSET_VAL64(d,r,f,i,o,v) (((NvU64)(v)>>DRF_SHIFT64(NV##d##r##f(i,o)))&DRF_MASK64(NV##d##r##f(i,o)))
/* Read-modify-write: clear the field in v, then OR in the new value.
 * These are pure expressions — the caller stores the result. */
151 #define FLD_SET_DRF64(d,r,f,c,v) (((NvU64)(v) & ~DRF_SHIFTMASK64(NV##d##r##f)) | DRF_DEF64(d,r,f,c))
152 #define FLD_SET_DRF_NUM64(d,r,f,n,v) ((((NvU64)(v)) & ~DRF_SHIFTMASK64(NV##d##r##f)) | DRF_NUM64(d,r,f,n))
153 #define FLD_IDX_SET_DRF64(d,r,f,i,c,v) (((NvU64)(v) & ~DRF_SHIFTMASK64(NV##d##r##f(i))) | DRF_IDX_DEF64(d,r,f,i,c))
154 #define FLD_IDX_OFFSET_SET_DRF64(d,r,f,i,o,c,v) (((NvU64)(v) & ~DRF_SHIFTMASK64(NV##d##r##f(i,o))) | DRF_IDX_OFFSET_DEF64(d,r,f,i,o,c))
155 #define FLD_IDX_SET_DRF_DEF64(d,r,f,i,c,v) (((NvU64)(v) & ~DRF_SHIFTMASK64(NV##d##r##f(i))) | DRF_IDX_DEF64(d,r,f,i,c))
156 #define FLD_IDX_SET_DRF_NUM64(d,r,f,i,n,v) (((NvU64)(v) & ~DRF_SHIFTMASK64(NV##d##r##f(i))) | DRF_IDX_NUM64(d,r,f,i,n))
157 #define FLD_SET_DRF_IDX64(d,r,f,c,i,v) (((NvU64)(v) & ~DRF_SHIFTMASK64(NV##d##r##f)) | DRF_DEF64(d,r,f,c(i)))
/* Field tests: equality against a named constant, bitwise AND probe, or
 * equality against a raw number. */
159 #define FLD_TEST_DRF64(d,r,f,c,v) (DRF_VAL64(d, r, f, v) == NV##d##r##f##c)
160 #define FLD_TEST_DRF_AND64(d,r,f,c,v) (DRF_VAL64(d, r, f, v) & NV##d##r##f##c)
161 #define FLD_TEST_DRF_NUM64(d,r,f,n,v) (DRF_VAL64(d, r, f, v) == n)
162 #define FLD_IDX_TEST_DRF64(d,r,f,i,c,v) (DRF_IDX_VAL64(d, r, f, i, v) == NV##d##r##f##c)
163 #define FLD_IDX_OFFSET_TEST_DRF64(d,r,f,i,o,c,v) (DRF_IDX_OFFSET_VAL64(d, r, f, i, o, v) == NV##d##r##f##c)
/*
 * 32-bit field read-modify-write helpers: clear the field in v, OR in the
 * replacement.  All except FLD_SET_DRF_DEF are pure expressions; note
 * FLD_SET_DRF_DEF assigns back into v (v = ...) as a side effect.
 */
169 #define FLD_SET_DRF(d,r,f,c,v) ((v & ~DRF_SHIFTMASK(NV##d##r##f)) | DRF_DEF(d,r,f,c))
170 #define FLD_SET_DRF_NUM(d,r,f,n,v) ((v & ~DRF_SHIFTMASK(NV##d##r##f)) | DRF_NUM(d,r,f,n))
172 #define FLD_SET_DRF_DEF(d,r,f,c,v) (v = (v & ~DRF_SHIFTMASK(NV##d##r##f)) | DRF_DEF(d,r,f,c))
173 #define FLD_IDX_SET_DRF(d,r,f,i,c,v) ((v & ~DRF_SHIFTMASK(NV##d##r##f(i))) | DRF_IDX_DEF(d,r,f,i,c))
174 #define FLD_IDX_OFFSET_SET_DRF(d,r,f,i,o,c,v) ((v & ~DRF_SHIFTMASK(NV##d##r##f(i,o))) | DRF_IDX_OFFSET_DEF(d,r,f,i,o,c))
175 #define FLD_IDX_SET_DRF_DEF(d,r,f,i,c,v) ((v & ~DRF_SHIFTMASK(NV##d##r##f(i))) | DRF_IDX_DEF(d,r,f,i,c))
176 #define FLD_IDX_SET_DRF_NUM(d,r,f,i,n,v) ((v & ~DRF_SHIFTMASK(NV##d##r##f(i))) | DRF_IDX_NUM(d,r,f,i,n))
177 #define FLD_SET_DRF_IDX(d,r,f,c,i,v) ((v & ~DRF_SHIFTMASK(NV##d##r##f)) | DRF_DEF(d,r,f,c(i)))
/* Field tests against named constants or numbers (32-bit). */
179 #define FLD_TEST_DRF(d,r,f,c,v) ((DRF_VAL(d, r, f, v) == NV##d##r##f##c))
180 #define FLD_TEST_DRF_AND(d,r,f,c,v) ((DRF_VAL(d, r, f, v) & NV##d##r##f##c))
181 #define FLD_TEST_DRF_NUM(d,r,f,n,v) ((DRF_VAL(d, r, f, v) == n))
182 #define FLD_IDX_TEST_DRF(d,r,f,i,c,v) ((DRF_IDX_VAL(d, r, f, i, v) == NV##d##r##f##c))
183 #define FLD_IDX_OFFSET_TEST_DRF(d,r,f,i,o,c,v) ((DRF_IDX_OFFSET_VAL(d, r, f, i, o, v) == NV##d##r##f##c))
/* REF_ forms take the fully-spelled field symbol (drf) instead of the
 * separate d,r,f tokens. */
185 #define REF_DEF(drf,d) (((drf ## d)&DRF_MASK(drf))<<DRF_SHIFT(drf))
186 #define REF_VAL(drf,v) (((v)>>DRF_SHIFT(drf))&DRF_MASK(drf))
187 #define REF_NUM(drf,n) (((n)&DRF_MASK(drf))<<DRF_SHIFT(drf))
188 #define FLD_TEST_REF(drf,c,v) (REF_VAL(drf, v) == drf##c)
189 #define FLD_TEST_REF_AND(drf,c,v) (REF_VAL(drf, v) & drf##c)
190 #define FLD_SET_REF_NUM(drf,n,v) (((v) & ~DRF_SHIFTMASK(drf)) | REF_NUM(drf,n))
/* CR_ variants paste onto a CR prefix instead of NV. */
192 #define CR_DRF_DEF(d,r,f,c) ((CR ## d ## r ## f ## c)<<DRF_SHIFT(CR ## d ## r ## f))
193 #define CR_DRF_NUM(d,r,f,n) (((n)&DRF_MASK(CR ## d ## r ## f))<<DRF_SHIFT(CR ## d ## r ## f))
194 #define CR_DRF_VAL(d,r,f,v) (((v)>>DRF_SHIFT(CR ## d ## r ## f))&DRF_MASK(CR ## d ## r ## f))
/*
 * Multi-word (MW) field macros: fields live in an array of 32-bit words
 * and are written as MW(extent:base), with absolute bit positions that
 * may exceed 31 and may span a word boundary.
 */
206 #define DRF_EXPAND_MW(drf) drf // used to turn "MW(a:b)" into "a:b"
207 #define DRF_PICK_MW(drf,v) (v?DRF_EXPAND_##drf) // picks low or high bits
208 #define DRF_WORD_MW(drf) (DRF_PICK_MW(drf,0)/32) // which word in a multi-word array
209 #define DRF_BASE_MW(drf) (DRF_PICK_MW(drf,0)%32) // which start bit in the selected word?
210 #define DRF_EXTENT_MW(drf) (DRF_PICK_MW(drf,1)%32) // which end bit in the selected word
211 #define DRF_SHIFT_MW(drf) (DRF_PICK_MW(drf,0)%32)
212 #define DRF_MASK_MW(drf) (0xFFFFFFFF>>((31-(DRF_EXTENT_MW(drf))+(DRF_BASE_MW(drf)))%32))
213 #define DRF_SHIFTMASK_MW(drf) ((DRF_MASK_MW(drf))<<(DRF_SHIFT_MW(drf)))
214 #define DRF_SIZE_MW(drf) (DRF_EXTENT_MW(drf)-DRF_BASE_MW(drf)+1)
/* Build values for a field that fits in one word. */
216 #define DRF_DEF_MW(d,r,f,c) ((NV##d##r##f##c) << DRF_SHIFT_MW(NV##d##r##f))
217 #define DRF_NUM_MW(d,r,f,n) (((n)&DRF_MASK_MW(NV##d##r##f))<<DRF_SHIFT_MW(NV##d##r##f))
/* Extraction: DRF_SPANS tells whether the field crosses a 32-bit word
 * boundary; the 2-word reader stitches the low and high parts together
 * with DRF_MERGE_SHIFT. */
221 #define DRF_VAL_MW_1WORD(d,r,f,v) ((((v)[DRF_WORD_MW(NV##d##r##f)])>>DRF_SHIFT_MW(NV##d##r##f))&DRF_MASK_MW(NV##d##r##f))
222 #define DRF_SPANS(drf) ((DRF_PICK_MW(drf,0)/32) != (DRF_PICK_MW(drf,1)/32))
223 #define DRF_WORD_MW_LOW(drf) (DRF_PICK_MW(drf,0)/32)
224 #define DRF_WORD_MW_HIGH(drf) (DRF_PICK_MW(drf,1)/32)
225 #define DRF_MASK_MW_LOW(drf) (0xFFFFFFFF)
226 #define DRF_MASK_MW_HIGH(drf) (0xFFFFFFFF>>(31-(DRF_EXTENT_MW(drf))))
227 #define DRF_SHIFT_MW_LOW(drf) (DRF_PICK_MW(drf,0)%32)
228 #define DRF_SHIFT_MW_HIGH(drf) (0)
229 #define DRF_MERGE_SHIFT(drf) ((32-((DRF_PICK_MW(drf,0)%32)))%32)
230 #define DRF_VAL_MW_2WORD(d,r,f,v) (((((v)[DRF_WORD_MW_LOW(NV##d##r##f)])>>DRF_SHIFT_MW_LOW(NV##d##r##f))&DRF_MASK_MW_LOW(NV##d##r##f)) | \
231 (((((v)[DRF_WORD_MW_HIGH(NV##d##r##f)])>>DRF_SHIFT_MW_HIGH(NV##d##r##f))&DRF_MASK_MW_HIGH(NV##d##r##f)) << DRF_MERGE_SHIFT(NV##d##r##f)))
232 #define DRF_VAL_MW(d,r,f,v) ( DRF_SPANS(NV##d##r##f) ? DRF_VAL_MW_2WORD(d,r,f,v) : DRF_VAL_MW_1WORD(d,r,f,v) )
/* Indexed MW variants: NV...f(i) yields a per-index MW(extent:base). */
234 #define DRF_IDX_DEF_MW(d,r,f,i,c) ((NV##d##r##f##c)<<DRF_SHIFT_MW(NV##d##r##f(i)))
235 #define DRF_IDX_NUM_MW(d,r,f,i,n) (((n)&DRF_MASK_MW(NV##d##r##f(i)))<<DRF_SHIFT_MW(NV##d##r##f(i)))
236 #define DRF_IDX_VAL_MW(d,r,f,i,v) ((((v)[DRF_WORD_MW(NV##d##r##f(i))])>>DRF_SHIFT_MW(NV##d##r##f(i)))&DRF_MASK_MW(NV##d##r##f(i)))
/*
 * FLD_IDX_OR_DRF_DEF: OR the named constant into every index of an
 * indexed field, looping idx over NV<d><r><f><s> entries.
 * NOTE(review): the doc extraction dropped this macro's enclosing braces
 * and loop-body punctuation; only the head, loop and OR line survive.
 */
242 #define FLD_IDX_OR_DRF_DEF(d,r,f,c,s,v) \
245 for (idx = 0; idx < (NV ## d ## r ## f ## s); ++idx)\
247 v |= DRF_IDX_DEF(d,r,f,idx,c); \
252 #define FLD_MERGE_MW(drf,n,v) (((v)[DRF_WORD_MW(drf)] & ~DRF_SHIFTMASK_MW(drf)) | n)
253 #define FLD_ASSIGN_MW(drf,n,v) ((v)[DRF_WORD_MW(drf)] = FLD_MERGE_MW(drf, n, v))
254 #define FLD_IDX_MERGE_MW(drf,i,n,v) (((v)[DRF_WORD_MW(drf(i))] & ~DRF_SHIFTMASK_MW(drf(i))) | n)
255 #define FLD_IDX_ASSIGN_MW(drf,i,n,v) ((v)[DRF_WORD_MW(drf(i))] = FLD_MERGE_MW(drf(i), n, v))
/* Multi-word field setters.  Note the asymmetry: FLD_SET_DRF_MW and
 * FLD_IDX_SET_DRF_MW only MERGE (pure expression, caller stores), while
 * the _NUM/_DEF forms ASSIGN back into the word array. */
257 #define FLD_SET_DRF_MW(d,r,f,c,v) FLD_MERGE_MW(NV##d##r##f, DRF_DEF_MW(d,r,f,c), v)
258 #define FLD_SET_DRF_NUM_MW(d,r,f,n,v) FLD_ASSIGN_MW(NV##d##r##f, DRF_NUM_MW(d,r,f,n), v)
259 #define FLD_SET_DRF_DEF_MW(d,r,f,c,v) FLD_ASSIGN_MW(NV##d##r##f, DRF_DEF_MW(d,r,f,c), v)
260 #define FLD_IDX_SET_DRF_MW(d,r,f,i,c,v) FLD_IDX_MERGE_MW(NV##d##r##f, i, DRF_IDX_DEF_MW(d,r,f,i,c), v)
261 #define FLD_IDX_SET_DRF_DEF_MW(d,r,f,i,c,v) FLD_IDX_MERGE_MW(NV##d##r##f, i, DRF_IDX_DEF_MW(d,r,f,i,c), v)
262 #define FLD_IDX_SET_DRF_NUM_MW(d,r,f,i,n,v) FLD_IDX_ASSIGN_MW(NV##d##r##f, i, DRF_IDX_NUM_MW(d,r,f,i,n), v)
/* Multi-word field tests. */
264 #define FLD_TEST_DRF_MW(d,r,f,c,v) ((DRF_VAL_MW(d, r, f, v) == NV##d##r##f##c))
265 #define FLD_TEST_DRF_NUM_MW(d,r,f,n,v) ((DRF_VAL_MW(d, r, f, v) == n))
266 #define FLD_IDX_TEST_DRF_MW(d,r,f,i,c,v) ((DRF_IDX_VAL_MW(d, r, f, i, v) == NV##d##r##f##c))
/*
 * DRF_VAL_BS: byte-stream extraction — like DRF_VAL_MW but reads the
 * backing store byte-by-byte (see DRF_READ_*BYTE_BS below), so the
 * buffer needs no 32-bit alignment.
 */
268 #define DRF_VAL_BS(d,r,f,v) ( DRF_SPANS(NV##d##r##f) ? DRF_VAL_BS_2WORD(d,r,f,v) : DRF_VAL_BS_1WORD(d,r,f,v) )
/* ENG_ register accessors: read/write/test a field of an engine register
 * at base offset o, delegating to GPU_REG_RD32/WR32 and the GPU_DRF_*
 * shift/mask helpers (declared elsewhere). */
281 #define ENG_RD_REG(g,o,d,r) GPU_REG_RD32(g, ENG_REG##d(o,d,r))
282 #define ENG_WR_REG(g,o,d,r,v) GPU_REG_WR32(g, ENG_REG##d(o,d,r), v)
283 #define ENG_RD_DRF(g,o,d,r,f) ((GPU_REG_RD32(g, ENG_REG##d(o,d,r))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f))
284 #define ENG_WR_DRF_DEF(g,o,d,r,f,c) GPU_REG_WR32(g, ENG_REG##d(o,d,r),(GPU_REG_RD32(g,ENG_REG##d(o,d,r))&~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_DEF(d,r,f,c))
285 #define ENG_WR_DRF_NUM(g,o,d,r,f,n) GPU_REG_WR32(g, ENG_REG##d(o,d,r),(GPU_REG_RD32(g,ENG_REG##d(o,d,r))&~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_NUM(d,r,f,n))
286 #define ENG_TEST_DRF_DEF(g,o,d,r,f,c) (ENG_RD_DRF(g, o, d, r, f) == NV##d##r##f##c)
288 #define ENG_RD_IDX_DRF(g,o,d,r,f,i) ((GPU_REG_RD32(g, ENG_REG##d(o,d,r(i)))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f))
289 #define ENG_TEST_IDX_DRF_DEF(g,o,d,r,f,c,i) (ENG_RD_IDX_DRF(g, o, d, r, f, i) == NV##d##r##f##c)
291 #define ENG_IDX_RD_REG(g,o,d,i,r) GPU_REG_RD32(g, ENG_IDX_REG##d(o,d,i,r))
292 #define ENG_IDX_WR_REG(g,o,d,i,r,v) GPU_REG_WR32(g, ENG_IDX_REG##d(o,d,i,r), v)
294 #define ENG_IDX_RD_DRF(g,o,d,i,r,f) ((GPU_REG_RD32(g, ENG_IDX_REG##d(o,d,i,r))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f))
/* One-word / two-word byte-stream extraction used by DRF_VAL_BS. */
301 #define DRF_VAL_BS_1WORD(d,r,f,v) ((DRF_READ_1WORD_BS(d,r,f,v)>>DRF_SHIFT_MW(NV##d##r##f))&DRF_MASK_MW(NV##d##r##f))
302 #define DRF_VAL_BS_2WORD(d,r,f,v) (((DRF_READ_4BYTE_BS(NV##d##r##f,v)>>DRF_SHIFT_MW_LOW(NV##d##r##f))&DRF_MASK_MW_LOW(NV##d##r##f)) | \
303 (((DRF_READ_1WORD_BS_HIGH(d,r,f,v)>>DRF_SHIFT_MW_HIGH(NV##d##r##f))&DRF_MASK_MW_HIGH(NV##d##r##f)) << DRF_MERGE_SHIFT(NV##d##r##f)))
/*
 * Byte-stream word readers: assemble 1..4 bytes of the field's word from
 * an unaligned NvU8 buffer, little-endian (byte k shifted left 8*k).
 * Each N-byte reader builds on the (N-1)-byte one.
 */
305 #define DRF_READ_1BYTE_BS(drf,v) ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW(drf)*4]))
306 #define DRF_READ_2BYTE_BS(drf,v) (DRF_READ_1BYTE_BS(drf,v)| \
307 ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW(drf)*4+1])<<8))
308 #define DRF_READ_3BYTE_BS(drf,v) (DRF_READ_2BYTE_BS(drf,v)| \
309 ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW(drf)*4+2])<<16))
310 #define DRF_READ_4BYTE_BS(drf,v) (DRF_READ_3BYTE_BS(drf,v)| \
311 ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW(drf)*4+3])<<24))
/* Same readers, addressed at the field's high word (spanning fields). */
313 #define DRF_READ_1BYTE_BS_HIGH(drf,v) ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW_HIGH(drf)*4]))
314 #define DRF_READ_2BYTE_BS_HIGH(drf,v) (DRF_READ_1BYTE_BS_HIGH(drf,v)| \
315 ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW_HIGH(drf)*4+1])<<8))
316 #define DRF_READ_3BYTE_BS_HIGH(drf,v) (DRF_READ_2BYTE_BS_HIGH(drf,v)| \
317 ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW_HIGH(drf)*4+2])<<16))
318 #define DRF_READ_4BYTE_BS_HIGH(drf,v) (DRF_READ_3BYTE_BS_HIGH(drf,v)| \
319 ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW_HIGH(drf)*4+3])<<24))
/* NV_TWO_N_MINUS_ONE(n): 2^n - 1 for 0 <= n <= 64.  The shift is split
 * into n/2 and (n+1)/2 so that n == 64 never shifts a 64-bit value by
 * 64, which would be undefined behavior. */
325 #define NV_TWO_N_MINUS_ONE(n) (((1ULL<<(n/2))<<((n+1)/2))-1)
/* Read only as many bytes as the field's extent requires (1..4),
 * selected at compile time from DRF_EXTENT_MW. */
327 #define DRF_READ_1WORD_BS(d,r,f,v) \
328 ((DRF_EXTENT_MW(NV##d##r##f)<8)?DRF_READ_1BYTE_BS(NV##d##r##f,(v)): \
329 ((DRF_EXTENT_MW(NV##d##r##f)<16)?DRF_READ_2BYTE_BS(NV##d##r##f,(v)): \
330 ((DRF_EXTENT_MW(NV##d##r##f)<24)?DRF_READ_3BYTE_BS(NV##d##r##f,(v)): \
331 DRF_READ_4BYTE_BS(NV##d##r##f,(v)))))
/* Same byte-count selection for the high word of a spanning field. */
333 #define DRF_READ_1WORD_BS_HIGH(d,r,f,v) \
334 ((DRF_EXTENT_MW(NV##d##r##f)<8)?DRF_READ_1BYTE_BS_HIGH(NV##d##r##f,(v)): \
335 ((DRF_EXTENT_MW(NV##d##r##f)<16)?DRF_READ_2BYTE_BS_HIGH(NV##d##r##f,(v)): \
336 ((DRF_EXTENT_MW(NV##d##r##f)<24)?DRF_READ_3BYTE_BS_HIGH(NV##d##r##f,(v)): \
337 DRF_READ_4BYTE_BS_HIGH(NV##d##r##f,(v)))))
/* Binary-to-Gray-code conversion. */
339 #define BIN_2_GRAY(n) ((n)^((n)>>1))
/* Gray-to-binary for 64-bit values: statement sequence that modifies n
 * in place (not an expression — use only as a full statement). */
341 #define GRAY_2_BIN_64b(n) (n)^=(n)>>1; (n)^=(n)>>2; (n)^=(n)>>4; (n)^=(n)>>8; (n)^=(n)>>16; (n)^=(n)>>32;
/* Isolate the lowest set bit of x (0 if x == 0). */
343 #define LOWESTBIT(x) ( (x) & (((x)-1) ^ (x)) )
/* HIGHESTBIT: reduce n32 to its highest set bit.
 * NOTE(review): the extraction dropped this macro's enclosing braces and
 * the final shift; only the head and the index computation remain. */
345 #define HIGHESTBIT(n32) \
347 HIGHESTBITIDX_32(n32); \
350 #define ONEBITSET(x) ( (x) && (((x) & ((x)-1)) == 0) )
/* NUMSETBITS_32: in-place SWAR popcount of n32.
 * NOTE(review): the do/while wrapper lines were dropped by the doc
 * extraction; only the three reduction steps remain. */
353 #define NUMSETBITS_32(n32) \
355 n32 = n32 - ((n32 >> 1) & 0x55555555); \
356 n32 = (n32 & 0x33333333) + ((n32 >> 2) & 0x33333333); \
357 n32 = (((n32 + (n32 >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24; \
364 static NV_FORCEINLINE NvU32
/*
 * Classic parallel (SWAR) population count over 32 bits.
 * NOTE(review): the function name, parameter and brace lines were
 * dropped by the doc extraction; the Doxygen summary at the end of this
 * page identifies it as nvPopCount32(const NvU32 x).
 */
368 temp = temp - ((temp >> 1) & 0x55555555);
369 temp = (temp & 0x33333333) + ((temp >> 2) & 0x33333333);
370 temp = (((temp + (temp >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24;
377 static NV_FORCEINLINE NvU32
/* Same SWAR popcount widened to 64 bits (nvPopCount64 per the Doxygen
 * summary); final multiply gathers byte counts into the top byte,
 * shifted down by 56. */
381 temp = temp - ((temp >> 1) & 0x5555555555555555ull);
382 temp = (temp & 0x3333333333333333ull) + ((temp >> 2) & 0x3333333333333333ull);
383 temp = (temp + (temp >> 4)) & 0x0F0F0F0F0F0F0F0Full;
384 temp = (temp * 0x0101010101010101ull) >> 56;
399 static NV_FORCEINLINE NvU32
/*
 * NOTE(review): this region is heavily truncated by the doc extraction —
 * the inline function above (nvMaskPos32 per the Doxygen summary) and
 * the bodies of LOWESTBITIDX_32 / HIGHESTBITIDX_32 / ROUNDUP_POW2 are
 * missing; only macro heads and stray body lines remain.
 */
406 #define LOWESTBITIDX_32(n32) \
408 n32 = LOWESTBIT(n32); \
413 #define HIGHESTBITIDX_32(n32) \
424 #define ROUNDUP_POW2(n32) \
441 static NV_FORCEINLINE NvU32
/* ROUNDUP_POW2_U64 / NV_SWAP_U8 heads, then the FOR_EACH_INDEX_IN_MASK
 * iteration macro: walks every set-bit index of mask (width 8/16/32/64),
 * shifting a local copy right and skipping clear bits; must be closed
 * with FOR_EACH_INDEX_IN_MASK_END. */
456 #define ROUNDUP_POW2_U64(n64) \
468 #define NV_SWAP_U8(a,b) \
485 #define FOR_EACH_INDEX_IN_MASK(maskWidth,index,mask) \
487 NvU##maskWidth lclMsk = (NvU##maskWidth)(mask); \
488 for (index = 0; lclMsk != 0; index++, lclMsk >>= 1) \
490 if (((NvU##maskWidth)NVBIT64(0) & lclMsk) == 0) \
494 #define FOR_EACH_INDEX_IN_MASK_END \
501 #define NV_ANYSIZE_ARRAY 1
/* Ceiling division: (a + b - 1) / b.  b must be nonzero; a + b - 1 must
 * not overflow the operand type. */
506 #define NV_CEIL(a,b) (((a)+(b)-1)/(b))
/* Alias kept overridable for platforms that predefine it. */
509 #ifndef NV_DIV_AND_CEIL
510 #define NV_DIV_AND_CEIL(a, b) NV_CEIL(a,b)
/* Min/max/abs/sign.  Classic macro caveat: arguments are evaluated
 * twice, so avoid side effects (NV_MIN(i++, j) is unsafe). */
514 #define NV_MIN(a, b) (((a) < (b)) ? (a) : (b))
518 #define NV_MAX(a, b) (((a) > (b)) ? (a) : (b))
524 #define NV_ABS(a) ((a)>=0?(a):(-(a)))
/* NV_SIGN: -1/0/+1.  NV_ZERO_SIGN: -1 for negative, +1 otherwise
 * (zero maps to +1). */
530 #define NV_SIGN(s) ((NvS8)(((s) > 0) - ((s) < 0)))
536 #define NV_ZERO_SIGN(s) ((NvS8)((((s) >= 0) * 2) - 1))
/* offsetof shim: builtin on GCC >= 4, null-pointer arithmetic fallback
 * elsewhere (technically UB but the traditional idiom). */
540 #if defined(__GNUC__) && __GNUC__ > 3
541 #define NV_OFFSETOF(type, member) ((NvU32)__builtin_offsetof(type, member))
543 #define NV_OFFSETOF(type, member) ((NvU32)(NvU64)&(((type *)0)->member)) // shouldn't we use PtrToUlong? But will need to include windows header.
/* Unsigned round-to-nearest and round-up division. */
551 #define NV_UNSIGNED_ROUNDED_DIV(a,b) (((a) + ((b) / 2)) / (b))
562 #define NV_UNSIGNED_DIV_CEIL(a, b) (((a) + (b - 1)) / (b))
/* Right shift with round-to-nearest: adds 1 when the last bit shifted
 * out was set.  shift must be >= 1 (NVBIT(shift - 1)). */
573 #define NV_RIGHT_SHIFT_ROUNDED(a, shift) \
574 (((a) >> (shift)) + !!((NVBIT((shift) - 1) & (a)) == NVBIT((shift) - 1)))
/* Power-of-two alignment helpers; gran must be a power of two. */
580 #ifndef NV_ALIGN_DOWN
581 #define NV_ALIGN_DOWN(v, gran) ((v) & ~((gran) - 1))
585 #define NV_ALIGN_UP(v, gran) (((v) + ((gran) - 1)) & ~((gran)-1))
588 #ifndef NV_IS_ALIGNED
589 #define NV_IS_ALIGNED(v, gran) (0 == ((v) & ((gran) - 1)))
/* NOTE(review): fragment of NVMISC_MEMSET(void *s, NvU8 c, NvLength n)
 * (per the Doxygen summary) — the extraction kept only the byte-pointer
 * cast and loop head; the store of c into b[i] is missing here. */
594 NvU8 *b = (NvU8 *) s;
597 for (i = 0; i < n; i++)
/*
 * NVMISC_MEMCPY: freestanding byte-wise copy of n bytes from src to
 * dest, for environments without the C library.  Copies forward, so
 * overlapping regions are NOT supported (no memmove semantics).
 * NOTE(review): braces, the declaration of i, and the return statement
 * were dropped by the doc extraction.
 */
605 static NV_FORCEINLINE
void *
NVMISC_MEMCPY(
void *dest,
const void *src, NvLength n)
607 NvU8 *destByte = (NvU8 *) dest;
608 const NvU8 *srcByte = (
const NvU8 *) src;
611 for (i = 0; i < n; i++)
613 destByte[i] = srcByte[i];
/*
 * NVMISC_STRNCPY: bounded string copy (per the Doxygen summary).
 * NOTE(review): the loop body and return were dropped by the doc
 * extraction; behavior beyond the loop head cannot be confirmed here.
 */
619 static NV_FORCEINLINE
char *
NVMISC_STRNCPY(
char *dest,
const char *src, NvLength n)
623 for (i = 0; i < n; i++)
/* Closes the header's include guard. */
644 #endif // __NV_MISC_H
static NV_FORCEINLINE NvU32 nvPopCount64(const NvU64 x)
Calculate number of bits set in a 64-bit unsigned integer.
static NV_FORCEINLINE char * NVMISC_STRNCPY(char *dest, const char *src, NvLength n)
static NV_FORCEINLINE void * NVMISC_MEMCPY(void *dest, const void *src, NvLength n)
static NV_FORCEINLINE NvU32 nvPopCount32(const NvU32 x)
Calculate number of bits set in a 32-bit unsigned integer.
static NV_FORCEINLINE NvU32 nvMaskPos32(const NvU32 mask, const NvU32 bitIdx)
Determine how many bits are set below a bit index within a mask.
static NV_FORCEINLINE NvU32 nvNextPow2_U32(const NvU32 x)
Round up a 32-bit unsigned integer to the next power of 2.
static NV_FORCEINLINE void * NVMISC_MEMSET(void *s, NvU8 c, NvLength n)