@@ -46,8 +46,12 @@ type frontendDevice struct {
 	minor uint32
 }
 
+func (dev *frontendDevice) isCtlDevice() bool {
+	return dev.minor == nvgpu.NV_CONTROL_DEVICE_MINOR
+}
+
 func (dev *frontendDevice) basename() string {
-	if dev.minor == nvgpu.NV_CONTROL_DEVICE_MINOR {
+	if dev.isCtlDevice() {
 		return "nvidiactl"
 	}
 	return fmt.Sprintf("nvidia%d", dev.minor)
@@ -134,8 +138,9 @@ type frontendFD struct {
 	// These fields are marked nosave since we do not automatically reinvoke
 	// NV_ESC_RM_MAP_MEMORY after restore, so restored FDs have no
 	// mmap_context.
-	mmapLength   uint64  `state:"nosave"`
-	mmapInternal uintptr `state:"nosave"`
+	mmapLength   uint64              `state:"nosave"`
+	mmapInternal uintptr             `state:"nosave"`
+	mmapMemType  hostarch.MemoryType `state:"nosave"`
 
 	// clients are handles of clients owned by this frontendFD. clients is
 	// protected by dev.nvp.objsMu.
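The mmap* fields record the parameters of the host mapping set up via NV_ESC_RM_MAP_MEMORY, and `state:"nosave"` is what makes them revert to zero values across save/restore, so a restored FD has no mmap_context until the ioctl is reissued. A minimal standalone sketch of that bookkeeping, using invented names (mmapState, memoryType, reset) rather than nvproxy's actual types:

package main

import "fmt"

// memoryType mirrors the role of hostarch.MemoryType in this sketch; the
// values are illustrative, not the real constants.
type memoryType int

const (
	memoryTypeInvalid memoryType = iota
	memoryTypeUncacheable
)

// mmapState is a hypothetical stand-in for the three mmap* fields above:
// the pending mapping's length and internal address, plus the caching
// attribute the host mapping should use.
type mmapState struct {
	length   uint64
	internal uintptr
	memType  memoryType
}

// reset models the effect of `state:"nosave"` across save/restore: the
// fields come back as zero values, so the restored FD has no mmap_context
// until NV_ESC_RM_MAP_MEMORY is issued again.
func (s *mmapState) reset() { *s = mmapState{} }

func main() {
	s := mmapState{length: 0x1000, internal: 0xdead000, memType: memoryTypeUncacheable}
	s.reset()
	fmt.Printf("after restore: %+v\n", s) // all zero values
}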
@@ -428,6 +433,12 @@ func rmAllocContextDMA2(fi *frontendIoctlState) (uintptr, error) {
 }
 
 func rmAllocMemory(fi *frontendIoctlState) (uintptr, error) {
+	// This is consistent with the NV_ACTUAL_DEVICE_ONLY() check in
+	// src/nvidia/arch/nvalloc/unix/src/escape.c:RmIoctl().
+	if fi.fd.dev.isCtlDevice() {
+		return 0, linuxerr.EINVAL
+	}
+
 	var ioctlParams nvgpu.IoctlNVOS02ParametersWithFD
 	if fi.ioctlParamsSize != nvgpu.SizeofIoctlNVOS02ParametersWithFD {
 		return 0, linuxerr.EINVAL
@@ -493,6 +504,19 @@ func rmAllocMemorySystem(fi *frontendIoctlState, ioctlParams *nvgpu.IoctlNVOS02P
 		fi.fd.dev.nvp.objAdd(fi.ctx, ioctlParams.Params.HRoot, ioctlParams.Params.HObjectNew, ioctlParams.Params.HClass, &miscObject{}, ioctlParams.Params.HObjectParent)
 		if createMmapCtx {
 			mapFile.mmapLength = ioctlParams.Params.Limit + 1
+			// Compare kernel-open/nvidia/nv-mmap.c:nvidia_mmap_helper() =>
+			// nv_encode_caching().
+			// - Note that rmAllocMemory() already ensured that fi.fd.dev is
+			//   not nvidiactl, i.e. only the !NV_IS_CTL_DEVICE() branch is
+			//   relevant here.
+			// - nvidia_mmap_helper() honors mmap_context->caching only if
+			//   IS_FB_OFFSET() and !IS_UD_OFFSET(). We can get the information
+			//   we need for IS_FB_OFFSET() from NV_ESC_CARD_INFO, but there
+			//   doesn't seem to be any way for us to replicate IS_UD_OFFSET().
+			//   So we must conservatively specify uncacheable. (This is
+			//   unfortunate since it prevents us from using write-combining on
+			//   framebuffer memory...)
+			mapFile.mmapMemType = hostarch.MemoryTypeUncacheable
 		}
 	}
 	fi.fd.dev.nvp.objsUnlock()
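Read as a decision procedure, the comment above says that a caching type other than uncacheable would only be honored for framebuffer offsets that are provably not UD offsets, and the sentry can only establish the first condition. A hypothetical sketch of that decision with invented names (pickMemType, isFBOffset, udOffsetKnown); it is not code from the diff above:

package main

import "fmt"

// memType stands in for hostarch.MemoryType in this sketch.
type memType string

const (
	memTypeUncacheable    memType = "uncacheable"
	memTypeWriteCombining memType = "write-combining"
)

// pickMemType encodes the reasoning in the comment above: write-combining
// would only be honored for framebuffer offsets that are not UD offsets.
// IS_FB_OFFSET() could be answered using NV_ESC_CARD_INFO, but
// IS_UD_OFFSET() cannot be replicated from the sentry, so udOffsetKnown is
// always false and the result is always uncacheable.
func pickMemType(isFBOffset, udOffsetKnown, isUDOffset bool) memType {
	if isFBOffset && udOffsetKnown && !isUDOffset {
		return memTypeWriteCombining
	}
	return memTypeUncacheable
}

func main() {
	// Even framebuffer memory degrades to uncacheable because the UD
	// check cannot be performed.
	fmt.Println(pickMemType(true /* isFBOffset */, false /* udOffsetKnown */, false /* isUDOffset */))
}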
@@ -1311,6 +1335,12 @@ func rmVidHeapControl(fi *frontendIoctlState) (uintptr, error) {
 }
 
 func rmMapMemory(fi *frontendIoctlState) (uintptr, error) {
+	// This is consistent with the NV_CTL_DEVICE_ONLY() check in
+	// src/nvidia/arch/nvalloc/unix/src/escape.c:RmIoctl().
+	if !fi.fd.dev.isCtlDevice() {
+		return 0, linuxerr.EINVAL
+	}
+
 	var ioctlParams nvgpu.IoctlNVOS33ParametersWithFD
 	if fi.ioctlParamsSize != nvgpu.SizeofIoctlNVOS33ParametersWithFD {
 		return 0, linuxerr.EINVAL
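Taken together, the two new guards mirror the driver's RmIoctl() macros: rmAllocMemory() refuses the control device (NV_ACTUAL_DEVICE_ONLY()), while rmMapMemory() accepts only the control device (NV_CTL_DEVICE_ONLY()). A standalone sketch of the same pattern with invented helper names (ctlDeviceOnly, actualDeviceOnly, controlDeviceMinor); the minor value used is illustrative only:

package main

import (
	"errors"
	"fmt"
)

// errInval stands in for linuxerr.EINVAL.
var errInval = errors.New("EINVAL")

// device is a hypothetical stand-in for *frontendDevice; controlDeviceMinor
// plays the role of nvgpu.NV_CONTROL_DEVICE_MINOR.
type device struct{ minor uint32 }

const controlDeviceMinor = 255

func (d *device) isCtlDevice() bool { return d.minor == controlDeviceMinor }

// ctlDeviceOnly mirrors NV_CTL_DEVICE_ONLY(): the ioctl is only valid on
// nvidiactl, as in rmMapMemory() above.
func ctlDeviceOnly(d *device) error {
	if !d.isCtlDevice() {
		return errInval
	}
	return nil
}

// actualDeviceOnly mirrors NV_ACTUAL_DEVICE_ONLY(): the ioctl is only valid
// on per-GPU nodes (nvidia0, nvidia1, ...), as in rmAllocMemory() above.
func actualDeviceOnly(d *device) error {
	if d.isCtlDevice() {
		return errInval
	}
	return nil
}

func main() {
	ctl, gpu0 := &device{minor: controlDeviceMinor}, &device{minor: 0}
	fmt.Println(ctlDeviceOnly(gpu0), actualDeviceOnly(ctl)) // EINVAL EINVAL
	fmt.Println(ctlDeviceOnly(ctl), actualDeviceOnly(gpu0)) // <nil> <nil>
}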
@@ -1343,6 +1373,24 @@ func rmMapMemory(fi *frontendIoctlState) (uintptr, error) {
 	}
 	if ioctlParams.Params.Status == nvgpu.NV_OK {
 		mapFile.mmapLength = ioctlParams.Params.Length
+		// src/nvidia/arch/nvalloc/unix/src/escape.c:RmIoctl() forces
+		// NVOS33_FLAGS_CACHING_TYPE_DEFAULT. However, resMap implementations
+		// may override "caching type", so in general the memory type depends
+		// on the mapped object.
+		if _, memObj := fi.fd.dev.nvp.getObject(fi.ctx, ioctlParams.Params.HClient, ioctlParams.Params.HMemory); memObj == nil {
+			// getObject() already logged a warning; silently fall back to UC.
+			mapFile.mmapMemType = hostarch.MemoryTypeUncacheable
+		} else {
+			// XXX
+			// memType := memObj.impl.MemoryType()
+			// if memType == hostarch.MemoryTypeInvalid {
+			// 	fi.ctx.Warningf("nvproxy: mapped object with handle %v:%v (class %v, type %T) has unknown memory type", ioctlParams.Params.HClient, ioctlParams.Params.HMemory, memObj.class, memObj.impl)
+			// 	memType = hostarch.MemoryTypeUncacheable
+			// }
+			// mapFile.mmapMemType = memType
+			fi.ctx.Warningf("nvproxy: mapping object with handle %v:%v (class %v, type %T)", ioctlParams.Params.HClient, ioctlParams.Params.HMemory, memObj.class, memObj.impl)
+			mapFile.mmapMemType = hostarch.MemoryTypeUncacheable
+		}
 	}
 
 	ioctlParams.FD = origFD
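The commented-out XXX block points at the intended end state: each proxied object implementation reports its own memory type, and rmMapMemory() falls back to uncacheable only when that type is unknown. A hypothetical rendering of that shape, with invented types (objectImpl, sysmemObject, unknownObject) that are not nvproxy's actual types:

package main

import "fmt"

// memType stands in for hostarch.MemoryType.
type memType int

const (
	memTypeInvalid memType = iota
	memTypeUncacheable
	memTypeWriteBack
)

// objectImpl is a hypothetical interface matching the shape hinted at by the
// commented-out memObj.impl.MemoryType() call: each proxied object class
// reports the memory type its mappings should use.
type objectImpl interface {
	MemoryType() memType
}

// sysmemObject models an object backed by cacheable system memory.
type sysmemObject struct{}

func (sysmemObject) MemoryType() memType { return memTypeWriteBack }

// unknownObject models an object class whose mapping semantics we do not know.
type unknownObject struct{}

func (unknownObject) MemoryType() memType { return memTypeInvalid }

// mmapMemTypeFor applies the fallback from the XXX block: use the object's
// reported type, degrading to uncacheable when it is unknown.
func mmapMemTypeFor(impl objectImpl) memType {
	if t := impl.MemoryType(); t != memTypeInvalid {
		return t
	}
	return memTypeUncacheable
}

func main() {
	fmt.Println(mmapMemTypeFor(sysmemObject{}))  // 2 (write-back)
	fmt.Println(mmapMemTypeFor(unknownObject{})) // 1 (uncacheable)
}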