Diffstat (limited to 'vendor/github.com/ncruces/go-sqlite3/vfs')
-rw-r--r--  vendor/github.com/ncruces/go-sqlite3/vfs/README.md         1
-rw-r--r--  vendor/github.com/ncruces/go-sqlite3/vfs/cksm.go           8
-rw-r--r--  vendor/github.com/ncruces/go-sqlite3/vfs/const.go          1
-rw-r--r--  vendor/github.com/ncruces/go-sqlite3/vfs/file.go           2
-rw-r--r--  vendor/github.com/ncruces/go-sqlite3/vfs/os_bsd.go        25
-rw-r--r--  vendor/github.com/ncruces/go-sqlite3/vfs/os_dotlk.go       3
-rw-r--r--  vendor/github.com/ncruces/go-sqlite3/vfs/os_windows.go     9
-rw-r--r--  vendor/github.com/ncruces/go-sqlite3/vfs/shm_bsd.go      100
-rw-r--r--  vendor/github.com/ncruces/go-sqlite3/vfs/shm_copy.go       7
-rw-r--r--  vendor/github.com/ncruces/go-sqlite3/vfs/shm_dotlk.go     99
-rw-r--r--  vendor/github.com/ncruces/go-sqlite3/vfs/shm_memlk.go     55
-rw-r--r--  vendor/github.com/ncruces/go-sqlite3/vfs/shm_windows.go   20
12 files changed, 160 insertions, 170 deletions
diff --git a/vendor/github.com/ncruces/go-sqlite3/vfs/README.md b/vendor/github.com/ncruces/go-sqlite3/vfs/README.md
index cf0e3c30f..08777972e 100644
--- a/vendor/github.com/ncruces/go-sqlite3/vfs/README.md
+++ b/vendor/github.com/ncruces/go-sqlite3/vfs/README.md
@@ -30,7 +30,6 @@ like SQLite.
You can also opt into a cross-platform locking implementation
with the `sqlite3_dotlk` build tag.
-The only requirement is an atomic `os.Mkdir`.
Otherwise, file locking is not supported, and you must use
[`nolock=1`](https://sqlite.org/uri.html#urinolock)
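
The removed line dropped the "atomic os.Mkdir" requirement: with this update the dot-lock backend creates an O_EXCL lock file instead (see os_dotlk.go below). When no locking implementation applies, the README points at nolock=1. A minimal, hedged sketch of opening a database that way, assuming the module's database/sql driver and embedded wasm build are in use (file name is illustrative):

package main

import (
	"database/sql"
	"log"

	_ "github.com/ncruces/go-sqlite3/driver" // registers the "sqlite3" driver
	_ "github.com/ncruces/go-sqlite3/embed"  // embeds the wasm build of SQLite
)

func main() {
	// With nolock=1 the VFS takes no OS locks, so the application must
	// guarantee the database is not accessed concurrently.
	db, err := sql.Open("sqlite3", "file:demo.db?nolock=1")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if _, err := db.Exec(`CREATE TABLE IF NOT EXISTS t (x)`); err != nil {
		log.Fatal(err)
	}
}
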
diff --git a/vendor/github.com/ncruces/go-sqlite3/vfs/cksm.go b/vendor/github.com/ncruces/go-sqlite3/vfs/cksm.go
index 900fa0952..42d7468f5 100644
--- a/vendor/github.com/ncruces/go-sqlite3/vfs/cksm.go
+++ b/vendor/github.com/ncruces/go-sqlite3/vfs/cksm.go
@@ -101,6 +101,14 @@ func (c cksmFile) Pragma(name string, value string) (string, error) {
return "", _NOTFOUND
}
+func (c cksmFile) DeviceCharacteristics() DeviceCharacteristic {
+ res := c.File.DeviceCharacteristics()
+ if c.verifyCksm {
+ res &^= IOCAP_SUBPAGE_READ
+ }
+ return res
+}
+
func (c cksmFile) fileControl(ctx context.Context, mod api.Module, op _FcntlOpcode, pArg uint32) _ErrorCode {
switch op {
case _FCNTL_CKPT_START:
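
The added DeviceCharacteristics override strips IOCAP_SUBPAGE_READ whenever checksum verification is enabled, because checksums cover whole pages and cannot be validated on a partial read. A sketch of the same masking pattern against the exported vfs API; wrappedFile and mustVerify are illustrative names, and it assumes vfs.File exposes DeviceCharacteristics as the wrapped file above does:

package demo

import "github.com/ncruces/go-sqlite3/vfs"

// wrappedFile embeds a vfs.File and masks capability bits that the
// wrapper cannot honor, mirroring what cksmFile does above.
type wrappedFile struct {
	vfs.File
	mustVerify bool // e.g. page checksums are being verified
}

func (f wrappedFile) DeviceCharacteristics() vfs.DeviceCharacteristic {
	res := f.File.DeviceCharacteristics()
	if f.mustVerify {
		res &^= vfs.IOCAP_SUBPAGE_READ // whole-page reads only
	}
	return res
}
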
diff --git a/vendor/github.com/ncruces/go-sqlite3/vfs/const.go b/vendor/github.com/ncruces/go-sqlite3/vfs/const.go
index 0a8fee621..896cdaca4 100644
--- a/vendor/github.com/ncruces/go-sqlite3/vfs/const.go
+++ b/vendor/github.com/ncruces/go-sqlite3/vfs/const.go
@@ -177,6 +177,7 @@ const (
IOCAP_POWERSAFE_OVERWRITE DeviceCharacteristic = 0x00001000
IOCAP_IMMUTABLE DeviceCharacteristic = 0x00002000
IOCAP_BATCH_ATOMIC DeviceCharacteristic = 0x00004000
+ IOCAP_SUBPAGE_READ DeviceCharacteristic = 0x00008000
)
// https://sqlite.org/c3ref/c_fcntl_begin_atomic_write.html
diff --git a/vendor/github.com/ncruces/go-sqlite3/vfs/file.go b/vendor/github.com/ncruces/go-sqlite3/vfs/file.go
index ba70aa14f..b5d285375 100644
--- a/vendor/github.com/ncruces/go-sqlite3/vfs/file.go
+++ b/vendor/github.com/ncruces/go-sqlite3/vfs/file.go
@@ -187,7 +187,7 @@ func (f *vfsFile) SectorSize() int {
}
func (f *vfsFile) DeviceCharacteristics() DeviceCharacteristic {
- var res DeviceCharacteristic
+ res := IOCAP_SUBPAGE_READ
if osBatchAtomic(f.File) {
res |= IOCAP_BATCH_ATOMIC
}
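
With this change the default OS-backed file starts from IOCAP_SUBPAGE_READ rather than an empty capability set, so sub-page reads are advertised unless something, like the checksum wrapper above, clears the bit. Testing it is ordinary bit arithmetic; a tiny illustrative helper, under the same assumption about the exported interface:

package demo

import "github.com/ncruces/go-sqlite3/vfs"

// supportsSubpageRead reports whether a file advertises
// IOCAP_SUBPAGE_READ; a hypothetical helper, not part of the package.
func supportsSubpageRead(f vfs.File) bool {
	return f.DeviceCharacteristics()&vfs.IOCAP_SUBPAGE_READ != 0
}
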
diff --git a/vendor/github.com/ncruces/go-sqlite3/vfs/os_bsd.go b/vendor/github.com/ncruces/go-sqlite3/vfs/os_bsd.go
index 56713e359..cc5da7cab 100644
--- a/vendor/github.com/ncruces/go-sqlite3/vfs/os_bsd.go
+++ b/vendor/github.com/ncruces/go-sqlite3/vfs/os_bsd.go
@@ -15,9 +15,15 @@ func osGetSharedLock(file *os.File) _ErrorCode {
func osGetReservedLock(file *os.File) _ErrorCode {
rc := osLock(file, unix.LOCK_EX|unix.LOCK_NB, _IOERR_LOCK)
if rc == _BUSY {
- // The documentation states the lock is upgraded by releasing the previous lock,
- // then acquiring the new lock.
- // This is a race, so return BUSY_SNAPSHOT to ensure the transaction is aborted.
+ // The documentation states that a lock is upgraded by
+ // releasing the previous lock, then acquiring the new lock.
+ // Going over the source code of various BSDs, though,
+ // with LOCK_NB, the lock is not released,
+ // and EAGAIN is returned holding the shared lock.
+ // Still, if we're already in a transaction, we want to abort it,
+ // so return BUSY_SNAPSHOT here. If there's no transaction active,
+ // SQLite will change this back to SQLITE_BUSY,
+ // and invoke the busy handler if appropriate.
return _BUSY_SNAPSHOT
}
return rc
@@ -33,9 +39,11 @@ func osGetExclusiveLock(file *os.File, state *LockLevel) _ErrorCode {
func osDowngradeLock(file *os.File, _ LockLevel) _ErrorCode {
rc := osLock(file, unix.LOCK_SH|unix.LOCK_NB, _IOERR_RDLOCK)
if rc == _BUSY {
- // The documentation states the lock is upgraded by releasing the previous lock,
- // then acquiring the new lock.
- // This is a race, so return IOERR_RDLOCK to ensure the transaction is aborted.
+ // The documentation states that a lock is downgraded by
+ // releasing the previous lock then acquiring the new lock.
+ // Going over the source code of various BSDs, though,
+ // with LOCK_SH|LOCK_NB this should never happen.
+ // Return IOERR_RDLOCK, as BUSY would cause an assert to fail.
return _IOERR_RDLOCK
}
return _OK
@@ -50,7 +58,10 @@ func osReleaseLock(file *os.File, _ LockLevel) _ErrorCode {
}
func osCheckReservedLock(file *os.File) (bool, _ErrorCode) {
- // Test the RESERVED lock.
+ // Test the RESERVED lock with fcntl(F_GETLK).
+ // This only works on systems where fcntl and flock are compatible.
+ // However, SQLite only calls this while holding a shared lock,
+ // so the difference is immaterial.
lock, rc := osTestLock(file, _RESERVED_BYTE, 1)
return lock == unix.F_WRLCK, rc
}
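
The rewritten comments describe what the BSDs actually do when a shared flock is upgraded without blocking: with LOCK_NB the kernel keeps the shared lock and fails with EAGAIN/EWOULDBLOCK rather than dropping it first. A small standalone probe of that behavior using golang.org/x/sys/unix (file name and error handling are illustrative; contention only appears when another process holds a lock):

//go:build unix

package main

import (
	"errors"
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.OpenFile("demo.db", os.O_RDWR|os.O_CREATE, 0666)
	if err != nil {
		panic(err)
	}
	defer f.Close()
	fd := int(f.Fd())

	// Take a SHARED lock, then try to upgrade it without blocking.
	if err := unix.Flock(fd, unix.LOCK_SH|unix.LOCK_NB); err != nil {
		panic(err)
	}
	err = unix.Flock(fd, unix.LOCK_EX|unix.LOCK_NB)
	if errors.Is(err, unix.EWOULDBLOCK) {
		// Another process holds a SHARED lock: the upgrade fails,
		// but our SHARED lock is still in place, which is why
		// osGetReservedLock can safely report BUSY_SNAPSHOT.
		fmt.Println("upgrade would block; shared lock retained")
		return
	}
	fmt.Println("upgraded to exclusive:", err)
}
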
diff --git a/vendor/github.com/ncruces/go-sqlite3/vfs/os_dotlk.go b/vendor/github.com/ncruces/go-sqlite3/vfs/os_dotlk.go
index 1c1a49c11..b00a1865b 100644
--- a/vendor/github.com/ncruces/go-sqlite3/vfs/os_dotlk.go
+++ b/vendor/github.com/ncruces/go-sqlite3/vfs/os_dotlk.go
@@ -28,7 +28,8 @@ func osGetSharedLock(file *os.File) _ErrorCode {
name := file.Name()
locker := vfsDotLocks[name]
if locker == nil {
- err := os.Mkdir(name+".lock", 0777)
+ f, err := os.OpenFile(name+".lock", os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
+ f.Close()
if errors.Is(err, fs.ErrExist) {
return _BUSY // Another process has the lock.
}
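
The dot-lock now relies on O_CREATE|O_EXCL, which is just as atomic as os.Mkdir but leaves a regular file as the marker. A standalone sketch of the acquire/release cycle (names are illustrative, not the package's API):

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

// acquireDotLock atomically creates name+".lock"; a concurrent caller
// gets fs.ErrExist and must treat the database as busy.
func acquireDotLock(name string) error {
	f, err := os.OpenFile(name+".lock", os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
	if err != nil {
		return err
	}
	return f.Close()
}

func releaseDotLock(name string) error {
	return os.Remove(name + ".lock")
}

func main() {
	if err := acquireDotLock("demo.db"); err != nil {
		panic(err)
	}
	err := acquireDotLock("demo.db")         // second acquire fails
	fmt.Println(errors.Is(err, fs.ErrExist)) // true
	_ = releaseDotLock("demo.db")
}
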
diff --git a/vendor/github.com/ncruces/go-sqlite3/vfs/os_windows.go b/vendor/github.com/ncruces/go-sqlite3/vfs/os_windows.go
index b901f98aa..0b6e5d342 100644
--- a/vendor/github.com/ncruces/go-sqlite3/vfs/os_windows.go
+++ b/vendor/github.com/ncruces/go-sqlite3/vfs/os_windows.go
@@ -50,14 +50,17 @@ func osGetExclusiveLock(file *os.File, state *LockLevel) _ErrorCode {
if rc != _OK {
// Reacquire the SHARED lock.
- osReadLock(file, _SHARED_FIRST, _SHARED_SIZE, 0)
+ if rc := osReadLock(file, _SHARED_FIRST, _SHARED_SIZE, 0); rc != _OK {
+ // notest // this should never happen
+ return _IOERR_RDLOCK
+ }
}
return rc
}
func osDowngradeLock(file *os.File, state LockLevel) _ErrorCode {
if state >= LOCK_EXCLUSIVE {
- // Release the EXCLUSIVE lock.
+ // Release the EXCLUSIVE lock while holding the PENDING lock.
osUnlock(file, _SHARED_FIRST, _SHARED_SIZE)
// Reacquire the SHARED lock.
@@ -78,7 +81,7 @@ func osDowngradeLock(file *os.File, state LockLevel) _ErrorCode {
}
func osReleaseLock(file *os.File, state LockLevel) _ErrorCode {
- // Release all locks.
+ // Release all locks, PENDING must be last.
if state >= LOCK_RESERVED {
osUnlock(file, _RESERVED_BYTE, 1)
}
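
The updated comments make the Windows ordering explicit: on downgrade the exclusive byte range is dropped and re-taken shared while the PENDING byte stays locked, and on release the PENDING byte is always the last one to go. A rough sketch of that ordering with golang.org/x/sys/windows, using SQLite's well-known byte offsets; lockRange and unlockRange are illustrative helpers, not this package's osLock/osUnlock:

//go:build windows

package main

import (
	"os"

	"golang.org/x/sys/windows"
)

// Byte ranges from SQLite's locking protocol.
const (
	pendingByte = 0x40000000
	sharedFirst = pendingByte + 2
	sharedSize  = 510
)

func lockRange(h windows.Handle, flags uint32, off, n uint32) error {
	ol := windows.Overlapped{Offset: off}
	return windows.LockFileEx(h, flags|windows.LOCKFILE_FAIL_IMMEDIATELY, 0, n, 0, &ol)
}

func unlockRange(h windows.Handle, off, n uint32) error {
	ol := windows.Overlapped{Offset: off}
	return windows.UnlockFileEx(h, 0, n, 0, &ol)
}

func main() {
	f, err := os.OpenFile("demo.db", os.O_RDWR|os.O_CREATE, 0666)
	if err != nil {
		panic(err)
	}
	defer f.Close()
	h := windows.Handle(f.Fd())

	// Suppose we hold EXCLUSIVE: the PENDING byte plus the shared range.
	_ = lockRange(h, windows.LOCKFILE_EXCLUSIVE_LOCK, pendingByte, 1)
	_ = lockRange(h, windows.LOCKFILE_EXCLUSIVE_LOCK, sharedFirst, sharedSize)

	// Downgrade: drop the exclusive range and re-take it shared while
	// the PENDING byte is still held, then release PENDING.
	_ = unlockRange(h, sharedFirst, sharedSize)
	_ = lockRange(h, 0, sharedFirst, sharedSize)
	_ = unlockRange(h, pendingByte, 1)

	// Full release of what remains; had PENDING still been held,
	// it would be unlocked last.
	_ = unlockRange(h, sharedFirst, sharedSize)
}
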
diff --git a/vendor/github.com/ncruces/go-sqlite3/vfs/shm_bsd.go b/vendor/github.com/ncruces/go-sqlite3/vfs/shm_bsd.go
index d4e046369..07cabf7b5 100644
--- a/vendor/github.com/ncruces/go-sqlite3/vfs/shm_bsd.go
+++ b/vendor/github.com/ncruces/go-sqlite3/vfs/shm_bsd.go
@@ -14,52 +14,52 @@ import (
"github.com/ncruces/go-sqlite3/internal/util"
)
-type vfsShmFile struct {
+type vfsShmParent struct {
*os.File
info os.FileInfo
- refs int // +checklocks:vfsShmFilesMtx
+ refs int // +checklocks:vfsShmListMtx
lock [_SHM_NLOCK]int16 // +checklocks:Mutex
sync.Mutex
}
var (
- // +checklocks:vfsShmFilesMtx
- vfsShmFiles []*vfsShmFile
- vfsShmFilesMtx sync.Mutex
+ // +checklocks:vfsShmListMtx
+ vfsShmList []*vfsShmParent
+ vfsShmListMtx sync.Mutex
)
type vfsShm struct {
- *vfsShmFile
+ *vfsShmParent
path string
lock [_SHM_NLOCK]bool
regions []*util.MappedRegion
}
func (s *vfsShm) Close() error {
- if s.vfsShmFile == nil {
+ if s.vfsShmParent == nil {
return nil
}
- vfsShmFilesMtx.Lock()
- defer vfsShmFilesMtx.Unlock()
+ vfsShmListMtx.Lock()
+ defer vfsShmListMtx.Unlock()
// Unlock everything.
s.shmLock(0, _SHM_NLOCK, _SHM_UNLOCK)
// Decrease reference count.
- if s.vfsShmFile.refs > 0 {
- s.vfsShmFile.refs--
- s.vfsShmFile = nil
+ if s.vfsShmParent.refs > 0 {
+ s.vfsShmParent.refs--
+ s.vfsShmParent = nil
return nil
}
err := s.File.Close()
- for i, g := range vfsShmFiles {
- if g == s.vfsShmFile {
- vfsShmFiles[i] = nil
- s.vfsShmFile = nil
+ for i, g := range vfsShmList {
+ if g == s.vfsShmParent {
+ vfsShmList[i] = nil
+ s.vfsShmParent = nil
return err
}
}
@@ -67,7 +67,7 @@ func (s *vfsShm) Close() error {
}
func (s *vfsShm) shmOpen() _ErrorCode {
- if s.vfsShmFile != nil {
+ if s.vfsShmParent != nil {
return _OK
}
@@ -85,13 +85,13 @@ func (s *vfsShm) shmOpen() _ErrorCode {
return _IOERR_FSTAT
}
- vfsShmFilesMtx.Lock()
- defer vfsShmFilesMtx.Unlock()
+ vfsShmListMtx.Lock()
+ defer vfsShmListMtx.Unlock()
// Find a shared file, increase the reference count.
- for _, g := range vfsShmFiles {
+ for _, g := range vfsShmList {
if g != nil && os.SameFile(fi, g.info) {
- s.vfsShmFile = g
+ s.vfsShmParent = g
g.refs++
return _OK
}
@@ -107,18 +107,18 @@ func (s *vfsShm) shmOpen() _ErrorCode {
}
// Add the new shared file.
- s.vfsShmFile = &vfsShmFile{
+ s.vfsShmParent = &vfsShmParent{
File: f,
info: fi,
}
f = nil // Don't close the file.
- for i, g := range vfsShmFiles {
+ for i, g := range vfsShmList {
if g == nil {
- vfsShmFiles[i] = s.vfsShmFile
+ vfsShmList[i] = s.vfsShmParent
return _OK
}
}
- vfsShmFiles = append(vfsShmFiles, s.vfsShmFile)
+ vfsShmList = append(vfsShmList, s.vfsShmParent)
return _OK
}
@@ -157,57 +157,11 @@ func (s *vfsShm) shmMap(ctx context.Context, mod api.Module, id, size int32, ext
func (s *vfsShm) shmLock(offset, n int32, flags _ShmFlag) _ErrorCode {
s.Lock()
defer s.Unlock()
-
- switch {
- case flags&_SHM_UNLOCK != 0:
- for i := offset; i < offset+n; i++ {
- if s.lock[i] {
- if s.vfsShmFile.lock[i] == 0 {
- panic(util.AssertErr())
- }
- if s.vfsShmFile.lock[i] <= 0 {
- s.vfsShmFile.lock[i] = 0
- } else {
- s.vfsShmFile.lock[i]--
- }
- s.lock[i] = false
- }
- }
- case flags&_SHM_SHARED != 0:
- for i := offset; i < offset+n; i++ {
- if s.lock[i] {
- panic(util.AssertErr())
- }
- if s.vfsShmFile.lock[i]+1 <= 0 {
- return _BUSY
- }
- }
- for i := offset; i < offset+n; i++ {
- s.vfsShmFile.lock[i]++
- s.lock[i] = true
- }
- case flags&_SHM_EXCLUSIVE != 0:
- for i := offset; i < offset+n; i++ {
- if s.lock[i] {
- panic(util.AssertErr())
- }
- if s.vfsShmFile.lock[i] != 0 {
- return _BUSY
- }
- }
- for i := offset; i < offset+n; i++ {
- s.vfsShmFile.lock[i] = -1
- s.lock[i] = true
- }
- default:
- panic(util.AssertErr())
- }
-
- return _OK
+ return s.shmMemLock(offset, n, flags)
}
func (s *vfsShm) shmUnmap(delete bool) {
- if s.vfsShmFile == nil {
+ if s.vfsShmParent == nil {
return
}
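
Beyond the rename from vfsShmFile to vfsShmParent, the mechanism is unchanged: every connection that opens the same shared-memory file (detected with os.SameFile) shares one parent object, and only the last Close really closes the descriptor. A simplified, self-contained sketch of that refcounting scheme (names are illustrative):

package main

import (
	"fmt"
	"os"
	"sync"
)

type shmParent struct {
	file *os.File
	info os.FileInfo
	refs int // extra references beyond the first
}

var (
	mtx  sync.Mutex
	list []*shmParent
)

func openShared(path string) (*shmParent, error) {
	f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0666)
	if err != nil {
		return nil, err
	}
	fi, err := f.Stat()
	if err != nil {
		f.Close()
		return nil, err
	}

	mtx.Lock()
	defer mtx.Unlock()
	for _, p := range list {
		if p != nil && os.SameFile(fi, p.info) {
			f.Close() // reuse the existing descriptor
			p.refs++
			return p, nil
		}
	}
	p := &shmParent{file: f, info: fi}
	list = append(list, p)
	return p, nil
}

func closeShared(p *shmParent) error {
	mtx.Lock()
	defer mtx.Unlock()
	if p.refs > 0 {
		p.refs--
		return nil
	}
	for i, g := range list {
		if g == p {
			list[i] = nil
			break
		}
	}
	return p.file.Close()
}

func main() {
	a, _ := openShared("demo.db-shm")
	b, _ := openShared("demo.db-shm")
	fmt.Println(a == b) // true: same inode, same parent
	_ = closeShared(b)  // drops a reference
	_ = closeShared(a)  // last reference: descriptor is closed
}
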
diff --git a/vendor/github.com/ncruces/go-sqlite3/vfs/shm_copy.go b/vendor/github.com/ncruces/go-sqlite3/vfs/shm_copy.go
index 7a250523e..e6007aa1c 100644
--- a/vendor/github.com/ncruces/go-sqlite3/vfs/shm_copy.go
+++ b/vendor/github.com/ncruces/go-sqlite3/vfs/shm_copy.go
@@ -31,7 +31,10 @@ const (
//
// https://sqlite.org/walformat.html#the_wal_index_file_format
-func (s *vfsShm) shmAcquire() {
+func (s *vfsShm) shmAcquire(ptr *_ErrorCode) {
+ if ptr != nil && *ptr != _OK {
+ return
+ }
if len(s.ptrs) == 0 || shmUnmodified(s.shadow[0][:], s.shared[0][:]) {
return
}
@@ -69,7 +72,7 @@ func (s *vfsShm) shmRelease() {
func (s *vfsShm) shmBarrier() {
s.Lock()
- s.shmAcquire()
+ s.shmAcquire(nil)
s.shmRelease()
s.Unlock()
}
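
shmAcquire now takes a pointer to the caller's error code and becomes a no-op when an earlier step already failed, which lets call sites keep using defer without copying shared memory after a failed lock. A minimal sketch of that deferred error-pointer pattern with a plain error (names are illustrative):

package main

import (
	"errors"
	"fmt"
)

// acquire models shmAcquire(&rc): it receives a pointer to the named
// return value and skips its work if the surrounding call failed.
func acquire(rc *error) {
	if rc != nil && *rc != nil {
		return
	}
	fmt.Println("acquire: copying WAL-index pages")
}

func lockAndAcquire(fail bool) (err error) {
	defer acquire(&err)
	if fail {
		return errors.New("lock failed")
	}
	return nil
}

func main() {
	_ = lockAndAcquire(false) // deferred acquire runs
	_ = lockAndAcquire(true)  // deferred acquire is skipped
}
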
diff --git a/vendor/github.com/ncruces/go-sqlite3/vfs/shm_dotlk.go b/vendor/github.com/ncruces/go-sqlite3/vfs/shm_dotlk.go
index 36e00a1cd..4c7f47dec 100644
--- a/vendor/github.com/ncruces/go-sqlite3/vfs/shm_dotlk.go
+++ b/vendor/github.com/ncruces/go-sqlite3/vfs/shm_dotlk.go
@@ -13,22 +13,22 @@ import (
"github.com/tetratelabs/wazero/api"
)
-type vfsShmBuffer struct {
+type vfsShmParent struct {
shared [][_WALINDEX_PGSZ]byte
- refs int // +checklocks:vfsShmBuffersMtx
+ refs int // +checklocks:vfsShmListMtx
lock [_SHM_NLOCK]int16 // +checklocks:Mutex
sync.Mutex
}
var (
- // +checklocks:vfsShmBuffersMtx
- vfsShmBuffers = map[string]*vfsShmBuffer{}
- vfsShmBuffersMtx sync.Mutex
+ // +checklocks:vfsShmListMtx
+ vfsShmList = map[string]*vfsShmParent{}
+ vfsShmListMtx sync.Mutex
)
type vfsShm struct {
- *vfsShmBuffer
+ *vfsShmParent
mod api.Module
alloc api.Function
free api.Function
@@ -40,20 +40,20 @@ type vfsShm struct {
}
func (s *vfsShm) Close() error {
- if s.vfsShmBuffer == nil {
+ if s.vfsShmParent == nil {
return nil
}
- vfsShmBuffersMtx.Lock()
- defer vfsShmBuffersMtx.Unlock()
+ vfsShmListMtx.Lock()
+ defer vfsShmListMtx.Unlock()
// Unlock everything.
s.shmLock(0, _SHM_NLOCK, _SHM_UNLOCK)
// Decrease reference count.
- if s.vfsShmBuffer.refs > 0 {
- s.vfsShmBuffer.refs--
- s.vfsShmBuffer = nil
+ if s.vfsShmParent.refs > 0 {
+ s.vfsShmParent.refs--
+ s.vfsShmParent = nil
return nil
}
@@ -61,22 +61,22 @@ func (s *vfsShm) Close() error {
if err != nil && !errors.Is(err, fs.ErrNotExist) {
return _IOERR_UNLOCK
}
- delete(vfsShmBuffers, s.path)
- s.vfsShmBuffer = nil
+ delete(vfsShmList, s.path)
+ s.vfsShmParent = nil
return nil
}
func (s *vfsShm) shmOpen() _ErrorCode {
- if s.vfsShmBuffer != nil {
+ if s.vfsShmParent != nil {
return _OK
}
- vfsShmBuffersMtx.Lock()
- defer vfsShmBuffersMtx.Unlock()
+ vfsShmListMtx.Lock()
+ defer vfsShmListMtx.Unlock()
// Find a shared buffer, increase the reference count.
- if g, ok := vfsShmBuffers[s.path]; ok {
- s.vfsShmBuffer = g
+ if g, ok := vfsShmList[s.path]; ok {
+ s.vfsShmParent = g
g.refs++
return _OK
}
@@ -92,8 +92,8 @@ func (s *vfsShm) shmOpen() _ErrorCode {
}
// Add the new shared buffer.
- s.vfsShmBuffer = &vfsShmBuffer{}
- vfsShmBuffers[s.path] = s.vfsShmBuffer
+ s.vfsShmParent = &vfsShmParent{}
+ vfsShmList[s.path] = s.vfsShmParent
return _OK
}
@@ -112,7 +112,7 @@ func (s *vfsShm) shmMap(ctx context.Context, mod api.Module, id, size int32, ext
s.Lock()
defer s.Unlock()
- defer s.shmAcquire()
+ defer s.shmAcquire(nil)
// Extend shared memory.
if int(id) >= len(s.shared) {
@@ -125,7 +125,6 @@ func (s *vfsShm) shmMap(ctx context.Context, mod api.Module, id, size int32, ext
// Allocate shadow memory.
if int(id) >= len(s.shadow) {
s.shadow = append(s.shadow, make([][_WALINDEX_PGSZ]byte, int(id)-len(s.shadow)+1)...)
- s.shadow[0][4] = 1 // force invalidation
}
// Allocate local memory.
@@ -141,70 +140,26 @@ func (s *vfsShm) shmMap(ctx context.Context, mod api.Module, id, size int32, ext
s.ptrs = append(s.ptrs, uint32(s.stack[0]))
}
+ s.shadow[0][4] = 1
return s.ptrs[id], _OK
}
-func (s *vfsShm) shmLock(offset, n int32, flags _ShmFlag) _ErrorCode {
+func (s *vfsShm) shmLock(offset, n int32, flags _ShmFlag) (rc _ErrorCode) {
s.Lock()
defer s.Unlock()
switch {
case flags&_SHM_LOCK != 0:
- defer s.shmAcquire()
+ defer s.shmAcquire(&rc)
case flags&_SHM_EXCLUSIVE != 0:
s.shmRelease()
}
- switch {
- case flags&_SHM_UNLOCK != 0:
- for i := offset; i < offset+n; i++ {
- if s.lock[i] {
- if s.vfsShmBuffer.lock[i] == 0 {
- panic(util.AssertErr())
- }
- if s.vfsShmBuffer.lock[i] <= 0 {
- s.vfsShmBuffer.lock[i] = 0
- } else {
- s.vfsShmBuffer.lock[i]--
- }
- s.lock[i] = false
- }
- }
- case flags&_SHM_SHARED != 0:
- for i := offset; i < offset+n; i++ {
- if s.lock[i] {
- panic(util.AssertErr())
- }
- if s.vfsShmBuffer.lock[i]+1 <= 0 {
- return _BUSY
- }
- }
- for i := offset; i < offset+n; i++ {
- s.vfsShmBuffer.lock[i]++
- s.lock[i] = true
- }
- case flags&_SHM_EXCLUSIVE != 0:
- for i := offset; i < offset+n; i++ {
- if s.lock[i] {
- panic(util.AssertErr())
- }
- if s.vfsShmBuffer.lock[i] != 0 {
- return _BUSY
- }
- }
- for i := offset; i < offset+n; i++ {
- s.vfsShmBuffer.lock[i] = -1
- s.lock[i] = true
- }
- default:
- panic(util.AssertErr())
- }
-
- return _OK
+ return s.shmMemLock(offset, n, flags)
}
func (s *vfsShm) shmUnmap(delete bool) {
- if s.vfsShmBuffer == nil {
+ if s.vfsShmParent == nil {
return
}
defer s.Close()
diff --git a/vendor/github.com/ncruces/go-sqlite3/vfs/shm_memlk.go b/vendor/github.com/ncruces/go-sqlite3/vfs/shm_memlk.go
new file mode 100644
index 000000000..dc7b91350
--- /dev/null
+++ b/vendor/github.com/ncruces/go-sqlite3/vfs/shm_memlk.go
@@ -0,0 +1,55 @@
+//go:build ((freebsd || openbsd || netbsd || dragonfly || illumos) && (386 || arm || amd64 || arm64 || riscv64 || ppc64le) && !sqlite3_nosys) || sqlite3_flock || sqlite3_dotlk
+
+package vfs
+
+import "github.com/ncruces/go-sqlite3/internal/util"
+
+// +checklocks:s.Mutex
+func (s *vfsShm) shmMemLock(offset, n int32, flags _ShmFlag) _ErrorCode {
+ switch {
+ case flags&_SHM_UNLOCK != 0:
+ for i := offset; i < offset+n; i++ {
+ if s.lock[i] {
+ if s.vfsShmParent.lock[i] == 0 {
+ panic(util.AssertErr())
+ }
+ if s.vfsShmParent.lock[i] <= 0 {
+ s.vfsShmParent.lock[i] = 0
+ } else {
+ s.vfsShmParent.lock[i]--
+ }
+ s.lock[i] = false
+ }
+ }
+ case flags&_SHM_SHARED != 0:
+ for i := offset; i < offset+n; i++ {
+ if s.lock[i] {
+ panic(util.AssertErr())
+ }
+ if s.vfsShmParent.lock[i]+1 <= 0 {
+ return _BUSY
+ }
+ }
+ for i := offset; i < offset+n; i++ {
+ s.vfsShmParent.lock[i]++
+ s.lock[i] = true
+ }
+ case flags&_SHM_EXCLUSIVE != 0:
+ for i := offset; i < offset+n; i++ {
+ if s.lock[i] {
+ panic(util.AssertErr())
+ }
+ if s.vfsShmParent.lock[i] != 0 {
+ return _BUSY
+ }
+ }
+ for i := offset; i < offset+n; i++ {
+ s.vfsShmParent.lock[i] = -1
+ s.lock[i] = true
+ }
+ default:
+ panic(util.AssertErr())
+ }
+
+ return _OK
+}
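
The new shm_memlk.go centralizes the in-memory lock table shared by the BSD, flock, and dot-lock builds: for each WAL-index lock slot, a positive count records shared holders and -1 marks an exclusive holder. A small self-contained model of that convention (not the package's types):

package main

import "fmt"

const nLock = 8 // matches SQLite's _SHM_NLOCK

type lockTable [nLock]int16

func (t *lockTable) shared(i int) bool {
	if t[i] < 0 {
		return false // exclusively held
	}
	t[i]++
	return true
}

func (t *lockTable) exclusive(i int) bool {
	if t[i] != 0 {
		return false // any holder blocks an exclusive lock
	}
	t[i] = -1
	return true
}

func (t *lockTable) unlockShared(i int)    { t[i]-- }
func (t *lockTable) unlockExclusive(i int) { t[i] = 0 }

func main() {
	var t lockTable
	fmt.Println(t.shared(0))    // true: first reader
	fmt.Println(t.shared(0))    // true: second reader
	fmt.Println(t.exclusive(0)) // false: readers present
	t.unlockShared(0)
	t.unlockShared(0)
	fmt.Println(t.exclusive(0)) // true: slot is free
}
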
diff --git a/vendor/github.com/ncruces/go-sqlite3/vfs/shm_windows.go b/vendor/github.com/ncruces/go-sqlite3/vfs/shm_windows.go
index 218d8e2c7..374d491ac 100644
--- a/vendor/github.com/ncruces/go-sqlite3/vfs/shm_windows.go
+++ b/vendor/github.com/ncruces/go-sqlite3/vfs/shm_windows.go
@@ -64,7 +64,7 @@ func (s *vfsShm) shmOpen() _ErrorCode {
return osReadLock(s.File, _SHM_DMS, 1, time.Millisecond)
}
-func (s *vfsShm) shmMap(ctx context.Context, mod api.Module, id, size int32, extend bool) (uint32, _ErrorCode) {
+func (s *vfsShm) shmMap(ctx context.Context, mod api.Module, id, size int32, extend bool) (_ uint32, rc _ErrorCode) {
// Ensure size is a multiple of the OS page size.
if size != _WALINDEX_PGSZ || (windows.Getpagesize()-1)&_WALINDEX_PGSZ != 0 {
return 0, _IOERR_SHMMAP
@@ -78,7 +78,7 @@ func (s *vfsShm) shmMap(ctx context.Context, mod api.Module, id, size int32, ext
return 0, rc
}
- defer s.shmAcquire()
+ defer s.shmAcquire(&rc)
// Check if file is big enough.
o, err := s.Seek(0, io.SeekEnd)
@@ -107,7 +107,6 @@ func (s *vfsShm) shmMap(ctx context.Context, mod api.Module, id, size int32, ext
// Allocate shadow memory.
if int(id) >= len(s.shadow) {
s.shadow = append(s.shadow, make([][_WALINDEX_PGSZ]byte, int(id)-len(s.shadow)+1)...)
- s.shadow[0][4] = 1 // force invalidation
}
// Allocate local memory.
@@ -123,22 +122,23 @@ func (s *vfsShm) shmMap(ctx context.Context, mod api.Module, id, size int32, ext
s.ptrs = append(s.ptrs, uint32(s.stack[0]))
}
+ s.shadow[0][4] = 1
return s.ptrs[id], _OK
}
-func (s *vfsShm) shmLock(offset, n int32, flags _ShmFlag) _ErrorCode {
+func (s *vfsShm) shmLock(offset, n int32, flags _ShmFlag) (rc _ErrorCode) {
+ var timeout time.Duration
+ if s.blocking {
+ timeout = time.Millisecond
+ }
+
switch {
case flags&_SHM_LOCK != 0:
- defer s.shmAcquire()
+ defer s.shmAcquire(&rc)
case flags&_SHM_EXCLUSIVE != 0:
s.shmRelease()
}
- var timeout time.Duration
- if s.blocking {
- timeout = time.Millisecond
- }
-
switch {
case flags&_SHM_UNLOCK != 0:
return osUnlock(s.File, _SHM_BASE+uint32(offset), uint32(n))
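
When blocking mode is enabled, the Windows shmLock path retries with a one-millisecond timeout instead of failing immediately. A generic, hedged model of that retry-until-deadline idea; tryLock is a stand-in, not this package's osReadLock:

package main

import (
	"errors"
	"fmt"
	"time"
)

var errBusy = errors.New("busy")

// tryLock pretends another connection holds the lock for a few attempts.
func tryLock(attempt int) error {
	if attempt < 3 {
		return errBusy
	}
	return nil
}

func lockWithTimeout(timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for attempt := 0; ; attempt++ {
		err := tryLock(attempt)
		if err == nil || !errors.Is(err, errBusy) {
			return err
		}
		if time.Now().After(deadline) {
			return errBusy
		}
		time.Sleep(time.Millisecond)
	}
}

func main() {
	fmt.Println(lockWithTimeout(10 * time.Millisecond)) // <nil>
	fmt.Println(lockWithTimeout(0))                     // busy
}
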