Commit 89d27dc

Author: Konstantin Knizhnik

Add SetLastWrittenLSNForBlockRangeInternal to avoid redundant unlocking/locking of LwLSN lock

1 parent 19dab9f

File tree

1 file changed (+48 −36)

  • src/backend/access/transam/xlog.c

src/backend/access/transam/xlog.c

Lines changed: 48 additions & 36 deletions
@@ -764,6 +764,8 @@ static void WALInsertLockAcquireExclusive(void);
 static void WALInsertLockRelease(void);
 static void WALInsertLockUpdateInsertingAt(XLogRecPtr insertingAt);
 
+static XLogRecPtr SetLastWrittenLSNForBlockRangeInternal(XLogRecPtr lsn, RelFileLocator rlocator, ForkNumber forknum, BlockNumber from, BlockNumber n_blocks);
+
 /*
  * Insert an XLOG record represented by an already-constructed chain of data
  * chunks. This is a low-level routine; to construct the WAL record header
@@ -6709,8 +6711,15 @@ GetLastWrittenLSN(RelFileLocator rlocator, ForkNumber forknum, BlockNumber blkno
             lsn = entry->lsn;
         else
         {
-            LWLockRelease(LastWrittenLsnLock);
-            return SetLastWrittenLSNForBlock(lsn, rlocator, forknum, blkno);
+            /*
+             * In statements like CREATE TABLE AS SELECT or INSERT ... SELECT we fetch data from a source table and
+             * store it in a destination table. This breaks prefetch when the last-written LSN is not known for the
+             * source table's pages (which happens, for example, after a compute restart): we fall back to the global
+             * last-written LSN, which changes frequently while we write the destination table. The request LSN used
+             * for the prefetch then differs from the request LSN used when the page is actually needed, the prefetch
+             * request is discarded, and prefetch is effectively disarmed.
+             */
+            lsn = SetLastWrittenLSNForBlockRangeInternal(lsn, rlocator, forknum, blkno, 1);
         }
     }
     else
@@ -6742,51 +6751,46 @@ GetLastWrittenLSNv(RelFileLocator relfilenode, ForkNumber forknum,
                    BlockNumber blkno, int nblocks, XLogRecPtr *lsns)
 {
     LastWrittenLsnCacheEntry* entry;
+    XLogRecPtr lsn;
 
     Assert(lastWrittenLsnCacheSize != 0);
     Assert(nblocks > 0);
     Assert(PointerIsValid(lsns));
 
+    LWLockAcquire(LastWrittenLsnLock, LW_SHARED);
+
+    lsn = XLogCtl->maxLastWrittenLsn;
+
     if (relfilenode.relNumber != InvalidOid)
     {
         BufferTag key;
-        XLogRecPtr max_lsn;
 
         key.spcOid = relfilenode.spcOid;
         key.dbOid = relfilenode.dbOid;
         key.relNumber = relfilenode.relNumber;
         key.forkNum = forknum;
 
-        LWLockAcquire(LastWrittenLsnLock, LW_SHARED);
-
         for (int i = 0; i < nblocks; i++)
         {
             /* Maximal last written LSN among all non-cached pages */
             key.blockNum = blkno + i;
 
             entry = hash_search(lastWrittenLsnCache, &key, HASH_FIND, NULL);
 
-            lsns[i] = (entry != NULL) ? entry->lsn : InvalidXLogRecPtr;
-        }
-        max_lsn = XLogCtl->maxLastWrittenLsn;
-
-        LWLockRelease(LastWrittenLsnLock);
-
-        for (int i = 0; i < nblocks; i++)
-        {
-            if (lsns[i] == InvalidXLogRecPtr)
+            if (entry != NULL)
             {
-                lsns[i] = max_lsn;
-                SetLastWrittenLSNForBlock(max_lsn, relfilenode, forknum, key.blockNum);
+                lsns[i] = entry->lsn;
+            }
+            else
+            {
+                lsns[i] = lsn;
+                SetLastWrittenLSNForBlockRangeInternal(lsn, relfilenode, forknum, key.blockNum, 1);
             }
         }
     }
     else
     {
         HASH_SEQ_STATUS seq;
-        XLogRecPtr lsn = XLogCtl->maxLastWrittenLsn;
-
-        LWLockAcquire(LastWrittenLsnLock, LW_SHARED);
 
         /* Find maximum of all cached LSNs */
         hash_seq_init(&seq, lastWrittenLsnCache);
@@ -6795,30 +6799,16 @@ GetLastWrittenLSNv(RelFileLocator relfilenode, ForkNumber forknum,
             if (entry->lsn > lsn)
                 lsn = entry->lsn;
         }
-        LWLockRelease(LastWrittenLsnLock);
 
         for (int i = 0; i < nblocks; i++)
             lsns[i] = lsn;
     }
+    LWLockRelease(LastWrittenLsnLock);
 }
 
-/*
- * SetLastWrittenLSNForBlockRange -- Set maximal LSN of written page range.
- * We maintain cache of last written LSNs with limited size and LRU replacement
- * policy. Keeping last written LSN for each page allows to use old LSN when
- * requesting pages of unchanged or appended relations. Also it is critical for
- * efficient work of prefetch in case massive update operations (like vacuum or remove).
- *
- * rlocator.relNumber can be InvalidOid, in this case maxLastWrittenLsn is updated.
- * SetLastWrittenLsn with dummy rlocator is used by createdb and dbase_redo functions.
- */
-XLogRecPtr
-SetLastWrittenLSNForBlockRange(XLogRecPtr lsn, RelFileLocator rlocator, ForkNumber forknum, BlockNumber from, BlockNumber n_blocks)
+static XLogRecPtr
+SetLastWrittenLSNForBlockRangeInternal(XLogRecPtr lsn, RelFileLocator rlocator, ForkNumber forknum, BlockNumber from, BlockNumber n_blocks)
 {
-    if (lsn == InvalidXLogRecPtr || n_blocks == 0 || lastWrittenLsnCacheSize == 0)
-        return lsn;
-
-    LWLockAcquire(LastWrittenLsnLock, LW_EXCLUSIVE);
     if (rlocator.relNumber == InvalidOid)
     {
         if (lsn > XLogCtl->maxLastWrittenLsn)
@@ -6868,7 +6858,29 @@ SetLastWrittenLSNForBlockRange(XLogRecPtr lsn, RelFileLocator rlocator, ForkNumb
             dlist_push_tail(&XLogCtl->lastWrittenLsnLRU, &entry->lru_node);
         }
     }
+    return lsn;
+}
+
+/*
+ * SetLastWrittenLSNForBlockRange -- Set maximal LSN of written page range.
+ * We maintain cache of last written LSNs with limited size and LRU replacement
+ * policy. Keeping last written LSN for each page allows to use old LSN when
+ * requesting pages of unchanged or appended relations. Also it is critical for
+ * efficient work of prefetch in case massive update operations (like vacuum or remove).
+ *
+ * rlocator.relNumber can be InvalidOid, in this case maxLastWrittenLsn is updated.
+ * SetLastWrittenLsn with dummy rlocator is used by createdb and dbase_redo functions.
+ */
+XLogRecPtr
+SetLastWrittenLSNForBlockRange(XLogRecPtr lsn, RelFileLocator rlocator, ForkNumber forknum, BlockNumber from, BlockNumber n_blocks)
+{
+    if (lsn == InvalidXLogRecPtr || n_blocks == 0 || lastWrittenLsnCacheSize == 0)
+        return lsn;
+
+    LWLockAcquire(LastWrittenLsnLock, LW_EXCLUSIVE);
+    lsn = SetLastWrittenLSNForBlockRangeInternal(lsn, rlocator, forknum, from, n_blocks);
     LWLockRelease(LastWrittenLsnLock);
+
     return lsn;
 }
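For readers skimming the diff, the shape of the change is a common lock-hoisting refactoring: the public function keeps the acquire/release pair, while a new static Internal variant assumes the lock is already held, so callers that are already inside the critical section (here, the cache-miss paths of GetLastWrittenLSN and GetLastWrittenLSNv) no longer release LastWrittenLsnLock only to re-acquire it immediately. Below is a minimal sketch of the pattern, assuming a plain pthread mutex and hypothetical names (cache_lock, set_lsn_internal, set_lsn, get_lsn); the real code uses PostgreSQL LWLocks and the actual xlog.c identifiers.

/*
 * Sketch only: wrapper/worker split to avoid a redundant unlock/lock
 * round trip. All names here are invented for illustration.
 */
#include <pthread.h>

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long max_lsn;       /* stand-in for the shared cache state */

/* Worker: caller must already hold cache_lock. */
static unsigned long
set_lsn_internal(unsigned long lsn)
{
    if (lsn > max_lsn)
        max_lsn = lsn;
    return lsn;
}

/* Public wrapper: acquires the lock, delegates, releases. */
unsigned long
set_lsn(unsigned long lsn)
{
    pthread_mutex_lock(&cache_lock);
    lsn = set_lsn_internal(lsn);
    pthread_mutex_unlock(&cache_lock);
    return lsn;
}

/*
 * Reader with a fill-on-miss path. Before the refactoring it would have
 * had to unlock and call set_lsn() (which immediately re-locks); now it
 * calls the worker while still holding the lock.
 */
unsigned long
get_lsn(int cache_hit, unsigned long fallback)
{
    unsigned long lsn;

    pthread_mutex_lock(&cache_lock);
    if (cache_hit)
        lsn = max_lsn;
    else
        lsn = set_lsn_internal(fallback);   /* no unlock/re-lock round trip */
    pthread_mutex_unlock(&cache_lock);
    return lsn;
}

The usual trade-off applies: correctness of the worker now rests on a caller contract ("lock must be held") that the compiler cannot check, which is one reason the new function is kept static to xlog.c.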
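The comment added in GetLastWrittenLSN is worth unpacking: prefetch only pays off if the request LSN computed when a prefetch is issued equals the one computed when the page is actually read. The toy program below (invented names global_lsn, pinned, get_request_lsn; no relation to the real API) shows how pinning the fallback value on a cache miss keeps the two requests consistent even while the global last-written LSN keeps advancing:

#include <stdio.h>

#define NBLOCKS 16

static unsigned long global_lsn = 100;  /* advances on every write */
static unsigned long pinned[NBLOCKS];   /* 0 = no cached entry for the block */

/* On a miss, pin the current global LSN for this block and return it. */
static unsigned long
get_request_lsn(int block)
{
    if (pinned[block] == 0)
        pinned[block] = global_lsn;
    return pinned[block];
}

int
main(void)
{
    unsigned long prefetch_lsn = get_request_lsn(3);    /* prefetch issued */

    global_lsn += 50;   /* writes to the destination table advance the global LSN */

    unsigned long read_lsn = get_request_lsn(3);        /* page actually needed */

    /* Both calls return 100, so the prefetched response is still usable. */
    printf("prefetch=%lu read=%lu match=%d\n",
           prefetch_lsn, read_lsn, prefetch_lsn == read_lsn);
    return 0;
}

Without the pinning step the second call would return 150 while the prefetch was issued at 100, and the prefetched page would be discarded, which is exactly the "disarmed prefetch" the comment describes.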