Compare commits

...

4 Commits

Author               SHA1        Message                                                                            Date
Konstantin Knizhnik  10774a5132  Modify test_multixact to reproduce the problem with incorrect offset calculation  2023-07-21 22:16:42 +03:00
Konstantin Knizhnik  bb18c9958a  Fix bug in mx_offset_to_flags_offset                                               2023-07-21 18:02:33 +03:00
Konstantin Knizhnik  e764fbf9f9  Fix error message                                                                  2023-07-07 21:59:20 +03:00
Konstantin Knizhnik  d37f7a0dd2  Vectorize distance calculation on macOS                                            2023-07-07 09:42:44 +03:00
4 changed files with 42 additions and 27 deletions

View File

@@ -57,9 +57,9 @@ pub fn slru_may_delete_clogsegment(segpage: u32, cutoff_page: u32) -> bool {
 // Multixact utils
 pub fn mx_offset_to_flags_offset(xid: MultiXactId) -> usize {
-    ((xid / pg_constants::MULTIXACT_MEMBERS_PER_MEMBERGROUP as u32) as u16
-        % pg_constants::MULTIXACT_MEMBERGROUPS_PER_PAGE
-        * pg_constants::MULTIXACT_MEMBERGROUP_SIZE) as usize
+    ((xid / pg_constants::MULTIXACT_MEMBERS_PER_MEMBERGROUP as u32)
+        % pg_constants::MULTIXACT_MEMBERGROUPS_PER_PAGE as u32
+        * pg_constants::MULTIXACT_MEMBERGROUP_SIZE as u32) as usize
 }
 pub fn mx_offset_to_flags_bitshift(xid: MultiXactId) -> u16 {
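
The bug fixed in bb18c9958a is the intermediate `as u16` cast above: the member-group number is truncated to 16 bits before the modulo is taken, so any multixact offset past group 65535 maps to the wrong flags byte. A minimal sketch of the arithmetic (in Python for brevity, and assuming PostgreSQL's standard member-page layout for 8 KB blocks: 4 members per group, 20-byte groups, 8192 // 20 = 409 groups per page):

MULTIXACT_MEMBERS_PER_MEMBERGROUP = 4
MULTIXACT_MEMBERGROUPS_PER_PAGE = 409
MULTIXACT_MEMBERGROUP_SIZE = 20

def flags_offset_old(xid: int) -> int:
    # Old code: the group number is cast to u16 before the modulo,
    # silently dropping the high bits (emulated here with & 0xFFFF).
    group = (xid // MULTIXACT_MEMBERS_PER_MEMBERGROUP) & 0xFFFF
    return group % MULTIXACT_MEMBERGROUPS_PER_PAGE * MULTIXACT_MEMBERGROUP_SIZE

def flags_offset_new(xid: int) -> int:
    # Fixed code: stay in 32-bit arithmetic until the modulo is taken.
    group = xid // MULTIXACT_MEMBERS_PER_MEMBERGROUP
    return group % MULTIXACT_MEMBERGROUPS_PER_PAGE * MULTIXACT_MEMBERGROUP_SIZE

# The two agree until the group number reaches 2**16, i.e. xid = 262144:
assert flags_offset_old(262143) == flags_offset_new(262143) == 1900
assert flags_offset_new(262144) == 1920  # group 65536 % 409 = 96 -> byte 1920
assert flags_offset_old(262144) == 0     # u16 wraps to group 0: wrong slot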

View File

@@ -149,7 +149,7 @@ hnsw_check_available_memory(Size requested)
 	struct sysinfo si;
 	Size total;
 	if (sysinfo(&si) < 0)
-		elog(ERROR, "Failed to get amount of RAM: %n");
+		elog(ERROR, "Failed to get amount of RAM: %m");
 	total = si.totalram*si.mem_unit;
 	if ((Size)NBuffers*BLCKSZ + requested >= total)
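
For context on e764fbf9f9: in PostgreSQL's elog/ereport format strings, %m is a special directive (as in syslog) that expands to strerror(errno), which is exactly what a failed sysinfo() call should report. %n, by contrast, is the printf conversion that prints nothing and instead writes a byte count through a pointer argument, so the old message reported nothing useful and directed printf at a nonexistent argument.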
@@ -580,6 +580,7 @@ l2_distance(PG_FUNCTION_ARGS)
 				 errmsg("different array dimensions %d and %d", a_dim, b_dim)));
 	}
+#pragma clang loop vectorize(enable)
 	for (int i = 0; i < a_dim; i++)
 	{
 		diff = ax[i] - bx[i];
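
The one-line change from d37f7a0dd2, here and in the next file, is a clang loop-vectorization hint: #pragma clang loop vectorize(enable) asks clang to emit SIMD code for the following loop even where its cost model would otherwise decline. The commit targets macOS, where the default toolchain is Apple clang; GCC builds simply ignore the unknown pragma (with a -Wunknown-pragmas warning at most).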

View File

@@ -223,6 +223,7 @@ dist_t fstdistfunc_scalar(const coord_t *x, const coord_t *y, size_t n)
 {
 	dist_t distance = 0.0;
+#pragma clang loop vectorize(enable)
 	for (size_t i = 0; i < n; i++)
 	{
 		dist_t diff = x[i] - y[i];

View File

@@ -1,3 +1,7 @@
+import random
+import threading
+from threading import Thread
+from typing import List
 from fixtures.log_helper import log
 from fixtures.neon_fixtures import NeonEnv, check_restored_datadir_content
 from fixtures.utils import query_scalar
@@ -15,11 +19,17 @@ def test_multixact(neon_simple_env: NeonEnv, test_output_dir):
     endpoint = env.endpoints.create_start("test_multixact")
     log.info("postgres is running on 'test_multixact' branch")
+    n_records = 100
+    n_threads = 5
+    n_iters = 1000
+    n_restarts = 10
     cur = endpoint.connect().cursor()
     cur.execute(
-        """
-        CREATE TABLE t1(i int primary key);
-        INSERT INTO t1 select * from generate_series(1, 100);
+        f"""
+        CREATE TABLE t1(pk int primary key, val integer);
+        INSERT INTO t1 values (generate_series(1, {n_records}), 0);
         """
     )
@@ -28,26 +38,32 @@ def test_multixact(neon_simple_env: NeonEnv, test_output_dir):
     )
-    # Lock entries using parallel connections in a round-robin fashion.
-    nclients = 20
-    connections = []
-    for i in range(nclients):
-        # Do not turn on autocommit. We want to hold the key-share locks.
+    def do_updates():
         conn = endpoint.connect(autocommit=False)
-        connections.append(conn)
+        for i in range(n_iters):
+            pk = random.randrange(1, n_records)
+            conn.cursor().execute(f"update t1 set val=val+1 where pk={pk}")
+            conn.cursor().execute("select * from t1 for key share")
+            conn.commit()
+        conn.close()
-    # On each iteration, we commit the previous transaction on a connection,
-    # and issue antoher select. Each SELECT generates a new multixact that
-    # includes the new XID, and the XIDs of all the other parallel transactions.
-    # This generates enough traffic on both multixact offsets and members SLRUs
-    # to cross page boundaries.
-    for i in range(5000):
-        conn = connections[i % nclients]
-        conn.commit()
-        conn.cursor().execute("select * from t1 for key share")
+    for iter in range(n_restarts):
+        threads: List[threading.Thread] = []
+        for i in range(n_threads):
+            threads.append(threading.Thread(target=do_updates, args=(), daemon=False))
+            threads[-1].start()
-    # We have multixacts now. We can close the connections.
-    for c in connections:
-        c.close()
+        for thread in threads:
+            thread.join()
+        # Restart endpoint
+        endpoint.stop()
+        endpoint.start()
+    conn = endpoint.connect()
+    cur = conn.cursor()
+    cur.execute("select count(*) from t1")
+    assert cur.fetchone() == (n_records,)
     # force wal flush
     cur.execute("checkpoint")
@@ -74,6 +90,3 @@ def test_multixact(neon_simple_env: NeonEnv, test_output_dir):
     # Check that we restored pg_controlfile correctly
     assert next_multixact_id_new == next_multixact_id
     # Check that we can restore the content of the datadir correctly
     check_restored_datadir_content(test_output_dir, env, endpoint)