commit d899b0e15f

@ -2,7 +2,7 @@
# taosadapter
ExternalProject_Add(taosadapter
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
GIT_TAG 213f8b3
GIT_TAG 3e08996
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE

@ -2,7 +2,7 @@
# taos-tools
ExternalProject_Add(taos-tools
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
GIT_TAG 7d24ed5
GIT_TAG c4a567b
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE

@ -58,7 +58,7 @@ database_option: {
- WAL_FSYNC_PERIOD: specifies the interval (in milliseconds) at which data is written from the WAL to disk. This parameter takes effect only when the WAL parameter is set to 2. The default value is 3000. Enter a value between 0 and 180000. The value 0 indicates that incoming data is immediately written to disk.
- MAXROWS: specifies the maximum number of rows recorded in a block. The default value is 4096.
- MINROWS: specifies the minimum number of rows recorded in a block. The default value is 100.
- KEEP: specifies the time for which data is retained. Enter a value between 1 and 365000. The default value is 3650. The value of the KEEP parameter must be greater than or equal to the value of the DURATION parameter. TDengine automatically deletes data that is older than the value of the KEEP parameter. You can use m (minutes), h (hours), and d (days) as the unit, for example KEEP 100h or KEEP 10d. If you do not include a unit, d is used by default.
- KEEP: specifies the time for which data is retained. Enter a value between 1 and 365000. The default value is 3650. The value of the KEEP parameter must be greater than or equal to the value of the DURATION parameter. TDengine automatically deletes data that is older than the value of the KEEP parameter. You can use m (minutes), h (hours), and d (days) as the unit, for example KEEP 100h or KEEP 10d. If you do not include a unit, d is used by default. The Enterprise Edition supports [Tiered Storage](https://docs.tdengine.com/tdinternal/arch/#tiered-storage), so you can specify multiple KEEP values (comma separated, up to 3 values, satisfying keep 0 <= keep 1 <= keep 2, for example KEEP 100h,100d,3650d). The Community Edition does not support Tiered Storage; if multiple KEEP values are configured, they do not take effect and only the maximum value is used as KEEP.
- PAGES: specifies the number of pages in the metadata storage engine cache on each vnode. Enter a value greater than or equal to 64. The default value is 256. The space occupied by metadata storage on each vnode is equal to the product of the values of the PAGESIZE and PAGES parameters. The space occupied by default is 1 MB.
- PAGESIZE: specifies the size (in KB) of each page in the metadata storage engine cache on each vnode. The default value is 4. Enter a value between 1 and 16384.
- PRECISION: specifies the precision at which a database records timestamps. Enter ms for milliseconds, us for microseconds, or ns for nanoseconds. The default value is ms.

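As a quick illustration of the options listed above, the following minimal sketch creates a database through the Python connector that appears later in this diff; the database name and option values are only assumptions.

```python
import taos

# Illustrative only: create a database exercising several of the options described above.
# Adjust DURATION/KEEP/PRECISION and the other values to your retention needs.
conn = taos.connect()  # assumes a local TDengine server with default credentials
conn.execute(
    "CREATE DATABASE IF NOT EXISTS power "
    "DURATION 10 KEEP 3650 MAXROWS 4096 MINROWS 100 "
    "PAGES 256 PAGESIZE 4 PRECISION 'ms' WAL_FSYNC_PERIOD 3000"
)
conn.close()
```
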
@ -27,7 +27,7 @@ INSERT INTO tb_name [(field1_name, ...)] subquery
2. The precision of a timestamp depends on its format. The precision configured for the database affects only timestamps that are inserted as long integers (UNIX time). Timestamps inserted as date and time strings are not affected. As an example, the timestamp 2021-07-13 16:16:48 is equivalent to 1626164208 in UNIX time. This UNIX time is modified to 1626164208000 for databases with millisecond precision, 1626164208000000 for databases with microsecond precision, and 1626164208000000000 for databases with nanosecond precision.

3. If you want to insert multiple rows simultaneously, do not use the NOW function in the timestamp. Using the NOW function in this situation will cause multiple rows to have the same timestamp and prevent them from being stored correctly. This is because the NOW function obtains the current time on the client, and multiple instances of NOW in a single statement will return the same time.
The earliest timestamp that you can use when inserting data is equal to the current time on the server minus the value of the KEEP parameter. The latest timestamp that you can use when inserting data is equal to the current time on the server plus the value of the DURATION parameter. You can configure the KEEP and DURATION parameters when you create a database. The default values are 3650 days for the KEEP parameter and 10 days for the DURATION parameter.
The earliest timestamp that you can use when inserting data is equal to the current time on the server minus the value of the KEEP parameter (you can configure KEEP when you create a database; the default value is 3650 days). The latest timestamp that you can use depends on the PRECISION parameter of the database (configured when the database is created: ms for milliseconds, us for microseconds, ns for nanoseconds; the default is milliseconds). If the precision is milliseconds or microseconds, the latest timestamp is the Unix epoch (January 1st, 1970 at 00:00:00.000 UTC) plus 1000 years, that is, January 1st, 2970 at 00:00:00.000 UTC; if the precision is nanoseconds, the latest timestamp is the Unix epoch plus 292 years, that is, January 1st, 2262 at 00:00:00.000000000 UTC.

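A minimal sketch of the precision rule described above, using the Python connector; the demo_ms database and tb1 table are assumed names for the example.

```python
import taos

# Illustrative only: an integer timestamp must match the database precision,
# while a date-time string is interpreted the same way regardless of precision.
# Per the note above, 2021-07-13 16:16:48 is 1626164208 in UNIX seconds,
# i.e. 1626164208000 in a millisecond-precision database.
conn = taos.connect()
conn.execute("CREATE DATABASE IF NOT EXISTS demo_ms PRECISION 'ms'")
conn.execute("CREATE TABLE IF NOT EXISTS demo_ms.tb1 (ts TIMESTAMP, val INT)")
conn.execute("INSERT INTO demo_ms.tb1 VALUES (1626164208000, 1)")
conn.execute("INSERT INTO demo_ms.tb1 VALUES ('2021-07-13 16:16:49', 2)")
conn.close()
```
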
**Syntax**

@ -363,7 +363,7 @@ Shows information about all vgroups in the system or about the vgroups for a spe
## SHOW VNODES

```sql
SHOW VNODES [dnode_name];
SHOW VNODES {dnode_id | dnode_endpoint};
```

Shows information about all vnodes in the system or about the vnodes for a specified dnode.

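A minimal sketch of running the revised statement from the Python connector; the dnode id is an assumption, use one reported by SHOW DNODES.

```python
import taos

# Illustrative only: list the vnodes of one dnode using the new syntax above.
conn = taos.connect()
result = conn.query("SHOW VNODES 1")  # 1 is a placeholder dnode_id
for row in result.fetch_all():
    print(row)
conn.close()
```
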
@ -59,11 +59,11 @@ The different database framework specifications for various programming language
| -------------------------------------- | ------------- | --------------- | ------------- | ------------- | ------------- | ------------- |
| **Connection Management** | Support | Support | Support | Support | Support | Support |
| **Regular Query** | Support | Support | Support | Support | Support | Support |
| **Parameter Binding** | Not supported | Not supported | support | Support | Not supported | Support |
| **Subscription (TMQ) ** | Not supported | Not supported | support | Not supported | Not supported | Support |
| **Schemaless** | Not supported | Not supported | Not supported | Not supported | Not supported | Not supported |
| **Bulk Pulling (based on WebSocket) ** | Support | Support | Support | support | Support | Support |
| **DataFrame** | Not supported | Support | Not supported | Not supported | Not supported | Not supported |
| **Parameter Binding** | Not Supported | Not Supported | Support | Support | Not Supported | Support |
| **Subscription (TMQ) ** | Not Supported | Support | Support | Not Supported | Not Supported | Support |
| **Schemaless** | Not Supported | Not Supported | Not Supported | Not Supported | Not Supported | Not Supported |
| **Bulk Pulling (based on WebSocket) ** | Support | Support | Support | Support | Support | Support |
| **DataFrame** | Not Supported | Support | Not Supported | Not Supported | Not Supported | Not Supported |

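For the Subscription (TMQ) row above, a minimal consumer sketch modelled on the tmq example further down in this diff; the topic name and every config key other than group.id are assumptions.

```python
from taos.tmq import Consumer

# Illustrative only: poll a TMQ topic until interrupted. The topic must already exist.
consumer = Consumer({"group.id": "tg_demo", "td.connect.user": "root", "td.connect.pass": "taosdata"})
consumer.subscribe(["tmq_test_topic"])
try:
    while True:
        res = consumer.poll(1)
        if not res:
            continue
        err = res.error()
        if err is not None:
            raise err
        print(res.value())
finally:
    consumer.unsubscribe()
    consumer.close()
```
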
:::warning

@ -162,11 +162,7 @@ The parameters described in this document by the effect that they have on the sy
| Meaning | Execution policy for query statements |
| Unit | None |
| Default | 1 |
| Value Range | 1: Run queries on vnodes and not on qnodes

2: Run subtasks without scan operators on qnodes and subtasks with scan operators on vnodes.

3: Only run scan operators on vnodes; run all other operators on qnodes. |
| Value Range | 1: Run queries on vnodes and not on qnodes; 2: Run subtasks without scan operators on qnodes and subtasks with scan operators on vnodes; 3: Only run scan operators on vnodes, and run all other operators on qnodes. |
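A minimal sketch for checking which policy is currently in effect on the client; it assumes the SHOW LOCAL VARIABLES statement and that the first result column holds the parameter name.

```python
import taos

# Illustrative only: print client-side parameters such as queryPolicy.
conn = taos.connect()
result = conn.query("SHOW LOCAL VARIABLES")
for row in result.fetch_all():
    if row[0] in ("queryPolicy", "querySmaOptimize"):
        print(row)
conn.close()
```
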
|
### querySmaOptimize

@ -176,11 +172,7 @@ The parameters described in this document by the effect that they have on the sy
| Meaning | SMA index optimization policy |
| Unit | None |
| Default Value | 0 |
| Notes |

0: Disable SMA indexing and perform all queries on non-indexed data.

1: Enable SMA indexing and perform queries from suitable statements on precomputation results.|
| Notes |0: Disable SMA indexing and perform all queries on non-indexed data; 1: Enable SMA indexing and perform queries from suitable statements on precomputation results.|
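Queries of the shape below are the kind that can be served from precomputed SMA results when querySmaOptimize is set to 1; a minimal sketch that assumes the power.meters super table used by the examples in this diff.

```python
import taos

# Illustrative only: simple aggregates over a super table are candidates for
# SMA-based answering when querySmaOptimize is set to 1 (whether SMA is
# actually used depends on the statement).
conn = taos.connect()
result = conn.query("SELECT COUNT(*), MAX(current), MIN(voltage) FROM power.meters")
print(result.fetch_all())
conn.close()
```
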
|
### countAlwaysReturnValue

@ -323,6 +315,7 @@ The charset that takes effect is UTF-8.
| Applicable | Server Only |
| Meaning | All data files are stored in this directory |
| Default Value | /var/lib/taos |
| Note | The [Tiered Storage](https://docs.tdengine.com/tdinternal/arch/#tiered-storage) function needs to be used in conjunction with the [KEEP](https://docs.tdengine.com/taos-sql/database/#parameters) parameter |
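On the KEEP side of that note, a minimal sketch of a multi-value retention setting (Enterprise Edition only; on the Community Edition only the largest value takes effect); the database name is an assumption.

```python
import taos

# Illustrative only: multiple KEEP values pair with tiered storage (Enterprise Edition).
conn = taos.connect()
conn.execute("CREATE DATABASE IF NOT EXISTS power_tiered KEEP 100h,100d,3650d")
conn.close()
```
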
|
### tempDir

@ -0,0 +1,15 @@
|
|||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/taosdata/driver-go/v3 v3.1.0/go.mod h1:H2vo/At+rOPY1aMzUV9P49SVX7NlXb3LAbKw+MCLrmU=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
|
@ -1,8 +1,11 @@
|
|||
import pandas
|
||||
from sqlalchemy import create_engine
|
||||
from sqlalchemy import create_engine, text
|
||||
|
||||
engine = create_engine("taos://root:taosdata@localhost:6030/power")
|
||||
df = pandas.read_sql("SELECT * FROM meters", engine)
|
||||
conn = engine.connect()
|
||||
df = pandas.read_sql(text("SELECT * FROM power.meters"), conn)
|
||||
conn.close()
|
||||
|
||||
|
||||
# print index
|
||||
print(df.index)
|
||||
|
|
|
@ -1,8 +1,10 @@
|
|||
import pandas
|
||||
from sqlalchemy import create_engine
|
||||
from sqlalchemy import create_engine, text
|
||||
|
||||
engine = create_engine("taosrest://root:taosdata@localhost:6041")
|
||||
df: pandas.DataFrame = pandas.read_sql("SELECT * FROM power.meters", engine)
|
||||
conn = engine.connect()
|
||||
df: pandas.DataFrame = pandas.read_sql(text("SELECT * FROM power.meters"), conn)
|
||||
conn.close()
|
||||
|
||||
# print index
|
||||
print(df.index)
|
||||
|
|
|
@ -1,18 +1,19 @@
|
|||
# ANCHOR: connect
|
||||
from taosrest import connect, TaosRestConnection, TaosRestCursor
|
||||
|
||||
conn: TaosRestConnection = connect(url="http://localhost:6041",
|
||||
user="root",
|
||||
password="taosdata",
|
||||
timeout=30)
|
||||
conn = connect(url="http://localhost:6041",
|
||||
user="root",
|
||||
password="taosdata",
|
||||
timeout=30)
|
||||
|
||||
# ANCHOR_END: connect
|
||||
# ANCHOR: basic
|
||||
# create STable
|
||||
cursor: TaosRestCursor = conn.cursor()
|
||||
cursor = conn.cursor()
|
||||
cursor.execute("DROP DATABASE IF EXISTS power")
|
||||
cursor.execute("CREATE DATABASE power")
|
||||
cursor.execute("CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)")
|
||||
cursor.execute(
|
||||
"CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)")
|
||||
|
||||
# insert data
|
||||
cursor.execute("""INSERT INTO power.d1001 USING power.meters TAGS('California.SanFrancisco', 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
|
||||
|
@ -28,7 +29,7 @@ print("queried row count:", cursor.rowcount)
|
|||
# get column names from cursor
|
||||
column_names = [meta[0] for meta in cursor.description]
|
||||
# get rows
|
||||
data: list[tuple] = cursor.fetchall()
|
||||
data = cursor.fetchall()
|
||||
print(column_names)
|
||||
for row in data:
|
||||
print(row)
|
||||
|
|
|
@ -8,7 +8,7 @@ conn.execute("CREATE DATABASE test")
|
|||
# change database. same as execute "USE db"
|
||||
conn.select_db("test")
|
||||
conn.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (location INT)")
|
||||
affected_row: int = conn.execute("INSERT INTO t1 USING weather TAGS(1) VALUES (now, 23.5) (now+1m, 23.5) (now+2m, 24.4)")
|
||||
affected_row = conn.execute("INSERT INTO t1 USING weather TAGS(1) VALUES (now, 23.5) (now+1m, 23.5) (now+2m, 24.4)")
|
||||
print("affected_row", affected_row)
|
||||
# output:
|
||||
# affected_row 3
|
||||
|
@ -16,10 +16,10 @@ print("affected_row", affected_row)
|
|||
|
||||
# ANCHOR: query
|
||||
# Execute a sql and get its result set. It's useful for SELECT statement
|
||||
result: taos.TaosResult = conn.query("SELECT * from weather")
|
||||
result = conn.query("SELECT * from weather")
|
||||
|
||||
# Get fields from result
|
||||
fields: taos.field.TaosFields = result.fields
|
||||
fields = result.fields
|
||||
for field in fields:
|
||||
print(field) # {name: ts, type: 9, bytes: 8}
|
||||
|
||||
|
|
|
@ -1,15 +1,14 @@
|
|||
# install dependencies:
|
||||
# recommend python >= 3.8
|
||||
# pip3 install faster-fifo
|
||||
#
|
||||
|
||||
import logging
|
||||
import math
|
||||
import multiprocessing
|
||||
import sys
|
||||
import time
|
||||
import os
|
||||
from multiprocessing import Process
|
||||
from faster_fifo import Queue
|
||||
from multiprocessing import Process, Queue
|
||||
from mockdatasource import MockDataSource
|
||||
from queue import Empty
|
||||
from typing import List
|
||||
|
@ -22,8 +21,7 @@ TABLE_COUNT = 1000
|
|||
QUEUE_SIZE = 1000000
|
||||
MAX_BATCH_SIZE = 3000
|
||||
|
||||
read_processes = []
|
||||
write_processes = []
|
||||
_DONE_MESSAGE = '__DONE__'
|
||||
|
||||
|
||||
def get_connection():
|
||||
|
@ -44,41 +42,64 @@ def get_connection():
|
|||
|
||||
# ANCHOR: read
|
||||
|
||||
def run_read_task(task_id: int, task_queues: List[Queue]):
|
||||
def run_read_task(task_id: int, task_queues: List[Queue], infinity):
|
||||
table_count_per_task = TABLE_COUNT // READ_TASK_COUNT
|
||||
data_source = MockDataSource(f"tb{task_id}", table_count_per_task)
|
||||
data_source = MockDataSource(f"tb{task_id}", table_count_per_task, infinity)
|
||||
try:
|
||||
for batch in data_source:
|
||||
if isinstance(batch, tuple):
|
||||
batch = [batch]
|
||||
for table_id, rows in batch:
|
||||
# hash data to different queue
|
||||
i = table_id % len(task_queues)
|
||||
# block putting forever when the queue is full
|
||||
task_queues[i].put_many(rows, block=True, timeout=-1)
|
||||
for row in rows:
|
||||
task_queues[i].put(row)
|
||||
if not infinity:
|
||||
for queue in task_queues:
|
||||
queue.put(_DONE_MESSAGE)
|
||||
except KeyboardInterrupt:
|
||||
pass
|
||||
finally:
|
||||
logging.info('read task over')
|
||||
|
||||
|
||||
# ANCHOR_END: read
|
||||
|
||||
|
||||
# ANCHOR: write
|
||||
def run_write_task(task_id: int, queue: Queue):
|
||||
def run_write_task(task_id: int, queue: Queue, done_queue: Queue):
|
||||
from sql_writer import SQLWriter
|
||||
log = logging.getLogger(f"WriteTask-{task_id}")
|
||||
writer = SQLWriter(get_connection)
|
||||
lines = None
|
||||
try:
|
||||
while True:
|
||||
try:
|
||||
# get as many as possible
|
||||
lines = queue.get_many(block=False, max_messages_to_get=MAX_BATCH_SIZE)
|
||||
over = False
|
||||
lines = []
|
||||
for _ in range(MAX_BATCH_SIZE):
|
||||
try:
|
||||
line = queue.get_nowait()
|
||||
if line == _DONE_MESSAGE:
|
||||
over = True
|
||||
break
|
||||
if line:
|
||||
lines.append(line)
|
||||
except Empty:
|
||||
time.sleep(0.1)
|
||||
if len(lines) > 0:
|
||||
writer.process_lines(lines)
|
||||
except Empty:
|
||||
time.sleep(0.01)
|
||||
if over:
|
||||
done_queue.put(_DONE_MESSAGE)
|
||||
break
|
||||
except KeyboardInterrupt:
|
||||
pass
|
||||
except BaseException as e:
|
||||
log.debug(f"lines={lines}")
|
||||
raise e
|
||||
finally:
|
||||
writer.close()
|
||||
log.debug('write task over')
|
||||
|
||||
|
||||
# ANCHOR_END: write
|
||||
|
@ -103,47 +124,64 @@ def set_global_config():
|
|||
|
||||
|
||||
# ANCHOR: monitor
|
||||
def run_monitor_process():
|
||||
def run_monitor_process(done_queue: Queue):
|
||||
log = logging.getLogger("DataBaseMonitor")
|
||||
conn = get_connection()
|
||||
conn.execute("DROP DATABASE IF EXISTS test")
|
||||
conn.execute("CREATE DATABASE test")
|
||||
conn.execute("CREATE STABLE test.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) "
|
||||
"TAGS (location BINARY(64), groupId INT)")
|
||||
conn = None
|
||||
try:
|
||||
conn = get_connection()
|
||||
|
||||
def get_count():
|
||||
res = conn.query("SELECT count(*) FROM test.meters")
|
||||
rows = res.fetch_all()
|
||||
return rows[0][0] if rows else 0
|
||||
def get_count():
|
||||
res = conn.query("SELECT count(*) FROM test.meters")
|
||||
rows = res.fetch_all()
|
||||
return rows[0][0] if rows else 0
|
||||
|
||||
last_count = 0
|
||||
while True:
|
||||
time.sleep(10)
|
||||
count = get_count()
|
||||
log.info(f"count={count} speed={(count - last_count) / 10}")
|
||||
last_count = count
|
||||
last_count = 0
|
||||
while True:
|
||||
try:
|
||||
done = done_queue.get_nowait()
|
||||
if done == _DONE_MESSAGE:
|
||||
break
|
||||
except Empty:
|
||||
pass
|
||||
time.sleep(10)
|
||||
count = get_count()
|
||||
log.info(f"count={count} speed={(count - last_count) / 10}")
|
||||
last_count = count
|
||||
finally:
|
||||
conn.close()
|
||||
|
||||
|
||||
# ANCHOR_END: monitor
|
||||
# ANCHOR: main
|
||||
def main():
|
||||
def main(infinity):
|
||||
set_global_config()
|
||||
logging.info(f"READ_TASK_COUNT={READ_TASK_COUNT}, WRITE_TASK_COUNT={WRITE_TASK_COUNT}, "
|
||||
f"TABLE_COUNT={TABLE_COUNT}, QUEUE_SIZE={QUEUE_SIZE}, MAX_BATCH_SIZE={MAX_BATCH_SIZE}")
|
||||
|
||||
monitor_process = Process(target=run_monitor_process)
|
||||
conn = get_connection()
|
||||
conn.execute("DROP DATABASE IF EXISTS test")
|
||||
conn.execute("CREATE DATABASE IF NOT EXISTS test")
|
||||
conn.execute("CREATE STABLE IF NOT EXISTS test.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) "
|
||||
"TAGS (location BINARY(64), groupId INT)")
|
||||
conn.close()
|
||||
|
||||
done_queue = Queue()
|
||||
monitor_process = Process(target=run_monitor_process, args=(done_queue,))
|
||||
monitor_process.start()
|
||||
time.sleep(3) # waiting for database ready.
|
||||
logging.debug(f"monitor task started with pid {monitor_process.pid}")
|
||||
|
||||
task_queues: List[Queue] = []
|
||||
write_processes = []
|
||||
read_processes = []
|
||||
|
||||
# create task queues
|
||||
for i in range(WRITE_TASK_COUNT):
|
||||
queue = Queue(max_size_bytes=QUEUE_SIZE)
|
||||
queue = Queue()
|
||||
task_queues.append(queue)
|
||||
|
||||
# create write processes
|
||||
for i in range(WRITE_TASK_COUNT):
|
||||
p = Process(target=run_write_task, args=(i, task_queues[i]))
|
||||
p = Process(target=run_write_task, args=(i, task_queues[i], done_queue))
|
||||
p.start()
|
||||
logging.debug(f"WriteTask-{i} started with pid {p.pid}")
|
||||
write_processes.append(p)
|
||||
|
@ -151,13 +189,19 @@ def main():
|
|||
# create read processes
|
||||
for i in range(READ_TASK_COUNT):
|
||||
queues = assign_queues(i, task_queues)
|
||||
p = Process(target=run_read_task, args=(i, queues))
|
||||
p = Process(target=run_read_task, args=(i, queues, infinity))
|
||||
p.start()
|
||||
logging.debug(f"ReadTask-{i} started with pid {p.pid}")
|
||||
read_processes.append(p)
|
||||
|
||||
try:
|
||||
monitor_process.join()
|
||||
for p in read_processes:
|
||||
p.join()
|
||||
for p in write_processes:
|
||||
p.join()
|
||||
time.sleep(1)
|
||||
return
|
||||
except KeyboardInterrupt:
|
||||
monitor_process.terminate()
|
||||
[p.terminate() for p in read_processes]
|
||||
|
@ -176,5 +220,6 @@ def assign_queues(read_task_id, task_queues):
|
|||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
multiprocessing.set_start_method('spawn')
|
||||
main(False)
|
||||
# ANCHOR_END: main
|
||||
|
|
|
@ -26,7 +26,8 @@ class Consumer(object):
|
|||
'bath_consume': True,
|
||||
'batch_size': 1000,
|
||||
'async_model': True,
|
||||
'workers': 10
|
||||
'workers': 10,
|
||||
'testing': False
|
||||
}
|
||||
|
||||
LOCATIONS = ['California.SanFrancisco', 'California.LosAngles', 'California.SanDiego', 'California.SanJose',
|
||||
|
@ -46,11 +47,12 @@ class Consumer(object):
|
|||
def __init__(self, **configs):
|
||||
self.config: dict = self.DEFAULT_CONFIGS
|
||||
self.config.update(configs)
|
||||
self.consumer = KafkaConsumer(
|
||||
self.config.get('kafka_topic'), # topic
|
||||
bootstrap_servers=self.config.get('kafka_brokers'),
|
||||
group_id=self.config.get('kafka_group_id'),
|
||||
)
|
||||
if not self.config.get('testing'):
|
||||
self.consumer = KafkaConsumer(
|
||||
self.config.get('kafka_topic'), # topic
|
||||
bootstrap_servers=self.config.get('kafka_brokers'),
|
||||
group_id=self.config.get('kafka_group_id'),
|
||||
)
|
||||
self.taos = taos.connect(
|
||||
host=self.config.get('taos_host'),
|
||||
user=self.config.get('taos_user'),
|
||||
|
@ -60,7 +62,7 @@ class Consumer(object):
|
|||
)
|
||||
if self.config.get('async_model'):
|
||||
self.pool = ThreadPoolExecutor(max_workers=self.config.get('workers'))
|
||||
self.tasks: list[Future] = []
|
||||
self.tasks = []
|
||||
# tags and table mapping # key: {location}_{groupId} value:
|
||||
self.tag_table_mapping = {}
|
||||
i = 0
|
||||
|
@ -115,14 +117,14 @@ class Consumer(object):
|
|||
if self.taos is not None:
|
||||
self.taos.close()
|
||||
|
||||
def _run(self, f: Callable[[ConsumerRecord], bool]):
|
||||
def _run(self, f):
|
||||
for message in self.consumer:
|
||||
if self.config.get('async_model'):
|
||||
self.pool.submit(f(message))
|
||||
else:
|
||||
f(message)
|
||||
|
||||
def _run_batch(self, f: Callable[[list[list[ConsumerRecord]]], None]):
|
||||
def _run_batch(self, f):
|
||||
while True:
|
||||
messages = self.consumer.poll(timeout_ms=500, max_records=self.config.get('batch_size'))
|
||||
if messages:
|
||||
|
@ -140,7 +142,7 @@ class Consumer(object):
|
|||
logging.info('## insert sql %s', sql)
|
||||
return self.taos.execute(sql=sql) == 1
|
||||
|
||||
def _to_taos_batch(self, messages: list[list[ConsumerRecord]]):
|
||||
def _to_taos_batch(self, messages):
|
||||
sql = self._build_sql_batch(messages=messages)
|
||||
if len(sql) == 0: # decode error, skip
|
||||
return
|
||||
|
@ -162,7 +164,7 @@ class Consumer(object):
|
|||
table_name = self._get_table_name(location=location, group_id=group_id)
|
||||
return self.INSERT_PART_SQL.format(table_name, ts, current, voltage, phase)
|
||||
|
||||
def _build_sql_batch(self, messages: list[list[ConsumerRecord]]) -> str:
|
||||
def _build_sql_batch(self, messages) -> str:
|
||||
sql_list = []
|
||||
for partition_messages in messages:
|
||||
for message in partition_messages:
|
||||
|
@ -186,7 +188,54 @@ def _get_location_and_group(key: str) -> (str, int):
|
|||
return fields[0], fields[1]
|
||||
|
||||
|
||||
def test_to_taos(consumer: Consumer):
|
||||
msg = {
|
||||
'location': 'California.SanFrancisco',
|
||||
'groupId': 1,
|
||||
'ts': '2022-12-06 15:13:38.643',
|
||||
'current': 3.41,
|
||||
'voltage': 105,
|
||||
'phase': 0.02027,
|
||||
}
|
||||
record = ConsumerRecord(checksum=None, headers=None, offset=1, key=None, value=json.dumps(msg), partition=1,
|
||||
topic='test', serialized_key_size=None, serialized_header_size=None,
|
||||
serialized_value_size=None, timestamp=time.time(), timestamp_type=None)
|
||||
assert consumer._to_taos(message=record)
|
||||
|
||||
|
||||
def test_to_taos_batch(consumer: Consumer):
|
||||
records = [
|
||||
[
|
||||
ConsumerRecord(checksum=None, headers=None, offset=1, key=None,
|
||||
value=json.dumps({'location': 'California.SanFrancisco',
|
||||
'groupId': 1,
|
||||
'ts': '2022-12-06 15:13:38.643',
|
||||
'current': 3.41,
|
||||
'voltage': 105,
|
||||
'phase': 0.02027, }),
|
||||
partition=1, topic='test', serialized_key_size=None, serialized_header_size=None,
|
||||
serialized_value_size=None, timestamp=time.time(), timestamp_type=None),
|
||||
ConsumerRecord(checksum=None, headers=None, offset=1, key=None,
|
||||
value=json.dumps({'location': 'California.LosAngles',
|
||||
'groupId': 2,
|
||||
'ts': '2022-12-06 15:13:39.643',
|
||||
'current': 3.41,
|
||||
'voltage': 102,
|
||||
'phase': 0.02027, }),
|
||||
partition=1, topic='test', serialized_key_size=None, serialized_header_size=None,
|
||||
serialized_value_size=None, timestamp=time.time(), timestamp_type=None),
|
||||
]
|
||||
]
|
||||
|
||||
consumer._to_taos_batch(messages=records)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
consumer = Consumer(async_model=True)
|
||||
consumer = Consumer(async_model=True, testing=True)
|
||||
# init env
|
||||
consumer.init_env()
|
||||
consumer.consume()
|
||||
# consumer.consume()
|
||||
# test build sql
|
||||
# test build sql batch
|
||||
test_to_taos(consumer)
|
||||
test_to_taos_batch(consumer)
|
||||
|
|
|
@ -10,13 +10,14 @@ class MockDataSource:
|
|||
"9.4,118,0.141,California.SanFrancisco,4"
|
||||
]
|
||||
|
||||
def __init__(self, tb_name_prefix, table_count):
|
||||
def __init__(self, tb_name_prefix, table_count, infinity=True):
|
||||
self.table_name_prefix = tb_name_prefix + "_"
|
||||
self.table_count = table_count
|
||||
self.max_rows = 10000000
|
||||
self.current_ts = round(time.time() * 1000) - self.max_rows * 100
|
||||
# [(tableId, tableName, values),]
|
||||
self.data = self._init_data()
|
||||
self.infinity = infinity
|
||||
|
||||
def _init_data(self):
|
||||
lines = self.samples * (self.table_count // 5 + 1)
|
||||
|
@ -28,14 +29,19 @@ class MockDataSource:
|
|||
|
||||
def __iter__(self):
|
||||
self.row = 0
|
||||
return self
|
||||
if not self.infinity:
|
||||
return iter(self._iter_data())
|
||||
else:
|
||||
return self
|
||||
|
||||
def __next__(self):
|
||||
"""
|
||||
next 1000 rows for each table.
|
||||
return: {tableId:[row,...]}
|
||||
"""
|
||||
# generate 1000 timestamps
|
||||
return self._iter_data()
|
||||
|
||||
def _iter_data(self):
|
||||
ts = []
|
||||
for _ in range(1000):
|
||||
self.current_ts += 100
|
||||
|
@ -47,3 +53,9 @@ class MockDataSource:
|
|||
rows = [table_name + ',' + t + ',' + values for t in ts]
|
||||
result.append((table_id, rows))
|
||||
return result
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
datasource = MockDataSource('t', 10, False)
|
||||
for data in datasource:
|
||||
print(data)
|
||||
|
|
|
@ -10,6 +10,7 @@ class SQLWriter:
|
|||
self._tb_tags = {}
|
||||
self._conn = get_connection_func()
|
||||
self._max_sql_length = self.get_max_sql_length()
|
||||
self._conn.execute("create database if not exists test")
|
||||
self._conn.execute("USE test")
|
||||
|
||||
def get_max_sql_length(self):
|
||||
|
@ -20,7 +21,7 @@ class SQLWriter:
|
|||
return int(r[1])
|
||||
return 1024 * 1024
|
||||
|
||||
def process_lines(self, lines: str):
|
||||
def process_lines(self, lines: [str]):
|
||||
"""
|
||||
:param lines: [[tbName,ts,current,voltage,phase,location,groupId]]
|
||||
"""
|
||||
|
@ -60,6 +61,7 @@ class SQLWriter:
|
|||
buf.append(q)
|
||||
sql_len += len(q)
|
||||
sql += " ".join(buf)
|
||||
self.create_tables()
|
||||
self.execute_sql(sql)
|
||||
self._tb_values.clear()
|
||||
|
||||
|
@ -88,3 +90,22 @@ class SQLWriter:
|
|||
except BaseException as e:
|
||||
self.log.error("Execute SQL: %s", sql)
|
||||
raise e
|
||||
|
||||
def close(self):
|
||||
if self._conn:
|
||||
self._conn.close()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
def get_connection_func():
|
||||
conn = taos.connect()
|
||||
return conn
|
||||
|
||||
|
||||
writer = SQLWriter(get_connection_func=get_connection_func)
|
||||
writer.execute_sql(
|
||||
"create stable if not exists meters (ts timestamp, current float, voltage int, phase float) "
|
||||
"tags (location binary(64), groupId int)")
|
||||
writer.execute_sql(
|
||||
"INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) "
|
||||
"VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32)")
|
||||
|
|
|
@ -19,8 +19,14 @@ def init_tmq_env(db, topic):
|
|||
conn.execute("insert into tb3 values (now, 3, 3.0, 'tmq test')")
|
||||
|
||||
|
||||
def cleanup(db, topic):
|
||||
conn = taos.connect()
|
||||
conn.execute("drop topic if exists {}".format(topic))
|
||||
conn.execute("drop database if exists {}".format(db))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
init_tmq_env("tmq_test", "tmq_test_topic") # init env
|
||||
init_tmq_env("tmq_test", "tmq_test_topic") # init env
|
||||
consumer = Consumer(
|
||||
{
|
||||
"group.id": "tg2",
|
||||
|
@ -33,9 +39,9 @@ if __name__ == '__main__':
|
|||
|
||||
try:
|
||||
while True:
|
||||
res = consumer.poll(100)
|
||||
res = consumer.poll(1)
|
||||
if not res:
|
||||
continue
|
||||
break
|
||||
err = res.error()
|
||||
if err is not None:
|
||||
raise err
|
||||
|
@ -46,3 +52,4 @@ if __name__ == '__main__':
|
|||
finally:
|
||||
consumer.unsubscribe()
|
||||
consumer.close()
|
||||
cleanup("tmq_test", "tmq_test_topic")
|
||||
|
|
|
@ -60,7 +60,7 @@ TDengine version updates often add new features; the connectors in the list
| **Connection Management** | Supported | Supported | Supported | Supported | Supported | Supported |
| **Regular Query** | Supported | Supported | Supported | Supported | Supported | Supported |
| **Parameter Binding** | Not supported yet | Not supported yet | Supported | Supported | Not supported yet | Supported |
| **Data Subscription (TMQ)** | Not supported yet | Not supported yet | Supported | Not supported yet | Not supported yet | Supported |
| **Data Subscription (TMQ)** | Not supported yet | Supported | Supported | Not supported yet | Not supported yet | Supported |
| **Schemaless** | Not supported yet | Not supported yet | Not supported yet | Not supported yet | Not supported yet | Not supported yet |
| **Bulk Pulling (based on WebSocket)** | Supported | Supported | Supported | Supported | Supported | Supported |
| **DataFrame** | Not supported | Supported | Not supported | Not supported | Not supported | Not supported |

@ -58,7 +58,7 @@ database_option: {
- WAL_FSYNC_PERIOD: the period, in milliseconds, at which WAL data is flushed to disk when the WAL parameter is set to 2. The default is 3000. The minimum is 0, which means every write is flushed to disk immediately; the maximum is 180000, i.e. three minutes.
- MAXROWS: the maximum number of rows recorded in a file block. The default is 4096.
- MINROWS: the minimum number of rows recorded in a file block. The default is 100.
- KEEP: the number of days for which data files are retained. The default is 3650 and the range is [1, 365000]; the value must be greater than or equal to the DURATION parameter. The database automatically deletes data that has been kept longer than the KEEP value. KEEP can be written with a unit, such as KEEP 100h or KEEP 10d; m (minutes), h (hours), and d (days) are supported. It can also be written without a unit, such as KEEP 50, in which case the unit defaults to days.
- KEEP: the number of days for which data files are retained. The default is 3650 and the range is [1, 365000]; the value must be greater than or equal to the DURATION parameter. The database automatically deletes data that has been kept longer than the KEEP value. KEEP can be written with a unit, such as KEEP 100h or KEEP 10d; m (minutes), h (hours), and d (days) are supported. It can also be written without a unit, such as KEEP 50, in which case the unit defaults to days. The Enterprise Edition supports the [Tiered Storage](https://docs.taosdata.com/tdinternal/arch/#%E5%A4%9A%E7%BA%A7%E5%AD%98%E5%82%A8) function, so multiple retention periods can be specified (comma separated, at most 3 values, satisfying keep 0 <= keep 1 <= keep 2, e.g. KEEP 100h,100d,3650d); the Community Edition does not support tiered storage (even if multiple retention periods are configured, they do not take effect and the largest one is used as KEEP).
- PAGES: the number of cache pages of the metadata storage engine in a vnode. The default is 256 and the minimum is 64. The metadata storage of one vnode occupies PAGESIZE \* PAGES, which is 1 MB of memory by default.
- PAGESIZE: the page size, in KB, of the metadata storage engine in a vnode. The default is 4 KB. The range is 1 to 16384, i.e. 1 KB to 16 MB.
- PRECISION: the timestamp precision of the database. ms means milliseconds, us means microseconds, and ns means nanoseconds; the default is ms.

@ -142,7 +142,7 @@ SHOW DATABASES;
### Show the CREATE Statement of a Database

```
SHOW CREATE DATABASE db_name;
SHOW CREATE DATABASE db_name \G;
```

This is commonly used for database migration. For an existing database, it returns the CREATE statement; executing that statement in another cluster produces a database with exactly the same settings.

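A minimal sketch of the migration workflow described above, using the Python connector; power is an assumed database name.

```python
import taos

# Illustrative only: fetch the CREATE statement so it can be replayed on another cluster.
conn = taos.connect()
result = conn.query("SHOW CREATE DATABASE power")
for row in result.fetch_all():
    print(row)
conn.close()
```
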
@ -150,7 +150,7 @@ SHOW CREATE DATABASE db_name;
### View Database Parameters

```sql
SELECT * FROM INFORMATION_SCHEMA.INS_DATABASES WHERE NAME='DBNAME' \G;
SELECT * FROM INFORMATION_SCHEMA.INS_DATABASES WHERE NAME='db_name' \G;
```

Lists the configuration parameters of the specified database, showing one parameter per row.

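The same information can also be read programmatically; a minimal sketch with an assumed database name.

```python
import taos

# Illustrative only: read one database's options from INFORMATION_SCHEMA,
# printing each column's field metadata next to its value.
conn = taos.connect()
result = conn.query("SELECT * FROM INFORMATION_SCHEMA.INS_DATABASES WHERE NAME='power'")
rows = result.fetch_all()
if rows:
    for field, value in zip(result.fields, rows[0]):
        print(field, value)
conn.close()
```
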
@ -177,4 +177,4 @@ REDISTRIBUTE VGROUP vgroup_no DNODE dnode_id1 [DNODE dnode_id2] [DNODE dnode_id3
BALANCE VGROUP
```

Automatically adjusts the distribution of vnodes across all vgroups in the cluster, which is equivalent to load balancing the cluster's data at the vnode level.
Automatically adjusts the distribution of vnodes across all vgroups in the cluster, which is equivalent to load balancing the cluster's data at the vnode level.

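A minimal sketch of triggering the rebalance from the Python connector; it assumes an administrator account.

```python
import taos

# Illustrative only: vnode-level load balancing is a cluster-wide operation.
conn = taos.connect(user="root", password="taosdata")
conn.execute("BALANCE VGROUP")
conn.close()
```
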
@ -28,7 +28,7 @@ INSERT INTO tb_name [(field1_name, ...)] subquery
2. Timestamps written in different formats are affected by precision differently. Timestamps written as date-and-time strings are not affected by the time precision of the DATABASE they belong to, while timestamps written as long integers are. For example, the UNIX time of the timestamp "2021-07-13 16:16:48" is 1626164208 seconds; it must be written as 1626164208000 under millisecond precision, as 1626164208000000 under microsecond precision, and as 1626164208000000000 under nanosecond precision.

3. When inserting multiple rows of data at once, do not set the timestamp in the first column of every row to NOW. Otherwise multiple records in the statement will carry the same timestamp, may overwrite one another, and may not all be stored correctly. The reason is that NOW is resolved to the client-side execution time of the SQL statement, so every occurrence of NOW in the same statement is replaced with exactly the same timestamp value.
The oldest timestamp that can be inserted is the current server time minus the configured KEEP value (the number of days data is retained). The newest timestamp that can be inserted is the current server time plus the configured DURATION value (the time span, in days, of the data stored in one data file). Both KEEP and DURATION can be specified when the database is created; their defaults are 3650 days and 10 days respectively.
The oldest timestamp that can be inserted is the current server time minus the configured KEEP value (the number of days data is retained; it can be specified when the database is created and defaults to 3650 days). The newest timestamp that can be inserted depends on the database's PRECISION value (the timestamp precision, which can be specified when the database is created: ms means milliseconds, us means microseconds, ns means nanoseconds; the default is milliseconds). If the precision is milliseconds or microseconds, it is January 1st, 1970 at 00:00:00.000 UTC plus 1000 years, that is, January 1st, 2970 at 00:00:00.000 UTC; if the precision is nanoseconds, it is January 1st, 1970 at 00:00:00.000000000 UTC plus 292 years, that is, January 1st, 2262 at 00:00:00.000000000 UTC.

**Syntax Description**

@ -306,7 +306,7 @@ SHOW [db_name.]VGROUPS;
## SHOW VNODES

```sql
SHOW VNODES [dnode_name];
SHOW VNODES {dnode_id | dnode_endpoint};
```

Shows information about all vnodes in the current system or about the vnodes of a specified dnode.

@ -162,11 +162,7 @@ taos --dump-config
| Meaning | Execution policy for query statements |
| Unit | None |
| Default | 1 |
| Notes | 1: use only vnodes, never qnodes |

2: subtasks without scan operators run on qnodes, and subtasks with scan operators run on vnodes

3: vnodes run only scan operators, and all other operators run on qnodes |
| Notes | 1: use only vnodes, never qnodes; 2: subtasks without scan operators run on qnodes, and subtasks with scan operators run on vnodes; 3: vnodes run only scan operators, and all other operators run on qnodes |

### querySmaOptimize

@ -176,11 +172,7 @@ taos --dump-config
| Meaning | Optimization policy for the sma index |
| Unit | None |
| Default | 0 |
| Notes |

0: do not use the sma index; always query from the original data

1: use the sma index; for eligible statements, query directly from the pre-computed results |
| Notes |0: do not use the sma index; always query from the original data; 1: use the sma index; for eligible statements, query directly from the pre-computed results |

### maxNumOfDistinctRes

@ -323,6 +315,7 @@ The valid value of charset is UTF-8.
| Applicable | Server only |
| Meaning | Data file directory; all data files are written to this directory |
| Default | /var/lib/taos |
| Note | The [Tiered Storage](https://docs.taosdata.com/tdinternal/arch/#%E5%A4%9A%E7%BA%A7%E5%AD%98%E5%82%A8) function must be used together with the [KEEP](https://docs.taosdata.com/taos-sql/database/#%E5%8F%82%E6%95%B0%E8%AF%B4%E6%98%8E) parameter |

### tempDir

@ -388,7 +381,7 @@ The valid value of charset is UTF-8.
| Attribute | Description |
| -------- | -------------------------------------------- |
| Applicable | Both server and client |
| Meaning | Stop writing logs when the disk size of the log folder is less than this value |
| Meaning | Stop writing logs when the available space on the disk where the log folder resides is less than this value |
| Unit | GB |
| Default | 1.0 |

@ -1,135 +0,0 @@
|
|||
/* This example is to show how to use the td-connector through the cursor only and is a bit more raw.
|
||||
* No promises, object wrappers around data, functions that prettify the data, or anything.
|
||||
* The cursor will generally use callback functions over promises, and return and store the raw data from the C Interface.
|
||||
* It is advised to use the td-connector through the cursor and the TaosQuery class amongst other higher level APIs.
|
||||
*/
|
||||
|
||||
// Get the td-connector package
|
||||
const taos = require('td2.0-connector');
|
||||
|
||||
/* We will connect to TDengine by passing an object comprised of connection options to taos.connect and store the
|
||||
* connection to the variable conn
|
||||
*/
|
||||
/*
|
||||
* Connection Options
|
||||
* host: the host to connect to
|
||||
* user: the use to login as
|
||||
* password: the password for the above user to login
|
||||
* config: the location of the taos.cfg file, by default it is in /etc/taos
|
||||
* port: the port we connect through
|
||||
*/
|
||||
var conn = taos.connect({host:"127.0.0.1", user:"root", password:"taosdata", config:"/etc/taos",port:0});
|
||||
|
||||
// Initialize our TDengineCursor, which we use to interact with TDengine
|
||||
var c1 = conn.cursor();
|
||||
|
||||
// c1.execute(query) will execute the query
|
||||
// Let's create a database named db
|
||||
try {
|
||||
c1.execute('create database if not exists db;');
|
||||
}
|
||||
catch(err) {
|
||||
conn.close();
|
||||
throw err;
|
||||
}
|
||||
|
||||
// Now we will use database db
|
||||
try {
|
||||
c1.execute('use db;');
|
||||
}
|
||||
catch (err) {
|
||||
conn.close();
|
||||
throw err;
|
||||
}
|
||||
|
||||
// Let's create a table called weather
|
||||
// which stores some weather data like humidity, AQI (air quality index), temperature, and some notes as text
|
||||
try {
|
||||
c1.execute('create table if not exists weather (ts timestamp, humidity smallint, aqi int, temperature float, notes binary(30));');
|
||||
}
|
||||
catch (err) {
|
||||
conn.close();
|
||||
throw err;
|
||||
}
|
||||
|
||||
// Let's get the description of the table weather
|
||||
try {
|
||||
c1.execute('describe db.weather');
|
||||
}
|
||||
catch (err) {
|
||||
conn.close();
|
||||
throw err;
|
||||
}
|
||||
|
||||
// To get results, we run the function c1.fetchall()
|
||||
// It only returns the query results as an array of result rows, but also stores the latest results in c1.data
|
||||
try {
|
||||
var tableDesc = c1.fetchall(); // The description variable here is equal to c1.data;
|
||||
console.log(tableDesc);
|
||||
}
|
||||
catch (err) {
|
||||
conn.close();
|
||||
throw err;
|
||||
}
|
||||
|
||||
// Let's try to insert some random generated data to test with
|
||||
|
||||
let stime = new Date();
|
||||
let interval = 1000;
|
||||
|
||||
// Timestamps must be in the form of "YYYY-MM-DD HH:MM:SS.MMM" if they are in milliseconds
|
||||
// "YYYY-MM-DD HH:MM:SS.MMMMMM" if they are in microseconds
|
||||
// Thus, we create the following function to convert a javascript Date object to the correct formatting
|
||||
function convertDateToTS(date) {
|
||||
let tsArr = date.toISOString().split("T")
|
||||
return "\"" + tsArr[0] + " " + tsArr[1].substring(0, tsArr[1].length-1) + "\"";
|
||||
}
|
||||
|
||||
try {
|
||||
for (let i = 0; i < 10000; i++) {
|
||||
stime.setMilliseconds(stime.getMilliseconds() + interval);
|
||||
let insertData = [convertDateToTS(stime),
|
||||
parseInt(Math.random()*100),
|
||||
parseInt(Math.random()*300),
|
||||
parseFloat(Math.random()*10 + 30),
|
||||
"\"random note!\""];
|
||||
c1.execute('insert into db.weather values(' + insertData.join(',') + ' );');
|
||||
}
|
||||
}
|
||||
catch (err) {
|
||||
conn.close();
|
||||
throw err;
|
||||
}
|
||||
|
||||
// Now let's look at our newly inserted data
|
||||
var retrievedData;
|
||||
try {
|
||||
c1.execute('select * from db.weather;')
|
||||
retrievedData = c1.fetchall();
|
||||
|
||||
// c1.fields stores the names of each column retrieved
|
||||
console.log(c1.fields);
|
||||
console.log(retrievedData);
|
||||
// timestamps retrieved are always JS Date Objects
|
||||
// Numbers are numbers, big ints are big ints, and strings are strings
|
||||
}
|
||||
catch (err) {
|
||||
conn.close();
|
||||
throw err;
|
||||
}
|
||||
|
||||
// Let's try running some basic functions
|
||||
try {
|
||||
c1.execute('select count(*), avg(temperature), max(temperature), min(temperature), stddev(temperature) from db.weather;')
|
||||
c1.fetchall();
|
||||
console.log(c1.fields);
|
||||
console.log(c1.data);
|
||||
}
|
||||
catch(err) {
|
||||
conn.close();
|
||||
throw err;
|
||||
}
|
||||
|
||||
conn.close();
|
||||
|
||||
// Feel free to fork this repository or copy this code and start developing your own apps and backends with NodeJS and TDengine!
|
|
@ -1,153 +0,0 @@
|
|||
/* This example is to show the preferred way to use the td-connector */
|
||||
/* To run, enter node path/to/node-example.js */
|
||||
// Get the td-connector package
|
||||
const taos = require('td2.0-connector');
|
||||
|
||||
/* We will connect to TDengine by passing an object comprised of connection options to taos.connect and store the
|
||||
* connection to the variable conn
|
||||
*/
|
||||
/*
|
||||
* Connection Options
|
||||
* host: the host to connect to
|
||||
* user: the use to login as
|
||||
* password: the password for the above user to login
|
||||
* config: the location of the taos.cfg file, by default it is in /etc/taos
|
||||
* port: the port we connect through
|
||||
*/
|
||||
var conn = taos.connect({host:"127.0.0.1", user:"root", password:"taosdata", config:"/etc/taos",port:0});
|
||||
|
||||
// Initialize our TDengineCursor, which we use to interact with TDengine
|
||||
var c1 = conn.cursor();
|
||||
|
||||
// c1.query(query) will return a TaosQuery object, of which then we can execute. The execute function then returns a promise
|
||||
// Let's create a database named db
|
||||
try {
|
||||
c1.execute('create database if not exists db;');
|
||||
//var query = c1.query('create database if not exists db;');
|
||||
//query.execute();
|
||||
}
|
||||
catch(err) {
|
||||
conn.close();
|
||||
throw err;
|
||||
}
|
||||
|
||||
// Now we will use database db. As this query won't return any results,
|
||||
// we can simplify the code and directly use the c1.execute() function. No need for a TaosQuery object to wrap around the query
|
||||
try {
|
||||
c1.execute('use db;');
|
||||
}
|
||||
catch (err) {
|
||||
conn.close();
|
||||
throw err;
|
||||
}
|
||||
|
||||
// Let's create a table called weather
|
||||
// which stores some weather data like humidity, AQI (air quality index), temperature, and some notes as text
|
||||
// We can also immedietely execute a TaosQuery object by passing true as the secodn argument
|
||||
// This will then return a promise that we can then attach a callback function to
|
||||
try {
|
||||
var promise = c1.query('create table if not exists weather (ts timestamp, humidity smallint, aqi int, temperature float, notes binary(30));', true);
|
||||
promise.then(function(){
|
||||
console.log("Table created!");
|
||||
}).catch(function() {
|
||||
console.log("Table couldn't be created.")
|
||||
});
|
||||
}
|
||||
catch (err) {
|
||||
conn.close();
|
||||
throw err;
|
||||
}
|
||||
|
||||
// Let's get the description of the table weather
|
||||
// When using a TaosQuery object and then executing it, upon success it returns a TaosResult object, which is a wrapper around the
|
||||
// retrieved data and allows us to easily access data and manipulate or display it.
|
||||
try {
|
||||
c1.query('describe db.weather;').execute().then(function(result){
|
||||
// Result is an instance of TaosResult and has the function pretty() which instantly logs a prettified version to the console
|
||||
result.pretty();
|
||||
});
|
||||
}
|
||||
catch (err) {
|
||||
conn.close();
|
||||
throw err;
|
||||
}
|
||||
|
||||
|
||||
Date.prototype.Format = function(fmt){
|
||||
var o = {
|
||||
'M+': this.getMonth() + 1,
|
||||
'd+': this.getDate(),
|
||||
'H+': this.getHours(),
|
||||
'm+': this.getMinutes(),
|
||||
's+': this.getSeconds(),
|
||||
'S+': this.getMilliseconds()
|
||||
};
|
||||
if (/(y+)/.test(fmt)) {
|
||||
fmt = fmt.replace(RegExp.$1, (this.getFullYear() + '').substr(4 - RegExp.$1.length));
|
||||
}
|
||||
for (var k in o) {
|
||||
if (new RegExp('(' + k + ')').test(fmt)) {
|
||||
fmt = fmt.replace(RegExp.$1, (RegExp.$1.length == 1) ? (o[k]) : (('00' + o[k]).substr(String(o[k]).length)));
|
||||
}
|
||||
}
|
||||
return fmt;
|
||||
}
|
||||
|
||||
|
||||
// Let's try to insert some random generated data to test with
|
||||
// We will use the bind function of the TaosQuery object to easily bind values to question marks in the query
|
||||
// For Timestamps, a normal Datetime object or TaosTimestamp or milliseconds can be passed in through the bind function
|
||||
let stime = new Date();
|
||||
let interval = 1000;
|
||||
try {
|
||||
for (let i = 0; i < 1000; i++) {
|
||||
stime.setMilliseconds(stime.getMilliseconds() + interval);
|
||||
|
||||
//console.log(stime.Format('yyyy-MM-dd HH:mm:ss.SSS'));
|
||||
|
||||
let insertData = [stime,
|
||||
parseInt(Math.random()*100),
|
||||
parseInt(Math.random()*300),
|
||||
parseFloat(Math.random()*10 + 30),
|
||||
"Note"];
|
||||
//c1.execute('insert into db.weather values(' + insertData.join(',') + ' );');
|
||||
|
||||
//var query = c1.query('insert into db.weather values(?, ?, ?, ?, ?);').bind(insertData);
|
||||
//query.execute();
|
||||
c1.execute('insert into db.weather values(\"'+stime.Format('yyyy-MM-dd HH:mm:ss.SSS')+'\",'+parseInt(Math.random() * 100)+','+parseInt(Math.random() * 300)+','+parseFloat(Math.random()*10 + 30)+',"Note");');
|
||||
}
|
||||
}catch (err) {
|
||||
conn.close();
|
||||
throw err;
|
||||
}
|
||||
|
||||
// Now let's look at our newly inserted data
|
||||
var retrievedData;
|
||||
try {
|
||||
c1.query('select * from db.weather limit 5 offset 100;', true).then(function(result){
|
||||
//result.pretty();
|
||||
console.log('=========>'+JSON.stringify(result));
|
||||
// Neat!
|
||||
});
|
||||
|
||||
}
|
||||
catch (err) {
|
||||
conn.close();
|
||||
throw err;
|
||||
}
|
||||
|
||||
// Let's try running some basic functions
|
||||
try {
|
||||
c1.query('select count(*), avg(temperature), max(temperature), min(temperature), stddev(temperature) from db.weather;', true)
|
||||
.then(function(result) {
|
||||
result.pretty();
|
||||
})
|
||||
}
|
||||
catch(err) {
|
||||
conn.close();
|
||||
throw err;
|
||||
}
|
||||
|
||||
conn.close();
|
||||
|
||||
// Feel free to fork this repository or copy this code and start developing your own apps and backends with NodeJS and TDengine!
|
|
@ -1,29 +1,27 @@
|
|||
const taos = require('td2.0-connector');
|
||||
//const taos = require('../../../src/connector/nodejs/');
|
||||
|
||||
const taos = require("@tdengine/client");
|
||||
|
||||
var host = null;
|
||||
var port = 6030;
|
||||
for(var i = 2; i < global.process.argv.length; i++){
|
||||
var key = global.process.argv[i].split("=")[0];
|
||||
var value = global.process.argv[i].split("=")[1];
|
||||
|
||||
if("host" == key){
|
||||
host = value;
|
||||
}
|
||||
if("port" == key){
|
||||
port = value;
|
||||
}
|
||||
var key = global.process.argv[i].split("=")[0];
|
||||
var value = global.process.argv[i].split("=")[1];
|
||||
|
||||
if("host" == key){
|
||||
host = value;
|
||||
}
|
||||
if("port" == key){
|
||||
port = value;
|
||||
}
|
||||
}
|
||||
|
||||
if(host == null){
|
||||
console.log("Usage: node nodejsChecker.js host=<hostname> port=<port>");
|
||||
process.exit(0);
|
||||
console.log("Usage: node nodejsChecker.js host=<hostname> port=<port>");
|
||||
process.exit(0);
|
||||
}
|
||||
|
||||
// establish connection
|
||||
var conn = taos.connect({host:host, user:"root", password:"taosdata",port:port});
|
||||
var cursor = conn.cursor();
|
||||
var cursor = conn.cursor();
|
||||
// create database
|
||||
executeSql("create database if not exists test", 0);
|
||||
// use db
|
||||
|
@ -40,22 +38,22 @@ executeQuery("select * from test.weather");
|
|||
conn.close();
|
||||
|
||||
function executeQuery(sql){
|
||||
var start = new Date().getTime();
|
||||
var promise = cursor.query(sql, true);
|
||||
var end = new Date().getTime();
|
||||
promise.then(function(result){
|
||||
printSql(sql, result != null,(end - start));
|
||||
result.pretty();
|
||||
});
|
||||
var start = new Date().getTime();
|
||||
var promise = cursor.query(sql, true);
|
||||
var end = new Date().getTime();
|
||||
promise.then(function(result){
|
||||
printSql(sql, result != null,(end - start));
|
||||
result.pretty();
|
||||
});
|
||||
}
|
||||
|
||||
function executeSql(sql, affectRows){
|
||||
var start = new Date().getTime();
|
||||
var promise = cursor.execute(sql);
|
||||
var end = new Date().getTime();
|
||||
printSql(sql, promise == affectRows, (end - start));
|
||||
var start = new Date().getTime();
|
||||
var promise = cursor.execute(sql);
|
||||
var end = new Date().getTime();
|
||||
printSql(sql, promise == affectRows, (end - start));
|
||||
}
|
||||
|
||||
function printSql(sql, succeed, cost){
|
||||
console.log("[ "+(succeed ? "OK" : "ERROR!")+" ] time cost: " + cost + " ms, execute statement ====> " + sql);
|
||||
}
|
||||
console.log("[ "+(succeed ? "OK" : "ERROR!")+" ] time cost: " + cost + " ms, execute statement ====> " + sql);
|
||||
}
|
|
@ -1,125 +0,0 @@
|
|||
const taos = require('td2.0-connector');
|
||||
var conn = taos.connect({host:"127.0.0.1", user:"root", password:"taosdata", config:"/etc/taos",port:0})
|
||||
var c1 = conn.cursor(); // Initializing a new cursor
|
||||
|
||||
let stime = new Date();
|
||||
let interval = 1000;
|
||||
|
||||
function convertDateToTS(date) {
|
||||
let tsArr = date.toISOString().split("T")
|
||||
return "\"" + tsArr[0] + " " + tsArr[1].substring(0, tsArr[1].length - 1) + "\"";
|
||||
}
|
||||
|
||||
function R(l, r) {
|
||||
return Math.random() * (r - l) - r;
|
||||
}
|
||||
|
||||
function randomBool() {
|
||||
if (Math.random() < 0.5) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
// Initialize
|
||||
const dbname = "nodejs_1970_db";
|
||||
const tbname = "t1";
|
||||
|
||||
let dropDB = "drop database if exists " + dbname
|
||||
console.log(dropDB);//asdasdasd
|
||||
c1.execute(dropDB);///asdasd
|
||||
|
||||
let createDB = "create database " + dbname + " keep 36500"
|
||||
console.log(createDB);
|
||||
c1.execute(createDB);
|
||||
|
||||
let useTbl = "use " + dbname
|
||||
console.log(useTbl)
|
||||
c1.execute(useTbl);
|
||||
|
||||
let createTbl = "create table if not exists " + tbname + "(ts timestamp,id int)"
|
||||
console.log(createTbl);
|
||||
c1.execute(createTbl);
|
||||
|
||||
//1969-12-31 23:59:59.999
|
||||
//1970-01-01 00:00:00.000
|
||||
//1970-01-01 07:59:59.999
|
||||
//1970-01-01 08:00:00.000a
|
||||
//1628928479484 2021-08-14 08:07:59.484
|
||||
let sql1 = "insert into " + dbname + "." + tbname + " values('1969-12-31 23:59:59.999',1)"
|
||||
console.log(sql1);
|
||||
c1.execute(sql1);
|
||||
|
||||
let sql2 = "insert into " + dbname + "." + tbname + " values('1970-01-01 00:00:00.000',2)"
|
||||
console.log(sql2);
|
||||
c1.execute(sql2);
|
||||
|
||||
let sql3 = "insert into " + dbname + "." + tbname + " values('1970-01-01 07:59:59.999',3)"
|
||||
console.log(sql3);
|
||||
c1.execute(sql3);
|
||||
|
||||
let sql4 = "insert into " + dbname + "." + tbname + " values('1970-01-01 08:00:00.000',4)"
|
||||
console.log(sql4);
|
||||
c1.execute(sql4);
|
||||
|
||||
let sql5 = "insert into " + dbname + "." + tbname + " values('2021-08-14 08:07:59.484',5)"
|
||||
console.log(sql5);
|
||||
c1.execute(sql5);
|
||||
|
||||
// Select
|
||||
let query1 = "select * from " + dbname + "." + tbname
|
||||
console.log(query1);
|
||||
c1.execute(query1);
|
||||
|
||||
var d = c1.fetchall();
|
||||
console.log(c1.fields);
|
||||
for (let i = 0; i < d.length; i++)
|
||||
console.log(d[i][0].valueOf());
|
||||
|
||||
//initialize
|
||||
let initSql1 = "drop table if exists " + tbname
|
||||
console.log(initSql1);
|
||||
c1.execute(initSql1);
|
||||
|
||||
console.log(createTbl);
|
||||
c1.execute(createTbl);
|
||||
c1.execute(useTbl)
|
||||
|
||||
//-28800001 1969-12-31 23:59:59.999
|
||||
//-28800000 1970-01-01 00:00:00.000
|
||||
//-1 1970-01-01 07:59:59.999
|
||||
//0 1970-01-01 08:00:00.00
|
||||
//1628928479484 2021-08-14 08:07:59.484
|
||||
let sql11 = "insert into " + dbname + "." + tbname + " values(-28800001,11)";
|
||||
console.log(sql11);
|
||||
c1.execute(sql11);
|
||||
|
||||
let sql12 = "insert into " + dbname + "." + tbname + " values(-28800000,12)"
|
||||
console.log(sql12);
|
||||
c1.execute(sql12);
|
||||
|
||||
let sql13 = "insert into " + dbname + "." + tbname + " values(-1,13)"
|
||||
console.log(sql13);
|
||||
c1.execute(sql13);
|
||||
|
||||
let sql14 = "insert into " + dbname + "." + tbname + " values(0,14)"
|
||||
console.log(sql14);
|
||||
c1.execute(sql14);
|
||||
|
||||
let sql15 = "insert into " + dbname + "." + tbname + " values(1628928479484,15)"
|
||||
console.log(sql15);
|
||||
c1.execute(sql15);
|
||||
|
||||
// Select
|
||||
console.log(query1);
|
||||
c1.execute(query1);
|
||||
|
||||
var d = c1.fetchall();
|
||||
console.log(c1.fields);
|
||||
for (let i = 0; i < d.length; i++)
|
||||
console.log(d[i][0].valueOf());
|
||||
|
||||
setTimeout(function () {
|
||||
conn.close();
|
||||
}, 2000);
|
||||
|
|
@ -146,9 +146,9 @@ extern void (*tColDataCalcSMA[])(SColData *pColData, int64_t *sum, int64_t *max,
|
|||
int32_t tColDataAddValueByBind(SColData *pColData, TAOS_MULTI_BIND *pBind);
|
||||
void tColDataSortMerge(SArray *colDataArr);
|
||||
|
||||
//for raw block
|
||||
int32_t tColDataAddValueByDataBlock(SColData *pColData, int8_t type, int32_t bytes,
|
||||
int32_t nRows, char* lengthOrbitmap, char *data);
|
||||
// for raw block
|
||||
int32_t tColDataAddValueByDataBlock(SColData *pColData, int8_t type, int32_t bytes, int32_t nRows, char *lengthOrbitmap,
|
||||
char *data);
|
||||
// for encode/decode
|
||||
int32_t tPutColData(uint8_t *pBuf, SColData *pColData);
|
||||
int32_t tGetColData(uint8_t *pBuf, SColData *pColData);
|
||||
|
@ -261,7 +261,13 @@ struct STag {
|
|||
|
||||
// STSchema ================================
|
||||
STSchema *tBuildTSchema(SSchema *aSchema, int32_t numOfCols, int32_t version);
|
||||
void tDestroyTSchema(STSchema *pTSchema);
|
||||
#define tDestroyTSchema(pTSchema) \
|
||||
do { \
|
||||
if (pTSchema) { \
|
||||
taosMemoryFree(pTSchema); \
|
||||
pTSchema = NULL; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#endif
|
||||
|
||||
|
|
|
@ -914,6 +914,7 @@ typedef struct {
|
|||
int32_t numOfRetensions;
|
||||
SArray* pRetensions;
|
||||
int8_t schemaless;
|
||||
int16_t sstTrigger;
|
||||
} SDbCfgRsp;
|
||||
|
||||
int32_t tSerializeSDbCfgRsp(void* buf, int32_t bufLen, const SDbCfgRsp* pRsp);
|
||||
|
@ -1278,6 +1279,25 @@ typedef struct {
|
|||
int32_t tSerializeSAlterVnodeReplicaReq(void* buf, int32_t bufLen, SAlterVnodeReplicaReq* pReq);
|
||||
int32_t tDeserializeSAlterVnodeReplicaReq(void* buf, int32_t bufLen, SAlterVnodeReplicaReq* pReq);
|
||||
|
||||
typedef struct {
|
||||
int32_t vgId;
|
||||
int8_t disable;
|
||||
} SDisableVnodeWriteReq;
|
||||
|
||||
int32_t tSerializeSDisableVnodeWriteReq(void* buf, int32_t bufLen, SDisableVnodeWriteReq* pReq);
|
||||
int32_t tDeserializeSDisableVnodeWriteReq(void* buf, int32_t bufLen, SDisableVnodeWriteReq* pReq);
|
||||
|
||||
typedef struct {
|
||||
int32_t srcVgId;
|
||||
int32_t dstVgId;
|
||||
uint32_t hashBegin;
|
||||
uint32_t hashEnd;
|
||||
int64_t reserved;
|
||||
} SAlterVnodeHashRangeReq;
|
||||
|
||||
int32_t tSerializeSAlterVnodeHashRangeReq(void* buf, int32_t bufLen, SAlterVnodeHashRangeReq* pReq);
|
||||
int32_t tDeserializeSAlterVnodeHashRangeReq(void* buf, int32_t bufLen, SAlterVnodeHashRangeReq* pReq);
|
||||
|
||||
typedef struct {
|
||||
SMsgHead header;
|
||||
char dbFName[TSDB_DB_FNAME_LEN];
|
||||
|
|
|
@ -220,6 +220,7 @@ enum {
|
|||
TD_DEF_MSG_TYPE(TDMT_VND_DROP_TTL_TABLE, "vnode-drop-ttl-stb", NULL, NULL)
|
||||
TD_DEF_MSG_TYPE(TDMT_VND_TRIM, "vnode-trim", NULL, NULL)
|
||||
TD_DEF_MSG_TYPE(TDMT_VND_COMMIT, "vnode-commit", NULL, NULL)
|
||||
TD_DEF_MSG_TYPE(TDMT_VND_DISABLE_WRITE, "vnode-disable-write", NULL, NULL)
|
||||
TD_DEF_MSG_TYPE(TDMT_VND_MAX_MSG, "vnd-max", NULL, NULL)
|
||||
|
||||
TD_NEW_MSG_SEG(TDMT_SCH_MSG)
|
||||
|
|
|
@ -370,7 +370,8 @@ int32_t tDecodeSStreamTask(SDecoder* pDecoder, SStreamTask* pTask);
|
|||
void tFreeSStreamTask(SStreamTask* pTask);
|
||||
|
||||
static FORCE_INLINE int32_t streamTaskInput(SStreamTask* pTask, SStreamQueueItem* pItem) {
|
||||
if (pItem->type == STREAM_INPUT__DATA_SUBMIT) {
|
||||
int8_t type = pItem->type;
|
||||
if (type == STREAM_INPUT__DATA_SUBMIT) {
|
||||
SStreamDataSubmit2* pSubmitClone = streamSubmitRefClone((SStreamDataSubmit2*)pItem);
|
||||
if (pSubmitClone == NULL) {
|
||||
qDebug("task %d %p submit enqueue failed since out of memory", pTask->taskId, pTask);
|
||||
|
@ -382,19 +383,19 @@ static FORCE_INLINE int32_t streamTaskInput(SStreamTask* pTask, SStreamQueueItem
|
|||
pSubmitClone->submit.msgStr, pSubmitClone->submit.msgLen, pSubmitClone->submit.ver);
|
||||
taosWriteQitem(pTask->inputQueue->queue, pSubmitClone);
|
||||
// qStreamInput(pTask->exec.executor, pSubmitClone);
|
||||
} else if (pItem->type == STREAM_INPUT__DATA_BLOCK || pItem->type == STREAM_INPUT__DATA_RETRIEVE ||
|
||||
pItem->type == STREAM_INPUT__REF_DATA_BLOCK) {
|
||||
} else if (type == STREAM_INPUT__DATA_BLOCK || type == STREAM_INPUT__DATA_RETRIEVE ||
|
||||
type == STREAM_INPUT__REF_DATA_BLOCK) {
|
||||
taosWriteQitem(pTask->inputQueue->queue, pItem);
|
||||
// qStreamInput(pTask->exec.executor, pItem);
|
||||
} else if (pItem->type == STREAM_INPUT__CHECKPOINT) {
|
||||
} else if (type == STREAM_INPUT__CHECKPOINT) {
|
||||
taosWriteQitem(pTask->inputQueue->queue, pItem);
|
||||
// qStreamInput(pTask->exec.executor, pItem);
|
||||
} else if (pItem->type == STREAM_INPUT__GET_RES) {
|
||||
} else if (type == STREAM_INPUT__GET_RES) {
|
||||
taosWriteQitem(pTask->inputQueue->queue, pItem);
|
||||
// qStreamInput(pTask->exec.executor, pItem);
|
||||
}
|
||||
|
||||
if (pItem->type != STREAM_INPUT__GET_RES && pItem->type != STREAM_INPUT__CHECKPOINT && pTask->triggerParam != 0) {
|
||||
if (type != STREAM_INPUT__GET_RES && type != STREAM_INPUT__CHECKPOINT && pTask->triggerParam != 0) {
|
||||
atomic_val_compare_exchange_8(&pTask->triggerStatus, TASK_TRIGGER_STATUS__INACTIVE, TASK_TRIGGER_STATUS__ACTIVE);
|
||||
}
|
||||
|
||||
|
|
|
@ -193,7 +193,7 @@ typedef struct SSyncLogStore {
  SyncIndex (*syncLogLastIndex)(struct SSyncLogStore* pLogStore);
  SyncTerm (*syncLogLastTerm)(struct SSyncLogStore* pLogStore);

  int32_t (*syncLogAppendEntry)(struct SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry);
  int32_t (*syncLogAppendEntry)(struct SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry, bool forcSync);
  int32_t (*syncLogGetEntry)(struct SSyncLogStore* pLogStore, SyncIndex index, SSyncRaftEntry** ppEntry);
  int32_t (*syncLogTruncate)(struct SSyncLogStore* pLogStore, SyncIndex fromIndex);

@ -150,7 +150,7 @@ int32_t tfsRmdir(STfs *pTfs, const char *rname);
 * @param nrname The rel name of new file.
 * @return int32_t 0 for success, -1 for failure.
 */
int32_t tfsRename(STfs *pTfs, char *orname, char *nrname);
int32_t tfsRename(STfs *pTfs, const char *orname, const char *nrname);

/**
 * @brief Init file object in tfs.

@ -201,6 +201,7 @@ int32_t walFetchHead(SWalReader *pRead, int64_t ver, SWalCkHead *pHead);
int32_t walFetchBody(SWalReader *pRead, SWalCkHead **ppHead);
int32_t walSkipFetchBody(SWalReader *pRead, const SWalCkHead *pHead);

SWalRef *walRefFirstVer(SWal *, SWalRef *);
SWalRef *walRefCommittedVer(SWal *);

SWalRef *walOpenRef(SWal *);

@ -746,7 +746,7 @@ function is_version_compatible() {
deb_erase() {
  confirm=""
  while [ "" == "${confirm}" ]; do
    echo -e -n "${RED}Exist tdengine deb detected, do you want to remove it? [yes|no] ${NC}:"
    echo -e -n "${RED}Existing TDengine deb is detected, do you want to remove it? [yes|no] ${NC}:"
    read confirm
    if [ "yes" == "$confirm" ]; then
      ${csudo}dpkg --remove tdengine ||:

@ -760,7 +760,7 @@ deb_erase() {
rpm_erase() {
  confirm=""
  while [ "" == "${confirm}" ]; do
    echo -e -n "${RED}Exist tdengine rpm detected, do you want to remove it? [yes|no] ${NC}:"
    echo -e -n "${RED}Existing TDengine rpm is detected, do you want to remove it? [yes|no] ${NC}:"
    read confirm
    if [ "yes" == "$confirm" ]; then
      ${csudo}rpm -e tdengine ||:

@ -23,12 +23,12 @@
#include "scheduler.h"
#include "tcache.h"
#include "tglobal.h"
#include "thttp.h"
#include "tmsg.h"
#include "tref.h"
#include "trpc.h"
#include "tsched.h"
#include "ttime.h"
#include "thttp.h"

#define TSC_VAR_NOT_RELEASE 1
#define TSC_VAR_RELEASED 0

@ -65,7 +65,7 @@ static int32_t registerRequest(SRequestObj *pRequest, STscObj *pTscObj) {

static void deregisterRequest(SRequestObj *pRequest) {
  const static int64_t SLOW_QUERY_INTERVAL = 3000000L;  // todo configurable
  if(pRequest == NULL){
  if (pRequest == NULL) {
    tscError("pRequest == NULL");
    return;
  }

@ -380,9 +380,9 @@ void doDestroyRequest(void *p) {
  }

  if (pRequest->syncQuery) {
    if (pRequest->body.param){
      tsem_destroy(&((SSyncQueryParam*)pRequest->body.param)->sem);
    }
    if (pRequest->body.param) {
      tsem_destroy(&((SSyncQueryParam *)pRequest->body.param)->sem);
    }
    taosMemoryFree(pRequest->body.param);
  }

@ -400,65 +400,26 @@ void destroyRequest(SRequestObj *pRequest) {
|
|||
removeRequest(pRequest->self);
|
||||
}
|
||||
|
||||
void taosClientCrash(int signum, void *sigInfo, void *context) {
|
||||
taosIgnSignal(SIGTERM);
|
||||
taosIgnSignal(SIGHUP);
|
||||
taosIgnSignal(SIGINT);
|
||||
taosIgnSignal(SIGBREAK);
|
||||
|
||||
#if !defined(WINDOWS)
|
||||
taosIgnSignal(SIGBUS);
|
||||
#endif
|
||||
taosIgnSignal(SIGABRT);
|
||||
taosIgnSignal(SIGFPE);
|
||||
taosIgnSignal(SIGSEGV);
|
||||
|
||||
char *pMsg = NULL;
|
||||
const char *flags = "UTL FATAL ";
|
||||
ELogLevel level = DEBUG_FATAL;
|
||||
int32_t dflag = 255;
|
||||
int64_t msgLen= -1;
|
||||
|
||||
if (tsEnableCrashReport) {
|
||||
if (taosGenCrashJsonMsg(signum, &pMsg, lastClusterId, appInfo.startTime)) {
|
||||
taosPrintLog(flags, level, dflag, "failed to generate crash json msg");
|
||||
goto _return;
|
||||
} else {
|
||||
msgLen = strlen(pMsg);
|
||||
}
|
||||
}
|
||||
|
||||
_return:
|
||||
|
||||
taosLogCrashInfo("taos", pMsg, msgLen, signum, sigInfo);
|
||||
|
||||
#ifdef _TD_DARWIN_64
|
||||
exit(signum);
|
||||
#elif defined(WINDOWS)
|
||||
exit(signum);
|
||||
#endif
|
||||
}
|
||||
|
||||
void crashReportThreadFuncUnexpectedStopped(void) { atomic_store_32(&clientStop, -1); }
|
||||
|
||||
static void *tscCrashReportThreadFp(void *param) {
|
||||
setThreadName("client-crashReport");
|
||||
char filepath[PATH_MAX] = {0};
|
||||
snprintf(filepath, sizeof(filepath), "%s%s.taosCrashLog", tsLogDir, TD_DIRSEP);
|
||||
char *pMsg = NULL;
|
||||
int64_t msgLen = 0;
|
||||
char *pMsg = NULL;
|
||||
int64_t msgLen = 0;
|
||||
TdFilePtr pFile = NULL;
|
||||
bool truncateFile = false;
|
||||
int32_t sleepTime = 200;
|
||||
int32_t reportPeriodNum = 3600 * 1000 / sleepTime;
|
||||
int32_t loopTimes = reportPeriodNum;
|
||||
bool truncateFile = false;
|
||||
int32_t sleepTime = 200;
|
||||
int32_t reportPeriodNum = 3600 * 1000 / sleepTime;
|
||||
int32_t loopTimes = reportPeriodNum;
|
||||
|
||||
#ifdef WINDOWS
|
||||
if (taosCheckCurrentInDll()) {
|
||||
atexit(crashReportThreadFuncUnexpectedStopped);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
while (1) {
|
||||
if (clientStop) break;
|
||||
if (loopTimes++ < reportPeriodNum) {
|
||||
|
@ -488,12 +449,12 @@ static void *tscCrashReportThreadFp(void *param) {
|
|||
pMsg = NULL;
|
||||
continue;
|
||||
}
|
||||
|
||||
|
||||
if (pFile) {
|
||||
taosReleaseCrashLogFile(pFile, truncateFile);
|
||||
truncateFile = false;
|
||||
}
|
||||
|
||||
|
||||
taosMsleep(sleepTime);
|
||||
loopTimes = 0;
|
||||
}
|
||||
|
@ -506,11 +467,11 @@ int32_t tscCrashReportInit() {
|
|||
if (!tsEnableCrashReport) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
TdThreadAttr thAttr;
|
||||
taosThreadAttrInit(&thAttr);
|
||||
taosThreadAttrSetDetachState(&thAttr, PTHREAD_CREATE_JOINABLE);
|
||||
TdThread crashReportThread;
|
||||
TdThread crashReportThread;
|
||||
if (taosThreadCreate(&crashReportThread, &thAttr, tscCrashReportThreadFp, NULL) != 0) {
|
||||
tscError("failed to create crashReport thread since %s", strerror(errno));
|
||||
return -1;
|
||||
|
@ -535,13 +496,22 @@ void tscStopCrashReport() {
|
|||
}
|
||||
}
|
||||
|
||||
static void tscSetSignalHandle() {
|
||||
#if !defined(WINDOWS)
|
||||
taosSetSignal(SIGBUS, taosClientCrash);
|
||||
#endif
|
||||
taosSetSignal(SIGABRT, taosClientCrash);
|
||||
taosSetSignal(SIGFPE, taosClientCrash);
|
||||
taosSetSignal(SIGSEGV, taosClientCrash);
|
||||
void tscWriteCrashInfo(int signum, void *sigInfo, void *context) {
|
||||
char *pMsg = NULL;
|
||||
const char *flags = "UTL FATAL ";
|
||||
ELogLevel level = DEBUG_FATAL;
|
||||
int32_t dflag = 255;
|
||||
int64_t msgLen = -1;
|
||||
|
||||
if (tsEnableCrashReport) {
|
||||
if (taosGenCrashJsonMsg(signum, &pMsg, lastClusterId, appInfo.startTime)) {
|
||||
taosPrintLog(flags, level, dflag, "failed to generate crash json msg");
|
||||
} else {
|
||||
msgLen = strlen(pMsg);
|
||||
}
|
||||
}
|
||||
|
||||
taosLogCrashInfo("taos", pMsg, msgLen, signum, sigInfo);
|
||||
}
|
||||
|
||||
void taos_init_imp(void) {

@ -567,8 +537,6 @@ void taos_init_imp(void) {
    return;
  }

  tscSetSignalHandle();

  initQueryModuleMsgHandle();

  if (taosConvInit() != 0) {

@ -600,7 +568,7 @@ void taos_init_imp(void) {
  taosThreadMutexInit(&appInfo.mutex, NULL);

  tscCrashReportInit();

  tscDebug("client is initialized successfully");
}

@ -651,6 +619,9 @@ int taos_options_imp(TSDB_OPTION option, const char *str) {
    tscError("failed to set cfg:%s to %s since %s", pItem->name, str, terrstr());
  } else {
    tscInfo("set cfg:%s to %s", pItem->name, str);
    if (TSDB_OPTION_SHELL_ACTIVITY_TIMER == option || TSDB_OPTION_USE_ADAPTER == option) {
      code = taosSetCfg(pCfg, pItem->name);
    }
  }

  return code;

@ -1239,7 +1239,7 @@ STscObj* taosConnectImpl(const char* user, const char* auth, const char* db, __t

  int64_t transporterId = 0;
  asyncSendMsgToServer(pTscObj->pAppInfo->pTransporter, &pTscObj->pAppInfo->mgmtEp.epSet, &transporterId, body);

  tsem_wait(&pRequest->body.rspSem);
  if (pRequest->code != TSDB_CODE_SUCCESS) {
    const char* errorMsg =

@ -528,9 +528,8 @@ void taos_stop_query(TAOS_RES *res) {
  SRequestObj *pRequest = (SRequestObj *)res;
  pRequest->killed = true;

  int32_t numOfFields = taos_num_fields(pRequest);
  // It is not a query, no need to stop.
  if (numOfFields == 0) {
  if (NULL == pRequest->pQuery || QUERY_EXEC_MODE_SCHEDULE != pRequest->pQuery->execMode) {
    tscDebug("request 0x%" PRIx64 " no need to be killed since not query", pRequest->requestId);
    return;
  }

@ -179,7 +179,7 @@ static char* buildAlterSTableJson(void* alterData, int32_t alterDataLen) {
|
|||
}
|
||||
string = cJSON_PrintUnformatted(json);
|
||||
|
||||
end:
|
||||
end:
|
||||
cJSON_Delete(json);
|
||||
tFreeSMAltertbReq(&req);
|
||||
return string;
|
||||
|
@ -200,7 +200,7 @@ static char* processCreateStb(SMqMetaRsp* metaRsp) {
|
|||
}
|
||||
string = buildCreateTableJson(&req.schemaRow, &req.schemaTag, req.name, req.suid, TSDB_SUPER_TABLE);
|
||||
|
||||
_err:
|
||||
_err:
|
||||
tDecoderClear(&coder);
|
||||
return string;
|
||||
}
|
||||
|
@ -220,7 +220,7 @@ static char* processAlterStb(SMqMetaRsp* metaRsp) {
|
|||
}
|
||||
string = buildAlterSTableJson(req.alterOriData, req.alterOriDataLen);
|
||||
|
||||
_err:
|
||||
_err:
|
||||
tDecoderClear(&coder);
|
||||
return string;
|
||||
}
|
||||
|
@ -302,7 +302,7 @@ static void buildChildElement(cJSON* json, SVCreateTbReq* pCreateReq) {
|
|||
cJSON_AddItemToArray(tags, tag);
|
||||
}
|
||||
|
||||
end:
|
||||
end:
|
||||
cJSON_AddItemToObject(json, "tags", tags);
|
||||
taosArrayDestroy(pTagVals);
|
||||
}
|
||||
|
@ -360,7 +360,7 @@ static char* processCreateTable(SMqMetaRsp* metaRsp) {
|
|||
}
|
||||
}
|
||||
|
||||
_exit:
|
||||
_exit:
|
||||
for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
|
||||
pCreateReq = req.pReqs + iReq;
|
||||
taosMemoryFreeClear(pCreateReq->comment);
|
||||
|
@ -373,7 +373,7 @@ static char* processCreateTable(SMqMetaRsp* metaRsp) {
|
|||
}
|
||||
|
||||
static char* processAutoCreateTable(STaosxRsp* rsp) {
|
||||
if(rsp->createTableNum <= 0){
|
||||
if (rsp->createTableNum <= 0) {
|
||||
uError("WriteRaw:processAutoCreateTable rsp->createTableNum <= 0");
|
||||
goto _exit;
|
||||
}
|
||||
|
@ -392,14 +392,14 @@ static char* processAutoCreateTable(STaosxRsp* rsp) {
|
|||
goto _exit;
|
||||
}
|
||||
|
||||
if(pCreateReq[iReq].type != TSDB_CHILD_TABLE){
|
||||
if (pCreateReq[iReq].type != TSDB_CHILD_TABLE) {
|
||||
uError("WriteRaw:processAutoCreateTable pCreateReq[iReq].type != TSDB_CHILD_TABLE");
|
||||
goto _exit;
|
||||
}
|
||||
}
|
||||
string = buildCreateCTableJson(pCreateReq, rsp->createTableNum);
|
||||
|
||||
_exit:
|
||||
_exit:
|
||||
for (int i = 0; i < rsp->createTableNum; i++) {
|
||||
tDecoderClear(&decoder[i]);
|
||||
taosMemoryFreeClear(pCreateReq[i].comment);
|
||||
|
@ -500,7 +500,7 @@ static char* processAlterTable(SMqMetaRsp* metaRsp) {
|
|||
char* buf = NULL;
|
||||
|
||||
if (vAlterTbReq.tagType == TSDB_DATA_TYPE_JSON) {
|
||||
if(!tTagIsJson(vAlterTbReq.pTagVal)){
|
||||
if (!tTagIsJson(vAlterTbReq.pTagVal)) {
|
||||
uError("processAlterTable isJson false");
|
||||
goto _exit;
|
||||
}
|
||||
|
@ -524,7 +524,7 @@ static char* processAlterTable(SMqMetaRsp* metaRsp) {
|
|||
}
|
||||
string = cJSON_PrintUnformatted(json);
|
||||
|
||||
_exit:
|
||||
_exit:
|
||||
cJSON_Delete(json);
|
||||
tDecoderClear(&decoder);
|
||||
return string;
|
||||
|
@ -557,12 +557,12 @@ static char* processDropSTable(SMqMetaRsp* metaRsp) {
|
|||
|
||||
string = cJSON_PrintUnformatted(json);
|
||||
|
||||
_exit:
|
||||
_exit:
|
||||
cJSON_Delete(json);
|
||||
tDecoderClear(&decoder);
|
||||
return string;
|
||||
}
|
||||
static char* processDeleteTable(SMqMetaRsp* metaRsp){
|
||||
static char* processDeleteTable(SMqMetaRsp* metaRsp) {
|
||||
SDeleteRes req = {0};
|
||||
SDecoder coder = {0};
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
|
@ -596,7 +596,7 @@ static char* processDeleteTable(SMqMetaRsp* metaRsp){
|
|||
|
||||
string = cJSON_PrintUnformatted(json);
|
||||
|
||||
_exit:
|
||||
_exit:
|
||||
cJSON_Delete(json);
|
||||
tDecoderClear(&coder);
|
||||
return string;
|
||||
|
@ -638,7 +638,7 @@ static char* processDropTable(SMqMetaRsp* metaRsp) {
|
|||
|
||||
string = cJSON_PrintUnformatted(json);
|
||||
|
||||
_exit:
|
||||
_exit:
|
||||
cJSON_Delete(json);
|
||||
tDecoderClear(&decoder);
|
||||
return string;
|
||||
|
@ -726,7 +726,7 @@ static int32_t taosCreateStb(TAOS* taos, void* meta, int32_t metaLen) {
|
|||
code = pRequest->code;
|
||||
taosMemoryFree(pCmdMsg.pMsg);
|
||||
|
||||
end:
|
||||
end:
|
||||
destroyRequest(pRequest);
|
||||
tFreeSMCreateStbReq(&pReq);
|
||||
tDecoderClear(&coder);
|
||||
|
@ -796,7 +796,7 @@ static int32_t taosDropStb(TAOS* taos, void* meta, int32_t metaLen) {
|
|||
code = pRequest->code;
|
||||
taosMemoryFree(pCmdMsg.pMsg);
|
||||
|
||||
end:
|
||||
end:
|
||||
destroyRequest(pRequest);
|
||||
tDecoderClear(&coder);
|
||||
return code;
|
||||
|
@ -857,9 +857,9 @@ static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) {
|
|||
taosHashSetFreeFp(pVgroupHashmap, destroyCreateTbReqBatch);
|
||||
|
||||
SRequestConnInfo conn = {.pTrans = pTscObj->pAppInfo->pTransporter,
|
||||
.requestId = pRequest->requestId,
|
||||
.requestObjRefId = pRequest->self,
|
||||
.mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)};
|
||||
.requestId = pRequest->requestId,
|
||||
.requestObjRefId = pRequest->self,
|
||||
.mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)};
|
||||
|
||||
pRequest->tableList = taosArrayInit(req.nReqs, sizeof(SName));
|
||||
// loop to create table
|
||||
|
@ -939,7 +939,7 @@ static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) {
|
|||
|
||||
code = pRequest->code;
|
||||
|
||||
end:
|
||||
end:
|
||||
for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
|
||||
pCreateReq = req.pReqs + iReq;
|
||||
taosMemoryFreeClear(pCreateReq->comment);
|
||||
|
@ -1009,9 +1009,9 @@ static int32_t taosDropTable(TAOS* taos, void* meta, int32_t metaLen) {
|
|||
taosHashSetFreeFp(pVgroupHashmap, destroyDropTbReqBatch);
|
||||
|
||||
SRequestConnInfo conn = {.pTrans = pTscObj->pAppInfo->pTransporter,
|
||||
.requestId = pRequest->requestId,
|
||||
.requestObjRefId = pRequest->self,
|
||||
.mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)};
|
||||
.requestId = pRequest->requestId,
|
||||
.requestObjRefId = pRequest->self,
|
||||
.mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)};
|
||||
pRequest->tableList = taosArrayInit(req.nReqs, sizeof(SName));
|
||||
// loop to create table
|
||||
for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
|
||||
|
@ -1063,7 +1063,7 @@ static int32_t taosDropTable(TAOS* taos, void* meta, int32_t metaLen) {
|
|||
}
|
||||
code = pRequest->code;
|
||||
|
||||
end:
|
||||
end:
|
||||
taosHashCleanup(pVgroupHashmap);
|
||||
destroyRequest(pRequest);
|
||||
tDecoderClear(&coder);
|
||||
|
@ -1131,7 +1131,7 @@ static int32_t taosDeleteData(TAOS* taos, void* meta, int32_t metaLen) {
|
|||
}
|
||||
taos_free_result(res);
|
||||
|
||||
end:
|
||||
end:
|
||||
tDecoderClear(&coder);
|
||||
return code;
|
||||
}
|
||||
|
@ -1178,9 +1178,9 @@ static int32_t taosAlterTable(TAOS* taos, void* meta, int32_t metaLen) {
|
|||
}
|
||||
|
||||
SRequestConnInfo conn = {.pTrans = pTscObj->pAppInfo->pTransporter,
|
||||
.requestId = pRequest->requestId,
|
||||
.requestObjRefId = pRequest->self,
|
||||
.mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)};
|
||||
.requestId = pRequest->requestId,
|
||||
.requestObjRefId = pRequest->self,
|
||||
.mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)};
|
||||
|
||||
SVgroupInfo pInfo = {0};
|
||||
SName pName = {0};
|
||||
|
@ -1239,7 +1239,7 @@ static int32_t taosAlterTable(TAOS* taos, void* meta, int32_t metaLen) {
|
|||
code = handleAlterTbExecRes(pRes->res, pCatalog);
|
||||
}
|
||||
}
|
||||
end:
|
||||
end:
|
||||
taosArrayDestroy(pArray);
|
||||
if (pVgData) taosMemoryFreeClear(pVgData->pData);
|
||||
taosMemoryFreeClear(pVgData);
|
||||
|
@ -1402,7 +1402,7 @@ int taos_write_raw_block(TAOS* taos, int rows, char* pData, const char* tbname)
|
|||
launchQueryImpl(pRequest, pQuery, true, NULL);
|
||||
code = pRequest->code;
|
||||
|
||||
end:
|
||||
end:
|
||||
taosMemoryFreeClear(pTableMeta);
|
||||
qDestroyQuery(pQuery);
|
||||
destroyRequest(pRequest);
|
||||
|
@ -1521,7 +1521,7 @@ static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen) {
|
|||
launchQueryImpl(pRequest, pQuery, true, NULL);
|
||||
code = pRequest->code;
|
||||
|
||||
end:
|
||||
end:
|
||||
tDeleteSMqDataRsp(&rspObj.rsp);
|
||||
tDecoderClear(&decoder);
|
||||
qDestroyQuery(pQuery);
|
||||
|
@ -1619,7 +1619,7 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen)
|
|||
goto end;
|
||||
}
|
||||
|
||||
if(pCreateReq.type != TSDB_CHILD_TABLE){
|
||||
if (pCreateReq.type != TSDB_CHILD_TABLE) {
|
||||
uError("WriteRaw:pCreateReq.type != TSDB_CHILD_TABLE. table name: %s", tbName);
|
||||
code = TSDB_CODE_TSC_INVALID_VALUE;
|
||||
goto end;
|
||||
|
|
|
@ -1532,10 +1532,6 @@ STSchema *tBuildTSchema(SSchema *aSchema, int32_t numOfCols, int32_t version) {
  return pTSchema;
}

void tDestroyTSchema(STSchema *pTSchema) {
  if (pTSchema) taosMemoryFree(pTSchema);
}

// SColData ========================================
void tColDataDestroy(void *ph) {
  SColData *pColData = (SColData *)ph;

@ -76,19 +76,19 @@ bool tsEnableTelem = true;
int32_t tsTelemInterval = 43200;
char tsTelemServer[TSDB_FQDN_LEN] = "telemetry.taosdata.com";
uint16_t tsTelemPort = 80;
char* tsTelemUri = "/report";
char *tsTelemUri = "/report";

bool tsEnableCrashReport = true;
char* tsClientCrashReportUri = "/ccrashreport";
char* tsSvrCrashReportUri = "/dcrashreport";
bool tsEnableCrashReport = true;
char *tsClientCrashReportUri = "/ccrashreport";
char *tsSvrCrashReportUri = "/dcrashreport";

// schemaless
char tsSmlTagName[TSDB_COL_NAME_LEN] = "_tag_null";
char tsSmlChildTableName[TSDB_TABLE_NAME_LEN] = "";  // user defined child table name can be specified in tag value.
// If set to empty system will generate table name using MD5 hash.
// true means that the name and order of cols in each line are the same(only for influx protocol)
//bool tsSmlDataFormat = false;
//int32_t tsSmlBatchSize = 10000;
// bool tsSmlDataFormat = false;
// int32_t tsSmlBatchSize = 10000;

// query
int32_t tsQueryPolicy = 1;

@ -210,9 +210,7 @@ int32_t taosSetTfsCfg(SConfig *pCfg) {
int32_t taosSetTfsCfg(SConfig *pCfg);
#endif

struct SConfig *taosGetCfg() {
  return tsCfg;
}
struct SConfig *taosGetCfg() { return tsCfg; }

static int32_t taosLoadCfg(SConfig *pCfg, const char **envCmd, const char *inputCfgDir, const char *envFile,
                           char *apolloUrl) {

@ -319,8 +317,8 @@ static int32_t taosAddClientCfg(SConfig *pCfg) {
  if (cfgAddBool(pCfg, "keepColumnName", tsKeepColumnName, true) != 0) return -1;
  if (cfgAddString(pCfg, "smlChildTableName", "", 1) != 0) return -1;
  if (cfgAddString(pCfg, "smlTagName", tsSmlTagName, 1) != 0) return -1;
  // if (cfgAddBool(pCfg, "smlDataFormat", tsSmlDataFormat, 1) != 0) return -1;
  // if (cfgAddInt32(pCfg, "smlBatchSize", tsSmlBatchSize, 1, INT32_MAX, true) != 0) return -1;
  // if (cfgAddBool(pCfg, "smlDataFormat", tsSmlDataFormat, 1) != 0) return -1;
  // if (cfgAddInt32(pCfg, "smlBatchSize", tsSmlBatchSize, 1, INT32_MAX, true) != 0) return -1;
  if (cfgAddInt32(pCfg, "maxMemUsedByInsert", tsMaxMemUsedByInsert, 1, INT32_MAX, true) != 0) return -1;
  if (cfgAddInt32(pCfg, "maxRetryWaitTime", tsMaxRetryWaitTime, 0, 86400000, 0) != 0) return -1;
  if (cfgAddBool(pCfg, "useAdapter", tsUseAdapter, true) != 0) return -1;

@ -662,9 +660,9 @@ static int32_t taosSetClientCfg(SConfig *pCfg) {

  tstrncpy(tsSmlChildTableName, cfgGetItem(pCfg, "smlChildTableName")->str, TSDB_TABLE_NAME_LEN);
  tstrncpy(tsSmlTagName, cfgGetItem(pCfg, "smlTagName")->str, TSDB_COL_NAME_LEN);
  // tsSmlDataFormat = cfgGetItem(pCfg, "smlDataFormat")->bval;
  // tsSmlDataFormat = cfgGetItem(pCfg, "smlDataFormat")->bval;

  // tsSmlBatchSize = cfgGetItem(pCfg, "smlBatchSize")->i32;
  // tsSmlBatchSize = cfgGetItem(pCfg, "smlBatchSize")->i32;
  tsMaxMemUsedByInsert = cfgGetItem(pCfg, "maxMemUsedByInsert")->i32;

  tsShellActivityTimer = cfgGetItem(pCfg, "shellActivityTimer")->i32;

@ -1048,10 +1046,10 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) {
      tstrncpy(tsSmlChildTableName, cfgGetItem(pCfg, "smlChildTableName")->str, TSDB_TABLE_NAME_LEN);
    } else if (strcasecmp("smlTagName", name) == 0) {
      tstrncpy(tsSmlTagName, cfgGetItem(pCfg, "smlTagName")->str, TSDB_COL_NAME_LEN);
      // } else if (strcasecmp("smlDataFormat", name) == 0) {
      //   tsSmlDataFormat = cfgGetItem(pCfg, "smlDataFormat")->bval;
      // } else if (strcasecmp("smlBatchSize", name) == 0) {
      //   tsSmlBatchSize = cfgGetItem(pCfg, "smlBatchSize")->i32;
      // } else if (strcasecmp("smlDataFormat", name) == 0) {
      //   tsSmlDataFormat = cfgGetItem(pCfg, "smlDataFormat")->bval;
      // } else if (strcasecmp("smlBatchSize", name) == 0) {
      //   tsSmlBatchSize = cfgGetItem(pCfg, "smlBatchSize")->i32;
    } else if (strcasecmp("shellActivityTimer", name) == 0) {
      tsShellActivityTimer = cfgGetItem(pCfg, "shellActivityTimer")->i32;
    } else if (strcasecmp("supportVnodes", name) == 0) {

@ -1121,6 +1119,8 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) {
        tsStartUdfd = cfgGetItem(pCfg, "udf")->bval;
      } else if (strcasecmp("uDebugFlag", name) == 0) {
        uDebugFlag = cfgGetItem(pCfg, "uDebugFlag")->i32;
      } else if (strcasecmp("useAdapter", name) == 0) {
        tsUseAdapter = cfgGetItem(pCfg, "useAdapter")->bval;
      }
      break;
    }

@ -2821,8 +2821,8 @@ int32_t tSerializeSDbCfgRsp(void *buf, int32_t bufLen, const SDbCfgRsp *pRsp) {
    if (tEncodeI8(&encoder, pRetension->keepUnit) < 0) return -1;
  }
  if (tEncodeI8(&encoder, pRsp->schemaless) < 0) return -1;
  if (tEncodeI16(&encoder, pRsp->sstTrigger) < 0) return -1;
  tEndEncode(&encoder);

  int32_t tlen = encoder.pos;
  tEncoderClear(&encoder);
  return tlen;

@ -2873,6 +2873,7 @@ int32_t tDeserializeSDbCfgRsp(void *buf, int32_t bufLen, SDbCfgRsp *pRsp) {
    }
  }
  if (tDecodeI8(&decoder, &pRsp->schemaless) < 0) return -1;
  if (tDecodeI16(&decoder, &pRsp->sstTrigger) < 0) return -1;
  tEndDecode(&decoder);

  tDecoderClear(&decoder);

@ -4117,6 +4118,68 @@ int32_t tDeserializeSAlterVnodeReplicaReq(void *buf, int32_t bufLen, SAlterVnode
|
|||
return 0;
|
||||
}
|
||||
|
||||
int32_t tSerializeSDisableVnodeWriteReq(void *buf, int32_t bufLen, SDisableVnodeWriteReq *pReq) {
|
||||
SEncoder encoder = {0};
|
||||
tEncoderInit(&encoder, buf, bufLen);
|
||||
|
||||
if (tStartEncode(&encoder) < 0) return -1;
|
||||
if (tEncodeI32(&encoder, pReq->vgId) < 0) return -1;
|
||||
if (tEncodeI8(&encoder, pReq->disable) < 0) return -1;
|
||||
|
||||
tEndEncode(&encoder);
|
||||
|
||||
int32_t tlen = encoder.pos;
|
||||
tEncoderClear(&encoder);
|
||||
return tlen;
|
||||
}
|
||||
|
||||
int32_t tDeserializeSDisableVnodeWriteReq(void *buf, int32_t bufLen, SDisableVnodeWriteReq *pReq) {
|
||||
SDecoder decoder = {0};
|
||||
tDecoderInit(&decoder, buf, bufLen);
|
||||
|
||||
if (tStartDecode(&decoder) < 0) return -1;
|
||||
if (tDecodeI32(&decoder, &pReq->vgId) < 0) return -1;
|
||||
if (tDecodeI8(&decoder, &pReq->disable) < 0) return -1;
|
||||
|
||||
tEndDecode(&decoder);
|
||||
tDecoderClear(&decoder);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t tSerializeSAlterVnodeHashRangeReq(void *buf, int32_t bufLen, SAlterVnodeHashRangeReq *pReq) {
|
||||
SEncoder encoder = {0};
|
||||
tEncoderInit(&encoder, buf, bufLen);
|
||||
|
||||
if (tStartEncode(&encoder) < 0) return -1;
|
||||
if (tEncodeI32(&encoder, pReq->srcVgId) < 0) return -1;
|
||||
if (tEncodeI32(&encoder, pReq->dstVgId) < 0) return -1;
|
||||
if (tEncodeI32(&encoder, pReq->hashBegin) < 0) return -1;
|
||||
if (tEncodeI32(&encoder, pReq->hashEnd) < 0) return -1;
|
||||
if (tEncodeI64(&encoder, pReq->reserved) < 0) return -1;
|
||||
|
||||
tEndEncode(&encoder);
|
||||
|
||||
int32_t tlen = encoder.pos;
|
||||
tEncoderClear(&encoder);
|
||||
return tlen;
|
||||
}
|
||||
|
||||
int32_t tDeserializeSAlterVnodeHashRangeReq(void *buf, int32_t bufLen, SAlterVnodeHashRangeReq *pReq) {
|
||||
SDecoder decoder = {0};
|
||||
tDecoderInit(&decoder, buf, bufLen);
|
||||
|
||||
if (tStartDecode(&decoder) < 0) return -1;
|
||||
if (tDecodeI32(&decoder, &pReq->srcVgId) < 0) return -1;
|
||||
if (tDecodeI32(&decoder, &pReq->dstVgId) < 0) return -1;
|
||||
if (tDecodeI32(&decoder, &pReq->hashBegin) < 0) return -1;
|
||||
if (tDecodeI32(&decoder, &pReq->hashEnd) < 0) return -1;
|
||||
if (tDecodeI64(&decoder, &pReq->reserved) < 0) return -1;
|
||||
|
||||
tEndDecode(&decoder);
|
||||
tDecoderClear(&decoder);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t tSerializeSKillQueryReq(void *buf, int32_t bufLen, SKillQueryReq *pReq) {
|
||||
SEncoder encoder = {0};
|
||||
tEncoderInit(&encoder, buf, bufLen);
|
||||
|
|
|
@ -181,6 +181,7 @@ SArray *mmGetMsgHandles() {
  if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_CONFIRM_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_HASHRANGE_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_VND_COMPACT_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_VND_DISABLE_WRITE_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;

  if (dmSetMgmtHandle(pArray, TDMT_SYNC_TIMEOUT, mmPutMsgToSyncQueue, 1) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_SYNC_CLIENT_REQUEST, mmPutMsgToSyncQueue, 1) == NULL) goto _OVER;

@ -54,6 +54,7 @@ typedef struct {
  int32_t vgVersion;
  int32_t refCount;
  int8_t  dropped;
  int8_t  disable;
  char *path;
  SVnode *pImpl;
  SMultiWorker pWriteW;

@ -80,13 +81,15 @@ typedef struct {
SVnodeObj *vmAcquireVnode(SVnodeMgmt *pMgmt, int32_t vgId);
void vmReleaseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode);
int32_t vmOpenVnode(SVnodeMgmt *pMgmt, SWrapperCfg *pCfg, SVnode *pImpl);
void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode);
void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode, bool commitAndRemoveWal);

// vmHandle.c
SArray *vmGetMsgHandles();
int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg);
int32_t vmProcessDropVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg);
int32_t vmProcessAlterVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg);
int32_t vmProcessAlterVnodeReplicaReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg);
int32_t vmProcessDisableVnodeWriteReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg);
int32_t vmProcessAlterHashRangeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg);

// vmFile.c
int32_t vmGetVnodeListFromFile(SVnodeMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t *numOfVnodes);

@ -281,7 +281,94 @@ _OVER:
|
|||
return code;
|
||||
}
|
||||
|
||||
int32_t vmProcessAlterVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
|
||||
int32_t vmProcessDisableVnodeWriteReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
|
||||
SDisableVnodeWriteReq req = {0};
|
||||
if (tDeserializeSDisableVnodeWriteReq(pMsg->pCont, pMsg->contLen, &req) != 0) {
|
||||
terrno = TSDB_CODE_INVALID_MSG;
|
||||
return -1;
|
||||
}
|
||||
|
||||
dInfo("vgId:%d, vnode write disable:%d", req.vgId, req.disable);
|
||||
|
||||
SVnodeObj *pVnode = vmAcquireVnode(pMgmt, req.vgId);
|
||||
if (pVnode == NULL) {
|
||||
dError("vgId:%d, failed to disable write since %s", req.vgId, terrstr());
|
||||
terrno = TSDB_CODE_VND_NOT_EXIST;
|
||||
return -1;
|
||||
}
|
||||
|
||||
pVnode->disable = req.disable;
|
||||
vmReleaseVnode(pMgmt, pVnode);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t vmProcessAlterHashRangeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
|
||||
SAlterVnodeHashRangeReq req = {0};
|
||||
if (tDeserializeSAlterVnodeHashRangeReq(pMsg->pCont, pMsg->contLen, &req) != 0) {
|
||||
terrno = TSDB_CODE_INVALID_MSG;
|
||||
return -1;
|
||||
}
|
||||
|
||||
int32_t srcVgId = req.srcVgId;
|
||||
int32_t dstVgId = req.dstVgId;
|
||||
dInfo("vgId:%d, start to alter vnode hashrange[%u, %u), dstVgId:%d", req.srcVgId, req.hashBegin, req.hashEnd,
|
||||
req.dstVgId);
|
||||
|
||||
SVnodeObj *pVnode = vmAcquireVnode(pMgmt, srcVgId);
|
||||
if (pVnode == NULL) {
|
||||
dError("vgId:%d, failed to alter hashrange since %s", srcVgId, terrstr());
|
||||
terrno = TSDB_CODE_VND_NOT_EXIST;
|
||||
return -1;
|
||||
}
|
||||
|
||||
SWrapperCfg wrapperCfg = {
|
||||
.dropped = pVnode->dropped,
|
||||
.vgId = dstVgId,
|
||||
.vgVersion = pVnode->vgVersion,
|
||||
};
|
||||
tstrncpy(wrapperCfg.path, pVnode->path, sizeof(wrapperCfg.path));
|
||||
|
||||
dInfo("vgId:%d, close vnode", srcVgId);
|
||||
vmCloseVnode(pMgmt, pVnode, true);
|
||||
|
||||
char srcPath[TSDB_FILENAME_LEN] = {0};
|
||||
char dstPath[TSDB_FILENAME_LEN] = {0};
|
||||
snprintf(srcPath, TSDB_FILENAME_LEN, "vnode%svnode%d", TD_DIRSEP, srcVgId);
|
||||
snprintf(dstPath, TSDB_FILENAME_LEN, "vnode%svnode%d", TD_DIRSEP, dstVgId);
|
||||
|
||||
dInfo("vgId:%d, alter vnode hashrange at %s", srcVgId, srcPath);
|
||||
if (vnodeAlterHashRange(srcPath, dstPath, &req, pMgmt->pTfs) < 0) {
|
||||
dError("vgId:%d, failed to alter vnode hashrange since %s", srcVgId, terrstr());
|
||||
return -1;
|
||||
}
|
||||
|
||||
dInfo("vgId:%d, start to open vnode", dstVgId);
|
||||
SVnode *pImpl = vnodeOpen(dstPath, pMgmt->pTfs, pMgmt->msgCb);
|
||||
if (pImpl == NULL) {
|
||||
dError("vgId:%d, failed to open vnode at %s since %s", dstVgId, dstPath, terrstr());
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (vmOpenVnode(pMgmt, &wrapperCfg, pImpl) != 0) {
|
||||
dError("vgId:%d, failed to open vnode mgmt since %s", dstVgId, terrstr());
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (vnodeStart(pImpl) != 0) {
|
||||
dError("vgId:%d, failed to start sync since %s", dstVgId, terrstr());
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (vmWriteVnodeListToFile(pMgmt) != 0) {
|
||||
dError("vgId:%d, failed to write vnode list since %s", dstVgId, terrstr());
|
||||
return -1;
|
||||
}
|
||||
|
||||
dInfo("vgId:%d, vnode hashrange is altered", dstVgId);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t vmProcessAlterVnodeReplicaReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
|
||||
SAlterVnodeReplicaReq alterReq = {0};
|
||||
if (tDeserializeSAlterVnodeReplicaReq(pMsg->pCont, pMsg->contLen, &alterReq) != 0) {
|
||||
terrno = TSDB_CODE_INVALID_MSG;
|
||||
|
@ -289,16 +376,16 @@ int32_t vmProcessAlterVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
|
|||
}
|
||||
|
||||
int32_t vgId = alterReq.vgId;
|
||||
dInfo("vgId:%d, start to alter vnode, replica:%d selfIndex:%d strict:%d", alterReq.vgId, alterReq.replica,
|
||||
alterReq.selfIndex, alterReq.strict);
|
||||
dInfo("vgId:%d, start to alter vnode, replica:%d selfIndex:%d strict:%d", vgId, alterReq.replica, alterReq.selfIndex,
|
||||
alterReq.strict);
|
||||
for (int32_t i = 0; i < alterReq.replica; ++i) {
|
||||
SReplica *pReplica = &alterReq.replicas[i];
|
||||
dInfo("vgId:%d, replica:%d ep:%s:%u dnode:%d", alterReq.vgId, i, pReplica->fqdn, pReplica->port, pReplica->port);
|
||||
dInfo("vgId:%d, replica:%d ep:%s:%u dnode:%d", vgId, i, pReplica->fqdn, pReplica->port, pReplica->port);
|
||||
}
|
||||
|
||||
if (alterReq.replica <= 0 || alterReq.selfIndex < 0 || alterReq.selfIndex >= alterReq.replica) {
|
||||
terrno = TSDB_CODE_INVALID_MSG;
|
||||
dError("vgId:%d, failed to alter replica since invalid msg", alterReq.vgId);
|
||||
dError("vgId:%d, failed to alter replica since invalid msg", vgId);
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
@ -306,7 +393,7 @@ int32_t vmProcessAlterVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
|
|||
if (pReplica->id != pMgmt->pData->dnodeId || pReplica->port != tsServerPort ||
|
||||
strcmp(pReplica->fqdn, tsLocalFqdn) != 0) {
|
||||
terrno = TSDB_CODE_INVALID_MSG;
|
||||
dError("vgId:%d, dnodeId:%d ep:%s:%u not matched with local dnode", alterReq.vgId, pReplica->id, pReplica->fqdn,
|
||||
dError("vgId:%d, dnodeId:%d ep:%s:%u not matched with local dnode", vgId, pReplica->id, pReplica->fqdn,
|
||||
pReplica->port);
|
||||
return -1;
|
||||
}
|
||||
|
@ -325,13 +412,13 @@ int32_t vmProcessAlterVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
|
|||
.vgVersion = pVnode->vgVersion,
|
||||
};
|
||||
tstrncpy(wrapperCfg.path, pVnode->path, sizeof(wrapperCfg.path));
|
||||
vmCloseVnode(pMgmt, pVnode);
|
||||
vmCloseVnode(pMgmt, pVnode, false);
|
||||
|
||||
char path[TSDB_FILENAME_LEN] = {0};
|
||||
snprintf(path, TSDB_FILENAME_LEN, "vnode%svnode%d", TD_DIRSEP, vgId);
|
||||
|
||||
dInfo("vgId:%d, start to alter vnode replica at %s", vgId, path);
|
||||
if (vnodeAlter(path, &alterReq, pMgmt->pTfs) < 0) {
|
||||
if (vnodeAlterReplica(path, &alterReq, pMgmt->pTfs) < 0) {
|
||||
dError("vgId:%d, failed to alter vnode at %s since %s", vgId, path, terrstr());
|
||||
return -1;
|
||||
}
|
||||
|
@ -387,7 +474,7 @@ int32_t vmProcessDropVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
    return -1;
  }

  vmCloseVnode(pMgmt, pVnode);
  vmCloseVnode(pMgmt, pVnode, false);
  vmWriteVnodeListToFile(pMgmt);

  dInfo("vgId:%d, is dropped", vgId);

@ -451,7 +538,8 @@ SArray *vmGetMsgHandles() {
  if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_REPLICA, vmPutMsgToMgmtQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_CONFIG, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_CONFIRM, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_HASHRANGE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_VND_DISABLE_WRITE, vmPutMsgToMgmtQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_HASHRANGE, vmPutMsgToMgmtQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_VND_COMPACT, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_VND_TRIM, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_DND_CREATE_VNODE, vmPutMsgToMgmtQueue, 0) == NULL) goto _OVER;

@ -76,7 +76,7 @@ int32_t vmOpenVnode(SVnodeMgmt *pMgmt, SWrapperCfg *pCfg, SVnode *pImpl) {
  return code;
}

void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode, bool commitAndRemoveWal) {
  char path[TSDB_FILENAME_LEN] = {0};

  vnodeProposeCommitOnNeed(pVnode->pImpl);

@ -124,10 +124,26 @@ void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
|
|||
vnodePostClose(pVnode->pImpl);
|
||||
|
||||
vmFreeQueue(pMgmt, pVnode);
|
||||
|
||||
if (commitAndRemoveWal) {
|
||||
dInfo("vgId:%d, commit data", pVnode->vgId);
|
||||
vnodeSyncCommit(pVnode->pImpl);
|
||||
vnodeBegin(pVnode->pImpl);
|
||||
dInfo("vgId:%d, commit data finished", pVnode->vgId);
|
||||
}
|
||||
|
||||
vnodeClose(pVnode->pImpl);
|
||||
pVnode->pImpl = NULL;
|
||||
dInfo("vgId:%d, vnode is closed", pVnode->vgId);
|
||||
|
||||
if (commitAndRemoveWal) {
|
||||
char path[TSDB_FILENAME_LEN] = {0};
|
||||
snprintf(path, TSDB_FILENAME_LEN, "vnode%svnode%d%swal", TD_DIRSEP, pVnode->vgId, TD_DIRSEP);
|
||||
dInfo("vgId:%d, remove all wals, path:%s", pVnode->vgId, path);
|
||||
tfsRmdir(pMgmt->pTfs, path);
|
||||
tfsMkdir(pMgmt->pTfs, path);
|
||||
}
|
||||
|
||||
if (pVnode->dropped) {
|
||||
dInfo("vgId:%d, vnode is destroyed, dropped:%d", pVnode->vgId, pVnode->dropped);
|
||||
snprintf(path, TSDB_FILENAME_LEN, "vnode%svnode%d", TD_DIRSEP, pVnode->vgId);
|
||||
|
@ -257,7 +273,7 @@ static void *vmCloseVnodeInThread(void *param) {
             pMgmt->state.openVnodes, pMgmt->state.totalVnodes);
    tmsgReportStartup("vnode-close", stepDesc);

    vmCloseVnode(pMgmt, pVnode);
    vmCloseVnode(pMgmt, pVnode, false);
  }

  dInfo("thread:%d, numOfVnodes:%d is closed", pThread->threadIndex, pThread->vnodeNum);

@ -41,7 +41,13 @@ static void vmProcessMgmtQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
      code = vmProcessDropVnodeReq(pMgmt, pMsg);
      break;
    case TDMT_VND_ALTER_REPLICA:
      code = vmProcessAlterVnodeReq(pMgmt, pMsg);
      code = vmProcessAlterVnodeReplicaReq(pMgmt, pMsg);
      break;
    case TDMT_VND_DISABLE_WRITE:
      code = vmProcessDisableVnodeWriteReq(pMgmt, pMsg);
      break;
    case TDMT_VND_ALTER_HASHRANGE:
      code = vmProcessAlterHashRangeReq(pMgmt, pMsg);
      break;
    default:
      terrno = TSDB_CODE_MSG_NOT_PROCESSED;

@ -191,14 +197,21 @@ static int32_t vmPutMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType qtyp
|
|||
terrno = TSDB_CODE_NO_DISKSPACE;
|
||||
code = terrno;
|
||||
dError("vgId:%d, msg:%p put into vnode-write queue failed since %s", pVnode->vgId, pMsg, terrstr(code));
|
||||
} else if ((pMsg->msgType == TDMT_VND_SUBMIT) && (grantCheck(TSDB_GRANT_STORAGE) != TSDB_CODE_SUCCESS)) {
|
||||
break;
|
||||
}
|
||||
if (pMsg->msgType == TDMT_VND_SUBMIT && (grantCheck(TSDB_GRANT_STORAGE) != TSDB_CODE_SUCCESS)) {
|
||||
terrno = TSDB_CODE_VND_NO_WRITE_AUTH;
|
||||
code = terrno;
|
||||
dDebug("vgId:%d, msg:%p put into vnode-write queue failed since %s", pVnode->vgId, pMsg, terrstr(code));
|
||||
} else {
|
||||
dGTrace("vgId:%d, msg:%p put into vnode-write queue", pVnode->vgId, pMsg);
|
||||
taosWriteQitem(pVnode->pWriteW.queue, pMsg);
|
||||
break;
|
||||
}
|
||||
if (pMsg->msgType != TDMT_VND_ALTER_CONFIRM && pVnode->disable) {
|
||||
dDebug("vgId:%d, msg:%p put into vnode-write queue failed since its disable", pVnode->vgId, pMsg);
|
||||
terrno = TSDB_CODE_VND_STOPPED;
|
||||
break;
|
||||
}
|
||||
dGTrace("vgId:%d, msg:%p put into vnode-write queue", pVnode->vgId, pMsg);
|
||||
taosWriteQitem(pVnode->pWriteW.queue, pMsg);
|
||||
break;
|
||||
case SYNC_QUEUE:
|
||||
dGTrace("vgId:%d, msg:%p put into vnode-sync queue", pVnode->vgId, pMsg);
|
||||
|
|
|
@ -39,7 +39,7 @@ int32_t dmProcessNodeMsg(SMgmtWrapper *pWrapper, SRpcMsg *pMsg) {
  NodeMsgFp msgFp = pWrapper->msgFps[TMSG_INDEX(pMsg->msgType)];
  if (msgFp == NULL) {
    terrno = TSDB_CODE_MSG_NOT_PROCESSED;
    dGError("msg:%p, not processed since no handler", pMsg);
    dGError("msg:%p, not processed since no handler, type:%s", pMsg, TMSG_INFO(pMsg->msgType));
    return -1;
  }

@ -30,6 +30,7 @@ int32_t mndValidateDbInfo(SMnode *pMnode, SDbVgVersion *pDbs, int32_t numOfDbs,
int32_t mndExtractDbInfo(SMnode *pMnode, SDbObj *pDb, SUseDbRsp *pRsp, const SUseDbReq *pReq);
bool mndIsDbReady(SMnode *pMnode, SDbObj *pDb);

SSdbRaw *mndDbActionEncode(SDbObj *pDb);
const char *mndGetDbStr(const char *src);

#ifdef __cplusplus

@ -32,7 +32,6 @@
#define DB_VER_NUMBER 1
#define DB_RESERVE_SIZE 54

static SSdbRaw *mndDbActionEncode(SDbObj *pDb);
static SSdbRow *mndDbActionDecode(SSdbRaw *pRaw);
static int32_t mndDbActionInsert(SSdb *pSdb, SDbObj *pDb);
static int32_t mndDbActionDelete(SSdb *pSdb, SDbObj *pDb);

@ -74,7 +73,7 @@ int32_t mndInitDb(SMnode *pMnode) {

void mndCleanupDb(SMnode *pMnode) {}

static SSdbRaw *mndDbActionEncode(SDbObj *pDb) {
SSdbRaw *mndDbActionEncode(SDbObj *pDb) {
  terrno = TSDB_CODE_OUT_OF_MEMORY;

  int32_t size = sizeof(SDbObj) + pDb->cfg.numOfRetensions * sizeof(SRetention) + DB_RESERVE_SIZE;

@ -259,6 +258,7 @@ static int32_t mndDbActionUpdate(SSdb *pSdb, SDbObj *pOld, SDbObj *pNew) {
  pOld->updateTime = pNew->updateTime;
  pOld->cfgVersion = pNew->cfgVersion;
  pOld->vgVersion = pNew->vgVersion;
  pOld->cfg.numOfVgroups = pNew->cfg.numOfVgroups;
  pOld->cfg.buffer = pNew->cfg.buffer;
  pOld->cfg.pageSize = pNew->cfg.pageSize;
  pOld->cfg.pages = pNew->cfg.pages;

@ -889,7 +889,7 @@ static int32_t mndProcessGetDbCfgReq(SRpcMsg *pReq) {
  cfgRsp.numOfRetensions = pDb->cfg.numOfRetensions;
  cfgRsp.pRetensions = pDb->cfg.pRetensions;
  cfgRsp.schemaless = pDb->cfg.schemaless;

  cfgRsp.sstTrigger = pDb->cfg.sstTrigger;
  int32_t contLen = tSerializeSDbCfgRsp(NULL, 0, &cfgRsp);
  void *pRsp = rpcMallocCont(contLen);
  if (pRsp == NULL) {

@ -59,6 +59,7 @@ int32_t mndInitVgroup(SMnode *pMnode) {
  mndSetMsgHandle(pMnode, TDMT_VND_ALTER_HASHRANGE_RSP, mndTransProcessRsp);
  mndSetMsgHandle(pMnode, TDMT_DND_DROP_VNODE_RSP, mndTransProcessRsp);
  mndSetMsgHandle(pMnode, TDMT_VND_COMPACT_RSP, mndTransProcessRsp);
  mndSetMsgHandle(pMnode, TDMT_VND_DISABLE_WRITE_RSP, mndTransProcessRsp);

  mndSetMsgHandle(pMnode, TDMT_MND_REDISTRIBUTE_VGROUP, mndProcessRedistributeVgroupMsg);
  mndSetMsgHandle(pMnode, TDMT_MND_SPLIT_VGROUP, mndProcessSplitVgroupMsg);

@ -355,9 +356,7 @@ static void *mndBuildAlterVnodeReplicaReq(SMnode *pMnode, SDbObj *pDb, SVgObj *p
    SReplica *pReplica = &alterReq.replicas[v];
    SVnodeGid *pVgid = &pVgroup->vnodeGid[v];
    SDnodeObj *pVgidDnode = mndAcquireDnode(pMnode, pVgid->dnodeId);
    if (pVgidDnode == NULL) {
      return NULL;
    }
    if (pVgidDnode == NULL) return NULL;

    pReplica->id = pVgidDnode->id;
    pReplica->port = pVgidDnode->port;

@ -397,6 +396,57 @@ static void *mndBuildAlterVnodeReplicaReq(SMnode *pMnode, SDbObj *pDb, SVgObj *p
|
|||
return pReq;
|
||||
}
|
||||
|
||||
static void *mndBuildDisableVnodeWriteReq(SMnode *pMnode, SDbObj *pDb, int32_t vgId, int32_t *pContLen) {
|
||||
SDisableVnodeWriteReq disableReq = {
|
||||
.vgId = vgId,
|
||||
.disable = 1,
|
||||
};
|
||||
|
||||
mInfo("vgId:%d, build disable vnode write req", vgId);
|
||||
int32_t contLen = tSerializeSDisableVnodeWriteReq(NULL, 0, &disableReq);
|
||||
if (contLen < 0) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void *pReq = taosMemoryMalloc(contLen);
|
||||
if (pReq == NULL) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
tSerializeSDisableVnodeWriteReq(pReq, contLen, &disableReq);
|
||||
*pContLen = contLen;
|
||||
return pReq;
|
||||
}
|
||||
|
||||
static void *mndBuildAlterVnodeHashRangeReq(SMnode *pMnode, SVgObj *pVgroup, int32_t dstVgId, int32_t *pContLen) {
|
||||
SAlterVnodeHashRangeReq alterReq = {
|
||||
.srcVgId = pVgroup->vgId,
|
||||
.dstVgId = dstVgId,
|
||||
.hashBegin = pVgroup->hashBegin,
|
||||
.hashEnd = pVgroup->hashEnd,
|
||||
};
|
||||
|
||||
mInfo("vgId:%d, build alter vnode hashrange req, dstVgId:%d, begin:%u, end:%u", pVgroup->vgId, dstVgId,
|
||||
pVgroup->hashBegin, pVgroup->hashEnd);
|
||||
int32_t contLen = tSerializeSAlterVnodeHashRangeReq(NULL, 0, &alterReq);
|
||||
if (contLen < 0) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void *pReq = taosMemoryMalloc(contLen);
|
||||
if (pReq == NULL) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
tSerializeSAlterVnodeHashRangeReq(pReq, contLen, &alterReq);
|
||||
*pContLen = contLen;
|
||||
return pReq;
|
||||
}
|
||||
|
||||
void *mndBuildDropVnodeReq(SMnode *pMnode, SDnodeObj *pDnode, SDbObj *pDb, SVgObj *pVgroup, int32_t *pContLen) {
|
||||
SDropVnodeReq dropReq = {0};
|
||||
dropReq.dnodeId = pDnode->id;
|
||||
|
@ -1029,6 +1079,7 @@ int32_t mndAddAlterVnodeConfirmAction(SMnode *pMnode, STrans *pTrans, SDbObj *pD
  STransAction action = {0};
  action.epSet = mndGetVgroupEpset(pMnode, pVgroup);

  mInfo("vgId:%d, build alter vnode confirm req", pVgroup->vgId);
  int32_t contLen = sizeof(SMsgHead);
  SMsgHead *pHead = taosMemoryMalloc(contLen);
  if (pHead == NULL) {

@ -1053,7 +1104,25 @@ int32_t mndAddAlterVnodeConfirmAction(SMnode *pMnode, STrans *pTrans, SDbObj *pD
|
|||
return 0;
|
||||
}
|
||||
|
||||
int32_t mndAddAlterVnodeHashRangeAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup) { return 0; }
|
||||
static int32_t mndAddAlterVnodeHashRangeAction(SMnode *pMnode, STrans *pTrans, SVgObj *pVgroup, int32_t dstVgId) {
|
||||
STransAction action = {0};
|
||||
action.epSet = mndGetVgroupEpset(pMnode, pVgroup);
|
||||
|
||||
int32_t contLen = 0;
|
||||
void *pReq = mndBuildAlterVnodeHashRangeReq(pMnode, pVgroup, dstVgId, &contLen);
|
||||
if (pReq == NULL) return -1;
|
||||
|
||||
action.pCont = pReq;
|
||||
action.contLen = contLen;
|
||||
action.msgType = TDMT_VND_ALTER_HASHRANGE;
|
||||
|
||||
if (mndTransAppendRedoAction(pTrans, &action) != 0) {
|
||||
taosMemoryFree(pReq);
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t mndAddAlterVnodeConfigAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup) {
|
||||
STransAction action = {0};
|
||||
|
@ -1099,6 +1168,30 @@ int32_t mndAddAlterVnodeReplicaAction(SMnode *pMnode, STrans *pTrans, SDbObj *pD
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int32_t mndAddDisableVnodeWriteAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, int32_t dnodeId) {
|
||||
SDnodeObj *pDnode = mndAcquireDnode(pMnode, dnodeId);
|
||||
if (pDnode == NULL) return -1;
|
||||
|
||||
STransAction action = {0};
|
||||
action.epSet = mndGetDnodeEpset(pDnode);
|
||||
mndReleaseDnode(pMnode, pDnode);
|
||||
|
||||
int32_t contLen = 0;
|
||||
void *pReq = mndBuildDisableVnodeWriteReq(pMnode, pDb, pVgroup->vgId, &contLen);
|
||||
if (pReq == NULL) return -1;
|
||||
|
||||
action.pCont = pReq;
|
||||
action.contLen = contLen;
|
||||
action.msgType = TDMT_VND_DISABLE_WRITE;
|
||||
|
||||
if (mndTransAppendRedoAction(pTrans, &action) != 0) {
|
||||
taosMemoryFree(pReq);
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t mndAddDropVnodeAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, SVnodeGid *pVgid,
|
||||
bool isRedo) {
|
||||
STransAction action = {0};
|
||||
|
@ -1763,9 +1856,11 @@ static int32_t mndAddAdjustVnodeHashRangeAction(SMnode *pMnode, STrans *pTrans,
|
|||
}
|
||||
|
||||
static int32_t mndSplitVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SVgObj *pVgroup) {
|
||||
int32_t code = -1;
|
||||
STrans *pTrans = NULL;
|
||||
SArray *pArray = mndBuildDnodesArray(pMnode, 0);
|
||||
int32_t code = -1;
|
||||
STrans *pTrans = NULL;
|
||||
SSdbRaw *pRaw = NULL;
|
||||
SDbObj dbObj = {0};
|
||||
SArray *pArray = mndBuildDnodesArray(pMnode, 0);
|
||||
|
||||
pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_GLOBAL, pReq, "split-vgroup");
|
||||
if (pTrans == NULL) goto _OVER;
|
||||
|
@ -1784,18 +1879,21 @@ static int32_t mndSplitVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SVgObj
|
|||
if (mndAddVnodeToVgroup(pMnode, pTrans, &newVg1, pArray) != 0) goto _OVER;
|
||||
if (mndAddAlterVnodeReplicaAction(pMnode, pTrans, pDb, &newVg1, newVg1.vnodeGid[0].dnodeId) != 0) goto _OVER;
|
||||
if (mndAddCreateVnodeAction(pMnode, pTrans, pDb, &newVg1, &newVg1.vnodeGid[1]) != 0) goto _OVER;
|
||||
if (mndAddAlterVnodeConfirmAction(pMnode, pTrans, pDb, &newVg1) != 0) goto _OVER;
|
||||
} else if (newVg1.replica == 3) {
|
||||
SVnodeGid del1 = {0};
|
||||
if (mndRemoveVnodeFromVgroup(pMnode, pTrans, &newVg1, pArray, &del1) != 0) goto _OVER;
|
||||
if (mndAddDropVnodeAction(pMnode, pTrans, pDb, &newVg1, &del1, true) != 0) goto _OVER;
|
||||
if (mndAddAlterVnodeReplicaAction(pMnode, pTrans, pDb, &newVg1, newVg1.vnodeGid[0].dnodeId) != 0) goto _OVER;
|
||||
if (mndAddAlterVnodeReplicaAction(pMnode, pTrans, pDb, &newVg1, newVg1.vnodeGid[1].dnodeId) != 0) goto _OVER;
|
||||
if (mndAddAlterVnodeConfirmAction(pMnode, pTrans, pDb, &newVg1) != 0) goto _OVER;
|
||||
} else {
|
||||
goto _OVER;
|
||||
}
|
||||
|
||||
for (int32_t i = 0; i < newVg1.replica; ++i) {
|
||||
if (mndAddDisableVnodeWriteAction(pMnode, pTrans, pDb, &newVg1, newVg1.vnodeGid[i].dnodeId) != 0) goto _OVER;
|
||||
}
|
||||
if (mndAddAlterVnodeConfirmAction(pMnode, pTrans, pDb, &newVg1) != 0) goto _OVER;
|
||||
|
||||
mInfo("vgId:%d, vgroup info after adjust replica, replica:%d hashBegin:%u hashEnd:%u vnode:0 dnode:%d", newVg1.vgId,
|
||||
newVg1.replica, newVg1.hashBegin, newVg1.hashEnd, newVg1.vnodeGid[0].dnodeId);
|
||||
for (int32_t i = 0; i < newVg1.replica; ++i) {
|
||||
|
@ -1813,44 +1911,6 @@ static int32_t mndSplitVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SVgObj
|
|||
memcpy(&newVg2.vnodeGid[0], &newVg2.vnodeGid[1], sizeof(SVnodeGid));
|
||||
memset(&newVg2.vnodeGid[1], 0, sizeof(SVnodeGid));
|
||||
|
||||
mInfo("vgId:%d, vgroup info after adjust hash, replica:%d hashBegin:%u hashEnd:%u vnode:0 dnode:%d", newVg1.vgId,
|
||||
newVg1.replica, newVg1.hashBegin, newVg1.hashEnd, newVg1.vnodeGid[0].dnodeId);
|
||||
mInfo("vgId:%d, vgroup info after adjust hash, replica:%d hashBegin:%u hashEnd:%u vnode:0 dnode:%d", newVg2.vgId,
|
||||
newVg2.replica, newVg2.hashBegin, newVg2.hashEnd, newVg2.vnodeGid[0].dnodeId);
|
||||
|
||||
if (mndAddAlterVnodeHashRangeAction(pMnode, pTrans, pDb, &newVg1) != 0) goto _OVER;
|
||||
if (mndAddAlterVnodeHashRangeAction(pMnode, pTrans, pDb, &newVg2) != 0) goto _OVER;
|
||||
|
||||
#if 0
|
||||
// adjust vgroup replica
|
||||
if (pDb->cfg.replications != newVg1.replica) {
|
||||
if (mndBuildAlterVgroupAction(pMnode, pTrans, pDb, pDb, &newVg1, pArray) != 0) goto _OVER;
|
||||
}
|
||||
if (pDb->cfg.replications != newVg2.replica) {
|
||||
if (mndBuildAlterVgroupAction(pMnode, pTrans, pDb, pDb, &newVg2, pArray) != 0) goto _OVER;
|
||||
}
|
||||
#endif
|
||||
|
||||
{
|
||||
SSdbRaw *pRaw = mndVgroupActionEncode(&newVg1);
|
||||
if (pRaw == NULL) return -1;
|
||||
if (mndTransAppendCommitlog(pTrans, pRaw) != 0) {
|
||||
sdbFreeRaw(pRaw);
|
||||
return -1;
|
||||
}
|
||||
(void)sdbSetRawStatus(pRaw, SDB_STATUS_READY);
|
||||
}
|
||||
|
||||
{
|
||||
SSdbRaw *pRaw = mndVgroupActionEncode(&newVg2);
|
||||
if (pRaw == NULL) return -1;
|
||||
if (mndTransAppendCommitlog(pTrans, pRaw) != 0) {
|
||||
sdbFreeRaw(pRaw);
|
||||
return -1;
|
||||
}
|
||||
(void)sdbSetRawStatus(pRaw, SDB_STATUS_READY);
|
||||
}
|
||||
|
||||
mInfo("vgId:%d, vgroup info after adjust hash, replica:%d hashBegin:%u hashEnd:%u vnode:0 dnode:%d", newVg1.vgId,
|
||||
newVg1.replica, newVg1.hashBegin, newVg1.hashEnd, newVg1.vnodeGid[0].dnodeId);
|
||||
for (int32_t i = 0; i < newVg1.replica; ++i) {
|
||||
|
@ -1862,28 +1922,83 @@ static int32_t mndSplitVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SVgObj
|
|||
mInfo("vgId:%d, vnode:%d dnode:%d", newVg2.vgId, i, newVg2.vnodeGid[i].dnodeId);
|
||||
}
|
||||
|
||||
int32_t maxVgId = sdbGetMaxId(pMnode->pSdb, SDB_VGROUP);
|
||||
if (mndAddAlterVnodeHashRangeAction(pMnode, pTrans, &newVg1, maxVgId) != 0) goto _OVER;
|
||||
newVg1.vgId = maxVgId;
|
||||
|
||||
maxVgId++;
|
||||
if (mndAddAlterVnodeHashRangeAction(pMnode, pTrans, &newVg2, maxVgId) != 0) goto _OVER;
|
||||
newVg2.vgId = maxVgId;
|
||||
|
||||
// adjust vgroup replica
|
||||
if (pDb->cfg.replications != newVg1.replica) {
|
||||
if (mndBuildAlterVgroupAction(pMnode, pTrans, pDb, pDb, &newVg1, pArray) != 0) goto _OVER;
|
||||
}
|
||||
if (pDb->cfg.replications != newVg2.replica) {
|
||||
if (mndBuildAlterVgroupAction(pMnode, pTrans, pDb, pDb, &newVg2, pArray) != 0) goto _OVER;
|
||||
}
|
||||
|
||||
pRaw = mndVgroupActionEncode(&newVg1);
|
||||
if (pRaw == NULL) goto _OVER;
|
||||
if (mndTransAppendCommitlog(pTrans, pRaw) != 0) goto _OVER;
|
||||
(void)sdbSetRawStatus(pRaw, SDB_STATUS_READY);
|
||||
pRaw = NULL;
|
||||
|
||||
pRaw = mndVgroupActionEncode(&newVg2);
|
||||
if (pRaw == NULL) goto _OVER;
|
||||
if (mndTransAppendCommitlog(pTrans, pRaw) != 0) goto _OVER;
|
||||
(void)sdbSetRawStatus(pRaw, SDB_STATUS_READY);
|
||||
pRaw = NULL;
|
||||
|
||||
pRaw = mndVgroupActionEncode(pVgroup);
|
||||
if (pRaw == NULL) goto _OVER;
|
||||
if (mndTransAppendCommitlog(pTrans, pRaw) != 0) goto _OVER;
|
||||
(void)sdbSetRawStatus(pRaw, SDB_STATUS_DROPPED);
|
||||
pRaw = NULL;
|
||||
|
||||
memcpy(&dbObj, pDb, sizeof(SDbObj));
|
||||
if (dbObj.cfg.pRetensions != NULL) {
|
||||
dbObj.cfg.pRetensions = taosArrayDup(pDb->cfg.pRetensions, NULL);
|
||||
if (dbObj.cfg.pRetensions == NULL) goto _OVER;
|
||||
}
|
||||
dbObj.vgVersion++;
|
||||
dbObj.updateTime = taosGetTimestampMs();
|
||||
dbObj.cfg.numOfVgroups++;
|
||||
pRaw = mndDbActionEncode(&dbObj);
|
||||
if (pRaw == NULL) goto _OVER;
|
||||
if (mndTransAppendCommitlog(pTrans, pRaw) != 0) goto _OVER;
|
||||
(void)sdbSetRawStatus(pRaw, SDB_STATUS_READY);
|
||||
pRaw = NULL;
|
||||
|
||||
if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER;
|
||||
code = 0;
|
||||
|
||||
_OVER:
|
||||
taosArrayDestroy(pArray);
|
||||
mndTransDrop(pTrans);
|
||||
sdbFreeRaw(pRaw);
|
||||
taosArrayDestroy(dbObj.cfg.pRetensions);
|
||||
return code;
|
||||
}
|
||||
|
||||
static int32_t mndProcessSplitVgroupMsg(SRpcMsg *pReq) {
|
||||
SMnode *pMnode = pReq->info.node;
|
||||
int32_t code = -1;
|
||||
int32_t vgId = 2;
|
||||
SVgObj *pVgroup = NULL;
|
||||
SDbObj *pDb = NULL;
|
||||
|
||||
mInfo("vgId:%d, start to split", vgId);
|
||||
SSplitVgroupReq req = {0};
|
||||
if (tDeserializeSSplitVgroupReq(pReq->pCont, pReq->contLen, &req) != 0) {
|
||||
terrno = TSDB_CODE_INVALID_MSG;
|
||||
goto _OVER;
|
||||
}
|
||||
|
||||
mInfo("vgId:%d, start to split", req.vgId);
|
||||
if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_SPLIT_VGROUP) != 0) {
|
||||
goto _OVER;
|
||||
}
|
||||
|
||||
pVgroup = mndAcquireVgroup(pMnode, vgId);
|
||||
pVgroup = mndAcquireVgroup(pMnode, req.vgId);
|
||||
if (pVgroup == NULL) goto _OVER;
|
||||
|
||||
pDb = mndAcquireDb(pMnode, pVgroup->dbName);
|
||||
|
|
|
@ -50,13 +50,16 @@ extern const SVnodeCfg vnodeCfgDefault;
|
|||
int32_t vnodeInit(int32_t nthreads);
|
||||
void vnodeCleanup();
|
||||
int32_t vnodeCreate(const char *path, SVnodeCfg *pCfg, STfs *pTfs);
|
||||
int32_t vnodeAlter(const char *path, SAlterVnodeReplicaReq *pReq, STfs *pTfs);
|
||||
int32_t vnodeAlterReplica(const char *path, SAlterVnodeReplicaReq *pReq, STfs *pTfs);
|
||||
int32_t vnodeAlterHashRange(const char *srcPath, const char *dstPath, SAlterVnodeHashRangeReq *pReq, STfs *pTfs);
|
||||
void vnodeDestroy(const char *path, STfs *pTfs);
|
||||
SVnode *vnodeOpen(const char *path, STfs *pTfs, SMsgCb msgCb);
|
||||
void vnodePreClose(SVnode *pVnode);
|
||||
void vnodePostClose(SVnode *pVnode);
|
||||
void vnodeSyncCheckTimeout(SVnode *pVnode);
|
||||
void vnodeClose(SVnode *pVnode);
|
||||
int32_t vnodeSyncCommit(SVnode *pVnode);
|
||||
int32_t vnodeBegin(SVnode* pVnode);
|
||||
|
||||
int32_t vnodeStart(SVnode *pVnode);
|
||||
void vnodeStop(SVnode *pVnode);
|
||||
|
@ -153,6 +156,8 @@ typedef struct SMTbCursor SMTbCursor;
|
|||
SMTbCursor *metaOpenTbCursor(SMeta *pMeta);
|
||||
void metaCloseTbCursor(SMTbCursor *pTbCur);
|
||||
int32_t metaTbCursorNext(SMTbCursor *pTbCur, ETableType jumpTableType);
|
||||
int32_t metaTbCursorPrev(SMTbCursor *pTbCur);
|
||||
|
||||
#endif
|
||||
|
||||
// tsdb
|
||||
|
|
|
@ -206,6 +206,7 @@ int32_t tsdbCmprColData(SColData *pColData, int8_t cmprAlg, SBlockCol *pBlockCol
|
|||
uint8_t **ppBuf);
|
||||
int32_t tsdbDecmprColData(uint8_t *pIn, SBlockCol *pBlockCol, int8_t cmprAlg, int32_t nVal, SColData *pColData,
|
||||
uint8_t **ppBuf);
|
||||
int32_t tRowInfoCmprFn(const void *p1, const void *p2);
|
||||
// tsdbMemTable ==============================================================================================
|
||||
// SMemTable
|
||||
int32_t tsdbMemTableCreate(STsdb *pTsdb, SMemTable **ppMemTable);
|
||||
|
|
|
@ -252,7 +252,7 @@ int32_t tsdbSnapReaderClose(STsdbSnapReader** ppReader);
|
|||
int32_t tsdbSnapRead(STsdbSnapReader* pReader, uint8_t** ppData);
|
||||
// STsdbSnapWriter ========================================
|
||||
int32_t tsdbSnapWriterOpen(STsdb* pTsdb, int64_t sver, int64_t ever, STsdbSnapWriter** ppWriter);
|
||||
int32_t tsdbSnapWrite(STsdbSnapWriter* pWriter, uint8_t* pData, uint32_t nData);
|
||||
int32_t tsdbSnapWrite(STsdbSnapWriter* pWriter, SSnapDataHdr* pHdr);
|
||||
int32_t tsdbSnapWriterPrepareClose(STsdbSnapWriter* pWriter);
|
||||
int32_t tsdbSnapWriterClose(STsdbSnapWriter** ppWriter, int8_t rollback);
|
||||
// STqSnapshotReader ==
|
||||
|
|
|
@ -56,4 +56,7 @@ int metaPrepareAsyncCommit(SMeta *pMeta) {
|
|||
}
|
||||
|
||||
// abort the meta txn
|
||||
int metaAbort(SMeta *pMeta) { return tdbAbort(pMeta->pEnv, pMeta->txn); }
|
||||
int metaAbort(SMeta *pMeta) {
|
||||
if (!pMeta->txn) return 0;
|
||||
return tdbAbort(pMeta->pEnv, pMeta->txn);
|
||||
}
|
||||
|
|
|
@ -203,7 +203,7 @@ _err:
|
|||
|
||||
int metaClose(SMeta *pMeta) {
|
||||
if (pMeta) {
|
||||
if (pMeta->pEnv) tdbAbort(pMeta->pEnv, pMeta->txn);
|
||||
if (pMeta->pEnv) metaAbort(pMeta);
|
||||
if (pMeta->pCache) metaCacheClose(pMeta);
|
||||
if (pMeta->pIdx) metaCloseIdx(pMeta);
|
||||
if (pMeta->pStreamDb) tdbTbClose(pMeta->pStreamDb);
|
||||
|
|
|
@ -310,7 +310,7 @@ void metaCloseTbCursor(SMTbCursor *pTbCur) {
|
|||
}
|
||||
}
|
||||
|
||||
int metaTbCursorNext(SMTbCursor *pTbCur, ETableType jumpTableType) {
|
||||
int32_t metaTbCursorNext(SMTbCursor *pTbCur, ETableType jumpTableType) {
|
||||
int ret;
|
||||
void *pBuf;
|
||||
STbCfg tbCfg;
|
||||
|
@ -334,6 +334,30 @@ int metaTbCursorNext(SMTbCursor *pTbCur, ETableType jumpTableType) {
  return 0;
}

int32_t metaTbCursorPrev(SMTbCursor *pTbCur) {
  int    ret;
  void  *pBuf;
  STbCfg tbCfg;

  for (;;) {
    ret = tdbTbcPrev(pTbCur->pDbc, &pTbCur->pKey, &pTbCur->kLen, &pTbCur->pVal, &pTbCur->vLen);
    if (ret < 0) {
      return -1;
    }

    tDecoderClear(&pTbCur->mr.coder);

    metaGetTableEntryByVersion(&pTbCur->mr, ((SUidIdxVal *)pTbCur->pVal)[0].version, *(tb_uid_t *)pTbCur->pKey);
    if (pTbCur->mr.me.type == TSDB_SUPER_TABLE) {
      continue;
    }

    break;
  }

  return 0;
}

SSchemaWrapper *metaGetTableSchema(SMeta *pMeta, tb_uid_t uid, int32_t sver, int lock) {
  void *pData = NULL;
  int   nData = 0;
|
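// Usage sketch (illustrative, not part of this diff): stepping back over rows after a forward scan,
// using the cursor helpers declared in vnode.h; metaTbCursorPrev skips super tables, as the loop
// above shows. How far back the cursor can move depends on where metaOpenTbCursor positions it.
// static void peekPreviousTable(SMeta *pMeta) {
//   SMTbCursor *pTbCur = metaOpenTbCursor(pMeta);
//   if (pTbCur == NULL) return;
//   while (metaTbCursorNext(pTbCur, TSDB_TABLE_MAX) == 0) { /* walk forward */ }
//   if (metaTbCursorPrev(pTbCur) == 0) {
//     // pTbCur->mr.me is the last non-super-table entry visited
//   }
//   metaCloseTbCursor(pTbCur);
// }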
@ -682,9 +706,8 @@ int32_t metaGetTbTSchemaEx(SMeta *pMeta, tb_uid_t suid, tb_uid_t uid, int32_t sv
|
|||
}
|
||||
}
|
||||
|
||||
if (sver <= 0) {
|
||||
metaError("meta/query: incorrect sver: %" PRId32 ".", sver);
|
||||
code = TSDB_CODE_FAILED;
|
||||
if (ASSERTS(sver > 0, __FILE__, __LINE__, "failed to get table schema version: %d", sver)) {
|
||||
code = TSDB_CODE_NOT_FOUND;
|
||||
goto _exit;
|
||||
}
|
||||
|
||||
|
|
|
@ -446,10 +446,10 @@ int32_t rsmaSnapWrite(SRSmaSnapWriter* pWriter, uint8_t* pData, uint32_t nData)
|
|||
// rsma1/rsma2
|
||||
if (pHdr->type == SNAP_DATA_RSMA1) {
|
||||
pHdr->type = SNAP_DATA_TSDB;
|
||||
code = tsdbSnapWrite(pWriter->pDataWriter[0], pData, nData);
|
||||
code = tsdbSnapWrite(pWriter->pDataWriter[0], pHdr);
|
||||
} else if (pHdr->type == SNAP_DATA_RSMA2) {
|
||||
pHdr->type = SNAP_DATA_TSDB;
|
||||
code = tsdbSnapWrite(pWriter->pDataWriter[1], pData, nData);
|
||||
code = tsdbSnapWrite(pWriter->pDataWriter[1], pHdr);
|
||||
} else if (pHdr->type == SNAP_DATA_QTASK) {
|
||||
code = rsmaSnapWriteQTaskInfo(pWriter, pData, nData);
|
||||
} else {
|
||||
|
|
|
@ -520,7 +520,12 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
|
|||
tqOffsetResetToData(&fetchOffsetNew, 0, 0);
|
||||
}
|
||||
} else {
|
||||
tqOffsetResetToLog(&fetchOffsetNew, walGetFirstVer(pTq->pVnode->pWal));
|
||||
pHandle->pRef = walRefFirstVer(pTq->pVnode->pWal, pHandle->pRef);
|
||||
if (pHandle->pRef == NULL) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
return -1;
|
||||
}
|
||||
tqOffsetResetToLog(&fetchOffsetNew, pHandle->pRef->refVer - 1);
|
||||
}
|
||||
} else if (reqOffset.type == TMQ_OFFSET__RESET_LATEST) {
|
||||
if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
|
||||
|
|
|
@ -56,7 +56,7 @@ int32_t tqOffsetSnapRead(STqOffsetReader* pReader, uint8_t** ppData) {
|
|||
TdFilePtr pFile = taosOpenFile(fname, TD_FILE_READ);
|
||||
if (pFile == NULL) {
|
||||
taosMemoryFree(fname);
|
||||
return -1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int64_t sz = 0;
|
||||
|
|
|
@ -488,9 +488,7 @@ void tqSinkToTablePipeline2(SStreamTask* pTask, void* vnode, int64_t ver, void*
|
|||
};
|
||||
void* pData = colDataGetData(pTagData, rowId);
|
||||
if (colDataIsNull_s(pTagData, rowId)) {
|
||||
tagVal.type = TSDB_DATA_TYPE_NULL;
|
||||
tagVal.pData = NULL;
|
||||
tagVal.nData = 0;
|
||||
continue;
|
||||
} else if (IS_VAR_DATA_TYPE(pTagData->info.type)) {
|
||||
tagVal.nData = varDataLen(pData);
|
||||
tagVal.pData = varDataVal(pData);
|
||||
|
|
|
@ -268,7 +268,10 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
|
|||
}
|
||||
|
||||
taosThreadMutexLock(&pr->readerMutex);
|
||||
tsdbTakeReadSnap((STsdbReader*)pr, tsdbCacheQueryReseek, &pr->pReadSnap);
|
||||
code = tsdbTakeReadSnap((STsdbReader*)pr, tsdbCacheQueryReseek, &pr->pReadSnap);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
goto _end;
|
||||
}
|
||||
pr->pDataFReader = NULL;
|
||||
pr->pDataFReaderLast = NULL;
|
||||
|
||||
|
@ -279,7 +282,7 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
|
|||
|
||||
code = doExtractCacheRow(pr, lruCache, pKeyInfo->uid, &pRow, &h);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
goto _end;
|
||||
}
|
||||
|
||||
if (h == NULL) {
|
||||
|
@ -352,7 +355,7 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
|
|||
STableKeyInfo* pKeyInfo = &pr->pTableList[i];
|
||||
code = doExtractCacheRow(pr, lruCache, pKeyInfo->uid, &pRow, &h);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
goto _end;
|
||||
}
|
||||
|
||||
if (h == NULL) {
|
||||
|
|
|
@ -458,9 +458,8 @@ static int32_t tsdbMergeFileSet(STsdb *pTsdb, SDFileSet *pSetOld, SDFileSet *pSe
|
|||
taosMemoryFree(pHeadF);
|
||||
}
|
||||
} else {
|
||||
nRef = pHeadF->nRef;
|
||||
*pHeadF = *pSetNew->pHeadF;
|
||||
pHeadF->nRef = nRef;
|
||||
ASSERT(pHeadF->offset == pSetNew->pHeadF->offset);
|
||||
ASSERT(pHeadF->size == pSetNew->pHeadF->size);
|
||||
}
|
||||
|
||||
// data
|
||||
|
@ -481,9 +480,7 @@ static int32_t tsdbMergeFileSet(STsdb *pTsdb, SDFileSet *pSetOld, SDFileSet *pSe
|
|||
taosMemoryFree(pDataF);
|
||||
}
|
||||
} else {
|
||||
nRef = pDataF->nRef;
|
||||
*pDataF = *pSetNew->pDataF;
|
||||
pDataF->nRef = nRef;
|
||||
pDataF->size = pSetNew->pDataF->size;
|
||||
}
|
||||
|
||||
// sma
|
||||
|
@ -504,9 +501,7 @@ static int32_t tsdbMergeFileSet(STsdb *pTsdb, SDFileSet *pSetOld, SDFileSet *pSe
|
|||
taosMemoryFree(pSmaF);
|
||||
}
|
||||
} else {
|
||||
nRef = pSmaF->nRef;
|
||||
*pSmaF = *pSetNew->pSmaF;
|
||||
pSmaF->nRef = nRef;
|
||||
pSmaF->size = pSetNew->pSmaF->size;
|
||||
}
|
||||
|
||||
// stt
|
||||
|
|
|
@ -64,8 +64,6 @@ int32_t tsdbMemTableCreate(STsdb *pTsdb, SMemTable **ppMemTable) {
|
|||
taosMemoryFree(pMemTable);
|
||||
goto _err;
|
||||
}
|
||||
// pMemTable->qList.pNext = &pMemTable->qList;
|
||||
// pMemTable->qList.ppNext = &pMemTable->qList.pNext;
|
||||
vnodeBufPoolRef(pMemTable->pPool);
|
||||
|
||||
*ppMemTable = pMemTable;
|
||||
|
@ -112,30 +110,6 @@ int32_t tsdbInsertTableData(STsdb *pTsdb, int64_t version, SSubmitTbData *pSubmi
|
|||
tb_uid_t suid = pSubmitTbData->suid;
|
||||
tb_uid_t uid = pSubmitTbData->uid;
|
||||
|
||||
#if 0
|
||||
SMetaInfo info;
|
||||
code = metaGetInfo(pTsdb->pVnode->pMeta, uid, &info, NULL);
|
||||
if (code) {
|
||||
code = TSDB_CODE_TDB_TABLE_NOT_EXIST;
|
||||
goto _err;
|
||||
}
|
||||
if (info.suid != suid) {
|
||||
code = TSDB_CODE_INVALID_MSG;
|
||||
goto _err;
|
||||
}
|
||||
if (info.suid) {
|
||||
metaGetInfo(pTsdb->pVnode->pMeta, info.suid, &info, NULL);
|
||||
}
|
||||
if (pSubmitTbData->sver != info.skmVer) {
|
||||
tsdbError("vgId:%d, req sver:%d, skmVer:%d suid:%" PRId64 " uid:%" PRId64, TD_VID(pTsdb->pVnode),
|
||||
pSubmitTbData->sver, info.skmVer, suid, uid);
|
||||
code = TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER;
|
||||
goto _err;
|
||||
}
|
||||
|
||||
if (pRsp) pRsp->sver = info.skmVer;
|
||||
#endif
|
||||
|
||||
// create/get STbData to op
|
||||
code = tsdbGetOrCreateTbData(pMemTable, suid, uid, &pTbData);
|
||||
if (code) {
|
||||
|
@ -811,30 +785,4 @@ SArray *tsdbMemTableGetTbDataArray(SMemTable *pMemTable) {
|
|||
|
||||
_exit:
|
||||
return aTbDataP;
|
||||
}
|
||||
|
||||
// int32_t tsdbRecycleMemTable(SMemTable *pMemTable) {
|
||||
// int32_t code = 0;
|
||||
|
||||
// SQueryNode *pNode = pMemTable->qList.pNext;
|
||||
// while (1) {
|
||||
// ASSERT(pNode != &pMemTable->qList);
|
||||
// SQueryNode *pNextNode = pNode->pNext;
|
||||
|
||||
// if (pNextNode == &pMemTable->qList) {
|
||||
// code = (*pNode->reseek)(pNode->pQHandle);
|
||||
// if (code) goto _exit;
|
||||
// break;
|
||||
// } else {
|
||||
// code = (*pNode->reseek)(pNode->pQHandle);
|
||||
// if (code) goto _exit;
|
||||
// pNode = pMemTable->qList.pNext;
|
||||
// ASSERT(pNode == pNextNode);
|
||||
// }
|
||||
// }
|
||||
|
||||
// // NOTE: Take care here, pMemTable is destroyed
|
||||
|
||||
// _exit:
|
||||
// return code;
|
||||
// }
|
||||
}
|
|
@ -573,6 +573,68 @@ static SSDataBlock* createResBlock(SQueryTableDataCond* pCond, int32_t capacity)
  return pResBlock;
}

static int32_t tsdbInitReaderLock(STsdbReader* pReader) {
  int32_t code = -1;
  qTrace("tsdb/read: %p, pre-init read mutex: %p, code: %d", pReader, &pReader->readerMutex, code);

  code = taosThreadMutexInit(&pReader->readerMutex, NULL);

  qTrace("tsdb/read: %p, post-init read mutex: %p, code: %d", pReader, &pReader->readerMutex, code);

  return code;
}

static int32_t tsdbUninitReaderLock(STsdbReader* pReader) {
  int32_t code = -1;
  qTrace("tsdb/read: %p, pre-uninit read mutex: %p, code: %d", pReader, &pReader->readerMutex, code);

  code = taosThreadMutexDestroy(&pReader->readerMutex);

  qTrace("tsdb/read: %p, post-uninit read mutex: %p, code: %d", pReader, &pReader->readerMutex, code);

  return code;
}

static int32_t tsdbAcquireReader(STsdbReader* pReader) {
  int32_t code = -1;
  qTrace("tsdb/read: %p, pre-take read mutex: %p, code: %d", pReader, &pReader->readerMutex, code);

  code = taosThreadMutexLock(&pReader->readerMutex);

  qTrace("tsdb/read: %p, post-take read mutex: %p, code: %d", pReader, &pReader->readerMutex, code);

  return code;
}

static int32_t tsdbTryAcquireReader(STsdbReader* pReader) {
  int32_t code = -1;
  qTrace("tsdb/read: %p, pre-trytake read mutex: %p, code: %d", pReader, &pReader->readerMutex, code);

  code = taosThreadMutexTryLock(&pReader->readerMutex);

  qTrace("tsdb/read: %p, post-trytake read mutex: %p, code: %d", pReader, &pReader->readerMutex, code);

  return code;
}

static int32_t tsdbReleaseReader(STsdbReader* pReader) {
  int32_t code = -1;
  qTrace("tsdb/read: %p, pre-untake read mutex: %p, code: %d", pReader, &pReader->readerMutex, code);

  code = taosThreadMutexUnlock(&pReader->readerMutex);

  qTrace("tsdb/read: %p, post-untake read mutex: %p, code: %d", pReader, &pReader->readerMutex, code);

  return code;
}

void tsdbReleaseDataBlock(STsdbReader* pReader) {
  SReaderStatus* pStatus = &pReader->status;
  if (!pStatus->composedDataBlock) {
    tsdbReleaseReader(pReader);
  }
}

static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, STsdbReader** ppReader, int32_t capacity,
                                SSDataBlock* pResBlock, const char* idstr) {
  int32_t code = 0;
|
||||
|
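// Illustrative sketch (not part of this diff): the locking protocol the helpers above wrap. Reader
// entry points take the lock on entry, resume a suspended reader, do their work, and release the
// lock unless a composed block is still being handed back (tsdbReleaseDataBlock releases it later).
// static int32_t readerEntrySketch(STsdbReader* pReader) {
//   int32_t code = tsdbAcquireReader(pReader);   // traced taosThreadMutexLock
//   if (code != 0) return code;
//   if (pReader->suspended) {
//     tsdbReaderResume(pReader);                 // rebuild snapshot/iterators dropped by suspend
//   }
//   /* ... scan work ... */
//   return tsdbReleaseReader(pReader);           // traced taosThreadMutexUnlock
// }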
@ -636,7 +698,7 @@ static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, STsd
|
|||
|
||||
setColumnIdSlotList(&pReader->suppInfo, pCond->colList, pCond->pSlotList, pCond->numOfCols);
|
||||
|
||||
taosThreadMutexInit(&pReader->readerMutex, NULL);
|
||||
tsdbInitReaderLock(pReader);
|
||||
|
||||
*ppReader = pReader;
|
||||
return code;
|
||||
|
@ -1776,12 +1838,15 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
|
|||
}
|
||||
|
||||
if (minKey == k.ts) {
|
||||
STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid);
|
||||
if (pSchema == NULL) {
|
||||
return terrno;
|
||||
}
|
||||
if (init) {
|
||||
tsdbRowMerge(&merge, pRow);
|
||||
tsdbRowMergerAdd(&merge, pRow, pSchema);
|
||||
} else {
|
||||
init = true;
|
||||
STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid);
|
||||
int32_t code = tsdbRowMergerInit(&merge, pRow, pSchema);
|
||||
int32_t code = tsdbRowMergerInit(&merge, pRow, pSchema);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
|
@ -2882,7 +2947,7 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) {
|
|||
|
||||
if (pResBlock->info.rows > 0) {
|
||||
tsdbDebug("%p uid:%" PRIu64 ", composed data block created, brange:%" PRIu64 "-%" PRIu64
|
||||
" rows:%d, elapsed time:%.2f ms %s",
|
||||
" rows:%d, elapsed time:%.2f ms %s",
|
||||
pReader, pResBlock->info.id.uid, pResBlock->info.window.skey, pResBlock->info.window.ekey,
|
||||
pResBlock->info.rows, el, pReader->idStr);
|
||||
}
|
||||
|
@ -2932,7 +2997,7 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) {
|
|||
|
||||
if (pResBlock->info.rows > 0) {
|
||||
tsdbDebug("%p uid:%" PRIu64 ", composed data block created, brange:%" PRIu64 "-%" PRIu64
|
||||
" rows:%d, elapsed time:%.2f ms %s",
|
||||
" rows:%d, elapsed time:%.2f ms %s",
|
||||
pReader, pResBlock->info.id.uid, pResBlock->info.window.skey, pResBlock->info.window.ekey,
|
||||
pResBlock->info.rows, el, pReader->idStr);
|
||||
}
|
||||
|
@ -3954,6 +4019,7 @@ void tsdbReaderClose(STsdbReader* pReader) {
|
|||
return;
|
||||
}
|
||||
|
||||
tsdbAcquireReader(pReader);
|
||||
{
|
||||
if (pReader->innerReader[0] != NULL || pReader->innerReader[1] != NULL) {
|
||||
STsdbReader* p = pReader->innerReader[0];
|
||||
|
@ -4011,10 +4077,13 @@ void tsdbReaderClose(STsdbReader* pReader) {
|
|||
pReader->pDelIdx = NULL;
|
||||
}
|
||||
|
||||
qTrace("tsdb/reader: %p, untake snapshot", pReader);
|
||||
qTrace("tsdb/reader-close: %p, untake snapshot", pReader);
|
||||
tsdbUntakeReadSnap(pReader, pReader->pReadSnap, true);
|
||||
pReader->pReadSnap = NULL;
|
||||
|
||||
taosThreadMutexDestroy(&pReader->readerMutex);
|
||||
tsdbReleaseReader(pReader);
|
||||
|
||||
tsdbUninitReaderLock(pReader);
|
||||
|
||||
taosMemoryFree(pReader->status.uidCheckInfo.tableUidList);
|
||||
SIOCostSummary* pCost = &pReader->cost;
|
||||
|
@ -4096,6 +4165,28 @@ int32_t tsdbReaderSuspend(STsdbReader* pReader) {
|
|||
// pInfo->lastKey = ts;
|
||||
}
|
||||
} else {
|
||||
// resetDataBlockScanInfo excluding lastKey
|
||||
STableBlockScanInfo** p = NULL;
|
||||
|
||||
while ((p = taosHashIterate(pStatus->pTableMap, p)) != NULL) {
|
||||
STableBlockScanInfo* pInfo = *(STableBlockScanInfo**)p;
|
||||
|
||||
pInfo->iterInit = false;
|
||||
pInfo->iter.hasVal = false;
|
||||
pInfo->iiter.hasVal = false;
|
||||
|
||||
if (pInfo->iter.iter != NULL) {
|
||||
pInfo->iter.iter = tsdbTbDataIterDestroy(pInfo->iter.iter);
|
||||
}
|
||||
|
||||
if (pInfo->iiter.iter != NULL) {
|
||||
pInfo->iiter.iter = tsdbTbDataIterDestroy(pInfo->iiter.iter);
|
||||
}
|
||||
|
||||
pInfo->delSkyline = taosArrayDestroy(pInfo->delSkyline);
|
||||
// pInfo->lastKey = ts;
|
||||
}
|
||||
|
||||
pBlockScanInfo = pStatus->pTableIter == NULL ? NULL : *pStatus->pTableIter;
|
||||
if (pBlockScanInfo) {
|
||||
// save lastKey to restore memory iterator
|
||||
|
@ -4104,7 +4195,8 @@ int32_t tsdbReaderSuspend(STsdbReader* pReader) {
|
|||
|
||||
// reset current current table's data block scan info,
|
||||
pBlockScanInfo->iterInit = false;
|
||||
// pBlockScanInfo->iiter.hasVal = false;
|
||||
pBlockScanInfo->iter.hasVal = false;
|
||||
pBlockScanInfo->iiter.hasVal = false;
|
||||
if (pBlockScanInfo->iter.iter != NULL) {
|
||||
pBlockScanInfo->iter.iter = tsdbTbDataIterDestroy(pBlockScanInfo->iter.iter);
|
||||
}
|
||||
|
@ -4138,16 +4230,16 @@ static int32_t tsdbSetQueryReseek(void* pQHandle) {
|
|||
int32_t code = 0;
|
||||
STsdbReader* pReader = pQHandle;
|
||||
|
||||
code = taosThreadMutexTryLock(&pReader->readerMutex);
|
||||
code = tsdbTryAcquireReader(pReader);
|
||||
if (code == 0) {
|
||||
if (pReader->suspended) {
|
||||
taosThreadMutexUnlock(&pReader->readerMutex);
|
||||
tsdbReleaseReader(pReader);
|
||||
return code;
|
||||
}
|
||||
|
||||
tsdbReaderSuspend(pReader);
|
||||
|
||||
taosThreadMutexUnlock(&pReader->readerMutex);
|
||||
tsdbReleaseReader(pReader);
|
||||
|
||||
return code;
|
||||
} else if (code == EBUSY) {
|
||||
|
@ -4248,8 +4340,9 @@ bool tsdbNextDataBlock(STsdbReader* pReader) {
|
|||
|
||||
SReaderStatus* pStatus = &pReader->status;
|
||||
|
||||
qTrace("tsdb/read: %p, take read mutex", pReader);
|
||||
taosThreadMutexLock(&pReader->readerMutex);
|
||||
int32_t code = tsdbAcquireReader(pReader);
|
||||
qTrace("tsdb/read: %p, take read mutex, code: %d", pReader, code);
|
||||
|
||||
if (pReader->suspended) {
|
||||
tsdbReaderResume(pReader);
|
||||
}
|
||||
|
@ -4261,7 +4354,7 @@ bool tsdbNextDataBlock(STsdbReader* pReader) {
|
|||
pStatus = &pReader->innerReader[0]->status;
|
||||
if (pStatus->composedDataBlock) {
|
||||
qTrace("tsdb/read: %p, unlock read mutex", pReader);
|
||||
taosThreadMutexUnlock(&pReader->readerMutex);
|
||||
tsdbReleaseReader(pReader);
|
||||
}
|
||||
|
||||
return ret;
|
||||
|
@ -4284,7 +4377,7 @@ bool tsdbNextDataBlock(STsdbReader* pReader) {
|
|||
if (ret) {
|
||||
if (pStatus->composedDataBlock) {
|
||||
qTrace("tsdb/read: %p, unlock read mutex", pReader);
|
||||
taosThreadMutexUnlock(&pReader->readerMutex);
|
||||
tsdbReleaseReader(pReader);
|
||||
}
|
||||
|
||||
return ret;
|
||||
|
@ -4304,7 +4397,7 @@ bool tsdbNextDataBlock(STsdbReader* pReader) {
|
|||
pStatus = &pReader->innerReader[1]->status;
|
||||
if (pStatus->composedDataBlock) {
|
||||
qTrace("tsdb/read: %p, unlock read mutex", pReader);
|
||||
taosThreadMutexUnlock(&pReader->readerMutex);
|
||||
tsdbReleaseReader(pReader);
|
||||
}
|
||||
|
||||
return ret1;
|
||||
|
@ -4312,7 +4405,7 @@ bool tsdbNextDataBlock(STsdbReader* pReader) {
|
|||
}
|
||||
|
||||
qTrace("tsdb/read: %p, unlock read mutex", pReader);
|
||||
taosThreadMutexUnlock(&pReader->readerMutex);
|
||||
tsdbReleaseReader(pReader);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
@ -4471,13 +4564,6 @@ static SSDataBlock* doRetrieveDataBlock(STsdbReader* pReader) {
|
|||
return pReader->pResBlock;
|
||||
}
|
||||
|
||||
void tsdbReleaseDataBlock(STsdbReader* pReader) {
|
||||
// SReaderStatus* pStatus = &pReader->status;
|
||||
// if (!pStatus->composedDataBlock) {
|
||||
taosThreadMutexUnlock(&pReader->readerMutex);
|
||||
//}
|
||||
}
|
||||
|
||||
SSDataBlock* tsdbRetrieveDataBlock(STsdbReader* pReader, SArray* pIdList) {
|
||||
STsdbReader* pTReader = pReader;
|
||||
if (pReader->type == TIMEWINDOW_RANGE_EXTERNAL) {
|
||||
|
@ -4496,7 +4582,7 @@ SSDataBlock* tsdbRetrieveDataBlock(STsdbReader* pReader, SArray* pIdList) {
|
|||
SSDataBlock* ret = doRetrieveDataBlock(pTReader);
|
||||
|
||||
qTrace("tsdb/read-retrieve: %p, unlock read mutex", pReader);
|
||||
taosThreadMutexUnlock(&pReader->readerMutex);
|
||||
tsdbReleaseReader(pReader);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -4505,7 +4591,7 @@ int32_t tsdbReaderReset(STsdbReader* pReader, SQueryTableDataCond* pCond) {
|
|||
SReaderStatus* pStatus = &pReader->status;
|
||||
|
||||
qTrace("tsdb/reader-reset: %p, take read mutex", pReader);
|
||||
taosThreadMutexLock(&pReader->readerMutex);
|
||||
tsdbAcquireReader(pReader);
|
||||
|
||||
if (pReader->suspended) {
|
||||
tsdbReaderResume(pReader);
|
||||
|
@ -4514,7 +4600,7 @@ int32_t tsdbReaderReset(STsdbReader* pReader, SQueryTableDataCond* pCond) {
|
|||
if (isEmptyQueryTimeWindow(&pReader->window) || pReader->pReadSnap == NULL) {
|
||||
tsdbDebug("tsdb reader reset return %p", pReader->pReadSnap);
|
||||
|
||||
taosThreadMutexUnlock(&pReader->readerMutex);
|
||||
tsdbReleaseReader(pReader);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
@ -4552,7 +4638,7 @@ int32_t tsdbReaderReset(STsdbReader* pReader, SQueryTableDataCond* pCond) {
|
|||
tsdbError("%p reset reader failed, numOfTables:%d, query range:%" PRId64 " - %" PRId64 " in query %s", pReader,
|
||||
numOfTables, pReader->window.skey, pReader->window.ekey, pReader->idStr);
|
||||
|
||||
taosThreadMutexUnlock(&pReader->readerMutex);
|
||||
tsdbReleaseReader(pReader);
|
||||
|
||||
return code;
|
||||
}
|
||||
|
@ -4563,7 +4649,7 @@ int32_t tsdbReaderReset(STsdbReader* pReader, SQueryTableDataCond* pCond) {
|
|||
pReader, pReader->suid, numOfTables, pCond->twindows.skey, pReader->window.skey, pReader->window.ekey,
|
||||
pReader->idStr);
|
||||
|
||||
taosThreadMutexUnlock(&pReader->readerMutex);
|
||||
tsdbReleaseReader(pReader);
|
||||
|
||||
return code;
|
||||
}
|
||||
|
@ -4648,7 +4734,7 @@ int64_t tsdbGetNumOfRowsInMemTable(STsdbReader* pReader) {
|
|||
int64_t rows = 0;
|
||||
|
||||
SReaderStatus* pStatus = &pReader->status;
|
||||
taosThreadMutexLock(&pReader->readerMutex);
|
||||
tsdbAcquireReader(pReader);
|
||||
if (pReader->suspended) {
|
||||
tsdbReaderResume(pReader);
|
||||
}
|
||||
|
@ -4678,7 +4764,7 @@ int64_t tsdbGetNumOfRowsInMemTable(STsdbReader* pReader) {
|
|||
pStatus->pTableIter = taosHashIterate(pStatus->pTableMap, pStatus->pTableIter);
|
||||
}
|
||||
|
||||
taosThreadMutexUnlock(&pReader->readerMutex);
|
||||
tsdbReleaseReader(pReader);
|
||||
|
||||
return rows;
|
||||
}
|
||||
|
|
File diff suppressed because it is too large
|
@ -732,6 +732,7 @@ int32_t tsdbRowMergerAdd(SRowMerger *pMerger, TSDBROW *pRow, STSchema *pTSchema)
|
|||
tsdbRowGetColVal(pRow, pTSchema, jCol++, pColVal);
|
||||
|
||||
if (key.version > pMerger->version) {
|
||||
#if 0
|
||||
if (!COL_VAL_IS_NONE(pColVal)) {
|
||||
if ((!COL_VAL_IS_NULL(pColVal)) && IS_VAR_DATA_TYPE(pColVal->type)) {
|
||||
SColVal *tColVal = taosArrayGet(pMerger->pArray, iCol);
|
||||
|
@ -747,6 +748,28 @@ int32_t tsdbRowMergerAdd(SRowMerger *pMerger, TSDBROW *pRow, STSchema *pTSchema)
|
|||
taosArraySet(pMerger->pArray, iCol, pColVal);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
if (!COL_VAL_IS_NONE(pColVal)) {
|
||||
if (IS_VAR_DATA_TYPE(pColVal->type)) {
|
||||
SColVal *pTColVal = taosArrayGet(pMerger->pArray, iCol);
|
||||
if (!COL_VAL_IS_NULL(pColVal)) {
|
||||
code = tRealloc(&pTColVal->value.pData, pColVal->value.nData);
|
||||
if (code) return code;
|
||||
|
||||
pTColVal->value.nData = pColVal->value.nData;
|
||||
if (pTColVal->value.nData) {
|
||||
memcpy(pTColVal->value.pData, pColVal->value.pData, pTColVal->value.nData);
|
||||
}
|
||||
pTColVal->flag = 0;
|
||||
} else {
|
||||
tFree(pTColVal->value.pData);
|
||||
pTColVal->value.pData = NULL;
|
||||
taosArraySet(pMerger->pArray, iCol, pColVal);
|
||||
}
|
||||
} else {
|
||||
taosArraySet(pMerger->pArray, iCol, pColVal);
|
||||
}
|
||||
}
|
||||
} else if (key.version < pMerger->version) {
|
||||
SColVal *tColVal = (SColVal *)taosArrayGet(pMerger->pArray, iCol);
|
||||
if (COL_VAL_IS_NONE(tColVal) && !COL_VAL_IS_NONE(pColVal)) {
|
||||
|
@ -1110,6 +1133,7 @@ _exit:
|
|||
void tBlockDataReset(SBlockData *pBlockData) {
|
||||
pBlockData->suid = 0;
|
||||
pBlockData->uid = 0;
|
||||
pBlockData->nRow = 0;
|
||||
}
|
||||
|
||||
void tBlockDataClear(SBlockData *pBlockData) {
|
||||
|
|
|
@ -219,10 +219,10 @@ void vnodeBufPoolAddToFreeList(SVBufPool *pPool) {
|
|||
if (pPool->node.size != size) {
|
||||
SVBufPool *pNewPool = NULL;
|
||||
if (vnodeBufPoolCreate(pVnode, pPool->id, size, &pNewPool) < 0) {
|
||||
vWarn("vgId:%d failed to change buffer pool of id %d size from %" PRId64 " to %" PRId64 " since %s",
|
||||
vWarn("vgId:%d, failed to change buffer pool of id %d size from %" PRId64 " to %" PRId64 " since %s",
|
||||
TD_VID(pVnode), pPool->id, pPool->node.size, size, tstrerror(errno));
|
||||
} else {
|
||||
vInfo("vgId:%d buffer pool of id %d size changed from %" PRId64 " to %" PRId64, TD_VID(pVnode), pPool->id,
|
||||
vInfo("vgId:%d, buffer pool of id %d size changed from %" PRId64 " to %" PRId64, TD_VID(pVnode), pPool->id,
|
||||
pPool->node.size, size);
|
||||
|
||||
vnodeBufPoolDestroy(pPool);
|
||||
|
@ -232,7 +232,7 @@ void vnodeBufPoolAddToFreeList(SVBufPool *pPool) {
|
|||
}
|
||||
|
||||
// add to free list
|
||||
vDebug("vgId:%d buffer pool %p of id %d is added to free list", TD_VID(pVnode), pPool, pPool->id);
|
||||
vDebug("vgId:%d, buffer pool %p of id %d is added to free list", TD_VID(pVnode), pPool, pPool->id);
|
||||
vnodeBufPoolReset(pPool);
|
||||
pPool->freeNext = pVnode->freeList;
|
||||
pVnode->freeList = pPool;
|
||||
|
@ -252,8 +252,6 @@ void vnodeBufPoolUnRef(SVBufPool *pPool, bool proactive) {
|
|||
if (pVnode->onRecycle == pPool) {
|
||||
pVnode->onRecycle = NULL;
|
||||
} else {
|
||||
ASSERT(proactive);
|
||||
|
||||
if (pPool->recyclePrev) {
|
||||
pPool->recyclePrev->recycleNext = pPool->recycleNext;
|
||||
} else {
|
||||
|
@ -309,7 +307,7 @@ int32_t vnodeBufPoolRecycle(SVBufPool *pPool) {
|
|||
|
||||
SVnode *pVnode = pPool->pVnode;
|
||||
|
||||
vDebug("vgId:%d recycle buffer pool %p of id %d", TD_VID(pVnode), pPool, pPool->id);
|
||||
vDebug("vgId:%d, recycle buffer pool %p of id %d", TD_VID(pVnode), pPool, pPool->id);
|
||||
|
||||
taosThreadMutexLock(&pPool->mutex);
|
||||
|
||||
|
@ -329,4 +327,4 @@ int32_t vnodeBufPoolRecycle(SVBufPool *pPool) {
|
|||
_exit:
|
||||
taosThreadMutexUnlock(&pPool->mutex);
|
||||
return code;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -28,10 +28,10 @@ static int32_t vnodeTryRecycleBufPool(SVnode *pVnode) {
|
|||
|
||||
if (pVnode->onRecycle == NULL) {
|
||||
if (pVnode->recycleHead == NULL) {
|
||||
vDebug("vgId:%d no recyclable buffer pool", TD_VID(pVnode));
|
||||
vDebug("vgId:%d, no recyclable buffer pool", TD_VID(pVnode));
|
||||
goto _exit;
|
||||
} else {
|
||||
vDebug("vgId:%d buffer pool %p of id %d on recycle queue, try to recycle", TD_VID(pVnode), pVnode->recycleHead,
|
||||
vDebug("vgId:%d, buffer pool %p of id %d on recycle queue, try to recycle", TD_VID(pVnode), pVnode->recycleHead,
|
||||
pVnode->recycleHead->id);
|
||||
|
||||
pVnode->onRecycle = pVnode->recycleHead;
|
||||
|
@ -50,7 +50,7 @@ static int32_t vnodeTryRecycleBufPool(SVnode *pVnode) {
|
|||
|
||||
_exit:
|
||||
if (code) {
|
||||
vError("vgId:%d %s failed since %s", TD_VID(pVnode), __func__, tstrerror(code));
|
||||
vError("vgId:%d, %s failed since %s", TD_VID(pVnode), __func__, tstrerror(code));
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
@ -65,7 +65,7 @@ static int32_t vnodeGetBufPoolToUse(SVnode *pVnode) {
|
|||
++nTry;
|
||||
|
||||
if (pVnode->freeList) {
|
||||
vDebug("vgId:%d allocate free buffer pool on %d try, pPool:%p id:%d", TD_VID(pVnode), nTry, pVnode->freeList,
|
||||
vDebug("vgId:%d, allocate free buffer pool on %d try, pPool:%p id:%d", TD_VID(pVnode), nTry, pVnode->freeList,
|
||||
pVnode->freeList->id);
|
||||
|
||||
pVnode->inUse = pVnode->freeList;
|
||||
|
@ -74,13 +74,13 @@ static int32_t vnodeGetBufPoolToUse(SVnode *pVnode) {
|
|||
pVnode->inUse->freeNext = NULL;
|
||||
break;
|
||||
} else {
|
||||
vDebug("vgId:%d no free buffer pool on %d try, try to recycle...", TD_VID(pVnode), nTry);
|
||||
/*
|
||||
vDebug("vgId:%d, no free buffer pool on %d try, try to recycle...", TD_VID(pVnode), nTry);
|
||||
|
||||
code = vnodeTryRecycleBufPool(pVnode);
|
||||
TSDB_CHECK_CODE(code, lino, _exit);
|
||||
*/
|
||||
|
||||
if (pVnode->freeList == NULL) {
|
||||
vDebug("vgId:%d no free buffer pool on %d try, wait %d ms...", TD_VID(pVnode), nTry, WAIT_TIME_MILI_SEC);
|
||||
vDebug("vgId:%d, no free buffer pool on %d try, wait %d ms...", TD_VID(pVnode), nTry, WAIT_TIME_MILI_SEC);
|
||||
|
||||
struct timeval tv;
|
||||
struct timespec ts;
|
||||
|
@ -105,7 +105,7 @@ static int32_t vnodeGetBufPoolToUse(SVnode *pVnode) {
|
|||
_exit:
|
||||
taosThreadMutexUnlock(&pVnode->mutex);
|
||||
if (code) {
|
||||
vError("vgId:%d %s failed at line %d since %s", TD_VID(pVnode), __func__, lino, tstrerror(code));
|
||||
vError("vgId:%d, %s failed at line %d since %s", TD_VID(pVnode), __func__, lino, tstrerror(code));
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
@ -140,7 +140,7 @@ int vnodeBegin(SVnode *pVnode) {
|
|||
_exit:
|
||||
if (code) {
|
||||
terrno = code;
|
||||
vError("vgId:%d %s failed at line %d since %s", TD_VID(pVnode), __func__, lino, tstrerror(code));
|
||||
vError("vgId:%d, %s failed at line %d since %s", TD_VID(pVnode), __func__, lino, tstrerror(code));
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
@ -351,7 +351,7 @@ static void vnodeReturnBufPool(SVnode *pVnode) {
|
|||
if (nRef == 0) {
|
||||
vnodeBufPoolAddToFreeList(pPool);
|
||||
} else if (nRef > 0) {
|
||||
vDebug("vgId:%d buffer pool %p of id %d is added to recycle queue", TD_VID(pVnode), pPool, pPool->id);
|
||||
vDebug("vgId:%d, buffer pool %p of id %d is added to recycle queue", TD_VID(pVnode), pPool, pPool->id);
|
||||
|
||||
if (pVnode->recycleTail == NULL) {
|
||||
pPool->recyclePrev = pPool->recycleNext = NULL;
|
||||
|
|
|
@ -58,7 +58,7 @@ int32_t vnodeCreate(const char *path, SVnodeCfg *pCfg, STfs *pTfs) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
int32_t vnodeAlter(const char *path, SAlterVnodeReplicaReq *pReq, STfs *pTfs) {
|
||||
int32_t vnodeAlterReplica(const char *path, SAlterVnodeReplicaReq *pReq, STfs *pTfs) {
|
||||
SVnodeInfo info = {0};
|
||||
char dir[TSDB_FILENAME_LEN] = {0};
|
||||
int32_t ret = 0;
|
||||
|
@ -107,6 +107,117 @@ int32_t vnodeAlter(const char *path, SAlterVnodeReplicaReq *pReq, STfs *pTfs) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
int32_t vnodeRenameVgroupId(const char *srcPath, const char *dstPath, int32_t srcVgId, int32_t dstVgId, STfs *pTfs) {
|
||||
int32_t ret = tfsRename(pTfs, srcPath, dstPath);
|
||||
if (ret != 0) return ret;
|
||||
|
||||
char oldRname[TSDB_FILENAME_LEN] = {0};
|
||||
char newRname[TSDB_FILENAME_LEN] = {0};
|
||||
char tsdbPath[TSDB_FILENAME_LEN] = {0};
|
||||
char tsdbFilePrefix[TSDB_FILENAME_LEN] = {0};
|
||||
snprintf(tsdbPath, TSDB_FILENAME_LEN, "%s%stsdb", dstPath, TD_DIRSEP);
|
||||
snprintf(tsdbFilePrefix, TSDB_FILENAME_LEN, "tsdb%sv", TD_DIRSEP);
|
||||
|
||||
STfsDir *tsdbDir = tfsOpendir(pTfs, tsdbPath);
|
||||
if (tsdbDir == NULL) return 0;
|
||||
|
||||
while (1) {
|
||||
const STfsFile *tsdbFile = tfsReaddir(tsdbDir);
|
||||
if (tsdbFile == NULL) break;
|
||||
if (tsdbFile->rname == NULL) continue;
|
||||
tstrncpy(oldRname, tsdbFile->rname, TSDB_FILENAME_LEN);
|
||||
|
||||
char *tsdbFilePrefixPos = strstr(oldRname, tsdbFilePrefix);
|
||||
if (tsdbFilePrefixPos == NULL) continue;
|
||||
|
||||
int32_t tsdbFileVgId = atoi(tsdbFilePrefixPos + 6);
|
||||
if (tsdbFileVgId == srcVgId) {
|
||||
char *tsdbFileSurfixPos = strstr(tsdbFilePrefixPos, "f");
|
||||
if (tsdbFileSurfixPos == NULL) continue;
|
||||
|
||||
tsdbFilePrefixPos[6] = 0;
|
||||
snprintf(newRname, TSDB_FILENAME_LEN, "%s%d%s", oldRname, dstVgId, tsdbFileSurfixPos);
|
||||
vInfo("vgId:%d, rename file from %s to %s", dstVgId, tsdbFile->rname, newRname);
|
||||
|
||||
ret = tfsRename(pTfs, tsdbFile->rname, newRname);
|
||||
if (ret != 0) {
|
||||
vInfo("vgId:%d, failed to rename file from %s to %s since %s", dstVgId, tsdbFile->rname, newRname, terrstr());
|
||||
tfsClosedir(tsdbDir);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
tfsClosedir(tsdbDir);
|
||||
return 0;
|
||||
}
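// Worked example (illustrative; assumes the usual v<vgId>f<fid>... tsdb file naming): with
// srcVgId=2 and dstVgId=5, a file found under <dstPath>/tsdb such as
//   .../tsdb/v2f1744ver3.data
// has the digits after "tsdb/v" compared against srcVgId and, on a match, is renamed via tfsRename to
//   .../tsdb/v5f1744ver3.data
// Files whose embedded vgId differs from srcVgId are left untouched.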
|
||||
|
||||
int32_t vnodeAlterHashRange(const char *srcPath, const char *dstPath, SAlterVnodeHashRangeReq *pReq, STfs *pTfs) {
|
||||
SVnodeInfo info = {0};
|
||||
char dir[TSDB_FILENAME_LEN] = {0};
|
||||
int32_t ret = 0;
|
||||
|
||||
if (pTfs) {
|
||||
snprintf(dir, TSDB_FILENAME_LEN, "%s%s%s", tfsGetPrimaryPath(pTfs), TD_DIRSEP, srcPath);
|
||||
} else {
|
||||
snprintf(dir, TSDB_FILENAME_LEN, "%s", srcPath);
|
||||
}
|
||||
|
||||
// todo add stat file to handle exception while vnode open
|
||||
|
||||
ret = vnodeLoadInfo(dir, &info);
|
||||
if (ret < 0) {
|
||||
vError("vgId:%d, failed to read vnode config from %s since %s", pReq->srcVgId, srcPath, tstrerror(terrno));
|
||||
return -1;
|
||||
}
|
||||
|
||||
vInfo("vgId:%d, start to alter hashrange from [%u, %u) to [%u, %u)", pReq->srcVgId, info.config.hashBegin,
|
||||
info.config.hashEnd, pReq->hashBegin, pReq->hashEnd);
|
||||
info.config.vgId = pReq->dstVgId;
|
||||
info.config.hashBegin = pReq->hashBegin;
|
||||
info.config.hashEnd = pReq->hashEnd;
|
||||
info.config.walCfg.vgId = pReq->dstVgId;
|
||||
|
||||
SSyncCfg *pCfg = &info.config.syncCfg;
|
||||
pCfg->myIndex = 0;
|
||||
pCfg->replicaNum = 1;
|
||||
memset(&pCfg->nodeInfo, 0, sizeof(pCfg->nodeInfo));
|
||||
|
||||
vInfo("vgId:%d, alter vnode replicas to 1", pReq->srcVgId);
|
||||
SNodeInfo *pNode = &pCfg->nodeInfo[0];
|
||||
pNode->nodePort = tsServerPort;
|
||||
tstrncpy(pNode->nodeFqdn, tsLocalFqdn, TSDB_FQDN_LEN);
|
||||
(void)tmsgUpdateDnodeInfo(&pNode->nodeId, &pNode->clusterId, pNode->nodeFqdn, &pNode->nodePort);
|
||||
vInfo("vgId:%d, ep:%s:%u dnode:%d", pReq->srcVgId, pNode->nodeFqdn, pNode->nodePort, pNode->nodeId);
|
||||
|
||||
info.config.syncCfg = *pCfg;
|
||||
|
||||
ret = vnodeSaveInfo(dir, &info);
|
||||
if (ret < 0) {
|
||||
vError("vgId:%d, failed to save vnode config since %s", pReq->dstVgId, tstrerror(terrno));
|
||||
return -1;
|
||||
}
|
||||
|
||||
ret = vnodeCommitInfo(dir, &info);
|
||||
if (ret < 0) {
|
||||
vError("vgId:%d, failed to commit vnode config since %s", pReq->dstVgId, tstrerror(terrno));
|
||||
return -1;
|
||||
}
|
||||
|
||||
vInfo("vgId:%d, start to rename %s to %s", pReq->dstVgId, srcPath, dstPath);
|
||||
ret = vnodeRenameVgroupId(srcPath, dstPath, pReq->srcVgId, pReq->dstVgId, pTfs);
|
||||
if (ret < 0) {
|
||||
vError("vgId:%d, failed to rename vnode from %s to %s since %s", pReq->dstVgId, srcPath, dstPath,
|
||||
tstrerror(terrno));
|
||||
return -1;
|
||||
}
|
||||
|
||||
// todo vnode compact here
|
||||
|
||||
vInfo("vgId:%d, vnode hashrange is altered", info.config.vgId);
|
||||
return 0;
|
||||
}
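// How this fits together (a summary of what this diff shows, phrased as a sketch): mndSplitVgroup
// allocates two fresh vgroup ids via sdbGetMaxId(SDB_VGROUP) and queues an
// mndAddAlterVnodeHashRangeAction for each half; vnodeAlterHashRange (its caller is outside this
// diff) then rewrites the stored SVnodeInfo with the new vgId, hash range, and a single-replica
// sync config, commits it, and calls vnodeRenameVgroupId to move the directory and re-tag the
// tsdb files. The old in-place TDMT_VND_ALTER_HASHRANGE write handler is removed elsewhere in
// this diff.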
|
||||
|
||||
void vnodeDestroy(const char *path, STfs *pTfs) {
|
||||
vInfo("path:%s is removed while destroy vnode", path);
|
||||
tfsRmdir(pTfs, path);
|
||||
|
|
|
@ -455,7 +455,7 @@ int32_t vnodeSnapWrite(SVSnapWriter *pWriter, uint8_t *pData, uint32_t nData) {
|
|||
if (code) goto _err;
|
||||
}
|
||||
|
||||
code = tsdbSnapWrite(pWriter->pTsdbSnapWriter, pData, nData);
|
||||
code = tsdbSnapWrite(pWriter->pTsdbSnapWriter, pHdr);
|
||||
if (code) goto _err;
|
||||
} break;
|
||||
case SNAP_DATA_TQ_HANDLE: {
|
||||
|
|
|
@ -24,7 +24,6 @@ static int32_t vnodeProcessDropTbReq(SVnode *pVnode, int64_t version, void *pReq
|
|||
static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp);
|
||||
static int32_t vnodeProcessCreateTSmaReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp);
|
||||
static int32_t vnodeProcessAlterConfirmReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp);
|
||||
static int32_t vnodeProcessAlterHashRangeReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp);
|
||||
static int32_t vnodeProcessAlterConfigReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp);
|
||||
static int32_t vnodeProcessDropTtlTbReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp);
|
||||
static int32_t vnodeProcessTrimReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp);
|
||||
|
@ -313,9 +312,6 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRp
|
|||
case TDMT_VND_ALTER_CONFIRM:
|
||||
vnodeProcessAlterConfirmReq(pVnode, version, pReq, len, pRsp);
|
||||
break;
|
||||
case TDMT_VND_ALTER_HASHRANGE:
|
||||
vnodeProcessAlterHashRangeReq(pVnode, version, pReq, len, pRsp);
|
||||
break;
|
||||
case TDMT_VND_ALTER_CONFIG:
|
||||
vnodeProcessAlterConfigReq(pVnode, version, pReq, len, pRsp);
|
||||
break;
|
||||
|
@ -1246,16 +1242,6 @@ static int32_t vnodeProcessAlterConfirmReq(SVnode *pVnode, int64_t version, void
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int32_t vnodeProcessAlterHashRangeReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp) {
|
||||
vInfo("vgId:%d, alter hashrange msg will be processed", TD_VID(pVnode));
|
||||
|
||||
// todo
|
||||
// 1. stop work
|
||||
// 2. adjust hash range / compact / remove wals / rename vgroups
|
||||
// 3. reload sync
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int32_t vnodeProcessAlterConfigReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp) {
|
||||
bool walChanged = false;
|
||||
bool tsdbChanged = false;
|
||||
|
|
|
@ -805,6 +805,7 @@ int32_t ctgMakeVgArray(SDBVgInfo* dbInfo);
|
|||
int32_t ctgAcquireVgMetaFromCache(SCatalog *pCtg, const char *dbFName, const char *tbName, SCtgDBCache **pDb, SCtgTbCache **pTb);
|
||||
int32_t ctgCopyTbMeta(SCatalog *pCtg, SCtgTbMetaCtx *ctx, SCtgDBCache **pDb, SCtgTbCache **pTb, STableMeta **pTableMeta, char* dbFName);
|
||||
void ctgReleaseVgMetaToCache(SCatalog *pCtg, SCtgDBCache *dbCache, SCtgTbCache *pCache);
|
||||
void ctgReleaseTbMetaToCache(SCatalog *pCtg, SCtgDBCache *dbCache, SCtgTbCache *pCache);
|
||||
|
||||
extern SCatalogMgmt gCtgMgmt;
|
||||
extern SCtgDebug gCTGDebug;
|
||||
|
|
|
@ -598,10 +598,16 @@ int32_t ctgGetCachedTbVgMeta(SCatalog* pCtg, const SName* pTableName, SVgroupInf
|
|||
|
||||
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, dbCache->vgCache.vgInfo, pTableName, pVgroup));
|
||||
|
||||
ctgRUnlockVgInfo(dbCache);
|
||||
|
||||
SCtgTbMetaCtx ctx = {0};
|
||||
ctx.pName = (SName*)pTableName;
|
||||
ctx.flag = CTG_FLAG_UNKNOWN_STB;
|
||||
CTG_ERR_JRET(ctgCopyTbMeta(pCtg, &ctx, &dbCache, &tbCache, pTableMeta, db));
|
||||
code = ctgCopyTbMeta(pCtg, &ctx, &dbCache, &tbCache, pTableMeta, db);
|
||||
|
||||
ctgReleaseTbMetaToCache(pCtg, dbCache, tbCache);
|
||||
|
||||
CTG_RET(code);
|
||||
|
||||
_return:
|
||||
|
||||
|
|
|
@ -999,6 +999,7 @@ int32_t ctgHandleGetTbMetaRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf
|
|||
CTG_ERR_JRET(ctgGetTbMetaFromVnode(pCtg, pConn, pName, &vgInfo, NULL, tReq));
|
||||
|
||||
ctgReleaseVgInfoToCache(pCtg, dbCache);
|
||||
dbCache = NULL;
|
||||
} else {
|
||||
SBuildUseDBInput input = {0};
|
||||
|
||||
|
@ -1168,6 +1169,7 @@ int32_t ctgHandleGetTbMetasRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu
|
|||
CTG_ERR_JRET(ctgGetTbMetaFromVnode(pCtg, pConn, pName, &vgInfo, NULL, tReq));
|
||||
|
||||
ctgReleaseVgInfoToCache(pCtg, dbCache);
|
||||
dbCache = NULL;
|
||||
} else {
|
||||
SBuildUseDBInput input = {0};
|
||||
|
||||
|
|
|
@ -2118,7 +2118,7 @@ int32_t ctgOpUpdateEpset(SCtgCacheOperation *operation) {
|
|||
|
||||
_return:
|
||||
|
||||
if (dbCache) {
|
||||
if (code == TSDB_CODE_SUCCESS && dbCache) {
|
||||
ctgWUnlockVgInfo(dbCache);
|
||||
}
|
||||
|
||||
|
|
|
@ -281,10 +281,10 @@ static void setCreateDBResultIntoDataBlock(SSDataBlock* pBlock, char* dbFName, S
|
|||
len += sprintf(
|
||||
buf2 + VARSTR_HEADER_SIZE,
|
||||
"CREATE DATABASE `%s` BUFFER %d CACHESIZE %d CACHEMODEL '%s' COMP %d DURATION %dm "
|
||||
"WAL_FSYNC_PERIOD %d MAXROWS %d MINROWS %d KEEP %dm,%dm,%dm PAGES %d PAGESIZE %d PRECISION '%s' REPLICA %d "
|
||||
"WAL_FSYNC_PERIOD %d MAXROWS %d MINROWS %d STT_TRIGGER %d KEEP %dm,%dm,%dm PAGES %d PAGESIZE %d PRECISION '%s' REPLICA %d "
|
||||
"WAL_LEVEL %d VGROUPS %d SINGLE_STABLE %d",
|
||||
dbFName, pCfg->buffer, pCfg->cacheSize, cacheModelStr(pCfg->cacheLast), pCfg->compression, pCfg->daysPerFile,
|
||||
pCfg->walFsyncPeriod, pCfg->maxRows, pCfg->minRows, pCfg->daysToKeep0, pCfg->daysToKeep1, pCfg->daysToKeep2,
|
||||
pCfg->walFsyncPeriod, pCfg->maxRows, pCfg->minRows, pCfg->sstTrigger, pCfg->daysToKeep0, pCfg->daysToKeep1, pCfg->daysToKeep2,
|
||||
pCfg->pages, pCfg->pageSize, prec, pCfg->replications, pCfg->walLevel, pCfg->numOfVgroups,
|
||||
1 == pCfg->numOfStables);
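// Example of the regenerated statement (illustrative values; field order follows the format string
// above), now carrying STT_TRIGGER between MINROWS and KEEP:
//   CREATE DATABASE `db1` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 14400m
//   WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 1 KEEP 5256000m,5256000m,5256000m
//   PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0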
|
||||
|
||||
|
|
|
@ -570,7 +570,7 @@ typedef struct SStreamIntervalOperatorInfo {
|
|||
SWinKey delKey;
|
||||
uint64_t numOfDatapack;
|
||||
SArray* pUpdated;
|
||||
SHashObj* pUpdatedMap;
|
||||
SSHashObj* pUpdatedMap;
|
||||
} SStreamIntervalOperatorInfo;
|
||||
|
||||
typedef struct SDataGroupInfo {
|
||||
|
|
|
@ -77,8 +77,8 @@ static void toDataCacheEntry(SDataDispatchHandle* pHandle, const SInputData* pIn
|
|||
|
||||
pBuf->useSize = sizeof(SDataCacheEntry);
|
||||
pEntry->dataLen = blockEncode(pInput->pData, pEntry->data, numOfCols);
|
||||
// ASSERT(pEntry->numOfRows == *(int32_t*)(pEntry->data + 8));
|
||||
// ASSERT(pEntry->numOfCols == *(int32_t*)(pEntry->data + 8 + 4));
|
||||
// ASSERT(pEntry->numOfRows == *(int32_t*)(pEntry->data + 8));
|
||||
// ASSERT(pEntry->numOfCols == *(int32_t*)(pEntry->data + 8 + 4));
|
||||
|
||||
pBuf->useSize += pEntry->dataLen;
|
||||
|
||||
|
@ -135,7 +135,7 @@ static int32_t putDataBlock(SDataSinkHandle* pHandle, const SInputData* pInput,
|
|||
taosFreeQitem(pBuf);
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
|
||||
toDataCacheEntry(pDispatcher, pInput, pBuf);
|
||||
taosWriteQitem(pDispatcher->pDataBlocks, pBuf);
|
||||
|
||||
|
@ -162,14 +162,16 @@ static void getDataLength(SDataSinkHandle* pHandle, int64_t* pLen, bool* pQueryE
|
|||
|
||||
SDataDispatchBuf* pBuf = NULL;
|
||||
taosReadQitem(pDispatcher->pDataBlocks, (void**)&pBuf);
|
||||
memcpy(&pDispatcher->nextOutput, pBuf, sizeof(SDataDispatchBuf));
|
||||
taosFreeQitem(pBuf);
|
||||
if (pBuf != NULL) {
|
||||
memcpy(&pDispatcher->nextOutput, pBuf, sizeof(SDataDispatchBuf));
|
||||
taosFreeQitem(pBuf);
|
||||
}
|
||||
|
||||
SDataCacheEntry* pEntry = (SDataCacheEntry*)pDispatcher->nextOutput.pData;
|
||||
*pLen = pEntry->dataLen;
|
||||
|
||||
// ASSERT(pEntry->numOfRows == *(int32_t*)(pEntry->data + 8));
|
||||
// ASSERT(pEntry->numOfCols == *(int32_t*)(pEntry->data + 8 + 4));
|
||||
// ASSERT(pEntry->numOfRows == *(int32_t*)(pEntry->data + 8));
|
||||
// ASSERT(pEntry->numOfCols == *(int32_t*)(pEntry->data + 8 + 4));
|
||||
|
||||
*pQueryEnd = pDispatcher->queryEnd;
|
||||
qDebug("got data len %" PRId64 ", row num %d in sink", *pLen,
|
||||
|
@ -192,8 +194,8 @@ static int32_t getDataBlock(SDataSinkHandle* pHandle, SOutputData* pOutput) {
|
|||
pOutput->numOfCols = pEntry->numOfCols;
|
||||
pOutput->compressed = pEntry->compressed;
|
||||
|
||||
// ASSERT(pEntry->numOfRows == *(int32_t*)(pEntry->data + 8));
|
||||
// ASSERT(pEntry->numOfCols == *(int32_t*)(pEntry->data + 8 + 4));
|
||||
// ASSERT(pEntry->numOfRows == *(int32_t*)(pEntry->data + 8));
|
||||
// ASSERT(pEntry->numOfCols == *(int32_t*)(pEntry->data + 8 + 4));
|
||||
|
||||
atomic_sub_fetch_64(&pDispatcher->cachedSize, pEntry->dataLen);
|
||||
atomic_sub_fetch_64(&gDataSinkStat.cachedSize, pEntry->dataLen);
|
||||
|
|
|
@ -155,7 +155,7 @@ void initGroupedResultInfo(SGroupResInfo* pGroupResInfo, SSHashObj* pHashmap, in
|
|||
|
||||
void initMultiResInfoFromArrayList(SGroupResInfo* pGroupResInfo, SArray* pArrayList) {
|
||||
if (pGroupResInfo->pRows != NULL) {
|
||||
taosArrayDestroyP(pGroupResInfo->pRows, taosMemoryFree);
|
||||
taosArrayDestroy(pGroupResInfo->pRows);
|
||||
}
|
||||
|
||||
pGroupResInfo->pRows = pArrayList;
|
||||
|
|
|
@ -24,12 +24,16 @@
|
|||
static TdThreadOnce initPoolOnce = PTHREAD_ONCE_INIT;
|
||||
int32_t exchangeObjRefPool = -1;
|
||||
|
||||
static void initRefPool() { exchangeObjRefPool = taosOpenRef(1024, doDestroyExchangeOperatorInfo); }
|
||||
static void cleanupRefPool() {
|
||||
int32_t ref = atomic_val_compare_exchange_32(&exchangeObjRefPool, exchangeObjRefPool, 0);
|
||||
taosCloseRef(ref);
|
||||
}
|
||||
|
||||
static void initRefPool() {
|
||||
exchangeObjRefPool = taosOpenRef(1024, doDestroyExchangeOperatorInfo);
|
||||
atexit(cleanupRefPool);
|
||||
}
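// Why the atexit() call moved here (observation on this diff, not new behavior): initRefPool is
// run through taosThreadOnce(&initPoolOnce, initRefPool) in qCreateExecTask, so registering
// cleanupRefPool here installs the exit handler once per process, instead of once per
// qCreateExecTask call as the removed atexit(cleanupRefPool) line further down did.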
|
||||
|
||||
static int32_t doSetSMABlock(SOperatorInfo* pOperator, void* input, size_t numOfBlocks, int32_t type, char* id) {
|
||||
ASSERT(pOperator != NULL);
|
||||
if (pOperator->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
|
||||
|
@ -448,7 +452,6 @@ int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId,
|
|||
SExecTaskInfo** pTask = (SExecTaskInfo**)pTaskInfo;
|
||||
|
||||
taosThreadOnce(&initPoolOnce, initRefPool);
|
||||
atexit(cleanupRefPool);
|
||||
|
||||
qDebug("start to create subplan task, TID:0x%" PRIx64 " QID:0x%" PRIx64, taskId, pSubplan->id.queryId);
|
||||
|
||||
|
|
|
@ -2589,26 +2589,22 @@ int32_t buildDataBlockFromGroupRes(SOperatorInfo* pOperator, SStreamState* pStat
|
|||
int32_t numOfRows = getNumOfTotalRes(pGroupResInfo);
|
||||
|
||||
for (int32_t i = pGroupResInfo->index; i < numOfRows; i += 1) {
|
||||
SResKeyPos* pPos = taosArrayGetP(pGroupResInfo->pRows, i);
|
||||
SWinKey* pKey = taosArrayGet(pGroupResInfo->pRows, i);
|
||||
int32_t size = 0;
|
||||
void* pVal = NULL;
|
||||
SWinKey key = {
|
||||
.ts = *(TSKEY*)pPos->key,
|
||||
.groupId = pPos->groupId,
|
||||
};
|
||||
int32_t code = streamStateGet(pState, &key, &pVal, &size);
|
||||
int32_t code = streamStateGet(pState, pKey, &pVal, &size);
|
||||
ASSERT(code == 0);
|
||||
SResultRow* pRow = (SResultRow*)pVal;
|
||||
doUpdateNumOfRows(pCtx, pRow, numOfExprs, rowEntryOffset);
|
||||
// no results, continue to check the next one
|
||||
if (pRow->numOfRows == 0) {
|
||||
pGroupResInfo->index += 1;
|
||||
releaseOutputBuf(pState, &key, pRow);
|
||||
releaseOutputBuf(pState, pKey, pRow);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (pBlock->info.id.groupId == 0) {
|
||||
pBlock->info.id.groupId = pPos->groupId;
|
||||
pBlock->info.id.groupId = pKey->groupId;
|
||||
void* tbname = NULL;
|
||||
if (streamStateGetParName(pTaskInfo->streamInfo.pState, pBlock->info.id.groupId, &tbname) < 0) {
|
||||
pBlock->info.parTbName[0] = 0;
|
||||
|
@ -2618,15 +2614,15 @@ int32_t buildDataBlockFromGroupRes(SOperatorInfo* pOperator, SStreamState* pStat
|
|||
tdbFree(tbname);
|
||||
} else {
|
||||
// current value belongs to different group, it can't be packed into one datablock
|
||||
if (pBlock->info.id.groupId != pPos->groupId) {
|
||||
releaseOutputBuf(pState, &key, pRow);
|
||||
if (pBlock->info.id.groupId != pKey->groupId) {
|
||||
releaseOutputBuf(pState, pKey, pRow);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (pBlock->info.rows + pRow->numOfRows > pBlock->info.capacity) {
|
||||
ASSERT(pBlock->info.rows > 0);
|
||||
releaseOutputBuf(pState, &key, pRow);
|
||||
releaseOutputBuf(pState, pKey, pRow);
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -2656,7 +2652,7 @@ int32_t buildDataBlockFromGroupRes(SOperatorInfo* pOperator, SStreamState* pStat
|
|||
}
|
||||
|
||||
pBlock->info.rows += pRow->numOfRows;
|
||||
releaseOutputBuf(pState, &key, pRow);
|
||||
releaseOutputBuf(pState, pKey, pRow);
|
||||
}
|
||||
pBlock->info.dataLoad = 1;
|
||||
blockDataUpdateTsWindow(pBlock, 0);
|
||||
|
|
|
@ -593,8 +593,11 @@ void* getCurrentDataGroupInfo(const SPartitionOperatorInfo* pInfo, SDataGroupInf
|
|||
|
||||
int32_t pageId = 0;
|
||||
pPage = getNewBufPage(pInfo->pBuf, &pageId);
|
||||
taosArrayPush(p->pPageList, &pageId);
|
||||
if (pPage == NULL) {
|
||||
return pPage;
|
||||
}
|
||||
|
||||
taosArrayPush(p->pPageList, &pageId);
|
||||
*(int32_t*)pPage = 0;
|
||||
} else {
|
||||
int32_t* curId = taosArrayGetLast(p->pPageList);
|
||||
|
@ -612,6 +615,11 @@ void* getCurrentDataGroupInfo(const SPartitionOperatorInfo* pInfo, SDataGroupInf
|
|||
// add a new page for current group
|
||||
int32_t pageId = 0;
|
||||
pPage = getNewBufPage(pInfo->pBuf, &pageId);
|
||||
if (pPage == NULL) {
|
||||
qError("failed to get new buffer, code:%s", tstrerror(terrno));
|
||||
return NULL;
|
||||
}
|
||||
|
||||
taosArrayPush(p->pPageList, &pageId);
|
||||
memset(pPage, 0, getBufPageSize(pInfo->pBuf));
|
||||
}
|
||||
|
@ -963,7 +971,7 @@ static SSDataBlock* buildStreamPartitionResult(SOperatorInfo* pOperator) {
|
|||
void appendCreateTableRow(SStreamState* pState, SExprSupp* pTableSup, SExprSupp* pTagSup, int64_t groupId,
|
||||
SSDataBlock* pSrcBlock, int32_t rowId, SSDataBlock* pDestBlock) {
|
||||
void* pValue = NULL;
|
||||
if (groupId != 0 && streamStateGetParName(pState, groupId, &pValue) != 0) {
|
||||
if (streamStateGetParName(pState, groupId, &pValue) != 0) {
|
||||
SSDataBlock* pTmpBlock = blockCopyOneRow(pSrcBlock, rowId);
|
||||
if (pTableSup->numOfExprs > 0) {
|
||||
projectApplyFunctions(pTableSup->pExprInfo, pDestBlock, pTmpBlock, pTableSup->pCtx, pTableSup->numOfExprs, NULL);
|
||||
|
|
|
@ -1786,7 +1786,7 @@ FETCH_NEXT_BLOCK:
|
|||
int32_t current = pInfo->validBlockIndex++;
|
||||
SPackedData* pPacked = taosArrayGet(pInfo->pBlockLists, current);
|
||||
SSDataBlock* pBlock = pPacked->pDataBlock;
|
||||
if (pBlock->info.id.groupId && pBlock->info.parTbName[0]) {
|
||||
if (pBlock->info.parTbName[0]) {
|
||||
streamStatePutParName(pTaskInfo->streamInfo.pState, pBlock->info.id.groupId, pBlock->info.parTbName);
|
||||
}
|
||||
// TODO move into scan
|
||||
|
|
|
@ -66,7 +66,7 @@ typedef struct SSysTableScanInfo {
|
|||
int64_t numOfBlocks; // extract basic running information.
|
||||
SLoadRemoteDataInfo loadInfo;
|
||||
|
||||
int32_t tbnameSlotId;
|
||||
int32_t tbnameSlotId;
|
||||
} SSysTableScanInfo;
|
||||
|
||||
typedef struct {
|
||||
|
@ -81,10 +81,10 @@ typedef struct MergeIndex {
|
|||
} MergeIndex;
|
||||
|
||||
typedef struct SBlockDistInfo {
|
||||
SSDataBlock* pResBlock;
|
||||
STsdbReader* pHandle;
|
||||
SReadHandle readHandle;
|
||||
uint64_t uid; // table uid
|
||||
SSDataBlock* pResBlock;
|
||||
STsdbReader* pHandle;
|
||||
SReadHandle readHandle;
|
||||
uint64_t uid; // table uid
|
||||
} SBlockDistInfo;
|
||||
|
||||
static int32_t sysChkFilter__Comm(SNode* pNode);
|
||||
|
@ -129,20 +129,20 @@ static char* SYSTABLE_IDX_COLUMN[] = {"table_name", "db_name", "create_time"
|
|||
|
||||
static char* SYSTABLE_SPECIAL_COL[] = {"db_name", "vgroup_id"};
|
||||
|
||||
static int32_t buildSysDbTableInfo(const SSysTableScanInfo* pInfo, int32_t capacity);
|
||||
static SSDataBlock* buildInfoSchemaTableMetaBlock(char* tableName);
|
||||
static void destroySysScanOperator(void* param);
|
||||
static int32_t loadSysTableCallback(void* param, SDataBuf* pMsg, int32_t code);
|
||||
static SSDataBlock* doFilterResult(SSDataBlock* pDataBlock, SFilterInfo* pFilterInfo);
|
||||
static int32_t buildSysDbTableInfo(const SSysTableScanInfo* pInfo, int32_t capacity);
|
||||
static SSDataBlock* buildInfoSchemaTableMetaBlock(char* tableName);
|
||||
static void destroySysScanOperator(void* param);
|
||||
static int32_t loadSysTableCallback(void* param, SDataBuf* pMsg, int32_t code);
|
||||
static SSDataBlock* doFilterResult(SSDataBlock* pDataBlock, SFilterInfo* pFilterInfo);
|
||||
static __optSysFilter optSysGetFilterFunc(int32_t ctype, bool* reverse);
|
||||
|
||||
static int32_t sysTableUserTagsFillOneTableTags(const SSysTableScanInfo* pInfo, SMetaReader* smrSuperTable,
|
||||
SMetaReader* smrChildTable, const char* dbname, const char* tableName,
|
||||
int32_t* pNumOfRows, const SSDataBlock* dataBlock);
|
||||
|
||||
static int32_t sysTableUserColsFillOneTableCols(const SSysTableScanInfo* pInfo, const char* dbname,
|
||||
int32_t* pNumOfRows, const SSDataBlock* dataBlock,
|
||||
char* tName, SSchemaWrapper* schemaRow, char* tableType);
|
||||
static int32_t sysTableUserColsFillOneTableCols(const SSysTableScanInfo* pInfo, const char* dbname, int32_t* pNumOfRows,
|
||||
const SSDataBlock* dataBlock, char* tName, SSchemaWrapper* schemaRow,
|
||||
char* tableType);
|
||||
|
||||
static void relocateAndFilterSysTagsScanResult(SSysTableScanInfo* pInfo, int32_t numOfRows, SSDataBlock* dataBlock,
|
||||
SFilterInfo* pFilterInfo);
|
||||
|
@ -204,11 +204,11 @@ int32_t sysFilte__TableName(void* arg, SNode* pNode, SArray* result) {
|
|||
if (func == NULL) return -1;
|
||||
|
||||
SMetaFltParam param = {.suid = 0,
|
||||
.cid = 0,
|
||||
.type = TSDB_DATA_TYPE_VARCHAR,
|
||||
.val = pVal->datum.p,
|
||||
.reverse = reverse,
|
||||
.filterFunc = func};
|
||||
.cid = 0,
|
||||
.type = TSDB_DATA_TYPE_VARCHAR,
|
||||
.val = pVal->datum.p,
|
||||
.reverse = reverse,
|
||||
.filterFunc = func};
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
@ -223,11 +223,11 @@ int32_t sysFilte__CreateTime(void* arg, SNode* pNode, SArray* result) {
|
|||
if (func == NULL) return -1;
|
||||
|
||||
SMetaFltParam param = {.suid = 0,
|
||||
.cid = 0,
|
||||
.type = TSDB_DATA_TYPE_BIGINT,
|
||||
.val = &pVal->datum.i,
|
||||
.reverse = reverse,
|
||||
.filterFunc = func};
|
||||
.cid = 0,
|
||||
.type = TSDB_DATA_TYPE_BIGINT,
|
||||
.val = &pVal->datum.i,
|
||||
.reverse = reverse,
|
||||
.filterFunc = func};
|
||||
|
||||
int32_t ret = metaFilterCreateTime(pMeta, ¶m, result);
|
||||
return ret;
|
||||
|
@ -355,9 +355,9 @@ static int32_t optSysMergeRslt(SArray* mRslt, SArray* rslt);
|
|||
static SSDataBlock* sysTableScanFromMNode(SOperatorInfo* pOperator, SSysTableScanInfo* pInfo, const char* name,
|
||||
SExecTaskInfo* pTaskInfo);
|
||||
void extractTbnameSlotId(SSysTableScanInfo* pInfo, const SScanPhysiNode* pScanNode);
|
||||
static SSDataBlock* sysTableScanFillTbName(SOperatorInfo* pOperator, const SSysTableScanInfo* pInfo,
|
||||
const char* name, SSDataBlock* pBlock);
|
||||
__optSysFilter optSysGetFilterFunc(int32_t ctype, bool* reverse) {
|
||||
static SSDataBlock* sysTableScanFillTbName(SOperatorInfo* pOperator, const SSysTableScanInfo* pInfo, const char* name,
|
||||
SSDataBlock* pBlock);
|
||||
__optSysFilter optSysGetFilterFunc(int32_t ctype, bool* reverse) {
|
||||
if (ctype == OP_TYPE_LOWER_EQUAL || ctype == OP_TYPE_LOWER_THAN) {
|
||||
*reverse = true;
|
||||
}
|
||||
|
@ -479,13 +479,13 @@ static SSDataBlock* sysTableScanUserCols(SOperatorInfo* pOperator) {
|
|||
}
|
||||
}
|
||||
|
||||
char typeName[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0};
|
||||
SSchemaWrapper *schemaRow = NULL;
|
||||
if(smrTable.me.type == TSDB_SUPER_TABLE){
|
||||
schemaRow = &smrTable.me.stbEntry.schemaRow;
|
||||
char typeName[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0};
|
||||
SSchemaWrapper* schemaRow = NULL;
|
||||
if (smrTable.me.type == TSDB_SUPER_TABLE) {
|
||||
schemaRow = &smrTable.me.stbEntry.schemaRow;
|
||||
STR_TO_VARSTR(typeName, "CHILD_TABLE");
|
||||
}else if(smrTable.me.type == TSDB_NORMAL_TABLE){
|
||||
schemaRow = &smrTable.me.ntbEntry.schemaRow;
|
||||
} else if (smrTable.me.type == TSDB_NORMAL_TABLE) {
|
||||
schemaRow = &smrTable.me.ntbEntry.schemaRow;
|
||||
STR_TO_VARSTR(typeName, "NORMAL_TABLE");
|
||||
}
|
||||
|
||||
|
@ -507,50 +507,50 @@ static SSDataBlock* sysTableScanUserCols(SOperatorInfo* pOperator) {
|
|||
pInfo->pCur = metaOpenTbCursor(pInfo->readHandle.meta);
|
||||
}
|
||||
|
||||
SHashObj *stableSchema = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
|
||||
SHashObj* stableSchema = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
|
||||
taosHashSetFreeFp(stableSchema, tDeleteSSchemaWrapperForHash);
|
||||
while ((ret = metaTbCursorNext(pInfo->pCur, TSDB_TABLE_MAX)) == 0) {
|
||||
char typeName[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0};
|
||||
char tableName[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0};
|
||||
|
||||
SSchemaWrapper *schemaRow = NULL;
|
||||
SSchemaWrapper* schemaRow = NULL;
|
||||
|
||||
if(pInfo->pCur->mr.me.type == TSDB_SUPER_TABLE){
|
||||
if (pInfo->pCur->mr.me.type == TSDB_SUPER_TABLE) {
|
||||
qDebug("sysTableScanUserCols cursor get super table");
|
||||
void *schema = taosHashGet(stableSchema, &pInfo->pCur->mr.me.uid, sizeof(int64_t));
|
||||
if(schema == NULL){
|
||||
SSchemaWrapper *schemaWrapper = tCloneSSchemaWrapper(&pInfo->pCur->mr.me.stbEntry.schemaRow);
|
||||
void* schema = taosHashGet(stableSchema, &pInfo->pCur->mr.me.uid, sizeof(int64_t));
|
||||
if (schema == NULL) {
|
||||
SSchemaWrapper* schemaWrapper = tCloneSSchemaWrapper(&pInfo->pCur->mr.me.stbEntry.schemaRow);
|
||||
taosHashPut(stableSchema, &pInfo->pCur->mr.me.uid, sizeof(int64_t), &schemaWrapper, POINTER_BYTES);
|
||||
}
|
||||
continue;
|
||||
}else if (pInfo->pCur->mr.me.type == TSDB_CHILD_TABLE) {
|
||||
} else if (pInfo->pCur->mr.me.type == TSDB_CHILD_TABLE) {
|
||||
qDebug("sysTableScanUserCols cursor get child table");
|
||||
STR_TO_VARSTR(typeName, "CHILD_TABLE");
|
||||
STR_TO_VARSTR(tableName, pInfo->pCur->mr.me.name);
|
||||
|
||||
int64_t suid = pInfo->pCur->mr.me.ctbEntry.suid;
|
||||
void *schema = taosHashGet(stableSchema, &pInfo->pCur->mr.me.ctbEntry.suid, sizeof(int64_t));
|
||||
if(schema != NULL){
|
||||
schemaRow = *(SSchemaWrapper **)schema;
|
||||
}else{
|
||||
void* schema = taosHashGet(stableSchema, &pInfo->pCur->mr.me.ctbEntry.suid, sizeof(int64_t));
|
||||
if (schema != NULL) {
|
||||
schemaRow = *(SSchemaWrapper**)schema;
|
||||
} else {
|
||||
tDecoderClear(&pInfo->pCur->mr.coder);
|
||||
int code = metaGetTableEntryByUid(&pInfo->pCur->mr, suid);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
// terrno has been set by metaGetTableEntryByName, therefore, return directly
|
||||
qError("sysTableScanUserCols get meta by suid:%"PRId64 " error, code:%d", suid, code);
|
||||
qError("sysTableScanUserCols get meta by suid:%" PRId64 " error, code:%d", suid, code);
|
||||
blockDataDestroy(dataBlock);
|
||||
pInfo->loadInfo.totalRows = 0;
|
||||
taosHashCleanup(stableSchema);
|
||||
return NULL;
|
||||
}
|
||||
schemaRow = &pInfo->pCur->mr.me.stbEntry.schemaRow;
|
||||
schemaRow = &pInfo->pCur->mr.me.stbEntry.schemaRow;
|
||||
}
|
||||
}else if(pInfo->pCur->mr.me.type == TSDB_NORMAL_TABLE){
|
||||
} else if (pInfo->pCur->mr.me.type == TSDB_NORMAL_TABLE) {
|
||||
qDebug("sysTableScanUserCols cursor get normal table");
|
||||
schemaRow = &pInfo->pCur->mr.me.ntbEntry.schemaRow;
|
||||
schemaRow = &pInfo->pCur->mr.me.ntbEntry.schemaRow;
|
||||
STR_TO_VARSTR(typeName, "NORMAL_TABLE");
|
||||
STR_TO_VARSTR(tableName, pInfo->pCur->mr.me.name);
|
||||
}else{
|
||||
} else {
|
||||
qDebug("sysTableScanUserCols cursor get invalid table");
|
||||
continue;
|
||||
}
|
||||
|
@ -665,6 +665,7 @@ static SSDataBlock* sysTableScanUserTags(SOperatorInfo* pOperator) {
|
|||
pInfo->pCur = metaOpenTbCursor(pInfo->readHandle.meta);
|
||||
}
|
||||
|
||||
bool blockFull = false;
|
||||
while ((ret = metaTbCursorNext(pInfo->pCur, TSDB_SUPER_TABLE)) == 0) {
|
||||
if (pInfo->pCur->mr.me.type != TSDB_CHILD_TABLE) {
|
||||
continue;
|
||||
|
@ -686,17 +687,25 @@ static SSDataBlock* sysTableScanUserTags(SOperatorInfo* pOperator) {
|
|||
T_LONG_JMP(pTaskInfo->env, terrno);
|
||||
}
|
||||
|
||||
sysTableUserTagsFillOneTableTags(pInfo, &smrSuperTable, &pInfo->pCur->mr, dbname, tableName, &numOfRows, dataBlock);
|
||||
if ((smrSuperTable.me.stbEntry.schemaTag.nCols + numOfRows) > pOperator->resultInfo.capacity) {
|
||||
metaTbCursorPrev(pInfo->pCur);
|
||||
blockFull = true;
|
||||
} else {
|
||||
sysTableUserTagsFillOneTableTags(pInfo, &smrSuperTable, &pInfo->pCur->mr, dbname, tableName, &numOfRows,
|
||||
dataBlock);
|
||||
}
|
||||
|
||||
metaReaderClear(&smrSuperTable);
|
||||
|
||||
if (numOfRows >= pOperator->resultInfo.capacity) {
|
||||
if (blockFull || numOfRows >= pOperator->resultInfo.capacity) {
|
||||
relocateAndFilterSysTagsScanResult(pInfo, numOfRows, dataBlock, pOperator->exprSupp.pFilterInfo);
|
||||
numOfRows = 0;
|
||||
|
||||
if (pInfo->pRes->info.rows > 0) {
|
||||
break;
|
||||
}
|
||||
|
||||
blockFull = false;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -902,10 +911,10 @@ static int32_t sysTableUserTagsFillOneTableTags(const SSysTableScanInfo* pInfo,
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static int32_t sysTableUserColsFillOneTableCols(const SSysTableScanInfo* pInfo, const char* dbname,
|
||||
int32_t* pNumOfRows, const SSDataBlock* dataBlock, char* tName,
|
||||
SSchemaWrapper* schemaRow, char* tableType) {
|
||||
if(schemaRow == NULL){
|
||||
static int32_t sysTableUserColsFillOneTableCols(const SSysTableScanInfo* pInfo, const char* dbname, int32_t* pNumOfRows,
|
||||
const SSDataBlock* dataBlock, char* tName, SSchemaWrapper* schemaRow,
|
||||
char* tableType) {
|
||||
if (schemaRow == NULL) {
|
||||
qError("sysTableUserColsFillOneTableCols schemaRow is NULL");
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
@ -941,9 +950,8 @@ static int32_t sysTableUserColsFillOneTableCols(const SSysTableScanInfo* pInfo,
|
|||
colTypeLen += sprintf(varDataVal(colTypeStr) + colTypeLen, "(%d)",
|
||||
(int32_t)(schemaRow->pSchema[i].bytes - VARSTR_HEADER_SIZE));
|
||||
} else if (colType == TSDB_DATA_TYPE_NCHAR) {
|
||||
colTypeLen += sprintf(
|
||||
varDataVal(colTypeStr) + colTypeLen, "(%d)",
|
||||
(int32_t)((schemaRow->pSchema[i].bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE));
|
||||
colTypeLen += sprintf(varDataVal(colTypeStr) + colTypeLen, "(%d)",
|
||||
(int32_t)((schemaRow->pSchema[i].bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE));
|
||||
}
|
||||
varDataSetLen(colTypeStr, colTypeLen);
|
||||
colDataAppend(pColInfoData, numOfRows, (char*)colTypeStr, false);
|
||||
|
@ -1550,9 +1558,9 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) {
|
|||
if (pInfo->showRewrite) {
|
||||
getDBNameFromCondition(pInfo->pCondition, dbName);
|
||||
sprintf(pInfo->req.db, "%d.%s", pInfo->accountId, dbName);
|
||||
}else if(strncasecmp(name, TSDB_INS_TABLE_COLS, TSDB_TABLE_FNAME_LEN) == 0){
|
||||
} else if (strncasecmp(name, TSDB_INS_TABLE_COLS, TSDB_TABLE_FNAME_LEN) == 0) {
|
||||
getDBNameFromCondition(pInfo->pCondition, dbName);
|
||||
if(dbName[0]) sprintf(pInfo->req.db, "%d.%s", pInfo->accountId, dbName);
|
||||
if (dbName[0]) sprintf(pInfo->req.db, "%d.%s", pInfo->accountId, dbName);
|
||||
sysTableIsCondOnOneTable(pInfo->pCondition, pInfo->req.filterTb);
|
||||
}
|
||||
|
||||
|
@ -1573,12 +1581,12 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) {
|
|||
return sysTableScanFillTbName(pOperator, pInfo, name, pBlock);
|
||||
}
|
||||
|
||||
static SSDataBlock* sysTableScanFillTbName(SOperatorInfo* pOperator, const SSysTableScanInfo* pInfo,
|
||||
const char* name, SSDataBlock* pBlock) {
|
||||
static SSDataBlock* sysTableScanFillTbName(SOperatorInfo* pOperator, const SSysTableScanInfo* pInfo, const char* name,
|
||||
SSDataBlock* pBlock) {
|
||||
if (pBlock != NULL) {
|
||||
if (pInfo->tbnameSlotId != -1) {
|
||||
SColumnInfoData* pColumnInfoData = (SColumnInfoData*)taosArrayGet(pBlock->pDataBlock, pInfo->tbnameSlotId);
|
||||
char varTbName[TSDB_TABLE_FNAME_LEN - 1 + VARSTR_HEADER_SIZE] = {0};
|
||||
char varTbName[TSDB_TABLE_FNAME_LEN - 1 + VARSTR_HEADER_SIZE] = {0};
|
||||
memcpy(varDataVal(varTbName), name, strlen(name));
|
||||
varDataSetLen(varTbName, strlen(name));
|
||||
for (int i = 0; i < pBlock->info.rows; ++i) {
|
||||
|
@ -1669,7 +1677,7 @@ static SSDataBlock* sysTableScanFromMNode(SOperatorInfo* pOperator, SSysTableSca
|
|||
|
||||
SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSystemTableScanPhysiNode* pScanPhyNode,
|
||||
const char* pUser, SExecTaskInfo* pTaskInfo) {
|
||||
int32_t code = TDB_CODE_SUCCESS;
|
||||
int32_t code = TDB_CODE_SUCCESS;
|
||||
SSysTableScanInfo* pInfo = taosMemoryCalloc(1, sizeof(SSysTableScanInfo));
|
||||
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
|
||||
if (pInfo == NULL || pOperator == NULL) {
|
||||
|
@ -1717,10 +1725,11 @@ SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSystemTableScan
|
|||
setOperatorInfo(pOperator, "SysTableScanOperator", QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN, false, OP_NOT_OPENED,
|
||||
pInfo, pTaskInfo);
|
||||
pOperator->exprSupp.numOfExprs = taosArrayGetSize(pInfo->pRes->pDataBlock);
|
||||
pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, doSysTableScan, NULL, destroySysScanOperator, optrDefaultBufFn, NULL);
|
||||
pOperator->fpSet =
|
||||
createOperatorFpSet(optrDummyOpenFn, doSysTableScan, NULL, destroySysScanOperator, optrDefaultBufFn, NULL);
|
||||
return pOperator;
|
||||
|
||||
_error:
|
||||
_error:
|
||||
if (pInfo != NULL) {
|
||||
destroySysScanOperator(pInfo);
|
||||
}
|
||||
|
@ -1757,7 +1766,7 @@ void destroySysScanOperator(void* param) {
|
|||
const char* name = tNameGetTableName(&pInfo->name);
|
||||
if (strncasecmp(name, TSDB_INS_TABLE_TABLES, TSDB_TABLE_FNAME_LEN) == 0 ||
|
||||
strncasecmp(name, TSDB_INS_TABLE_TAGS, TSDB_TABLE_FNAME_LEN) == 0 ||
|
||||
strncasecmp(name, TSDB_INS_TABLE_COLS, TSDB_TABLE_FNAME_LEN) == 0|| pInfo->pCur != NULL) {
|
||||
strncasecmp(name, TSDB_INS_TABLE_COLS, TSDB_TABLE_FNAME_LEN) == 0 || pInfo->pCur != NULL) {
|
||||
metaCloseTbCursor(pInfo->pCur);
|
||||
pInfo->pCur = NULL;
|
||||
}
|
||||
|
@ -2165,7 +2174,7 @@ static SSDataBlock* doBlockInfoScan(SOperatorInfo* pOperator) {
|
|||
// make the valgrind happy that all memory buffer has been initialized already.
|
||||
if (slotId != 0) {
|
||||
SColumnInfoData* p1 = taosArrayGet(pBlock->pDataBlock, 0);
|
||||
int64_t v = 0;
|
||||
int64_t v = 0;
|
||||
colDataAppendInt64(p1, 0, &v);
|
||||
}
|
||||
|
||||
|
@ -2175,10 +2184,10 @@ static SSDataBlock* doBlockInfoScan(SOperatorInfo* pOperator) {
|
|||
}
|
||||
|
||||
static void destroyBlockDistScanOperatorInfo(void* param) {
|
||||
SBlockDistInfo* pDistInfo = (SBlockDistInfo*)param;
|
||||
blockDataDestroy(pDistInfo->pResBlock);
|
||||
tsdbReaderClose(pDistInfo->pHandle);
|
||||
taosMemoryFreeClear(param);
|
||||
SBlockDistInfo* pDistInfo = (SBlockDistInfo*)param;
|
||||
blockDataDestroy(pDistInfo->pResBlock);
|
||||
tsdbReaderClose(pDistInfo->pHandle);
|
||||
taosMemoryFreeClear(param);
|
||||
}
|
||||
|
||||
static int32_t initTableblockDistQueryCond(uint64_t uid, SQueryTableDataCond* pCond) {
|
||||
|
@ -2250,8 +2259,8 @@ SOperatorInfo* createDataBlockInfoScanOperator(SReadHandle* readHandle, SBlockDi
|
|||
|
||||
setOperatorInfo(pOperator, "DataBlockDistScanOperator", QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN, false,
|
||||
OP_NOT_OPENED, pInfo, pTaskInfo);
|
||||
pOperator->fpSet =
|
||||
createOperatorFpSet(optrDummyOpenFn, doBlockInfoScan, NULL, destroyBlockDistScanOperatorInfo, optrDefaultBufFn, NULL);
|
||||
pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, doBlockInfoScan, NULL, destroyBlockDistScanOperatorInfo,
|
||||
optrDefaultBufFn, NULL);
|
||||
return pOperator;
|
||||
|
||||
_error:
|
||||
|
|
|
@ -639,7 +639,7 @@ static void doInterpUnclosedTimeWindow(SOperatorInfo* pOperatorInfo, int32_t num
|
|||
if (NULL == pr) {
|
||||
T_LONG_JMP(pTaskInfo->env, terrno);
|
||||
}
|
||||
|
||||
|
||||
ASSERT(pr->offset == p1->offset && pr->pageId == p1->pageId);
|
||||
|
||||
if (pr->closed) {
|
||||
|
@@ -842,68 +842,61 @@ static int32_t saveResult(SResultWindowInfo winInfo, SSHashObj* pStUpdated) {
  return tSimpleHashPut(pStUpdated, &winInfo.sessionWin, sizeof(SSessionKey), &winInfo, sizeof(SResultWindowInfo));
}

static int32_t saveWinResult(int64_t ts, int32_t pageId, int32_t offset, uint64_t groupId, SHashObj* pUpdatedMap) {
  SResKeyPos* newPos = taosMemoryMalloc(sizeof(SResKeyPos) + sizeof(uint64_t));
  if (newPos == NULL) {
    return TSDB_CODE_OUT_OF_MEMORY;
  }
  newPos->groupId = groupId;
  newPos->pos = (SResultRowPosition){.pageId = pageId, .offset = offset};
  *(int64_t*)newPos->key = ts;
static int32_t saveWinResult(int64_t ts, uint64_t groupId, SSHashObj* pUpdatedMap) {
  SWinKey key = {.ts = ts, .groupId = groupId};
  if (taosHashPut(pUpdatedMap, &key, sizeof(SWinKey), &newPos, sizeof(void*)) != TSDB_CODE_SUCCESS) {
    taosMemoryFree(newPos);
  }
  tSimpleHashPut(pUpdatedMap, &key, sizeof(SWinKey), NULL, 0);
  return TSDB_CODE_SUCCESS;
}

static int32_t saveWinResultInfo(TSKEY ts, uint64_t groupId, SHashObj* pUpdatedMap) {
  return saveWinResult(ts, -1, -1, groupId, pUpdatedMap);
static int32_t saveWinResultInfo(TSKEY ts, uint64_t groupId, SSHashObj* pUpdatedMap) {
  return saveWinResult(ts, groupId, pUpdatedMap);
}

static void removeResults(SArray* pWins, SHashObj* pUpdatedMap) {
static void removeResults(SArray* pWins, SSHashObj* pUpdatedMap) {
  int32_t size = taosArrayGetSize(pWins);
  for (int32_t i = 0; i < size; i++) {
    SWinKey* pW = taosArrayGet(pWins, i);
    void* tmp = taosHashGet(pUpdatedMap, pW, sizeof(SWinKey));
    void* tmp = tSimpleHashGet(pUpdatedMap, pW, sizeof(SWinKey));
    if (tmp) {
      void* value = *(void**)tmp;
      taosMemoryFree(value);
      taosHashRemove(pUpdatedMap, pW, sizeof(SWinKey));
      tSimpleHashRemove(pUpdatedMap, pW, sizeof(SWinKey));
    }
  }
}

int32_t compareWinRes(void* pKey, void* data, int32_t index) {
  SArray* res = (SArray*)data;
  SWinKey* pDataPos = taosArrayGet(res, index);
  SResKeyPos* pRKey = (SResKeyPos*)pKey;
  if (pRKey->groupId > pDataPos->groupId) {
int32_t compareWinKey(void* pKey, void* data, int32_t index) {
  SArray* res = (SArray*)data;
  SWinKey* pDataPos = taosArrayGet(res, index);
  SWinKey* pWKey = (SWinKey*)pKey;

  if (pWKey->groupId > pDataPos->groupId) {
    return 1;
  } else if (pRKey->groupId < pDataPos->groupId) {
  } else if (pWKey->groupId < pDataPos->groupId) {
    return -1;
  }

  if (*(int64_t*)pRKey->key > pDataPos->ts) {
  if (pWKey->ts > pDataPos->ts) {
    return 1;
  } else if (*(int64_t*)pRKey->key < pDataPos->ts) {
  } else if (pWKey->ts < pDataPos->ts) {
    return -1;
  }
  return 0;
}

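For reference, compareWinKey above orders window keys by groupId first and by ts second, which is the ordering binarySearchCom relies on when it probes pDelWins below. A standalone sketch of the same ordering rule, using plain C types rather than the TDengine SWinKey struct:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for SWinKey, for illustration only. */
typedef struct {
  int64_t  ts;
  uint64_t groupId;
} WinKey;

/* Same rule as compareWinKey: compare group id first, timestamp second. */
static int compareKey(const WinKey* a, const WinKey* b) {
  if (a->groupId > b->groupId) return 1;
  if (a->groupId < b->groupId) return -1;
  if (a->ts > b->ts) return 1;
  if (a->ts < b->ts) return -1;
  return 0;
}

int main(void) {
  WinKey k1 = {.ts = 1000, .groupId = 2};
  WinKey k2 = {.ts = 5000, .groupId = 1};
  /* group 2 > group 1, so k1 sorts after k2 even though its ts is smaller */
  printf("compareKey(k1, k2) = %d\n", compareKey(&k1, &k2));
  return 0;
}
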
static void removeDeleteResults(SHashObj* pUpdatedMap, SArray* pDelWins) {
static void removeDeleteResults(SSHashObj* pUpdatedMap, SArray* pDelWins) {
  taosArraySort(pDelWins, winKeyCmprImpl);
  taosArrayRemoveDuplicate(pDelWins, winKeyCmprImpl, NULL);
  int32_t delSize = taosArrayGetSize(pDelWins);
  if (taosHashGetSize(pUpdatedMap) == 0 || delSize == 0) {
  if (tSimpleHashGetSize(pUpdatedMap) == 0 || delSize == 0) {
    return;
  }
  void* pIte = NULL;
  while ((pIte = taosHashIterate(pUpdatedMap, pIte)) != NULL) {
    SResKeyPos* pResKey = *(SResKeyPos**)pIte;
    int32_t index = binarySearchCom(pDelWins, delSize, pResKey, TSDB_ORDER_DESC, compareWinRes);
    if (index >= 0 && 0 == compareWinRes(pResKey, pDelWins, index)) {
  void* pIte = NULL;
  int32_t iter = 0;
  while ((pIte = tSimpleHashIterate(pUpdatedMap, pIte, &iter)) != NULL) {
    SWinKey* pResKey = tSimpleHashGetKey(pIte, NULL);
    int32_t index = binarySearchCom(pDelWins, delSize, pResKey, TSDB_ORDER_DESC, compareWinKey);
    if (index >= 0 && 0 == compareWinKey(pResKey, pDelWins, index)) {
      taosArrayRemove(pDelWins, index);
      delSize = taosArrayGetSize(pDelWins);
    }
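The hunks above move the per-batch updated-window map from SHashObj (taosHash*) to the lighter SSHashObj (tSimpleHash*): entries become key-only, iteration carries an explicit iterator index, and lookups and removals go through the tSimpleHash* calls. A minimal sketch of that usage pattern, built only from calls that appear in this diff; it assumes the TDengine tsimplehash and array headers and is not intended to compile outside the source tree:

// Sketch: gather updated window keys into pUpdated, then release the map.
static void collectUpdatedWindows(SArray* pUpdated, const SWinKey* pKeys, int32_t num) {
  _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
  SSHashObj* pUpdatedMap = tSimpleHashInit(1024, hashFn);

  for (int32_t i = 0; i < num; ++i) {
    // key-only entry: the map acts as a set of SWinKey
    tSimpleHashPut(pUpdatedMap, &pKeys[i], sizeof(SWinKey), NULL, 0);
  }

  void*   pIte = NULL;
  int32_t iter = 0;
  while ((pIte = tSimpleHashIterate(pUpdatedMap, pIte, &iter)) != NULL) {
    SWinKey* pKey = tSimpleHashGetKey(pIte, NULL);
    taosArrayPush(pUpdated, pKey);
  }

  taosArraySort(pUpdated, winKeyCmprImpl);
  tSimpleHashCleanup(pUpdatedMap);
}
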
@ -1318,11 +1311,11 @@ static void setInverFunction(SqlFunctionCtx* pCtx, int32_t num, EStreamType type
|
|||
}
|
||||
|
||||
static void doClearWindowImpl(SResultRowPosition* p1, SDiskbasedBuf* pResultBuf, SExprSupp* pSup, int32_t numOfOutput) {
|
||||
SResultRow* pResult = getResultRowByPos(pResultBuf, p1, false);
|
||||
SResultRow* pResult = getResultRowByPos(pResultBuf, p1, false);
|
||||
if (NULL == pResult) {
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
SqlFunctionCtx* pCtx = pSup->pCtx;
|
||||
for (int32_t i = 0; i < numOfOutput; ++i) {
|
||||
pCtx[i].resultInfo = getResultEntryInfo(pResult, i, pSup->rowEntryInfoOffset);
|
||||
|
@ -1352,7 +1345,7 @@ static bool doDeleteWindow(SOperatorInfo* pOperator, TSKEY ts, uint64_t groupId)
|
|||
}
|
||||
|
||||
static void doDeleteWindows(SOperatorInfo* pOperator, SInterval* pInterval, SSDataBlock* pBlock, SArray* pUpWins,
|
||||
SHashObj* pUpdatedMap) {
|
||||
SSHashObj* pUpdatedMap) {
|
||||
SStreamIntervalOperatorInfo* pInfo = pOperator->info;
|
||||
SColumnInfoData* pStartTsCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX);
|
||||
TSKEY* startTsCols = (TSKEY*)pStartTsCol->pData;
|
||||
|
@ -1388,28 +1381,21 @@ static void doDeleteWindows(SOperatorInfo* pOperator, SInterval* pInterval, SSDa
|
|||
taosArrayPush(pUpWins, &winRes);
|
||||
}
|
||||
if (pUpdatedMap) {
|
||||
void* tmp = taosHashGet(pUpdatedMap, &winRes, sizeof(SWinKey));
|
||||
if (tmp) {
|
||||
void* value = *(void**)tmp;
|
||||
taosMemoryFree(value);
|
||||
taosHashRemove(pUpdatedMap, &winRes, sizeof(SWinKey));
|
||||
}
|
||||
tSimpleHashRemove(pUpdatedMap, &winRes, sizeof(SWinKey));
|
||||
}
|
||||
getNextTimeWindow(pInterval, pInterval->precision, TSDB_ORDER_ASC, &win);
|
||||
} while (win.ekey <= endTsCols[i]);
|
||||
}
|
||||
}
|
||||
|
||||
static int32_t getAllIntervalWindow(SSHashObj* pHashMap, SHashObj* resWins) {
|
||||
static int32_t getAllIntervalWindow(SSHashObj* pHashMap, SSHashObj* resWins) {
|
||||
void* pIte = NULL;
|
||||
size_t keyLen = 0;
|
||||
int32_t iter = 0;
|
||||
while ((pIte = tSimpleHashIterate(pHashMap, pIte, &iter)) != NULL) {
|
||||
void* key = tSimpleHashGetKey(pIte, &keyLen);
|
||||
uint64_t groupId = *(uint64_t*)key;
|
||||
TSKEY ts = *(int64_t*)((char*)key + sizeof(uint64_t));
|
||||
SResultRowPosition* pPos = (SResultRowPosition*)pIte;
|
||||
int32_t code = saveWinResult(ts, pPos->pageId, pPos->offset, groupId, resWins);
|
||||
SWinKey* pKey = tSimpleHashGetKey(pIte, NULL);
|
||||
uint64_t groupId = pKey->groupId;
|
||||
TSKEY ts = pKey->ts;
|
||||
int32_t code = saveWinResult(ts, groupId, resWins);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
|
@ -1417,36 +1403,16 @@ static int32_t getAllIntervalWindow(SSHashObj* pHashMap, SHashObj* resWins) {
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t compareWinKey(void* pKey, void* data, int32_t index) {
|
||||
SArray* res = (SArray*)data;
|
||||
SWinKey* pDataPos = taosArrayGet(res, index);
|
||||
SWinKey* pWKey = (SWinKey*)pKey;
|
||||
|
||||
if (pWKey->groupId > pDataPos->groupId) {
|
||||
return 1;
|
||||
} else if (pWKey->groupId < pDataPos->groupId) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (pWKey->ts > pDataPos->ts) {
|
||||
return 1;
|
||||
} else if (pWKey->ts < pDataPos->ts) {
|
||||
return -1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int32_t closeStreamIntervalWindow(SSHashObj* pHashMap, STimeWindowAggSupp* pTwSup, SInterval* pInterval,
|
||||
SHashObj* pPullDataMap, SHashObj* closeWins, SArray* pDelWins,
|
||||
SHashObj* pPullDataMap, SSHashObj* closeWins, SArray* pDelWins,
|
||||
SOperatorInfo* pOperator) {
|
||||
qDebug("===stream===close interval window");
|
||||
void* pIte = NULL;
|
||||
size_t keyLen = 0;
|
||||
int32_t iter = 0;
|
||||
SStreamIntervalOperatorInfo* pInfo = pOperator->info;
|
||||
int32_t delSize = taosArrayGetSize(pDelWins);
|
||||
while ((pIte = tSimpleHashIterate(pHashMap, pIte, &iter)) != NULL) {
|
||||
void* key = tSimpleHashGetKey(pIte, &keyLen);
|
||||
void* key = tSimpleHashGetKey(pIte, NULL);
|
||||
SWinKey* pWinKey = (SWinKey*)key;
|
||||
if (delSize > 0) {
|
||||
int32_t index = binarySearchCom(pDelWins, delSize, pWinKey, TSDB_ORDER_DESC, compareWinKey);
|
||||
|
@ -1648,7 +1614,7 @@ void destroyStreamFinalIntervalOperatorInfo(void* param) {
|
|||
}
|
||||
nodesDestroyNode((SNode*)pInfo->pPhyNode);
|
||||
colDataDestroy(&pInfo->twAggSup.timeWindowData);
|
||||
cleanupGroupResInfo(&pInfo->groupResInfo);
|
||||
pInfo->groupResInfo.pRows = taosArrayDestroy(pInfo->groupResInfo.pRows);
|
||||
cleanupExprSupp(&pInfo->scalarSupp);
|
||||
|
||||
taosMemoryFreeClear(param);
|
||||
|
@ -2157,7 +2123,7 @@ bool hasIntervalWindow(SStreamState* pState, SWinKey* pKey) {
|
|||
return TSDB_CODE_SUCCESS == streamStateGet(pState, pKey, NULL, 0);
|
||||
}
|
||||
|
||||
static void rebuildIntervalWindow(SOperatorInfo* pOperator, SArray* pWinArray, SHashObj* pUpdatedMap) {
|
||||
static void rebuildIntervalWindow(SOperatorInfo* pOperator, SArray* pWinArray, SSHashObj* pUpdatedMap) {
|
||||
SStreamIntervalOperatorInfo* pInfo = pOperator->info;
|
||||
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
|
||||
int32_t size = taosArrayGetSize(pWinArray);
|
||||
|
@ -2343,7 +2309,8 @@ static void clearFunctionContext(SExprSupp* pSup) {
|
|||
}
|
||||
}
|
||||
|
||||
void doBuildResult(SOperatorInfo* pOperator, SStreamState* pState, SSDataBlock* pBlock, SGroupResInfo* pGroupResInfo) {
|
||||
void doBuildStreamIntervalResult(SOperatorInfo* pOperator, SStreamState* pState, SSDataBlock* pBlock,
|
||||
SGroupResInfo* pGroupResInfo) {
|
||||
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
|
||||
// set output datablock version
|
||||
pBlock->info.version = pTaskInfo->version;
|
||||
|
@ -2370,7 +2337,7 @@ static int32_t getNextQualifiedFinalWindow(SInterval* pInterval, STimeWindow* pN
|
|||
}
|
||||
|
||||
static void doStreamIntervalAggImpl(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBlock, uint64_t groupId,
|
||||
SHashObj* pUpdatedMap) {
|
||||
SSHashObj* pUpdatedMap) {
|
||||
SStreamIntervalOperatorInfo* pInfo = (SStreamIntervalOperatorInfo*)pOperatorInfo->info;
|
||||
|
||||
SResultRowInfo* pResultRowInfo = &(pInfo->binfo.resultRowInfo);
|
||||
|
@ -2516,7 +2483,7 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
|
|||
return pInfo->pDelRes;
|
||||
}
|
||||
|
||||
doBuildResult(pOperator, pInfo->pState, pInfo->binfo.pRes, &pInfo->groupResInfo);
|
||||
doBuildStreamIntervalResult(pOperator, pInfo->pState, pInfo->binfo.pRes, &pInfo->groupResInfo);
|
||||
if (pInfo->binfo.pRes->info.rows != 0) {
|
||||
printDataBlock(pInfo->binfo.pRes, IS_FINAL_OP(pInfo) ? "interval final" : "interval semi");
|
||||
return pInfo->binfo.pRes;
|
||||
|
@ -2543,7 +2510,7 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
|
|||
return pInfo->pDelRes;
|
||||
}
|
||||
|
||||
doBuildResult(pOperator, pInfo->pState, pInfo->binfo.pRes, &pInfo->groupResInfo);
|
||||
doBuildStreamIntervalResult(pOperator, pInfo->pState, pInfo->binfo.pRes, &pInfo->groupResInfo);
|
||||
if (pInfo->binfo.pRes->info.rows != 0) {
|
||||
printDataBlock(pInfo->binfo.pRes, IS_FINAL_OP(pInfo) ? "interval final" : "interval semi");
|
||||
return pInfo->binfo.pRes;
|
||||
|
@ -2552,11 +2519,11 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
|
|||
}
|
||||
|
||||
if (!pInfo->pUpdated) {
|
||||
pInfo->pUpdated = taosArrayInit(4, POINTER_BYTES);
|
||||
pInfo->pUpdated = taosArrayInit(4, sizeof(SWinKey));
|
||||
}
|
||||
if (!pInfo->pUpdatedMap) {
|
||||
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
|
||||
pInfo->pUpdatedMap = taosHashInit(1024, hashFn, false, HASH_NO_LOCK);
|
||||
pInfo->pUpdatedMap = tSimpleHashInit(1024, hashFn);
|
||||
}
|
||||
|
||||
while (1) {
|
||||
|
@ -2649,13 +2616,14 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
|
|||
}
|
||||
pInfo->binfo.pRes->info.watermark = pInfo->twAggSup.maxTs;
|
||||
|
||||
void* pIte = NULL;
|
||||
while ((pIte = taosHashIterate(pInfo->pUpdatedMap, pIte)) != NULL) {
|
||||
void* pIte = NULL;
|
||||
int32_t iter = 0;
|
||||
while ((pIte = tSimpleHashIterate(pInfo->pUpdatedMap, pIte, &iter)) != NULL) {
|
||||
taosArrayPush(pInfo->pUpdated, pIte);
|
||||
}
|
||||
taosHashCleanup(pInfo->pUpdatedMap);
|
||||
tSimpleHashCleanup(pInfo->pUpdatedMap);
|
||||
pInfo->pUpdatedMap = NULL;
|
||||
taosArraySort(pInfo->pUpdated, resultrowComparAsc);
|
||||
taosArraySort(pInfo->pUpdated, winKeyCmprImpl);
|
||||
|
||||
initMultiResInfoFromArrayList(&pInfo->groupResInfo, pInfo->pUpdated);
|
||||
pInfo->pUpdated = NULL;
|
||||
|
@ -2675,7 +2643,7 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
|
|||
return pInfo->pDelRes;
|
||||
}
|
||||
|
||||
doBuildResult(pOperator, pInfo->pState, pInfo->binfo.pRes, &pInfo->groupResInfo);
|
||||
doBuildStreamIntervalResult(pOperator, pInfo->pState, pInfo->binfo.pRes, &pInfo->groupResInfo);
|
||||
if (pInfo->binfo.pRes->info.rows != 0) {
|
||||
printDataBlock(pInfo->binfo.pRes, IS_FINAL_OP(pInfo) ? "interval final" : "interval semi");
|
||||
return pInfo->binfo.pRes;
|
||||
|
@ -3239,10 +3207,9 @@ static inline int32_t sessionKeyCompareAsc(const void* pKey1, const void* pKey2)
|
|||
|
||||
static int32_t copyUpdateResult(SSHashObj* pStUpdated, SArray* pUpdated) {
|
||||
void* pIte = NULL;
|
||||
size_t keyLen = 0;
|
||||
int32_t iter = 0;
|
||||
while ((pIte = tSimpleHashIterate(pStUpdated, pIte, &iter)) != NULL) {
|
||||
void* key = tSimpleHashGetKey(pIte, &keyLen);
|
||||
void* key = tSimpleHashGetKey(pIte, NULL);
|
||||
taosArrayPush(pUpdated, key);
|
||||
}
|
||||
taosArraySort(pUpdated, sessionKeyCompareAsc);
|
||||
|
@ -3256,13 +3223,12 @@ void doBuildDeleteDataBlock(SOperatorInfo* pOp, SSHashObj* pStDeleted, SSDataBlo
|
|||
return;
|
||||
}
|
||||
blockDataEnsureCapacity(pBlock, size);
|
||||
size_t keyLen = 0;
|
||||
int32_t iter = 0;
|
||||
while (((*Ite) = tSimpleHashIterate(pStDeleted, *Ite, &iter)) != NULL) {
|
||||
if (pBlock->info.rows + 1 > pBlock->info.capacity) {
|
||||
break;
|
||||
}
|
||||
SSessionKey* res = tSimpleHashGetKey(*Ite, &keyLen);
|
||||
SSessionKey* res = tSimpleHashGetKey(*Ite, NULL);
|
||||
SColumnInfoData* pStartTsCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX);
|
||||
colDataAppend(pStartTsCol, pBlock->info.rows, (const char*)&res->win.skey, false);
|
||||
SColumnInfoData* pEndTsCol = taosArrayGet(pBlock->pDataBlock, END_TS_COLUMN_INDEX);
|
||||
|
@ -3351,7 +3317,6 @@ static void rebuildSessionWindow(SOperatorInfo* pOperator, SArray* pWinArray, SS
|
|||
|
||||
int32_t closeSessionWindow(SSHashObj* pHashMap, STimeWindowAggSupp* pTwSup, SSHashObj* pClosed) {
|
||||
void* pIte = NULL;
|
||||
size_t keyLen = 0;
|
||||
int32_t iter = 0;
|
||||
while ((pIte = tSimpleHashIterate(pHashMap, pIte, &iter)) != NULL) {
|
||||
SResultWindowInfo* pWinInfo = pIte;
|
||||
|
@ -3362,7 +3327,7 @@ int32_t closeSessionWindow(SSHashObj* pHashMap, STimeWindowAggSupp* pTwSup, SSHa
|
|||
return code;
|
||||
}
|
||||
}
|
||||
SSessionKey* pKey = tSimpleHashGetKey(pIte, &keyLen);
|
||||
SSessionKey* pKey = tSimpleHashGetKey(pIte, NULL);
|
||||
tSimpleHashIterateRemove(pHashMap, pKey, sizeof(SSessionKey), &pIte, &iter);
|
||||
}
|
||||
}
|
||||
|
@ -3451,7 +3416,7 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) {
|
|||
pInfo->pUpdated = taosArrayInit(16, sizeof(SSessionKey));
|
||||
}
|
||||
if (!pInfo->pStUpdated) {
|
||||
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
|
||||
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
|
||||
pInfo->pStUpdated = tSimpleHashInit(64, hashFn);
|
||||
}
|
||||
while (1) {
|
||||
|
@ -3675,7 +3640,7 @@ static SSDataBlock* doStreamSessionSemiAgg(SOperatorInfo* pOperator) {
|
|||
pInfo->pUpdated = taosArrayInit(16, sizeof(SSessionKey));
|
||||
}
|
||||
if (!pInfo->pStUpdated) {
|
||||
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
|
||||
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
|
||||
pInfo->pStUpdated = tSimpleHashInit(64, hashFn);
|
||||
}
|
||||
while (1) {
|
||||
|
@ -4006,7 +3971,7 @@ static SSDataBlock* doStreamStateAgg(SOperatorInfo* pOperator) {
|
|||
pInfo->pUpdated = taosArrayInit(16, sizeof(SSessionKey));
|
||||
}
|
||||
if (!pInfo->pSeUpdated) {
|
||||
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
|
||||
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
|
||||
pInfo->pSeUpdated = tSimpleHashInit(64, hashFn);
|
||||
}
|
||||
while (1) {
|
||||
|
@ -4761,7 +4726,7 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
|
|||
return pInfo->pDelRes;
|
||||
}
|
||||
|
||||
doBuildResult(pOperator, pInfo->pState, pInfo->binfo.pRes, &pInfo->groupResInfo);
|
||||
doBuildStreamIntervalResult(pOperator, pInfo->pState, pInfo->binfo.pRes, &pInfo->groupResInfo);
|
||||
if (pInfo->binfo.pRes->info.rows > 0) {
|
||||
printDataBlock(pInfo->binfo.pRes, "single interval");
|
||||
return pInfo->binfo.pRes;
|
||||
|
@ -4776,14 +4741,13 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
|
|||
SOperatorInfo* downstream = pOperator->pDownstream[0];
|
||||
|
||||
if (!pInfo->pUpdated) {
|
||||
pInfo->pUpdated = taosArrayInit(4, POINTER_BYTES);
|
||||
pInfo->pUpdated = taosArrayInit(4, sizeof(SWinKey));
|
||||
}
|
||||
if (!pInfo->pUpdatedMap) {
|
||||
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
|
||||
pInfo->pUpdatedMap = taosHashInit(1024, hashFn, false, HASH_NO_LOCK);
|
||||
pInfo->pUpdatedMap = tSimpleHashInit(1024, hashFn);
|
||||
}
|
||||
|
||||
|
||||
while (1) {
|
||||
SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
|
||||
if (pBlock == NULL) {
|
||||
|
@ -4832,19 +4796,21 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
|
|||
}
|
||||
pOperator->status = OP_RES_TO_RETURN;
|
||||
removeDeleteResults(pInfo->pUpdatedMap, pInfo->pDelWins);
|
||||
closeStreamIntervalWindow(pInfo->aggSup.pResultRowHashTable, &pInfo->twAggSup, &pInfo->interval, NULL, pInfo->pUpdatedMap,
|
||||
pInfo->pDelWins, pOperator);
|
||||
closeStreamIntervalWindow(pInfo->aggSup.pResultRowHashTable, &pInfo->twAggSup, &pInfo->interval, NULL,
|
||||
pInfo->pUpdatedMap, pInfo->pDelWins, pOperator);
|
||||
|
||||
void* pIte = NULL;
|
||||
while ((pIte = taosHashIterate(pInfo->pUpdatedMap, pIte)) != NULL) {
|
||||
taosArrayPush(pInfo->pUpdated, pIte);
|
||||
void* pIte = NULL;
|
||||
int32_t iter = 0;
|
||||
while ((pIte = tSimpleHashIterate(pInfo->pUpdatedMap, pIte, &iter)) != NULL) {
|
||||
SWinKey* pKey = tSimpleHashGetKey(pIte, NULL);
|
||||
taosArrayPush(pInfo->pUpdated, pKey);
|
||||
}
|
||||
taosArraySort(pInfo->pUpdated, resultrowComparAsc);
|
||||
taosArraySort(pInfo->pUpdated, winKeyCmprImpl);
|
||||
|
||||
initMultiResInfoFromArrayList(&pInfo->groupResInfo, pInfo->pUpdated);
|
||||
pInfo->pUpdated = NULL;
|
||||
blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity);
|
||||
taosHashCleanup(pInfo->pUpdatedMap);
|
||||
tSimpleHashCleanup(pInfo->pUpdatedMap);
|
||||
pInfo->pUpdatedMap = NULL;
|
||||
|
||||
doBuildDeleteResult(pInfo, pInfo->pDelWins, &pInfo->delIndex, pInfo->pDelRes);
|
||||
|
@ -4853,7 +4819,7 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
|
|||
return pInfo->pDelRes;
|
||||
}
|
||||
|
||||
doBuildResult(pOperator, pInfo->pState, pInfo->binfo.pRes, &pInfo->groupResInfo);
|
||||
doBuildStreamIntervalResult(pOperator, pInfo->pState, pInfo->binfo.pRes, &pInfo->groupResInfo);
|
||||
if (pInfo->binfo.pRes->info.rows > 0) {
|
||||
printDataBlock(pInfo->binfo.pRes, "single interval");
|
||||
return pInfo->binfo.pRes;
|
||||
|
|
|
@@ -3053,14 +3053,12 @@ static int32_t doSaveTupleData(SSerializeDataHandle* pHandle, const void* pBuf,
  if (pHandle->currentPage == -1) {
    pPage = getNewBufPage(pHandle->pBuf, &pHandle->currentPage);
    if (pPage == NULL) {
      terrno = TSDB_CODE_NO_AVAIL_DISK;
      return terrno;
    }
    pPage->num = sizeof(SFilePage);
  } else {
    pPage = getBufPage(pHandle->pBuf, pHandle->currentPage);
    if (pPage == NULL) {
      terrno = TSDB_CODE_NO_AVAIL_DISK;
      return terrno;
    }
    if (pPage->num + length > getBufPageSize(pHandle->pBuf)) {
@@ -3068,7 +3066,6 @@ static int32_t doSaveTupleData(SSerializeDataHandle* pHandle, const void* pBuf,
      releaseBufPage(pHandle->pBuf, pPage);
      pPage = getNewBufPage(pHandle->pBuf, &pHandle->currentPage);
      if (pPage == NULL) {
        terrno = TSDB_CODE_NO_AVAIL_DISK;
        return terrno;
      }
      pPage->num = sizeof(SFilePage);
@@ -3115,7 +3112,6 @@ static int32_t doUpdateTupleData(SSerializeDataHandle* pHandle, const void* pBuf
  if (pHandle->pBuf != NULL) {
    SFilePage* pPage = getBufPage(pHandle->pBuf, pPos->pageId);
    if (pPage == NULL) {
      terrno = TSDB_CODE_NO_AVAIL_DISK;
      return terrno;
    }
    memcpy(pPage->data + pPos->offset, pBuf, length);
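The executil hunks above, like the percentile hunks that follow, stop overwriting the failure code with a hard-coded TSDB_CODE_NO_AVAIL_DISK and instead return whatever terrno the buffer-pool call already set. A standalone illustration of that propagation pattern; allocPage and g_lastError are invented for the example and are not TDengine APIs:

#include <stdio.h>

/* Invented stand-ins for getNewBufPage()/terrno, to show the pattern only. */
static int g_lastError = 0;

static void* allocPage(void) {
  g_lastError = 0x0223; /* pretend the allocator failed and recorded its own code */
  return NULL;
}

static int savePage(void) {
  void* pPage = allocPage();
  if (pPage == NULL) {
    return g_lastError; /* propagate the callee's code instead of overwriting it */
  }
  return 0;
}

int main(void) {
  printf("savePage -> 0x%x\n", savePage());
  return 0;
}
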
@ -50,8 +50,8 @@ static SFilePage *loadDataFromFilePage(tMemBucket *pMemBucket, int32_t slotIdx)
|
|||
if (pg == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
memcpy(buffer->data + offset, pg->data, (size_t)(pg->num * pMemBucket->bytes));
|
||||
|
||||
memcpy(buffer->data + offset, pg->data, (size_t)(pg->num * pMemBucket->bytes));
|
||||
offset += (int32_t)(pg->num * pMemBucket->bytes);
|
||||
}
|
||||
|
||||
|
@ -116,7 +116,7 @@ int32_t findOnlyResult(tMemBucket *pMemBucket, double *result) {
|
|||
int32_t *pageId = taosArrayGet(list, 0);
|
||||
SFilePage *pPage = getBufPage(pMemBucket->pBuffer, *pageId);
|
||||
if (pPage == NULL) {
|
||||
return TSDB_CODE_NO_AVAIL_DISK;
|
||||
return terrno;
|
||||
}
|
||||
ASSERT(pPage->num == 1);
|
||||
|
||||
|
@ -283,7 +283,7 @@ tMemBucket *tMemBucketCreate(int16_t nElemSize, int16_t dataType, double minval,
|
|||
return NULL;
|
||||
}
|
||||
|
||||
int32_t ret = createDiskbasedBuf(&pBucket->pBuffer, pBucket->bufPageSize, pBucket->bufPageSize * 512, "1", tsTempDir);
|
||||
int32_t ret = createDiskbasedBuf(&pBucket->pBuffer, pBucket->bufPageSize, pBucket->bufPageSize * 1024, "1", tsTempDir);
|
||||
if (ret != 0) {
|
||||
tMemBucketDestroy(pBucket);
|
||||
return NULL;
|
||||
|
@ -395,7 +395,7 @@ int32_t tMemBucketPut(tMemBucket *pBucket, const void *data, size_t size) {
|
|||
|
||||
pSlot->info.data = getNewBufPage(pBucket->pBuffer, &pageId);
|
||||
if (pSlot->info.data == NULL) {
|
||||
return TSDB_CODE_NO_AVAIL_DISK;
|
||||
return terrno;
|
||||
}
|
||||
pSlot->info.pageId = pageId;
|
||||
taosArrayPush(pPageIdList, &pageId);
|
||||
|
@ -489,8 +489,9 @@ int32_t getPercentileImpl(tMemBucket *pMemBucket, int32_t count, double fraction
|
|||
// data in buffer and file are merged together to be processed.
|
||||
SFilePage *buffer = loadDataFromFilePage(pMemBucket, i);
|
||||
if (buffer == NULL) {
|
||||
return TSDB_CODE_NO_AVAIL_DISK;
|
||||
return terrno;
|
||||
}
|
||||
|
||||
int32_t currentIdx = count - num;
|
||||
|
||||
char *thisVal = buffer->data + pMemBucket->bytes * currentIdx;
|
||||
|
@ -536,7 +537,7 @@ int32_t getPercentileImpl(tMemBucket *pMemBucket, int32_t count, double fraction
|
|||
int32_t *pageId = taosArrayGet(list, f);
|
||||
SFilePage *pg = getBufPage(pMemBucket->pBuffer, *pageId);
|
||||
if (pg == NULL) {
|
||||
return TSDB_CODE_NO_AVAIL_DISK;
|
||||
return terrno;
|
||||
}
|
||||
|
||||
int32_t code = tMemBucketPut(pMemBucket, pg->data, (int32_t)pg->num);
|
||||
|
|
|
@@ -436,7 +436,7 @@ SNode* nodesMakeNode(ENodeType type) {
      return makeNode(type, sizeof(SShowCreateDatabaseStmt));
    case QUERY_NODE_SHOW_DB_ALIVE_STMT:
    case QUERY_NODE_SHOW_CLUSTER_ALIVE_STMT:
      return makeNode(type, sizeof(SShowAliveStmt));
      return makeNode(type, sizeof(SShowAliveStmt));
    case QUERY_NODE_SHOW_CREATE_TABLE_STMT:
    case QUERY_NODE_SHOW_CREATE_STABLE_STMT:
      return makeNode(type, sizeof(SShowCreateTableStmt));
@@ -964,7 +964,7 @@ void nodesDestroyNode(SNode* pNode) {
    case QUERY_NODE_SHOW_LICENCES_STMT:
    case QUERY_NODE_SHOW_VGROUPS_STMT:
    case QUERY_NODE_SHOW_DB_ALIVE_STMT:
    case QUERY_NODE_SHOW_CLUSTER_ALIVE_STMT:
    case QUERY_NODE_SHOW_CLUSTER_ALIVE_STMT:
    case QUERY_NODE_SHOW_TOPICS_STMT:
    case QUERY_NODE_SHOW_CONSUMERS_STMT:
    case QUERY_NODE_SHOW_CONNECTIONS_STMT:
@@ -1103,6 +1103,8 @@ void nodesDestroyNode(SNode* pNode) {
      nodesDestroyNode(pLogicNode->pTspk);
      nodesDestroyNode(pLogicNode->pTsEnd);
      nodesDestroyNode(pLogicNode->pStateExpr);
      nodesDestroyNode(pLogicNode->pStartCond);
      nodesDestroyNode(pLogicNode->pEndCond);
      break;
    }
    case QUERY_NODE_LOGIC_PLAN_FILL: {

@@ -517,6 +517,7 @@ cmd ::= RESET QUERY CACHE.

/************************************************ explain *************************************************************/
cmd ::= EXPLAIN analyze_opt(A) explain_options(B) query_or_subquery(C). { pCxt->pRootNode = createExplainStmt(pCxt, A, B, C); }
cmd ::= EXPLAIN analyze_opt(A) explain_options(B) insert_query(C). { pCxt->pRootNode = createExplainStmt(pCxt, A, B, C); }

%type analyze_opt { bool }
%destructor analyze_opt { }
@@ -599,9 +600,11 @@ cmd ::= DELETE FROM full_table_name(A) where_clause_opt(B).
cmd ::= query_or_subquery(A). { pCxt->pRootNode = A; }

/************************************************ insert **************************************************************/
cmd ::= INSERT INTO full_table_name(A)
    NK_LP col_name_list(B) NK_RP query_or_subquery(C). { pCxt->pRootNode = createInsertStmt(pCxt, A, B, C); }
cmd ::= INSERT INTO full_table_name(A) query_or_subquery(B). { pCxt->pRootNode = createInsertStmt(pCxt, A, NULL, B); }
cmd ::= insert_query(A). { pCxt->pRootNode = A; }

insert_query(A) ::= INSERT INTO full_table_name(D)
    NK_LP col_name_list(B) NK_RP query_or_subquery(C). { A = createInsertStmt(pCxt, D, B, C); }
insert_query(A) ::= INSERT INTO full_table_name(C) query_or_subquery(B). { A = createInsertStmt(pCxt, C, NULL, B); }

/************************************************ literal *************************************************************/
literal(A) ::= NK_INTEGER(B). { A = createRawExprNode(pCxt, &B, createValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, &B)); }
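The grammar hunks above split INSERT ... SELECT out into an insert_query production so the same statement can also appear under EXPLAIN. A rough client-side sketch of what that enables through the regular TDengine C connector; the table names and connection parameters are placeholders:

#include <stdio.h>
#include "taos.h"

int main(void) {
  TAOS* conn = taos_connect("localhost", "root", "taosdata", "test_db", 6030);
  if (conn == NULL) {
    fprintf(stderr, "connect failed\n");
    return 1;
  }

  // With insert_query as its own production, EXPLAIN can wrap an
  // INSERT ... SELECT just like a plain query.
  TAOS_RES* res = taos_query(conn, "EXPLAIN INSERT INTO dst_tb SELECT * FROM src_tb");
  if (taos_errno(res) != 0) {
    fprintf(stderr, "explain failed: %s\n", taos_errstr(res));
  }
  taos_free_result(res);
  taos_close(conn);
  return 0;
}
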
@ -290,7 +290,8 @@ int32_t insGetTableDataCxt(SHashObj* pHash, void* id, int32_t idLen, STableMeta*
|
|||
}
|
||||
int32_t code = createTableDataCxt(pTableMeta, pCreateTbReq, pTableCxt, colMode);
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = taosHashPut(pHash, id, idLen, pTableCxt, POINTER_BYTES);
|
||||
void* pData = *pTableCxt; // deal scan coverity
|
||||
code = taosHashPut(pHash, id, idLen, &pData, POINTER_BYTES);
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
@ -671,7 +672,6 @@ int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreate
|
|||
goto end;
|
||||
}
|
||||
|
||||
colLength[c] = htonl(colLength[c]);
|
||||
int8_t* offset = pStart;
|
||||
if (IS_VAR_DATA_TYPE(pColSchema->type)) {
|
||||
pStart += numOfRows * sizeof(int32_t);
|
||||
|
@ -687,4 +687,4 @@ int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreate
|
|||
|
||||
end:
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -3281,9 +3281,6 @@ static int32_t translateInterp(STranslateContext* pCxt, SSelectStmt* pSelect) {
|
|||
}
|
||||
|
||||
static int32_t translatePartitionBy(STranslateContext* pCxt, SSelectStmt* pSelect) {
|
||||
if (NULL == pSelect->pPartitionByList) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
pCxt->currClause = SQL_CLAUSE_PARTITION_BY;
|
||||
int32_t code = translateExprList(pCxt, pSelect->pPartitionByList);
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
|
@ -3517,7 +3514,7 @@ static int32_t translateSetOperProject(STranslateContext* pCxt, SSetOperator* pS
|
|||
if (comp > 0) {
|
||||
SNode* pRightFunc = NULL;
|
||||
int32_t code = createCastFunc(pCxt, pRight, pLeftExpr->resType, &pRightFunc);
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
if (TSDB_CODE_SUCCESS != code || NULL == pRightFunc) { // deal scan coverity
|
||||
return code;
|
||||
}
|
||||
REPLACE_LIST2_NODE(pRightFunc);
|
||||
|
@ -3525,7 +3522,7 @@ static int32_t translateSetOperProject(STranslateContext* pCxt, SSetOperator* pS
|
|||
} else if (comp < 0) {
|
||||
SNode* pLeftFunc = NULL;
|
||||
int32_t code = createCastFunc(pCxt, pLeft, pRightExpr->resType, &pLeftFunc);
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
if (TSDB_CODE_SUCCESS != code || NULL == pLeftFunc) { // deal scan coverity
|
||||
return code;
|
||||
}
|
||||
REPLACE_LIST1_NODE(pLeftFunc);
|
||||
|
@ -5662,6 +5659,26 @@ static int32_t addTagsToCreateStreamQuery(STranslateContext* pCxt, SCreateStream
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static SNode* createNullValue() {
|
||||
SValueNode* pValue = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE);
|
||||
if (NULL == pValue) {
|
||||
return NULL;
|
||||
}
|
||||
pValue->isNull = true;
|
||||
pValue->node.resType.type = TSDB_DATA_TYPE_NULL;
|
||||
pValue->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_NULL].bytes;
|
||||
return (SNode*)pValue;
|
||||
}
|
||||
|
||||
static int32_t addNullTagsForExistTable(STranslateContext* pCxt, STableMeta* pMeta, SSelectStmt* pSelect) {
|
||||
int32_t numOfTags = getNumOfTags(pMeta);
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
for (int32_t i = 0; TSDB_CODE_SUCCESS == code && i < numOfTags; ++i) {
|
||||
code = nodesListMakeStrictAppend(&pSelect->pTags, createNullValue());
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
||||
typedef struct SRewriteSubtableCxt {
|
||||
STranslateContext* pCxt;
|
||||
SNodeList* pPartitionList;
|
||||
|
@ -5707,16 +5724,30 @@ static int32_t addSubtableNameToCreateStreamQuery(STranslateContext* pCxt, SCrea
|
|||
return pCxt->errCode;
|
||||
}
|
||||
|
||||
static int32_t addSubtableInfoToCreateStreamQuery(STranslateContext* pCxt, SCreateStreamStmt* pStmt) {
|
||||
static int32_t addNullTagsForCreateTable(STranslateContext* pCxt, SCreateStreamStmt* pStmt) {
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
for (int32_t i = 0; TSDB_CODE_SUCCESS == code && i < LIST_LENGTH(pStmt->pTags); ++i) {
|
||||
code = nodesListMakeStrictAppend(&((SSelectStmt*)pStmt->pQuery)->pTags, createNullValue());
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
||||
static int32_t addNullTagsToCreateStreamQuery(STranslateContext* pCxt, STableMeta* pMeta, SCreateStreamStmt* pStmt) {
|
||||
if (NULL == pMeta) {
|
||||
return addNullTagsForCreateTable(pCxt, pStmt);
|
||||
}
|
||||
return addNullTagsForExistTable(pCxt, pMeta, (SSelectStmt*)pStmt->pQuery);
|
||||
}
|
||||
|
||||
static int32_t addSubtableInfoToCreateStreamQuery(STranslateContext* pCxt, STableMeta* pMeta,
|
||||
SCreateStreamStmt* pStmt) {
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
SSelectStmt* pSelect = (SSelectStmt*)pStmt->pQuery;
|
||||
if (NULL == pSelect->pPartitionByList) {
|
||||
if (NULL != pStmt->pTags || NULL != pStmt->pSubtable) {
|
||||
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, "Unsupported stream query");
|
||||
}
|
||||
return TSDB_CODE_SUCCESS;
|
||||
code = addNullTagsToCreateStreamQuery(pCxt, pMeta, pStmt);
|
||||
} else {
|
||||
code = addTagsToCreateStreamQuery(pCxt, pStmt, pSelect);
|
||||
}
|
||||
|
||||
int32_t code = addTagsToCreateStreamQuery(pCxt, pStmt, pSelect);
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = addSubtableNameToCreateStreamQuery(pCxt, pStmt, pSelect);
|
||||
}
|
||||
|
@ -5914,17 +5945,6 @@ static int32_t adjustDataTypeOfTags(STranslateContext* pCxt, const STableMeta* p
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static SNode* createNullValue() {
|
||||
SValueNode* pValue = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE);
|
||||
if (NULL == pValue) {
|
||||
return NULL;
|
||||
}
|
||||
pValue->isNull = true;
|
||||
pValue->node.resType.type = TSDB_DATA_TYPE_NULL;
|
||||
pValue->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_NULL].bytes;
|
||||
return (SNode*)pValue;
|
||||
}
|
||||
|
||||
static int32_t adjustOrderOfTags(STranslateContext* pCxt, SNodeList* pTags, const STableMeta* pMeta,
|
||||
SNodeList** pTagExprs, SCMCreateStreamReq* pReq) {
|
||||
if (LIST_LENGTH(pTags) != LIST_LENGTH(*pTagExprs)) {
|
||||
|
@ -5993,23 +6013,75 @@ static int32_t adjustOrderOfTags(STranslateContext* pCxt, SNodeList* pTags, cons
|
|||
static int32_t adjustTagsForExistTable(STranslateContext* pCxt, SCreateStreamStmt* pStmt, const STableMeta* pMeta,
|
||||
SCMCreateStreamReq* pReq) {
|
||||
SSelectStmt* pSelect = (SSelectStmt*)pStmt->pQuery;
|
||||
if (NULL == pSelect->pPartitionByList) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
if (NULL == pStmt->pTags) {
|
||||
return adjustDataTypeOfTags(pCxt, pMeta, pSelect->pTags);
|
||||
}
|
||||
return adjustOrderOfTags(pCxt, pStmt->pTags, pMeta, &pSelect->pTags, pReq);
|
||||
}
|
||||
|
||||
static int32_t adjustTagsForCreateTable(STranslateContext* pCxt, SCreateStreamStmt* pStmt, SCMCreateStreamReq* pReq) {
|
||||
SSelectStmt* pSelect = (SSelectStmt*)pStmt->pQuery;
|
||||
if (NULL == pSelect->pPartitionByList || NULL == pSelect->pTags) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
SNode* pTagDef = NULL;
|
||||
SNode* pTagExpr = NULL;
|
||||
FORBOTH(pTagDef, pStmt->pTags, pTagExpr, pSelect->pTags) {
|
||||
SColumnDefNode* pDef = (SColumnDefNode*)pTagDef;
|
||||
if (!dataTypeEqual(&pDef->dataType, &((SExprNode*)pTagExpr)->resType)) {
|
||||
SNode* pFunc = NULL;
|
||||
int32_t code = createCastFunc(pCxt, pTagExpr, pDef->dataType, &pFunc);
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
return code;
|
||||
}
|
||||
REPLACE_LIST2_NODE(pFunc);
|
||||
}
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static int32_t adjustTags(STranslateContext* pCxt, SCreateStreamStmt* pStmt, const STableMeta* pMeta,
|
||||
SCMCreateStreamReq* pReq) {
|
||||
if (NULL == pMeta) {
|
||||
return adjustTagsForCreateTable(pCxt, pStmt, pReq);
|
||||
}
|
||||
return adjustTagsForExistTable(pCxt, pStmt, pMeta, pReq);
|
||||
}
|
||||
|
||||
static bool isTagDef(SNodeList* pTags) {
|
||||
if (NULL == pTags) {
|
||||
return false;
|
||||
}
|
||||
return QUERY_NODE_COLUMN_DEF == nodeType(nodesListGetNode(pTags, 0));
|
||||
}
|
||||
|
||||
static bool isTagBound(SNodeList* pTags) {
|
||||
if (NULL == pTags) {
|
||||
return false;
|
||||
}
|
||||
return QUERY_NODE_COLUMN == nodeType(nodesListGetNode(pTags, 0));
|
||||
}
|
||||
|
||||
static int32_t translateStreamTargetTable(STranslateContext* pCxt, SCreateStreamStmt* pStmt, SCMCreateStreamReq* pReq,
|
||||
STableMeta** pMeta) {
|
||||
int32_t code = getTableMeta(pCxt, pStmt->targetDbName, pStmt->targetTabName, pMeta);
|
||||
if (TSDB_CODE_PAR_TABLE_NOT_EXIST == code) {
|
||||
if (NULL != pStmt->pCols) {
|
||||
if (NULL != pStmt->pCols || isTagBound(pStmt->pTags)) {
|
||||
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_TABLE_NOT_EXIST, pStmt->targetTabName);
|
||||
}
|
||||
pReq->createStb = STREAM_CREATE_STABLE_TRUE;
|
||||
pReq->targetStbUid = 0;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
} else {
|
||||
if (isTagDef(pStmt->pTags)) {
|
||||
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, "Table already exist: %s",
|
||||
pStmt->targetTabName);
|
||||
}
|
||||
pReq->createStb = STREAM_CREATE_STABLE_FALSE;
|
||||
pReq->targetStbUid = (*pMeta)->suid;
|
||||
}
|
||||
|
@ -6021,7 +6093,7 @@ static int32_t buildCreateStreamQuery(STranslateContext* pCxt, SCreateStreamStmt
|
|||
STableMeta* pMeta = NULL;
|
||||
int32_t code = translateStreamTargetTable(pCxt, pStmt, pReq, &pMeta);
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = addSubtableInfoToCreateStreamQuery(pCxt, pStmt);
|
||||
code = addSubtableInfoToCreateStreamQuery(pCxt, pMeta, pStmt);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = translateQuery(pCxt, pStmt->pQuery);
|
||||
|
@ -6035,8 +6107,8 @@ static int32_t buildCreateStreamQuery(STranslateContext* pCxt, SCreateStreamStmt
|
|||
if (TSDB_CODE_SUCCESS == code && NULL != pMeta) {
|
||||
code = adjustProjectionsForExistTable(pCxt, pStmt, pMeta, pReq);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code && NULL != pMeta) {
|
||||
code = adjustTagsForExistTable(pCxt, pStmt, pMeta, pReq);
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = adjustTags(pCxt, pStmt, pMeta, pReq);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
getSourceDatabase(pStmt->pQuery, pCxt->pParseCxt->acctId, pReq->sourceDB);
|
||||
|
|
File diff suppressed because it is too large
@ -374,6 +374,20 @@ static int32_t createScanLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect
|
|||
code = addDefaultScanCol(pRealTable->pMeta, &pScan->pScanCols);
|
||||
}
|
||||
|
||||
if (TSDB_CODE_SUCCESS == code && NULL != pSelect->pTags && NULL == pSelect->pPartitionByList) {
|
||||
pScan->pTags = nodesCloneList(pSelect->pTags);
|
||||
if (NULL == pScan->pTags) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
}
|
||||
|
||||
if (TSDB_CODE_SUCCESS == code && NULL != pSelect->pSubtable && NULL == pSelect->pPartitionByList) {
|
||||
pScan->pSubtable = nodesCloneNode(pSelect->pSubtable);
|
||||
if (NULL == pScan->pSubtable) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
}
|
||||
|
||||
// set output
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = createColumnByRewriteExprs(pScan->pScanCols, &pScan->node.pTargets);
|
||||
|
|
|
@@ -228,9 +228,14 @@ typedef struct SQWorkerMgmt {
      case QW_PHASE_POST_FETCH: \
        ctx->inFetch = 0; \
        break; \
      default: \
      case QW_PHASE_PRE_QUERY: \
      case QW_PHASE_POST_QUERY: \
      case QW_PHASE_PRE_CQUERY: \
      case QW_PHASE_POST_CQUERY: \
        atomic_store_8(&(ctx)->phase, _value); \
        break; \
      default: \
        break; \
    } \
  } while (0)

|
|
|
@ -550,7 +550,9 @@ _return:
|
|||
if (ctx) {
|
||||
QW_UPDATE_RSP_CODE(ctx, code);
|
||||
|
||||
QW_SET_PHASE(ctx, phase);
|
||||
if (QW_PHASE_POST_CQUERY != phase) {
|
||||
QW_SET_PHASE(ctx, phase);
|
||||
}
|
||||
|
||||
QW_UNLOCK(QW_WRITE, &ctx->lock);
|
||||
qwReleaseTaskCtx(mgmt, ctx);
|
||||
|
@ -757,7 +759,7 @@ int32_t qwProcessCQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg) {
|
|||
QW_LOCK(QW_WRITE, &ctx->lock);
|
||||
if (qComplete || (queryStop && (0 == atomic_load_8((int8_t *)&ctx->queryContinue))) || code) {
|
||||
// Note: query is not running anymore
|
||||
QW_SET_PHASE(ctx, 0);
|
||||
QW_SET_PHASE(ctx, QW_PHASE_POST_CQUERY);
|
||||
QW_UNLOCK(QW_WRITE, &ctx->lock);
|
||||
break;
|
||||
}
|
||||
|
|
|
@ -234,7 +234,7 @@ int32_t schBuildTaskRalation(SSchJob *pJob, SHashObj *planToTask) {
|
|||
}
|
||||
|
||||
SSchTask *pTask = taosArrayGet(pLevel->subTasks, 0);
|
||||
if (SUBPLAN_TYPE_MODIFY != pTask->plan->subplanType) {
|
||||
if (SUBPLAN_TYPE_MODIFY != pTask->plan->subplanType || EXPLAIN_MODE_DISABLE != pJob->attr.explainMode) {
|
||||
pJob->attr.needFetch = true;
|
||||
}
|
||||
}
|
||||
|
@ -484,7 +484,7 @@ int32_t schProcessOnJobFailure(SSchJob *pJob, int32_t errCode) {
|
|||
if (TSDB_CODE_SCH_IGNORE_ERROR == errCode) {
|
||||
return TSDB_CODE_SCH_IGNORE_ERROR;
|
||||
}
|
||||
|
||||
|
||||
schUpdateJobErrCode(pJob, errCode);
|
||||
|
||||
int32_t code = atomic_load_32(&pJob->errCode);
|
||||
|
|
|
@ -207,6 +207,7 @@ void streamMetaRemoveTask(SStreamMeta* pMeta, int32_t taskId) {
|
|||
if (ppTask) {
|
||||
SStreamTask* pTask = *ppTask;
|
||||
taosHashRemove(pMeta->pTasks, &taskId, sizeof(int32_t));
|
||||
tdbTbDelete(pMeta->pTaskDb, &taskId, sizeof(int32_t), pMeta->txn);
|
||||
/*if (pTask->timer) {
|
||||
* taosTmrStop(pTask->timer);*/
|
||||
/*pTask->timer = NULL;*/
|
||||
|
|
|
@ -89,45 +89,6 @@
|
|||
// /\ UNCHANGED <<candidateVars, leaderVars>>
|
||||
//
|
||||
|
||||
int32_t syncNodeFollowerCommit(SSyncNode* ths, SyncIndex newCommitIndex) {
|
||||
ASSERT(false && "deprecated");
|
||||
if (ths->state != TAOS_SYNC_STATE_FOLLOWER) {
|
||||
sNTrace(ths, "can not do follower commit");
|
||||
return -1;
|
||||
}
|
||||
|
||||
// maybe update commit index, leader notice me
|
||||
if (newCommitIndex > ths->commitIndex) {
|
||||
// has commit entry in local
|
||||
if (newCommitIndex <= ths->pLogStore->syncLogLastIndex(ths->pLogStore)) {
|
||||
// advance commit index to sanpshot first
|
||||
SSnapshot snapshot;
|
||||
ths->pFsm->FpGetSnapshotInfo(ths->pFsm, &snapshot);
|
||||
if (snapshot.lastApplyIndex >= 0 && snapshot.lastApplyIndex > ths->commitIndex) {
|
||||
SyncIndex commitBegin = ths->commitIndex;
|
||||
SyncIndex commitEnd = snapshot.lastApplyIndex;
|
||||
ths->commitIndex = snapshot.lastApplyIndex;
|
||||
sNTrace(ths, "commit by snapshot from index:%" PRId64 " to index:%" PRId64, commitBegin, commitEnd);
|
||||
}
|
||||
|
||||
SyncIndex beginIndex = ths->commitIndex + 1;
|
||||
SyncIndex endIndex = newCommitIndex;
|
||||
|
||||
// update commit index
|
||||
ths->commitIndex = newCommitIndex;
|
||||
|
||||
// call back Wal
|
||||
int32_t code = ths->pLogStore->syncLogUpdateCommitIndex(ths->pLogStore, ths->commitIndex);
|
||||
ASSERT(code == 0);
|
||||
|
||||
code = syncNodeDoCommit(ths, beginIndex, endIndex, ths->state);
|
||||
ASSERT(code == 0);
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
SSyncRaftEntry* syncBuildRaftEntryFromAppendEntries(const SyncAppendEntries* pMsg) {
|
||||
SSyncRaftEntry* pEntry = taosMemoryMalloc(pMsg->dataLen);
|
||||
if (pEntry == NULL) {
|
||||
|
@ -232,256 +193,3 @@ _IGNORE:
|
|||
rpcFreeCont(rpcRsp.pCont);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t syncNodeOnAppendEntriesOld(SSyncNode* ths, const SRpcMsg* pRpcMsg) {
|
||||
SyncAppendEntries* pMsg = pRpcMsg->pCont;
|
||||
SRpcMsg rpcRsp = {0};
|
||||
|
||||
// if already drop replica, do not process
|
||||
if (!syncNodeInRaftGroup(ths, &(pMsg->srcId))) {
|
||||
syncLogRecvAppendEntries(ths, pMsg, "not in my config");
|
||||
goto _IGNORE;
|
||||
}
|
||||
|
||||
// prepare response msg
|
||||
int32_t code = syncBuildAppendEntriesReply(&rpcRsp, ths->vgId);
|
||||
if (code != 0) {
|
||||
syncLogRecvAppendEntries(ths, pMsg, "build rsp error");
|
||||
goto _IGNORE;
|
||||
}
|
||||
|
||||
SyncAppendEntriesReply* pReply = rpcRsp.pCont;
|
||||
pReply->srcId = ths->myRaftId;
|
||||
pReply->destId = pMsg->srcId;
|
||||
pReply->term = ths->raftStore.currentTerm;
|
||||
pReply->success = false;
|
||||
// pReply->matchIndex = ths->pLogStore->syncLogLastIndex(ths->pLogStore);
|
||||
pReply->matchIndex = SYNC_INDEX_INVALID;
|
||||
pReply->lastSendIndex = pMsg->prevLogIndex + 1;
|
||||
pReply->startTime = ths->startTime;
|
||||
|
||||
if (pMsg->term < ths->raftStore.currentTerm) {
|
||||
syncLogRecvAppendEntries(ths, pMsg, "reject, small term");
|
||||
goto _SEND_RESPONSE;
|
||||
}
|
||||
|
||||
if (pMsg->term > ths->raftStore.currentTerm) {
|
||||
pReply->term = pMsg->term;
|
||||
}
  syncNodeStepDown(ths, pMsg->term);
  syncNodeResetElectTimer(ths);

  SyncIndex startIndex = ths->pLogStore->syncLogBeginIndex(ths->pLogStore);
  SyncIndex lastIndex = ths->pLogStore->syncLogLastIndex(ths->pLogStore);

  if (pMsg->prevLogIndex > lastIndex) {
    syncLogRecvAppendEntries(ths, pMsg, "reject, index not match");
    goto _SEND_RESPONSE;
  }

  if (pMsg->prevLogIndex >= startIndex) {
    SyncTerm myPreLogTerm = syncNodeGetPreTerm(ths, pMsg->prevLogIndex + 1);
    // ASSERT(myPreLogTerm != SYNC_TERM_INVALID);
    if (myPreLogTerm == SYNC_TERM_INVALID) {
      syncLogRecvAppendEntries(ths, pMsg, "reject, pre-term invalid");
      goto _SEND_RESPONSE;
    }

    if (myPreLogTerm != pMsg->prevLogTerm) {
      syncLogRecvAppendEntries(ths, pMsg, "reject, pre-term not match");
      goto _SEND_RESPONSE;
    }
  }

  // accept
  pReply->success = true;
  bool hasAppendEntries = pMsg->dataLen > 0;
  if (hasAppendEntries) {
    SSyncRaftEntry* pAppendEntry = syncEntryBuildFromAppendEntries(pMsg);
    ASSERT(pAppendEntry != NULL);

    SyncIndex appendIndex = pMsg->prevLogIndex + 1;

    LRUHandle* hLocal = NULL;
    LRUHandle* hAppend = NULL;

    int32_t         code = 0;
    SSyncRaftEntry* pLocalEntry = NULL;
    SLRUCache*      pCache = ths->pLogStore->pCache;
    hLocal = taosLRUCacheLookup(pCache, &appendIndex, sizeof(appendIndex));
    if (hLocal) {
      pLocalEntry = (SSyncRaftEntry*)taosLRUCacheValue(pCache, hLocal);
      code = 0;

      ths->pLogStore->cacheHit++;
      sNTrace(ths, "hit cache index:%" PRId64 ", bytes:%u, %p", appendIndex, pLocalEntry->bytes, pLocalEntry);

    } else {
      ths->pLogStore->cacheMiss++;
      sNTrace(ths, "miss cache index:%" PRId64, appendIndex);

      code = ths->pLogStore->syncLogGetEntry(ths->pLogStore, appendIndex, &pLocalEntry);
    }

    if (code == 0) {
      // get local entry success

      if (pLocalEntry->term == pAppendEntry->term) {
        // do nothing
        sNTrace(ths, "log match, do nothing, index:%" PRId64, appendIndex);

      } else {
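        // Conflict: the local entry at appendIndex carries a different term than the leader's.
        // Per the Raft log-matching rule, drop the local suffix starting at appendIndex and
        // append the leader's entry in its place.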
        // truncate
        code = ths->pLogStore->syncLogTruncate(ths->pLogStore, appendIndex);
        if (code != 0) {
          char logBuf[128];
          snprintf(logBuf, sizeof(logBuf), "ignore, truncate error, append-index:%" PRId64, appendIndex);
          syncLogRecvAppendEntries(ths, pMsg, logBuf);

          if (hLocal) {
            taosLRUCacheRelease(ths->pLogStore->pCache, hLocal, false);
          } else {
            syncEntryDestroy(pLocalEntry);
          }

          if (hAppend) {
            taosLRUCacheRelease(ths->pLogStore->pCache, hAppend, false);
          } else {
            syncEntryDestroy(pAppendEntry);
          }

          goto _IGNORE;
        }

        ASSERT(pAppendEntry->index == appendIndex);

        // append
        code = ths->pLogStore->syncLogAppendEntry(ths->pLogStore, pAppendEntry);
        if (code != 0) {
          char logBuf[128];
          snprintf(logBuf, sizeof(logBuf), "ignore, append error, append-index:%" PRId64, appendIndex);
          syncLogRecvAppendEntries(ths, pMsg, logBuf);

          if (hLocal) {
            taosLRUCacheRelease(ths->pLogStore->pCache, hLocal, false);
          } else {
            syncEntryDestroy(pLocalEntry);
          }

          if (hAppend) {
            taosLRUCacheRelease(ths->pLogStore->pCache, hAppend, false);
          } else {
            syncEntryDestroy(pAppendEntry);
          }

          goto _IGNORE;
        }

        syncCacheEntry(ths->pLogStore, pAppendEntry, &hAppend);
      }

    } else {
      if (terrno == TSDB_CODE_WAL_LOG_NOT_EXIST) {
        // log not exist

        // truncate
        code = ths->pLogStore->syncLogTruncate(ths->pLogStore, appendIndex);
        if (code != 0) {
          char logBuf[128];
          snprintf(logBuf, sizeof(logBuf), "ignore, log not exist, truncate error, append-index:%" PRId64, appendIndex);
          syncLogRecvAppendEntries(ths, pMsg, logBuf);

          syncEntryDestroy(pLocalEntry);
          syncEntryDestroy(pAppendEntry);
          goto _IGNORE;
        }

        // append
        code = ths->pLogStore->syncLogAppendEntry(ths->pLogStore, pAppendEntry);
        if (code != 0) {
          char logBuf[128];
          snprintf(logBuf, sizeof(logBuf), "ignore, log not exist, append error, append-index:%" PRId64, appendIndex);
          syncLogRecvAppendEntries(ths, pMsg, logBuf);

          if (hLocal) {
            taosLRUCacheRelease(ths->pLogStore->pCache, hLocal, false);
          } else {
            syncEntryDestroy(pLocalEntry);
          }

          if (hAppend) {
            taosLRUCacheRelease(ths->pLogStore->pCache, hAppend, false);
          } else {
            syncEntryDestroy(pAppendEntry);
          }

          goto _IGNORE;
        }

        syncCacheEntry(ths->pLogStore, pAppendEntry, &hAppend);

      } else {
        // get local entry error
        char logBuf[128];
        snprintf(logBuf, sizeof(logBuf), "ignore, get local entry error, append-index:%" PRId64 " err:%d", appendIndex,
                 terrno);
        syncLogRecvAppendEntries(ths, pMsg, logBuf);

        if (hLocal) {
          taosLRUCacheRelease(ths->pLogStore->pCache, hLocal, false);
        } else {
          syncEntryDestroy(pLocalEntry);
        }

        if (hAppend) {
          taosLRUCacheRelease(ths->pLogStore->pCache, hAppend, false);
        } else {
          syncEntryDestroy(pAppendEntry);
        }

        goto _IGNORE;
      }
    }

    // update match index
    pReply->matchIndex = pAppendEntry->index;

    if (hLocal) {
      taosLRUCacheRelease(ths->pLogStore->pCache, hLocal, false);
    } else {
      syncEntryDestroy(pLocalEntry);
    }

    if (hAppend) {
      taosLRUCacheRelease(ths->pLogStore->pCache, hAppend, false);
    } else {
      syncEntryDestroy(pAppendEntry);
    }

  } else {
    // no append entries, do nothing
    // maybe has extra entries, no harm

    // update match index
    pReply->matchIndex = pMsg->prevLogIndex;
  }

  // the leader may have advanced its commit index; try to follow it
  syncNodeFollowerCommit(ths, pMsg->commitIndex);

  syncLogRecvAppendEntries(ths, pMsg, "accept");
  goto _SEND_RESPONSE;

_IGNORE:
  rpcFreeCont(rpcRsp.pCont);
  return 0;

_SEND_RESPONSE:
  // msg event log
  syncLogSendAppendEntriesReply(ths, pReply, "");

  // send response
  syncNodeSendMsgById(&pReply->destId, ths, &rpcRsp);
  return 0;
}

@@ -89,63 +89,3 @@ int32_t syncNodeOnAppendEntriesReply(SSyncNode* ths, const SRpcMsg* pRpcMsg) {
  }
  return 0;
}

int32_t syncNodeOnAppendEntriesReplyOld(SSyncNode* ths, SyncAppendEntriesReply* pMsg) {
  int32_t ret = 0;

  // if already drop replica, do not process
  if (!syncNodeInRaftGroup(ths, &(pMsg->srcId))) {
    syncLogRecvAppendEntriesReply(ths, pMsg, "not in my config");
    return 0;
  }

  // drop stale response
  if (pMsg->term < ths->raftStore.currentTerm) {
    syncLogRecvAppendEntriesReply(ths, pMsg, "drop stale response");
    return 0;
  }

  if (ths->state == TAOS_SYNC_STATE_LEADER) {
    if (pMsg->term > ths->raftStore.currentTerm) {
      syncLogRecvAppendEntriesReply(ths, pMsg, "error term");
      syncNodeStepDown(ths, pMsg->term);
      return -1;
    }

    ASSERT(pMsg->term == ths->raftStore.currentTerm);
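
    // On success, advance this peer's matchIndex/nextIndex and try to move the leader's
    // commit index forward; on failure, back nextIndex off so the next probe retries an
    // earlier entry.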
    if (pMsg->success) {
      SyncIndex oldMatchIndex = syncIndexMgrGetIndex(ths->pMatchIndex, &(pMsg->srcId));
      if (pMsg->matchIndex > oldMatchIndex) {
        syncIndexMgrSetIndex(ths->pMatchIndex, &(pMsg->srcId), pMsg->matchIndex);
        syncMaybeAdvanceCommitIndex(ths);

        // maybe update minMatchIndex
        ths->minMatchIndex = syncMinMatchIndex(ths);
      }
      syncIndexMgrSetIndex(ths->pNextIndex, &(pMsg->srcId), pMsg->matchIndex + 1);

    } else {
      SyncIndex nextIndex = syncIndexMgrGetIndex(ths->pNextIndex, &(pMsg->srcId));
      if (nextIndex > SYNC_INDEX_BEGIN) {
        --nextIndex;
      }
      syncIndexMgrSetIndex(ths->pNextIndex, &(pMsg->srcId), nextIndex);
    }

    // send next append entries
    SPeerState* pState = syncNodeGetPeerState(ths, &(pMsg->srcId));
    ASSERT(pState != NULL);

    if (pMsg->lastSendIndex == pState->lastSendIndex) {
      int64_t timeNow = taosGetTimestampMs();
      int64_t elapsed = timeNow - pState->lastSendTime;
      sNTrace(ths, "sync-append-entries rtt elapsed:%" PRId64 ", index:%" PRId64, elapsed, pState->lastSendIndex);

      syncNodeReplicateOne(ths, &(pMsg->srcId), true);
    }
  }

  syncLogRecvAppendEntriesReply(ths, pMsg, "process");
  return 0;
}

@@ -43,148 +43,6 @@
// IN commitIndex' = [commitIndex EXCEPT ![i] = newCommitIndex]
// /\ UNCHANGED <<messages, serverVars, candidateVars, leaderVars, log>>
//
void syncOneReplicaAdvance(SSyncNode* pSyncNode) {
  ASSERT(false && "deprecated");
  if (pSyncNode == NULL) {
    sError("pSyncNode is NULL");
    return;
  }

  if (pSyncNode->state != TAOS_SYNC_STATE_LEADER) {
    sNError(pSyncNode, "not leader, can not advance commit index");
    return;
  }

  if (pSyncNode->replicaNum != 1) {
    sNError(pSyncNode, "not one replica, can not advance commit index");
    return;
  }

  // advance commit index to snapshot first
  SSnapshot snapshot;
  pSyncNode->pFsm->FpGetSnapshotInfo(pSyncNode->pFsm, &snapshot);
  if (snapshot.lastApplyIndex > 0 && snapshot.lastApplyIndex > pSyncNode->commitIndex) {
    SyncIndex commitBegin = pSyncNode->commitIndex;
    SyncIndex commitEnd = snapshot.lastApplyIndex;
    pSyncNode->commitIndex = snapshot.lastApplyIndex;
    sNTrace(pSyncNode, "commit by snapshot from index:%" PRId64 " to index:%" PRId64, commitBegin, commitEnd);
  }

  // advance commit index as large as possible
  SyncIndex lastIndex = syncNodeGetLastIndex(pSyncNode);
  if (lastIndex > pSyncNode->commitIndex) {
    sNTrace(pSyncNode, "commit by wal from index:%" PRId64 " to index:%" PRId64, pSyncNode->commitIndex + 1, lastIndex);
    pSyncNode->commitIndex = lastIndex;
  }

  // call back Wal
  SyncIndex walCommitVer = logStoreWalCommitVer(pSyncNode->pLogStore);
  if (pSyncNode->commitIndex > walCommitVer) {
    pSyncNode->pLogStore->syncLogUpdateCommitIndex(pSyncNode->pLogStore, pSyncNode->commitIndex);
  }
}

void syncMaybeAdvanceCommitIndex(SSyncNode* pSyncNode) {
  ASSERTS(false, "deprecated");
  if (pSyncNode == NULL) {
    sError("pSyncNode is NULL");
    return;
  }

  if (pSyncNode->state != TAOS_SYNC_STATE_LEADER) {
    sNError(pSyncNode, "not leader, can not advance commit index");
    return;
  }

  // advance commit index to snapshot first
  SSnapshot snapshot;
  pSyncNode->pFsm->FpGetSnapshotInfo(pSyncNode->pFsm, &snapshot);
  if (snapshot.lastApplyIndex > 0 && snapshot.lastApplyIndex > pSyncNode->commitIndex) {
    SyncIndex commitBegin = pSyncNode->commitIndex;
    SyncIndex commitEnd = snapshot.lastApplyIndex;
    pSyncNode->commitIndex = snapshot.lastApplyIndex;
    sNTrace(pSyncNode, "commit by snapshot from index:%" PRId64 " to index:%" PRId64, commitBegin, commitEnd);
  }

  // update commit index
  SyncIndex newCommitIndex = pSyncNode->commitIndex;
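  // Walk the log from the newest entry downward; the highest index replicated on a quorum
  // (and passing the term check inside the loop) becomes the candidate commit index.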
  for (SyncIndex index = syncNodeGetLastIndex(pSyncNode); index > pSyncNode->commitIndex; --index) {
    bool agree = syncAgree(pSyncNode, index);

    if (agree) {
      // term
      SSyncRaftEntry* pEntry = NULL;
      SLRUCache*      pCache = pSyncNode->pLogStore->pCache;
      LRUHandle*      h = taosLRUCacheLookup(pCache, &index, sizeof(index));
      if (h) {
        pEntry = (SSyncRaftEntry*)taosLRUCacheValue(pCache, h);

        pSyncNode->pLogStore->cacheHit++;
        sNTrace(pSyncNode, "hit cache index:%" PRId64 ", bytes:%u, %p", index, pEntry->bytes, pEntry);

      } else {
        pSyncNode->pLogStore->cacheMiss++;
        sNTrace(pSyncNode, "miss cache index:%" PRId64, index);

        int32_t code = pSyncNode->pLogStore->syncLogGetEntry(pSyncNode->pLogStore, index, &pEntry);
        if (code != 0) {
          sNError(pSyncNode, "advance commit index error, read wal index:%" PRId64, index);
          return;
        }
      }
      // cannot commit, even if quorum agree. need check term!
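      // (an entry from a term newer than currentTerm means this node's view is already
      // stale; only entries whose term does not exceed the current term may advance the
      // commit index here)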
      if (pEntry->term <= pSyncNode->raftStore.currentTerm) {
        // update commit index
        newCommitIndex = index;

        if (h) {
          taosLRUCacheRelease(pCache, h, false);
        } else {
          syncEntryDestroy(pEntry);
        }

        break;
      } else {
        sNTrace(pSyncNode, "can not commit due to term not equal, index:%" PRId64 ", term:%" PRIu64, pEntry->index,
                pEntry->term);
      }

      if (h) {
        taosLRUCacheRelease(pCache, h, false);
      } else {
        syncEntryDestroy(pEntry);
      }
    }
  }

  // advance commit index as large as possible
  SyncIndex walCommitVer = logStoreWalCommitVer(pSyncNode->pLogStore);
  if (walCommitVer > newCommitIndex) {
    newCommitIndex = walCommitVer;
  }

  // maybe execute fsm
  if (newCommitIndex > pSyncNode->commitIndex) {
    SyncIndex beginIndex = pSyncNode->commitIndex + 1;
    SyncIndex endIndex = newCommitIndex;

    // update commit index
    pSyncNode->commitIndex = newCommitIndex;

    // call back Wal
    pSyncNode->pLogStore->syncLogUpdateCommitIndex(pSyncNode->pLogStore, pSyncNode->commitIndex);

    // execute fsm
    if (pSyncNode != NULL && pSyncNode->pFsm != NULL) {
      int32_t code = syncNodeDoCommit(pSyncNode, beginIndex, endIndex, pSyncNode->state);
      if (code != 0) {
        sNError(pSyncNode, "advance commit index error, do commit begin:%" PRId64 ", end:%" PRId64, beginIndex,
                endIndex);
        return;
      }
    }
  }
}

bool syncAgreeIndex(SSyncNode* pSyncNode, SRaftId* pRaftId, SyncIndex index) {
  // I am leader, I agree

@@ -210,83 +68,7 @@ static inline int64_t syncNodeAbs64(int64_t a, int64_t b) {
  return c;
}

int32_t syncNodeDynamicQuorum(const SSyncNode* pSyncNode) {
  return pSyncNode->quorum;

#if 0
  int32_t quorum = 1;  // self

  int64_t timeNow = taosGetTimestampMs();
  for (int i = 0; i < pSyncNode->peersNum; ++i) {
    int64_t   peerStartTime = syncIndexMgrGetStartTime(pSyncNode->pNextIndex, &(pSyncNode->peersId)[i]);
    int64_t   peerRecvTime = syncIndexMgrGetRecvTime(pSyncNode->pNextIndex, &(pSyncNode->peersId)[i]);
    SyncIndex peerMatchIndex = syncIndexMgrGetIndex(pSyncNode->pMatchIndex, &(pSyncNode->peersId)[i]);

    int64_t recvTimeDiff = TABS(peerRecvTime - timeNow);
    int64_t startTimeDiff = TABS(peerStartTime - pSyncNode->startTime);
    int64_t logDiff = TABS(peerMatchIndex - syncNodeGetLastIndex(pSyncNode));

    /*
    int64_t recvTimeDiff = syncNodeAbs64(peerRecvTime, timeNow);
    int64_t startTimeDiff = syncNodeAbs64(peerStartTime, pSyncNode->startTime);
    int64_t logDiff = syncNodeAbs64(peerMatchIndex, syncNodeGetLastIndex(pSyncNode));
    */

    int32_t addQuorum = 0;

    if (recvTimeDiff < SYNC_MAX_RECV_TIME_RANGE_MS) {
      if (startTimeDiff < SYNC_MAX_START_TIME_RANGE_MS) {
        addQuorum = 1;
      } else {
        if (logDiff < SYNC_ADD_QUORUM_COUNT) {
          addQuorum = 1;
        } else {
          addQuorum = 0;
        }
      }
    } else {
      addQuorum = 0;
    }

    /*
    if (recvTimeDiff < SYNC_MAX_RECV_TIME_RANGE_MS) {
      addQuorum = 1;
    } else {
      addQuorum = 0;
    }

    if (startTimeDiff > SYNC_MAX_START_TIME_RANGE_MS) {
      addQuorum = 0;
    }
    */

    quorum += addQuorum;
  }

  ASSERT(quorum <= pSyncNode->replicaNum);

  if (quorum < pSyncNode->quorum) {
    quorum = pSyncNode->quorum;
  }

  return quorum;
#endif
}

/*
bool syncAgree(SSyncNode* pSyncNode, SyncIndex index) {
  int agreeCount = 0;
  for (int i = 0; i < pSyncNode->replicaNum; ++i) {
    if (syncAgreeIndex(pSyncNode, &(pSyncNode->replicasId[i]), index)) {
      ++agreeCount;
    }
    if (agreeCount >= syncNodeDynamicQuorum(pSyncNode)) {
      return true;
    }
  }
  return false;
}
*/
int32_t syncNodeDynamicQuorum(const SSyncNode* pSyncNode) { return pSyncNode->quorum; }

bool syncNodeAgreedUpon(SSyncNode* pNode, SyncIndex index) {
  int count = 0;