merge 3.0

Xiaoyu Wang 2023-02-09 09:44:29 +08:00
commit 9bc225ffda
71 changed files with 4781 additions and 3182 deletions

View File

@ -53,8 +53,69 @@ for p in ps:
In addition to Python's built-in multithreading and multiprocessing libraries, we can also use the third-party library gunicorn.
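As a minimal sketch (the `gunicorn.conf.py` values and the `app:app` module path below are illustrative assumptions, not part of these examples), a multi-process, multi-threaded gunicorn deployment could be configured like this:
```py
# gunicorn.conf.py -- illustrative values only; tune for your workload.
workers = 4               # number of worker processes
worker_class = "gthread"  # threaded worker type
threads = 2               # threads per worker process
```
The server would then be started with something like `gunicorn -c gunicorn.conf.py app:app`.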
### Examples
<details>
<summary>kafka_example_perform</summary>
`kafka_example_perform` is the entry point of the examples.
```py
{{#include docs/examples/python/kafka_example_perform.py}}
```
</details>
<details>
<summary>kafka_example_common</summary>
`kafka_example_common` is the common code of the examples.
```py
{{#include docs/examples/python/kafka_example_common.py}}
```
</details>
<details>
<summary>kafka_example_producer</summary>
`kafka_example_producer` is the producer, responsible for generating test data and sending it to Kafka.
```py
{{#include docs/examples/python/kafka_example_producer.py}}
```
</details>
<details>
<summary>kafka_example_consumer</summary>
`kafka_example_consumer` is the consumer, responsible for consuming data from Kafka and writing it to TDengine.
```py
{{#include docs/examples/python/kafka_example_consumer.py}}
```
</details>
### Execute the Python examples
<details>
<summary>Execute the Python examples</summary>
1. Install and start up Kafka.
2. Install Python 3 and pip.
3. Install `taospy` with pip (see the command below).
4. Install `kafka-python` with pip.
5. Run the example.
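For steps 3 and 4, the Python dependencies can be installed with pip, for example:
```
pip3 install taospy kafka-python
```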
The entry point of this example is `kafka_example_perform.py`. For the full list of parameters, run it with the `--help` option.
```
python3 kafka_example_perform.py --help
```
For example, the following command creates 100 sub-tables, inserts 20,000 rows into each table, sets the Kafka max poll to 100, and uses 1 process with 1 consumer thread per process:
```
python3 kafka_example_perform.py -table-count=100 -table-items=20000 -max-poll=100 -threads=1 -processes=1
```
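For reference, the producer in this example emits test data in one of two formats, selected with the `-message-type` parameter. The sketch below (derived from `kafka_example_producer.py`) shows one message of each kind:
```py
# One JSON message (message_type='json'), as built by kafka_example_producer.py:
json_message = {
    'ts': '2023-01-01 00:00:00.001',  # row timestamp
    'current': 3.41,
    'voltage': 105,
    'phase': 0.02027,
    'location': 'California.SanFrancisco',
    'groupId': 1,
    'table_name': 'd0',               # target sub-table
}
# One line message (message_type='line'), ready to append after `insert into `:
line_message = "d0 values('2023-01-01 00:00:00.001', 3.41, 105, 0.02027)"
```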
</details>

View File

@ -873,9 +873,9 @@ INTERP(expr)
- `INTERP` is used to get the value that matches the specified time slice from a column. If no such value exists, an interpolated value is returned based on the `FILL` parameter.
- The input data of `INTERP` is the value of the specified column, and a `where` clause can be used to filter the original data. If no `where` condition is specified, all original data is the input.
- `INTERP` must be used along with the `RANGE`, `EVERY`, and `FILL` keywords.
- The output time range of `INTERP` is specified by the `RANGE(timestamp1,timestamp2)` parameter, with timestamp1 < timestamp2. timestamp1 is the starting point of the output time range and must be specified. timestamp2 is the ending point of the output time range and must be specified.
- The number of rows in the result set of `INTERP` is determined by the `EVERY(time_unit)` parameter. Starting from timestamp1, one interpolation is performed for every interval of `time_unit`. The `time_unit` value must be an unquoted integer followed by a time unit: a (millisecond), s (second), m (minute), h (hour), d (day), or w (week). For example, `EVERY(500a)` will interpolate every 500 milliseconds.
- Interpolation is performed based on the `FILL` parameter. For more information about the FILL clause, see [FILL Clause](./distinguished/#fill-clause); a usage sketch follows this list.
- `INTERP` can only interpolate within a single timeline, so it must be used with `partition by tbname` when applied to a supertable.
- The pseudocolumn `_irowts` can be used along with `INTERP` to return the timestamps associated with the interpolation points (supported in version 3.0.1.4 and later).
- The pseudocolumn `_isfilled` can be used along with `INTERP` to indicate whether a result row is an original record or a data point generated by the interpolation algorithm (supported in version 3.0.2.3 and later).
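For illustration, a minimal `taospy` sketch (assuming the `power` database and `meters` supertable used by the Kafka example above) that interpolates `current` every 500 milliseconds over a one-minute range:
```py
import taos

# A minimal sketch; connection values follow the examples above.
conn = taos.connect(host="localhost", user="root", password="taosdata", db="power")

# Interpolate `current` every 500 ms across a one-minute range, filling gaps
# linearly; _irowts returns the timestamp of each interpolation point.
result = conn.query(
    "select _irowts, interp(current) from meters "
    "partition by tbname "
    "range('2023-01-01 00:00:00', '2023-01-01 00:01:00') "
    "every(500a) fill(linear)"
)
for row in result:
    print(row)
conn.close()
```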

View File

@ -1,241 +0,0 @@
#! encoding = utf-8
import json
import time
from json import JSONDecodeError
from typing import Callable
import logging
from concurrent.futures import ThreadPoolExecutor, Future
import taos
from kafka import KafkaConsumer
from kafka.consumer.fetcher import ConsumerRecord
class Consumer(object):
DEFAULT_CONFIGS = {
'kafka_brokers': 'localhost:9092',
'kafka_topic': 'python_kafka',
'kafka_group_id': 'taos',
'taos_host': 'localhost',
'taos_user': 'root',
'taos_password': 'taosdata',
'taos_database': 'power',
'taos_port': 6030,
'timezone': None,
'clean_after_testing': False,
        'batch_consume': True,
'batch_size': 1000,
'async_model': True,
'workers': 10,
'testing': False
}
LOCATIONS = ['California.SanFrancisco', 'California.LosAngles', 'California.SanDiego', 'California.SanJose',
'California.PaloAlto', 'California.Campbell', 'California.MountainView', 'California.Sunnyvale',
'California.SantaClara', 'California.Cupertino']
CREATE_DATABASE_SQL = 'create database if not exists {} keep 365 duration 10 buffer 16 wal_level 1'
USE_DATABASE_SQL = 'use {}'
DROP_TABLE_SQL = 'drop table if exists meters'
DROP_DATABASE_SQL = 'drop database if exists {}'
CREATE_STABLE_SQL = 'create stable meters (ts timestamp, current float, voltage int, phase float) ' \
'tags (location binary(64), groupId int)'
CREATE_TABLE_SQL = 'create table if not exists {} using meters tags (\'{}\', {})'
INSERT_SQL_HEADER = "insert into "
INSERT_PART_SQL = 'power.{} values (\'{}\', {}, {}, {})'
def __init__(self, **configs):
self.config: dict = self.DEFAULT_CONFIGS
self.config.update(configs)
if not self.config.get('testing'):
self.consumer = KafkaConsumer(
self.config.get('kafka_topic'), # topic
bootstrap_servers=self.config.get('kafka_brokers'),
group_id=self.config.get('kafka_group_id'),
)
self.taos = taos.connect(
host=self.config.get('taos_host'),
user=self.config.get('taos_user'),
password=self.config.get('taos_password'),
port=self.config.get('taos_port'),
timezone=self.config.get('timezone'),
)
if self.config.get('async_model'):
self.pool = ThreadPoolExecutor(max_workers=self.config.get('workers'))
self.tasks = []
        # tags-to-table mapping; key: {location}_{groupId}, value: table name
self.tag_table_mapping = {}
i = 0
for location in self.LOCATIONS:
for j in range(1, 11):
table_name = 'd{}'.format(i)
self._cache_table(location=location, group_id=j, table_name=table_name)
i += 1
def init_env(self):
# create database and table
self.taos.execute(self.DROP_DATABASE_SQL.format(self.config.get('taos_database')))
self.taos.execute(self.CREATE_DATABASE_SQL.format(self.config.get('taos_database')))
self.taos.execute(self.USE_DATABASE_SQL.format(self.config.get('taos_database')))
self.taos.execute(self.DROP_TABLE_SQL)
self.taos.execute(self.CREATE_STABLE_SQL)
for tags, table_name in self.tag_table_mapping.items():
location, group_id = _get_location_and_group(tags)
self.taos.execute(self.CREATE_TABLE_SQL.format(table_name, location, group_id))
def consume(self):
logging.warning('## start consumer topic-[%s]', self.config.get('kafka_topic'))
try:
            if self.config.get('batch_consume'):
self._run_batch(self._to_taos_batch)
else:
self._run(self._to_taos)
except KeyboardInterrupt:
logging.warning("## caught keyboard interrupt, stopping")
finally:
self.stop()
def stop(self):
# close consumer
if self.consumer is not None:
self.consumer.commit()
self.consumer.close()
# multi thread
if self.config.get('async_model'):
for task in self.tasks:
while not task.done():
pass
if self.pool is not None:
self.pool.shutdown()
# clean data
if self.config.get('clean_after_testing'):
self.taos.execute(self.DROP_TABLE_SQL)
self.taos.execute(self.DROP_DATABASE_SQL.format(self.config.get('taos_database')))
# close taos
if self.taos is not None:
self.taos.close()
def _run(self, f):
for message in self.consumer:
if self.config.get('async_model'):
                self.tasks.append(self.pool.submit(f, message))
else:
f(message)
def _run_batch(self, f):
while True:
messages = self.consumer.poll(timeout_ms=500, max_records=self.config.get('batch_size'))
if messages:
if self.config.get('async_model'):
                    self.tasks.append(self.pool.submit(f, messages.values()))
else:
f(list(messages.values()))
if not messages:
time.sleep(0.1)
    def _to_taos(self, message: ConsumerRecord) -> bool:
        values = self._build_sql(message.value)
        if len(values) == 0:  # decode error, skip
            return True
        sql = self.INSERT_SQL_HEADER + values
logging.info('## insert sql %s', sql)
return self.taos.execute(sql=sql) == 1
def _to_taos_batch(self, messages):
sql = self._build_sql_batch(messages=messages)
if len(sql) == 0: # decode error, skip
return
self.taos.execute(sql=sql)
def _build_sql(self, msg_value: str) -> str:
try:
data = json.loads(msg_value)
except JSONDecodeError as e:
            logging.error('## decode message [%s] error: %s', msg_value, e)
return ''
location = data.get('location')
group_id = data.get('groupId')
ts = data.get('ts')
current = data.get('current')
voltage = data.get('voltage')
phase = data.get('phase')
table_name = self._get_table_name(location=location, group_id=group_id)
return self.INSERT_PART_SQL.format(table_name, ts, current, voltage, phase)
def _build_sql_batch(self, messages) -> str:
sql_list = []
for partition_messages in messages:
for message in partition_messages:
sql_list.append(self._build_sql(message.value))
return self.INSERT_SQL_HEADER + ' '.join(sql_list)
def _cache_table(self, location: str, group_id: int, table_name: str):
self.tag_table_mapping[_tag_table_mapping_key(location=location, group_id=group_id)] = table_name
def _get_table_name(self, location: str, group_id: int) -> str:
return self.tag_table_mapping.get(_tag_table_mapping_key(location=location, group_id=group_id))
def _tag_table_mapping_key(location: str, group_id: int):
return '{}_{}'.format(location, group_id)
def _get_location_and_group(key: str) -> (str, int):
fields = key.split('_')
    return fields[0], int(fields[1])
def test_to_taos(consumer: Consumer):
msg = {
'location': 'California.SanFrancisco',
'groupId': 1,
'ts': '2022-12-06 15:13:38.643',
'current': 3.41,
'voltage': 105,
'phase': 0.02027,
}
record = ConsumerRecord(checksum=None, headers=None, offset=1, key=None, value=json.dumps(msg), partition=1,
topic='test', serialized_key_size=None, serialized_header_size=None,
serialized_value_size=None, timestamp=time.time(), timestamp_type=None)
assert consumer._to_taos(message=record)
def test_to_taos_batch(consumer: Consumer):
records = [
[
ConsumerRecord(checksum=None, headers=None, offset=1, key=None,
value=json.dumps({'location': 'California.SanFrancisco',
'groupId': 1,
'ts': '2022-12-06 15:13:38.643',
'current': 3.41,
'voltage': 105,
'phase': 0.02027, }),
partition=1, topic='test', serialized_key_size=None, serialized_header_size=None,
serialized_value_size=None, timestamp=time.time(), timestamp_type=None),
ConsumerRecord(checksum=None, headers=None, offset=1, key=None,
value=json.dumps({'location': 'California.LosAngles',
'groupId': 2,
'ts': '2022-12-06 15:13:39.643',
'current': 3.41,
'voltage': 102,
'phase': 0.02027, }),
partition=1, topic='test', serialized_key_size=None, serialized_header_size=None,
serialized_value_size=None, timestamp=time.time(), timestamp_type=None),
]
]
consumer._to_taos_batch(messages=records)
if __name__ == '__main__':
consumer = Consumer(async_model=True, testing=True)
# init env
consumer.init_env()
# consumer.consume()
# test build sql
# test build sql batch
test_to_taos(consumer)
test_to_taos_batch(consumer)

View File

@ -0,0 +1,65 @@
#! encoding = utf-8
import taos
LOCATIONS = ['California.SanFrancisco', 'California.LosAngles', 'California.SanDiego', 'California.SanJose',
'California.PaloAlto', 'California.Campbell', 'California.MountainView', 'California.Sunnyvale',
'California.SantaClara', 'California.Cupertino']
CREATE_DATABASE_SQL = 'create database if not exists {} keep 365 duration 10 buffer 16 wal_level 1'
USE_DATABASE_SQL = 'use {}'
DROP_TABLE_SQL = 'drop table if exists meters'
DROP_DATABASE_SQL = 'drop database if exists {}'
CREATE_STABLE_SQL = 'create stable meters (ts timestamp, current float, voltage int, phase float) tags ' \
'(location binary(64), groupId int)'
CREATE_TABLE_SQL = 'create table if not exists {} using meters tags (\'{}\', {})'
def create_database_and_tables(host, port, user, password, db, table_count):
tags_tables = _init_tags_table_names(table_count=table_count)
conn = taos.connect(host=host, port=port, user=user, password=password)
conn.execute(DROP_DATABASE_SQL.format(db))
conn.execute(CREATE_DATABASE_SQL.format(db))
conn.execute(USE_DATABASE_SQL.format(db))
conn.execute(DROP_TABLE_SQL)
conn.execute(CREATE_STABLE_SQL)
for tags in tags_tables:
location, group_id = _get_location_and_group(tags)
tables = tags_tables[tags]
for table_name in tables:
conn.execute(CREATE_TABLE_SQL.format(table_name, location, group_id))
conn.close()
def clean(host, port, user, password, db):
conn = taos.connect(host=host, port=port, user=user, password=password)
conn.execute(DROP_DATABASE_SQL.format(db))
conn.close()
def _init_tags_table_names(table_count):
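    # Spread table_count tables evenly over the 10 locations; group_id advances by one
    # on each full pass over LOCATIONS and wraps so it always stays in the range 1..10.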
tags_table_names = {}
group_id = 0
for i in range(table_count):
table_name = 'd{}'.format(i)
location_idx = i % len(LOCATIONS)
location = LOCATIONS[location_idx]
if location_idx == 0:
group_id += 1
if group_id > 10:
group_id -= 10
key = _tag_table_mapping_key(location=location, group_id=group_id)
if key not in tags_table_names:
tags_table_names[key] = []
tags_table_names[key].append(table_name)
return tags_table_names
def _tag_table_mapping_key(location, group_id):
return '{}_{}'.format(location, group_id)
def _get_location_and_group(key):
fields = key.split('_')
return fields[0], fields[1]

View File

@ -0,0 +1,231 @@
#! encoding = utf-8
import json
import logging
import time
from concurrent.futures import ThreadPoolExecutor, Future
from json import JSONDecodeError
from typing import Callable
import taos
from kafka import KafkaConsumer
from kafka.consumer.fetcher import ConsumerRecord
import kafka_example_common as common
class Consumer(object):
DEFAULT_CONFIGS = {
'kafka_brokers': 'localhost:9092', # kafka broker
'kafka_topic': 'tdengine_kafka_practices',
'kafka_group_id': 'taos',
'taos_host': 'localhost', # TDengine host
'taos_port': 6030, # TDengine port
'taos_user': 'root', # TDengine user name
'taos_password': 'taosdata', # TDengine password
'taos_database': 'power', # TDengine database
'message_type': 'json', # message format, 'json' or 'line'
'clean_after_testing': False, # if drop database after testing
'max_poll': 1000, # poll size for batch mode
'workers': 10, # thread count for multi-threading
'testing': False
}
INSERT_SQL_HEADER = "insert into "
INSERT_PART_SQL = '{} values (\'{}\', {}, {}, {})'
def __init__(self, **configs):
self.config = self.DEFAULT_CONFIGS
self.config.update(configs)
self.consumer = None
if not self.config.get('testing'):
self.consumer = KafkaConsumer(
self.config.get('kafka_topic'),
bootstrap_servers=self.config.get('kafka_brokers'),
group_id=self.config.get('kafka_group_id'),
)
self.conns = taos.connect(
host=self.config.get('taos_host'),
port=self.config.get('taos_port'),
user=self.config.get('taos_user'),
password=self.config.get('taos_password'),
db=self.config.get('taos_database'),
)
if self.config.get('workers') > 1:
self.pool = ThreadPoolExecutor(max_workers=self.config.get('workers'))
self.tasks = []
def consume(self):
"""
consume data from kafka and deal. Base on `message_type`, `bath_consume`, `insert_by_table`,
there are several deal function.
:return:
"""
self.conns.execute(common.USE_DATABASE_SQL.format(self.config.get('taos_database')))
try:
if self.config.get('message_type') == 'line': # line
self._run(self._line_to_taos)
if self.config.get('message_type') == 'json': # json
self._run(self._json_to_taos)
except KeyboardInterrupt:
logging.warning("## caught keyboard interrupt, stopping")
finally:
self.stop()
def stop(self):
"""
stop consuming
:return:
"""
# close consumer
if self.consumer is not None:
self.consumer.commit()
self.consumer.close()
# multi thread
if self.config.get('workers') > 1:
if self.pool is not None:
self.pool.shutdown()
for task in self.tasks:
while not task.done():
time.sleep(0.01)
# clean data
if self.config.get('clean_after_testing'):
self.conns.execute(common.DROP_TABLE_SQL)
self.conns.execute(common.DROP_DATABASE_SQL.format(self.config.get('taos_database')))
# close taos
if self.conns is not None:
self.conns.close()
def _run(self, f):
"""
run in batch consuming mode
:param f:
:return:
"""
i = 0 # just for test.
while True:
messages = self.consumer.poll(timeout_ms=100, max_records=self.config.get('max_poll'))
if messages:
if self.config.get('workers') > 1:
                    self.tasks.append(self.pool.submit(f, messages.values()))
else:
f(list(messages.values()))
if not messages:
i += 1 # just for test.
time.sleep(0.1)
if i > 3: # just for test.
logging.warning('## test over.') # just for test.
return # just for test.
def _json_to_taos(self, messages):
"""
convert a batch of json data to sql, and insert into TDengine
:param messages:
:return:
"""
sql = self._build_sql_from_json(messages=messages)
self.conns.execute(sql=sql)
def _line_to_taos(self, messages):
"""
convert a batch of lines data to sql, and insert into TDengine
:param messages:
:return:
"""
lines = []
for partition_messages in messages:
for message in partition_messages:
lines.append(message.value.decode())
sql = self.INSERT_SQL_HEADER + ' '.join(lines)
self.conns.execute(sql=sql)
def _build_single_sql_from_json(self, msg_value):
try:
data = json.loads(msg_value)
except JSONDecodeError as e:
            logging.error('## decode message [%s] error: %s', msg_value, e)
return ''
# location = data.get('location')
# group_id = data.get('groupId')
ts = data.get('ts')
current = data.get('current')
voltage = data.get('voltage')
phase = data.get('phase')
table_name = data.get('table_name')
return self.INSERT_PART_SQL.format(table_name, ts, current, voltage, phase)
def _build_sql_from_json(self, messages):
sql_list = []
for partition_messages in messages:
for message in partition_messages:
sql_list.append(self._build_single_sql_from_json(message.value))
return self.INSERT_SQL_HEADER + ' '.join(sql_list)
def test_json_to_taos(consumer: Consumer):
records = [
[
ConsumerRecord(checksum=None, headers=None, offset=1, key=None,
value=json.dumps({'table_name': 'd0',
'ts': '2022-12-06 15:13:38.643',
'current': 3.41,
'voltage': 105,
'phase': 0.02027, }),
partition=1, topic='test', serialized_key_size=None, serialized_header_size=None,
serialized_value_size=None, timestamp=time.time(), timestamp_type=None),
ConsumerRecord(checksum=None, headers=None, offset=1, key=None,
value=json.dumps({'table_name': 'd1',
'ts': '2022-12-06 15:13:39.643',
'current': 3.41,
'voltage': 102,
'phase': 0.02027, }),
partition=1, topic='test', serialized_key_size=None, serialized_header_size=None,
serialized_value_size=None, timestamp=time.time(), timestamp_type=None),
]
]
consumer._json_to_taos(messages=records)
def test_line_to_taos(consumer: Consumer):
records = [
[
ConsumerRecord(checksum=None, headers=None, offset=1, key=None,
value="d0 values('2023-01-01 00:00:00.001', 3.49, 109, 0.02737)".encode('utf-8'),
partition=1, topic='test', serialized_key_size=None, serialized_header_size=None,
serialized_value_size=None, timestamp=time.time(), timestamp_type=None),
ConsumerRecord(checksum=None, headers=None, offset=1, key=None,
value="d1 values('2023-01-01 00:00:00.002', 6.19, 112, 0.09171)".encode('utf-8'),
partition=1, topic='test', serialized_key_size=None, serialized_header_size=None,
serialized_value_size=None, timestamp=time.time(), timestamp_type=None),
]
]
consumer._line_to_taos(messages=records)
def consume(kafka_brokers, kafka_topic, kafka_group_id, taos_host, taos_port, taos_user,
taos_password, taos_database, message_type, max_poll, workers):
c = Consumer(kafka_brokers=kafka_brokers, kafka_topic=kafka_topic, kafka_group_id=kafka_group_id,
taos_host=taos_host, taos_port=taos_port, taos_user=taos_user, taos_password=taos_password,
taos_database=taos_database, message_type=message_type, max_poll=max_poll, workers=workers)
c.consume()
if __name__ == '__main__':
consumer = Consumer(testing=True)
common.create_database_and_tables(host='localhost', port=6030, user='root', password='taosdata', db='py_kafka_test',
table_count=10)
consumer.conns.execute(common.USE_DATABASE_SQL.format('py_kafka_test'))
test_json_to_taos(consumer)
test_line_to_taos(consumer)
common.clean(host='localhost', port=6030, user='root', password='taosdata', db='py_kafka_test')

View File

@ -0,0 +1,103 @@
#! encoding=utf-8
import argparse
import logging
import multiprocessing
import time
from multiprocessing import pool
import kafka_example_common as common
import kafka_example_consumer as consumer
import kafka_example_producer as producer
if __name__ == '__main__':
parser = argparse.ArgumentParser()
    parser.add_argument('-kafka-broker', type=str, default='localhost:9092',
                        help='kafka broker host. default is `localhost:9092`')
parser.add_argument('-kafka-topic', type=str, default='tdengine-kafka-practices',
help='kafka topic. default is `tdengine-kafka-practices`')
parser.add_argument('-kafka-group', type=str, default='kafka_practices',
help='kafka consumer group. default is `kafka_practices`')
parser.add_argument('-taos-host', type=str, default='localhost',
help='TDengine host. default is `localhost`')
parser.add_argument('-taos-port', type=int, default=6030, help='TDengine port. default is 6030')
parser.add_argument('-taos-user', type=str, default='root', help='TDengine username, default is `root`')
parser.add_argument('-taos-password', type=str, default='taosdata', help='TDengine password, default is `taosdata`')
parser.add_argument('-taos-db', type=str, default='tdengine_kafka_practices',
help='TDengine db name, default is `tdengine_kafka_practices`')
parser.add_argument('-table-count', type=int, default=100, help='TDengine sub-table count, default is 100')
    parser.add_argument('-table-items', type=int, default=1000, help='rows per sub-table, default is 1000')
parser.add_argument('-message-type', type=str, default='line',
help='kafka message type. `line` or `json`. default is `line`')
parser.add_argument('-max-poll', type=int, default=1000, help='max poll for kafka consumer')
    parser.add_argument('-threads', type=int, default=10, help='thread count for processing messages')
parser.add_argument('-processes', type=int, default=1, help='process count')
args = parser.parse_args()
total = args.table_count * args.table_items
logging.warning("## start to prepare testing data...")
prepare_data_start = time.time()
producer.produce_total(100, args.kafka_broker, args.kafka_topic, args.message_type, total, args.table_count)
prepare_data_end = time.time()
logging.warning("## prepare testing data finished! spend-[%s]", prepare_data_end - prepare_data_start)
logging.warning("## start to create database and tables ...")
create_db_start = time.time()
# create database and table
common.create_database_and_tables(host=args.taos_host, port=args.taos_port, user=args.taos_user,
password=args.taos_password, db=args.taos_db, table_count=args.table_count)
create_db_end = time.time()
logging.warning("## create database and tables finished! spend [%s]", create_db_end - create_db_start)
processes = args.processes
logging.warning("## start to consume data and insert into TDengine...")
consume_start = time.time()
if processes > 1: # multiprocess
multiprocessing.set_start_method("spawn")
pool = pool.Pool(processes)
consume_start = time.time()
for _ in range(processes):
pool.apply_async(func=consumer.consume, args=(
args.kafka_broker, args.kafka_topic, args.kafka_group, args.taos_host, args.taos_port, args.taos_user,
args.taos_password, args.taos_db, args.message_type, args.max_poll, args.threads))
pool.close()
pool.join()
else:
consume_start = time.time()
consumer.consume(kafka_brokers=args.kafka_broker, kafka_topic=args.kafka_topic, kafka_group_id=args.kafka_group,
taos_host=args.taos_host, taos_port=args.taos_port, taos_user=args.taos_user,
taos_password=args.taos_password, taos_database=args.taos_db, message_type=args.message_type,
max_poll=args.max_poll, workers=args.threads)
consume_end = time.time()
logging.warning("## consume data and insert into TDengine over! spend-[%s]", consume_end - consume_start)
# print report
logging.warning(
"\n#######################\n"
" Prepare data \n"
"#######################\n"
"# data_type # %s \n"
"# total # %s \n"
"# spend # %s s\n"
"#######################\n"
" Create database \n"
"#######################\n"
"# stable # 1 \n"
"# sub-table # 100 \n"
"# spend # %s s \n"
"#######################\n"
" Consume \n"
"#######################\n"
"# data_type # %s \n"
"# threads # %s \n"
"# processes # %s \n"
"# total_count # %s \n"
"# spend # %s s\n"
"# per_second # %s \n"
"#######################\n",
        args.message_type, total, prepare_data_end - prepare_data_start, args.table_count, create_db_end - create_db_start,
args.message_type, args.threads, processes, total, consume_end - consume_start,
total / (consume_end - consume_start))

View File

@ -0,0 +1,97 @@
#! encoding = utf-8
import json
import random
import threading
from concurrent.futures import ThreadPoolExecutor, Future
from datetime import datetime
from kafka import KafkaProducer
locations = ['California.SanFrancisco', 'California.LosAngles', 'California.SanDiego', 'California.SanJose',
'California.PaloAlto', 'California.Campbell', 'California.MountainView', 'California.Sunnyvale',
'California.SantaClara', 'California.Cupertino']
producers: list[KafkaProducer] = []
lock = threading.Lock()
start = 1640966400
def produce_total(workers, broker, topic, message_type, total, table_count):
if len(producers) == 0:
lock.acquire()
if len(producers) == 0:
_init_kafka_producers(broker=broker, count=10)
lock.release()
pool = ThreadPoolExecutor(max_workers=workers)
futures = []
for _ in range(0, workers):
futures.append(pool.submit(_produce_total, topic, message_type, int(total / workers), table_count))
pool.shutdown()
for f in futures:
f.result()
_close_kafka_producers()
def _produce_total(topic, message_type, total, table_count):
producer = _get_kafka_producer()
for _ in range(total):
        message = _get_fake_data(message_type=message_type, table_count=table_count)
producer.send(topic=topic, value=message.encode(encoding='utf-8'))
def _init_kafka_producers(broker, count):
for _ in range(count):
p = KafkaProducer(bootstrap_servers=broker, batch_size=64 * 1024, linger_ms=300, acks=0)
producers.append(p)
def _close_kafka_producers():
for p in producers:
p.close()
def _get_kafka_producer():
return producers[random.randint(0, len(producers) - 1)]
def _get_fake_data(table_count, message_type='json'):
if message_type == 'json':
return _get_json_message(table_count=table_count)
if message_type == 'line':
return _get_line_message(table_count=table_count)
return ''
def _get_json_message(table_count):
return json.dumps({
'ts': _get_timestamp(),
'current': random.randint(0, 1000) / 100,
'voltage': random.randint(105, 115),
'phase': random.randint(0, 32000) / 100000,
'location': random.choice(locations),
'groupId': random.randint(1, 10),
'table_name': _random_table_name(table_count)
})
def _get_line_message(table_count):
return "{} values('{}', {}, {}, {})".format(
_random_table_name(table_count), # table
_get_timestamp(), # ts
random.randint(0, 1000) / 100, # current
random.randint(105, 115), # voltage
random.randint(0, 32000) / 100000, # phase
)
def _random_table_name(table_count):
return 'd{}'.format(random.randint(0, table_count - 1))
def _get_timestamp():
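    # Advance the shared timestamp by 1 ms per call, under the lock, so that
    # generated rows get unique, strictly increasing timestamps.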
global start
lock.acquire(blocking=True)
start += 0.001
lock.release()
return datetime.fromtimestamp(start).strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]

View File

@ -55,6 +55,70 @@ for p in ps:
### Complete examples
<details>
<summary>kafka_example_perform</summary>
`kafka_example_perform` is the entry point of the example program.
```py
{{#include docs/examples/python/kafka_example_perform.py}}
```
</details>
<details>
<summary>kafka_example_common</summary>
`kafka_example_common` is the common code of the example program.
```py
{{#include docs/examples/python/kafka_example_common.py}}
```
</details>
<details>
<summary>kafka_example_producer</summary>
`kafka_example_producer` is the producer code of the example program, responsible for generating test data and sending it to Kafka.
```py
{{#include docs/examples/python/kafka_example_producer.py}}
```
</details>
<details>
<summary>kafka_example_consumer</summary>
`kafka_example_consumer` is the consumer code of the example program, responsible for consuming data from Kafka and writing it to TDengine.
```py
{{#include docs/examples/python/kafka_example_consumer.py}}
```
</details>
### Execution steps
<details>
<summary>Run the Python example program</summary>
1. Install and start up Kafka
2. Prepare the Python environment
- Install Python 3
- Install taospy
- Install kafka-python
3. Run the example program
The entry point of the program is `kafka_example_perform.py`. To get all of the program's execution parameters, run the help command.
```
python3 kafka_example_perform.py --help
```
The following command creates 100 sub-tables with 20,000 rows per sub-table, a Kafka max poll of 100, one process, and one worker thread per process:
```
python3 kafka_example_perform.py -table-count=100 -table-items=20000 -max-poll=100 -threads=1 -processes=1
```
</details>

View File

@ -875,9 +875,9 @@ INTERP(expr)
- INTERP is used to obtain the record value of a specified column at a specified point in time. If no row satisfies the condition at that point, an interpolated value is produced according to the FILL setting.
- The input of INTERP is the data of the specified column; a where clause can be used to filter the original data. If no filter condition is specified, the input is all of the data.
- INTERP must be used together with the RANGE, EVERY, and FILL keywords.
- The output time range of INTERP is specified by RANGE(timestamp1,timestamp2), which must satisfy timestamp1 < timestamp2. timestamp1 (required) is the start of the output time range: if timestamp1 meets the interpolation condition, timestamp1 is the first output record. timestamp2 (required) is the end of the output time range: the timestamp of the last output record cannot be greater than timestamp2.
- INTERP determines the number of rows in the output time range via EVERY(time_unit): starting from timestamp1, interpolation is performed once per fixed interval (the time_unit value). The available time units for time_unit are: 1a (millisecond), 1s (second), 1m (minute), 1h (hour), 1d (day), and 1w (week). For example, EVERY(500a) interpolates the specified data every 500 milliseconds.
- INTERP decides how to interpolate at each qualifying time point according to the FILL clause. For how to use the FILL clause, see [FILL Clause](./distinguished/#fill-子句).
- INTERP can only interpolate within a single timeline, so when applied to a supertable it must be used together with partition by tbname.
- INTERP can be used with the pseudocolumn _irowts to return the timestamp corresponding to each interpolation point (supported in version 3.0.1.4 and later).
- INTERP can be used with the pseudocolumn _isfilled to indicate whether a returned result is an original record or data generated by the interpolation algorithm (supported in version 3.0.2.3 and later).

View File

@ -10,4 +10,4 @@
| 6 | taosdemo | This is an internal tool for testing our JDBC-JNI, JDBC-RESTful, RESTful interfaces |
more detail: https://docs.taosdata.com/connector/java/

View File

@ -58,7 +58,7 @@ extern int32_t tMsgDict[];
#define TMSG_INFO(TYPE) \
((TYPE) < TDMT_DND_MAX_MSG || (TYPE) < TDMT_MND_MAX_MSG || (TYPE) < TDMT_VND_MAX_MSG || (TYPE) < TDMT_SCH_MAX_MSG || \
(TYPE) < TDMT_STREAM_MAX_MSG || (TYPE) < TDMT_MON_MAX_MSG || (TYPE) < TDMT_SYNC_MAX_MSG) || \
(TYPE) < TDMT_VND_STREAM_MSG || (TYPE) < TDMT_VND_TMQ_MSG \
(TYPE) < TDMT_VND_STREAM_MSG || (TYPE) < TDMT_VND_TMQ_MSG || (TYPE) < TDMT_VND_TMQ_MAX_MSG \
? tMsgInfo[tMsgDict[TMSG_SEG_CODE(TYPE)] + TMSG_SEG_SEQ(TYPE)] \
: 0
@ -145,12 +145,14 @@ typedef enum _mgmt_table {
#define TSDB_ALTER_TABLE_UPDATE_OPTIONS 9
#define TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME 10
#define TSDB_FILL_NONE 0
#define TSDB_FILL_NULL 1
#define TSDB_FILL_SET_VALUE 2
#define TSDB_FILL_LINEAR 3
#define TSDB_FILL_PREV 4
#define TSDB_FILL_NEXT 5
#define TSDB_FILL_NONE 0
#define TSDB_FILL_NULL 1
#define TSDB_FILL_NULL_F 2
#define TSDB_FILL_SET_VALUE 3
#define TSDB_FILL_SET_VALUE_F 4
#define TSDB_FILL_LINEAR 5
#define TSDB_FILL_PREV 6
#define TSDB_FILL_NEXT 7
#define TSDB_ALTER_USER_PASSWD 0x1
#define TSDB_ALTER_USER_SUPERUSER 0x2

View File

@ -274,73 +274,75 @@
#define TK_SLIDING 256
#define TK_FILL 257
#define TK_VALUE 258
#define TK_NONE 259
#define TK_PREV 260
#define TK_LINEAR 261
#define TK_NEXT 262
#define TK_HAVING 263
#define TK_RANGE 264
#define TK_EVERY 265
#define TK_ORDER 266
#define TK_SLIMIT 267
#define TK_SOFFSET 268
#define TK_LIMIT 269
#define TK_OFFSET 270
#define TK_ASC 271
#define TK_NULLS 272
#define TK_ABORT 273
#define TK_AFTER 274
#define TK_ATTACH 275
#define TK_BEFORE 276
#define TK_BEGIN 277
#define TK_BITAND 278
#define TK_BITNOT 279
#define TK_BITOR 280
#define TK_BLOCKS 281
#define TK_CHANGE 282
#define TK_COMMA 283
#define TK_CONCAT 284
#define TK_CONFLICT 285
#define TK_COPY 286
#define TK_DEFERRED 287
#define TK_DELIMITERS 288
#define TK_DETACH 289
#define TK_DIVIDE 290
#define TK_DOT 291
#define TK_EACH 292
#define TK_FAIL 293
#define TK_FILE 294
#define TK_FOR 295
#define TK_GLOB 296
#define TK_ID 297
#define TK_IMMEDIATE 298
#define TK_IMPORT 299
#define TK_INITIALLY 300
#define TK_INSTEAD 301
#define TK_ISNULL 302
#define TK_KEY 303
#define TK_MODULES 304
#define TK_NK_BITNOT 305
#define TK_NK_SEMI 306
#define TK_NOTNULL 307
#define TK_OF 308
#define TK_PLUS 309
#define TK_PRIVILEGE 310
#define TK_RAISE 311
#define TK_REPLACE 312
#define TK_RESTRICT 313
#define TK_ROW 314
#define TK_SEMI 315
#define TK_STAR 316
#define TK_STATEMENT 317
#define TK_STRICT 318
#define TK_STRING 319
#define TK_TIMES 320
#define TK_UPDATE 321
#define TK_VALUES 322
#define TK_VARIABLE 323
#define TK_VIEW 324
#define TK_WAL 325
#define TK_VALUE_F 259
#define TK_NONE 260
#define TK_PREV 261
#define TK_NULL_F 262
#define TK_LINEAR 263
#define TK_NEXT 264
#define TK_HAVING 265
#define TK_RANGE 266
#define TK_EVERY 267
#define TK_ORDER 268
#define TK_SLIMIT 269
#define TK_SOFFSET 270
#define TK_LIMIT 271
#define TK_OFFSET 272
#define TK_ASC 273
#define TK_NULLS 274
#define TK_ABORT 275
#define TK_AFTER 276
#define TK_ATTACH 277
#define TK_BEFORE 278
#define TK_BEGIN 279
#define TK_BITAND 280
#define TK_BITNOT 281
#define TK_BITOR 282
#define TK_BLOCKS 283
#define TK_CHANGE 284
#define TK_COMMA 285
#define TK_CONCAT 286
#define TK_CONFLICT 287
#define TK_COPY 288
#define TK_DEFERRED 289
#define TK_DELIMITERS 290
#define TK_DETACH 291
#define TK_DIVIDE 292
#define TK_DOT 293
#define TK_EACH 294
#define TK_FAIL 295
#define TK_FILE 296
#define TK_FOR 297
#define TK_GLOB 298
#define TK_ID 299
#define TK_IMMEDIATE 300
#define TK_IMPORT 301
#define TK_INITIALLY 302
#define TK_INSTEAD 303
#define TK_ISNULL 304
#define TK_KEY 305
#define TK_MODULES 306
#define TK_NK_BITNOT 307
#define TK_NK_SEMI 308
#define TK_NOTNULL 309
#define TK_OF 310
#define TK_PLUS 311
#define TK_PRIVILEGE 312
#define TK_RAISE 313
#define TK_REPLACE 314
#define TK_RESTRICT 315
#define TK_ROW 316
#define TK_SEMI 317
#define TK_STAR 318
#define TK_STATEMENT 319
#define TK_STRICT 320
#define TK_STRING 321
#define TK_TIMES 322
#define TK_UPDATE 323
#define TK_VALUES 324
#define TK_VARIABLE 325
#define TK_VIEW 326
#define TK_WAL 327
#define TK_NK_SPACE 600
#define TK_NK_COMMENT 601

View File

@ -219,6 +219,7 @@ bool fmIsKeepOrderFunc(int32_t funcId);
bool fmIsCumulativeFunc(int32_t funcId);
bool fmIsInterpPseudoColumnFunc(int32_t funcId);
bool fmIsGroupKeyFunc(int32_t funcId);
bool fmIsBlockDistFunc(int32_t funcId);
void getLastCacheDataType(SDataType* pType);

View File

@ -233,8 +233,10 @@ typedef struct SEventWindowNode {
typedef enum EFillMode {
FILL_MODE_NONE = 1,
FILL_MODE_VALUE,
FILL_MODE_VALUE_F,
FILL_MODE_PREV,
FILL_MODE_NULL,
FILL_MODE_NULL_F,
FILL_MODE_LINEAR,
FILL_MODE_NEXT
} EFillMode;

View File

@ -357,6 +357,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_MND_STREAM_MUST_BE_DELETED TAOS_DEF_ERROR_CODE(0, 0x03F3)
#define TSDB_CODE_MND_STREAM_TASK_DROPPED TAOS_DEF_ERROR_CODE(0, 0x03F4)
#define TSDB_CODE_MND_MULTI_REPLICA_SOURCE_DB TAOS_DEF_ERROR_CODE(0, 0x03F5)
#define TSDB_CODE_MND_TOO_MANY_STREAMS TAOS_DEF_ERROR_CODE(0, 0x03F6)
// mnode-sma
#define TSDB_CODE_MND_SMA_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0480)

View File

@ -60,6 +60,6 @@ target_link_libraries(
PRIVATE os util common transport nodes parser command planner catalog scheduler function qcom
)
if(${BUILD_TEST})
#if(${BUILD_TEST})
ADD_SUBDIRECTORY(test)
endif(${BUILD_TEST})
#endif(${BUILD_TEST})

View File

@ -138,6 +138,12 @@ STscObj* taos_connect_internal(const char* ip, const char* user, const char* pas
p->mgmtEp = epSet;
taosThreadMutexInit(&p->qnodeMutex, NULL);
p->pTransporter = openTransporter(user, secretEncrypt, tsNumOfCores / 2);
if (p->pTransporter == NULL) {
taosThreadMutexUnlock(&appInfo.mutex);
taosMemoryFreeClear(key);
taosMemoryFree(p);
return NULL;
}
p->pAppHbMgr = appHbMgrInit(p, key);
if (NULL == p->pAppHbMgr) {
destroyAppInst(p);
@ -1386,8 +1392,6 @@ int32_t doProcessMsgFromServer(void* param) {
tscError("0x%" PRIx64 " rsp msg:%s, code:%s rspLen:%d, elapsed time:%d ms, reqId:0x%" PRIx64, pRequest->self,
TMSG_INFO(pMsg->msgType), tstrerror(pMsg->code), pMsg->contLen, elapsed / 1000, pRequest->requestId);
}
taosReleaseRef(clientReqRefPool, pSendInfo->requestObjRefId);
}
}
@ -1407,6 +1411,11 @@ int32_t doProcessMsgFromServer(void* param) {
}
pSendInfo->fp(pSendInfo->param, &buf, pMsg->code);
if (pTscObj) {
taosReleaseRef(clientReqRefPool, pSendInfo->requestObjRefId);
}
rpcFreeCont(pMsg->pCont);
destroySendMsgInfo(pSendInfo);
@ -1444,6 +1453,7 @@ void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) {
tscError("failed to sched msg to tsc, tsc ready to quit");
rpcFreeCont(pMsg->pCont);
taosMemoryFree(arg->pEpset);
destroySendMsgInfo(pMsg->info.ahandle);
taosMemoryFree(arg);
}
}

View File

@ -112,7 +112,7 @@ void createNewTable(TAOS* pConn, int32_t index) {
}
taos_free_result(pRes);
for(int32_t i = 0; i < 20; i += 20) {
for(int32_t i = 0; i < 2000; i += 20) {
char sql[1024] = {0};
sprintf(sql,
"insert into tu%d values(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)"
@ -692,6 +692,7 @@ TEST(testCase, insert_test) {
taos_free_result(pRes);
taos_close(pConn);
}
#endif
TEST(testCase, projection_query_tables) {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
@ -725,7 +726,7 @@ TEST(testCase, projection_query_tables) {
}
taos_free_result(pRes);
for (int32_t i = 0; i < 200000; ++i) {
for (int32_t i = 0; i < 2; ++i) {
printf("create table :%d\n", i);
createNewTable(pConn, i);
}
@ -751,6 +752,7 @@ TEST(testCase, projection_query_tables) {
taos_close(pConn);
}
#if 0
TEST(testCase, tsbs_perf_test) {
TdThread qid[20] = {0};
@ -760,8 +762,6 @@ TEST(testCase, tsbs_perf_test) {
getchar();
}
#endif
TEST(testCase, projection_query_stables) {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
ASSERT_NE(pConn, nullptr);
@ -790,7 +790,6 @@ TEST(testCase, projection_query_stables) {
taos_close(pConn);
}
#if 0
TEST(testCase, agg_query_tables) {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
ASSERT_NE(pConn, nullptr);
@ -831,7 +830,7 @@ TEST(testCase, async_api_test) {
ASSERT_NE(pConn, nullptr);
taos_query(pConn, "use abc1");
#if 0
TAOS_RES* pRes = taos_query(pConn, "insert into tu(ts) values('2022-02-27 12:12:61')");
if (taos_errno(pRes) != 0) {
printf("failed, reason:%s\n", taos_errstr(pRes));
@ -854,7 +853,6 @@ TEST(testCase, async_api_test) {
printf("%s\n", str);
memset(str, 0, sizeof(str));
}
#endif
taos_query_a(pConn, "select count(*) from tu", queryCallback, pConn);
getchar();

View File

@ -49,7 +49,7 @@ static void mmProcessRpcMsg(SQueueInfo *pInfo, SRpcMsg *pMsg) {
pMsg->info.node = pMgmt->pMnode;
const STraceId *trace = &pMsg->info.traceId;
dGTrace("msg:%p, get from mnode queue", pMsg);
dGTrace("msg:%p, get from mnode queue, type:%s", pMsg, TMSG_INFO(pMsg->msgType));
int32_t code = mndProcessRpcMsg(pMsg);

View File

@ -81,7 +81,7 @@ int32_t mndTransPrepare(SMnode *pMnode, STrans *pTrans);
int32_t mndTransProcessRsp(SRpcMsg *pRsp);
void mndTransPullup(SMnode *pMnode);
int32_t mndKillTrans(SMnode *pMnode, STrans *pTrans);
void mndTransExecute(SMnode *pMnode, STrans *pTrans);
void mndTransExecute(SMnode *pMnode, STrans *pTrans, bool isLeader);
int32_t mndSetRpcInfoForDbTrans(SMnode *pMnode, SRpcMsg *pMsg, EOperType oper, const char *dbname);
#ifdef __cplusplus

View File

@ -31,6 +31,8 @@
#define MND_STREAM_VER_NUMBER 2
#define MND_STREAM_RESERVE_SIZE 64
#define MND_STREAM_MAX_NUM 10
static int32_t mndStreamActionInsert(SSdb *pSdb, SStreamObj *pStream);
static int32_t mndStreamActionDelete(SSdb *pSdb, SStreamObj *pStream);
static int32_t mndStreamActionUpdate(SSdb *pSdb, SStreamObj *pStream, SStreamObj *pNewStream);
@ -666,6 +668,35 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) {
goto _OVER;
}
{
int32_t numOfStream = 0;
SStreamObj *pStream = NULL;
void *pIter = NULL;
while (1) {
pIter = sdbFetch(pMnode->pSdb, SDB_STREAM, pIter, (void **)&pStream);
if (pIter == NULL) {
if (numOfStream > MND_STREAM_MAX_NUM) {
mError("too many streams, no more than 10 for each database");
terrno = TSDB_CODE_MND_TOO_MANY_STREAMS;
goto _OVER;
}
break;
}
if (pStream->sourceDbUid == streamObj.sourceDbUid) {
++numOfStream;
}
sdbRelease(pMnode->pSdb, pStream);
if (numOfStream > MND_STREAM_MAX_NUM) {
mError("too many streams, no more than 10 for each database");
terrno = TSDB_CODE_MND_TOO_MANY_STREAMS;
goto _OVER;
}
}
}
pDb = mndAcquireDb(pMnode, streamObj.sourceDb);
if (pDb->cfg.replications != 1) {
mError("stream source db must have only 1 replica, but %s has %d", pDb->name, pDb->cfg.replications);

View File

@ -85,7 +85,11 @@ int32_t mndProcessWriteMsg(const SSyncFSM *pFsm, SRpcMsg *pMsg, const SFsmCbMeta
pRaw, pMgmt->transSec, pMgmt->transSeq);
if (pMeta->code == 0) {
sdbWriteWithoutFree(pMnode->pSdb, pRaw);
int32_t code = sdbWriteWithoutFree(pMnode->pSdb, pRaw);
if (code != 0) {
mError("trans:%d, failed to write to sdb since %s", transId, terrstr());
return 0;
}
sdbSetApplyInfo(pMnode->pSdb, pMeta->index, pMeta->term, pMeta->lastConfigIndex);
}
@ -110,8 +114,9 @@ int32_t mndProcessWriteMsg(const SSyncFSM *pFsm, SRpcMsg *pMsg, const SFsmCbMeta
taosThreadMutexUnlock(&pMgmt->lock);
STrans *pTrans = mndAcquireTrans(pMnode, transId);
if (pTrans != NULL) {
mInfo("trans:%d, execute in mnode which not leader or sync timeout", transId);
mndTransExecute(pMnode, pTrans);
mInfo("trans:%d, execute in mnode which not leader or sync timeout, createTime:%" PRId64 " saved trans:%d",
transId, pTrans->createdTime, pMgmt->transId);
mndTransExecute(pMnode, pTrans, false);
mndReleaseTrans(pMnode, pTrans);
// sdbWriteFile(pMnode->pSdb, SDB_WRITE_DELTA);
} else {
@ -368,7 +373,7 @@ int32_t mndSyncPropose(SMnode *pMnode, SSdbRaw *pRaw, int32_t transId) {
taosThreadMutexLock(&pMgmt->lock);
pMgmt->errCode = 0;
if (pMgmt->transId != 0) {
if (pMgmt->transId != 0 /* && pMgmt->transId != transId*/) {
mError("trans:%d, can't be proposed since trans:%d already waiting for confirm", transId, pMgmt->transId);
taosThreadMutexUnlock(&pMgmt->lock);
rpcFreeCont(req.pCont);

View File

@ -572,8 +572,20 @@ static void mndTransUpdateActions(SArray *pOldArray, SArray *pNewArray) {
}
static int32_t mndTransActionUpdate(SSdb *pSdb, STrans *pOld, STrans *pNew) {
mTrace("trans:%d, perform update action, old row:%p stage:%s, new row:%p stage:%s", pOld->id, pOld,
mndTransStr(pOld->stage), pNew, mndTransStr(pNew->stage));
mTrace("trans:%d, perform update action, old row:%p stage:%s create:%" PRId64 ", new row:%p stage:%s create:%" PRId64,
pOld->id, pOld, mndTransStr(pOld->stage), pOld->createdTime, pNew, mndTransStr(pNew->stage),
pNew->createdTime);
if (pOld->createdTime != pNew->createdTime) {
mError("trans:%d, failed to perform update action since createTime not match, old row:%p stage:%s create:%" PRId64
", new row:%p stage:%s create:%" PRId64,
pOld->id, pOld, mndTransStr(pOld->stage), pOld->createdTime, pNew, mndTransStr(pNew->stage),
pNew->createdTime);
    // only occurs when sync times out
terrno = TSDB_CODE_MND_TRNAS_SYNC_TIMEOUT;
return -1;
}
mndTransUpdateActions(pOld->redoActions, pNew->redoActions);
mndTransUpdateActions(pOld->undoActions, pNew->undoActions);
mndTransUpdateActions(pOld->commitActions, pNew->commitActions);
@ -779,16 +791,18 @@ static int32_t mndTransSync(SMnode *pMnode, STrans *pTrans) {
}
(void)sdbSetRawStatus(pRaw, SDB_STATUS_READY);
mInfo("trans:%d, sync to other mnodes, stage:%s", pTrans->id, mndTransStr(pTrans->stage));
mInfo("trans:%d, sync to other mnodes, stage:%s createTime:%" PRId64, pTrans->id, mndTransStr(pTrans->stage),
pTrans->createdTime);
int32_t code = mndSyncPropose(pMnode, pRaw, pTrans->id);
if (code != 0) {
mError("trans:%d, failed to sync, errno:%s code:%s", pTrans->id, terrstr(), tstrerror(code));
mError("trans:%d, failed to sync, errno:%s code:%s createTime:%" PRId64 " saved trans:%d", pTrans->id, terrstr(),
tstrerror(code), pTrans->createdTime, pMnode->syncMgmt.transId);
sdbFreeRaw(pRaw);
return -1;
}
sdbFreeRaw(pRaw);
mInfo("trans:%d, sync finished", pTrans->id);
mInfo("trans:%d, sync finished, createTime:%" PRId64, pTrans->id, pTrans->createdTime);
return 0;
}
@ -891,7 +905,7 @@ int32_t mndTransPrepare(SMnode *pMnode, STrans *pTrans) {
pTrans->rpcRsp = NULL;
pTrans->rpcRspLen = 0;
mndTransExecute(pMnode, pNew);
mndTransExecute(pMnode, pNew, true);
mndReleaseTrans(pMnode, pNew);
return 0;
}
@ -1054,7 +1068,7 @@ int32_t mndTransProcessRsp(SRpcMsg *pRsp) {
mInfo("trans:%d, %s:%d response is received, code:0x%x, accept:0x%x retry:0x%x", transId, mndTransStr(pAction->stage),
action, pRsp->code, pAction->acceptableCode, pAction->retryCode);
mndTransExecute(pMnode, pTrans);
mndTransExecute(pMnode, pTrans, true);
_OVER:
mndReleaseTrans(pMnode, pTrans);
@ -1483,15 +1497,17 @@ static bool mndTransPerfromFinishedStage(SMnode *pMnode, STrans *pTrans) {
mError("trans:%d, failed to write sdb since %s", pTrans->id, terrstr());
}
mInfo("trans:%d, execute finished, code:0x%x, failedTimes:%d", pTrans->id, pTrans->code, pTrans->failedTimes);
mInfo("trans:%d, execute finished, code:0x%x, failedTimes:%d createTime:%" PRId64, pTrans->id, pTrans->code,
pTrans->failedTimes, pTrans->createdTime);
return continueExec;
}
void mndTransExecute(SMnode *pMnode, STrans *pTrans) {
void mndTransExecute(SMnode *pMnode, STrans *pTrans, bool isLeader) {
bool continueExec = true;
while (continueExec) {
mInfo("trans:%d, continue to execute, stage:%s", pTrans->id, mndTransStr(pTrans->stage));
mInfo("trans:%d, continue to execute, stage:%s createTime:%" PRId64 " leader:%d", pTrans->id,
mndTransStr(pTrans->stage), pTrans->createdTime, isLeader);
pTrans->lastExecTime = taosGetTimestampMs();
switch (pTrans->stage) {
case TRN_STAGE_PREPARE:
@ -1501,13 +1517,23 @@ void mndTransExecute(SMnode *pMnode, STrans *pTrans) {
continueExec = mndTransPerformRedoActionStage(pMnode, pTrans);
break;
case TRN_STAGE_COMMIT:
continueExec = mndTransPerformCommitStage(pMnode, pTrans);
if (isLeader) {
continueExec = mndTransPerformCommitStage(pMnode, pTrans);
} else {
mInfo("trans:%d, can not commit since not leader", pTrans->id);
continueExec = false;
}
break;
case TRN_STAGE_COMMIT_ACTION:
continueExec = mndTransPerformCommitActionStage(pMnode, pTrans);
break;
case TRN_STAGE_ROLLBACK:
continueExec = mndTransPerformRollbackStage(pMnode, pTrans);
if (isLeader) {
continueExec = mndTransPerformRollbackStage(pMnode, pTrans);
} else {
mInfo("trans:%d, can not rollback since not leader", pTrans->id);
continueExec = false;
}
break;
case TRN_STAGE_UNDO_ACTION:
continueExec = mndTransPerformUndoActionStage(pMnode, pTrans);
@ -1550,7 +1576,7 @@ int32_t mndKillTrans(SMnode *pMnode, STrans *pTrans) {
pAction->errCode = 0;
}
mndTransExecute(pMnode, pTrans);
mndTransExecute(pMnode, pTrans, true);
return 0;
}
@ -1608,7 +1634,7 @@ void mndTransPullup(SMnode *pMnode) {
int32_t *pTransId = taosArrayGet(pArray, i);
STrans *pTrans = mndAcquireTrans(pMnode, *pTransId);
if (pTrans != NULL) {
mndTransExecute(pMnode, pTrans);
mndTransExecute(pMnode, pTrans, true);
}
mndReleaseTrans(pMnode, pTrans);
}

View File

@ -181,7 +181,6 @@ int32_t tsdbReaderOpen(SVnode *pVnode, SQueryTableDataCond *pCond, void *pTableL
void tsdbReaderClose(STsdbReader *pReader);
bool tsdbNextDataBlock(STsdbReader *pReader);
void tsdbRetrieveDataBlockInfo(const STsdbReader *pReader, int32_t *rows, uint64_t *uid, STimeWindow *pWindow);
int32_t tsdbRetrieveDatablockSMA(STsdbReader *pReader, SSDataBlock *pDataBlock, bool *allHave);
void tsdbReleaseDataBlock(STsdbReader *pReader);
SSDataBlock *tsdbRetrieveDataBlock(STsdbReader *pTsdbReadHandle, SArray *pColumnIdList);

View File

@ -350,6 +350,8 @@ struct STsdb {
STsdbFS fs;
SLRUCache *lruCache;
TdThreadMutex lruMutex;
SLRUCache *biCache;
TdThreadMutex biMutex;
};
struct TSDBKEY {
@ -790,6 +792,9 @@ int32_t tsdbCacheGetLastH(SLRUCache *pCache, tb_uid_t uid, SCacheRowsReader *pr,
int32_t tsdbCacheGetLastrowH(SLRUCache *pCache, tb_uid_t uid, SCacheRowsReader *pr, LRUHandle **h);
int32_t tsdbCacheRelease(SLRUCache *pCache, LRUHandle *h);
int32_t tsdbCacheGetBlockIdx(SLRUCache *pCache, SDataFReader *pFileReader, LRUHandle **handle);
int32_t tsdbBICacheRelease(SLRUCache *pCache, LRUHandle *h);
int32_t tsdbCacheDeleteLastrow(SLRUCache *pCache, tb_uid_t uid, TSKEY eKey);
int32_t tsdbCacheDeleteLast(SLRUCache *pCache, tb_uid_t uid, TSKEY eKey);
int32_t tsdbCacheDelete(SLRUCache *pCache, tb_uid_t uid, TSKEY eKey);

View File

@ -15,6 +15,34 @@
#include "tsdb.h"
static int32_t tsdbOpenBICache(STsdb *pTsdb) {
int32_t code = 0;
SLRUCache *pCache = taosLRUCacheInit(5 * 1024 * 1024, -1, .5);
if (pCache == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
taosLRUCacheSetStrictCapacity(pCache, false);
taosThreadMutexInit(&pTsdb->biMutex, NULL);
_err:
pTsdb->biCache = pCache;
return code;
}
static void tsdbCloseBICache(STsdb *pTsdb) {
SLRUCache *pCache = pTsdb->biCache;
if (pCache) {
taosLRUCacheEraseUnrefEntries(pCache);
taosLRUCacheCleanup(pCache);
taosThreadMutexDestroy(&pTsdb->biMutex);
}
}
int32_t tsdbOpenCache(STsdb *pTsdb) {
int32_t code = 0;
SLRUCache *pCache = NULL;
@ -26,6 +54,12 @@ int32_t tsdbOpenCache(STsdb *pTsdb) {
goto _err;
}
code = tsdbOpenBICache(pTsdb);
if (code != TSDB_CODE_SUCCESS) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
taosLRUCacheSetStrictCapacity(pCache, false);
taosThreadMutexInit(&pTsdb->lruMutex, NULL);
@ -44,6 +78,8 @@ void tsdbCloseCache(STsdb *pTsdb) {
taosThreadMutexDestroy(&pTsdb->lruMutex);
}
tsdbCloseBICache(pTsdb);
}
static void getTableCacheKey(tb_uid_t uid, int cacheType, char *key, int *len) {
@ -1538,3 +1574,84 @@ size_t tsdbCacheGetUsage(SVnode *pVnode) {
return usage;
}
static void getBICacheKey(int32_t fid, int64_t commitID, char *key, int *len) {
struct {
int32_t fid;
int64_t commitID;
} biKey = {0};
biKey.fid = fid;
biKey.commitID = commitID;
*len = sizeof(biKey);
memcpy(key, &biKey, *len);
}
static int32_t tsdbCacheLoadBlockIdx(SDataFReader *pFileReader, SArray **aBlockIdx) {
SArray *pArray = taosArrayInit(8, sizeof(SBlockIdx));
int32_t code = tsdbReadBlockIdx(pFileReader, pArray);
if (code != TSDB_CODE_SUCCESS) {
taosArrayDestroy(pArray);
code = TSDB_CODE_OUT_OF_MEMORY;
return code;
}
*aBlockIdx = pArray;
return code;
}
static void deleteBICache(const void *key, size_t keyLen, void *value) {
SArray *pArray = (SArray *)value;
taosArrayDestroy(pArray);
}
int32_t tsdbCacheGetBlockIdx(SLRUCache *pCache, SDataFReader *pFileReader, LRUHandle **handle) {
int32_t code = 0;
char key[128] = {0};
int keyLen = 0;
getBICacheKey(pFileReader->pSet->fid, pFileReader->pSet->pHeadF->commitID, key, &keyLen);
LRUHandle *h = taosLRUCacheLookup(pCache, key, keyLen);
if (!h) {
STsdb *pTsdb = pFileReader->pTsdb;
taosThreadMutexLock(&pTsdb->biMutex);
h = taosLRUCacheLookup(pCache, key, keyLen);
if (!h) {
SArray *pArray = NULL;
code = tsdbCacheLoadBlockIdx(pFileReader, &pArray);
      // if the table is empty or an error occurred, return a NULL handle with code 0
if (code != TSDB_CODE_SUCCESS || pArray == NULL) {
taosThreadMutexUnlock(&pTsdb->biMutex);
*handle = NULL;
return 0;
}
size_t charge = pArray->capacity * pArray->elemSize + sizeof(*pArray);
_taos_lru_deleter_t deleter = deleteBICache;
LRUStatus status = taosLRUCacheInsert(pCache, key, keyLen, pArray, charge, deleter, &h, TAOS_LRU_PRIORITY_LOW);
if (status != TAOS_LRU_STATUS_OK) {
code = -1;
}
}
taosThreadMutexUnlock(&pTsdb->biMutex);
}
*handle = h;
return code;
}
int32_t tsdbBICacheRelease(SLRUCache *pCache, LRUHandle *h) {
int32_t code = 0;
taosLRUCacheRelease(pCache, h, false);
return code;
}

View File

@ -515,7 +515,7 @@ bool tLDataIterNextRow(SLDataIter *pIter, const char *idStr) {
pIter->rInfo.row = tsdbRowFromBlockData(pBlockData, pIter->iRow);
_exit:
return (terrno == TSDB_CODE_SUCCESS) && (pIter->pSttBlk != NULL);
return (terrno == TSDB_CODE_SUCCESS) && (pIter->pSttBlk != NULL) && (pBlockData != NULL);
}
SRowInfo *tLDataIterGet(SLDataIter *pIter) { return &pIter->rInfo; }

View File

@ -79,6 +79,9 @@ typedef struct SIOCostSummary {
int64_t composedBlocks;
double buildComposedBlockTime;
double createScanInfoList;
// double getTbFromMemTime;
// double getTbFromIMemTime;
double initDelSkylineIterTime;
} SIOCostSummary;
typedef struct SBlockLoadSuppInfo {
@ -176,10 +179,9 @@ struct STsdbReader {
SDataFReader* pFileReader; // the file reader
SDelFReader* pDelFReader; // the del file reader
SArray* pDelIdx; // del file block index;
// SVersionRange verRange;
SBlockInfoBuf blockInfoBuf;
int32_t step;
STsdbReader* innerReader[2];
SBlockInfoBuf blockInfoBuf;
int32_t step;
STsdbReader* innerReader[2];
};
static SFileDataBlockInfo* getCurrentBlockInfo(SDataBlockIter* pBlockIter);
@ -220,6 +222,8 @@ static bool hasDataInFileBlock(const SBlockData* pBlockData, const SFil
static void initBlockDumpInfo(STsdbReader* pReader, SDataBlockIter* pBlockIter);
static int32_t getInitialDelIndex(const SArray* pDelSkyline, int32_t order);
static FORCE_INLINE STSchema* getLatestTableSchema(STsdbReader* pReader, uint64_t uid);
static bool outOfTimeWindow(int64_t ts, STimeWindow* pWindow) { return (ts > pWindow->ekey) || (ts < pWindow->skey); }
static int32_t setColumnIdSlotList(SBlockLoadSuppInfo* pSupInfo, SColumnInfo* pCols, const int32_t* pSlotIdList,
@ -711,17 +715,21 @@ _end:
}
static int32_t doLoadBlockIndex(STsdbReader* pReader, SDataFReader* pFileReader, SArray* pIndexList) {
SArray* aBlockIdx = taosArrayInit(8, sizeof(SBlockIdx));
// SArray* aBlockIdx = taosArrayInit(8, sizeof(SBlockIdx));
int64_t st = taosGetTimestampUs();
int32_t code = tsdbReadBlockIdx(pFileReader, aBlockIdx);
if (code != TSDB_CODE_SUCCESS) {
// int32_t code = tsdbReadBlockIdx(pFileReader, aBlockIdx);
LRUHandle* handle = NULL;
int32_t code = tsdbCacheGetBlockIdx(pFileReader->pTsdb->biCache, pFileReader, &handle);
if (code != TSDB_CODE_SUCCESS || handle == NULL) {
goto _end;
}
size_t num = taosArrayGetSize(aBlockIdx);
SArray* aBlockIdx = (SArray*)taosLRUCacheValue(pFileReader->pTsdb->biCache, handle);
size_t num = taosArrayGetSize(aBlockIdx);
if (num == 0) {
taosArrayDestroy(aBlockIdx);
tsdbBICacheRelease(pFileReader->pTsdb->biCache, handle);
// taosArrayDestroy(aBlockIdx);
return TSDB_CODE_SUCCESS;
}
@ -757,7 +765,8 @@ static int32_t doLoadBlockIndex(STsdbReader* pReader, SDataFReader* pFileReader,
pReader->cost.headFileLoadTime += (et1 - st) / 1000.0;
_end:
taosArrayDestroy(aBlockIdx);
// taosArrayDestroy(aBlockIdx);
tsdbBICacheRelease(pFileReader->pTsdb->biCache, handle);
return code;
}
@ -1070,7 +1079,7 @@ static void copyNumericCols(const SColData* pData, SFileBlockDumpInfo* pDumpInfo
}
}
static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo) {
static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader) {
SReaderStatus* pStatus = &pReader->status;
SDataBlockIter* pBlockIter = &pStatus->blockIter;
SBlockLoadSuppInfo* pSupInfo = &pReader->suppInfo;
@ -1087,6 +1096,14 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, STableBlockScanIn
bool asc = ASCENDING_TRAVERSE(pReader->order);
int32_t step = asc ? 1 : -1;
// no data exists, return directly.
if (pBlockData->nRow == 0 || pBlockData->aTSKEY == 0) {
tsdbWarn("%p no need to copy since no data in blockData, table uid:%" PRIu64 " has been dropped, %s", pReader,
pBlockInfo->uid, pReader->idStr);
pResBlock->info.rows = 0;
return 0;
}
if ((pDumpInfo->rowIndex == 0 && asc) || (pDumpInfo->rowIndex == pBlock->nRow - 1 && (!asc))) {
if (asc && pReader->window.skey <= pBlock->minKey.ts) {
// pDumpInfo->rowIndex = 0;
@ -1186,14 +1203,12 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, STableBlockScanIn
setBlockAllDumped(pDumpInfo, ts, pReader->order);
}
pBlockScanInfo->lastKey = pDumpInfo->lastKey;
double elapsedTime = (taosGetTimestampUs() - st) / 1000.0;
pReader->cost.blockLoadTime += elapsedTime;
int32_t unDumpedRows = asc ? pBlock->nRow - pDumpInfo->rowIndex : pDumpInfo->rowIndex + 1;
tsdbDebug("%p copy file block to sdatablock, global index:%d, table index:%d, brange:%" PRId64 "-%" PRId64
", rows:%d, remain:%d, minVer:%" PRId64 ", maxVer:%" PRId64 ", uid:%"PRIu64" elapsed time:%.2f ms, %s",
", rows:%d, remain:%d, minVer:%" PRId64 ", maxVer:%" PRId64 ", uid:%" PRIu64 " elapsed time:%.2f ms, %s",
pReader, pBlockIter->index, pBlockInfo->tbBlockIdx, pBlock->minKey.ts, pBlock->maxKey.ts, dumpedRows,
unDumpedRows, pBlock->minVer, pBlock->maxVer, pBlockInfo->uid, elapsedTime, pReader->idStr);
@ -1202,12 +1217,19 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, STableBlockScanIn
static int32_t doLoadFileBlockData(STsdbReader* pReader, SDataBlockIter* pBlockIter, SBlockData* pBlockData,
uint64_t uid) {
int32_t code = 0;
int64_t st = taosGetTimestampUs();
tBlockDataReset(pBlockData);
TABLEID tid = {.suid = pReader->suid, .uid = uid};
int32_t code =
tBlockDataInit(pBlockData, &tid, pReader->pSchema, &pReader->suppInfo.colId[1], pReader->suppInfo.numOfCols - 1);
STSchema* pSchema = getLatestTableSchema(pReader, uid);
if (pSchema == NULL) {
tsdbDebug("%p table uid:%" PRIu64 " has been dropped, no data existed, %s", pReader, uid, pReader->idStr);
return code;
}
SBlockLoadSuppInfo* pSup = &pReader->suppInfo;
TABLEID tid = {.suid = pReader->suid, .uid = uid};
code = tBlockDataInit(pBlockData, &tid, pSchema, &pSup->colId[1], pSup->numOfCols - 1);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
@ -1633,7 +1655,7 @@ static bool fileBlockShouldLoad(STsdbReader* pReader, SFileDataBlockInfo* pBlock
// log the reason for loading the datablock, for profiling
if (loadDataBlock) {
tsdbDebug("%p uid:%" PRIu64
" need to load the datablock, overlapwithneighborblock:%d, hasDup:%d, partiallyRequired:%d, "
" need to load the datablock, overlapneighbor:%d, hasDup:%d, partiallyRequired:%d, "
"overlapWithKey:%d, greaterThanBuf:%d, overlapWithDel:%d, overlapWithlastBlock:%d, %s",
pReader, pBlockInfo->uid, info.overlapWithNeighborBlock, info.hasDupTs, info.partiallyRequired,
info.overlapWithKeyInBuf, info.moreThanCapcity, info.overlapWithDelInfo, info.overlapWithLastBlock,
@ -1731,6 +1753,19 @@ static bool tryCopyDistinctRowFromSttBlock(TSDBROW* fRow, SLastBlockReader* pLas
return false;
}
static FORCE_INLINE STSchema* getLatestTableSchema(STsdbReader* pReader, uint64_t uid) {
if (pReader->pSchema != NULL) {
return pReader->pSchema;
}
pReader->pSchema = metaGetTbTSchema(pReader->pTsdb->pVnode->pMeta, uid, -1, 1);
if (pReader->pSchema == NULL) {
tsdbError("failed to get table schema, uid:%" PRIu64 ", it may have been dropped, ver:-1, %s", uid, pReader->idStr);
}
return pReader->pSchema;
}
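getLatestTableSchema is a lazy-init cache: fetch the schema once, keep it on the reader, and treat NULL as "table dropped". A compilable toy of the same idiom follows; the names are illustrative, not the real meta API.

```c
#include <stdio.h>
#include <stdlib.h>

typedef struct { int version; } Schema;
typedef struct { Schema *pSchema; } Reader;

/* Pretend meta lookup; returns NULL when uid is unknown (table dropped). */
static Schema *metaGetSchema(unsigned long uid) {
  if (uid == 0) return NULL;
  Schema *s = malloc(sizeof(*s));
  if (s) s->version = 1;
  return s;
}

static Schema *getLatestSchema(Reader *r, unsigned long uid) {
  if (r->pSchema != NULL) return r->pSchema; /* cached */
  r->pSchema = metaGetSchema(uid);           /* fetch once */
  if (r->pSchema == NULL) fprintf(stderr, "uid %lu may have been dropped\n", uid);
  return r->pSchema;
}

int main(void) {
  Reader  r = {0};
  Schema *s = getLatestSchema(&r, 42);
  printf("got schema: %s\n", s ? "yes" : "no");
  free(r.pSchema);
  return 0;
}
```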
static FORCE_INLINE STSchema* doGetSchemaForTSRow(int32_t sversion, STsdbReader* pReader, uint64_t uid) {
// always set the newest schema version in pReader->pSchema
if (pReader->pSchema == NULL) {
@ -2260,6 +2295,7 @@ static int32_t initMemDataIterator(STableBlockScanInfo* pBlockScanInfo, STsdbRea
}
int32_t backward = (!ASCENDING_TRAVERSE(pReader->order));
int64_t st = 0;
STbData* d = NULL;
if (pReader->pReadSnap->pMem != NULL) {
@ -2303,7 +2339,9 @@ static int32_t initMemDataIterator(STableBlockScanInfo* pBlockScanInfo, STsdbRea
tsdbDebug("%p uid:%" PRIu64 ", no data in imem, %s", pReader, pBlockScanInfo->uid, pReader->idStr);
}
st = taosGetTimestampUs();
initDelSkylineIterator(pBlockScanInfo, pReader, d, di);
pReader->cost.initDelSkylineIterTime += (taosGetTimestampUs() - st) / 1000.0;
pBlockScanInfo->iterInit = true;
return TSDB_CODE_SUCCESS;
@ -2533,7 +2571,7 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader) {
if (isCleanFileDataBlock(pReader, pBlockInfo, pBlock, pBlockScanInfo, keyInBuf, pLastBlockReader) &&
pBlock->nRow <= pReader->capacity) {
if (asc || ((!asc) && (!hasDataInLastBlock(pLastBlockReader)))) {
copyBlockDataToSDataBlock(pReader, pBlockScanInfo);
copyBlockDataToSDataBlock(pReader);
// record the last key value
pBlockScanInfo->lastKey = asc ? pBlock->maxKey.ts : pBlock->minKey.ts;
@ -2678,21 +2716,39 @@ _err:
}
TSDBKEY getCurrentKeyInBuf(STableBlockScanInfo* pScanInfo, STsdbReader* pReader) {
TSDBKEY key = {.ts = TSKEY_INITIAL_VAL};
bool asc = ASCENDING_TRAVERSE(pReader->order);
// TSKEY initialVal = asc? TSKEY_MIN:TSKEY_MAX;
TSDBKEY key = {.ts = TSKEY_INITIAL_VAL}, ikey = {.ts = TSKEY_INITIAL_VAL};
bool hasKey = false, hasIKey = false;
TSDBROW* pRow = getValidMemRow(&pScanInfo->iter, pScanInfo->delSkyline, pReader);
if (pRow != NULL) {
hasKey = true;
key = TSDBROW_KEY(pRow);
}
pRow = getValidMemRow(&pScanInfo->iiter, pScanInfo->delSkyline, pReader);
if (pRow != NULL) {
TSDBKEY k = TSDBROW_KEY(pRow);
if (key.ts > k.ts) {
key = k;
}
TSDBROW* pIRow = getValidMemRow(&pScanInfo->iiter, pScanInfo->delSkyline, pReader);
if (pIRow != NULL) {
hasIKey = true;
ikey = TSDBROW_KEY(pIRow);
}
return key;
if (hasKey) {
if (hasIKey) { // has data in mem & imem
if (asc) {
return key.ts <= ikey.ts ? key : ikey;
} else {
return key.ts <= ikey.ts ? ikey : key;
}
} else { // no data in imem
return key;
}
} else {
// mem has no data: return ikey, which is either imem's key or the initial value when imem is empty too
return ikey;
}
}
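The rewritten getCurrentKeyInBuf picks the mem key, the imem key, or, when both buffers hold data, the smaller timestamp for ascending scans and the larger for descending. A standalone check of that rule, with a simplified key type and illustrative values:

```c
#include <stdbool.h>
#include <stdio.h>

typedef struct { long ts; } Key;

static Key pickKey(bool hasKey, Key key, bool hasIKey, Key ikey, bool asc) {
  if (hasKey && hasIKey) {
    /* asc: smaller ts wins; desc: larger ts wins */
    return (asc ? key.ts <= ikey.ts : key.ts > ikey.ts) ? key : ikey;
  }
  return hasKey ? key : ikey; /* single source, or the initial value */
}

int main(void) {
  Key mem = {.ts = 100}, imem = {.ts = 90};
  printf("asc  -> %ld\n", pickKey(true, mem, true, imem, true).ts);  /* 90  */
  printf("desc -> %ld\n", pickKey(true, mem, true, imem, false).ts); /* 100 */
  return 0;
}
```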
static int32_t moveToNextFile(STsdbReader* pReader, SBlockNumber* pBlockNum) {
@ -2921,59 +2977,19 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) {
ASSERT(pBlockInfo != NULL);
if (pBlockInfo != NULL) {
pScanInfo =
*(STableBlockScanInfo**)taosHashGet(pReader->status.pTableMap, &pBlockInfo->uid, sizeof(pBlockInfo->uid));
} else {
pScanInfo = *pReader->status.pTableIter;
}
pScanInfo = *(STableBlockScanInfo**)taosHashGet(pReader->status.pTableMap, &pBlockInfo->uid, sizeof(pBlockInfo->uid));
if (pScanInfo == NULL) {
tsdbError("failed to get table scan-info, %s", pReader->idStr);
code = TSDB_CODE_INVALID_PARA;
return code;
}
if (pBlockInfo != NULL) {
pBlock = getCurrentBlock(pBlockIter);
}
pBlock = getCurrentBlock(pBlockIter);
initLastBlockReader(pLastBlockReader, pScanInfo, pReader);
TSDBKEY keyInBuf = getCurrentKeyInBuf(pScanInfo, pReader);
/*if (pBlockInfo == NULL) { // build data block from last data file
SBlockData* pBData = &pReader->status.fileBlockData;
tBlockDataReset(pBData);
SSDataBlock* pResBlock = pReader->pResBlock;
tsdbDebug("load data in last block firstly, due to desc scan data, %s", pReader->idStr);
int64_t st = taosGetTimestampUs();
while (1) {
bool hasBlockLData = hasDataInLastBlock(pLastBlockReader);
// no data in last block and block, no need to proceed.
if (hasBlockLData == false) {
break;
}
buildComposedDataBlockImpl(pReader, pScanInfo, &pReader->status.fileBlockData, pLastBlockReader);
if (pResBlock->info.rows >= pReader->capacity) {
break;
}
}
double el = (taosGetTimestampUs() - st) / 1000.0;
updateComposedBlockInfo(pReader, el, pScanInfo);
if (pResBlock->info.rows > 0) {
tsdbDebug("%p uid:%" PRIu64 ", composed data block created, brange:%" PRIu64 "-%" PRIu64
" rows:%d, elapsed time:%.2f ms %s",
pReader, pResBlock->info.id.uid, pResBlock->info.window.skey, pResBlock->info.window.ekey,
pResBlock->info.rows, el, pReader->idStr);
}
} else*/ if (fileBlockShouldLoad(pReader, pBlockInfo, pBlock, pScanInfo, keyInBuf, pLastBlockReader)) {
if (fileBlockShouldLoad(pReader, pBlockInfo, pBlock, pScanInfo, keyInBuf, pLastBlockReader)) {
code = doLoadFileBlockData(pReader, pBlockIter, &pStatus->fileBlockData, pScanInfo->uid);
if (code != TSDB_CODE_SUCCESS) {
return code;
@ -3033,6 +3049,11 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) {
// update the last key for the corresponding table
pScanInfo->lastKey = ASCENDING_TRAVERSE(pReader->order) ? pInfo->window.ekey : pInfo->window.skey;
tsdbDebug("%p uid:%" PRIu64
" clean file block retrieved from file, global index:%d, "
"table index:%d, rows:%d, brange:%" PRId64 "-%" PRId64 ", %s",
pReader, pScanInfo->uid, pBlockIter->index, pBlockInfo->tbBlockIdx, pBlock->nRow, pBlock->minKey.ts,
pBlock->maxKey.ts, pReader->idStr);
}
}
@ -4124,19 +4145,21 @@ void tsdbReaderClose(STsdbReader* pReader) {
taosMemoryFree(pLReader);
}
tsdbDebug("%p :io-cost summary: head-file:%" PRIu64 ", head-file time:%.2f ms, SMA:%" PRId64
" SMA-time:%.2f ms, fileBlocks:%" PRId64
", fileBlocks-load-time:%.2f ms, "
"build in-memory-block-time:%.2f ms, lastBlocks:%" PRId64
", lastBlocks-time:%.2f ms, composed-blocks:%" PRId64
", composed-blocks-time:%.2fms, STableBlockScanInfo size:%.2f Kb, creatTime:%.2f ms, %s",
pReader, pCost->headFileLoad, pCost->headFileLoadTime, pCost->smaDataLoad, pCost->smaLoadTime,
pCost->numOfBlocks, pCost->blockLoadTime, pCost->buildmemBlock, pCost->lastBlockLoad,
pCost->lastBlockLoadTime, pCost->composedBlocks, pCost->buildComposedBlockTime,
numOfTables * sizeof(STableBlockScanInfo) / 1000.0, pCost->createScanInfoList, pReader->idStr);
tsdbDebug(
"%p :io-cost summary: head-file:%" PRIu64 ", head-file time:%.2f ms, SMA:%" PRId64
" SMA-time:%.2f ms, fileBlocks:%" PRId64
", fileBlocks-load-time:%.2f ms, "
"build in-memory-block-time:%.2f ms, lastBlocks:%" PRId64 ", lastBlocks-time:%.2f ms, composed-blocks:%" PRId64
", composed-blocks-time:%.2fms, STableBlockScanInfo size:%.2f Kb, createTime:%.2f ms, initDelSkylineIterTime:%.2f "
"ms, %s",
pReader, pCost->headFileLoad, pCost->headFileLoadTime, pCost->smaDataLoad, pCost->smaLoadTime, pCost->numOfBlocks,
pCost->blockLoadTime, pCost->buildmemBlock, pCost->lastBlockLoad, pCost->lastBlockLoadTime, pCost->composedBlocks,
pCost->buildComposedBlockTime, numOfTables * sizeof(STableBlockScanInfo) / 1000.0, pCost->createScanInfoList,
pCost->initDelSkylineIterTime, pReader->idStr);
taosMemoryFree(pReader->idStr);
taosMemoryFree(pReader->pSchema);
if (pReader->pMemSchema != pReader->pSchema) {
taosMemoryFree(pReader->pMemSchema);
}
@ -4435,26 +4458,6 @@ bool tsdbNextDataBlock(STsdbReader* pReader) {
return false;
}
static void setBlockInfo(const STsdbReader* pReader, int32_t* rows, uint64_t* uid, STimeWindow* pWindow) {
*rows = pReader->pResBlock->info.rows;
*uid = pReader->pResBlock->info.id.uid;
*pWindow = pReader->pResBlock->info.window;
}
void tsdbRetrieveDataBlockInfo(const STsdbReader* pReader, int32_t* rows, uint64_t* uid, STimeWindow* pWindow) {
if (pReader->type == TIMEWINDOW_RANGE_EXTERNAL) {
if (pReader->step == EXTERNAL_ROWS_MAIN) {
setBlockInfo(pReader, rows, uid, pWindow);
} else if (pReader->step == EXTERNAL_ROWS_PREV) {
setBlockInfo(pReader->innerReader[0], rows, uid, pWindow);
} else {
setBlockInfo(pReader->innerReader[1], rows, uid, pWindow);
}
} else {
setBlockInfo(pReader, rows, uid, pWindow);
}
}
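The tsdbRetrieveDataBlockInfo refactor above funnels all three paths through one setBlockInfo helper, choosing the inner reader by the external-rows step. A self-contained reduction of that dispatch, with simplified field names and illustrative step constants:

```c
#include <stdio.h>

enum { ROWS_PREV, ROWS_MAIN, ROWS_NEXT };

typedef struct Reader {
  int            rows;
  struct Reader *inner[2]; /* [0] = prev window, [1] = next window */
  int            external, step;
} Reader;

static void setBlockInfo(const Reader *r, int *rows) { *rows = r->rows; }

static void retrieveInfo(const Reader *r, int *rows) {
  if (r->external) {
    if (r->step == ROWS_MAIN)      setBlockInfo(r, rows);
    else if (r->step == ROWS_PREV) setBlockInfo(r->inner[0], rows);
    else                           setBlockInfo(r->inner[1], rows);
  } else {
    setBlockInfo(r, rows);
  }
}

int main(void) {
  Reader prev = {.rows = 3}, next = {.rows = 5};
  Reader outer = {.rows = 9, .inner = {&prev, &next}, .external = 1, .step = ROWS_PREV};
  int    rows = 0;
  retrieveInfo(&outer, &rows);
  printf("rows=%d\n", rows); /* 3: taken from the prev-window inner reader */
  return 0;
}
```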
static void doFillNullColSMA(SBlockLoadSuppInfo* pSup, int32_t numOfRows, int32_t numOfCols, SColumnDataAgg* pTsAgg) {
// do fill all null column value SMA info
int32_t i = 0, j = 0;
@ -4585,7 +4588,7 @@ static SSDataBlock* doRetrieveDataBlock(STsdbReader* pReader) {
return NULL;
}
copyBlockDataToSDataBlock(pReader, pBlockScanInfo);
copyBlockDataToSDataBlock(pReader);
return pReader->pResBlock;
}

View File

@ -926,8 +926,9 @@ int32_t tsdbRowMergerGetRow(SRowMerger *pMerger, SRow **ppRow) {
return tRowBuild(pMerger->pArray, pMerger->pTSchema, ppRow);
}
/*
// delete skyline ======================================================
static int32_t tsdbMergeSkyline(SArray *aSkyline1, SArray *aSkyline2, SArray *aSkyline) {
static int32_t tsdbMergeSkyline2(SArray *aSkyline1, SArray *aSkyline2, SArray *aSkyline) {
int32_t code = 0;
int32_t i1 = 0;
int32_t n1 = taosArrayGetSize(aSkyline1);
@ -993,7 +994,141 @@ static int32_t tsdbMergeSkyline(SArray *aSkyline1, SArray *aSkyline2, SArray *aS
_exit:
return code;
}
*/
// delete skyline ======================================================
static int32_t tsdbMergeSkyline(SArray *pSkyline1, SArray *pSkyline2, SArray *pSkyline) {
int32_t code = 0;
int32_t i1 = 0;
int32_t n1 = taosArrayGetSize(pSkyline1);
int32_t i2 = 0;
int32_t n2 = taosArrayGetSize(pSkyline2);
TSDBKEY *pKey1;
TSDBKEY *pKey2;
int64_t version1 = 0;
int64_t version2 = 0;
ASSERT(n1 > 0 && n2 > 0);
taosArrayClear(pSkyline);
TSDBKEY **pItem = TARRAY_GET_ELEM(pSkyline, 0);
while (i1 < n1 && i2 < n2) {
pKey1 = (TSDBKEY *)taosArrayGetP(pSkyline1, i1);
pKey2 = (TSDBKEY *)taosArrayGetP(pSkyline2, i2);
if (pKey1->ts < pKey2->ts) {
version1 = pKey1->version;
*pItem = pKey1;
i1++;
} else if (pKey1->ts > pKey2->ts) {
version2 = pKey2->version;
*pItem = pKey2;
i2++;
} else {
version1 = pKey1->version;
version2 = pKey2->version;
*pItem = pKey1;
i1++;
i2++;
}
(*pItem)->version = TMAX(version1, version2);
pItem++;
}
while (i1 < n1) {
pKey1 = (TSDBKEY *)taosArrayGetP(pSkyline1, i1);
*pItem = pKey1;
pItem++;
i1++;
}
while (i2 < n2) {
pKey2 = (TSDBKEY *)taosArrayGetP(pSkyline2, i2);
*pItem = pKey2;
pItem++;
i2++;
}
taosArraySetSize(pSkyline, TARRAY_ELEM_IDX(pSkyline, pItem));
_exit:
return code;
}
int32_t tsdbBuildDeleteSkylineImpl(SArray *aSkyline, int32_t sidx, int32_t eidx, SArray *pSkyline) {
int32_t code = 0;
SDelData *pDelData;
int32_t midx;
taosArrayClear(pSkyline);
if (sidx == eidx) {
TSDBKEY *pItem1 = taosArrayGet(aSkyline, sidx * 2);
TSDBKEY *pItem2 = taosArrayGet(aSkyline, sidx * 2 + 1);
taosArrayPush(pSkyline, &pItem1);
taosArrayPush(pSkyline, &pItem2);
} else {
SArray *pSkyline1 = NULL;
SArray *pSkyline2 = NULL;
midx = (sidx + eidx) / 2;
pSkyline1 = taosArrayInit((midx - sidx + 1) * 2, POINTER_BYTES);
pSkyline2 = taosArrayInit((eidx - midx) * 2, POINTER_BYTES);
if (pSkyline1 == NULL || pSkyline2 == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _clear;
}
code = tsdbBuildDeleteSkylineImpl(aSkyline, sidx, midx, pSkyline1);
if (code) goto _clear;
code = tsdbBuildDeleteSkylineImpl(aSkyline, midx + 1, eidx, pSkyline2);
if (code) goto _clear;
code = tsdbMergeSkyline(pSkyline1, pSkyline2, pSkyline);
_clear:
taosArrayDestroy(pSkyline1);
taosArrayDestroy(pSkyline2);
}
return code;
}
int32_t tsdbBuildDeleteSkyline(SArray *aDelData, int32_t sidx, int32_t eidx, SArray *aSkyline) {
SDelData *pDelData;
int32_t code = 0;
int32_t dataNum = eidx - sidx + 1;
SArray *aTmpSkyline = taosArrayInit(dataNum * 2, sizeof(TSDBKEY));
SArray *pSkyline = taosArrayInit(dataNum * 2, POINTER_BYTES);
for (int32_t i = sidx; i <= eidx; ++i) {
pDelData = (SDelData *)taosArrayGet(aDelData, i);
taosArrayPush(aTmpSkyline, &(TSDBKEY){.ts = pDelData->sKey, .version = pDelData->version});
taosArrayPush(aTmpSkyline, &(TSDBKEY){.ts = pDelData->eKey, .version = 0});
}
code = tsdbBuildDeleteSkylineImpl(aTmpSkyline, sidx, eidx, pSkyline);
if (code) goto _clear;
int32_t skylineNum = taosArrayGetSize(pSkyline);
for (int32_t i = 0; i < skylineNum; ++i) {
TSDBKEY *p = taosArrayGetP(pSkyline, i);
taosArrayPush(aSkyline, p);
}
_clear:
taosArrayDestroy(aTmpSkyline);
taosArrayDestroy(pSkyline);
return code;
}
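tsdbBuildDeleteSkyline expands each delete record into the point pair (sKey, version), (eKey, 0) and merges the halves, keeping the larger live version wherever ranges overlap. For example, deletes [1,5] at version 2 and [3,8] at version 4 flatten to (1,2) (3,4) (5,4) (8,0). A compact, self-contained merge over plain arrays, with toy types rather than the SArray API:

```c
#include <stdio.h>

typedef struct { long ts; long ver; } K;

/* Merge two per-record skylines; at each emitted point the live version
 * is the max of the two sides, mirroring tsdbMergeSkyline above. */
static int mergeSkyline(const K *a, int n1, const K *b, int n2, K *out) {
  int  i = 0, j = 0, n = 0;
  long v1 = 0, v2 = 0;
  while (i < n1 && j < n2) {
    K p;
    if (a[i].ts < b[j].ts)      { v1 = a[i].ver; p = a[i]; i++; }
    else if (a[i].ts > b[j].ts) { v2 = b[j].ver; p = b[j]; j++; }
    else { v1 = a[i].ver; v2 = b[j].ver; p = a[i]; i++; j++; }
    p.ver = v1 > v2 ? v1 : v2;
    out[n++] = p;
  }
  while (i < n1) out[n++] = a[i++];
  while (j < n2) out[n++] = b[j++];
  return n;
}

int main(void) {
  K d1[] = {{1, 2}, {5, 0}}; /* delete [1,5] at version 2 */
  K d2[] = {{3, 4}, {8, 0}}; /* delete [3,8] at version 4 */
  K out[4];
  int n = mergeSkyline(d1, 2, d2, 2, out);
  for (int i = 0; i < n; i++) printf("(%ld,%ld) ", out[i].ts, out[i].ver);
  printf("\n"); /* (1,2) (3,4) (5,4) (8,0) */
  return 0;
}
```

The recursive halving in tsdbBuildDeleteSkylineImpl keeps each merge between similarly sized skylines, so n delete records cost O(n log n) overall.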
/*
int32_t tsdbBuildDeleteSkyline2(SArray *aDelData, int32_t sidx, int32_t eidx, SArray *aSkyline) {
int32_t code = 0;
SDelData *pDelData;
int32_t midx;
@ -1030,6 +1165,7 @@ int32_t tsdbBuildDeleteSkyline(SArray *aDelData, int32_t sidx, int32_t eidx, SAr
return code;
}
*/
// SBlockData ======================================================
int32_t tBlockDataCreate(SBlockData *pBlockData) {

View File

@ -1339,7 +1339,7 @@ static int32_t vnodeProcessBatchDeleteReq(SVnode *pVnode, int64_t version, void
tDecodeSBatchDeleteReq(&decoder, &deleteReq);
SMetaReader mr = {0};
metaReaderInit(&mr, pVnode->pMeta, 0);
metaReaderInit(&mr, pVnode->pMeta, META_READER_NOLOCK);
int32_t sz = taosArrayGetSize(deleteReq.deleteReqs);
for (int32_t i = 0; i < sz; i++) {

View File

@ -1670,12 +1670,18 @@ int32_t convertFillType(int32_t mode) {
case FILL_MODE_NULL:
type = TSDB_FILL_NULL;
break;
case FILL_MODE_NULL_F:
type = TSDB_FILL_NULL_F;
break;
case FILL_MODE_NEXT:
type = TSDB_FILL_NEXT;
break;
case FILL_MODE_VALUE:
type = TSDB_FILL_SET_VALUE;
break;
case FILL_MODE_VALUE_F:
type = TSDB_FILL_SET_VALUE_F;
break;
case FILL_MODE_LINEAR:
type = TSDB_FILL_LINEAR;
break;

View File

@ -140,7 +140,7 @@ static SSDataBlock* doFillImpl(SOperatorInfo* pOperator) {
while (1) {
SSDataBlock* pBlock = pDownstream->fpSet.getNextFn(pDownstream);
if (pBlock == NULL) {
if (pInfo->totalInputRows == 0) {
if (pInfo->totalInputRows == 0 && (pInfo->pFillInfo->type != TSDB_FILL_NULL_F && pInfo->pFillInfo->type != TSDB_FILL_SET_VALUE_F)) {
setOperatorCompleted(pOperator);
return NULL;
}
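The changed condition above encodes the split the new _F ("force") modes introduce: FILL(NULL)/FILL(VALUE) emit nothing on empty input, while FILL(NULL_F)/FILL(VALUE_F) still fill the requested window. The decision, reduced to a standalone predicate with illustrative enum values:

```c
#include <stdbool.h>
#include <stdio.h>

typedef enum { FILL_NULL, FILL_NULL_F, FILL_VALUE, FILL_VALUE_F } FillType;

/* true => the fill operator may finish without emitting any row */
static bool canCompleteOnEmptyInput(long totalInputRows, FillType t) {
  return totalInputRows == 0 && t != FILL_NULL_F && t != FILL_VALUE_F;
}

int main(void) {
  printf("%d\n", canCompleteOnEmptyInput(0, FILL_NULL));   /* 1: done, no output */
  printf("%d\n", canCompleteOnEmptyInput(0, FILL_NULL_F)); /* 0: force fill */
  return 0;
}
```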
@ -456,7 +456,8 @@ void* destroyStreamFillLinearInfo(SStreamFillLinearInfo* pFillLinear) {
return NULL;
}
void* destroyStreamFillInfo(SStreamFillInfo* pFillInfo) {
if (pFillInfo->type == TSDB_FILL_SET_VALUE || pFillInfo->type == TSDB_FILL_NULL) {
if (pFillInfo->type == TSDB_FILL_SET_VALUE || pFillInfo->type == TSDB_FILL_SET_VALUE_F ||
pFillInfo->type == TSDB_FILL_NULL || pFillInfo->type == TSDB_FILL_NULL_F) {
taosMemoryFreeClear(pFillInfo->pResRow->pRowVal);
taosMemoryFreeClear(pFillInfo->pResRow);
}
@ -661,7 +662,9 @@ void setDeleteFillValueInfo(TSKEY start, TSKEY end, SStreamFillSupporter* pFillS
pFillInfo->pos = FILL_POS_INVALID;
switch (pFillInfo->type) {
case TSDB_FILL_NULL:
case TSDB_FILL_NULL_F:
case TSDB_FILL_SET_VALUE:
case TSDB_FILL_SET_VALUE_F:
break;
case TSDB_FILL_PREV:
pFillInfo->pResRow = &pFillSup->prev;
@ -720,7 +723,9 @@ void setFillValueInfo(SSDataBlock* pBlock, TSKEY ts, int32_t rowId, SStreamFillS
pFillInfo->pos = FILL_POS_INVALID;
switch (pFillInfo->type) {
case TSDB_FILL_NULL:
case TSDB_FILL_SET_VALUE: {
case TSDB_FILL_NULL_F:
case TSDB_FILL_SET_VALUE:
case TSDB_FILL_SET_VALUE_F: {
if (pFillSup->prev.key == pFillInfo->preRowKey) {
resetFillWindow(&pFillSup->prev);
}
@ -1360,7 +1365,8 @@ SStreamFillInfo* initStreamFillInfo(SStreamFillSupporter* pFillSup, SSDataBlock*
pFillInfo->pLinearInfo->winIndex = 0;
pFillInfo->pResRow = NULL;
if (pFillSup->type == TSDB_FILL_SET_VALUE || pFillSup->type == TSDB_FILL_NULL) {
if (pFillSup->type == TSDB_FILL_SET_VALUE || pFillSup->type == TSDB_FILL_SET_VALUE_F
|| pFillSup->type == TSDB_FILL_NULL || pFillSup->type == TSDB_FILL_NULL_F) {
pFillInfo->pResRow = taosMemoryCalloc(1, sizeof(SResultRowData));
pFillInfo->pResRow->key = INT64_MIN;
pFillInfo->pResRow->pRowVal = taosMemoryCalloc(1, pFillSup->rowSize);
@ -1405,7 +1411,7 @@ SOperatorInfo* createStreamFillOperatorInfo(SOperatorInfo* downstream, SStreamFi
goto _error;
}
if (pInfo->pFillInfo->type == TSDB_FILL_SET_VALUE) {
if (pInfo->pFillInfo->type == TSDB_FILL_SET_VALUE || pInfo->pFillInfo->type == TSDB_FILL_SET_VALUE_F) {
for (int32_t i = 0; i < pInfo->pFillSup->numOfAllCols; ++i) {
SFillColInfo* pFillCol = pInfo->pFillSup->pAllColInfo + i;
int32_t slotId = GET_DEST_SLOT_ID(pFillCol);
@ -1427,7 +1433,7 @@ SOperatorInfo* createStreamFillOperatorInfo(SOperatorInfo* downstream, SStreamFi
pCell->isNull = true;
}
}
} else if (pInfo->pFillInfo->type == TSDB_FILL_NULL) {
} else if (pInfo->pFillInfo->type == TSDB_FILL_NULL || pInfo->pFillInfo->type == TSDB_FILL_NULL_F) {
for (int32_t i = 0; i < pInfo->pFillSup->numOfAllCols; ++i) {
SFillColInfo* pFillCol = pInfo->pFillSup->pAllColInfo + i;
int32_t slotId = GET_DEST_SLOT_ID(pFillCol);

View File

@ -492,8 +492,8 @@ _error:
static void doHashPartition(SOperatorInfo* pOperator, SSDataBlock* pBlock) {
SPartitionOperatorInfo* pInfo = pOperator->info;
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
for (int32_t j = 0; j < pBlock->info.rows; ++j) {
recordNewGroupKeys(pInfo->pGroupCols, pInfo->pGroupColVals, pBlock, j);
int32_t len = buildGroupKeys(pInfo->keyBuf, pInfo->pGroupColVals);
@ -690,8 +690,8 @@ static int compareDataGroupInfo(const void* group1, const void* group2) {
static SSDataBlock* buildPartitionResult(SOperatorInfo* pOperator) {
SPartitionOperatorInfo* pInfo = pOperator->info;
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
SDataGroupInfo* pGroupInfo =
(pInfo->groupIndex != -1) ? taosArrayGet(pInfo->sortedGroupArray, pInfo->groupIndex) : NULL;
if (pInfo->groupIndex == -1 || pInfo->pageIndex >= taosArrayGetSize(pGroupInfo->pPageList)) {
@ -713,7 +713,7 @@ static SSDataBlock* buildPartitionResult(SOperatorInfo* pOperator) {
qError("failed to get buffer, code:%s, %s", tstrerror(terrno), GET_TASKID(pTaskInfo));
T_LONG_JMP(pTaskInfo->env, terrno);
}
blockDataEnsureCapacity(pInfo->binfo.pRes, pInfo->rowCapacity);
blockDataFromBuf1(pInfo->binfo.pRes, page, pInfo->rowCapacity);
@ -829,6 +829,8 @@ SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartition
SPartitionOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SPartitionOperatorInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
if (pInfo == NULL || pOperator == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
pTaskInfo->code = terrno;
goto _error;
}
@ -841,6 +843,8 @@ SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartition
SExprInfo* pExprInfo1 = createExprInfo(pPartNode->pExprs, NULL, &num);
int32_t code = initExprSupp(&pInfo->scalarSup, pExprInfo1, num);
if (code != TSDB_CODE_SUCCESS) {
terrno = code;
pTaskInfo->code = terrno;
goto _error;
}
}
@ -848,6 +852,8 @@ SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartition
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
pInfo->pGroupSet = taosHashInit(100, hashFn, false, HASH_NO_LOCK);
if (pInfo->pGroupSet == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
pTaskInfo->code = terrno;
goto _error;
}
@ -866,6 +872,8 @@ SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartition
int32_t code = createDiskbasedBuf(&pInfo->pBuf, defaultPgsz, defaultBufsz, pTaskInfo->id.str, tsTempDir);
if (code != TSDB_CODE_SUCCESS) {
terrno = code;
pTaskInfo->code = code;
goto _error;
}
@ -873,6 +881,8 @@ SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartition
pInfo->columnOffset = setupColumnOffset(pInfo->binfo.pRes, pInfo->rowCapacity);
code = initGroupOptrInfo(&pInfo->pGroupColVals, &pInfo->groupKeyLen, &pInfo->keyBuf, pInfo->pGroupCols);
if (code != TSDB_CODE_SUCCESS) {
terrno = code;
pTaskInfo->code = code;
goto _error;
}
@ -885,10 +895,15 @@ SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartition
createOperatorFpSet(optrDummyOpenFn, hashPartition, NULL, destroyPartitionOperatorInfo, optrDefaultBufFn, NULL);
code = appendDownstream(pOperator, &downstream, 1);
if (code != TSDB_CODE_SUCCESS) {
terrno = code;
pTaskInfo->code = code;
goto _error;
}
return pOperator;
_error:
pTaskInfo->code = TSDB_CODE_OUT_OF_MEMORY;
if (pInfo != NULL) {
destroyPartitionOperatorInfo(pInfo);
}

View File

@ -190,6 +190,7 @@ static int32_t setInfoForNewGroup(SSDataBlock* pBlock, SLimitInfo* pLimitInfo, S
return PROJECT_RETRIEVE_DONE;
}
// todo refactor
static int32_t doIngroupLimitOffset(SLimitInfo* pLimitInfo, uint64_t groupId, SSDataBlock* pBlock,
SOperatorInfo* pOperator) {
// set current group id

View File

@ -312,8 +312,8 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanBase* pTableSca
tsdbReleaseDataBlock(pTableScanInfo->dataReader);
return TSDB_CODE_SUCCESS;
} else if (*status == FUNC_DATA_REQUIRED_NOT_LOAD) {
qDebug("%s data block skipped, brange:%" PRId64 "-%" PRId64 ", rows:%d", GET_TASKID(pTaskInfo),
pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows);
qDebug("%s data block skipped, brange:%" PRId64 "-%" PRId64 ", rows:%d, uid:%" PRIu64, GET_TASKID(pTaskInfo),
pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows, pBlockInfo->id.uid);
doSetTagColumnData(pTableScanInfo, pBlock, pTaskInfo, 1);
pCost->skipBlocks += 1;
tsdbReleaseDataBlock(pTableScanInfo->dataReader);
@ -399,7 +399,7 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanBase* pTableSca
}
bool limitReached = applyLimitOffset(&pTableScanInfo->limitInfo, pBlock, pTaskInfo);
if (limitReached) { // set operator flag is done
if (limitReached) { // set operator flag is done
setOperatorCompleted(pOperator);
}
@ -450,6 +450,15 @@ static STableCachedVal* createTableCacheVal(const SMetaReader* pMetaReader) {
// const void *key, size_t keyLen, void *value
static void freeCachedMetaItem(const void* key, size_t keyLen, void* value) { freeTableCachedVal(value); }
static void doSetNullValue(SSDataBlock* pBlock, const SExprInfo* pExpr, int32_t numOfExpr) {
for (int32_t j = 0; j < numOfExpr; ++j) {
int32_t dstSlotId = pExpr[j].base.resSchema.slotId;
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, dstSlotId);
colDataAppendNNULL(pColInfoData, 0, pBlock->info.rows);
}
}
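doSetNullValue pads every destination column with one NULL per row already in the block, so a block whose table meta has vanished still lines up column-wise. A standalone imitation where a per-column counter stands in for colDataAppendNNULL:

```c
#include <stdio.h>

/* Append `rows` NULL markers to each destination column, as
 * doSetNullValue does for the tag columns of a dropped table. */
static void setNullColumns(int *nullCount, int numOfExpr, int rows) {
  for (int j = 0; j < numOfExpr; ++j) {
    nullCount[j] += rows; /* stands in for colDataAppendNNULL(col, 0, rows) */
  }
}

int main(void) {
  int nulls[2] = {0, 0};
  setNullColumns(nulls, 2, 4); /* 2 pseudo columns, 4 rows in the block */
  printf("%d %d\n", nulls[0], nulls[1]); /* 4 4 */
  return 0;
}
```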
int32_t addTagPseudoColumnData(SReadHandle* pHandle, const SExprInfo* pExpr, int32_t numOfExpr, SSDataBlock* pBlock,
int32_t rows, const char* idStr, STableMetaCacheInfo* pCache) {
// currently only the tbname pseudo column
@ -469,14 +478,21 @@ int32_t addTagPseudoColumnData(SReadHandle* pHandle, const SExprInfo* pExpr, int
SMetaReader mr = {0};
LRUHandle* h = NULL;
// todo refactor: extract method
// the handling of null data should be moved into the extracted method
// 1. check if it is existed in meta cache
if (pCache == NULL) {
metaReaderInit(&mr, pHandle->meta, 0);
code = metaGetTableEntryByUidCache(&mr, pBlock->info.id.uid);
if (code != TSDB_CODE_SUCCESS) {
// when encountering the TSDB_CODE_PAR_TABLE_NOT_EXIST error, we proceed.
if (terrno == TSDB_CODE_PAR_TABLE_NOT_EXIST) {
qWarn("failed to get table meta, table may have been dropped, uid:0x%" PRIx64 ", code:%s, %s",
pBlock->info.id.uid, tstrerror(terrno), idStr);
// append null values before returning to the caller, since the caller will ignore this error code and proceed
doSetNullValue(pBlock, pExpr, numOfExpr);
} else {
qError("failed to get table meta, uid:0x%" PRIx64 ", code:%s, %s", pBlock->info.id.uid, tstrerror(terrno),
idStr);
@ -502,6 +518,8 @@ int32_t addTagPseudoColumnData(SReadHandle* pHandle, const SExprInfo* pExpr, int
if (terrno == TSDB_CODE_PAR_TABLE_NOT_EXIST) {
qWarn("failed to get table meta, table may have been dropped, uid:0x%" PRIx64 ", code:%s, %s",
pBlock->info.id.uid, tstrerror(terrno), idStr);
// append null values before returning to the caller, since the caller will ignore this error code and proceed
doSetNullValue(pBlock, pExpr, numOfExpr);
} else {
qError("failed to get table meta, uid:0x%" PRIx64 ", code:%s, %s", pBlock->info.id.uid, tstrerror(terrno),
idStr);
@ -621,6 +639,11 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) {
T_LONG_JMP(pTaskInfo->env, pTaskInfo->code);
}
if (pOperator->status == OP_EXEC_DONE) {
tsdbReleaseDataBlock(pTableScanInfo->base.dataReader);
break;
}
// process this data block based on the probabilities
bool processThisBlock = processBlockWithProbability(&pTableScanInfo->sample);
if (!processThisBlock) {
@ -632,9 +655,8 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) {
uint32_t status = 0;
int32_t code = loadDataBlock(pOperator, &pTableScanInfo->base, pBlock, &status);
// int32_t code = loadDataBlockOnDemand(pOperator->pRuntimeEnv, pTableScanInfo, pBlock, &status);
if (code != TSDB_CODE_SUCCESS) {
T_LONG_JMP(pOperator->pTaskInfo->env, code);
T_LONG_JMP(pTaskInfo->env, code);
}
// the current block is filtered out according to the filter condition, continue loading the next block
@ -2539,7 +2561,7 @@ static SSDataBlock* getTableDataBlockImpl(void* param) {
}
uint32_t status = 0;
loadDataBlock(pOperator, &pInfo->base, pBlock, &status);
code = loadDataBlock(pOperator, &pInfo->base, pBlock, &status);
// code = loadDataBlockFromOneTable(pOperator, pTableScanInfo, pBlock, &status);
if (code != TSDB_CODE_SUCCESS) {
T_LONG_JMP(pTaskInfo->env, code);
@ -2714,10 +2736,13 @@ SSDataBlock* getSortedTableMergeScanBlockData(SSortHandle* pHandle, SSDataBlock*
}
}
applyLimitOffset(&pInfo->limitInfo, pResBlock, pTaskInfo);
qDebug("%s get sorted row block, rows:%d, limit:%"PRId64, GET_TASKID(pTaskInfo), pResBlock->info.rows,
pInfo->limitInfo.numOfOutputRows);
bool limitReached = applyLimitOffset(&pInfo->limitInfo, pResBlock, pTaskInfo);
qDebug("%s get sorted row block, rows:%d, limit:%" PRId64, GET_TASKID(pTaskInfo), pResBlock->info.rows,
pInfo->limitInfo.numOfOutputRows);
if (limitReached) {
resetLimitInfoForNextGroup(&pInfo->limitInfo);
}
return (pResBlock->info.rows > 0) ? pResBlock : NULL;
}

View File

@ -186,7 +186,7 @@ static void doFillOneRow(SFillInfo* pFillInfo, SSDataBlock* pBlock, SSDataBlock*
}
}
}
} else if (pFillInfo->type == TSDB_FILL_NULL) { // fill with NULL
} else if (pFillInfo->type == TSDB_FILL_NULL || pFillInfo->type == TSDB_FILL_NULL_F) { // fill with NULL
setNullRow(pBlock, pFillInfo, index);
} else { // fill with user specified value for each column
for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) {
@ -349,7 +349,7 @@ static int32_t fillResultImpl(SFillInfo* pFillInfo, SSDataBlock* pBlock, int32_t
bool isNull = colDataIsNull_s(pSrc, pFillInfo->index);
colDataAppend(pDst, index, src, isNull);
saveColData(pFillInfo->prev.pRowVal, i, src, isNull); // todo:
} else if (pFillInfo->type == TSDB_FILL_NULL) {
} else if (pFillInfo->type == TSDB_FILL_NULL || pFillInfo->type == TSDB_FILL_NULL_F) {
colDataAppendNULL(pDst, index);
} else if (pFillInfo->type == TSDB_FILL_NEXT) {
SArray* p = FILL_IS_ASC_FILL(pFillInfo) ? pFillInfo->next.pRowVal : pFillInfo->prev.pRowVal;
@ -447,32 +447,6 @@ struct SFillInfo* taosCreateFillInfo(TSKEY skey, int32_t numOfFillCols, int32_t
taosResetFillInfo(pFillInfo, skey);
switch (fillType) {
case FILL_MODE_NONE:
pFillInfo->type = TSDB_FILL_NONE;
break;
case FILL_MODE_PREV:
pFillInfo->type = TSDB_FILL_PREV;
break;
case FILL_MODE_NULL:
pFillInfo->type = TSDB_FILL_NULL;
break;
case FILL_MODE_LINEAR:
pFillInfo->type = TSDB_FILL_LINEAR;
break;
case FILL_MODE_NEXT:
pFillInfo->type = TSDB_FILL_NEXT;
break;
case FILL_MODE_VALUE:
pFillInfo->type = TSDB_FILL_SET_VALUE;
break;
default: {
taosMemoryFree(pFillInfo);
terrno = TSDB_CODE_INVALID_PARA;
return NULL;
}
}
pFillInfo->type = fillType;
pFillInfo->pFillCol = pCol;
pFillInfo->numOfCols = numOfFillCols + numOfNotFillCols;
@ -572,15 +546,14 @@ bool taosFillHasMoreResults(SFillInfo* pFillInfo) {
}
int64_t getNumOfResultsAfterFillGap(SFillInfo* pFillInfo, TSKEY ekey, int32_t maxNumOfRows) {
SColumnInfoData* pCol = taosArrayGet(pFillInfo->pSrcBlock->pDataBlock, pFillInfo->srcTsSlotId);
int64_t* tsList = (int64_t*)pCol->pData;
int32_t numOfRows = taosNumOfRemainRows(pFillInfo);
TSKEY ekey1 = ekey;
int64_t numOfRes = -1;
if (numOfRows > 0) { // still filling the gap within the current data block, not generating data after the result set.
SColumnInfoData* pCol = taosArrayGet(pFillInfo->pSrcBlock->pDataBlock, pFillInfo->srcTsSlotId);
int64_t* tsList = (int64_t*)pCol->pData;
TSKEY lastKey = tsList[pFillInfo->numOfRows - 1];
numOfRes = taosTimeCountInterval(lastKey, pFillInfo->currentKey, pFillInfo->interval.sliding,
pFillInfo->interval.slidingUnit, pFillInfo->interval.precision);

View File

@ -185,12 +185,14 @@ static bool genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
int32_t srcSlot = pExprInfo->base.pParam[0].pCol->slotId;
switch (pSliceInfo->fillType) {
case TSDB_FILL_NULL: {
case TSDB_FILL_NULL:
case TSDB_FILL_NULL_F: {
colDataAppendNULL(pDst, rows);
break;
}
case TSDB_FILL_SET_VALUE: {
case TSDB_FILL_SET_VALUE:
case TSDB_FILL_SET_VALUE_F: {
SVariant* pVar = &pSliceInfo->pFillColInfo[j].fillVal;
if (pDst->info.type == TSDB_DATA_TYPE_FLOAT) {

View File

@ -190,8 +190,9 @@ static int32_t doAddToBuf(SSDataBlock* pDataBlock, SSortHandle* pHandle) {
qError("Add to buf failed since %s", terrstr(terrno));
return terrno;
}
int32_t code = createDiskbasedBuf(&pHandle->pBuf, pHandle->pageSize, pHandle->numOfPages * pHandle->pageSize,
"doAddToBuf", tsTempDir);
"sortExternalBuf", tsTempDir);
dBufSetPrintInfo(pHandle->pBuf);
if (code != TSDB_CODE_SUCCESS) {
return code;
@ -635,6 +636,7 @@ int32_t getProperSortPageSize(size_t rowSize, uint32_t numOfCols) {
static int32_t createInitialSources(SSortHandle* pHandle) {
size_t sortBufSize = pHandle->numOfPages * pHandle->pageSize;
int32_t code = 0;
if (pHandle->type == SORT_SINGLESOURCE_SORT) {
SSortSource** pSource = taosArrayGet(pHandle->pOrderedSource, 0);
@ -663,8 +665,8 @@ static int32_t createInitialSources(SSortHandle* pHandle) {
pHandle->beforeFp(pBlock, pHandle->param);
}
int32_t code = blockDataMerge(pHandle->pDataBlock, pBlock);
if (code != 0) {
code = blockDataMerge(pHandle->pDataBlock, pBlock);
if (code != TSDB_CODE_SUCCESS) {
if (source->param && !source->onlyRef) {
taosMemoryFree(source->param);
}
@ -689,6 +691,7 @@ static int32_t createInitialSources(SSortHandle* pHandle) {
blockDataDestroy(source->src.pBlock);
source->src.pBlock = NULL;
}
taosMemoryFree(source);
return code;
}
@ -696,13 +699,17 @@ static int32_t createInitialSources(SSortHandle* pHandle) {
int64_t el = taosGetTimestampUs() - p;
pHandle->sortElapsed += el;
doAddToBuf(pHandle->pDataBlock, pHandle);
code = doAddToBuf(pHandle->pDataBlock, pHandle);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
}
}
if (source->param && !source->onlyRef) {
taosMemoryFree(source->param);
}
taosMemoryFree(source);
if (pHandle->pDataBlock != NULL && pHandle->pDataBlock->info.rows > 0) {
@ -711,7 +718,7 @@ static int32_t createInitialSources(SSortHandle* pHandle) {
// Perform the in-memory sort, then flush the buffered data to disk.
int64_t p = taosGetTimestampUs();
int32_t code = blockDataSort(pHandle->pDataBlock, pHandle->pSortInfo);
code = blockDataSort(pHandle->pDataBlock, pHandle->pSortInfo);
if (code != 0) {
return code;
}
@ -729,12 +736,12 @@ static int32_t createInitialSources(SSortHandle* pHandle) {
pHandle->tupleHandle.pBlock = pHandle->pDataBlock;
return 0;
} else {
doAddToBuf(pHandle->pDataBlock, pHandle);
code = doAddToBuf(pHandle->pDataBlock, pHandle);
}
}
}
return TSDB_CODE_SUCCESS;
return code;
}
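The createInitialSources changes share one theme: blockDataMerge, doAddToBuf, and blockDataSort now assign into a single `code` declared at the top, and the function returns `code` instead of a hard-wired TSDB_CODE_SUCCESS. The pattern, reduced to a toy with hypothetical step names:

```c
#include <stdio.h>

static int stepA(void) { return 0; }
static int stepB(void) { return -1; } /* pretend doAddToBuf failed */

static int createSources(void) {
  int code = 0; /* declared once, threaded through every fallible call */
  code = stepA();
  if (code != 0) return code;
  code = stepB();
  if (code != 0) return code; /* previously this result was silently dropped */
  return code;
}

int main(void) {
  printf("code=%d\n", createSources()); /* code=-1: the failure propagates */
  return 0;
}
```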
int32_t tsortOpen(SSortHandle* pHandle) {

View File

@ -714,26 +714,18 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems)
pBuf->type = type;
if (IS_NULL_TYPE(type)) {
numOfElems = 0;
goto _over;
}
// data in the current data block is qualified for the query
if (pInput->colDataSMAIsSet) {
numOfElems = pInput->numOfRows - pAgg->numOfNull;
if (numOfElems == 0) {
goto _over;
}
void* tval = NULL;
int16_t index = 0;
if (isMinFunc) {
tval = &pInput->pColumnDataAgg[0]->min;
} else {
tval = &pInput->pColumnDataAgg[0]->max;
}
void* tval = (isMinFunc) ? &pInput->pColumnDataAgg[0]->min : &pInput->pColumnDataAgg[0]->max;
if (!pBuf->assign) {
if (type == TSDB_DATA_TYPE_FLOAT) {
@ -824,8 +816,9 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc, int32_t* nElems)
}
}
numOfElems = 1;
pBuf->assign = true;
return TSDB_CODE_SUCCESS;
goto _over;
}
int32_t start = pInput->startRowIndex;

View File

@ -262,6 +262,13 @@ bool fmIsGroupKeyFunc(int32_t funcId) {
return FUNCTION_TYPE_GROUP_KEY == funcMgtBuiltins[funcId].type;
}
bool fmIsBlockDistFunc(int32_t funcId) {
if (funcId < 0 || funcId >= funcMgtBuiltinsNum) {
return false;
}
return FUNCTION_TYPE_BLOCK_DIST == funcMgtBuiltins[funcId].type;
}
void fmFuncMgtDestroy() {
void* m = gFunMgtService.pFuncNameHashTable;
if (m != NULL && atomic_val_compare_exchange_ptr((void**)&gFunMgtService.pFuncNameHashTable, m, 0) == m) {

View File

@ -2019,10 +2019,14 @@ char* nodesGetFillModeString(EFillMode mode) {
return "none";
case FILL_MODE_VALUE:
return "value";
case FILL_MODE_VALUE_F:
return "value_f";
case FILL_MODE_PREV:
return "prev";
case FILL_MODE_NULL:
return "null";
case FILL_MODE_NULL_F:
return "null_f";
case FILL_MODE_LINEAR:
return "linear";
case FILL_MODE_NEXT:

View File

@ -20,11 +20,11 @@
struct SToken;
#define NEXT_TOKEN(pSql, sToken) \
do { \
int32_t index = 0; \
sToken = tStrGetToken(pSql, &index, false); \
pSql += index; \
#define NEXT_TOKEN(pSql, sToken) \
do { \
int32_t index = 0; \
sToken = tStrGetToken(pSql, &index, false, NULL); \
pSql += index; \
} while (0)
#define CHECK_CODE(expr) \

View File

@ -55,7 +55,7 @@ uint32_t tGetToken(const char *z, uint32_t *tokenType);
* @param isPrevOptr
* @return
*/
SToken tStrGetToken(const char *str, int32_t *i, bool isPrevOptr);
SToken tStrGetToken(const char *str, int32_t *i, bool isPrevOptr, bool *pIgnoreComma);
/**
* check if it is a keyword or not

View File

@ -1007,12 +1007,14 @@ sliding_opt(A) ::= SLIDING NK_LP duration_literal(B) NK_RP.
fill_opt(A) ::= . { A = NULL; }
fill_opt(A) ::= FILL NK_LP fill_mode(B) NK_RP. { A = createFillNode(pCxt, B, NULL); }
fill_opt(A) ::= FILL NK_LP VALUE NK_COMMA literal_list(B) NK_RP. { A = createFillNode(pCxt, FILL_MODE_VALUE, createNodeListNode(pCxt, B)); }
fill_opt(A) ::= FILL NK_LP VALUE_F NK_COMMA literal_list(B) NK_RP. { A = createFillNode(pCxt, FILL_MODE_VALUE_F, createNodeListNode(pCxt, B)); }
%type fill_mode { EFillMode }
%destructor fill_mode { }
fill_mode(A) ::= NONE. { A = FILL_MODE_NONE; }
fill_mode(A) ::= PREV. { A = FILL_MODE_PREV; }
fill_mode(A) ::= NULL. { A = FILL_MODE_NULL; }
fill_mode(A) ::= NULL_F. { A = FILL_MODE_NULL_F; }
fill_mode(A) ::= LINEAR. { A = FILL_MODE_LINEAR; }
fill_mode(A) ::= NEXT. { A = FILL_MODE_NEXT; }

View File

@ -18,16 +18,23 @@
#include "tglobal.h"
#include "ttime.h"
#define NEXT_TOKEN_WITH_PREV(pSql, token) \
do { \
int32_t index = 0; \
token = tStrGetToken(pSql, &index, true); \
pSql += index; \
#define NEXT_TOKEN_WITH_PREV(pSql, token) \
do { \
int32_t index = 0; \
token = tStrGetToken(pSql, &index, true, NULL); \
pSql += index; \
} while (0)
#define NEXT_TOKEN_KEEP_SQL(pSql, token, index) \
do { \
token = tStrGetToken(pSql, &index, false); \
#define NEXT_TOKEN_WITH_PREV_EXT(pSql, token, pIgnoreComma) \
do { \
int32_t index = 0; \
token = tStrGetToken(pSql, &index, true, pIgnoreComma); \
pSql += index; \
} while (0)
#define NEXT_TOKEN_KEEP_SQL(pSql, token, index) \
do { \
token = tStrGetToken(pSql, &index, false, NULL); \
} while (0)
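NEXT_TOKEN_WITH_PREV_EXT threads the new pIgnoreComma out-parameter through tStrGetToken so parseOneRow can reject stray commas such as `values (1,,2)`. The detection, reduced to a standalone skip loop; this is a toy scanner, not the real tokenizer:

```c
#include <stdbool.h>
#include <stdio.h>

/* Skip blanks and commas before the next token; report any comma
 * skipped on the way, the way the extended tStrGetToken does. */
static const char *nextToken(const char *s, bool *pIgnoreComma) {
  while (*s == ' ' || *s == ',') {
    if (*s == ',' && pIgnoreComma != NULL) *pIgnoreComma = true;
    s++;
  }
  return s;
}

int main(void) {
  bool        ignoreComma = false;
  const char *p = nextToken(", 42", &ignoreComma);
  printf("token: '%s', sawComma: %d\n", p, ignoreComma); /* sawComma: 1 */
  return 0;
}
```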
#define NEXT_VALID_TOKEN(pSql, token) \
@ -266,12 +273,12 @@ static int parseTime(const char** end, SToken* pToken, int16_t timePrec, int64_t
* e.g., now+12a, now-5h
*/
index = 0;
SToken token = tStrGetToken(pTokenEnd, &index, false);
SToken token = tStrGetToken(pTokenEnd, &index, false, NULL);
pTokenEnd += index;
if (token.type == TK_NK_MINUS || token.type == TK_NK_PLUS) {
index = 0;
SToken valueToken = tStrGetToken(pTokenEnd, &index, false);
SToken valueToken = tStrGetToken(pTokenEnd, &index, false, NULL);
pTokenEnd += index;
if (valueToken.n < 2) {
@ -1240,7 +1247,14 @@ static int parseOneRow(SInsertParseContext* pCxt, const char** pSql, STableDataC
int32_t code = TSDB_CODE_SUCCESS;
// 1. set the parsed value from sql string
for (int i = 0; i < pCols->numOfBound && TSDB_CODE_SUCCESS == code; ++i) {
NEXT_TOKEN_WITH_PREV(*pSql, *pToken);
const char* pOrigSql = *pSql;
bool ignoreComma = false;
NEXT_TOKEN_WITH_PREV_EXT(*pSql, *pToken, &ignoreComma);
if (ignoreComma) {
code = buildSyntaxErrMsg(&pCxt->msg, "invalid data or symbol", pOrigSql);
break;
}
SSchema* pSchema = &pSchemas[pCols->pColIndex[i]];
SColVal* pVal = taosArrayGet(pTableCxt->pValues, pCols->pColIndex[i]);
@ -1248,20 +1262,22 @@ static int parseOneRow(SInsertParseContext* pCxt, const char** pSql, STableDataC
isParseBindParam = true;
if (NULL == pCxt->pComCxt->pStmtCb) {
code = buildSyntaxErrMsg(&pCxt->msg, "? only used in stmt", pToken->z);
break;
}
} else {
if (TK_NK_RP == pToken->type) {
code = generateSyntaxErrMsg(&pCxt->msg, TSDB_CODE_PAR_INVALID_COLUMNS_NUM);
break;
}
continue;
}
if (TSDB_CODE_SUCCESS == code && TK_NK_RP == pToken->type) {
code = generateSyntaxErrMsg(&pCxt->msg, TSDB_CODE_PAR_INVALID_COLUMNS_NUM);
}
if (isParseBindParam) {
code = buildInvalidOperationMsg(&pCxt->msg, "no mix usage for ? and values");
break;
}
if (TSDB_CODE_SUCCESS == code && isParseBindParam) {
code = buildInvalidOperationMsg(&pCxt->msg, "no mix usage for ? and values");
}
if (TSDB_CODE_SUCCESS == code) {
code = parseValueToken(pCxt, pSql, pToken, pSchema, getTableInfo(pTableCxt->pMeta).precision, pVal);
if (TSDB_CODE_SUCCESS == code) {
code = parseValueToken(pCxt, pSql, pToken, pSchema, getTableInfo(pTableCxt->pMeta).precision, pVal);
}
}
if (TSDB_CODE_SUCCESS == code && i < pCols->numOfBound - 1) {

View File

@ -150,6 +150,7 @@ static SKeyword keywordTable[] = {
{"NOT", TK_NOT},
{"NOW", TK_NOW},
{"NULL", TK_NULL},
{"NULL_F", TK_NULL_F},
{"NULLS", TK_NULLS},
{"OFFSET", TK_OFFSET},
{"ON", TK_ON},
@ -240,6 +241,7 @@ static SKeyword keywordTable[] = {
{"USERS", TK_USERS},
{"USING", TK_USING},
{"VALUE", TK_VALUE},
{"VALUE_F", TK_VALUE_F},
{"VALUES", TK_VALUES},
{"VARCHAR", TK_VARCHAR},
{"VARIABLES", TK_VARIABLES},
@ -625,7 +627,7 @@ uint32_t tGetToken(const char* z, uint32_t* tokenId) {
return 0;
}
SToken tStrGetToken(const char* str, int32_t* i, bool isPrevOptr) {
SToken tStrGetToken(const char* str, int32_t* i, bool isPrevOptr, bool* pIgnoreComma) {
SToken t0 = {0};
// here we reach the end of the sql string, which is null-terminated
@ -646,6 +648,10 @@ SToken tStrGetToken(const char* str, int32_t* i, bool isPrevOptr) {
return t0;
}
if (NULL != pIgnoreComma && t == ',') {
*pIgnoreComma = true;
}
t = str[++(*i)];
}

View File

@ -1561,6 +1561,26 @@ static int32_t translateRepeatScanFunc(STranslateContext* pCxt, SFunctionNode* p
return TSDB_CODE_SUCCESS;
}
static int32_t translateBlockDistFunc(STranslateContext* pCtx, SFunctionNode* pFunc) {
if (!fmIsBlockDistFunc(pFunc->funcId)) {
return TSDB_CODE_SUCCESS;
}
if (!isSelectStmt(pCtx->pCurrStmt)) {
return generateSyntaxErrMsgExt(&pCtx->msgBuf, TSDB_CODE_PAR_ONLY_SUPPORT_SINGLE_TABLE,
"%s is only supported in single table query", pFunc->functionName);
}
SSelectStmt* pSelect = (SSelectStmt*)pCtx->pCurrStmt;
SNode* pTable = pSelect->pFromTable;
if (NULL != pTable && (QUERY_NODE_REAL_TABLE != nodeType(pTable) ||
(TSDB_SUPER_TABLE != ((SRealTableNode*)pTable)->pMeta->tableType &&
TSDB_CHILD_TABLE != ((SRealTableNode*)pTable)->pMeta->tableType &&
TSDB_NORMAL_TABLE != ((SRealTableNode*)pTable)->pMeta->tableType))) {
return generateSyntaxErrMsgExt(&pCtx->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC,
"%s is only supported on super table, child table or normal table", pFunc->functionName);
}
return TSDB_CODE_SUCCESS;
}
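translateBlockDistFunc restricts _block_dist() to a single real table whose type is super, child, or normal. The gate, reduced to a standalone predicate with illustrative enum values:

```c
#include <stdbool.h>
#include <stdio.h>

typedef enum { NODE_REAL_TABLE, NODE_TEMP_TABLE } NodeType;
typedef enum { TB_SUPER, TB_CHILD, TB_NORMAL, TB_SYSTEM } TableType;

static bool blockDistAllowed(bool haveTable, NodeType n, TableType t) {
  if (!haveTable) return true; /* no FROM table: nothing to reject here */
  return n == NODE_REAL_TABLE && (t == TB_SUPER || t == TB_CHILD || t == TB_NORMAL);
}

int main(void) {
  printf("%d\n", blockDistAllowed(true, NODE_REAL_TABLE, TB_SYSTEM)); /* 0: rejected */
  printf("%d\n", blockDistAllowed(true, NODE_REAL_TABLE, TB_CHILD));  /* 1: allowed */
  return 0;
}
```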
static bool isStar(SNode* pNode) {
return (QUERY_NODE_COLUMN == nodeType(pNode)) && ('\0' == ((SColumnNode*)pNode)->tableAlias[0]) &&
(0 == strcmp(((SColumnNode*)pNode)->colName, "*"));
@ -1720,7 +1740,7 @@ static int32_t rewriteSystemInfoFunc(STranslateContext* pCxt, SNode** pNode) {
return TSDB_CODE_PAR_INTERNAL_ERROR;
}
static int32_t translateNoramlFunction(STranslateContext* pCxt, SFunctionNode* pFunc) {
static int32_t translateNormalFunction(STranslateContext* pCxt, SFunctionNode* pFunc) {
int32_t code = translateAggFunc(pCxt, pFunc);
if (TSDB_CODE_SUCCESS == code) {
code = translateScanPseudoColumnFunc(pCxt, pFunc);
@ -1752,6 +1772,9 @@ static int32_t translateNoramlFunction(STranslateContext* pCxt, SFunctionNode* p
if (TSDB_CODE_SUCCESS == code) {
code = translateTimelineFunc(pCxt, pFunc);
}
if (TSDB_CODE_SUCCESS == code) {
code = translateBlockDistFunc(pCxt, pFunc);
}
if (TSDB_CODE_SUCCESS == code) {
setFuncClassification(pCxt->pCurrStmt, pFunc);
}
@ -1812,7 +1835,7 @@ static int32_t translateFunctionImpl(STranslateContext* pCxt, SFunctionNode** pF
if (fmIsClientPseudoColumnFunc((*pFunc)->funcId)) {
return rewriteClientPseudoColumnFunc(pCxt, (SNode**)pFunc);
}
return translateNoramlFunction(pCxt, *pFunc);
return translateNormalFunction(pCxt, *pFunc);
}
static EDealRes translateFunction(STranslateContext* pCxt, SFunctionNode** pFunc) {
@ -2798,7 +2821,7 @@ static int32_t convertFillValue(STranslateContext* pCxt, SDataType dt, SNodeList
}
static int32_t checkFillValues(STranslateContext* pCxt, SFillNode* pFill, SNodeList* pProjectionList) {
if (FILL_MODE_VALUE != pFill->mode) {
if (FILL_MODE_VALUE != pFill->mode && FILL_MODE_VALUE_F != pFill->mode) {
return TSDB_CODE_SUCCESS;
}

View File

@ -27,7 +27,7 @@ bool qIsInsertValuesSql(const char* pStr, size_t length) {
const char* pSql = pStr;
int32_t index = 0;
SToken t = tStrGetToken((char*)pStr, &index, false);
SToken t = tStrGetToken((char*)pStr, &index, false, NULL);
if (TK_INSERT != t.type && TK_IMPORT != t.type) {
return false;
}
@ -35,7 +35,7 @@ bool qIsInsertValuesSql(const char* pStr, size_t length) {
do {
pStr += index;
index = 0;
t = tStrGetToken((char*)pStr, &index, false);
t = tStrGetToken((char*)pStr, &index, false, NULL);
if (TK_USING == t.type || TK_VALUES == t.type || TK_FILE == t.type) {
return true;
} else if (TK_SELECT == t.type) {

File diff suppressed because it is too large

View File

@ -516,7 +516,7 @@ int32_t qwHandlePostPhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *inp
}
if (QW_EVENT_RECEIVED(ctx, QW_EVENT_DROP)) {
if (QW_PHASE_POST_FETCH != phase || qwTaskNotInExec(ctx)) {
if (QW_PHASE_POST_FETCH != phase || ((!QW_QUERY_RUNNING(ctx)) && qwTaskNotInExec(ctx))) {
QW_ERR_JRET(qwDropTask(QW_FPARAMS()));
QW_ERR_JRET(ctx->rspCode);
}

View File

@ -128,7 +128,7 @@ SStreamState* streamStateOpen(char* path, SStreamTask* pTask, bool specPath, int
memset(statePath, 0, 1024);
tstrncpy(statePath, path, 1024);
}
if (tdbOpen(statePath, szPage, pages, &pState->pTdbState->db, 0) < 0) {
if (tdbOpen(statePath, szPage, pages, &pState->pTdbState->db, 1) < 0) {
goto _err;
}

View File

@ -110,7 +110,7 @@ int64_t syncNodeCheckCommitIndex(SSyncNode* ths, SyncIndex indexLikely) {
if (indexLikely > ths->commitIndex && syncNodeAgreedUpon(ths, indexLikely)) {
SyncIndex commitIndex = indexLikely;
syncNodeUpdateCommitIndex(ths, commitIndex);
sTrace("vgId:%d, agreed upon. role:%d, term:%" PRId64 ", index: %" PRId64 "", ths->vgId, ths->state,
sTrace("vgId:%d, agreed upon. role:%d, term:%" PRId64 ", index:%" PRId64 "", ths->vgId, ths->state,
ths->raftStore.currentTerm, commitIndex);
}
return ths->commitIndex;

View File

@ -85,7 +85,7 @@ int64_t syncOpen(SSyncInfo* pSyncInfo) {
int32_t syncStart(int64_t rid) {
SSyncNode* pSyncNode = syncNodeAcquire(rid);
if (pSyncNode == NULL) {
sError("failed to acquire rid: %" PRId64 " of tsNodeReftId for pSyncNode", rid);
sError("failed to acquire rid:%" PRId64 " of tsNodeReftId for pSyncNode", rid);
return -1;
}
@ -756,7 +756,7 @@ int32_t syncNodeLogStoreRestoreOnNeed(SSyncNode* pNode) {
SyncIndex lastVer = pNode->pLogStore->syncLogLastIndex(pNode->pLogStore);
if (lastVer < commitIndex || firstVer > commitIndex + 1) {
if (pNode->pLogStore->syncLogRestoreFromSnapshot(pNode->pLogStore, commitIndex)) {
sError("vgId:%d, failed to restore log store from snapshot since %s. lastVer: %" PRId64 ", snapshotVer: %" PRId64,
sError("vgId:%d, failed to restore log store from snapshot since %s. lastVer:%" PRId64 ", snapshotVer:%" PRId64,
pNode->vgId, terrstr(), lastVer, commitIndex);
return -1;
}
@ -1112,7 +1112,7 @@ int32_t syncNodeRestore(SSyncNode* pSyncNode) {
SyncIndex endIndex = pSyncNode->pLogBuf->endIndex;
if (lastVer != -1 && endIndex != lastVer + 1) {
terrno = TSDB_CODE_WAL_LOG_INCOMPLETE;
sError("vgId:%d, failed to restore sync node since %s. expected lastLogIndex: %" PRId64 ", lastVer: %" PRId64 "",
sError("vgId:%d, failed to restore sync node since %s. expected lastLogIndex:%" PRId64 ", lastVer:%" PRId64 "",
pSyncNode->vgId, terrstr(), endIndex - 1, lastVer);
return -1;
}
@ -1831,7 +1831,7 @@ void syncNodeCandidate2Leader(SSyncNode* pSyncNode) {
SyncIndex lastIndex = pSyncNode->pLogStore->syncLogLastIndex(pSyncNode->pLogStore);
ASSERT(lastIndex >= 0);
sInfo("vgId:%d, become leader. term: %" PRId64 ", commit index: %" PRId64 ", last index: %" PRId64 "",
sInfo("vgId:%d, become leader. term:%" PRId64 ", commit index:%" PRId64 ", last index:%" PRId64 "",
pSyncNode->vgId, pSyncNode->raftStore.currentTerm, pSyncNode->commitIndex, lastIndex);
}
@ -1850,7 +1850,7 @@ void syncNodeFollower2Candidate(SSyncNode* pSyncNode) {
ASSERT(pSyncNode->state == TAOS_SYNC_STATE_FOLLOWER);
pSyncNode->state = TAOS_SYNC_STATE_CANDIDATE;
SyncIndex lastIndex = pSyncNode->pLogStore->syncLogLastIndex(pSyncNode->pLogStore);
sInfo("vgId:%d, become candidate from follower. term: %" PRId64 ", commit index: %" PRId64 ", last index: %" PRId64,
sInfo("vgId:%d, become candidate from follower. term:%" PRId64 ", commit index:%" PRId64 ", last index:%" PRId64,
pSyncNode->vgId, pSyncNode->raftStore.currentTerm, pSyncNode->commitIndex, lastIndex);
sNTrace(pSyncNode, "follower to candidate");
@ -1860,7 +1860,7 @@ void syncNodeLeader2Follower(SSyncNode* pSyncNode) {
ASSERT(pSyncNode->state == TAOS_SYNC_STATE_LEADER);
syncNodeBecomeFollower(pSyncNode, "leader to follower");
SyncIndex lastIndex = pSyncNode->pLogStore->syncLogLastIndex(pSyncNode->pLogStore);
sInfo("vgId:%d, become follower from leader. term: %" PRId64 ", commit index: %" PRId64 ", last index: %" PRId64,
sInfo("vgId:%d, become follower from leader. term:%" PRId64 ", commit index:%" PRId64 ", last index:%" PRId64,
pSyncNode->vgId, pSyncNode->raftStore.currentTerm, pSyncNode->commitIndex, lastIndex);
sNTrace(pSyncNode, "leader to follower");
@ -1870,7 +1870,7 @@ void syncNodeCandidate2Follower(SSyncNode* pSyncNode) {
ASSERT(pSyncNode->state == TAOS_SYNC_STATE_CANDIDATE);
syncNodeBecomeFollower(pSyncNode, "candidate to follower");
SyncIndex lastIndex = pSyncNode->pLogStore->syncLogLastIndex(pSyncNode->pLogStore);
sInfo("vgId:%d, become follower from candidate. term: %" PRId64 ", commit index: %" PRId64 ", last index: %" PRId64,
sInfo("vgId:%d, become follower from candidate. term:%" PRId64 ", commit index:%" PRId64 ", last index:%" PRId64,
pSyncNode->vgId, pSyncNode->raftStore.currentTerm, pSyncNode->commitIndex, lastIndex);
sNTrace(pSyncNode, "candidate to follower");
@ -2310,7 +2310,7 @@ int32_t syncNodeAppend(SSyncNode* ths, SSyncRaftEntry* pEntry) {
// advance the match index, replicating on demand
SyncIndex matchIndex = syncLogBufferProceed(ths->pLogBuf, ths, NULL);
sTrace("vgId:%d, append raft entry. index: %" PRId64 ", term: %" PRId64 " pBuf: [%" PRId64 " %" PRId64 " %" PRId64
sTrace("vgId:%d, append raft entry. index:%" PRId64 ", term:%" PRId64 " pBuf: [%" PRId64 " %" PRId64 " %" PRId64
", %" PRId64 ")",
ths->vgId, pEntry->index, pEntry->term, ths->pLogBuf->startIndex, ths->pLogBuf->commitIndex,
ths->pLogBuf->matchIndex, ths->pLogBuf->endIndex);
@ -2483,7 +2483,7 @@ int32_t syncNodeOnHeartbeat(SSyncNode* ths, const SRpcMsg* pRpcMsg) {
sError("vgId:%d, sync enqueue step-down msg error, code:%d", ths->vgId, code);
rpcFreeCont(rpcMsgLocalCmd.pCont);
} else {
sTrace("vgId:%d, sync enqueue step-down msg, new-term: %" PRId64, ths->vgId, pSyncMsg->currentTerm);
sTrace("vgId:%d, sync enqueue step-down msg, new-term:%" PRId64, ths->vgId, pSyncMsg->currentTerm);
}
}
}
@ -2549,7 +2549,7 @@ int32_t syncNodeOnLocalCmd(SSyncNode* ths, const SRpcMsg* pRpcMsg) {
(void)syncNodeUpdateCommitIndex(ths, pMsg->commitIndex);
}
if (syncLogBufferCommit(ths->pLogBuf, ths, ths->commitIndex) < 0) {
sError("vgId:%d, failed to commit raft log since %s. commit index: %" PRId64 "", ths->vgId, terrstr(),
sError("vgId:%d, failed to commit raft log since %s. commit index:%" PRId64 "", ths->vgId, terrstr(),
ths->commitIndex);
}
} else {

View File

@ -132,16 +132,16 @@ SSyncRaftEntry* syncEntryBuildDummy(SyncTerm term, SyncIndex index, int32_t vgId
int32_t syncLogValidateAlignmentOfCommit(SSyncNode* pNode, SyncIndex commitIndex) {
SyncIndex firstVer = pNode->pLogStore->syncLogBeginIndex(pNode->pLogStore);
if (firstVer > commitIndex + 1) {
sError("vgId:%d, firstVer of WAL log greater than tsdb commit version + 1. firstVer: %" PRId64
", tsdb commit version: %" PRId64 "",
sError("vgId:%d, firstVer of WAL log greater than tsdb commit version + 1. firstVer:%" PRId64
", tsdb commit version:%" PRId64 "",
pNode->vgId, firstVer, commitIndex);
return -1;
}
SyncIndex lastVer = pNode->pLogStore->syncLogLastIndex(pNode->pLogStore);
if (lastVer < commitIndex) {
sError("vgId:%d, lastVer of WAL log less than tsdb commit version. lastVer: %" PRId64
", tsdb commit version: %" PRId64 "",
sError("vgId:%d, lastVer of WAL log less than tsdb commit version. lastVer:%" PRId64
", tsdb commit version:%" PRId64 "",
pNode->vgId, lastVer, commitIndex);
return -1;
}
@ -293,7 +293,7 @@ int32_t syncLogBufferAccept(SSyncLogBuffer* pBuf, SSyncNode* pNode, SSyncRaftEnt
bool inBuf = true;
if (index <= pBuf->commitIndex) {
sTrace("vgId:%d, already committed. index: %" PRId64 ", term: %" PRId64 ". log buffer: [%" PRId64 " %" PRId64
sTrace("vgId:%d, already committed. index:%" PRId64 ", term:%" PRId64 ". log buffer: [%" PRId64 " %" PRId64
" %" PRId64 ", %" PRId64 ")",
pNode->vgId, pEntry->index, pEntry->term, pBuf->startIndex, pBuf->commitIndex, pBuf->matchIndex,
pBuf->endIndex);
@ -306,7 +306,7 @@ int32_t syncLogBufferAccept(SSyncLogBuffer* pBuf, SSyncNode* pNode, SSyncRaftEnt
}
if (index - pBuf->startIndex >= pBuf->size) {
sWarn("vgId:%d, out of buffer range. index: %" PRId64 ", term: %" PRId64 ". log buffer: [%" PRId64 " %" PRId64
sWarn("vgId:%d, out of buffer range. index:%" PRId64 ", term:%" PRId64 ". log buffer: [%" PRId64 " %" PRId64
" %" PRId64 ", %" PRId64 ")",
pNode->vgId, pEntry->index, pEntry->term, pBuf->startIndex, pBuf->commitIndex, pBuf->matchIndex,
pBuf->endIndex);
@ -314,8 +314,8 @@ int32_t syncLogBufferAccept(SSyncLogBuffer* pBuf, SSyncNode* pNode, SSyncRaftEnt
}
if (index > pBuf->matchIndex && lastMatchTerm != prevTerm) {
sWarn("vgId:%d, not ready to accept. index: %" PRId64 ", term: %" PRId64 ": prevterm: %" PRId64
" != lastmatch: %" PRId64 ". log buffer: [%" PRId64 " %" PRId64 " %" PRId64 ", %" PRId64 ")",
sWarn("vgId:%d, not ready to accept. index:%" PRId64 ", term:%" PRId64 ": prevterm:%" PRId64
" != lastmatch:%" PRId64 ". log buffer: [%" PRId64 " %" PRId64 " %" PRId64 ", %" PRId64 ")",
pNode->vgId, pEntry->index, pEntry->term, prevTerm, lastMatchTerm, pBuf->startIndex, pBuf->commitIndex,
pBuf->matchIndex, pBuf->endIndex);
goto _out;
@ -328,7 +328,7 @@ int32_t syncLogBufferAccept(SSyncLogBuffer* pBuf, SSyncNode* pNode, SSyncRaftEnt
if (pEntry->term != pExist->term) {
(void)syncLogBufferRollback(pBuf, pNode, index);
} else {
sTrace("vgId:%d, duplicate log entry received. index: %" PRId64 ", term: %" PRId64 ". log buffer: [%" PRId64
sTrace("vgId:%d, duplicate log entry received. index:%" PRId64 ", term:%" PRId64 ". log buffer: [%" PRId64
" %" PRId64 " %" PRId64 ", %" PRId64 ")",
pNode->vgId, pEntry->index, pEntry->term, pBuf->startIndex, pBuf->commitIndex, pBuf->matchIndex,
pBuf->endIndex);
@ -434,7 +434,7 @@ int64_t syncLogBufferProceed(SSyncLogBuffer* pBuf, SSyncNode* pNode, SyncTerm* p
// increase match index
pBuf->matchIndex = index;
sTrace("vgId:%d, log buffer proceed. start index: %" PRId64 ", match index: %" PRId64 ", end index: %" PRId64,
sTrace("vgId:%d, log buffer proceed. start index:%" PRId64 ", match index:%" PRId64 ", end index:%" PRId64,
pNode->vgId, pBuf->startIndex, pBuf->matchIndex, pBuf->endIndex);
// replicate on demand
@ -475,7 +475,7 @@ int32_t syncLogFsmExecute(SSyncNode* pNode, SSyncFSM* pFsm, ESyncState role, Syn
}
if (pEntry->originalRpcType == TDMT_VND_COMMIT) {
sInfo("vgId:%d, fsm execute vnode commit. index: %" PRId64 ", term: %" PRId64 "", pNode->vgId, pEntry->index,
sInfo("vgId:%d, fsm execute vnode commit. index:%" PRId64 ", term:%" PRId64 "", pNode->vgId, pEntry->index,
pEntry->term);
}
@ -528,7 +528,7 @@ int32_t syncLogBufferCommit(SSyncLogBuffer* pBuf, SSyncNode* pNode, int64_t comm
goto _out;
}
sTrace("vgId:%d, commit. log buffer: [%" PRId64 " %" PRId64 " %" PRId64 ", %" PRId64 "), role: %d, term: %" PRId64,
sTrace("vgId:%d, commit. log buffer: [%" PRId64 " %" PRId64 " %" PRId64 ", %" PRId64 "), role:%d, term:%" PRId64,
pNode->vgId, pBuf->startIndex, pBuf->commitIndex, pBuf->matchIndex, pBuf->endIndex, role, term);
// execute in fsm
@ -541,19 +541,19 @@ int32_t syncLogBufferCommit(SSyncLogBuffer* pBuf, SSyncNode* pNode, int64_t comm
// execute it
if (!syncUtilUserCommit(pEntry->originalRpcType)) {
sInfo("vgId:%d, commit sync barrier. index: %" PRId64 ", term:%" PRId64 ", type: %s", vgId, pEntry->index,
sInfo("vgId:%d, commit sync barrier. index:%" PRId64 ", term:%" PRId64 ", type:%s", vgId, pEntry->index,
pEntry->term, TMSG_INFO(pEntry->originalRpcType));
}
if (syncLogFsmExecute(pNode, pFsm, role, term, pEntry, 0) != 0) {
sError("vgId:%d, failed to execute sync log entry. index:%" PRId64 ", term:%" PRId64
", role: %d, current term: %" PRId64,
", role:%d, current term:%" PRId64,
vgId, pEntry->index, pEntry->term, role, term);
goto _out;
}
pBuf->commitIndex = index;
sTrace("vgId:%d, committed index: %" PRId64 ", term: %" PRId64 ", role: %d, current term: %" PRId64 "", pNode->vgId,
sTrace("vgId:%d, committed index:%" PRId64 ", term:%" PRId64 ", role:%d, current term:%" PRId64 "", pNode->vgId,
pEntry->index, pEntry->term, role, term);
if (!inBuf) {
@ -614,7 +614,7 @@ int32_t syncLogReplMgrRetryOnNeed(SSyncLogReplMgr* pMgr, SSyncNode* pNode) {
SRaftId* pDestId = &pNode->replicasId[pMgr->peerId];
if (pMgr->retryBackoff == SYNC_MAX_RETRY_BACKOFF) {
syncLogReplMgrReset(pMgr);
sWarn("vgId:%d, reset sync log repl mgr since retry backoff exceeding limit. peer: %" PRIx64, pNode->vgId,
sWarn("vgId:%d, reset sync log repl mgr since retry backoff exceeding limit. peer:%" PRIx64, pNode->vgId,
pDestId->addr);
return -1;
}
@ -639,7 +639,7 @@ int32_t syncLogReplMgrRetryOnNeed(SSyncLogReplMgr* pMgr, SSyncNode* pNode) {
if (pMgr->states[pos].acked) {
if (pMgr->matchIndex < index && pMgr->states[pos].timeMs + (syncGetRetryMaxWaitMs() << 3) < nowMs) {
syncLogReplMgrReset(pMgr);
sWarn("vgId:%d, reset sync log repl mgr since stagnation. index: %" PRId64 ", peer: %" PRIx64, pNode->vgId,
sWarn("vgId:%d, reset sync log repl mgr since stagnation. index:%" PRId64 ", peer:%" PRIx64, pNode->vgId,
index, pDestId->addr);
goto _out;
}
@ -648,7 +648,7 @@ int32_t syncLogReplMgrRetryOnNeed(SSyncLogReplMgr* pMgr, SSyncNode* pNode) {
bool barrier = false;
if (syncLogReplMgrReplicateOneTo(pMgr, pNode, index, &term, pDestId, &barrier) < 0) {
sError("vgId:%d, failed to replicate sync log entry since %s. index: %" PRId64 ", dest: %" PRIx64 "", pNode->vgId,
sError("vgId:%d, failed to replicate sync log entry since %s. index:%" PRId64 ", dest:%" PRIx64 "", pNode->vgId,
terrstr(), index, pDestId->addr);
goto _out;
}
@ -670,8 +670,8 @@ _out:
if (retried) {
pMgr->retryBackoff = syncLogGetNextRetryBackoff(pMgr);
SSyncLogBuffer* pBuf = pNode->pLogBuf;
sInfo("vgId:%d, resend %d sync log entries. dest: %" PRIx64 ", indexes: %" PRId64 " ..., terms: ... %" PRId64
", retryWaitMs: %" PRId64 ", mgr: [%" PRId64 " %" PRId64 ", %" PRId64 "), buffer: [%" PRId64 " %" PRId64
sInfo("vgId:%d, resend %d sync log entries. dest:%" PRIx64 ", indexes:%" PRId64 " ..., terms: ... %" PRId64
", retryWaitMs:%" PRId64 ", mgr: [%" PRId64 " %" PRId64 ", %" PRId64 "), buffer: [%" PRId64 " %" PRId64
" %" PRId64 ", %" PRId64 ")",
pNode->vgId, count, pDestId->addr, firstIndex, term, retryWaitMs, pMgr->startIndex, pMgr->matchIndex,
pMgr->endIndex, pBuf->startIndex, pBuf->commitIndex, pBuf->matchIndex, pBuf->endIndex);
@ -714,7 +714,7 @@ int32_t syncLogReplMgrProcessReplyAsRecovery(SSyncLogReplMgr* pMgr, SSyncNode* p
}
if (pMsg->success == false && pMsg->matchIndex >= pMsg->lastSendIndex) {
sWarn("vgId:%d, failed to rollback match index. peer: dnode:%d, match index: %" PRId64 ", last sent: %" PRId64,
sWarn("vgId:%d, failed to rollback match index. peer: dnode:%d, match index:%" PRId64 ", last sent:%" PRId64,
pNode->vgId, DID(&destId), pMsg->matchIndex, pMsg->lastSendIndex);
if (syncNodeStartSnapshot(pNode, &destId) < 0) {
sError("vgId:%d, failed to start snapshot for peer dnode:%d", pNode->vgId, DID(&destId));
@ -761,7 +761,7 @@ int32_t syncLogReplMgrProcessHeartbeatReply(SSyncLogReplMgr* pMgr, SSyncNode* pN
SSyncLogBuffer* pBuf = pNode->pLogBuf;
taosThreadMutexLock(&pBuf->mutex);
if (pMsg->startTime != 0 && pMsg->startTime != pMgr->peerStartTime) {
sInfo("vgId:%d, reset sync log repl mgr in heartbeat. peer: %" PRIx64 ", start time:%" PRId64 ", old:%" PRId64 "",
sInfo("vgId:%d, reset sync log repl mgr in heartbeat. peer:%" PRIx64 ", start time:%" PRId64 ", old:%" PRId64 "",
pNode->vgId, pMsg->srcId.addr, pMsg->startTime, pMgr->peerStartTime);
syncLogReplMgrReset(pMgr);
pMgr->peerStartTime = pMsg->startTime;
@ -774,7 +774,7 @@ int32_t syncLogReplMgrProcessReply(SSyncLogReplMgr* pMgr, SSyncNode* pNode, Sync
SSyncLogBuffer* pBuf = pNode->pLogBuf;
taosThreadMutexLock(&pBuf->mutex);
if (pMsg->startTime != pMgr->peerStartTime) {
sInfo("vgId:%d, reset sync log repl mgr in appendlog reply. peer: %" PRIx64 ", start time:%" PRId64
sInfo("vgId:%d, reset sync log repl mgr in appendlog reply. peer:%" PRIx64 ", start time:%" PRId64
", old:%" PRId64,
pNode->vgId, pMsg->srcId.addr, pMsg->startTime, pMgr->peerStartTime);
syncLogReplMgrReset(pMgr);
@ -815,7 +815,7 @@ int32_t syncLogReplMgrReplicateProbe(SSyncLogReplMgr* pMgr, SSyncNode* pNode, Sy
bool barrier = false;
SyncTerm term = -1;
if (syncLogReplMgrReplicateOneTo(pMgr, pNode, index, &term, pDestId, &barrier) < 0) {
sError("vgId:%d, failed to replicate log entry since %s. index: %" PRId64 ", dest: 0x%016" PRIx64 "", pNode->vgId,
sError("vgId:%d, failed to replicate log entry since %s. index:%" PRId64 ", dest: 0x%016" PRIx64 "", pNode->vgId,
terrstr(), index, pDestId->addr);
return -1;
}
@ -830,7 +830,7 @@ int32_t syncLogReplMgrReplicateProbe(SSyncLogReplMgr* pMgr, SSyncNode* pNode, Sy
pMgr->endIndex = index + 1;
SSyncLogBuffer* pBuf = pNode->pLogBuf;
sTrace("vgId:%d, probe peer:%" PRIx64 " with msg of index:%" PRId64 " term: %" PRId64 ". mgr (rs:%d): [%" PRId64
sTrace("vgId:%d, probe peer:%" PRIx64 " with msg of index:%" PRId64 " term:%" PRId64 ". mgr (rs:%d): [%" PRId64
" %" PRId64 ", %" PRId64 "), buffer: [%" PRId64 " %" PRId64 " %" PRId64 ", %" PRId64 ")",
pNode->vgId, pDestId->addr, index, term, pMgr->restored, pMgr->startIndex, pMgr->matchIndex, pMgr->endIndex,
pBuf->startIndex, pBuf->commitIndex, pBuf->matchIndex, pBuf->endIndex);
@ -860,7 +860,7 @@ int32_t syncLogReplMgrReplicateAttempt(SSyncLogReplMgr* pMgr, SSyncNode* pNode)
bool barrier = false;
SyncTerm term = -1;
if (syncLogReplMgrReplicateOneTo(pMgr, pNode, index, &term, pDestId, &barrier) < 0) {
sError("vgId:%d, failed to replicate log entry since %s. index: %" PRId64 ", dest: 0x%016" PRIx64 "", pNode->vgId,
sError("vgId:%d, failed to replicate log entry since %s. index:%" PRId64 ", dest: 0x%016" PRIx64 "", pNode->vgId,
terrstr(), index, pDestId->addr);
return -1;
}
@ -874,7 +874,7 @@ int32_t syncLogReplMgrReplicateAttempt(SSyncLogReplMgr* pMgr, SSyncNode* pNode)
pMgr->endIndex = index + 1;
if (barrier) {
sInfo("vgId:%d, replicated sync barrier to dest: %" PRIx64 ". index: %" PRId64 ", term: %" PRId64
sInfo("vgId:%d, replicated sync barrier to dest:%" PRIx64 ". index:%" PRId64 ", term:%" PRId64
", repl mgr: rs(%d) [%" PRId64 " %" PRId64 ", %" PRId64 ")",
pNode->vgId, pDestId->addr, index, term, pMgr->restored, pMgr->startIndex, pMgr->matchIndex,
pMgr->endIndex);
@ -885,7 +885,7 @@ int32_t syncLogReplMgrReplicateAttempt(SSyncLogReplMgr* pMgr, SSyncNode* pNode)
syncLogReplMgrRetryOnNeed(pMgr, pNode);
SSyncLogBuffer* pBuf = pNode->pLogBuf;
sTrace("vgId:%d, replicated %d msgs to peer: %" PRIx64 ". indexes: %" PRId64 "..., terms: ...%" PRId64
sTrace("vgId:%d, replicated %d msgs to peer:%" PRIx64 ". indexes:%" PRId64 "..., terms: ...%" PRId64
", mgr: (rs:%d) [%" PRId64 " %" PRId64 ", %" PRId64 "), buffer: [%" PRId64 " %" PRId64 " %" PRId64 ", %" PRId64
")",
pNode->vgId, count, pDestId->addr, firstIndex, term, pMgr->restored, pMgr->startIndex, pMgr->matchIndex,
@ -1028,7 +1028,7 @@ int32_t syncLogBufferRollback(SSyncLogBuffer* pBuf, SSyncNode* pNode, SyncIndex
return 0;
}
sInfo("vgId:%d, rollback sync log buffer. toindex: %" PRId64 ", buffer: [%" PRId64 " %" PRId64 " %" PRId64
sInfo("vgId:%d, rollback sync log buffer. toindex:%" PRId64 ", buffer: [%" PRId64 " %" PRId64 " %" PRId64
", %" PRId64 ")",
pNode->vgId, toIndex, pBuf->startIndex, pBuf->commitIndex, pBuf->matchIndex, pBuf->endIndex);
@ -1119,11 +1119,11 @@ int32_t syncLogReplMgrReplicateOneTo(SSyncLogReplMgr* pMgr, SSyncNode* pNode, Sy
pEntry = syncLogBufferGetOneEntry(pBuf, pNode, index, &inBuf);
if (pEntry == NULL) {
sError("vgId:%d, failed to get raft entry for index: %" PRId64 "", pNode->vgId, index);
sError("vgId:%d, failed to get raft entry for index:%" PRId64 "", pNode->vgId, index);
if (terrno == TSDB_CODE_WAL_LOG_NOT_EXIST) {
SSyncLogReplMgr* pMgr = syncNodeGetLogReplMgr(pNode, pDestId);
if (pMgr) {
sInfo("vgId:%d, reset sync log repl mgr of peer: %" PRIx64 " since %s. index: %" PRId64, pNode->vgId,
sInfo("vgId:%d, reset sync log repl mgr of peer:%" PRIx64 " since %s. index:%" PRId64, pNode->vgId,
pDestId->addr, terrstr(), index);
(void)syncLogReplMgrReset(pMgr);
}
@ -1134,7 +1134,7 @@ int32_t syncLogReplMgrReplicateOneTo(SSyncLogReplMgr* pMgr, SSyncNode* pNode, Sy
prevLogTerm = syncLogReplMgrGetPrevLogTerm(pMgr, pNode, index);
if (prevLogTerm < 0) {
sError("vgId:%d, failed to get prev log term since %s. index: %" PRId64 "", pNode->vgId, terrstr(), index);
sError("vgId:%d, failed to get prev log term since %s. index:%" PRId64 "", pNode->vgId, terrstr(), index);
goto _err;
}
if (pTerm) *pTerm = pEntry->term;
@ -1147,7 +1147,7 @@ int32_t syncLogReplMgrReplicateOneTo(SSyncLogReplMgr* pMgr, SSyncNode* pNode, Sy
(void)syncNodeSendAppendEntries(pNode, pDestId, &msgOut);
sTrace("vgId:%d, replicate one msg index: %" PRId64 " term: %" PRId64 " prevterm: %" PRId64 " to dest: 0x%016" PRIx64,
sTrace("vgId:%d, replicate one msg index:%" PRId64 " term:%" PRId64 " prevterm:%" PRId64 " to dest: 0x%016" PRIx64,
pNode->vgId, pEntry->index, pEntry->term, prevLogTerm, pDestId->addr);
if (!inBuf) {

View File

@ -91,7 +91,7 @@ SSyncRaftEntry* syncEntryBuildNoop(SyncTerm term, SyncIndex index, int32_t vgId)
void syncEntryDestroy(SSyncRaftEntry* pEntry) {
if (pEntry != NULL) {
sTrace("free entry: %p", pEntry);
sTrace("free entry:%p", pEntry);
taosMemoryFree(pEntry);
}
}

View File

@ -510,16 +510,8 @@ SyncIndex syncNodeGetSnapBeginIndex(SSyncNode *ths) {
SSyncLogStoreData *pData = ths->pLogStore->data;
SWal *pWal = pData->pWal;
bool isEmpty = ths->pLogStore->syncLogIsEmpty(ths->pLogStore);
int64_t walCommitVer = walGetCommittedVer(pWal);
if (!isEmpty && ths->commitIndex != walCommitVer) {
sNError(ths, "commit not same, wal-commit:%" PRId64 ", commit:%" PRId64 ", ignore", walCommitVer,
ths->commitIndex);
snapStart = walCommitVer + 1;
} else {
snapStart = ths->commitIndex + 1;
}
snapStart = TMAX(ths->commitIndex, walCommitVer) + 1;
sNInfo(ths, "snapshot begin index is %" PRId64, snapStart);
}
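// [sketch] The rewrite above collapses the commit-mismatch branch into a
// single expression: the snapshot begins right after whichever of the
// in-memory commit index and the WAL committed version is larger. TMAX is
// redefined locally so this fragment stands alone.
#define TMAX(a, b) ((a) > (b) ? (a) : (b))

static int64_t snapBeginIndex(int64_t commitIndex, int64_t walCommitVer) {
  return TMAX(commitIndex, walCommitVer) + 1;  // first index not yet covered
}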

View File

@ -43,7 +43,7 @@ void syncUtilNodeInfo2EpSet(const SNodeInfo* pInfo, SEpSet* pEpSet) {
bool syncUtilNodeInfo2RaftId(const SNodeInfo* pInfo, SyncGroupId vgId, SRaftId* raftId) {
uint32_t ipv4 = taosGetIpv4FromFqdn(pInfo->nodeFqdn);
if (ipv4 == 0xFFFFFFFF || ipv4 == 1) {
sError("failed to resolve ipv4 addr, fqdn: %s", pInfo->nodeFqdn);
sError("failed to resolve ipv4 addr, fqdn:%s", pInfo->nodeFqdn);
terrno = TSDB_CODE_TSC_INVALID_FQDN;
return false;
}

View File

@ -671,7 +671,7 @@ static SCliConn* cliCreateConn(SCliThrd* pThrd) {
conn->stream = (uv_stream_t*)taosMemoryMalloc(sizeof(uv_tcp_t));
uv_tcp_init(pThrd->loop, (uv_tcp_t*)(conn->stream));
conn->stream->data = conn;
transSetConnOption((uv_tcp_t*)conn->stream);
// transSetConnOption((uv_tcp_t*)conn->stream);
uv_timer_t* timer = taosArrayGetSize(pThrd->timerList) > 0 ? *(uv_timer_t**)taosArrayPop(pThrd->timerList) : NULL;
if (timer == NULL) {
@ -778,7 +778,7 @@ static void cliSendCb(uv_write_t* req, int status) {
SCliMsg* pMsg = !transQueueEmpty(&pConn->cliMsgs) ? transQueueGet(&pConn->cliMsgs, 0) : NULL;
if (pMsg != NULL) {
int64_t cost = taosGetTimestampUs() - pMsg->st;
if (cost > 1000) {
if (cost > 1000 * 20) {
tWarn("%s conn %p send cost:%dus, send exception", CONN_GET_INST_LABEL(pConn), pConn, (int)cost);
}
}
@ -800,9 +800,12 @@ static void cliSendCb(uv_write_t* req, int status) {
}
void cliSend(SCliConn* pConn) {
bool empty = transQueueEmpty(&pConn->cliMsgs);
ASSERTS(empty == false, "trans-cli get invalid msg");
if (empty == true) {
SCliThrd* pThrd = pConn->hostThrd;
STrans* pTransInst = pThrd->pTransInst;
if (transQueueEmpty(&pConn->cliMsgs)) {
tError("%s conn %p not msg to send", pTransInst->label, pConn);
cliHandleExcept(pConn);
return;
}
@ -812,9 +815,6 @@ void cliSend(SCliConn* pConn) {
STransConnCtx* pCtx = pCliMsg->ctx;
SCliThrd* pThrd = pConn->hostThrd;
STrans* pTransInst = pThrd->pTransInst;
STransMsg* pMsg = (STransMsg*)(&pCliMsg->msg);
if (pMsg->pCont == 0) {
pMsg->pCont = (void*)rpcMallocCont(0);
@ -1045,6 +1045,12 @@ static FORCE_INLINE uint32_t cliGetIpFromFqdnCache(SHashObj* cache, char* fqdn)
uint32_t* v = taosHashGet(cache, fqdn, strlen(fqdn));
if (v == NULL) {
addr = taosGetIpv4FromFqdn(fqdn);
if (addr == 0xffffffff) {
terrno = TAOS_SYSTEM_ERROR(errno);
tError("failed to get ip from fqdn:%s since %s", fqdn, terrstr());
return addr;
}
taosHashPut(cache, fqdn, strlen(fqdn), &addr, sizeof(addr));
} else {
addr = *v;
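// [sketch] The lookup-or-resolve pattern above, reduced to a toy
// fixed-size cache: on a miss the address is resolved, and the new guard
// ensures the 0xffffffff failure sentinel is reported but never cached.
// resolveIpv4() is a hypothetical stand-in for taosGetIpv4FromFqdn.
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct { char fqdn[256]; uint32_t addr; int used; } CacheEnt;

extern uint32_t resolveIpv4(const char *fqdn);  // hypothetical resolver

static uint32_t getIpCached(CacheEnt *cache, int n, const char *fqdn) {
  for (int i = 0; i < n; i++)  // linear scan stands in for taosHashGet
    if (cache[i].used && strcmp(cache[i].fqdn, fqdn) == 0) return cache[i].addr;
  uint32_t addr = resolveIpv4(fqdn);
  if (addr == 0xffffffff) return addr;  // failed: do not poison the cache
  for (int i = 0; i < n; i++) {
    if (cache[i].used) continue;
    snprintf(cache[i].fqdn, sizeof(cache[i].fqdn), "%s", fqdn);
    cache[i].addr = addr;
    cache[i].used = 1;
    break;
  }
  return addr;
}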
@ -1061,9 +1067,10 @@ void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) {
STransConnCtx* pCtx = pMsg->ctx;
cliMayCvtFqdnToIp(&pCtx->epSet, &pThrd->cvtAddr);
STraceId* trace = &pMsg->msg.info.traceId;
if (!EPSET_IS_VALID(&pCtx->epSet)) {
tError("invalid epset");
tGError("%s, msg %s sent with invalid epset", pTransInst->label, TMSG_INFO(pMsg->msg.msgType));
destroyCmsg(pMsg);
return;
}
@ -1116,15 +1123,45 @@ void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) {
conn->ip = strdup(EPSET_GET_INUSE_IP(&pCtx->epSet));
conn->port = EPSET_GET_INUSE_PORT(&pCtx->epSet);
uint32_t ipaddr = cliGetIpFromFqdnCache(pThrd->fqdn2ipCache, conn->ip);
if (ipaddr == 0xffffffff) {
uv_timer_stop(conn->timer);
conn->timer->data = NULL;
taosArrayPush(pThrd->timerList, &conn->timer);
conn->timer = NULL;
cliHandleExcept(conn);
return;
}
struct sockaddr_in addr;
addr.sin_family = AF_INET;
addr.sin_addr.s_addr = cliGetIpFromFqdnCache(pThrd->fqdn2ipCache, conn->ip);
addr.sin_addr.s_addr = ipaddr;
addr.sin_port = (uint16_t)htons((uint16_t)conn->port);
STraceId* trace = &(pMsg->msg.info.traceId);
tGTrace("%s conn %p try to connect to %s:%d", pTransInst->label, conn, conn->ip, conn->port);
int32_t fd = taosCreateSocketWithTimeout(TRANS_CONN_TIMEOUT * 4);
if (fd == -1) {
tGError("%s conn %p failed to create socket, reason:%s", transLabel(pTransInst), conn,
tstrerror(TAOS_SYSTEM_ERROR(errno)));
cliHandleExcept(conn);
errno = 0;
return;
}
int ret = uv_tcp_open((uv_tcp_t*)conn->stream, fd);
if (ret != 0) {
tGError("%s conn %p failed to set stream, reason:%s", transLabel(pTransInst), conn, uv_err_name(ret));
cliHandleExcept(conn);
return;
}
ret = transSetConnOption((uv_tcp_t*)conn->stream);
if (ret != 0) {
tGError("%s conn %p failed to set socket opt, reason:%s", transLabel(pTransInst), conn, uv_err_name(ret));
cliHandleExcept(conn);
return;
}
int ret = uv_tcp_connect(&conn->connReq, (uv_tcp_t*)(conn->stream), (const struct sockaddr*)&addr, cliConnCb);
ret = uv_tcp_connect(&conn->connReq, (uv_tcp_t*)(conn->stream), (const struct sockaddr*)&addr, cliConnCb);
if (ret != 0) {
tGError("%s conn %p failed to connect to %s:%d, reason:%s", pTransInst->label, conn, conn->ip, conn->port,
uv_err_name(ret));
@ -1139,7 +1176,6 @@ void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) {
}
uv_timer_start(conn->timer, cliConnTimeout, TRANS_CONN_TIMEOUT, 0);
}
STraceId* trace = &pMsg->msg.info.traceId;
tGTrace("%s conn %p ready", pTransInst->label, conn);
}
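// [sketch] The connect path above, reduced to its libuv essentials: adopt
// a pre-created socket fd into an initialized uv_tcp_t, apply the socket
// option, then start the async connect, failing fast with uv_err_name()
// at each step. The real code routes failures through cliHandleExcept and
// recycles the conn timer; fprintf stands in here.
#include <stdio.h>
#include <uv.h>

static void onConnect(uv_connect_t *req, int status) {
  fprintf(stderr, "connect finished: %s\n", status ? uv_err_name(status) : "ok");
}

// tcp must already have been set up with uv_tcp_init(loop, tcp)
static int connectFd(uv_tcp_t *tcp, uv_connect_t *req, int fd,
                     const struct sockaddr_in *addr) {
  int ret = uv_tcp_open(tcp, fd);  // hand the raw fd over to libuv
  if (ret != 0) { fprintf(stderr, "open: %s\n", uv_err_name(ret)); return ret; }
  ret = uv_tcp_nodelay(tcp, 1);    // plays the role of transSetConnOption
  if (ret != 0) { fprintf(stderr, "nodelay: %s\n", uv_err_name(ret)); return ret; }
  return uv_tcp_connect(req, tcp, (const struct sockaddr *)addr, onConnect);
}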
static void cliAsyncCb(uv_async_t* handle) {
@ -1275,7 +1311,11 @@ void* transInitClient(uint32_t ip, uint32_t port, char* label, int numOfThreads,
for (int i = 0; i < cli->numOfThreads; i++) {
SCliThrd* pThrd = createThrdObj(shandle);
int err = taosThreadCreate(&pThrd->thread, NULL, cliWorkThread, (void*)(pThrd));
if (pThrd == NULL) {
return NULL;
}
int err = taosThreadCreate(&pThrd->thread, NULL, cliWorkThread, (void*)(pThrd));
if (err == 0) {
tDebug("success to create tranport-cli thread:%d", i);
}
@ -1332,9 +1372,23 @@ static SCliThrd* createThrdObj(void* trans) {
taosThreadMutexInit(&pThrd->msgMtx, NULL);
pThrd->loop = (uv_loop_t*)taosMemoryMalloc(sizeof(uv_loop_t));
uv_loop_init(pThrd->loop);
int err = uv_loop_init(pThrd->loop);
if (err != 0) {
tError("failed to init uv_loop, reason:%s", uv_err_name(err));
taosMemoryFree(pThrd->loop);
taosThreadMutexDestroy(&pThrd->msgMtx);
taosMemoryFree(pThrd);
return NULL;
}
pThrd->asyncPool = transAsyncPoolCreate(pThrd->loop, 8, pThrd, cliAsyncCb);
if (pThrd->asyncPool == NULL) {
tError("failed to init async pool");
uv_loop_close(pThrd->loop);
taosMemoryFree(pThrd->loop);
taosThreadMutexDestroy(&pThrd->msgMtx);
taosMemoryFree(pThrd);
return NULL;
}
pThrd->prepare = taosMemoryCalloc(1, sizeof(uv_prepare_t));
uv_prepare_init(pThrd->loop, pThrd->prepare);

View File

@ -205,6 +205,10 @@ bool transReadComplete(SConnBuffer* connBuf) {
}
int transSetConnOption(uv_tcp_t* stream) {
#if defined(WINDOWS) || defined(DARWIN)
#else
uv_tcp_keepalive(stream, 1, 20);
#endif
return uv_tcp_nodelay(stream, 1);
// int ret = uv_tcp_keepalive(stream, 5, 60);
}
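// [sketch] A variant of the option setter that also checks the keepalive
// result rather than ignoring it; the guard mirrors the #if above
// (keepalive is skipped on Windows and Darwin).
#include <uv.h>

static int setConnOption(uv_tcp_t *stream) {
#if !defined(_WIN32) && !defined(__APPLE__)
  int ret = uv_tcp_keepalive(stream, 1, 20);  // probe after 20s of idle
  if (ret != 0) return ret;
#endif
  return uv_tcp_nodelay(stream, 1);  // disable Nagle for small RPC frames
}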
@ -214,24 +218,37 @@ SAsyncPool* transAsyncPoolCreate(uv_loop_t* loop, int sz, void* arg, AsyncCB cb)
pool->nAsync = sz;
pool->asyncs = taosMemoryCalloc(1, sizeof(uv_async_t) * pool->nAsync);
for (int i = 0; i < pool->nAsync; i++) {
int i = 0, err = 0;
for (i = 0; i < pool->nAsync; i++) {
uv_async_t* async = &(pool->asyncs[i]);
SAsyncItem* item = taosMemoryCalloc(1, sizeof(SAsyncItem));
item->pThrd = arg;
QUEUE_INIT(&item->qmsg);
taosThreadMutexInit(&item->mtx, NULL);
uv_async_t* async = &(pool->asyncs[i]);
uv_async_init(loop, async, cb);
async->data = item;
err = uv_async_init(loop, async, cb);
if (err != 0) {
tError("failed to init async, reason:%s", uv_err_name(err));
break;
}
}
if (i != pool->nAsync) {
transAsyncPoolDestroy(pool);
pool = NULL;
}
return pool;
}
void transAsyncPoolDestroy(SAsyncPool* pool) {
for (int i = 0; i < pool->nAsync; i++) {
uv_async_t* async = &(pool->asyncs[i]);
SAsyncItem* item = async->data;
if (item == NULL) continue;
taosThreadMutexDestroy(&item->mtx);
taosMemoryFree(item);
}
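// [sketch] The create/destroy pair above follows a partial-init pattern:
// stop at the first element that fails to initialize, then let the
// destroy routine walk the whole array and skip never-initialized slots
// via NULL checks. initItem/freeItem are hypothetical per-element hooks;
// initItem is assumed to set it->data on success and return 0.
#include <stdlib.h>

typedef struct { void *data; } Item;

extern int  initItem(Item *it);
extern void freeItem(Item *it);

static Item *createPool(int n) {
  Item *pool = calloc((size_t)n, sizeof(Item));
  if (pool == NULL) return NULL;
  int i = 0;
  for (; i < n; i++)
    if (initItem(&pool[i]) != 0) break;  // stop at the first failure
  if (i != n) {                          // partial init: unwind everything
    for (int j = 0; j < n; j++) {
      if (pool[j].data == NULL) continue;  // skip the uninitialized tail
      freeItem(&pool[j]);
    }
    free(pool);
    return NULL;
  }
  return pool;
}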

View File

@ -55,7 +55,7 @@ typedef struct TdSocket {
#endif
int refId;
SocketFd fd;
} * TdSocketPtr, TdSocket;
} *TdSocketPtr, TdSocket;
typedef struct TdSocketServer {
#if SOCKET_WITH_LOCK
@ -63,7 +63,7 @@ typedef struct TdSocketServer {
#endif
int refId;
SocketFd fd;
} * TdSocketServerPtr, TdSocketServer;
} *TdSocketServerPtr, TdSocketServer;
typedef struct TdEpoll {
#if SOCKET_WITH_LOCK
@ -71,7 +71,7 @@ typedef struct TdEpoll {
#endif
int refId;
EpollFd fd;
} * TdEpollPtr, TdEpoll;
} *TdEpollPtr, TdEpoll;
#if 0
int32_t taosSendto(TdSocketPtr pSocket, void *buf, int len, unsigned int flags, const struct sockaddr *dest_addr,
@ -944,7 +944,7 @@ uint32_t taosGetIpv4FromFqdn(const char *fqdn) {
iResult = WSAStartup(MAKEWORD(2, 2), &wsaData);
if (iResult != 0) {
// printf("WSAStartup failed: %d\n", iResult);
return 1;
return 0xFFFFFFFF;
}
#endif
struct addrinfo hints = {0};
@ -1005,7 +1005,7 @@ int32_t taosGetFqdn(char *fqdn) {
// immediately
// hints.ai_family = AF_INET;
strcpy(fqdn, hostname);
strcpy(fqdn+strlen(hostname), ".local");
strcpy(fqdn + strlen(hostname), ".local");
#else // __APPLE__
struct addrinfo hints = {0};
struct addrinfo *result = NULL;
@ -1060,7 +1060,7 @@ int32_t taosCreateSocketWithTimeout(uint32_t timeout) {
#if defined(WINDOWS)
SOCKET fd;
#else
int fd;
int fd;
#endif
if ((fd = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP)) == INVALID_SOCKET) {
return -1;
@ -1071,11 +1071,12 @@ int32_t taosCreateSocketWithTimeout(uint32_t timeout) {
return -1;
}
#elif defined(_TD_DARWIN_64)
uint32_t conn_timeout_ms = timeout * 1000;
if (0 != setsockopt(fd, IPPROTO_TCP, TCP_CONNECTIONTIMEOUT, (char *)&conn_timeout_ms, sizeof(conn_timeout_ms))) {
taosCloseSocketNoCheck1(fd);
return -1;
}
// invalid config
// uint32_t conn_timeout_ms = timeout * 1000;
// if (0 != setsockopt(fd, IPPROTO_TCP, TCP_CONNECTIONTIMEOUT, (char *)&conn_timeout_ms, sizeof(conn_timeout_ms))) {
// taosCloseSocketNoCheck1(fd);
// return -1;
//}
#else // Linux like systems
uint32_t conn_timeout_ms = timeout * 1000;
if (0 != setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT, (char *)&conn_timeout_ms, sizeof(conn_timeout_ms))) {

View File

@ -289,6 +289,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_STREAM_NOT_EXIST, "Stream not exist")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_STREAM_OPTION, "Invalid stream option")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_STREAM_MUST_BE_DELETED, "Stream must be dropped first")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_MULTI_REPLICA_SOURCE_DB, "Stream temporarily does not support source db having replica > 1")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_STREAMS, "Too many streams")
// mnode-sma
TAOS_DEFINE_ERROR(TSDB_CODE_MND_SMA_ALREADY_EXIST, "SMA already exists")

View File

@ -5,11 +5,11 @@
#include "thash.h"
#include "tlog.h"
#define GET_PAYLOAD_DATA(_p) ((char*)(_p)->pData + POINTER_BYTES)
#define BUF_PAGE_IN_MEM(_p) ((_p)->pData != NULL)
#define GET_PAYLOAD_DATA(_p) ((char*)(_p)->pData + POINTER_BYTES)
#define BUF_PAGE_IN_MEM(_p) ((_p)->pData != NULL)
#define CLEAR_BUF_PAGE_IN_MEM_FLAG(_p) ((_p)->pData = NULL)
#define HAS_DATA_IN_DISK(_p) ((_p)->offset >= 0)
#define NO_IN_MEM_AVAILABLE_PAGES(_b) (listNEles((_b)->lruList) >= (_b)->inMemPages)
#define HAS_DATA_IN_DISK(_p) ((_p)->offset >= 0)
#define NO_IN_MEM_AVAILABLE_PAGES(_b) (listNEles((_b)->lruList) >= (_b)->inMemPages)
typedef struct SPageDiskInfo {
int64_t offset;
@ -17,7 +17,7 @@ typedef struct SPageDiskInfo {
} SPageDiskInfo, SFreeListItem;
struct SPageInfo {
SListNode* pn; // point to list node struct. it is NULL when the page is evicted from the in-memory buffer
SListNode* pn; // point to list node struct. it is NULL when the page is evicted from the in-memory buffer
void* pData;
int64_t offset;
int32_t pageId;
@ -52,10 +52,13 @@ struct SDiskbasedBuf {
};
static int32_t createDiskFile(SDiskbasedBuf* pBuf) {
if (pBuf->path == NULL) { // prepare the file name when needed it
if (pBuf->path == NULL) {  // prepare the file name when it is needed
char path[PATH_MAX] = {0};
taosGetTmpfilePath(pBuf->prefix, "paged-buf", path);
pBuf->path = taosMemoryStrDup(path);
if (pBuf->path == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
}
pBuf->pFile =
@ -126,6 +129,30 @@ static uint64_t allocateNewPositionInFile(SDiskbasedBuf* pBuf, size_t size) {
static FORCE_INLINE size_t getAllocPageSize(int32_t pageSize) { return pageSize + POINTER_BYTES + sizeof(SFilePage); }
static int32_t doFlushBufPageImpl(SDiskbasedBuf* pBuf, int64_t offset, const char* pData, int32_t size) {
int32_t ret = taosLSeekFile(pBuf->pFile, offset, SEEK_SET);
if (ret == -1) {
terrno = TAOS_SYSTEM_ERROR(errno);
return terrno;
}
ret = (int32_t)taosWriteFile(pBuf->pFile, pData, size);
if (ret != size) {
terrno = TAOS_SYSTEM_ERROR(errno);
return terrno;
}
// extend the file
if (pBuf->fileSize < offset + size) {
pBuf->fileSize = offset + size;
}
pBuf->statis.flushBytes += size;
pBuf->statis.flushPages += 1;
return TSDB_CODE_SUCCESS;
}
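// [sketch] doFlushBufPageImpl extracts the seek/write/extend sequence that
// was previously duplicated in both branches of doFlushBufPage; a plain
// POSIX equivalent of the helper looks like this:
#include <errno.h>
#include <unistd.h>

static int flushAt(int fd, long long *fileSize, long long offset,
                   const char *data, int size) {
  if (lseek(fd, (off_t)offset, SEEK_SET) == (off_t)-1) return -errno;
  if (write(fd, data, (size_t)size) != (ssize_t)size) return -errno;
  if (*fileSize < offset + size) *fileSize = offset + size;  // grow file
  return 0;
}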
static char* doFlushBufPage(SDiskbasedBuf* pBuf, SPageInfo* pg) {
if (pg->pData == NULL || pg->used) {
uError("invalid params in paged buffer process when flushing buf to disk, %s", pBuf->id);
@ -134,12 +161,15 @@ static char* doFlushBufPage(SDiskbasedBuf* pBuf, SPageInfo* pg) {
}
int32_t size = pBuf->pageSize;
char* t = NULL;
int64_t offset = pg->offset;
char* t = NULL;
if ((!HAS_DATA_IN_DISK(pg)) || pg->dirty) {
void* payload = GET_PAYLOAD_DATA(pg);
t = doCompressData(payload, pBuf->pageSize, &size, pBuf);
if (size < 0) {
uError("failed to compress data when flushing data to disk, %s", pBuf->id);
terrno = TSDB_CODE_INVALID_PARA;
return NULL;
}
}
@ -147,59 +177,29 @@ static char* doFlushBufPage(SDiskbasedBuf* pBuf, SPageInfo* pg) {
// this page is flushed to disk for the first time
if (pg->dirty) {
if (!HAS_DATA_IN_DISK(pg)) {
pg->offset = allocateNewPositionInFile(pBuf, size);
offset = allocateNewPositionInFile(pBuf, size);
pBuf->nextPos += size;
int32_t ret = taosLSeekFile(pBuf->pFile, pg->offset, SEEK_SET);
if (ret == -1) {
terrno = TAOS_SYSTEM_ERROR(errno);
int32_t code = doFlushBufPageImpl(pBuf, offset, t, size);
if (code != TSDB_CODE_SUCCESS) {
return NULL;
}
ret = (int32_t)taosWriteFile(pBuf->pFile, t, size);
if (ret != size) {
terrno = TAOS_SYSTEM_ERROR(errno);
return NULL;
}
// extend the file size
if (pBuf->fileSize < pg->offset + size) {
pBuf->fileSize = pg->offset + size;
}
pBuf->statis.flushBytes += size;
pBuf->statis.flushPages += 1;
} else {
// if the length grows beyond the current slot, allocate a new position; otherwise do nothing
if (pg->length < size) {
// 1. add current space to free list
SPageDiskInfo dinfo = {.length = pg->length, .offset = pg->offset};
SPageDiskInfo dinfo = {.length = pg->length, .offset = offset};
taosArrayPush(pBuf->pFree, &dinfo);
// 2. allocate new position, and update the info
pg->offset = allocateNewPositionInFile(pBuf, size);
offset = allocateNewPositionInFile(pBuf, size);
pBuf->nextPos += size;
}
// 3. write to disk.
int32_t ret = taosLSeekFile(pBuf->pFile, pg->offset, SEEK_SET);
if (ret == -1) {
terrno = TAOS_SYSTEM_ERROR(errno);
int32_t code = doFlushBufPageImpl(pBuf, offset, t, size);
if (code != TSDB_CODE_SUCCESS) {
return NULL;
}
ret = (int32_t)taosWriteFile(pBuf->pFile, t, size);
if (ret != size) {
terrno = TAOS_SYSTEM_ERROR(errno);
return NULL;
}
if (pBuf->fileSize < pg->offset + size) {
pBuf->fileSize = pg->offset + size;
}
pBuf->statis.flushBytes += size;
pBuf->statis.flushPages += 1;
}
} else { // NOTE: the size may be -1, meaning this recycled page has not been flushed to disk yet.
size = pg->length;
@ -209,9 +209,10 @@ static char* doFlushBufPage(SDiskbasedBuf* pBuf, SPageInfo* pg) {
memset(pDataBuf, 0, getAllocPageSize(pBuf->pageSize));
#ifdef BUF_PAGE_DEBUG
uDebug("page_flush %p, pageId:%d, offset:%d", pDataBuf, pg->pageId, pg->offset);
uDebug("page_flush %p, pageId:%d, offset:%d", pDataBuf, pg->pageId, offset);
#endif
pg->offset = offset;
pg->length = size; // on disk size
return pDataBuf;
}
@ -236,7 +237,7 @@ static char* flushBufPage(SDiskbasedBuf* pBuf, SPageInfo* pg) {
// load file block data in disk
static int32_t loadPageFromDisk(SDiskbasedBuf* pBuf, SPageInfo* pg) {
if (pg->offset < 0 || pg->length <= 0) {
uError("failed to load buf page from disk, offset:%"PRId64", length:%d, %s", pg->offset, pg->length, pBuf->id);
uError("failed to load buf page from disk, offset:%" PRId64 ", length:%d, %s", pg->offset, pg->length, pBuf->id);
return TSDB_CODE_INVALID_PARA;
}
@ -303,6 +304,7 @@ static SListNode* getEldestUnrefedPage(SDiskbasedBuf* pBuf) {
static char* evictBufPage(SDiskbasedBuf* pBuf) {
SListNode* pn = getEldestUnrefedPage(pBuf);
if (pn == NULL) { // no available buffer pages now, return.
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
@ -382,14 +384,14 @@ int32_t createDiskbasedBuf(SDiskbasedBuf** pBuf, int32_t pagesize, int32_t inMem
goto _error;
}
pPBuf->prefix = (char*) dir;
pPBuf->prefix = (char*)dir;
pPBuf->emptyDummyIdList = taosArrayInit(1, sizeof(int32_t));
// qDebug("QInfo:0x%"PRIx64" create resBuf for output, page size:%d, inmem buf pages:%d, file:%s", qId,
// pPBuf->pageSize, pPBuf->inMemPages, pPBuf->path);
return TSDB_CODE_SUCCESS;
_error:
_error:
destroyDiskbasedBuf(pPBuf);
return TSDB_CODE_OUT_OF_MEMORY;
}
@ -399,11 +401,12 @@ static char* doExtractPage(SDiskbasedBuf* pBuf) {
if (NO_IN_MEM_AVAILABLE_PAGES(pBuf)) {
availablePage = evictBufPage(pBuf);
if (availablePage == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
uWarn("no available buf pages, current:%d, max:%d", listNEles(pBuf->lruList), pBuf->inMemPages)
uWarn("no available buf pages, current:%d, max:%d, reason: %s, %s", listNEles(pBuf->lruList), pBuf->inMemPages,
terrstr(), pBuf->id)
}
} else {
availablePage = taosMemoryCalloc(1, getAllocPageSize(pBuf->pageSize)); // add extract bytes in case of zipped buffer increased.
availablePage =
taosMemoryCalloc(1, getAllocPageSize(pBuf->pageSize)); // add extra bytes in case the zipped buffer increases.
if (availablePage == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
}
@ -551,9 +554,7 @@ void releaseBufPageInfo(SDiskbasedBuf* pBuf, SPageInfo* pi) {
size_t getTotalBufSize(const SDiskbasedBuf* pBuf) { return (size_t)pBuf->totalBufSize; }
SArray* getDataBufPagesIdList(SDiskbasedBuf* pBuf) {
return pBuf->pIdList;
}
SArray* getDataBufPagesIdList(SDiskbasedBuf* pBuf) { return pBuf->pIdList; }
void destroyDiskbasedBuf(SDiskbasedBuf* pBuf) {
if (pBuf == NULL) {
@ -567,7 +568,7 @@ void destroyDiskbasedBuf(SDiskbasedBuf* pBuf) {
needRemoveFile = true;
uDebug(
"Paged buffer closed, total:%.2f Kb (%d Pages), inmem size:%.2f Kb (%d Pages), file size:%.2f Kb, page "
"size:%.2f Kb, %s\n",
"size:%.2f Kb, %s",
pBuf->totalBufSize / 1024.0, pBuf->numOfPages, listNEles(pBuf->lruList) * pBuf->pageSize / 1024.0,
listNEles(pBuf->lruList), pBuf->fileSize / 1024.0, pBuf->pageSize / 1024.0f, pBuf->id);
@ -584,8 +585,7 @@ void destroyDiskbasedBuf(SDiskbasedBuf* pBuf) {
ps->releasePages, ps->flushBytes / 1024.0f, ps->flushPages, ps->loadBytes / 1024.0f, ps->loadPages);
} else {
uDebug(
"Get/Release pages:%d/%d, flushToDisk:%.2f Kb (%d Pages), loadFromDisk:%.2f Kb (%d Pages), avgPageSize:%.2f "
"Kb",
"Get/Release pages:%d/%d, flushToDisk:%.2f Kb (%d Pages), loadFromDisk:%.2f Kb (%d Pages), avgPgSize:%.2f Kb",
ps->getPages, ps->releasePages, ps->flushBytes / 1024.0f, ps->flushPages, ps->loadBytes / 1024.0f,
ps->loadPages, ps->loadBytes / (1024.0 * ps->loadPages));
}
@ -628,9 +628,7 @@ SPageInfo* getLastPageInfo(SArray* pList) {
return pPgInfo;
}
int32_t getPageId(const SPageInfo* pPgInfo) {
return pPgInfo->pageId;
}
int32_t getPageId(const SPageInfo* pPgInfo) { return pPgInfo->pageId; }
int32_t getBufPageSize(const SDiskbasedBuf* pBuf) { return pBuf->pageSize; }
@ -686,7 +684,7 @@ void dBufPrintStatis(const SDiskbasedBuf* pBuf) {
ps->getPages, ps->releasePages, ps->flushBytes / 1024.0f, ps->flushPages, ps->loadBytes / 1024.0f,
ps->loadPages, ps->loadBytes / (1024.0 * ps->loadPages));
} else {
//printf("no page loaded\n");
// printf("no page loaded\n");
}
}

View File

@ -83,4 +83,5 @@ python3 fast_write_example.py
# 20
pip3 install kafka-python
python3 kafka_example.py
python3 kafka_example_consumer.py

View File

@ -180,6 +180,7 @@
,,y,script,./test.sh -f tsim/query/sys_tbname.sim
,,y,script,./test.sh -f tsim/query/groupby.sim
,,y,script,./test.sh -f tsim/query/event.sim
,,y,script,./test.sh -f tsim/query/forceFill.sim
,,y,script,./test.sh -f tsim/qnode/basic1.sim
,,y,script,./test.sh -f tsim/snode/basic1.sim
,,y,script,./test.sh -f tsim/mnode/basic1.sim
@ -1054,6 +1055,11 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/out_of_order.py -Q 2
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/out_of_order.py -Q 3
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/out_of_order.py -Q 4
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_min_data.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_min_data.py -R
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_min_data.py -Q 2
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_min_data.py -Q 3
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_min_data.py -Q 4
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/blockSMA.py -Q 4
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/odbc.py
,,y,system-test,./pytest.sh python3 ./test.py -f 99-TDcase/TD-21561.py -Q 4

View File

@ -91,6 +91,10 @@ print ============== TD-5998
sql_error select _block_dist() from (select * from $nt)
sql_error select _block_dist() from (select * from $mt)
print ============== TD-22140 & TD-22165
sql_error show table distributed information_schema.ins_databases
sql_error show table distributed performance_schema.perf_apps
print =============== clear
sql drop database $db
sql select * from information_schema.ins_databases

View File

@ -31,13 +31,8 @@ sql insert into $tb values ( $ts , $x )
$x = $x + 1
endw
print ================== restart server to commit data into disk
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode1 -s start
print ================== server restart completed
sql connect
sql flush database $db
sql use $db
sql delete from $tb where ts=1537146000000
sql delete from $tb where ts=1537146409500
@ -63,7 +58,6 @@ if $rows != 8198 then
return -1
endi
print ===========================> TD-22077 && TD-21877
sql drop database if exists $db -x step1
sql create database $db vgroups 1;
@ -88,6 +82,8 @@ endw
sql flush database $db
print ===========================> TD-22077 && TD-21877
sql insert into t1 values('2018-09-17 09:00:26', 26);
sql insert into t2 values('2018-09-17 09:00:25', 25);
@ -97,4 +93,33 @@ sql flush database reg_db0;
sql delete from st1 where ts<='2018-9-17 09:00:26';
sql select * from st1;
sql drop table t1
sql drop table t2
print =========================================>TD-22196
sql create table t1 using st1 tags(1);
$i = 0
$ts = 1674977959000
$rowNum = 200
$x = 0
while $x < $rowNum
sql insert into t1 values ( $ts , $x )
$x = $x + 1
$ts = $ts + 1000
endw
sql flush database $db
sql select min(c),max(c) from t1
if $data00 != 0 then
return -1
endi
if $data01 != 199 then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT

View File

@ -0,0 +1,367 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
sql connect
sql drop database if exists db1;
sql create database db1 vgroups 10;
sql use db1;
sql create stable sta (ts timestamp, f1 double, f2 binary(200)) tags(t1 int);
sql create table tba1 using sta tags(1);
sql insert into tba1 values ('2022-04-26 15:15:01', 1.0, "a");
sql insert into tba1 values ('2022-04-26 15:15:02', 2.0, "b");
sql insert into tba1 values ('2022-04-26 15:15:04', 4.0, "b");
sql insert into tba1 values ('2022-04-26 15:15:05', 5.0, "b");
sql select avg(f1) from tba1 where ts >= '2022-04-26 15:15:00' and ts <= '2022-04-26 15:15:06' interval(1s) fill(value_f, 8.8);
if $rows != 7 then
return -1
endi
if $data00 != 8.800000000 then
return -1
endi
if $data10 != 1.000000000 then
return -1
endi
if $data20 != 2.000000000 then
return -1
endi
if $data30 != 8.800000000 then
return -1
endi
if $data40 != 4.000000000 then
return -1
endi
if $data50 != 5.000000000 then
return -1
endi
if $data60 != 8.800000000 then
return -1
endi
sql select avg(f1) from tba1 where ts >= '2022-04-26 15:15:00' and ts <= '2022-04-26 15:15:06' interval(1s) fill(value, 8.8);
if $rows != 7 then
return -1
endi
if $data00 != 8.800000000 then
return -1
endi
if $data10 != 1.000000000 then
return -1
endi
if $data20 != 2.000000000 then
return -1
endi
if $data30 != 8.800000000 then
return -1
endi
if $data40 != 4.000000000 then
return -1
endi
if $data50 != 5.000000000 then
return -1
endi
if $data60 != 8.800000000 then
return -1
endi
sql select avg(f1) from tba1 where ts >= '2022-04-26 15:15:00' and ts <= '2022-04-26 15:15:06' interval(1s) fill(null);
if $rows != 7 then
return -1
endi
if $data00 != NULL then
return -1
endi
if $data10 != 1.000000000 then
return -1
endi
if $data20 != 2.000000000 then
return -1
endi
if $data30 != NULL then
return -1
endi
if $data40 != 4.000000000 then
return -1
endi
if $data50 != 5.000000000 then
return -1
endi
if $data60 != NULL then
return -1
endi
sql select avg(f1) from tba1 where ts >= '2022-04-26 15:15:00' and ts <= '2022-04-26 15:15:06' interval(1s) fill(null_f);
if $rows != 7 then
return -1
endi
if $data00 != NULL then
return -1
endi
if $data10 != 1.000000000 then
return -1
endi
if $data20 != 2.000000000 then
return -1
endi
if $data30 != NULL then
return -1
endi
if $data40 != 4.000000000 then
return -1
endi
if $data50 != 5.000000000 then
return -1
endi
if $data60 != NULL then
return -1
endi
sql select avg(f1) from tba1 where ts >= '2022-04-26 15:15:06' and ts <= '2022-04-26 15:15:10' interval(1s) fill(value, 8.8);
if $rows != 0 then
return -1
endi
sql select avg(f1) from tba1 where ts >= '2022-04-26 15:15:06' and ts <= '2022-04-26 15:15:10' interval(1s) fill(value_f, 8.8);
if $rows != 5 then
return -1
endi
if $data00 != 8.800000000 then
return -1
endi
if $data10 != 8.800000000 then
return -1
endi
if $data20 != 8.800000000 then
return -1
endi
if $data30 != 8.800000000 then
return -1
endi
if $data40 != 8.800000000 then
return -1
endi
sql select avg(f1) from tba1 where ts >= '2022-04-26 15:15:06' and ts <= '2022-04-26 15:15:10' interval(1s) fill(null);
if $rows != 0 then
return -1
endi
sql select avg(f1) from tba1 where ts >= '2022-04-26 15:15:06' and ts <= '2022-04-26 15:15:10' interval(1s) fill(null_f);
if $rows != 5 then
return -1
endi
if $data00 != NULL then
return -1
endi
if $data10 != NULL then
return -1
endi
if $data20 != NULL then
return -1
endi
if $data30 != NULL then
return -1
endi
if $data40 != NULL then
return -1
endi
sql select avg(f1) from tba1 where ts >= '2022-04-26 15:16:00' and ts <= '2022-04-26 19:15:59' interval(1s) fill(value_f, 8.8);
if $rows != 14400 then
return -1
endi
if $data00 != 8.800000000 then
return -1
endi
sql select avg(f1) from tba1 where ts >= '2022-04-26 15:16:00' and ts <= '2022-04-26 19:15:59' interval(1s) fill(null_f);
if $rows != 14400 then
return -1
endi
if $data00 != NULL then
return -1
endi
sql select interp(f1) from tba1 range('2022-04-26 15:15:00','2022-04-26 15:15:06') every(1s) fill(value_f, 8.8);
if $rows != 7 then
return -1
endi
if $data00 != 8.800000000 then
return -1
endi
if $data10 != 1.000000000 then
return -1
endi
if $data20 != 2.000000000 then
return -1
endi
if $data30 != 8.800000000 then
return -1
endi
if $data40 != 4.000000000 then
return -1
endi
if $data50 != 5.000000000 then
return -1
endi
if $data60 != 8.800000000 then
return -1
endi
sql select interp(f1) from tba1 range('2022-04-26 15:15:00','2022-04-26 15:15:06') every(1s) fill(value, 8.8);
if $rows != 7 then
return -1
endi
if $data00 != 8.800000000 then
return -1
endi
if $data10 != 1.000000000 then
return -1
endi
if $data20 != 2.000000000 then
return -1
endi
if $data30 != 8.800000000 then
return -1
endi
if $data40 != 4.000000000 then
return -1
endi
if $data50 != 5.000000000 then
return -1
endi
if $data60 != 8.800000000 then
return -1
endi
sql select interp(f1) from tba1 range('2022-04-26 15:15:00','2022-04-26 15:15:06') every(1s) fill(null);
if $rows != 7 then
return -1
endi
if $data00 != NULL then
return -1
endi
if $data10 != 1.000000000 then
return -1
endi
if $data20 != 2.000000000 then
return -1
endi
if $data30 != NULL then
return -1
endi
if $data40 != 4.000000000 then
return -1
endi
if $data50 != 5.000000000 then
return -1
endi
if $data60 != NULL then
return -1
endi
sql select interp(f1) from tba1 range('2022-04-26 15:15:00','2022-04-26 15:15:06') every(1s) fill(null_f);
if $rows != 7 then
return -1
endi
if $data00 != NULL then
return -1
endi
if $data10 != 1.000000000 then
return -1
endi
if $data20 != 2.000000000 then
return -1
endi
if $data30 != NULL then
return -1
endi
if $data40 != 4.000000000 then
return -1
endi
if $data50 != 5.000000000 then
return -1
endi
if $data60 != NULL then
return -1
endi
sql select interp(f1) from tba1 range('2022-04-26 15:15:06','2022-04-26 15:15:10') every(1s) fill(value, 8.8);
if $rows != 5 then
return -1
endi
if $data00 != 8.800000000 then
return -1
endi
if $data10 != 8.800000000 then
return -1
endi
if $data20 != 8.800000000 then
return -1
endi
if $data30 != 8.800000000 then
return -1
endi
if $data40 != 8.800000000 then
return -1
endi
sql select interp(f1) from tba1 range('2022-04-26 15:15:06','2022-04-26 15:15:10') every(1s) fill(value_f, 8.8);
if $rows != 5 then
return -1
endi
if $data00 != 8.800000000 then
return -1
endi
if $data10 != 8.800000000 then
return -1
endi
if $data20 != 8.800000000 then
return -1
endi
if $data30 != 8.800000000 then
return -1
endi
if $data40 != 8.800000000 then
return -1
endi
sql select interp(f1) from tba1 range('2022-04-26 15:15:06','2022-04-26 15:15:10') every(1s) fill(null);
if $rows != 5 then
return -1
endi
if $data00 != NULL then
return -1
endi
if $data10 != NULL then
return -1
endi
if $data20 != NULL then
return -1
endi
if $data30 != NULL then
return -1
endi
if $data40 != NULL then
return -1
endi
sql select interp(f1) from tba1 range('2022-04-26 15:15:06','2022-04-26 15:15:10') every(1s) fill(null_f);
if $rows != 5 then
return -1
endi
if $data00 != NULL then
return -1
endi
if $data10 != NULL then
return -1
endi
if $data20 != NULL then
return -1
endi
if $data30 != NULL then
return -1
endi
if $data40 != NULL then
return -1
endi
sql select interp(f1) from tba1 range('2022-04-26 15:16:00','2022-04-26 19:15:59') every(1s) fill(value_f, 8.8);
if $rows != 14400 then
return -1
endi
if $data00 != 8.800000000 then
return -1
endi
sql select interp(f1) from tba1 range('2022-04-26 15:16:00','2022-04-26 19:15:59') every(1s) fill(null_f);
if $rows != 14400 then
return -1
endi
if $data00 != NULL then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT

View File

@ -55,62 +55,62 @@ sql create stream stb_asin_stream trigger at_once into output_asin_stb as select
sql create stream ctb_asin_stream trigger at_once into output_asin_ctb as select ts, asin(c1), asin(c2), c3 from scalar_ct1;
sql create stream tb_asin_stream trigger at_once into output_asin_tb as select ts, asin(c1), asin(c2), c3 from scalar_tb;
sql create stream stb_atan_stream trigger at_once into output_atan_stb as select ts, atan(c1), atan(c2), c3 from scalar_stb partition by tbname;
sql create stream ctb_atan_stream trigger at_once into output_atan_ctb as select ts, atan(c1), atan(c2), c3 from scalar_ct1;
sql create stream tb_atan_stream trigger at_once into output_atan_tb as select ts, atan(c1), atan(c2), c3 from scalar_tb;
sql create stream stb_ceil_stream trigger at_once into output_ceil_stb as select ts, ceil(c1), ceil(c2), c3 from scalar_stb partition by tbname;
sql create stream ctb_ceil_stream trigger at_once into output_ceil_ctb as select ts, ceil(c1), ceil(c2), c3 from scalar_ct1;
sql create stream tb_ceil_stream trigger at_once into output_ceil_tb as select ts, ceil(c1), ceil(c2), c3 from scalar_tb;
sql create stream stb_cos_stream trigger at_once into output_cos_stb as select ts, cos(c1), cos(c2), c3 from scalar_stb partition by tbname;
sql create stream ctb_cos_stream trigger at_once into output_cos_ctb as select ts, cos(c1), cos(c2), c3 from scalar_ct1;
sql create stream tb_cos_stream trigger at_once into output_cos_tb as select ts, cos(c1), cos(c2), c3 from scalar_tb;
sql create stream stb_floor_stream trigger at_once into output_floor_stb as select ts, floor(c1), floor(c2), c3 from scalar_stb partition by tbname;
sql create stream ctb_floor_stream trigger at_once into output_floor_ctb as select ts, floor(c1), floor(c2), c3 from scalar_ct1;
sql create stream tb_floor_stream trigger at_once into output_floor_tb as select ts, floor(c1), floor(c2), c3 from scalar_tb;
sql create stream stb_log_stream trigger at_once into output_log_stb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_stb partition by tbname;
sql create stream ctb_log_stream trigger at_once into output_log_ctb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_ct1;
sql create stream tb_log_stream trigger at_once into output_log_tb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_tb;
sql create stream stb_pow_stream trigger at_once into output_pow_stb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_stb partition by tbname;
sql create stream ctb_pow_stream trigger at_once into output_pow_ctb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_ct1;
sql create stream tb_pow_stream trigger at_once into output_pow_tb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_tb;
sql create stream stb_round_stream trigger at_once into output_round_stb as select ts, round(c1), round(c2), c3 from scalar_stb partition by tbname;
sql create stream ctb_round_stream trigger at_once into output_round_ctb as select ts, round(c1), round(c2), c3 from scalar_ct1;
sql create stream tb_round_stream trigger at_once into output_round_tb as select ts, round(c1), round(c2), c3 from scalar_tb;
sql create stream stb_sin_stream trigger at_once into output_sin_stb as select ts, sin(c1), sin(c2), c3 from scalar_stb partition by tbname;
sql create stream ctb_sin_stream trigger at_once into output_sin_ctb as select ts, sin(c1), sin(c2), c3 from scalar_ct1;
sql create stream tb_sin_stream trigger at_once into output_sin_tb as select ts, sin(c1), sin(c2), c3 from scalar_tb;
sql create stream stb_sqrt_stream trigger at_once into output_sqrt_stb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_stb partition by tbname;
sql create stream ctb_sqrt_stream trigger at_once into output_sqrt_ctb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_ct1;
sql create stream tb_sqrt_stream trigger at_once into output_sqrt_tb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_tb;
sql create stream stb_tan_stream trigger at_once into output_tan_stb as select ts, tan(c1), tan(c2), c3 from scalar_stb partition by tbname;
sql create stream ctb_tan_stream trigger at_once into output_tan_ctb as select ts, tan(c1), tan(c2), c3 from scalar_ct1;
sql create stream tb_tan_stream trigger at_once into output_tan_tb as select ts, tan(c1), tan(c2), c3 from scalar_tb;
sql create stream stb_char_length_stream into output_char_length_stb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_stb partition by tbname;
sql create stream ctb_char_length_stream into output_char_length_ctb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_ct1;
sql create stream tb_char_length_stream into output_char_length_tb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_tb;
sql create stream stb_concat_stream into output_concat_stb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_stb partition by tbname;
sql create stream ctb_concat_stream into output_concat_ctb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_ct1;
sql create stream tb_concat_stream into output_concat_tb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_tb;
sql create stream stb_concat_ws_stream into output_concat_ws_stb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_stb partition by tbname;
sql create stream ctb_concat_ws_stream into output_concat_ws_ctb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_ct1;
sql create stream tb_concat_ws_stream into output_concat_ws_tb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_tb;
sql create stream stb_length_stream into output_length_stb as select ts, length(c3), length(c4), length(c5) from scalar_stb partition by tbname;
sql create stream ctb_length_stream into output_length_ctb as select ts, length(c3), length(c4), length(c5) from scalar_ct1;
sql create stream tb_length_stream into output_length_tb as select ts, length(c3), length(c4), length(c5) from scalar_tb;
sql create stream stb_lower_stream into output_lower_stb as select ts, lower(c3), lower(c4), lower(c5) from scalar_stb partition by tbname;
sql create stream ctb_lower_stream into output_lower_ctb as select ts, lower(c3), lower(c4), lower(c5) from scalar_ct1;
sql create stream tb_lower_stream into output_lower_tb as select ts, lower(c3), lower(c4), lower(c5) from scalar_tb;
sql create stream stb_ltrim_stream into output_ltrim_stb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_stb partition by tbname;
sql create stream ctb_ltrim_stream into output_ltrim_ctb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_ct1;
sql create stream tb_ltrim_stream into output_ltrim_tb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_tb;
sql create stream stb_rtrim_stream into output_rtrim_stb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_stb partition by tbname;
sql create stream ctb_rtrim_stream into output_rtrim_ctb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_ct1;
sql create stream tb_rtrim_stream into output_rtrim_tb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_tb;
sql create stream stb_substr_stream into output_substr_stb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_stb partition by tbname;
sql create stream ctb_substr_stream into output_substr_ctb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_ct1;
sql create stream tb_substr_stream into output_substr_tb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_tb;
sql create stream stb_upper_stream into output_upper_stb as select ts, upper(c3), upper(c4), upper(c5) from scalar_stb partition by tbname;
sql create stream ctb_upper_stream into output_upper_ctb as select ts, upper(c3), upper(c4), upper(c5) from scalar_ct1;
sql create stream tb_upper_stream into output_upper_tb as select ts, upper(c3), upper(c4), upper(c5) from scalar_tb;
# sql create stream ctb_atan_stream trigger at_once into output_atan_ctb as select ts, atan(c1), atan(c2), c3 from scalar_ct1;
# sql create stream tb_atan_stream trigger at_once into output_atan_tb as select ts, atan(c1), atan(c2), c3 from scalar_tb;
# sql create stream stb_ceil_stream trigger at_once into output_ceil_stb as select ts, ceil(c1), ceil(c2), c3 from scalar_stb partition by tbname;
# sql create stream ctb_ceil_stream trigger at_once into output_ceil_ctb as select ts, ceil(c1), ceil(c2), c3 from scalar_ct1;
# sql create stream tb_ceil_stream trigger at_once into output_ceil_tb as select ts, ceil(c1), ceil(c2), c3 from scalar_tb;
# sql create stream stb_cos_stream trigger at_once into output_cos_stb as select ts, cos(c1), cos(c2), c3 from scalar_stb partition by tbname;
# sql create stream ctb_cos_stream trigger at_once into output_cos_ctb as select ts, cos(c1), cos(c2), c3 from scalar_ct1;
# sql create stream tb_cos_stream trigger at_once into output_cos_tb as select ts, cos(c1), cos(c2), c3 from scalar_tb;
# sql create stream stb_floor_stream trigger at_once into output_floor_stb as select ts, floor(c1), floor(c2), c3 from scalar_stb partition by tbname;
# sql create stream ctb_floor_stream trigger at_once into output_floor_ctb as select ts, floor(c1), floor(c2), c3 from scalar_ct1;
# sql create stream tb_floor_stream trigger at_once into output_floor_tb as select ts, floor(c1), floor(c2), c3 from scalar_tb;
# sql create stream stb_log_stream trigger at_once into output_log_stb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_stb partition by tbname;
# sql create stream ctb_log_stream trigger at_once into output_log_ctb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_ct1;
# sql create stream tb_log_stream trigger at_once into output_log_tb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_tb;
# sql create stream stb_pow_stream trigger at_once into output_pow_stb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_stb partition by tbname;
# sql create stream ctb_pow_stream trigger at_once into output_pow_ctb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_ct1;
# sql create stream tb_pow_stream trigger at_once into output_pow_tb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_tb;
# sql create stream stb_round_stream trigger at_once into output_round_stb as select ts, round(c1), round(c2), c3 from scalar_stb partition by tbname;
# sql create stream ctb_round_stream trigger at_once into output_round_ctb as select ts, round(c1), round(c2), c3 from scalar_ct1;
# sql create stream tb_round_stream trigger at_once into output_round_tb as select ts, round(c1), round(c2), c3 from scalar_tb;
# sql create stream stb_sin_stream trigger at_once into output_sin_stb as select ts, sin(c1), sin(c2), c3 from scalar_stb partition by tbname;
# sql create stream ctb_sin_stream trigger at_once into output_sin_ctb as select ts, sin(c1), sin(c2), c3 from scalar_ct1;
# sql create stream tb_sin_stream trigger at_once into output_sin_tb as select ts, sin(c1), sin(c2), c3 from scalar_tb;
# sql create stream stb_sqrt_stream trigger at_once into output_sqrt_stb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_stb partition by tbname;
# sql create stream ctb_sqrt_stream trigger at_once into output_sqrt_ctb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_ct1;
# sql create stream tb_sqrt_stream trigger at_once into output_sqrt_tb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_tb;
# sql create stream stb_tan_stream trigger at_once into output_tan_stb as select ts, tan(c1), tan(c2), c3 from scalar_stb partition by tbname;
# sql create stream ctb_tan_stream trigger at_once into output_tan_ctb as select ts, tan(c1), tan(c2), c3 from scalar_ct1;
# sql create stream tb_tan_stream trigger at_once into output_tan_tb as select ts, tan(c1), tan(c2), c3 from scalar_tb;
# sql create stream stb_char_length_stream into output_char_length_stb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_stb partition by tbname;
# sql create stream ctb_char_length_stream into output_char_length_ctb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_ct1;
# sql create stream tb_char_length_stream into output_char_length_tb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_tb;
# sql create stream stb_concat_stream into output_concat_stb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_stb partition by tbname;
# sql create stream ctb_concat_stream into output_concat_ctb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_ct1;
# sql create stream tb_concat_stream into output_concat_tb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_tb;
# sql create stream stb_concat_ws_stream into output_concat_ws_stb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_stb partition by tbname;
# sql create stream ctb_concat_ws_stream into output_concat_ws_ctb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_ct1;
# sql create stream tb_concat_ws_stream into output_concat_ws_tb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_tb;
# sql create stream stb_length_stream into output_length_stb as select ts, length(c3), length(c4), length(c5) from scalar_stb partition by tbname;
# sql create stream ctb_length_stream into output_length_ctb as select ts, length(c3), length(c4), length(c5) from scalar_ct1;
# sql create stream tb_length_stream into output_length_tb as select ts, length(c3), length(c4), length(c5) from scalar_tb;
# sql create stream stb_lower_stream into output_lower_stb as select ts, lower(c3), lower(c4), lower(c5) from scalar_stb partition by tbname;
# sql create stream ctb_lower_stream into output_lower_ctb as select ts, lower(c3), lower(c4), lower(c5) from scalar_ct1;
# sql create stream tb_lower_stream into output_lower_tb as select ts, lower(c3), lower(c4), lower(c5) from scalar_tb;
# sql create stream stb_ltrim_stream into output_ltrim_stb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_stb partition by tbname;
# sql create stream ctb_ltrim_stream into output_ltrim_ctb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_ct1;
# sql create stream tb_ltrim_stream into output_ltrim_tb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_tb;
# sql create stream stb_rtrim_stream into output_rtrim_stb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_stb partition by tbname;
# sql create stream ctb_rtrim_stream into output_rtrim_ctb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_ct1;
# sql create stream tb_rtrim_stream into output_rtrim_tb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_tb;
# sql create stream stb_substr_stream into output_substr_stb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_stb partition by tbname;
# sql create stream ctb_substr_stream into output_substr_ctb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_ct1;
# sql create stream tb_substr_stream into output_substr_tb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_tb;
# sql create stream stb_upper_stream into output_upper_stb as select ts, upper(c3), upper(c4), upper(c5) from scalar_stb partition by tbname;
# sql create stream ctb_upper_stream into output_upper_ctb as select ts, upper(c3), upper(c4), upper(c5) from scalar_ct1;
# sql create stream tb_upper_stream into output_upper_tb as select ts, upper(c3), upper(c4), upper(c5) from scalar_tb;
sql insert into scalar_ct1 values (1656668180503, 100, 100.1, "beijing", "taos", "Taos");
sql insert into scalar_ct1 values (1656668180503+1s, -50, -50.1, "tianjin", "taosdata", "Taosdata");
sql insert into scalar_ct1 values (1656668180503+2s, 0, Null, "hebei", "TDengine", Null);
@ -146,62 +146,62 @@ sql create stream stb_asin_stream trigger at_once into output_asin_stb as select
sql create stream ctb_asin_stream trigger at_once into output_asin_ctb as select ts, asin(c1), asin(c2), c3 from scalar_ct1;
sql create stream tb_asin_stream trigger at_once into output_asin_tb as select ts, asin(c1), asin(c2), c3 from scalar_tb;
sql create stream stb_atan_stream trigger at_once into output_atan_stb as select ts, atan(c1), atan(c2), c3 from scalar_stb partition by tbname;
sql create stream ctb_atan_stream trigger at_once into output_atan_ctb as select ts, atan(c1), atan(c2), c3 from scalar_ct1;
sql create stream tb_atan_stream trigger at_once into output_atan_tb as select ts, atan(c1), atan(c2), c3 from scalar_tb;
sql create stream stb_ceil_stream trigger at_once into output_ceil_stb as select ts, ceil(c1), ceil(c2), c3 from scalar_stb partition by tbname;
sql create stream ctb_ceil_stream trigger at_once into output_ceil_ctb as select ts, ceil(c1), ceil(c2), c3 from scalar_ct1;
sql create stream tb_ceil_stream trigger at_once into output_ceil_tb as select ts, ceil(c1), ceil(c2), c3 from scalar_tb;
sql create stream stb_cos_stream trigger at_once into output_cos_stb as select ts, cos(c1), cos(c2), c3 from scalar_stb partition by tbname;
sql create stream ctb_cos_stream trigger at_once into output_cos_ctb as select ts, cos(c1), cos(c2), c3 from scalar_ct1;
sql create stream tb_cos_stream trigger at_once into output_cos_tb as select ts, cos(c1), cos(c2), c3 from scalar_tb;
sql create stream stb_floor_stream trigger at_once into output_floor_stb as select ts, floor(c1), floor(c2), c3 from scalar_stb partition by tbname;
sql create stream ctb_floor_stream trigger at_once into output_floor_ctb as select ts, floor(c1), floor(c2), c3 from scalar_ct1;
sql create stream tb_floor_stream trigger at_once into output_floor_tb as select ts, floor(c1), floor(c2), c3 from scalar_tb;
sql create stream stb_log_stream trigger at_once into output_log_stb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_stb partition by tbname;
sql create stream ctb_log_stream trigger at_once into output_log_ctb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_ct1;
sql create stream tb_log_stream trigger at_once into output_log_tb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_tb;
sql create stream stb_pow_stream trigger at_once into output_pow_stb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_stb partition by tbname;
sql create stream ctb_pow_stream trigger at_once into output_pow_ctb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_ct1;
sql create stream tb_pow_stream trigger at_once into output_pow_tb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_tb;
sql create stream stb_round_stream trigger at_once into output_round_stb as select ts, round(c1), round(c2), c3 from scalar_stb partition by tbname;
sql create stream ctb_round_stream trigger at_once into output_round_ctb as select ts, round(c1), round(c2), c3 from scalar_ct1;
sql create stream tb_round_stream trigger at_once into output_round_tb as select ts, round(c1), round(c2), c3 from scalar_tb;
sql create stream stb_sin_stream trigger at_once into output_sin_stb as select ts, sin(c1), sin(c2), c3 from scalar_stb partition by tbname;
sql create stream ctb_sin_stream trigger at_once into output_sin_ctb as select ts, sin(c1), sin(c2), c3 from scalar_ct1;
sql create stream tb_sin_stream trigger at_once into output_sin_tb as select ts, sin(c1), sin(c2), c3 from scalar_tb;
sql create stream stb_sqrt_stream trigger at_once into output_sqrt_stb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_stb partition by tbname;
sql create stream ctb_sqrt_stream trigger at_once into output_sqrt_ctb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_ct1;
sql create stream tb_sqrt_stream trigger at_once into output_sqrt_tb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_tb;
sql create stream stb_tan_stream trigger at_once into output_tan_stb as select ts, tan(c1), tan(c2), c3 from scalar_stb partition by tbname;
sql create stream ctb_tan_stream trigger at_once into output_tan_ctb as select ts, tan(c1), tan(c2), c3 from scalar_ct1;
sql create stream tb_tan_stream trigger at_once into output_tan_tb as select ts, tan(c1), tan(c2), c3 from scalar_tb;
sql create stream stb_char_length_stream into output_char_length_stb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_stb partition by tbname;
sql create stream ctb_char_length_stream into output_char_length_ctb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_ct1;
sql create stream tb_char_length_stream into output_char_length_tb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_tb;
sql create stream stb_concat_stream into output_concat_stb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_stb partition by tbname;
sql create stream ctb_concat_stream into output_concat_ctb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_ct1;
sql create stream tb_concat_stream into output_concat_tb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_tb;
sql create stream stb_concat_ws_stream into output_concat_ws_stb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_stb partition by tbname;
sql create stream ctb_concat_ws_stream into output_concat_ws_ctb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_ct1;
sql create stream tb_concat_ws_stream into output_concat_ws_tb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_tb;
sql create stream stb_length_stream into output_length_stb as select ts, length(c3), length(c4), length(c5) from scalar_stb partition by tbname;
sql create stream ctb_length_stream into output_length_ctb as select ts, length(c3), length(c4), length(c5) from scalar_ct1;
sql create stream tb_length_stream into output_length_tb as select ts, length(c3), length(c4), length(c5) from scalar_tb;
sql create stream stb_lower_stream into output_lower_stb as select ts, lower(c3), lower(c4), lower(c5) from scalar_stb partition by tbname;
sql create stream ctb_lower_stream into output_lower_ctb as select ts, lower(c3), lower(c4), lower(c5) from scalar_ct1;
sql create stream tb_lower_stream into output_lower_tb as select ts, lower(c3), lower(c4), lower(c5) from scalar_tb;
sql create stream stb_ltrim_stream into output_ltrim_stb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_stb partition by tbname;
sql create stream ctb_ltrim_stream into output_ltrim_ctb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_ct1;
sql create stream tb_ltrim_stream into output_ltrim_tb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_tb;
sql create stream stb_rtrim_stream into output_rtrim_stb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_stb partition by tbname;
sql create stream ctb_rtrim_stream into output_rtrim_ctb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_ct1;
sql create stream tb_rtrim_stream into output_rtrim_tb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_tb;
sql create stream stb_substr_stream into output_substr_stb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_stb partition by tbname;
sql create stream ctb_substr_stream into output_substr_ctb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_ct1;
sql create stream tb_substr_stream into output_substr_tb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_tb;
sql create stream stb_upper_stream into output_upper_stb as select ts, upper(c3), upper(c4), upper(c5) from scalar_stb partition by tbname;
sql create stream ctb_upper_stream into output_upper_ctb as select ts, upper(c3), upper(c4), upper(c5) from scalar_ct1;
sql create stream tb_upper_stream into output_upper_tb as select ts, upper(c3), upper(c4), upper(c5) from scalar_tb;
# sql create stream ctb_atan_stream trigger at_once into output_atan_ctb as select ts, atan(c1), atan(c2), c3 from scalar_ct1;
# sql create stream tb_atan_stream trigger at_once into output_atan_tb as select ts, atan(c1), atan(c2), c3 from scalar_tb;
# sql create stream stb_ceil_stream trigger at_once into output_ceil_stb as select ts, ceil(c1), ceil(c2), c3 from scalar_stb partition by tbname;
# sql create stream ctb_ceil_stream trigger at_once into output_ceil_ctb as select ts, ceil(c1), ceil(c2), c3 from scalar_ct1;
# sql create stream tb_ceil_stream trigger at_once into output_ceil_tb as select ts, ceil(c1), ceil(c2), c3 from scalar_tb;
# sql create stream stb_cos_stream trigger at_once into output_cos_stb as select ts, cos(c1), cos(c2), c3 from scalar_stb partition by tbname;
# sql create stream ctb_cos_stream trigger at_once into output_cos_ctb as select ts, cos(c1), cos(c2), c3 from scalar_ct1;
# sql create stream tb_cos_stream trigger at_once into output_cos_tb as select ts, cos(c1), cos(c2), c3 from scalar_tb;
# sql create stream stb_floor_stream trigger at_once into output_floor_stb as select ts, floor(c1), floor(c2), c3 from scalar_stb partition by tbname;
# sql create stream ctb_floor_stream trigger at_once into output_floor_ctb as select ts, floor(c1), floor(c2), c3 from scalar_ct1;
# sql create stream tb_floor_stream trigger at_once into output_floor_tb as select ts, floor(c1), floor(c2), c3 from scalar_tb;
# sql create stream stb_log_stream trigger at_once into output_log_stb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_stb partition by tbname;
# sql create stream ctb_log_stream trigger at_once into output_log_ctb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_ct1;
# sql create stream tb_log_stream trigger at_once into output_log_tb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_tb;
# sql create stream stb_pow_stream trigger at_once into output_pow_stb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_stb partition by tbname;
# sql create stream ctb_pow_stream trigger at_once into output_pow_ctb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_ct1;
# sql create stream tb_pow_stream trigger at_once into output_pow_tb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_tb;
# sql create stream stb_round_stream trigger at_once into output_round_stb as select ts, round(c1), round(c2), c3 from scalar_stb partition by tbname;
# sql create stream ctb_round_stream trigger at_once into output_round_ctb as select ts, round(c1), round(c2), c3 from scalar_ct1;
# sql create stream tb_round_stream trigger at_once into output_round_tb as select ts, round(c1), round(c2), c3 from scalar_tb;
# sql create stream stb_sin_stream trigger at_once into output_sin_stb as select ts, sin(c1), sin(c2), c3 from scalar_stb partition by tbname;
# sql create stream ctb_sin_stream trigger at_once into output_sin_ctb as select ts, sin(c1), sin(c2), c3 from scalar_ct1;
# sql create stream tb_sin_stream trigger at_once into output_sin_tb as select ts, sin(c1), sin(c2), c3 from scalar_tb;
# sql create stream stb_sqrt_stream trigger at_once into output_sqrt_stb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_stb partition by tbname;
# sql create stream ctb_sqrt_stream trigger at_once into output_sqrt_ctb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_ct1;
# sql create stream tb_sqrt_stream trigger at_once into output_sqrt_tb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_tb;
# sql create stream stb_tan_stream trigger at_once into output_tan_stb as select ts, tan(c1), tan(c2), c3 from scalar_stb partition by tbname;
# sql create stream ctb_tan_stream trigger at_once into output_tan_ctb as select ts, tan(c1), tan(c2), c3 from scalar_ct1;
# sql create stream tb_tan_stream trigger at_once into output_tan_tb as select ts, tan(c1), tan(c2), c3 from scalar_tb;
# sql create stream stb_char_length_stream into output_char_length_stb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_stb partition by tbname;
# sql create stream ctb_char_length_stream into output_char_length_ctb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_ct1;
# sql create stream tb_char_length_stream into output_char_length_tb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_tb;
# sql create stream stb_concat_stream into output_concat_stb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_stb partition by tbname;
# sql create stream ctb_concat_stream into output_concat_ctb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_ct1;
# sql create stream tb_concat_stream into output_concat_tb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_tb;
# sql create stream stb_concat_ws_stream into output_concat_ws_stb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_stb partition by tbname;
# sql create stream ctb_concat_ws_stream into output_concat_ws_ctb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_ct1;
# sql create stream tb_concat_ws_stream into output_concat_ws_tb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_tb;
# sql create stream stb_length_stream into output_length_stb as select ts, length(c3), length(c4), length(c5) from scalar_stb partition by tbname;
# sql create stream ctb_length_stream into output_length_ctb as select ts, length(c3), length(c4), length(c5) from scalar_ct1;
# sql create stream tb_length_stream into output_length_tb as select ts, length(c3), length(c4), length(c5) from scalar_tb;
# sql create stream stb_lower_stream into output_lower_stb as select ts, lower(c3), lower(c4), lower(c5) from scalar_stb partition by tbname;
# sql create stream ctb_lower_stream into output_lower_ctb as select ts, lower(c3), lower(c4), lower(c5) from scalar_ct1;
# sql create stream tb_lower_stream into output_lower_tb as select ts, lower(c3), lower(c4), lower(c5) from scalar_tb;
# sql create stream stb_ltrim_stream into output_ltrim_stb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_stb partition by tbname;
# sql create stream ctb_ltrim_stream into output_ltrim_ctb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_ct1;
# sql create stream tb_ltrim_stream into output_ltrim_tb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_tb;
# sql create stream stb_rtrim_stream into output_rtrim_stb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_stb partition by tbname;
# sql create stream ctb_rtrim_stream into output_rtrim_ctb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_ct1;
# sql create stream tb_rtrim_stream into output_rtrim_tb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_tb;
# sql create stream stb_substr_stream into output_substr_stb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_stb partition by tbname;
# sql create stream ctb_substr_stream into output_substr_ctb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_ct1;
# sql create stream tb_substr_stream into output_substr_tb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_tb;
# sql create stream stb_upper_stream into output_upper_stb as select ts, upper(c3), upper(c4), upper(c5) from scalar_stb partition by tbname;
# sql create stream ctb_upper_stream into output_upper_ctb as select ts, upper(c3), upper(c4), upper(c5) from scalar_ct1;
# sql create stream tb_upper_stream into output_upper_tb as select ts, upper(c3), upper(c4), upper(c5) from scalar_tb;
sql insert into scalar_ct1 values (1656668180503, 100, 100.1, "beijing", "taos", "Taos");
sql insert into scalar_ct1 values (1656668180503+1s, -50, -50.1, "tianjin", "taosdata", "Taosdata");
sql insert into scalar_ct1 values (1656668180503+2s, 0, Null, "hebei", "TDengine", Null);
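# Illustrative note (not part of the original test): each stream above
# materializes its scalar-function results into its own output_* table, so a
# spot check such as the commented query below could verify one of them:
# sql select * from output_atan_ctb;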
@ -273,4 +273,4 @@ print ========== step7
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec.sh -n dnode3 -s stop -x SIGINT
system sh/exec.sh -n dnode4 -s stop -x SIGINT
View File
@ -14,6 +14,7 @@ sql use test;
sql create table t1(ts timestamp, a int, b int, c int, d double, s varchar(20));
sql create stream streams1 trigger at_once into streamt as select _wstart ts, count(*) c1 from t1 where ts > 1648791210000 and ts < 1648791413000 interval(10s) fill(value, 100);
sql create stream streams1a trigger at_once into streamta as select _wstart ts, count(*) c1 from t1 where ts > 1648791210000 and ts < 1648791413000 interval(10s) fill(value_f, 100);
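# Illustrative note: streams1 and streams1a differ only in fill mode --
# fill(value, 100) vs fill(value_f, 100). Judging by the "force fill value"
# checks below, value_f is the forced variant expected to emit the fill
# value 100 even for windows that plain fill(value) may leave unfilled.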
sql insert into t1 values(1648791213000,1,2,3,1.0,'aaa');
sleep 100
sql insert into t1 values(1648791233000,1,2,3,1.0,'aaa');
@ -77,6 +78,69 @@ if $data71 != 1 then
goto loop0
endi
print "force fill value"
$loop_count = 0
loop0a:
sleep 200
sql select * from streamta order by ts;
$loop_count = $loop_count + 1
if $loop_count == 10 then
return -1
endi
if $rows != 8 then
print =====rows=$rows
goto loop0a
endi
if $data01 != 1 then
print =====data01=$data01
goto loop0a
endi
if $data11 != 1 then
print =====data11=$data11
goto loop0a
endi
if $data21 != 1 then
print =====data21=$data21
goto loop0a
endi
if $data31 != 100 then
print =====data31=$data31
goto loop0a
endi
if $data41 != 1 then
print =====data41=$data41
goto loop0a
endi
if $data51 != 100 then
print =====data51=$data51
goto loop0a
endi
if $data61 != 100 then
print =====data61=$data61
goto loop0a
endi
if $data71 != 1 then
print =====data71=$data71
goto loop0a
endi
sql drop stream if exists streams2;
sql drop database if exists test2;
sql create database test2 vgroups 1;
@ -408,6 +472,7 @@ sql create table t1 using st tags(1,1,1);
sql create table t2 using st tags(2,2,2);
sql create stream streams4 trigger at_once into streamt4 as select _wstart ts, count(*) c1, concat(tbname, 'aaa') as pname, timezone() from st where ts > 1648791000000 and ts < 1648793000000 partition by tbname interval(10s) fill(NULL);
sql create stream streams4a trigger at_once into streamt4a as select _wstart ts, count(*) c1, concat(tbname, 'aaa') as pname, timezone() from st where ts > 1648791000000 and ts < 1648793000000 partition by tbname interval(10s) fill(NULL_F);
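# Illustrative note: streams4a mirrors streams4 with fill(NULL_F) in place of
# fill(NULL); the "force fill null" checks below expect the forced variant to
# fill gap rows with NULL per partition while still populating the constant
# pname and timezone() columns.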
sql insert into t1 values(1648791213000,1,2,3,1.0,'aaa');
sql insert into t1 values(1648791233000,1,2,3,1.0,'aaa');
sql insert into t1 values(1648791273000,1,2,3,1.0,'aaa');
@ -512,32 +577,104 @@ if $data[12][3] == NULL then
goto loop4
endi
print "force fill null"
$loop_count = 0
loop4a:
sleep 200
sql select * from streamt4a order by pname, ts;
print ===> $data[0][0] , $data[0][1] , $data[0][2] , $data[0][3]
print ===> $data[1][0] , $data[1][1] , $data[1][2] , $data[1][3]
print ===> $data[2][0] , $data[2][1] , $data[2][2] , $data[2][3]
print ===> $data[3][0] , $data[3][1] , $data[3][2] , $data[3][3]
print ===> $data[4][0] , $data[4][1] , $data[4][2] , $data[4][3]
print ===> $data[5][0] , $data[5][1] , $data[5][2] , $data[5][3]
print ===> $data[6][0] , $data[6][1] , $data[6][2] , $data[6][3]
print ===> $data[7][0] , $data[7][1] , $data[7][2] , $data[7][3]
print ===> $data[8][0] , $data[8][1] , $data[8][2] , $data[8][3]
print ===> $data[9][0] , $data[9][1] , $data[9][2] , $data[9][3]
print ===> $data[10][0] , $data[10][1] , $data[10][2] , $data[10][3]
print ===> $data[11][0] , $data[11][1] , $data[11][2] , $data[11][3]
print ===> $data[12][0] , $data[12][1] , $data[12][2] , $data[12][3]
print ===> $data[13][0] , $data[13][1] , $data[13][2] , $data[13][3]
$loop_count = $loop_count + 1
if $loop_count == 10 then
return -1
endi
if $rows != 14 then
print =====rows=$rows
goto loop4a
endi
if $data11 != NULL then
print =====data11=$data11
goto loop4a
endi
if $data12 != t1aaa then
print =====data12=$data12
goto loop4a
endi
if $data13 == NULL then
print =====data13=$data13
goto loop4a
endi
if $data32 != t1aaa then
print =====data32=$data32
goto loop4a
endi
if $data42 != t1aaa then
print =====data42=$data42
goto loop4a
endi
if $data52 != t1aaa then
print =====data52=$data52
goto loop4a
endi
if $data81 != NULL then
print =====data81=$data81
goto loop4a
endi
if $data82 != t2aaa then
print =====data82=$data82
goto loop4a
endi
if $data83 == NULL then
print =====data83=$data83
goto loop4a
endi
if $data[10][2] != t2aaa then
print =====data[10][2]=$data[10][2]
goto loop4a
endi
if $data[11][2] != t2aaa then
print =====data[11][2]=$data[11][2]
goto loop4a
endi
if $data[12][2] != t2aaa then
print =====data[12][2]=$data[12][2]
goto loop4a
endi
if $data[12][3] == NULL then
print =====data[12][3]=$data[12][3]
goto loop4a
endi
@ -584,4 +721,4 @@ print ============loop_all=$loop_all
system sh/stop_dnodes.sh
#goto looptest
View File
@ -111,6 +111,17 @@ class TDTestCase:
sql2 = "select (case when sum(q_smallint)=0 then null else sum(q_smallint) end) from %s.stable_1_1 limit 100;" %database
self.constant_check(database,sql1,sql2,0)
#TD-20257
sql1 = "select tbname,first(ts),q_int,q_smallint,q_bigint,case when q_int <0 then 1 else 0 end from %s.stable_1 where tbname = 'stable_1_1' and ts < now partition by tbname state_window(case when q_int <0 then 1 else 0 end);" %database
sql2 = "select tbname,first(ts),q_int,q_smallint,q_bigint,case when q_int <0 then 1 else 0 end from %s.stable_1_1 where ts < now partition by tbname state_window(case when q_int <0 then 1 else 0 end);" %database
self.constant_check(database,sql1,sql2,0)
self.constant_check(database,sql1,sql2,1)
self.constant_check(database,sql1,sql2,2)
self.constant_check(database,sql1,sql2,3)
self.constant_check(database,sql1,sql2,4)
self.constant_check(database,sql1,sql2,5)
#TD-20260
sql1 = "select _wstart,avg(q_int),min(q_smallint) from %s.stable_1 where tbname = 'stable_1_1' and ts < now state_window(case when q_smallint <0 then 1 else 0 end);" %database
sql2 = "select _wstart,avg(q_int),min(q_smallint) from %s.stable_1_1 where ts < now state_window(case when q_smallint <0 then 1 else 0 end);" %database
self.constant_check(database,sql1,sql2,0)
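# Illustrative note: both regression checks above follow the same pattern --
# the super-table query restricted by tbname = 'stable_1_1' must return the
# same values, column by column, as querying the child table directly.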
View File
@ -0,0 +1,159 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import random
import os
import time
import taos
import subprocess
from faker import Faker
from util.log import tdLog
from util.cases import tdCases
from util.sql import tdSql
from util.dnodes import tdDnodes
from util.dnodes import *
class TDTestCase:
updatecfgDict = {'maxSQLLength': 1048576, 'debugFlag': 131, 'querySmaOptimize': 1}
def init(self, conn, logSql, replicaVar):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.testcasePath = os.path.split(__file__)[0]
self.testcaseFilename = os.path.split(__file__)[-1]
os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename))
self.db = "max_min"
def dropandcreateDB_random(self,database,n):
ts = 1630000000000
num_random = 5
fake = Faker('zh_CN')
tdSql.execute('''drop database if exists %s ;''' %database)
tdSql.execute('''create database %s keep 36500 ;'''%(database))
tdSql.execute('''use %s;'''%database)
tdSql.execute('''create stable %s.stable_1 (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \
q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) \
tags(loc nchar(100) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, t_bool bool , t_binary binary(100) , t_nchar nchar(100) ,t_float float , t_double double , t_ts timestamp);'''%database)
for i in range(num_random):
tdSql.execute('''create table %s.stable_1_%d using %s.stable_1 tags('stable_1_%d', '%d' , '%d', '%d' , '%d' , 1 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;'''
%(database,i,database,i,fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
# insert data
for i in range(num_random):
for j in range(n):
tdSql.execute('''insert into %s.stable_1_%d (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts)\
values(%d, %d, %d, %d, %d, %f, %f, 0, 'binary.%s', 'nchar.%s', %d) ;'''
% (database,i,ts + i*1000 + j, fake.random_int(min=-2147483647, max=2147483647, step=1),
fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.pystr() , ts + i))
tdSql.query("select count(*) from %s.stable_1;" %database)
tdSql.checkData(0,0,num_random*n)
tdSql.query("select count(*) from %s.stable_1_1;"%database)
tdSql.checkData(0,0,n)
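# Illustrative note: sanity-check the row counts -- num_random child tables
# with n rows each, so the super table should hold num_random*n rows in total.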
def TD_22219_max(self,database):
sql3 = "select count(*) from (select max(q_int) from %s.stable_1 group by tbname);" %database
tdSql.query(sql3)
sql_value = tdSql.getData(0,0)
self.value_check(sql_value,5)
sql1 = "select max(q_int) from %s.stable_1_1 ;" %database
sql2 = "select max(q_int) from %s.stable_1 where tbname = 'stable_1_1' ;" %database
self.constant_check(database,sql1,sql2,0)
sql3 = "select count(*) from (select max(q_int) from %s.stable_1 group by tbname);" %database
tdSql.query(sql3)
sql_value = tdSql.getData(0,0)
self.value_check(sql_value,5)
def TD_22219_min(self,database):
sql3 = "select count(*) from (select min(q_int) from %s.stable_1 group by tbname);" %database
tdSql.query(sql3)
sql_value = tdSql.getData(0,0)
self.value_check(sql_value,5)
sql1 = "select min(q_int) from %s.stable_1_1 ;" %database
sql2 = "select min(q_int) from %s.stable_1 where tbname = 'stable_1_1' ;" %database
self.constant_check(database,sql1,sql2,0)
sql3 = "select count(*) from (select min(q_int) from %s.stable_1 group by tbname);" %database
tdSql.query(sql3)
sql_value = tdSql.getData(0,0)
self.value_check(sql_value,5)
def constant_check(self,database,sql1,sql2,column):
# column is the 0-based index of the result column to compare via getData(0, column)
tdLog.info("\n=============sql1:(%s)___sql2:(%s) ====================\n" %(sql1,sql2))
tdSql.query(sql1)
sql1_value = tdSql.getData(0,column)
tdSql.query(sql2)
sql2_value = tdSql.getData(0,column)
self.value_check(sql1_value,sql2_value)
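# Illustrative note: flush in-memory data to disk, then re-run both queries;
# results must match across the two queries and across the flush boundary.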
tdSql.execute(" flush database %s;" %database)
time.sleep(3)
tdSql.query(sql1)
sql1_flush_value = tdSql.getData(0,column)
tdSql.query(sql2)
sql2_flush_value = tdSql.getData(0,column)
self.value_check(sql1_flush_value,sql2_flush_value)
self.value_check(sql1_value,sql1_flush_value)
self.value_check(sql2_value,sql2_flush_value)
def value_check(self,base_value,check_value):
if base_value==check_value:
tdLog.info(f"checkEqual success, base_value={base_value},check_value={check_value}")
else :
tdLog.exit(f"checkEqual error, base_value={base_value},check_value={check_value}")
def run(self):
startTime = time.time()
os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename))
self.dropandcreateDB_random("%s" %self.db, 2000)
self.TD_22219_max("%s" %self.db)
self.dropandcreateDB_random("%s" %self.db, 2000)
self.TD_22219_min("%s" %self.db)
endTime = time.time()
print("total time %ds" % (endTime - startTime))
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())