Merge branch '3.0' into test/TD-27721

Ping Xiao 2023-12-15 11:13:26 +08:00
commit 647b764a57
153 changed files with 12373 additions and 1947 deletions

View File

@ -41,6 +41,7 @@ add_subdirectory(source)
add_subdirectory(tools)
add_subdirectory(utils)
add_subdirectory(examples/c)
add_subdirectory(tests)
include(${TD_SUPPORT_DIR}/cmake.install)
# docs

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -177,7 +177,7 @@ Just need to add the reference to [TDengine.Connector](https://www.nuget.org/pac
</PropertyGroup>
<ItemGroup>
<PackageReference Include="TDengine.Connector" Version="3.0.0" />
<PackageReference Include="TDengine.Connector" Version="3.1.0" />
</ItemGroup>
</Project>
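
For context, the updated examples in this change set use the `ConnectionStringBuilder`/`DbDriver` API that ships with TDengine.Connector 3.1.x. A minimal connect sketch, assuming a local TDengine instance with default credentials (values are placeholders):

```csharp
using System;
using TDengine.Driver;
using TDengine.Driver.Client;

class ConnectSketch
{
    static void Main()
    {
        // Placeholder connection parameters for a local deployment.
        var builder = new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata");
        using (var client = DbDriver.Open(builder))
        {
            Console.WriteLine("connected");
        }
    }
}
```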

View File

@ -1,3 +0,0 @@
```csharp
{{#include docs/examples/csharp/asyncQuery/Program.cs}}
```

View File

@ -15,7 +15,6 @@ import CQuery from "./_c.mdx";
import PhpQuery from "./_php.mdx";
import PyAsync from "./_py_async.mdx";
import NodeAsync from "./_js_async.mdx";
import CsAsync from "./_cs_async.mdx";
import CAsync from "./_c_async.mdx";
## Introduction
@ -174,9 +173,6 @@ Please note that async query can only be used with a native connection.
<TabItem label="Python" value="python">
<PyAsync />
</TabItem>
<TabItem label="C#" value="csharp">
<CsAsync />
</TabItem>
<TabItem label="C" value="c">
<CAsync />
</TabItem>

View File

@ -248,23 +248,23 @@ function close()
<TabItem value="C#" label="C#">
```csharp
class ConsumerBuilder<TValue>
ConsumerBuilder(IEnumerable<KeyValuePair<string, string>> config)
virtual IConsumer Build()
Consumer(ConsumerBuilder builder)
public IConsumer<TValue> Build()
void Subscribe(IEnumerable<string> topics)
void Subscribe(string topic)
ConsumeResult Consume(int millisecondsTimeout)
ConsumeResult<TValue> Consume(int millisecondsTimeout)
List<string> Subscription()
void Unsubscribe()
void Commit(ConsumeResult consumerResult)
List<TopicPartitionOffset> Commit()
void Close()
```
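
A minimal sketch of the call order implied by the signatures above: build a consumer, subscribe, consume, commit, and close. The configuration values and topic name are placeholders, and the namespace is assumed from the examples later in this change set.

```csharp
using System;
using System.Collections.Generic;
using TDengine.TMQ;

// Placeholder configuration; see the full key list in the example below.
var cfg = new Dictionary<string, string>()
{
    { "group.id", "group1" },
    { "td.connect.ip", "127.0.0.1" },
    { "td.connect.user", "root" },
    { "td.connect.pass", "taosdata" },
    { "td.connect.port", "6030" },
};
var consumer = new ConsumerBuilder<Dictionary<string, object>>(cfg).Build();
consumer.Subscribe("topic_meters");
using (var result = consumer.Consume(500))
{
    if (result != null)
    {
        // handle result.Message here
        consumer.Commit();
    }
}
consumer.Unsubscribe();
consumer.Close();
```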
@ -500,25 +500,19 @@ let consumer = taos.consumer({
<TabItem value="C#" label="C#">
```csharp
using TDengineTMQ;
// Create consumer groups on demand (GourpID) and enable automatic commits (EnableAutoCommit),
// an automatic commit interval (AutoCommitIntervalMs), and a username (TDConnectUser) and password (TDConnectPasswd)
var cfg = new ConsumerConfig
{
EnableAutoCommit = "true"
AutoCommitIntervalMs = "1000"
GourpId = "TDengine-TMQ-C#",
TDConnectUser = "root",
TDConnectPasswd = "taosdata",
AutoOffsetReset = "latest"
MsgWithTableName = "true",
TDConnectIp = "127.0.0.1",
TDConnectPort = "6030"
};
var consumer = new ConsumerBuilder(cfg).Build();
var cfg = new Dictionary<string, string>()
{
{ "group.id", "group1" },
{ "auto.offset.reset", "latest" },
{ "td.connect.ip", "127.0.0.1" },
{ "td.connect.user", "root" },
{ "td.connect.pass", "taosdata" },
{ "td.connect.port", "6030" },
{ "client.id", "tmq_example" },
{ "enable.auto.commit", "true" },
{ "msg.with.table.name", "false" },
};
var consumer = new ConsumerBuilder<Dictionary<string, object>>(cfg).Build();
```
</TabItem>
@ -747,10 +741,12 @@ while(true){
## Consume data
while (true)
{
var consumerRes = consumer.Consume(100);
// process ConsumeResult
ProcessMsg(consumerRes);
consumer.Commit(consumerRes);
using (var result = consumer.Consume(500))
{
if (result == null) continue;
ProcessMsg(result);
consumer.Commit();
}
}
```
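
`ProcessMsg` above is a user-defined placeholder, not part of the connector API. One possible implementation, assuming the `Dictionary<string, object>` value type used elsewhere in this section:

```csharp
// Hypothetical helper: print every column of every row in the consumed result.
static void ProcessMsg(ConsumeResult<Dictionary<string, object>> result)
{
    foreach (var message in result.Message)
    {
        foreach (var kv in message.Value)
        {
            Console.WriteLine($"{kv.Key}: {kv.Value}");
        }
    }
}
```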

View File

@ -262,6 +262,63 @@ The following types may be returned:
- "INT UNSIGNED"
- "BIGINT UNSIGNED"
- "JSON"
- "VARBINARY"
- "GEOMETRY"
`VARBINARY` and `GEOMETRY` values are returned as hex strings. For example:
Prepare data
```bash
create database demo
use demo
create table t(ts timestamp,c1 varbinary(20),c2 geometry(100))
insert into t values(now,'\x7f8290','point(100 100)')
```
Execute query
```bash
curl --location 'http://<fqdn>:<port>/rest/sql' \
--header 'Content-Type: text/plain' \
--header 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' \
--data 'select * from demo.t'
```
Return results
```json
{
"code": 0,
"column_meta": [
[
"ts",
"TIMESTAMP",
8
],
[
"c1",
"VARBINARY",
20
],
[
"c2",
"GEOMETRY",
100
]
],
"data": [
[
"2023-11-01T06:28:15.210Z",
"7f8290",
"010100000000000000000059400000000000005940"
]
],
"rows": 1
}
```
- `010100000000000000000059400000000000005940` is the [Well-Known Binary (WKB)](https://libgeos.org/specifications/wkb/) representation of `point(100 100)`
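
To work with these values programmatically, the hex string can be converted back to raw bytes on the client side. A minimal C# sketch (a hypothetical helper, not part of the REST API):

```csharp
using System;
using System.Linq;

class HexDecodeSketch
{
    // Convert a hex string such as "7f8290" (the VARBINARY value above) back to bytes.
    static byte[] FromHex(string hex) =>
        Enumerable.Range(0, hex.Length / 2)
                  .Select(i => Convert.ToByte(hex.Substring(i * 2, 2), 16))
                  .ToArray();

    static void Main()
    {
        byte[] varbinary = FromHex("7f8290");
        Console.WriteLine(BitConverter.ToString(varbinary)); // 7F-82-90
    }
}
```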
#### Errors

File diff suppressed because it is too large.

View File

@ -1,5 +1,5 @@
.vs
asyncQuery/bin
.idea
connect/bin
influxdbLine/bin
optsJSON/bin
@ -12,7 +12,6 @@ wsConnect/bin
wsInsert/bin
wsQuery/bin
wsStmt/bin
asyncQuery/obj
connect/obj
influxdbLine/obj
optsJSON/obj

View File

@ -1,111 +0,0 @@
using System;
using System.Collections.Generic;
using TDengineDriver;
using TDengineDriver.Impl;
using System.Runtime.InteropServices;
namespace TDengineExample
{
public class AsyncQueryExample
{
static void Main()
{
IntPtr conn = GetConnection();
try
{
QueryAsyncCallback queryAsyncCallback = new QueryAsyncCallback(QueryCallback);
TDengine.QueryAsync(conn, "select * from meters", queryAsyncCallback, IntPtr.Zero);
Thread.Sleep(2000);
}
finally
{
TDengine.Close(conn);
}
}
static void QueryCallback(IntPtr param, IntPtr taosRes, int code)
{
if (code == 0 && taosRes != IntPtr.Zero)
{
FetchRawBlockAsyncCallback fetchRowAsyncCallback = new FetchRawBlockAsyncCallback(FetchRawBlockCallback);
TDengine.FetchRawBlockAsync(taosRes, fetchRowAsyncCallback, param);
}
else
{
throw new Exception($"async query data failed,code:{code},reason:{TDengine.Error(taosRes)}");
}
}
// Iteratively call this interface until "numOfRows" is no greater than 0.
static void FetchRawBlockCallback(IntPtr param, IntPtr taosRes, int numOfRows)
{
if (numOfRows > 0)
{
Console.WriteLine($"{numOfRows} rows async retrieved");
IntPtr pdata = TDengine.GetRawBlock(taosRes);
List<TDengineMeta> metaList = TDengine.FetchFields(taosRes);
List<object> dataList = LibTaos.ReadRawBlock(pdata, metaList, numOfRows);
for (int i = 0; i < dataList.Count; i++)
{
if (i != 0 && (i + 1) % metaList.Count == 0)
{
Console.WriteLine("{0}\t|", dataList[i]);
}
else
{
Console.Write("{0}\t|", dataList[i]);
}
}
Console.WriteLine("");
TDengine.FetchRawBlockAsync(taosRes, FetchRawBlockCallback, param);
}
else
{
if (numOfRows == 0)
{
Console.WriteLine("async retrieve complete.");
}
else
{
throw new Exception($"FetchRawBlockCallback callback error, error code {numOfRows}");
}
TDengine.FreeResult(taosRes);
}
}
static IntPtr GetConnection()
{
string host = "localhost";
short port = 6030;
string username = "root";
string password = "taosdata";
string dbname = "power";
var conn = TDengine.Connect(host, username, password, dbname, port);
if (conn == IntPtr.Zero)
{
throw new Exception("Connect to TDengine failed");
}
else
{
Console.WriteLine("Connect to TDengine success");
}
return conn;
}
}
}
// //output:
// // Connect to TDengine success
// // 8 rows async retrieved
// // 1538548685500 | 11.8 | 221 | 0.28 | california.losangeles | 2 |
// // 1538548696600 | 13.4 | 223 | 0.29 | california.losangeles | 2 |
// // 1538548685000 | 10.8 | 223 | 0.29 | california.losangeles | 3 |
// // 1538548686500 | 11.5 | 221 | 0.35 | california.losangeles | 3 |
// // 1538548685000 | 10.3 | 219 | 0.31 | california.sanfrancisco | 2 |
// // 1538548695000 | 12.6 | 218 | 0.33 | california.sanfrancisco | 2 |
// // 1538548696800 | 12.3 | 221 | 0.31 | california.sanfrancisco | 2 |
// // 1538548696650 | 10.3 | 218 | 0.25 | california.sanfrancisco | 3 |
// // async retrieve complete.

View File

@ -1,15 +0,0 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<TargetFramework>net6.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
<StartupObject>TDengineExample.AsyncQueryExample</StartupObject>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="TDengine.Connector" Version="3.0.*" />
</ItemGroup>
</Project>

View File

@ -1,4 +1,5 @@
using TDengineDriver;
using TDengine.Driver;
using TDengine.Driver.Client;
namespace TDengineExample
{
@ -7,23 +8,11 @@ namespace TDengineExample
{
static void Main(String[] args)
{
string host = "localhost";
short port = 6030;
string username = "root";
string password = "taosdata";
string dbname = "";
var conn = TDengine.Connect(host, username, password, dbname, port);
if (conn == IntPtr.Zero)
var builder = new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata");
using (var client = DbDriver.Open(builder))
{
throw new Exception("Connect to TDengine failed");
Console.WriteLine("connected");
}
else
{
Console.WriteLine("Connect to TDengine success");
}
TDengine.Close(conn);
TDengine.Cleanup();
}
}
}

View File

@ -9,7 +9,7 @@
</PropertyGroup>
<ItemGroup>
<PackageReference Include="TDengine.Connector" Version="3.0.*" />
<PackageReference Include="TDengine.Connector" Version="3.1.*" />
</ItemGroup>
</Project>

View File

@ -3,8 +3,6 @@ Microsoft Visual Studio Solution File, Format Version 12.00
# Visual Studio Version 16
VisualStudioVersion = 16.0.30114.105
MinimumVisualStudioVersion = 10.0.40219.1
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "asyncquery", "asyncQuery\asyncquery.csproj", "{E2A5F00C-14E7-40E1-A2DE-6AB2975616D3}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "connect", "connect\connect.csproj", "{CCC5042D-93FC-4AE0-B2F6-7E692FD476B7}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "influxdbline", "influxdbLine\influxdbline.csproj", "{6A24FB80-1E3C-4E2D-A5AB-914FA583874D}"
@ -38,10 +36,6 @@ Global
HideSolutionNode = FALSE
EndGlobalSection
GlobalSection(ProjectConfigurationPlatforms) = postSolution
{E2A5F00C-14E7-40E1-A2DE-6AB2975616D3}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{E2A5F00C-14E7-40E1-A2DE-6AB2975616D3}.Debug|Any CPU.Build.0 = Debug|Any CPU
{E2A5F00C-14E7-40E1-A2DE-6AB2975616D3}.Release|Any CPU.ActiveCfg = Release|Any CPU
{E2A5F00C-14E7-40E1-A2DE-6AB2975616D3}.Release|Any CPU.Build.0 = Release|Any CPU
{CCC5042D-93FC-4AE0-B2F6-7E692FD476B7}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{CCC5042D-93FC-4AE0-B2F6-7E692FD476B7}.Debug|Any CPU.Build.0 = Debug|Any CPU
{CCC5042D-93FC-4AE0-B2F6-7E692FD476B7}.Release|Any CPU.ActiveCfg = Release|Any CPU

View File

@ -1,4 +1,5 @@
using TDengineDriver;
using TDengine.Driver;
using TDengine.Driver.Client;
namespace TDengineExample
{
@ -6,60 +7,23 @@ namespace TDengineExample
{
static void Main()
{
IntPtr conn = GetConnection();
PrepareDatabase(conn);
var builder =
new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata");
using (var client = DbDriver.Open(builder))
{
client.Exec("CREATE DATABASE test WAL_RETENTION_PERIOD 3600");
client.Exec("use test");
string[] lines = {
"meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249",
"meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250",
"meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249",
"meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250"
};
IntPtr res = TDengine.SchemalessInsert(conn, lines, lines.Length, (int)TDengineSchemalessProtocol.TSDB_SML_LINE_PROTOCOL, (int)TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_MILLI_SECONDS);
if (TDengine.ErrorNo(res) != 0)
{
throw new Exception("SchemalessInsert failed since " + TDengine.Error(res));
}
else
{
int affectedRows = TDengine.AffectRows(res);
Console.WriteLine($"SchemalessInsert success, affected {affectedRows} rows");
}
TDengine.FreeResult(res);
}
static IntPtr GetConnection()
{
string host = "localhost";
short port = 6030;
string username = "root";
string password = "taosdata";
string dbname = "";
var conn = TDengine.Connect(host, username, password, dbname, port);
if (conn == IntPtr.Zero)
{
throw new Exception("Connect to TDengine failed");
}
else
{
Console.WriteLine("Connect to TDengine success");
}
return conn;
}
static void PrepareDatabase(IntPtr conn)
{
IntPtr res = TDengine.Query(conn, "CREATE DATABASE test WAL_RETENTION_PERIOD 3600");
if (TDengine.ErrorNo(res) != 0)
{
throw new Exception("failed to create database, reason: " + TDengine.Error(res));
}
res = TDengine.Query(conn, "USE test");
if (TDengine.ErrorNo(res) != 0)
{
throw new Exception("failed to change database, reason: " + TDengine.Error(res));
client.SchemalessInsert(lines,
TDengineSchemalessProtocol.TSDB_SML_LINE_PROTOCOL,
TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_MILLI_SECONDS, 0, ReqId.GetReqId());
}
}
}
}

View File

@ -9,7 +9,7 @@
</PropertyGroup>
<ItemGroup>
<PackageReference Include="TDengine.Connector" Version="3.0.*" />
<PackageReference Include="TDengine.Connector" Version="3.1.*" />
</ItemGroup>
</Project>

View File

@ -1,68 +1,27 @@
using TDengineDriver;
using TDengine.Driver;
using TDengine.Driver.Client;
namespace TDengineExample
{
internal class OptsJsonExample
{
static void Main()
public static void Main(string[] args)
{
IntPtr conn = GetConnection();
try
var builder =
new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata");
using (var client = DbDriver.Open(builder))
{
PrepareDatabase(conn);
string[] lines = { "[{\"metric\": \"meters.current\", \"timestamp\": 1648432611249, \"value\": 10.3, \"tags\": {\"location\": \"California.SanFrancisco\", \"groupid\": 2}}," +
client.Exec("CREATE DATABASE test WAL_RETENTION_PERIOD 3600");
client.Exec("use test");
string[] lines =
{
"[{\"metric\": \"meters.current\", \"timestamp\": 1648432611249, \"value\": 10.3, \"tags\": {\"location\": \"California.SanFrancisco\", \"groupid\": 2}}," +
" {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611249, \"value\": 219, \"tags\": {\"location\": \"California.LosAngeles\", \"groupid\": 1}}, " +
"{\"metric\": \"meters.current\", \"timestamp\": 1648432611250, \"value\": 12.6, \"tags\": {\"location\": \"California.SanFrancisco\", \"groupid\": 2}}," +
" {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611250, \"value\": 221, \"tags\": {\"location\": \"California.LosAngeles\", \"groupid\": 1}}]"
};
IntPtr res = TDengine.SchemalessInsert(conn, lines, 1, (int)TDengineSchemalessProtocol.TSDB_SML_JSON_PROTOCOL, (int)TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_NOT_CONFIGURED);
if (TDengine.ErrorNo(res) != 0)
{
throw new Exception("SchemalessInsert failed since " + TDengine.Error(res));
}
else
{
int affectedRows = TDengine.AffectRows(res);
Console.WriteLine($"SchemalessInsert success, affected {affectedRows} rows");
}
TDengine.FreeResult(res);
}
finally
{
TDengine.Close(conn);
}
}
static IntPtr GetConnection()
{
string host = "localhost";
short port = 6030;
string username = "root";
string password = "taosdata";
string dbname = "";
var conn = TDengine.Connect(host, username, password, dbname, port);
if (conn == IntPtr.Zero)
{
throw new Exception("Connect to TDengine failed");
}
else
{
Console.WriteLine("Connect to TDengine success");
}
return conn;
}
static void PrepareDatabase(IntPtr conn)
{
IntPtr res = TDengine.Query(conn, "CREATE DATABASE test WAL_RETENTION_PERIOD 3600");
if (TDengine.ErrorNo(res) != 0)
{
throw new Exception("failed to create database, reason: " + TDengine.Error(res));
}
res = TDengine.Query(conn, "USE test");
if (TDengine.ErrorNo(res) != 0)
{
throw new Exception("failed to change database, reason: " + TDengine.Error(res));
client.SchemalessInsert(lines, TDengineSchemalessProtocol.TSDB_SML_JSON_PROTOCOL,
TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_MILLI_SECONDS, 0, ReqId.GetReqId());
}
}
}

View File

@ -9,7 +9,7 @@
</PropertyGroup>
<ItemGroup>
<PackageReference Include="TDengine.Connector" Version="3.0.*" />
<PackageReference Include="TDengine.Connector" Version="3.1.*" />
</ItemGroup>
</Project>

View File

@ -1,15 +1,18 @@
using TDengineDriver;
using TDengine.Driver;
using TDengine.Driver.Client;
namespace TDengineExample
{
internal class OptsTelnetExample
{
static void Main()
public static void Main(string[] args)
{
IntPtr conn = GetConnection();
try
var builder =
new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata");
using (var client = DbDriver.Open(builder))
{
PrepareDatabase(conn);
client.Exec("CREATE DATABASE test WAL_RETENTION_PERIOD 3600");
client.Exec("USE test");
string[] lines = {
"meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2",
"meters.current 1648432611250 12.6 location=California.SanFrancisco groupid=2",
@ -20,53 +23,9 @@ namespace TDengineExample
"meters.voltage 1648432611249 221 location=California.LosAngeles groupid=3",
"meters.voltage 1648432611250 217 location=California.LosAngeles groupid=3",
};
IntPtr res = TDengine.SchemalessInsert(conn, lines, lines.Length, (int)TDengineSchemalessProtocol.TSDB_SML_TELNET_PROTOCOL, (int)TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_NOT_CONFIGURED);
if (TDengine.ErrorNo(res) != 0)
{
throw new Exception("SchemalessInsert failed since " + TDengine.Error(res));
}
else
{
int affectedRows = TDengine.AffectRows(res);
Console.WriteLine($"SchemalessInsert success, affected {affectedRows} rows");
}
TDengine.FreeResult(res);
}
catch
{
TDengine.Close(conn);
}
}
static IntPtr GetConnection()
{
string host = "localhost";
short port = 6030;
string username = "root";
string password = "taosdata";
string dbname = "";
var conn = TDengine.Connect(host, username, password, dbname, port);
if (conn == IntPtr.Zero)
{
throw new Exception("Connect to TDengine failed");
}
else
{
Console.WriteLine("Connect to TDengine success");
}
return conn;
}
static void PrepareDatabase(IntPtr conn)
{
IntPtr res = TDengine.Query(conn, "CREATE DATABASE test WAL_RETENTION_PERIOD 3600");
if (TDengine.ErrorNo(res) != 0)
{
throw new Exception("failed to create database, reason: " + TDengine.Error(res));
}
res = TDengine.Query(conn, "USE test");
if (TDengine.ErrorNo(res) != 0)
{
throw new Exception("failed to change database, reason: " + TDengine.Error(res));
client.SchemalessInsert(lines,
TDengineSchemalessProtocol.TSDB_SML_TELNET_PROTOCOL,
TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_MILLI_SECONDS, 0, ReqId.GetReqId());
}
}
}

View File

@ -9,7 +9,7 @@
</PropertyGroup>
<ItemGroup>
<PackageReference Include="TDengine.Connector" Version="3.0.*" />
<PackageReference Include="TDengine.Connector" Version="3.1.*" />
</ItemGroup>
</Project>

View File

@ -1,80 +1,35 @@
using TDengineDriver;
using TDengineDriver.Impl;
using System.Runtime.InteropServices;
using System.Text;
using TDengine.Driver;
using TDengine.Driver.Client;
namespace TDengineExample
{
internal class QueryExample
{
static void Main()
public static void Main(string[] args)
{
var builder = new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata");
using (var client = DbDriver.Open(builder))
{
IntPtr conn = GetConnection();
try
{
// run query
IntPtr res = TDengine.Query(conn, "SELECT * FROM meters LIMIT 2");
if (TDengine.ErrorNo(res) != 0)
client.Exec("use power");
string query = "SELECT * FROM meters";
using (var rows = client.Query(query))
{
throw new Exception("Failed to query since: " + TDengine.Error(res));
}
// get filed count
int fieldCount = TDengine.FieldCount(res);
Console.WriteLine("fieldCount=" + fieldCount);
// print column names
List<TDengineMeta> metas = LibTaos.GetMeta(res);
for (int i = 0; i < metas.Count; i++)
while (rows.Read())
{
Console.Write(metas[i].name + "\t");
Console.WriteLine(
$"{((DateTime)rows.GetValue(0)):yyyy-MM-dd HH:mm:ss.fff}, {rows.GetValue(1)}, {rows.GetValue(2)}, {rows.GetValue(3)}, {rows.GetValue(4)}, {Encoding.UTF8.GetString((byte[])rows.GetValue(5))}");
}
Console.WriteLine();
// print values
List<Object> resData = LibTaos.GetData(res);
for (int i = 0; i < resData.Count; i++)
}
}
catch (Exception e)
{
Console.Write($"|{resData[i].ToString()} \t");
if (((i + 1) % metas.Count == 0))
{
Console.WriteLine("");
Console.WriteLine(e.ToString());
throw;
}
}
Console.WriteLine();
// Free result after use
TDengine.FreeResult(res);
}
finally
{
TDengine.Close(conn);
}
}
static IntPtr GetConnection()
{
string host = "localhost";
short port = 6030;
string username = "root";
string password = "taosdata";
string dbname = "power";
var conn = TDengine.Connect(host, username, password, dbname, port);
if (conn == IntPtr.Zero)
{
throw new Exception("Connect to TDengine failed");
}
else
{
Console.WriteLine("Connect to TDengine success");
}
return conn;
}
}
}
// output:
// Connect to TDengine success
// fieldCount=6
// ts current voltage phase location groupid
// 1648432611249 10.3 219 0.31 California.SanFrancisco 2
// 1648432611749 12.6 218 0.33 California.SanFrancisco 2

View File

@ -9,7 +9,7 @@
</PropertyGroup>
<ItemGroup>
<PackageReference Include="TDengine.Connector" Version="3.0.*" />
<PackageReference Include="TDengine.Connector" Version="3.1.*" />
</ItemGroup>
</Project>

View File

@ -1,69 +1,47 @@
using TDengineDriver;
using System.Text;
using TDengine.Driver;
using TDengine.Driver.Client;
namespace TDengineExample
{
internal class SQLInsertExample
{
static void Main()
public static void Main(string[] args)
{
var builder = new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata");
using (var client = DbDriver.Open(builder))
{
IntPtr conn = GetConnection();
try
{
IntPtr res = TDengine.Query(conn, "CREATE DATABASE power WAL_RETENTION_PERIOD 3600");
CheckRes(conn, res, "failed to create database");
res = TDengine.Query(conn, "USE power");
CheckRes(conn, res, "failed to change database");
res = TDengine.Query(conn, "CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)");
CheckRes(conn, res, "failed to create stable");
var sql = "INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) " +
"d1002 USING power.meters TAGS('California.SanFrancisco', 3) VALUES('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) " +
"d1003 USING power.meters TAGS('California.LosAngeles', 2) VALUES('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000)('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) " +
"d1004 USING power.meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000)('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)";
res = TDengine.Query(conn, sql);
CheckRes(conn, res, "failed to insert data");
int affectedRows = TDengine.AffectRows(res);
Console.WriteLine("affectedRows " + affectedRows);
TDengine.FreeResult(res);
client.Exec("create database power");
client.Exec("CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))");
string insertQuery =
"INSERT INTO " +
"power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') " +
"VALUES " +
"('2023-10-03 14:38:05.000', 10.30000, 219, 0.31000) " +
"('2023-10-03 14:38:15.000', 12.60000, 218, 0.33000) " +
"('2023-10-03 14:38:16.800', 12.30000, 221, 0.31000) " +
"power.d1002 USING power.meters TAGS(3, 'California.SanFrancisco') " +
"VALUES " +
"('2023-10-03 14:38:16.650', 10.30000, 218, 0.25000) " +
"power.d1003 USING power.meters TAGS(2,'California.LosAngeles') " +
"VALUES " +
"('2023-10-03 14:38:05.500', 11.80000, 221, 0.28000) " +
"('2023-10-03 14:38:16.600', 13.40000, 223, 0.29000) " +
"power.d1004 USING power.meters TAGS(3,'California.LosAngeles') " +
"VALUES " +
"('2023-10-03 14:38:05.000', 10.80000, 223, 0.29000) " +
"('2023-10-03 14:38:06.500', 11.50000, 221, 0.35000)";
client.Exec(insertQuery);
}
finally
catch (Exception e)
{
TDengine.Close(conn);
}
}
static IntPtr GetConnection()
{
string host = "localhost";
short port = 6030;
string username = "root";
string password = "taosdata";
string dbname = "";
var conn = TDengine.Connect(host, username, password, dbname, port);
if (conn == IntPtr.Zero)
{
throw new Exception("Connect to TDengine failed");
}
else
{
Console.WriteLine("Connect to TDengine success");
}
return conn;
}
static void CheckRes(IntPtr conn, IntPtr res, String errorMsg)
{
if (TDengine.ErrorNo(res) != 0)
{
throw new Exception($"{errorMsg} since: {TDengine.Error(res)}");
Console.WriteLine(e.ToString());
throw;
}
}
}
}
}
// output:
// Connect to TDengine success
// affectedRows 8

View File

@ -9,7 +9,7 @@
</PropertyGroup>
<ItemGroup>
<PackageReference Include="TDengine.Connector" Version="3.0.*" />
<PackageReference Include="TDengine.Connector" Version="3.1.*" />
</ItemGroup>
</Project>

View File

@ -1,109 +1,38 @@
using TDengineDriver;
using TDengine.Driver;
using TDengine.Driver.Client;
namespace TDengineExample
{
internal class StmtInsertExample
{
private static IntPtr conn;
private static IntPtr stmt;
static void Main()
public static void Main(string[] args)
{
var builder = new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata");
using (var client = DbDriver.Open(builder))
{
conn = GetConnection();
try
{
PrepareSTable();
// 1. init and prepare
stmt = TDengine.StmtInit(conn);
if (stmt == IntPtr.Zero)
client.Exec($"create database power");
client.Exec(
"CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))");
using (var stmt = client.StmtInit())
{
throw new Exception("failed to init stmt.");
stmt.Prepare(
"Insert into power.d1001 using power.meters tags(2,'California.SanFrancisco') values(?,?,?,?)");
var ts = new DateTime(2023, 10, 03, 14, 38, 05, 000);
stmt.BindRow(new object[] { ts, (float)10.30000, (int)219, (float)0.31000 });
stmt.AddBatch();
stmt.Exec();
var affected = stmt.Affected();
Console.WriteLine($"affected rows: {affected}");
}
int res = TDengine.StmtPrepare(stmt, "INSERT INTO ? USING meters TAGS(?, ?) VALUES(?, ?, ?, ?)");
CheckStmtRes(res, "failed to prepare stmt");
// 2. bind table name and tags
TAOS_MULTI_BIND[] tags = new TAOS_MULTI_BIND[2] { TaosMultiBind.MultiBindBinary(new string[] { "California.SanFrancisco" }), TaosMultiBind.MultiBindInt(new int?[] { 2 }) };
res = TDengine.StmtSetTbnameTags(stmt, "d1001", tags);
CheckStmtRes(res, "failed to bind table name and tags");
// 3. bind values
TAOS_MULTI_BIND[] values = new TAOS_MULTI_BIND[4] {
TaosMultiBind.MultiBindTimestamp(new long[2] { 1648432611249, 1648432611749}),
TaosMultiBind.MultiBindFloat(new float?[2] { 10.3f, 12.6f}),
TaosMultiBind.MultiBindInt(new int?[2] { 219, 218}),
TaosMultiBind.MultiBindFloat(new float?[2]{ 0.31f, 0.33f})
};
res = TDengine.StmtBindParamBatch(stmt, values);
CheckStmtRes(res, "failed to bind params");
// 4. add batch
res = TDengine.StmtAddBatch(stmt);
CheckStmtRes(res, "failed to add batch");
// 5. execute
res = TDengine.StmtExecute(stmt);
CheckStmtRes(res, "failed to execute");
// 6. free
TaosMultiBind.FreeTaosBind(tags);
TaosMultiBind.FreeTaosBind(values);
}
finally
catch (Exception e)
{
TDengine.Close(conn);
}
}
static IntPtr GetConnection()
{
string host = "localhost";
short port = 6030;
string username = "root";
string password = "taosdata";
string dbname = "";
var conn = TDengine.Connect(host, username, password, dbname, port);
if (conn == IntPtr.Zero)
{
throw new Exception("Connect to TDengine failed");
}
else
{
Console.WriteLine("Connect to TDengine success");
}
return conn;
}
static void PrepareSTable()
{
IntPtr res = TDengine.Query(conn, "CREATE DATABASE power WAL_RETENTION_PERIOD 3600");
CheckResPtr(res, "failed to create database");
res = TDengine.Query(conn, "USE power");
CheckResPtr(res, "failed to change database");
res = TDengine.Query(conn, "CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)");
CheckResPtr(res, "failed to create stable");
}
static void CheckStmtRes(int res, string errorMsg)
{
if (res != 0)
{
Console.WriteLine(errorMsg + ", " + TDengine.StmtErrorStr(stmt));
int code = TDengine.StmtClose(stmt);
if (code != 0)
{
throw new Exception($"failed to close stmt, {code} reason: {TDengine.StmtErrorStr(stmt)} ");
Console.WriteLine(e);
throw;
}
}
}
static void CheckResPtr(IntPtr res, string errorMsg)
{
if (TDengine.ErrorNo(res) != 0)
{
throw new Exception(errorMsg + " since:" + TDengine.Error(res));
}
}
}
}

View File

@ -9,7 +9,7 @@
</PropertyGroup>
<ItemGroup>
<PackageReference Include="TDengine.Connector" Version="3.0.*" />
<PackageReference Include="TDengine.Connector" Version="3.1.*" />
</ItemGroup>
</Project>

View File

@ -1,95 +1,72 @@
using System;
using TDengineTMQ;
using TDengineDriver;
using System.Runtime.InteropServices;

using TDengine.Driver;
using TDengine.Driver.Client;
using TDengine.TMQ;
namespace TMQExample
{
internal class SubscribeDemo
{
static void Main(string[] args)
public static void Main(string[] args)
{
IntPtr conn = GetConnection();
string topic = "topic_example";
//create topic
IntPtr res = TDengine.Query(conn, $"create topic if not exists {topic} as select * from meters");
if (TDengine.ErrorNo(res) != 0 )
var builder = new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata");
using (var client = DbDriver.Open(builder))
{
throw new Exception($"create topic failed, reason:{TDengine.Error(res)}");
}
var cfg = new ConsumerConfig
try
{
GourpId = "group_1",
TDConnectUser = "root",
TDConnectPasswd = "taosdata",
MsgWithTableName = "true",
TDConnectIp = "127.0.0.1",
client.Exec("CREATE DATABASE power");
client.Exec("USE power");
client.Exec(
"CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))");
client.Exec("CREATE TOPIC topic_meters as SELECT * from power.meters");
var cfg = new Dictionary<string, string>()
{
{ "group.id", "group1" },
{ "auto.offset.reset", "latest" },
{ "td.connect.ip", "127.0.0.1" },
{ "td.connect.user", "root" },
{ "td.connect.pass", "taosdata" },
{ "td.connect.port", "6030" },
{ "client.id", "tmq_example" },
{ "enable.auto.commit", "true" },
{ "msg.with.table.name", "false" },
};
// create consumer
var consumer = new ConsumerBuilder(cfg)
.Build();
// subscribe
consumer.Subscribe(topic);
// consume
for (int i = 0; i < 5; i++)
var consumer = new ConsumerBuilder<Dictionary<string, object>>(cfg).Build();
consumer.Subscribe(new List<string>() { "topic_meters" });
Task.Run(InsertData);
while (true)
{
var consumeRes = consumer.Consume(300);
// print consumeResult
foreach (KeyValuePair<TopicPartition, TaosResult> kv in consumeRes.Message)
using (var cr = consumer.Consume(500))
{
Console.WriteLine("topic partitions:\n{0}", kv.Key.ToString());
kv.Value.Metas.ForEach(meta =>
if (cr == null) continue;
foreach (var message in cr.Message)
{
Console.Write("{0} {1}({2}) \t|", meta.name, meta.TypeName(), meta.size);
});
Console.WriteLine("");
kv.Value.Datas.ForEach(data =>
Console.WriteLine(
$"message {{{((DateTime)message.Value["ts"]).ToString("yyyy-MM-dd HH:mm:ss.fff")}, " +
$"{message.Value["current"]}, {message.Value["voltage"]}, {message.Value["phase"]}}}");
}
}
}
}
catch (Exception e)
{
Console.WriteLine(data.ToString());
});
Console.WriteLine(e.ToString());
throw;
}
}
}
consumer.Commit(consumeRes);
Console.WriteLine("\n================ {0} done ", i);
}
// retrieve topic list
List<string> topics = consumer.Subscription();
topics.ForEach(t => Console.WriteLine("topic name:{0}", t));
// unsubscribe
consumer.Unsubscribe();
// close consumer after use.Otherwise will lead memory leak.
consumer.Close();
TDengine.Close(conn);
}
static IntPtr GetConnection()
static void InsertData()
{
string host = "localhost";
short port = 6030;
string username = "root";
string password = "taosdata";
string dbname = "power";
var conn = TDengine.Connect(host, username, password, dbname, port);
if (conn == IntPtr.Zero)
var builder = new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata");
using (var client = DbDriver.Open(builder))
{
throw new Exception("Connect to TDengine failed");
}
else
while (true)
{
Console.WriteLine("Connect to TDengine success");
client.Exec("INSERT into power.d1001 using power.meters tags(2,'California.SanFrancisco') values(now,11.5,219,0.30)");
Task.Delay(1000).Wait();
}
}
return conn;
}
}

View File

@ -9,7 +9,7 @@
</PropertyGroup>
<ItemGroup>
<PackageReference Include="TDengine.Connector" Version="3.0.*" />
<PackageReference Include="TDengine.Connector" Version="3.1.*" />
</ItemGroup>
</Project>

View File

@ -1,28 +1,18 @@
using System;
using TDengineWS.Impl;
using TDengine.Driver;
using TDengine.Driver.Client;
namespace Examples
{
public class WSConnExample
{
static int Main(string[] args)
static void Main(string[] args)
{
string DSN = "ws://root:taosdata@127.0.0.1:6041/test";
IntPtr wsConn = LibTaosWS.WSConnectWithDSN(DSN);
if (wsConn == IntPtr.Zero)
var builder = new ConnectionStringBuilder("protocol=WebSocket;host=localhost;port=6041;useSSL=false;username=root;password=taosdata");
using (var client = DbDriver.Open(builder))
{
Console.WriteLine("get WS connection failed");
return -1;
Console.WriteLine("connected");
}
else
{
Console.WriteLine("Establish connect success.");
// close connection.
LibTaosWS.WSClose(wsConn);
}
return 0;
}
}
}

View File

@ -6,7 +6,7 @@
</PropertyGroup>
<ItemGroup>
<PackageReference Include="TDengine.Connector" Version="3.0.*" GeneratePathProperty="true" />
<PackageReference Include="TDengine.Connector" Version="3.1.*" GeneratePathProperty="true" />
</ItemGroup>
<Target Name="copyDLLDependency" BeforeTargets="BeforeBuild">
<ItemGroup>

View File

@ -1,61 +1,46 @@
using System;
using TDengineWS.Impl;
using TDengine.Driver;
using TDengine.Driver.Client;
namespace Examples
{
public class WSInsertExample
{
static int Main(string[] args)
public static void Main(string[] args)
{
string DSN = "ws://root:taosdata@127.0.0.1:6041/test";
IntPtr wsConn = LibTaosWS.WSConnectWithDSN(DSN);
// Assert if connection is validate
if (wsConn == IntPtr.Zero)
var builder = new ConnectionStringBuilder("protocol=WebSocket;host=localhost;port=6041;useSSL=false;username=root;password=taosdata");
using (var client = DbDriver.Open(builder))
{
Console.WriteLine("get WS connection failed");
return -1;
}
else
try
{
Console.WriteLine("Establish connect success.");
client.Exec("create database power");
client.Exec("CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))");
string insertQuery =
"INSERT INTO " +
"power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') " +
"VALUES " +
"('2023-10-03 14:38:05.000', 10.30000, 219, 0.31000) " +
"('2023-10-03 14:38:15.000', 12.60000, 218, 0.33000) " +
"('2023-10-03 14:38:16.800', 12.30000, 221, 0.31000) " +
"power.d1002 USING power.meters TAGS(3, 'California.SanFrancisco') " +
"VALUES " +
"('2023-10-03 14:38:16.650', 10.30000, 218, 0.25000) " +
"power.d1003 USING power.meters TAGS(2,'California.LosAngeles') " +
"VALUES " +
"('2023-10-03 14:38:05.500', 11.80000, 221, 0.28000) " +
"('2023-10-03 14:38:16.600', 13.40000, 223, 0.29000) " +
"power.d1004 USING power.meters TAGS(3,'California.LosAngeles') " +
"VALUES " +
"('2023-10-03 14:38:05.000', 10.80000, 223, 0.29000) " +
"('2023-10-03 14:38:06.500', 11.50000, 221, 0.35000)";
client.Exec(insertQuery);
}
string createTable = "CREATE STABLE test.meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);";
string insert = "INSERT INTO test.d1001 USING test.meters TAGS('California.SanFrancisco', 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)" +
"test.d1002 USING test.meters TAGS('California.SanFrancisco', 3) VALUES('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)" +
"test.d1003 USING test.meters TAGS('California.LosAngeles', 2) VALUES('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000)('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) " +
"test.d1004 USING test.meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000)('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)";
IntPtr wsRes = LibTaosWS.WSQuery(wsConn, createTable);
ValidInsert("create table", wsRes);
LibTaosWS.WSFreeResult(wsRes);
wsRes = LibTaosWS.WSQuery(wsConn, insert);
ValidInsert("insert data", wsRes);
LibTaosWS.WSFreeResult(wsRes);
// close connection.
LibTaosWS.WSClose(wsConn);
return 0;
}
static void ValidInsert(string desc, IntPtr wsRes)
catch (Exception e)
{
int code = LibTaosWS.WSErrorNo(wsRes);
if (code != 0)
{
Console.WriteLine($"execute SQL failed: reason: {LibTaosWS.WSErrorStr(wsRes)}, code:{code}");
}
else
{
Console.WriteLine("{0} success affect {2} rows, cost {1} nanoseconds", desc, LibTaosWS.WSTakeTiming(wsRes), LibTaosWS.WSAffectRows(wsRes));
Console.WriteLine(e.ToString());
throw;
}
}
}
}
}
// Establish connect success.
// create table success affect 0 rows, cost 3717542 nanoseconds
// insert data success affect 8 rows, cost 2613637 nanoseconds

View File

@ -6,7 +6,7 @@
<Nullable>enable</Nullable>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="TDengine.Connector" Version="3.0.*" GeneratePathProperty="true" />
<PackageReference Include="TDengine.Connector" Version="3.1.*" GeneratePathProperty="true" />
</ItemGroup>
<Target Name="copyDLLDependency" BeforeTargets="BeforeBuild">
<ItemGroup>

View File

@ -1,79 +1,36 @@
using System;
using TDengineWS.Impl;
using System.Collections.Generic;
using TDengineDriver;
using System.Text;
using TDengine.Driver;
using TDengine.Driver.Client;
namespace Examples
{
public class WSQueryExample
{
static int Main(string[] args)
public static void Main(string[] args)
{
string DSN = "ws://root:taosdata@127.0.0.1:6041/test";
IntPtr wsConn = LibTaosWS.WSConnectWithDSN(DSN);
if (wsConn == IntPtr.Zero)
var builder = new ConnectionStringBuilder("protocol=WebSocket;host=localhost;port=6041;useSSL=false;username=root;password=taosdata");
using (var client = DbDriver.Open(builder))
{
Console.WriteLine("get WS connection failed");
return -1;
try
{
client.Exec("use power");
string query = "SELECT * FROM meters";
using (var rows = client.Query(query))
{
while (rows.Read())
{
Console.WriteLine(
$"{((DateTime)rows.GetValue(0)):yyyy-MM-dd HH:mm:ss.fff}, {rows.GetValue(1)}, {rows.GetValue(2)}, {rows.GetValue(3)}, {rows.GetValue(4)}, {Encoding.UTF8.GetString((byte[])rows.GetValue(5))}");
}
else
{
Console.WriteLine("Establish connect success.");
}
string select = "select * from test.meters";
// optional:wsRes = LibTaosWS.WSQuery(wsConn, select);
IntPtr wsRes = LibTaosWS.WSQueryTimeout(wsConn, select, 1);
// Assert if query execute success.
int code = LibTaosWS.WSErrorNo(wsRes);
if (code != 0)
{
Console.WriteLine($"execute SQL failed: reason: {LibTaosWS.WSErrorStr(wsRes)}, code:{code}");
LibTaosWS.WSFreeResult(wsRes);
return -1;
}
// get meta data
List<TDengineMeta> metas = LibTaosWS.WSGetFields(wsRes);
// get retrieved data
List<object> dataSet = LibTaosWS.WSGetData(wsRes);
// do something with result.
foreach (var meta in metas)
catch (Exception e)
{
Console.Write("{0} {1}({2}) \t|\t", meta.name, meta.TypeName(), meta.size);
Console.WriteLine(e.ToString());
throw;
}
Console.WriteLine("");
for (int i = 0; i < dataSet.Count;)
{
for (int j = 0; j < metas.Count; j++)
{
Console.Write("{0}\t|\t", dataSet[i]);
i++;
}
Console.WriteLine("");
}
// Free result after use.
LibTaosWS.WSFreeResult(wsRes);
// close connection.
LibTaosWS.WSClose(wsConn);
return 0;
}
}
}
// Establish connect success.
// ts TIMESTAMP(8) | current FLOAT(4) | voltage INT(4) | phase FLOAT(4) | location BINARY(64) | groupid INT(4) |
// 1538548685000 | 10.8 | 223 | 0.29 | California.LosAngeles | 3 |
// 1538548686500 | 11.5 | 221 | 0.35 | California.LosAngeles | 3 |
// 1538548685500 | 11.8 | 221 | 0.28 | California.LosAngeles | 2 |
// 1538548696600 | 13.4 | 223 | 0.29 | California.LosAngeles | 2 |
// 1538548685000 | 10.3 | 219 | 0.31 | California.SanFrancisco | 2 |
// 1538548695000 | 12.6 | 218 | 0.33 | California.SanFrancisco | 2 |
// 1538548696800 | 12.3 | 221 | 0.31 | California.SanFrancisco | 2 |
// 1538548696650 | 10.3 | 218 | 0.25 | California.SanFrancisco | 3 |

View File

@ -7,7 +7,7 @@
</PropertyGroup>
<ItemGroup>
<PackageReference Include="TDengine.Connector" Version="3.0.*" GeneratePathProperty="true" />
<PackageReference Include="TDengine.Connector" Version="3.1.*" GeneratePathProperty="true" />
</ItemGroup>
<Target Name="copyDLLDependency" BeforeTargets="BeforeBuild">
<ItemGroup>

View File

@ -1,98 +1,41 @@
using System;
using TDengineWS.Impl;
using TDengineDriver;
using System.Runtime.InteropServices;
using TDengine.Driver;
using TDengine.Driver.Client;
namespace Examples
{
public class WSStmtExample
{
static int Main(string[] args)
public static void Main(string[] args)
{
const string DSN = "ws://root:taosdata@127.0.0.1:6041/test";
const string table = "meters";
const string database = "test";
const string childTable = "d1005";
string insert = $"insert into ? using {database}.{table} tags(?,?) values(?,?,?,?)";
const int numOfTags = 2;
const int numOfColumns = 4;
// Establish connection
IntPtr wsConn = LibTaosWS.WSConnectWithDSN(DSN);
if (wsConn == IntPtr.Zero)
var builder =
new ConnectionStringBuilder(
"protocol=WebSocket;host=localhost;port=6041;useSSL=false;username=root;password=taosdata");
using (var client = DbDriver.Open(builder))
{
Console.WriteLine($"get WS connection failed");
return -1;
try
{
client.Exec($"create database power");
client.Exec(
"CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))");
using (var stmt = client.StmtInit())
{
stmt.Prepare(
"Insert into power.d1001 using power.meters tags(2,'California.SanFrancisco') values(?,?,?,?)");
var ts = new DateTime(2023, 10, 03, 14, 38, 05, 000);
stmt.BindRow(new object[] { ts, (float)10.30000, (int)219, (float)0.31000 });
stmt.AddBatch();
stmt.Exec();
var affected = stmt.Affected();
Console.WriteLine($"affected rows: {affected}");
}
else
{
Console.WriteLine("Establish connect success...");
}
// init stmt
IntPtr wsStmt = LibTaosWS.WSStmtInit(wsConn);
if (wsStmt != IntPtr.Zero)
catch (Exception e)
{
int code = LibTaosWS.WSStmtPrepare(wsStmt, insert);
ValidStmtStep(code, wsStmt, "WSStmtPrepare");
TAOS_MULTI_BIND[] wsTags = new TAOS_MULTI_BIND[] { WSMultiBind.WSBindNchar(new string[] { "California.SanDiego" }), WSMultiBind.WSBindInt(new int?[] { 4 }) };
code = LibTaosWS.WSStmtSetTbnameTags(wsStmt, $"{database}.{childTable}", wsTags, numOfTags);
ValidStmtStep(code, wsStmt, "WSStmtSetTbnameTags");
TAOS_MULTI_BIND[] data = new TAOS_MULTI_BIND[4];
data[0] = WSMultiBind.WSBindTimestamp(new long[] { 1538548687000, 1538548688000, 1538548689000, 1538548690000, 1538548691000 });
data[1] = WSMultiBind.WSBindFloat(new float?[] { 10.30F, 10.40F, 10.50F, 10.60F, 10.70F });
data[2] = WSMultiBind.WSBindInt(new int?[] { 223, 221, 222, 220, 219 });
data[3] = WSMultiBind.WSBindFloat(new float?[] { 0.31F, 0.32F, 0.33F, 0.35F, 0.28F });
code = LibTaosWS.WSStmtBindParamBatch(wsStmt, data, numOfColumns);
ValidStmtStep(code, wsStmt, "WSStmtBindParamBatch");
code = LibTaosWS.WSStmtAddBatch(wsStmt);
ValidStmtStep(code, wsStmt, "WSStmtAddBatch");
IntPtr stmtAffectRowPtr = Marshal.AllocHGlobal(Marshal.SizeOf(typeof(Int32)));
code = LibTaosWS.WSStmtExecute(wsStmt, stmtAffectRowPtr);
ValidStmtStep(code, wsStmt, "WSStmtExecute");
Console.WriteLine("WS STMT insert {0} rows...", Marshal.ReadInt32(stmtAffectRowPtr));
Marshal.FreeHGlobal(stmtAffectRowPtr);
LibTaosWS.WSStmtClose(wsStmt);
// Free unmanaged memory
WSMultiBind.WSFreeTaosBind(wsTags);
WSMultiBind.WSFreeTaosBind(data);
//check result with SQL "SELECT * FROM test.d1005;"
Console.WriteLine(e);
throw;
}
else
{
Console.WriteLine("Init STMT failed...");
}
// close connection.
LibTaosWS.WSClose(wsConn);
return 0;
}
static void ValidStmtStep(int code, IntPtr wsStmt, string desc)
{
if (code != 0)
{
Console.WriteLine($"{desc} failed,reason: {LibTaosWS.WSErrorStr(wsStmt)}, code: {code}");
}
else
{
Console.WriteLine("{0} success...", desc);
}
}
}
}
// WSStmtPrepare success...
// WSStmtSetTbnameTags success...
// WSStmtBindParamBatch success...
// WSStmtAddBatch success...
// WSStmtExecute success...
// WS STMT insert 5 rows...

View File

@ -7,7 +7,7 @@
</PropertyGroup>
<ItemGroup>
<PackageReference Include="TDengine.Connector" Version="3.0.*" GeneratePathProperty="true" />
<PackageReference Include="TDengine.Connector" Version="3.1.*" GeneratePathProperty="true" />
</ItemGroup>
<Target Name="copyDLLDependency" BeforeTargets="BeforeBuild">
<ItemGroup>

View File

@ -176,7 +176,7 @@ npm install @tdengine/rest
</PropertyGroup>
<ItemGroup>
<PackageReference Include="TDengine.Connector" Version="3.0.0" />
<PackageReference Include="TDengine.Connector" Version="3.1.0" />
</ItemGroup>
</Project>

View File

@ -1,3 +0,0 @@
```csharp
{{#include docs/examples/csharp/asyncQuery/Program.cs}}
```

View File

@ -16,7 +16,6 @@ import CQuery from "./_c.mdx";
import PhpQuery from "./_php.mdx";
import PyAsync from "./_py_async.mdx";
import NodeAsync from "./_js_async.mdx";
import CsAsync from "./_cs_async.mdx";
import CAsync from "./_c_async.mdx";
## Main Query Features
@ -175,9 +174,6 @@ Query OK, 6 rows in database (0.005515s)
<TabItem label="Python" value="python">
<PyAsync />
</TabItem>
<TabItem label="C#" value="csharp">
<CsAsync />
</TabItem>
<TabItem label="C" value="c">
<CAsync />
</TabItem>

View File

@ -248,23 +248,23 @@ function close()
<TabItem value="C#" label="C#">
```csharp
class ConsumerBuilder<TValue>
ConsumerBuilder(IEnumerable<KeyValuePair<string, string>> config)
virtual IConsumer Build()
Consumer(ConsumerBuilder builder)
public IConsumer<TValue> Build()
void Subscribe(IEnumerable<string> topics)
void Subscribe(string topic)
ConsumeResult Consume(int millisecondsTimeout)
ConsumeResult<TValue> Consume(int millisecondsTimeout)
List<string> Subscription()
void Unsubscribe()
void Commit(ConsumeResult consumerResult)
List<TopicPartitionOffset> Commit()
void Close()
```
@ -501,25 +501,19 @@ let consumer = taos.consumer({
<TabItem value="C#" label="C#">
```csharp
using TDengineTMQ;
// Set the consumer group (GourpId), automatic commit (EnableAutoCommit),
// auto commit interval (AutoCommitIntervalMs), username (TDConnectUser), password (TDConnectPasswd), and other parameters as needed
var cfg = new ConsumerConfig
{
EnableAutoCommit = "true"
AutoCommitIntervalMs = "1000"
GourpId = "TDengine-TMQ-C#",
TDConnectUser = "root",
TDConnectPasswd = "taosdata",
AutoOffsetReset = "latest"
MsgWithTableName = "true",
TDConnectIp = "127.0.0.1",
TDConnectPort = "6030"
};
var consumer = new ConsumerBuilder(cfg).Build();
var cfg = new Dictionary<string, string>()
{
{ "group.id", "group1" },
{ "auto.offset.reset", "latest" },
{ "td.connect.ip", "127.0.0.1" },
{ "td.connect.user", "root" },
{ "td.connect.pass", "taosdata" },
{ "td.connect.port", "6030" },
{ "client.id", "tmq_example" },
{ "enable.auto.commit", "true" },
{ "msg.with.table.name", "false" },
};
var consumer = new ConsumerBuilder<Dictionary<string, object>>(cfg).Build();
```
</TabItem>
@ -748,10 +742,12 @@ while(true){
// Consume data
while (true)
{
var consumerRes = consumer.Consume(100);
// process ConsumeResult
ProcessMsg(consumerRes);
consumer.Commit(consumerRes);
using (var result = consumer.Consume(500))
{
if (result == null) continue;
ProcessMsg(result);
consumer.Commit();
}
}
```

View File

@ -257,6 +257,63 @@ curl -L -u username:password -d "<SQL>" <ip>:<PORT>/rest/sql/[db_name][?tz=timez
- "INT UNSIGNED"
- "BIGINT UNSIGNED"
- "JSON"
- "VARBINARY"
- "GEOMETRY"
`VARBINARY` and `GEOMETRY` values are returned as hex strings. For example:
Prepare data
```bash
create database demo
use demo
create table t(ts timestamp,c1 varbinary(20),c2 geometry(100))
insert into t values(now,'\x7f8290','point(100 100)')
```
Execute query
```bash
curl --location 'http://<fqdn>:<port>/rest/sql' \
--header 'Content-Type: text/plain' \
--header 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' \
--data 'select * from demo.t'
```
Return results
```json
{
"code": 0,
"column_meta": [
[
"ts",
"TIMESTAMP",
8
],
[
"c1",
"VARBINARY",
20
],
[
"c2",
"GEOMETRY",
100
]
],
"data": [
[
"2023-11-01T06:28:15.210Z",
"7f8290",
"010100000000000000000059400000000000005940"
]
],
"rows": 1
}
```
- `010100000000000000000059400000000000005940` is the [Well-Known Binary (WKB)](https://libgeos.org/specifications/wkb/) representation of `point(100 100)`
#### Errors

File diff suppressed because it is too large.

View File

@ -34,7 +34,7 @@ extern int32_t tsS3UploadDelaySec;
int32_t s3Init();
void s3CleanUp();
int32_t s3PutObjectFromFile(const char *file, const char *object);
int32_t s3PutObjectFromFile2(const char *file, const char *object);
int32_t s3PutObjectFromFile2(const char *file, const char *object, int8_t withcp);
void s3DeleteObjectsByPrefix(const char *prefix);
void s3DeleteObjects(const char *object_name[], int nobject);
bool s3Exists(const char *object_name);

include/common/cos_cp.h (new file, 83 lines added)
View File

@ -0,0 +1,83 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _TD_COMMON_COS_CP_H_
#define _TD_COMMON_COS_CP_H_
#include "os.h"
#include "tdef.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef enum {
COS_CP_TYPE_UPLOAD, // upload
COS_CP_TYPE_DOWNLOAD // download
} ECpType;
typedef struct {
int32_t index; // the index of part, start from 0
int64_t offset; // the offset point of part
int64_t size; // the size of part
int completed; // COS_TRUE completed, COS_FALSE uncompleted
char etag[128]; // the etag of part, for upload
uint64_t crc64;
} SCheckpointPart;
typedef struct {
ECpType cp_type; // 0 upload, 1 download
char md5[64]; // the md5 of checkpoint content
TdFilePtr thefile; // the handle of checkpoint file
char file_path[TSDB_FILENAME_LEN]; // local file path
int64_t file_size; // local file size, for upload
int32_t file_last_modified; // local file last modified time, for upload
char file_md5[64]; // md5 of the local file content, for upload, reserved
char object_name[128]; // object name
int64_t object_size; // object size, for download
char object_last_modified[64]; // object last modified time, for download
char object_etag[128]; // object etag, for download
char upload_id[128]; // upload id
int part_num; // the total number of parts
int64_t part_size; // the part size, byte
SCheckpointPart* parts; // the parts of local or object, from 0
} SCheckpoint;
int32_t cos_cp_open(char const* cp_path, SCheckpoint* checkpoint);
void cos_cp_close(TdFilePtr fd);
void cos_cp_remove(char const* filepath);
int32_t cos_cp_load(char const* filepath, SCheckpoint* checkpoint);
int32_t cos_cp_dump(SCheckpoint* checkpoint);
void cos_cp_get_undo_parts(SCheckpoint* checkpoint, int* part_num, SCheckpointPart* parts, int64_t* consume_bytes);
void cos_cp_update(SCheckpoint* checkpoint, int32_t part_index, char const* etag, uint64_t crc64);
void cos_cp_build_upload(SCheckpoint* checkpoint, char const* filepath, int64_t size, int32_t mtime,
char const* upload_id, int64_t part_size);
bool cos_cp_is_valid_upload(SCheckpoint* checkpoint, int64_t size, int32_t mtime);
void cos_cp_build_download(SCheckpoint* checkpoint, char const* filepath, char const* object_name, int64_t object_size,
char const* object_lmtime, char const* object_etag, int64_t part_size);
bool cos_cp_is_valid_download(SCheckpoint* checkpoint, char const* object_name, int64_t object_size,
char const* object_lmtime, char const* object_etag);
#ifdef __cplusplus
}
#endif
#endif /*_TD_COMMON_COS_CP_H_*/

View File

@ -109,6 +109,7 @@ extern bool tsMonitorComp;
// audit
extern bool tsEnableAudit;
extern bool tsEnableAuditCreateTable;
extern int32_t tsAuditInterval;
// telem
extern bool tsEnableTelem;

View File

@ -23,13 +23,13 @@
#include "tjson.h"
#include "tmsgcb.h"
#include "trpc.h"
#include "mnode.h"
#ifdef __cplusplus
extern "C" {
#endif
#define AUDIT_DETAIL_MAX 65472
#define AUDIT_OPERATION_LEN 20
typedef struct {
const char *server;
@ -37,13 +37,28 @@ typedef struct {
bool comp;
} SAuditCfg;
typedef struct {
int64_t curTime;
char strClusterId[TSDB_CLUSTER_ID_LEN];
char clientAddress[50];
char user[TSDB_USER_LEN];
char operation[AUDIT_OPERATION_LEN];
char target1[TSDB_DB_NAME_LEN]; //put db name
char target2[TSDB_STREAM_NAME_LEN]; //put stb name, table name, topic name, user name, stream name, use max
char* detail;
} SAuditRecord;
int32_t auditInit(const SAuditCfg *pCfg);
void auditCleanup();
void auditSend(SJson *pJson);
void auditRecord(SRpcMsg *pReq, int64_t clusterId, char *operation, char *target1, char *target2,
char *detail, int32_t len);
void auditAddRecord(SRpcMsg *pReq, int64_t clusterId, char *operation, char *target1, char *target2,
char *detail, int32_t len);
void auditSendRecordsInBatch();
#ifdef __cplusplus
}
#endif
#endif /*_TD_MONITOR_H_*/
#endif /*_TD_AUDIT_H_*/

View File

@ -509,10 +509,7 @@ typedef struct SStreamMeta {
SArray* chkpSaved;
SArray* chkpInUse;
SRWLatch chkpDirLock;
void* qHandle;
int32_t pauseTaskNum;
void* bkdChkptMgt;
} SStreamMeta;
@ -840,11 +837,10 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int3
int32_t streamMetaGetNumOfTasks(SStreamMeta* pMeta);
SStreamTask* streamMetaAcquireTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId);
void streamMetaReleaseTask(SStreamMeta* pMeta, SStreamTask* pTask);
int32_t streamMetaReopen(SStreamMeta* pMeta);
void streamMetaClear(SStreamMeta* pMeta);
void streamMetaInitBackend(SStreamMeta* pMeta);
int32_t streamMetaCommit(SStreamMeta* pMeta);
int32_t streamMetaLoadAllTasks(SStreamMeta* pMeta);
int32_t streamMetaReloadAllTasks(SStreamMeta* pMeta);
int64_t streamMetaGetLatestCheckpointId(SStreamMeta* pMeta);
void streamMetaNotifyClose(SStreamMeta* pMeta);
int32_t streamTaskSetDb(SStreamMeta* pMeta, void* pTask, char* key);

View File

@ -33,11 +33,12 @@ extern "C" {
#define SYNC_MAX_PROGRESS_WAIT_MS 4000
#define SYNC_MAX_START_TIME_RANGE_MS (1000 * 20)
#define SYNC_MAX_RECV_TIME_RANGE_MS 1200
#define SYNC_DEL_WAL_MS (1000 * 60)
#define SYNC_ADD_QUORUM_COUNT 3
#define SYNC_VNODE_LOG_RETENTION (TSDB_SYNC_LOG_BUFFER_RETENTION + 1)
#define SNAPSHOT_WAIT_MS 1000 * 5
#define SYNC_WAL_LOG_RETENTION_SIZE (8LL * 1024 * 1024 * 1024)
#define SYNC_MAX_RETRY_BACKOFF 5
#define SYNC_LOG_REPL_RETRY_WAIT_MS 100
#define SYNC_APPEND_ENTRIES_TIMEOUT_MS 10000
@ -219,6 +220,7 @@ typedef struct SSyncLogStore {
SyncIndex (*syncLogWriteIndex)(struct SSyncLogStore* pLogStore);
SyncIndex (*syncLogLastIndex)(struct SSyncLogStore* pLogStore);
SyncIndex (*syncLogIndexRetention)(struct SSyncLogStore* pLogStore, int64_t bytes);
SyncTerm (*syncLogLastTerm)(struct SSyncLogStore* pLogStore);
int32_t (*syncLogAppendEntry)(struct SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry, bool forcSync);

View File

@ -225,6 +225,7 @@ bool walIsEmpty(SWal *);
int64_t walGetFirstVer(SWal *);
int64_t walGetSnapshotVer(SWal *);
int64_t walGetLastVer(SWal *);
int64_t walGetVerRetention(SWal *pWal, int64_t bytes);
int64_t walGetCommittedVer(SWal *);
int64_t walGetAppliedVer(SWal *);

View File

@ -47,18 +47,13 @@ typedef int32_t TdUcs4;
#define strtof STR_TO_F_FUNC_TAOS_FORBID
#endif
#ifdef WINDOWS
#define tstrdup(str) _strdup(str)
#else
#define tstrdup(str) strdup(str)
#endif
#define tstrncpy(dst, src, size) \
do { \
strncpy((dst), (src), (size)); \
(dst)[(size)-1] = 0; \
} while (0)
char *tstrdup(const char *src);
int32_t taosUcs4len(TdUcs4 *ucs4);
int64_t taosStr2int64(const char *str);

View File

@ -205,7 +205,7 @@ typedef struct SRequestSendRecvBody {
__taos_async_fn_t queryFp;
__taos_async_fn_t fetchFp;
EQueryExecMode execMode;
void* param;
void* interParam;
SDataBuf requestMsg;
int64_t queryJob; // query job, created according to sql query DAG.
int32_t subplanNum;
@ -287,6 +287,7 @@ typedef struct SRequestObj {
typedef struct SSyncQueryParam {
tsem_t sem;
SRequestObj* pRequest;
void* userParam;
} SSyncQueryParam;
void* doAsyncFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4);
@ -420,6 +421,7 @@ int32_t buildPreviousRequest(SRequestObj *pRequest, const char* sql, SRequestObj
int32_t prepareAndParseSqlSyntax(SSqlCallbackWrapper **ppWrapper, SRequestObj *pRequest, bool updateMetaForce);
void returnToUser(SRequestObj* pRequest);
void stopAllQueries(SRequestObj *pRequest);
void doRequestCallback(SRequestObj* pRequest, int32_t code);
void freeQueryParam(SSyncQueryParam* param);
#ifdef TD_ENTERPRISE

View File

@ -316,6 +316,15 @@ void *createRequest(uint64_t connId, int32_t type, int64_t reqid) {
terrno = TSDB_CODE_TSC_DISCONNECTED;
return NULL;
}
SSyncQueryParam *interParam = taosMemoryCalloc(1, sizeof(SSyncQueryParam));
if (interParam == NULL) {
doDestroyRequest(pRequest);
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
tsem_init(&interParam->sem, 0, 0);
interParam->pRequest = pRequest;
pRequest->body.interParam = interParam;
pRequest->resType = RES_TYPE__QUERY;
pRequest->requestId = reqid == 0 ? generateRequestId() : reqid;
@ -437,12 +446,10 @@ void doDestroyRequest(void *p) {
deregisterRequest(pRequest);
}
if (pRequest->syncQuery) {
if (pRequest->body.param) {
tsem_destroy(&((SSyncQueryParam *)pRequest->body.param)->sem);
}
taosMemoryFree(pRequest->body.param);
if (pRequest->body.interParam) {
tsem_destroy(&((SSyncQueryParam *)pRequest->body.interParam)->sem);
}
taosMemoryFree(pRequest->body.interParam);
qDestroyQuery(pRequest->pQuery);
nodesDestroyAllocator(pRequest->allocatorRefId);

View File

@ -196,21 +196,7 @@ int32_t buildRequest(uint64_t connId, const char* sql, int sqlLen, void* param,
(*pRequest)->sqlLen = sqlLen;
(*pRequest)->validateOnly = validateSql;
SSyncQueryParam* newpParam = NULL;
if (param == NULL) {
newpParam = taosMemoryCalloc(1, sizeof(SSyncQueryParam));
if (newpParam == NULL) {
destroyRequest(*pRequest);
*pRequest = NULL;
return TSDB_CODE_OUT_OF_MEMORY;
}
tsem_init(&newpParam->sem, 0, 0);
newpParam->pRequest = (*pRequest);
param = newpParam;
}
(*pRequest)->body.param = param;
((SSyncQueryParam*)(*pRequest)->body.interParam)->userParam = param;
STscObj* pTscObj = (*pRequest)->pTscObj;
int32_t err = taosHashPut(pTscObj->pRequests, &(*pRequest)->self, sizeof((*pRequest)->self), &(*pRequest)->self,
@ -218,7 +204,6 @@ int32_t buildRequest(uint64_t connId, const char* sql, int sqlLen, void* param,
if (err) {
tscError("%" PRId64 " failed to add to request container, reqId:0x%" PRIx64 ", conn:%" PRId64 ", %s",
(*pRequest)->self, (*pRequest)->requestId, pTscObj->id, sql);
freeQueryParam(newpParam);
destroyRequest(*pRequest);
*pRequest = NULL;
return TSDB_CODE_OUT_OF_MEMORY;
@ -230,7 +215,6 @@ int32_t buildRequest(uint64_t connId, const char* sql, int sqlLen, void* param,
nodesCreateAllocator((*pRequest)->requestId, tsQueryNodeChunkSize, &((*pRequest)->allocatorRefId))) {
tscError("%" PRId64 " failed to create node allocator, reqId:0x%" PRIx64 ", conn:%" PRId64 ", %s",
(*pRequest)->self, (*pRequest)->requestId, pTscObj->id, sql);
freeQueryParam(newpParam);
destroyRequest(*pRequest);
*pRequest = NULL;
return TSDB_CODE_OUT_OF_MEMORY;
@ -336,7 +320,7 @@ static SAppInstInfo* getAppInfo(SRequestObj* pRequest) { return pRequest->pTscOb
void asyncExecLocalCmd(SRequestObj* pRequest, SQuery* pQuery) {
SRetrieveTableRsp* pRsp = NULL;
if (pRequest->validateOnly) {
pRequest->body.queryFp(pRequest->body.param, pRequest, 0);
doRequestCallback(pRequest, 0);
return;
}
@ -358,18 +342,18 @@ void asyncExecLocalCmd(SRequestObj* pRequest, SQuery* pQuery) {
pRequest->requestId);
}
pRequest->body.queryFp(pRequest->body.param, pRequest, code);
doRequestCallback(pRequest, code);
}
int32_t asyncExecDdlQuery(SRequestObj* pRequest, SQuery* pQuery) {
if (pRequest->validateOnly) {
pRequest->body.queryFp(pRequest->body.param, pRequest, 0);
doRequestCallback(pRequest, 0);
return TSDB_CODE_SUCCESS;
}
// drop table if exists not_exists_table
if (NULL == pQuery->pCmdMsg) {
pRequest->body.queryFp(pRequest->body.param, pRequest, 0);
doRequestCallback(pRequest, 0);
return TSDB_CODE_SUCCESS;
}
@ -384,7 +368,7 @@ int32_t asyncExecDdlQuery(SRequestObj* pRequest, SQuery* pQuery) {
int64_t transporterId = 0;
int32_t code = asyncSendMsgToServer(pAppInfo->pTransporter, &pMsgInfo->epSet, &transporterId, pSendMsg);
if (code) {
pRequest->body.queryFp(pRequest->body.param, pRequest, code);
doRequestCallback(pRequest, code);
}
return code;
}
@ -913,7 +897,7 @@ void continuePostSubQuery(SRequestObj* pRequest, TAOS_ROW row) {
void returnToUser(SRequestObj* pRequest) {
if (pRequest->relation.userRefId == pRequest->self || 0 == pRequest->relation.userRefId) {
// return to client
pRequest->body.queryFp(pRequest->body.param, pRequest, pRequest->code);
doRequestCallback(pRequest, pRequest->code);
return;
}
@ -921,7 +905,7 @@ void returnToUser(SRequestObj* pRequest) {
if (pUserReq) {
pUserReq->code = pRequest->code;
// return to client
pUserReq->body.queryFp(pUserReq->body.param, pUserReq, pUserReq->code);
doRequestCallback(pUserReq, pUserReq->code);
releaseRequest(pRequest->relation.userRefId);
return;
} else {
@ -1031,7 +1015,7 @@ void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) {
pRequest->pWrapper = NULL;
// return to client
pRequest->body.queryFp(pRequest->body.param, pRequest, code);
doRequestCallback(pRequest, code);
}
}
@ -1186,7 +1170,7 @@ static int32_t asyncExecSchQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaDat
pRequest->code = terrno;
}
pRequest->body.queryFp(pRequest->body.param, pRequest, code);
doRequestCallback(pRequest, code);
}
// todo not to be released here
@ -1199,7 +1183,7 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultM
int32_t code = 0;
if (pRequest->parseOnly) {
pRequest->body.queryFp(pRequest->body.param, pRequest, 0);
doRequestCallback(pRequest, 0);
return;
}
@ -1233,11 +1217,11 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultM
}
case QUERY_EXEC_MODE_EMPTY_RESULT:
pRequest->type = TSDB_SQL_RETRIEVE_EMPTY_RESULT;
pRequest->body.queryFp(pRequest->body.param, pRequest, 0);
doRequestCallback(pRequest, 0);
break;
default:
tscError("0x%" PRIx64 " invalid execMode %d", pRequest->self, pQuery->execMode);
pRequest->body.queryFp(pRequest->body.param, pRequest, -1);
doRequestCallback(pRequest, -1);
break;
}
}
@ -1703,8 +1687,8 @@ void* doFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4)
}
static void syncFetchFn(void* param, TAOS_RES* res, int32_t numOfRows) {
SSyncQueryParam* pParam = param;
tsem_post(&pParam->sem);
tsem_t* sem = param;
tsem_post(sem);
}
void* doAsyncFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4) {
@ -1722,10 +1706,11 @@ void* doAsyncFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertU
// convert ucs4 to native multi-bytes string
pResultInfo->convertUcs4 = convertUcs4;
SSyncQueryParam* pParam = pRequest->body.param;
taos_fetch_rows_a(pRequest, syncFetchFn, pParam);
tsem_wait(&pParam->sem);
tsem_t sem;
tsem_init(&sem, 0, 0);
taos_fetch_rows_a(pRequest, syncFetchFn, &sem);
tsem_wait(&sem);
tsem_destroy(&sem);
}
if (pResultInfo->numOfRows == 0 || pRequest->code != TSDB_CODE_SUCCESS) {
@ -2492,6 +2477,10 @@ TAOS_RES* taosQueryImpl(TAOS* taos, const char* sql, bool validateOnly) {
tscDebug("taos_query start with sql:%s", sql);
SSyncQueryParam* param = taosMemoryCalloc(1, sizeof(SSyncQueryParam));
if (NULL == param) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
tsem_init(&param->sem, 0, 0);
taosAsyncQueryImpl(*(int64_t*)taos, sql, syncQueryFn, param, validateOnly);
@ -2501,9 +2490,8 @@ TAOS_RES* taosQueryImpl(TAOS* taos, const char* sql, bool validateOnly) {
if (param->pRequest != NULL) {
param->pRequest->syncQuery = true;
pRequest = param->pRequest;
} else {
taosMemoryFree(param);
}
taosMemoryFree(param);
tscDebug("taos_query end with sql:%s", sql);
@ -2517,19 +2505,20 @@ TAOS_RES* taosQueryImplWithReqid(TAOS* taos, const char* sql, bool validateOnly,
}
SSyncQueryParam* param = taosMemoryCalloc(1, sizeof(SSyncQueryParam));
if (param == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
tsem_init(&param->sem, 0, 0);
taosAsyncQueryImplWithReqid(*(int64_t*)taos, sql, syncQueryFn, param, validateOnly, reqid);
tsem_wait(&param->sem);
SRequestObj* pRequest = NULL;
if (param->pRequest != NULL) {
param->pRequest->syncQuery = true;
pRequest = param->pRequest;
} else {
taosMemoryFree(param);
}
taosMemoryFree(param);
return pRequest;
}
@ -2547,13 +2536,13 @@ static void fetchCallback(void* pResult, void* param, int32_t code) {
if (code != TSDB_CODE_SUCCESS) {
pRequest->code = code;
taosMemoryFreeClear(pResultInfo->pData);
pRequest->body.fetchFp(pRequest->body.param, pRequest, 0);
pRequest->body.fetchFp(((SSyncQueryParam *)pRequest->body.interParam)->userParam, pRequest, 0);
return;
}
if (pRequest->code != TSDB_CODE_SUCCESS) {
taosMemoryFreeClear(pResultInfo->pData);
pRequest->body.fetchFp(pRequest->body.param, pRequest, 0);
pRequest->body.fetchFp(((SSyncQueryParam *)pRequest->body.interParam)->userParam, pRequest, 0);
return;
}
@ -2574,20 +2563,12 @@ static void fetchCallback(void* pResult, void* param, int32_t code) {
atomic_add_fetch_64((int64_t*)&pActivity->fetchBytes, pRequest->body.resInfo.payloadLen);
}
pRequest->body.fetchFp(pRequest->body.param, pRequest, pResultInfo->numOfRows);
pRequest->body.fetchFp(((SSyncQueryParam *)pRequest->body.interParam)->userParam, pRequest, pResultInfo->numOfRows);
}
void taosAsyncFetchImpl(SRequestObj* pRequest, __taos_async_fn_t fp, void* param) {
if (pRequest->syncQuery && pRequest->body.param != param) {
if (pRequest->body.param) {
tsem_destroy(&((SSyncQueryParam *)pRequest->body.param)->sem);
}
taosMemoryFree(pRequest->body.param);
pRequest->syncQuery = false;
}
pRequest->body.fetchFp = fp;
pRequest->body.param = param;
((SSyncQueryParam *)pRequest->body.interParam)->userParam = param;
SReqResultInfo* pResultInfo = &pRequest->body.resInfo;
@ -2625,6 +2606,10 @@ void taosAsyncFetchImpl(SRequestObj* pRequest, __taos_async_fn_t fp, void* param
schedulerFetchRows(pRequest->body.queryJob, &req);
}
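// Helper that invokes the user's query callback with the original user parameter kept in interParam.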
void doRequestCallback(SRequestObj* pRequest, int32_t code) {
pRequest->body.queryFp(((SSyncQueryParam *)pRequest->body.interParam)->userParam, pRequest, code);
}
int32_t clientParseSql(void* param, const char* dbName, const char* sql, bool parseOnly, const char* effectiveUser, SParseSqlRes* pRes) {
#ifndef TD_ENTERPRISE
return TSDB_CODE_SUCCESS;
@ -2633,4 +2618,3 @@ int32_t clientParseSql(void* param, const char* dbName, const char* sql, bool pa
#endif
}

View File

@ -1095,7 +1095,7 @@ static void doAsyncQueryFromParse(SMetaData *pResultMeta, void *param, int32_t c
pRequest->pWrapper = NULL;
terrno = code;
pRequest->code = code;
pRequest->body.queryFp(pRequest->body.param, pRequest, code);
doRequestCallback(pRequest, code);
}
}
@ -1112,7 +1112,7 @@ void continueInsertFromCsv(SSqlCallbackWrapper *pWrapper, SRequestObj *pRequest)
pRequest->pWrapper = NULL;
terrno = code;
pRequest->code = code;
pRequest->body.queryFp(pRequest->body.param, pRequest, code);
doRequestCallback(pRequest, code);
}
}
@ -1210,7 +1210,7 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {
terrno = code;
pRequest->code = code;
tscDebug("call sync query cb with code: %s", tstrerror(code));
pRequest->body.queryFp(pRequest->body.param, pRequest, code);
doRequestCallback(pRequest, code);
return;
}
@ -1242,7 +1242,7 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {
terrno = code;
pRequest->code = code;
pRequest->body.queryFp(pRequest->body.param, pRequest, code);
doRequestCallback(pRequest, code);
}
}
@ -1545,12 +1545,12 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) {
conn.mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp);
code = catalogAsyncGetAllMeta(pCtg, &conn, &catalogReq, syncCatalogFn, pRequest->body.param, NULL);
code = catalogAsyncGetAllMeta(pCtg, &conn, &catalogReq, syncCatalogFn, pRequest->body.interParam, NULL);
if (code) {
goto _return;
}
SSyncQueryParam *pParam = pRequest->body.param;
SSyncQueryParam *pParam = pRequest->body.interParam;
tsem_wait(&pParam->sem);
_return:

View File

@ -41,7 +41,7 @@ int32_t genericRspCallback(void* param, SDataBuf* pMsg, int32_t code) {
taosMemoryFree(pMsg->pEpSet);
taosMemoryFree(pMsg->pData);
if (pRequest->body.queryFp != NULL) {
pRequest->body.queryFp(pRequest->body.param, pRequest, code);
doRequestCallback(pRequest, code);
} else {
tsem_post(&pRequest->body.rspSem);
}
@ -199,7 +199,7 @@ int32_t processCreateDbRsp(void* param, SDataBuf* pMsg, int32_t code) {
}
if (pRequest->body.queryFp) {
pRequest->body.queryFp(pRequest->body.param, pRequest, code);
doRequestCallback(pRequest, code);
} else {
tsem_post(&pRequest->body.rspSem);
}
@ -235,7 +235,8 @@ int32_t processUseDbRsp(void* param, SDataBuf* pMsg, int32_t code) {
setErrno(pRequest, code);
if (pRequest->body.queryFp != NULL) {
pRequest->body.queryFp(pRequest->body.param, pRequest, pRequest->code);
doRequestCallback(pRequest, pRequest->code);
} else {
tsem_post(&pRequest->body.rspSem);
}
@ -299,7 +300,7 @@ int32_t processUseDbRsp(void* param, SDataBuf* pMsg, int32_t code) {
taosMemoryFree(pMsg->pEpSet);
if (pRequest->body.queryFp != NULL) {
pRequest->body.queryFp(pRequest->body.param, pRequest, pRequest->code);
doRequestCallback(pRequest, pRequest->code);
} else {
tsem_post(&pRequest->body.rspSem);
}
@ -343,7 +344,7 @@ int32_t processCreateSTableRsp(void* param, SDataBuf* pMsg, int32_t code) {
}
}
pRequest->body.queryFp(pRequest->body.param, pRequest, code);
doRequestCallback(pRequest, code);
} else {
tsem_post(&pRequest->body.rspSem);
}
@ -380,7 +381,7 @@ int32_t processDropDbRsp(void* param, SDataBuf* pMsg, int32_t code) {
taosMemoryFree(pMsg->pEpSet);
if (pRequest->body.queryFp != NULL) {
pRequest->body.queryFp(pRequest->body.param, pRequest, code);
doRequestCallback(pRequest, code);
} else {
tsem_post(&pRequest->body.rspSem);
}
@ -420,7 +421,7 @@ int32_t processAlterStbRsp(void* param, SDataBuf* pMsg, int32_t code) {
}
}
pRequest->body.queryFp(pRequest->body.param, pRequest, code);
doRequestCallback(pRequest, code);
} else {
tsem_post(&pRequest->body.rspSem);
}
@ -534,7 +535,7 @@ int32_t processShowVariablesRsp(void* param, SDataBuf* pMsg, int32_t code) {
taosMemoryFree(pMsg->pEpSet);
if (pRequest->body.queryFp != NULL) {
pRequest->body.queryFp(pRequest->body.param, pRequest, code);
doRequestCallback(pRequest, code);
} else {
tsem_post(&pRequest->body.rspSem);
}

View File

@ -1,6 +1,8 @@
#define ALLOW_FORBID_FUNC
#include "cos.h"
#include "cos_cp.h"
#include "tdef.h"
extern char tsS3Endpoint[];
extern char tsS3AccessKeyId[];
@ -86,7 +88,7 @@ typedef struct {
char err_msg[128];
S3Status status;
uint64_t content_length;
char * buf;
char *buf;
int64_t buf_pos;
} TS3SizeCBD;
@ -270,7 +272,7 @@ typedef struct list_parts_callback_data {
typedef struct MultipartPartData {
put_object_callback_data put_object_data;
int seq;
UploadManager * manager;
UploadManager *manager;
} MultipartPartData;
static int putObjectDataCallback(int bufferSize, char *buffer, void *callbackData) {
@ -317,12 +319,23 @@ S3Status MultipartResponseProperiesCallback(const S3ResponseProperties *properti
MultipartPartData *data = (MultipartPartData *)callbackData;
int seq = data->seq;
const char * etag = properties->eTag;
const char *etag = properties->eTag;
data->manager->etags[seq - 1] = strdup(etag);
data->manager->next_etags_pos = seq;
return S3StatusOK;
}
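// Checkpoint-aware variant: records the part's ETag but does not advance next_etags_pos, since resume progress is tracked in the checkpoint file instead.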
S3Status MultipartResponseProperiesCallbackWithCp(const S3ResponseProperties *properties, void *callbackData) {
responsePropertiesCallbackNull(properties, callbackData);
MultipartPartData *data = (MultipartPartData *)callbackData;
int seq = data->seq;
const char *etag = properties->eTag;
data->manager->etags[seq - 1] = strdup(etag);
// data->manager->next_etags_pos = seq;
return S3StatusOK;
}
static int multipartPutXmlCallback(int bufferSize, char *buffer, void *callbackData) {
UploadManager *manager = (UploadManager *)callbackData;
int ret = 0;
@ -446,77 +459,36 @@ static int try_get_parts_info(const char *bucketName, const char *key, UploadMan
return 0;
}
*/
int32_t s3PutObjectFromFile2(const char *file, const char *object) {
static int32_t s3PutObjectFromFileSimple(S3BucketContext *bucket_context, char const *object_name, int64_t size,
S3PutProperties *put_prop, put_object_callback_data *data) {
int32_t code = 0;
const char *key = object;
// const char *uploadId = 0;
const char * filename = 0;
uint64_t contentLength = 0;
const char * cacheControl = 0, *contentType = 0, *md5 = 0;
const char * contentDispositionFilename = 0, *contentEncoding = 0;
int64_t expires = -1;
S3CannedAcl cannedAcl = S3CannedAclPrivate;
int metaPropertiesCount = 0;
S3NameValue metaProperties[S3_MAX_METADATA_COUNT];
char useServerSideEncryption = 0;
put_object_callback_data data = {0};
// int noStatus = 0;
// data.infile = 0;
// data.gb = 0;
// data.infileFD = NULL;
// data.noStatus = noStatus;
// uError("ERROR: %s stat file %s: ", __func__, file);
if (taosStatFile(file, &contentLength, NULL, NULL) < 0) {
uError("ERROR: %s Failed to stat file %s: ", __func__, file);
code = TAOS_SYSTEM_ERROR(errno);
return code;
}
if (!(data.infileFD = taosOpenFile(file, TD_FILE_READ))) {
uError("ERROR: %s Failed to open file %s: ", __func__, file);
code = TAOS_SYSTEM_ERROR(errno);
return code;
}
data.totalContentLength = data.totalOriginalContentLength = data.contentLength = data.originalContentLength =
contentLength;
S3BucketContext bucketContext = {0, tsS3BucketName, protocolG, uriStyleG, tsS3AccessKeyId, tsS3AccessKeySecret,
0, awsRegionG};
S3PutProperties putProperties = {contentType, md5,
cacheControl, contentDispositionFilename,
contentEncoding, expires,
cannedAcl, metaPropertiesCount,
metaProperties, useServerSideEncryption};
if (contentLength <= MULTIPART_CHUNK_SIZE) {
S3PutObjectHandler putObjectHandler = {{&responsePropertiesCallbackNull, &responseCompleteCallback},
&putObjectDataCallback};
do {
S3_put_object(&bucketContext, key, contentLength, &putProperties, 0, 0, &putObjectHandler, &data);
} while (S3_status_is_retryable(data.status) && should_retry());
S3_put_object(bucket_context, object_name, size, put_prop, 0, 0, &putObjectHandler, data);
} while (S3_status_is_retryable(data->status) && should_retry());
if (data.status != S3StatusOK) {
s3PrintError(__FILE__, __LINE__, __func__, data.status, data.err_msg);
if (data->status != S3StatusOK) {
s3PrintError(__FILE__, __LINE__, __func__, data->status, data->err_msg);
code = TAOS_SYSTEM_ERROR(EIO);
} else if (data.contentLength) {
uError("ERROR: %s Failed to read remaining %llu bytes from input", __func__,
(unsigned long long)data.contentLength);
} else if (data->contentLength) {
uError("%s Failed to read remaining %llu bytes from input", __func__, (unsigned long long)data->contentLength);
code = TAOS_SYSTEM_ERROR(EIO);
}
} else {
return code;
}
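// Multipart upload without checkpoint support: initiate the upload, send the parts sequentially, then commit them with a single CompleteMultipartUpload request.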
static int32_t s3PutObjectFromFileWithoutCp(S3BucketContext *bucket_context, char const *object_name,
int64_t contentLength, S3PutProperties *put_prop,
put_object_callback_data *data) {
int32_t code = 0;
uint64_t totalContentLength = contentLength;
uint64_t todoContentLength = contentLength;
UploadManager manager = {0};
// manager.upload_id = 0;
// manager.gb = 0;
// div round up
int seq;
uint64_t chunk_size = MULTIPART_CHUNK_SIZE >> 3;
int totalSeq = (contentLength + chunk_size - 1) / chunk_size;
const int max_part_num = 10000;
@ -540,22 +512,8 @@ int32_t s3PutObjectFromFile2(const char *file, const char *object) {
manager.etags = (char **)taosMemoryCalloc(totalSeq, sizeof(char *));
manager.next_etags_pos = 0;
/*
if (uploadId) {
manager.upload_id = strdup(uploadId);
manager.remaining = contentLength;
if (!try_get_parts_info(tsS3BucketName, key, &manager)) {
fseek(data.infile, -(manager.remaining), 2);
taosLSeekFile(data.infileFD, -(manager.remaining), SEEK_END);
contentLength = manager.remaining;
goto upload;
} else {
goto clean;
}
}
*/
do {
S3_initiate_multipart(&bucketContext, key, 0, &handler, 0, timeoutMsG, &manager);
S3_initiate_multipart(bucket_context, object_name, 0, &handler, 0, timeoutMsG, &manager);
} while (S3_status_is_retryable(manager.status) && should_retry());
if (manager.upload_id == 0 || manager.status != S3StatusOK) {
@ -564,13 +522,13 @@ int32_t s3PutObjectFromFile2(const char *file, const char *object) {
goto clean;
}
upload:
upload:
todoContentLength -= chunk_size * manager.next_etags_pos;
for (seq = manager.next_etags_pos + 1; seq <= totalSeq; seq++) {
for (int seq = manager.next_etags_pos + 1; seq <= totalSeq; seq++) {
partData.manager = &manager;
partData.seq = seq;
if (partData.put_object_data.gb == NULL) {
partData.put_object_data = data;
partData.put_object_data = *data;
}
partContentLength = ((contentLength > chunk_size) ? chunk_size : contentLength);
// printf("%s Part Seq %d, length=%d\n", srcSize ? "Copying" : "Sending", seq, partContentLength);
@ -578,9 +536,9 @@ int32_t s3PutObjectFromFile2(const char *file, const char *object) {
partData.put_object_data.originalContentLength = partContentLength;
partData.put_object_data.totalContentLength = todoContentLength;
partData.put_object_data.totalOriginalContentLength = totalContentLength;
putProperties.md5 = 0;
put_prop->md5 = 0;
do {
S3_upload_part(&bucketContext, key, &putProperties, &putObjectHandler, seq, manager.upload_id,
S3_upload_part(bucket_context, object_name, put_prop, &putObjectHandler, seq, manager.upload_id,
partContentLength, 0, timeoutMsG, &partData);
} while (S3_status_is_retryable(partData.put_object_data.status) && should_retry());
if (partData.put_object_data.status != S3StatusOK) {
@ -612,7 +570,7 @@ int32_t s3PutObjectFromFile2(const char *file, const char *object) {
manager.remaining = size;
do {
S3_complete_multipart_upload(&bucketContext, key, &commit_handler, manager.upload_id, manager.remaining, 0,
S3_complete_multipart_upload(bucket_context, object_name, &commit_handler, manager.upload_id, manager.remaining, 0,
timeoutMsG, &manager);
} while (S3_status_is_retryable(manager.status) && should_retry());
if (manager.status != S3StatusOK) {
@ -621,7 +579,7 @@ int32_t s3PutObjectFromFile2(const char *file, const char *object) {
goto clean;
}
clean:
clean:
if (manager.upload_id) {
taosMemoryFree(manager.upload_id);
}
@ -630,6 +588,237 @@ int32_t s3PutObjectFromFile2(const char *file, const char *object) {
}
growbuffer_destroy(manager.gb);
taosMemoryFree(manager.etags);
return code;
}
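// Multipart upload with checkpoint support: resume from a valid .cp file if one exists, otherwise start a new upload; per-part progress is persisted after each part completes.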
static int32_t s3PutObjectFromFileWithCp(S3BucketContext *bucket_context, const char *file, int32_t lmtime,
char const *object_name, int64_t contentLength, S3PutProperties *put_prop,
put_object_callback_data *data) {
int32_t code = 0;
uint64_t totalContentLength = contentLength;
// uint64_t todoContentLength = contentLength;
UploadManager manager = {0};
uint64_t chunk_size = MULTIPART_CHUNK_SIZE >> 3;
int totalSeq = (contentLength + chunk_size - 1) / chunk_size;
const int max_part_num = 10000;
if (totalSeq > max_part_num) {
chunk_size = (contentLength + max_part_num - contentLength % max_part_num) / max_part_num;
totalSeq = (contentLength + chunk_size - 1) / chunk_size;
}
bool need_init_upload = true;
char file_cp_path[TSDB_FILENAME_LEN];
snprintf(file_cp_path, TSDB_FILENAME_LEN, "%s.cp", file);
SCheckpoint cp = {0};
cp.parts = taosMemoryCalloc(max_part_num, sizeof(SCheckpointPart));
if (taosCheckExistFile(file_cp_path)) {
if (!cos_cp_load(file_cp_path, &cp) && cos_cp_is_valid_upload(&cp, contentLength, lmtime)) {
manager.upload_id = strdup(cp.upload_id);
need_init_upload = false;
} else {
cos_cp_remove(file_cp_path);
}
}
if (need_init_upload) {
S3MultipartInitialHandler handler = {{&responsePropertiesCallbackNull, &responseCompleteCallback},
&initial_multipart_callback};
do {
S3_initiate_multipart(bucket_context, object_name, 0, &handler, 0, timeoutMsG, &manager);
} while (S3_status_is_retryable(manager.status) && should_retry());
if (manager.upload_id == 0 || manager.status != S3StatusOK) {
s3PrintError(__FILE__, __LINE__, __func__, manager.status, manager.err_msg);
code = TAOS_SYSTEM_ERROR(EIO);
goto clean;
}
cos_cp_build_upload(&cp, file, contentLength, lmtime, manager.upload_id, chunk_size);
}
if (cos_cp_open(file_cp_path, &cp)) {
code = TAOS_SYSTEM_ERROR(EIO);
goto clean;
}
int part_num = 0;
int64_t consume_bytes = 0;
// SCheckpointPart *parts = taosMemoryCalloc(cp.part_num, sizeof(SCheckpointPart));
// cos_cp_get_undo_parts(&cp, &part_num, parts, &consume_bytes);
MultipartPartData partData;
memset(&partData, 0, sizeof(MultipartPartData));
int partContentLength = 0;
S3PutObjectHandler putObjectHandler = {{&MultipartResponseProperiesCallbackWithCp, &responseCompleteCallback},
&putObjectDataCallback};
S3MultipartCommitHandler commit_handler = {
{&responsePropertiesCallbackNull, &responseCompleteCallback}, &multipartPutXmlCallback, 0};
manager.etags = (char **)taosMemoryCalloc(totalSeq, sizeof(char *));
manager.next_etags_pos = 0;
upload:
// todoContentLength -= chunk_size * manager.next_etags_pos;
for (int i = 0; i < cp.part_num; ++i) {
if (cp.parts[i].completed) {
continue;
}
if (i > 0 && cp.parts[i - 1].completed) {
if (taosLSeekFile(data->infileFD, cp.parts[i].offset, SEEK_SET) < 0) {
code = TAOS_SYSTEM_ERROR(errno);
goto clean;
}
}
int seq = cp.parts[i].index + 1;
partData.manager = &manager;
partData.seq = seq;
if (partData.put_object_data.gb == NULL) {
partData.put_object_data = *data;
}
partContentLength = cp.parts[i].size;
partData.put_object_data.contentLength = partContentLength;
partData.put_object_data.originalContentLength = partContentLength;
// partData.put_object_data.totalContentLength = todoContentLength;
partData.put_object_data.totalOriginalContentLength = totalContentLength;
put_prop->md5 = 0;
do {
S3_upload_part(bucket_context, object_name, put_prop, &putObjectHandler, seq, manager.upload_id,
partContentLength, 0, timeoutMsG, &partData);
} while (S3_status_is_retryable(partData.put_object_data.status) && should_retry());
if (partData.put_object_data.status != S3StatusOK) {
s3PrintError(__FILE__, __LINE__, __func__, partData.put_object_data.status, partData.put_object_data.err_msg);
code = TAOS_SYSTEM_ERROR(EIO);
//(void)cos_cp_dump(&cp);
goto clean;
}
if (!manager.etags[seq - 1]) {
code = TAOS_SYSTEM_ERROR(EIO);
goto clean;
}
cos_cp_update(&cp, cp.parts[seq - 1].index, manager.etags[seq - 1], 0);
(void)cos_cp_dump(&cp);
contentLength -= chunk_size;
// todoContentLength -= chunk_size;
}
cos_cp_close(cp.thefile);
cp.thefile = 0;
int size = 0;
size += growbuffer_append(&(manager.gb), "<CompleteMultipartUpload>", strlen("<CompleteMultipartUpload>"));
char buf[256];
int n;
for (int i = 0; i < cp.part_num; ++i) {
n = snprintf(buf, sizeof(buf),
"<Part><PartNumber>%d</PartNumber>"
"<ETag>%s</ETag></Part>",
// i + 1, manager.etags[i]);
cp.parts[i].index + 1, cp.parts[i].etag);
size += growbuffer_append(&(manager.gb), buf, n);
}
size += growbuffer_append(&(manager.gb), "</CompleteMultipartUpload>", strlen("</CompleteMultipartUpload>"));
manager.remaining = size;
do {
S3_complete_multipart_upload(bucket_context, object_name, &commit_handler, manager.upload_id, manager.remaining, 0,
timeoutMsG, &manager);
} while (S3_status_is_retryable(manager.status) && should_retry());
if (manager.status != S3StatusOK) {
s3PrintError(__FILE__, __LINE__, __func__, manager.status, manager.err_msg);
code = TAOS_SYSTEM_ERROR(EIO);
goto clean;
}
cos_cp_remove(file_cp_path);
clean:
/*
if (parts) {
taosMemoryFree(parts);
}
*/
if (cp.thefile) {
cos_cp_close(cp.thefile);
}
if (cp.parts) {
taosMemoryFree(cp.parts);
}
if (manager.upload_id) {
taosMemoryFree(manager.upload_id);
}
for (int i = 0; i < cp.part_num; ++i) {
if (manager.etags[i]) {
taosMemoryFree(manager.etags[i]);
}
}
taosMemoryFree(manager.etags);
growbuffer_destroy(manager.gb);
return code;
}
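// Upload entry point: small files go through the single-shot PUT path, larger files use multipart upload, with or without checkpoint resume depending on withcp.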
int32_t s3PutObjectFromFile2(const char *file, const char *object_name, int8_t withcp) {
int32_t code = 0;
int32_t lmtime = 0;
const char *filename = 0;
uint64_t contentLength = 0;
const char *cacheControl = 0, *contentType = 0, *md5 = 0;
const char *contentDispositionFilename = 0, *contentEncoding = 0;
int64_t expires = -1;
S3CannedAcl cannedAcl = S3CannedAclPrivate;
int metaPropertiesCount = 0;
S3NameValue metaProperties[S3_MAX_METADATA_COUNT];
char useServerSideEncryption = 0;
put_object_callback_data data = {0};
if (taosStatFile(file, &contentLength, &lmtime, NULL) < 0) {
code = TAOS_SYSTEM_ERROR(errno);
uError("ERROR: %s Failed to stat file %s: ", __func__, file);
return code;
}
if (!(data.infileFD = taosOpenFile(file, TD_FILE_READ))) {
code = TAOS_SYSTEM_ERROR(errno);
uError("ERROR: %s Failed to open file %s: ", __func__, file);
return code;
}
data.totalContentLength = data.totalOriginalContentLength = data.contentLength = data.originalContentLength =
contentLength;
S3BucketContext bucketContext = {0, tsS3BucketName, protocolG, uriStyleG, tsS3AccessKeyId, tsS3AccessKeySecret,
0, awsRegionG};
S3PutProperties putProperties = {contentType, md5,
cacheControl, contentDispositionFilename,
contentEncoding, expires,
cannedAcl, metaPropertiesCount,
metaProperties, useServerSideEncryption};
if (contentLength <= MULTIPART_CHUNK_SIZE) {
code = s3PutObjectFromFileSimple(&bucketContext, object_name, contentLength, &putProperties, &data);
} else {
if (withcp) {
code = s3PutObjectFromFileWithCp(&bucketContext, file, lmtime, object_name, contentLength, &putProperties, &data);
} else {
code = s3PutObjectFromFileWithoutCp(&bucketContext, object_name, contentLength, &putProperties, &data);
}
}
if (data.infileFD) {
@ -648,7 +837,7 @@ typedef struct list_bucket_callback_data {
char nextMarker[1024];
int keyCount;
int allDetails;
SArray * objectArray;
SArray *objectArray;
} list_bucket_callback_data;
static S3Status listBucketCallback(int isTruncated, const char *nextMarker, int contentsCount,
@ -697,7 +886,7 @@ static SArray *getListByPrefix(const char *prefix) {
S3ListBucketHandler listBucketHandler = {{&responsePropertiesCallbackNull, &responseCompleteCallback},
&listBucketCallback};
const char * marker = 0, *delimiter = 0;
const char *marker = 0, *delimiter = 0;
int maxkeys = 0, allDetails = 0;
list_bucket_callback_data data;
data.objectArray = taosArrayInit(32, sizeof(void *));
@ -858,7 +1047,7 @@ int32_t s3GetObjectsByPrefix(const char *prefix, const char *path) {
if (objectArray == NULL) return -1;
for (size_t i = 0; i < taosArrayGetSize(objectArray); i++) {
char * object = taosArrayGetP(objectArray, i);
char *object = taosArrayGetP(objectArray, i);
const char *tmp = strchr(object, '/');
tmp = (tmp == NULL) ? object : tmp + 1;
char fileName[PATH_MAX] = {0};
@ -949,12 +1138,12 @@ static void s3InitRequestOptions(cos_request_options_t *options, int is_cname) {
int32_t s3PutObjectFromFile(const char *file_str, const char *object_str) {
int32_t code = 0;
cos_pool_t * p = NULL;
cos_pool_t *p = NULL;
int is_cname = 0;
cos_status_t * s = NULL;
cos_status_t *s = NULL;
cos_request_options_t *options = NULL;
cos_string_t bucket, object, file;
cos_table_t * resp_headers;
cos_table_t *resp_headers;
// int traffic_limit = 0;
cos_pool_create(&p, NULL);
@ -983,18 +1172,19 @@ int32_t s3PutObjectFromFile(const char *file_str, const char *object_str) {
return code;
}
int32_t s3PutObjectFromFile2(const char *file_str, const char *object_str) {
int32_t s3PutObjectFromFile2(const char *file_str, const char *object_str, int8_t withcp) {
int32_t code = 0;
cos_pool_t * p = NULL;
cos_pool_t *p = NULL;
int is_cname = 0;
cos_status_t * s = NULL;
cos_request_options_t * options = NULL;
cos_status_t *s = NULL;
cos_request_options_t *options = NULL;
cos_string_t bucket, object, file;
cos_table_t * resp_headers;
cos_table_t *resp_headers;
int traffic_limit = 0;
cos_table_t * headers = NULL;
cos_table_t *headers = NULL;
cos_resumable_clt_params_t *clt_params = NULL;
(void)withcp;
cos_pool_create(&p, NULL);
options = cos_request_options_create(p);
s3InitRequestOptions(options, is_cname);
@ -1025,11 +1215,11 @@ int32_t s3PutObjectFromFile2(const char *file_str, const char *object_str) {
}
void s3DeleteObjectsByPrefix(const char *prefix_str) {
cos_pool_t * p = NULL;
cos_pool_t *p = NULL;
cos_request_options_t *options = NULL;
int is_cname = 0;
cos_string_t bucket;
cos_status_t * s = NULL;
cos_status_t *s = NULL;
cos_string_t prefix;
cos_pool_create(&p, NULL);
@ -1044,10 +1234,10 @@ void s3DeleteObjectsByPrefix(const char *prefix_str) {
}
void s3DeleteObjects(const char *object_name[], int nobject) {
cos_pool_t * p = NULL;
cos_pool_t *p = NULL;
int is_cname = 0;
cos_string_t bucket;
cos_table_t * resp_headers = NULL;
cos_table_t *resp_headers = NULL;
cos_request_options_t *options = NULL;
cos_list_t object_list;
cos_list_t deleted_object_list;
@ -1081,14 +1271,14 @@ void s3DeleteObjects(const char *object_name[], int nobject) {
bool s3Exists(const char *object_name) {
bool ret = false;
cos_pool_t * p = NULL;
cos_pool_t *p = NULL;
int is_cname = 0;
cos_status_t * s = NULL;
cos_request_options_t * options = NULL;
cos_status_t *s = NULL;
cos_request_options_t *options = NULL;
cos_string_t bucket;
cos_string_t object;
cos_table_t * resp_headers;
cos_table_t * headers = NULL;
cos_table_t *resp_headers;
cos_table_t *headers = NULL;
cos_object_exist_status_e object_exist;
cos_pool_create(&p, NULL);
@ -1115,15 +1305,15 @@ bool s3Exists(const char *object_name) {
bool s3Get(const char *object_name, const char *path) {
bool ret = false;
cos_pool_t * p = NULL;
cos_pool_t *p = NULL;
int is_cname = 0;
cos_status_t * s = NULL;
cos_status_t *s = NULL;
cos_request_options_t *options = NULL;
cos_string_t bucket;
cos_string_t object;
cos_string_t file;
cos_table_t * resp_headers = NULL;
cos_table_t * headers = NULL;
cos_table_t *resp_headers = NULL;
cos_table_t *headers = NULL;
int traffic_limit = 0;
// create memory pool
@ -1159,15 +1349,15 @@ bool s3Get(const char *object_name, const char *path) {
int32_t s3GetObjectBlock(const char *object_name, int64_t offset, int64_t block_size, bool check, uint8_t **ppBlock) {
(void)check;
int32_t code = 0;
cos_pool_t * p = NULL;
cos_pool_t *p = NULL;
int is_cname = 0;
cos_status_t * s = NULL;
cos_status_t *s = NULL;
cos_request_options_t *options = NULL;
cos_string_t bucket;
cos_string_t object;
cos_table_t * resp_headers;
cos_table_t * headers = NULL;
cos_buf_t * content = NULL;
cos_table_t *resp_headers;
cos_table_t *headers = NULL;
cos_buf_t *content = NULL;
// cos_string_t file;
// int traffic_limit = 0;
char range_buf[64];
@ -1261,7 +1451,7 @@ void s3EvictCache(const char *path, long object_size) {
terrno = TAOS_SYSTEM_ERROR(errno);
vError("failed to open %s since %s", dir_name, terrstr());
}
SArray * evict_files = taosArrayInit(16, sizeof(SEvictFile));
SArray *evict_files = taosArrayInit(16, sizeof(SEvictFile));
tdbDirEntryPtr pDirEntry;
while ((pDirEntry = taosReadDir(pDir)) != NULL) {
char *name = taosGetDirEntryName(pDirEntry);
@ -1303,13 +1493,13 @@ void s3EvictCache(const char *path, long object_size) {
long s3Size(const char *object_name) {
long size = 0;
cos_pool_t * p = NULL;
cos_pool_t *p = NULL;
int is_cname = 0;
cos_status_t * s = NULL;
cos_status_t *s = NULL;
cos_request_options_t *options = NULL;
cos_string_t bucket;
cos_string_t object;
cos_table_t * resp_headers = NULL;
cos_table_t *resp_headers = NULL;
// create memory pool
cos_pool_create(&p, NULL);
@ -1344,7 +1534,7 @@ long s3Size(const char *object_name) {
int32_t s3Init() { return 0; }
void s3CleanUp() {}
int32_t s3PutObjectFromFile(const char *file, const char *object) { return 0; }
int32_t s3PutObjectFromFile2(const char *file, const char *object) { return 0; }
int32_t s3PutObjectFromFile2(const char *file, const char *object, int8_t withcp) { return 0; }
void s3DeleteObjectsByPrefix(const char *prefix) {}
void s3DeleteObjects(const char *object_name[], int nobject) {}
bool s3Exists(const char *object_name) { return false; }

source/common/src/cos_cp.c (new file, 417 lines)
View File

@ -0,0 +1,417 @@
#define ALLOW_FORBID_FUNC
#include "cos_cp.h"
#include "cJSON.h"
#include "tutil.h"
int32_t cos_cp_open(char const* cp_path, SCheckpoint* checkpoint) {
int32_t code = 0;
TdFilePtr fd = taosOpenFile(cp_path, TD_FILE_WRITE | TD_FILE_CREATE /* | TD_FILE_TRUNC*/ | TD_FILE_WRITE_THROUGH);
if (!fd) {
code = TAOS_SYSTEM_ERROR(errno);
uError("ERROR: %s Failed to open %s", __func__, cp_path);
return code;
}
checkpoint->thefile = fd;
return code;
}
void cos_cp_close(TdFilePtr fd) { taosCloseFile(&fd); }
void cos_cp_remove(char const* filepath) { taosRemoveFile(filepath); }
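// Parse the JSON checkpoint body into SCheckpoint; cp_body is freed before returning.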
static int32_t cos_cp_parse_body(char* cp_body, SCheckpoint* cp) {
int32_t code = 0;
cJSON const* item2 = NULL;
cJSON* json = cJSON_Parse(cp_body);
if (NULL == json) {
code = TSDB_CODE_FILE_CORRUPTED;
uError("ERROR: %s Failed to parse json", __func__);
goto _exit;
}
cJSON const* item = cJSON_GetObjectItem(json, "ver");
if (!cJSON_IsNumber(item) || item->valuedouble != 1) {
code = TSDB_CODE_FILE_CORRUPTED;
uError("ERROR: %s Failed to parse json ver: %f", __func__, item ? item->valuedouble : 0);
goto _exit;
}
item = cJSON_GetObjectItem(json, "type");
if (!cJSON_IsNumber(item)) {
code = TSDB_CODE_FILE_CORRUPTED;
uError("ERROR: %s Failed to parse json", __func__);
goto _exit;
}
cp->cp_type = item->valuedouble;
item = cJSON_GetObjectItem(json, "md5");
if (cJSON_IsString(item)) {
memcpy(cp->md5, item->valuestring, strlen(item->valuestring));
}
item = cJSON_GetObjectItem(json, "upload_id");
if (cJSON_IsString(item)) {
strncpy(cp->upload_id, item->valuestring, 128);
}
item2 = cJSON_GetObjectItem(json, "file");
if (cJSON_IsObject(item2)) {
item = cJSON_GetObjectItem(item2, "size");
if (cJSON_IsNumber(item)) {
cp->file_size = item->valuedouble;
}
item = cJSON_GetObjectItem(item2, "lastmodified");
if (cJSON_IsNumber(item)) {
cp->file_last_modified = item->valuedouble;
}
item = cJSON_GetObjectItem(item2, "path");
if (cJSON_IsString(item)) {
strncpy(cp->file_path, item->valuestring, TSDB_FILENAME_LEN);
}
item = cJSON_GetObjectItem(item2, "file_md5");
if (cJSON_IsString(item)) {
strncpy(cp->file_md5, item->valuestring, 64);
}
}
item2 = cJSON_GetObjectItem(json, "object");
if (cJSON_IsObject(item2)) {
item = cJSON_GetObjectItem(item2, "object_size");
if (cJSON_IsNumber(item)) {
cp->object_size = item->valuedouble;
}
item = cJSON_GetObjectItem(item2, "object_name");
if (cJSON_IsString(item)) {
strncpy(cp->object_name, item->valuestring, 128);
}
item = cJSON_GetObjectItem(item2, "object_last_modified");
if (cJSON_IsString(item)) {
strncpy(cp->object_last_modified, item->valuestring, 64);
}
item = cJSON_GetObjectItem(item2, "object_etag");
if (cJSON_IsString(item)) {
strncpy(cp->object_etag, item->valuestring, 128);
}
}
item2 = cJSON_GetObjectItem(json, "cpparts");
if (cJSON_IsObject(item2)) {
item = cJSON_GetObjectItem(item2, "number");
if (cJSON_IsNumber(item)) {
cp->part_num = item->valuedouble;
}
item = cJSON_GetObjectItem(item2, "size");
if (cJSON_IsNumber(item)) {
cp->part_size = item->valuedouble;
}
item2 = cJSON_GetObjectItem(item2, "parts");
if (cJSON_IsArray(item2) && cp->part_num > 0) {
cJSON_ArrayForEach(item, item2) {
cJSON const* item3 = cJSON_GetObjectItem(item, "index");
int32_t index = 0;
if (cJSON_IsNumber(item3)) {
index = item3->valuedouble;
cp->parts[index].index = index;
}
item3 = cJSON_GetObjectItem(item, "offset");
if (cJSON_IsNumber(item3)) {
cp->parts[index].offset = item3->valuedouble;
}
item3 = cJSON_GetObjectItem(item, "size");
if (cJSON_IsNumber(item3)) {
cp->parts[index].size = item3->valuedouble;
}
item3 = cJSON_GetObjectItem(item, "completed");
if (cJSON_IsNumber(item3)) {
cp->parts[index].completed = item3->valuedouble;
}
item3 = cJSON_GetObjectItem(item, "crc64");
if (cJSON_IsNumber(item3)) {
cp->parts[index].crc64 = item3->valuedouble;
}
item3 = cJSON_GetObjectItem(item, "etag");
if (cJSON_IsString(item3)) {
strncpy(cp->parts[index].etag, item3->valuestring, 128);
}
}
}
}
_exit:
if (json) cJSON_Delete(json);
if (cp_body) taosMemoryFree(cp_body);
return code;
}
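// Read a checkpoint file from disk and parse it into SCheckpoint.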
int32_t cos_cp_load(char const* filepath, SCheckpoint* checkpoint) {
int32_t code = 0;
TdFilePtr fd = taosOpenFile(filepath, TD_FILE_READ);
if (!fd) {
code = TAOS_SYSTEM_ERROR(errno);
uError("ERROR: %s Failed to open %s", __func__, filepath);
goto _exit;
}
int64_t size = -1;
code = taosStatFile(filepath, &size, NULL, NULL);
if (code) {
uError("ERROR: %s Failed to stat %s", __func__, filepath);
goto _exit;
}
char* cp_body = taosMemoryMalloc(size + 1);
int64_t n = taosReadFile(fd, cp_body, size);
if (n < 0) {
code = TAOS_SYSTEM_ERROR(errno);
uError("ERROR: %s Failed to read %s", __func__, filepath);
goto _exit;
} else if (n != size) {
code = TSDB_CODE_FILE_CORRUPTED;
uError("ERROR: %s Failed to read %s %" PRId64 "/%" PRId64, __func__, filepath, n, size);
goto _exit;
}
taosCloseFile(&fd);
cp_body[size] = '\0';
return cos_cp_parse_body(cp_body, checkpoint);
_exit:
if (fd) {
taosCloseFile(&fd);
}
if (cp_body) {
taosMemoryFree(cp_body);
}
return code;
}
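// Truncate the checkpoint file, write the JSON snapshot and fsync it so progress survives a crash.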
static int32_t cos_cp_save_json(cJSON const* json, SCheckpoint* checkpoint) {
int32_t code = 0;
char* data = cJSON_PrintUnformatted(json);
if (NULL == data) {
return TSDB_CODE_OUT_OF_MEMORY;
}
TdFilePtr fp = checkpoint->thefile;
if (taosFtruncateFile(fp, 0) < 0) {
code = TAOS_SYSTEM_ERROR(errno);
goto _exit;
}
if (taosLSeekFile(fp, 0, SEEK_SET) < 0) {
code = TAOS_SYSTEM_ERROR(errno);
goto _exit;
}
if (taosWriteFile(fp, data, strlen(data)) < 0) {
code = TAOS_SYSTEM_ERROR(errno);
goto _exit;
}
if (taosFsyncFile(fp) < 0) {
code = TAOS_SYSTEM_ERROR(errno);
goto _exit;
}
_exit:
taosMemoryFree(data);
return code;
}
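// Serialize the in-memory checkpoint to JSON and persist it via cos_cp_save_json.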
int32_t cos_cp_dump(SCheckpoint* cp) {
int32_t code = 0;
int32_t lino = 0;
cJSON* ojson = NULL;
cJSON* json = cJSON_CreateObject();
if (!json) return TSDB_CODE_OUT_OF_MEMORY;
if (NULL == cJSON_AddNumberToObject(json, "ver", 1)) {
code = TSDB_CODE_OUT_OF_MEMORY;
TSDB_CHECK_CODE(code, lino, _exit);
}
if (NULL == cJSON_AddNumberToObject(json, "type", cp->cp_type)) {
code = TSDB_CODE_OUT_OF_MEMORY;
TSDB_CHECK_CODE(code, lino, _exit);
}
if (NULL == cJSON_AddStringToObject(json, "md5", cp->md5)) {
code = TSDB_CODE_OUT_OF_MEMORY;
TSDB_CHECK_CODE(code, lino, _exit);
}
if (NULL == cJSON_AddStringToObject(json, "upload_id", cp->upload_id)) {
code = TSDB_CODE_OUT_OF_MEMORY;
TSDB_CHECK_CODE(code, lino, _exit);
}
if (COS_CP_TYPE_UPLOAD == cp->cp_type) {
ojson = cJSON_AddObjectToObject(json, "file");
if (!ojson) {
code = TSDB_CODE_OUT_OF_MEMORY;
TSDB_CHECK_CODE(code, lino, _exit);
}
if (NULL == cJSON_AddNumberToObject(ojson, "size", cp->file_size)) {
code = TSDB_CODE_OUT_OF_MEMORY;
TSDB_CHECK_CODE(code, lino, _exit);
}
if (NULL == cJSON_AddNumberToObject(ojson, "lastmodified", cp->file_last_modified)) {
code = TSDB_CODE_OUT_OF_MEMORY;
TSDB_CHECK_CODE(code, lino, _exit);
}
if (NULL == cJSON_AddStringToObject(ojson, "path", cp->file_path)) {
code = TSDB_CODE_OUT_OF_MEMORY;
TSDB_CHECK_CODE(code, lino, _exit);
}
if (NULL == cJSON_AddStringToObject(ojson, "file_md5", cp->file_md5)) {
code = TSDB_CODE_OUT_OF_MEMORY;
TSDB_CHECK_CODE(code, lino, _exit);
}
} else if (COS_CP_TYPE_DOWNLOAD == cp->cp_type) {
ojson = cJSON_AddObjectToObject(json, "object");
if (!ojson) {
code = TSDB_CODE_OUT_OF_MEMORY;
TSDB_CHECK_CODE(code, lino, _exit);
}
if (NULL == cJSON_AddNumberToObject(ojson, "object_size", cp->object_size)) {
code = TSDB_CODE_OUT_OF_MEMORY;
TSDB_CHECK_CODE(code, lino, _exit);
}
if (NULL == cJSON_AddStringToObject(ojson, "object_name", cp->object_name)) {
code = TSDB_CODE_OUT_OF_MEMORY;
TSDB_CHECK_CODE(code, lino, _exit);
}
if (NULL == cJSON_AddStringToObject(ojson, "object_last_modified", cp->object_last_modified)) {
code = TSDB_CODE_OUT_OF_MEMORY;
TSDB_CHECK_CODE(code, lino, _exit);
}
if (NULL == cJSON_AddStringToObject(ojson, "object_etag", cp->object_etag)) {
code = TSDB_CODE_OUT_OF_MEMORY;
TSDB_CHECK_CODE(code, lino, _exit);
}
}
ojson = cJSON_AddObjectToObject(json, "cpparts");
if (!ojson) {
code = TSDB_CODE_OUT_OF_MEMORY;
TSDB_CHECK_CODE(code, lino, _exit);
}
if (NULL == cJSON_AddNumberToObject(ojson, "number", cp->part_num)) {
code = TSDB_CODE_OUT_OF_MEMORY;
TSDB_CHECK_CODE(code, lino, _exit);
}
if (NULL == cJSON_AddNumberToObject(ojson, "size", cp->part_size)) {
code = TSDB_CODE_OUT_OF_MEMORY;
TSDB_CHECK_CODE(code, lino, _exit);
}
cJSON* ajson = cJSON_AddArrayToObject(ojson, "parts");
if (!ajson) {
code = TSDB_CODE_OUT_OF_MEMORY;
TSDB_CHECK_CODE(code, lino, _exit);
}
for (int i = 0; i < cp->part_num; ++i) {
cJSON* item = cJSON_CreateObject();
if (!item) {
code = TSDB_CODE_OUT_OF_MEMORY;
TSDB_CHECK_CODE(code, lino, _exit);
}
cJSON_AddItemToArray(ajson, item);
if (NULL == cJSON_AddNumberToObject(item, "index", cp->parts[i].index)) {
code = TSDB_CODE_OUT_OF_MEMORY;
TSDB_CHECK_CODE(code, lino, _exit);
}
if (NULL == cJSON_AddNumberToObject(item, "offset", cp->parts[i].offset)) {
code = TSDB_CODE_OUT_OF_MEMORY;
TSDB_CHECK_CODE(code, lino, _exit);
}
if (NULL == cJSON_AddNumberToObject(item, "size", cp->parts[i].size)) {
code = TSDB_CODE_OUT_OF_MEMORY;
TSDB_CHECK_CODE(code, lino, _exit);
}
if (NULL == cJSON_AddNumberToObject(item, "completed", cp->parts[i].completed)) {
code = TSDB_CODE_OUT_OF_MEMORY;
TSDB_CHECK_CODE(code, lino, _exit);
}
if (NULL == cJSON_AddNumberToObject(item, "crc64", cp->parts[i].crc64)) {
code = TSDB_CODE_OUT_OF_MEMORY;
TSDB_CHECK_CODE(code, lino, _exit);
}
if (NULL == cJSON_AddStringToObject(item, "etag", cp->parts[i].etag)) {
code = TSDB_CODE_OUT_OF_MEMORY;
TSDB_CHECK_CODE(code, lino, _exit);
}
}
code = cos_cp_save_json(json, cp);
TSDB_CHECK_CODE(code, lino, _exit);
_exit:
if (code) {
uError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
}
cJSON_Delete(json);
return code;
}
void cos_cp_get_undo_parts(SCheckpoint* checkpoint, int* part_num, SCheckpointPart* parts, int64_t* consume_bytes) {}
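// Mark the given part as completed and record its ETag and crc64 in the in-memory checkpoint.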
void cos_cp_update(SCheckpoint* checkpoint, int32_t part_index, char const* etag, uint64_t crc64) {
checkpoint->parts[part_index].completed = 1;
strncpy(checkpoint->parts[part_index].etag, etag, 128);
checkpoint->parts[part_index].crc64 = crc64;
}
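// Initialize an upload checkpoint: record the file's metadata and upload id, and split the file into part_size-sized parts.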
void cos_cp_build_upload(SCheckpoint* checkpoint, char const* filepath, int64_t size, int32_t mtime,
char const* upload_id, int64_t part_size) {
int i = 0;
checkpoint->cp_type = COS_CP_TYPE_UPLOAD;
strncpy(checkpoint->file_path, filepath, TSDB_FILENAME_LEN);
checkpoint->file_size = size;
checkpoint->file_last_modified = mtime;
strncpy(checkpoint->upload_id, upload_id, 128);
checkpoint->part_size = part_size;
for (; i * part_size < size; i++) {
checkpoint->parts[i].index = i;
checkpoint->parts[i].offset = i * part_size;
checkpoint->parts[i].size = TMIN(part_size, (size - i * part_size));
checkpoint->parts[i].completed = 0;
checkpoint->parts[i].etag[0] = '\0';
}
checkpoint->part_num = i;
}
static bool cos_cp_verify_md5(SCheckpoint* cp) { return true; }
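// A checkpoint can only be reused if the source file's size and mtime still match the recorded values.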
bool cos_cp_is_valid_upload(SCheckpoint* checkpoint, int64_t size, int32_t mtime) {
if (cos_cp_verify_md5(checkpoint) && checkpoint->file_size == size && checkpoint->file_last_modified == mtime) {
return true;
}
return false;
}

View File

@ -97,6 +97,7 @@ bool tsMonitorComp = false;
// audit
bool tsEnableAudit = true;
bool tsEnableAuditCreateTable = true;
int32_t tsAuditInterval = 5000;
// telem
#ifdef TD_ENTERPRISE
@ -686,6 +687,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
if (cfgAddBool(pCfg, "audit", tsEnableAudit, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER) != 0) return -1;
if (cfgAddBool(pCfg, "auditCreateTable", tsEnableAuditCreateTable, CFG_SCOPE_SERVER, CFG_DYN_NONE) != 0) return -1;
if (cfgAddInt32(pCfg, "auditInterval", tsAuditInterval, 500, 200000, CFG_SCOPE_SERVER, CFG_DYN_NONE) != 0)
return -1;
if (cfgAddBool(pCfg, "crashReporting", tsEnableCrashReport, CFG_SCOPE_BOTH, CFG_DYN_NONE) != 0) return -1;
if (cfgAddBool(pCfg, "telemetryReporting", tsEnableTelem, CFG_SCOPE_BOTH, CFG_DYN_ENT_SERVER) != 0) return -1;
@ -1137,6 +1140,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
tsEnableAudit = cfgGetItem(pCfg, "audit")->bval;
tsEnableAuditCreateTable = cfgGetItem(pCfg, "auditCreateTable")->bval;
tsAuditInterval = cfgGetItem(pCfg, "auditInterval")->i32;
tsEnableTelem = cfgGetItem(pCfg, "telemetryReporting")->bval;
tsEnableCrashReport = cfgGetItem(pCfg, "crashReporting")->bval;

View File

@ -30,12 +30,14 @@ typedef struct SDnodeMgmt {
TdThread statusThread;
TdThread notifyThread;
TdThread monitorThread;
TdThread auditThread;
TdThread crashReportThread;
SSingleWorker mgmtWorker;
ProcessCreateNodeFp processCreateNodeFp;
ProcessAlterNodeTypeFp processAlterNodeTypeFp;
ProcessDropNodeFp processDropNodeFp;
SendMonitorReportFp sendMonitorReportFp;
SendAuditRecordsFp sendAuditRecordsFp;
GetVnodeLoadsFp getVnodeLoadsFp;
GetVnodeLoadsFp getVnodeLoadsLiteFp;
GetMnodeLoadsFp getMnodeLoadsFp;
@ -62,7 +64,9 @@ void dmStopStatusThread(SDnodeMgmt *pMgmt);
int32_t dmStartNotifyThread(SDnodeMgmt *pMgmt);
void dmStopNotifyThread(SDnodeMgmt *pMgmt);
int32_t dmStartMonitorThread(SDnodeMgmt *pMgmt);
int32_t dmStartAuditThread(SDnodeMgmt *pMgmt);
void dmStopMonitorThread(SDnodeMgmt *pMgmt);
void dmStopAuditThread(SDnodeMgmt *pMgmt);
int32_t dmStartCrashReportThread(SDnodeMgmt *pMgmt);
void dmStopCrashReportThread(SDnodeMgmt *pMgmt);
int32_t dmStartWorker(SDnodeMgmt *pMgmt);

View File

@ -29,6 +29,9 @@ static int32_t dmStartMgmt(SDnodeMgmt *pMgmt) {
if (dmStartMonitorThread(pMgmt) != 0) {
return -1;
}
if (dmStartAuditThread(pMgmt) != 0) {
return -1;
}
if (dmStartCrashReportThread(pMgmt) != 0) {
return -1;
}
@ -38,6 +41,7 @@ static int32_t dmStartMgmt(SDnodeMgmt *pMgmt) {
static void dmStopMgmt(SDnodeMgmt *pMgmt) {
pMgmt->pData->stopped = true;
dmStopMonitorThread(pMgmt);
dmStopAuditThread(pMgmt);
dmStopStatusThread(pMgmt);
#if defined(TD_ENTERPRISE)
dmStopNotifyThread(pMgmt);
@ -60,6 +64,7 @@ static int32_t dmOpenMgmt(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) {
pMgmt->processAlterNodeTypeFp = pInput->processAlterNodeTypeFp;
pMgmt->processDropNodeFp = pInput->processDropNodeFp;
pMgmt->sendMonitorReportFp = pInput->sendMonitorReportFp;
pMgmt->sendAuditRecordsFp = pInput->sendAuditRecordFp;
pMgmt->getVnodeLoadsFp = pInput->getVnodeLoadsFp;
pMgmt->getVnodeLoadsLiteFp = pInput->getVnodeLoadsLiteFp;
pMgmt->getMnodeLoadsFp = pInput->getMnodeLoadsFp;

View File

@ -99,6 +99,27 @@ static void *dmMonitorThreadFp(void *param) {
return NULL;
}
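// Audit thread: wakes every 100 ms and flushes the batched audit records once tsAuditInterval milliseconds have elapsed.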
static void *dmAuditThreadFp(void *param) {
SDnodeMgmt *pMgmt = param;
int64_t lastTime = taosGetTimestampMs();
setThreadName("dnode-audit");
while (1) {
taosMsleep(100);
if (pMgmt->pData->dropped || pMgmt->pData->stopped) break;
int64_t curTime = taosGetTimestampMs();
if (curTime < lastTime) lastTime = curTime;
float interval = curTime - lastTime;
if (interval >= tsAuditInterval) {
(*pMgmt->sendAuditRecordsFp)();
lastTime = curTime;
}
}
return NULL;
}
static void *dmCrashReportThreadFp(void *param) {
SDnodeMgmt *pMgmt = param;
int64_t lastTime = taosGetTimestampMs();
@ -218,6 +239,20 @@ int32_t dmStartMonitorThread(SDnodeMgmt *pMgmt) {
return 0;
}
int32_t dmStartAuditThread(SDnodeMgmt *pMgmt) {
TdThreadAttr thAttr;
taosThreadAttrInit(&thAttr);
taosThreadAttrSetDetachState(&thAttr, PTHREAD_CREATE_JOINABLE);
if (taosThreadCreate(&pMgmt->auditThread, &thAttr, dmAuditThreadFp, pMgmt) != 0) {
dError("failed to create audit thread since %s", strerror(errno));
return -1;
}
taosThreadAttrDestroy(&thAttr);
tmsgReportStartup("dnode-audit", "initialized");
return 0;
}
void dmStopMonitorThread(SDnodeMgmt *pMgmt) {
if (taosCheckPthreadValid(pMgmt->monitorThread)) {
taosThreadJoin(pMgmt->monitorThread, NULL);
@ -225,6 +260,13 @@ void dmStopMonitorThread(SDnodeMgmt *pMgmt) {
}
}
void dmStopAuditThread(SDnodeMgmt *pMgmt) {
if (taosCheckPthreadValid(pMgmt->auditThread)) {
taosThreadJoin(pMgmt->auditThread, NULL);
taosThreadClear(&pMgmt->auditThread);
}
}
int32_t dmStartCrashReportThread(SDnodeMgmt *pMgmt) {
if (!tsEnableCrashReport) {
return 0;

View File

@ -70,6 +70,21 @@ typedef struct SUdfdData {
int32_t dnodeId;
} SUdfdData;
#ifndef TD_MODULE_OPTIMIZE
typedef struct SDnode {
int8_t once;
bool stop;
EDndRunStatus status;
SStartupInfo startup;
SDnodeData data;
SUdfdData udfdData;
TdThreadMutex mutex;
TdFilePtr lockfile;
STfs *pTfs;
SMgmtWrapper wrappers[NODE_END];
SDnodeTrans trans;
} SDnode;
#else
typedef struct SDnode {
int8_t once;
bool stop;
@ -83,6 +98,7 @@ typedef struct SDnode {
STfs *pTfs;
SMgmtWrapper wrappers[NODE_END];
} SDnode;
#endif
// dmEnv.c
SDnode *dmInstance();
@ -124,6 +140,7 @@ int32_t dmProcessNodeMsg(SMgmtWrapper *pWrapper, SRpcMsg *pMsg);
// dmMonitor.c
void dmSendMonitorReport();
void dmSendAuditRecords();
void dmGetVnodeLoads(SMonVloadInfo *pInfo);
void dmGetVnodeLoadsLite(SMonVloadInfo *pInfo);
void dmGetMnodeLoads(SMonMloadInfo *pInfo);

View File

@ -189,6 +189,7 @@ void dmCleanup() {
if (dmCheckRepeatCleanup(pDnode) != 0) return;
dmCleanupDnode(pDnode);
monCleanup();
auditCleanup();
syncCleanUp();
walCleanUp();
udfcClose();
@ -396,6 +397,7 @@ SMgmtInputOpt dmBuildMgmtInputOpt(SMgmtWrapper *pWrapper) {
.processAlterNodeTypeFp = dmProcessAlterNodeTypeReq,
.processDropNodeFp = dmProcessDropNodeReq,
.sendMonitorReportFp = dmSendMonitorReport,
.sendAuditRecordFp = auditSendRecordsInBatch,
.getVnodeLoadsFp = dmGetVnodeLoads,
.getVnodeLoadsLiteFp = dmGetVnodeLoadsLite,
.getMnodeLoadsFp = dmGetMnodeLoads,

View File

@ -16,6 +16,7 @@
#define _DEFAULT_SOURCE
#include "dmMgmt.h"
#include "dmNodes.h"
#include "audit.h"
static void dmGetMonitorBasicInfo(SDnode *pDnode, SMonBasicInfo *pInfo) {
pInfo->protocol = 1;
@ -108,6 +109,11 @@ void dmSendMonitorReport() {
monSendReport();
}
// TODO: put this in a separate file in the future
void dmSendAuditRecords() {
auditSendRecordsInBatch();
}
void dmGetVnodeLoads(SMonVloadInfo *pInfo) {
SDnode *pDnode = dmInstance();
SMgmtWrapper *pWrapper = &pDnode->wrappers[VNODE];

View File

@ -86,6 +86,7 @@ typedef enum {
typedef int32_t (*ProcessCreateNodeFp)(EDndNodeType ntype, SRpcMsg *pMsg);
typedef int32_t (*ProcessDropNodeFp)(EDndNodeType ntype, SRpcMsg *pMsg);
typedef void (*SendMonitorReportFp)();
typedef void (*SendAuditRecordsFp)();
typedef void (*GetVnodeLoadsFp)(SMonVloadInfo *pInfo);
typedef void (*GetMnodeLoadsFp)(SMonMloadInfo *pInfo);
typedef void (*GetQnodeLoadsFp)(SQnodeLoad *pInfo);
@ -120,6 +121,7 @@ typedef struct {
ProcessAlterNodeTypeFp processAlterNodeTypeFp;
ProcessDropNodeFp processDropNodeFp;
SendMonitorReportFp sendMonitorReportFp;
SendAuditRecordsFp sendAuditRecordFp;
GetVnodeLoadsFp getVnodeLoadsFp;
GetVnodeLoadsFp getVnodeLoadsLiteFp;
GetMnodeLoadsFp getMnodeLoadsFp;

View File

@ -37,6 +37,7 @@ TEST_F(DndTestQnode, 01_Create_Qnode) {
SRpcMsg* pRsp = test.SendReq(TDMT_DND_CREATE_QNODE, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, TSDB_CODE_INVALID_OPTION);
rpcFreeCont(pRsp->pCont);
}
{
@ -50,6 +51,7 @@ TEST_F(DndTestQnode, 01_Create_Qnode) {
SRpcMsg* pRsp = test.SendReq(TDMT_DND_CREATE_QNODE, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);
}
{
@ -63,6 +65,7 @@ TEST_F(DndTestQnode, 01_Create_Qnode) {
SRpcMsg* pRsp = test.SendReq(TDMT_DND_CREATE_QNODE, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, TSDB_CODE_QNODE_ALREADY_DEPLOYED);
rpcFreeCont(pRsp->pCont);
}
test.Restart();
@ -78,6 +81,7 @@ TEST_F(DndTestQnode, 01_Create_Qnode) {
SRpcMsg* pRsp = test.SendReq(TDMT_DND_CREATE_QNODE, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, TSDB_CODE_QNODE_ALREADY_DEPLOYED);
rpcFreeCont(pRsp->pCont);
}
}
@ -94,6 +98,7 @@ TEST_F(DndTestQnode, 02_Drop_Qnode) {
SRpcMsg* pRsp = test.SendReq(TDMT_DND_DROP_QNODE, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, TSDB_CODE_INVALID_OPTION);
rpcFreeCont(pRsp->pCont);
}
#endif
@ -108,6 +113,7 @@ TEST_F(DndTestQnode, 02_Drop_Qnode) {
SRpcMsg* pRsp = test.SendReq(TDMT_DND_DROP_QNODE, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);
}
{
@ -121,6 +127,7 @@ TEST_F(DndTestQnode, 02_Drop_Qnode) {
SRpcMsg* pRsp = test.SendReq(TDMT_DND_DROP_QNODE, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, TSDB_CODE_QNODE_NOT_DEPLOYED);
rpcFreeCont(pRsp->pCont);
}
test.Restart();
@ -136,6 +143,7 @@ TEST_F(DndTestQnode, 02_Drop_Qnode) {
SRpcMsg* pRsp = test.SendReq(TDMT_DND_DROP_QNODE, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, TSDB_CODE_QNODE_NOT_DEPLOYED);
rpcFreeCont(pRsp->pCont);
}
{
@ -149,5 +157,6 @@ TEST_F(DndTestQnode, 02_Drop_Qnode) {
SRpcMsg* pRsp = test.SendReq(TDMT_DND_CREATE_QNODE, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);
}
}

View File

@ -37,6 +37,7 @@ TEST_F(DndTestSnode, 01_Create_Snode) {
SRpcMsg* pRsp = test.SendReq(TDMT_DND_CREATE_SNODE, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, TSDB_CODE_INVALID_OPTION);
rpcFreeCont(pRsp->pCont);
}
{
@ -50,6 +51,7 @@ TEST_F(DndTestSnode, 01_Create_Snode) {
SRpcMsg* pRsp = test.SendReq(TDMT_DND_CREATE_SNODE, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);
}
{
@ -63,6 +65,7 @@ TEST_F(DndTestSnode, 01_Create_Snode) {
SRpcMsg* pRsp = test.SendReq(TDMT_DND_CREATE_SNODE, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, TSDB_CODE_SNODE_ALREADY_DEPLOYED);
rpcFreeCont(pRsp->pCont);
}
test.Restart();
@ -78,6 +81,7 @@ TEST_F(DndTestSnode, 01_Create_Snode) {
SRpcMsg* pRsp = test.SendReq(TDMT_DND_CREATE_SNODE, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, TSDB_CODE_SNODE_ALREADY_DEPLOYED);
rpcFreeCont(pRsp->pCont);
}
}
@ -94,6 +98,7 @@ TEST_F(DndTestSnode, 01_Drop_Snode) {
SRpcMsg* pRsp = test.SendReq(TDMT_DND_DROP_SNODE, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, TSDB_CODE_INVALID_OPTION);
rpcFreeCont(pRsp->pCont);
}
#endif
@ -108,6 +113,7 @@ TEST_F(DndTestSnode, 01_Drop_Snode) {
SRpcMsg* pRsp = test.SendReq(TDMT_DND_DROP_SNODE, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);
}
{
@ -121,6 +127,7 @@ TEST_F(DndTestSnode, 01_Drop_Snode) {
SRpcMsg* pRsp = test.SendReq(TDMT_DND_DROP_SNODE, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, TSDB_CODE_SNODE_NOT_DEPLOYED);
rpcFreeCont(pRsp->pCont);
}
test.Restart();
@ -136,6 +143,7 @@ TEST_F(DndTestSnode, 01_Drop_Snode) {
SRpcMsg* pRsp = test.SendReq(TDMT_DND_DROP_SNODE, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, TSDB_CODE_SNODE_NOT_DEPLOYED);
rpcFreeCont(pRsp->pCont);
}
{
@ -149,5 +157,6 @@ TEST_F(DndTestSnode, 01_Drop_Snode) {
SRpcMsg* pRsp = test.SendReq(TDMT_DND_CREATE_SNODE, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);
}
}

View File

@ -705,7 +705,7 @@ static int32_t mndPersistTaskDropReq(SMnode *pMnode, STrans *pTrans, SStreamTask
STransAction action = {0};
SEpSet epset = {0};
if(pTask->info.nodeId == SNODE_HANDLE){
if (pTask->info.nodeId == SNODE_HANDLE) {
SSnodeObj *pObj = NULL;
void *pIter = NULL;
while (1) {
@ -717,10 +717,16 @@ static int32_t mndPersistTaskDropReq(SMnode *pMnode, STrans *pTrans, SStreamTask
addEpIntoEpSet(&epset, pObj->pDnode->fqdn, pObj->pDnode->port);
sdbRelease(pMnode->pSdb, pObj);
}
}else{
} else {
SVgObj *pVgObj = mndAcquireVgroup(pMnode, pTask->info.nodeId);
if (pVgObj != NULL) {
epset = mndGetVgroupEpset(pMnode, pVgObj);
mndReleaseVgroup(pMnode, pVgObj);
} else {
mDebug("orphaned task:0x%x need to be dropped, nodeId:%d, no redo action", pTask->id.taskId, pTask->info.nodeId);
taosMemoryFree(pReq);
return 0;
}
}
// The epset of nodeId of this task may have been expired now, let's use the newest epset from mnode.
@ -1657,6 +1663,7 @@ static void setTaskAttrInResBlock(SStreamObj *pStream, SStreamTask *pTask, SSDat
STaskStatusEntry *pe = taosHashGet(execInfo.pTaskMap, &id, sizeof(id));
if (pe == NULL) {
mError("task:0x%" PRIx64 " not exists in vnode, no valid status/stage info", id.taskId);
return;
}

View File

@ -48,8 +48,7 @@ int32_t clearFinishedTrans(SMnode* pMnode) {
void* pKey = taosHashGetKey(pEntry, &keyLen);
// key is the name of src/dst db name
SKeyInfo info = {.pKey = pKey, .keyLen = keyLen};
mDebug("transId:%d %s startTs:%" PRId64 "cleared due to finished", pEntry->transId, pEntry->name,
mDebug("transId:%d %s startTs:%" PRId64 " cleared since finished", pEntry->transId, pEntry->name,
pEntry->startTime);
taosArrayPush(pList, &info);
} else {

View File

@ -33,6 +33,7 @@ TEST_F(MndTestAcct, 01_Create_Acct) {
SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_ACCT, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, TSDB_CODE_OPS_NOT_SUPPORT);
rpcFreeCont(pRsp->pCont);
}
TEST_F(MndTestAcct, 02_Alter_Acct) {
@ -43,6 +44,7 @@ TEST_F(MndTestAcct, 02_Alter_Acct) {
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_ACCT, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, TSDB_CODE_OPS_NOT_SUPPORT);
rpcFreeCont(pRsp->pCont);
}
TEST_F(MndTestAcct, 03_Drop_Acct) {
@ -53,4 +55,5 @@ TEST_F(MndTestAcct, 03_Drop_Acct) {
SRpcMsg* pRsp = test.SendReq(TDMT_MND_DROP_ACCT, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, TSDB_CODE_OPS_NOT_SUPPORT);
rpcFreeCont(pRsp->pCont);
}

View File

@ -253,12 +253,14 @@ TEST_F(MndTestSma, 02_Create_Show_Meta_Drop_Restart_BSma) {
pReq = BuildCreateDbReq(dbname, &contLen);
pRsp = test.SendReq(TDMT_MND_CREATE_DB, pReq, contLen);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);
}
{
pReq = BuildCreateBSmaStbReq(stbname, &contLen);
pRsp = test.SendReq(TDMT_MND_CREATE_STB, pReq, contLen);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);
test.SendShowReq(TSDB_MGMT_TABLE_STB, "ins_stables", dbname);
EXPECT_EQ(test.GetShowRows(), 1);
}
@ -269,12 +271,14 @@ TEST_F(MndTestSma, 02_Create_Show_Meta_Drop_Restart_BSma) {
pReq = BuildCreateBSmaStbReq(stbname, &contLen);
pRsp = test.SendReq(TDMT_MND_CREATE_STB, pReq, contLen);
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_STB_ALREADY_EXIST);
rpcFreeCont(pRsp->pCont);
}
{
pReq = BuildDropStbReq(stbname, &contLen);
pRsp = test.SendReq(TDMT_MND_DROP_STB, pReq, contLen);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);
test.SendShowReq(TSDB_MGMT_TABLE_STB, "ins_stables", dbname);
EXPECT_EQ(test.GetShowRows(), 0);
}
@ -283,5 +287,6 @@ TEST_F(MndTestSma, 02_Create_Show_Meta_Drop_Restart_BSma) {
pReq = BuildDropStbReq(stbname, &contLen);
pRsp = test.SendReq(TDMT_MND_DROP_STB, pReq, contLen);
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_STB_NOT_EXIST);
rpcFreeCont(pRsp->pCont);
}
}

View File

@ -155,6 +155,7 @@ void* MndTestStb::BuildAlterStbAddTagReq(const char* stbname, const char* tagnam
tSerializeSMAlterStbReq(pHead, contLen, &req);
*pContLen = contLen;
taosArrayDestroy(req.pFields);
return pHead;
}
@ -176,6 +177,7 @@ void* MndTestStb::BuildAlterStbDropTagReq(const char* stbname, const char* tagna
tSerializeSMAlterStbReq(pHead, contLen, &req);
*pContLen = contLen;
taosArrayDestroy(req.pFields);
return pHead;
}
@ -204,6 +206,7 @@ void* MndTestStb::BuildAlterStbUpdateTagNameReq(const char* stbname, const char*
tSerializeSMAlterStbReq(pHead, contLen, &req);
*pContLen = contLen;
taosArrayDestroy(req.pFields);
return pHead;
}
@ -226,6 +229,7 @@ void* MndTestStb::BuildAlterStbUpdateTagBytesReq(const char* stbname, const char
tSerializeSMAlterStbReq(pHead, contLen, &req);
*pContLen = contLen;
taosArrayDestroy(req.pFields);
return pHead;
}
@ -247,6 +251,7 @@ void* MndTestStb::BuildAlterStbAddColumnReq(const char* stbname, const char* col
tSerializeSMAlterStbReq(pHead, contLen, &req);
*pContLen = contLen;
taosArrayDestroy(req.pFields);
return pHead;
}
@ -268,6 +273,7 @@ void* MndTestStb::BuildAlterStbDropColumnReq(const char* stbname, const char* co
tSerializeSMAlterStbReq(pHead, contLen, &req);
*pContLen = contLen;
taosArrayDestroy(req.pFields);
return pHead;
}
@ -290,6 +296,7 @@ void* MndTestStb::BuildAlterStbUpdateColumnBytesReq(const char* stbname, const c
tSerializeSMAlterStbReq(pHead, contLen, &req);
*pContLen = contLen;
taosArrayDestroy(req.pFields);
return pHead;
}
@ -303,6 +310,7 @@ TEST_F(MndTestStb, 01_Create_Show_Meta_Drop_Restart_Stb) {
SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_DB, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);
}
{
@ -311,6 +319,7 @@ TEST_F(MndTestStb, 01_Create_Show_Meta_Drop_Restart_Stb) {
SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_STB, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);
}
{
@ -334,6 +343,7 @@ TEST_F(MndTestStb, 01_Create_Show_Meta_Drop_Restart_Stb) {
STableMetaRsp metaRsp = {0};
tDeserializeSTableMetaRsp(pMsg->pCont, pMsg->contLen, &metaRsp);
rpcFreeCont(pMsg->pCont);
EXPECT_STREQ(metaRsp.dbFName, dbname);
EXPECT_STREQ(metaRsp.tbName, "stb");
@ -410,6 +420,7 @@ TEST_F(MndTestStb, 01_Create_Show_Meta_Drop_Restart_Stb) {
SRpcMsg* pRsp = test.SendReq(TDMT_MND_DROP_STB, pHead, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);
}
{
@ -423,6 +434,7 @@ TEST_F(MndTestStb, 01_Create_Show_Meta_Drop_Restart_Stb) {
SRpcMsg* pRsp = test.SendReq(TDMT_MND_DROP_DB, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);
}
}
@ -436,6 +448,7 @@ TEST_F(MndTestStb, 02_Alter_Stb_AddTag) {
SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_DB, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);
}
{
@ -443,30 +456,35 @@ TEST_F(MndTestStb, 02_Alter_Stb_AddTag) {
SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_STB, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);
}
{
void* pReq = BuildAlterStbAddTagReq("1.d3.stb", "tag4", &contLen);
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_DB_NOT_EXIST);
rpcFreeCont(pRsp->pCont);
}
{
void* pReq = BuildAlterStbAddTagReq("1.d2.stb3", "tag4", &contLen);
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_STB_NOT_EXIST);
rpcFreeCont(pRsp->pCont);
}
{
void* pReq = BuildAlterStbAddTagReq(stbname, "tag3", &contLen);
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_TAG_ALREADY_EXIST);
rpcFreeCont(pRsp->pCont);
}
{
void* pReq = BuildAlterStbAddTagReq(stbname, "col1", &contLen);
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_COLUMN_ALREADY_EXIST);
rpcFreeCont(pRsp->pCont);
}
{
@ -474,6 +492,7 @@ TEST_F(MndTestStb, 02_Alter_Stb_AddTag) {
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);
test.SendShowReq(TSDB_MGMT_TABLE_STB, "ins_stables", dbname);
}
@ -483,6 +502,7 @@ TEST_F(MndTestStb, 02_Alter_Stb_AddTag) {
SRpcMsg* pRsp = test.SendReq(TDMT_MND_DROP_DB, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);
}
}
@ -495,18 +515,21 @@ TEST_F(MndTestStb, 03_Alter_Stb_DropTag) {
void* pReq = BuildCreateDbReq(dbname, &contLen);
SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_DB, pReq, contLen);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);
}
{
void* pReq = BuildCreateStbReq(stbname, &contLen);
SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_STB, pReq, contLen);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);
}
{
void* pReq = BuildAlterStbDropTagReq(stbname, "tag5", &contLen);
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_TAG_NOT_EXIST);
rpcFreeCont(pRsp->pCont);
}
{
@ -514,6 +537,7 @@ TEST_F(MndTestStb, 03_Alter_Stb_DropTag) {
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);
test.SendShowReq(TSDB_MGMT_TABLE_STB, "ins_stables", dbname);
EXPECT_EQ(test.GetShowRows(), 1);
@ -524,6 +548,7 @@ TEST_F(MndTestStb, 03_Alter_Stb_DropTag) {
SRpcMsg* pRsp = test.SendReq(TDMT_MND_DROP_DB, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);
}
}
@ -536,41 +561,48 @@ TEST_F(MndTestStb, 04_Alter_Stb_AlterTagName) {
void* pReq = BuildCreateDbReq(dbname, &contLen);
SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_DB, pReq, contLen);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);
}
{
void* pReq = BuildCreateStbReq(stbname, &contLen);
SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_STB, pReq, contLen);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);
}
{
void* pReq = BuildAlterStbUpdateTagNameReq(stbname, "tag5", "tag6", &contLen);
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_TAG_NOT_EXIST);
rpcFreeCont(pRsp->pCont);
}
{
void* pReq = BuildAlterStbUpdateTagNameReq(stbname, "col1", "tag6", &contLen);
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_TAG_NOT_EXIST);
rpcFreeCont(pRsp->pCont);
}
{
void* pReq = BuildAlterStbUpdateTagNameReq(stbname, "tag3", "col1", &contLen);
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_COLUMN_ALREADY_EXIST);
rpcFreeCont(pRsp->pCont);
}
{
void* pReq = BuildAlterStbUpdateTagNameReq(stbname, "tag3", "tag2", &contLen);
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_TAG_ALREADY_EXIST);
rpcFreeCont(pRsp->pCont);
}
{
void* pReq = BuildAlterStbUpdateTagNameReq(stbname, "tag3", "tag2", &contLen);
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_TAG_ALREADY_EXIST);
rpcFreeCont(pRsp->pCont);
}
{
@ -578,6 +610,7 @@ TEST_F(MndTestStb, 04_Alter_Stb_AlterTagName) {
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);
test.SendShowReq(TSDB_MGMT_TABLE_STB, "ins_stables", dbname);
EXPECT_EQ(test.GetShowRows(), 1);
@ -588,6 +621,7 @@ TEST_F(MndTestStb, 04_Alter_Stb_AlterTagName) {
SRpcMsg* pRsp = test.SendReq(TDMT_MND_DROP_DB, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);
}
}
@ -600,36 +634,42 @@ TEST_F(MndTestStb, 05_Alter_Stb_AlterTagBytes) {
void* pReq = BuildCreateDbReq(dbname, &contLen);
SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_DB, pReq, contLen);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);
}
{
void* pReq = BuildCreateStbReq(stbname, &contLen);
SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_STB, pReq, contLen);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);
}
{
void* pReq = BuildAlterStbUpdateTagBytesReq(stbname, "tag5", 12, &contLen);
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_TAG_NOT_EXIST);
rpcFreeCont(pRsp->pCont);
}
{
void* pReq = BuildAlterStbUpdateTagBytesReq(stbname, "tag1", 13, &contLen);
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_INVALID_STB_OPTION);
rpcFreeCont(pRsp->pCont);
}
{
void* pReq = BuildAlterStbUpdateTagBytesReq(stbname, "tag3", 8, &contLen);
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_INVALID_ROW_BYTES);
rpcFreeCont(pRsp->pCont);
}
{
void* pReq = BuildAlterStbUpdateTagBytesReq(stbname, "tag3", 20, &contLen);
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);
test.SendShowReq(TSDB_MGMT_TABLE_STB, "ins_stables", dbname);
EXPECT_EQ(test.GetShowRows(), 1);
@ -640,6 +680,7 @@ TEST_F(MndTestStb, 05_Alter_Stb_AlterTagBytes) {
SRpcMsg* pRsp = test.SendReq(TDMT_MND_DROP_DB, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);
}
}
@ -653,6 +694,7 @@ TEST_F(MndTestStb, 06_Alter_Stb_AddColumn) {
SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_DB, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);
}
{
@ -660,30 +702,35 @@ TEST_F(MndTestStb, 06_Alter_Stb_AddColumn) {
SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_STB, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);
}
{
void* pReq = BuildAlterStbAddColumnReq("1.d7.stb", "tag4", &contLen);
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_DB_NOT_EXIST);
rpcFreeCont(pRsp->pCont);
}
{
void* pReq = BuildAlterStbAddColumnReq("1.d6.stb3", "tag4", &contLen);
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_STB_NOT_EXIST);
rpcFreeCont(pRsp->pCont);
}
{
void* pReq = BuildAlterStbAddColumnReq(stbname, "tag3", &contLen);
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_TAG_ALREADY_EXIST);
rpcFreeCont(pRsp->pCont);
}
{
void* pReq = BuildAlterStbAddColumnReq(stbname, "col1", &contLen);
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_COLUMN_ALREADY_EXIST);
rpcFreeCont(pRsp->pCont);
}
{
@ -691,6 +738,7 @@ TEST_F(MndTestStb, 06_Alter_Stb_AddColumn) {
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);
test.SendShowReq(TSDB_MGMT_TABLE_STB, "ins_stables", dbname);
EXPECT_EQ(test.GetShowRows(), 1);
@ -701,6 +749,7 @@ TEST_F(MndTestStb, 06_Alter_Stb_AddColumn) {
SRpcMsg* pRsp = test.SendReq(TDMT_MND_DROP_DB, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);
}
}
@ -713,30 +762,35 @@ TEST_F(MndTestStb, 07_Alter_Stb_DropColumn) {
void* pReq = BuildCreateDbReq(dbname, &contLen);
SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_DB, pReq, contLen);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);
}
{
void* pReq = BuildCreateStbReq(stbname, &contLen);
SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_STB, pReq, contLen);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);
}
{
void* pReq = BuildAlterStbDropColumnReq(stbname, "col4", &contLen);
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_COLUMN_NOT_EXIST);
rpcFreeCont(pRsp->pCont);
}
{
void* pReq = BuildAlterStbDropColumnReq(stbname, "col1", &contLen);
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_INVALID_STB_ALTER_OPTION);
rpcFreeCont(pRsp->pCont);
}
{
void* pReq = BuildAlterStbDropColumnReq(stbname, "ts", &contLen);
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_INVALID_STB_ALTER_OPTION);
rpcFreeCont(pRsp->pCont);
}
{
@ -744,6 +798,7 @@ TEST_F(MndTestStb, 07_Alter_Stb_DropColumn) {
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);
}
{
@ -751,6 +806,7 @@ TEST_F(MndTestStb, 07_Alter_Stb_DropColumn) {
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);
test.SendShowReq(TSDB_MGMT_TABLE_STB, "ins_stables", dbname);
EXPECT_EQ(test.GetShowRows(), 1);
@ -761,6 +817,7 @@ TEST_F(MndTestStb, 07_Alter_Stb_DropColumn) {
SRpcMsg* pRsp = test.SendReq(TDMT_MND_DROP_DB, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);
}
}
@ -773,42 +830,49 @@ TEST_F(MndTestStb, 08_Alter_Stb_AlterTagBytes) {
void* pReq = BuildCreateDbReq(dbname, &contLen);
SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_DB, pReq, contLen);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);
}
{
void* pReq = BuildCreateStbReq(stbname, &contLen);
SRpcMsg* pRsp = test.SendReq(TDMT_MND_CREATE_STB, pReq, contLen);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);
}
{
void* pReq = BuildAlterStbUpdateColumnBytesReq(stbname, "col5", 12, &contLen, 0);
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_COLUMN_NOT_EXIST);
rpcFreeCont(pRsp->pCont);
}
{
void* pReq = BuildAlterStbUpdateColumnBytesReq(stbname, "ts", 8, &contLen, 0);
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_INVALID_STB_OPTION);
rpcFreeCont(pRsp->pCont);
}
{
void* pReq = BuildAlterStbUpdateColumnBytesReq(stbname, "col1", 8, &contLen, 0);
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_INVALID_ROW_BYTES);
rpcFreeCont(pRsp->pCont);
}
{
void* pReq = BuildAlterStbUpdateColumnBytesReq(stbname, "col1", TSDB_MAX_BYTES_PER_ROW, &contLen, 0);
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_INVALID_ROW_BYTES);
rpcFreeCont(pRsp->pCont);
}
{
void* pReq = BuildAlterStbUpdateColumnBytesReq(stbname, "col1", 20, &contLen, 0);
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);
test.SendShowReq(TSDB_MGMT_TABLE_STB, "ins_stables", dbname);
EXPECT_EQ(test.GetShowRows(), 1);
@ -818,6 +882,7 @@ TEST_F(MndTestStb, 08_Alter_Stb_AlterTagBytes) {
void* pReq = BuildAlterStbUpdateColumnBytesReq(stbname, "col_not_exist", 20, &contLen, 1);
SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
ASSERT_EQ(pRsp->code, TSDB_CODE_MND_COLUMN_NOT_EXIST);
rpcFreeCont(pRsp->pCont);
test.SendShowReq(TSDB_MGMT_TABLE_STB, "ins_stables", dbname);
EXPECT_EQ(test.GetShowRows(), 1);
@ -828,5 +893,6 @@ TEST_F(MndTestStb, 08_Alter_Stb_AlterTagBytes) {
SRpcMsg* pRsp = test.SendReq(TDMT_MND_DROP_DB, pReq, contLen);
ASSERT_NE(pRsp, nullptr);
ASSERT_EQ(pRsp->code, 0);
rpcFreeCont(pRsp->pCont);
}
}

View File

@ -699,7 +699,23 @@ end:
return ret;
}
void freePtr(void* ptr) { taosMemoryFree(*(void**)ptr); }
static void freePtr(void* ptr) { taosMemoryFree(*(void**)ptr); }
static STaskId replaceStreamTaskId(SStreamTask* pTask) {
ASSERT(pTask->info.fillHistory);
STaskId id = {.streamId = pTask->id.streamId, .taskId = pTask->id.taskId};
pTask->id.streamId = pTask->streamTaskId.streamId;
pTask->id.taskId = pTask->streamTaskId.taskId;
return id;
}
static void restoreStreamTaskId(SStreamTask* pTask, STaskId* pId) {
ASSERT(pTask->info.fillHistory);
pTask->id.taskId = pId->taskId;
pTask->id.streamId = pId->streamId;
}
int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t nextProcessVer) {
int32_t vgId = TD_VID(pTq->pVnode);
@ -713,15 +729,9 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t nextProcessVer) {
streamTaskOpenAllUpstreamInput(pTask);
if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) {
SStreamTask* pStateTask = pTask;
STaskId taskId = {.streamId = 0, .taskId = 0};
STaskId taskId = {0};
if (pTask->info.fillHistory) {
taskId.streamId = pTask->id.streamId;
taskId.taskId = pTask->id.taskId;
pTask->id.streamId = pTask->streamTaskId.streamId;
pTask->id.taskId = pTask->streamTaskId.taskId;
taskId = replaceStreamTaskId(pTask);
}
pTask->pState = streamStateOpen(pTq->pStreamMeta->path, pTask, false, -1, -1);
@ -731,9 +741,9 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t nextProcessVer) {
} else {
tqDebug("s-task:%s state:%p", pTask->id.idStr, pTask->pState);
}
if (pTask->info.fillHistory) {
pTask->id.streamId = taskId.streamId;
pTask->id.taskId = taskId.taskId;
restoreStreamTaskId(pTask, &taskId);
}
SReadHandle handle = {
@ -754,15 +764,9 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t nextProcessVer) {
qSetTaskId(pTask->exec.pExecutor, pTask->id.taskId, pTask->id.streamId);
} else if (pTask->info.taskLevel == TASK_LEVEL__AGG) {
SStreamTask* pSateTask = pTask;
// SStreamTask task = {0};
STaskId taskId = {.streamId = 0, .taskId = 0};
STaskId taskId = {0};
if (pTask->info.fillHistory) {
taskId.streamId = pTask->id.streamId;
taskId.taskId = pTask->id.taskId;
pTask->id.streamId = pTask->streamTaskId.streamId;
pTask->id.taskId = pTask->streamTaskId.taskId;
taskId = replaceStreamTaskId(pTask);
}
pTask->pState = streamStateOpen(pTq->pStreamMeta->path, pTask, false, -1, -1);
@ -774,15 +778,13 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t nextProcessVer) {
}
if (pTask->info.fillHistory) {
pTask->id.streamId = taskId.streamId;
pTask->id.taskId = taskId.taskId;
restoreStreamTaskId(pTask, &taskId);
}
int32_t numOfVgroups = (int32_t)taosArrayGetSize(pTask->upstreamInfo.pList);
SReadHandle handle = {
.checkpointId = pTask->chkInfo.checkpointId,
.vnode = NULL,
.numOfVgroups = numOfVgroups,
.numOfVgroups = (int32_t)taosArrayGetSize(pTask->upstreamInfo.pList),
.pStateBackend = pTask->pState,
.fillHistory = pTask->info.fillHistory,
.winRange = pTask->dataRange.window,
@ -828,12 +830,6 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t nextProcessVer) {
pTask->exec.pWalReader = walOpenReader(pTq->pVnode->pWal, &cond, pTask->id.taskId);
}
// // reset the task status from unfinished transaction
// if (pTask->status.taskStatus == TASK_STATUS__PAUSE) {
// tqWarn("s-task:%s reset task status to be normal, status kept in taskMeta: Paused", pTask->id.idStr);
// pTask->status.taskStatus = TASK_STATUS__READY;
// }
streamTaskResetUpstreamStageInfo(pTask);
streamSetupScheduleTrigger(pTask);
SCheckpointInfo* pChkInfo = &pTask->chkInfo;
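
The two helpers introduced above (replaceStreamTaskId/restoreStreamTaskId) factor out a swap-and-restore pattern that was previously inlined at each streamStateOpen call site. A minimal standalone sketch of that pattern, with simplified, assumed field types (the real STaskId/SStreamTask definitions are not shown in this diff):

```c
/* Illustrative sketch only, not part of the diff: temporarily adopt the related
 * stream task's identity while opening shared state, then restore the original
 * fill-history identity afterwards. */
typedef struct { long long streamId; long long taskId; } TaskId;

typedef struct {
  TaskId id;            /* current identity of this task */
  TaskId streamTaskId;  /* identity of the related (non fill-history) stream task */
} Task;

static TaskId swapToStreamTaskId(Task *t) {
  TaskId saved = t->id;      /* remember the fill-history identity */
  t->id = t->streamTaskId;   /* open state under the stream task's identity */
  return saved;
}

static void restoreTaskId(Task *t, const TaskId *saved) {
  t->id = *saved;            /* put the fill-history identity back */
}
```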

View File

@ -181,5 +181,5 @@ int32_t streamStateRebuildFromSnap(SStreamStateWriter* pWriter, int64_t chkpId)
}
int32_t streamStateLoadTasks(SStreamStateWriter* pWriter) {
return streamMetaReloadAllTasks(pWriter->pTq->pStreamMeta);
return streamMetaLoadAllTasks(pWriter->pTq->pStreamMeta);
}

View File

@ -736,17 +736,9 @@ static int32_t restartStreamTasks(SStreamMeta* pMeta, bool isLeader) {
}
streamMetaWLock(pMeta);
code = streamMetaReopen(pMeta);
if (code != TSDB_CODE_SUCCESS) {
tqError("vgId:%d failed to reopen stream meta", vgId);
streamMetaWUnLock(pMeta);
code = terrno;
return code;
}
streamMetaClear(pMeta);
streamMetaInitBackend(pMeta);
int64_t el = taosGetTimestampMs() - st;
tqInfo("vgId:%d close&reload state elapsed time:%.3fs", vgId, el/1000.);
code = streamMetaLoadAllTasks(pMeta);
@ -758,10 +750,10 @@ static int32_t restartStreamTasks(SStreamMeta* pMeta, bool isLeader) {
}
if (isLeader && !tsDisableStream) {
tqInfo("vgId:%d restart all stream tasks after all tasks being updated", vgId);
resetStreamTaskStatus(pMeta);
streamMetaWUnLock(pMeta);
tqInfo("vgId:%d restart all stream tasks after all tasks being updated", vgId);
startStreamTasks(pMeta);
} else {
streamMetaResetStartInfo(&pMeta->startInfo);

View File

@ -54,10 +54,9 @@ static void tsdbCloseBICache(STsdb *pTsdb) {
static int32_t tsdbOpenBCache(STsdb *pTsdb) {
int32_t code = 0;
// SLRUCache *pCache = taosLRUCacheInit(10 * 1024 * 1024, 0, .5);
int32_t szPage = pTsdb->pVnode->config.tsdbPageSize;
SLRUCache *pCache = taosLRUCacheInit((int64_t)tsS3BlockCacheSize * tsS3BlockSize * szPage, 0, .5);
int64_t szBlock = tsS3BlockSize <= 1024 ? 1024 : tsS3BlockSize;
SLRUCache *pCache = taosLRUCacheInit((int64_t)tsS3BlockCacheSize * szBlock * szPage, 0, .5);
if (pCache == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
@ -67,8 +66,9 @@ static int32_t tsdbOpenBCache(STsdb *pTsdb) {
taosThreadMutexInit(&pTsdb->bMutex, NULL);
_err:
pTsdb->bCache = pCache;
_err:
return code;
}
@ -452,9 +452,11 @@ static SLastCol *tsdbCacheLookup(STsdb *pTsdb, tb_uid_t uid, int16_t cid, int8_t
static void reallocVarData(SColVal *pColVal) {
if (IS_VAR_DATA_TYPE(pColVal->type)) {
uint8_t *pVal = pColVal->value.pData;
if (pColVal->value.nData > 0) {
pColVal->value.pData = taosMemoryMalloc(pColVal->value.nData);
if (pColVal->value.nData) {
memcpy(pColVal->value.pData, pVal, pColVal->value.nData);
} else {
pColVal->value.pData = NULL;
}
}
}
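
The reallocVarData hunk above makes the deep copy conditional on nData > 0 and nulls the pointer otherwise. A minimal sketch of that copy rule with simplified, assumed types (the real SColVal layout is not shown here):

```c
/* Illustrative sketch only: deep-copy a variable-length value, leaving pData NULL
 * for empty values so a later free never touches a stale pointer. */
#include <stdlib.h>
#include <string.h>

typedef struct {
  int   nData;   /* length of the value in bytes */
  char *pData;   /* heap-owned payload, NULL when nData == 0 */
} VarVal;

static int varValDeepCopy(VarVal *dst, const VarVal *src) {
  dst->nData = src->nData;
  if (src->nData > 0) {
    dst->pData = malloc((size_t)src->nData);
    if (dst->pData == NULL) return -1;          /* out of memory */
    memcpy(dst->pData, src->pData, (size_t)src->nData);
  } else {
    dst->pData = NULL;                          /* empty value: nothing to free later */
  }
  return 0;
}
```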
@ -2755,14 +2757,16 @@ static int32_t mergeLastCid(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray, SC
*pCol = (SLastCol){.ts = rowTs, .colVal = *pColVal};
if (IS_VAR_DATA_TYPE(pColVal->type) /*&& pColVal->value.nData > 0*/) {
if (pColVal->value.nData > 0) {
pCol->colVal.value.pData = taosMemoryMalloc(pCol->colVal.value.nData);
if (pCol->colVal.value.pData == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
if (pColVal->value.nData > 0) {
memcpy(pCol->colVal.value.pData, pColVal->value.pData, pColVal->value.nData);
} else {
pCol->colVal.value.pData = NULL;
}
}
@ -2802,10 +2806,11 @@ static int32_t mergeLastCid(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray, SC
tsdbRowGetColVal(pRow, pTSchema, slotIds[iCol], pColVal);
if (!COL_VAL_IS_VALUE(tColVal) && COL_VAL_IS_VALUE(pColVal)) {
SLastCol lastCol = {.ts = rowTs, .colVal = *pColVal};
if (IS_VAR_DATA_TYPE(pColVal->type) && pColVal->value.nData > 0) {
if (IS_VAR_DATA_TYPE(pColVal->type) /* && pColVal->value.nData > 0 */) {
SLastCol *pLastCol = (SLastCol *)taosArrayGet(pColArray, iCol);
taosMemoryFree(pLastCol->colVal.value.pData);
if (pColVal->value.nData > 0) {
lastCol.colVal.value.pData = taosMemoryMalloc(lastCol.colVal.value.nData);
if (lastCol.colVal.value.pData == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
@ -2813,6 +2818,9 @@ static int32_t mergeLastCid(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray, SC
goto _err;
}
memcpy(lastCol.colVal.value.pData, pColVal->value.pData, pColVal->value.nData);
} else {
lastCol.colVal.value.pData = NULL;
}
}
taosArraySet(pColArray, iCol, &lastCol);
@ -2923,6 +2931,7 @@ static int32_t mergeLastRowCid(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray,
*pCol = (SLastCol){.ts = rowTs, .colVal = *pColVal};
if (IS_VAR_DATA_TYPE(pColVal->type) /*&& pColVal->value.nData > 0*/) {
if (pColVal->value.nData > 0) {
pCol->colVal.value.pData = taosMemoryMalloc(pCol->colVal.value.nData);
if (pCol->colVal.value.pData == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
@ -2932,6 +2941,9 @@ static int32_t mergeLastRowCid(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray,
if (pColVal->value.nData > 0) {
memcpy(pCol->colVal.value.pData, pColVal->value.pData, pColVal->value.nData);
}
} else {
pCol->colVal.value.pData = NULL;
}
}
int32_t aColIndex = taosArraySearchIdx(aColArray, &pColVal->cid, compareInt16Val, TD_EQ);

View File

@ -115,7 +115,7 @@ static int32_t tsdbDataFileRAWWriterDoClose(SDataFileRAWWriter *writer) { return
static int32_t tsdbDataFileRAWWriterCloseCommit(SDataFileRAWWriter *writer, TFileOpArray *opArr) {
int32_t code = 0;
int32_t lino = 0;
ASSERT(writer->ctx->offset == writer->file.size);
ASSERT(writer->ctx->offset <= writer->file.size);
ASSERT(writer->config->fid == writer->file.fid);
STFileOp op = (STFileOp){

View File

@ -529,7 +529,8 @@ static int32_t tsdbFSDoSanAndFix(STFileSystem *fs) {
for (const STfsFile *file = NULL; (file = tfsReaddir(dir)) != NULL;) {
if (taosIsDir(file->aname)) continue;
if (tsdbFSGetFileObjHashEntry(&fobjHash, file->aname) == NULL) {
if (tsdbFSGetFileObjHashEntry(&fobjHash, file->aname) == NULL &&
strncmp(file->aname + strlen(file->aname) - 3, ".cp", 3)) {
int32_t nlevel = tfsGetLevel(fs->tsdb->pVnode->pTfs);
remove_file(file->aname, nlevel > 1 && file->did.level == nlevel - 1);
}

View File

@ -67,7 +67,7 @@ static SVersionRange getQueryVerRange(SVnode* pVnode, SQueryTableDataCond* pCond
static int32_t doBuildDataBlock(STsdbReader* pReader);
static TSDBKEY getCurrentKeyInBuf(STableBlockScanInfo* pScanInfo, STsdbReader* pReader);
static bool hasDataInFileBlock(const SBlockData* pBlockData, const SFileBlockDumpInfo* pDumpInfo);
static bool hasDataInSttBlock(SSttBlockReader* pSttBlockReader);
static bool hasDataInSttBlock(STableBlockScanInfo *pInfo);
static void initBlockDumpInfo(STsdbReader* pReader, SDataBlockIter* pBlockIter);
static int32_t getInitialDelIndex(const SArray* pDelSkyline, int32_t order);
static void resetTableListIndex(SReaderStatus* pStatus);
@ -1466,7 +1466,7 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
int64_t tsLast = INT64_MIN;
if (hasDataInSttBlock(pSttBlockReader)) {
if (hasDataInSttBlock(pBlockScanInfo)) {
tsLast = getCurrentKeyInSttBlock(pSttBlockReader);
}
@ -1485,7 +1485,7 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
int64_t minKey = 0;
if (pReader->info.order == TSDB_ORDER_ASC) {
minKey = INT64_MAX; // chosen the minimum value
if (minKey > tsLast && hasDataInSttBlock(pSttBlockReader)) {
if (minKey > tsLast && hasDataInSttBlock(pBlockScanInfo)) {
minKey = tsLast;
}
@ -1498,7 +1498,7 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
}
} else {
minKey = INT64_MIN;
if (minKey < tsLast && hasDataInSttBlock(pSttBlockReader)) {
if (minKey < tsLast && hasDataInSttBlock(pBlockScanInfo)) {
minKey = tsLast;
}
@ -1705,7 +1705,7 @@ static int32_t mergeFileBlockAndSttBlock(STsdbReader* pReader, SSttBlockReader*
}
bool dataInDataFile = hasDataInFileBlock(pBlockData, pDumpInfo);
bool dataInSttFile = hasDataInSttBlock(pSttBlockReader);
bool dataInSttFile = hasDataInSttBlock(pBlockScanInfo);
if (dataInDataFile && (!dataInSttFile)) {
// no stt file block available, only data block exists
return mergeRowsInFileBlocks(pBlockData, pBlockScanInfo, key, pReader);
@ -1791,7 +1791,7 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo*
TSDBROW* piRow = getValidMemRow(&pBlockScanInfo->iiter, pDelList, pReader);
int64_t tsLast = INT64_MIN;
if (hasDataInSttBlock(pSttBlockReader)) {
if (hasDataInSttBlock(pBlockScanInfo)) {
tsLast = getCurrentKeyInSttBlock(pSttBlockReader);
}
@ -1840,7 +1840,7 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo*
minKey = key;
}
if (minKey > tsLast && hasDataInSttBlock(pSttBlockReader)) {
if (minKey > tsLast && hasDataInSttBlock(pBlockScanInfo)) {
minKey = tsLast;
}
} else {
@ -1857,7 +1857,7 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo*
minKey = key;
}
if (minKey < tsLast && hasDataInSttBlock(pSttBlockReader)) {
if (minKey < tsLast && hasDataInSttBlock(pBlockScanInfo)) {
minKey = tsLast;
}
}
@ -2065,7 +2065,7 @@ static bool initSttBlockReader(SSttBlockReader* pSttBlockReader, STableBlockScan
// the stt block reader has been initialized for this table.
if (pSttBlockReader->uid == pScanInfo->uid) {
return hasDataInSttBlock(pSttBlockReader);
return hasDataInSttBlock(pScanInfo);
}
if (pSttBlockReader->uid != 0) {
@ -2158,7 +2158,9 @@ static bool initSttBlockReader(SSttBlockReader* pSttBlockReader, STableBlockScan
return hasData;
}
static bool hasDataInSttBlock(SSttBlockReader* pSttBlockReader) { return pSttBlockReader->mergeTree.pIter != NULL; }
static bool hasDataInSttBlock(STableBlockScanInfo *pInfo) {
return pInfo->sttKeyInfo.status == STT_FILE_HAS_DATA;
}
bool hasDataInFileBlock(const SBlockData* pBlockData, const SFileBlockDumpInfo* pDumpInfo) {
if ((pBlockData->nRow > 0) && (pBlockData->nRow != pDumpInfo->totalRows)) {
@ -2733,7 +2735,7 @@ static int32_t doLoadSttBlockSequentially(STsdbReader* pReader) {
int64_t st = taosGetTimestampUs();
while (1) {
// no data in stt block and block, no need to proceed.
if (!hasDataInSttBlock(pSttBlockReader)) {
if (!hasDataInSttBlock(pScanInfo)) {
break;
}
@ -2850,7 +2852,7 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) {
initSttBlockReader(pSttBlockReader, pScanInfo, pReader);
// no data in stt block, no need to proceed.
while (hasDataInSttBlock(pSttBlockReader)) {
while (hasDataInSttBlock(pScanInfo)) {
ASSERT(pScanInfo->sttKeyInfo.status == STT_FILE_HAS_DATA);
code = buildComposedDataBlockImpl(pReader, pScanInfo, &pReader->status.fileBlockData, pSttBlockReader);

View File

@ -252,6 +252,7 @@ static void doCleanupInfoForNextFileset(STableBlockScanInfo* pScanInfo) {
taosArrayClear(pScanInfo->pFileDelData); // del data from each file set
pScanInfo->cleanSttBlocks = false;
pScanInfo->numOfRowsInStt = 0;
pScanInfo->sttBlockReturned = false;
INIT_TIMEWINDOW(&pScanInfo->sttWindow);
INIT_TIMEWINDOW(&pScanInfo->filesetWindow);
pScanInfo->sttKeyInfo.status = STT_FILE_READER_UNINIT;

View File

@ -112,7 +112,7 @@ static int32_t tsdbCopyFileS3(SRTNer *rtner, const STFileObj *from, const STFile
TSDB_CHECK_CODE(code, lino, _exit);
char *object_name = taosDirEntryBaseName(fname);
code = s3PutObjectFromFile2(from->fname, object_name);
code = s3PutObjectFromFile2(from->fname, object_name, 1);
TSDB_CHECK_CODE(code, lino, _exit);
taosCloseFile(&fdFrom);

View File

@ -1012,7 +1012,7 @@ static int32_t vnodeProcessCreateTbReq(SVnode *pVnode, int64_t ver, void *pReq,
char* keyJoined = taosStringBuilderGetResult(&sb, &len);
if(pOriginRpc->info.conn.user != NULL && strlen(pOriginRpc->info.conn.user) > 0){
auditRecord(pOriginRpc, clusterId, "createTable", name.dbname, "", keyJoined, len);
auditAddRecord(pOriginRpc, clusterId, "createTable", name.dbname, "", keyJoined, len);
}
taosStringBuilderDestroy(&sb);
@ -1236,7 +1236,7 @@ static int32_t vnodeProcessDropTbReq(SVnode *pVnode, int64_t ver, void *pReq, in
char *keyJoined = taosStringBuilderGetResult(&sb, &len);
if(pOriginRpc->info.conn.user != NULL && strlen(pOriginRpc->info.conn.user) > 0){
auditRecord(pOriginRpc, clusterId, "dropTable", name.dbname, "", keyJoined, len);
auditAddRecord(pOriginRpc, clusterId, "dropTable", name.dbname, "", keyJoined, len);
}
taosStringBuilderDestroy(&sb);

View File

@ -17,9 +17,12 @@
#define _TD_AUDIT_INT_H_
#include "audit.h"
#include "tarray.h"
typedef struct {
SAuditCfg cfg;
SArray *records;
TdThreadMutex lock;
} SAudit;
#endif /*_TD_AUDIT_INT_H_*/

View File

@ -14,6 +14,8 @@
*/
#define _DEFAULT_SOURCE
#include "tarray.h"
#include "auditInt.h"
#include "taoserror.h"
#include "thttp.h"
@ -21,25 +23,56 @@
#include "tjson.h"
#include "tglobal.h"
#include "mnode.h"
#include "audit.h"
SAudit tsAudit = {0};
char* tsAuditUri = "/audit";
char* tsAuditBatchUri = "/audit-batch";
int32_t auditInit(const SAuditCfg *pCfg) {
tsAudit.cfg = *pCfg;
tsAudit.records = taosArrayInit(0, sizeof(SAuditRecord *));
taosThreadMutexInit(&tsAudit.lock, NULL);
return 0;
}
void auditCleanup() {
tsLogFp = NULL;
taosArrayDestroy(tsAudit.records);
tsAudit.records = NULL;
taosThreadMutexDestroy(&tsAudit.lock);
}
extern void auditRecordImp(SRpcMsg *pReq, int64_t clusterId, char *operation, char *target1, char *target2,
char *detail, int32_t len);
extern void auditAddRecordImp(SRpcMsg *pReq, int64_t clusterId, char *operation, char *target1, char *target2,
char *detail, int32_t len);
extern void auditSendRecordsInBatchImp();
void auditRecord(SRpcMsg *pReq, int64_t clusterId, char *operation, char *target1, char *target2,
char *detail, int32_t len) {
auditRecordImp(pReq, clusterId, operation, target1, target2, detail, len);
}
void auditAddRecord(SRpcMsg *pReq, int64_t clusterId, char *operation, char *target1, char *target2,
char *detail, int32_t len) {
auditAddRecordImp(pReq, clusterId, operation, target1, target2, detail, len);
}
void auditSendRecordsInBatch(){
auditSendRecordsInBatchImp();
}
#ifndef TD_ENTERPRISE
void auditRecordImp(SRpcMsg *pReq, int64_t clusterId, char *operation, char *target1, char *target2,
char *detail, int32_t len) {
}
void auditAddRecordImp(SRpcMsg *pReq, int64_t clusterId, char *operation, char *target1, char *target2,
char *detail, int32_t len) {
}
void auditSendRecordsInBatchImp(){
}
#endif
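
The audit changes above add a record list plus a mutex to SAudit and a second URI for batched delivery; the enterprise implementations of auditAddRecordImp/auditSendRecordsInBatchImp are not part of this diff. A minimal sketch of the collect-then-flush pattern they suggest, with assumed record and sender types:

```c
/* Illustrative sketch only: append records under a mutex, flush them in one batch. */
#include <pthread.h>
#include <stdlib.h>

typedef struct { char *json; } AuditRecord;           /* assumed payload shape */

typedef struct {
  AuditRecord   **records;
  int             count, cap;
  pthread_mutex_t lock;
} AuditBatch;

static void auditBatchAdd(AuditBatch *b, AuditRecord *rec) {
  pthread_mutex_lock(&b->lock);
  if (b->count == b->cap) {
    int newCap = b->cap ? b->cap * 2 : 16;
    AuditRecord **p = realloc(b->records, (size_t)newCap * sizeof(*p));
    if (p == NULL) {                                   /* keep the old buffer on OOM */
      pthread_mutex_unlock(&b->lock);
      return;
    }
    b->records = p;
    b->cap = newCap;
  }
  b->records[b->count++] = rec;
  pthread_mutex_unlock(&b->lock);
}

static void auditBatchFlush(AuditBatch *b, void (*send)(AuditRecord **, int)) {
  pthread_mutex_lock(&b->lock);
  send(b->records, b->count);                          /* e.g. one POST to the batch endpoint */
  b->count = 0;                                        /* sender takes ownership of the records */
  pthread_mutex_unlock(&b->lock);
}
```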

View File

@ -636,6 +636,7 @@ typedef struct SStreamEventAggOperatorInfo {
bool isHistoryOp;
SArray* historyWins;
bool reCkBlock;
bool recvGetAll;
SSDataBlock* pCheckpointRes;
SFilterInfo* pStartCondInfo;
SFilterInfo* pEndCondInfo;
@ -748,7 +749,7 @@ void setResultRowInitCtx(SResultRow* pResult, SqlFunctionCtx* pCtx, int32_t numO
void clearResultRowInitFlag(SqlFunctionCtx* pCtx, int32_t numOfOutput);
SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pResultRowInfo, char* pData,
int16_t bytes, bool masterscan, uint64_t groupId, SExecTaskInfo* pTaskInfo,
int32_t bytes, bool masterscan, uint64_t groupId, SExecTaskInfo* pTaskInfo,
bool isIntervalQuery, SAggSupporter* pSup, bool keepGroup);
int32_t projectApplyFunctions(SExprInfo* pExpr, SSDataBlock* pResult, SSDataBlock* pSrcBlock, SqlFunctionCtx* pCtx,
@ -837,6 +838,8 @@ void compactTimeWindow(SExprSupp* pSup, SStreamAggSupporter* pAggSup, STimeW
int32_t releaseOutputBuf(void* pState, SRowBuffPos* pPos, SStateStore* pAPI);
void resetWinRange(STimeWindow* winRange);
bool checkExpiredData(SStateStore* pAPI, SUpdateInfo* pUpdateInfo, STimeWindowAggSupp* pTwSup, uint64_t tableId, TSKEY ts);
int64_t getDeleteMark(SWindowPhysiNode* pWinPhyNode, int64_t interval);
void resetUnCloseSessionWinInfo(SSHashObj* winMap);
int32_t encodeSSessionKey(void** buf, SSessionKey* key);
void* decodeSSessionKey(void* buf, SSessionKey* key);

View File

@ -984,6 +984,21 @@ int32_t qSetStreamOperatorOptionForScanHistory(qTaskInfo_t tinfo) {
qInfo("save stream param for state: %d, %" PRId64, pSup->calTrigger, pSup->deleteMark);
pSup->calTriggerSaved = pSup->calTrigger;
pSup->deleteMarkSaved = pSup->deleteMark;
pSup->calTrigger = STREAM_TRIGGER_AT_ONCE;
pSup->deleteMark = INT64_MAX;
pInfo->ignoreExpiredDataSaved = pInfo->ignoreExpiredData;
pInfo->ignoreExpiredData = false;
} else if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT) {
SStreamEventAggOperatorInfo* pInfo = pOperator->info;
STimeWindowAggSupp* pSup = &pInfo->twAggSup;
ASSERT(pSup->calTrigger == STREAM_TRIGGER_AT_ONCE || pSup->calTrigger == STREAM_TRIGGER_WINDOW_CLOSE);
ASSERT(pSup->calTriggerSaved == 0 && pSup->deleteMarkSaved == 0);
qInfo("save stream param for state: %d, %" PRId64, pSup->calTrigger, pSup->deleteMark);
pSup->calTriggerSaved = pSup->calTrigger;
pSup->deleteMarkSaved = pSup->deleteMark;
pSup->calTrigger = STREAM_TRIGGER_AT_ONCE;

View File

@ -137,7 +137,7 @@ SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int32_t* currentPageId, i
* +----------+---------------+
*/
SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pResultRowInfo, char* pData,
int16_t bytes, bool masterscan, uint64_t groupId, SExecTaskInfo* pTaskInfo,
int32_t bytes, bool masterscan, uint64_t groupId, SExecTaskInfo* pTaskInfo,
bool isIntervalQuery, SAggSupporter* pSup, bool keepGroup) {
SET_RES_WINDOW_KEY(pSup->keyBuf, pData, bytes, groupId);
if (!keepGroup) {

View File

@ -65,7 +65,7 @@ typedef struct SPartitionOperatorInfo {
static void* getCurrentDataGroupInfo(const SPartitionOperatorInfo* pInfo, SDataGroupInfo** pGroupInfo, int32_t len);
static int32_t* setupColumnOffset(const SSDataBlock* pBlock, int32_t rowCapacity);
static int32_t setGroupResultOutputBuf(SOperatorInfo* pOperator, SOptrBasicInfo* binfo, int32_t numOfCols, char* pData,
int16_t bytes, uint64_t groupId, SDiskbasedBuf* pBuf, SAggSupporter* pAggSup);
int32_t bytes, uint64_t groupId, SDiskbasedBuf* pBuf, SAggSupporter* pAggSup);
static SArray* extractColumnInfo(SNodeList* pNodeList);
static void freeGroupKey(void* param) {
@ -1016,7 +1016,7 @@ _error:
}
int32_t setGroupResultOutputBuf(SOperatorInfo* pOperator, SOptrBasicInfo* binfo, int32_t numOfCols, char* pData,
int16_t bytes, uint64_t groupId, SDiskbasedBuf* pBuf, SAggSupporter* pAggSup) {
int32_t bytes, uint64_t groupId, SDiskbasedBuf* pBuf, SAggSupporter* pAggSup) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
SResultRowInfo* pResultRowInfo = &binfo->resultRowInfo;
SqlFunctionCtx* pCtx = pOperator->exprSupp.pCtx;

View File

@ -176,7 +176,7 @@ int32_t updateEventWindowInfo(SStreamAggSupporter* pAggSup, SEventWindowInfo* pW
for (int32_t i = start; i < rows; ++i) {
if (pTsData[i] >= maxTs) {
return i - 1 - start;
return i - start;
}
if (pWin->skey > pTsData[i]) {
@ -486,6 +486,11 @@ static SSDataBlock* doStreamEventAgg(SOperatorInfo* pOperator) {
return pInfo->pCheckpointRes;
}
if (pInfo->recvGetAll) {
pInfo->recvGetAll = false;
resetUnCloseSessionWinInfo(pInfo->streamAggSup.pResultRows);
}
setOperatorCompleted(pOperator);
return NULL;
}
@ -510,6 +515,7 @@ static SSDataBlock* doStreamEventAgg(SOperatorInfo* pOperator) {
deleteSessionWinState(&pInfo->streamAggSup, pBlock, pInfo->pSeUpdated, pInfo->pSeDeleted);
continue;
} else if (pBlock->info.type == STREAM_GET_ALL) {
pInfo->recvGetAll = true;
getAllSessionWindow(pInfo->streamAggSup.pResultRows, pInfo->pSeUpdated);
continue;
} else if (pBlock->info.type == STREAM_CREATE_CHILD_TABLE) {
@ -610,6 +616,10 @@ void streamEventReloadState(SOperatorInfo* pOperator) {
qDebug("===stream=== reload state. try process result %" PRId64 ", %" PRIu64 ", index:%d", pSeKeyBuf[i].win.skey,
pSeKeyBuf[i].groupId, i);
getSessionWindowInfoByKey(pAggSup, pSeKeyBuf + i, &curInfo.winInfo);
//event window has been deleted
if (!IS_VALID_SESSION_WIN(curInfo.winInfo)) {
continue;
}
setEventWindowFlag(pAggSup, &curInfo);
if (!curInfo.pWinFlag->startFlag || curInfo.pWinFlag->endFlag) {
continue;
@ -672,6 +682,7 @@ SOperatorInfo* createStreamEventAggOperatorInfo(SOperatorInfo* downstream, SPhys
.calTrigger = pEventNode->window.triggerType,
.maxTs = INT64_MIN,
.minTs = INT64_MAX,
.deleteMark = getDeleteMark(&pEventNode->window, 0),
};
initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window);
@ -720,6 +731,7 @@ SOperatorInfo* createStreamEventAggOperatorInfo(SOperatorInfo* downstream, SPhys
pInfo->pCheckpointRes = createSpecialDataBlock(STREAM_CHECKPOINT);
pInfo->reCkBlock = false;
pInfo->recvGetAll = false;
// for stream
void* buff = NULL;

View File

@ -634,9 +634,6 @@ static void addRetriveWindow(SArray* wins, SStreamIntervalOperatorInfo* pInfo, i
for (int32_t i = 0; i < size; i++) {
SWinKey* winKey = taosArrayGet(wins, i);
STimeWindow nextWin = getFinalTimeWindow(winKey->ts, &pInfo->interval);
if (isOverdue(nextWin.ekey, &pInfo->twAggSup) && pInfo->ignoreExpiredData) {
continue;
}
void* chIds = taosHashGet(pInfo->pPullDataMap, winKey, sizeof(SWinKey));
if (!chIds) {
SPullWindowInfo pull = {
@ -801,7 +798,7 @@ static void doStreamIntervalAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDat
}
while (1) {
bool isClosed = isCloseWindow(&nextWin, &pInfo->twAggSup);
if ((!IS_FINAL_INTERVAL_OP(pOperator) && pInfo->ignoreExpiredData &&
if ((!IS_FINAL_INTERVAL_OP(pOperator) && pInfo->ignoreExpiredData && pSDataBlock->info.type != STREAM_PULL_DATA &&
checkExpiredData(&pInfo->stateStore, pInfo->pUpdateInfo, &pInfo->twAggSup, pSDataBlock->info.id.uid,
nextWin.ekey)) ||
!inSlidingWindow(&pInfo->interval, &nextWin, &pSDataBlock->info)) {
@ -1345,12 +1342,12 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
return buildIntervalResult(pOperator);
}
static int64_t getDeleteMark(SIntervalPhysiNode* pIntervalPhyNode) {
if (pIntervalPhyNode->window.deleteMark <= 0) {
int64_t getDeleteMark(SWindowPhysiNode* pWinPhyNode, int64_t interval) {
if (pWinPhyNode->deleteMark <= 0) {
return DEAULT_DELETE_MARK;
}
int64_t deleteMark = TMAX(pIntervalPhyNode->window.deleteMark, pIntervalPhyNode->window.watermark);
deleteMark = TMAX(deleteMark, pIntervalPhyNode->interval);
int64_t deleteMark = TMAX(pWinPhyNode->deleteMark, pWinPhyNode->watermark);
deleteMark = TMAX(deleteMark, interval);
return deleteMark;
}
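
The generalized getDeleteMark above applies the same rule to any window physical node: fall back to a default when no mark is configured, otherwise never go below the watermark or the window interval. A small standalone sketch of that rule (the default constant is an assumed placeholder, not the engine's actual DEAULT_DELETE_MARK value):

```c
#include <stdint.h>

/* Illustrative sketch only; the real default lives in the engine headers. */
#define SKETCH_DEFAULT_DELETE_MARK ((int64_t)1000LL * 60 * 60 * 24)   /* assumed: one day in ms */

static int64_t pickDeleteMark(int64_t configuredMark, int64_t watermark, int64_t interval) {
  if (configuredMark <= 0) {
    return SKETCH_DEFAULT_DELETE_MARK;          /* no explicit mark configured */
  }
  int64_t mark = configuredMark > watermark ? configuredMark : watermark;
  return mark > interval ? mark : interval;     /* never smaller than the window interval */
}
```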
@ -1442,7 +1439,7 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
.calTrigger = pIntervalPhyNode->window.triggerType,
.maxTs = INT64_MIN,
.minTs = INT64_MAX,
.deleteMark = getDeleteMark(pIntervalPhyNode),
.deleteMark = getDeleteMark(&pIntervalPhyNode->window, pIntervalPhyNode->interval),
.deleteMarkSaved = 0,
.calTriggerSaved = 0,
};
@ -2565,7 +2562,7 @@ void doStreamSessionSaveCheckpoint(SOperatorInfo* pOperator) {
taosMemoryFree(buf);
}
static void resetUnCloseSessionWinInfo(SSHashObj* winMap) {
void resetUnCloseSessionWinInfo(SSHashObj* winMap) {
void* pIte = NULL;
int32_t iter = 0;
while ((pIte = tSimpleHashIterate(winMap, pIte, &iter)) != NULL) {
@ -3963,7 +3960,7 @@ SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhys
.calTrigger = pIntervalPhyNode->window.triggerType,
.maxTs = INT64_MIN,
.minTs = INT64_MAX,
.deleteMark = getDeleteMark(pIntervalPhyNode)};
.deleteMark = getDeleteMark(&pIntervalPhyNode->window, pIntervalPhyNode->interval)};
ASSERTS(pInfo->twAggSup.calTrigger != STREAM_TRIGGER_MAX_DELAY, "trigger type should not be max delay");

View File

@ -5466,11 +5466,10 @@ bool blockDistSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResultInfo) {
}
int32_t blockDistFunction(SqlFunctionCtx* pCtx) {
const int32_t BLOCK_DIST_RESULT_ROWS = 24;
const int32_t BLOCK_DIST_RESULT_ROWS = 25;
SInputColumnInfoData* pInput = &pCtx->input;
SColumnInfoData* pInputCol = pInput->pData[0];
SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
STableBlockDistInfo* pDistInfo = GET_ROWCELL_INTERBUF(pResInfo);

View File

@ -164,7 +164,7 @@ IF(NOT TD_DARWIN)
)
add_test(
NAME idxtest
NAME idxTest
COMMAND idxTest
)
add_test(

View File

@ -60,6 +60,7 @@ class JsonEnv : public ::testing::Test {
}
virtual void TearDown() {
indexJsonClose(index);
indexOptsDestroy(opts);
printf("destory\n");
taosMsleep(1000);
}
@ -152,6 +153,7 @@ TEST_F(JsonEnv, testWrite) {
indexMultiTermQueryAdd(mq, q, QUERY_TERM);
indexJsonSearch(index, mq, result);
EXPECT_EQ(100, taosArrayGetSize(result));
taosArrayDestroy(result);
indexMultiTermQueryDestroy(mq);
}
}
@ -211,6 +213,7 @@ TEST_F(JsonEnv, testWriteMillonData) {
indexJsonSearch(index, mq, result);
EXPECT_EQ(10, taosArrayGetSize(result));
indexMultiTermQueryDestroy(mq);
taosArrayDestroy(result);
}
{
{
@ -226,6 +229,7 @@ TEST_F(JsonEnv, testWriteMillonData) {
indexJsonSearch(index, mq, result);
EXPECT_EQ(0, taosArrayGetSize(result));
indexMultiTermQueryDestroy(mq);
taosArrayDestroy(result);
}
{
{
@ -241,6 +245,7 @@ TEST_F(JsonEnv, testWriteMillonData) {
indexJsonSearch(index, mq, result);
EXPECT_EQ(10, taosArrayGetSize(result));
indexMultiTermQueryDestroy(mq);
taosArrayDestroy(result);
}
}
}
@ -311,6 +316,7 @@ TEST_F(JsonEnv, testWriteJsonNumberData) {
indexJsonSearch(index, mq, result);
EXPECT_EQ(1000, taosArrayGetSize(result));
indexMultiTermQueryDestroy(mq);
taosArrayDestroy(result);
}
{
std::string colName("test");
@ -325,6 +331,7 @@ TEST_F(JsonEnv, testWriteJsonNumberData) {
indexJsonSearch(index, mq, result);
EXPECT_EQ(0, taosArrayGetSize(result));
indexMultiTermQueryDestroy(mq);
taosArrayDestroy(result);
}
{
std::string colName("test");
@ -340,6 +347,7 @@ TEST_F(JsonEnv, testWriteJsonNumberData) {
indexJsonSearch(index, mq, result);
EXPECT_EQ(1000, taosArrayGetSize(result));
indexMultiTermQueryDestroy(mq);
taosArrayDestroy(result);
}
{
std::string colName("test");
@ -355,6 +363,7 @@ TEST_F(JsonEnv, testWriteJsonNumberData) {
indexJsonSearch(index, mq, result);
EXPECT_EQ(0, taosArrayGetSize(result));
indexMultiTermQueryDestroy(mq);
taosArrayDestroy(result);
}
{
std::string colName("test");
@ -370,6 +379,7 @@ TEST_F(JsonEnv, testWriteJsonNumberData) {
indexJsonSearch(index, mq, result);
EXPECT_EQ(1000, taosArrayGetSize(result));
indexMultiTermQueryDestroy(mq);
taosArrayDestroy(result);
}
}
@ -413,6 +423,7 @@ TEST_F(JsonEnv, testWriteJsonTfileAndCache_INT) {
indexJsonSearch(index, mq, result);
EXPECT_EQ(1000, taosArrayGetSize(result));
indexMultiTermQueryDestroy(mq);
taosArrayDestroy(result);
}
{
std::string colName("test1");
@ -427,6 +438,7 @@ TEST_F(JsonEnv, testWriteJsonTfileAndCache_INT) {
indexJsonSearch(index, mq, result);
EXPECT_EQ(0, taosArrayGetSize(result));
indexMultiTermQueryDestroy(mq);
taosArrayDestroy(result);
}
{
std::string colName("test1");
@ -442,6 +454,7 @@ TEST_F(JsonEnv, testWriteJsonTfileAndCache_INT) {
indexJsonSearch(index, mq, result);
EXPECT_EQ(1000, taosArrayGetSize(result));
indexMultiTermQueryDestroy(mq);
taosArrayDestroy(result);
}
{
std::string colName("test1");
@ -456,6 +469,7 @@ TEST_F(JsonEnv, testWriteJsonTfileAndCache_INT) {
indexJsonSearch(index, mq, result);
EXPECT_EQ(0, taosArrayGetSize(result));
indexMultiTermQueryDestroy(mq);
taosArrayDestroy(result);
}
{
std::string colName("test1");
@ -470,6 +484,7 @@ TEST_F(JsonEnv, testWriteJsonTfileAndCache_INT) {
indexJsonSearch(index, mq, result);
EXPECT_EQ(1000, taosArrayGetSize(result));
indexMultiTermQueryDestroy(mq);
taosArrayDestroy(result);
}
{
std::string colName("other_column");
@ -499,6 +514,7 @@ TEST_F(JsonEnv, testWriteJsonTfileAndCache_INT) {
indexJsonSearch(index, mq, result);
EXPECT_EQ(0, taosArrayGetSize(result));
indexMultiTermQueryDestroy(mq);
taosArrayDestroy(result);
}
{
std::string colName("test1");
@ -527,6 +543,7 @@ TEST_F(JsonEnv, testWriteJsonTfileAndCache_INT) {
indexJsonSearch(index, mq, result);
EXPECT_EQ(2000, taosArrayGetSize(result));
indexMultiTermQueryDestroy(mq);
taosArrayDestroy(result);
}
}
TEST_F(JsonEnv, testWriteJsonTfileAndCache_INT2) {
@ -553,6 +570,7 @@ TEST_F(JsonEnv, testWriteJsonTfileAndCache_INT2) {
int val = 9;
Search(index, colName, TSDB_DATA_TYPE_INT, &val, sizeof(val), QUERY_GREATER_EQUAL, &res);
EXPECT_EQ(1000, taosArrayGetSize(res));
taosArrayDestroy(res);
}
{
SArray* res = NULL;
@ -560,6 +578,7 @@ TEST_F(JsonEnv, testWriteJsonTfileAndCache_INT2) {
std::string colVal("xxxxxxxxxxxxxxx");
Search(index, colName, TSDB_DATA_TYPE_BINARY, (void*)(colVal.c_str()), colVal.size(), QUERY_TERM, &res);
EXPECT_EQ(1000, taosArrayGetSize(res));
taosArrayDestroy(res);
}
}
TEST_F(JsonEnv, testWriteJsonTfileAndCache_FLOAT) {
@ -583,6 +602,7 @@ TEST_F(JsonEnv, testWriteJsonTfileAndCache_FLOAT) {
float val = 1.9;
Search(index, colName, TSDB_DATA_TYPE_FLOAT, &val, sizeof(val), QUERY_GREATER_EQUAL, &res);
EXPECT_EQ(2000, taosArrayGetSize(res));
taosArrayDestroy(res);
}
{
SArray* res = NULL;
@ -590,6 +610,7 @@ TEST_F(JsonEnv, testWriteJsonTfileAndCache_FLOAT) {
float val = 2.1;
Search(index, colName, TSDB_DATA_TYPE_FLOAT, &val, sizeof(val), QUERY_GREATER_EQUAL, &res);
EXPECT_EQ(1000, taosArrayGetSize(res));
taosArrayDestroy(res);
}
{
std::string colName("test1");
@ -597,6 +618,7 @@ TEST_F(JsonEnv, testWriteJsonTfileAndCache_FLOAT) {
float val = 2.1;
Search(index, colName, TSDB_DATA_TYPE_FLOAT, &val, sizeof(val), QUERY_GREATER_EQUAL, &res);
EXPECT_EQ(1000, taosArrayGetSize(res));
taosArrayDestroy(res);
}
}
TEST_F(JsonEnv, testWriteJsonTfileAndCache_DOUBLE) {
@ -618,29 +640,34 @@ TEST_F(JsonEnv, testWriteJsonTfileAndCache_DOUBLE) {
double val = 1.9;
Search(index, "test1", TSDB_DATA_TYPE_DOUBLE, &val, sizeof(val), QUERY_GREATER_EQUAL, &res);
EXPECT_EQ(2000, taosArrayGetSize(res));
taosArrayDestroy(res);
}
{
SArray* res = NULL;
double val = 2.1;
Search(index, "test1", TSDB_DATA_TYPE_DOUBLE, &val, sizeof(val), QUERY_GREATER_EQUAL, &res);
EXPECT_EQ(1000, taosArrayGetSize(res));
taosArrayDestroy(res);
}
{
SArray* res = NULL;
double val = 2.1;
Search(index, "test1", TSDB_DATA_TYPE_DOUBLE, &val, sizeof(val), QUERY_GREATER_EQUAL, &res);
EXPECT_EQ(1000, taosArrayGetSize(res));
taosArrayDestroy(res);
}
{
SArray* res = NULL;
double val = 10.0;
Search(index, "test1", TSDB_DATA_TYPE_DOUBLE, &val, sizeof(val), QUERY_LESS_EQUAL, &res);
EXPECT_EQ(2000, taosArrayGetSize(res));
taosArrayDestroy(res);
}
{
SArray* res = NULL;
double val = 10.0;
Search(index, "test1", TSDB_DATA_TYPE_DOUBLE, &val, sizeof(val), QUERY_LESS_THAN, &res);
EXPECT_EQ(1000, taosArrayGetSize(res));
taosArrayDestroy(res);
}
}

View File

@ -53,6 +53,7 @@ TEST(NodesTest, traverseTest) {
EXPECT_EQ(res, DEAL_RES_CONTINUE);
EXPECT_EQ(nodeType(pRoot), QUERY_NODE_VALUE);
EXPECT_EQ(string(((SValueNode*)pRoot)->literal), "18");
nodesDestroyNode(pRoot);
}
int main(int argc, char* argv[]) {

View File

@ -1367,8 +1367,8 @@ EDealRes sclRewriteCaseWhen(SNode **pNode, SScalarCtx *ctx) {
} else {
int32_t type = output.columnData->info.type;
if (IS_VAR_DATA_TYPE(type)) { // todo refactor
res->datum.p = output.columnData->pData;
output.columnData->pData = NULL;
res->datum.p = taosMemoryCalloc(varDataTLen(output.columnData->pData) + 1, sizeof(char)); // add \0 to the end for print json value
memcpy(res->datum.p, output.columnData->pData, varDataTLen(output.columnData->pData));
} else {
nodesSetValueNodeValue(res, output.columnData->pData);
}

View File

@ -455,7 +455,6 @@ int32_t schHandleCallback(void *param, SDataBuf *pMsg, int32_t rspCode) {
tstrerror(rspCode));
SCH_ERR_JRET(schProcessOnCbBegin(&pJob, &pTask, pParam->queryId, pParam->refId, pParam->taskId));
code = schHandleResponseMsg(pJob, pTask, pParam->execId, pMsg, rspCode);
pMsg->pData = NULL;

View File

@ -515,7 +515,7 @@ static int uploadCheckpointToS3(char* id, char* path) {
char object[PATH_MAX] = {0};
snprintf(object, sizeof(object), "%s%s%s", id, TD_DIRSEP, name);
if (s3PutObjectFromFile2(filename, object) != 0) {
if (s3PutObjectFromFile2(filename, object, 0) != 0) {
taosCloseDir(&pDir);
return -1;
}

Some files were not shown because too many files have changed in this diff.